Merge "Add upper-constraints.txt for Functest"
[functest.git] / functest / opnfv_tests / openstack / rally / rally.py
1 #!/usr/bin/env python
2 #
3 # Copyright (c) 2015 All rights reserved
4 # This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10
11 from __future__ import division
12
13 import json
14 import logging
15 import os
16 import pkg_resources
17 import re
18 import subprocess
19 import time
20
21 import iniparse
22 import yaml
23
24 from functest.core import testcase
25 from functest.utils.constants import CONST
26 import functest.utils.openstack_utils as os_utils
27
# Module-level logger shared by every Rally test class in this file.
logger = logging.getLogger(__name__)
29
30
class RallyBase(testcase.OSGCTestCase):
    # Scenario groups that can be selected via test_name; 'all' expands to
    # every entry except 'all' and 'vm' (see _run_tests).
    TESTS = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
             'neutron', 'nova', 'quotas', 'vm', 'all']

    # Glance image used by the scenarios; name, file and disk format come
    # from the functest configuration (CONST).
    GLANCE_IMAGE_NAME = CONST.__getattribute__('openstack_image_name')
    GLANCE_IMAGE_FILENAME = CONST.__getattribute__('openstack_image_file_name')
    GLANCE_IMAGE_PATH = os.path.join(
        CONST.__getattribute__('dir_functest_images'),
        GLANCE_IMAGE_FILENAME)
    GLANCE_IMAGE_FORMAT = CONST.__getattribute__('openstack_image_disk_format')
    FLAVOR_NAME = "m1.tiny"

    # Locations of the rally scenario/template/support files shipped inside
    # the functest package.
    RALLY_DIR = pkg_resources.resource_filename(
        'functest', 'opnfv_tests/openstack/rally')
    RALLY_SCENARIO_DIR = pkg_resources.resource_filename(
        'functest', 'opnfv_tests/openstack/rally/scenario')
    TEMPLATE_DIR = pkg_resources.resource_filename(
        'functest', 'opnfv_tests/openstack/rally/scenario/templates')
    SUPPORT_DIR = pkg_resources.resource_filename(
        'functest', 'opnfv_tests/openstack/rally/scenario/support')

    # Default rally task tuning knobs, forwarded via _build_task_args.
    USERS_AMOUNT = 2
    TENANTS_AMOUNT = 3
    ITERATIONS_AMOUNT = 10
    CONCURRENCY = 4

    # Result and working directories; TEMPEST_CONF_FILE is only read to
    # detect live-migration support (see live_migration_supported).
    RESULTS_DIR = os.path.join(CONST.__getattribute__('dir_results'), 'rally')
    TEMPEST_CONF_FILE = os.path.join(CONST.__getattribute__('dir_results'),
                                     'tempest/tempest.conf')
    BLACKLIST_FILE = os.path.join(RALLY_DIR, "blacklist.txt")
    TEMP_DIR = os.path.join(RALLY_DIR, "var")

    # Names of the OpenStack resources created for the run.
    CINDER_VOLUME_TYPE_NAME = "volume_test"
    RALLY_PRIVATE_NET_NAME = CONST.__getattribute__('rally_network_name')
    RALLY_PRIVATE_SUBNET_NAME = CONST.__getattribute__('rally_subnet_name')
    RALLY_PRIVATE_SUBNET_CIDR = CONST.__getattribute__('rally_subnet_cidr')
    RALLY_ROUTER_NAME = CONST.__getattribute__('rally_router_name')
65
66     def __init__(self, **kwargs):
67         super(RallyBase, self).__init__(**kwargs)
68         self.mode = ''
69         self.summary = []
70         self.scenario_dir = ''
71         self.nova_client = os_utils.get_nova_client()
72         self.neutron_client = os_utils.get_neutron_client()
73         self.cinder_client = os_utils.get_cinder_client()
74         self.network_dict = {}
75         self.volume_type = None
76         self.smoke = None
77
78     def _build_task_args(self, test_file_name):
79         task_args = {'service_list': [test_file_name]}
80         task_args['image_name'] = self.GLANCE_IMAGE_NAME
81         task_args['flavor_name'] = self.FLAVOR_NAME
82         task_args['glance_image_location'] = self.GLANCE_IMAGE_PATH
83         task_args['glance_image_format'] = self.GLANCE_IMAGE_FORMAT
84         task_args['tmpl_dir'] = self.TEMPLATE_DIR
85         task_args['sup_dir'] = self.SUPPORT_DIR
86         task_args['users_amount'] = self.USERS_AMOUNT
87         task_args['tenants_amount'] = self.TENANTS_AMOUNT
88         task_args['use_existing_users'] = False
89         task_args['iterations'] = self.ITERATIONS_AMOUNT
90         task_args['concurrency'] = self.CONCURRENCY
91         task_args['smoke'] = self.smoke
92
93         ext_net = os_utils.get_external_net(self.neutron_client)
94         if ext_net:
95             task_args['floating_network'] = str(ext_net)
96         else:
97             task_args['floating_network'] = ''
98
99         net_id = self.network_dict['net_id']
100         if net_id:
101             task_args['netid'] = str(net_id)
102         else:
103             task_args['netid'] = ''
104
105         return task_args
106
107     def _prepare_test_list(self, test_name):
108         test_yaml_file_name = 'opnfv-{}.yaml'.format(test_name)
109         scenario_file_name = os.path.join(self.RALLY_SCENARIO_DIR,
110                                           test_yaml_file_name)
111
112         if not os.path.exists(scenario_file_name):
113             scenario_file_name = os.path.join(self.scenario_dir,
114                                               test_yaml_file_name)
115
116             if not os.path.exists(scenario_file_name):
117                 raise Exception("The scenario '%s' does not exist."
118                                 % scenario_file_name)
119
120         logger.debug('Scenario fetched from : {}'.format(scenario_file_name))
121         test_file_name = os.path.join(self.TEMP_DIR, test_yaml_file_name)
122
123         if not os.path.exists(self.TEMP_DIR):
124             os.makedirs(self.TEMP_DIR)
125
126         self.apply_blacklist(scenario_file_name, test_file_name)
127         return test_file_name
128
129     @staticmethod
130     def get_task_id(cmd_raw):
131         """
132         get task id from command rally result
133         :param cmd_raw:
134         :return: task_id as string
135         """
136         taskid_re = re.compile('^Task +(.*): started$')
137         for line in cmd_raw.splitlines(True):
138             line = line.strip()
139             match = taskid_re.match(line)
140             if match:
141                 return match.group(1)
142         return None
143
144     @staticmethod
145     def task_succeed(json_raw):
146         """
147         Parse JSON from rally JSON results
148         :param json_raw:
149         :return: Bool
150         """
151         rally_report = json.loads(json_raw)
152         for report in rally_report:
153             if report is None or report.get('result') is None:
154                 return False
155
156             for result in report.get('result'):
157                 if result is None or len(result.get('error')) > 0:
158                     return False
159
160         return True
161
162     @staticmethod
163     def live_migration_supported():
164         config = iniparse.ConfigParser()
165         if (config.read(RallyBase.TEMPEST_CONF_FILE) and
166                 config.has_section('compute-feature-enabled') and
167                 config.has_option('compute-feature-enabled',
168                                   'live_migration')):
169             return config.getboolean('compute-feature-enabled',
170                                      'live_migration')
171
172         return False
173
174     @staticmethod
175     def get_cmd_output(proc):
176         result = ""
177         while proc.poll() is None:
178             line = proc.stdout.readline()
179             result += line
180         return result
181
182     @staticmethod
183     def excl_scenario():
184         black_tests = []
185         try:
186             with open(RallyBase.BLACKLIST_FILE, 'r') as black_list_file:
187                 black_list_yaml = yaml.safe_load(black_list_file)
188
189             installer_type = CONST.__getattribute__('INSTALLER_TYPE')
190             deploy_scenario = CONST.__getattribute__('DEPLOY_SCENARIO')
191             if (bool(installer_type) and bool(deploy_scenario) and
192                     'scenario' in black_list_yaml.keys()):
193                 for item in black_list_yaml['scenario']:
194                     scenarios = item['scenarios']
195                     installers = item['installers']
196                     in_it = RallyBase.in_iterable_re
197                     if (in_it(deploy_scenario, scenarios) and
198                             in_it(installer_type, installers)):
199                         tests = item['tests']
200                         black_tests.extend(tests)
201         except Exception:
202             logger.debug("Scenario exclusion not applied.")
203
204         return black_tests
205
206     @staticmethod
207     def in_iterable_re(needle, haystack):
208         """
209         Check if given needle is in the iterable haystack, using regex.
210
211         :param needle: string to be matched
212         :param haystack: iterable of strings (optionally regex patterns)
213         :return: True if needle is eqial to any of the elements in haystack,
214                  or if a nonempty regex pattern in haystack is found in needle.
215         """
216
217         # match without regex
218         if needle in haystack:
219             return True
220
221         for pattern in haystack:
222             # match if regex pattern is set and found in the needle
223             if pattern and re.search(pattern, needle) is not None:
224                 return True
225         else:
226             return False
227
228     @staticmethod
229     def excl_func():
230         black_tests = []
231         func_list = []
232
233         try:
234             with open(RallyBase.BLACKLIST_FILE, 'r') as black_list_file:
235                 black_list_yaml = yaml.safe_load(black_list_file)
236
237             if not RallyBase.live_migration_supported():
238                 func_list.append("no_live_migration")
239
240             if 'functionality' in black_list_yaml.keys():
241                 for item in black_list_yaml['functionality']:
242                     functions = item['functions']
243                     for func in func_list:
244                         if func in functions:
245                             tests = item['tests']
246                             black_tests.extend(tests)
247         except Exception:
248             logger.debug("Functionality exclusion not applied.")
249
250         return black_tests
251
252     @staticmethod
253     def apply_blacklist(case_file_name, result_file_name):
254         logger.debug("Applying blacklist...")
255         cases_file = open(case_file_name, 'r')
256         result_file = open(result_file_name, 'w')
257
258         black_tests = list(set(RallyBase.excl_func() +
259                            RallyBase.excl_scenario()))
260
261         if black_tests:
262             logger.debug("Blacklisted tests: " + str(black_tests))
263
264         include = True
265         for cases_line in cases_file:
266             if include:
267                 for black_tests_line in black_tests:
268                     if re.search(black_tests_line,
269                                  cases_line.strip().rstrip(':')):
270                         include = False
271                         break
272                 else:
273                     result_file.write(str(cases_line))
274             else:
275                 if cases_line.isspace():
276                     include = True
277
278         cases_file.close()
279         result_file.close()
280
281     @staticmethod
282     def file_is_empty(file_name):
283         try:
284             if os.stat(file_name).st_size > 0:
285                 return False
286         except:
287             pass
288
289         return True
290
    def _run_task(self, test_name):
        """Run one rally scenario group and save its HTML/JSON reports
        under RESULTS_DIR.

        :param test_name: scenario group name (one of TESTS)
        :raises Exception: when the common task.yaml file is missing
        """
        logger.info('Starting test scenario "{}" ...'.format(test_name))

        task_file = os.path.join(self.RALLY_DIR, 'task.yaml')
        if not os.path.exists(task_file):
            logger.error("Task file '%s' does not exist." % task_file)
            raise Exception("Task file '%s' does not exist." % task_file)

        # Blacklist filtering may leave no tests at all for this group.
        file_name = self._prepare_test_list(test_name)
        if self.file_is_empty(file_name):
            logger.info('No tests for scenario "{}"'.format(test_name))
            return

        cmd_line = ("rally task start --abort-on-sla-failure "
                    "--task {0} "
                    "--task-args \"{1}\""
                    .format(task_file, self._build_task_args(test_name)))
        logger.debug('running command line: {}'.format(cmd_line))

        p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT, shell=True)
        # _get_output() filters the stream and feeds self.summary.
        output = self._get_output(p, test_name)
        task_id = self.get_task_id(output)
        logger.debug('task_id : {}'.format(task_id))

        if task_id is None:
            # No task id in the output: re-run in validation mode to log
            # why rally rejected the task, then give up on this scenario.
            logger.error('Failed to retrieve task_id, validating task...')
            cmd_line = ("rally task validate "
                        "--task {0} "
                        "--task-args \"{1}\""
                        .format(task_file, self._build_task_args(test_name)))
            logger.debug('running command line: {}'.format(cmd_line))
            p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                                 stderr=subprocess.STDOUT, shell=True)
            output = self.get_cmd_output(p)
            logger.error("Task validation result:" + "\n" + output)
            return

        # check for result directory and create it otherwise
        if not os.path.exists(self.RESULTS_DIR):
            logger.debug('{} does not exist, we create it.'
                         .format(self.RESULTS_DIR))
            os.makedirs(self.RESULTS_DIR)

        # write html report file
        report_html_name = 'opnfv-{}.html'.format(test_name)
        report_html_dir = os.path.join(self.RESULTS_DIR, report_html_name)
        cmd_line = "rally task report {} --out {}".format(task_id,
                                                          report_html_dir)

        logger.debug('running command line: {}'.format(cmd_line))
        os.popen(cmd_line)

        # get and save rally operation JSON result
        cmd_line = "rally task results %s" % task_id
        logger.debug('running command line: {}'.format(cmd_line))
        cmd = os.popen(cmd_line)
        json_results = cmd.read()
        report_json_name = 'opnfv-{}.json'.format(test_name)
        report_json_dir = os.path.join(self.RESULTS_DIR, report_json_name)
        with open(report_json_dir, 'w') as f:
            logger.debug('saving json file')
            f.write(json_results)

        """ parse JSON operation result """
        if self.task_succeed(json_results):
            logger.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
        else:
            logger.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")
360
    def _get_output(self, proc, test_name):
        """Stream rally's stdout, keep the human-readable parts and record
        per-scenario statistics into self.summary.

        :param proc: subprocess.Popen running 'rally task start'
        :param test_name: scenario group name stored in the summary entry
        :return: the filtered output as one string
        """
        result = ""
        nb_tests = 0
        overall_duration = 0.0
        success = 0.0
        nb_totals = 0

        # NOTE(review): output still buffered when the child exits may be
        # dropped by this poll()-based loop — confirm before relying on
        # the tail of the stream.
        while proc.poll() is None:
            line = proc.stdout.readline()
            # Echo only table/progress lines into the result string.
            if ("Load duration" in line or
                    "started" in line or
                    "finished" in line or
                    " Preparing" in line or
                    "+-" in line or
                    "|" in line):
                result += line
            elif "test scenario" in line:
                result += "\n" + line
            elif "Full duration" in line:
                result += line + "\n\n"

            # parse output for summary report
            if ("| " in line and
                    "| action" not in line and
                    "| Starting" not in line and
                    "| Completed" not in line and
                    "| ITER" not in line and
                    "|   " not in line and
                    "| total" not in line):
                # Any other table row counts as one executed test.
                nb_tests += 1
            elif "| total" in line:
                # Column 8 of the 'total' row holds the success percentage.
                percentage = ((line.split('|')[8]).strip(' ')).strip('%')
                try:
                    success += float(percentage)
                except ValueError:
                    logger.info('Percentage error: %s, %s' %
                                (percentage, line))
                nb_totals += 1
            elif "Full duration" in line:
                duration = line.split(': ')[1]
                try:
                    overall_duration += float(duration)
                except ValueError:
                    logger.info('Duration error: %s, %s' % (duration, line))

        overall_duration = "{:10.2f}".format(overall_duration)
        if nb_totals == 0:
            success_avg = 0
        else:
            # Average success rate over every 'total' row seen.
            success_avg = "{:0.2f}".format(success / nb_totals)

        scenario_summary = {'test_name': test_name,
                            'overall_duration': overall_duration,
                            'nb_tests': nb_tests,
                            'success': success_avg}
        self.summary.append(scenario_summary)

        logger.debug("\n" + result)

        return result
421
422     def _prepare_env(self):
423         logger.debug('Validating the test name...')
424         if not (self.test_name in self.TESTS):
425             raise Exception("Test name '%s' is invalid" % self.test_name)
426
427         volume_types = os_utils.list_volume_types(self.cinder_client,
428                                                   private=False)
429         if volume_types:
430             logger.debug("Using existing volume type(s)...")
431         else:
432             logger.debug('Creating volume type...')
433             self.volume_type = os_utils.create_volume_type(
434                 self.cinder_client, self.CINDER_VOLUME_TYPE_NAME)
435             if self.volume_type is None:
436                 raise Exception("Failed to create volume type '%s'" %
437                                 self.CINDER_VOLUME_TYPE_NAME)
438             logger.debug("Volume type '%s' is created succesfully." %
439                          self.CINDER_VOLUME_TYPE_NAME)
440
441         logger.debug('Getting or creating image...')
442         self.image_exists, self.image_id = os_utils.get_or_create_image(
443             self.GLANCE_IMAGE_NAME,
444             self.GLANCE_IMAGE_PATH,
445             self.GLANCE_IMAGE_FORMAT)
446         if self.image_id is None:
447             raise Exception("Failed to get or create image '%s'" %
448                             self.GLANCE_IMAGE_NAME)
449
450         logger.debug("Creating network '%s'..." % self.RALLY_PRIVATE_NET_NAME)
451         self.network_dict = os_utils.create_shared_network_full(
452             self.RALLY_PRIVATE_NET_NAME,
453             self.RALLY_PRIVATE_SUBNET_NAME,
454             self.RALLY_ROUTER_NAME,
455             self.RALLY_PRIVATE_SUBNET_CIDR)
456         if self.network_dict is None:
457             raise Exception("Failed to create shared network '%s'" %
458                             self.RALLY_PRIVATE_NET_NAME)
459
460     def _run_tests(self):
461         if self.test_name == 'all':
462             for test in self.TESTS:
463                 if (test == 'all' or test == 'vm'):
464                     continue
465                 self._run_task(test)
466         else:
467             self._run_task(self.test_name)
468
    def _generate_report(self):
        """Render the ASCII summary table from self.summary, compute the
        overall success rate into self.result and fill self.details with
        the reporting payload."""
        report = (
            "\n"
            "                                                              "
            "\n"
            "                     Rally Summary Report\n"
            "\n"
            "+===================+============+===============+===========+"
            "\n"
            "| Module            | Duration   | nb. Test Run  | Success   |"
            "\n"
            "+===================+============+===============+===========+"
            "\n")
        payload = []

        # for each scenario we draw a row for the table
        total_duration = 0.0
        total_nb_tests = 0
        total_success = 0.0
        for s in self.summary:
            name = "{0:<17}".format(s['test_name'])
            duration = float(s['overall_duration'])
            total_duration += duration
            # Rendered as MM:SS in the table row.
            duration = time.strftime("%M:%S", time.gmtime(duration))
            duration = "{0:<10}".format(duration)
            nb_tests = "{0:<13}".format(s['nb_tests'])
            total_nb_tests += int(s['nb_tests'])
            success = "{0:<10}".format(str(s['success']) + '%')
            total_success += float(s['success'])
            report += ("" +
                       "| " + name + " | " + duration + " | " +
                       nb_tests + " | " + success + "|\n" +
                       "+-------------------+------------"
                       "+---------------+-----------+\n")
            payload.append({'module': name,
                            'details': {'duration': s['overall_duration'],
                                        'nb tests': s['nb_tests'],
                                        'success': s['success']}})

        total_duration_str = time.strftime("%H:%M:%S",
                                           time.gmtime(total_duration))
        total_duration_str2 = "{0:<10}".format(total_duration_str)
        total_nb_tests_str = "{0:<13}".format(total_nb_tests)

        # An empty summary (nothing ran) counts as 100% success.
        try:
            self.result = total_success / len(self.summary)
        except ZeroDivisionError:
            self.result = 100

        success_rate = "{:0.2f}".format(self.result)
        success_rate_str = "{0:<10}".format(str(success_rate) + '%')
        report += ("+===================+============"
                   "+===============+===========+")
        report += "\n"
        report += ("| TOTAL:            | " + total_duration_str2 + " | " +
                   total_nb_tests_str + " | " + success_rate_str + "|\n")
        report += ("+===================+============"
                   "+===============+===========+")
        report += "\n"

        logger.info("\n" + report)
        payload.append({'summary': {'duration': total_duration,
                                    'nb tests': total_nb_tests,
                                    'nb success': success_rate}})

        self.details = payload

        logger.info("Rally '%s' success_rate is %s%%"
                    % (self.case_name, success_rate))
538
539     def _clean_up(self):
540         if self.volume_type:
541             logger.debug("Deleting volume type '%s'..." % self.volume_type)
542             os_utils.delete_volume_type(self.cinder_client, self.volume_type)
543
544         if not self.image_exists:
545             logger.debug("Deleting image '%s' with ID '%s'..."
546                          % (self.GLANCE_IMAGE_NAME, self.image_id))
547             if not os_utils.delete_glance_image(self.nova_client,
548                                                 self.image_id):
549                 logger.error("Error deleting the glance image")
550
551     def run(self):
552         self.start_time = time.time()
553         try:
554             self._prepare_env()
555             self._run_tests()
556             self._generate_report()
557             self._clean_up()
558             res = testcase.TestCase.EX_OK
559         except Exception as e:
560             logger.error('Error with run: %s' % e)
561             res = testcase.TestCase.EX_RUN_ERROR
562
563         self.stop_time = time.time()
564         return res
565
566
class RallySanity(RallyBase):
    """Quick Rally run: smoke mode over the 'sanity' scenario set."""

    def __init__(self, **kwargs):
        kwargs.setdefault("case_name", "rally_sanity")
        super(RallySanity, self).__init__(**kwargs)
        self.mode = 'sanity'
        self.test_name = 'all'
        self.smoke = True
        self.scenario_dir = os.path.join(self.RALLY_SCENARIO_DIR, 'sanity')
576
577
class RallyFull(RallyBase):
    """Exhaustive Rally run: every scenario, smoke mode disabled."""

    def __init__(self, **kwargs):
        kwargs.setdefault("case_name", "rally_full")
        super(RallyFull, self).__init__(**kwargs)
        self.mode = 'full'
        self.test_name = 'all'
        self.smoke = False
        self.scenario_dir = os.path.join(self.RALLY_SCENARIO_DIR, 'full')