Remove Rally requests scenario
[functest.git] / functest / opnfv_tests / openstack / rally / rally.py
1 #!/usr/bin/env python
2 #
3 # Copyright (c) 2015 All rights reserved
4 # This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10
11 from __future__ import division
12
13 import json
14 import logging
15 import os
16 import pkg_resources
17 import re
18 import subprocess
19 import time
20
21 import iniparse
22 import yaml
23
24 from functest.core import testcase
25 from functest.utils.constants import CONST
26 import functest.utils.openstack_utils as os_utils
27
28 logger = logging.getLogger(__name__)
29
30
class RallyBase(testcase.OSGCTestCase):
    """Base class of the Rally testcases.

    Holds the shared configuration (image, network, scenario locations)
    and the machinery to run rally scenario groups and report results.
    """

    # Scenario groups accepted as test_name; 'all' expands to every group
    # except 'vm' (see _run_tests).
    TESTS = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
             'neutron', 'nova', 'quotas', 'vm', 'all']
    # Glance image the rally scenarios boot from.
    GLANCE_IMAGE_NAME = CONST.__getattribute__('openstack_image_name')
    GLANCE_IMAGE_FILENAME = CONST.__getattribute__('openstack_image_file_name')
    GLANCE_IMAGE_PATH = os.path.join(
        CONST.__getattribute__('dir_functest_images'),
        GLANCE_IMAGE_FILENAME)
    GLANCE_IMAGE_FORMAT = CONST.__getattribute__('openstack_image_disk_format')
    FLAVOR_NAME = "m1.tiny"

    # Locations of the rally task/scenario resources shipped with functest.
    RALLY_DIR = pkg_resources.resource_filename(
        'functest', 'opnfv_tests/openstack/rally')
    RALLY_SCENARIO_DIR = pkg_resources.resource_filename(
        'functest', 'opnfv_tests/openstack/rally/scenario')
    TEMPLATE_DIR = pkg_resources.resource_filename(
        'functest', 'opnfv_tests/openstack/rally/scenario/templates')
    SUPPORT_DIR = pkg_resources.resource_filename(
        'functest', 'opnfv_tests/openstack/rally/scenario/support')
    # Load settings forwarded to the rally task templates.
    USERS_AMOUNT = 2
    TENANTS_AMOUNT = 3
    ITERATIONS_AMOUNT = 10
    CONCURRENCY = 4
    RESULTS_DIR = os.path.join(CONST.__getattribute__('dir_results'), 'rally')
    # tempest.conf is only read to detect live-migration support
    # (see live_migration_supported).
    TEMPEST_CONF_FILE = os.path.join(CONST.__getattribute__('dir_results'),
                                     'tempest/tempest.conf')
    BLACKLIST_FILE = os.path.join(RALLY_DIR, "blacklist.txt")
    # Working directory for the blacklist-filtered scenario files.
    TEMP_DIR = os.path.join(RALLY_DIR, "var")

    CINDER_VOLUME_TYPE_NAME = "volume_test"
    # Private network/subnet/router created for the scenarios.
    RALLY_PRIVATE_NET_NAME = CONST.__getattribute__('rally_network_name')
    RALLY_PRIVATE_SUBNET_NAME = CONST.__getattribute__('rally_subnet_name')
    RALLY_PRIVATE_SUBNET_CIDR = CONST.__getattribute__('rally_subnet_cidr')
    RALLY_ROUTER_NAME = CONST.__getattribute__('rally_router_name')
65
    def __init__(self, **kwargs):
        """Initialize the testcase and the OpenStack API clients.

        :param kwargs: forwarded to testcase.OSGCTestCase (e.g. case_name)
        """
        super(RallyBase, self).__init__(**kwargs)
        self.mode = ''           # run mode, set by subclasses ('sanity'/'full')
        self.summary = []        # per-scenario stats collected by _get_output
        self.scenario_dir = ''   # subclass-specific scenario directory
        self.nova_client = os_utils.get_nova_client()
        self.neutron_client = os_utils.get_neutron_client()
        self.cinder_client = os_utils.get_cinder_client()
        self.network_dict = {}   # filled by _prepare_env
        self.volume_type = None  # set only when we create the type ourselves
        self.smoke = None        # rally 'smoke' flag, set by subclasses
77
78     def _build_task_args(self, test_file_name):
79         task_args = {'service_list': [test_file_name]}
80         task_args['image_name'] = self.GLANCE_IMAGE_NAME
81         task_args['flavor_name'] = self.FLAVOR_NAME
82         task_args['glance_image_location'] = self.GLANCE_IMAGE_PATH
83         task_args['glance_image_format'] = self.GLANCE_IMAGE_FORMAT
84         task_args['tmpl_dir'] = self.TEMPLATE_DIR
85         task_args['sup_dir'] = self.SUPPORT_DIR
86         task_args['users_amount'] = self.USERS_AMOUNT
87         task_args['tenants_amount'] = self.TENANTS_AMOUNT
88         task_args['use_existing_users'] = False
89         task_args['iterations'] = self.ITERATIONS_AMOUNT
90         task_args['concurrency'] = self.CONCURRENCY
91         task_args['smoke'] = self.smoke
92
93         ext_net = os_utils.get_external_net(self.neutron_client)
94         if ext_net:
95             task_args['floating_network'] = str(ext_net)
96         else:
97             task_args['floating_network'] = ''
98
99         net_id = self.network_dict['net_id']
100         if net_id:
101             task_args['netid'] = str(net_id)
102         else:
103             task_args['netid'] = ''
104
105         return task_args
106
107     def _prepare_test_list(self, test_name):
108         test_yaml_file_name = 'opnfv-{}.yaml'.format(test_name)
109         scenario_file_name = os.path.join(self.RALLY_SCENARIO_DIR,
110                                           test_yaml_file_name)
111
112         if not os.path.exists(scenario_file_name):
113             scenario_file_name = os.path.join(self.scenario_dir,
114                                               test_yaml_file_name)
115
116             if not os.path.exists(scenario_file_name):
117                 raise Exception("The scenario '%s' does not exist."
118                                 % scenario_file_name)
119
120         logger.debug('Scenario fetched from : {}'.format(scenario_file_name))
121         test_file_name = os.path.join(self.TEMP_DIR, test_yaml_file_name)
122
123         if not os.path.exists(self.TEMP_DIR):
124             os.makedirs(self.TEMP_DIR)
125
126         self.apply_blacklist(scenario_file_name, test_file_name)
127         return test_file_name
128
129     @staticmethod
130     def get_task_id(cmd_raw):
131         """
132         get task id from command rally result
133         :param cmd_raw:
134         :return: task_id as string
135         """
136         taskid_re = re.compile('^Task +(.*): started$')
137         for line in cmd_raw.splitlines(True):
138             line = line.strip()
139             match = taskid_re.match(line)
140             if match:
141                 return match.group(1)
142         return None
143
144     @staticmethod
145     def task_succeed(json_raw):
146         """
147         Parse JSON from rally JSON results
148         :param json_raw:
149         :return: Bool
150         """
151         rally_report = json.loads(json_raw)
152         for report in rally_report:
153             if report is None or report.get('result') is None:
154                 return False
155
156             for result in report.get('result'):
157                 if result is None or len(result.get('error')) > 0:
158                     return False
159
160         return True
161
162     @staticmethod
163     def live_migration_supported():
164         config = iniparse.ConfigParser()
165         if (config.read(RallyBase.TEMPEST_CONF_FILE) and
166                 config.has_section('compute-feature-enabled') and
167                 config.has_option('compute-feature-enabled',
168                                   'live_migration')):
169             return config.getboolean('compute-feature-enabled',
170                                      'live_migration')
171
172         return False
173
174     @staticmethod
175     def get_cmd_output(proc):
176         result = ""
177         while proc.poll() is None:
178             line = proc.stdout.readline()
179             result += line
180         return result
181
182     @staticmethod
183     def excl_scenario():
184         black_tests = []
185         try:
186             with open(RallyBase.BLACKLIST_FILE, 'r') as black_list_file:
187                 black_list_yaml = yaml.safe_load(black_list_file)
188
189             installer_type = CONST.__getattribute__('INSTALLER_TYPE')
190             deploy_scenario = CONST.__getattribute__('DEPLOY_SCENARIO')
191             if (bool(installer_type) * bool(deploy_scenario)):
192                 if 'scenario' in black_list_yaml.keys():
193                     for item in black_list_yaml['scenario']:
194                         scenarios = item['scenarios']
195                         installers = item['installers']
196                         if (deploy_scenario in scenarios and
197                                 installer_type in installers):
198                             tests = item['tests']
199                             black_tests.extend(tests)
200         except Exception:
201             logger.debug("Scenario exclusion not applied.")
202
203         return black_tests
204
205     @staticmethod
206     def excl_func():
207         black_tests = []
208         func_list = []
209
210         try:
211             with open(RallyBase.BLACKLIST_FILE, 'r') as black_list_file:
212                 black_list_yaml = yaml.safe_load(black_list_file)
213
214             if not RallyBase.live_migration_supported():
215                 func_list.append("no_live_migration")
216
217             if 'functionality' in black_list_yaml.keys():
218                 for item in black_list_yaml['functionality']:
219                     functions = item['functions']
220                     for func in func_list:
221                         if func in functions:
222                             tests = item['tests']
223                             black_tests.extend(tests)
224         except Exception:
225             logger.debug("Functionality exclusion not applied.")
226
227         return black_tests
228
229     @staticmethod
230     def apply_blacklist(case_file_name, result_file_name):
231         logger.debug("Applying blacklist...")
232         cases_file = open(case_file_name, 'r')
233         result_file = open(result_file_name, 'w')
234
235         black_tests = list(set(RallyBase.excl_func() +
236                            RallyBase.excl_scenario()))
237
238         include = True
239         for cases_line in cases_file:
240             if include:
241                 for black_tests_line in black_tests:
242                     if re.search(black_tests_line,
243                                  cases_line.strip().rstrip(':')):
244                         include = False
245                         break
246                 else:
247                     result_file.write(str(cases_line))
248             else:
249                 if cases_line.isspace():
250                     include = True
251
252         cases_file.close()
253         result_file.close()
254
255     @staticmethod
256     def file_is_empty(file_name):
257         try:
258             if os.stat(file_name).st_size > 0:
259                 return False
260         except:
261             pass
262
263         return True
264
    def _run_task(self, test_name):
        """Run one rally scenario group and save its HTML/JSON reports.

        Starts `rally task start` for test_name, parses its output for the
        task id, then dumps the reports under RESULTS_DIR. When no task id
        can be found, the task is re-run through `rally task validate` so
        the validation error shows up in the logs.

        :param test_name: scenario group name (e.g. 'nova')
        :raises Exception: when the top-level task.yaml file is missing
        """
        logger.info('Starting test scenario "{}" ...'.format(test_name))

        task_file = os.path.join(self.RALLY_DIR, 'task.yaml')
        if not os.path.exists(task_file):
            logger.error("Task file '%s' does not exist." % task_file)
            raise Exception("Task file '%s' does not exist." % task_file)

        file_name = self._prepare_test_list(test_name)
        # everything for this group may have been blacklisted
        if self.file_is_empty(file_name):
            logger.info('No tests for scenario "{}"'.format(test_name))
            return

        cmd_line = ("rally task start --abort-on-sla-failure "
                    "--task {0} "
                    "--task-args \"{1}\""
                    .format(task_file, self._build_task_args(test_name)))
        logger.debug('running command line: {}'.format(cmd_line))

        # shell=True is needed because cmd_line is a single string; it is
        # built entirely from internal configuration, not user input
        p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT, shell=True)
        output = self._get_output(p, test_name)
        task_id = self.get_task_id(output)
        logger.debug('task_id : {}'.format(task_id))

        if task_id is None:
            # no task was started: validate the task to surface the reason
            logger.error('Failed to retrieve task_id, validating task...')
            cmd_line = ("rally task validate "
                        "--task {0} "
                        "--task-args \"{1}\""
                        .format(task_file, self._build_task_args(test_name)))
            logger.debug('running command line: {}'.format(cmd_line))
            p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                                 stderr=subprocess.STDOUT, shell=True)
            output = self.get_cmd_output(p)
            logger.error("Task validation result:" + "\n" + output)
            return

        # check for result directory and create it otherwise
        if not os.path.exists(self.RESULTS_DIR):
            logger.debug('{} does not exist, we create it.'
                         .format(self.RESULTS_DIR))
            os.makedirs(self.RESULTS_DIR)

        # write html report file (fire-and-forget: output is not checked)
        report_html_name = 'opnfv-{}.html'.format(test_name)
        report_html_dir = os.path.join(self.RESULTS_DIR, report_html_name)
        cmd_line = "rally task report {} --out {}".format(task_id,
                                                          report_html_dir)

        logger.debug('running command line: {}'.format(cmd_line))
        os.popen(cmd_line)

        # get and save rally operation JSON result
        cmd_line = "rally task results %s" % task_id
        logger.debug('running command line: {}'.format(cmd_line))
        cmd = os.popen(cmd_line)
        json_results = cmd.read()
        report_json_name = 'opnfv-{}.json'.format(test_name)
        report_json_dir = os.path.join(self.RESULTS_DIR, report_json_name)
        with open(report_json_dir, 'w') as f:
            logger.debug('saving json file')
            f.write(json_results)

        """ parse JSON operation result """
        if self.task_succeed(json_results):
            logger.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
        else:
            logger.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")
334
    def _get_output(self, proc, test_name):
        """Stream the rally process output and build a per-scenario summary.

        Filters the live output down to the interesting lines and, in the
        same pass, counts executed tests, accumulates durations and success
        percentages; the resulting stats dict is appended to self.summary
        for _generate_report.

        :param proc: subprocess.Popen running the rally task
        :param test_name: scenario group currently executed
        :return: filtered rally output as a single string
        """
        result = ""
        nb_tests = 0
        overall_duration = 0.0
        success = 0.0
        nb_totals = 0

        while proc.poll() is None:
            line = proc.stdout.readline()
            # keep only lines worth logging: progress markers and tables
            if ("Load duration" in line or
                    "started" in line or
                    "finished" in line or
                    " Preparing" in line or
                    "+-" in line or
                    "|" in line):
                result += line
            elif "test scenario" in line:
                result += "\n" + line
            elif "Full duration" in line:
                result += line + "\n\n"

            # parse output for summary report
            # a table row that is neither a header nor a progress/total row
            # corresponds to one executed test
            if ("| " in line and
                    "| action" not in line and
                    "| Starting" not in line and
                    "| Completed" not in line and
                    "| ITER" not in line and
                    "|   " not in line and
                    "| total" not in line):
                nb_tests += 1
            elif "| total" in line:
                # 8th column of the 'total' row holds the success percentage
                percentage = ((line.split('|')[8]).strip(' ')).strip('%')
                try:
                    success += float(percentage)
                except ValueError:
                    logger.info('Percentage error: %s, %s' %
                                (percentage, line))
                nb_totals += 1
            elif "Full duration" in line:
                duration = line.split(': ')[1]
                try:
                    overall_duration += float(duration)
                except ValueError:
                    logger.info('Duration error: %s, %s' % (duration, line))

        # summary values are stored as pre-formatted strings
        overall_duration = "{:10.2f}".format(overall_duration)
        if nb_totals == 0:
            success_avg = 0
        else:
            # average success rate over all 'total' rows seen
            success_avg = "{:0.2f}".format(success / nb_totals)

        scenario_summary = {'test_name': test_name,
                            'overall_duration': overall_duration,
                            'nb_tests': nb_tests,
                            'success': success_avg}
        self.summary.append(scenario_summary)

        logger.debug("\n" + result)

        return result
395
396     def _prepare_env(self):
397         logger.debug('Validating the test name...')
398         if not (self.test_name in self.TESTS):
399             raise Exception("Test name '%s' is invalid" % self.test_name)
400
401         volume_types = os_utils.list_volume_types(self.cinder_client,
402                                                   private=False)
403         if volume_types:
404             logger.debug("Using existing volume type(s)...")
405         else:
406             logger.debug('Creating volume type...')
407             self.volume_type = os_utils.create_volume_type(
408                 self.cinder_client, self.CINDER_VOLUME_TYPE_NAME)
409             if self.volume_type is None:
410                 raise Exception("Failed to create volume type '%s'" %
411                                 self.CINDER_VOLUME_TYPE_NAME)
412             logger.debug("Volume type '%s' is created succesfully." %
413                          self.CINDER_VOLUME_TYPE_NAME)
414
415         logger.debug('Getting or creating image...')
416         self.image_exists, self.image_id = os_utils.get_or_create_image(
417             self.GLANCE_IMAGE_NAME,
418             self.GLANCE_IMAGE_PATH,
419             self.GLANCE_IMAGE_FORMAT)
420         if self.image_id is None:
421             raise Exception("Failed to get or create image '%s'" %
422                             self.GLANCE_IMAGE_NAME)
423
424         logger.debug("Creating network '%s'..." % self.RALLY_PRIVATE_NET_NAME)
425         self.network_dict = os_utils.create_shared_network_full(
426             self.RALLY_PRIVATE_NET_NAME,
427             self.RALLY_PRIVATE_SUBNET_NAME,
428             self.RALLY_ROUTER_NAME,
429             self.RALLY_PRIVATE_SUBNET_CIDR)
430         if self.network_dict is None:
431             raise Exception("Failed to create shared network '%s'" %
432                             self.RALLY_PRIVATE_NET_NAME)
433
434     def _run_tests(self):
435         if self.test_name == 'all':
436             for test in self.TESTS:
437                 if (test == 'all' or test == 'vm'):
438                     continue
439                 self._run_task(test)
440         else:
441             self._run_task(self.test_name)
442
    def _generate_report(self):
        """Build the Rally summary table from self.summary.

        Logs an ASCII table with per-scenario duration, test count and
        success rate, stores the machine-readable payload in self.details
        and the overall success rate in self.result (percentage, averaged
        over scenarios; 100 when nothing ran).
        """
        report = (
            "\n"
            "                                                              "
            "\n"
            "                     Rally Summary Report\n"
            "\n"
            "+===================+============+===============+===========+"
            "\n"
            "| Module            | Duration   | nb. Test Run  | Success   |"
            "\n"
            "+===================+============+===============+===========+"
            "\n")
        payload = []

        # for each scenario we draw a row for the table
        total_duration = 0.0
        total_nb_tests = 0
        total_success = 0.0
        for s in self.summary:
            name = "{0:<17}".format(s['test_name'])
            duration = float(s['overall_duration'])
            total_duration += duration
            duration = time.strftime("%M:%S", time.gmtime(duration))
            duration = "{0:<10}".format(duration)
            nb_tests = "{0:<13}".format(s['nb_tests'])
            total_nb_tests += int(s['nb_tests'])
            success = "{0:<10}".format(str(s['success']) + '%')
            total_success += float(s['success'])
            report += ("" +
                       "| " + name + " | " + duration + " | " +
                       nb_tests + " | " + success + "|\n" +
                       "+-------------------+------------"
                       "+---------------+-----------+\n")
            payload.append({'module': name,
                            'details': {'duration': s['overall_duration'],
                                        'nb tests': s['nb_tests'],
                                        'success': s['success']}})

        total_duration_str = time.strftime("%H:%M:%S",
                                           time.gmtime(total_duration))
        total_duration_str2 = "{0:<10}".format(total_duration_str)
        total_nb_tests_str = "{0:<13}".format(total_nb_tests)

        # empty summary (nothing ran) counts as full success
        try:
            self.result = total_success / len(self.summary)
        except ZeroDivisionError:
            self.result = 100

        success_rate = "{:0.2f}".format(self.result)
        success_rate_str = "{0:<10}".format(str(success_rate) + '%')
        report += ("+===================+============"
                   "+===============+===========+")
        report += "\n"
        report += ("| TOTAL:            | " + total_duration_str2 + " | " +
                   total_nb_tests_str + " | " + success_rate_str + "|\n")
        report += ("+===================+============"
                   "+===============+===========+")
        report += "\n"

        logger.info("\n" + report)
        payload.append({'summary': {'duration': total_duration,
                                    'nb tests': total_nb_tests,
                                    'nb success': success_rate}})

        self.details = payload

        logger.info("Rally '%s' success_rate is %s%%"
                    % (self.case_name, success_rate))
512
    def _clean_up(self):
        """Delete the resources created by _prepare_env.

        Removes the volume type (only if this run created it) and the
        glance image (only if it did not pre-exist).
        """
        if self.volume_type:
            logger.debug("Deleting volume type '%s'..." % self.volume_type)
            os_utils.delete_volume_type(self.cinder_client, self.volume_type)

        if not self.image_exists:
            logger.debug("Deleting image '%s' with ID '%s'..."
                         % (self.GLANCE_IMAGE_NAME, self.image_id))
            # NOTE(review): the nova client is passed here although the
            # helper name says 'glance' - confirm os_utils.delete_glance_image
            # really expects a nova client.
            if not os_utils.delete_glance_image(self.nova_client,
                                                self.image_id):
                logger.error("Error deleting the glance image")
524
525     def run(self):
526         self.start_time = time.time()
527         try:
528             self._prepare_env()
529             self._run_tests()
530             self._generate_report()
531             self._clean_up()
532             res = testcase.TestCase.EX_OK
533         except Exception as e:
534             logger.error('Error with run: %s' % e)
535             res = testcase.TestCase.EX_RUN_ERROR
536
537         self.stop_time = time.time()
538         return res
539
540
class RallySanity(RallyBase):
    """Reduced Rally suite: every group run once in smoke mode against the
    'sanity' scenario files."""

    def __init__(self, **kwargs):
        kwargs.setdefault("case_name", "rally_sanity")
        super(RallySanity, self).__init__(**kwargs)
        self.mode = 'sanity'
        self.test_name = 'all'
        self.smoke = True
        self.scenario_dir = os.path.join(self.RALLY_SCENARIO_DIR, 'sanity')
550
551
class RallyFull(RallyBase):
    """Complete Rally suite: every group run without the smoke restriction
    against the 'full' scenario files."""

    def __init__(self, **kwargs):
        kwargs.setdefault("case_name", "rally_full")
        super(RallyFull, self).__init__(**kwargs)
        self.mode = 'full'
        self.test_name = 'all'
        self.smoke = False
        self.scenario_dir = os.path.join(self.RALLY_SCENARIO_DIR, 'full')