Merge "Check a rule existence for a specific security group"
[functest.git] / functest / opnfv_tests / openstack / rally / rally.py
1 #!/usr/bin/python
2 #
3 # Copyright (c) 2015 All rights reserved
4 # This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10
11 import json
12 import os
13 import re
14 import subprocess
15 import time
16
17 import iniparse
18 import yaml
19
20 from functest.core import testcase
21 from functest.utils.constants import CONST
22 import functest.utils.functest_logger as ft_logger
23 import functest.utils.functest_utils as ft_utils
24 import functest.utils.openstack_utils as os_utils
25
26 logger = ft_logger.Logger('Rally').getLogger()
27
28
class RallyBase(testcase.TestCase):
    """Base class driving Rally benchmark scenarios against OpenStack.

    Subclasses select a mode ('sanity'/'full') and scenario directory;
    this class prepares the environment (volume type, glance image,
    shared network), runs the selected Rally tasks, collects HTML/JSON
    reports and produces a summary success-rate verdict.
    """

    TESTS = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
             'neutron', 'nova', 'quotas', 'requests', 'vm', 'all']
    GLANCE_IMAGE_NAME = CONST.openstack_image_name
    GLANCE_IMAGE_FILENAME = CONST.openstack_image_file_name
    GLANCE_IMAGE_PATH = os.path.join(CONST.dir_functest_data,
                                     GLANCE_IMAGE_FILENAME)
    GLANCE_IMAGE_FORMAT = CONST.openstack_image_disk_format
    FLAVOR_NAME = "m1.tiny"

    RALLY_DIR = os.path.join(CONST.dir_repo_functest, CONST.dir_rally)
    RALLY_SCENARIO_DIR = os.path.join(RALLY_DIR, "scenario")
    TEMPLATE_DIR = os.path.join(RALLY_SCENARIO_DIR, "templates")
    SUPPORT_DIR = os.path.join(RALLY_SCENARIO_DIR, "support")
    USERS_AMOUNT = 2
    TENANTS_AMOUNT = 3
    ITERATIONS_AMOUNT = 10
    CONCURRENCY = 4
    RESULTS_DIR = os.path.join(CONST.dir_results, 'rally')
    TEMPEST_CONF_FILE = os.path.join(CONST.dir_results,
                                     'tempest/tempest.conf')
    BLACKLIST_FILE = os.path.join(RALLY_DIR, "blacklist.txt")
    TEMP_DIR = os.path.join(RALLY_DIR, "var")

    CINDER_VOLUME_TYPE_NAME = "volume_test"
    RALLY_PRIVATE_NET_NAME = CONST.rally_network_name
    RALLY_PRIVATE_SUBNET_NAME = CONST.rally_subnet_name
    RALLY_PRIVATE_SUBNET_CIDR = CONST.rally_subnet_cidr
    RALLY_ROUTER_NAME = CONST.rally_router_name

    def __init__(self):
        super(RallyBase, self).__init__()
        self.mode = ''
        self.summary = []
        self.scenario_dir = ''
        self.nova_client = os_utils.get_nova_client()
        self.neutron_client = os_utils.get_neutron_client()
        self.cinder_client = os_utils.get_cinder_client()
        self.network_dict = {}
        self.volume_type = None
        self.smoke = None

    def _build_task_args(self, test_file_name):
        """Build the task-args dict passed to 'rally task start'.

        :param test_file_name: name of the service/test list entry
        :return: dict of template arguments for the rally task file
        """
        task_args = {'service_list': [test_file_name],
                     'image_name': self.GLANCE_IMAGE_NAME,
                     'flavor_name': self.FLAVOR_NAME,
                     'glance_image_location': self.GLANCE_IMAGE_PATH,
                     'glance_image_format': self.GLANCE_IMAGE_FORMAT,
                     'tmpl_dir': self.TEMPLATE_DIR,
                     'sup_dir': self.SUPPORT_DIR,
                     'users_amount': self.USERS_AMOUNT,
                     'tenants_amount': self.TENANTS_AMOUNT,
                     'use_existing_users': False,
                     'iterations': self.ITERATIONS_AMOUNT,
                     'concurrency': self.CONCURRENCY,
                     'smoke': self.smoke}

        # empty string when no external network / net id is available
        ext_net = os_utils.get_external_net(self.neutron_client)
        task_args['floating_network'] = str(ext_net) if ext_net else ''

        net_id = self.network_dict['net_id']
        task_args['netid'] = str(net_id) if net_id else ''

        # get keystone auth endpoint
        task_args['request_url'] = CONST.OS_AUTH_URL or ''

        return task_args

    def _prepare_test_list(self, test_name):
        """Locate the scenario yaml, apply the blacklist and stage it.

        Looks first in RALLY_SCENARIO_DIR, then in the mode-specific
        scenario_dir; raises if the scenario file cannot be found.

        :param test_name: scenario short name (e.g. 'nova')
        :return: path of the blacklist-filtered test file in TEMP_DIR
        """
        test_yaml_file_name = 'opnfv-{}.yaml'.format(test_name)
        scenario_file_name = os.path.join(self.RALLY_SCENARIO_DIR,
                                          test_yaml_file_name)

        if not os.path.exists(scenario_file_name):
            scenario_file_name = os.path.join(self.scenario_dir,
                                              test_yaml_file_name)

            if not os.path.exists(scenario_file_name):
                raise Exception("The scenario '%s' does not exist."
                                % scenario_file_name)

        logger.debug('Scenario fetched from : {}'.format(scenario_file_name))
        test_file_name = os.path.join(self.TEMP_DIR, test_yaml_file_name)

        if not os.path.exists(self.TEMP_DIR):
            os.makedirs(self.TEMP_DIR)

        self.apply_blacklist(scenario_file_name, test_file_name)
        return test_file_name

    @staticmethod
    def get_task_id(cmd_raw):
        """
        get task id from command rally result
        :param cmd_raw:
        :return: task_id as string, or None if no 'Task ...: started'
                 line is present in the output
        """
        taskid_re = re.compile('^Task +(.*): started$')
        for line in cmd_raw.splitlines(True):
            match = taskid_re.match(line.strip())
            if match:
                return match.group(1)
        return None

    @staticmethod
    def task_succeed(json_raw):
        """
        Parse JSON from rally JSON results
        :param json_raw:
        :return: Bool — False if any scenario report is missing or any
                 of its results carries a non-empty error list
        """
        rally_report = json.loads(json_raw)
        for report in rally_report:
            if report is None or report.get('result') is None:
                return False

            for result in report.get('result'):
                if result is None or len(result.get('error')) > 0:
                    return False

        return True

    @staticmethod
    def live_migration_supported():
        """Return True if tempest.conf advertises live migration support."""
        config = iniparse.ConfigParser()
        if (config.read(RallyBase.TEMPEST_CONF_FILE) and
                config.has_section('compute-feature-enabled') and
                config.has_option('compute-feature-enabled',
                                  'live_migration')):
            return config.getboolean('compute-feature-enabled',
                                     'live_migration')

        return False

    @staticmethod
    def get_cmd_output(proc):
        """Drain a subprocess' stdout until it exits; return the text."""
        result = ""
        while proc.poll() is None:
            result += proc.stdout.readline()
        return result

    @staticmethod
    def excl_scenario():
        """Collect tests blacklisted for the current installer/scenario.

        Best-effort: any problem reading/parsing the blacklist file
        simply yields an empty exclusion list.

        :return: list of blacklisted test name patterns
        """
        black_tests = []
        try:
            with open(RallyBase.BLACKLIST_FILE, 'r') as black_list_file:
                black_list_yaml = yaml.safe_load(black_list_file)

            installer_type = CONST.INSTALLER_TYPE
            deploy_scenario = CONST.DEPLOY_SCENARIO
            # both must be set for scenario-based exclusion to apply
            if installer_type and deploy_scenario:
                if 'scenario' in black_list_yaml:
                    for item in black_list_yaml['scenario']:
                        scenarios = item['scenarios']
                        installers = item['installers']
                        if (deploy_scenario in scenarios and
                                installer_type in installers):
                            black_tests.extend(item['tests'])
        except Exception:
            logger.debug("Scenario exclusion not applied.")

        return black_tests

    @staticmethod
    def excl_func():
        """Collect tests blacklisted by unsupported functionality.

        Currently only checks live migration support; best-effort like
        excl_scenario().

        :return: list of blacklisted test name patterns
        """
        black_tests = []
        func_list = []

        try:
            with open(RallyBase.BLACKLIST_FILE, 'r') as black_list_file:
                black_list_yaml = yaml.safe_load(black_list_file)

            if not RallyBase.live_migration_supported():
                func_list.append("no_live_migration")

            if 'functionality' in black_list_yaml:
                for item in black_list_yaml['functionality']:
                    functions = item['functions']
                    for func in func_list:
                        if func in functions:
                            black_tests.extend(item['tests'])
        except Exception:
            logger.debug("Functionality exclusion not applied.")

        return black_tests

    @staticmethod
    def apply_blacklist(case_file_name, result_file_name):
        """Copy a scenario file, dropping every blacklisted test block.

        A matching test line disables copying until the next blank
        line, which ends the excluded block.

        :param case_file_name: source scenario yaml
        :param result_file_name: filtered output file (overwritten)
        """
        logger.debug("Applying blacklist...")
        black_tests = list(set(RallyBase.excl_func() +
                           RallyBase.excl_scenario()))

        # 'with' guarantees both files are closed even if a regex in
        # the blacklist raises during re.search()
        with open(case_file_name, 'r') as cases_file, \
                open(result_file_name, 'w') as result_file:
            include = True
            for cases_line in cases_file:
                if include:
                    for black_tests_line in black_tests:
                        if re.search(black_tests_line,
                                     cases_line.strip().rstrip(':')):
                            include = False
                            break
                    else:
                        result_file.write(str(cases_line))
                else:
                    if cases_line.isspace():
                        include = True

    @staticmethod
    def file_is_empty(file_name):
        """Return True if the file is missing, unreadable or empty."""
        try:
            return os.stat(file_name).st_size == 0
        except OSError:
            # stat failed (file absent / permission) -> treat as empty
            return True

    def _run_task(self, test_name):
        """Run one Rally scenario and store its HTML/JSON reports.

        Validates the task instead when no task id can be parsed from
        the rally output.

        :param test_name: scenario short name (e.g. 'nova')
        :raises Exception: when the common task.yaml file is missing
        """
        logger.info('Starting test scenario "{}" ...'.format(test_name))

        task_file = os.path.join(self.RALLY_DIR, 'task.yaml')
        if not os.path.exists(task_file):
            logger.error("Task file '%s' does not exist." % task_file)
            raise Exception("Task file '%s' does not exist." % task_file)

        file_name = self._prepare_test_list(test_name)
        if self.file_is_empty(file_name):
            logger.info('No tests for scenario "{}"'.format(test_name))
            return

        cmd_line = ("rally task start --abort-on-sla-failure "
                    "--task {0} "
                    "--task-args \"{1}\""
                    .format(task_file, self._build_task_args(test_name)))
        logger.debug('running command line: {}'.format(cmd_line))

        p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT, shell=True)
        output = self._get_output(p, test_name)
        task_id = self.get_task_id(output)
        logger.debug('task_id : {}'.format(task_id))

        if task_id is None:
            # no id means the task never started; run a validation pass
            # so the reason surfaces in the logs
            logger.error('Failed to retrieve task_id, validating task...')
            cmd_line = ("rally task validate "
                        "--task {0} "
                        "--task-args \"{1}\""
                        .format(task_file, self._build_task_args(test_name)))
            logger.debug('running command line: {}'.format(cmd_line))
            p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                                 stderr=subprocess.STDOUT, shell=True)
            output = self.get_cmd_output(p)
            logger.error("Task validation result:" + "\n" + output)
            return

        # check for result directory and create it otherwise
        if not os.path.exists(self.RESULTS_DIR):
            logger.debug('{} does not exist, we create it.'
                         .format(self.RESULTS_DIR))
            os.makedirs(self.RESULTS_DIR)

        # write html report file
        report_html_name = 'opnfv-{}.html'.format(test_name)
        report_html_dir = os.path.join(self.RESULTS_DIR, report_html_name)
        cmd_line = "rally task report {} --out {}".format(task_id,
                                                          report_html_dir)

        logger.debug('running command line: {}'.format(cmd_line))
        os.popen(cmd_line)

        # get and save rally operation JSON result
        cmd_line = "rally task results %s" % task_id
        logger.debug('running command line: {}'.format(cmd_line))
        cmd = os.popen(cmd_line)
        json_results = cmd.read()
        report_json_name = 'opnfv-{}.json'.format(test_name)
        report_json_dir = os.path.join(self.RESULTS_DIR, report_json_name)
        with open(report_json_dir, 'w') as f:
            logger.debug('saving json file')
            f.write(json_results)

        # parse JSON operation result
        if self.task_succeed(json_results):
            logger.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
        else:
            logger.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")

    def _get_output(self, proc, test_name):
        """Stream rally stdout, filtering the interesting lines.

        Also parses the rally table output to build a per-scenario
        summary entry (test count, success %, duration) appended to
        self.summary.

        :param proc: subprocess.Popen handle running rally
        :param test_name: scenario short name for the summary entry
        :return: the filtered output text
        """
        result = ""
        nb_tests = 0
        overall_duration = 0.0
        success = 0.0
        nb_totals = 0

        while proc.poll() is None:
            line = proc.stdout.readline()
            if ("Load duration" in line or
                    "started" in line or
                    "finished" in line or
                    " Preparing" in line or
                    "+-" in line or
                    "|" in line):
                result += line
            elif "test scenario" in line:
                result += "\n" + line
            elif "Full duration" in line:
                result += line + "\n\n"

            # parse output for summary report
            if ("| " in line and
                    "| action" not in line and
                    "| Starting" not in line and
                    "| Completed" not in line and
                    "| ITER" not in line and
                    "|   " not in line and
                    "| total" not in line):
                nb_tests += 1
            elif "| total" in line:
                # success percentage sits in the 8th '|'-separated column
                percentage = ((line.split('|')[8]).strip(' ')).strip('%')
                try:
                    success += float(percentage)
                except ValueError:
                    logger.info('Percentage error: %s, %s' %
                                (percentage, line))
                nb_totals += 1
            elif "Full duration" in line:
                duration = line.split(': ')[1]
                try:
                    overall_duration += float(duration)
                except ValueError:
                    logger.info('Duration error: %s, %s' % (duration, line))

        overall_duration = "{:10.2f}".format(overall_duration)
        if nb_totals == 0:
            success_avg = 0
        else:
            success_avg = "{:0.2f}".format(success / nb_totals)

        scenario_summary = {'test_name': test_name,
                            'overall_duration': overall_duration,
                            'nb_tests': nb_tests,
                            'success': success_avg}
        self.summary.append(scenario_summary)

        logger.debug("\n" + result)

        return result

    def _prepare_env(self):
        """Create the OpenStack resources the scenarios rely on.

        Ensures a cinder volume type, a glance image and a shared
        network/subnet/router exist.

        :raises Exception: on invalid test name or resource creation
                           failure
        """
        logger.debug('Validating the test name...')
        if self.test_name not in self.TESTS:
            raise Exception("Test name '%s' is invalid" % self.test_name)

        volume_types = os_utils.list_volume_types(self.cinder_client,
                                                  private=False)
        if volume_types:
            logger.debug("Using existing volume type(s)...")
        else:
            logger.debug('Creating volume type...')
            self.volume_type = os_utils.create_volume_type(
                self.cinder_client, self.CINDER_VOLUME_TYPE_NAME)
            if self.volume_type is None:
                raise Exception("Failed to create volume type '%s'" %
                                self.CINDER_VOLUME_TYPE_NAME)
            logger.debug("Volume type '%s' is created succesfully." %
                         self.CINDER_VOLUME_TYPE_NAME)

        logger.debug('Getting or creating image...')
        # image_exists records whether we created it (for _clean_up)
        self.image_exists, self.image_id = os_utils.get_or_create_image(
            self.GLANCE_IMAGE_NAME,
            self.GLANCE_IMAGE_PATH,
            self.GLANCE_IMAGE_FORMAT)
        if self.image_id is None:
            raise Exception("Failed to get or create image '%s'" %
                            self.GLANCE_IMAGE_NAME)

        logger.debug("Creating network '%s'..." % self.RALLY_PRIVATE_NET_NAME)
        self.network_dict = os_utils.create_shared_network_full(
            self.RALLY_PRIVATE_NET_NAME,
            self.RALLY_PRIVATE_SUBNET_NAME,
            self.RALLY_ROUTER_NAME,
            self.RALLY_PRIVATE_SUBNET_CIDR)
        if self.network_dict is None:
            raise Exception("Failed to create shared network '%s'" %
                            self.RALLY_PRIVATE_NET_NAME)

    def _run_tests(self):
        """Run the requested scenario, or all of them (minus 'vm')."""
        if self.test_name == 'all':
            for test in self.TESTS:
                # 'all' is the meta entry; 'vm' is deliberately skipped
                if test in ('all', 'vm'):
                    continue
                self._run_task(test)
        else:
            self._run_task(self.test_name)

    def _generate_report(self):
        """Render the summary table, push payload and set the verdict.

        Computes the average success rate over all scenarios run and
        stores criteria/details for reporting.
        """
        report = (
            "\n"
            "                                                              "
            "\n"
            "                     Rally Summary Report\n"
            "\n"
            "+===================+============+===============+===========+"
            "\n"
            "| Module            | Duration   | nb. Test Run  | Success   |"
            "\n"
            "+===================+============+===============+===========+"
            "\n")
        payload = []

        # for each scenario we draw a row for the table
        total_duration = 0.0
        total_nb_tests = 0
        total_success = 0.0
        for s in self.summary:
            name = "{0:<17}".format(s['test_name'])
            duration = float(s['overall_duration'])
            total_duration += duration
            duration = time.strftime("%M:%S", time.gmtime(duration))
            duration = "{0:<10}".format(duration)
            nb_tests = "{0:<13}".format(s['nb_tests'])
            total_nb_tests += int(s['nb_tests'])
            success = "{0:<10}".format(str(s['success']) + '%')
            total_success += float(s['success'])
            report += ("" +
                       "| " + name + " | " + duration + " | " +
                       nb_tests + " | " + success + "|\n" +
                       "+-------------------+------------"
                       "+---------------+-----------+\n")
            payload.append({'module': name,
                            'details': {'duration': s['overall_duration'],
                                        'nb tests': s['nb_tests'],
                                        'success': s['success']}})

        total_duration_str = time.strftime("%H:%M:%S",
                                           time.gmtime(total_duration))
        total_duration_str2 = "{0:<10}".format(total_duration_str)
        total_nb_tests_str = "{0:<13}".format(total_nb_tests)

        # nothing ran -> report 100% rather than divide by zero
        if len(self.summary):
            success_rate = total_success / len(self.summary)
        else:
            success_rate = 100
        success_rate = "{:0.2f}".format(success_rate)
        success_rate_str = "{0:<10}".format(str(success_rate) + '%')
        report += ("+===================+============"
                   "+===============+===========+")
        report += "\n"
        report += ("| TOTAL:            | " + total_duration_str2 + " | " +
                   total_nb_tests_str + " | " + success_rate_str + "|\n")
        report += ("+===================+============"
                   "+===============+===========+")
        report += "\n"

        logger.info("\n" + report)
        payload.append({'summary': {'duration': total_duration,
                                    'nb tests': total_nb_tests,
                                    'nb success': success_rate}})

        self.criteria = ft_utils.check_success_rate(
            self.case_name, success_rate)
        self.details = payload

        logger.info("Rally '%s' success_rate is %s%%, is marked as %s"
                    % (self.case_name, success_rate, self.criteria))

    def _clean_up(self):
        """Remove the volume type and image created by _prepare_env."""
        if self.volume_type:
            logger.debug("Deleting volume type '%s'..." % self.volume_type)
            os_utils.delete_volume_type(self.cinder_client, self.volume_type)

        # only delete the image if we created it ourselves
        if not self.image_exists:
            logger.debug("Deleting image '%s' with ID '%s'..."
                         % (self.GLANCE_IMAGE_NAME, self.image_id))
            if not os_utils.delete_glance_image(self.nova_client,
                                                self.image_id):
                logger.error("Error deleting the glance image")

    def run(self):
        """Prepare, run, report and clean up; return an EX_* status."""
        self.start_time = time.time()
        try:
            self._prepare_env()
            self._run_tests()
            self._generate_report()
            self._clean_up()
            res = testcase.TestCase.EX_OK
        except Exception as e:
            logger.error('Error with run: %s' % e)
            res = testcase.TestCase.EX_RUN_ERROR

        self.stop_time = time.time()
        return res
536
537
class RallySanity(RallyBase):
    """Rally 'sanity' variant: smoke mode over the reduced scenario set."""

    def __init__(self):
        super(RallySanity, self).__init__()
        # configure the base runner for the quick smoke pass
        self.case_name = 'rally_sanity'
        self.mode = 'sanity'
        self.test_name = 'all'
        self.smoke = True
        self.scenario_dir = os.path.join(self.RALLY_SCENARIO_DIR, 'sanity')
546
547
class RallyFull(RallyBase):
    """Rally 'full' variant: complete (non-smoke) scenario run."""

    def __init__(self):
        super(RallyFull, self).__init__()
        # configure the base runner for the exhaustive pass
        self.case_name = 'rally_full'
        self.mode = 'full'
        self.test_name = 'all'
        self.smoke = False
        self.scenario_dir = os.path.join(self.RALLY_SCENARIO_DIR, 'full')