Dedicated flavors for rally tests
functest/opnfv_tests/openstack/rally/rally.py
#!/usr/bin/env python
#
# Copyright (c) 2015 All rights reserved
# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
#
# http://www.apache.org/licenses/LICENSE-2.0
#

"""Rally testcases implementation."""

from __future__ import division

import json
import logging
import os
import re
import subprocess
import time
import uuid

import iniparse
import pkg_resources
import yaml

from functest.core import testcase
from functest.energy import energy
from functest.opnfv_tests.openstack.snaps import snaps_utils
from functest.utils.constants import CONST

from snaps.openstack.create_flavor import FlavorSettings, OpenStackFlavor
from snaps.openstack.create_image import ImageSettings
from snaps.openstack.create_network import NetworkSettings, SubnetSettings
from snaps.openstack.create_router import RouterSettings
from snaps.openstack.tests import openstack_tests
from snaps.openstack.utils import deploy_utils

LOGGER = logging.getLogger(__name__)


class RallyBase(testcase.TestCase):
    """Base class for Rally testcases implementation."""

    TESTS = ['authenticate', 'glance', 'ceilometer', 'cinder', 'heat',
             'keystone', 'neutron', 'nova', 'quotas', 'vm', 'all']
    GLANCE_IMAGE_NAME = CONST.__getattribute__('openstack_image_name')
    GLANCE_IMAGE_FILENAME = CONST.__getattribute__('openstack_image_file_name')
    GLANCE_IMAGE_PATH = os.path.join(
        CONST.__getattribute__('dir_functest_images'),
        GLANCE_IMAGE_FILENAME)
    GLANCE_IMAGE_FORMAT = CONST.__getattribute__('openstack_image_disk_format')
    GLANCE_IMAGE_USERNAME = CONST.__getattribute__('openstack_image_username')
    GLANCE_IMAGE_EXTRA_PROPERTIES = {}
    if hasattr(CONST, 'openstack_extra_properties'):
        GLANCE_IMAGE_EXTRA_PROPERTIES = CONST.__getattribute__(
            'openstack_extra_properties')
    FLAVOR_NAME = CONST.__getattribute__('rally_flavor_name')
    FLAVOR_ALT_NAME = CONST.__getattribute__('rally_flavor_alt_name')
    FLAVOR_EXTRA_SPECS = None
    if hasattr(CONST, 'flavor_extra_specs'):
        FLAVOR_EXTRA_SPECS = CONST.__getattribute__('flavor_extra_specs')

    RALLY_DIR = pkg_resources.resource_filename(
        'functest', 'opnfv_tests/openstack/rally')
    RALLY_SCENARIO_DIR = pkg_resources.resource_filename(
        'functest', 'opnfv_tests/openstack/rally/scenario')
    TEMPLATE_DIR = pkg_resources.resource_filename(
        'functest', 'opnfv_tests/openstack/rally/scenario/templates')
    SUPPORT_DIR = pkg_resources.resource_filename(
        'functest', 'opnfv_tests/openstack/rally/scenario/support')
    USERS_AMOUNT = 2
    TENANTS_AMOUNT = 3
    ITERATIONS_AMOUNT = 10
    CONCURRENCY = 4
    RESULTS_DIR = os.path.join(CONST.__getattribute__('dir_results'), 'rally')
    TEMPEST_CONF_FILE = os.path.join(CONST.__getattribute__('dir_results'),
                                     'tempest/tempest.conf')
    BLACKLIST_FILE = os.path.join(RALLY_DIR, "blacklist.txt")
    TEMP_DIR = os.path.join(RALLY_DIR, "var")

    RALLY_PRIVATE_NET_NAME = CONST.__getattribute__('rally_network_name')
    RALLY_PRIVATE_SUBNET_NAME = CONST.__getattribute__('rally_subnet_name')
    RALLY_PRIVATE_SUBNET_CIDR = CONST.__getattribute__('rally_subnet_cidr')
    RALLY_ROUTER_NAME = CONST.__getattribute__('rally_router_name')

    def __init__(self, **kwargs):
        """Initialize RallyBase object."""
        super(RallyBase, self).__init__(**kwargs)
        if 'os_creds' in kwargs:
            self.os_creds = kwargs['os_creds']
        else:
            creds_override = None
            if hasattr(CONST, 'snaps_os_creds_override'):
                creds_override = CONST.__getattribute__(
                    'snaps_os_creds_override')

            self.os_creds = openstack_tests.get_credentials(
                os_env_file=CONST.__getattribute__('openstack_creds'),
                overrides=creds_override)

        self.guid = ''
        if CONST.__getattribute__('rally_unique_names'):
            self.guid = '-' + str(uuid.uuid4())

        self.creators = []
        self.mode = ''
        self.summary = []
        self.scenario_dir = ''
        self.image_name = None
        self.ext_net_name = None
        self.priv_net_id = None
        self.flavor_name = None
        self.flavor_alt_name = None
        self.smoke = None
        self.test_name = None
        self.start_time = None
        self.result = None
        self.details = None

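    # The arguments below are injected through "rally task start --task-args"
    # and substituted into the task templates (task.yaml and the opnfv-*.yaml
    # scenario files).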
    def _build_task_args(self, test_file_name):
        task_args = {'service_list': [test_file_name]}
        task_args['image_name'] = self.image_name
        task_args['flavor_name'] = self.flavor_name
        task_args['flavor_alt_name'] = self.flavor_alt_name
        task_args['glance_image_location'] = self.GLANCE_IMAGE_PATH
        task_args['glance_image_format'] = self.GLANCE_IMAGE_FORMAT
        task_args['tmpl_dir'] = self.TEMPLATE_DIR
        task_args['sup_dir'] = self.SUPPORT_DIR
        task_args['users_amount'] = self.USERS_AMOUNT
        task_args['tenants_amount'] = self.TENANTS_AMOUNT
        task_args['use_existing_users'] = False
        task_args['iterations'] = self.ITERATIONS_AMOUNT
        task_args['concurrency'] = self.CONCURRENCY
        task_args['smoke'] = self.smoke

        ext_net = self.ext_net_name
        if ext_net:
            task_args['floating_network'] = str(ext_net)
        else:
            task_args['floating_network'] = ''

        net_id = self.priv_net_id
        if net_id:
            task_args['netid'] = str(net_id)
        else:
            task_args['netid'] = ''

        return task_args

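    # Select the scenario file for a test: look in the shared scenario
    # directory first, then in the mode-specific one (sanity/full), and
    # write a blacklist-filtered copy of it under TEMP_DIR.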
    def _prepare_test_list(self, test_name):
        test_yaml_file_name = 'opnfv-{}.yaml'.format(test_name)
        scenario_file_name = os.path.join(self.RALLY_SCENARIO_DIR,
                                          test_yaml_file_name)

        if not os.path.exists(scenario_file_name):
            scenario_file_name = os.path.join(self.scenario_dir,
                                              test_yaml_file_name)

            if not os.path.exists(scenario_file_name):
                raise Exception("The scenario '%s' does not exist."
                                % scenario_file_name)

        LOGGER.debug('Scenario fetched from: %s', scenario_file_name)
        test_file_name = os.path.join(self.TEMP_DIR, test_yaml_file_name)

        if not os.path.exists(self.TEMP_DIR):
            os.makedirs(self.TEMP_DIR)

        self.apply_blacklist(scenario_file_name, test_file_name)
        return test_file_name

    @staticmethod
    def get_task_id(cmd_raw):
        """
        Get the task id from the 'rally task start' output.

        :param cmd_raw: raw command output
        :return: task_id as string
        """
        taskid_re = re.compile('^Task +(.*): started$')
        for line in cmd_raw.splitlines(True):
            line = line.strip()
            match = taskid_re.match(line)
            if match:
                return match.group(1)
        return None

    @staticmethod
    def task_succeed(json_raw):
        """
        Parse the JSON results of a rally task.

        :param json_raw: raw JSON results
        :return: True if no scenario iteration reported an error
        """
        rally_report = json.loads(json_raw)
        for report in rally_report:
            if report is None or report.get('result') is None:
                return False

            for result in report.get('result'):
                if result is None or len(result.get('error')) > 0:
                    return False

        return True

    @staticmethod
    def live_migration_supported():
        """Determine if live migration is supported."""
        config = iniparse.ConfigParser()
        if (config.read(RallyBase.TEMPEST_CONF_FILE) and
                config.has_section('compute-feature-enabled') and
                config.has_option('compute-feature-enabled',
                                  'live_migration')):
            return config.getboolean('compute-feature-enabled',
                                     'live_migration')

        return False

    @staticmethod
    def get_cmd_output(proc):
        """Get command stdout."""
        result = ""
        while proc.poll() is None:
            line = proc.stdout.readline()
            result += line
        return result

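    # blacklist.txt can exclude tests either per (installer, deploy scenario)
    # pair or per unsupported functionality (e.g. live migration); the
    # excl_* helpers below return the matching test names.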
    @staticmethod
    def excl_scenario():
        """Exclude scenario."""
        black_tests = []
        try:
            with open(RallyBase.BLACKLIST_FILE, 'r') as black_list_file:
                black_list_yaml = yaml.safe_load(black_list_file)

            installer_type = CONST.__getattribute__('INSTALLER_TYPE')
            deploy_scenario = CONST.__getattribute__('DEPLOY_SCENARIO')
            if (bool(installer_type) and bool(deploy_scenario) and
                    'scenario' in black_list_yaml.keys()):
                for item in black_list_yaml['scenario']:
                    scenarios = item['scenarios']
                    installers = item['installers']
                    in_it = RallyBase.in_iterable_re
                    if (in_it(deploy_scenario, scenarios) and
                            in_it(installer_type, installers)):
                        tests = item['tests']
                        black_tests.extend(tests)
        except Exception:  # pylint: disable=broad-except
            LOGGER.debug("Scenario exclusion not applied.")

        return black_tests

    @staticmethod
    def in_iterable_re(needle, haystack):
        """
        Check if a given needle is in the iterable haystack, using regex.

        :param needle: string to be matched
        :param haystack: iterable of strings (optionally regex patterns)
        :return: True if needle is equal to any of the elements in haystack,
                 or if a nonempty regex pattern in haystack is found in needle.
        """
        # match without regex
        if needle in haystack:
            return True

        for pattern in haystack:
            # match if regex pattern is set and found in the needle
            if pattern and re.search(pattern, needle) is not None:
                return True

        return False

    @staticmethod
    def excl_func():
        """Exclude functionalities."""
        black_tests = []
        func_list = []

        try:
            with open(RallyBase.BLACKLIST_FILE, 'r') as black_list_file:
                black_list_yaml = yaml.safe_load(black_list_file)

            if not RallyBase.live_migration_supported():
                func_list.append("no_live_migration")

            if 'functionality' in black_list_yaml.keys():
                for item in black_list_yaml['functionality']:
                    functions = item['functions']
                    for func in func_list:
                        if func in functions:
                            tests = item['tests']
                            black_tests.extend(tests)
        except Exception:  # pylint: disable=broad-except
            LOGGER.debug("Functionality exclusion not applied.")

        return black_tests

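    # Copy the scenario file, skipping every YAML block whose first line
    # matches a blacklisted test; a skipped block ends at the next blank
    # line.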
    @staticmethod
    def apply_blacklist(case_file_name, result_file_name):
        """Apply blacklist."""
        LOGGER.debug("Applying blacklist...")
        black_tests = list(set(RallyBase.excl_func() +
                               RallyBase.excl_scenario()))
        if black_tests:
            LOGGER.debug("Blacklisted tests: %s", black_tests)

        include = True
        with open(case_file_name, 'r') as cases_file, \
                open(result_file_name, 'w') as result_file:
            for cases_line in cases_file:
                if include:
                    for black_tests_line in black_tests:
                        if re.search(black_tests_line,
                                     cases_line.strip().rstrip(':')):
                            include = False
                            break
                    else:
                        result_file.write(str(cases_line))
                else:
                    if cases_line.isspace():
                        include = True

    @staticmethod
    def file_is_empty(file_name):
        """Determine if a file is empty."""
        try:
            if os.stat(file_name).st_size > 0:
                return False
        except Exception:  # pylint: disable=broad-except
            pass

        return True

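    # Run a single scenario: start the rally task, extract its task id from
    # the output, then export the HTML and JSON reports under RESULTS_DIR.
    # If no task id can be found, the task is re-validated to log the cause.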
    def _run_task(self, test_name):
        """Run a task."""
        LOGGER.info('Starting test scenario "%s" ...', test_name)

        task_file = os.path.join(self.RALLY_DIR, 'task.yaml')
        if not os.path.exists(task_file):
            LOGGER.error("Task file '%s' does not exist.", task_file)
            raise Exception("Task file '%s' does not exist." % task_file)

        file_name = self._prepare_test_list(test_name)
        if self.file_is_empty(file_name):
            LOGGER.info('No tests for scenario "%s"', test_name)
            return

        cmd_line = ("rally task start --abort-on-sla-failure "
                    "--task {0} "
                    "--task-args \"{1}\""
                    .format(task_file, self._build_task_args(test_name)))
        LOGGER.debug('running command line: %s', cmd_line)

        proc = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT, shell=True)
        output = self._get_output(proc, test_name)
        task_id = self.get_task_id(output)
        LOGGER.debug('task_id: %s', task_id)

        if task_id is None:
            LOGGER.error('Failed to retrieve task_id, validating task...')
            cmd_line = ("rally task validate "
                        "--task {0} "
                        "--task-args \"{1}\""
                        .format(task_file, self._build_task_args(test_name)))
            LOGGER.debug('running command line: %s', cmd_line)
            proc = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT, shell=True)
            output = self.get_cmd_output(proc)
            LOGGER.error("Task validation result:\n%s", output)
            return

        # check for the result directory and create it otherwise
        if not os.path.exists(self.RESULTS_DIR):
            LOGGER.debug('%s does not exist, we create it.',
                         self.RESULTS_DIR)
            os.makedirs(self.RESULTS_DIR)

        # write the HTML report file
        report_html_name = 'opnfv-{}.html'.format(test_name)
        report_html_dir = os.path.join(self.RESULTS_DIR, report_html_name)
        cmd_line = "rally task report {} --out {}".format(task_id,
                                                          report_html_dir)

        LOGGER.debug('running command line: %s', cmd_line)
        os.popen(cmd_line)

        # get and save the rally operation JSON result
        cmd_line = "rally task results %s" % task_id
        LOGGER.debug('running command line: %s', cmd_line)
        cmd = os.popen(cmd_line)
        json_results = cmd.read()
        report_json_name = 'opnfv-{}.json'.format(test_name)
        report_json_dir = os.path.join(self.RESULTS_DIR, report_json_name)
        with open(report_json_dir, 'w') as r_file:
            LOGGER.debug('saving json file')
            r_file.write(json_results)

        # parse the JSON operation result
        if self.task_succeed(json_results):
            LOGGER.info('Test scenario: "%s" OK.\n', test_name)
        else:
            LOGGER.info('Test scenario: "%s" Failed.\n', test_name)

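    # Stream the rally CLI output: keep the lines needed for the debug log
    # and the task id lookup, and derive per-scenario totals (number of
    # tests, success percentage, full duration) for the summary report.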
    def _get_output(self, proc, test_name):
        result = ""
        nb_tests = 0
        overall_duration = 0.0
        success = 0.0
        nb_totals = 0

        while proc.poll() is None:
            line = proc.stdout.readline()
            if ("Load duration" in line or
                    "started" in line or
                    "finished" in line or
                    " Preparing" in line or
                    "+-" in line or
                    "|" in line):
                result += line
            elif "test scenario" in line:
                result += "\n" + line
            elif "Full duration" in line:
                result += line + "\n\n"

            # parse output for the summary report
            if ("| " in line and
                    "| action" not in line and
                    "| Starting" not in line and
                    "| Completed" not in line and
                    "| ITER" not in line and
                    "|   " not in line and
                    "| total" not in line):
                nb_tests += 1
            elif "| total" in line:
                percentage = ((line.split('|')[8]).strip(' ')).strip('%')
                try:
                    success += float(percentage)
                except ValueError:
                    LOGGER.info('Percentage error: %s, %s',
                                percentage, line)
                nb_totals += 1
            elif "Full duration" in line:
                duration = line.split(': ')[1]
                try:
                    overall_duration += float(duration)
                except ValueError:
                    LOGGER.info('Duration error: %s, %s', duration, line)

        overall_duration = "{:10.2f}".format(overall_duration)
        if nb_totals == 0:
            success_avg = 0
        else:
            success_avg = "{:0.2f}".format(success / nb_totals)

        scenario_summary = {'test_name': test_name,
                            'overall_duration': overall_duration,
                            'nb_tests': nb_tests,
                            'success': success_avg}
        self.summary.append(scenario_summary)

        LOGGER.debug("\n%s", result)

        return result

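    # Create the OpenStack resources the scenarios rely on: a Glance image,
    # a shared network with subnet and router, and two dedicated flavors
    # (512 MB and 1024 MB RAM). Names get a UUID suffix when
    # rally_unique_names is set.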
    def _prepare_env(self):
        LOGGER.debug('Validating the test name...')
        if self.test_name not in self.TESTS:
            raise Exception("Test name '%s' is invalid" % self.test_name)

        network_name = self.RALLY_PRIVATE_NET_NAME + self.guid
        subnet_name = self.RALLY_PRIVATE_SUBNET_NAME + self.guid
        router_name = self.RALLY_ROUTER_NAME + self.guid
        self.image_name = self.GLANCE_IMAGE_NAME + self.guid
        self.flavor_name = self.FLAVOR_NAME + self.guid
        self.flavor_alt_name = self.FLAVOR_ALT_NAME + self.guid
        self.ext_net_name = snaps_utils.get_ext_net_name(self.os_creds)

        LOGGER.debug("Creating image '%s'...", self.image_name)
        image_creator = deploy_utils.create_image(
            self.os_creds, ImageSettings(
                name=self.image_name,
                image_file=self.GLANCE_IMAGE_PATH,
                img_format=self.GLANCE_IMAGE_FORMAT,
                image_user=self.GLANCE_IMAGE_USERNAME,
                public=True,
                extra_properties=self.GLANCE_IMAGE_EXTRA_PROPERTIES))
        if image_creator is None:
            raise Exception("Failed to create image")
        self.creators.append(image_creator)

        LOGGER.debug("Creating network '%s'...", network_name)
        network_creator = deploy_utils.create_network(
            self.os_creds, NetworkSettings(
                name=network_name,
                shared=True,
                subnet_settings=[SubnetSettings(
                    name=subnet_name,
                    cidr=self.RALLY_PRIVATE_SUBNET_CIDR)
                ]))
        if network_creator is None:
            raise Exception("Failed to create private network")
        self.priv_net_id = network_creator.get_network().id
        self.creators.append(network_creator)

        LOGGER.debug("Creating router '%s'...", router_name)
        router_creator = deploy_utils.create_router(
            self.os_creds, RouterSettings(
                name=router_name,
                external_gateway=self.ext_net_name,
                internal_subnets=[subnet_name]))
        if router_creator is None:
            raise Exception("Failed to create router")
        self.creators.append(router_creator)

        LOGGER.debug("Creating flavor '%s'...", self.flavor_name)
        flavor_creator = OpenStackFlavor(
            self.os_creds, FlavorSettings(
                name=self.flavor_name, ram=512, disk=1, vcpus=1,
                metadata=self.FLAVOR_EXTRA_SPECS))
        if flavor_creator is None or flavor_creator.create() is None:
            raise Exception("Failed to create flavor")
        self.creators.append(flavor_creator)

        LOGGER.debug("Creating flavor '%s'...", self.flavor_alt_name)
        flavor_alt_creator = OpenStackFlavor(
            self.os_creds, FlavorSettings(
                name=self.flavor_alt_name, ram=1024, disk=1, vcpus=1,
                metadata=self.FLAVOR_EXTRA_SPECS))
        if flavor_alt_creator is None or flavor_alt_creator.create() is None:
            raise Exception("Failed to create alternate flavor")
        self.creators.append(flavor_alt_creator)

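    # 'all' expands to every service scenario except 'vm', which only runs
    # when requested explicitly.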
    def _run_tests(self):
        if self.test_name == 'all':
            for test in self.TESTS:
                if test in ('all', 'vm'):
                    continue
                self._run_task(test)
        else:
            self._run_task(self.test_name)

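    # Aggregate the per-scenario summaries into an ASCII table, compute the
    # overall success rate and expose the payload through self.details.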
    def _generate_report(self):
        report = (
            "\n"
            "                                                              "
            "\n"
            "                     Rally Summary Report\n"
            "\n"
            "+===================+============+===============+===========+"
            "\n"
            "| Module            | Duration   | nb. Test Run  | Success   |"
            "\n"
            "+===================+============+===============+===========+"
            "\n")
        payload = []

        # for each scenario we draw a row for the table
        total_duration = 0.0
        total_nb_tests = 0
        total_success = 0.0
        for item in self.summary:
            name = "{0:<17}".format(item['test_name'])
            duration = float(item['overall_duration'])
            total_duration += duration
            duration = time.strftime("%M:%S", time.gmtime(duration))
            duration = "{0:<10}".format(duration)
            nb_tests = "{0:<13}".format(item['nb_tests'])
            total_nb_tests += int(item['nb_tests'])
            success = "{0:<10}".format(str(item['success']) + '%')
            total_success += float(item['success'])
            report += ("" +
                       "| " + name + " | " + duration + " | " +
                       nb_tests + " | " + success + "|\n" +
                       "+-------------------+------------"
                       "+---------------+-----------+\n")
            payload.append({'module': name,
                            'details': {'duration': item['overall_duration'],
                                        'nb tests': item['nb_tests'],
                                        'success': item['success']}})

        total_duration_str = time.strftime("%H:%M:%S",
                                           time.gmtime(total_duration))
        total_duration_str2 = "{0:<10}".format(total_duration_str)
        total_nb_tests_str = "{0:<13}".format(total_nb_tests)

        try:
            self.result = total_success / len(self.summary)
        except ZeroDivisionError:
            self.result = 100

        success_rate = "{:0.2f}".format(self.result)
        success_rate_str = "{0:<10}".format(str(success_rate) + '%')
        report += ("+===================+============"
                   "+===============+===========+")
        report += "\n"
        report += ("| TOTAL:            | " + total_duration_str2 + " | " +
                   total_nb_tests_str + " | " + success_rate_str + "|\n")
        report += ("+===================+============"
                   "+===============+===========+")
        report += "\n"

        LOGGER.info("\n%s", report)
        payload.append({'summary': {'duration': total_duration,
                                    'nb tests': total_nb_tests,
                                    'nb success': success_rate}})

        self.details = payload

        LOGGER.info("Rally '%s' success_rate is %s%%",
                    self.case_name, success_rate)

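    # Tear down the SNAPS creators in reverse creation order (flavors,
    # router, network, image).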
    def _clean_up(self):
        for creator in reversed(self.creators):
            try:
                creator.clean()
            except Exception as exc:  # pylint: disable=broad-except
                LOGGER.error('Unexpected error cleaning - %s', exc)

    @energy.enable_recording
    def run(self, **kwargs):
        """Run testcase."""
        self.start_time = time.time()
        try:
            self._prepare_env()
            self._run_tests()
            self._generate_report()
            res = testcase.TestCase.EX_OK
        except Exception as exc:  # pylint: disable=broad-except
            LOGGER.error('Error with run: %s', exc)
            res = testcase.TestCase.EX_RUN_ERROR
        finally:
            self._clean_up()

        self.stop_time = time.time()
        return res


class RallySanity(RallyBase):
    """Rally sanity testcase implementation."""

    def __init__(self, **kwargs):
        """Initialize RallySanity object."""
        if "case_name" not in kwargs:
            kwargs["case_name"] = "rally_sanity"
        super(RallySanity, self).__init__(**kwargs)
        self.mode = 'sanity'
        self.test_name = 'all'
        self.smoke = True
        self.scenario_dir = os.path.join(self.RALLY_SCENARIO_DIR, 'sanity')


class RallyFull(RallyBase):
    """Rally full testcase implementation."""

    def __init__(self, **kwargs):
        """Initialize RallyFull object."""
        if "case_name" not in kwargs:
            kwargs["case_name"] = "rally_full"
        super(RallyFull, self).__init__(**kwargs)
        self.mode = 'full'
        self.test_name = 'all'
        self.smoke = False
        self.scenario_dir = os.path.join(self.RALLY_SCENARIO_DIR, 'full')