Fix last Pylint error in Functest
functest.git: functest/opnfv_tests/openstack/rally/rally.py
#!/usr/bin/env python
#
# Copyright (c) 2015 All rights reserved
# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
#
# http://www.apache.org/licenses/LICENSE-2.0
#

"""Rally testcases implementation."""

from __future__ import division

import json
import logging
import os
import re
import subprocess
import time
import uuid

import pkg_resources
import yaml

from functest.core import testcase
from functest.energy import energy
from functest.opnfv_tests.openstack.snaps import snaps_utils
from functest.opnfv_tests.openstack.tempest import conf_utils
from functest.utils.constants import CONST

from snaps.config.flavor import FlavorConfig
from snaps.config.image import ImageConfig
from snaps.config.network import NetworkConfig, SubnetConfig
from snaps.config.router import RouterConfig

from snaps.openstack.create_flavor import OpenStackFlavor
from snaps.openstack.tests import openstack_tests
from snaps.openstack.utils import deploy_utils

LOGGER = logging.getLogger(__name__)


class RallyBase(testcase.TestCase):
45     """Base class form Rally testcases implementation."""

    TESTS = ['authenticate', 'glance', 'ceilometer', 'cinder', 'heat',
             'keystone', 'neutron', 'nova', 'quotas', 'vm', 'all']
    GLANCE_IMAGE_NAME = CONST.__getattribute__('openstack_image_name')
    GLANCE_IMAGE_FILENAME = CONST.__getattribute__('openstack_image_file_name')
    GLANCE_IMAGE_PATH = os.path.join(
        CONST.__getattribute__('dir_functest_images'),
        GLANCE_IMAGE_FILENAME)
    GLANCE_IMAGE_FORMAT = CONST.__getattribute__('openstack_image_disk_format')
    GLANCE_IMAGE_USERNAME = CONST.__getattribute__('openstack_image_username')
    GLANCE_IMAGE_EXTRA_PROPERTIES = {}
    if hasattr(CONST, 'openstack_extra_properties'):
        GLANCE_IMAGE_EXTRA_PROPERTIES = CONST.__getattribute__(
            'openstack_extra_properties')
    FLAVOR_NAME = CONST.__getattribute__('rally_flavor_name')
    FLAVOR_ALT_NAME = CONST.__getattribute__('rally_flavor_alt_name')
    FLAVOR_EXTRA_SPECS = None
    FLAVOR_RAM = 512
    FLAVOR_RAM_ALT = 1024
    if hasattr(CONST, 'flavor_extra_specs'):
        FLAVOR_EXTRA_SPECS = CONST.__getattribute__('flavor_extra_specs')
        FLAVOR_RAM = 1024
        FLAVOR_RAM_ALT = 2048

    RALLY_DIR = pkg_resources.resource_filename(
        'functest', 'opnfv_tests/openstack/rally')
    RALLY_SCENARIO_DIR = pkg_resources.resource_filename(
        'functest', 'opnfv_tests/openstack/rally/scenario')
    TEMPLATE_DIR = pkg_resources.resource_filename(
        'functest', 'opnfv_tests/openstack/rally/scenario/templates')
    SUPPORT_DIR = pkg_resources.resource_filename(
        'functest', 'opnfv_tests/openstack/rally/scenario/support')
    USERS_AMOUNT = 2
    TENANTS_AMOUNT = 3
    ITERATIONS_AMOUNT = 10
    CONCURRENCY = 4
    RESULTS_DIR = os.path.join(CONST.__getattribute__('dir_results'), 'rally')
    BLACKLIST_FILE = os.path.join(RALLY_DIR, "blacklist.txt")
    TEMP_DIR = os.path.join(RALLY_DIR, "var")

    RALLY_PRIVATE_NET_NAME = CONST.__getattribute__('rally_network_name')
    RALLY_PRIVATE_SUBNET_NAME = CONST.__getattribute__('rally_subnet_name')
    RALLY_PRIVATE_SUBNET_CIDR = CONST.__getattribute__('rally_subnet_cidr')
    RALLY_ROUTER_NAME = CONST.__getattribute__('rally_router_name')

    def __init__(self, **kwargs):
        """Initialize RallyBase object."""
        super(RallyBase, self).__init__(**kwargs)
        if 'os_creds' in kwargs:
            self.os_creds = kwargs['os_creds']
        else:
            creds_override = None
            if hasattr(CONST, 'snaps_os_creds_override'):
                creds_override = CONST.__getattribute__(
                    'snaps_os_creds_override')

            self.os_creds = openstack_tests.get_credentials(
                os_env_file=CONST.__getattribute__('openstack_creds'),
                overrides=creds_override)

        self.guid = '-' + str(uuid.uuid4())

        self.creators = []
        self.mode = ''
        self.summary = []
        self.scenario_dir = ''
        self.image_name = None
        self.ext_net_name = None
        self.priv_net_id = None
        self.flavor_name = None
        self.flavor_alt_name = None
        self.smoke = None
        self.test_name = None
        self.start_time = None
        self.result = None
        self.details = None
        self.compute_cnt = 0

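    # Illustrative sketch of the dictionary built by _build_task_args() and
    # passed to "rally task start --task-args" (values are hypothetical):
    #   {'service_list': ['authenticate'], 'image_name': 'functest-img-<guid>',
    #    'flavor_name': 'rally-flavor-<guid>', 'iterations': 10,
    #    'concurrency': 4, 'smoke': True, ...}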
    def _build_task_args(self, test_file_name):
        task_args = {'service_list': [test_file_name]}
        task_args['image_name'] = self.image_name
        task_args['flavor_name'] = self.flavor_name
        task_args['flavor_alt_name'] = self.flavor_alt_name
        task_args['glance_image_location'] = self.GLANCE_IMAGE_PATH
        task_args['glance_image_format'] = self.GLANCE_IMAGE_FORMAT
        task_args['tmpl_dir'] = self.TEMPLATE_DIR
        task_args['sup_dir'] = self.SUPPORT_DIR
        task_args['users_amount'] = self.USERS_AMOUNT
        task_args['tenants_amount'] = self.TENANTS_AMOUNT
        task_args['use_existing_users'] = False
        task_args['iterations'] = self.ITERATIONS_AMOUNT
        task_args['concurrency'] = self.CONCURRENCY
        task_args['smoke'] = self.smoke

        ext_net = self.ext_net_name
        if ext_net:
            task_args['floating_network'] = str(ext_net)
        else:
            task_args['floating_network'] = ''

        net_id = self.priv_net_id
        if net_id:
            task_args['netid'] = str(net_id)
        else:
            task_args['netid'] = ''

        return task_args

    def _prepare_test_list(self, test_name):
        test_yaml_file_name = 'opnfv-{}.yaml'.format(test_name)
        scenario_file_name = os.path.join(self.RALLY_SCENARIO_DIR,
                                          test_yaml_file_name)

        if not os.path.exists(scenario_file_name):
            scenario_file_name = os.path.join(self.scenario_dir,
                                              test_yaml_file_name)

            if not os.path.exists(scenario_file_name):
                raise Exception("The scenario '%s' does not exist."
                                % scenario_file_name)

        LOGGER.debug('Scenario fetched from : %s', scenario_file_name)
        test_file_name = os.path.join(self.TEMP_DIR, test_yaml_file_name)

        if not os.path.exists(self.TEMP_DIR):
            os.makedirs(self.TEMP_DIR)

        self._apply_blacklist(scenario_file_name, test_file_name)
        return test_file_name

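    # "rally task start" prints a line of the form "Task <uuid>: started";
    # get_task_id() below extracts that identifier from the captured output
    # (format assumed from the regular expression used here).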
    @staticmethod
    def get_task_id(cmd_raw):
        """
        Get task id from rally command output.

        :param cmd_raw: raw output of the rally task command
        :return: task_id as string
        """
        taskid_re = re.compile('^Task +(.*): started$')
        for line in cmd_raw.splitlines(True):
            line = line.strip()
            match = taskid_re.match(line)
            if match:
                return match.group(1)
        return None

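    # task_succeed() parses the output of "rally task results": a list of
    # scenario reports, each with a 'result' list whose entries carry an
    # 'error' list (empty on success). Minimal illustrative shape:
    #   [{'result': [{'error': [], ...}, ...], ...}]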
    @staticmethod
    def task_succeed(json_raw):
        """
        Parse JSON from rally JSON results.

        :param json_raw:
        :return: Bool
        """
        rally_report = json.loads(json_raw)
        for report in rally_report:
            if report is None or report.get('result') is None:
                return False

            for result in report.get('result'):
                if result is None or len(result.get('error')) > 0:
                    return False

        return True

    def _migration_supported(self):
        """Determine if migration is supported."""
        if self.compute_cnt > 1:
            return True

        return False

    @staticmethod
    def get_cmd_output(proc):
        """Get command stdout."""
        result = ""
        for line in proc.stdout:
            result += line
        return result

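    # blacklist.txt is expected to provide a 'scenario' section (items with
    # 'installers', 'scenarios' and 'tests' keys) and a 'functionality'
    # section (items with 'functions' and 'tests' keys). Illustrative sketch
    # with hypothetical values:
    #   scenario:
    #     - installers: ['.*']
    #       scenarios: ['os-nosdn-.*-ha']
    #       tests: ['NovaServers.boot_server_from_volume_and_delete']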
    @staticmethod
    def excl_scenario():
        """Exclude scenario."""
        black_tests = []
        try:
            with open(RallyBase.BLACKLIST_FILE, 'r') as black_list_file:
                black_list_yaml = yaml.safe_load(black_list_file)

            installer_type = CONST.__getattribute__('INSTALLER_TYPE')
            deploy_scenario = CONST.__getattribute__('DEPLOY_SCENARIO')
            if (bool(installer_type) and bool(deploy_scenario) and
                    'scenario' in black_list_yaml.keys()):
                for item in black_list_yaml['scenario']:
                    scenarios = item['scenarios']
                    installers = item['installers']
                    in_it = RallyBase.in_iterable_re
                    if (in_it(deploy_scenario, scenarios) and
                            in_it(installer_type, installers)):
                        tests = item['tests']
                        black_tests.extend(tests)
        except Exception:  # pylint: disable=broad-except
            LOGGER.debug("Scenario exclusion not applied.")

        return black_tests

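    # Illustrative example (hypothetical values): with needle
    # 'os-nosdn-nofeature-ha' and haystack ['os-nosdn-.*-ha'],
    # in_iterable_re() returns True because the regex pattern matches.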
    @staticmethod
    def in_iterable_re(needle, haystack):
        """
        Check if given needle is in the iterable haystack, using regex.

        :param needle: string to be matched
        :param haystack: iterable of strings (optionally regex patterns)
        :return: True if needle is equal to any of the elements in haystack,
                 or if a nonempty regex pattern in haystack is found in needle.
        """
        # match without regex
        if needle in haystack:
            return True

        for pattern in haystack:
            # match if regex pattern is set and found in the needle
            if pattern and re.search(pattern, needle) is not None:
                return True

        return False

    def excl_func(self):
        """Exclude functionalities."""
        black_tests = []
        func_list = []

        try:
            with open(RallyBase.BLACKLIST_FILE, 'r') as black_list_file:
                black_list_yaml = yaml.safe_load(black_list_file)

            if not self._migration_supported():
                func_list.append("no_migration")

            if 'functionality' in black_list_yaml.keys():
                for item in black_list_yaml['functionality']:
                    functions = item['functions']
                    for func in func_list:
                        if func in functions:
                            tests = item['tests']
                            black_tests.extend(tests)
        except Exception:  # pylint: disable=broad-except
            LOGGER.debug("Functionality exclusion not applied.")

        return black_tests

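    # The blacklisted names collected above are treated as regex patterns and
    # matched against each heading of the scenario YAML below; a matching
    # block is skipped until the next blank line.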
    def _apply_blacklist(self, case_file_name, result_file_name):
        """Apply blacklist."""
        LOGGER.debug("Applying blacklist...")
        cases_file = open(case_file_name, 'r')
        result_file = open(result_file_name, 'w')

        black_tests = list(set(self.excl_func() +
                               self.excl_scenario()))

        if black_tests:
            LOGGER.debug("Blacklisted tests: %s", black_tests)

        include = True
        for cases_line in cases_file:
            if include:
                for black_tests_line in black_tests:
                    if re.search(black_tests_line,
                                 cases_line.strip().rstrip(':')):
                        include = False
                        break
                else:
                    result_file.write(str(cases_line))
            else:
                if cases_line.isspace():
                    include = True

        cases_file.close()
        result_file.close()

    @staticmethod
    def file_is_empty(file_name):
        """Determine if a file is empty."""
        try:
            if os.stat(file_name).st_size > 0:
                return False
        except Exception:  # pylint: disable=broad-except
            pass

        return True

    def _run_task(self, test_name):
        """Run a task."""
        LOGGER.info('Starting test scenario "%s" ...', test_name)

        task_file = os.path.join(self.RALLY_DIR, 'task.yaml')
        if not os.path.exists(task_file):
            LOGGER.error("Task file '%s' does not exist.", task_file)
            raise Exception("Task file '%s' does not exist." % task_file)

        file_name = self._prepare_test_list(test_name)
        if self.file_is_empty(file_name):
            LOGGER.info('No tests for scenario "%s"', test_name)
            return

        cmd = (["rally", "task", "start", "--abort-on-sla-failure", "--task",
                task_file, "--task-args",
                str(self._build_task_args(test_name))])
        LOGGER.debug('running command: %s', cmd)
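        # Equivalent shell invocation (illustrative, with hypothetical paths):
        #   rally task start --abort-on-sla-failure \
        #       --task <RALLY_DIR>/task.yaml \
        #       --task-args "{'service_list': ['authenticate'], ...}"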

        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        output = self._get_output(proc, test_name)
        task_id = self.get_task_id(output)
        LOGGER.debug('task_id : %s', task_id)

        if task_id is None:
            LOGGER.error('Failed to retrieve task_id, validating task...')
            cmd = (["rally", "task", "validate", "--task", task_file,
                    "--task-args", str(self._build_task_args(test_name))])
            LOGGER.debug('running command: %s', cmd)
            proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            output = self.get_cmd_output(proc)
            LOGGER.error("Task validation result:\n%s", output)
            return

        # check for result directory and create it otherwise
        if not os.path.exists(self.RESULTS_DIR):
            LOGGER.debug('%s does not exist, we create it.',
                         self.RESULTS_DIR)
            os.makedirs(self.RESULTS_DIR)

        # write html report file
        report_html_name = 'opnfv-{}.html'.format(test_name)
        report_html_dir = os.path.join(self.RESULTS_DIR, report_html_name)
        cmd = (["rally", "task", "report", task_id, "--out", report_html_dir])

        LOGGER.debug('running command: %s', cmd)
        subprocess.Popen(cmd, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT)

        # get and save rally operation JSON result
        cmd = (["rally", "task", "results", task_id])
        LOGGER.debug('running command: %s', cmd)
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        json_results = self.get_cmd_output(proc)
        report_json_name = 'opnfv-{}.json'.format(test_name)
        report_json_dir = os.path.join(self.RESULTS_DIR, report_json_name)
        with open(report_json_dir, 'w') as r_file:
            LOGGER.debug('saving json file')
            r_file.write(json_results)

        # parse JSON operation result
        if self.task_succeed(json_results):
            LOGGER.info('Test scenario: "%s" OK.\n', test_name)
        else:
            LOGGER.info('Test scenario: "%s" Failed.\n', test_name)

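    # _get_output() filters the live "rally task start" console output and
    # collects per-scenario statistics; the success percentage is read from
    # the "| total |" row of the summary table (column layout assumed from
    # the parsing below).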
    def _get_output(self, proc, test_name):
        result = ""
        nb_tests = 0
        overall_duration = 0.0
        success = 0.0
        nb_totals = 0

        for line in proc.stdout:
            if ("Load duration" in line or
                    "started" in line or
                    "finished" in line or
                    " Preparing" in line or
                    "+-" in line or
                    "|" in line):
                result += line
            elif "test scenario" in line:
                result += "\n" + line
            elif "Full duration" in line:
                result += line + "\n\n"

            # parse output for summary report
            if ("| " in line and
                    "| action" not in line and
                    "| Starting" not in line and
                    "| Completed" not in line and
                    "| ITER" not in line and
                    "|   " not in line and
                    "| total" not in line):
                nb_tests += 1
            elif "| total" in line:
                percentage = ((line.split('|')[8]).strip(' ')).strip('%')
                try:
                    success += float(percentage)
                except ValueError:
                    LOGGER.info('Percentage error: %s, %s',
                                percentage, line)
                nb_totals += 1
            elif "Full duration" in line:
                duration = line.split(': ')[1]
                try:
                    overall_duration += float(duration)
                except ValueError:
                    LOGGER.info('Duration error: %s, %s', duration, line)

        overall_duration = "{:10.2f}".format(overall_duration)
        if nb_totals == 0:
            success_avg = 0
        else:
            success_avg = "{:0.2f}".format(success / nb_totals)

        scenario_summary = {'test_name': test_name,
                            'overall_duration': overall_duration,
                            'nb_tests': nb_tests,
                            'success': success_avg}
        self.summary.append(scenario_summary)

        LOGGER.debug("\n%s", result)

        return result

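    # _prepare_env() provisions the per-run OpenStack resources used by the
    # Rally scenarios: a Glance image, a shared private network and subnet,
    # a router to the external network, and two flavors (default and alt).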
    def _prepare_env(self):
        LOGGER.debug('Validating the test name...')
        if self.test_name not in self.TESTS:
            raise Exception("Test name '%s' is invalid" % self.test_name)

        network_name = self.RALLY_PRIVATE_NET_NAME + self.guid
        subnet_name = self.RALLY_PRIVATE_SUBNET_NAME + self.guid
        router_name = self.RALLY_ROUTER_NAME + self.guid
        self.image_name = self.GLANCE_IMAGE_NAME + self.guid
        self.flavor_name = self.FLAVOR_NAME + self.guid
        self.flavor_alt_name = self.FLAVOR_ALT_NAME + self.guid
        self.ext_net_name = snaps_utils.get_ext_net_name(self.os_creds)
        self.compute_cnt = snaps_utils.get_active_compute_cnt(self.os_creds)

        LOGGER.debug("Creating image '%s'...", self.image_name)
        image_creator = deploy_utils.create_image(
            self.os_creds, ImageConfig(
                name=self.image_name,
                image_file=self.GLANCE_IMAGE_PATH,
                img_format=self.GLANCE_IMAGE_FORMAT,
                image_user=self.GLANCE_IMAGE_USERNAME,
                public=True,
                extra_properties=self.GLANCE_IMAGE_EXTRA_PROPERTIES))
        if image_creator is None:
            raise Exception("Failed to create image")
        self.creators.append(image_creator)

        LOGGER.debug("Creating network '%s'...", network_name)

        rally_network_type = None
        rally_physical_network = None
        rally_segmentation_id = None

        if hasattr(CONST, 'rally_network_type'):
            rally_network_type = CONST.__getattribute__(
                'rally_network_type')
        if hasattr(CONST, 'rally_physical_network'):
            rally_physical_network = CONST.__getattribute__(
                'rally_physical_network')
        if hasattr(CONST, 'rally_segmentation_id'):
            rally_segmentation_id = CONST.__getattribute__(
                'rally_segmentation_id')

        network_creator = deploy_utils.create_network(
            self.os_creds, NetworkConfig(
                name=network_name,
                shared=True,
                network_type=rally_network_type,
                physical_network=rally_physical_network,
                segmentation_id=rally_segmentation_id,
                subnet_settings=[SubnetConfig(
                    name=subnet_name,
                    cidr=self.RALLY_PRIVATE_SUBNET_CIDR)
                ]))
        if network_creator is None:
            raise Exception("Failed to create private network")
        self.priv_net_id = network_creator.get_network().id
        self.creators.append(network_creator)

        LOGGER.debug("Creating router '%s'...", router_name)
        router_creator = deploy_utils.create_router(
            self.os_creds, RouterConfig(
                name=router_name,
                external_gateway=self.ext_net_name,
                internal_subnets=[subnet_name]))
        if router_creator is None:
            raise Exception("Failed to create router")
        self.creators.append(router_creator)

        LOGGER.debug("Creating flavor '%s'...", self.flavor_name)
        flavor_creator = OpenStackFlavor(
            self.os_creds, FlavorConfig(
                name=self.flavor_name, ram=self.FLAVOR_RAM, disk=1, vcpus=1,
                metadata=self.FLAVOR_EXTRA_SPECS))
        if flavor_creator is None or flavor_creator.create() is None:
            raise Exception("Failed to create flavor")
        self.creators.append(flavor_creator)

        LOGGER.debug("Creating flavor '%s'...", self.flavor_alt_name)
        flavor_alt_creator = OpenStackFlavor(
            self.os_creds, FlavorConfig(
                name=self.flavor_alt_name, ram=self.FLAVOR_RAM_ALT, disk=1,
                vcpus=1, metadata=self.FLAVOR_EXTRA_SPECS))
        if flavor_alt_creator is None or flavor_alt_creator.create() is None:
            raise Exception("Failed to create flavor")
        self.creators.append(flavor_alt_creator)

    def _run_tests(self):
        if self.test_name == 'all':
            for test in self.TESTS:
                if test in ('all', 'vm'):
                    continue
                self._run_task(test)
        else:
            self._run_task(self.test_name)

    def _generate_report(self):
        report = (
            "\n"
            "                                                              "
            "\n"
            "                     Rally Summary Report\n"
            "\n"
            "+===================+============+===============+===========+"
            "\n"
            "| Module            | Duration   | nb. Test Run  | Success   |"
            "\n"
            "+===================+============+===============+===========+"
            "\n")
        payload = []

        # for each scenario we draw a row for the table
        total_duration = 0.0
        total_nb_tests = 0
        total_success = 0.0
        for item in self.summary:
            name = "{0:<17}".format(item['test_name'])
            duration = float(item['overall_duration'])
            total_duration += duration
            duration = time.strftime("%M:%S", time.gmtime(duration))
            duration = "{0:<10}".format(duration)
            nb_tests = "{0:<13}".format(item['nb_tests'])
            total_nb_tests += int(item['nb_tests'])
            success = "{0:<10}".format(str(item['success']) + '%')
            total_success += float(item['success'])
            report += ("| " + name + " | " + duration + " | " +
                       nb_tests + " | " + success + "|\n" +
                       "+-------------------+------------"
                       "+---------------+-----------+\n")
            payload.append({'module': name,
                            'details': {'duration': item['overall_duration'],
                                        'nb tests': item['nb_tests'],
                                        'success': item['success']}})

        total_duration_str = time.strftime("%H:%M:%S",
                                           time.gmtime(total_duration))
        total_duration_str2 = "{0:<10}".format(total_duration_str)
        total_nb_tests_str = "{0:<13}".format(total_nb_tests)

        try:
            self.result = total_success / len(self.summary)
        except ZeroDivisionError:
            self.result = 100

        success_rate = "{:0.2f}".format(self.result)
        success_rate_str = "{0:<10}".format(str(success_rate) + '%')
        report += ("+===================+============"
                   "+===============+===========+")
        report += "\n"
        report += ("| TOTAL:            | " + total_duration_str2 + " | " +
                   total_nb_tests_str + " | " + success_rate_str + "|\n")
        report += ("+===================+============"
                   "+===============+===========+")
        report += "\n"

        LOGGER.info("\n%s", report)
        payload.append({'summary': {'duration': total_duration,
                                    'nb tests': total_nb_tests,
                                    'nb success': success_rate}})

        self.details = payload

        LOGGER.info("Rally '%s' success_rate is %s%%",
                    self.case_name, success_rate)

    def _clean_up(self):
        for creator in reversed(self.creators):
            try:
                creator.clean()
            except Exception as exc:  # pylint: disable=broad-except
                LOGGER.error('Unexpected error cleaning - %s', exc)

    @energy.enable_recording
    def run(self, **kwargs):
        """Run testcase."""
        self.start_time = time.time()
        try:
            conf_utils.create_rally_deployment()
            self._prepare_env()
            self._run_tests()
            self._generate_report()
            res = testcase.TestCase.EX_OK
        except Exception as exc:   # pylint: disable=broad-except
            LOGGER.error('Error with run: %s', exc)
            res = testcase.TestCase.EX_RUN_ERROR
        finally:
            self._clean_up()

        self.stop_time = time.time()
        return res


class RallySanity(RallyBase):
    """Rally sanity testcase implementation."""

    def __init__(self, **kwargs):
        """Initialize RallySanity object."""
        if "case_name" not in kwargs:
            kwargs["case_name"] = "rally_sanity"
        super(RallySanity, self).__init__(**kwargs)
        self.mode = 'sanity'
        self.test_name = 'all'
        self.smoke = True
        self.scenario_dir = os.path.join(self.RALLY_SCENARIO_DIR, 'sanity')


class RallyFull(RallyBase):
    """Rally full testcase implementation."""

    def __init__(self, **kwargs):
        """Initialize RallyFull object."""
        if "case_name" not in kwargs:
            kwargs["case_name"] = "rally_full"
        super(RallyFull, self).__init__(**kwargs)
        self.mode = 'full'
        self.test_name = 'all'
        self.smoke = False
        self.scenario_dir = os.path.join(self.RALLY_SCENARIO_DIR, 'full')
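

# Example usage (illustrative sketch, assuming Functest is installed and the
# OpenStack credentials referenced by CONST are reachable):
#   from functest.opnfv_tests.openstack.rally.rally import RallySanity
#   rally_sanity = RallySanity()
#   rally_sanity.run()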