c16e6d13ad878719776baaa1c363a3b306db0cfd
[functest.git] / functest / opnfv_tests / openstack / rally / rally.py
1 #!/usr/bin/env python
2 #
3 # Copyright (c) 2015 All rights reserved
4 # This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10
11 """Rally testcases implementation."""
12
13 from __future__ import division
14
15 import json
16 import logging
17 import os
18 import re
19 import subprocess
20 import time
21 import uuid
22
23 import pkg_resources
24 import yaml
25
26 from functest.core import testcase
27 from functest.energy import energy
28 from functest.opnfv_tests.openstack.snaps import snaps_utils
29 from functest.opnfv_tests.openstack.tempest import conf_utils
30 from functest.utils.constants import CONST
31
32 from snaps.config.flavor import FlavorConfig
33 from snaps.config.image import ImageConfig
34 from snaps.config.network import NetworkConfig, SubnetConfig
35 from snaps.config.router import RouterConfig
36
37 from snaps.openstack.create_flavor import OpenStackFlavor
38 from snaps.openstack.tests import openstack_tests
39 from snaps.openstack.utils import deploy_utils
40
41 LOGGER = logging.getLogger(__name__)
42
43
44 class RallyBase(testcase.TestCase):
45     """Base class form Rally testcases implementation."""
46
47     TESTS = ['authenticate', 'glance', 'ceilometer', 'cinder', 'heat',
48              'keystone', 'neutron', 'nova', 'quotas', 'vm', 'all']
49     GLANCE_IMAGE_NAME = CONST.__getattribute__('openstack_image_name')
50     GLANCE_IMAGE_FILENAME = CONST.__getattribute__('openstack_image_file_name')
51     GLANCE_IMAGE_PATH = os.path.join(
52         CONST.__getattribute__('dir_functest_images'),
53         GLANCE_IMAGE_FILENAME)
54     GLANCE_IMAGE_FORMAT = CONST.__getattribute__('openstack_image_disk_format')
55     GLANCE_IMAGE_USERNAME = CONST.__getattribute__('openstack_image_username')
56     GLANCE_IMAGE_EXTRA_PROPERTIES = {}
57     if hasattr(CONST, 'openstack_extra_properties'):
58         GLANCE_IMAGE_EXTRA_PROPERTIES = CONST.__getattribute__(
59             'openstack_extra_properties')
60     FLAVOR_NAME = CONST.__getattribute__('rally_flavor_name')
61     FLAVOR_ALT_NAME = CONST.__getattribute__('rally_flavor_alt_name')
62     FLAVOR_EXTRA_SPECS = None
63     FLAVOR_RAM = 512
64     FLAVOR_RAM_ALT = 1024
65     if hasattr(CONST, 'flavor_extra_specs'):
66         FLAVOR_EXTRA_SPECS = CONST.__getattribute__('flavor_extra_specs')
67         FLAVOR_RAM = 1024
68         FLAVOR_RAM_ALT = 2048
69
70     RALLY_DIR = pkg_resources.resource_filename(
71         'functest', 'opnfv_tests/openstack/rally')
72     RALLY_SCENARIO_DIR = pkg_resources.resource_filename(
73         'functest', 'opnfv_tests/openstack/rally/scenario')
74     TEMPLATE_DIR = pkg_resources.resource_filename(
75         'functest', 'opnfv_tests/openstack/rally/scenario/templates')
76     SUPPORT_DIR = pkg_resources.resource_filename(
77         'functest', 'opnfv_tests/openstack/rally/scenario/support')
78     USERS_AMOUNT = 2
79     TENANTS_AMOUNT = 3
80     ITERATIONS_AMOUNT = 10
81     CONCURRENCY = 4
82     RESULTS_DIR = os.path.join(CONST.__getattribute__('dir_results'), 'rally')
83     BLACKLIST_FILE = os.path.join(RALLY_DIR, "blacklist.txt")
84     TEMP_DIR = os.path.join(RALLY_DIR, "var")
85
86     RALLY_PRIVATE_NET_NAME = CONST.__getattribute__('rally_network_name')
87     RALLY_PRIVATE_SUBNET_NAME = CONST.__getattribute__('rally_subnet_name')
88     RALLY_PRIVATE_SUBNET_CIDR = CONST.__getattribute__('rally_subnet_cidr')
89     RALLY_ROUTER_NAME = CONST.__getattribute__('rally_router_name')
90
91     def __init__(self, **kwargs):
92         """Initialize RallyBase object."""
93         super(RallyBase, self).__init__(**kwargs)
94         if 'os_creds' in kwargs:
95             self.os_creds = kwargs['os_creds']
96         else:
97             creds_override = None
98             if hasattr(CONST, 'snaps_os_creds_override'):
99                 creds_override = CONST.__getattribute__(
100                     'snaps_os_creds_override')
101
102             self.os_creds = openstack_tests.get_credentials(
103                 os_env_file=CONST.__getattribute__('openstack_creds'),
104                 overrides=creds_override)
105
106         self.guid = '-' + str(uuid.uuid4())
107
108         self.creators = []
109         self.mode = ''
110         self.summary = []
111         self.scenario_dir = ''
112         self.image_name = None
113         self.ext_net_name = None
114         self.priv_net_id = None
115         self.flavor_name = None
116         self.flavor_alt_name = None
117         self.smoke = None
118         self.test_name = None
119         self.start_time = None
120         self.result = None
121         self.details = None
122         self.compute_cnt = 0
123
124     def _build_task_args(self, test_file_name):
125         task_args = {'service_list': [test_file_name]}
126         task_args['image_name'] = self.image_name
127         task_args['flavor_name'] = self.flavor_name
128         task_args['flavor_alt_name'] = self.flavor_alt_name
129         task_args['glance_image_location'] = self.GLANCE_IMAGE_PATH
130         task_args['glance_image_format'] = self.GLANCE_IMAGE_FORMAT
131         task_args['tmpl_dir'] = self.TEMPLATE_DIR
132         task_args['sup_dir'] = self.SUPPORT_DIR
133         task_args['users_amount'] = self.USERS_AMOUNT
134         task_args['tenants_amount'] = self.TENANTS_AMOUNT
135         task_args['use_existing_users'] = False
136         task_args['iterations'] = self.ITERATIONS_AMOUNT
137         task_args['concurrency'] = self.CONCURRENCY
138         task_args['smoke'] = self.smoke
139
140         ext_net = self.ext_net_name
141         if ext_net:
142             task_args['floating_network'] = str(ext_net)
143         else:
144             task_args['floating_network'] = ''
145
146         net_id = self.priv_net_id
147         if net_id:
148             task_args['netid'] = str(net_id)
149         else:
150             task_args['netid'] = ''
151
152         return task_args
153
154     def _prepare_test_list(self, test_name):
155         test_yaml_file_name = 'opnfv-{}.yaml'.format(test_name)
156         scenario_file_name = os.path.join(self.RALLY_SCENARIO_DIR,
157                                           test_yaml_file_name)
158
159         if not os.path.exists(scenario_file_name):
160             scenario_file_name = os.path.join(self.scenario_dir,
161                                               test_yaml_file_name)
162
163             if not os.path.exists(scenario_file_name):
164                 raise Exception("The scenario '%s' does not exist."
165                                 % scenario_file_name)
166
167         LOGGER.debug('Scenario fetched from : %s', scenario_file_name)
168         test_file_name = os.path.join(self.TEMP_DIR, test_yaml_file_name)
169
170         if not os.path.exists(self.TEMP_DIR):
171             os.makedirs(self.TEMP_DIR)
172
173         self._apply_blacklist(scenario_file_name, test_file_name)
174         return test_file_name
175
176     @staticmethod
177     def get_task_id(cmd_raw):
178         """
179         Get task id from command rally result.
180
181         :param cmd_raw:
182         :return: task_id as string
183         """
184         taskid_re = re.compile('^Task +(.*): started$')
185         for line in cmd_raw.splitlines(True):
186             line = line.strip()
187             match = taskid_re.match(line)
188             if match:
189                 return match.group(1)
190         return None
191
192     @staticmethod
193     def task_succeed(json_raw):
194         """
195         Parse JSON from rally JSON results.
196
197         :param json_raw:
198         :return: Bool
199         """
200         rally_report = json.loads(json_raw)
201         for report in rally_report:
202             if report is None or report.get('result') is None:
203                 return False
204
205             for result in report.get('result'):
206                 if result is None or len(result.get('error')) > 0:
207                     return False
208
209         return True
210
211     def _migration_supported(self):
212         """Determine if migration is supported."""
213         if self.compute_cnt > 1:
214             return True
215
216         return False
217
218     @staticmethod
219     def get_cmd_output(proc):
220         """Get command stdout."""
221         result = ""
222         while proc.poll() is None:
223             line = proc.stdout.readline()
224             result += line
225         return result
226
227     @staticmethod
228     def excl_scenario():
229         """Exclude scenario."""
230         black_tests = []
231         try:
232             with open(RallyBase.BLACKLIST_FILE, 'r') as black_list_file:
233                 black_list_yaml = yaml.safe_load(black_list_file)
234
235             installer_type = CONST.__getattribute__('INSTALLER_TYPE')
236             deploy_scenario = CONST.__getattribute__('DEPLOY_SCENARIO')
237             if (bool(installer_type) and bool(deploy_scenario) and
238                     'scenario' in black_list_yaml.keys()):
239                 for item in black_list_yaml['scenario']:
240                     scenarios = item['scenarios']
241                     installers = item['installers']
242                     in_it = RallyBase.in_iterable_re
243                     if (in_it(deploy_scenario, scenarios) and
244                             in_it(installer_type, installers)):
245                         tests = item['tests']
246                         black_tests.extend(tests)
247         except Exception:
248             LOGGER.debug("Scenario exclusion not applied.")
249
250         return black_tests
251
252     @staticmethod
253     def in_iterable_re(needle, haystack):
254         """
255         Check if given needle is in the iterable haystack, using regex.
256
257         :param needle: string to be matched
258         :param haystack: iterable of strings (optionally regex patterns)
259         :return: True if needle is eqial to any of the elements in haystack,
260                  or if a nonempty regex pattern in haystack is found in needle.
261         """
262         # match without regex
263         if needle in haystack:
264             return True
265
266         for pattern in haystack:
267             # match if regex pattern is set and found in the needle
268             if pattern and re.search(pattern, needle) is not None:
269                 return True
270         else:
271             return False
272
273     def excl_func(self):
274         """Exclude functionalities."""
275         black_tests = []
276         func_list = []
277
278         try:
279             with open(RallyBase.BLACKLIST_FILE, 'r') as black_list_file:
280                 black_list_yaml = yaml.safe_load(black_list_file)
281
282             if not self._migration_supported():
283                 func_list.append("no_migration")
284
285             if 'functionality' in black_list_yaml.keys():
286                 for item in black_list_yaml['functionality']:
287                     functions = item['functions']
288                     for func in func_list:
289                         if func in functions:
290                             tests = item['tests']
291                             black_tests.extend(tests)
292         except Exception:  # pylint: disable=broad-except
293             LOGGER.debug("Functionality exclusion not applied.")
294
295         return black_tests
296
297     def _apply_blacklist(self, case_file_name, result_file_name):
298         """Apply blacklist."""
299         LOGGER.debug("Applying blacklist...")
300         cases_file = open(case_file_name, 'r')
301         result_file = open(result_file_name, 'w')
302
303         black_tests = list(set(self.excl_func() +
304                                self.excl_scenario()))
305
306         if black_tests:
307             LOGGER.debug("Blacklisted tests: " + str(black_tests))
308
309         include = True
310         for cases_line in cases_file:
311             if include:
312                 for black_tests_line in black_tests:
313                     if re.search(black_tests_line,
314                                  cases_line.strip().rstrip(':')):
315                         include = False
316                         break
317                 else:
318                     result_file.write(str(cases_line))
319             else:
320                 if cases_line.isspace():
321                     include = True
322
323         cases_file.close()
324         result_file.close()
325
326     @staticmethod
327     def file_is_empty(file_name):
328         """Determine is a file is empty."""
329         try:
330             if os.stat(file_name).st_size > 0:
331                 return False
332         except Exception:  # pylint: disable=broad-except
333             pass
334
335         return True
336
337     def _run_task(self, test_name):
338         """Run a task."""
339         LOGGER.info('Starting test scenario "%s" ...', test_name)
340
341         task_file = os.path.join(self.RALLY_DIR, 'task.yaml')
342         if not os.path.exists(task_file):
343             LOGGER.error("Task file '%s' does not exist.", task_file)
344             raise Exception("Task file '%s' does not exist.", task_file)
345
346         file_name = self._prepare_test_list(test_name)
347         if self.file_is_empty(file_name):
348             LOGGER.info('No tests for scenario "%s"', test_name)
349             return
350
351         cmd = (["rally", "task", "start", "--abort-on-sla-failure", "--task",
352                 task_file, "--task-args",
353                 str(self._build_task_args(test_name))])
354         LOGGER.debug('running command: %s', cmd)
355
356         proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
357                                 stderr=subprocess.STDOUT)
358         output = self._get_output(proc, test_name)
359         task_id = self.get_task_id(output)
360         LOGGER.debug('task_id : %s', task_id)
361
362         if task_id is None:
363             LOGGER.error('Failed to retrieve task_id, validating task...')
364             cmd = (["rally", "task", "validate", "--task", task_file,
365                     "--task-args", str(self._build_task_args(test_name))])
366             LOGGER.debug('running command: %s', cmd)
367             proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
368                                     stderr=subprocess.STDOUT)
369             output = self.get_cmd_output(proc)
370             LOGGER.error("Task validation result:" + "\n" + output)
371             return
372
373         # check for result directory and create it otherwise
374         if not os.path.exists(self.RESULTS_DIR):
375             LOGGER.debug('%s does not exist, we create it.',
376                          self.RESULTS_DIR)
377             os.makedirs(self.RESULTS_DIR)
378
379         # write html report file
380         report_html_name = 'opnfv-{}.html'.format(test_name)
381         report_html_dir = os.path.join(self.RESULTS_DIR, report_html_name)
382         cmd = (["rally", "task", "report", task_id, "--out", report_html_dir])
383
384         LOGGER.debug('running command: %s', cmd)
385         subprocess.Popen(cmd, stdout=subprocess.PIPE,
386                          stderr=subprocess.STDOUT)
387
388         # get and save rally operation JSON result
389         cmd = (["rally", "task", "results", task_id])
390         LOGGER.debug('running command: %s', cmd)
391         proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
392                                 stderr=subprocess.STDOUT)
393         json_results = self.get_cmd_output(proc)
394         report_json_name = 'opnfv-{}.json'.format(test_name)
395         report_json_dir = os.path.join(self.RESULTS_DIR, report_json_name)
396         with open(report_json_dir, 'w') as r_file:
397             LOGGER.debug('saving json file')
398             r_file.write(json_results)
399
400         # parse JSON operation result
401         if self.task_succeed(json_results):
402             LOGGER.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
403         else:
404             LOGGER.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")
405
406     def _get_output(self, proc, test_name):
407         result = ""
408         nb_tests = 0
409         overall_duration = 0.0
410         success = 0.0
411         nb_totals = 0
412
413         while proc.poll() is None:
414             line = proc.stdout.readline()
415             if ("Load duration" in line or
416                     "started" in line or
417                     "finished" in line or
418                     " Preparing" in line or
419                     "+-" in line or
420                     "|" in line):
421                 result += line
422             elif "test scenario" in line:
423                 result += "\n" + line
424             elif "Full duration" in line:
425                 result += line + "\n\n"
426
427             # parse output for summary report
428             if ("| " in line and
429                     "| action" not in line and
430                     "| Starting" not in line and
431                     "| Completed" not in line and
432                     "| ITER" not in line and
433                     "|   " not in line and
434                     "| total" not in line):
435                 nb_tests += 1
436             elif "| total" in line:
437                 percentage = ((line.split('|')[8]).strip(' ')).strip('%')
438                 try:
439                     success += float(percentage)
440                 except ValueError:
441                     LOGGER.info('Percentage error: %s, %s',
442                                 percentage, line)
443                 nb_totals += 1
444             elif "Full duration" in line:
445                 duration = line.split(': ')[1]
446                 try:
447                     overall_duration += float(duration)
448                 except ValueError:
449                     LOGGER.info('Duration error: %s, %s', duration, line)
450
451         overall_duration = "{:10.2f}".format(overall_duration)
452         if nb_totals == 0:
453             success_avg = 0
454         else:
455             success_avg = "{:0.2f}".format(success / nb_totals)
456
457         scenario_summary = {'test_name': test_name,
458                             'overall_duration': overall_duration,
459                             'nb_tests': nb_tests,
460                             'success': success_avg}
461         self.summary.append(scenario_summary)
462
463         LOGGER.debug("\n" + result)
464
465         return result
466
467     def _prepare_env(self):
468         LOGGER.debug('Validating the test name...')
469         if self.test_name not in self.TESTS:
470             raise Exception("Test name '%s' is invalid" % self.test_name)
471
472         network_name = self.RALLY_PRIVATE_NET_NAME + self.guid
473         subnet_name = self.RALLY_PRIVATE_SUBNET_NAME + self.guid
474         router_name = self.RALLY_ROUTER_NAME + self.guid
475         self.image_name = self.GLANCE_IMAGE_NAME + self.guid
476         self.flavor_name = self.FLAVOR_NAME + self.guid
477         self.flavor_alt_name = self.FLAVOR_ALT_NAME + self.guid
478         self.ext_net_name = snaps_utils.get_ext_net_name(self.os_creds)
479         self.compute_cnt = snaps_utils.get_active_compute_cnt(self.os_creds)
480
481         LOGGER.debug("Creating image '%s'...", self.image_name)
482         image_creator = deploy_utils.create_image(
483             self.os_creds, ImageConfig(
484                 name=self.image_name,
485                 image_file=self.GLANCE_IMAGE_PATH,
486                 img_format=self.GLANCE_IMAGE_FORMAT,
487                 image_user=self.GLANCE_IMAGE_USERNAME,
488                 public=True,
489                 extra_properties=self.GLANCE_IMAGE_EXTRA_PROPERTIES))
490         if image_creator is None:
491             raise Exception("Failed to create image")
492         self.creators.append(image_creator)
493
494         LOGGER.debug("Creating network '%s'...", network_name)
495         network_creator = deploy_utils.create_network(
496             self.os_creds, NetworkConfig(
497                 name=network_name,
498                 shared=True,
499                 subnet_settings=[SubnetConfig(
500                     name=subnet_name,
501                     cidr=self.RALLY_PRIVATE_SUBNET_CIDR)
502                 ]))
503         if network_creator is None:
504             raise Exception("Failed to create private network")
505         self.priv_net_id = network_creator.get_network().id
506         self.creators.append(network_creator)
507
508         LOGGER.debug("Creating router '%s'...", router_name)
509         router_creator = deploy_utils.create_router(
510             self.os_creds, RouterConfig(
511                 name=router_name,
512                 external_gateway=self.ext_net_name,
513                 internal_subnets=[subnet_name]))
514         if router_creator is None:
515             raise Exception("Failed to create router")
516         self.creators.append(router_creator)
517
518         LOGGER.debug("Creating flavor '%s'...", self.flavor_name)
519         flavor_creator = OpenStackFlavor(
520             self.os_creds, FlavorConfig(
521                 name=self.flavor_name, ram=self.FLAVOR_RAM, disk=1, vcpus=1,
522                 metadata=self.FLAVOR_EXTRA_SPECS))
523         if flavor_creator is None or flavor_creator.create() is None:
524             raise Exception("Failed to create flavor")
525         self.creators.append(flavor_creator)
526
527         LOGGER.debug("Creating flavor '%s'...", self.flavor_alt_name)
528         flavor_alt_creator = OpenStackFlavor(
529             self.os_creds, FlavorConfig(
530                 name=self.flavor_alt_name, ram=self.FLAVOR_RAM_ALT, disk=1,
531                 vcpus=1, metadata=self.FLAVOR_EXTRA_SPECS))
532         if flavor_alt_creator is None or flavor_alt_creator.create() is None:
533             raise Exception("Failed to create flavor")
534         self.creators.append(flavor_alt_creator)
535
536     def _run_tests(self):
537         if self.test_name == 'all':
538             for test in self.TESTS:
539                 if test == 'all' or test == 'vm':
540                     continue
541                 self._run_task(test)
542         else:
543             self._run_task(self.test_name)
544
545     def _generate_report(self):
546         report = (
547             "\n"
548             "                                                              "
549             "\n"
550             "                     Rally Summary Report\n"
551             "\n"
552             "+===================+============+===============+===========+"
553             "\n"
554             "| Module            | Duration   | nb. Test Run  | Success   |"
555             "\n"
556             "+===================+============+===============+===========+"
557             "\n")
558         payload = []
559
560         # for each scenario we draw a row for the table
561         total_duration = 0.0
562         total_nb_tests = 0
563         total_success = 0.0
564         for item in self.summary:
565             name = "{0:<17}".format(item['test_name'])
566             duration = float(item['overall_duration'])
567             total_duration += duration
568             duration = time.strftime("%M:%S", time.gmtime(duration))
569             duration = "{0:<10}".format(duration)
570             nb_tests = "{0:<13}".format(item['nb_tests'])
571             total_nb_tests += int(item['nb_tests'])
572             success = "{0:<10}".format(str(item['success']) + '%')
573             total_success += float(item['success'])
574             report += ("" +
575                        "| " + name + " | " + duration + " | " +
576                        nb_tests + " | " + success + "|\n" +
577                        "+-------------------+------------"
578                        "+---------------+-----------+\n")
579             payload.append({'module': name,
580                             'details': {'duration': item['overall_duration'],
581                                         'nb tests': item['nb_tests'],
582                                         'success': item['success']}})
583
584         total_duration_str = time.strftime("%H:%M:%S",
585                                            time.gmtime(total_duration))
586         total_duration_str2 = "{0:<10}".format(total_duration_str)
587         total_nb_tests_str = "{0:<13}".format(total_nb_tests)
588
589         try:
590             self.result = total_success / len(self.summary)
591         except ZeroDivisionError:
592             self.result = 100
593
594         success_rate = "{:0.2f}".format(self.result)
595         success_rate_str = "{0:<10}".format(str(success_rate) + '%')
596         report += ("+===================+============"
597                    "+===============+===========+")
598         report += "\n"
599         report += ("| TOTAL:            | " + total_duration_str2 + " | " +
600                    total_nb_tests_str + " | " + success_rate_str + "|\n")
601         report += ("+===================+============"
602                    "+===============+===========+")
603         report += "\n"
604
605         LOGGER.info("\n" + report)
606         payload.append({'summary': {'duration': total_duration,
607                                     'nb tests': total_nb_tests,
608                                     'nb success': success_rate}})
609
610         self.details = payload
611
612         LOGGER.info("Rally '%s' success_rate is %s%%",
613                     self.case_name, success_rate)
614
615     def _clean_up(self):
616         for creator in reversed(self.creators):
617             try:
618                 creator.clean()
619             except Exception as e:
620                 LOGGER.error('Unexpected error cleaning - %s', e)
621
622     @energy.enable_recording
623     def run(self, **kwargs):
624         """Run testcase."""
625         self.start_time = time.time()
626         try:
627             conf_utils.create_rally_deployment()
628             self._prepare_env()
629             self._run_tests()
630             self._generate_report()
631             res = testcase.TestCase.EX_OK
632         except Exception as exc:   # pylint: disable=broad-except
633             LOGGER.error('Error with run: %s', exc)
634             res = testcase.TestCase.EX_RUN_ERROR
635         finally:
636             self._clean_up()
637
638         self.stop_time = time.time()
639         return res
640
641
642 class RallySanity(RallyBase):
643     """Rally sanity testcase implementation."""
644
645     def __init__(self, **kwargs):
646         """Initialize RallySanity object."""
647         if "case_name" not in kwargs:
648             kwargs["case_name"] = "rally_sanity"
649         super(RallySanity, self).__init__(**kwargs)
650         self.mode = 'sanity'
651         self.test_name = 'all'
652         self.smoke = True
653         self.scenario_dir = os.path.join(self.RALLY_SCENARIO_DIR, 'sanity')
654
655
656 class RallyFull(RallyBase):
657     """Rally full testcase implementation."""
658
659     def __init__(self, **kwargs):
660         """Initialize RallyFull object."""
661         if "case_name" not in kwargs:
662             kwargs["case_name"] = "rally_full"
663         super(RallyFull, self).__init__(**kwargs)
664         self.mode = 'full'
665         self.test_name = 'all'
666         self.smoke = False
667         self.scenario_dir = os.path.join(self.RALLY_SCENARIO_DIR, 'full')