0c657688e0ec474cb24c5e66415b2c0e08db4c59
[functest.git] / functest / opnfv_tests / openstack / rally / rally.py
1 #!/usr/bin/env python
2 #
3 # Copyright (c) 2015 All rights reserved
4 # This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10
11 """Rally testcases implementation."""
12
13 from __future__ import division
14
15 import json
16 import logging
17 import os
18 import re
19 import subprocess
20 import time
21 import uuid
22
23 import pkg_resources
24 import yaml
25
26 from functest.core import testcase
27 from functest.energy import energy
28 from functest.opnfv_tests.openstack.snaps import snaps_utils
29 from functest.opnfv_tests.openstack.tempest import conf_utils
30 from functest.utils.constants import CONST
31
32 from snaps.config.flavor import FlavorConfig
33 from snaps.config.image import ImageConfig
34 from snaps.config.network import NetworkConfig, SubnetConfig
35 from snaps.config.router import RouterConfig
36
37 from snaps.openstack.create_flavor import OpenStackFlavor
38 from snaps.openstack.tests import openstack_tests
39 from snaps.openstack.utils import deploy_utils
40
41 LOGGER = logging.getLogger(__name__)
42
43
class RallyBase(testcase.TestCase):
    """Base class for Rally testcases implementation."""

    TESTS = ['authenticate', 'glance', 'ceilometer', 'cinder', 'heat',
             'keystone', 'neutron', 'nova', 'quotas', 'vm', 'all']
    GLANCE_IMAGE_NAME = CONST.__getattribute__('openstack_image_name')
    GLANCE_IMAGE_FILENAME = CONST.__getattribute__('openstack_image_file_name')
    GLANCE_IMAGE_PATH = os.path.join(
        CONST.__getattribute__('dir_functest_images'),
        GLANCE_IMAGE_FILENAME)
    GLANCE_IMAGE_FORMAT = CONST.__getattribute__('openstack_image_disk_format')
    GLANCE_IMAGE_USERNAME = CONST.__getattribute__('openstack_image_username')
    GLANCE_IMAGE_EXTRA_PROPERTIES = {}
    if hasattr(CONST, 'openstack_extra_properties'):
        GLANCE_IMAGE_EXTRA_PROPERTIES = CONST.__getattribute__(
            'openstack_extra_properties')
    FLAVOR_NAME = CONST.__getattribute__('rally_flavor_name')
    FLAVOR_ALT_NAME = CONST.__getattribute__('rally_flavor_alt_name')
    FLAVOR_EXTRA_SPECS = None
    if hasattr(CONST, 'flavor_extra_specs'):
        FLAVOR_EXTRA_SPECS = CONST.__getattribute__('flavor_extra_specs')

    RALLY_DIR = pkg_resources.resource_filename(
        'functest', 'opnfv_tests/openstack/rally')
    RALLY_SCENARIO_DIR = pkg_resources.resource_filename(
        'functest', 'opnfv_tests/openstack/rally/scenario')
    TEMPLATE_DIR = pkg_resources.resource_filename(
        'functest', 'opnfv_tests/openstack/rally/scenario/templates')
    SUPPORT_DIR = pkg_resources.resource_filename(
        'functest', 'opnfv_tests/openstack/rally/scenario/support')
    USERS_AMOUNT = 2
    TENANTS_AMOUNT = 3
    ITERATIONS_AMOUNT = 10
    CONCURRENCY = 4
    RESULTS_DIR = os.path.join(CONST.__getattribute__('dir_results'), 'rally')
    BLACKLIST_FILE = os.path.join(RALLY_DIR, "blacklist.txt")
    TEMP_DIR = os.path.join(RALLY_DIR, "var")

    RALLY_PRIVATE_NET_NAME = CONST.__getattribute__('rally_network_name')
    RALLY_PRIVATE_SUBNET_NAME = CONST.__getattribute__('rally_subnet_name')
    RALLY_PRIVATE_SUBNET_CIDR = CONST.__getattribute__('rally_subnet_cidr')
    RALLY_ROUTER_NAME = CONST.__getattribute__('rally_router_name')

    def __init__(self, **kwargs):
        """Initialize RallyBase object."""
        super(RallyBase, self).__init__(**kwargs)
        if 'os_creds' in kwargs:
            self.os_creds = kwargs['os_creds']
        else:
            creds_override = None
            if hasattr(CONST, 'snaps_os_creds_override'):
                creds_override = CONST.__getattribute__(
                    'snaps_os_creds_override')

            self.os_creds = openstack_tests.get_credentials(
                os_env_file=CONST.__getattribute__('openstack_creds'),
                overrides=creds_override)

        # Suffix appended to every created resource name so concurrent
        # runs do not clash (empty unless rally_unique_names is set).
        self.guid = ''
        if CONST.__getattribute__('rally_unique_names'):
            self.guid = '-' + str(uuid.uuid4())

        self.creators = []
        self.mode = ''
        self.summary = []
        self.scenario_dir = ''
        self.image_name = None
        self.ext_net_name = None
        self.priv_net_id = None
        self.flavor_name = None
        self.flavor_alt_name = None
        self.smoke = None
        self.test_name = None
        self.start_time = None
        self.result = None
        self.details = None
        self.compute_cnt = 0

    def _build_task_args(self, test_file_name):
        """Build the dict passed to rally through --task-args.

        :param test_file_name: scenario name used as the service list entry
        :return: dict of task arguments
        """
        task_args = {'service_list': [test_file_name]}
        task_args['image_name'] = self.image_name
        task_args['flavor_name'] = self.flavor_name
        task_args['flavor_alt_name'] = self.flavor_alt_name
        task_args['glance_image_location'] = self.GLANCE_IMAGE_PATH
        task_args['glance_image_format'] = self.GLANCE_IMAGE_FORMAT
        task_args['tmpl_dir'] = self.TEMPLATE_DIR
        task_args['sup_dir'] = self.SUPPORT_DIR
        task_args['users_amount'] = self.USERS_AMOUNT
        task_args['tenants_amount'] = self.TENANTS_AMOUNT
        task_args['use_existing_users'] = False
        task_args['iterations'] = self.ITERATIONS_AMOUNT
        task_args['concurrency'] = self.CONCURRENCY
        task_args['smoke'] = self.smoke

        # empty strings (not None) are expected by the task templates
        ext_net = self.ext_net_name
        if ext_net:
            task_args['floating_network'] = str(ext_net)
        else:
            task_args['floating_network'] = ''

        net_id = self.priv_net_id
        if net_id:
            task_args['netid'] = str(net_id)
        else:
            task_args['netid'] = ''

        return task_args

    def _prepare_test_list(self, test_name):
        """Build the blacklist-filtered test file for the given scenario.

        Looks for the scenario yaml first in the common scenario dir, then
        in the mode-specific one, and writes the filtered copy to TEMP_DIR.

        :param test_name: scenario name (e.g. 'nova')
        :return: path of the generated test file
        :raises Exception: when no scenario file exists for test_name
        """
        test_yaml_file_name = 'opnfv-{}.yaml'.format(test_name)
        scenario_file_name = os.path.join(self.RALLY_SCENARIO_DIR,
                                          test_yaml_file_name)

        if not os.path.exists(scenario_file_name):
            scenario_file_name = os.path.join(self.scenario_dir,
                                              test_yaml_file_name)

            if not os.path.exists(scenario_file_name):
                raise Exception("The scenario '%s' does not exist."
                                % scenario_file_name)

        LOGGER.debug('Scenario fetched from : %s', scenario_file_name)
        test_file_name = os.path.join(self.TEMP_DIR, test_yaml_file_name)

        if not os.path.exists(self.TEMP_DIR):
            os.makedirs(self.TEMP_DIR)

        self._apply_blacklist(scenario_file_name, test_file_name)
        return test_file_name

    @staticmethod
    def get_task_id(cmd_raw):
        """
        Get task id from command rally result.

        :param cmd_raw: raw text output of 'rally task start'
        :return: task_id as string, or None when not found
        """
        taskid_re = re.compile('^Task +(.*): started$')
        for line in cmd_raw.splitlines(True):
            line = line.strip()
            match = taskid_re.match(line)
            if match:
                return match.group(1)
        return None

    @staticmethod
    def task_succeed(json_raw):
        """
        Parse JSON from rally JSON results.

        :param json_raw: JSON document produced by 'rally task results'
        :return: True when every result entry exists and reports no error
        """
        rally_report = json.loads(json_raw)
        for report in rally_report:
            if report is None or report.get('result') is None:
                return False

            for result in report.get('result'):
                if result is None or len(result.get('error')) > 0:
                    return False

        return True

    def _migration_supported(self):
        """Determine if migration is supported (more than one compute)."""
        return self.compute_cnt > 1

    @staticmethod
    def get_cmd_output(proc):
        """Get command stdout."""
        result = ""
        while proc.poll() is None:
            line = proc.stdout.readline()
            result += line
        return result

    @staticmethod
    def excl_scenario():
        """Return the tests blacklisted for the current deploy scenario."""
        black_tests = []
        try:
            with open(RallyBase.BLACKLIST_FILE, 'r') as black_list_file:
                black_list_yaml = yaml.safe_load(black_list_file)

            installer_type = CONST.__getattribute__('INSTALLER_TYPE')
            deploy_scenario = CONST.__getattribute__('DEPLOY_SCENARIO')
            if (bool(installer_type) and bool(deploy_scenario) and
                    'scenario' in black_list_yaml.keys()):
                for item in black_list_yaml['scenario']:
                    scenarios = item['scenarios']
                    installers = item['installers']
                    in_it = RallyBase.in_iterable_re
                    if (in_it(deploy_scenario, scenarios) and
                            in_it(installer_type, installers)):
                        tests = item['tests']
                        black_tests.extend(tests)
        except Exception:  # pylint: disable=broad-except
            # best-effort: a missing/malformed blacklist disables exclusion
            LOGGER.debug("Scenario exclusion not applied.")

        return black_tests

    @staticmethod
    def in_iterable_re(needle, haystack):
        """
        Check if given needle is in the iterable haystack, using regex.

        :param needle: string to be matched
        :param haystack: iterable of strings (optionally regex patterns)
        :return: True if needle is equal to any of the elements in haystack,
                 or if a nonempty regex pattern in haystack is found in needle.
        """
        # match without regex
        if needle in haystack:
            return True

        # match if any nonempty regex pattern is found in the needle
        return any(pattern and re.search(pattern, needle) is not None
                   for pattern in haystack)

    def excl_func(self):
        """Return the tests blacklisted for unsupported functionalities."""
        black_tests = []
        func_list = []

        try:
            with open(RallyBase.BLACKLIST_FILE, 'r') as black_list_file:
                black_list_yaml = yaml.safe_load(black_list_file)

            if not self._migration_supported():
                func_list.append("no_migration")

            if 'functionality' in black_list_yaml.keys():
                for item in black_list_yaml['functionality']:
                    functions = item['functions']
                    for func in func_list:
                        if func in functions:
                            tests = item['tests']
                            black_tests.extend(tests)
        except Exception:  # pylint: disable=broad-except
            # best-effort: a missing/malformed blacklist disables exclusion
            LOGGER.debug("Functionality exclusion not applied.")

        return black_tests

    def _apply_blacklist(self, case_file_name, result_file_name):
        """Copy case_file_name to result_file_name, dropping blacklisted tests.

        A blacklisted entry suppresses lines until the next blank line,
        i.e. the whole yaml mapping for that test.
        """
        LOGGER.debug("Applying blacklist...")
        black_tests = list(set(self.excl_func() +
                               self.excl_scenario()))

        if black_tests:
            LOGGER.debug("Blacklisted tests: %s", black_tests)

        include = True
        # 'with' guarantees both files are closed even on exception
        with open(case_file_name, 'r') as cases_file, \
                open(result_file_name, 'w') as result_file:
            for cases_line in cases_file:
                if include:
                    for black_tests_line in black_tests:
                        if re.search(black_tests_line,
                                     cases_line.strip().rstrip(':')):
                            include = False
                            break
                    else:
                        result_file.write(str(cases_line))
                else:
                    if cases_line.isspace():
                        include = True

    @staticmethod
    def file_is_empty(file_name):
        """Determine if a file is empty (missing files count as empty)."""
        try:
            if os.stat(file_name).st_size > 0:
                return False
        except OSError:
            # file does not exist or cannot be stat'ed
            pass

        return True

    def _run_task(self, test_name):
        """Run a task.

        Starts the rally task, saves html and json reports under
        RESULTS_DIR and logs whether the scenario succeeded.

        :param test_name: scenario name (e.g. 'nova')
        :raises Exception: when the common task file is missing
        """
        LOGGER.info('Starting test scenario "%s" ...', test_name)

        task_file = os.path.join(self.RALLY_DIR, 'task.yaml')
        if not os.path.exists(task_file):
            LOGGER.error("Task file '%s' does not exist.", task_file)
            # interpolate the path into the message (a trailing ', arg'
            # would never be formatted by Exception)
            raise Exception("Task file '%s' does not exist." % task_file)

        file_name = self._prepare_test_list(test_name)
        if self.file_is_empty(file_name):
            LOGGER.info('No tests for scenario "%s"', test_name)
            return

        cmd = (["rally", "task", "start", "--abort-on-sla-failure", "--task",
                task_file, "--task-args",
                str(self._build_task_args(test_name))])
        LOGGER.debug('running command: %s', cmd)

        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        output = self._get_output(proc, test_name)
        task_id = self.get_task_id(output)
        LOGGER.debug('task_id : %s', task_id)

        if task_id is None:
            LOGGER.error('Failed to retrieve task_id, validating task...')
            cmd = (["rally", "task", "validate", "--task", task_file,
                    "--task-args", str(self._build_task_args(test_name))])
            LOGGER.debug('running command: %s', cmd)
            proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            output = self.get_cmd_output(proc)
            LOGGER.error("Task validation result:\n%s", output)
            return

        # check for result directory and create it otherwise
        if not os.path.exists(self.RESULTS_DIR):
            LOGGER.debug('%s does not exist, we create it.',
                         self.RESULTS_DIR)
            os.makedirs(self.RESULTS_DIR)

        # write html report file
        report_html_name = 'opnfv-{}.html'.format(test_name)
        report_html_dir = os.path.join(self.RESULTS_DIR, report_html_name)
        cmd = (["rally", "task", "report", task_id, "--out", report_html_dir])

        LOGGER.debug('running command: %s', cmd)
        # wait for the report command to finish and drain its pipe so the
        # html file is complete and no zombie process is left behind
        subprocess.Popen(cmd, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT).communicate()

        # get and save rally operation JSON result
        cmd = (["rally", "task", "results", task_id])
        LOGGER.debug('running command: %s', cmd)
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        json_results = self.get_cmd_output(proc)
        report_json_name = 'opnfv-{}.json'.format(test_name)
        report_json_dir = os.path.join(self.RESULTS_DIR, report_json_name)
        with open(report_json_dir, 'w') as r_file:
            LOGGER.debug('saving json file')
            r_file.write(json_results)

        # parse JSON operation result
        if self.task_succeed(json_results):
            LOGGER.info('Test scenario: "%s" OK.\n', test_name)
        else:
            LOGGER.info('Test scenario: "%s" Failed.\n', test_name)

    def _get_output(self, proc, test_name):
        """Consume rally stdout, building a filtered log and a summary.

        Appends a per-scenario entry (name, duration, test count, success
        rate) to self.summary as a side effect.

        :param proc: running rally subprocess (stdout piped)
        :param test_name: scenario name used in the summary entry
        :return: filtered output as a string
        """
        result = ""
        nb_tests = 0
        overall_duration = 0.0
        success = 0.0
        nb_totals = 0

        while proc.poll() is None:
            line = proc.stdout.readline()
            # keep only the informative lines in the filtered output
            if ("Load duration" in line or
                    "started" in line or
                    "finished" in line or
                    " Preparing" in line or
                    "+-" in line or
                    "|" in line):
                result += line
            elif "test scenario" in line:
                result += "\n" + line
            elif "Full duration" in line:
                result += line + "\n\n"

            # parse output for summary report
            if ("| " in line and
                    "| action" not in line and
                    "| Starting" not in line and
                    "| Completed" not in line and
                    "| ITER" not in line and
                    "|   " not in line and
                    "| total" not in line):
                nb_tests += 1
            elif "| total" in line:
                # 9th column of the 'total' row holds the success percentage
                percentage = ((line.split('|')[8]).strip(' ')).strip('%')
                try:
                    success += float(percentage)
                except ValueError:
                    LOGGER.info('Percentage error: %s, %s',
                                percentage, line)
                nb_totals += 1
            elif "Full duration" in line:
                duration = line.split(': ')[1]
                try:
                    overall_duration += float(duration)
                except ValueError:
                    LOGGER.info('Duration error: %s, %s', duration, line)

        overall_duration = "{:10.2f}".format(overall_duration)
        if nb_totals == 0:
            success_avg = 0
        else:
            success_avg = "{:0.2f}".format(success / nb_totals)

        scenario_summary = {'test_name': test_name,
                            'overall_duration': overall_duration,
                            'nb_tests': nb_tests,
                            'success': success_avg}
        self.summary.append(scenario_summary)

        LOGGER.debug("\n%s", result)

        return result

    def _prepare_env(self):
        """Create the OpenStack resources needed by the scenarios.

        Creates image, private network, router and two flavors, recording
        every creator in self.creators for later cleanup.

        :raises Exception: on invalid test name or any creation failure
        """
        LOGGER.debug('Validating the test name...')
        if self.test_name not in self.TESTS:
            raise Exception("Test name '%s' is invalid" % self.test_name)

        network_name = self.RALLY_PRIVATE_NET_NAME + self.guid
        subnet_name = self.RALLY_PRIVATE_SUBNET_NAME + self.guid
        router_name = self.RALLY_ROUTER_NAME + self.guid
        self.image_name = self.GLANCE_IMAGE_NAME + self.guid
        self.flavor_name = self.FLAVOR_NAME + self.guid
        self.flavor_alt_name = self.FLAVOR_ALT_NAME + self.guid
        self.ext_net_name = snaps_utils.get_ext_net_name(self.os_creds)
        self.compute_cnt = snaps_utils.get_active_compute_cnt(self.os_creds)

        LOGGER.debug("Creating image '%s'...", self.image_name)
        image_creator = deploy_utils.create_image(
            self.os_creds, ImageConfig(
                name=self.image_name,
                image_file=self.GLANCE_IMAGE_PATH,
                img_format=self.GLANCE_IMAGE_FORMAT,
                image_user=self.GLANCE_IMAGE_USERNAME,
                public=True,
                extra_properties=self.GLANCE_IMAGE_EXTRA_PROPERTIES))
        if image_creator is None:
            raise Exception("Failed to create image")
        self.creators.append(image_creator)

        LOGGER.debug("Creating network '%s'...", network_name)
        network_creator = deploy_utils.create_network(
            self.os_creds, NetworkConfig(
                name=network_name,
                shared=True,
                subnet_settings=[SubnetConfig(
                    name=subnet_name,
                    cidr=self.RALLY_PRIVATE_SUBNET_CIDR)
                ]))
        if network_creator is None:
            raise Exception("Failed to create private network")
        self.priv_net_id = network_creator.get_network().id
        self.creators.append(network_creator)

        LOGGER.debug("Creating router '%s'...", router_name)
        router_creator = deploy_utils.create_router(
            self.os_creds, RouterConfig(
                name=router_name,
                external_gateway=self.ext_net_name,
                internal_subnets=[subnet_name]))
        if router_creator is None:
            raise Exception("Failed to create router")
        self.creators.append(router_creator)

        LOGGER.debug("Creating flavor '%s'...", self.flavor_name)
        flavor_creator = OpenStackFlavor(
            self.os_creds, FlavorConfig(
                name=self.flavor_name, ram=512, disk=1, vcpus=1,
                metadata=self.FLAVOR_EXTRA_SPECS))
        if flavor_creator is None or flavor_creator.create() is None:
            raise Exception("Failed to create flavor")
        self.creators.append(flavor_creator)

        LOGGER.debug("Creating flavor '%s'...", self.flavor_alt_name)
        flavor_alt_creator = OpenStackFlavor(
            self.os_creds, FlavorConfig(
                name=self.flavor_alt_name, ram=1024, disk=1, vcpus=1,
                metadata=self.FLAVOR_EXTRA_SPECS))
        if flavor_alt_creator is None or flavor_alt_creator.create() is None:
            raise Exception("Failed to create flavor")
        self.creators.append(flavor_alt_creator)

    def _run_tests(self):
        """Run the selected scenario, or every scenario when 'all'."""
        if self.test_name == 'all':
            for test in self.TESTS:
                # 'all' is the meta entry and 'vm' is excluded from 'all'
                if test in ('all', 'vm'):
                    continue
                self._run_task(test)
        else:
            self._run_task(self.test_name)

    def _generate_report(self):
        """Render the summary table, set self.result and self.details."""
        report = (
            "\n"
            "                                                              "
            "\n"
            "                     Rally Summary Report\n"
            "\n"
            "+===================+============+===============+===========+"
            "\n"
            "| Module            | Duration   | nb. Test Run  | Success   |"
            "\n"
            "+===================+============+===============+===========+"
            "\n")
        payload = []

        # for each scenario we draw a row for the table
        total_duration = 0.0
        total_nb_tests = 0
        total_success = 0.0
        for item in self.summary:
            name = "{0:<17}".format(item['test_name'])
            duration = float(item['overall_duration'])
            total_duration += duration
            duration = time.strftime("%M:%S", time.gmtime(duration))
            duration = "{0:<10}".format(duration)
            nb_tests = "{0:<13}".format(item['nb_tests'])
            total_nb_tests += int(item['nb_tests'])
            success = "{0:<10}".format(str(item['success']) + '%')
            total_success += float(item['success'])
            report += ("" +
                       "| " + name + " | " + duration + " | " +
                       nb_tests + " | " + success + "|\n" +
                       "+-------------------+------------"
                       "+---------------+-----------+\n")
            payload.append({'module': name,
                            'details': {'duration': item['overall_duration'],
                                        'nb tests': item['nb_tests'],
                                        'success': item['success']}})

        total_duration_str = time.strftime("%H:%M:%S",
                                           time.gmtime(total_duration))
        total_duration_str2 = "{0:<10}".format(total_duration_str)
        total_nb_tests_str = "{0:<13}".format(total_nb_tests)

        # an empty summary (no scenario ran) counts as full success
        try:
            self.result = total_success / len(self.summary)
        except ZeroDivisionError:
            self.result = 100

        success_rate = "{:0.2f}".format(self.result)
        success_rate_str = "{0:<10}".format(str(success_rate) + '%')
        report += ("+===================+============"
                   "+===============+===========+")
        report += "\n"
        report += ("| TOTAL:            | " + total_duration_str2 + " | " +
                   total_nb_tests_str + " | " + success_rate_str + "|\n")
        report += ("+===================+============"
                   "+===============+===========+")
        report += "\n"

        LOGGER.info("\n%s", report)
        payload.append({'summary': {'duration': total_duration,
                                    'nb tests': total_nb_tests,
                                    'nb success': success_rate}})

        self.details = payload

        LOGGER.info("Rally '%s' success_rate is %s%%",
                    self.case_name, success_rate)

    def _clean_up(self):
        """Delete every created resource, most recent first (best effort)."""
        for creator in reversed(self.creators):
            try:
                creator.clean()
            except Exception as exc:  # pylint: disable=broad-except
                LOGGER.error('Unexpected error cleaning - %s', exc)

    @energy.enable_recording
    def run(self, **kwargs):
        """Run testcase."""
        self.start_time = time.time()
        try:
            conf_utils.create_rally_deployment()
            self._prepare_env()
            self._run_tests()
            self._generate_report()
            res = testcase.TestCase.EX_OK
        except Exception as exc:   # pylint: disable=broad-except
            LOGGER.error('Error with run: %s', exc)
            res = testcase.TestCase.EX_RUN_ERROR
        finally:
            # resources are removed even when the run failed
            self._clean_up()

        self.stop_time = time.time()
        return res
638
639
class RallySanity(RallyBase):
    """Rally sanity testcase implementation."""

    def __init__(self, **kwargs):
        """Initialize RallySanity object."""
        # keep the caller's case_name when given, default otherwise
        kwargs.setdefault("case_name", "rally_sanity")
        super(RallySanity, self).__init__(**kwargs)
        # sanity mode: smoke-flagged scenarios from the 'sanity' dir
        self.mode = 'sanity'
        self.test_name = 'all'
        self.smoke = True
        self.scenario_dir = os.path.join(self.RALLY_SCENARIO_DIR, 'sanity')
652
653
class RallyFull(RallyBase):
    """Rally full testcase implementation."""

    def __init__(self, **kwargs):
        """Initialize RallyFull object."""
        # keep the caller's case_name when given, default otherwise
        kwargs.setdefault("case_name", "rally_full")
        super(RallyFull, self).__init__(**kwargs)
        # full mode: non-smoke scenarios from the 'full' dir
        self.mode = 'full'
        self.test_name = 'all'
        self.smoke = False
        self.scenario_dir = os.path.join(self.RALLY_SCENARIO_DIR, 'full')