Removal of deprecated SNAPS-OO classes.
[functest.git] / functest / opnfv_tests / openstack / rally / rally.py
1 #!/usr/bin/env python
2 #
3 # Copyright (c) 2015 All rights reserved
4 # This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10
11 """Rally testcases implementation."""
12
13 from __future__ import division
14
15 import json
16 import logging
17 import os
18 import re
19 import subprocess
20 import time
21 import uuid
22
23 import pkg_resources
24 import yaml
25
26 from functest.core import testcase
27 from functest.energy import energy
28 from functest.opnfv_tests.openstack.snaps import snaps_utils
29 from functest.opnfv_tests.openstack.tempest import conf_utils
30 from functest.utils.constants import CONST
31
32 from snaps.config.flavor import FlavorConfig
33 from snaps.config.image import ImageConfig
34 from snaps.config.network import NetworkConfig, SubnetConfig
35 from snaps.config.router import RouterConfig
36
37 from snaps.openstack.create_flavor import OpenStackFlavor
38 from snaps.openstack.tests import openstack_tests
39 from snaps.openstack.utils import deploy_utils
40
41 LOGGER = logging.getLogger(__name__)
42
43
class RallyBase(testcase.TestCase):
    """Base class for Rally testcases implementation."""

    # Scenario groups that may be requested; 'all' expands to every group
    # except 'vm' (see _run_tests()).
    TESTS = ['authenticate', 'glance', 'ceilometer', 'cinder', 'heat',
             'keystone', 'neutron', 'nova', 'quotas', 'vm', 'all']
    GLANCE_IMAGE_NAME = CONST.__getattribute__('openstack_image_name')
    GLANCE_IMAGE_FILENAME = CONST.__getattribute__('openstack_image_file_name')
    GLANCE_IMAGE_PATH = os.path.join(
        CONST.__getattribute__('dir_functest_images'),
        GLANCE_IMAGE_FILENAME)
    GLANCE_IMAGE_FORMAT = CONST.__getattribute__('openstack_image_disk_format')
    GLANCE_IMAGE_USERNAME = CONST.__getattribute__('openstack_image_username')
    GLANCE_IMAGE_EXTRA_PROPERTIES = {}
    if hasattr(CONST, 'openstack_extra_properties'):
        GLANCE_IMAGE_EXTRA_PROPERTIES = CONST.__getattribute__(
            'openstack_extra_properties')
    FLAVOR_NAME = CONST.__getattribute__('rally_flavor_name')
    FLAVOR_ALT_NAME = CONST.__getattribute__('rally_flavor_alt_name')
    FLAVOR_EXTRA_SPECS = None
    if hasattr(CONST, 'flavor_extra_specs'):
        FLAVOR_EXTRA_SPECS = CONST.__getattribute__('flavor_extra_specs')

    RALLY_DIR = pkg_resources.resource_filename(
        'functest', 'opnfv_tests/openstack/rally')
    RALLY_SCENARIO_DIR = pkg_resources.resource_filename(
        'functest', 'opnfv_tests/openstack/rally/scenario')
    TEMPLATE_DIR = pkg_resources.resource_filename(
        'functest', 'opnfv_tests/openstack/rally/scenario/templates')
    SUPPORT_DIR = pkg_resources.resource_filename(
        'functest', 'opnfv_tests/openstack/rally/scenario/support')
    USERS_AMOUNT = 2
    TENANTS_AMOUNT = 3
    ITERATIONS_AMOUNT = 10
    CONCURRENCY = 4
    RESULTS_DIR = os.path.join(CONST.__getattribute__('dir_results'), 'rally')
    BLACKLIST_FILE = os.path.join(RALLY_DIR, "blacklist.txt")
    TEMP_DIR = os.path.join(RALLY_DIR, "var")

    RALLY_PRIVATE_NET_NAME = CONST.__getattribute__('rally_network_name')
    RALLY_PRIVATE_SUBNET_NAME = CONST.__getattribute__('rally_subnet_name')
    RALLY_PRIVATE_SUBNET_CIDR = CONST.__getattribute__('rally_subnet_cidr')
    RALLY_ROUTER_NAME = CONST.__getattribute__('rally_router_name')

    def __init__(self, **kwargs):
        """Initialize RallyBase object."""
        super(RallyBase, self).__init__(**kwargs)
        if 'os_creds' in kwargs:
            self.os_creds = kwargs['os_creds']
        else:
            creds_override = None
            if hasattr(CONST, 'snaps_os_creds_override'):
                creds_override = CONST.__getattribute__(
                    'snaps_os_creds_override')

            self.os_creds = openstack_tests.get_credentials(
                os_env_file=CONST.__getattribute__('openstack_creds'),
                overrides=creds_override)

        # Suffix appended to every created resource name so concurrent
        # runs do not clash (empty unless rally_unique_names is set).
        self.guid = ''
        if CONST.__getattribute__('rally_unique_names'):
            self.guid = '-' + str(uuid.uuid4())

        # SNAPS creators, cleaned in reverse order by _clean_up().
        self.creators = []
        self.mode = ''
        self.summary = []
        self.scenario_dir = ''
        self.image_name = None
        self.ext_net_name = None
        self.priv_net_id = None
        self.flavor_name = None
        self.flavor_alt_name = None
        self.smoke = None
        self.test_name = None
        self.start_time = None
        self.result = None
        self.details = None
        self.compute_cnt = 0

    def _build_task_args(self, test_file_name):
        """Build the dict passed to rally via --task-args.

        :param test_file_name: name of the scenario group under test
        :return: dict of rally task arguments
        """
        task_args = {'service_list': [test_file_name]}
        task_args['image_name'] = self.image_name
        task_args['flavor_name'] = self.flavor_name
        task_args['flavor_alt_name'] = self.flavor_alt_name
        task_args['glance_image_location'] = self.GLANCE_IMAGE_PATH
        task_args['glance_image_format'] = self.GLANCE_IMAGE_FORMAT
        task_args['tmpl_dir'] = self.TEMPLATE_DIR
        task_args['sup_dir'] = self.SUPPORT_DIR
        task_args['users_amount'] = self.USERS_AMOUNT
        task_args['tenants_amount'] = self.TENANTS_AMOUNT
        task_args['use_existing_users'] = False
        task_args['iterations'] = self.ITERATIONS_AMOUNT
        task_args['concurrency'] = self.CONCURRENCY
        task_args['smoke'] = self.smoke

        # Fall back to empty strings so the rally template always renders.
        ext_net = self.ext_net_name
        task_args['floating_network'] = str(ext_net) if ext_net else ''

        net_id = self.priv_net_id
        task_args['netid'] = str(net_id) if net_id else ''

        return task_args

    def _prepare_test_list(self, test_name):
        """Resolve the scenario file and write its blacklisted copy.

        :param test_name: scenario group name (e.g. 'nova')
        :return: path of the filtered scenario file under TEMP_DIR
        :raises Exception: when no scenario file can be found
        """
        test_yaml_file_name = 'opnfv-{}.yaml'.format(test_name)
        scenario_file_name = os.path.join(self.RALLY_SCENARIO_DIR,
                                          test_yaml_file_name)

        # Look first in the common scenario dir, then in the mode-specific
        # one (sanity/full) configured by the subclass.
        if not os.path.exists(scenario_file_name):
            scenario_file_name = os.path.join(self.scenario_dir,
                                              test_yaml_file_name)

            if not os.path.exists(scenario_file_name):
                raise Exception("The scenario '%s' does not exist."
                                % scenario_file_name)

        LOGGER.debug('Scenario fetched from : %s', scenario_file_name)
        test_file_name = os.path.join(self.TEMP_DIR, test_yaml_file_name)

        if not os.path.exists(self.TEMP_DIR):
            os.makedirs(self.TEMP_DIR)

        self._apply_blacklist(scenario_file_name, test_file_name)
        return test_file_name

    @staticmethod
    def get_task_id(cmd_raw):
        """
        Get task id from rally command output.

        :param cmd_raw: raw stdout of 'rally task start'
        :return: task_id as string, or None when not found
        """
        taskid_re = re.compile(r'^Task +(.*): started$')
        for line in cmd_raw.splitlines(True):
            line = line.strip()
            match = taskid_re.match(line)
            if match:
                return match.group(1)
        return None

    @staticmethod
    def task_succeed(json_raw):
        """
        Parse JSON from rally JSON results.

        :param json_raw: JSON string produced by 'rally task results'
        :return: True when every scenario iteration ran without error
        """
        rally_report = json.loads(json_raw)
        for report in rally_report:
            if report is None or report.get('result') is None:
                return False

            for result in report.get('result'):
                # A missing 'error' key is treated like an empty error
                # list (the previous len() call raised TypeError on None).
                if result is None or result.get('error'):
                    return False

        return True

    def _migration_supported(self):
        """Determine if migration is supported (needs > 1 compute node)."""
        return self.compute_cnt > 1

    @staticmethod
    def get_cmd_output(proc):
        """Get the whole stdout of a finished or running subprocess.

        communicate() waits for termination and drains the pipe, so no
        trailing output is lost (the previous poll()/readline() loop could
        drop lines buffered after the process exited).
        """
        return proc.communicate()[0]

    @staticmethod
    def excl_scenario():
        """Return the tests blacklisted for the current scenario/installer."""
        black_tests = []
        try:
            with open(RallyBase.BLACKLIST_FILE, 'r') as black_list_file:
                black_list_yaml = yaml.safe_load(black_list_file)

            installer_type = CONST.__getattribute__('INSTALLER_TYPE')
            deploy_scenario = CONST.__getattribute__('DEPLOY_SCENARIO')
            if (bool(installer_type) and bool(deploy_scenario) and
                    'scenario' in black_list_yaml.keys()):
                for item in black_list_yaml['scenario']:
                    scenarios = item['scenarios']
                    installers = item['installers']
                    in_it = RallyBase.in_iterable_re
                    if (in_it(deploy_scenario, scenarios) and
                            in_it(installer_type, installers)):
                        tests = item['tests']
                        black_tests.extend(tests)
        except Exception:  # pylint: disable=broad-except
            # Best effort: a missing/malformed blacklist disables exclusion.
            LOGGER.debug("Scenario exclusion not applied.")

        return black_tests

    @staticmethod
    def in_iterable_re(needle, haystack):
        """
        Check if given needle is in the iterable haystack, using regex.

        :param needle: string to be matched
        :param haystack: iterable of strings (optionally regex patterns)
        :return: True if needle is equal to any of the elements in haystack,
                 or if a nonempty regex pattern in haystack is found in needle.
        """
        # exact match without regex
        if needle in haystack:
            return True

        # match if any nonempty regex pattern is found in the needle
        return any(pattern and re.search(pattern, needle) is not None
                   for pattern in haystack)

    def excl_func(self):
        """Return the tests blacklisted for unsupported functionalities."""
        black_tests = []
        func_list = []

        try:
            with open(RallyBase.BLACKLIST_FILE, 'r') as black_list_file:
                black_list_yaml = yaml.safe_load(black_list_file)

            if not self._migration_supported():
                func_list.append("no_migration")

            if 'functionality' in black_list_yaml.keys():
                for item in black_list_yaml['functionality']:
                    functions = item['functions']
                    for func in func_list:
                        if func in functions:
                            tests = item['tests']
                            black_tests.extend(tests)
        except Exception:  # pylint: disable=broad-except
            # Best effort: a missing/malformed blacklist disables exclusion.
            LOGGER.debug("Functionality exclusion not applied.")

        return black_tests

    def _apply_blacklist(self, case_file_name, result_file_name):
        """Copy case_file_name to result_file_name, dropping blacklisted tests.

        A blacklisted top-level YAML entry and its indented body (up to the
        next blank line) are skipped.
        """
        LOGGER.debug("Applying blacklist...")
        black_tests = list(set(self.excl_func() +
                               self.excl_scenario()))

        if black_tests:
            LOGGER.debug("Blacklisted tests: %s", black_tests)

        # 'with' guarantees both files are closed even on exception.
        with open(case_file_name, 'r') as cases_file, \
                open(result_file_name, 'w') as result_file:
            include = True
            for cases_line in cases_file:
                if include:
                    for black_tests_line in black_tests:
                        if re.search(black_tests_line,
                                     cases_line.strip().rstrip(':')):
                            include = False
                            break
                    else:
                        result_file.write(str(cases_line))
                else:
                    # Resume copying at the first blank line after the
                    # blacklisted entry.
                    if cases_line.isspace():
                        include = True

    @staticmethod
    def file_is_empty(file_name):
        """Determine if a file is empty (or missing/unreadable)."""
        try:
            return os.stat(file_name).st_size == 0
        except Exception:  # pylint: disable=broad-except
            return True

    def _run_task(self, test_name):
        """Run a rally task for one scenario group and save its reports.

        :param test_name: scenario group name (e.g. 'nova')
        :raises Exception: when the main task file is missing
        """
        LOGGER.info('Starting test scenario "%s" ...', test_name)

        task_file = os.path.join(self.RALLY_DIR, 'task.yaml')
        if not os.path.exists(task_file):
            LOGGER.error("Task file '%s' does not exist.", task_file)
            # % formatting (a trailing comma here would raise a tuple
            # instead of a formatted message).
            raise Exception("Task file '%s' does not exist." % task_file)

        file_name = self._prepare_test_list(test_name)
        if self.file_is_empty(file_name):
            LOGGER.info('No tests for scenario "%s"', test_name)
            return

        cmd_line = ("rally task start --abort-on-sla-failure "
                    "--task {0} "
                    "--task-args \"{1}\""
                    .format(task_file, self._build_task_args(test_name)))
        LOGGER.debug('running command line: %s', cmd_line)

        proc = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT, shell=True)
        output = self._get_output(proc, test_name)
        task_id = self.get_task_id(output)
        LOGGER.debug('task_id : %s', task_id)

        if task_id is None:
            # The task did not start: validate it to log a useful reason.
            LOGGER.error('Failed to retrieve task_id, validating task...')
            cmd_line = ("rally task validate "
                        "--task {0} "
                        "--task-args \"{1}\""
                        .format(task_file, self._build_task_args(test_name)))
            LOGGER.debug('running command line: %s', cmd_line)
            proc = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT, shell=True)
            output = self.get_cmd_output(proc)
            LOGGER.error("Task validation result:\n%s", output)
            return

        # check for result directory and create it otherwise
        if not os.path.exists(self.RESULTS_DIR):
            LOGGER.debug('%s does not exist, we create it.',
                         self.RESULTS_DIR)
            os.makedirs(self.RESULTS_DIR)

        # write html report file
        report_html_name = 'opnfv-{}.html'.format(test_name)
        report_html_dir = os.path.join(self.RESULTS_DIR, report_html_name)
        cmd_line = "rally task report {} --out {}".format(task_id,
                                                          report_html_dir)

        LOGGER.debug('running command line: %s', cmd_line)
        os.popen(cmd_line)

        # get and save rally operation JSON result
        cmd_line = "rally task results %s" % task_id
        LOGGER.debug('running command line: %s', cmd_line)
        cmd = os.popen(cmd_line)
        json_results = cmd.read()
        report_json_name = 'opnfv-{}.json'.format(test_name)
        report_json_dir = os.path.join(self.RESULTS_DIR, report_json_name)
        with open(report_json_dir, 'w') as r_file:
            LOGGER.debug('saving json file')
            r_file.write(json_results)

        # parse JSON operation result
        if self.task_succeed(json_results):
            LOGGER.info('Test scenario: "%s" OK.\n', test_name)
        else:
            LOGGER.info('Test scenario: "%s" Failed.\n', test_name)

    def _get_output(self, proc, test_name):
        """Stream rally stdout, building the log and the summary entry.

        Parses the rally table output line by line to count tests, sum the
        success percentages and the 'Full duration' values, then appends a
        per-scenario dict to self.summary.

        :param proc: running subprocess whose stdout is parsed
        :param test_name: scenario group name used in the summary
        :return: the filtered rally output as a single string
        """
        result = ""
        nb_tests = 0
        overall_duration = 0.0
        success = 0.0
        nb_totals = 0

        while proc.poll() is None:
            line = proc.stdout.readline()
            # keep only the informative lines in the captured log
            if ("Load duration" in line or
                    "started" in line or
                    "finished" in line or
                    " Preparing" in line or
                    "+-" in line or
                    "|" in line):
                result += line
            elif "test scenario" in line:
                result += "\n" + line
            elif "Full duration" in line:
                result += line + "\n\n"

            # parse output for summary report
            if ("| " in line and
                    "| action" not in line and
                    "| Starting" not in line and
                    "| Completed" not in line and
                    "| ITER" not in line and
                    "|   " not in line and
                    "| total" not in line):
                nb_tests += 1
            elif "| total" in line:
                # column 8 of the 'total' row holds the success percentage
                percentage = ((line.split('|')[8]).strip(' ')).strip('%')
                try:
                    success += float(percentage)
                except ValueError:
                    LOGGER.info('Percentage error: %s, %s',
                                percentage, line)
                nb_totals += 1
            elif "Full duration" in line:
                duration = line.split(': ')[1]
                try:
                    overall_duration += float(duration)
                except ValueError:
                    LOGGER.info('Duration error: %s, %s', duration, line)

        overall_duration = "{:10.2f}".format(overall_duration)
        if nb_totals == 0:
            success_avg = 0
        else:
            success_avg = "{:0.2f}".format(success / nb_totals)

        scenario_summary = {'test_name': test_name,
                            'overall_duration': overall_duration,
                            'nb_tests': nb_tests,
                            'success': success_avg}
        self.summary.append(scenario_summary)

        LOGGER.debug("\n%s", result)

        return result

    def _prepare_env(self):
        """Create the OpenStack resources (image, net, router, flavors).

        :raises Exception: when the test name is invalid or any resource
                           creation fails
        """
        LOGGER.debug('Validating the test name...')
        if self.test_name not in self.TESTS:
            raise Exception("Test name '%s' is invalid" % self.test_name)

        network_name = self.RALLY_PRIVATE_NET_NAME + self.guid
        subnet_name = self.RALLY_PRIVATE_SUBNET_NAME + self.guid
        router_name = self.RALLY_ROUTER_NAME + self.guid
        self.image_name = self.GLANCE_IMAGE_NAME + self.guid
        self.flavor_name = self.FLAVOR_NAME + self.guid
        self.flavor_alt_name = self.FLAVOR_ALT_NAME + self.guid
        self.ext_net_name = snaps_utils.get_ext_net_name(self.os_creds)
        self.compute_cnt = snaps_utils.get_active_compute_cnt(self.os_creds)

        LOGGER.debug("Creating image '%s'...", self.image_name)
        image_creator = deploy_utils.create_image(
            self.os_creds, ImageConfig(
                name=self.image_name,
                image_file=self.GLANCE_IMAGE_PATH,
                img_format=self.GLANCE_IMAGE_FORMAT,
                image_user=self.GLANCE_IMAGE_USERNAME,
                public=True,
                extra_properties=self.GLANCE_IMAGE_EXTRA_PROPERTIES))
        if image_creator is None:
            raise Exception("Failed to create image")
        self.creators.append(image_creator)

        LOGGER.debug("Creating network '%s'...", network_name)
        network_creator = deploy_utils.create_network(
            self.os_creds, NetworkConfig(
                name=network_name,
                shared=True,
                subnet_settings=[SubnetConfig(
                    name=subnet_name,
                    cidr=self.RALLY_PRIVATE_SUBNET_CIDR)
                ]))
        if network_creator is None:
            raise Exception("Failed to create private network")
        self.priv_net_id = network_creator.get_network().id
        self.creators.append(network_creator)

        LOGGER.debug("Creating router '%s'...", router_name)
        router_creator = deploy_utils.create_router(
            self.os_creds, RouterConfig(
                name=router_name,
                external_gateway=self.ext_net_name,
                internal_subnets=[subnet_name]))
        if router_creator is None:
            raise Exception("Failed to create router")
        self.creators.append(router_creator)

        LOGGER.debug("Creating flavor '%s'...", self.flavor_name)
        flavor_creator = OpenStackFlavor(
            self.os_creds, FlavorConfig(
                name=self.flavor_name, ram=512, disk=1, vcpus=1,
                metadata=self.FLAVOR_EXTRA_SPECS))
        if flavor_creator is None or flavor_creator.create() is None:
            raise Exception("Failed to create flavor")
        self.creators.append(flavor_creator)

        LOGGER.debug("Creating flavor '%s'...", self.flavor_alt_name)
        flavor_alt_creator = OpenStackFlavor(
            self.os_creds, FlavorConfig(
                name=self.flavor_alt_name, ram=1024, disk=1, vcpus=1,
                metadata=self.FLAVOR_EXTRA_SPECS))
        if flavor_alt_creator is None or flavor_alt_creator.create() is None:
            raise Exception("Failed to create flavor")
        self.creators.append(flavor_alt_creator)

    def _run_tests(self):
        """Run the requested scenario group(s)."""
        if self.test_name == 'all':
            for test in self.TESTS:
                # 'all' is the meta entry and 'vm' is excluded from 'all'
                if test in ('all', 'vm'):
                    continue
                self._run_task(test)
        else:
            self._run_task(self.test_name)

    def _generate_report(self):
        """Build the summary table, self.result and self.details payload."""
        report = (
            "\n"
            "                                                              "
            "\n"
            "                     Rally Summary Report\n"
            "\n"
            "+===================+============+===============+===========+"
            "\n"
            "| Module            | Duration   | nb. Test Run  | Success   |"
            "\n"
            "+===================+============+===============+===========+"
            "\n")
        payload = []

        # for each scenario we draw a row for the table
        total_duration = 0.0
        total_nb_tests = 0
        total_success = 0.0
        for item in self.summary:
            name = "{0:<17}".format(item['test_name'])
            duration = float(item['overall_duration'])
            total_duration += duration
            duration = time.strftime("%M:%S", time.gmtime(duration))
            duration = "{0:<10}".format(duration)
            nb_tests = "{0:<13}".format(item['nb_tests'])
            total_nb_tests += int(item['nb_tests'])
            success = "{0:<10}".format(str(item['success']) + '%')
            total_success += float(item['success'])
            report += ("" +
                       "| " + name + " | " + duration + " | " +
                       nb_tests + " | " + success + "|\n" +
                       "+-------------------+------------"
                       "+---------------+-----------+\n")
            payload.append({'module': name,
                            'details': {'duration': item['overall_duration'],
                                        'nb tests': item['nb_tests'],
                                        'success': item['success']}})

        total_duration_str = time.strftime("%H:%M:%S",
                                           time.gmtime(total_duration))
        total_duration_str2 = "{0:<10}".format(total_duration_str)
        total_nb_tests_str = "{0:<13}".format(total_nb_tests)

        # no scenario at all counts as a fully successful (empty) run
        try:
            self.result = total_success / len(self.summary)
        except ZeroDivisionError:
            self.result = 100

        success_rate = "{:0.2f}".format(self.result)
        success_rate_str = "{0:<10}".format(str(success_rate) + '%')
        report += ("+===================+============"
                   "+===============+===========+")
        report += "\n"
        report += ("| TOTAL:            | " + total_duration_str2 + " | " +
                   total_nb_tests_str + " | " + success_rate_str + "|\n")
        report += ("+===================+============"
                   "+===============+===========+")
        report += "\n"

        LOGGER.info("\n%s", report)
        payload.append({'summary': {'duration': total_duration,
                                    'nb tests': total_nb_tests,
                                    'nb success': success_rate}})

        self.details = payload

        LOGGER.info("Rally '%s' success_rate is %s%%",
                    self.case_name, success_rate)

    def _clean_up(self):
        """Delete created resources, newest first; never raise."""
        for creator in reversed(self.creators):
            try:
                creator.clean()
            except Exception as exc:  # pylint: disable=broad-except
                LOGGER.error('Unexpected error cleaning - %s', exc)

    @energy.enable_recording
    def run(self, **kwargs):
        """Run testcase (prepare env, run tasks, report, clean up).

        :return: TestCase.EX_OK or TestCase.EX_RUN_ERROR
        """
        self.start_time = time.time()
        try:
            conf_utils.create_rally_deployment()
            self._prepare_env()
            self._run_tests()
            self._generate_report()
            res = testcase.TestCase.EX_OK
        except Exception as exc:   # pylint: disable=broad-except
            LOGGER.error('Error with run: %s', exc)
            res = testcase.TestCase.EX_RUN_ERROR
        finally:
            # always attempt cleanup, even after a failure
            self._clean_up()

        self.stop_time = time.time()
        return res
640
641
class RallySanity(RallyBase):
    """Rally sanity testcase implementation (smoke subset)."""

    def __init__(self, **kwargs):
        """Initialize RallySanity object."""
        # Default the case name unless the caller supplied one.
        kwargs.setdefault("case_name", "rally_sanity")
        super(RallySanity, self).__init__(**kwargs)
        self.scenario_dir = os.path.join(self.RALLY_SCENARIO_DIR, 'sanity')
        self.mode = 'sanity'
        self.smoke = True
        self.test_name = 'all'
654
655
class RallyFull(RallyBase):
    """Rally full testcase implementation (complete scenario set)."""

    def __init__(self, **kwargs):
        """Initialize RallyFull object."""
        # Default the case name unless the caller supplied one.
        kwargs.setdefault("case_name", "rally_full")
        super(RallyFull, self).__init__(**kwargs)
        self.scenario_dir = os.path.join(self.RALLY_SCENARIO_DIR, 'full')
        self.mode = 'full'
        self.smoke = False
        self.test_name = 'all'