Move rally and tempest out of functest-core
[functest.git] / functest / opnfv_tests / openstack / rally / rally.py
1 #!/usr/bin/env python
2 #
3 # Copyright (c) 2015 All rights reserved
4 # This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10
11 """Rally testcases implementation."""
12
13 from __future__ import division
14
15 import json
16 import logging
17 import os
18 import re
19 import subprocess
20 import time
21 import uuid
22
23 import pkg_resources
24 import yaml
25
26 from functest.core import testcase
27 from functest.energy import energy
28 from functest.opnfv_tests.openstack.snaps import snaps_utils
29 from functest.opnfv_tests.openstack.tempest import conf_utils
30 from functest.utils.constants import CONST
31
32 from snaps.openstack.create_flavor import FlavorSettings, OpenStackFlavor
33 from snaps.openstack.create_image import ImageSettings
34 from snaps.openstack.create_network import NetworkSettings, SubnetSettings
35 from snaps.openstack.create_router import RouterSettings
36 from snaps.openstack.tests import openstack_tests
37 from snaps.openstack.utils import deploy_utils
38
39 LOGGER = logging.getLogger(__name__)
40
41
class RallyBase(testcase.TestCase):
    """Base class for Rally testcases implementation."""

    # Scenario names accepted as test_name; 'all' expands to every
    # scenario except 'vm' (see _run_tests).
    TESTS = ['authenticate', 'glance', 'ceilometer', 'cinder', 'heat',
             'keystone', 'neutron', 'nova', 'quotas', 'vm', 'all']
    # Glance image parameters taken from the functest configuration.
    GLANCE_IMAGE_NAME = CONST.__getattribute__('openstack_image_name')
    GLANCE_IMAGE_FILENAME = CONST.__getattribute__('openstack_image_file_name')
    GLANCE_IMAGE_PATH = os.path.join(
        CONST.__getattribute__('dir_functest_images'),
        GLANCE_IMAGE_FILENAME)
    GLANCE_IMAGE_FORMAT = CONST.__getattribute__('openstack_image_disk_format')
    GLANCE_IMAGE_USERNAME = CONST.__getattribute__('openstack_image_username')
    GLANCE_IMAGE_EXTRA_PROPERTIES = {}
    if hasattr(CONST, 'openstack_extra_properties'):
        GLANCE_IMAGE_EXTRA_PROPERTIES = CONST.__getattribute__(
            'openstack_extra_properties')
    # Names of the two flavors created in _prepare_env.
    FLAVOR_NAME = CONST.__getattribute__('rally_flavor_name')
    FLAVOR_ALT_NAME = CONST.__getattribute__('rally_flavor_alt_name')
    FLAVOR_EXTRA_SPECS = None
    if hasattr(CONST, 'flavor_extra_specs'):
        FLAVOR_EXTRA_SPECS = CONST.__getattribute__('flavor_extra_specs')

    # Scenario, template and support files shipped with the functest package.
    RALLY_DIR = pkg_resources.resource_filename(
        'functest', 'opnfv_tests/openstack/rally')
    RALLY_SCENARIO_DIR = pkg_resources.resource_filename(
        'functest', 'opnfv_tests/openstack/rally/scenario')
    TEMPLATE_DIR = pkg_resources.resource_filename(
        'functest', 'opnfv_tests/openstack/rally/scenario/templates')
    SUPPORT_DIR = pkg_resources.resource_filename(
        'functest', 'opnfv_tests/openstack/rally/scenario/support')
    # Task arguments shared by all scenarios (see _build_task_args).
    USERS_AMOUNT = 2
    TENANTS_AMOUNT = 3
    ITERATIONS_AMOUNT = 10
    CONCURRENCY = 4
    RESULTS_DIR = os.path.join(CONST.__getattribute__('dir_results'), 'rally')
    BLACKLIST_FILE = os.path.join(RALLY_DIR, "blacklist.txt")
    # Working directory where blacklist-filtered scenario files are written.
    TEMP_DIR = os.path.join(RALLY_DIR, "var")

    # Names of the private network/subnet/router created for the run.
    RALLY_PRIVATE_NET_NAME = CONST.__getattribute__('rally_network_name')
    RALLY_PRIVATE_SUBNET_NAME = CONST.__getattribute__('rally_subnet_name')
    RALLY_PRIVATE_SUBNET_CIDR = CONST.__getattribute__('rally_subnet_cidr')
    RALLY_ROUTER_NAME = CONST.__getattribute__('rally_router_name')
84
85     def __init__(self, **kwargs):
86         """Initialize RallyBase object."""
87         super(RallyBase, self).__init__(**kwargs)
88         if 'os_creds' in kwargs:
89             self.os_creds = kwargs['os_creds']
90         else:
91             creds_override = None
92             if hasattr(CONST, 'snaps_os_creds_override'):
93                 creds_override = CONST.__getattribute__(
94                     'snaps_os_creds_override')
95
96             self.os_creds = openstack_tests.get_credentials(
97                 os_env_file=CONST.__getattribute__('openstack_creds'),
98                 overrides=creds_override)
99
100         self.guid = ''
101         if CONST.__getattribute__('rally_unique_names'):
102             self.guid = '-' + str(uuid.uuid4())
103
104         self.creators = []
105         self.mode = ''
106         self.summary = []
107         self.scenario_dir = ''
108         self.image_name = None
109         self.ext_net_name = None
110         self.priv_net_id = None
111         self.flavor_name = None
112         self.flavor_alt_name = None
113         self.smoke = None
114         self.test_name = None
115         self.start_time = None
116         self.result = None
117         self.details = None
118         self.compute_cnt = 0
119
120     def _build_task_args(self, test_file_name):
121         task_args = {'service_list': [test_file_name]}
122         task_args['image_name'] = self.image_name
123         task_args['flavor_name'] = self.flavor_name
124         task_args['flavor_alt_name'] = self.flavor_alt_name
125         task_args['glance_image_location'] = self.GLANCE_IMAGE_PATH
126         task_args['glance_image_format'] = self.GLANCE_IMAGE_FORMAT
127         task_args['tmpl_dir'] = self.TEMPLATE_DIR
128         task_args['sup_dir'] = self.SUPPORT_DIR
129         task_args['users_amount'] = self.USERS_AMOUNT
130         task_args['tenants_amount'] = self.TENANTS_AMOUNT
131         task_args['use_existing_users'] = False
132         task_args['iterations'] = self.ITERATIONS_AMOUNT
133         task_args['concurrency'] = self.CONCURRENCY
134         task_args['smoke'] = self.smoke
135
136         ext_net = self.ext_net_name
137         if ext_net:
138             task_args['floating_network'] = str(ext_net)
139         else:
140             task_args['floating_network'] = ''
141
142         net_id = self.priv_net_id
143         if net_id:
144             task_args['netid'] = str(net_id)
145         else:
146             task_args['netid'] = ''
147
148         return task_args
149
150     def _prepare_test_list(self, test_name):
151         test_yaml_file_name = 'opnfv-{}.yaml'.format(test_name)
152         scenario_file_name = os.path.join(self.RALLY_SCENARIO_DIR,
153                                           test_yaml_file_name)
154
155         if not os.path.exists(scenario_file_name):
156             scenario_file_name = os.path.join(self.scenario_dir,
157                                               test_yaml_file_name)
158
159             if not os.path.exists(scenario_file_name):
160                 raise Exception("The scenario '%s' does not exist."
161                                 % scenario_file_name)
162
163         LOGGER.debug('Scenario fetched from : %s', scenario_file_name)
164         test_file_name = os.path.join(self.TEMP_DIR, test_yaml_file_name)
165
166         if not os.path.exists(self.TEMP_DIR):
167             os.makedirs(self.TEMP_DIR)
168
169         self._apply_blacklist(scenario_file_name, test_file_name)
170         return test_file_name
171
172     @staticmethod
173     def get_task_id(cmd_raw):
174         """
175         Get task id from command rally result.
176
177         :param cmd_raw:
178         :return: task_id as string
179         """
180         taskid_re = re.compile('^Task +(.*): started$')
181         for line in cmd_raw.splitlines(True):
182             line = line.strip()
183             match = taskid_re.match(line)
184             if match:
185                 return match.group(1)
186         return None
187
188     @staticmethod
189     def task_succeed(json_raw):
190         """
191         Parse JSON from rally JSON results.
192
193         :param json_raw:
194         :return: Bool
195         """
196         rally_report = json.loads(json_raw)
197         for report in rally_report:
198             if report is None or report.get('result') is None:
199                 return False
200
201             for result in report.get('result'):
202                 if result is None or len(result.get('error')) > 0:
203                     return False
204
205         return True
206
207     def _migration_supported(self):
208         """Determine if migration is supported."""
209         if self.compute_cnt > 1:
210             return True
211
212         return False
213
214     @staticmethod
215     def get_cmd_output(proc):
216         """Get command stdout."""
217         result = ""
218         while proc.poll() is None:
219             line = proc.stdout.readline()
220             result += line
221         return result
222
223     @staticmethod
224     def excl_scenario():
225         """Exclude scenario."""
226         black_tests = []
227         try:
228             with open(RallyBase.BLACKLIST_FILE, 'r') as black_list_file:
229                 black_list_yaml = yaml.safe_load(black_list_file)
230
231             installer_type = CONST.__getattribute__('INSTALLER_TYPE')
232             deploy_scenario = CONST.__getattribute__('DEPLOY_SCENARIO')
233             if (bool(installer_type) and bool(deploy_scenario) and
234                     'scenario' in black_list_yaml.keys()):
235                 for item in black_list_yaml['scenario']:
236                     scenarios = item['scenarios']
237                     installers = item['installers']
238                     in_it = RallyBase.in_iterable_re
239                     if (in_it(deploy_scenario, scenarios) and
240                             in_it(installer_type, installers)):
241                         tests = item['tests']
242                         black_tests.extend(tests)
243         except Exception:
244             LOGGER.debug("Scenario exclusion not applied.")
245
246         return black_tests
247
248     @staticmethod
249     def in_iterable_re(needle, haystack):
250         """
251         Check if given needle is in the iterable haystack, using regex.
252
253         :param needle: string to be matched
254         :param haystack: iterable of strings (optionally regex patterns)
255         :return: True if needle is eqial to any of the elements in haystack,
256                  or if a nonempty regex pattern in haystack is found in needle.
257         """
258         # match without regex
259         if needle in haystack:
260             return True
261
262         for pattern in haystack:
263             # match if regex pattern is set and found in the needle
264             if pattern and re.search(pattern, needle) is not None:
265                 return True
266         else:
267             return False
268
269     def excl_func(self):
270         """Exclude functionalities."""
271         black_tests = []
272         func_list = []
273
274         try:
275             with open(RallyBase.BLACKLIST_FILE, 'r') as black_list_file:
276                 black_list_yaml = yaml.safe_load(black_list_file)
277
278             if not self._migration_supported():
279                 func_list.append("no_migration")
280
281             if 'functionality' in black_list_yaml.keys():
282                 for item in black_list_yaml['functionality']:
283                     functions = item['functions']
284                     for func in func_list:
285                         if func in functions:
286                             tests = item['tests']
287                             black_tests.extend(tests)
288         except Exception:  # pylint: disable=broad-except
289             LOGGER.debug("Functionality exclusion not applied.")
290
291         return black_tests
292
293     def _apply_blacklist(self, case_file_name, result_file_name):
294         """Apply blacklist."""
295         LOGGER.debug("Applying blacklist...")
296         cases_file = open(case_file_name, 'r')
297         result_file = open(result_file_name, 'w')
298
299         black_tests = list(set(self.excl_func() +
300                                self.excl_scenario()))
301
302         if black_tests:
303             LOGGER.debug("Blacklisted tests: " + str(black_tests))
304
305         include = True
306         for cases_line in cases_file:
307             if include:
308                 for black_tests_line in black_tests:
309                     if re.search(black_tests_line,
310                                  cases_line.strip().rstrip(':')):
311                         include = False
312                         break
313                 else:
314                     result_file.write(str(cases_line))
315             else:
316                 if cases_line.isspace():
317                     include = True
318
319         cases_file.close()
320         result_file.close()
321
322     @staticmethod
323     def file_is_empty(file_name):
324         """Determine is a file is empty."""
325         try:
326             if os.stat(file_name).st_size > 0:
327                 return False
328         except Exception:  # pylint: disable=broad-except
329             pass
330
331         return True
332
333     def _run_task(self, test_name):
334         """Run a task."""
335         LOGGER.info('Starting test scenario "%s" ...', test_name)
336
337         task_file = os.path.join(self.RALLY_DIR, 'task.yaml')
338         if not os.path.exists(task_file):
339             LOGGER.error("Task file '%s' does not exist.", task_file)
340             raise Exception("Task file '%s' does not exist.", task_file)
341
342         file_name = self._prepare_test_list(test_name)
343         if self.file_is_empty(file_name):
344             LOGGER.info('No tests for scenario "%s"', test_name)
345             return
346
347         cmd_line = ("rally task start --abort-on-sla-failure "
348                     "--task {0} "
349                     "--task-args \"{1}\""
350                     .format(task_file, self._build_task_args(test_name)))
351         LOGGER.debug('running command line: %s', cmd_line)
352
353         proc = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
354                                 stderr=subprocess.STDOUT, shell=True)
355         output = self._get_output(proc, test_name)
356         task_id = self.get_task_id(output)
357         LOGGER.debug('task_id : %s', task_id)
358
359         if task_id is None:
360             LOGGER.error('Failed to retrieve task_id, validating task...')
361             cmd_line = ("rally task validate "
362                         "--task {0} "
363                         "--task-args \"{1}\""
364                         .format(task_file, self._build_task_args(test_name)))
365             LOGGER.debug('running command line: %s', cmd_line)
366             proc = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
367                                     stderr=subprocess.STDOUT, shell=True)
368             output = self.get_cmd_output(proc)
369             LOGGER.error("Task validation result:" + "\n" + output)
370             return
371
372         # check for result directory and create it otherwise
373         if not os.path.exists(self.RESULTS_DIR):
374             LOGGER.debug('%s does not exist, we create it.',
375                          self.RESULTS_DIR)
376             os.makedirs(self.RESULTS_DIR)
377
378         # write html report file
379         report_html_name = 'opnfv-{}.html'.format(test_name)
380         report_html_dir = os.path.join(self.RESULTS_DIR, report_html_name)
381         cmd_line = "rally task report {} --out {}".format(task_id,
382                                                           report_html_dir)
383
384         LOGGER.debug('running command line: %s', cmd_line)
385         os.popen(cmd_line)
386
387         # get and save rally operation JSON result
388         cmd_line = "rally task results %s" % task_id
389         LOGGER.debug('running command line: %s', cmd_line)
390         cmd = os.popen(cmd_line)
391         json_results = cmd.read()
392         report_json_name = 'opnfv-{}.json'.format(test_name)
393         report_json_dir = os.path.join(self.RESULTS_DIR, report_json_name)
394         with open(report_json_dir, 'w') as r_file:
395             LOGGER.debug('saving json file')
396             r_file.write(json_results)
397
398         # parse JSON operation result
399         if self.task_succeed(json_results):
400             LOGGER.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
401         else:
402             LOGGER.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")
403
    def _get_output(self, proc, test_name):
        """Stream rally output, filter it and collect summary figures.

        Builds a condensed log of the rally run and appends a summary
        entry (test name, overall duration, number of tests, average
        success rate) to self.summary.

        :param proc: running subprocess whose stdout carries rally output
        :param test_name: scenario name used in the summary entry
        :return: the filtered output as a single string
        """
        result = ""
        nb_tests = 0
        overall_duration = 0.0
        success = 0.0
        nb_totals = 0

        while proc.poll() is None:
            line = proc.stdout.readline()
            # keep only the informative lines in the filtered output
            if ("Load duration" in line or
                    "started" in line or
                    "finished" in line or
                    " Preparing" in line or
                    "+-" in line or
                    "|" in line):
                result += line
            elif "test scenario" in line:
                result += "\n" + line
            elif "Full duration" in line:
                result += line + "\n\n"

            # parse output for summary report
            if ("| " in line and
                    "| action" not in line and
                    "| Starting" not in line and
                    "| Completed" not in line and
                    "| ITER" not in line and
                    "|   " not in line and
                    "| total" not in line):
                # any remaining table row counts as one executed test
                nb_tests += 1
            elif "| total" in line:
                # extract the success percentage column of the total row
                percentage = ((line.split('|')[8]).strip(' ')).strip('%')
                try:
                    success += float(percentage)
                except ValueError:
                    LOGGER.info('Percentage error: %s, %s',
                                percentage, line)
                nb_totals += 1
            elif "Full duration" in line:
                duration = line.split(': ')[1]
                try:
                    overall_duration += float(duration)
                except ValueError:
                    LOGGER.info('Duration error: %s, %s', duration, line)

        # from here on the duration is kept as a formatted string
        overall_duration = "{:10.2f}".format(overall_duration)
        if nb_totals == 0:
            success_avg = 0
        else:
            success_avg = "{:0.2f}".format(success / nb_totals)

        scenario_summary = {'test_name': test_name,
                            'overall_duration': overall_duration,
                            'nb_tests': nb_tests,
                            'success': success_avg}
        self.summary.append(scenario_summary)

        LOGGER.debug("\n" + result)

        return result
464
    def _prepare_env(self):
        """Create the OpenStack resources needed by the rally scenarios.

        Creates a Glance image, a shared private network with its
        subnet, a router plugged on the external network and two
        flavors.  Every successful creator is appended to self.creators
        so _clean_up() can delete the resources afterwards.

        :raises Exception: if the test name is invalid or any resource
            creation fails
        """
        LOGGER.debug('Validating the test name...')
        if self.test_name not in self.TESTS:
            raise Exception("Test name '%s' is invalid" % self.test_name)

        # suffix all resource names with the run guid to avoid clashes
        network_name = self.RALLY_PRIVATE_NET_NAME + self.guid
        subnet_name = self.RALLY_PRIVATE_SUBNET_NAME + self.guid
        router_name = self.RALLY_ROUTER_NAME + self.guid
        self.image_name = self.GLANCE_IMAGE_NAME + self.guid
        self.flavor_name = self.FLAVOR_NAME + self.guid
        self.flavor_alt_name = self.FLAVOR_ALT_NAME + self.guid
        self.ext_net_name = snaps_utils.get_ext_net_name(self.os_creds)
        self.compute_cnt = snaps_utils.get_active_compute_cnt(self.os_creds)

        LOGGER.debug("Creating image '%s'...", self.image_name)
        image_creator = deploy_utils.create_image(
            self.os_creds, ImageSettings(
                name=self.image_name,
                image_file=self.GLANCE_IMAGE_PATH,
                img_format=self.GLANCE_IMAGE_FORMAT,
                image_user=self.GLANCE_IMAGE_USERNAME,
                public=True,
                extra_properties=self.GLANCE_IMAGE_EXTRA_PROPERTIES))
        if image_creator is None:
            raise Exception("Failed to create image")
        self.creators.append(image_creator)

        LOGGER.debug("Creating network '%s'...", network_name)
        network_creator = deploy_utils.create_network(
            self.os_creds, NetworkSettings(
                name=network_name,
                shared=True,
                subnet_settings=[SubnetSettings(
                    name=subnet_name,
                    cidr=self.RALLY_PRIVATE_SUBNET_CIDR)
                ]))
        if network_creator is None:
            raise Exception("Failed to create private network")
        # the network id is passed to rally through the task args
        self.priv_net_id = network_creator.get_network().id
        self.creators.append(network_creator)

        LOGGER.debug("Creating router '%s'...", router_name)
        router_creator = deploy_utils.create_router(
            self.os_creds, RouterSettings(
                name=router_name,
                external_gateway=self.ext_net_name,
                internal_subnets=[subnet_name]))
        if router_creator is None:
            raise Exception("Failed to create router")
        self.creators.append(router_creator)

        LOGGER.debug("Creating flavor '%s'...", self.flavor_name)
        flavor_creator = OpenStackFlavor(
            self.os_creds, FlavorSettings(
                name=self.flavor_name, ram=512, disk=1, vcpus=1,
                metadata=self.FLAVOR_EXTRA_SPECS))
        if flavor_creator is None or flavor_creator.create() is None:
            raise Exception("Failed to create flavor")
        self.creators.append(flavor_creator)

        # larger alternate flavor used by resize/migration scenarios
        LOGGER.debug("Creating flavor '%s'...", self.flavor_alt_name)
        flavor_alt_creator = OpenStackFlavor(
            self.os_creds, FlavorSettings(
                name=self.flavor_alt_name, ram=1024, disk=1, vcpus=1,
                metadata=self.FLAVOR_EXTRA_SPECS))
        if flavor_alt_creator is None or flavor_alt_creator.create() is None:
            raise Exception("Failed to create flavor")
        self.creators.append(flavor_alt_creator)
533
534     def _run_tests(self):
535         if self.test_name == 'all':
536             for test in self.TESTS:
537                 if test == 'all' or test == 'vm':
538                     continue
539                 self._run_task(test)
540         else:
541             self._run_task(self.test_name)
542
    def _generate_report(self):
        """Build the summary table, push it to the log and self.details.

        Aggregates the per-scenario entries collected in self.summary
        into an ASCII table, computes the overall success rate (stored
        in self.result) and fills self.details with the payload
        published to the test database.
        """
        report = (
            "\n"
            "                                                              "
            "\n"
            "                     Rally Summary Report\n"
            "\n"
            "+===================+============+===============+===========+"
            "\n"
            "| Module            | Duration   | nb. Test Run  | Success   |"
            "\n"
            "+===================+============+===============+===========+"
            "\n")
        payload = []

        # for each scenario we draw a row for the table
        total_duration = 0.0
        total_nb_tests = 0
        total_success = 0.0
        for item in self.summary:
            name = "{0:<17}".format(item['test_name'])
            duration = float(item['overall_duration'])
            total_duration += duration
            duration = time.strftime("%M:%S", time.gmtime(duration))
            duration = "{0:<10}".format(duration)
            nb_tests = "{0:<13}".format(item['nb_tests'])
            total_nb_tests += int(item['nb_tests'])
            success = "{0:<10}".format(str(item['success']) + '%')
            total_success += float(item['success'])
            report += ("" +
                       "| " + name + " | " + duration + " | " +
                       nb_tests + " | " + success + "|\n" +
                       "+-------------------+------------"
                       "+---------------+-----------+\n")
            payload.append({'module': name,
                            'details': {'duration': item['overall_duration'],
                                        'nb tests': item['nb_tests'],
                                        'success': item['success']}})

        total_duration_str = time.strftime("%H:%M:%S",
                                           time.gmtime(total_duration))
        total_duration_str2 = "{0:<10}".format(total_duration_str)
        total_nb_tests_str = "{0:<13}".format(total_nb_tests)

        # average success rate; an empty summary counts as full success
        try:
            self.result = total_success / len(self.summary)
        except ZeroDivisionError:
            self.result = 100

        success_rate = "{:0.2f}".format(self.result)
        success_rate_str = "{0:<10}".format(str(success_rate) + '%')
        report += ("+===================+============"
                   "+===============+===========+")
        report += "\n"
        report += ("| TOTAL:            | " + total_duration_str2 + " | " +
                   total_nb_tests_str + " | " + success_rate_str + "|\n")
        report += ("+===================+============"
                   "+===============+===========+")
        report += "\n"

        LOGGER.info("\n" + report)
        payload.append({'summary': {'duration': total_duration,
                                    'nb tests': total_nb_tests,
                                    'nb success': success_rate}})

        self.details = payload

        LOGGER.info("Rally '%s' success_rate is %s%%",
                    self.case_name, success_rate)
612
613     def _clean_up(self):
614         for creator in reversed(self.creators):
615             try:
616                 creator.clean()
617             except Exception as e:
618                 LOGGER.error('Unexpected error cleaning - %s', e)
619
    @energy.enable_recording
    def run(self, **kwargs):
        """Run testcase.

        Creates the rally deployment, prepares the OpenStack resources,
        runs the scenarios and generates the report.  Resources are
        cleaned up whether the run succeeded or not.

        :return: TestCase.EX_OK on success, TestCase.EX_RUN_ERROR
            otherwise
        """
        self.start_time = time.time()
        try:
            conf_utils.create_rally_deployment()
            self._prepare_env()
            self._run_tests()
            self._generate_report()
            res = testcase.TestCase.EX_OK
        except Exception as exc:   # pylint: disable=broad-except
            LOGGER.error('Error with run: %s', exc)
            res = testcase.TestCase.EX_RUN_ERROR
        finally:
            # always remove the resources created in _prepare_env
            self._clean_up()

        self.stop_time = time.time()
        return res
638
639
class RallySanity(RallyBase):
    """Rally sanity testcase implementation."""

    def __init__(self, **kwargs):
        """Initialize RallySanity object."""
        kwargs.setdefault("case_name", "rally_sanity")
        super(RallySanity, self).__init__(**kwargs)
        # sanity mode: smoke variants of all scenarios except 'vm'
        self.mode = 'sanity'
        self.test_name = 'all'
        self.smoke = True
        self.scenario_dir = os.path.join(self.RALLY_SCENARIO_DIR, 'sanity')
652
653
class RallyFull(RallyBase):
    """Rally full testcase implementation."""

    def __init__(self, **kwargs):
        """Initialize RallyFull object."""
        kwargs.setdefault("case_name", "rally_full")
        super(RallyFull, self).__init__(**kwargs)
        # full mode: non-smoke variants of all scenarios except 'vm'
        self.mode = 'full'
        self.test_name = 'all'
        self.smoke = False
        self.scenario_dir = os.path.join(self.RALLY_SCENARIO_DIR, 'full')