Fixes permissions issue for Nova with NFS
[apex.git] / apex / overcloud / deploy.py
index ef916a4..f40c8bd 100644
@@ -11,15 +11,20 @@ import base64
 import fileinput
 import logging
 import os
-import re
+import platform
+import pprint
 import shutil
 import uuid
 import struct
 import time
+import yaml
+import apex.builders.overcloud_builder as oc_builder
+import apex.builders.common_builder as c_builder
 
 from apex.common import constants as con
 from apex.common.exceptions import ApexDeployException
 from apex.common import parsers
+from apex.common import utils
 from apex.virtual import utils as virt_utils
 from cryptography.hazmat.primitives import serialization as \
     crypto_serialization
@@ -38,6 +43,8 @@ SDN_FILE_MAP = {
             'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
             'default': 'neutron-opendaylight-honeycomb.yaml'
         },
+        'l2gw': 'neutron-l2gw-opendaylight.yaml',
+        'sriov': 'neutron-opendaylight-sriov.yaml',
         'default': 'neutron-opendaylight.yaml',
     },
     'onos': {
@@ -65,24 +72,69 @@ OVS_PERF_MAP = {
     'NeutronDpdkMemoryChannels': 'memory_channels'
 }
 
-OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
-OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
 ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
                       ".noarch.rpm"
 
+LOOP_DEVICE_SIZE = "10G"
+
+LOSETUP_SERVICE = """[Unit]
+Description=Setup loop devices
+Before=network.target
+
+[Service]
+Type=oneshot
+ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
+ExecStop=/sbin/losetup -d /dev/loop3
+TimeoutSec=60
+RemainAfterExit=yes
+
+[Install]
+WantedBy=multi-user.target
+"""
+
+DUPLICATE_COMPUTE_SERVICES = [
+    'OS::TripleO::Services::ComputeNeutronCorePlugin',
+    'OS::TripleO::Services::ComputeNeutronMetadataAgent',
+    'OS::TripleO::Services::ComputeNeutronOvsAgent',
+    'OS::TripleO::Services::ComputeNeutronL3Agent'
+]
+
 
 def build_sdn_env_list(ds, sdn_map, env_list=None):
+    """
+    Builds a list of SDN environment files to be used in the deploy cmd.
+
+    This function recursively searches an sdn_map.  First the SDN controller
+    is matched, and then the function looks for enabled features of that
+    controller to determine which environment files should be used.  By
+    default, a feature is added to the list when its value in the deploy
+    settings is True.  If a feature's value is not a boolean, it is instead
+    compared against a (value, env_file) tuple.
+
+    :param ds: deploy settings
+    :param sdn_map: SDN map to recursively search
+    :param env_list: recursive var to hold previously found env_list
+    :return: A list of env files
+    """
     if env_list is None:
         env_list = list()
     for k, v in sdn_map.items():
-        if ds['sdn_controller'] == k or (k in ds and ds[k] is True):
+        if ds['sdn_controller'] == k or (k in ds and ds[k]):
             if isinstance(v, dict):
+                # Append default SDN env file first
+                # The assumption is that feature-enabled SDN env files
+                # override and do not conflict with previously set default
+                # settings
+                if ds['sdn_controller'] == k and 'default' in v:
+                    env_list.append(os.path.join(con.THT_ENV_DIR,
+                                                 v['default']))
                 env_list.extend(build_sdn_env_list(ds, v))
+            # check if the value is not a boolean
+            elif isinstance(v, tuple):
+                if ds[k] == v[0]:
+                    env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
             else:
                 env_list.append(os.path.join(con.THT_ENV_DIR, v))
-        elif isinstance(v, tuple):
-                if ds[k] == v[0]:
-                    env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
     if len(env_list) == 0:
         try:
             env_list.append(os.path.join(
@@ -93,25 +145,79 @@ def build_sdn_env_list(ds, sdn_map, env_list=None):
     return env_list
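# A rough usage sketch of the matching rules above, with hypothetical deploy
# settings and map entries (the keys and file names here are illustrative,
# not taken from SDN_FILE_MAP):
sample_map = {
    'opendaylight': {
        'sfc': 'neutron-sfc-opendaylight.yaml',          # boolean feature
        'dataplane': ('fdio', 'neutron-odl-fdio.yaml'),  # (value, env file)
        'default': 'neutron-opendaylight.yaml',
    }
}
sample_ds = {'sdn_controller': 'opendaylight', 'sfc': True,
             'dataplane': 'fdio'}
# build_sdn_env_list(sample_ds, sample_map) would return the controller's
# 'default' env file first, then one file per enabled feature, each joined
# onto con.THT_ENV_DIR.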
 
 
+def get_docker_sdn_files(ds_opts):
+    """
+    Returns the docker env files for the detected SDN
+    :param ds_opts: deploy options
+    :return: list of docker THT env files for an SDN
+    """
+    docker_services = con.VALID_DOCKER_SERVICES
+    tht_dir = con.THT_DOCKER_ENV_DIR
+    sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
+    for i, sdn_file in enumerate(sdn_env_list):
+        sdn_base = os.path.basename(sdn_file)
+        if sdn_base in docker_services:
+            if docker_services[sdn_base] is not None:
+                sdn_env_list[i] = \
+                    os.path.join(tht_dir, docker_services[sdn_base])
+            else:
+                sdn_env_list[i] = os.path.join(tht_dir, sdn_base)
+    return sdn_env_list
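# Rough illustration of the substitution above (hypothetical contents, since
# con.VALID_DOCKER_SERVICES is defined elsewhere):
#   VALID_DOCKER_SERVICES = {'neutron-opendaylight.yaml': None,
#                            'neutron-opendaylight-sriov.yaml':
#                                'some-docker-variant.yaml'}
# A None value keeps the same file name but re-roots it under
# con.THT_DOCKER_ENV_DIR; a non-None value substitutes that file name instead.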
+
+
 def create_deploy_cmd(ds, ns, inv, tmp_dir,
-                      virtual, env_file='opnfv-environment.yaml'):
+                      virtual, env_file='opnfv-environment.yaml',
+                      net_data=False):
 
     logging.info("Creating deployment command")
-    deploy_options = [env_file, 'network-environment.yaml']
+    deploy_options = ['network-environment.yaml']
+
     ds_opts = ds['deploy_options']
-    deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)
+
+    if ds_opts['containers']:
+        deploy_options.append(os.path.join(con.THT_ENV_DIR,
+                                           'docker.yaml'))
+
+    if ds['global_params']['ha_enabled']:
+        if ds_opts['containers']:
+            deploy_options.append(os.path.join(con.THT_ENV_DIR,
+                                               'docker-ha.yaml'))
+        else:
+            deploy_options.append(os.path.join(con.THT_ENV_DIR,
+                                               'puppet-pacemaker.yaml'))
+
+    if env_file:
+        deploy_options.append(env_file)
+
+    if ds_opts['containers']:
+        deploy_options.append('docker-images.yaml')
+        sdn_docker_files = get_docker_sdn_files(ds_opts)
+        for sdn_docker_file in sdn_docker_files:
+            deploy_options.append(sdn_docker_file)
+    else:
+        deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)
 
     for k, v in OTHER_FILE_MAP.items():
         if k in ds_opts and ds_opts[k]:
-            deploy_options.append(os.path.join(con.THT_ENV_DIR, v))
+            if ds_opts['containers']:
+                deploy_options.append(os.path.join(con.THT_DOCKER_ENV_DIR,
+                                                   "{}.yaml".format(k)))
+            else:
+                deploy_options.append(os.path.join(con.THT_ENV_DIR, v))
 
-    if ds_opts['ceph']:
-        prep_storage_env(ds, tmp_dir)
+    # TODO(trozet) Fix this check to look at whether ceph is in controller
+    # services rather than relying on the env file name
+    if ds_opts['ceph'] and 'csit' not in env_file:
+        prep_storage_env(ds, ns, virtual, tmp_dir)
         deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                            'storage-environment.yaml'))
-    if ds['global_params']['ha_enabled']:
-        deploy_options.append(os.path.join(con.THT_ENV_DIR,
-                                           'puppet-pacemaker.yaml'))
+    if ds_opts['sriov']:
+        prep_sriov_env(ds, tmp_dir)
+
+    # Check for 'k8s' here intentionally, as we may support other values
+    # such as openstack/openshift for the 'vim' option.
+    if ds_opts['vim'] == 'k8s':
+        deploy_options.append('kubernetes-environment.yaml')
 
     if virtual:
         deploy_options.append('virtual-environment.yaml')
@@ -119,14 +225,18 @@ def create_deploy_cmd(ds, ns, inv, tmp_dir,
         deploy_options.append('baremetal-environment.yaml')
 
     num_control, num_compute = inv.get_node_counts()
-    if num_control == 0 or num_compute == 0:
-        logging.error("Detected 0 control or compute nodes.  Control nodes: "
-                      "{}, compute nodes{}".format(num_control, num_compute))
-        raise ApexDeployException("Invalid number of control or computes")
-    elif num_control > 1 and not ds['global_params']['ha_enabled']:
+    if num_control > 1 and not ds['global_params']['ha_enabled']:
         num_control = 1
+    if platform.machine() == 'aarch64':
+        # aarch64 deploys were not completing in the default 90 mins.
+        # It is unclear whether this is related to the hardware the OOO
+        # support was developed on or to the virtualization support in
+        # CentOS.  Either way it will probably improve over time as aarch64
+        # support matures in CentOS; deploy time should be re-tested in the
+        # future so this multiplier can be removed.
+        con.DEPLOY_TIMEOUT *= 2
     cmd = "openstack overcloud deploy --templates --timeout {} " \
-          "--libvirt-type kvm".format(con.DEPLOY_TIMEOUT)
+          .format(con.DEPLOY_TIMEOUT)
     # build cmd env args
     for option in deploy_options:
         cmd += " -e {}".format(option)
@@ -134,6 +244,19 @@ def create_deploy_cmd(ds, ns, inv, tmp_dir,
     cmd += " --control-scale {}".format(num_control)
     cmd += " --compute-scale {}".format(num_compute)
     cmd += ' --control-flavor control --compute-flavor compute'
+    if net_data:
+        cmd += ' --networks-file network_data.yaml'
+    libvirt_type = 'kvm'
+    if virtual and (platform.machine() != 'aarch64'):
+        with open('/sys/module/kvm_intel/parameters/nested') as f:
+            nested_kvm = f.read().strip()
+            if nested_kvm != 'Y':
+                libvirt_type = 'qemu'
+    elif virtual and (platform.machine() == 'aarch64'):
+        libvirt_type = 'qemu'
+    cmd += ' --libvirt-type {}'.format(libvirt_type)
+    if platform.machine() == 'aarch64':
+        cmd += ' --override-ansible-cfg /home/stack/ansible.cfg '
     logging.info("Deploy command set: {}".format(cmd))
 
     with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
@@ -141,13 +264,17 @@ def create_deploy_cmd(ds, ns, inv, tmp_dir,
     return cmd
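# For orientation only -- an approximate shape of the command this function
# emits for an HA, containerized, virtual x86 deployment (the timeout value
# and exact env file set depend on constants and deploy settings):
#   openstack overcloud deploy --templates --timeout 90 \
#     -e network-environment.yaml -e <THT_ENV_DIR>/docker.yaml \
#     -e <THT_ENV_DIR>/docker-ha.yaml -e opnfv-environment.yaml \
#     -e docker-images.yaml -e <docker SDN env files> ... \
#     --control-scale 1 --compute-scale 1 \
#     --control-flavor control --compute-flavor compute --libvirt-type kvm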
 
 
-def prep_image(ds, img, tmp_dir, root_pw=None):
+def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
+               patches=None):
     """
     Locates sdn image and preps for deployment.
     :param ds: deploy settings
+    :param ns: network settings
     :param img: sdn image
     :param tmp_dir: dir to store modified sdn image
     :param root_pw: password to configure for overcloud image
+    :param docker_tag: Docker image tag for RDO version (default None)
+    :param patches: List of patches to apply to overcloud image
     :return: None
     """
     # TODO(trozet): Come up with a better way to organize this logic in this
@@ -160,6 +287,7 @@ def prep_image(ds, img, tmp_dir, root_pw=None):
     ds_opts = ds['deploy_options']
     virt_cmds = list()
     sdn = ds_opts['sdn_controller']
+    patched_containers = set()
     # we need this due to rhbz #1436021
     # fixed in systemd-219-37.el7
     if sdn is not False:
@@ -174,9 +302,40 @@ def prep_image(ds, img, tmp_dir, root_pw=None):
                 ".service"
         }])
 
+    if ns.get('http_proxy', ''):
+        virt_cmds.append({
+            con.VIRT_RUN_CMD:
+                "echo 'http_proxy={}' >> /etc/environment".format(
+                    ns['http_proxy'])})
+
+    if ns.get('https_proxy', ''):
+        virt_cmds.append({
+            con.VIRT_RUN_CMD:
+                "echo 'https_proxy={}' >> /etc/environment".format(
+                    ns['https_proxy'])})
+
+    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
+    shutil.copyfile(img, tmp_oc_image)
+    logging.debug("Temporary overcloud image stored as: {}".format(
+        tmp_oc_image))
+
     if ds_opts['vpn']:
-        virt_cmds.append({con.VIRT_RUN_CMD: "systemctl enable zrpcd"})
-        logging.info("ZRPC and Quagga enabled")
+        oc_builder.inject_quagga(tmp_oc_image, tmp_dir)
+        virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
+        virt_cmds.append({
+            con.VIRT_RUN_CMD:
+                "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
+                "/opt/quagga/etc/init.d/zrpcd_start.sh"})
+        virt_cmds.append({
+            con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
+                              "zrpcd_start.sh"})
+        virt_cmds.append({
+            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
+                              "init.d/zrpcd_start.sh' /etc/rc.local "})
+        virt_cmds.append({
+            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
+                              "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
+        logging.info("ZRPCD process started")
 
     dataplane = ds_opts['dataplane']
     if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
@@ -201,15 +360,24 @@ def prep_image(ds, img, tmp_dir, root_pw=None):
     if root_pw:
         pw_op = "password:{}".format(root_pw)
         virt_cmds.append({con.VIRT_PW: pw_op})
-    if ds_opts['sfc'] and dataplane == 'ovs':
-        virt_cmds.extend([
-            {con.VIRT_RUN_CMD: "yum -y install "
-                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
-                               "{}".format(OVS_NSH_KMOD_RPM)},
-            {con.VIRT_RUN_CMD: "yum downgrade -y "
-                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
-                               "{}".format(OVS_NSH_RPM)}
-        ])
+
+    if dataplane == 'ovs':
+        if ds_opts['sfc']:
+            oc_builder.inject_ovs_nsh(tmp_oc_image, tmp_dir)
+        elif sdn == 'opendaylight':
+            # FIXME(trozet) remove this after RDO is updated with fix for
+            # https://bugzilla.redhat.com/show_bug.cgi?id=1544892
+            ovs_file = os.path.basename(con.CUSTOM_OVS)
+            ovs_url = con.CUSTOM_OVS.replace(ovs_file, '')
+            utils.fetch_upstream_and_unpack(dest=tmp_dir, url=ovs_url,
+                                            targets=[ovs_file])
+            virt_cmds.extend([
+                {con.VIRT_UPLOAD: "{}:/root/".format(os.path.join(tmp_dir,
+                                                                  ovs_file))},
+                {con.VIRT_RUN_CMD: "yum downgrade -y /root/{}".format(
+                    ovs_file)}
+            ])
+
     if dataplane == 'fdio':
         # Patch neutron with using OVS external interface for router
         # and add generic linux NS interface driver
@@ -223,48 +391,74 @@ def prep_image(ds, img, tmp_dir, root_pw=None):
                                    "/root/nosdn_vpp_rpms/*.rpm"}
             ])
 
+    undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
+        'installer_vm']['ip']
     if sdn == 'opendaylight':
-        if ds_opts['odl_version'] != con.DEFAULT_ODL_VERSION:
-            virt_cmds.extend([
-                {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
-                {con.VIRT_RUN_CMD: "rm -rf /etc/puppet/modules/opendaylight"},
-                {con.VIRT_RUN_CMD: "cd /etc/puppet/modules && tar xzf "
-                                   "/root/puppet-opendaylight-"
-                                   "{}.tar.gz".format(ds_opts['odl_version'])}
-            ])
-            if ds_opts['odl_version'] == 'master':
-                virt_cmds.extend([
-                    {con.VIRT_RUN_CMD: "rpm -ivh --nodeps /root/{}/*".format(
-                        ds_opts['odl_version'])}
-                ])
-            else:
-                virt_cmds.extend([
-                    {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
-                        ds_opts['odl_version'])}
-                ])
-
-        elif sdn == 'opendaylight' and 'odl_vpp_netvirt' in ds_opts \
-                and ds_opts['odl_vpp_netvirt']:
-            virt_cmds.extend([
-                {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
-                {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
-                    ODL_NETVIRT_VPP_RPM)}
-            ])
-
-    if sdn == 'ovn':
+        oc_builder.inject_opendaylight(
+            odl_version=ds_opts['odl_version'],
+            image=tmp_oc_image,
+            tmp_dir=tmp_dir,
+            uc_ip=undercloud_admin_ip,
+            os_version=ds_opts['os_version'],
+            docker_tag=docker_tag,
+        )
+        if docker_tag:
+            patched_containers = patched_containers.union({'opendaylight'})
+
+    if patches:
+        if ds_opts['os_version'] == 'master':
+            branch = ds_opts['os_version']
+        else:
+            branch = "stable/{}".format(ds_opts['os_version'])
+        logging.info('Adding patches to overcloud')
+        patched_containers = patched_containers.union(
+            c_builder.add_upstream_patches(patches,
+                                           tmp_oc_image, tmp_dir,
+                                           branch,
+                                           uc_ip=undercloud_admin_ip,
+                                           docker_tag=docker_tag))
+    # if deploying containers with ceph and no dedicated ceph device is
+    # available, we need to use a persistent loop device for Ceph OSDs
+    if docker_tag and ds_opts['ceph_device'] == '/dev/loop3':
+        tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
+        with open(tmp_losetup, 'w') as fh:
+            fh.write(LOSETUP_SERVICE)
         virt_cmds.extend([
-            {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum update -y "
-                               "*openvswitch*"},
-            {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum downgrade -y "
-                               "*openvswitch*"}
+            {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(tmp_losetup)
+             },
+            {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size {}'
+                .format(LOOP_DEVICE_SIZE)},
+            {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
+            {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
         ])
-
-    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
-    shutil.copyfile(img, tmp_oc_image)
-    logging.debug("Temporary overcloud image stored as: {}".format(
-        tmp_oc_image))
+    # TODO(trozet) remove this after LP#173474 is fixed
+    dhcp_unit = '/usr/lib/systemd/system/dhcp-interface@.service'
+    virt_cmds.append(
+        {con.VIRT_RUN_CMD: "crudini --del {} Unit "
+                           "ConditionPathExists".format(dhcp_unit)})
+    # Prep for NFS
+    virt_cmds.extend([
+        {con.VIRT_INSTALL: "nfs-utils"},
+        {con.VIRT_RUN_CMD: "ln -s /usr/lib/systemd/system/nfs-server.service "
+                           "/etc/systemd/system/multi-user.target.wants/"
+                           "nfs-server.service"},
+        {con.VIRT_RUN_CMD: "mkdir -p /root/nfs/glance"},
+        {con.VIRT_RUN_CMD: "mkdir -p /root/nfs/cinder"},
+        {con.VIRT_RUN_CMD: "mkdir -p /root/nfs/nova"},
+        {con.VIRT_RUN_CMD: "chmod 777 /root/nfs/glance"},
+        {con.VIRT_RUN_CMD: "chmod 777 /root/nfs/cinder"},
+        {con.VIRT_RUN_CMD: "chmod 777 /root/nfs/nova"},
+        {con.VIRT_RUN_CMD: "echo '/root/nfs/glance *(rw,sync,"
+                           "no_root_squash,no_acl)' > /etc/exports"},
+        {con.VIRT_RUN_CMD: "echo '/root/nfs/cinder *(rw,sync,"
+                           "no_root_squash,no_acl)' >> /etc/exports"},
+        {con.VIRT_RUN_CMD: "echo '/root/nfs/nova *(rw,sync,"
+                           "no_root_squash,no_acl)' >> /etc/exports"},
+        {con.VIRT_RUN_CMD: "exportfs -avr"},
+    ])
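# Resulting image state, for illustration: /etc/exports ends up with one
# export per service, e.g.
#   /root/nfs/nova *(rw,sync,no_root_squash,no_acl)
# The 777 mode on the export directories is what allows the nova user on the
# overcloud nodes to write to the share, which is presumably the Nova/NFS
# permissions issue referenced in the commit subject.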
     virt_utils.virt_customize(virt_cmds, tmp_oc_image)
     logging.info("Overcloud image customization complete")
+    return patched_containers
 
 
 def make_ssh_key():
@@ -306,12 +500,20 @@ def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
     tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
     shutil.copyfile(opnfv_env, tmp_opnfv_env)
     tenant_nic_map = ns['networks']['tenant']['nic_mapping']
-    tenant_ctrl_nic = tenant_nic_map['controller']['members'][0]
-    tenant_comp_nic = tenant_nic_map['compute']['members'][0]
+    tenant_nic = dict()
+    tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
+    tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
+    external_nic_map = ns['networks']['external'][0]['nic_mapping']
+    external_nic = dict()
+    external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]
 
     # SSH keys
     private_key, public_key = make_ssh_key()
 
+    num_control, num_compute = inv.get_node_counts()
+    if num_control > 1 and not ds['global_params']['ha_enabled']:
+        num_control = 1
+
     # Make easier/faster variables to index in the file editor
     if 'performance' in ds_opts:
         perf = True
@@ -339,8 +541,12 @@ def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
     else:
         perf = False
 
+    tenant_settings = ns['networks']['tenant']
+    tenant_vlan_enabled = 'tenant' in ns.enabled_network_list and \
+        ns['networks']['tenant'].get('segmentation_type') == 'vlan'
+
     # Modify OPNFV environment
-    # TODO: Change to build a dict and outputing yaml rather than parsing
+    # TODO: Change to build a dict and outputting yaml rather than parsing
     for line in fileinput.input(tmp_opnfv_env, inplace=True):
         line = line.strip('\n')
         output_line = line
@@ -354,6 +560,54 @@ def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
             output_line += key_out
         elif 'replace_public_key' in line:
             output_line = "    public_key: '{}'".format(public_key)
+        elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
+                'resource_registry' in line:
+            output_line = "resource_registry:\n" \
+                          "  OS::TripleO::NodeUserData: first-boot.yaml"
+        elif 'ComputeExtraConfigPre' in line and \
+                ds_opts['dataplane'] == 'ovs_dpdk':
+            output_line = '  OS::TripleO::ComputeExtraConfigPre: ' \
+                          './ovs-dpdk-preconfig.yaml'
+        elif 'NeutronNetworkVLANRanges' in line:
+            vlan_setting = ''
+            if tenant_vlan_enabled:
+                if ns['networks']['tenant']['overlay_id_range']:
+                    vlan_setting = ns['networks']['tenant']['overlay_id_range']
+                    if 'datacentre' not in vlan_setting:
+                        vlan_setting += ',datacentre:1:1000'
+            # SRIOV networks are VLAN based provider networks. In order to
+            # simplify the deployment, nfv_sriov will be the default physnet.
+            # VLANs are not needed in advance, and the user will have to create
+            # the network specifying the segmentation-id.
+            if ds_opts['sriov']:
+                if vlan_setting:
+                    vlan_setting += ",nfv_sriov"
+                else:
+                    vlan_setting = "datacentre:1:1000,nfv_sriov"
+            if vlan_setting:
+                output_line = "  NeutronNetworkVLANRanges: " + vlan_setting
+        elif 'NeutronBridgeMappings' in line and tenant_vlan_enabled:
+            if tenant_settings['overlay_id_range']:
+                physnets = tenant_settings['overlay_id_range'].split(',')
+                output_line = "  NeutronBridgeMappings: "
+                for physnet in physnets:
+                    physnet_name = physnet.split(':')[0]
+                    if physnet_name != 'datacentre':
+                        output_line += "{}:br-vlan,".format(physnet_name)
+                output_line += "datacentre:br-ex"
+        elif 'OpenDaylightProviderMappings' in line and tenant_vlan_enabled \
+                and ds_opts['sdn_controller'] == 'opendaylight':
+            if tenant_settings['overlay_id_range']:
+                physnets = tenant_settings['overlay_id_range'].split(',')
+                output_line = "  OpenDaylightProviderMappings: "
+                for physnet in physnets:
+                    physnet_name = physnet.split(':')[0]
+                    if physnet_name != 'datacentre':
+                        output_line += "{}:br-vlan,".format(physnet_name)
+                output_line += "datacentre:br-ex"
+        elif 'NeutronNetworkType' in line and tenant_vlan_enabled:
+            output_line = "  NeutronNetworkType: vlan\n" \
+                          "  NeutronTunnelTypes: ''"
 
         if ds_opts['sdn_controller'] == 'opendaylight' and \
                 'odl_vpp_routing_node' in ds_opts:
@@ -361,30 +615,24 @@ def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
                 output_line = ("    opendaylight::vpp_routing_node: {}.{}"
                                .format(ds_opts['odl_vpp_routing_node'],
                                        ns['domain_name']))
-            elif 'ControllerExtraConfig' in line:
-                output_line = ("  ControllerExtraConfig:\n    "
-                               "tripleo::profile::base::neutron::agents::"
-                               "honeycomb::interface_role_mapping:"
-                               " ['{}:tenant-interface]'"
-                               .format(tenant_ctrl_nic))
-            elif 'NovaComputeExtraConfig' in line:
-                output_line = ("  NovaComputeExtraConfig:\n    "
-                               "tripleo::profile::base::neutron::agents::"
-                               "honeycomb::interface_role_mapping:"
-                               " ['{}:tenant-interface]'"
-                               .format(tenant_comp_nic))
         elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
             if 'NeutronVPPAgentPhysnets' in line:
-                output_line = ("  NeutronVPPAgentPhysnets: 'datacentre:{}'".
-                               format(tenant_ctrl_nic))
+                # VPP interface tap0 will be used for external network
+                # connectivity.
+                output_line = ("  NeutronVPPAgentPhysnets: "
+                               "'datacentre:{},external:tap0'"
+                               .format(tenant_nic['Controller']))
         elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
                 'dvr') is True:
             if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
                 output_line = ''
             elif 'NeutronDhcpAgentsPerNetwork' in line:
-                num_control, num_compute = inv.get_node_counts()
+                if num_compute == 0:
+                    num_dhcp_agents = num_control
+                else:
+                    num_dhcp_agents = num_compute
                 output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
-                               .format(num_compute))
+                               .format(num_dhcp_agents))
             elif 'ComputeServices' in line:
                 output_line = ("  ComputeServices:\n"
                                "    - OS::TripleO::Services::NeutronDhcpAgent")
@@ -405,48 +653,91 @@ def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
                         perf_line += ("\n    "
                                       "fdio::vpp_cpu_corelist_workers: '{}'"
                                       .format(perf_opts['corelist-workers']))
+                    if ds_opts['sdn_controller'] == 'opendaylight' and \
+                            ds_opts['dataplane'] == 'fdio':
+                        if role == 'NovaCompute':
+                            perf_line += ("\n    "
+                                          "tripleo::profile::base::neutron::"
+                                          "agents::honeycomb::"
+                                          "interface_role_mapping:"
+                                          " ['{}:tenant-interface',"
+                                          "'{}:public-interface']"
+                                          .format(tenant_nic[role],
+                                                  external_nic[role]))
+                        else:
+                            perf_line += ("\n    "
+                                          "tripleo::profile::base::neutron::"
+                                          "agents::honeycomb::"
+                                          "interface_role_mapping:"
+                                          " ['{}:tenant-interface']"
+                                          .format(tenant_nic[role]))
                     if perf_line:
                         output_line = ("  {}:{}".format(cfg, perf_line))
 
-            # kernel args
-            # (FIXME) use compute's kernel settings for all nodes for now.
-            if 'ComputeKernelArgs' in line and perf_kern_comp:
-                kernel_args = ''
-                for k, v in perf_kern_comp.items():
-                    kernel_args += "{}={} ".format(k, v)
-                if kernel_args:
-                    output_line = "  ComputeKernelArgs: '{}'".\
-                        format(kernel_args)
             if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
                 for k, v in OVS_PERF_MAP.items():
                     if k in line and v in perf_ovs_comp:
                         output_line = "  {}: '{}'".format(k, perf_ovs_comp[v])
 
-        print(output_line)
+            # kernel args
+            # (FIXME) use compute's kernel settings for all nodes for now.
+            if perf_kern_comp:
+                if 'NovaSchedulerDefaultFilters' in line:
+                    output_line = \
+                        "  NovaSchedulerDefaultFilters: 'RamFilter," \
+                        "ComputeFilter,AvailabilityZoneFilter," \
+                        "ComputeCapabilitiesFilter," \
+                        "ImagePropertiesFilter,NUMATopologyFilter'"
+                elif 'ComputeKernelArgs' in line:
+                    kernel_args = ''
+                    for k, v in perf_kern_comp.items():
+                        kernel_args += "{}={} ".format(k, v)
+                    if kernel_args:
+                        output_line = "  ComputeKernelArgs: '{}'".\
+                            format(kernel_args)
 
-    logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
+        print(output_line)
 
-    # Modify Network environment
-    for line in fileinput.input(net_env, inplace=True):
-        line = line.strip('\n')
-        if 'ComputeExtraConfigPre' in line and \
-                ds_opts['dataplane'] == 'ovs_dpdk':
-            print('  OS::TripleO::ComputeExtraConfigPre: '
-                  './ovs-dpdk-preconfig.yaml')
-        elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
-                'resource_registry' in line:
-            print("resource_registry:\n"
-                  "  OS::TripleO::NodeUserData: first-boot.yaml")
-        elif perf and perf_kern_comp and \
-                'NovaSchedulerDefaultFilters' in line:
-            print("  NovaSchedulerDefaultFilters: 'RamFilter,"
-                  "ComputeFilter,AvailabilityZoneFilter,"
-                  "ComputeCapabilitiesFilter,ImagePropertiesFilter,"
-                  "NUMATopologyFilter'")
+    # Merge compute services into control services if this is a
+    # single-node deployment
+    if num_compute == 0:
+        logging.info("All in one deployment. Checking if service merging "
+                     "required into control services")
+        with open(tmp_opnfv_env, 'r') as fh:
+            data = yaml.safe_load(fh)
+        param_data = data['parameter_defaults']
+        # Check to see if any parameters are set for Compute
+        for param in param_data.keys():
+            if param != 'ComputeServices' and param.startswith('Compute'):
+                logging.warning("Compute parameter set, but will not be used "
+                                "in deployment: {}. Please use Controller "
+                                "based parameters when using All-in-one "
+                                "deployments".format(param))
+        if ('ControllerServices' in param_data and 'ComputeServices' in
+                param_data):
+            logging.info("Services detected in environment file. Merging...")
+            ctrl_services = param_data['ControllerServices']
+            cmp_services = param_data['ComputeServices']
+            param_data['ControllerServices'] = list(set().union(
+                ctrl_services, cmp_services))
+            for dup_service in DUPLICATE_COMPUTE_SERVICES:
+                if dup_service in param_data['ControllerServices']:
+                    param_data['ControllerServices'].remove(dup_service)
+            param_data.pop('ComputeServices')
+            logging.debug("Merged controller services: {}".format(
+                pprint.pformat(param_data['ControllerServices'])
+            ))
+            with open(tmp_opnfv_env, 'w') as fh:
+                yaml.safe_dump(data, fh, default_flow_style=False)
         else:
-            print(line)
+            logging.info("No services detected in env file, not merging "
+                         "services")
 
-    logging.info("network-environment file written to {}".format(net_env))
+    logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
+    with open(tmp_opnfv_env, 'r') as fh:
+        logging.debug("opnfv-environment content is : {}".format(
+            pprint.pformat(yaml.safe_load(fh.read()))
+        ))
 
 
 def generate_ceph_key():
@@ -455,11 +746,13 @@ def generate_ceph_key():
     return base64.b64encode(header + key)
 
 
-def prep_storage_env(ds, tmp_dir):
+def prep_storage_env(ds, ns, virtual, tmp_dir):
     """
     Creates storage environment file for deployment.  Source file is copied by
     undercloud playbook to host.
     :param ds:
+    :param ns:
+    :param virtual:
     :param tmp_dir:
     :return:
     """
@@ -481,9 +774,35 @@ def prep_storage_env(ds, tmp_dir):
         elif 'CephAdminKey' in line:
             print("  CephAdminKey: {}".format(generate_ceph_key().decode(
                 'utf-8')))
+        elif 'CephClientKey' in line:
+            print("  CephClientKey: {}".format(generate_ceph_key().decode(
+                'utf-8')))
         else:
             print(line)
-    if 'ceph_device' in ds_opts and ds_opts['ceph_device']:
+
+    if ds_opts['containers']:
+        ceph_params = {}
+
+        # The maximum number of PGs allowed is calculated as num_mons * 200,
+        # so set the PG count and pool defaults such that
+        # num_pgs * num_pools * num_osds stays below that limit
+        ceph_params['CephPoolDefaultSize'] = 2
+        ceph_params['CephPoolDefaultPgNum'] = 32
+        if virtual:
+            ceph_params['CephAnsibleExtraConfig'] = {
+                'centos_package_dependencies': [],
+                'ceph_osd_docker_memory_limit': '1g',
+                'ceph_mds_docker_memory_limit': '1g',
+            }
+        ceph_device = ds_opts['ceph_device']
+        ceph_params['CephAnsibleDisksConfig'] = {
+            'devices': [ceph_device],
+            'journal_size': 512,
+            'osd_scenario': 'collocated'
+        }
+        utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
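# Approximate parameter_defaults merged into the storage env file for a
# virtual deploy with ceph_device=/dev/loop3 (values from the dict above):
#   CephPoolDefaultSize: 2
#   CephPoolDefaultPgNum: 32
#   CephAnsibleExtraConfig:
#     centos_package_dependencies: []
#     ceph_osd_docker_memory_limit: 1g
#     ceph_mds_docker_memory_limit: 1g
#   CephAnsibleDisksConfig:
#     devices: [/dev/loop3]
#     journal_size: 512
#     osd_scenario: collocated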
+    # TODO(trozet): remove following block as we only support containers now
+    elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
         with open(storage_file, 'a') as fh:
             fh.write('  ExtraConfig:\n')
             fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
@@ -491,12 +810,58 @@ def prep_storage_env(ds, tmp_dir):
             ))
 
 
-def external_network_cmds(ns):
+def prep_sriov_env(ds, tmp_dir):
+    """
+    Creates SRIOV environment file for deployment. Source file is copied by
+    undercloud playbook to host.
+    :param ds:
+    :param tmp_dir:
+    :return:
+    """
+    ds_opts = ds['deploy_options']
+    sriov_iface = ds_opts['sriov']
+    sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
+    if not os.path.isfile(sriov_file):
+        logging.error("sriov-environment file is not in tmp directory: {}. "
+                      "Check if file was copied from "
+                      "undercloud".format(tmp_dir))
+        raise ApexDeployException("sriov-environment file not copied from "
+                                  "undercloud")
+    # TODO(rnoriega): Instead of line editing, refactor this code to load
+    # yaml file into a dict, edit it and write the file back.
+    for line in fileinput.input(sriov_file, inplace=True):
+        line = line.strip('\n')
+        if 'NovaSchedulerDefaultFilters' in line:
+            print("  {}".format(line[3:]))
+        elif 'NovaSchedulerAvailableFilters' in line:
+            print("  {}".format(line[3:]))
+        elif 'NeutronPhysicalDevMappings' in line:
+            print("  NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
+                  .format(sriov_iface))
+        elif 'NeutronSriovNumVFs' in line:
+            print("  NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
+        elif 'NovaPCIPassthrough' in line:
+            print("  NovaPCIPassthrough:")
+        elif 'devname' in line:
+            print("    - devname: \"{}\"".format(sriov_iface))
+        elif 'physical_network' in line:
+            print("      physical_network: \"nfv_sriov\"")
+        else:
+            print(line)
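# Illustrative output for a hypothetical SR-IOV interface 'ens5f0': after the
# rewrite the file would carry, alongside the re-emitted scheduler filter
# settings:
#   NeutronPhysicalDevMappings: "nfv_sriov:ens5f0"
#   NeutronSriovNumVFs: "ens5f0:8"
#   NovaPCIPassthrough:
#     - devname: "ens5f0"
#       physical_network: "nfv_sriov"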
+
+
+def external_network_cmds(ns, ds):
     """
     Generates external network openstack commands
     :param ns: network settings
+    :param ds: deploy settings
     :return: list of commands to configure external network
     """
+    ds_opts = ds['deploy_options']
+    external_physnet = 'datacentre'
+    if ds_opts['dataplane'] == 'fdio' and \
+       ds_opts['sdn_controller'] != 'opendaylight':
+        external_physnet = 'external'
     if 'external' in ns.enabled_network_list:
         net_config = ns['networks']['external'][0]
         external = True
@@ -517,7 +882,8 @@ def external_network_cmds(ns):
                                                        'compute']['vlan'])
     cmds.append("openstack network create external --project service "
                 "--external --provider-network-type {} "
-                "--provider-physical-network datacentre".format(ext_type))
+                "--provider-physical-network {}"
+                .format(ext_type, external_physnet))
     # create subnet command
     cidr = net_config['cidr']
     subnet_cmd = "openstack subnet create external-subnet --project " \
@@ -525,8 +891,7 @@ def external_network_cmds(ns):
                  "--allocation-pool start={},end={} --subnet-range " \
                  "{}".format(gateway, pool_start, pool_end, str(cidr))
     if external and cidr.version == 6:
-        subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
-                      '--ipv6-address-mode slaac'
+        subnet_cmd += ' --ip-version 6'
     cmds.append(subnet_cmd)
     logging.debug("Neutron external network commands determined "
                   "as: {}".format(cmds))