Fixes permissions issue for Nova with NFS
diff --git a/apex/overcloud/deploy.py b/apex/overcloud/deploy.py
index dc7d84f..f40c8bd 100644
--- a/apex/overcloud/deploy.py
+++ b/apex/overcloud/deploy.py
@@ -12,10 +12,12 @@ import fileinput
 import logging
 import os
 import platform
+import pprint
 import shutil
 import uuid
 import struct
 import time
+import yaml
 import apex.builders.overcloud_builder as oc_builder
 import apex.builders.common_builder as c_builder
 
@@ -70,8 +72,6 @@ OVS_PERF_MAP = {
     'NeutronDpdkMemoryChannels': 'memory_channels'
 }
 
-OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
-OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
 ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
                       ".noarch.rpm"
 
@@ -92,6 +92,13 @@ RemainAfterExit=yes
 WantedBy=multi-user.target
 """
 
+DUPLICATE_COMPUTE_SERVICES = [
+    'OS::TripleO::Services::ComputeNeutronCorePlugin',
+    'OS::TripleO::Services::ComputeNeutronMetadataAgent',
+    'OS::TripleO::Services::ComputeNeutronOvsAgent',
+    'OS::TripleO::Services::ComputeNeutronL3Agent'
+]
+
 
 def build_sdn_env_list(ds, sdn_map, env_list=None):
     """
@@ -138,24 +145,24 @@ def build_sdn_env_list(ds, sdn_map, env_list=None):
     return env_list
 
 
-def get_docker_sdn_file(ds_opts):
+def get_docker_sdn_files(ds_opts):
     """
     Returns docker env file for detected SDN
     :param ds_opts: deploy options
-    :return: docker THT env file for an SDN
+    :return: list of docker THT env files for an SDN
     """
-    # FIXME(trozet): We assume right now there is only one docker SDN file
     docker_services = con.VALID_DOCKER_SERVICES
-    tht_dir = con.THT_DOCKER_ENV_DIR[ds_opts['os_version']]
+    tht_dir = con.THT_DOCKER_ENV_DIR
     sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
-    for sdn_file in sdn_env_list:
+    for i, sdn_file in enumerate(sdn_env_list):
         sdn_base = os.path.basename(sdn_file)
         if sdn_base in docker_services:
             if docker_services[sdn_base] is not None:
-                return os.path.join(tht_dir,
-                                    docker_services[sdn_base])
+                sdn_env_list[i] = \
+                    os.path.join(tht_dir, docker_services[sdn_base])
             else:
-                return os.path.join(tht_dir, sdn_base)
+                sdn_env_list[i] = os.path.join(tht_dir, sdn_base)
+    return sdn_env_list
 
 
 def create_deploy_cmd(ds, ns, inv, tmp_dir,
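
The refactor above drops the old single-SDN-file assumption: rather than returning the first match, every entry in the env list is rewritten in place to its docker THT equivalent. A minimal standalone sketch of that mapping, with made-up entries standing in for con.VALID_DOCKER_SERVICES (a None value meaning the docker env file keeps the plain basename); the helper name is hypothetical:

```python
import os

# Illustrative stand-in for con.VALID_DOCKER_SERVICES; the real mapping
# lives in apex constants. None means the docker file keeps its basename.
VALID_DOCKER_SERVICES = {
    'neutron-opendaylight.yaml': None,
    'neutron-opendaylight-sriov.yaml': 'neutron-opendaylight-sriov-docker.yaml',
}

def map_sdn_files_to_docker(sdn_env_list, tht_dir):
    """Rewrite each plain THT env path to its docker counterpart in place."""
    for i, sdn_file in enumerate(sdn_env_list):
        base = os.path.basename(sdn_file)
        if base in VALID_DOCKER_SERVICES:
            docker_name = VALID_DOCKER_SERVICES[base] or base
            sdn_env_list[i] = os.path.join(tht_dir, docker_name)
    return sdn_env_list
```
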
@@ -184,10 +191,9 @@ def create_deploy_cmd(ds, ns, inv, tmp_dir,
 
     if ds_opts['containers']:
         deploy_options.append('docker-images.yaml')
-        sdn_docker_file = get_docker_sdn_file(ds_opts)
-        if sdn_docker_file:
+        sdn_docker_files = get_docker_sdn_files(ds_opts)
+        for sdn_docker_file in sdn_docker_files:
             deploy_options.append(sdn_docker_file)
-            deploy_options.append('sdn-images.yaml')
     else:
         deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)
 
@@ -199,6 +205,8 @@ def create_deploy_cmd(ds, ns, inv, tmp_dir,
             else:
                 deploy_options.append(os.path.join(con.THT_ENV_DIR, v))
 
+    # TODO(trozet): Fix this check to detect whether Ceph is in the
+    # controller services rather than relying on the env file name

     if ds_opts['ceph'] and 'csit' not in env_file:
         prep_storage_env(ds, ns, virtual, tmp_dir)
         deploy_options.append(os.path.join(con.THT_ENV_DIR,
@@ -217,11 +225,7 @@ def create_deploy_cmd(ds, ns, inv, tmp_dir,
         deploy_options.append('baremetal-environment.yaml')
 
     num_control, num_compute = inv.get_node_counts()
-    if num_control == 0 or num_compute == 0:
-        logging.error("Detected 0 control or compute nodes.  Control nodes: "
-                      "{}, compute nodes{}".format(num_control, num_compute))
-        raise ApexDeployException("Invalid number of control or computes")
-    elif num_control > 1 and not ds['global_params']['ha_enabled']:
+    if num_control > 1 and not ds['global_params']['ha_enabled']:
         num_control = 1
     if platform.machine() == 'aarch64':
         # aarch64 deploys were not completing in the default 90 mins.
@@ -243,12 +247,16 @@ def create_deploy_cmd(ds, ns, inv, tmp_dir,
     if net_data:
         cmd += ' --networks-file network_data.yaml'
     libvirt_type = 'kvm'
-    if virtual:
+    if virtual and (platform.machine() != 'aarch64'):
         with open('/sys/module/kvm_intel/parameters/nested') as f:
             nested_kvm = f.read().strip()
             if nested_kvm != 'Y':
                 libvirt_type = 'qemu'
+    elif virtual and (platform.machine() == 'aarch64'):
+        libvirt_type = 'qemu'
     cmd += ' --libvirt-type {}'.format(libvirt_type)
+    if platform.machine() == 'aarch64':
+        cmd += ' --override-ansible-cfg /home/stack/ansible.cfg '
     logging.info("Deploy command set: {}".format(cmd))
 
     with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
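
The libvirt-type selection now has three outcomes: kvm for baremetal, kvm for virtual x86 hosts with nested KVM enabled, and qemu otherwise (including all virtual aarch64 deploys). A condensed sketch of the same decision; note that newer kernels report the nested parameter as '1' rather than 'Y', which the deploy code does not account for, so the sketch accepts both:

```python
import platform

def pick_libvirt_type(virtual):
    """Condensed version of the selection above; a sketch, not apex API."""
    if not virtual:
        return 'kvm'
    if platform.machine() == 'aarch64':
        # virtual aarch64 deploys always fall back to full emulation
        return 'qemu'
    # kvm-on-kvm only works if the host kernel has nested virt enabled
    # (Intel-only probe, mirroring the deploy code)
    with open('/sys/module/kvm_intel/parameters/nested') as f:
        return 'kvm' if f.read().strip() in ('Y', '1') else 'qemu'
```
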
@@ -306,7 +314,13 @@ def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
                 "echo 'https_proxy={}' >> /etc/environment".format(
                     ns['https_proxy'])})
 
+    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
+    shutil.copyfile(img, tmp_oc_image)
+    logging.debug("Temporary overcloud image stored as: {}".format(
+        tmp_oc_image))
+
     if ds_opts['vpn']:
+        oc_builder.inject_quagga(tmp_oc_image, tmp_dir)
         virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
         virt_cmds.append({
             con.VIRT_RUN_CMD:
@@ -346,15 +360,24 @@ def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
     if root_pw:
         pw_op = "password:{}".format(root_pw)
         virt_cmds.append({con.VIRT_PW: pw_op})
-    if ds_opts['sfc'] and dataplane == 'ovs':
-        virt_cmds.extend([
-            {con.VIRT_RUN_CMD: "yum -y install "
-                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
-                               "{}".format(OVS_NSH_KMOD_RPM)},
-            {con.VIRT_RUN_CMD: "yum downgrade -y "
-                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
-                               "{}".format(OVS_NSH_RPM)}
-        ])
+
+    if dataplane == 'ovs':
+        if ds_opts['sfc']:
+            oc_builder.inject_ovs_nsh(tmp_oc_image, tmp_dir)
+        elif sdn == 'opendaylight':
+            # FIXME(trozet) remove this after RDO is updated with fix for
+            # https://bugzilla.redhat.com/show_bug.cgi?id=1544892
+            ovs_file = os.path.basename(con.CUSTOM_OVS)
+            ovs_url = con.CUSTOM_OVS.replace(ovs_file, '')
+            utils.fetch_upstream_and_unpack(dest=tmp_dir, url=ovs_url,
+                                            targets=[ovs_file])
+            virt_cmds.extend([
+                {con.VIRT_UPLOAD: "{}:/root/".format(os.path.join(tmp_dir,
+                                                                  ovs_file))},
+                {con.VIRT_RUN_CMD: "yum downgrade -y /root/{}".format(
+                    ovs_file)}
+            ])
+
     if dataplane == 'fdio':
         # Patch neutron with using OVS external interface for router
         # and add generic linux NS interface driver
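
The URL/filename split in the ODL branch above is plain string manipulation on the con.CUSTOM_OVS constant. For example (hypothetical URL; the real value lives in apex constants):

```python
import os

# Hypothetical value for illustration; con.CUSTOM_OVS is the real constant
CUSTOM_OVS = 'http://artifacts.example.org/apex/openvswitch.rpm'

ovs_file = os.path.basename(CUSTOM_OVS)      # 'openvswitch.rpm'
ovs_url = CUSTOM_OVS.replace(ovs_file, '')   # base URL, trailing slash kept
```
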
@@ -368,14 +391,9 @@ def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
                                    "/root/nosdn_vpp_rpms/*.rpm"}
             ])
 
-    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
-    shutil.copyfile(img, tmp_oc_image)
-    logging.debug("Temporary overcloud image stored as: {}".format(
-        tmp_oc_image))
-
+    undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
+        'installer_vm']['ip']
     if sdn == 'opendaylight':
-        undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
-            'installer_vm']['ip']
         oc_builder.inject_opendaylight(
             odl_version=ds_opts['odl_version'],
             image=tmp_oc_image,
@@ -418,6 +436,26 @@ def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
     virt_cmds.append(
         {con.VIRT_RUN_CMD: "crudini --del {} Unit "
                            "ConditionPathExists".format(dhcp_unit)})
+    # Prep for NFS
+    virt_cmds.extend([
+        {con.VIRT_INSTALL: "nfs-utils"},
+        {con.VIRT_RUN_CMD: "ln -s /usr/lib/systemd/system/nfs-server.service "
+                           "/etc/systemd/system/multi-user.target.wants/"
+                           "nfs-server.service"},
+        {con.VIRT_RUN_CMD: "mkdir -p /root/nfs/glance"},
+        {con.VIRT_RUN_CMD: "mkdir -p /root/nfs/cinder"},
+        {con.VIRT_RUN_CMD: "mkdir -p /root/nfs/nova"},
+        {con.VIRT_RUN_CMD: "chmod 777 /root/nfs/glance"},
+        {con.VIRT_RUN_CMD: "chmod 777 /root/nfs/cinder"},
+        {con.VIRT_RUN_CMD: "chmod 777 /root/nfs/nova"},
+        {con.VIRT_RUN_CMD: "echo '/root/nfs/glance *(rw,sync,"
+                           "no_root_squash,no_acl)' > /etc/exports"},
+        {con.VIRT_RUN_CMD: "echo '/root/nfs/cinder *(rw,sync,"
+                           "no_root_squash,no_acl)' >> /etc/exports"},
+        {con.VIRT_RUN_CMD: "echo '/root/nfs/nova *(rw,sync,"
+                           "no_root_squash,no_acl)' >> /etc/exports"},
+        {con.VIRT_RUN_CMD: "exportfs -avr"},
+    ])
     virt_utils.virt_customize(virt_cmds, tmp_oc_image)
     logging.info("Overcloud image customization complete")
     return patched_containers
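
This hunk carries the commit subject's actual fix: the overcloud image now ships an NFS server with mode-777 export directories for Glance, Cinder, and Nova, so the OpenStack services (which presumably write as non-root users) are not rejected by a root-owned directory. After customization the image's /etc/exports should contain exactly:

```
/root/nfs/glance *(rw,sync,no_root_squash,no_acl)
/root/nfs/cinder *(rw,sync,no_root_squash,no_acl)
/root/nfs/nova *(rw,sync,no_root_squash,no_acl)
```
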
@@ -472,6 +510,10 @@ def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
     # SSH keys
     private_key, public_key = make_ssh_key()
 
+    num_control, num_compute = inv.get_node_counts()
+    if num_control > 1 and not ds['global_params']['ha_enabled']:
+        num_control = 1
+
     # Make easier/faster variables to index in the file editor
     if 'performance' in ds_opts:
         perf = True
@@ -585,9 +627,12 @@ def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
             if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
                 output_line = ''
             elif 'NeutronDhcpAgentsPerNetwork' in line:
-                num_control, num_compute = inv.get_node_counts()
+                if num_compute == 0:
+                    num_dhcp_agents = num_control
+                else:
+                    num_dhcp_agents = num_compute
                 output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
-                               .format(num_compute))
+                               .format(num_dhcp_agents))
             elif 'ComputeServices' in line:
                 output_line = ("  ComputeServices:\n"
                                "    - OS::TripleO::Services::NeutronDhcpAgent")
@@ -653,7 +698,46 @@ def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
 
         print(output_line)
 
+    # Merge compute services into control services if only a single
+    # node deployment
+    if num_compute == 0:
+        logging.info("All in one deployment. Checking if service merging "
+                     "required into control services")
+        with open(tmp_opnfv_env, 'r') as fh:
+            data = yaml.safe_load(fh)
+        param_data = data['parameter_defaults']
+        # Check to see if any parameters are set for Compute
+        for param in param_data.keys():
+            if param != 'ComputeServices' and param.startswith('Compute'):
+                logging.warning("Compute parameter set, but will not be used "
+                                "in deployment: {}. Please use Controller "
+                                "based parameters when using All-in-one "
+                                "deployments".format(param))
+        if ('ControllerServices' in param_data and 'ComputeServices' in
+                param_data):
+            logging.info("Services detected in environment file. Merging...")
+            ctrl_services = param_data['ControllerServices']
+            cmp_services = param_data['ComputeServices']
+            param_data['ControllerServices'] = list(set().union(
+                ctrl_services, cmp_services))
+            for dup_service in DUPLICATE_COMPUTE_SERVICES:
+                if dup_service in param_data['ControllerServices']:
+                    param_data['ControllerServices'].remove(dup_service)
+            param_data.pop('ComputeServices')
+            logging.debug("Merged controller services: {}".format(
+                pprint.pformat(param_data['ControllerServices'])
+            ))
+            with open(tmp_opnfv_env, 'w') as fh:
+                yaml.safe_dump(data, fh, default_flow_style=False)
+        else:
+            logging.info("No services detected in env file, not merging "
+                         "services")
+
     logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
+    with open(tmp_opnfv_env, 'r') as fh:
+        logging.debug("opnfv-environment content is : {}".format(
+            pprint.pformat(yaml.safe_load(fh.read()))
+        ))
 
 
 def generate_ceph_key():
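
For all-in-one deployments, the environment file's ComputeServices list is unioned into ControllerServices and the compute-side duplicates of services the controller already runs (the DUPLICATE_COMPUTE_SERVICES list added at the top of the module) are dropped. A self-contained sketch of the merge with illustrative service lists, shortened to a single duplicate entry:

```python
import pprint

DUPLICATE_COMPUTE_SERVICES = ['OS::TripleO::Services::ComputeNeutronOvsAgent']

# Illustrative lists; the real ones come from the tmp opnfv env file
ctrl_services = ['OS::TripleO::Services::NeutronApi',
                 'OS::TripleO::Services::NovaScheduler']
cmp_services = ['OS::TripleO::Services::NovaCompute',
                'OS::TripleO::Services::ComputeNeutronOvsAgent']

merged = list(set().union(ctrl_services, cmp_services))
for dup in DUPLICATE_COMPUTE_SERVICES:
    if dup in merged:
        merged.remove(dup)  # controller runs its own variant of this agent
pprint.pprint(sorted(merged))
# NovaCompute survives the merge; the compute-side OVS agent does not
```
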
@@ -807,8 +891,7 @@ def external_network_cmds(ns, ds):
                  "--allocation-pool start={},end={} --subnet-range " \
                  "{}".format(gateway, pool_start, pool_end, str(cidr))
     if external and cidr.version == 6:
-        subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
-                      '--ipv6-address-mode slaac'
+        subnet_cmd += ' --ip-version 6'
     cmds.append(subnet_cmd)
     logging.debug("Neutron external network commands determined "
                   "as: {}".format(cmds))