import fileinput
import logging
import os
-import re
+import platform
import shutil
import uuid
import struct
'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
'default': 'neutron-opendaylight-honeycomb.yaml'
},
+ 'l2gw': 'neutron-l2gw-opendaylight.yaml',
'default': 'neutron-opendaylight.yaml',
},
'onos': {
def build_sdn_env_list(ds, sdn_map, env_list=None):
+ """
+ Builds a list of SDN environment files to be used in the deploy cmd.
+
+ This function recursively searches an sdn_map. First the sdn controller is
+ matched and then the function looks for enabled features for that
+ controller to determine which environment files should be used. By
+ default a feature is added to the list when its value in the deploy
+ settings is the boolean True. If a feature's value is not a boolean,
+ it is instead compared against a (key, value) tuple from the map.
+
+ :param ds: deploy settings
+ :param sdn_map: SDN map to recursively search
+ :param env_list: recursive var to hold previously found env_list
+ :return: A list of env files
+ """
if env_list is None:
env_list = list()
for k, v in sdn_map.items():
if ds['sdn_controller'] == k or (k in ds and ds[k] is True):
if isinstance(v, dict):
+ # Append default SDN env file first
+ # The assumption is that feature-enabled SDN env files
+ # override and do not conflict with previously set default
+ # settings
+ if ds['sdn_controller'] == k and 'default' in v:
+ env_list.append(os.path.join(con.THT_ENV_DIR,
+ v['default']))
env_list.extend(build_sdn_env_list(ds, v))
else:
env_list.append(os.path.join(con.THT_ENV_DIR, v))
+ # Non-boolean feature: v is an (expected setting, env file) tuple
elif isinstance(v, tuple):
if ds[k] == v[0]:
env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
def create_deploy_cmd(ds, ns, inv, tmp_dir,
- virtual, env_file='opnfv-environment.yaml'):
+ virtual, env_file='opnfv-environment.yaml',
+ net_data=False):
logging.info("Creating deployment command")
- deploy_options = [env_file, 'network-environment.yaml']
+ deploy_options = ['network-environment.yaml']
+
+ if env_file:
+ deploy_options.append(env_file)
ds_opts = ds['deploy_options']
deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)
raise ApexDeployException("Invalid number of control or computes")
elif num_control > 1 and not ds['global_params']['ha_enabled']:
num_control = 1
+ if platform.machine() == 'aarch64':
+ # aarch64 deploys were not completing in the default 90 mins.
+ # Not sure if this is related to the hardware the OOO support
+ # was developed on or the virtualization support in CentOS
+ # Either way it will probably get better over time as the aarch64
+ # support matures in CentOS, and deploy time should be tested in
+ # the future so this multiplier can be removed.
+ con.DEPLOY_TIMEOUT *= 2
cmd = "openstack overcloud deploy --templates --timeout {} " \
- "--libvirt-type kvm".format(con.DEPLOY_TIMEOUT)
+ .format(con.DEPLOY_TIMEOUT)
# build cmd env args
for option in deploy_options:
cmd += " -e {}".format(option)
cmd += " --control-scale {}".format(num_control)
cmd += " --compute-scale {}".format(num_compute)
cmd += ' --control-flavor control --compute-flavor compute'
+ if net_data:
+ cmd += ' --networks-file network_data.yaml'
+ libvirt_type = 'kvm'
+ if virtual:
+ with open('/sys/module/kvm_intel/parameters/nested') as f:
+ nested_kvm = f.read().strip()
+ if nested_kvm != 'Y':
+ libvirt_type = 'qemu'
+ cmd += ' --libvirt-type {}'.format(libvirt_type)
logging.info("Deploy command set: {}".format(cmd))
with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
return cmd
-def prep_image(ds, img, tmp_dir, root_pw=None):
+def prep_image(ds, ns, img, tmp_dir, root_pw=None):
"""
Locates sdn image and preps for deployment.
:param ds: deploy settings
+ :param ns: network settings
:param img: sdn image
:param tmp_dir: dir to store modified sdn image
:param root_pw: password to configure for overcloud image
".service"
}])
+ if ns.get('http_proxy', ''):
+ virt_cmds.append({
+ con.VIRT_RUN_CMD:
+ "echo 'http_proxy={}' >> /etc/environment".format(
+ ns['http_proxy'])})
+
+ if ns.get('https_proxy', ''):
+ virt_cmds.append({
+ con.VIRT_RUN_CMD:
+ "echo 'https_proxy={}' >> /etc/environment".format(
+ ns['https_proxy'])})
+
if ds_opts['vpn']:
- virt_cmds.append({con.VIRT_RUN_CMD: "systemctl enable zrpcd"})
- logging.info("ZRPC and Quagga enabled")
+ virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
+ virt_cmds.append({
+ con.VIRT_RUN_CMD:
+ "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
+ "/opt/quagga/etc/init.d/zrpcd_start.sh"})
+ virt_cmds.append({
+ con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
+ "zrpcd_start.sh"})
+ virt_cmds.append({
+ con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
+ "init.d/zrpcd_start.sh' /etc/rc.local "})
+ virt_cmds.append({
+ con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
+ "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
+ logging.info("ZRPCD process started")
dataplane = ds_opts['dataplane']
if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
perf = False
# Modify OPNFV environment
- # TODO: Change to build a dict and outputing yaml rather than parsing
+ # TODO: Change to build a dict and outputting yaml rather than parsing
for line in fileinput.input(tmp_opnfv_env, inplace=True):
line = line.strip('\n')
output_line = line
output_line += key_out
elif 'replace_public_key' in line:
output_line = " public_key: '{}'".format(public_key)
+ elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
+ 'resource_registry' in line:
+ output_line = "resource_registry:\n" \
+ " OS::TripleO::NodeUserData: first-boot.yaml"
+ elif 'ComputeExtraConfigPre' in line and \
+ ds_opts['dataplane'] == 'ovs_dpdk':
+ output_line = ' OS::TripleO::ComputeExtraConfigPre: ' \
+ './ovs-dpdk-preconfig.yaml'
if ds_opts['sdn_controller'] == 'opendaylight' and \
'odl_vpp_routing_node' in ds_opts:
if perf_line:
output_line = (" {}:{}".format(cfg, perf_line))
- # kernel args
- # (FIXME) use compute's kernel settings for all nodes for now.
- if 'ComputeKernelArgs' in line and perf_kern_comp:
- kernel_args = ''
- for k, v in perf_kern_comp.items():
- kernel_args += "{}={} ".format(k, v)
- if kernel_args:
- output_line = " ComputeKernelArgs: '{}'".\
- format(kernel_args)
if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
for k, v in OVS_PERF_MAP.items():
if k in line and v in perf_ovs_comp:
output_line = " {}: '{}'".format(k, perf_ovs_comp[v])
+ # kernel args
+ # (FIXME) use compute's kernel settings for all nodes for now.
+ if perf_kern_comp:
+ if 'NovaSchedulerDefaultFilters' in line:
+ output_line = \
+ " NovaSchedulerDefaultFilters: 'RamFilter," \
+ "ComputeFilter,AvailabilityZoneFilter," \
+ "ComputeCapabilitiesFilter," \
+ "ImagePropertiesFilter,NUMATopologyFilter'"
+ elif 'ComputeKernelArgs' in line:
+ kernel_args = ''
+ for k, v in perf_kern_comp.items():
+ kernel_args += "{}={} ".format(k, v)
+ if kernel_args:
+ output_line = " ComputeKernelArgs: '{}'".\
+ format(kernel_args)
+
print(output_line)
logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
- # Modify Network environment
- for line in fileinput.input(net_env, inplace=True):
- line = line.strip('\n')
- if 'ComputeExtraConfigPre' in line and \
- ds_opts['dataplane'] == 'ovs_dpdk':
- print(' OS::TripleO::ComputeExtraConfigPre: '
- './ovs-dpdk-preconfig.yaml')
- elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
- 'resource_registry' in line:
- print("resource_registry:\n"
- " OS::TripleO::NodeUserData: first-boot.yaml")
- elif perf and perf_kern_comp and \
- 'NovaSchedulerDefaultFilters' in line:
- print(" NovaSchedulerDefaultFilters: 'RamFilter,"
- "ComputeFilter,AvailabilityZoneFilter,"
- "ComputeCapabilitiesFilter,ImagePropertiesFilter,"
- "NUMATopologyFilter'")
- else:
- print(line)
-
- logging.info("network-environment file written to {}".format(net_env))
-
def generate_ceph_key():
key = os.urandom(16)
elif 'CephAdminKey' in line:
print(" CephAdminKey: {}".format(generate_ceph_key().decode(
'utf-8')))
+ elif 'CephClientKey' in line:
+ print(" CephClientKey: {}".format(generate_ceph_key().decode(
+ 'utf-8')))
else:
print(line)
if 'ceph_device' in ds_opts and ds_opts['ceph_device']: