##############################################################################
# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

import base64
import fileinput
import logging
import os
import platform
import pprint
import shutil
import struct
import time
import uuid

import yaml

import apex.builders.overcloud_builder as oc_builder
import apex.builders.common_builder as c_builder
from apex.common import constants as con
from apex.common.exceptions import ApexDeployException
from apex.common import parsers
from apex.common import utils
from apex.virtual import utils as virt_utils
from cryptography.hazmat.primitives import serialization as \
    crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend as \
    crypto_default_backend

SDN_FILE_MAP = {
    'opendaylight': {
        'sfc': 'neutron-sfc-opendaylight.yaml',
        'vpn': 'neutron-bgpvpn-opendaylight.yaml',
        'gluon': 'gluon.yaml',
        'vpp': {
            'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
            'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
            'default': 'neutron-opendaylight-honeycomb.yaml'
        },
        'l2gw': 'neutron-l2gw-opendaylight.yaml',
        'sriov': 'neutron-opendaylight-sriov.yaml',
        'default': 'neutron-opendaylight.yaml',
    },
    'onos': {
        'sfc': 'neutron-onos-sfc.yaml',
        'default': 'neutron-onos.yaml'
    },
    'ovn': 'neutron-ml2-ovn.yaml',
    False: {
        'vpp': 'neutron-ml2-vpp.yaml',
        'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
    }
}

OTHER_FILE_MAP = {
    'tacker': 'enable_tacker.yaml',
    'congress': 'enable_congress.yaml',
    'barometer': 'enable_barometer.yaml',
    'rt_kvm': 'enable_rt_kvm.yaml'
}

OVS_PERF_MAP = {
    'HostCpusList': 'dpdk_cores',
    'NeutronDpdkCoreList': 'pmd_cores',
    'NeutronDpdkSocketMemory': 'socket_memory',
    'NeutronDpdkMemoryChannels': 'memory_channels'
}

ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
                      ".noarch.rpm"

LOOP_DEVICE_SIZE = "10G"

LOSETUP_SERVICE = """[Unit]
Description=Setup loop devices

[Service]
Type=oneshot
ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
ExecStop=/sbin/losetup -d /dev/loop3
RemainAfterExit=yes

[Install]
WantedBy=multi-user.target
"""

DUPLICATE_COMPUTE_SERVICES = [
    'OS::TripleO::Services::ComputeNeutronCorePlugin',
    'OS::TripleO::Services::ComputeNeutronMetadataAgent',
    'OS::TripleO::Services::ComputeNeutronOvsAgent',
    'OS::TripleO::Services::ComputeNeutronL3Agent'
]

def build_sdn_env_list(ds, sdn_map, env_list=None):
    """
    Builds a list of SDN environment files to be used in the deploy cmd.

    This function recursively searches an sdn_map. First the SDN controller
    is matched, and then the function looks for enabled features for that
    controller to determine which environment files should be used. By
    default, a feature is added to the list if it is set to true in the
    deploy settings. If a feature does not have a boolean value, then the
    key and value pair to compare against are given as a tuple (k, v).

    :param ds: deploy settings
    :param sdn_map: SDN map to recursively search
    :param env_list: recursive var to hold previously found env files
    :return: A list of env files
    """
    if env_list is None:
        env_list = list()
    for k, v in sdn_map.items():
        if ds['sdn_controller'] == k or (k in ds and ds[k]):
            if isinstance(v, dict):
                # Append default SDN env file first
                # The assumption is that feature-enabled SDN env files
                # override and do not conflict with previously set default
                # settings
                if ds['sdn_controller'] == k and 'default' in v:
                    env_list.append(os.path.join(con.THT_ENV_DIR,
                                                 v['default']))
                env_list.extend(build_sdn_env_list(ds, v))
            # check if the value is not a boolean
            elif isinstance(v, tuple):
                if ds[k] == v[0]:
                    env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
            else:
                env_list.append(os.path.join(con.THT_ENV_DIR, v))
    if len(env_list) == 0:
        try:
            env_list.append(os.path.join(
                con.THT_ENV_DIR, sdn_map['default']))
        except KeyError:
            logging.warning("Unable to find default file for SDN")

    return env_list

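
# A worked example (hypothetical deploy options, not taken from a real
# deploy settings file):
#   opts = {'sdn_controller': 'opendaylight', 'sfc': True, 'vpn': False}
#   build_sdn_env_list(opts, SDN_FILE_MAP)
# appends the controller's 'default' file first and then the enabled
# feature file, returning:
#   [<THT_ENV_DIR>/neutron-opendaylight.yaml,
#    <THT_ENV_DIR>/neutron-sfc-opendaylight.yaml]
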
def get_docker_sdn_files(ds_opts):
    """
    Returns docker env files for the detected SDN
    :param ds_opts: deploy options
    :return: list of docker THT env files for an SDN
    """
    docker_services = con.VALID_DOCKER_SERVICES
    tht_dir = con.THT_DOCKER_ENV_DIR[ds_opts['os_version']]
    sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
    for i, sdn_file in enumerate(sdn_env_list):
        sdn_base = os.path.basename(sdn_file)
        if sdn_base in docker_services:
            if docker_services[sdn_base] is not None:
                sdn_env_list[i] = \
                    os.path.join(tht_dir, docker_services[sdn_base])
            else:
                sdn_env_list[i] = os.path.join(tht_dir, sdn_base)
    return sdn_env_list

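
# Illustrative only (the mapping value below is an assumption, not quoted
# from con.VALID_DOCKER_SERVICES): if 'neutron-opendaylight.yaml' maps to
# 'neutron-opendaylight-docker.yaml', the path returned by
# build_sdn_env_list() is rewritten in place to point at the docker THT
# directory; a None mapping keeps the same basename under that directory.
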
def create_deploy_cmd(ds, ns, inv, tmp_dir,
                      virtual, env_file='opnfv-environment.yaml',
                      net_data=False):

    logging.info("Creating deployment command")
    deploy_options = ['network-environment.yaml']

    ds_opts = ds['deploy_options']
    if ds_opts['containers']:
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'docker.yaml'))

    if ds['global_params']['ha_enabled']:
        if ds_opts['containers']:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'docker-ha.yaml'))
        else:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'puppet-pacemaker.yaml'))
    deploy_options.append(env_file)

    if ds_opts['containers']:
        deploy_options.append('docker-images.yaml')
        sdn_docker_files = get_docker_sdn_files(ds_opts)
        for sdn_docker_file in sdn_docker_files:
            deploy_options.append(sdn_docker_file)
        if sdn_docker_files:
            deploy_options.append('sdn-images.yaml')
    else:
        deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)
    for k, v in OTHER_FILE_MAP.items():
        if k in ds_opts and ds_opts[k]:
            if ds_opts['containers']:
                deploy_options.append(os.path.join(con.THT_DOCKER_ENV_DIR,
                                                   "{}.yaml".format(k)))
            else:
                deploy_options.append(os.path.join(con.THT_ENV_DIR, v))

    if ds_opts['ceph'] and 'csit' not in env_file:
        prep_storage_env(ds, ns, virtual, tmp_dir)
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'storage-environment.yaml'))
    if ds_opts['sriov']:
        prep_sriov_env(ds, tmp_dir)
    # Check for 'k8s' here intentionally, as we may support other values
    # such as openstack/openshift for 'vim' option.
    if ds_opts['vim'] == 'k8s':
        deploy_options.append('kubernetes-environment.yaml')

    if virtual:
        deploy_options.append('virtual-environment.yaml')
    else:
        deploy_options.append('baremetal-environment.yaml')
    num_control, num_compute = inv.get_node_counts()
    if num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1
    if platform.machine() == 'aarch64':
        # aarch64 deploys were not completing in the default 90 mins.
        # It is unclear whether this is related to the hardware the OOO
        # support was developed on or to the virtualization support in
        # CentOS. Either way, it will probably improve as aarch64 support
        # matures in CentOS, so deploy times should be re-tested in the
        # future and this multiplier removed.
        con.DEPLOY_TIMEOUT *= 2
    cmd = "openstack overcloud deploy --templates --timeout {} " \
        .format(con.DEPLOY_TIMEOUT)
    for option in deploy_options:
        cmd += " -e {}".format(option)
    cmd += " --ntp-server {}".format(ns['ntp'][0])
    cmd += " --control-scale {}".format(num_control)
    cmd += " --compute-scale {}".format(num_compute)
    cmd += ' --control-flavor control --compute-flavor compute'
    if net_data:
        cmd += ' --networks-file network_data.yaml'
    libvirt_type = 'kvm'
    if virtual:
        with open('/sys/module/kvm_intel/parameters/nested') as f:
            nested_kvm = f.read().strip()
            if nested_kvm != 'Y':
                libvirt_type = 'qemu'
    cmd += ' --libvirt-type {}'.format(libvirt_type)
    logging.info("Deploy command set: {}".format(cmd))

    with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
        fh.write(cmd)
    return cmd

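
# For illustration, a generated command for a non-container HA ODL virtual
# deploy might look like (paths and values assumed, not from a real run):
#   openstack overcloud deploy --templates --timeout 90 \
#     -e network-environment.yaml -e <THT_ENV_DIR>/puppet-pacemaker.yaml \
#     -e opnfv-environment.yaml -e <THT_ENV_DIR>/neutron-opendaylight.yaml \
#     -e virtual-environment.yaml --ntp-server 0.pool.ntp.org \
#     --control-scale 3 --compute-scale 2 --control-flavor control \
#     --compute-flavor compute --libvirt-type kvm
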
def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
               patches=None):
    """
    Locates sdn image and preps for deployment.
    :param ds: deploy settings
    :param ns: network settings
    :param img: sdn image
    :param tmp_dir: dir to store modified sdn image
    :param root_pw: password to configure for overcloud image
    :param docker_tag: Docker image tag for RDO version (default None)
    :param patches: List of patches to apply to overcloud image
    :return: None
    """
    # TODO(trozet): Come up with a better way to organize this logic in this
    # function
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))

    ds_opts = ds['deploy_options']
    virt_cmds = list()
    sdn = ds_opts['sdn_controller']
    patched_containers = set()
    # we need this due to rhbz #1436021
    # fixed in systemd-219-37.el7
    if sdn == 'opendaylight':
        logging.info("Neutron openvswitch-agent disabled")
        virt_cmds.extend([{
            con.VIRT_RUN_CMD:
                "rm -f /etc/systemd/system/multi-user.target.wants/"
                "neutron-openvswitch-agent.service"},
            {con.VIRT_RUN_CMD:
                "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
                ".service"}])
    if ns.get('http_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'http_proxy={}' >> /etc/environment".format(
                    ns['http_proxy'])})

    if ns.get('https_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'https_proxy={}' >> /etc/environment".format(
                    ns['https_proxy'])})

    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug("Temporary overcloud image stored as: {}".format(
        tmp_oc_image))
    if ds_opts['vpn']:
        oc_builder.inject_quagga(tmp_oc_image, tmp_dir)
        virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
                "/opt/quagga/etc/init.d/zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
                              "zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.local "})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
        logging.info("ZRPCD configured to start at boot")
    dataplane = ds_opts['dataplane']
    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
        uio_types = {
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'):
                'uio_pci_generic'
        }
        for mod_file, mod in uio_types.items():
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))
            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
                    mod_file)},
                {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
                                   "{}".format(os.path.basename(mod_file))}
            ])
    if root_pw:
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})
    if dataplane == 'ovs':
        if ds_opts['sfc']:
            oc_builder.inject_ovs_nsh(tmp_oc_image, tmp_dir)
        elif sdn == 'opendaylight':
            # FIXME(trozet) remove this after RDO is updated with fix for
            # https://bugzilla.redhat.com/show_bug.cgi?id=1544892
            ovs_file = os.path.basename(con.CUSTOM_OVS)
            ovs_url = con.CUSTOM_OVS.replace(ovs_file, '')
            utils.fetch_upstream_and_unpack(dest=tmp_dir, url=ovs_url,
                                            targets=[ovs_file])
            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/root/".format(os.path.join(
                    tmp_dir, ovs_file))},
                {con.VIRT_RUN_CMD: "yum downgrade -y /root/{}".format(
                    ovs_file)}
            ])
    if dataplane == 'fdio':
        # Patch neutron to use the OVS external interface for the router
        # and add the generic linux NS interface driver
        virt_cmds.append(
            {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
                               "-p1 < neutron-patch-NSDriver.patch"})
        if sdn is False:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
                {con.VIRT_RUN_CMD: "yum install -y "
                                   "/root/nosdn_vpp_rpms/*.rpm"}
            ])
    undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
        'installer_vm']['ip']
    if sdn == 'opendaylight':
        oc_builder.inject_opendaylight(
            odl_version=ds_opts['odl_version'],
            image=tmp_oc_image,
            tmp_dir=tmp_dir,
            uc_ip=undercloud_admin_ip,
            os_version=ds_opts['os_version'],
            docker_tag=docker_tag,
        )
        if docker_tag:
            patched_containers = patched_containers.union({'opendaylight'})

    if patches:
        if ds_opts['os_version'] == 'master':
            branch = ds_opts['os_version']
        else:
            branch = "stable/{}".format(ds_opts['os_version'])
        logging.info('Adding patches to overcloud')
        patched_containers = patched_containers.union(
            c_builder.add_upstream_patches(patches,
                                           tmp_oc_image, tmp_dir,
                                           branch,
                                           uc_ip=undercloud_admin_ip,
                                           docker_tag=docker_tag))
    # if containers with ceph, and no ceph device we need to use a
    # persistent loop device for Ceph OSDs
    if docker_tag and ds_opts['ceph_device'] == '/dev/loop3':
        tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
        with open(tmp_losetup, 'w') as fh:
            fh.write(LOSETUP_SERVICE)
        virt_cmds.extend([
            {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(
                tmp_losetup)},
            {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size {}'
                .format(LOOP_DEVICE_SIZE)},
            {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
            {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
        ])
    # TODO(trozet) remove this after LP#173474 is fixed
    dhcp_unit = '/usr/lib/systemd/system/dhcp-interface@.service'
    virt_cmds.append(
        {con.VIRT_RUN_CMD: "crudini --del {} Unit "
                           "ConditionPathExists".format(dhcp_unit)})
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")
    return patched_containers

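
# Each entry in virt_cmds above is one virt-customize style operation
# (illustrative summary; exact semantics live in apex.virtual.utils):
# {con.VIRT_RUN_CMD: ...} runs a shell command inside the image,
# {con.VIRT_UPLOAD: "src:dest"} uploads a file into it, and
# {con.VIRT_PW: "password:..."} sets a password. virt_utils.virt_customize()
# replays the whole list against tmp_oc_image.
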
def make_ssh_key():
    """
    Creates public and private ssh keys with 2048 bit RSA encryption
    :return: private, public key
    """
    key = rsa.generate_private_key(
        backend=crypto_default_backend(),
        public_exponent=65537,
        key_size=2048
    )

    private_key = key.private_bytes(
        crypto_serialization.Encoding.PEM,
        crypto_serialization.PrivateFormat.PKCS8,
        crypto_serialization.NoEncryption())
    public_key = key.public_key().public_bytes(
        crypto_serialization.Encoding.OpenSSH,
        crypto_serialization.PublicFormat.OpenSSH
    )
    return private_key.decode('utf-8'), public_key.decode('utf-8')

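
# Example usage (mirrors how prep_env consumes the pair below):
#   private_key, public_key = make_ssh_key()
#   assert public_key.startswith('ssh-rsa')  # single-line OpenSSH format
# prep_env re-indents the private key under the 'replace_private_key'
# marker of opnfv-environment.yaml and substitutes the public key for the
# 'replace_public_key' marker.
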
def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
    """
    Creates modified opnfv/network environments for deployment
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param opnfv_env: file path for opnfv-environment file
    :param net_env: file path for network-environment file
    :param tmp_dir: Apex tmp dir
    :return: None
    """

    logging.info("Preparing opnfv-environment and network-environment files")
    ds_opts = ds['deploy_options']
    tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
    shutil.copyfile(opnfv_env, tmp_opnfv_env)
    tenant_nic_map = ns['networks']['tenant']['nic_mapping']
    tenant_nic = dict()
    tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
    tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
    external_nic_map = ns['networks']['external'][0]['nic_mapping']
    external_nic = dict()
    external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]

    # SSH keys
    private_key, public_key = make_ssh_key()
    num_control, num_compute = inv.get_node_counts()
    if num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1

    # Make easier/faster variables to index in the file editor
    if 'performance' in ds_opts:
        perf = True
        # vpp
        if 'vpp' in ds_opts['performance']['Compute']:
            perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
        else:
            perf_vpp_comp = None
        if 'vpp' in ds_opts['performance']['Controller']:
            perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
        else:
            perf_vpp_ctrl = None

        # ovs
        if 'ovs' in ds_opts['performance']['Compute']:
            perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
        else:
            perf_ovs_comp = None

        # kernel
        if 'kernel' in ds_opts['performance']['Compute']:
            perf_kern_comp = ds_opts['performance']['Compute']['kernel']
        else:
            perf_kern_comp = None
    else:
        perf = False
        perf_vpp_comp = perf_vpp_ctrl = perf_ovs_comp = perf_kern_comp = None
    tenant_settings = ns['networks']['tenant']
    tenant_vlan_enabled = 'tenant' in ns.enabled_network_list and \
        ns['networks']['tenant'].get('segmentation_type') == 'vlan'

    # Modify OPNFV environment
    # TODO: Change to build a dict and output yaml rather than parsing
    for line in fileinput.input(tmp_opnfv_env, inplace=True):
        line = line.strip('\n')
        output_line = line
        if 'CloudDomain' in line:
            output_line = "  CloudDomain: {}".format(ns['domain_name'])
        elif 'replace_private_key' in line:
            output_line = "    private_key: |\n"
            key_out = ''
            for line in private_key.splitlines():
                key_out += "        {}\n".format(line)
            output_line += key_out
        elif 'replace_public_key' in line:
            output_line = "    public_key: '{}'".format(public_key)
        elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
                'resource_registry' in line:
            output_line = "resource_registry:\n" \
                          "  OS::TripleO::NodeUserData: first-boot.yaml"
        elif 'ComputeExtraConfigPre' in line and \
                ds_opts['dataplane'] == 'ovs_dpdk':
            output_line = '  OS::TripleO::ComputeExtraConfigPre: ' \
                          './ovs-dpdk-preconfig.yaml'
        elif 'NeutronNetworkVLANRanges' in line:
            vlan_setting = ''
            if tenant_vlan_enabled:
                if ns['networks']['tenant']['overlay_id_range']:
                    vlan_setting = ns['networks']['tenant']['overlay_id_range']
                    if 'datacentre' not in vlan_setting:
                        vlan_setting += ',datacentre:1:1000'
            # SRIOV networks are VLAN based provider networks. In order to
            # simplify the deployment, nfv_sriov will be the default physnet.
            # VLANs are not needed in advance, and the user will have to
            # create the network specifying the segmentation-id.
            if ds_opts['sriov']:
                if vlan_setting:
                    vlan_setting += ",nfv_sriov"
                else:
                    vlan_setting = "datacentre:1:1000,nfv_sriov"
            if vlan_setting:
                output_line = "  NeutronNetworkVLANRanges: " + vlan_setting
        elif 'NeutronBridgeMappings' in line and tenant_vlan_enabled:
            if tenant_settings['overlay_id_range']:
                physnets = tenant_settings['overlay_id_range'].split(',')
                output_line = "  NeutronBridgeMappings: "
                for physnet in physnets:
                    physnet_name = physnet.split(':')[0]
                    if physnet_name != 'datacentre':
                        output_line += "{}:br-vlan,".format(physnet_name)
                output_line += "datacentre:br-ex"
        elif 'OpenDaylightProviderMappings' in line and tenant_vlan_enabled \
                and ds_opts['sdn_controller'] == 'opendaylight':
            if tenant_settings['overlay_id_range']:
                physnets = tenant_settings['overlay_id_range'].split(',')
                output_line = "  OpenDaylightProviderMappings: "
                for physnet in physnets:
                    physnet_name = physnet.split(':')[0]
                    if physnet_name != 'datacentre':
                        output_line += "{}:br-vlan,".format(physnet_name)
                output_line += "datacentre:br-ex"
        elif 'NeutronNetworkType' in line and tenant_vlan_enabled:
            output_line = "  NeutronNetworkType: vlan\n" \
                          "  NeutronTunnelTypes: ''"
        if ds_opts['sdn_controller'] == 'opendaylight' and \
                'odl_vpp_routing_node' in ds_opts:
            if 'opendaylight::vpp_routing_node' in line:
                output_line = ("    opendaylight::vpp_routing_node: {}.{}"
                               .format(ds_opts['odl_vpp_routing_node'],
                                       ns['domain_name']))
        elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
            if 'NeutronVPPAgentPhysnets' in line:
                # VPP interface tap0 will be used for external network
                # connectivity
                output_line = ("  NeutronVPPAgentPhysnets: "
                               "'datacentre:{},external:tap0'"
                               .format(tenant_nic['Controller']))
        elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
                'dvr') is True:
            if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
                output_line = ''
            elif 'NeutronDhcpAgentsPerNetwork' in line:
                if num_compute == 0:
                    num_dhcp_agents = num_control
                else:
                    num_dhcp_agents = num_compute
                output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
                               .format(num_dhcp_agents))
            elif 'ComputeServices' in line:
                output_line = ("  ComputeServices:\n"
                               "    - OS::TripleO::Services::NeutronDhcpAgent")
        if perf:
            for role in 'NovaCompute', 'Controller':
                if role == 'NovaCompute':
                    perf_opts = perf_vpp_comp
                else:
                    perf_opts = perf_vpp_ctrl
                cfg = "{}ExtraConfig".format(role)
                if cfg in line and perf_opts:
                    perf_line = ''
                    if 'main-core' in perf_opts:
                        perf_line += ("\n    fdio::vpp_cpu_main_core: '{}'"
                                      .format(perf_opts['main-core']))
                    if 'corelist-workers' in perf_opts:
                        perf_line += ("\n    "
                                      "fdio::vpp_cpu_corelist_workers: '{}'"
                                      .format(perf_opts['corelist-workers']))
                    if ds_opts['sdn_controller'] == 'opendaylight' and \
                            ds_opts['dataplane'] == 'fdio':
                        if role == 'NovaCompute':
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface',"
                                          "'{}:public-interface']"
                                          .format(tenant_nic[role],
                                                  external_nic[role]))
                        else:
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface']"
                                          .format(tenant_nic[role]))
                    if perf_line:
                        output_line = ("  {}:{}".format(cfg, perf_line))
            if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
                for k, v in OVS_PERF_MAP.items():
                    if k in line and v in perf_ovs_comp:
                        output_line = "  {}: '{}'".format(k, perf_ovs_comp[v])

            # kernel args
            # (FIXME) use compute's kernel settings for all nodes for now.
            if perf_kern_comp:
                if 'NovaSchedulerDefaultFilters' in line:
                    output_line = \
                        "  NovaSchedulerDefaultFilters: 'RamFilter," \
                        "ComputeFilter,AvailabilityZoneFilter," \
                        "ComputeCapabilitiesFilter," \
                        "ImagePropertiesFilter,NUMATopologyFilter'"
                elif 'ComputeKernelArgs' in line:
                    kernel_args = ''
                    for k, v in perf_kern_comp.items():
                        kernel_args += "{}={} ".format(k, v)
                    if kernel_args:
                        output_line = "  ComputeKernelArgs: '{}'".\
                            format(kernel_args)

        print(output_line)

    # Merge compute services into control services if only a single
    # node deployment
    if num_compute == 0:
        logging.info("All in one deployment. Checking if service merging "
                     "required into control services")
        with open(tmp_opnfv_env, 'r') as fh:
            data = yaml.safe_load(fh)
        param_data = data['parameter_defaults']
        # Check to see if any parameters are set for Compute
        for param in param_data.keys():
            if param != 'ComputeServices' and param.startswith('Compute'):
                logging.warning("Compute parameter set, but will not be used "
                                "in deployment: {}. Please use Controller "
                                "based parameters when using All-in-one "
                                "deployments".format(param))
        if ('ControllerServices' in param_data and 'ComputeServices' in
                param_data):
            logging.info("Services detected in environment file. Merging...")
            ctrl_services = param_data['ControllerServices']
            cmp_services = param_data['ComputeServices']
            param_data['ControllerServices'] = list(set().union(
                ctrl_services, cmp_services))
            for dup_service in DUPLICATE_COMPUTE_SERVICES:
                if dup_service in param_data['ControllerServices']:
                    param_data['ControllerServices'].remove(dup_service)
            param_data.pop('ComputeServices')
            logging.debug("Merged controller services: {}".format(
                pprint.pformat(param_data['ControllerServices'])
            ))
            with open(tmp_opnfv_env, 'w') as fh:
                yaml.safe_dump(data, fh, default_flow_style=False)
        else:
            logging.info("No services detected in env file, not merging "
                         "services")
712 logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
713 with open(tmp_opnfv_env, 'r') as fh:
714 logging.debug("opnfv-environment content is : {}".format(
715 pprint.pformat(yaml.safe_load(fh.read()))
def generate_ceph_key():
    key = os.urandom(16)
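    # Hedged note on the layout (based on Ceph's CryptoKey binary
    # encoding): '<hiih' packs a 2-byte key type (1 = AES), a 4-byte +
    # 4-byte creation timestamp (seconds, nanoseconds), and a 2-byte
    # secret length, followed by the 16 random bytes; base64 of the whole
    # blob is what a ceph keyring expects.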
    header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
    return base64.b64encode(header + key)

def prep_storage_env(ds, ns, virtual, tmp_dir):
    """
    Creates storage environment file for deployment. Source file is copied
    by undercloud playbook to host.
    :param ds: deploy settings
    :param ns: network settings
    :param virtual: True if virtual deployment
    :param tmp_dir: Apex tmp dir
    :return: None
    """
    ds_opts = ds['deploy_options']
    storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
    if not os.path.isfile(storage_file):
        logging.error("storage-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("storage-environment file not copied from "
                                  "undercloud")
    for line in fileinput.input(storage_file, inplace=True):
        line = line.strip('\n')
        if 'CephClusterFSID' in line:
            print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
        elif 'CephMonKey' in line:
            print("  CephMonKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephAdminKey' in line:
            print("  CephAdminKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephClientKey' in line:
            print("  CephClientKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        else:
            print(line)

    if ds_opts['containers']:
        ceph_params = {}

        # max pgs allowed are calculated as num_mons * 200, so we set the
        # number of PGs and pools so that the total (num_pgs * num_pools *
        # num_osds) stays below that limit:
        ceph_params['CephPoolDefaultSize'] = 2
        ceph_params['CephPoolDefaultPgNum'] = 32
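        # Worked example (pool count assumed, not from this file): with a
        # single mon the cap is 1 * 200 = 200 PGs; at 32 PGs per pool,
        # roughly six OpenStack pools on one OSD give 32 * 6 * 1 = 192,
        # staying under the cap.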
        if virtual:
            ceph_params['CephAnsibleExtraConfig'] = {
                'centos_package_dependencies': [],
                'ceph_osd_docker_memory_limit': '1g',
                'ceph_mds_docker_memory_limit': '1g',
            }
        ceph_device = ds_opts['ceph_device']
        ceph_params['CephAnsibleDisksConfig'] = {
            'devices': [ceph_device],
            'osd_scenario': 'collocated'
        }
        utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
    # TODO(trozet): remove following block as we only support containers now
    elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
        with open(storage_file, 'a') as fh:
            fh.write('  ExtraConfig:\n')
            fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
                ds_opts['ceph_device']
            ))

def prep_sriov_env(ds, tmp_dir):
    """
    Creates SRIOV environment file for deployment. Source file is copied by
    undercloud playbook to host.
    :param ds: deploy settings
    :param tmp_dir: Apex tmp dir
    :return: None
    """
    ds_opts = ds['deploy_options']
    sriov_iface = ds_opts['sriov']
    sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
    if not os.path.isfile(sriov_file):
        logging.error("sriov-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("sriov-environment file not copied from "
                                  "undercloud")
    # TODO(rnoriega): Instead of line editing, refactor this code to load
    # the yaml file into a dict, edit it and write the file back.
    for line in fileinput.input(sriov_file, inplace=True):
        line = line.strip('\n')
        if 'NovaSchedulerDefaultFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NovaSchedulerAvailableFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NeutronPhysicalDevMappings' in line:
            print("  NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
                  .format(sriov_iface))
        elif 'NeutronSriovNumVFs' in line:
            print("  NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
        elif 'NovaPCIPassthrough' in line:
            print("  NovaPCIPassthrough:")
        elif 'devname' in line:
            print("    - devname: \"{}\"".format(sriov_iface))
        elif 'physical_network' in line:
            print("      physical_network: \"nfv_sriov\"")
        else:
            print(line)
def external_network_cmds(ns, ds):
    """
    Generates external network openstack commands
    :param ns: network settings
    :param ds: deploy settings
    :return: list of commands to configure external network
    """
    ds_opts = ds['deploy_options']
    external_physnet = 'datacentre'
    if ds_opts['dataplane'] == 'fdio' and \
            ds_opts['sdn_controller'] != 'opendaylight':
        external_physnet = 'external'
    if 'external' in ns.enabled_network_list:
        net_config = ns['networks']['external'][0]
        external = True
        pool_start, pool_end = net_config['floating_ip_range']
    else:
        net_config = ns['networks']['admin']
        external = False
        pool_start, pool_end = ns['apex']['networks']['admin'][
            'introspection_range']
    nic_config = net_config['nic_mapping']
    gateway = net_config['gateway']
    cmds = list()
    # create network command
    if nic_config['compute']['vlan'] == 'native':
        ext_type = 'flat'
    else:
        ext_type = "vlan --provider-segment {}".format(nic_config[
            'compute']['vlan'])
    cmds.append("openstack network create external --project service "
                "--external --provider-network-type {} "
                "--provider-physical-network {}"
                .format(ext_type, external_physnet))
    # create subnet command
    cidr = net_config['cidr']
    subnet_cmd = "openstack subnet create external-subnet --project " \
                 "service --network external --no-dhcp --gateway {} " \
                 "--allocation-pool start={},end={} --subnet-range " \
                 "{}".format(gateway, pool_start, pool_end, str(cidr))
    if external and cidr.version == 6:
        subnet_cmd += ' --ip-version 6'
    cmds.append(subnet_cmd)
    logging.debug("Neutron external network commands determined "
                  "as: {}".format(cmds))
    return cmds

def create_congress_cmds(overcloud_file):
    drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
    overcloudrc = parsers.parse_overcloudrc(overcloud_file)
    logging.info("Creating congress commands")
    try:
        ds_cfg = [
            "username={}".format(overcloudrc['OS_USERNAME']),
            "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
            "password={}".format(overcloudrc['OS_PASSWORD']),
            "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
        ]
    except KeyError:
        logging.error("Unable to find all keys required for congress in "
                      "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
                      "OS_PASSWORD, OS_AUTH_URL. Please check overcloudrc "
                      "file: {}".format(overcloud_file))
        raise
    cmds = list()
    ds_cfg = '--config ' + ' --config '.join(ds_cfg)

    for driver in drivers:
        if driver == 'doctor':
            cmd = "{} \"{}\"".format(driver, driver)
        else:
            cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
        if driver == 'nova':
            cmd += ' --config api_version="2.34"'
        logging.debug("Congress command created: {}".format(cmd))
        cmds.append(cmd)
    return cmds