1 ##############################################################################
2 # Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
21 import apex.builders.overcloud_builder as oc_builder
22 import apex.builders.common_builder as c_builder
24 from apex.common import constants as con
25 from apex.common.exceptions import ApexDeployException
26 from apex.common import parsers
27 from apex.common import utils
28 from apex.virtual import utils as virt_utils
29 from cryptography.hazmat.primitives import serialization as \
31 from cryptography.hazmat.primitives.asymmetric import rsa
32 from cryptography.hazmat.backends import default_backend as \
33 crypto_default_backend
# NOTE(review): this is an elided numbered listing — the original line
# numbers are embedded as prefixes and the numbering gaps mark dropped
# lines (e.g. the `SDN_FILE_MAP = {` opening and several dict/unit-file
# lines are missing).  Restore from upstream before editing.
# Fragment of SDN_FILE_MAP: maps SDN controller name -> feature ->
# tripleo-heat-template environment file used for that feature.
38 'sfc': 'neutron-sfc-opendaylight.yaml',
39 'vpn': 'neutron-bgpvpn-opendaylight.yaml',
40 'gluon': 'gluon.yaml',
42 'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
43 'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
44 'default': 'neutron-opendaylight-honeycomb.yaml'
46 'l2gw': 'neutron-l2gw-opendaylight.yaml',
47 'sriov': 'neutron-opendaylight-sriov.yaml',
48 'default': 'neutron-opendaylight.yaml',
51 'sfc': 'neutron-onos-sfc.yaml',
52 'default': 'neutron-onos.yaml'
54 'ovn': 'neutron-ml2-ovn.yaml',
56 'vpp': 'neutron-ml2-vpp.yaml',
57 'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
# Fragment of OTHER_FILE_MAP: non-SDN optional features -> env file.
62 'tacker': 'enable_tacker.yaml',
63 'congress': 'enable_congress.yaml',
64 'barometer': 'enable_barometer.yaml',
65 'rt_kvm': 'enable_rt_kvm.yaml'
# Fragment of OVS_PERF_MAP: THT parameter name -> deploy-settings
# performance option key (used when dataplane is ovs_dpdk).
69 'HostCpusList': 'dpdk_cores',
70 'NeutronDpdkCoreList': 'pmd_cores',
71 'NeutronDpdkSocketMemory': 'socket_memory',
72 'NeutronDpdkMemoryChannels': 'memory_channels'
# RPM file names baked into the overcloud image (NSH-enabled OVS build).
75 OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
76 OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
77 ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
# Size of the backing file for the Ceph OSD loop device (see prep_image).
80 LOOP_DEVICE_SIZE = "10G"
# Fragment of LOSETUP_SERVICE: systemd unit that binds /srv/data.img to
# /dev/loop3 at boot so containerized Ceph has a persistent OSD device.
# NOTE(review): several unit-file lines are elided here.
82 LOSETUP_SERVICE = """[Unit]
83 Description=Setup loop devices
88 ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
89 ExecStop=/sbin/losetup -d /dev/loop3
94 WantedBy=multi-user.target
# Compute-role services that must not be duplicated into the Controller
# role when merging services for all-in-one deployments (see prep_env).
97 DUPLICATE_COMPUTE_SERVICES = [
98 'OS::TripleO::Services::ComputeNeutronCorePlugin',
99 'OS::TripleO::Services::ComputeNeutronMetadataAgent',
100 'OS::TripleO::Services::ComputeNeutronOvsAgent',
101 'OS::TripleO::Services::ComputeNeutronL3Agent'
105 def build_sdn_env_list(ds, sdn_map, env_list=None):
107 Builds a list of SDN environment files to be used in the deploy cmd.
109 This function recursively searches an sdn_map. First the sdn controller is
110 matched and then the function looks for enabled features for that
111 controller to determine which environment files should be used. By
112 default the feature will be checked if set to true in deploy settings to be
113 added to the list. If a feature does not have a boolean value, then the
114 key and value pair to compare with are checked as a tuple (k,v).
116 :param ds: deploy settings
117 :param sdn_map: SDN map to recursively search
118 :param env_list: recursive var to hold previously found env_list
119 :return: A list of env files
123 for k, v in sdn_map.items():
124 if ds['sdn_controller'] == k or (k in ds and ds[k]):
125 if isinstance(v, dict):
126 # Append default SDN env file first
127 # The assumption is that feature-enabled SDN env files
128 # override and do not conflict with previously set default
130 if ds['sdn_controller'] == k and 'default' in v:
131 env_list.append(os.path.join(con.THT_ENV_DIR,
133 env_list.extend(build_sdn_env_list(ds, v))
134 # check if the value is not a boolean
135 elif isinstance(v, tuple):
137 env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
139 env_list.append(os.path.join(con.THT_ENV_DIR, v))
140 if len(env_list) == 0:
142 env_list.append(os.path.join(
143 con.THT_ENV_DIR, sdn_map['default']))
145 logging.warning("Unable to find default file for SDN")
def get_docker_sdn_files(ds_opts):
    """
    Returns docker env file for detected SDN
    :param ds_opts: deploy options
    :return: list of docker THT env files for an SDN
    """
    docker_services = con.VALID_DOCKER_SERVICES
    tht_dir = con.THT_DOCKER_ENV_DIR[ds_opts['os_version']]
    sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
    # rewrite each matched env file path to its docker THT equivalent;
    # a non-None mapping value renames the file, None keeps the basename
    for i, sdn_file in enumerate(sdn_env_list):
        sdn_base = os.path.basename(sdn_file)
        if sdn_base in docker_services:
            if docker_services[sdn_base] is not None:
                sdn_env_list[i] = \
                    os.path.join(tht_dir, docker_services[sdn_base])
            else:
                sdn_env_list[i] = os.path.join(tht_dir, sdn_base)
    return sdn_env_list
# NOTE(review): elided numbered listing — several original lines are
# missing (docstring, `else:` branches, the env-file names appended under
# the containers branches, the deploy-command tail).  Comments below are
# hedged; restore from upstream before editing logic.
# Purpose: assemble the `openstack overcloud deploy` command line from
# deploy settings, network settings and inventory, and write it to
# <tmp_dir>/deploy_command.
170 def create_deploy_cmd(ds, ns, inv, tmp_dir,
171 virtual, env_file='opnfv-environment.yaml',
174 logging.info("Creating deployment command")
175 deploy_options = ['network-environment.yaml']
177 ds_opts = ds['deploy_options']
# containers branch appends a THT env file whose name is elided here
179 if ds_opts['containers']:
180 deploy_options.append(os.path.join(con.THT_ENV_DIR,
183 if ds['global_params']['ha_enabled']:
184 if ds_opts['containers']:
185 deploy_options.append(os.path.join(con.THT_ENV_DIR,
188 deploy_options.append(os.path.join(con.THT_ENV_DIR,
189 'puppet-pacemaker.yaml'))
192 deploy_options.append(env_file)
194 if ds_opts['containers']:
195 deploy_options.append('docker-images.yaml')
196 sdn_docker_files = get_docker_sdn_files(ds_opts)
197 for sdn_docker_file in sdn_docker_files:
198 deploy_options.append(sdn_docker_file)
200 deploy_options.append('sdn-images.yaml')
# non-containers path: plain SDN env file list
202 deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)
204 for k, v in OTHER_FILE_MAP.items():
205 if k in ds_opts and ds_opts[k]:
206 if ds_opts['containers']:
207 deploy_options.append(os.path.join(con.THT_DOCKER_ENV_DIR,
208 "{}.yaml".format(k)))
210 deploy_options.append(os.path.join(con.THT_ENV_DIR, v))
# storage env prepared only for ceph and non-CSIT deployments
212 if ds_opts['ceph'] and 'csit' not in env_file:
213 prep_storage_env(ds, ns, virtual, tmp_dir)
214 deploy_options.append(os.path.join(con.THT_ENV_DIR,
215 'storage-environment.yaml'))
# presumably guarded by `if ds_opts['sriov']:` on an elided line
217 prep_sriov_env(ds, tmp_dir)
219 # Check for 'k8s' here intentionally, as we may support other values
220 # such as openstack/openshift for 'vim' option.
221 if ds_opts['vim'] == 'k8s':
222 deploy_options.append('kubernetes-environment.yaml')
225 deploy_options.append('virtual-environment.yaml')
227 deploy_options.append('baremetal-environment.yaml')
229 num_control, num_compute = inv.get_node_counts()
# elided line presumably clamps num_control when HA is disabled
230 if num_control > 1 and not ds['global_params']['ha_enabled']:
232 if platform.machine() == 'aarch64':
233 # aarch64 deploys were not completing in the default 90 mins.
234 # Not sure if this is related to the hardware the OOO support
235 # was developed on or the virtualization support in CentOS
236 # Either way it will probably get better over time as the aarch
237 # support matures in CentOS and deploy time should be tested in
238 # the future so this multiplier can be removed.
239 con.DEPLOY_TIMEOUT *= 2
240 cmd = "openstack overcloud deploy --templates --timeout {} " \
241 .format(con.DEPLOY_TIMEOUT)
243 for option in deploy_options:
244 cmd += " -e {}".format(option)
245 cmd += " --ntp-server {}".format(ns['ntp'][0])
246 cmd += " --control-scale {}".format(num_control)
247 cmd += " --compute-scale {}".format(num_compute)
248 cmd += ' --control-flavor control --compute-flavor compute'
250 cmd += ' --networks-file network_data.yaml'
# virtual deploys: fall back to qemu when nested KVM is unavailable
253 with open('/sys/module/kvm_intel/parameters/nested') as f:
254 nested_kvm = f.read().strip()
255 if nested_kvm != 'Y':
256 libvirt_type = 'qemu'
257 cmd += ' --libvirt-type {}'.format(libvirt_type)
258 logging.info("Deploy command set: {}".format(cmd))
260 with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
# NOTE(review): elided numbered listing — many lines of the virt_cmds
# list construction (list openings, guard conditions) are missing.
# Comments below are hedged; restore from upstream before editing logic.
# Purpose: customize the overcloud qcow2 image (virt-customize commands)
# for the selected SDN controller / dataplane before deployment.
265 def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
268 Locates sdn image and preps for deployment.
269 :param ds: deploy settings
270 :param ns: network settings
271 :param img: sdn image
272 :param tmp_dir: dir to store modified sdn image
273 :param root_pw: password to configure for overcloud image
274 :param docker_tag: Docker image tag for RDO version (default None)
275 :param patches: List of patches to apply to overcloud image
278 # TODO(trozet): Come up with a better way to organize this logic in this
280 logging.info("Preparing image: {} for deployment".format(img))
281 if not os.path.isfile(img):
282 logging.error("Missing SDN image {}".format(img))
283 raise ApexDeployException("Missing SDN image file: {}".format(img))
285 ds_opts = ds['deploy_options']
287 sdn = ds_opts['sdn_controller']
288 patched_containers = set()
289 # we need this due to rhbz #1436021
290 # fixed in systemd-219-37.el7
292 logging.info("Neutron openvswitch-agent disabled")
# elided: virt_cmds list opening with VIRT_RUN_CMD entries removing the
# neutron-openvswitch-agent unit files
295 "rm -f /etc/systemd/system/multi-user.target.wants/"
296 "neutron-openvswitch-agent.service"},
299 "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
# propagate proxy settings from network settings into the image
303 if ns.get('http_proxy', ''):
306 "echo 'http_proxy={}' >> /etc/environment".format(
309 if ns.get('https_proxy', ''):
312 "echo 'https_proxy={}' >> /etc/environment".format(
315 tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
316 shutil.copyfile(img, tmp_oc_image)
317 logging.debug("Temporary overcloud image stored as: {}".format(
# quagga/zrpcd setup — presumably guarded by a vpn/bgpvpn option check
321 oc_builder.inject_quagga(tmp_oc_image, tmp_dir)
322 virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
325 "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
326 "/opt/quagga/etc/init.d/zrpcd_start.sh"})
328 con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
331 con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
332 "init.d/zrpcd_start.sh' /etc/rc.local "})
334 con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
335 "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
336 logging.info("ZRPCD process started")
338 dataplane = ds_opts['dataplane']
339 if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
340 logging.info("Enabling kernel modules for dpdk")
341 # file to module mapping
# elided: uio_types dict opening
343 os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
344 os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
346 for mod_file, mod in uio_types.items():
347 with open(mod_file, 'w') as fh:
348 fh.write('#!/bin/bash\n')
349 fh.write('exec /sbin/modprobe {}'.format(mod))
353 {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
355 {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
356 "{}".format(os.path.basename(mod_file))}
# set image root password when one was supplied
359 pw_op = "password:{}".format(root_pw)
360 virt_cmds.append({con.VIRT_PW: pw_op})
362 if dataplane == 'ovs':
# NSH-capable OVS RPMs pre-staged under /root/ovs in the image
365 {con.VIRT_RUN_CMD: "yum -y install "
366 "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
367 "{}".format(OVS_NSH_KMOD_RPM)},
368 {con.VIRT_RUN_CMD: "yum downgrade -y "
369 "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
370 "{}".format(OVS_NSH_RPM)}
372 elif sdn == 'opendaylight':
373 # FIXME(trozet) remove this after RDO is updated with fix for
374 # https://bugzilla.redhat.com/show_bug.cgi?id=1544892
375 ovs_file = os.path.basename(con.CUSTOM_OVS)
376 ovs_url = con.CUSTOM_OVS.replace(ovs_file, '')
377 utils.fetch_upstream_and_unpack(dest=tmp_dir, url=ovs_url,
380 {con.VIRT_UPLOAD: "{}:/root/".format(os.path.join(tmp_dir,
382 {con.VIRT_RUN_CMD: "yum downgrade -y /root/{}".format(
385 if dataplane == 'fdio':
386 # Patch neutron with using OVS external interface for router
387 # and add generic linux NS interface driver
389 {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
390 "-p1 < neutron-patch-NSDriver.patch"})
# elided guard: presumably the no-SDN + VPP rpm replacement path
393 {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
394 {con.VIRT_RUN_CMD: "yum install -y "
395 "/root/nosdn_vpp_rpms/*.rpm"}
398 undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
399 'installer_vm']['ip']
400 if sdn == 'opendaylight':
401 oc_builder.inject_opendaylight(
402 odl_version=ds_opts['odl_version'],
405 uc_ip=undercloud_admin_ip,
406 os_version=ds_opts['os_version'],
407 docker_tag=docker_tag,
410 patched_containers = patched_containers.union({'opendaylight'})
# elided guard: presumably `if patches:`
413 if ds_opts['os_version'] == 'master':
414 branch = ds_opts['os_version']
416 branch = "stable/{}".format(ds_opts['os_version'])
417 logging.info('Adding patches to overcloud')
418 patched_containers = patched_containers.union(
419 c_builder.add_upstream_patches(patches,
420 tmp_oc_image, tmp_dir,
422 uc_ip=undercloud_admin_ip,
423 docker_tag=docker_tag))
424 # if containers with ceph, and no ceph device we need to use a
425 # persistent loop device for Ceph OSDs
426 if docker_tag and ds_opts['ceph_device'] == '/dev/loop3':
427 tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
428 with open(tmp_losetup, 'w') as fh:
429 fh.write(LOSETUP_SERVICE)
431 {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(tmp_losetup)
433 {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size {}'
434 .format(LOOP_DEVICE_SIZE)},
435 {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
436 {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
438 # TODO(trozet) remove this after LP#173474 is fixed
439 dhcp_unit = '/usr/lib/systemd/system/dhcp-interface@.service'
441 {con.VIRT_RUN_CMD: "crudini --del {} Unit "
442 "ConditionPathExists".format(dhcp_unit)})
443 virt_utils.virt_customize(virt_cmds, tmp_oc_image)
444 logging.info("Overcloud image customization complete")
445 return patched_containers
def make_ssh_key():
    """
    Creates public and private ssh keys with 2048 bit RSA encryption

    NOTE(review): the elided original referenced 1024-bit keys; 1024-bit
    RSA is below modern minimums, so 2048 is used here.
    :return: private, public key (both as utf-8 strings)
    """
    key = rsa.generate_private_key(
        backend=crypto_default_backend(),
        public_exponent=65537,
        key_size=2048
    )

    # serialize private key as unencrypted PKCS8 PEM
    private_key = key.private_bytes(
        crypto_serialization.Encoding.PEM,
        crypto_serialization.PrivateFormat.PKCS8,
        crypto_serialization.NoEncryption())
    # serialize public key in OpenSSH authorized_keys format
    public_key = key.public_key().public_bytes(
        crypto_serialization.Encoding.OpenSSH,
        crypto_serialization.PublicFormat.OpenSSH
    )
    return private_key.decode('utf-8'), public_key.decode('utf-8')
# NOTE(review): elided numbered listing — many branches (`else:` lines,
# variable initializations such as `perf`, `tenant_nic`, `vlan_setting`,
# `kernel_args`, and several format-string continuations) are missing.
# Comments below are hedged; restore from upstream before editing logic.
# Purpose: rewrite the copied opnfv-environment file in place, line by
# line, substituting deployment-specific values, then (for all-in-one
# deploys) merge Compute services into Controller services via YAML.
470 def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
472 Creates modified opnfv/network environments for deployment
473 :param ds: deploy settings
474 :param ns: network settings
475 :param inv: node inventory
476 :param opnfv_env: file path for opnfv-environment file
477 :param net_env: file path for network-environment file
478 :param tmp_dir: Apex tmp dir
482 logging.info("Preparing opnfv-environment and network-environment files")
483 ds_opts = ds['deploy_options']
484 tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
485 shutil.copyfile(opnfv_env, tmp_opnfv_env)
486 tenant_nic_map = ns['networks']['tenant']['nic_mapping']
488 tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
489 tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
490 external_nic_map = ns['networks']['external'][0]['nic_mapping']
491 external_nic = dict()
492 external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]
495 private_key, public_key = make_ssh_key()
497 num_control, num_compute = inv.get_node_counts()
# elided line presumably clamps num_control when HA is disabled
498 if num_control > 1 and not ds['global_params']['ha_enabled']:
501 # Make easier/faster variables to index in the file editor
502 if 'performance' in ds_opts:
505 if 'vpp' in ds_opts['performance']['Compute']:
506 perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
509 if 'vpp' in ds_opts['performance']['Controller']:
510 perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
515 if 'ovs' in ds_opts['performance']['Compute']:
516 perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
521 if 'kernel' in ds_opts['performance']['Compute']:
522 perf_kern_comp = ds_opts['performance']['Compute']['kernel']
524 perf_kern_comp = None
528 tenant_settings = ns['networks']['tenant']
529 tenant_vlan_enabled = 'tenant' in ns.enabled_network_list and \
530 ns['networks']['tenant'].get('segmentation_type') == 'vlan'
532 # Modify OPNFV environment
533 # TODO: Change to build a dict and outputting yaml rather than parsing
534 for line in fileinput.input(tmp_opnfv_env, inplace=True):
535 line = line.strip('\n')
537 if 'CloudDomain' in line:
538 output_line = " CloudDomain: {}".format(ns['domain_name'])
539 elif 'replace_private_key' in line:
540 output_line = " private_key: |\n"
# NOTE: inner loop reuses the name `line`, shadowing the outer file line
542 for line in private_key.splitlines():
543 key_out += " {}\n".format(line)
544 output_line += key_out
545 elif 'replace_public_key' in line:
546 output_line = " public_key: '{}'".format(public_key)
547 elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
548 'resource_registry' in line:
549 output_line = "resource_registry:\n" \
550 " OS::TripleO::NodeUserData: first-boot.yaml"
551 elif 'ComputeExtraConfigPre' in line and \
552 ds_opts['dataplane'] == 'ovs_dpdk':
553 output_line = ' OS::TripleO::ComputeExtraConfigPre: ' \
554 './ovs-dpdk-preconfig.yaml'
555 elif 'NeutronNetworkVLANRanges' in line:
557 if tenant_vlan_enabled:
558 if ns['networks']['tenant']['overlay_id_range']:
559 vlan_setting = ns['networks']['tenant']['overlay_id_range']
560 if 'datacentre' not in vlan_setting:
561 vlan_setting += ',datacentre:1:1000'
562 # SRIOV networks are VLAN based provider networks. In order to
563 # simplify the deployment, nfv_sriov will be the default physnet.
564 # VLANs are not needed in advance, and the user will have to create
565 # the network specifying the segmentation-id.
568 vlan_setting += ",nfv_sriov"
570 vlan_setting = "datacentre:1:1000,nfv_sriov"
572 output_line = " NeutronNetworkVLANRanges: " + vlan_setting
573 elif 'NeutronBridgeMappings' in line and tenant_vlan_enabled:
574 if tenant_settings['overlay_id_range']:
575 physnets = tenant_settings['overlay_id_range'].split(',')
576 output_line = " NeutronBridgeMappings: "
577 for physnet in physnets:
578 physnet_name = physnet.split(':')[0]
579 if physnet_name != 'datacentre':
580 output_line += "{}:br-vlan,".format(physnet_name)
581 output_line += "datacentre:br-ex"
582 elif 'OpenDaylightProviderMappings' in line and tenant_vlan_enabled \
583 and ds_opts['sdn_controller'] == 'opendaylight':
584 if tenant_settings['overlay_id_range']:
585 physnets = tenant_settings['overlay_id_range'].split(',')
586 output_line = " OpenDaylightProviderMappings: "
587 for physnet in physnets:
588 physnet_name = physnet.split(':')[0]
589 if physnet_name != 'datacentre':
590 output_line += "{}:br-vlan,".format(physnet_name)
591 output_line += "datacentre:br-ex"
592 elif 'NeutronNetworkType' in line and tenant_vlan_enabled:
593 output_line = " NeutronNetworkType: vlan\n" \
594 " NeutronTunnelTypes: ''"
596 if ds_opts['sdn_controller'] == 'opendaylight' and \
597 'odl_vpp_routing_node' in ds_opts:
598 if 'opendaylight::vpp_routing_node' in line:
599 output_line = (" opendaylight::vpp_routing_node: {}.{}"
600 .format(ds_opts['odl_vpp_routing_node'],
602 elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
603 if 'NeutronVPPAgentPhysnets' in line:
604 # VPP interface tap0 will be used for external network
606 output_line = (" NeutronVPPAgentPhysnets: "
607 "'datacentre:{},external:tap0'"
608 .format(tenant_nic['Controller']))
609 elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
# elided branches: DHCP agent service handling for ODL dvr/csit cases
611 if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
613 elif 'NeutronDhcpAgentsPerNetwork' in line:
615 num_dhcp_agents = num_control
617 num_dhcp_agents = num_compute
618 output_line = (" NeutronDhcpAgentsPerNetwork: {}"
619 .format(num_dhcp_agents))
620 elif 'ComputeServices' in line:
621 output_line = (" ComputeServices:\n"
622 " - OS::TripleO::Services::NeutronDhcpAgent")
# per-role VPP performance options
625 for role in 'NovaCompute', 'Controller':
626 if role == 'NovaCompute':
627 perf_opts = perf_vpp_comp
629 perf_opts = perf_vpp_ctrl
630 cfg = "{}ExtraConfig".format(role)
631 if cfg in line and perf_opts:
633 if 'main-core' in perf_opts:
634 perf_line += ("\n fdio::vpp_cpu_main_core: '{}'"
635 .format(perf_opts['main-core']))
636 if 'corelist-workers' in perf_opts:
638 "fdio::vpp_cpu_corelist_workers: '{}'"
639 .format(perf_opts['corelist-workers']))
640 if ds_opts['sdn_controller'] == 'opendaylight' and \
641 ds_opts['dataplane'] == 'fdio':
642 if role == 'NovaCompute':
644 "tripleo::profile::base::neutron::"
645 "agents::honeycomb::"
646 "interface_role_mapping:"
647 " ['{}:tenant-interface',"
648 "'{}:public-interface']"
649 .format(tenant_nic[role],
653 "tripleo::profile::base::neutron::"
654 "agents::honeycomb::"
655 "interface_role_mapping:"
656 " ['{}:tenant-interface']"
657 .format(tenant_nic[role]))
659 output_line = (" {}:{}".format(cfg, perf_line))
# OVS DPDK performance parameter substitution
661 if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
662 for k, v in OVS_PERF_MAP.items():
663 if k in line and v in perf_ovs_comp:
664 output_line = " {}: '{}'".format(k, perf_ovs_comp[v])
667 # (FIXME) use compute's kernel settings for all nodes for now.
669 if 'NovaSchedulerDefaultFilters' in line:
671 " NovaSchedulerDefaultFilters: 'RamFilter," \
672 "ComputeFilter,AvailabilityZoneFilter," \
673 "ComputeCapabilitiesFilter," \
674 "ImagePropertiesFilter,NUMATopologyFilter'"
675 elif 'ComputeKernelArgs' in line:
677 for k, v in perf_kern_comp.items():
678 kernel_args += "{}={} ".format(k, v)
680 output_line = " ComputeKernelArgs: '{}'".\
685 # Merge compute services into control services if only a single
# elided guard: presumably checks for an all-in-one (no compute) deploy
688 logging.info("All in one deployment. Checking if service merging "
689 "required into control services")
690 with open(tmp_opnfv_env, 'r') as fh:
691 data = yaml.safe_load(fh)
692 param_data = data['parameter_defaults']
693 # Check to see if any parameters are set for Compute
694 for param in param_data.keys():
695 if param != 'ComputeServices' and param.startswith('Compute'):
696 logging.warning("Compute parameter set, but will not be used "
697 "in deployment: {}. Please use Controller "
698 "based parameters when using All-in-one "
699 "deployments".format(param))
700 if ('ControllerServices' in param_data and 'ComputeServices' in
702 logging.info("Services detected in environment file. Merging...")
703 ctrl_services = param_data['ControllerServices']
704 cmp_services = param_data['ComputeServices']
705 param_data['ControllerServices'] = list(set().union(
706 ctrl_services, cmp_services))
707 for dup_service in DUPLICATE_COMPUTE_SERVICES:
708 if dup_service in param_data['ControllerServices']:
709 param_data['ControllerServices'].remove(dup_service)
710 param_data.pop('ComputeServices')
711 logging.debug("Merged controller services: {}".format(
712 pprint.pformat(param_data['ControllerServices'])
714 with open(tmp_opnfv_env, 'w') as fh:
715 yaml.safe_dump(data, fh, default_flow_style=False)
717 logging.info("No services detected in env file, not merging "
720 logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
def generate_ceph_key():
    """
    Generates a random Ceph secret key, base64 encoded.

    The binary layout matches the Ceph key format: a little-endian header
    of (type=1, creation time in seconds, nsec=0, key length) followed by
    16 random key bytes.
    :return: base64-encoded key as bytes
    """
    key = os.urandom(16)
    header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
    return base64.b64encode(header + key)
# NOTE(review): elided numbered listing — docstring params, the
# fall-through `print(line)` branch, `ceph_params` initialization and
# some dict entries are missing; the single-space indents inside the
# printed YAML strings may also be collapsed.  Restore from upstream
# before editing logic.
# Purpose: rewrite storage-environment.yaml in place with fresh Ceph
# FSID/keys, then apply container-specific Ceph tuning parameters.
729 def prep_storage_env(ds, ns, virtual, tmp_dir):
731 Creates storage environment file for deployment. Source file is copied by
732 undercloud playbook to host.
739 ds_opts = ds['deploy_options']
740 storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
741 if not os.path.isfile(storage_file):
742 logging.error("storage-environment file is not in tmp directory: {}. "
743 "Check if file was copied from "
744 "undercloud".format(tmp_dir))
745 raise ApexDeployException("storage-environment file not copied from "
747 for line in fileinput.input(storage_file, inplace=True):
748 line = line.strip('\n')
749 if 'CephClusterFSID' in line:
750 print(" CephClusterFSID: {}".format(str(uuid.uuid4())))
751 elif 'CephMonKey' in line:
752 print(" CephMonKey: {}".format(generate_ceph_key().decode(
754 elif 'CephAdminKey' in line:
755 print(" CephAdminKey: {}".format(generate_ceph_key().decode(
757 elif 'CephClientKey' in line:
758 print(" CephClientKey: {}".format(generate_ceph_key().decode(
763 if ds_opts['containers']:
766 # max pgs allowed are calculated as num_mons * 200. Therefore we
767 # set number of pgs and pools so that the total will be less:
768 # num_pgs * num_pools * num_osds
769 ceph_params['CephPoolDefaultSize'] = 2
770 ceph_params['CephPoolDefaultPgNum'] = 32
# elided guard: presumably virtual deploys get reduced resource limits
772 ceph_params['CephAnsibleExtraConfig'] = {
773 'centos_package_dependencies': [],
774 'ceph_osd_docker_memory_limit': '1g',
775 'ceph_mds_docker_memory_limit': '1g',
777 ceph_device = ds_opts['ceph_device']
778 ceph_params['CephAnsibleDisksConfig'] = {
779 'devices': [ceph_device],
781 'osd_scenario': 'collocated'
783 utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
784 # TODO(trozet): remove following block as we only support containers now
785 elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
786 with open(storage_file, 'a') as fh:
787 fh.write(' ExtraConfig:\n')
788 fh.write(" ceph::profile::params::osds:{{{}:{{}}}}\n".format(
789 ds_opts['ceph_device']
# NOTE(review): elided numbered listing — docstring params, the raise
# message continuation and the trailing `else: print(line)` branch are
# missing; whitespace inside the printed YAML strings may be collapsed.
# Restore from upstream before editing logic.
# Purpose: rewrite the SRIOV env file in place, uncommenting the scheduler
# filter lines and substituting the configured SRIOV interface.
793 def prep_sriov_env(ds, tmp_dir):
795 Creates SRIOV environment file for deployment. Source file is copied by
796 undercloud playbook to host.
801 ds_opts = ds['deploy_options']
802 sriov_iface = ds_opts['sriov']
803 sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
804 if not os.path.isfile(sriov_file):
805 logging.error("sriov-environment file is not in tmp directory: {}. "
806 "Check if file was copied from "
807 "undercloud".format(tmp_dir))
808 raise ApexDeployException("sriov-environment file not copied from "
810 # TODO(rnoriega): Instead of line editing, refactor this code to load
811 # yaml file into a dict, edit it and write the file back.
812 for line in fileinput.input(sriov_file, inplace=True):
813 line = line.strip('\n')
# line[3:] strips a leading comment marker, re-enabling the parameter
814 if 'NovaSchedulerDefaultFilters' in line:
815 print(" {}".format(line[3:]))
816 elif 'NovaSchedulerAvailableFilters' in line:
817 print(" {}".format(line[3:]))
818 elif 'NeutronPhysicalDevMappings' in line:
819 print(" NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
820 .format(sriov_iface))
821 elif 'NeutronSriovNumVFs' in line:
822 print(" NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
823 elif 'NovaPCIPassthrough' in line:
824 print(" NovaPCIPassthrough:")
825 elif 'devname' in line:
826 print(" - devname: \"{}\"".format(sriov_iface))
827 elif 'physical_network' in line:
828 print(" physical_network: \"nfv_sriov\"")
def external_network_cmds(ns, ds):
    """
    Generates external network openstack commands
    :param ns: network settings
    :param ds: deploy settings
    :return: list of commands to configure external network
    """
    ds_opts = ds['deploy_options']
    external_physnet = 'datacentre'
    # with fdio and no OpenDaylight, VPP owns the external physnet name
    if ds_opts['dataplane'] == 'fdio' and \
            ds_opts['sdn_controller'] != 'opendaylight':
        external_physnet = 'external'
    if 'external' in ns.enabled_network_list:
        net_config = ns['networks']['external'][0]
        external = True
        pool_start, pool_end = net_config['floating_ip_range']
    else:
        # no external network enabled: fall back to the admin network and
        # reuse its introspection range for the allocation pool
        net_config = ns['networks']['admin']
        external = False
        pool_start, pool_end = ns['apex']['networks']['admin'][
            'introspection_range']
    nic_config = net_config['nic_mapping']
    gateway = net_config['gateway']
    cmds = list()
    # create network command
    if nic_config['compute']['vlan'] == 'native':
        ext_type = 'flat'
    else:
        ext_type = "vlan --provider-segment {}".format(nic_config[
            'compute']['vlan'])
    cmds.append("openstack network create external --project service "
                "--external --provider-network-type {} "
                "--provider-physical-network {}"
                .format(ext_type, external_physnet))
    # create subnet command
    cidr = net_config['cidr']
    subnet_cmd = "openstack subnet create external-subnet --project " \
                 "service --network external --no-dhcp --gateway {} " \
                 "--allocation-pool start={},end={} --subnet-range " \
                 "{}".format(gateway, pool_start, pool_end, str(cidr))
    if external and cidr.version == 6:
        subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
                      '--ipv6-address-mode slaac'
    cmds.append(subnet_cmd)
    logging.debug("Neutron external network commands determined "
                  "as: {}".format(cmds))
    return cmds
882 def create_congress_cmds(overcloud_file):
883 drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
884 overcloudrc = parsers.parse_overcloudrc(overcloud_file)
885 logging.info("Creating congress commands")
888 "username={}".format(overcloudrc['OS_USERNAME']),
889 "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
890 "password={}".format(overcloudrc['OS_PASSWORD']),
891 "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
894 logging.error("Unable to find all keys required for congress in "
895 "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
896 "OS_PASSWORD, OS_AUTH_URL. Please check overcloudrc "
897 "file: {}".format(overcloud_file))
900 ds_cfg = '--config ' + ' --config '.join(ds_cfg)
902 for driver in drivers:
903 if driver == 'doctor':
904 cmd = "{} \"{}\"".format(driver, driver)
906 cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
908 cmd += ' --config api_version="2.34"'
909 logging.debug("Congress command created: {}".format(cmd))