##############################################################################
# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

import base64
import fileinput
import logging
import os
import platform
import pprint
import shutil
import struct
import time
import uuid
import yaml

import apex.builders.overcloud_builder as oc_builder
import apex.builders.common_builder as c_builder
from apex.common import constants as con
from apex.common.exceptions import ApexDeployException
from apex.common import parsers
from apex.common import utils
from apex.virtual import utils as virt_utils
from cryptography.hazmat.primitives import serialization as \
    crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend as \
    crypto_default_backend


SDN_FILE_MAP = {
    'opendaylight': {
        'sfc': 'neutron-sfc-opendaylight.yaml',
        'vpn': 'neutron-bgpvpn-opendaylight.yaml',
        'gluon': 'gluon.yaml',
        'vpp': {
            'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
            'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
            'default': 'neutron-opendaylight-honeycomb.yaml'
        },
        'l2gw': 'neutron-l2gw-opendaylight.yaml',
        'sriov': 'neutron-opendaylight-sriov.yaml',
        'default': 'neutron-opendaylight.yaml',
    },
    'onos': {
        'sfc': 'neutron-onos-sfc.yaml',
        'default': 'neutron-onos.yaml'
    },
    'ovn': 'neutron-ml2-ovn.yaml',
    False: {
        'vpp': 'neutron-ml2-vpp.yaml',
        'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
    }
}

OTHER_FILE_MAP = {
    'tacker': 'enable_tacker.yaml',
    'congress': 'enable_congress.yaml',
    'barometer': 'enable_barometer.yaml',
    'rt_kvm': 'enable_rt_kvm.yaml'
}

OVS_PERF_MAP = {
    'HostCpusList': 'dpdk_cores',
    'NeutronDpdkCoreList': 'pmd_cores',
    'NeutronDpdkSocketMemory': 'socket_memory',
    'NeutronDpdkMemoryChannels': 'memory_channels'
}

ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
                      ".noarch.rpm"

LOOP_DEVICE_SIZE = "10G"

LOSETUP_SERVICE = """[Unit]
Description=Setup loop devices
Before=network.target

[Service]
Type=oneshot
ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
ExecStop=/sbin/losetup -d /dev/loop3
TimeoutSec=60
RemainAfterExit=yes

[Install]
WantedBy=multi-user.target
"""

DUPLICATE_COMPUTE_SERVICES = [
    'OS::TripleO::Services::ComputeNeutronCorePlugin',
    'OS::TripleO::Services::ComputeNeutronMetadataAgent',
    'OS::TripleO::Services::ComputeNeutronOvsAgent',
    'OS::TripleO::Services::ComputeNeutronL3Agent'
]


def build_sdn_env_list(ds, sdn_map, env_list=None):
    """
    Builds a list of SDN environment files to be used in the deploy cmd.

    This function recursively searches an sdn_map. First the SDN controller
    is matched, and then the function looks for enabled features for that
    controller to determine which environment files should be used. By
    default, a feature is added to the list when it is set to true in the
    deploy settings. If a feature does not have a boolean value, then the
    key and value pair to compare against are given as a tuple (k, v).

    :param ds: deploy settings
    :param sdn_map: SDN map to recursively search
    :param env_list: recursive var to hold previously found env_list
    :return: A list of env files
    """
    if env_list is None:
        env_list = list()
    for k, v in sdn_map.items():
        if ds['sdn_controller'] == k or (k in ds and ds[k]):
            if isinstance(v, dict):
                # Append default SDN env file first
                # The assumption is that feature-enabled SDN env files
                # override and do not conflict with previously set default
                # settings
                if ds['sdn_controller'] == k and 'default' in v:
                    env_list.append(os.path.join(con.THT_ENV_DIR,
                                                 v['default']))
                env_list.extend(build_sdn_env_list(ds, v))
            # check if the value is not a boolean
            elif isinstance(v, tuple):
                if ds[k] == v[0]:
                    env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
            else:
                env_list.append(os.path.join(con.THT_ENV_DIR, v))
    if len(env_list) == 0:
        try:
            env_list.append(os.path.join(
                con.THT_ENV_DIR, sdn_map['default']))
        except KeyError:
            logging.warning("Unable to find default file for SDN")

    return env_list
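
# Example (illustrative, not part of the original module): given deploy
# settings with OpenDaylight and the 'sfc' feature enabled,
#   build_sdn_env_list({'sdn_controller': 'opendaylight', 'sfc': True},
#                      SDN_FILE_MAP)
# returns the default neutron-opendaylight.yaml env file followed by
# neutron-sfc-opendaylight.yaml, both prefixed with con.THT_ENV_DIR.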


def get_docker_sdn_files(ds_opts):
    """
    Returns docker env files for detected SDN
    :param ds_opts: deploy options
    :return: list of docker THT env files for an SDN
    """
    docker_services = con.VALID_DOCKER_SERVICES
    tht_dir = con.THT_DOCKER_ENV_DIR
    sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
    for i, sdn_file in enumerate(sdn_env_list):
        sdn_base = os.path.basename(sdn_file)
        if sdn_base in docker_services:
            if docker_services[sdn_base] is not None:
                sdn_env_list[i] = \
                    os.path.join(tht_dir, docker_services[sdn_base])
            else:
                sdn_env_list[i] = os.path.join(tht_dir, sdn_base)
    return sdn_env_list
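
# Example (illustrative): a puppet env file such as neutron-opendaylight.yaml
# is swapped for its containerized counterpart under THT_DOCKER_ENV_DIR when
# it appears in con.VALID_DOCKER_SERVICES.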


def create_deploy_cmd(ds, ns, inv, tmp_dir,
                      virtual, env_file='opnfv-environment.yaml',
                      net_data=False):

    logging.info("Creating deployment command")
    deploy_options = ['network-environment.yaml']

    ds_opts = ds['deploy_options']

    if ds_opts['containers']:
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'docker.yaml'))

    if ds['global_params']['ha_enabled']:
        if ds_opts['containers']:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'docker-ha.yaml'))
        else:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'puppet-pacemaker.yaml'))

    if env_file:
        deploy_options.append(env_file)

    if ds_opts['containers']:
        deploy_options.append('docker-images.yaml')
        sdn_docker_files = get_docker_sdn_files(ds_opts)
        for sdn_docker_file in sdn_docker_files:
            deploy_options.append(sdn_docker_file)
    else:
        deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)

    for k, v in OTHER_FILE_MAP.items():
        if k in ds_opts and ds_opts[k]:
            if ds_opts['containers']:
                deploy_options.append(os.path.join(con.THT_DOCKER_ENV_DIR,
                                                   "{}.yaml".format(k)))
            else:
                deploy_options.append(os.path.join(con.THT_ENV_DIR, v))

    # TODO(trozet) Fix this check to look for if ceph is in controller
    # services and not use name of the file
    if ds_opts['ceph'] and 'csit' not in env_file:
        prep_storage_env(ds, ns, virtual, tmp_dir)
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'storage-environment.yaml'))
    if ds_opts['sriov']:
        prep_sriov_env(ds, tmp_dir)

    # Check for 'k8s' here intentionally, as we may support other values
    # such as openstack/openshift for 'vim' option.
    if ds_opts['vim'] == 'k8s':
        deploy_options.append('kubernetes-environment.yaml')

    if virtual:
        deploy_options.append('virtual-environment.yaml')
    else:
        deploy_options.append('baremetal-environment.yaml')

    num_control, num_compute = inv.get_node_counts()
    if num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1
    if platform.machine() == 'aarch64':
        # aarch64 deploys were not completing in the default 90 mins.
        # Not sure if this is related to the hardware the OOO support
        # was developed on or the virtualization support in CentOS.
        # Either way it will probably get better over time as the aarch64
        # support matures in CentOS, and deploy time should be tested in
        # the future so this multiplier can be removed.
        con.DEPLOY_TIMEOUT *= 2
    cmd = "openstack overcloud deploy --templates --timeout {} " \
          .format(con.DEPLOY_TIMEOUT)
    # build cmd env args
    for option in deploy_options:
        cmd += " -e {}".format(option)
    cmd += " --ntp-server {}".format(ns['ntp'][0])
    cmd += " --control-scale {}".format(num_control)
    cmd += " --compute-scale {}".format(num_compute)
    cmd += ' --control-flavor control --compute-flavor compute'
    if net_data:
        cmd += ' --networks-file network_data.yaml'
    libvirt_type = 'kvm'
    if virtual and (platform.machine() != 'aarch64'):
        with open('/sys/module/kvm_intel/parameters/nested') as f:
            nested_kvm = f.read().strip()
            if nested_kvm != 'Y':
                libvirt_type = 'qemu'
    elif virtual and (platform.machine() == 'aarch64'):
        libvirt_type = 'qemu'
    cmd += ' --libvirt-type {}'.format(libvirt_type)
    if platform.machine() == 'aarch64':
        cmd += ' --override-ansible-cfg /home/stack/ansible.cfg '
    logging.info("Deploy command set: {}".format(cmd))

    with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
        fh.write(cmd)
    return cmd
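
# Example (illustrative; the exact -e list and NTP server depend on the
# deploy settings) of the kind of command this builds for a virtual,
# non-HA, non-containerized deployment:
#   openstack overcloud deploy --templates --timeout 90 \
#     -e network-environment.yaml -e opnfv-environment.yaml \
#     -e virtual-environment.yaml --ntp-server pool.ntp.org \
#     --control-scale 1 --compute-scale 1 \
#     --control-flavor control --compute-flavor compute --libvirt-type kvm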


def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
               patches=None):
    """
    Locates sdn image and preps for deployment.
    :param ds: deploy settings
    :param ns: network settings
    :param img: sdn image
    :param tmp_dir: dir to store modified sdn image
    :param root_pw: password to configure for overcloud image
    :param docker_tag: Docker image tag for RDO version (default None)
    :param patches: List of patches to apply to overcloud image
    :return: None
    """
    # TODO(trozet): Come up with a better way to organize this logic in this
    # function
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))

    ds_opts = ds['deploy_options']
    virt_cmds = list()
    sdn = ds_opts['sdn_controller']
    patched_containers = set()
    # we need this due to rhbz #1436021
    # fixed in systemd-219-37.el7
    if sdn == 'opendaylight':
        logging.info("Neutron openvswitch-agent disabled")
        virt_cmds.extend([{
            con.VIRT_RUN_CMD:
                "rm -f /etc/systemd/system/multi-user.target.wants/"
                "neutron-openvswitch-agent.service"},
            {
            con.VIRT_RUN_CMD:
                "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
                ".service"
        }])

    if ns.get('http_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'http_proxy={}' >> /etc/environment".format(
                    ns['http_proxy'])})

    if ns.get('https_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'https_proxy={}' >> /etc/environment".format(
                    ns['https_proxy'])})

    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug("Temporary overcloud image stored as: {}".format(
        tmp_oc_image))

    if ds_opts['vpn']:
        oc_builder.inject_quagga(tmp_oc_image, tmp_dir)
        virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
                "/opt/quagga/etc/init.d/zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
                              "zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.local "})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
        logging.info("ZRPCD process started")

    dataplane = ds_opts['dataplane']
    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
        uio_types = {
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
        }
        for mod_file, mod in uio_types.items():
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))
            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
                    mod_file)},
                {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
                                   "{}".format(os.path.basename(mod_file))}
            ])
    if root_pw:
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})

    if dataplane == 'ovs':
        if ds_opts['sfc']:
            oc_builder.inject_ovs_nsh(tmp_oc_image, tmp_dir)
        elif sdn == 'opendaylight':
            # FIXME(trozet) remove this after RDO is updated with fix for
            # https://bugzilla.redhat.com/show_bug.cgi?id=1544892
            ovs_file = os.path.basename(con.CUSTOM_OVS)
            ovs_url = con.CUSTOM_OVS.replace(ovs_file, '')
            utils.fetch_upstream_and_unpack(dest=tmp_dir, url=ovs_url,
                                            targets=[ovs_file])
            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/root/".format(os.path.join(tmp_dir,
                                                                  ovs_file))},
                {con.VIRT_RUN_CMD: "yum downgrade -y /root/{}".format(
                    ovs_file)}
            ])

    if dataplane == 'fdio':
        # Patch neutron to use the OVS external interface for the router
        # and add the generic linux NS interface driver
        virt_cmds.append(
            {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
                               "-p1 < neutron-patch-NSDriver.patch"})
        if sdn is False:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
                {con.VIRT_RUN_CMD: "yum install -y "
                                   "/root/nosdn_vpp_rpms/*.rpm"}
            ])

    undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
        'installer_vm']['ip']
    if sdn == 'opendaylight':
        oc_builder.inject_opendaylight(
            odl_version=ds_opts['odl_version'],
            image=tmp_oc_image,
            tmp_dir=tmp_dir,
            uc_ip=undercloud_admin_ip,
            os_version=ds_opts['os_version'],
            docker_tag=docker_tag,
        )
        if docker_tag:
            patched_containers = patched_containers.union({'opendaylight'})

    if patches:
        if ds_opts['os_version'] == 'master':
            branch = ds_opts['os_version']
        else:
            branch = "stable/{}".format(ds_opts['os_version'])
        logging.info('Adding patches to overcloud')
        patched_containers = patched_containers.union(
            c_builder.add_upstream_patches(patches,
                                           tmp_oc_image, tmp_dir,
                                           branch,
                                           uc_ip=undercloud_admin_ip,
                                           docker_tag=docker_tag))
    # if containers with ceph, and no ceph device we need to use a
    # persistent loop device for Ceph OSDs
    if docker_tag and ds_opts['ceph_device'] == '/dev/loop3':
        tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
        with open(tmp_losetup, 'w') as fh:
            fh.write(LOSETUP_SERVICE)
        virt_cmds.extend([
            {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(
                tmp_losetup)},
            {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size {}'
                .format(LOOP_DEVICE_SIZE)},
            {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
            {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
        ])
    # TODO(trozet) remove this after LP#173474 is fixed
    dhcp_unit = '/usr/lib/systemd/system/dhcp-interface@.service'
    virt_cmds.append(
        {con.VIRT_RUN_CMD: "crudini --del {} Unit "
                           "ConditionPathExists".format(dhcp_unit)})
    # Prep for NFS
    virt_cmds.extend([
        {con.VIRT_INSTALL: "nfs-utils"},
        {con.VIRT_RUN_CMD: "ln -s /usr/lib/systemd/system/nfs-server.service "
                           "/etc/systemd/system/multi-user.target.wants/"
                           "nfs-server.service"},
        {con.VIRT_RUN_CMD: "mkdir -p /root/nfs/glance"},
        {con.VIRT_RUN_CMD: "mkdir -p /root/nfs/cinder"},
        {con.VIRT_RUN_CMD: "mkdir -p /root/nfs/nova"},
        {con.VIRT_RUN_CMD: "echo '/root/nfs/glance *(rw,sync,"
                           "no_root_squash,no_acl)' > /etc/exports"},
        {con.VIRT_RUN_CMD: "echo '/root/nfs/cinder *(rw,sync,"
                           "no_root_squash,no_acl)' >> /etc/exports"},
        {con.VIRT_RUN_CMD: "echo '/root/nfs/nova *(rw,sync,"
                           "no_root_squash,no_acl)' >> /etc/exports"},
        {con.VIRT_RUN_CMD: "exportfs -avr"},
    ])
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")
    return patched_containers


def make_ssh_key():
    """
    Creates public and private SSH keys (1024-bit RSA)
    :return: private, public key
    """
    key = rsa.generate_private_key(
        backend=crypto_default_backend(),
        public_exponent=65537,
        key_size=1024
    )

    private_key = key.private_bytes(
        crypto_serialization.Encoding.PEM,
        crypto_serialization.PrivateFormat.PKCS8,
        crypto_serialization.NoEncryption())
    public_key = key.public_key().public_bytes(
        crypto_serialization.Encoding.OpenSSH,
        crypto_serialization.PublicFormat.OpenSSH
    )
    return private_key.decode('utf-8'), public_key.decode('utf-8')
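
# Example (illustrative) usage: the returned strings are ready to embed in an
# environment file or an authorized_keys entry:
#   priv, pub = make_ssh_key()
#   assert pub.startswith('ssh-rsa')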


def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
    """
    Creates modified opnfv/network environments for deployment
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param opnfv_env: file path for opnfv-environment file
    :param net_env: file path for network-environment file
    :param tmp_dir: Apex tmp dir
    :return: None
    """

    logging.info("Preparing opnfv-environment and network-environment files")
    ds_opts = ds['deploy_options']
    tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
    shutil.copyfile(opnfv_env, tmp_opnfv_env)
    tenant_nic_map = ns['networks']['tenant']['nic_mapping']
    tenant_nic = dict()
    tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
    tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
    external_nic_map = ns['networks']['external'][0]['nic_mapping']
    external_nic = dict()
    external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]

    # SSH keys
    private_key, public_key = make_ssh_key()

    num_control, num_compute = inv.get_node_counts()
    if num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1

    # Make easier/faster variables to index in the file editor
    if 'performance' in ds_opts:
        perf = True
        # vpp
        if 'vpp' in ds_opts['performance']['Compute']:
            perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
        else:
            perf_vpp_comp = None
        if 'vpp' in ds_opts['performance']['Controller']:
            perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
        else:
            perf_vpp_ctrl = None

        # ovs
        if 'ovs' in ds_opts['performance']['Compute']:
            perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
        else:
            perf_ovs_comp = None

        # kernel
        if 'kernel' in ds_opts['performance']['Compute']:
            perf_kern_comp = ds_opts['performance']['Compute']['kernel']
        else:
            perf_kern_comp = None
    else:
        perf = False

    tenant_settings = ns['networks']['tenant']
    tenant_vlan_enabled = 'tenant' in ns.enabled_network_list and \
        ns['networks']['tenant'].get('segmentation_type') == 'vlan'

    # Modify OPNFV environment
    # TODO: Change to build a dict and output yaml rather than parsing
    for line in fileinput.input(tmp_opnfv_env, inplace=True):
        line = line.strip('\n')
        output_line = line
        if 'CloudDomain' in line:
            output_line = "  CloudDomain: {}".format(ns['domain_name'])
        elif 'replace_private_key' in line:
            output_line = "    private_key: |\n"
            key_out = ''
            for line in private_key.splitlines():
                key_out += "      {}\n".format(line)
            output_line += key_out
        elif 'replace_public_key' in line:
            output_line = "    public_key: '{}'".format(public_key)
        elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
                'resource_registry' in line:
            output_line = "resource_registry:\n" \
                          "  OS::TripleO::NodeUserData: first-boot.yaml"
        elif 'ComputeExtraConfigPre' in line and \
                ds_opts['dataplane'] == 'ovs_dpdk':
            output_line = '  OS::TripleO::ComputeExtraConfigPre: ' \
                          './ovs-dpdk-preconfig.yaml'
        elif 'NeutronNetworkVLANRanges' in line:
            vlan_setting = ''
            if tenant_vlan_enabled:
                if ns['networks']['tenant']['overlay_id_range']:
                    vlan_setting = ns['networks']['tenant']['overlay_id_range']
                    if 'datacentre' not in vlan_setting:
                        vlan_setting += ',datacentre:1:1000'
            # SRIOV networks are VLAN based provider networks. In order to
            # simplify the deployment, nfv_sriov will be the default physnet.
            # VLANs are not needed in advance, and the user will have to
            # create the network specifying the segmentation-id.
            if ds_opts['sriov']:
                if vlan_setting:
                    vlan_setting += ",nfv_sriov"
                else:
                    vlan_setting = "datacentre:1:1000,nfv_sriov"
            if vlan_setting:
                output_line = "  NeutronNetworkVLANRanges: " + vlan_setting
        elif 'NeutronBridgeMappings' in line and tenant_vlan_enabled:
            if tenant_settings['overlay_id_range']:
                physnets = tenant_settings['overlay_id_range'].split(',')
                output_line = "  NeutronBridgeMappings: "
                for physnet in physnets:
                    physnet_name = physnet.split(':')[0]
                    if physnet_name != 'datacentre':
                        output_line += "{}:br-vlan,".format(physnet_name)
                output_line += "datacentre:br-ex"
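        # Worked example (illustrative): an overlay_id_range of
        # 'nfv:500:525,datacentre:1:1000' renders
        #   NeutronBridgeMappings: nfv:br-vlan,datacentre:br-ex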
        elif 'OpenDaylightProviderMappings' in line and tenant_vlan_enabled \
                and ds_opts['sdn_controller'] == 'opendaylight':
            if tenant_settings['overlay_id_range']:
                physnets = tenant_settings['overlay_id_range'].split(',')
                output_line = "  OpenDaylightProviderMappings: "
                for physnet in physnets:
                    physnet_name = physnet.split(':')[0]
                    if physnet_name != 'datacentre':
                        output_line += "{}:br-vlan,".format(physnet_name)
                output_line += "datacentre:br-ex"
        elif 'NeutronNetworkType' in line and tenant_vlan_enabled:
            output_line = "  NeutronNetworkType: vlan\n" \
                          "  NeutronTunnelTypes: ''"

        if ds_opts['sdn_controller'] == 'opendaylight' and \
                'odl_vpp_routing_node' in ds_opts:
            if 'opendaylight::vpp_routing_node' in line:
                output_line = ("    opendaylight::vpp_routing_node: {}.{}"
                               .format(ds_opts['odl_vpp_routing_node'],
                                       ns['domain_name']))
        elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
            if 'NeutronVPPAgentPhysnets' in line:
                # VPP interface tap0 will be used for external network
                # connectivity
                output_line = ("  NeutronVPPAgentPhysnets: "
                               "'datacentre:{},external:tap0'"
                               .format(tenant_nic['Controller']))
        elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
                'dvr') is True:
            if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
                output_line = ''
            elif 'NeutronDhcpAgentsPerNetwork' in line:
                if num_compute == 0:
                    num_dhcp_agents = num_control
                else:
                    num_dhcp_agents = num_compute
                output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
                               .format(num_dhcp_agents))
            elif 'ComputeServices' in line:
                output_line = ("  ComputeServices:\n"
                               "    - OS::TripleO::Services::NeutronDhcpAgent")

        if perf:
            for role in 'NovaCompute', 'Controller':
                if role == 'NovaCompute':
                    perf_opts = perf_vpp_comp
                else:
                    perf_opts = perf_vpp_ctrl
                cfg = "{}ExtraConfig".format(role)
                if cfg in line and perf_opts:
                    perf_line = ''
                    if 'main-core' in perf_opts:
                        perf_line += ("\n    fdio::vpp_cpu_main_core: '{}'"
                                      .format(perf_opts['main-core']))
                    if 'corelist-workers' in perf_opts:
                        perf_line += ("\n    "
                                      "fdio::vpp_cpu_corelist_workers: '{}'"
                                      .format(perf_opts['corelist-workers']))
                    if ds_opts['sdn_controller'] == 'opendaylight' and \
                            ds_opts['dataplane'] == 'fdio':
                        if role == 'NovaCompute':
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface',"
                                          "'{}:public-interface']"
                                          .format(tenant_nic[role],
                                                  external_nic[role]))
                        else:
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface']"
                                          .format(tenant_nic[role]))
                    if perf_line:
                        output_line = ("  {}:{}".format(cfg, perf_line))

            if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
                for k, v in OVS_PERF_MAP.items():
                    if k in line and v in perf_ovs_comp:
                        output_line = "  {}: '{}'".format(k, perf_ovs_comp[v])

            # kernel args
            # (FIXME) use compute's kernel settings for all nodes for now.
            if perf_kern_comp:
                if 'NovaSchedulerDefaultFilters' in line:
                    output_line = \
                        "  NovaSchedulerDefaultFilters: 'RamFilter," \
                        "ComputeFilter,AvailabilityZoneFilter," \
                        "ComputeCapabilitiesFilter," \
                        "ImagePropertiesFilter,NUMATopologyFilter'"
                elif 'ComputeKernelArgs' in line:
                    kernel_args = ''
                    for k, v in perf_kern_comp.items():
                        kernel_args += "{}={} ".format(k, v)
                    if kernel_args:
                        output_line = "  ComputeKernelArgs: '{}'".\
                            format(kernel_args)

        print(output_line)

    # Merge compute services into control services if only a single
    # node deployment
    if num_compute == 0:
        logging.info("All in one deployment. Checking if service merging "
                     "required into control services")
        with open(tmp_opnfv_env, 'r') as fh:
            data = yaml.safe_load(fh)
        param_data = data['parameter_defaults']
        # Check to see if any parameters are set for Compute
        for param in param_data.keys():
            if param != 'ComputeServices' and param.startswith('Compute'):
                logging.warning("Compute parameter set, but will not be used "
                                "in deployment: {}. Please use Controller "
                                "based parameters when using All-in-one "
                                "deployments".format(param))
        if ('ControllerServices' in param_data and 'ComputeServices' in
                param_data):
            logging.info("Services detected in environment file. Merging...")
            ctrl_services = param_data['ControllerServices']
            cmp_services = param_data['ComputeServices']
            param_data['ControllerServices'] = list(set().union(
                ctrl_services, cmp_services))
            for dup_service in DUPLICATE_COMPUTE_SERVICES:
                if dup_service in param_data['ControllerServices']:
                    param_data['ControllerServices'].remove(dup_service)
            param_data.pop('ComputeServices')
            logging.debug("Merged controller services: {}".format(
                pprint.pformat(param_data['ControllerServices'])
            ))
            with open(tmp_opnfv_env, 'w') as fh:
                yaml.safe_dump(data, fh, default_flow_style=False)
        else:
            logging.info("No services detected in env file, not merging "
                         "services")

    logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
    with open(tmp_opnfv_env, 'r') as fh:
        logging.debug("opnfv-environment content is : {}".format(
            pprint.pformat(yaml.safe_load(fh.read()))
        ))


def generate_ceph_key():
    key = os.urandom(16)
    header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
    return base64.b64encode(header + key)
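
# Example (illustrative): the result is a base64 string of a small packed
# header (version, timestamp, key length) followed by 16 random key bytes,
# the layout cephx keyring entries expect; decoding with
# generate_ceph_key().decode('utf-8') might yield something like 'AQAx...'.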


def prep_storage_env(ds, ns, virtual, tmp_dir):
    """
    Creates storage environment file for deployment. Source file is copied by
    undercloud playbook to host.
    :param ds: deploy settings
    :param ns: network settings
    :param virtual: True if virtual deployment
    :param tmp_dir: Apex tmp dir
    :return: None
    """
    ds_opts = ds['deploy_options']
    storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
    if not os.path.isfile(storage_file):
        logging.error("storage-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("storage-environment file not copied from "
                                  "undercloud")
    for line in fileinput.input(storage_file, inplace=True):
        line = line.strip('\n')
        if 'CephClusterFSID' in line:
            print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
        elif 'CephMonKey' in line:
            print("  CephMonKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephAdminKey' in line:
            print("  CephAdminKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephClientKey' in line:
            print("  CephClientKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        else:
            print(line)

    if ds_opts['containers']:
        ceph_params = {}

        # max pgs allowed are calculated as num_mons * 200. Therefore we
        # set the number of pgs and pools so that the total
        # (num_pgs * num_pools * num_osds) stays below that limit.
        ceph_params['CephPoolDefaultSize'] = 2
        ceph_params['CephPoolDefaultPgNum'] = 32
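        # Worked example (illustrative): a single monitor allows
        # 1 * 200 = 200 pgs, so 32 pgs per pool across a few pools on one
        # OSD stays under the cap.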
        if virtual:
            ceph_params['CephAnsibleExtraConfig'] = {
                'centos_package_dependencies': [],
                'ceph_osd_docker_memory_limit': '1g',
                'ceph_mds_docker_memory_limit': '1g',
            }
        ceph_device = ds_opts['ceph_device']
        ceph_params['CephAnsibleDisksConfig'] = {
            'devices': [ceph_device],
            'osd_scenario': 'collocated'
        }
        utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
    # TODO(trozet): remove following block as we only support containers now
    elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
        with open(storage_file, 'a') as fh:
            fh.write('  ExtraConfig:\n')
            fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
                ds_opts['ceph_device']
            ))


def prep_sriov_env(ds, tmp_dir):
    """
    Creates SRIOV environment file for deployment. Source file is copied by
    undercloud playbook to host.
    :param ds: deploy settings
    :param tmp_dir: Apex tmp dir
    :return: None
    """
    ds_opts = ds['deploy_options']
    sriov_iface = ds_opts['sriov']
    sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
    if not os.path.isfile(sriov_file):
        logging.error("sriov-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("sriov-environment file not copied from "
                                  "undercloud")
    # TODO(rnoriega): Instead of line editing, refactor this code to load
    # yaml file into a dict, edit it and write the file back.
    for line in fileinput.input(sriov_file, inplace=True):
        line = line.strip('\n')
        if 'NovaSchedulerDefaultFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NovaSchedulerAvailableFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NeutronPhysicalDevMappings' in line:
            print("  NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
                  .format(sriov_iface))
        elif 'NeutronSriovNumVFs' in line:
            print("  NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
        elif 'NovaPCIPassthrough' in line:
            print("  NovaPCIPassthrough:")
        elif 'devname' in line:
            print("    - devname: \"{}\"".format(sriov_iface))
        elif 'physical_network' in line:
            print("      physical_network: \"nfv_sriov\"")
        else:
            print(line)


def external_network_cmds(ns, ds):
    """
    Generates external network openstack commands
    :param ns: network settings
    :param ds: deploy settings
    :return: list of commands to configure external network
    """
    ds_opts = ds['deploy_options']
    external_physnet = 'datacentre'
    if ds_opts['dataplane'] == 'fdio' and \
            ds_opts['sdn_controller'] != 'opendaylight':
        external_physnet = 'external'
    if 'external' in ns.enabled_network_list:
        net_config = ns['networks']['external'][0]
        external = True
        pool_start, pool_end = net_config['floating_ip_range']
    else:
        net_config = ns['networks']['admin']
        external = False
        pool_start, pool_end = ns['apex']['networks']['admin'][
            'introspection_range']
    nic_config = net_config['nic_mapping']
    gateway = net_config['gateway']
    cmds = list()
    # create network command
    if nic_config['compute']['vlan'] == 'native':
        ext_type = 'flat'
    else:
        ext_type = "vlan --provider-segment {}".format(nic_config[
            'compute']['vlan'])
    cmds.append("openstack network create external --project service "
                "--external --provider-network-type {} "
                "--provider-physical-network {}"
                .format(ext_type, external_physnet))
    # create subnet command
    cidr = net_config['cidr']
    subnet_cmd = "openstack subnet create external-subnet --project " \
                 "service --network external --no-dhcp --gateway {} " \
                 "--allocation-pool start={},end={} --subnet-range " \
                 "{}".format(gateway, pool_start, pool_end, str(cidr))
    if external and cidr.version == 6:
        subnet_cmd += ' --ip-version 6'
    cmds.append(subnet_cmd)
    logging.debug("Neutron external network commands determined "
                  "as: {}".format(cmds))
    return cmds


def create_congress_cmds(overcloud_file):
    drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
    overcloudrc = parsers.parse_overcloudrc(overcloud_file)
    logging.info("Creating congress commands")
    try:
        ds_cfg = [
            "username={}".format(overcloudrc['OS_USERNAME']),
            "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
            "password={}".format(overcloudrc['OS_PASSWORD']),
            "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
        ]
    except KeyError:
        logging.error("Unable to find all keys required for congress in "
                      "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
                      "OS_PASSWORD, OS_AUTH_URL. Please check overcloudrc "
                      "file: {}".format(overcloud_file))
        raise
    cmds = list()
    ds_cfg = '--config ' + ' --config '.join(ds_cfg)

    for driver in drivers:
        if driver == 'doctor':
            cmd = "{} \"{}\"".format(driver, driver)
        else:
            cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
        if driver == 'aodh':
            cmd += ' --config api_version="2.34"'
        logging.debug("Congress command created: {}".format(cmd))
        cmds.append(cmd)
    return cmds
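
# Example (illustrative; values made up): for the 'nova' driver the command
# resembles
#   nova "nova" --config username=admin --config tenant_name=admin \
#     --config password=secret --config auth_url=http://192.0.2.1:5000
# while the 'doctor' driver takes no --config arguments.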