1 ##############################################################################
2 # Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
21 import apex.builders.overcloud_builder as oc_builder
22 import apex.builders.common_builder as c_builder
24 from apex.common import constants as con
25 from apex.common.exceptions import ApexDeployException
26 from apex.common import parsers
27 from apex.common import utils
28 from apex.virtual import utils as virt_utils
29 from cryptography.hazmat.primitives import serialization as \
31 from cryptography.hazmat.primitives.asymmetric import rsa
32 from cryptography.hazmat.backends import default_backend as \
33 crypto_default_backend
# NOTE(review): the constant definitions below are visibly incomplete in this
# view — the opening assignments (e.g. "SDN_FILE_MAP = {") are on lines not
# shown. Comments mark what each surviving fragment appears to belong to.
# SDN controller -> feature -> THT environment file entries (fragment).
38 'sfc': 'neutron-sfc-opendaylight.yaml',
39 'vpn': 'neutron-bgpvpn-opendaylight.yaml',
40 'gluon': 'gluon.yaml',
42 'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
43 'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
44 'default': 'neutron-opendaylight-honeycomb.yaml'
46 'l2gw': 'neutron-l2gw-opendaylight.yaml',
47 'sriov': 'neutron-opendaylight-sriov.yaml',
48 'default': 'neutron-opendaylight.yaml',
51 'sfc': 'neutron-onos-sfc.yaml',
52 'default': 'neutron-onos.yaml'
54 'ovn': 'neutron-ml2-ovn.yaml',
56 'vpp': 'neutron-ml2-vpp.yaml',
# 'dataplane' is a (value, env-file) tuple rather than a boolean flag; see
# the tuple branch in build_sdn_env_list below.
57 'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
# Feature flags enabled directly in deploy settings -> extra env file.
62 'tacker': 'enable_tacker.yaml',
63 'congress': 'enable_congress.yaml',
64 'barometer': 'enable_barometer.yaml',
65 'rt_kvm': 'enable_rt_kvm.yaml'
# THT OVS/DPDK parameter name -> deploy-settings performance option key
# (consumed by prep_env when dataplane == 'ovs_dpdk').
69 'HostCpusList': 'dpdk_cores',
70 'NeutronDpdkCoreList': 'pmd_cores',
71 'NeutronDpdkSocketMemory': 'socket_memory',
72 'NeutronDpdkMemoryChannels': 'memory_channels'
75 ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
78 LOOP_DEVICE_SIZE = "10G"
# systemd unit text used to attach /srv/data.img to /dev/loop3 for Ceph OSDs
# on virtual deploys (string fragment; unit body continues on unseen lines).
80 LOSETUP_SERVICE = """[Unit]
81 Description=Setup loop devices
86 ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
87 ExecStop=/sbin/losetup -d /dev/loop3
92 WantedBy=multi-user.target
# Compute-role services dropped when compute services are merged into the
# controller role for all-in-one deployments (see prep_env).
95 DUPLICATE_COMPUTE_SERVICES = [
96 'OS::TripleO::Services::ComputeNeutronCorePlugin',
97 'OS::TripleO::Services::ComputeNeutronMetadataAgent',
98 'OS::TripleO::Services::ComputeNeutronOvsAgent',
99 'OS::TripleO::Services::ComputeNeutronL3Agent'
# NFS-related parameters forced to False for all-in-one deploys (fragment;
# list opener not visible here).
105 'CinderNfsEnabledBackend'
# NOTE(review): docstring delimiters and the env_list initialization expected
# between the signature and the loop are on lines not visible in this view.
# The None default (instead of a mutable []) is the correct idiom here.
109 def build_sdn_env_list(ds, sdn_map, env_list=None):
111 Builds a list of SDN environment files to be used in the deploy cmd.
113 This function recursively searches an sdn_map. First the sdn controller is
114 matched and then the function looks for enabled features for that
115 controller to determine which environment files should be used. By
116 default the feature will be checked if set to true in deploy settings to be
117 added to the list. If a feature does not have a boolean value, then the
118 key and value pair to compare with are checked as a tuple (k,v).
120 :param ds: deploy settings
121 :param sdn_map: SDN map to recursively search
122 :param env_list: recursive var to hold previously found env_list
123 :return: A list of env files
# A key matches either the configured SDN controller itself or a truthy
# feature flag present in the deploy settings.
127 for k, v in sdn_map.items():
128 if ds['sdn_controller'] == k or (k in ds and ds[k]):
129 if isinstance(v, dict):
130 # Append default SDN env file first
131 # The assumption is that feature-enabled SDN env files
132 # override and do not conflict with previously set default
134 if ds['sdn_controller'] == k and 'default' in v:
135 env_list.append(os.path.join(con.THT_ENV_DIR,
# Recurse into the nested per-controller feature map.
137 env_list.extend(build_sdn_env_list(ds, v))
138 # check if the value is not a boolean
139 elif isinstance(v, tuple):
# Tuple entries: the deploy-settings value is compared against v[0]
# (comparison line not visible here); v[1] is the env file to add.
141 env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
143 env_list.append(os.path.join(con.THT_ENV_DIR, v))
# Nothing matched: fall back to the map's top-level 'default' entry.
# The surrounding try/except (for maps lacking 'default') appears to be
# on lines 145/148, not visible in this view.
144 if len(env_list) == 0:
146 env_list.append(os.path.join(
147 con.THT_ENV_DIR, sdn_map['default']))
149 logging.warning("Unable to find default file for SDN")
154 def get_docker_sdn_files(ds_opts):
156 Returns docker env file for detected SDN
157 :param ds_opts: deploy options
158 :return: list of docker THT env files for an SDN
# VALID_DOCKER_SERVICES maps env filenames valid for docker deploys; a
# non-None value overrides the filename to use under the docker THT dir.
160 docker_services = con.VALID_DOCKER_SERVICES
161 tht_dir = con.THT_DOCKER_ENV_DIR
162 sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
# Rewrite each SDN env path in place to its docker THT equivalent.
# (The trailing "return sdn_env_list" is on a line not visible here.)
163 for i, sdn_file in enumerate(sdn_env_list):
164 sdn_base = os.path.basename(sdn_file)
165 if sdn_base in docker_services:
166 if docker_services[sdn_base] is not None:
168 os.path.join(tht_dir, docker_services[sdn_base])
170 sdn_env_list[i] = os.path.join(tht_dir, sdn_base)
# Builds the "openstack overcloud deploy" command line from deploy settings,
# network settings and inventory, and writes it to <tmp_dir>/deploy_command.
# NOTE(review): the signature tail and several else-branches are on lines not
# visible in this view.
174 def create_deploy_cmd(ds, ns, inv, tmp_dir,
175 virtual, env_file='opnfv-environment.yaml',
178 logging.info("Creating deployment command")
179 deploy_options = ['network-environment.yaml']
181 ds_opts = ds['deploy_options']
# Containerized deploys pull in a docker THT environment first.
183 if ds_opts['containers']:
184 deploy_options.append(os.path.join(con.THT_ENV_DIR,
# HA uses pacemaker; the containerized variant differs from baremetal.
187 if ds['global_params']['ha_enabled']:
188 if ds_opts['containers']:
189 deploy_options.append(os.path.join(con.THT_ENV_DIR,
192 deploy_options.append(os.path.join(con.THT_ENV_DIR,
193 'puppet-pacemaker.yaml'))
196 deploy_options.append(env_file)
# SDN env files: docker variants for container deploys, plain THT env
# files from SDN_FILE_MAP otherwise.
198 if ds_opts['containers']:
199 deploy_options.append('docker-images.yaml')
200 sdn_docker_files = get_docker_sdn_files(ds_opts)
201 for sdn_docker_file in sdn_docker_files:
202 deploy_options.append(sdn_docker_file)
204 deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)
# Feature env files (tacker, congress, barometer, rt_kvm).
206 for k, v in OTHER_FILE_MAP.items():
207 if k in ds_opts and ds_opts[k]:
208 if ds_opts['containers']:
209 deploy_options.append(os.path.join(con.THT_DOCKER_ENV_DIR,
210 "{}.yaml".format(k)))
212 deploy_options.append(os.path.join(con.THT_ENV_DIR, v))
214 # TODO(trozet) Fix this check to look for if ceph is in controller services
215 # and not use name of the file
216 if ds_opts['ceph'] and 'csit' not in env_file:
217 prep_storage_env(ds, ns, virtual, tmp_dir)
218 deploy_options.append(os.path.join(con.THT_ENV_DIR,
219 'storage-environment.yaml'))
# NOTE(review): presumably guarded by an sriov check on a line not shown.
221 prep_sriov_env(ds, tmp_dir)
223 # Check for 'k8s' here intentionally, as we may support other values
224 # such as openstack/openshift for 'vim' option.
225 if ds_opts['vim'] == 'k8s':
226 deploy_options.append('kubernetes-environment.yaml')
229 deploy_options.append('virtual-environment.yaml')
231 deploy_options.append('baremetal-environment.yaml')
233 num_control, num_compute = inv.get_node_counts()
234 if num_control > 1 and not ds['global_params']['ha_enabled']:
236 if platform.machine() == 'aarch64':
237 # aarch64 deploys were not completing in the default 90 mins.
238 # Not sure if this is related to the hardware the OOO support
239 # was developed on or the virtualization support in CentOS
240 # Either way it will probably get better over time as the aarch
241 # support matures in CentOS and deploy time should be tested in
242 # the future so this multiplier can be removed.
243 con.DEPLOY_TIMEOUT *= 2
244 cmd = "openstack overcloud deploy --templates --timeout {} " \
245 .format(con.DEPLOY_TIMEOUT)
# Append every collected env file as a -e argument, then scale/flavor args.
247 for option in deploy_options:
248 cmd += " -e {}".format(option)
249 cmd += " --ntp-server {}".format(ns['ntp'][0])
250 cmd += " --control-scale {}".format(num_control)
251 cmd += " --compute-scale {}".format(num_compute)
252 cmd += ' --control-flavor control --compute-flavor compute'
254 cmd += ' --networks-file network_data.yaml'
# Fall back to qemu (no HW accel) when nested KVM is unavailable or on
# aarch64. NOTE(review): the default libvirt_type assignment is on a line
# not visible here — confirm it defaults to 'kvm'.
256 if virtual and (platform.machine() != 'aarch64'):
257 with open('/sys/module/kvm_intel/parameters/nested') as f:
258 nested_kvm = f.read().strip()
259 if nested_kvm != 'Y':
260 libvirt_type = 'qemu'
261 elif virtual and (platform.machine() == 'aarch64'):
262 libvirt_type = 'qemu'
263 cmd += ' --libvirt-type {}'.format(libvirt_type)
264 if platform.machine() == 'aarch64':
265 cmd += ' --override-ansible-cfg /home/stack/ansible.cfg '
266 logging.info("Deploy command set: {}".format(cmd))
# Persist the command so the undercloud can execute it later.
268 with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
# Customizes the overcloud qcow2 image (via virt-customize commands) for the
# selected SDN controller, dataplane and features, then returns the set of
# container names that were patched. NOTE(review): many virt_cmds list
# openers and guard conditions are on lines not visible in this view.
273 def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
276 Locates sdn image and preps for deployment.
277 :param ds: deploy settings
278 :param ns: network settings
279 :param img: sdn image
280 :param tmp_dir: dir to store modified sdn image
281 :param root_pw: password to configure for overcloud image
282 :param docker_tag: Docker image tag for RDO version (default None)
283 :param patches: List of patches to apply to overcloud image
286 # TODO(trozet): Come up with a better way to organize this logic in this
288 logging.info("Preparing image: {} for deployment".format(img))
# Fail fast if the base image is missing.
289 if not os.path.isfile(img):
290 logging.error("Missing SDN image {}".format(img))
291 raise ApexDeployException("Missing SDN image file: {}".format(img))
293 ds_opts = ds['deploy_options']
295 sdn = ds_opts['sdn_controller']
296 patched_containers = set()
297 # we need this due to rhbz #1436021
298 # fixed in systemd-219-37.el7
300 logging.info("Neutron openvswitch-agent disabled")
303 "rm -f /etc/systemd/system/multi-user.target.wants/"
304 "neutron-openvswitch-agent.service"},
307 "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
# Propagate undercloud proxy settings into the overcloud image.
311 if ns.get('http_proxy', ''):
314 "echo 'http_proxy={}' >> /etc/environment".format(
317 if ns.get('https_proxy', ''):
320 "echo 'https_proxy={}' >> /etc/environment".format(
# Work on a copy so the original image stays pristine.
323 tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
324 shutil.copyfile(img, tmp_oc_image)
325 logging.debug("Temporary overcloud image stored as: {}".format(
# NOTE(review): quagga/zrpcd injection appears guarded by a vpn/bgpvpn
# check on lines not visible here — confirm against full source.
329 oc_builder.inject_quagga(tmp_oc_image, tmp_dir)
330 virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
333 "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
334 "/opt/quagga/etc/init.d/zrpcd_start.sh"})
336 con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
339 con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
340 "init.d/zrpcd_start.sh' /etc/rc.local "})
342 con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
343 "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
344 logging.info("ZRPCD process started")
# DPDK/FDIO dataplanes need vfio_pci / uio_pci_generic modules loaded at
# boot; generate the module files locally then upload them to the image.
346 dataplane = ds_opts['dataplane']
347 if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
348 logging.info("Enabling kernel modules for dpdk")
349 # file to module mapping
351 os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
352 os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
354 for mod_file, mod in uio_types.items():
355 with open(mod_file, 'w') as fh:
356 fh.write('#!/bin/bash\n')
357 fh.write('exec /sbin/modprobe {}'.format(mod))
361 {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
363 {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
364 "{}".format(os.path.basename(mod_file))}
# NOTE(review): presumably guarded by "if root_pw:" on an unseen line.
367 pw_op = "password:{}".format(root_pw)
368 virt_cmds.append({con.VIRT_PW: pw_op})
# SFC with plain OVS needs the NSH-capable OVS; for ODL a pinned custom
# OVS is downgraded into the image (see FIXME below).
370 if dataplane == 'ovs':
372 oc_builder.inject_ovs_nsh(tmp_oc_image, tmp_dir)
373 elif sdn == 'opendaylight':
374 # FIXME(trozet) remove this after RDO is updated with fix for
375 # https://bugzilla.redhat.com/show_bug.cgi?id=1544892
376 ovs_file = os.path.basename(con.CUSTOM_OVS)
377 ovs_url = con.CUSTOM_OVS.replace(ovs_file, '')
378 utils.fetch_upstream_and_unpack(dest=tmp_dir, url=ovs_url,
381 {con.VIRT_UPLOAD: "{}:/root/".format(os.path.join(tmp_dir,
383 {con.VIRT_RUN_CMD: "yum downgrade -y /root/{}".format(
# FDIO-specific image surgery: neutron patch plus VPP package swap.
387 if dataplane == 'fdio':
388 # Patch neutron with using OVS external interface for router
389 # and add generic linux NS interface driver
391 {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
392 "-p1 < neutron-patch-NSDriver.patch"})
395 {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
396 {con.VIRT_RUN_CMD: "yum install -y "
397 "/root/nosdn_vpp_rpms/*.rpm"}
# Undercloud admin IP is used as the docker registry/patch source host.
400 undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
401 'installer_vm']['ip']
402 if sdn == 'opendaylight':
403 oc_builder.inject_opendaylight(
404 odl_version=ds_opts['odl_version'],
407 uc_ip=undercloud_admin_ip,
408 os_version=ds_opts['os_version'],
409 docker_tag=docker_tag,
412 patched_containers = patched_containers.union({'opendaylight'})
# Apply user-supplied upstream patches against the matching branch.
# NOTE(review): the "if patches:" guard appears to be on an unseen line.
415 if ds_opts['os_version'] == 'master':
416 branch = ds_opts['os_version']
418 branch = "stable/{}".format(ds_opts['os_version'])
419 logging.info('Adding patches to overcloud')
420 patched_containers = patched_containers.union(
421 c_builder.add_upstream_patches(patches,
422 tmp_oc_image, tmp_dir,
424 uc_ip=undercloud_admin_ip,
425 docker_tag=docker_tag))
426 # if containers with ceph, and no ceph device we need to use a
427 # persistent loop device for Ceph OSDs
428 if docker_tag and ds_opts['ceph_device'] == '/dev/loop3':
429 tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
430 with open(tmp_losetup, 'w') as fh:
431 fh.write(LOSETUP_SERVICE)
433 {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(tmp_losetup)
435 {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size {}'
436 .format(LOOP_DEVICE_SIZE)},
437 {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
438 {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
440 # TODO(trozet) remove this after LP#173474 is fixed
441 dhcp_unit = '/usr/lib/systemd/system/dhcp-interface@.service'
443 {con.VIRT_RUN_CMD: "crudini --del {} Unit "
444 "ConditionPathExists".format(dhcp_unit)})
# Set up an NFS server inside the image exporting /glance, /cinder and
# /nova (used for shared storage on virtual deploys).
447 {con.VIRT_INSTALL: "nfs-utils"},
448 {con.VIRT_RUN_CMD: "ln -s /usr/lib/systemd/system/nfs-server.service "
449 "/etc/systemd/system/multi-user.target.wants/"
450 "nfs-server.service"},
451 {con.VIRT_RUN_CMD: "mkdir -p /glance"},
452 {con.VIRT_RUN_CMD: "mkdir -p /cinder"},
453 {con.VIRT_RUN_CMD: "mkdir -p /nova"},
454 {con.VIRT_RUN_CMD: "chmod 777 /glance"},
455 {con.VIRT_RUN_CMD: "chmod 777 /cinder"},
456 {con.VIRT_RUN_CMD: "chmod 777 /nova"},
457 {con.VIRT_RUN_CMD: "chown nfsnobody:nfsnobody /glance"},
458 {con.VIRT_RUN_CMD: "chown nfsnobody:nfsnobody /cinder"},
459 {con.VIRT_RUN_CMD: "chown nfsnobody:nfsnobody /nova"},
460 {con.VIRT_RUN_CMD: "echo '/glance *(rw,sync,"
461 "no_root_squash,no_acl)' > /etc/exports"},
462 {con.VIRT_RUN_CMD: "echo '/cinder *(rw,sync,"
463 "no_root_squash,no_acl)' >> /etc/exports"},
464 {con.VIRT_RUN_CMD: "echo '/nova *(rw,sync,"
465 "no_root_squash,no_acl)' >> /etc/exports"},
466 {con.VIRT_RUN_CMD: "exportfs -avr"},
# Execute every accumulated virt-customize command against the image copy.
468 virt_utils.virt_customize(virt_cmds, tmp_oc_image)
469 logging.info("Overcloud image customization complete")
470 return patched_containers
# NOTE(review): the enclosing "def make_ssh_key():" line and the docstring
# delimiters are not visible in this view; only the body fragment remains.
475 Creates public and private ssh keys with 1024 bit RSA encryption
476 :return: private, public key
# NOTE(review): the key_size argument is on an unseen line — confirm it
# matches the "1024 bit" claim above; 1024-bit RSA is weak by modern
# standards and should be at least 2048.
478 key = rsa.generate_private_key(
479 backend=crypto_default_backend(),
480 public_exponent=65537,
# Serialize the private key as unencrypted PEM/PKCS8.
484 private_key = key.private_bytes(
485 crypto_serialization.Encoding.PEM,
486 crypto_serialization.PrivateFormat.PKCS8,
487 crypto_serialization.NoEncryption())
# Public half in OpenSSH authorized_keys format.
488 public_key = key.public_key().public_bytes(
489 crypto_serialization.Encoding.OpenSSH,
490 crypto_serialization.PublicFormat.OpenSSH
# Both returned as str so prep_env can embed them in the env YAML.
492 return private_key.decode('utf-8'), public_key.decode('utf-8')
# Rewrites the opnfv-environment file line-by-line (fileinput inplace) based
# on deploy/network settings, then post-processes it as YAML for all-in-one
# deployments. NOTE(review): several guard lines, else-branches and variable
# initializations are on lines not visible in this view.
495 def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
497 Creates modified opnfv/network environments for deployment
498 :param ds: deploy settings
499 :param ns: network settings
500 :param inv: node inventory
501 :param opnfv_env: file path for opnfv-environment file
502 :param net_env: file path for network-environment file
503 :param tmp_dir: Apex tmp dir
507 logging.info("Preparing opnfv-environment and network-environment files")
508 ds_opts = ds['deploy_options']
# Edit a copy in tmp_dir; the source env file stays untouched.
509 tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
510 shutil.copyfile(opnfv_env, tmp_opnfv_env)
511 tenant_nic_map = ns['networks']['tenant']['nic_mapping']
513 tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
514 tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
515 external_nic_map = ns['networks']['external'][0]['nic_mapping']
516 external_nic = dict()
517 external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]
# Fresh SSH keypair injected into the env file further below.
520 private_key, public_key = make_ssh_key()
522 num_control, num_compute = inv.get_node_counts()
523 if num_control > 1 and not ds['global_params']['ha_enabled']:
526 # Make easier/faster variables to index in the file editor
# Flatten performance settings; NOTE(review): the 'perf' flag and the
# default (else) assignments for these locals are on unseen lines.
527 if 'performance' in ds_opts:
530 if 'vpp' in ds_opts['performance']['Compute']:
531 perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
534 if 'vpp' in ds_opts['performance']['Controller']:
535 perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
540 if 'ovs' in ds_opts['performance']['Compute']:
541 perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
546 if 'kernel' in ds_opts['performance']['Compute']:
547 perf_kern_comp = ds_opts['performance']['Compute']['kernel']
549 perf_kern_comp = None
# VLAN tenant segmentation changes several Neutron/ODL parameters below.
553 tenant_settings = ns['networks']['tenant']
554 tenant_vlan_enabled = 'tenant' in ns.enabled_network_list and \
555 ns['networks']['tenant'].get('segmentation_type') == 'vlan'
557 # Modify OPNFV environment
558 # TODO: Change to build a dict and outputting yaml rather than parsing
# fileinput with inplace=True redirects stdout into the file, so each
# iteration must (on unseen lines) print output_line to keep the line.
559 for line in fileinput.input(tmp_opnfv_env, inplace=True):
560 line = line.strip('\n')
562 if 'CloudDomain' in line:
563 output_line = " CloudDomain: {}".format(ns['domain_name'])
564 elif 'replace_private_key' in line:
565 output_line = " private_key: |\n"
# NOTE(review): this inner loop shadows the outer `line` variable;
# harmless only because the outer loop reassigns it next iteration.
567 for line in private_key.splitlines():
568 key_out += " {}\n".format(line)
569 output_line += key_out
570 elif 'replace_public_key' in line:
571 output_line = " public_key: '{}'".format(public_key)
572 elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
573 'resource_registry' in line:
574 output_line = "resource_registry:\n" \
575 " OS::TripleO::NodeUserData: first-boot.yaml"
576 elif 'ComputeExtraConfigPre' in line and \
577 ds_opts['dataplane'] == 'ovs_dpdk':
578 output_line = ' OS::TripleO::ComputeExtraConfigPre: ' \
579 './ovs-dpdk-preconfig.yaml'
580 elif 'NeutronNetworkVLANRanges' in line:
582 if tenant_vlan_enabled:
583 if ns['networks']['tenant']['overlay_id_range']:
584 vlan_setting = ns['networks']['tenant']['overlay_id_range']
585 if 'datacentre' not in vlan_setting:
586 vlan_setting += ',datacentre:1:1000'
587 # SRIOV networks are VLAN based provider networks. In order to
588 # simplify the deployment, nfv_sriov will be the default physnet.
589 # VLANs are not needed in advance, and the user will have to create
590 # the network specifying the segmentation-id.
593 vlan_setting += ",nfv_sriov"
595 vlan_setting = "datacentre:1:1000,nfv_sriov"
597 output_line = " NeutronNetworkVLANRanges: " + vlan_setting
598 elif 'NeutronBridgeMappings' in line and tenant_vlan_enabled:
599 if tenant_settings['overlay_id_range']:
600 physnets = tenant_settings['overlay_id_range'].split(',')
601 output_line = " NeutronBridgeMappings: "
# Non-datacentre physnets map to br-vlan; datacentre stays on br-ex.
602 for physnet in physnets:
603 physnet_name = physnet.split(':')[0]
604 if physnet_name != 'datacentre':
605 output_line += "{}:br-vlan,".format(physnet_name)
606 output_line += "datacentre:br-ex"
607 elif 'OpenDaylightProviderMappings' in line and tenant_vlan_enabled \
608 and ds_opts['sdn_controller'] == 'opendaylight':
609 if tenant_settings['overlay_id_range']:
610 physnets = tenant_settings['overlay_id_range'].split(',')
611 output_line = " OpenDaylightProviderMappings: "
612 for physnet in physnets:
613 physnet_name = physnet.split(':')[0]
614 if physnet_name != 'datacentre':
615 output_line += "{}:br-vlan,".format(physnet_name)
616 output_line += "datacentre:br-ex"
617 elif 'NeutronNetworkType' in line and tenant_vlan_enabled:
618 output_line = " NeutronNetworkType: vlan\n" \
619 " NeutronTunnelTypes: ''"
# SDN-specific rewrites: ODL/VPP routing node, no-SDN FDIO physnets,
# and ODL DHCP-agent placement.
621 if ds_opts['sdn_controller'] == 'opendaylight' and \
622 'odl_vpp_routing_node' in ds_opts:
623 if 'opendaylight::vpp_routing_node' in line:
624 output_line = (" opendaylight::vpp_routing_node: {}.{}"
625 .format(ds_opts['odl_vpp_routing_node'],
627 elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
628 if 'NeutronVPPAgentPhysnets' in line:
629 # VPP interface tap0 will be used for external network
631 output_line = (" NeutronVPPAgentPhysnets: "
632 "'datacentre:{},external:tap0'"
633 .format(tenant_nic['Controller']))
634 elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
636 if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
638 elif 'NeutronDhcpAgentsPerNetwork' in line:
640 num_dhcp_agents = num_control
642 num_dhcp_agents = num_compute
643 output_line = (" NeutronDhcpAgentsPerNetwork: {}"
644 .format(num_dhcp_agents))
645 elif 'ComputeServices' in line:
646 output_line = (" ComputeServices:\n"
647 " - OS::TripleO::Services::NeutronDhcpAgent")
# Per-role VPP/honeycomb performance ExtraConfig rendering.
650 for role in 'NovaCompute', 'Controller':
651 if role == 'NovaCompute':
652 perf_opts = perf_vpp_comp
654 perf_opts = perf_vpp_ctrl
655 cfg = "{}ExtraConfig".format(role)
656 if cfg in line and perf_opts:
658 if 'main-core' in perf_opts:
659 perf_line += ("\n fdio::vpp_cpu_main_core: '{}'"
660 .format(perf_opts['main-core']))
661 if 'corelist-workers' in perf_opts:
663 "fdio::vpp_cpu_corelist_workers: '{}'"
664 .format(perf_opts['corelist-workers']))
665 if ds_opts['sdn_controller'] == 'opendaylight' and \
666 ds_opts['dataplane'] == 'fdio':
667 if role == 'NovaCompute':
669 "tripleo::profile::base::neutron::"
670 "agents::honeycomb::"
671 "interface_role_mapping:"
672 " ['{}:tenant-interface',"
673 "'{}:public-interface']"
674 .format(tenant_nic[role],
678 "tripleo::profile::base::neutron::"
679 "agents::honeycomb::"
680 "interface_role_mapping:"
681 " ['{}:tenant-interface']"
682 .format(tenant_nic[role]))
684 output_line = (" {}:{}".format(cfg, perf_line))
# OVS-DPDK tuning: translate OVS_PERF_MAP params from deploy settings.
686 if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
687 for k, v in OVS_PERF_MAP.items():
688 if k in line and v in perf_ovs_comp:
689 output_line = " {}: '{}'".format(k, perf_ovs_comp[v])
692 # (FIXME) use compute's kernel settings for all nodes for now.
694 if 'NovaSchedulerDefaultFilters' in line:
696 " NovaSchedulerDefaultFilters: 'RamFilter," \
697 "ComputeFilter,AvailabilityZoneFilter," \
698 "ComputeCapabilitiesFilter," \
699 "ImagePropertiesFilter,NUMATopologyFilter'"
700 elif 'ComputeKernelArgs' in line:
702 for k, v in perf_kern_comp.items():
703 kernel_args += "{}={} ".format(k, v)
705 output_line = " ComputeKernelArgs: '{}'".\
710 # Merge compute services into control services if only a single
# NOTE(review): the all-in-one guard condition (num_control/num_compute
# check) is on lines not visible in this view.
713 with open(tmp_opnfv_env, 'r') as fh:
714 data = yaml.safe_load(fh)
715 param_data = data['parameter_defaults']
716 logging.info("All in one deployment detected")
717 logging.info("Disabling NFS in env file")
718 # Check to see if any parameters are set for Compute
719 for param in param_data.keys():
720 if param != 'ComputeServices' and param.startswith('Compute'):
721 logging.warning("Compute parameter set, but will not be used "
722 "in deployment: {}. Please use Controller "
723 "based parameters when using All-in-one "
724 "deployments".format(param))
725 if param in NFS_VARS:
726 param_data[param] = False
727 logging.info("Checking if service merging required into "
729 if ('ControllerServices' in param_data and 'ComputeServices' in
731 logging.info("Services detected in environment file. Merging...")
732 ctrl_services = param_data['ControllerServices']
733 cmp_services = param_data['ComputeServices']
# Union the two service lists, then strip compute-only duplicates.
734 param_data['ControllerServices'] = list(set().union(
735 ctrl_services, cmp_services))
736 for dup_service in DUPLICATE_COMPUTE_SERVICES:
737 if dup_service in param_data['ControllerServices']:
738 param_data['ControllerServices'].remove(dup_service)
739 param_data.pop('ComputeServices')
740 logging.debug("Merged controller services: {}".format(
741 pprint.pformat(param_data['ControllerServices'])
744 logging.info("No services detected in env file, not merging "
# Write the merged YAML back to the tmp env file.
746 with open(tmp_opnfv_env, 'w') as fh:
747 yaml.safe_dump(data, fh, default_flow_style=False)
749 logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
# Debug dump of the final env content.
750 with open(tmp_opnfv_env, 'r') as fh:
751 logging.debug("opnfv-environment content is : {}".format(
752 pprint.pformat(yaml.safe_load(fh.read()))
# Generates a Ceph auth key: packed header (type=1, timestamp, secret length)
# followed by the random secret, base64-encoded. NOTE(review): the line
# assigning `key` (the random secret bytes) is not visible in this view.
756 def generate_ceph_key():
758 header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
759 return base64.b64encode(header + key)
762 def prep_storage_env(ds, ns, virtual, tmp_dir):
764 Creates storage environment file for deployment. Source file is copied by
765 undercloud playbook to host.
772 ds_opts = ds['deploy_options']
773 storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
# The file must already have been copied into tmp_dir by the playbook.
774 if not os.path.isfile(storage_file):
775 logging.error("storage-environment file is not in tmp directory: {}. "
776 "Check if file was copied from "
777 "undercloud".format(tmp_dir))
778 raise ApexDeployException("storage-environment file not copied from "
# In-place edit: generate a fresh FSID and Ceph keys per deployment.
780 for line in fileinput.input(storage_file, inplace=True):
781 line = line.strip('\n')
782 if 'CephClusterFSID' in line:
783 print(" CephClusterFSID: {}".format(str(uuid.uuid4())))
784 elif 'CephMonKey' in line:
785 print(" CephMonKey: {}".format(generate_ceph_key().decode(
787 elif 'CephAdminKey' in line:
788 print(" CephAdminKey: {}".format(generate_ceph_key().decode(
790 elif 'CephClientKey' in line:
791 print(" CephClientKey: {}".format(generate_ceph_key().decode(
# Containerized deploys get ceph-ansible parameters; pool/pg sizing is
# kept small so total pgs stay under the mon limit (see comment below).
796 if ds_opts['containers']:
799 # max pgs allowed are calculated as num_mons * 200. Therefore we
800 # set number of pgs and pools so that the total will be less:
801 # num_pgs * num_pools * num_osds
802 ceph_params['CephPoolDefaultSize'] = 2
803 ceph_params['CephPoolDefaultPgNum'] = 32
805 ceph_params['CephAnsibleExtraConfig'] = {
806 'centos_package_dependencies': [],
807 'ceph_osd_docker_memory_limit': '1g',
808 'ceph_mds_docker_memory_limit': '1g',
810 ceph_device = ds_opts['ceph_device']
811 ceph_params['CephAnsibleDisksConfig'] = {
812 'devices': [ceph_device],
814 'osd_scenario': 'collocated'
# Merge the collected params into the storage env file's
# parameter_defaults section.
816 utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
817 # TODO(trozet): remove following block as we only support containers now
818 elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
819 with open(storage_file, 'a') as fh:
820 fh.write(' ExtraConfig:\n')
821 fh.write(" ceph::profile::params::osds:{{{}:{{}}}}\n".format(
822 ds_opts['ceph_device']
826 def prep_sriov_env(ds, tmp_dir):
828 Creates SRIOV environment file for deployment. Source file is copied by
829 undercloud playbook to host.
# The 'sriov' deploy option holds the SR-IOV capable interface name.
834 ds_opts = ds['deploy_options']
835 sriov_iface = ds_opts['sriov']
836 sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
# Fail fast if the playbook did not copy the template into tmp_dir.
837 if not os.path.isfile(sriov_file):
838 logging.error("sriov-environment file is not in tmp directory: {}. "
839 "Check if file was copied from "
840 "undercloud".format(tmp_dir))
841 raise ApexDeployException("sriov-environment file not copied from "
843 # TODO(rnoriega): Instead of line editing, refactor this code to load
844 # yaml file into a dict, edit it and write the file back.
845 for line in fileinput.input(sriov_file, inplace=True):
846 line = line.strip('\n')
# line[3:] appears to strip a leading comment marker to uncomment these
# scheduler filter entries in the template — confirm against template.
847 if 'NovaSchedulerDefaultFilters' in line:
848 print(" {}".format(line[3:]))
849 elif 'NovaSchedulerAvailableFilters' in line:
850 print(" {}".format(line[3:]))
851 elif 'NeutronPhysicalDevMappings' in line:
852 print(" NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
853 .format(sriov_iface))
854 elif 'NeutronSriovNumVFs' in line:
855 print(" NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
856 elif 'NovaPCIPassthrough' in line:
857 print(" NovaPCIPassthrough:")
858 elif 'devname' in line:
859 print(" - devname: \"{}\"".format(sriov_iface))
860 elif 'physical_network' in line:
861 print(" physical_network: \"nfv_sriov\"")
866 def external_network_cmds(ns, ds):
868 Generates external network openstack commands
869 :param ns: network settings
870 :param ds: deploy settings
871 :return: list of commands to configure external network
# FDIO without ODL uses the 'external' physnet; everything else uses the
# conventional 'datacentre' physnet.
873 ds_opts = ds['deploy_options']
874 external_physnet = 'datacentre'
875 if ds_opts['dataplane'] == 'fdio' and \
876 ds_opts['sdn_controller'] != 'opendaylight':
877 external_physnet = 'external'
# Prefer a dedicated external network; otherwise fall back to the admin
# network and its introspection range. NOTE(review): the `external` flag
# assignment and `cmds` list initialization are on unseen lines.
878 if 'external' in ns.enabled_network_list:
879 net_config = ns['networks']['external'][0]
881 pool_start, pool_end = net_config['floating_ip_range']
883 net_config = ns['networks']['admin']
885 pool_start, pool_end = ns['apex']['networks']['admin'][
886 'introspection_range']
887 nic_config = net_config['nic_mapping']
888 gateway = net_config['gateway']
890 # create network command
891 if nic_config['compute']['vlan'] == 'native':
894 ext_type = "vlan --provider-segment {}".format(nic_config[
896 cmds.append("openstack network create external --project service "
897 "--external --provider-network-type {} "
898 "--provider-physical-network {}"
899 .format(ext_type, external_physnet))
900 # create subnet command
901 cidr = net_config['cidr']
902 subnet_cmd = "openstack subnet create external-subnet --project " \
903 "service --network external --no-dhcp --gateway {} " \
904 "--allocation-pool start={},end={} --subnet-range " \
905 "{}".format(gateway, pool_start, pool_end, str(cidr))
# IPv6 external networks need an explicit ip-version argument.
906 if external and cidr.version == 6:
907 subnet_cmd += ' --ip-version 6'
908 cmds.append(subnet_cmd)
909 logging.debug("Neutron external network commands determined "
910 "as: {}".format(cmds))
# Builds the list of congress datasource-create commands from an overcloudrc
# file. NOTE(review): this function appears truncated in this view (the
# commands-list accumulation and return are on lines past the visible end).
914 def create_congress_cmds(overcloud_file):
915 drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
916 overcloudrc = parsers.parse_overcloudrc(overcloud_file)
917 logging.info("Creating congress commands")
# Datasource config values pulled from overcloudrc; the error branch below
# suggests this is wrapped in a try/except KeyError on unseen lines.
920 "username={}".format(overcloudrc['OS_USERNAME']),
921 "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
922 "password={}".format(overcloudrc['OS_PASSWORD']),
923 "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
926 logging.error("Unable to find all keys required for congress in "
927 "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
928 "OS_PASSWORD, OS_AUTH_URL. Please check overcloudrc "
929 "file: {}".format(overcloud_file))
932 ds_cfg = '--config ' + ' --config '.join(ds_cfg)
# One datasource command per driver; doctor needs no credentials, and
# nova (per the api_version line) appears to pin a microversion.
934 for driver in drivers:
935 if driver == 'doctor':
936 cmd = "{} \"{}\"".format(driver, driver)
938 cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
940 cmd += ' --config api_version="2.34"'
941 logging.debug("Congress command created: {}".format(cmd))