1 ##############################################################################
2 # Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
21 import apex.builders.overcloud_builder as oc_builder
22 import apex.builders.common_builder as c_builder
24 from apex.common import constants as con
25 from apex.common.exceptions import ApexDeployException
26 from apex.common import parsers
27 from apex.common import utils
28 from apex.virtual import utils as virt_utils
29 from cryptography.hazmat.primitives import serialization as \
31 from cryptography.hazmat.primitives.asymmetric import rsa
32 from cryptography.hazmat.backends import default_backend as \
33 crypto_default_backend
# NOTE(review): this region is a garbled extract -- the original file's line
# numbers are fused into each line, indentation is stripped, and several
# lines (including the dict headers such as SDN_FILE_MAP, OTHER_FILE_MAP and
# OVS_PERF_MAP) are elided.  TODO: restore from upstream before editing.
#
# The entries below map, in order:
#  - SDN controller features -> tripleo-heat-template (THT) environment file
#    names, consumed by build_sdn_env_list() via SDN_FILE_MAP;
#  - deploy options (tacker/congress/barometer/rt_kvm) -> "enable_*" env
#    files, consumed via OTHER_FILE_MAP in create_deploy_cmd();
#  - THT DPDK parameter names -> deploy-settings performance keys, consumed
#    via OVS_PERF_MAP in prep_env().
38 'sfc': 'neutron-sfc-opendaylight.yaml',
39 'vpn': 'neutron-bgpvpn-opendaylight.yaml',
40 'gluon': 'gluon.yaml',
42 'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
43 'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
44 'default': 'neutron-opendaylight-honeycomb.yaml'
46 'l2gw': 'neutron-l2gw-opendaylight.yaml',
47 'sriov': 'neutron-opendaylight-sriov.yaml',
48 'default': 'neutron-opendaylight.yaml',
51 'sfc': 'neutron-onos-sfc.yaml',
52 'default': 'neutron-onos.yaml'
54 'ovn': 'neutron-ml2-ovn.yaml',
56 'vpp': 'neutron-ml2-vpp.yaml',
57 'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
62 'tacker': 'enable_tacker.yaml',
63 'congress': 'enable_congress.yaml',
64 'barometer': 'enable_barometer.yaml',
65 'rt_kvm': 'enable_rt_kvm.yaml'
69 'HostCpusList': 'dpdk_cores',
70 'NeutronDpdkCoreList': 'pmd_cores',
71 'NeutronDpdkSocketMemory': 'socket_memory',
72 'NeutronDpdkMemoryChannels': 'memory_channels'
# In-image path of the ODL netvirt-VPP RPM (string continuation line elided).
75 ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
# Size of the /srv/data.img backing file created for the Ceph loop device
# (see LOSETUP_SERVICE and prep_image()).
78 LOOP_DEVICE_SIZE = "10G"
# NOTE(review): LOSETUP_SERVICE below is a systemd unit file held in a
# triple-quoted string whose closing quotes are elided in this extract -- no
# comments are inserted past this point to avoid landing inside the string
# literal.  prep_image() uploads this unit into the overcloud image and it
# attaches /dev/loop3 to /srv/data.img for Ceph OSDs when ceph_device is
# '/dev/loop3'.  Further below (dict/list headers also elided):
# DUPLICATE_COMPUTE_SERVICES lists services removed from the merged
# ControllerServices in all-in-one deployments (see prep_env()), and the
# NFS-related parameters (e.g. CinderNfsEnabledBackend) are forced to False
# there via NFS_VARS.
80 LOSETUP_SERVICE = """[Unit]
81 Description=Setup loop devices
86 ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
87 ExecStop=/sbin/losetup -d /dev/loop3
92 WantedBy=multi-user.target
95 DUPLICATE_COMPUTE_SERVICES = [
96 'OS::TripleO::Services::ComputeNeutronCorePlugin',
97 'OS::TripleO::Services::ComputeNeutronMetadataAgent',
98 'OS::TripleO::Services::ComputeNeutronOvsAgent',
99 'OS::TripleO::Services::ComputeNeutronL3Agent'
105 'CinderNfsEnabledBackend'
# NOTE(review): garbled extract -- original line numbers fused in, indentation
# stripped, and some lines elided (docstring quotes, `env_list = list()`
# initialization, and the `return env_list` lines are missing here).
# Purpose (per visible docstring): recursively walk sdn_map, matching first
# the configured sdn_controller and then its enabled features, collecting the
# THT environment files to pass to the deploy command.
109 def build_sdn_env_list(ds, sdn_map, env_list=None):
111 Builds a list of SDN environment files to be used in the deploy cmd.
113 This function recursively searches an sdn_map. First the sdn controller is
114 matched and then the function looks for enabled features for that
115 controller to determine which environment files should be used. By
116 default the feature will be checked if set to true in deploy settings to be
117 added to the list. If a feature does not have a boolean value, then the
118 key and value pair to compare with are checked as a tuple (k,v).
120 :param ds: deploy settings
121 :param sdn_map: SDN map to recursively search
122 :param env_list: recursive var to hold previously found env_list
123 :return: A list of env files
# A key matches when it is the configured SDN controller or a truthy feature
# flag in deploy settings; dict values recurse, tuple values compare (k, v).
127 for k, v in sdn_map.items():
128 if ds['sdn_controller'] == k or (k in ds and ds[k]):
129 if isinstance(v, dict):
130 # Append default SDN env file first
131 # The assumption is that feature-enabled SDN env files
132 # override and do not conflict with previously set default
134 if ds['sdn_controller'] == k and 'default' in v:
135 env_list.append(os.path.join(con.THT_ENV_DIR,
137 env_list.extend(build_sdn_env_list(ds, v))
138 # check if the value is not a boolean
139 elif isinstance(v, tuple):
141 env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
143 env_list.append(os.path.join(con.THT_ENV_DIR, v))
# Fall back to the map's top-level 'default' entry when nothing matched;
# warn when no default exists either.
144 if len(env_list) == 0:
146 env_list.append(os.path.join(
147 con.THT_ENV_DIR, sdn_map['default']))
149 logging.warning("Unable to find default file for SDN")
# NOTE(review): garbled extract with elided lines (docstring quotes, the
# assignment target continued on the elided line 167, and the return).
# Purpose (per visible docstring): translate the SDN env file list into the
# docker THT variants when a containerized equivalent exists.
154 def get_docker_sdn_files(ds_opts):
156 Returns docker env file for detected SDN
157 :param ds_opts: deploy options
158 :return: list of docker THT env files for an SDN
160 docker_services = con.VALID_DOCKER_SERVICES
161 tht_dir = con.THT_DOCKER_ENV_DIR[ds_opts['os_version']]
162 sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
# Rewrite each env file path in place: use the service's docker override
# file when one is registered, otherwise the same basename under the
# docker THT directory.
163 for i, sdn_file in enumerate(sdn_env_list):
164 sdn_base = os.path.basename(sdn_file)
165 if sdn_base in docker_services:
166 if docker_services[sdn_base] is not None:
168 os.path.join(tht_dir, docker_services[sdn_base])
170 sdn_env_list[i] = os.path.join(tht_dir, sdn_base)
# NOTE(review): garbled extract -- original line numbers fused in, indentation
# stripped, several branch bodies and `else:` lines elided (e.g. the docstring,
# the non-container HA branch continuation, virtual-vs-baremetal guard around
# lines 230-233, and the final write of the command file).
# Purpose: assemble the `openstack overcloud deploy` command line from deploy
# settings (ds), network settings (ns) and inventory (inv), and persist it to
# <tmp_dir>/deploy_command.
174 def create_deploy_cmd(ds, ns, inv, tmp_dir,
175 virtual, env_file='opnfv-environment.yaml',
178 logging.info("Creating deployment command")
179 deploy_options = ['network-environment.yaml']
181 ds_opts = ds['deploy_options']
183 if ds_opts['containers']:
184 deploy_options.append(os.path.join(con.THT_ENV_DIR,
187 if ds['global_params']['ha_enabled']:
188 if ds_opts['containers']:
189 deploy_options.append(os.path.join(con.THT_ENV_DIR,
192 deploy_options.append(os.path.join(con.THT_ENV_DIR,
193 'puppet-pacemaker.yaml'))
196 deploy_options.append(env_file)
198 if ds_opts['containers']:
199 deploy_options.append('docker-images.yaml')
200 sdn_docker_files = get_docker_sdn_files(ds_opts)
201 for sdn_docker_file in sdn_docker_files:
202 deploy_options.append(sdn_docker_file)
204 deploy_options.append('sdn-images.yaml')
206 deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)
208 for k, v in OTHER_FILE_MAP.items():
209 if k in ds_opts and ds_opts[k]:
210 if ds_opts['containers']:
211 deploy_options.append(os.path.join(con.THT_DOCKER_ENV_DIR,
212 "{}.yaml".format(k)))
214 deploy_options.append(os.path.join(con.THT_ENV_DIR, v))
216 # TODO(trozet) Fix this check to look for if ceph is in controller services
217 # and not use name of the file
218 if ds_opts['ceph'] and 'csit' not in env_file:
219 prep_storage_env(ds, ns, virtual, tmp_dir)
220 deploy_options.append(os.path.join(con.THT_ENV_DIR,
221 'storage-environment.yaml'))
223 prep_sriov_env(ds, tmp_dir)
225 # Check for 'k8s' here intentionally, as we may support other values
226 # such as openstack/openshift for 'vim' option.
227 if ds_opts['vim'] == 'k8s':
228 deploy_options.append('kubernetes-environment.yaml')
231 deploy_options.append('virtual-environment.yaml')
233 deploy_options.append('baremetal-environment.yaml')
# Sanity-check topology, then build the deploy command string itself.
235 num_control, num_compute = inv.get_node_counts()
236 if num_control > 1 and not ds['global_params']['ha_enabled']:
238 if platform.machine() == 'aarch64':
239 # aarch64 deploys were not completing in the default 90 mins.
240 # Not sure if this is related to the hardware the OOO support
241 # was developed on or the virtualization support in CentOS
242 # Either way it will probably get better over time as the aarch
243 # support matures in CentOS and deploy time should be tested in
244 # the future so this multiplier can be removed.
245 con.DEPLOY_TIMEOUT *= 2
246 cmd = "openstack overcloud deploy --templates --timeout {} " \
247 .format(con.DEPLOY_TIMEOUT)
249 for option in deploy_options:
250 cmd += " -e {}".format(option)
251 cmd += " --ntp-server {}".format(ns['ntp'][0])
252 cmd += " --control-scale {}".format(num_control)
253 cmd += " --compute-scale {}".format(num_compute)
254 cmd += ' --control-flavor control --compute-flavor compute'
256 cmd += ' --networks-file network_data.yaml'
# NOTE(review): reading nested-KVM support from the host; presumably only
# reached for virtual deployments (the guard line is elided) -- confirm.
259 with open('/sys/module/kvm_intel/parameters/nested') as f:
260 nested_kvm = f.read().strip()
261 if nested_kvm != 'Y':
262 libvirt_type = 'qemu'
263 cmd += ' --libvirt-type {}'.format(libvirt_type)
264 logging.info("Deploy command set: {}".format(cmd))
266 with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
# NOTE(review): garbled extract -- original line numbers fused in, indentation
# stripped, and many lines elided (the virt_cmds list initialization, several
# guard conditions such as the vpn/zrpcd branch, and various `virt_cmds.extend`
# call headers).  Purpose: copy the overcloud qcow2 to tmp_dir and queue up a
# series of virt-customize operations (service tweaks, proxies, SDN bits,
# Ceph loop device, NFS exports) before running them against the image.
271 def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
274 Locates sdn image and preps for deployment.
275 :param ds: deploy settings
276 :param ns: network settings
277 :param img: sdn image
278 :param tmp_dir: dir to store modified sdn image
279 :param root_pw: password to configure for overcloud image
280 :param docker_tag: Docker image tag for RDO version (default None)
281 :param patches: List of patches to apply to overcloud image
284 # TODO(trozet): Come up with a better way to organize this logic in this
286 logging.info("Preparing image: {} for deployment".format(img))
287 if not os.path.isfile(img):
288 logging.error("Missing SDN image {}".format(img))
289 raise ApexDeployException("Missing SDN image file: {}".format(img))
291 ds_opts = ds['deploy_options']
293 sdn = ds_opts['sdn_controller']
294 patched_containers = set()
295 # we need this due to rhbz #1436021
296 # fixed in systemd-219-37.el7
298 logging.info("Neutron openvswitch-agent disabled")
301 "rm -f /etc/systemd/system/multi-user.target.wants/"
302 "neutron-openvswitch-agent.service"},
305 "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
309 if ns.get('http_proxy', ''):
312 "echo 'http_proxy={}' >> /etc/environment".format(
315 if ns.get('https_proxy', ''):
318 "echo 'https_proxy={}' >> /etc/environment".format(
321 tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
322 shutil.copyfile(img, tmp_oc_image)
323 logging.debug("Temporary overcloud image stored as: {}".format(
327 oc_builder.inject_quagga(tmp_oc_image, tmp_dir)
328 virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
331 "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
332 "/opt/quagga/etc/init.d/zrpcd_start.sh"})
334 con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
337 con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
338 "init.d/zrpcd_start.sh' /etc/rc.local "})
340 con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
341 "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
342 logging.info("ZRPCD process started")
# Dataplane-specific image prep: kernel modules for DPDK/FDIO, OVS NSH or a
# custom OVS downgrade for OpenDaylight, neutron patching for FDIO.
344 dataplane = ds_opts['dataplane']
345 if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
346 logging.info("Enabling kernel modules for dpdk")
347 # file to module mapping
349 os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
350 os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
352 for mod_file, mod in uio_types.items():
353 with open(mod_file, 'w') as fh:
354 fh.write('#!/bin/bash\n')
355 fh.write('exec /sbin/modprobe {}'.format(mod))
359 {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
361 {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
362 "{}".format(os.path.basename(mod_file))}
365 pw_op = "password:{}".format(root_pw)
366 virt_cmds.append({con.VIRT_PW: pw_op})
368 if dataplane == 'ovs':
370 oc_builder.inject_ovs_nsh(tmp_oc_image, tmp_dir)
371 elif sdn == 'opendaylight':
372 # FIXME(trozet) remove this after RDO is updated with fix for
373 # https://bugzilla.redhat.com/show_bug.cgi?id=1544892
374 ovs_file = os.path.basename(con.CUSTOM_OVS)
375 ovs_url = con.CUSTOM_OVS.replace(ovs_file, '')
376 utils.fetch_upstream_and_unpack(dest=tmp_dir, url=ovs_url,
379 {con.VIRT_UPLOAD: "{}:/root/".format(os.path.join(tmp_dir,
381 {con.VIRT_RUN_CMD: "yum downgrade -y /root/{}".format(
385 if dataplane == 'fdio':
386 # Patch neutron with using OVS external interface for router
387 # and add generic linux NS interface driver
389 {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
390 "-p1 < neutron-patch-NSDriver.patch"})
393 {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
394 {con.VIRT_RUN_CMD: "yum install -y "
395 "/root/nosdn_vpp_rpms/*.rpm"}
398 undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
399 'installer_vm']['ip']
400 if sdn == 'opendaylight':
401 oc_builder.inject_opendaylight(
402 odl_version=ds_opts['odl_version'],
405 uc_ip=undercloud_admin_ip,
406 os_version=ds_opts['os_version'],
407 docker_tag=docker_tag,
410 patched_containers = patched_containers.union({'opendaylight'})
413 if ds_opts['os_version'] == 'master':
414 branch = ds_opts['os_version']
416 branch = "stable/{}".format(ds_opts['os_version'])
417 logging.info('Adding patches to overcloud')
418 patched_containers = patched_containers.union(
419 c_builder.add_upstream_patches(patches,
420 tmp_oc_image, tmp_dir,
422 uc_ip=undercloud_admin_ip,
423 docker_tag=docker_tag))
# Containerized Ceph without a dedicated disk: install the losetup unit so
# /dev/loop3 backed by /srv/data.img is available for the OSDs.
424 # if containers with ceph, and no ceph device we need to use a
425 # persistent loop device for Ceph OSDs
426 if docker_tag and ds_opts['ceph_device'] == '/dev/loop3':
427 tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
428 with open(tmp_losetup, 'w') as fh:
429 fh.write(LOSETUP_SERVICE)
431 {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(tmp_losetup)
433 {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size {}'
434 .format(LOOP_DEVICE_SIZE)},
435 {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
436 {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
438 # TODO(trozet) remove this after LP#173474 is fixed
439 dhcp_unit = '/usr/lib/systemd/system/dhcp-interface@.service'
441 {con.VIRT_RUN_CMD: "crudini --del {} Unit "
442 "ConditionPathExists".format(dhcp_unit)})
445 {con.VIRT_INSTALL: "nfs-utils"},
446 {con.VIRT_RUN_CMD: "ln -s /usr/lib/systemd/system/nfs-server.service "
447 "/etc/systemd/system/multi-user.target.wants/"
448 "nfs-server.service"},
449 {con.VIRT_RUN_CMD: "mkdir -p /glance"},
450 {con.VIRT_RUN_CMD: "mkdir -p /cinder"},
451 {con.VIRT_RUN_CMD: "mkdir -p /nova"},
452 {con.VIRT_RUN_CMD: "chmod 777 /glance"},
453 {con.VIRT_RUN_CMD: "chmod 777 /cinder"},
454 {con.VIRT_RUN_CMD: "chmod 777 /nova"},
455 {con.VIRT_RUN_CMD: "chown nfsnobody:nfsnobody /glance"},
456 {con.VIRT_RUN_CMD: "chown nfsnobody:nfsnobody /cinder"},
457 {con.VIRT_RUN_CMD: "chown nfsnobody:nfsnobody /nova"},
458 {con.VIRT_RUN_CMD: "echo '/glance *(rw,sync,"
459 "no_root_squash,no_acl)' > /etc/exports"},
460 {con.VIRT_RUN_CMD: "echo '/cinder *(rw,sync,"
461 "no_root_squash,no_acl)' >> /etc/exports"},
462 {con.VIRT_RUN_CMD: "echo '/nova *(rw,sync,"
463 "no_root_squash,no_acl)' >> /etc/exports"},
464 {con.VIRT_RUN_CMD: "exportfs -avr"},
# Finally apply all accumulated virt-customize operations to the image.
466 virt_utils.virt_customize(virt_cmds, tmp_oc_image)
467 logging.info("Overcloud image customization complete")
468 return patched_containers
def make_ssh_key():
    """
    Creates public and private ssh keys with 1024 bit RSA encryption
    :return: private, public key
    """
    # NOTE(review): the extract was missing the `def` header and the
    # key_size argument; restored per the visible docstring (1024-bit RSA)
    # and the visible public_exponent/backend arguments.
    key = rsa.generate_private_key(
        backend=crypto_default_backend(),
        public_exponent=65537,
        key_size=1024
    )

    # Serialize: PEM/PKCS8 unencrypted private key, OpenSSH public key.
    private_key = key.private_bytes(
        crypto_serialization.Encoding.PEM,
        crypto_serialization.PrivateFormat.PKCS8,
        crypto_serialization.NoEncryption())
    public_key = key.public_key().public_bytes(
        crypto_serialization.Encoding.OpenSSH,
        crypto_serialization.PublicFormat.OpenSSH
    )
    return private_key.decode('utf-8'), public_key.decode('utf-8')
# NOTE(review): garbled extract -- original line numbers fused in, indentation
# stripped, and many lines elided (docstring quotes, `tenant_nic = dict()`,
# several else-branches, `print(output_line)` at the loop tail, the all-in-one
# guard before line 711, etc.).  Purpose: copy the opnfv-environment file to
# tmp_dir and rewrite it line-by-line (keys, VLAN ranges, bridge/provider
# mappings, performance/DPDK options), then for all-in-one deployments merge
# ComputeServices into ControllerServices and disable NFS parameters.
493 def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
495 Creates modified opnfv/network environments for deployment
496 :param ns: network settings
497 :param ns: network settings
498 :param inv: node inventory
499 :param opnfv_env: file path for opnfv-environment file
500 :param net_env: file path for network-environment file
501 :param tmp_dir: Apex tmp dir
505 logging.info("Preparing opnfv-environment and network-environment files")
506 ds_opts = ds['deploy_options']
507 tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
508 shutil.copyfile(opnfv_env, tmp_opnfv_env)
509 tenant_nic_map = ns['networks']['tenant']['nic_mapping']
511 tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
512 tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
513 external_nic_map = ns['networks']['external'][0]['nic_mapping']
514 external_nic = dict()
515 external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]
518 private_key, public_key = make_ssh_key()
520 num_control, num_compute = inv.get_node_counts()
521 if num_control > 1 and not ds['global_params']['ha_enabled']:
524 # Make easier/faster variables to index in the file editor
525 if 'performance' in ds_opts:
528 if 'vpp' in ds_opts['performance']['Compute']:
529 perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
532 if 'vpp' in ds_opts['performance']['Controller']:
533 perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
538 if 'ovs' in ds_opts['performance']['Compute']:
539 perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
544 if 'kernel' in ds_opts['performance']['Compute']:
545 perf_kern_comp = ds_opts['performance']['Compute']['kernel']
547 perf_kern_comp = None
551 tenant_settings = ns['networks']['tenant']
552 tenant_vlan_enabled = 'tenant' in ns.enabled_network_list and \
553 ns['networks']['tenant'].get('segmentation_type') == 'vlan'
# In-place line editor over the copied opnfv-environment file; each branch
# rewrites one known key into output_line (print of output_line is elided).
555 # Modify OPNFV environment
556 # TODO: Change to build a dict and outputting yaml rather than parsing
557 for line in fileinput.input(tmp_opnfv_env, inplace=True):
558 line = line.strip('\n')
560 if 'CloudDomain' in line:
561 output_line = " CloudDomain: {}".format(ns['domain_name'])
562 elif 'replace_private_key' in line:
563 output_line = " private_key: |\n"
565 for line in private_key.splitlines():
566 key_out += " {}\n".format(line)
567 output_line += key_out
568 elif 'replace_public_key' in line:
569 output_line = " public_key: '{}'".format(public_key)
570 elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
571 'resource_registry' in line:
572 output_line = "resource_registry:\n" \
573 " OS::TripleO::NodeUserData: first-boot.yaml"
574 elif 'ComputeExtraConfigPre' in line and \
575 ds_opts['dataplane'] == 'ovs_dpdk':
576 output_line = ' OS::TripleO::ComputeExtraConfigPre: ' \
577 './ovs-dpdk-preconfig.yaml'
578 elif 'NeutronNetworkVLANRanges' in line:
580 if tenant_vlan_enabled:
581 if ns['networks']['tenant']['overlay_id_range']:
582 vlan_setting = ns['networks']['tenant']['overlay_id_range']
583 if 'datacentre' not in vlan_setting:
584 vlan_setting += ',datacentre:1:1000'
585 # SRIOV networks are VLAN based provider networks. In order to
586 # simplify the deployment, nfv_sriov will be the default physnet.
587 # VLANs are not needed in advance, and the user will have to create
588 # the network specifying the segmentation-id.
591 vlan_setting += ",nfv_sriov"
593 vlan_setting = "datacentre:1:1000,nfv_sriov"
595 output_line = " NeutronNetworkVLANRanges: " + vlan_setting
596 elif 'NeutronBridgeMappings' in line and tenant_vlan_enabled:
597 if tenant_settings['overlay_id_range']:
598 physnets = tenant_settings['overlay_id_range'].split(',')
599 output_line = " NeutronBridgeMappings: "
600 for physnet in physnets:
601 physnet_name = physnet.split(':')[0]
602 if physnet_name != 'datacentre':
603 output_line += "{}:br-vlan,".format(physnet_name)
604 output_line += "datacentre:br-ex"
605 elif 'OpenDaylightProviderMappings' in line and tenant_vlan_enabled \
606 and ds_opts['sdn_controller'] == 'opendaylight':
607 if tenant_settings['overlay_id_range']:
608 physnets = tenant_settings['overlay_id_range'].split(',')
609 output_line = " OpenDaylightProviderMappings: "
610 for physnet in physnets:
611 physnet_name = physnet.split(':')[0]
612 if physnet_name != 'datacentre':
613 output_line += "{}:br-vlan,".format(physnet_name)
614 output_line += "datacentre:br-ex"
615 elif 'NeutronNetworkType' in line and tenant_vlan_enabled:
616 output_line = " NeutronNetworkType: vlan\n" \
617 " NeutronTunnelTypes: ''"
619 if ds_opts['sdn_controller'] == 'opendaylight' and \
620 'odl_vpp_routing_node' in ds_opts:
621 if 'opendaylight::vpp_routing_node' in line:
622 output_line = (" opendaylight::vpp_routing_node: {}.{}"
623 .format(ds_opts['odl_vpp_routing_node'],
625 elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
626 if 'NeutronVPPAgentPhysnets' in line:
627 # VPP interface tap0 will be used for external network
629 output_line = (" NeutronVPPAgentPhysnets: "
630 "'datacentre:{},external:tap0'"
631 .format(tenant_nic['Controller']))
632 elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
634 if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
636 elif 'NeutronDhcpAgentsPerNetwork' in line:
638 num_dhcp_agents = num_control
640 num_dhcp_agents = num_compute
641 output_line = (" NeutronDhcpAgentsPerNetwork: {}"
642 .format(num_dhcp_agents))
643 elif 'ComputeServices' in line:
644 output_line = (" ComputeServices:\n"
645 " - OS::TripleO::Services::NeutronDhcpAgent")
648 for role in 'NovaCompute', 'Controller':
649 if role == 'NovaCompute':
650 perf_opts = perf_vpp_comp
652 perf_opts = perf_vpp_ctrl
653 cfg = "{}ExtraConfig".format(role)
654 if cfg in line and perf_opts:
656 if 'main-core' in perf_opts:
657 perf_line += ("\n fdio::vpp_cpu_main_core: '{}'"
658 .format(perf_opts['main-core']))
659 if 'corelist-workers' in perf_opts:
661 "fdio::vpp_cpu_corelist_workers: '{}'"
662 .format(perf_opts['corelist-workers']))
663 if ds_opts['sdn_controller'] == 'opendaylight' and \
664 ds_opts['dataplane'] == 'fdio':
665 if role == 'NovaCompute':
667 "tripleo::profile::base::neutron::"
668 "agents::honeycomb::"
669 "interface_role_mapping:"
670 " ['{}:tenant-interface',"
671 "'{}:public-interface']"
672 .format(tenant_nic[role],
676 "tripleo::profile::base::neutron::"
677 "agents::honeycomb::"
678 "interface_role_mapping:"
679 " ['{}:tenant-interface']"
680 .format(tenant_nic[role]))
682 output_line = (" {}:{}".format(cfg, perf_line))
684 if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
685 for k, v in OVS_PERF_MAP.items():
686 if k in line and v in perf_ovs_comp:
687 output_line = " {}: '{}'".format(k, perf_ovs_comp[v])
690 # (FIXME) use compute's kernel settings for all nodes for now.
692 if 'NovaSchedulerDefaultFilters' in line:
694 " NovaSchedulerDefaultFilters: 'RamFilter," \
695 "ComputeFilter,AvailabilityZoneFilter," \
696 "ComputeCapabilitiesFilter," \
697 "ImagePropertiesFilter,NUMATopologyFilter'"
698 elif 'ComputeKernelArgs' in line:
700 for k, v in perf_kern_comp.items():
701 kernel_args += "{}={} ".format(k, v)
703 output_line = " ComputeKernelArgs: '{}'".\
708 # Merge compute services into control services if only a single
# All-in-one post-processing: load the edited env as YAML, warn about unused
# Compute* parameters, disable NFS vars, merge/unify service lists, and
# write the file back out.
711 with open(tmp_opnfv_env, 'r') as fh:
712 data = yaml.safe_load(fh)
713 param_data = data['parameter_defaults']
714 logging.info("All in one deployment detected")
715 logging.info("Disabling NFS in env file")
716 # Check to see if any parameters are set for Compute
717 for param in param_data.keys():
718 if param != 'ComputeServices' and param.startswith('Compute'):
719 logging.warning("Compute parameter set, but will not be used "
720 "in deployment: {}. Please use Controller "
721 "based parameters when using All-in-one "
722 "deployments".format(param))
723 if param in NFS_VARS:
724 param_data[param] = False
725 logging.info("Checking if service merging required into "
727 if ('ControllerServices' in param_data and 'ComputeServices' in
729 logging.info("Services detected in environment file. Merging...")
730 ctrl_services = param_data['ControllerServices']
731 cmp_services = param_data['ComputeServices']
732 param_data['ControllerServices'] = list(set().union(
733 ctrl_services, cmp_services))
734 for dup_service in DUPLICATE_COMPUTE_SERVICES:
735 if dup_service in param_data['ControllerServices']:
736 param_data['ControllerServices'].remove(dup_service)
737 param_data.pop('ComputeServices')
738 logging.debug("Merged controller services: {}".format(
739 pprint.pformat(param_data['ControllerServices'])
742 logging.info("No services detected in env file, not merging "
744 with open(tmp_opnfv_env, 'w') as fh:
745 yaml.safe_dump(data, fh, default_flow_style=False)
747 logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
748 with open(tmp_opnfv_env, 'r') as fh:
749 logging.debug("opnfv-environment content is : {}".format(
750 pprint.pformat(yaml.safe_load(fh.read()))
def generate_ceph_key():
    """
    Generates a cephx-format secret key, base64 encoded.
    :return: base64-encoded key bytes suitable for CephMonKey/CephAdminKey/
             CephClientKey in the storage environment (see prep_storage_env)
    """
    # NOTE(review): the line defining `key` was elided in the extract;
    # restored as 16 random bytes, matching the cephx keyring layout:
    # a little-endian header of (key type=1, creation time, nsec=0,
    # secret length) followed by the secret itself.
    key = os.urandom(16)
    header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
    return base64.b64encode(header + key)
# NOTE(review): garbled extract with elided lines (docstring quotes and
# params, the `else: print(line)` loop tail, `ceph_params = {}` init, dict
# closers, and the virtual-deployment guard presumably around line 796).
# Purpose: patch the copied storage-environment.yaml with a fresh cluster
# FSID and generated cephx keys, then (containers) apply ceph-ansible
# parameters or (legacy) append a puppet-ceph ExtraConfig entry.
760 def prep_storage_env(ds, ns, virtual, tmp_dir):
762 Creates storage environment file for deployment. Source file is copied by
763 undercloud playbook to host.
770 ds_opts = ds['deploy_options']
771 storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
772 if not os.path.isfile(storage_file):
773 logging.error("storage-environment file is not in tmp directory: {}. "
774 "Check if file was copied from "
775 "undercloud".format(tmp_dir))
776 raise ApexDeployException("storage-environment file not copied from "
# In-place line editor: replace the Ceph identity/key parameters.
778 for line in fileinput.input(storage_file, inplace=True):
779 line = line.strip('\n')
780 if 'CephClusterFSID' in line:
781 print(" CephClusterFSID: {}".format(str(uuid.uuid4())))
782 elif 'CephMonKey' in line:
783 print(" CephMonKey: {}".format(generate_ceph_key().decode(
785 elif 'CephAdminKey' in line:
786 print(" CephAdminKey: {}".format(generate_ceph_key().decode(
788 elif 'CephClientKey' in line:
789 print(" CephClientKey: {}".format(generate_ceph_key().decode(
# Containerized deployments: constrain pool/pg sizing and resource limits
# (comment below explains the mon-derived pg budget), then set the OSD
# device list for ceph-ansible.
794 if ds_opts['containers']:
797 # max pgs allowed are calculated as num_mons * 200. Therefore we
798 # set number of pgs and pools so that the total will be less:
799 # num_pgs * num_pools * num_osds
800 ceph_params['CephPoolDefaultSize'] = 2
801 ceph_params['CephPoolDefaultPgNum'] = 32
803 ceph_params['CephAnsibleExtraConfig'] = {
804 'centos_package_dependencies': [],
805 'ceph_osd_docker_memory_limit': '1g',
806 'ceph_mds_docker_memory_limit': '1g',
808 ceph_device = ds_opts['ceph_device']
809 ceph_params['CephAnsibleDisksConfig'] = {
810 'devices': [ceph_device],
812 'osd_scenario': 'collocated'
814 utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
815 # TODO(trozet): remove following block as we only support containers now
816 elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
817 with open(storage_file, 'a') as fh:
818 fh.write(' ExtraConfig:\n')
819 fh.write(" ceph::profile::params::osds:{{{}:{{}}}}\n".format(
820 ds_opts['ceph_device']
# NOTE(review): garbled extract with elided lines (docstring quotes/params,
# the raise message continuation, and the `else: print(line)` loop tail).
# Purpose: rewrite the copied neutron-opendaylight-sriov.yaml so its SR-IOV
# parameters reference the interface named in deploy settings ('sriov').
824 def prep_sriov_env(ds, tmp_dir):
826 Creates SRIOV environment file for deployment. Source file is copied by
827 undercloud playbook to host.
832 ds_opts = ds['deploy_options']
833 sriov_iface = ds_opts['sriov']
834 sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
835 if not os.path.isfile(sriov_file):
836 logging.error("sriov-environment file is not in tmp directory: {}. "
837 "Check if file was copied from "
838 "undercloud".format(tmp_dir))
839 raise ApexDeployException("sriov-environment file not copied from "
841 # TODO(rnoriega): Instead of line editing, refactor this code to load
842 # yaml file into a dict, edit it and write the file back.
# In-place line editor: `line[3:]` branches presumably strip a leading
# comment marker to un-comment the scheduler filter lines -- confirm
# against the template.
843 for line in fileinput.input(sriov_file, inplace=True):
844 line = line.strip('\n')
845 if 'NovaSchedulerDefaultFilters' in line:
846 print(" {}".format(line[3:]))
847 elif 'NovaSchedulerAvailableFilters' in line:
848 print(" {}".format(line[3:]))
849 elif 'NeutronPhysicalDevMappings' in line:
850 print(" NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
851 .format(sriov_iface))
852 elif 'NeutronSriovNumVFs' in line:
853 print(" NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
854 elif 'NovaPCIPassthrough' in line:
855 print(" NovaPCIPassthrough:")
856 elif 'devname' in line:
857 print(" - devname: \"{}\"".format(sriov_iface))
858 elif 'physical_network' in line:
859 print(" physical_network: \"nfv_sriov\"")
# NOTE(review): garbled extract with elided lines (docstring quotes, the
# `cmds = list()` / `external = ...` initializations, the native-vlan
# `ext_type = 'flat'` branch, and the `return cmds`).  Purpose: build the
# `openstack network create` / `openstack subnet create` commands for the
# external (or fallback admin) network from network settings.
864 def external_network_cmds(ns, ds):
866 Generates external network openstack commands
867 :param ns: network settings
868 :param ds: deploy settings
869 :return: list of commands to configure external network
870 ds_opts = ds['deploy_options']
871 external_physnet = 'datacentre'
# FDIO without OpenDaylight uses the 'external' physnet wired to VPP tap0
# (matches the NeutronVPPAgentPhysnets setting in prep_env).
873 if ds_opts['dataplane'] == 'fdio' and \
874 ds_opts['sdn_controller'] != 'opendaylight':
875 external_physnet = 'external'
876 if 'external' in ns.enabled_network_list:
877 net_config = ns['networks']['external'][0]
879 pool_start, pool_end = net_config['floating_ip_range']
881 net_config = ns['networks']['admin']
883 pool_start, pool_end = ns['apex']['networks']['admin'][
884 'introspection_range']
885 nic_config = net_config['nic_mapping']
886 gateway = net_config['gateway']
# Provider network type follows the compute NIC's VLAN config: native ->
# (elided branch, presumably flat), otherwise a vlan with that segment.
888 # create network command
889 if nic_config['compute']['vlan'] == 'native':
892 ext_type = "vlan --provider-segment {}".format(nic_config[
894 cmds.append("openstack network create external --project service "
895 "--external --provider-network-type {} "
896 "--provider-physical-network {}"
897 .format(ext_type, external_physnet))
# Subnet command: allocation pool from the floating-IP/introspection range
# computed above; IPv6 flag added for external v6 CIDRs.
898 # create subnet command
899 cidr = net_config['cidr']
900 subnet_cmd = "openstack subnet create external-subnet --project " \
901 "service --network external --no-dhcp --gateway {} " \
902 "--allocation-pool start={},end={} --subnet-range " \
903 "{}".format(gateway, pool_start, pool_end, str(cidr))
904 if external and cidr.version == 6:
905 subnet_cmd += ' --ip-version 6'
906 cmds.append(subnet_cmd)
907 logging.debug("Neutron external network commands determined "
908 "as: {}".format(cmds))
# NOTE(review): garbled extract; lines are elided (the `ds_cfg = [...]` list
# header, the KeyError handler around line 922-927, the cmd-list append and
# return at the truncated end).  Purpose: build `congress datasource create`
# style argument strings for each driver from overcloudrc credentials.
912 def create_congress_cmds(overcloud_file):
913 drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
914 overcloudrc = parsers.parse_overcloudrc(overcloud_file)
915 logging.info("Creating congress commands")
918 "username={}".format(overcloudrc['OS_USERNAME']),
919 "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
920 "password={}".format(overcloudrc['OS_PASSWORD']),
921 "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
924 logging.error("Unable to find all keys required for congress in "
925 "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
926 "OS_PASSWORD, OS_AUTH_URL. Please check overcloudrc "
927 "file: {}".format(overcloud_file))
930 ds_cfg = '--config ' + ' --config '.join(ds_cfg)
# One command per driver; 'doctor' takes no credential config, and the
# 'neutronv2' api_version pin presumably applies to a specific driver
# (guard line elided) -- confirm.
932 for driver in drivers:
933 if driver == 'doctor':
934 cmd = "{} \"{}\"".format(driver, driver)
936 cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
938 cmd += ' --config api_version="2.34"'
939 logging.debug("Congress command created: {}".format(cmd))