##############################################################################
# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

import base64
import fileinput
import logging
import os
import platform
import pprint
import shutil
import struct
import time
import uuid
import yaml

import apex.builders.overcloud_builder as oc_builder
import apex.builders.common_builder as c_builder
from apex.common import constants as con
from apex.common.exceptions import ApexDeployException
from apex.common import parsers
from apex.common import utils
from apex.virtual import utils as virt_utils
from cryptography.hazmat.primitives import serialization as \
    crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend as \
    crypto_default_backend

SDN_FILE_MAP = {
    'opendaylight': {
        'sfc': 'neutron-sfc-opendaylight.yaml',
        'vpn': 'neutron-bgpvpn-opendaylight.yaml',
        'gluon': 'gluon.yaml',
        'vpp': {
            'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
            'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
            'default': 'neutron-opendaylight-honeycomb.yaml'
        },
        'l2gw': 'neutron-l2gw-opendaylight.yaml',
        'sriov': 'neutron-opendaylight-sriov.yaml',
        'default': 'neutron-opendaylight.yaml',
    },
    'onos': {
        'sfc': 'neutron-onos-sfc.yaml',
        'default': 'neutron-onos.yaml'
    },
    'ovn': 'neutron-ml2-ovn.yaml',
    False: {
        'vpp': 'neutron-ml2-vpp.yaml',
        'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
    }
}

OTHER_FILE_MAP = {
    'tacker': 'enable_tacker.yaml',
    'congress': 'enable_congress.yaml',
    'barometer': 'enable_barometer.yaml',
    'rt_kvm': 'enable_rt_kvm.yaml'
}

OVS_PERF_MAP = {
    'HostCpusList': 'dpdk_cores',
    'NeutronDpdkCoreList': 'pmd_cores',
    'NeutronDpdkSocketMemory': 'socket_memory',
    'NeutronDpdkMemoryChannels': 'memory_channels'
}
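
# OVS_PERF_MAP maps tripleo-heat-template parameter names to the keys expected
# under performance > Compute > ovs in the deploy settings; prep_env() uses it
# to rewrite those parameters when the dataplane is ovs_dpdk.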

ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \

LOOP_DEVICE_SIZE = "10G"
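
# When Ceph runs containerized and ceph_device is left at /dev/loop3 (see
# prep_image below), the overcloud image gets this systemd unit so that
# /dev/loop3 is backed by a /srv/data.img file of LOOP_DEVICE_SIZE, giving the
# Ceph OSD a persistent block device.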
LOSETUP_SERVICE = """[Unit]
Description=Setup loop devices

[Service]
ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
ExecStop=/sbin/losetup -d /dev/loop3

[Install]
WantedBy=multi-user.target
"""

DUPLICATE_COMPUTE_SERVICES = [
    'OS::TripleO::Services::ComputeNeutronCorePlugin',
    'OS::TripleO::Services::ComputeNeutronMetadataAgent',
    'OS::TripleO::Services::ComputeNeutronOvsAgent',
    'OS::TripleO::Services::ComputeNeutronL3Agent'
]
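
# These compute-side agent services are stripped back out after
# ComputeServices is merged into ControllerServices for all-in-one (zero
# compute node) deployments in prep_env(), so the controller does not run
# duplicate Neutron agents.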


def build_sdn_env_list(ds, sdn_map, env_list=None):
    """
    Builds a list of SDN environment files to be used in the deploy cmd.

    This function recursively searches an sdn_map. First the SDN controller
    is matched, and then the function looks for enabled features for that
    controller to determine which environment files should be used. By
    default a feature is added to the list if it is set to True in the deploy
    settings. If a feature does not have a boolean value, the key and value
    pair to compare against is given as a tuple (k, v).

    :param ds: deploy settings
    :param sdn_map: SDN map to recursively search
    :param env_list: recursive var to hold previously found env_list
    :return: A list of env files
    """
    if env_list is None:
        env_list = list()
    for k, v in sdn_map.items():
        if ds['sdn_controller'] == k or (k in ds and ds[k]):
            if isinstance(v, dict):
                # Append default SDN env file first
                # The assumption is that feature-enabled SDN env files
                # override and do not conflict with previously set default
                # settings
                if ds['sdn_controller'] == k and 'default' in v:
                    env_list.append(os.path.join(con.THT_ENV_DIR,
                                                 v['default']))
                env_list.extend(build_sdn_env_list(ds, v))
            # check if the value is not a boolean
            elif isinstance(v, tuple):
                if ds[k] == v[0]:
                    env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
            else:
                env_list.append(os.path.join(con.THT_ENV_DIR, v))
    if len(env_list) == 0:
        try:
            env_list.append(os.path.join(
                con.THT_ENV_DIR, sdn_map['default']))
        except KeyError:
            logging.warning("Unable to find default file for SDN")

    return env_list
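
# Illustrative example (assumed deploy settings, not taken from a real
# scenario file): with ds['sdn_controller'] == 'opendaylight' and
# ds['sfc'] == True and all other features falsy,
# build_sdn_env_list(ds, SDN_FILE_MAP) returns roughly:
#   [<THT_ENV_DIR>/neutron-opendaylight.yaml,
#    <THT_ENV_DIR>/neutron-sfc-opendaylight.yaml]
# i.e. the controller's default env file first, then one file per enabled
# feature.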


def get_docker_sdn_files(ds_opts):
    """
    Returns docker env files for the detected SDN
    :param ds_opts: deploy options
    :return: list of docker THT env files for an SDN
    """
    docker_services = con.VALID_DOCKER_SERVICES
    tht_dir = con.THT_DOCKER_ENV_DIR
    sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
    for i, sdn_file in enumerate(sdn_env_list):
        sdn_base = os.path.basename(sdn_file)
        if sdn_base in docker_services:
            if docker_services[sdn_base] is not None:
                sdn_env_list[i] = \
                    os.path.join(tht_dir, docker_services[sdn_base])
            else:
                sdn_env_list[i] = os.path.join(tht_dir, sdn_base)
    return sdn_env_list
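
# For example (illustrative): if build_sdn_env_list() resolves to
# <THT_ENV_DIR>/neutron-opendaylight.yaml and that base name is listed in
# con.VALID_DOCKER_SERVICES, the entry is swapped for its containerized
# counterpart under con.THT_DOCKER_ENV_DIR.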


def create_deploy_cmd(ds, ns, inv, tmp_dir,
                      virtual, env_file='opnfv-environment.yaml',
                      net_data=False):

    logging.info("Creating deployment command")
    deploy_options = ['network-environment.yaml']

    ds_opts = ds['deploy_options']

    if ds_opts['containers']:
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'docker.yaml'))

    if ds['global_params']['ha_enabled']:
        if ds_opts['containers']:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'docker-ha.yaml'))
        else:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'puppet-pacemaker.yaml'))

    if env_file:
        deploy_options.append(env_file)

    if ds_opts['containers']:
        deploy_options.append('docker-images.yaml')
        sdn_docker_files = get_docker_sdn_files(ds_opts)
        for sdn_docker_file in sdn_docker_files:
            deploy_options.append(sdn_docker_file)
    else:
        deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)

    for k, v in OTHER_FILE_MAP.items():
        if k in ds_opts and ds_opts[k]:
            if ds_opts['containers']:
                deploy_options.append(os.path.join(con.THT_DOCKER_ENV_DIR,
                                                   "{}.yaml".format(k)))
            else:
                deploy_options.append(os.path.join(con.THT_ENV_DIR, v))

    # TODO(trozet) Fix this check to look for if ceph is in controller
    # services and not use name of the file
    if ds_opts['ceph'] and 'csit' not in env_file:
        prep_storage_env(ds, ns, virtual, tmp_dir)
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'storage-environment.yaml'))
    if ds_opts['sriov']:
        prep_sriov_env(ds, tmp_dir)

    # Check for 'k8s' here intentionally, as we may support other values
    # such as openstack/openshift for 'vim' option.
    if ds_opts['vim'] == 'k8s':
        deploy_options.append('kubernetes-environment.yaml')

    if virtual:
        deploy_options.append('virtual-environment.yaml')
    else:
        deploy_options.append('baremetal-environment.yaml')

    num_control, num_compute = inv.get_node_counts()
    if num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1
    if platform.machine() == 'aarch64':
        # aarch64 deploys were not completing in the default 90 mins.
        # It is unclear whether this is related to the hardware the OOO
        # support was developed on or to the virtualization support in
        # CentOS. Either way it will likely improve over time as the aarch64
        # support matures in CentOS, and deploy time should be re-tested in
        # the future so this multiplier can be removed.
        con.DEPLOY_TIMEOUT *= 2
    cmd = "openstack overcloud deploy --templates --timeout {} " \
          .format(con.DEPLOY_TIMEOUT)

    for option in deploy_options:
        cmd += " -e {}".format(option)
    cmd += " --ntp-server {}".format(ns['ntp'][0])
    cmd += " --control-scale {}".format(num_control)
    cmd += " --compute-scale {}".format(num_compute)
    cmd += ' --control-flavor control --compute-flavor compute'
    if net_data:
        cmd += ' --networks-file network_data.yaml'
    libvirt_type = 'kvm'
    if virtual and (platform.machine() != 'aarch64'):
        with open('/sys/module/kvm_intel/parameters/nested') as f:
            nested_kvm = f.read().strip()
            if nested_kvm != 'Y':
                libvirt_type = 'qemu'
    elif virtual and (platform.machine() == 'aarch64'):
        libvirt_type = 'qemu'
    cmd += ' --libvirt-type {}'.format(libvirt_type)
    if platform.machine() == 'aarch64':
        cmd += ' --override-ansible-cfg /home/stack/ansible.cfg '
    logging.info("Deploy command set: {}".format(cmd))

    with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
        fh.write(cmd)
    return cmd
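
# The generated command looks roughly like (illustrative values only):
#   openstack overcloud deploy --templates --timeout 90 \
#     -e network-environment.yaml -e <THT_ENV_DIR>/neutron-opendaylight.yaml \
#     -e opnfv-environment.yaml -e virtual-environment.yaml \
#     --ntp-server <ntp> --control-scale 1 --compute-scale 1 \
#     --control-flavor control --compute-flavor compute --libvirt-type kvm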


def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
               patches=None):
    """
    Locates sdn image and preps for deployment.
    :param ds: deploy settings
    :param ns: network settings
    :param img: sdn image
    :param tmp_dir: dir to store modified sdn image
    :param root_pw: password to configure for overcloud image
    :param docker_tag: Docker image tag for RDO version (default None)
    :param patches: List of patches to apply to overcloud image
    :return: None
    """
    # TODO(trozet): Come up with a better way to organize this logic in this
    # function
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))

    ds_opts = ds['deploy_options']
    virt_cmds = list()
    sdn = ds_opts['sdn_controller']
    patched_containers = set()
    # we need this due to rhbz #1436021
    # fixed in systemd-219-37.el7
    if sdn == 'opendaylight':
        logging.info("Neutron openvswitch-agent disabled")
        virt_cmds.extend([
            {con.VIRT_RUN_CMD:
                "rm -f /etc/systemd/system/multi-user.target.wants/"
                "neutron-openvswitch-agent.service"},
            {con.VIRT_RUN_CMD:
                "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
                ".service"}
        ])

    if ns.get('http_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'http_proxy={}' >> /etc/environment".format(
                    ns['http_proxy'])})

    if ns.get('https_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'https_proxy={}' >> /etc/environment".format(
                    ns['https_proxy'])})

    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug("Temporary overcloud image stored as: {}".format(
        tmp_oc_image))

    if ds_opts['vpn']:
        oc_builder.inject_quagga(tmp_oc_image, tmp_dir)
        virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
                "/opt/quagga/etc/init.d/zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
                              "zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.local "})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
        logging.info("ZRPCD process started")

    dataplane = ds_opts['dataplane']
    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
        uio_types = {
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'):
                'uio_pci_generic'
        }
        for mod_file, mod in uio_types.items():
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))
            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
                    mod_file)},
                {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
                                   "{}".format(os.path.basename(mod_file))}
            ])
    if root_pw:
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})

    if dataplane == 'ovs':
        # FIXME(trozet) remove this after RDO is updated with fix for
        # https://bugzilla.redhat.com/show_bug.cgi?id=1544892
        # https://review.rdoproject.org/r/#/c/13839/
        oc_builder.inject_ovs_nsh(tmp_oc_image, tmp_dir)

    if dataplane == 'fdio':
        # Patch neutron with using OVS external interface for router
        # and add generic linux NS interface driver
        virt_cmds.append(
            {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
                               "-p1 < neutron-patch-NSDriver.patch"})
        if sdn is False:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
                {con.VIRT_RUN_CMD: "yum install -y "
                                   "/root/nosdn_vpp_rpms/*.rpm"}
            ])

    undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
        'installer_vm']['ip']
    if sdn == 'opendaylight':
        oc_builder.inject_opendaylight(
            odl_version=ds_opts['odl_version'],
            image=tmp_oc_image,
            tmp_dir=tmp_dir,
            uc_ip=undercloud_admin_ip,
            os_version=ds_opts['os_version'],
            docker_tag=docker_tag,
        )
        if docker_tag:
            patched_containers = patched_containers.union({'opendaylight'})

    if patches:
        if ds_opts['os_version'] == 'master':
            branch = ds_opts['os_version']
        else:
            branch = "stable/{}".format(ds_opts['os_version'])
        logging.info('Adding patches to overcloud')
        patched_containers = patched_containers.union(
            c_builder.add_upstream_patches(patches,
                                           tmp_oc_image, tmp_dir,
                                           branch,
                                           uc_ip=undercloud_admin_ip,
                                           docker_tag=docker_tag))
    # if containers with ceph, and no ceph device we need to use a
    # persistent loop device for Ceph OSDs
    if docker_tag and ds_opts['ceph_device'] == '/dev/loop3':
        tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
        with open(tmp_losetup, 'w') as fh:
            fh.write(LOSETUP_SERVICE)
        virt_cmds.extend([
            {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(
                tmp_losetup)},
            {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size {}'
                .format(LOOP_DEVICE_SIZE)},
            {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
            {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
        ])
    # TODO(trozet) remove this after LP#173474 is fixed
    dhcp_unit = '/usr/lib/systemd/system/dhcp-interface@.service'
    virt_cmds.append(
        {con.VIRT_RUN_CMD: "crudini --del {} Unit "
                           "ConditionPathExists".format(dhcp_unit)})

    virt_cmds.extend([
        {con.VIRT_INSTALL: "nfs-utils"},
        {con.VIRT_RUN_CMD: "ln -s /usr/lib/systemd/system/nfs-server.service "
                           "/etc/systemd/system/multi-user.target.wants/"
                           "nfs-server.service"},
        {con.VIRT_RUN_CMD: "mkdir -p /glance"},
        {con.VIRT_RUN_CMD: "mkdir -p /cinder"},
        {con.VIRT_RUN_CMD: "mkdir -p /nova"},
        {con.VIRT_RUN_CMD: "chmod 777 /glance"},
        {con.VIRT_RUN_CMD: "chmod 777 /cinder"},
        {con.VIRT_RUN_CMD: "chmod 777 /nova"},
        {con.VIRT_RUN_CMD: "chown nfsnobody:nfsnobody /glance"},
        {con.VIRT_RUN_CMD: "chown nfsnobody:nfsnobody /cinder"},
        {con.VIRT_RUN_CMD: "chown nfsnobody:nfsnobody /nova"},
        {con.VIRT_RUN_CMD: "echo '/glance *(rw,sync,"
                           "no_root_squash,no_acl)' > /etc/exports"},
        {con.VIRT_RUN_CMD: "echo '/cinder *(rw,sync,"
                           "no_root_squash,no_acl)' >> /etc/exports"},
        {con.VIRT_RUN_CMD: "echo '/nova *(rw,sync,"
                           "no_root_squash,no_acl)' >> /etc/exports"},
        {con.VIRT_RUN_CMD: "exportfs -avr"},
    ])
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")
    return patched_containers


def make_ssh_key():
    """
    Creates public and private ssh keys with 1024-bit RSA encryption
    :return: private, public key
    """
    key = rsa.generate_private_key(
        backend=crypto_default_backend(),
        public_exponent=65537,
        key_size=1024
    )

    private_key = key.private_bytes(
        crypto_serialization.Encoding.PEM,
        crypto_serialization.PrivateFormat.PKCS8,
        crypto_serialization.NoEncryption())
    public_key = key.public_key().public_bytes(
        crypto_serialization.Encoding.OpenSSH,
        crypto_serialization.PublicFormat.OpenSSH
    )
    return private_key.decode('utf-8'), public_key.decode('utf-8')
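
# The generated pair is consumed by prep_env() below, which swaps the
# 'replace_private_key' and 'replace_public_key' placeholders in the
# opnfv-environment file for the PEM private key and OpenSSH public key.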


def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
    """
    Creates modified opnfv/network environments for deployment
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param opnfv_env: file path for opnfv-environment file
    :param net_env: file path for network-environment file
    :param tmp_dir: Apex tmp dir
    :return: None
    """

    logging.info("Preparing opnfv-environment and network-environment files")
    ds_opts = ds['deploy_options']
    tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
    shutil.copyfile(opnfv_env, tmp_opnfv_env)
    tenant_nic_map = ns['networks']['tenant']['nic_mapping']
    tenant_nic = dict()
    tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
    tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
    external_nic_map = ns['networks']['external'][0]['nic_mapping']
    external_nic = dict()
    external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]

    # SSH keys
    private_key, public_key = make_ssh_key()

    num_control, num_compute = inv.get_node_counts()
    if num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1

    # Make easier/faster variables to index in the file editor
    if 'performance' in ds_opts:
        perf = True
        # vpp
        if 'vpp' in ds_opts['performance']['Compute']:
            perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
        else:
            perf_vpp_comp = None
        if 'vpp' in ds_opts['performance']['Controller']:
            perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
        else:
            perf_vpp_ctrl = None

        # ovs
        if 'ovs' in ds_opts['performance']['Compute']:
            perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
        else:
            perf_ovs_comp = None

        # kernel
        if 'kernel' in ds_opts['performance']['Compute']:
            perf_kern_comp = ds_opts['performance']['Compute']['kernel']
        else:
            perf_kern_comp = None
    else:
        perf = False

    tenant_settings = ns['networks']['tenant']
    tenant_vlan_enabled = 'tenant' in ns.enabled_network_list and \
        ns['networks']['tenant'].get('segmentation_type') == 'vlan'

    # Modify OPNFV environment
    # TODO: Change to building a dict and outputting yaml rather than parsing
    for line in fileinput.input(tmp_opnfv_env, inplace=True):
        line = line.strip('\n')
        output_line = line
        if 'CloudDomain' in line:
            output_line = "  CloudDomain: {}".format(ns['domain_name'])
        elif 'replace_private_key' in line:
            output_line = "  private_key: |\n"
            key_out = ''
            for line in private_key.splitlines():
                key_out += "      {}\n".format(line)
            output_line += key_out
        elif 'replace_public_key' in line:
            output_line = "  public_key: '{}'".format(public_key)
        elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
                'resource_registry' in line:
            output_line = "resource_registry:\n" \
                          "  OS::TripleO::NodeUserData: first-boot.yaml"
        elif 'ComputeExtraConfigPre' in line and \
                ds_opts['dataplane'] == 'ovs_dpdk':
            output_line = '  OS::TripleO::ComputeExtraConfigPre: ' \
                          './ovs-dpdk-preconfig.yaml'
        elif 'NeutronNetworkVLANRanges' in line:
            vlan_setting = ''
            if tenant_vlan_enabled:
                if ns['networks']['tenant']['overlay_id_range']:
                    vlan_setting = ns['networks']['tenant']['overlay_id_range']
                    if 'datacentre' not in vlan_setting:
                        vlan_setting += ',datacentre:1:1000'
            # SRIOV networks are VLAN based provider networks. In order to
            # simplify the deployment, nfv_sriov will be the default physnet.
            # VLANs are not needed in advance, and the user will have to
            # create the network specifying the segmentation-id.
            if ds_opts['sriov']:
                if vlan_setting:
                    vlan_setting += ",nfv_sriov"
                else:
                    vlan_setting = "datacentre:1:1000,nfv_sriov"

            output_line = "  NeutronNetworkVLANRanges: " + vlan_setting
        elif 'NeutronBridgeMappings' in line and tenant_vlan_enabled:
            if tenant_settings['overlay_id_range']:
                physnets = tenant_settings['overlay_id_range'].split(',')
                output_line = "  NeutronBridgeMappings: "
                for physnet in physnets:
                    physnet_name = physnet.split(':')[0]
                    if physnet_name != 'datacentre':
                        output_line += "{}:br-vlan,".format(physnet_name)
                output_line += "datacentre:br-ex"
        elif 'OpenDaylightProviderMappings' in line and tenant_vlan_enabled \
                and ds_opts['sdn_controller'] == 'opendaylight':
            if tenant_settings['overlay_id_range']:
                physnets = tenant_settings['overlay_id_range'].split(',')
                output_line = "  OpenDaylightProviderMappings: "
                for physnet in physnets:
                    physnet_name = physnet.split(':')[0]
                    if physnet_name != 'datacentre':
                        output_line += "{}:br-vlan,".format(physnet_name)
                output_line += "datacentre:br-ex"
        elif 'NeutronNetworkType' in line and tenant_vlan_enabled:
            output_line = "  NeutronNetworkType: vlan\n" \
                          "  NeutronTunnelTypes: ''"

        if ds_opts['sdn_controller'] == 'opendaylight' and \
                'odl_vpp_routing_node' in ds_opts:
            if 'opendaylight::vpp_routing_node' in line:
                output_line = ("    opendaylight::vpp_routing_node: {}.{}"
                               .format(ds_opts['odl_vpp_routing_node'],
                                       ns['domain_name']))
        elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
            if 'NeutronVPPAgentPhysnets' in line:
                # VPP interface tap0 will be used for external network
                # connectivity
                output_line = ("  NeutronVPPAgentPhysnets: "
                               "'datacentre:{},external:tap0'"
                               .format(tenant_nic['Controller']))
        elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
                'dvr') is True:
            if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
                output_line = ''
            elif 'NeutronDhcpAgentsPerNetwork' in line:
                if num_compute == 0:
                    num_dhcp_agents = num_control
                else:
                    num_dhcp_agents = num_compute
                output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
                               .format(num_dhcp_agents))
            elif 'ComputeServices' in line:
                output_line = ("  ComputeServices:\n"
                               "    - OS::TripleO::Services::NeutronDhcpAgent")

        if perf:
            for role in 'NovaCompute', 'Controller':
                if role == 'NovaCompute':
                    perf_opts = perf_vpp_comp
                else:
                    perf_opts = perf_vpp_ctrl
                cfg = "{}ExtraConfig".format(role)
                if cfg in line and perf_opts:
                    perf_line = ''
                    if 'main-core' in perf_opts:
                        perf_line += ("\n    fdio::vpp_cpu_main_core: '{}'"
                                      .format(perf_opts['main-core']))
                    if 'corelist-workers' in perf_opts:
                        perf_line += ("\n    "
                                      "fdio::vpp_cpu_corelist_workers: '{}'"
                                      .format(perf_opts['corelist-workers']))
                    if ds_opts['sdn_controller'] == 'opendaylight' and \
                            ds_opts['dataplane'] == 'fdio':
                        if role == 'NovaCompute':
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface',"
                                          "'{}:public-interface']"
                                          .format(tenant_nic[role],
                                                  external_nic[role]))
                        else:
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface']"
                                          .format(tenant_nic[role]))
                    if perf_line:
                        output_line = ("  {}:{}".format(cfg, perf_line))

            if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
                for k, v in OVS_PERF_MAP.items():
                    if k in line and v in perf_ovs_comp:
                        output_line = "  {}: '{}'".format(k, perf_ovs_comp[v])

            # kernel args
            # (FIXME) use compute's kernel settings for all nodes for now.
            if perf_kern_comp:
                if 'NovaSchedulerDefaultFilters' in line:
                    output_line = \
                        "  NovaSchedulerDefaultFilters: 'RamFilter," \
                        "ComputeFilter,AvailabilityZoneFilter," \
                        "ComputeCapabilitiesFilter," \
                        "ImagePropertiesFilter,NUMATopologyFilter'"
                elif 'ComputeKernelArgs' in line:
                    kernel_args = ''
                    for k, v in perf_kern_comp.items():
                        kernel_args += "{}={} ".format(k, v)
                    if kernel_args:
                        output_line = "  ComputeKernelArgs: '{}'".\
                            format(kernel_args)

        print(output_line)

    # Merge compute services into control services if only a single
    # node deployment
    if num_compute == 0:
        logging.info("All in one deployment. Checking if service merging "
                     "required into control services")
        with open(tmp_opnfv_env, 'r') as fh:
            data = yaml.safe_load(fh)
        param_data = data['parameter_defaults']
        # Check to see if any parameters are set for Compute
        for param in param_data.keys():
            if param != 'ComputeServices' and param.startswith('Compute'):
                logging.warning("Compute parameter set, but will not be used "
                                "in deployment: {}. Please use Controller "
                                "based parameters when using All-in-one "
                                "deployments".format(param))
        if ('ControllerServices' in param_data and 'ComputeServices' in
                param_data):
            logging.info("Services detected in environment file. Merging...")
            ctrl_services = param_data['ControllerServices']
            cmp_services = param_data['ComputeServices']
            param_data['ControllerServices'] = list(set().union(
                ctrl_services, cmp_services))
            for dup_service in DUPLICATE_COMPUTE_SERVICES:
                if dup_service in param_data['ControllerServices']:
                    param_data['ControllerServices'].remove(dup_service)
            param_data.pop('ComputeServices')
            logging.debug("Merged controller services: {}".format(
                pprint.pformat(param_data['ControllerServices'])
            ))
            with open(tmp_opnfv_env, 'w') as fh:
                yaml.safe_dump(data, fh, default_flow_style=False)
        else:
            logging.info("No services detected in env file, not merging "
                         "services")

    logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
    with open(tmp_opnfv_env, 'r') as fh:
        logging.debug("opnfv-environment content is : {}".format(
            pprint.pformat(yaml.safe_load(fh.read()))
        ))
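

# Ceph keyring secrets written into storage-environment.yaml are produced by
# generate_ceph_key() below. The packed header appears to follow the binary
# Ceph key layout: a 2-byte key type (1), a 4-byte creation time in seconds,
# 4 bytes of nanoseconds (0) and a 2-byte key length, followed by the random
# key bytes, all base64 encoded.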
def generate_ceph_key():
    key = os.urandom(16)
    header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
    return base64.b64encode(header + key)


def prep_storage_env(ds, ns, virtual, tmp_dir):
    """
    Creates storage environment file for deployment. Source file is copied by
    undercloud playbook to host.
    :param ds: deploy settings
    :param ns: network settings
    :param virtual: True if virtual deployment
    :param tmp_dir: Apex tmp dir
    :return: None
    """
    ds_opts = ds['deploy_options']
    storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
    if not os.path.isfile(storage_file):
        logging.error("storage-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("storage-environment file not copied from "
                                  "undercloud")
    for line in fileinput.input(storage_file, inplace=True):
        line = line.strip('\n')
        if 'CephClusterFSID' in line:
            print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
        elif 'CephMonKey' in line:
            print("  CephMonKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephAdminKey' in line:
            print("  CephAdminKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephClientKey' in line:
            print("  CephClientKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        else:
            print(line)

    if ds_opts['containers']:
        ceph_params = {}

        # The maximum allowed PGs is calculated as num_mons * 200, so set the
        # number of PGs and pools such that num_pgs * num_pools * num_osds
        # stays below that limit.
        ceph_params['CephPoolDefaultSize'] = 2
        ceph_params['CephPoolDefaultPgNum'] = 32
        if virtual:
            ceph_params['CephAnsibleExtraConfig'] = {
                'centos_package_dependencies': [],
                'ceph_osd_docker_memory_limit': '1g',
                'ceph_mds_docker_memory_limit': '1g',
            }
        ceph_device = ds_opts['ceph_device']
        ceph_params['CephAnsibleDisksConfig'] = {
            'devices': [ceph_device],
            'osd_scenario': 'collocated'
        }
        utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
    # TODO(trozet): remove following block as we only support containers now
    elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
        with open(storage_file, 'a') as fh:
            fh.write('  ExtraConfig:\n')
            fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
                ds_opts['ceph_device']
            ))
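
# Illustrative result (from the defaults above): parameter_defaults in
# storage-environment.yaml ends up with roughly
#   CephPoolDefaultSize: 2
#   CephPoolDefaultPgNum: 32
#   CephAnsibleDisksConfig: {devices: [<ceph_device>], osd_scenario: collocated}
# plus per-daemon memory limits when the deployment is virtual.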


def prep_sriov_env(ds, tmp_dir):
    """
    Creates SRIOV environment file for deployment. Source file is copied by
    undercloud playbook to host.
    :param ds: deploy settings
    :param tmp_dir: Apex tmp dir
    :return: None
    """
    ds_opts = ds['deploy_options']
    sriov_iface = ds_opts['sriov']
    sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
    if not os.path.isfile(sriov_file):
        logging.error("sriov-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("sriov-environment file not copied from "
                                  "undercloud")
    # TODO(rnoriega): Instead of line editing, refactor this code to load
    # yaml file into a dict, edit it and write the file back.
    for line in fileinput.input(sriov_file, inplace=True):
        line = line.strip('\n')
        if 'NovaSchedulerDefaultFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NovaSchedulerAvailableFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NeutronPhysicalDevMappings' in line:
            print("  NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
                  .format(sriov_iface))
        elif 'NeutronSriovNumVFs' in line:
            print("  NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
        elif 'NovaPCIPassthrough' in line:
            print("  NovaPCIPassthrough:")
        elif 'devname' in line:
            print("    - devname: \"{}\"".format(sriov_iface))
        elif 'physical_network' in line:
            print("      physical_network: \"nfv_sriov\"")
        else:
            print(line)


def external_network_cmds(ns, ds):
    """
    Generates external network openstack commands
    :param ns: network settings
    :param ds: deploy settings
    :return: list of commands to configure external network
    """
    ds_opts = ds['deploy_options']
    external_physnet = 'datacentre'
    if ds_opts['dataplane'] == 'fdio' and \
            ds_opts['sdn_controller'] != 'opendaylight':
        external_physnet = 'external'
    if 'external' in ns.enabled_network_list:
        net_config = ns['networks']['external'][0]
        external = True
        pool_start, pool_end = net_config['floating_ip_range']
    else:
        net_config = ns['networks']['admin']
        external = False
        pool_start, pool_end = ns['apex']['networks']['admin'][
            'introspection_range']
    nic_config = net_config['nic_mapping']
    gateway = net_config['gateway']
    cmds = list()
    # create network command
    if nic_config['compute']['vlan'] == 'native':
        ext_type = 'flat'
    else:
        ext_type = "vlan --provider-segment {}".format(nic_config[
            'compute']['vlan'])
    cmds.append("openstack network create external --project service "
                "--external --provider-network-type {} "
                "--provider-physical-network {}"
                .format(ext_type, external_physnet))
    # create subnet command
    cidr = net_config['cidr']
    subnet_cmd = "openstack subnet create external-subnet --project " \
                 "service --network external --no-dhcp --gateway {} " \
                 "--allocation-pool start={},end={} --subnet-range " \
                 "{}".format(gateway, pool_start, pool_end, str(cidr))
    if external and cidr.version == 6:
        subnet_cmd += ' --ip-version 6'
    cmds.append(subnet_cmd)
    logging.debug("Neutron external network commands determined "
                  "as: {}".format(cmds))
    return cmds
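
# For a typical native-VLAN external network this produces commands along the
# lines of (illustrative values only):
#   openstack network create external --project service --external \
#     --provider-network-type flat --provider-physical-network datacentre
#   openstack subnet create external-subnet --project service --network \
#     external --no-dhcp --gateway <gw> --allocation-pool \
#     start=<start>,end=<end> --subnet-range <cidr>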


def create_congress_cmds(overcloud_file):
    drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
    overcloudrc = parsers.parse_overcloudrc(overcloud_file)
    logging.info("Creating congress commands")
    try:
        ds_cfg = [
            "username={}".format(overcloudrc['OS_USERNAME']),
            "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
            "password={}".format(overcloudrc['OS_PASSWORD']),
            "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
        ]
    except KeyError:
        logging.error("Unable to find all keys required for congress in "
                      "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
                      "OS_PASSWORD, OS_AUTH_URL. Please check overcloudrc "
                      "file: {}".format(overcloud_file))
        raise
    cmds = list()
    ds_cfg = '--config ' + ' --config '.join(ds_cfg)

    for driver in drivers:
        if driver == 'doctor':
            cmd = "{} \"{}\"".format(driver, driver)
        else:
            cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
        if driver == 'nova':
            cmd += ' --config api_version="2.34"'
        logging.debug("Congress command created: {}".format(cmd))