1 ##############################################################################
2 # Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
21 import apex.builders.overcloud_builder as oc_builder
22 import apex.builders.common_builder as c_builder
24 from apex.common import constants as con
25 from apex.common.exceptions import ApexDeployException
26 from apex.common import parsers
27 from apex.common import utils
28 from apex.virtual import utils as virt_utils
29 from cryptography.hazmat.primitives import serialization as \
31 from cryptography.hazmat.primitives.asymmetric import rsa
32 from cryptography.hazmat.backends import default_backend as \
33 crypto_default_backend
# NOTE(review): module-level lookup tables and constants. This listing is
# sampled — the openers of these literals (e.g. `SDN_FILE_MAP = {` and the
# nested per-controller dict keys) are on lines missing from view; confirm
# against the full source before editing. The fragments visible here map SDN
# controller features to TripleO heat env files, extra features
# (tacker/congress/...) to enable_* env files, and OVS DPDK THT parameter
# names to deploy-settings performance keys.
38 'sfc': 'neutron-sfc-opendaylight.yaml',
39 'vpn': 'neutron-bgpvpn-opendaylight.yaml',
40 'gluon': 'gluon.yaml',
42 'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
43 'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
44 'default': 'neutron-opendaylight-honeycomb.yaml'
46 'l2gw': 'neutron-l2gw-opendaylight.yaml',
47 'sriov': 'neutron-opendaylight-sriov.yaml',
48 'default': 'neutron-opendaylight.yaml',
51 'sfc': 'neutron-onos-sfc.yaml',
52 'default': 'neutron-onos.yaml'
54 'ovn': 'neutron-ml2-ovn.yaml',
56 'vpp': 'neutron-ml2-vpp.yaml',
57 'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
62 'tacker': 'enable_tacker.yaml',
63 'congress': 'enable_congress.yaml',
64 'barometer': 'enable_barometer.yaml',
65 'rt_kvm': 'enable_rt_kvm.yaml'
69 'HostCpusList': 'dpdk_cores',
70 'NeutronDpdkCoreList': 'pmd_cores',
71 'NeutronDpdkSocketMemory': 'socket_memory',
72 'NeutronDpdkMemoryChannels': 'memory_channels'
75 ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
78 LOOP_DEVICE_SIZE = "10G"
80 LOSETUP_SERVICE = """[Unit]
81 Description=Setup loop devices
86 ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
87 ExecStop=/sbin/losetup -d /dev/loop3
92 WantedBy=multi-user.target
95 DUPLICATE_COMPUTE_SERVICES = [
96 'OS::TripleO::Services::ComputeNeutronCorePlugin',
97 'OS::TripleO::Services::ComputeNeutronMetadataAgent',
98 'OS::TripleO::Services::ComputeNeutronOvsAgent',
99 'OS::TripleO::Services::ComputeNeutronL3Agent'
# NOTE(review): recursively resolves deploy settings against SDN_FILE_MAP into
# a list of THT env file paths. Sampled listing — the `env_list = []` default
# initialization and the `else:` lines of several branches are missing from
# view (original line numbers skip); verify against the full source.
103 def build_sdn_env_list(ds, sdn_map, env_list=None):
105 Builds a list of SDN environment files to be used in the deploy cmd.
107 This function recursively searches an sdn_map. First the sdn controller is
108 matched and then the function looks for enabled features for that
109 controller to determine which environment files should be used. By
110 default the feature will be checked if set to true in deploy settings to be
111 added to the list. If a feature does not have a boolean value, then the
112 key and value pair to compare with are checked as a tuple (k,v).
114 :param ds: deploy settings
115 :param sdn_map: SDN map to recursively search
116 :param env_list: recursive var to hold previously found env_list
117 :return: A list of env files
121 for k, v in sdn_map.items():
122 if ds['sdn_controller'] == k or (k in ds and ds[k]):
123 if isinstance(v, dict):
124 # Append default SDN env file first
125 # The assumption is that feature-enabled SDN env files
126 # override and do not conflict with previously set default
128 if ds['sdn_controller'] == k and 'default' in v:
129 env_list.append(os.path.join(con.THT_ENV_DIR,
131 env_list.extend(build_sdn_env_list(ds, v))
132 # check if the value is not a boolean
133 elif isinstance(v, tuple):
135 env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
137 env_list.append(os.path.join(con.THT_ENV_DIR, v))
138 if len(env_list) == 0:
140 env_list.append(os.path.join(
141 con.THT_ENV_DIR, sdn_map['default']))
143 logging.warning("Unable to find default file for SDN")
# NOTE(review): rewrites the SDN env file list so that entries known to
# VALID_DOCKER_SERVICES point into the docker THT env dir instead of the
# regular one. Sampled listing — the left-hand side of the assignment on
# original line 162 and the final `return sdn_env_list` appear to be on
# missing lines; confirm against the full source.
148 def get_docker_sdn_files(ds_opts):
150 Returns docker env file for detected SDN
151 :param ds_opts: deploy options
152 :return: list of docker THT env files for an SDN
154 docker_services = con.VALID_DOCKER_SERVICES
155 tht_dir = con.THT_DOCKER_ENV_DIR
156 sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
157 for i, sdn_file in enumerate(sdn_env_list):
158 sdn_base = os.path.basename(sdn_file)
159 if sdn_base in docker_services:
160 if docker_services[sdn_base] is not None:
162 os.path.join(tht_dir, docker_services[sdn_base])
164 sdn_env_list[i] = os.path.join(tht_dir, sdn_base)
# NOTE(review): assembles the `openstack overcloud deploy` command line from
# deploy/network settings and inventory, and writes it to
# <tmp_dir>/deploy_command. Sampled listing — several `else:` branches, the
# containerized env-file names, the libvirt_type default, and the final
# write/return lines are on lines missing from view; confirm against source.
168 def create_deploy_cmd(ds, ns, inv, tmp_dir,
169 virtual, env_file='opnfv-environment.yaml',
172 logging.info("Creating deployment command")
173 deploy_options = ['network-environment.yaml']
175 ds_opts = ds['deploy_options']
177 if ds_opts['containers']:
178 deploy_options.append(os.path.join(con.THT_ENV_DIR,
181 if ds['global_params']['ha_enabled']:
182 if ds_opts['containers']:
183 deploy_options.append(os.path.join(con.THT_ENV_DIR,
186 deploy_options.append(os.path.join(con.THT_ENV_DIR,
187 'puppet-pacemaker.yaml'))
190 deploy_options.append(env_file)
192 if ds_opts['containers']:
193 deploy_options.append('docker-images.yaml')
194 sdn_docker_files = get_docker_sdn_files(ds_opts)
195 for sdn_docker_file in sdn_docker_files:
196 deploy_options.append(sdn_docker_file)
198 deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)
200 for k, v in OTHER_FILE_MAP.items():
201 if k in ds_opts and ds_opts[k]:
202 if ds_opts['containers']:
203 deploy_options.append(os.path.join(con.THT_DOCKER_ENV_DIR,
204 "{}.yaml".format(k)))
206 deploy_options.append(os.path.join(con.THT_ENV_DIR, v))
208 # TODO(trozet) Fix this check to look for if ceph is in controller services
209 # and not use name of the file
210 if ds_opts['ceph'] and 'csit' not in env_file:
211 prep_storage_env(ds, ns, virtual, tmp_dir)
212 deploy_options.append(os.path.join(con.THT_ENV_DIR,
213 'storage-environment.yaml'))
215 prep_sriov_env(ds, tmp_dir)
217 # Check for 'k8s' here intentionally, as we may support other values
218 # such as openstack/openshift for 'vim' option.
219 if ds_opts['vim'] == 'k8s':
220 deploy_options.append('kubernetes-environment.yaml')
223 deploy_options.append('virtual-environment.yaml')
225 deploy_options.append('baremetal-environment.yaml')
227 num_control, num_compute = inv.get_node_counts()
228 if num_control > 1 and not ds['global_params']['ha_enabled']:
230 if platform.machine() == 'aarch64':
231 # aarch64 deploys were not completing in the default 90 mins.
232 # Not sure if this is related to the hardware the OOO support
233 # was developed on or the virtualization support in CentOS
234 # Either way it will probably get better over time as the aarch
235 # support matures in CentOS and deploy time should be tested in
236 # the future so this multiplier can be removed.
237 con.DEPLOY_TIMEOUT *= 2
238 cmd = "openstack overcloud deploy --templates --timeout {} " \
239 .format(con.DEPLOY_TIMEOUT)
241 for option in deploy_options:
242 cmd += " -e {}".format(option)
243 cmd += " --ntp-server {}".format(ns['ntp'][0])
244 cmd += " --control-scale {}".format(num_control)
245 cmd += " --compute-scale {}".format(num_compute)
246 cmd += ' --control-flavor control --compute-flavor compute'
248 cmd += ' --networks-file network_data.yaml'
250 if virtual and (platform.machine() != 'aarch64'):
251 with open('/sys/module/kvm_intel/parameters/nested') as f:
252 nested_kvm = f.read().strip()
253 if nested_kvm != 'Y':
254 libvirt_type = 'qemu'
255 elif virtual and (platform.machine() == 'aarch64'):
256 libvirt_type = 'qemu'
257 cmd += ' --libvirt-type {}'.format(libvirt_type)
258 if platform.machine() == 'aarch64':
259 cmd += ' --override-ansible-cfg /home/stack/ansible.cfg '
260 logging.info("Deploy command set: {}".format(cmd))
262 with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
# NOTE(review): customizes the overcloud qcow2 image (copied to tmp_dir)
# via virt-customize commands: systemd workarounds, proxy env vars, quagga/
# zrpcd for BGPVPN, DPDK kernel modules, ODL injection, upstream patches,
# loop-device service for containerized Ceph, and NFS exports. Sampled
# listing — the `virt_cmds` initialization, several condition headers and
# `else:` lines are on lines missing from view; confirm against full source.
267 def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
270 Locates sdn image and preps for deployment.
271 :param ds: deploy settings
272 :param ns: network settings
273 :param img: sdn image
274 :param tmp_dir: dir to store modified sdn image
275 :param root_pw: password to configure for overcloud image
276 :param docker_tag: Docker image tag for RDO version (default None)
277 :param patches: List of patches to apply to overcloud image
280 # TODO(trozet): Come up with a better way to organize this logic in this
282 logging.info("Preparing image: {} for deployment".format(img))
283 if not os.path.isfile(img):
284 logging.error("Missing SDN image {}".format(img))
285 raise ApexDeployException("Missing SDN image file: {}".format(img))
287 ds_opts = ds['deploy_options']
289 sdn = ds_opts['sdn_controller']
290 patched_containers = set()
291 # we need this due to rhbz #1436021
292 # fixed in systemd-219-37.el7
294 logging.info("Neutron openvswitch-agent disabled")
297 "rm -f /etc/systemd/system/multi-user.target.wants/"
298 "neutron-openvswitch-agent.service"},
301 "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
305 if ns.get('http_proxy', ''):
308 "echo 'http_proxy={}' >> /etc/environment".format(
311 if ns.get('https_proxy', ''):
314 "echo 'https_proxy={}' >> /etc/environment".format(
317 tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
318 shutil.copyfile(img, tmp_oc_image)
319 logging.debug("Temporary overcloud image stored as: {}".format(
323 oc_builder.inject_quagga(tmp_oc_image, tmp_dir)
324 virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
327 "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
328 "/opt/quagga/etc/init.d/zrpcd_start.sh"})
330 con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
333 con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
334 "init.d/zrpcd_start.sh' /etc/rc.local "})
336 con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
337 "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
338 logging.info("ZRPCD process started")
340 dataplane = ds_opts['dataplane']
341 if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
342 logging.info("Enabling kernel modules for dpdk")
343 # file to module mapping
345 os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
346 os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
348 for mod_file, mod in uio_types.items():
349 with open(mod_file, 'w') as fh:
350 fh.write('#!/bin/bash\n')
351 fh.write('exec /sbin/modprobe {}'.format(mod))
355 {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
357 {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
358 "{}".format(os.path.basename(mod_file))}
361 pw_op = "password:{}".format(root_pw)
362 virt_cmds.append({con.VIRT_PW: pw_op})
364 if dataplane == 'ovs':
366 oc_builder.inject_ovs_nsh(tmp_oc_image, tmp_dir)
367 elif sdn == 'opendaylight':
368 # FIXME(trozet) remove this after RDO is updated with fix for
369 # https://bugzilla.redhat.com/show_bug.cgi?id=1544892
370 ovs_file = os.path.basename(con.CUSTOM_OVS)
371 ovs_url = con.CUSTOM_OVS.replace(ovs_file, '')
372 utils.fetch_upstream_and_unpack(dest=tmp_dir, url=ovs_url,
375 {con.VIRT_UPLOAD: "{}:/root/".format(os.path.join(tmp_dir,
377 {con.VIRT_RUN_CMD: "yum downgrade -y /root/{}".format(
381 if dataplane == 'fdio':
382 # Patch neutron with using OVS external interface for router
383 # and add generic linux NS interface driver
385 {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
386 "-p1 < neutron-patch-NSDriver.patch"})
389 {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
390 {con.VIRT_RUN_CMD: "yum install -y "
391 "/root/nosdn_vpp_rpms/*.rpm"}
394 undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
395 'installer_vm']['ip']
396 if sdn == 'opendaylight':
397 oc_builder.inject_opendaylight(
398 odl_version=ds_opts['odl_version'],
401 uc_ip=undercloud_admin_ip,
402 os_version=ds_opts['os_version'],
403 docker_tag=docker_tag,
406 patched_containers = patched_containers.union({'opendaylight'})
409 if ds_opts['os_version'] == 'master':
410 branch = ds_opts['os_version']
412 branch = "stable/{}".format(ds_opts['os_version'])
413 logging.info('Adding patches to overcloud')
414 patched_containers = patched_containers.union(
415 c_builder.add_upstream_patches(patches,
416 tmp_oc_image, tmp_dir,
418 uc_ip=undercloud_admin_ip,
419 docker_tag=docker_tag))
420 # if containers with ceph, and no ceph device we need to use a
421 # persistent loop device for Ceph OSDs
422 if docker_tag and ds_opts['ceph_device'] == '/dev/loop3':
423 tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
424 with open(tmp_losetup, 'w') as fh:
425 fh.write(LOSETUP_SERVICE)
427 {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(tmp_losetup)
429 {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size {}'
430 .format(LOOP_DEVICE_SIZE)},
431 {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
432 {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
434 # TODO(trozet) remove this after LP#173474 is fixed
435 dhcp_unit = '/usr/lib/systemd/system/dhcp-interface@.service'
437 {con.VIRT_RUN_CMD: "crudini --del {} Unit "
438 "ConditionPathExists".format(dhcp_unit)})
441 {con.VIRT_INSTALL: "nfs-utils"},
442 {con.VIRT_RUN_CMD: "ln -s /usr/lib/systemd/system/nfs-server.service "
443 "/etc/systemd/system/multi-user.target.wants/"
444 "nfs-server.service"},
445 {con.VIRT_RUN_CMD: "mkdir -p /glance"},
446 {con.VIRT_RUN_CMD: "mkdir -p /cinder"},
447 {con.VIRT_RUN_CMD: "mkdir -p /nova"},
448 {con.VIRT_RUN_CMD: "chmod 777 /glance"},
449 {con.VIRT_RUN_CMD: "chmod 777 /cinder"},
450 {con.VIRT_RUN_CMD: "chmod 777 /nova"},
451 {con.VIRT_RUN_CMD: "chown nfsnobody:nfsnobody /glance"},
452 {con.VIRT_RUN_CMD: "chown nfsnobody:nfsnobody /cinder"},
453 {con.VIRT_RUN_CMD: "chown nfsnobody:nfsnobody /nova"},
454 {con.VIRT_RUN_CMD: "echo '/glance *(rw,sync,"
455 "no_root_squash,no_acl)' > /etc/exports"},
456 {con.VIRT_RUN_CMD: "echo '/cinder *(rw,sync,"
457 "no_root_squash,no_acl)' >> /etc/exports"},
458 {con.VIRT_RUN_CMD: "echo '/nova *(rw,sync,"
459 "no_root_squash,no_acl)' >> /etc/exports"},
460 {con.VIRT_RUN_CMD: "exportfs -avr"},
462 virt_utils.virt_customize(virt_cmds, tmp_oc_image)
463 logging.info("Overcloud image customization complete")
464 return patched_containers
# NOTE(review): body of make_ssh_key() — the `def` header itself is on a
# missing line in this sampled listing. Generates an RSA keypair via the
# `cryptography` package and returns (private PEM, public OpenSSH) as UTF-8
# strings. The docstring says "1024 bit" but the key_size argument is on a
# missing line — verify the actual size (upstream uses 2048) and fix the
# docstring mismatch in the full source.
469 Creates public and private ssh keys with 1024 bit RSA encryption
470 :return: private, public key
472 key = rsa.generate_private_key(
473 backend=crypto_default_backend(),
474 public_exponent=65537,
478 private_key = key.private_bytes(
479 crypto_serialization.Encoding.PEM,
480 crypto_serialization.PrivateFormat.PKCS8,
481 crypto_serialization.NoEncryption())
482 public_key = key.public_key().public_bytes(
483 crypto_serialization.Encoding.OpenSSH,
484 crypto_serialization.PublicFormat.OpenSSH
486 return private_key.decode('utf-8'), public_key.decode('utf-8')
# NOTE(review): rewrites a copy of the opnfv-environment file in tmp_dir via
# fileinput in-place editing (SSH keys, VLAN ranges, bridge/provider
# mappings, performance/DPDK/VPP tuning), then for all-in-one deployments
# merges ComputeServices into ControllerServices and strips
# DUPLICATE_COMPUTE_SERVICES. Sampled listing — many initializations
# (`tenant_nic = dict()`, `perf = ...`), `else:` branches, and the
# `print(output_line)` emit line are on lines missing from view; confirm
# against the full source before modifying any branch.
489 def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
491 Creates modified opnfv/network environments for deployment
492 :param ds: deploy settings
493 :param ns: network settings
494 :param inv: node inventory
495 :param opnfv_env: file path for opnfv-environment file
496 :param net_env: file path for network-environment file
497 :param tmp_dir: Apex tmp dir
501 logging.info("Preparing opnfv-environment and network-environment files")
502 ds_opts = ds['deploy_options']
503 tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
504 shutil.copyfile(opnfv_env, tmp_opnfv_env)
505 tenant_nic_map = ns['networks']['tenant']['nic_mapping']
507 tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
508 tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
509 external_nic_map = ns['networks']['external'][0]['nic_mapping']
510 external_nic = dict()
511 external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]
514 private_key, public_key = make_ssh_key()
516 num_control, num_compute = inv.get_node_counts()
517 if num_control > 1 and not ds['global_params']['ha_enabled']:
520 # Make easier/faster variables to index in the file editor
521 if 'performance' in ds_opts:
524 if 'vpp' in ds_opts['performance']['Compute']:
525 perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
528 if 'vpp' in ds_opts['performance']['Controller']:
529 perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
534 if 'ovs' in ds_opts['performance']['Compute']:
535 perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
540 if 'kernel' in ds_opts['performance']['Compute']:
541 perf_kern_comp = ds_opts['performance']['Compute']['kernel']
543 perf_kern_comp = None
547 tenant_settings = ns['networks']['tenant']
548 tenant_vlan_enabled = 'tenant' in ns.enabled_network_list and \
549 ns['networks']['tenant'].get('segmentation_type') == 'vlan'
551 # Modify OPNFV environment
552 # TODO: Change to build a dict and outputting yaml rather than parsing
553 for line in fileinput.input(tmp_opnfv_env, inplace=True):
554 line = line.strip('\n')
556 if 'CloudDomain' in line:
557 output_line = " CloudDomain: {}".format(ns['domain_name'])
558 elif 'replace_private_key' in line:
559 output_line = " private_key: |\n"
561 for line in private_key.splitlines():
562 key_out += " {}\n".format(line)
563 output_line += key_out
564 elif 'replace_public_key' in line:
565 output_line = " public_key: '{}'".format(public_key)
566 elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
567 'resource_registry' in line:
568 output_line = "resource_registry:\n" \
569 " OS::TripleO::NodeUserData: first-boot.yaml"
570 elif 'ComputeExtraConfigPre' in line and \
571 ds_opts['dataplane'] == 'ovs_dpdk':
572 output_line = ' OS::TripleO::ComputeExtraConfigPre: ' \
573 './ovs-dpdk-preconfig.yaml'
574 elif 'NeutronNetworkVLANRanges' in line:
576 if tenant_vlan_enabled:
577 if ns['networks']['tenant']['overlay_id_range']:
578 vlan_setting = ns['networks']['tenant']['overlay_id_range']
579 if 'datacentre' not in vlan_setting:
580 vlan_setting += ',datacentre:1:1000'
581 # SRIOV networks are VLAN based provider networks. In order to
582 # simplify the deployment, nfv_sriov will be the default physnet.
583 # VLANs are not needed in advance, and the user will have to create
584 # the network specifying the segmentation-id.
587 vlan_setting += ",nfv_sriov"
589 vlan_setting = "datacentre:1:1000,nfv_sriov"
591 output_line = " NeutronNetworkVLANRanges: " + vlan_setting
592 elif 'NeutronBridgeMappings' in line and tenant_vlan_enabled:
593 if tenant_settings['overlay_id_range']:
594 physnets = tenant_settings['overlay_id_range'].split(',')
595 output_line = " NeutronBridgeMappings: "
596 for physnet in physnets:
597 physnet_name = physnet.split(':')[0]
598 if physnet_name != 'datacentre':
599 output_line += "{}:br-vlan,".format(physnet_name)
600 output_line += "datacentre:br-ex"
601 elif 'OpenDaylightProviderMappings' in line and tenant_vlan_enabled \
602 and ds_opts['sdn_controller'] == 'opendaylight':
603 if tenant_settings['overlay_id_range']:
604 physnets = tenant_settings['overlay_id_range'].split(',')
605 output_line = " OpenDaylightProviderMappings: "
606 for physnet in physnets:
607 physnet_name = physnet.split(':')[0]
608 if physnet_name != 'datacentre':
609 output_line += "{}:br-vlan,".format(physnet_name)
610 output_line += "datacentre:br-ex"
611 elif 'NeutronNetworkType' in line and tenant_vlan_enabled:
612 output_line = " NeutronNetworkType: vlan\n" \
613 " NeutronTunnelTypes: ''"
615 if ds_opts['sdn_controller'] == 'opendaylight' and \
616 'odl_vpp_routing_node' in ds_opts:
617 if 'opendaylight::vpp_routing_node' in line:
618 output_line = (" opendaylight::vpp_routing_node: {}.{}"
619 .format(ds_opts['odl_vpp_routing_node'],
621 elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
622 if 'NeutronVPPAgentPhysnets' in line:
623 # VPP interface tap0 will be used for external network
625 output_line = (" NeutronVPPAgentPhysnets: "
626 "'datacentre:{},external:tap0'"
627 .format(tenant_nic['Controller']))
628 elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
630 if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
632 elif 'NeutronDhcpAgentsPerNetwork' in line:
634 num_dhcp_agents = num_control
636 num_dhcp_agents = num_compute
637 output_line = (" NeutronDhcpAgentsPerNetwork: {}"
638 .format(num_dhcp_agents))
639 elif 'ComputeServices' in line:
640 output_line = (" ComputeServices:\n"
641 " - OS::TripleO::Services::NeutronDhcpAgent")
644 for role in 'NovaCompute', 'Controller':
645 if role == 'NovaCompute':
646 perf_opts = perf_vpp_comp
648 perf_opts = perf_vpp_ctrl
649 cfg = "{}ExtraConfig".format(role)
650 if cfg in line and perf_opts:
652 if 'main-core' in perf_opts:
653 perf_line += ("\n fdio::vpp_cpu_main_core: '{}'"
654 .format(perf_opts['main-core']))
655 if 'corelist-workers' in perf_opts:
657 "fdio::vpp_cpu_corelist_workers: '{}'"
658 .format(perf_opts['corelist-workers']))
659 if ds_opts['sdn_controller'] == 'opendaylight' and \
660 ds_opts['dataplane'] == 'fdio':
661 if role == 'NovaCompute':
663 "tripleo::profile::base::neutron::"
664 "agents::honeycomb::"
665 "interface_role_mapping:"
666 " ['{}:tenant-interface',"
667 "'{}:public-interface']"
668 .format(tenant_nic[role],
672 "tripleo::profile::base::neutron::"
673 "agents::honeycomb::"
674 "interface_role_mapping:"
675 " ['{}:tenant-interface']"
676 .format(tenant_nic[role]))
678 output_line = (" {}:{}".format(cfg, perf_line))
680 if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
681 for k, v in OVS_PERF_MAP.items():
682 if k in line and v in perf_ovs_comp:
683 output_line = " {}: '{}'".format(k, perf_ovs_comp[v])
686 # (FIXME) use compute's kernel settings for all nodes for now.
688 if 'NovaSchedulerDefaultFilters' in line:
690 " NovaSchedulerDefaultFilters: 'RamFilter," \
691 "ComputeFilter,AvailabilityZoneFilter," \
692 "ComputeCapabilitiesFilter," \
693 "ImagePropertiesFilter,NUMATopologyFilter'"
694 elif 'ComputeKernelArgs' in line:
696 for k, v in perf_kern_comp.items():
697 kernel_args += "{}={} ".format(k, v)
699 output_line = " ComputeKernelArgs: '{}'".\
704 # Merge compute services into control services if only a single
707 logging.info("All in one deployment. Checking if service merging "
708 "required into control services")
709 with open(tmp_opnfv_env, 'r') as fh:
710 data = yaml.safe_load(fh)
711 param_data = data['parameter_defaults']
712 # Check to see if any parameters are set for Compute
713 for param in param_data.keys():
714 if param != 'ComputeServices' and param.startswith('Compute'):
715 logging.warning("Compute parameter set, but will not be used "
716 "in deployment: {}. Please use Controller "
717 "based parameters when using All-in-one "
718 "deployments".format(param))
719 if ('ControllerServices' in param_data and 'ComputeServices' in
721 logging.info("Services detected in environment file. Merging...")
722 ctrl_services = param_data['ControllerServices']
723 cmp_services = param_data['ComputeServices']
724 param_data['ControllerServices'] = list(set().union(
725 ctrl_services, cmp_services))
726 for dup_service in DUPLICATE_COMPUTE_SERVICES:
727 if dup_service in param_data['ControllerServices']:
728 param_data['ControllerServices'].remove(dup_service)
729 param_data.pop('ComputeServices')
730 logging.debug("Merged controller services: {}".format(
731 pprint.pformat(param_data['ControllerServices'])
733 with open(tmp_opnfv_env, 'w') as fh:
734 yaml.safe_dump(data, fh, default_flow_style=False)
736 logging.info("No services detected in env file, not merging "
739 logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
740 with open(tmp_opnfv_env, 'r') as fh:
741 logging.debug("opnfv-environment content is : {}".format(
742 pprint.pformat(yaml.safe_load(fh.read()))
746 def generate_ceph_key():
# NOTE(review): returns a base64-encoded Ceph keyring secret: a packed
# little-endian header (type=1, timestamp, nsec=0, key length) followed by
# the raw key bytes. The `key = os.urandom(16)` line (original line 747)
# appears to be missing from this sampled listing — confirm against source.
748 header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
749 return base64.b64encode(header + key)
# NOTE(review): edits the copied storage-environment.yaml in place, filling
# in a fresh cluster FSID and generated Ceph keys, then (for containerized
# deploys) adds Ceph-Ansible parameters sized for small/virtual deployments.
# Sampled listing — the docstring params, `.decode(...)` argument lines,
# an `else: print(line)` branch, and the `ceph_params = {}` initialization
# are on missing lines; confirm against the full source.
752 def prep_storage_env(ds, ns, virtual, tmp_dir):
754 Creates storage environment file for deployment. Source file is copied by
755 undercloud playbook to host.
762 ds_opts = ds['deploy_options']
763 storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
764 if not os.path.isfile(storage_file):
765 logging.error("storage-environment file is not in tmp directory: {}. "
766 "Check if file was copied from "
767 "undercloud".format(tmp_dir))
768 raise ApexDeployException("storage-environment file not copied from "
770 for line in fileinput.input(storage_file, inplace=True):
771 line = line.strip('\n')
772 if 'CephClusterFSID' in line:
773 print(" CephClusterFSID: {}".format(str(uuid.uuid4())))
774 elif 'CephMonKey' in line:
775 print(" CephMonKey: {}".format(generate_ceph_key().decode(
777 elif 'CephAdminKey' in line:
778 print(" CephAdminKey: {}".format(generate_ceph_key().decode(
780 elif 'CephClientKey' in line:
781 print(" CephClientKey: {}".format(generate_ceph_key().decode(
786 if ds_opts['containers']:
789 # max pgs allowed are calculated as num_mons * 200. Therefore we
790 # set number of pgs and pools so that the total will be less:
791 # num_pgs * num_pools * num_osds
792 ceph_params['CephPoolDefaultSize'] = 2
793 ceph_params['CephPoolDefaultPgNum'] = 32
795 ceph_params['CephAnsibleExtraConfig'] = {
796 'centos_package_dependencies': [],
797 'ceph_osd_docker_memory_limit': '1g',
798 'ceph_mds_docker_memory_limit': '1g',
800 ceph_device = ds_opts['ceph_device']
801 ceph_params['CephAnsibleDisksConfig'] = {
802 'devices': [ceph_device],
804 'osd_scenario': 'collocated'
806 utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
807 # TODO(trozet): remove following block as we only support containers now
808 elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
809 with open(storage_file, 'a') as fh:
810 fh.write(' ExtraConfig:\n')
811 fh.write(" ceph::profile::params::osds:{{{}:{{}}}}\n".format(
812 ds_opts['ceph_device']
# NOTE(review): line-edits the copied neutron-opendaylight-sriov.yaml,
# uncommenting scheduler-filter lines (the `line[3:]` slice strips a "# "
# comment prefix plus indent) and substituting the configured SRIOV
# interface into the physdev/VF/PCI-passthrough parameters. Sampled
# listing — docstring params and the trailing `else: print(line)` branch
# are on missing lines; confirm against the full source.
816 def prep_sriov_env(ds, tmp_dir):
818 Creates SRIOV environment file for deployment. Source file is copied by
819 undercloud playbook to host.
824 ds_opts = ds['deploy_options']
825 sriov_iface = ds_opts['sriov']
826 sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
827 if not os.path.isfile(sriov_file):
828 logging.error("sriov-environment file is not in tmp directory: {}. "
829 "Check if file was copied from "
830 "undercloud".format(tmp_dir))
831 raise ApexDeployException("sriov-environment file not copied from "
833 # TODO(rnoriega): Instead of line editing, refactor this code to load
834 # yaml file into a dict, edit it and write the file back.
835 for line in fileinput.input(sriov_file, inplace=True):
836 line = line.strip('\n')
837 if 'NovaSchedulerDefaultFilters' in line:
838 print(" {}".format(line[3:]))
839 elif 'NovaSchedulerAvailableFilters' in line:
840 print(" {}".format(line[3:]))
841 elif 'NeutronPhysicalDevMappings' in line:
842 print(" NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
843 .format(sriov_iface))
844 elif 'NeutronSriovNumVFs' in line:
845 print(" NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
846 elif 'NovaPCIPassthrough' in line:
847 print(" NovaPCIPassthrough:")
848 elif 'devname' in line:
849 print(" - devname: \"{}\"".format(sriov_iface))
850 elif 'physical_network' in line:
851 print(" physical_network: \"nfv_sriov\"")
# NOTE(review): builds the `openstack network create` / `subnet create`
# commands for the external (or fallback admin) network. Sampled listing —
# the `cmds = list()` and `external = ...` initializations, the
# `ext_type = 'flat'` branch, and the final `return cmds` are on missing
# lines (`external` on original line 896 is otherwise undefined here);
# confirm against the full source.
856 def external_network_cmds(ns, ds):
858 Generates external network openstack commands
859 :param ns: network settings
860 :param ds: deploy settings
861 :return: list of commands to configure external network
863 ds_opts = ds['deploy_options']
864 external_physnet = 'datacentre'
865 if ds_opts['dataplane'] == 'fdio' and \
866 ds_opts['sdn_controller'] != 'opendaylight':
867 external_physnet = 'external'
868 if 'external' in ns.enabled_network_list:
869 net_config = ns['networks']['external'][0]
871 pool_start, pool_end = net_config['floating_ip_range']
873 net_config = ns['networks']['admin']
875 pool_start, pool_end = ns['apex']['networks']['admin'][
876 'introspection_range']
877 nic_config = net_config['nic_mapping']
878 gateway = net_config['gateway']
880 # create network command
881 if nic_config['compute']['vlan'] == 'native':
884 ext_type = "vlan --provider-segment {}".format(nic_config[
886 cmds.append("openstack network create external --project service "
887 "--external --provider-network-type {} "
888 "--provider-physical-network {}"
889 .format(ext_type, external_physnet))
890 # create subnet command
891 cidr = net_config['cidr']
892 subnet_cmd = "openstack subnet create external-subnet --project " \
893 "service --network external --no-dhcp --gateway {} " \
894 "--allocation-pool start={},end={} --subnet-range " \
895 "{}".format(gateway, pool_start, pool_end, str(cidr))
896 if external and cidr.version == 6:
897 subnet_cmd += ' --ip-version 6'
898 cmds.append(subnet_cmd)
899 logging.debug("Neutron external network commands determined "
900 "as: {}".format(cmds))
# NOTE(review): builds `congress datasource create` argument strings per
# driver from overcloudrc credentials. Sampled listing — the `ds_cfg = [...]`
# opener, the KeyError handler around the credential lookups, and the tail
# of the function (appending each cmd and the return) are on lines missing
# from view and/or past the end of this chunk; confirm against full source.
904 def create_congress_cmds(overcloud_file):
905 drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
906 overcloudrc = parsers.parse_overcloudrc(overcloud_file)
907 logging.info("Creating congress commands")
910 "username={}".format(overcloudrc['OS_USERNAME']),
911 "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
912 "password={}".format(overcloudrc['OS_PASSWORD']),
913 "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
916 logging.error("Unable to find all keys required for congress in "
917 "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
918 "OS_PASSWORD, OS_AUTH_URL. Please check overcloudrc "
919 "file: {}".format(overcloud_file))
922 ds_cfg = '--config ' + ' --config '.join(ds_cfg)
924 for driver in drivers:
925 if driver == 'doctor':
926 cmd = "{} \"{}\"".format(driver, driver)
928 cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
930 cmd += ' --config api_version="2.34"'
931 logging.debug("Congress command created: {}".format(cmd))