1 ##############################################################################
2 # Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
21 import apex.builders.overcloud_builder as oc_builder
22 import apex.builders.common_builder as c_builder
24 from apex.common import constants as con
25 from apex.common.exceptions import ApexDeployException
26 from apex.common import parsers
27 from apex.common import utils
28 from apex.virtual import utils as virt_utils
29 from cryptography.hazmat.primitives import serialization as \
31 from cryptography.hazmat.primitives.asymmetric import rsa
32 from cryptography.hazmat.backends import default_backend as \
33 crypto_default_backend
# SDN feature -> TripleO Heat Template (THT) environment file entries.
# These keys are matched against deploy settings by build_sdn_env_list()
# below; the enclosing map assignments are on elided lines in this view.
38 'sfc': 'neutron-sfc-opendaylight.yaml',
39 'vpn': 'neutron-bgpvpn-opendaylight.yaml',
40 'gluon': 'gluon.yaml',
42 'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
43 'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
44 'default': 'neutron-opendaylight-honeycomb.yaml'
46 'l2gw': 'neutron-l2gw-opendaylight.yaml',
47 'sriov': 'neutron-opendaylight-sriov.yaml',
48 'default': 'neutron-opendaylight.yaml',
51 'sfc': 'neutron-onos-sfc.yaml',
52 'default': 'neutron-onos.yaml'
54 'ovn': 'neutron-ml2-ovn.yaml',
56 'vpp': 'neutron-ml2-vpp.yaml',
# Tuple form: (expected deploy-setting value, env file) -- see the tuple
# branch in build_sdn_env_list().
57 'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
# Optional (non-SDN) feature toggles -> enablement env files; iterated as
# OTHER_FILE_MAP in create_deploy_cmd().
62 'tacker': 'enable_tacker.yaml',
63 'congress': 'enable_congress.yaml',
64 'barometer': 'enable_barometer.yaml',
65 'rt_kvm': 'enable_rt_kvm.yaml'
# THT parameter name -> key in the deploy settings' Compute 'ovs'
# performance options; consumed as OVS_PERF_MAP in prep_env().
69 'HostCpusList': 'dpdk_cores',
70 'NeutronDpdkCoreList': 'pmd_cores',
71 'NeutronDpdkSocketMemory': 'socket_memory',
72 'NeutronDpdkMemoryChannels': 'memory_channels'
# In-image path of the custom ODL netvirt VPP RPM (string continues on an
# elided line).
75 ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
78 LOOP_DEVICE_SIZE = "10G"  # size of the /srv/data.img file truncated in prep_image() to back the Ceph OSD loop device
80 LOSETUP_SERVICE = """[Unit]
81 Description=Setup loop devices
86 ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
87 ExecStop=/sbin/losetup -d /dev/loop3
92 WantedBy=multi-user.target
# Compute-role services that duplicate controller-side services; these are
# pruned from the merged ControllerServices list by prep_env() when doing an
# all-in-one (no separate compute) deployment.
95 DUPLICATE_COMPUTE_SERVICES = [
96 'OS::TripleO::Services::ComputeNeutronCorePlugin',
97 'OS::TripleO::Services::ComputeNeutronMetadataAgent',
98 'OS::TripleO::Services::ComputeNeutronOvsAgent',
99 'OS::TripleO::Services::ComputeNeutronL3Agent'
103 def build_sdn_env_list(ds, sdn_map, env_list=None):
# NOTE(review): interior lines are elided in this view; env_list is
# presumably initialized to a fresh list when None on an elided line --
# confirm, since a shared mutable default would otherwise be a hazard.
105 Builds a list of SDN environment files to be used in the deploy cmd.
107 This function recursively searches an sdn_map. First the sdn controller is
108 matched and then the function looks for enabled features for that
109 controller to determine which environment files should be used. By
110 default the feature will be checked if set to true in deploy settings to be
111 added to the list. If a feature does not have a boolean value, then the
112 key and value pair to compare with are checked as a tuple (k,v).
114 :param ds: deploy settings
115 :param sdn_map: SDN map to recursively search
116 :param env_list: recursive var to hold previously found env_list
117 :return: A list of env files
# Walk the map: a dict value is a feature sub-map to recurse into; a tuple
# value pairs an expected setting value with an env file; any other value
# is the env file name itself.
121 for k, v in sdn_map.items():
122 if ds['sdn_controller'] == k or (k in ds and ds[k]):
123 if isinstance(v, dict):
124 # Append default SDN env file first
125 # The assumption is that feature-enabled SDN env files
126 # override and do not conflict with previously set default
128 if ds['sdn_controller'] == k and 'default' in v:
129 env_list.append(os.path.join(con.THT_ENV_DIR,
131 env_list.extend(build_sdn_env_list(ds, v))
132 # check if the value is not a boolean
133 elif isinstance(v, tuple):
135 env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
137 env_list.append(os.path.join(con.THT_ENV_DIR, v))
# Nothing matched: fall back to the map's top-level 'default' entry, or
# warn when the map has none.
138 if len(env_list) == 0:
140 env_list.append(os.path.join(
141 con.THT_ENV_DIR, sdn_map['default']))
143 logging.warning("Unable to find default file for SDN")
148 def get_docker_sdn_files(ds_opts):
150 Returns docker env file for detected SDN
151 :param ds_opts: deploy options
152 :return: list of docker THT env files for an SDN
# Rewrite the plain THT env file list in place so each entry points at its
# docker-specific variant under the per-OS-version docker THT directory.
154 docker_services = con.VALID_DOCKER_SERVICES
155 tht_dir = con.THT_DOCKER_ENV_DIR[ds_opts['os_version']]
156 sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
157 for i, sdn_file in enumerate(sdn_env_list):
158 sdn_base = os.path.basename(sdn_file)
159 if sdn_base in docker_services:
160 if docker_services[sdn_base] is not None:
# NOTE(review): assignment target is on an elided line; the not-None
# branch presumably rebinds sdn_env_list[i] to the mapped docker file
# name -- confirm against the full source.
162 os.path.join(tht_dir, docker_services[sdn_base])
164 sdn_env_list[i] = os.path.join(tht_dir, sdn_base)
168 def create_deploy_cmd(ds, ns, inv, tmp_dir,
169 virtual, env_file='opnfv-environment.yaml',
# Assembles the 'openstack overcloud deploy' command string from deploy
# settings (ds), network settings (ns) and the node inventory (inv), writes
# it to <tmp_dir>/deploy_command, and prepares storage/SRIOV env files as
# side effects. (Docstring lines are elided in this view.)
172 logging.info("Creating deployment command")
173 deploy_options = ['network-environment.yaml']
175 ds_opts = ds['deploy_options']
# Containerized deployments pull in additional THT env files.
177 if ds_opts['containers']:
178 deploy_options.append(os.path.join(con.THT_ENV_DIR,
181 if ds['global_params']['ha_enabled']:
182 if ds_opts['containers']:
183 deploy_options.append(os.path.join(con.THT_ENV_DIR,
186 deploy_options.append(os.path.join(con.THT_ENV_DIR,
187 'puppet-pacemaker.yaml'))
190 deploy_options.append(env_file)
# SDN env files: docker variants for container deploys, plain otherwise.
192 if ds_opts['containers']:
193 deploy_options.append('docker-images.yaml')
194 sdn_docker_files = get_docker_sdn_files(ds_opts)
195 for sdn_docker_file in sdn_docker_files:
196 deploy_options.append(sdn_docker_file)
198 deploy_options.append('sdn-images.yaml')
200 deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)
# Optional feature env files (tacker, congress, barometer, rt_kvm, ...).
202 for k, v in OTHER_FILE_MAP.items():
203 if k in ds_opts and ds_opts[k]:
204 if ds_opts['containers']:
205 deploy_options.append(os.path.join(con.THT_DOCKER_ENV_DIR,
206 "{}.yaml".format(k)))
208 deploy_options.append(os.path.join(con.THT_ENV_DIR, v))
210 # TODO(trozet) Fix this check to look for if ceph is in controller services
211 # and not use name of the file
212 if ds_opts['ceph'] and 'csit' not in env_file:
213 prep_storage_env(ds, ns, virtual, tmp_dir)
214 deploy_options.append(os.path.join(con.THT_ENV_DIR,
215 'storage-environment.yaml'))
217 prep_sriov_env(ds, tmp_dir)
219 # Check for 'k8s' here intentionally, as we may support other values
220 # such as openstack/openshift for 'vim' option.
221 if ds_opts['vim'] == 'k8s':
222 deploy_options.append('kubernetes-environment.yaml')
225 deploy_options.append('virtual-environment.yaml')
227 deploy_options.append('baremetal-environment.yaml')
229 num_control, num_compute = inv.get_node_counts()
230 if num_control > 1 and not ds['global_params']['ha_enabled']:
232 if platform.machine() == 'aarch64':
233 # aarch64 deploys were not completing in the default 90 mins.
234 # Not sure if this is related to the hardware the OOO support
235 # was developed on or the virtualization support in CentOS
236 # Either way it will probably get better over time as the aarch
237 # support matures in CentOS and deploy time should be tested in
238 # the future so this multiplier can be removed.
# NOTE(review): mutates the shared module constant con.DEPLOY_TIMEOUT in
# place -- it doubles again on every call within one process. Consider a
# local timeout variable instead.
239 con.DEPLOY_TIMEOUT *= 2
240 cmd = "openstack overcloud deploy --templates --timeout {} " \
241 .format(con.DEPLOY_TIMEOUT)
243 for option in deploy_options:
244 cmd += " -e {}".format(option)
245 cmd += " --ntp-server {}".format(ns['ntp'][0])
246 cmd += " --control-scale {}".format(num_control)
247 cmd += " --compute-scale {}".format(num_compute)
248 cmd += ' --control-flavor control --compute-flavor compute'
250 cmd += ' --networks-file network_data.yaml'
# Virtual deploys fall back to qemu when nested KVM is unavailable.
253 with open('/sys/module/kvm_intel/parameters/nested') as f:
254 nested_kvm = f.read().strip()
255 if nested_kvm != 'Y':
256 libvirt_type = 'qemu'
257 cmd += ' --libvirt-type {}'.format(libvirt_type)
258 logging.info("Deploy command set: {}".format(cmd))
260 with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
265 def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
268 Locates sdn image and preps for deployment.
269 :param ds: deploy settings
270 :param ns: network settings
271 :param img: sdn image
272 :param tmp_dir: dir to store modified sdn image
273 :param root_pw: password to configure for overcloud image
274 :param docker_tag: Docker image tag for RDO version (default None)
275 :param patches: List of patches to apply to overcloud image
# Builds up a list of virt-customize operations (virt_cmds) and applies
# them to a working copy of the overcloud qcow2 at the end.
278 # TODO(trozet): Come up with a better way to organize this logic in this
280 logging.info("Preparing image: {} for deployment".format(img))
281 if not os.path.isfile(img):
282 logging.error("Missing SDN image {}".format(img))
283 raise ApexDeployException("Missing SDN image file: {}".format(img))
285 ds_opts = ds['deploy_options']
287 sdn = ds_opts['sdn_controller']
288 patched_containers = set()
289 # we need this due to rhbz #1436021
290 # fixed in systemd-219-37.el7
292 logging.info("Neutron openvswitch-agent disabled")
295 "rm -f /etc/systemd/system/multi-user.target.wants/"
296 "neutron-openvswitch-agent.service"},
299 "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
# Propagate any proxy configuration from network settings into the image.
303 if ns.get('http_proxy', ''):
306 "echo 'http_proxy={}' >> /etc/environment".format(
309 if ns.get('https_proxy', ''):
312 "echo 'https_proxy={}' >> /etc/environment".format(
# Work on a copy so the source image stays pristine.
315 tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
316 shutil.copyfile(img, tmp_oc_image)
317 logging.debug("Temporary overcloud image stored as: {}".format(
# Quagga/ZRPCD setup (BGPVPN support); started at boot via rc.local.
321 oc_builder.inject_quagga(tmp_oc_image, tmp_dir)
322 virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
325 "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
326 "/opt/quagga/etc/init.d/zrpcd_start.sh"})
328 con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
331 con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
332 "init.d/zrpcd_start.sh' /etc/rc.local "})
334 con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
335 "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
336 logging.info("ZRPCD process started")
338 dataplane = ds_opts['dataplane']
339 if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
340 logging.info("Enabling kernel modules for dpdk")
341 # file to module mapping
343 os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
344 os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
# Write a small modprobe wrapper script per module and upload it into
# /etc/sysconfig/modules/ so the module loads at boot.
346 for mod_file, mod in uio_types.items():
347 with open(mod_file, 'w') as fh:
348 fh.write('#!/bin/bash\n')
349 fh.write('exec /sbin/modprobe {}'.format(mod))
353 {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
355 {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
356 "{}".format(os.path.basename(mod_file))}
359 pw_op = "password:{}".format(root_pw)
360 virt_cmds.append({con.VIRT_PW: pw_op})
362 if dataplane == 'ovs':
364 oc_builder.inject_ovs_nsh(tmp_oc_image, tmp_dir)
365 elif sdn == 'opendaylight':
366 # FIXME(trozet) remove this after RDO is updated with fix for
367 # https://bugzilla.redhat.com/show_bug.cgi?id=1544892
368 ovs_file = os.path.basename(con.CUSTOM_OVS)
369 ovs_url = con.CUSTOM_OVS.replace(ovs_file, '')
370 utils.fetch_upstream_and_unpack(dest=tmp_dir, url=ovs_url,
373 {con.VIRT_UPLOAD: "{}:/root/".format(os.path.join(tmp_dir,
375 {con.VIRT_RUN_CMD: "yum downgrade -y /root/{}".format(
379 if dataplane == 'fdio':
380 # Patch neutron with using OVS external interface for router
381 # and add generic linux NS interface driver
383 {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
384 "-p1 < neutron-patch-NSDriver.patch"})
387 {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
388 {con.VIRT_RUN_CMD: "yum install -y "
389 "/root/nosdn_vpp_rpms/*.rpm"}
392 undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
393 'installer_vm']['ip']
394 if sdn == 'opendaylight':
395 oc_builder.inject_opendaylight(
396 odl_version=ds_opts['odl_version'],
399 uc_ip=undercloud_admin_ip,
400 os_version=ds_opts['os_version'],
401 docker_tag=docker_tag,
# Track which containers were rebuilt so callers can retag/push them.
404 patched_containers = patched_containers.union({'opendaylight'})
# Map the requested OpenStack version to its upstream git branch name.
407 if ds_opts['os_version'] == 'master':
408 branch = ds_opts['os_version']
410 branch = "stable/{}".format(ds_opts['os_version'])
411 logging.info('Adding patches to overcloud')
412 patched_containers = patched_containers.union(
413 c_builder.add_upstream_patches(patches,
414 tmp_oc_image, tmp_dir,
416 uc_ip=undercloud_admin_ip,
417 docker_tag=docker_tag))
418 # if containers with ceph, and no ceph device we need to use a
419 # persistent loop device for Ceph OSDs
420 if docker_tag and ds_opts['ceph_device'] == '/dev/loop3':
421 tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
422 with open(tmp_losetup, 'w') as fh:
423 fh.write(LOSETUP_SERVICE)
425 {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(tmp_losetup)
427 {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size {}'
428 .format(LOOP_DEVICE_SIZE)},
429 {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
430 {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
432 # TODO(trozet) remove this after LP#173474 is fixed
433 dhcp_unit = '/usr/lib/systemd/system/dhcp-interface@.service'
435 {con.VIRT_RUN_CMD: "crudini --del {} Unit "
436 "ConditionPathExists".format(dhcp_unit)})
# NFS server setup for shared glance/cinder/nova storage. NOTE(review):
# world-writable (777) export directories look deliberate for this lab
# deployment tool, but confirm before reusing in hardened environments.
439 {con.VIRT_INSTALL: "nfs-utils"},
440 {con.VIRT_RUN_CMD: "ln -s /usr/lib/systemd/system/nfs-server.service "
441 "/etc/systemd/system/multi-user.target.wants/"
442 "nfs-server.service"},
443 {con.VIRT_RUN_CMD: "mkdir -p /glance"},
444 {con.VIRT_RUN_CMD: "mkdir -p /cinder"},
445 {con.VIRT_RUN_CMD: "mkdir -p /nova"},
446 {con.VIRT_RUN_CMD: "chmod 777 /glance"},
447 {con.VIRT_RUN_CMD: "chmod 777 /cinder"},
448 {con.VIRT_RUN_CMD: "chmod 777 /nova"},
449 {con.VIRT_RUN_CMD: "chown nfsnobody:nfsnobody /glance"},
450 {con.VIRT_RUN_CMD: "chown nfsnobody:nfsnobody /cinder"},
451 {con.VIRT_RUN_CMD: "chown nfsnobody:nfsnobody /nova"},
452 {con.VIRT_RUN_CMD: "echo '/glance *(rw,sync,"
453 "no_root_squash,no_acl)' > /etc/exports"},
454 {con.VIRT_RUN_CMD: "echo '/cinder *(rw,sync,"
455 "no_root_squash,no_acl)' >> /etc/exports"},
456 {con.VIRT_RUN_CMD: "echo '/nova *(rw,sync,"
457 "no_root_squash,no_acl)' >> /etc/exports"},
458 {con.VIRT_RUN_CMD: "exportfs -avr"},
# Apply all accumulated operations to the image copy in one pass.
460 virt_utils.virt_customize(virt_cmds, tmp_oc_image)
461 logging.info("Overcloud image customization complete")
462 return patched_containers
# Body of make_ssh_key() (the def line is elided in this view). Generates an
# RSA key pair and returns it as (private PEM/PKCS8, public OpenSSH) strings.
# NOTE(review): the docstring claims 1024-bit RSA but the key_size argument
# is on an elided line -- confirm; 1024-bit RSA is weak by current standards
# and >=2048 bits is the usual recommendation.
467 Creates public and private ssh keys with 1024 bit RSA encryption
468 :return: private, public key
470 key = rsa.generate_private_key(
471 backend=crypto_default_backend(),
472 public_exponent=65537,
# Serialize the private key unencrypted (it is injected into a deployment
# environment file by prep_env()).
476 private_key = key.private_bytes(
477 crypto_serialization.Encoding.PEM,
478 crypto_serialization.PrivateFormat.PKCS8,
479 crypto_serialization.NoEncryption())
480 public_key = key.public_key().public_bytes(
481 crypto_serialization.Encoding.OpenSSH,
482 crypto_serialization.PublicFormat.OpenSSH
484 return private_key.decode('utf-8'), public_key.decode('utf-8')
487 def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
489 Creates modified opnfv/network environments for deployment
490 :param ds: deploy settings
491 :param ns: network settings
492 :param inv: node inventory
493 :param opnfv_env: file path for opnfv-environment file
494 :param net_env: file path for network-environment file
495 :param tmp_dir: Apex tmp dir
# Copies the opnfv environment file into tmp_dir and rewrites it line by
# line (fileinput inplace) based on deploy/network settings, then merges
# compute services into controller services for all-in-one deployments.
499 logging.info("Preparing opnfv-environment and network-environment files")
500 ds_opts = ds['deploy_options']
501 tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
502 shutil.copyfile(opnfv_env, tmp_opnfv_env)
# First NIC of each role's tenant/external mapping is used below when
# writing VPP/honeycomb interface settings.
503 tenant_nic_map = ns['networks']['tenant']['nic_mapping']
505 tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
506 tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
507 external_nic_map = ns['networks']['external'][0]['nic_mapping']
508 external_nic = dict()
509 external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]
512 private_key, public_key = make_ssh_key()
514 num_control, num_compute = inv.get_node_counts()
515 if num_control > 1 and not ds['global_params']['ha_enabled']:
518 # Make easier/faster variables to index in the file editor
519 if 'performance' in ds_opts:
522 if 'vpp' in ds_opts['performance']['Compute']:
523 perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
526 if 'vpp' in ds_opts['performance']['Controller']:
527 perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
532 if 'ovs' in ds_opts['performance']['Compute']:
533 perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
538 if 'kernel' in ds_opts['performance']['Compute']:
539 perf_kern_comp = ds_opts['performance']['Compute']['kernel']
541 perf_kern_comp = None
545 tenant_settings = ns['networks']['tenant']
546 tenant_vlan_enabled = 'tenant' in ns.enabled_network_list and \
547 ns['networks']['tenant'].get('segmentation_type') == 'vlan'
549 # Modify OPNFV environment
550 # TODO: Change to build a dict and outputting yaml rather than parsing
551 for line in fileinput.input(tmp_opnfv_env, inplace=True):
552 line = line.strip('\n')
554 if 'CloudDomain' in line:
555 output_line = " CloudDomain: {}".format(ns['domain_name'])
556 elif 'replace_private_key' in line:
557 output_line = " private_key: |\n"
# NOTE(review): this inner loop rebinds 'line', clobbering the outer
# fileinput loop variable for the remainder of this iteration; it works
# only because 'line' is not read again before the next iteration.
559 for line in private_key.splitlines():
560 key_out += " {}\n".format(line)
561 output_line += key_out
562 elif 'replace_public_key' in line:
563 output_line = " public_key: '{}'".format(public_key)
564 elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
565 'resource_registry' in line:
566 output_line = "resource_registry:\n" \
567 " OS::TripleO::NodeUserData: first-boot.yaml"
568 elif 'ComputeExtraConfigPre' in line and \
569 ds_opts['dataplane'] == 'ovs_dpdk':
570 output_line = ' OS::TripleO::ComputeExtraConfigPre: ' \
571 './ovs-dpdk-preconfig.yaml'
572 elif 'NeutronNetworkVLANRanges' in line:
574 if tenant_vlan_enabled:
575 if ns['networks']['tenant']['overlay_id_range']:
576 vlan_setting = ns['networks']['tenant']['overlay_id_range']
577 if 'datacentre' not in vlan_setting:
578 vlan_setting += ',datacentre:1:1000'
579 # SRIOV networks are VLAN based provider networks. In order to
580 # simplify the deployment, nfv_sriov will be the default physnet.
581 # VLANs are not needed in advance, and the user will have to create
582 # the network specifying the segmentation-id.
585 vlan_setting += ",nfv_sriov"
587 vlan_setting = "datacentre:1:1000,nfv_sriov"
589 output_line = " NeutronNetworkVLANRanges: " + vlan_setting
# NOTE(review): the bridge-mapping loop below is duplicated for the ODL
# provider-mapping branch; a shared helper would remove the repetition.
590 elif 'NeutronBridgeMappings' in line and tenant_vlan_enabled:
591 if tenant_settings['overlay_id_range']:
592 physnets = tenant_settings['overlay_id_range'].split(',')
593 output_line = " NeutronBridgeMappings: "
594 for physnet in physnets:
595 physnet_name = physnet.split(':')[0]
596 if physnet_name != 'datacentre':
597 output_line += "{}:br-vlan,".format(physnet_name)
598 output_line += "datacentre:br-ex"
599 elif 'OpenDaylightProviderMappings' in line and tenant_vlan_enabled \
600 and ds_opts['sdn_controller'] == 'opendaylight':
601 if tenant_settings['overlay_id_range']:
602 physnets = tenant_settings['overlay_id_range'].split(',')
603 output_line = " OpenDaylightProviderMappings: "
604 for physnet in physnets:
605 physnet_name = physnet.split(':')[0]
606 if physnet_name != 'datacentre':
607 output_line += "{}:br-vlan,".format(physnet_name)
608 output_line += "datacentre:br-ex"
609 elif 'NeutronNetworkType' in line and tenant_vlan_enabled:
610 output_line = " NeutronNetworkType: vlan\n" \
611 " NeutronTunnelTypes: ''"
613 if ds_opts['sdn_controller'] == 'opendaylight' and \
614 'odl_vpp_routing_node' in ds_opts:
615 if 'opendaylight::vpp_routing_node' in line:
616 output_line = (" opendaylight::vpp_routing_node: {}.{}"
617 .format(ds_opts['odl_vpp_routing_node'],
619 elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
620 if 'NeutronVPPAgentPhysnets' in line:
621 # VPP interface tap0 will be used for external network
623 output_line = (" NeutronVPPAgentPhysnets: "
624 "'datacentre:{},external:tap0'"
625 .format(tenant_nic['Controller']))
626 elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
628 if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
630 elif 'NeutronDhcpAgentsPerNetwork' in line:
632 num_dhcp_agents = num_control
634 num_dhcp_agents = num_compute
635 output_line = (" NeutronDhcpAgentsPerNetwork: {}"
636 .format(num_dhcp_agents))
637 elif 'ComputeServices' in line:
638 output_line = (" ComputeServices:\n"
639 " - OS::TripleO::Services::NeutronDhcpAgent")
# Per-role VPP (fdio) performance settings rendered into ExtraConfig.
642 for role in 'NovaCompute', 'Controller':
643 if role == 'NovaCompute':
644 perf_opts = perf_vpp_comp
646 perf_opts = perf_vpp_ctrl
647 cfg = "{}ExtraConfig".format(role)
648 if cfg in line and perf_opts:
650 if 'main-core' in perf_opts:
651 perf_line += ("\n fdio::vpp_cpu_main_core: '{}'"
652 .format(perf_opts['main-core']))
653 if 'corelist-workers' in perf_opts:
655 "fdio::vpp_cpu_corelist_workers: '{}'"
656 .format(perf_opts['corelist-workers']))
657 if ds_opts['sdn_controller'] == 'opendaylight' and \
658 ds_opts['dataplane'] == 'fdio':
659 if role == 'NovaCompute':
661 "tripleo::profile::base::neutron::"
662 "agents::honeycomb::"
663 "interface_role_mapping:"
664 " ['{}:tenant-interface',"
665 "'{}:public-interface']"
666 .format(tenant_nic[role],
670 "tripleo::profile::base::neutron::"
671 "agents::honeycomb::"
672 "interface_role_mapping:"
673 " ['{}:tenant-interface']"
674 .format(tenant_nic[role]))
676 output_line = (" {}:{}".format(cfg, perf_line))
# OVS DPDK parameters, translated via OVS_PERF_MAP.
678 if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
679 for k, v in OVS_PERF_MAP.items():
680 if k in line and v in perf_ovs_comp:
681 output_line = " {}: '{}'".format(k, perf_ovs_comp[v])
684 # (FIXME) use compute's kernel settings for all nodes for now.
686 if 'NovaSchedulerDefaultFilters' in line:
688 " NovaSchedulerDefaultFilters: 'RamFilter," \
689 "ComputeFilter,AvailabilityZoneFilter," \
690 "ComputeCapabilitiesFilter," \
691 "ImagePropertiesFilter,NUMATopologyFilter'"
692 elif 'ComputeKernelArgs' in line:
694 for k, v in perf_kern_comp.items():
695 kernel_args += "{}={} ".format(k, v)
697 output_line = " ComputeKernelArgs: '{}'".\
702 # Merge compute services into control services if only a single
705 logging.info("All in one deployment. Checking if service merging "
706 "required into control services")
707 with open(tmp_opnfv_env, 'r') as fh:
708 data = yaml.safe_load(fh)
709 param_data = data['parameter_defaults']
710 # Check to see if any parameters are set for Compute
711 for param in param_data.keys():
712 if param != 'ComputeServices' and param.startswith('Compute'):
713 logging.warning("Compute parameter set, but will not be used "
714 "in deployment: {}. Please use Controller "
715 "based parameters when using All-in-one "
716 "deployments".format(param))
717 if ('ControllerServices' in param_data and 'ComputeServices' in
719 logging.info("Services detected in environment file. Merging...")
720 ctrl_services = param_data['ControllerServices']
721 cmp_services = param_data['ComputeServices']
# Union the two service lists, then drop compute-side duplicates of
# services the controller already provides.
722 param_data['ControllerServices'] = list(set().union(
723 ctrl_services, cmp_services))
724 for dup_service in DUPLICATE_COMPUTE_SERVICES:
725 if dup_service in param_data['ControllerServices']:
726 param_data['ControllerServices'].remove(dup_service)
727 param_data.pop('ComputeServices')
728 logging.debug("Merged controller services: {}".format(
729 pprint.pformat(param_data['ControllerServices'])
731 with open(tmp_opnfv_env, 'w') as fh:
732 yaml.safe_dump(data, fh, default_flow_style=False)
734 logging.info("No services detected in env file, not merging "
737 logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
738 with open(tmp_opnfv_env, 'r') as fh:
739 logging.debug("opnfv-environment content is : {}".format(
740 pprint.pformat(yaml.safe_load(fh.read()))
744 def generate_ceph_key():
# Returns a base64-encoded Ceph secret: a packed little-endian header
# (type=1, creation time, reserved field, key length) followed by the raw
# key bytes. NOTE(review): 'key' is assigned on an elided line -- confirm
# it comes from a cryptographically secure source (e.g. os.urandom).
746 header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
747 return base64.b64encode(header + key)
750 def prep_storage_env(ds, ns, virtual, tmp_dir):
752 Creates storage environment file for deployment. Source file is copied by
753 undercloud playbook to host.
# Rewrites the Ceph FSID and keys in-place with freshly generated values,
# then layers container-specific Ceph parameters on top.
760 ds_opts = ds['deploy_options']
761 storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
762 if not os.path.isfile(storage_file):
763 logging.error("storage-environment file is not in tmp directory: {}. "
764 "Check if file was copied from "
765 "undercloud".format(tmp_dir))
766 raise ApexDeployException("storage-environment file not copied from "
# Inside fileinput inplace mode, print() writes to the file being edited.
768 for line in fileinput.input(storage_file, inplace=True):
769 line = line.strip('\n')
770 if 'CephClusterFSID' in line:
771 print(" CephClusterFSID: {}".format(str(uuid.uuid4())))
772 elif 'CephMonKey' in line:
773 print(" CephMonKey: {}".format(generate_ceph_key().decode(
775 elif 'CephAdminKey' in line:
776 print(" CephAdminKey: {}".format(generate_ceph_key().decode(
778 elif 'CephClientKey' in line:
779 print(" CephClientKey: {}".format(generate_ceph_key().decode(
784 if ds_opts['containers']:
787 # max pgs allowed are calculated as num_mons * 200. Therefore we
788 # set number of pgs and pools so that the total will be less:
789 # num_pgs * num_pools * num_osds
790 ceph_params['CephPoolDefaultSize'] = 2
791 ceph_params['CephPoolDefaultPgNum'] = 32
# Constrain ceph-ansible container resource usage for small deploys.
793 ceph_params['CephAnsibleExtraConfig'] = {
794 'centos_package_dependencies': [],
795 'ceph_osd_docker_memory_limit': '1g',
796 'ceph_mds_docker_memory_limit': '1g',
798 ceph_device = ds_opts['ceph_device']
799 ceph_params['CephAnsibleDisksConfig'] = {
800 'devices': [ceph_device],
802 'osd_scenario': 'collocated'
804 utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
805 # TODO(trozet): remove following block as we only support containers now
806 elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
807 with open(storage_file, 'a') as fh:
808 fh.write(' ExtraConfig:\n')
809 fh.write(" ceph::profile::params::osds:{{{}:{{}}}}\n".format(
810 ds_opts['ceph_device']
814 def prep_sriov_env(ds, tmp_dir):
816 Creates SRIOV environment file for deployment. Source file is copied by
817 undercloud playbook to host.
# Substitutes the configured SRIOV interface name into the ODL SRIOV env
# file, editing it line by line in place.
822 ds_opts = ds['deploy_options']
823 sriov_iface = ds_opts['sriov']
824 sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
825 if not os.path.isfile(sriov_file):
826 logging.error("sriov-environment file is not in tmp directory: {}. "
827 "Check if file was copied from "
828 "undercloud".format(tmp_dir))
829 raise ApexDeployException("sriov-environment file not copied from "
831 # TODO(rnoriega): Instead of line editing, refactor this code to load
832 # yaml file into a dict, edit it and write the file back.
833 for line in fileinput.input(sriov_file, inplace=True):
834 line = line.strip('\n')
# line[3:] strips a leading prefix from the template line -- presumably
# an un-commenting of '#  '-style template lines; confirm against the
# source template.
835 if 'NovaSchedulerDefaultFilters' in line:
836 print(" {}".format(line[3:]))
837 elif 'NovaSchedulerAvailableFilters' in line:
838 print(" {}".format(line[3:]))
839 elif 'NeutronPhysicalDevMappings' in line:
840 print(" NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
841 .format(sriov_iface))
842 elif 'NeutronSriovNumVFs' in line:
843 print(" NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
844 elif 'NovaPCIPassthrough' in line:
845 print(" NovaPCIPassthrough:")
846 elif 'devname' in line:
847 print(" - devname: \"{}\"".format(sriov_iface))
848 elif 'physical_network' in line:
849 print(" physical_network: \"nfv_sriov\"")
854 def external_network_cmds(ns, ds):
856 Generates external network openstack commands
857 :param ns: network settings
858 :param ds: deploy settings
859 :return: list of commands to configure external network
861 ds_opts = ds['deploy_options']
# Physnet name differs for no-SDN fdio deploys (see prep_env's
# NeutronVPPAgentPhysnets handling, which names it 'external').
862 external_physnet = 'datacentre'
863 if ds_opts['dataplane'] == 'fdio' and \
864 ds_opts['sdn_controller'] != 'opendaylight':
865 external_physnet = 'external'
# Use the external network when enabled, otherwise fall back to the admin
# network and its introspection range for the floating pool.
866 if 'external' in ns.enabled_network_list:
867 net_config = ns['networks']['external'][0]
869 pool_start, pool_end = net_config['floating_ip_range']
871 net_config = ns['networks']['admin']
873 pool_start, pool_end = ns['apex']['networks']['admin'][
874 'introspection_range']
875 nic_config = net_config['nic_mapping']
876 gateway = net_config['gateway']
878 # create network command
# Native VLAN means a flat provider network; otherwise a VLAN provider
# network carrying the configured segment id.
879 if nic_config['compute']['vlan'] == 'native':
882 ext_type = "vlan --provider-segment {}".format(nic_config[
884 cmds.append("openstack network create external --project service "
885 "--external --provider-network-type {} "
886 "--provider-physical-network {}"
887 .format(ext_type, external_physnet))
888 # create subnet command
889 cidr = net_config['cidr']
890 subnet_cmd = "openstack subnet create external-subnet --project " \
891 "service --network external --no-dhcp --gateway {} " \
892 "--allocation-pool start={},end={} --subnet-range " \
893 "{}".format(gateway, pool_start, pool_end, str(cidr))
# NOTE(review): 'external' flag is set on an elided line -- presumably True
# in the external-network branch above; confirm.
894 if external and cidr.version == 6:
895 subnet_cmd += ' --ip-version 6'
896 cmds.append(subnet_cmd)
897 logging.debug("Neutron external network commands determined "
898 "as: {}".format(cmds))
902 def create_congress_cmds(overcloud_file):
# Builds congress datasource-creation command strings from overcloudrc
# credentials, one per driver. NOTE(review): the function appears to
# continue past the last line visible here (e.g. accumulating/returning the
# commands) -- this view is truncated.
903 drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
904 overcloudrc = parsers.parse_overcloudrc(overcloud_file)
905 logging.info("Creating congress commands")
# Credentials passed as --config options; a missing overcloudrc key is
# reported via the error path below.
908 "username={}".format(overcloudrc['OS_USERNAME']),
909 "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
910 "password={}".format(overcloudrc['OS_PASSWORD']),
911 "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
914 logging.error("Unable to find all keys required for congress in "
915 "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
916 "OS_PASSWORD, OS_AUTH_URL. Please check overcloudrc "
917 "file: {}".format(overcloud_file))
920 ds_cfg = '--config ' + ' --config '.join(ds_cfg)
922 for driver in drivers:
# The doctor driver takes no credential config; nova additionally pins
# its API version.
923 if driver == 'doctor':
924 cmd = "{} \"{}\"".format(driver, driver)
926 cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
928 cmd += ' --config api_version="2.34"'
929 logging.debug("Congress command created: {}".format(cmd))