##############################################################################
# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

import base64
import fileinput
import logging
import os
import platform
import pprint
import shutil
import struct
import time
import uuid
import yaml

import apex.builders.overcloud_builder as oc_builder
import apex.builders.common_builder as c_builder
from apex.common import constants as con
from apex.common.exceptions import ApexDeployException
from apex.common import parsers
from apex.common import utils
from apex.virtual import utils as virt_utils
from cryptography.hazmat.primitives import serialization as \
    crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend as \
    crypto_default_backend


SDN_FILE_MAP = {
    'opendaylight': {
        'sfc': 'neutron-sfc-opendaylight.yaml',
        'vpn': 'neutron-bgpvpn-opendaylight.yaml',
        'gluon': 'gluon.yaml',
        'vpp': {
            'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
            'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
            'default': 'neutron-opendaylight-honeycomb.yaml'
        },
        'l2gw': 'neutron-l2gw-opendaylight.yaml',
        'sriov': 'neutron-opendaylight-sriov.yaml',
        'default': 'neutron-opendaylight.yaml',
    },
    'onos': {
        'sfc': 'neutron-onos-sfc.yaml',
        'default': 'neutron-onos.yaml'
    },
    'ovn': 'neutron-ml2-ovn.yaml',
    False: {
        'vpp': 'neutron-ml2-vpp.yaml',
        'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
    }
}

OTHER_FILE_MAP = {
    'tacker': 'enable_tacker.yaml',
    'congress': 'enable_congress.yaml',
    'barometer': 'enable_barometer.yaml',
    'rt_kvm': 'enable_rt_kvm.yaml'
}

OVS_PERF_MAP = {
    'HostCpusList': 'dpdk_cores',
    'NeutronDpdkCoreList': 'pmd_cores',
    'NeutronDpdkSocketMemory': 'socket_memory',
    'NeutronDpdkMemoryChannels': 'memory_channels'
}

ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
                      ".noarch.rpm"

LOOP_DEVICE_SIZE = "10G"

LOSETUP_SERVICE = """[Unit]
Description=Setup loop devices
Before=network.target

[Service]
Type=oneshot
ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
ExecStop=/sbin/losetup -d /dev/loop3
TimeoutSec=60
RemainAfterExit=yes

[Install]
WantedBy=multi-user.target
"""

DUPLICATE_COMPUTE_SERVICES = [
    'OS::TripleO::Services::ComputeNeutronCorePlugin',
    'OS::TripleO::Services::ComputeNeutronMetadataAgent',
    'OS::TripleO::Services::ComputeNeutronOvsAgent',
    'OS::TripleO::Services::ComputeNeutronL3Agent'
]


def build_sdn_env_list(ds, sdn_map, env_list=None):
    """
    Builds a list of SDN environment files to be used in the deploy cmd.

    This function recursively searches an sdn_map. First the SDN controller is
    matched, and then the function looks for enabled features for that
    controller to determine which environment files should be used. By
    default, a feature is added to the list if it is set to True in the
    deploy settings. If a feature does not have a boolean value, then the
    key and value pair to compare against are given as a tuple (k, v).

    :param ds: deploy settings
    :param sdn_map: SDN map to recursively search
    :param env_list: recursive var to hold previously found env_list
    :return: A list of env files
    """
    if env_list is None:
        env_list = list()
    for k, v in sdn_map.items():
        if ds['sdn_controller'] == k or (k in ds and ds[k]):
            if isinstance(v, dict):
                # Append default SDN env file first
                # The assumption is that feature-enabled SDN env files
                # override and do not conflict with previously set default
                # settings
                if ds['sdn_controller'] == k and 'default' in v:
                    env_list.append(os.path.join(con.THT_ENV_DIR,
                                                 v['default']))
                env_list.extend(build_sdn_env_list(ds, v))
            # check if the value is not a boolean
            elif isinstance(v, tuple):
                if ds[k] == v[0]:
                    env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
            else:
                env_list.append(os.path.join(con.THT_ENV_DIR, v))
    if len(env_list) == 0:
        try:
            env_list.append(os.path.join(
                con.THT_ENV_DIR, sdn_map['default']))
        except KeyError:
            logging.warning("Unable to find default file for SDN")

    return env_list
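
# Example usage of build_sdn_env_list (a sketch; the deploy settings below
# are hypothetical and trimmed to the keys the function actually reads).
# The controller's default env file is appended before any feature files:
#
#   ds = {'sdn_controller': 'opendaylight', 'sfc': True}
#   build_sdn_env_list(ds, SDN_FILE_MAP)
#   # -> [os.path.join(con.THT_ENV_DIR, 'neutron-opendaylight.yaml'),
#   #     os.path.join(con.THT_ENV_DIR, 'neutron-sfc-opendaylight.yaml')]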


def get_docker_sdn_files(ds_opts):
    """
    Returns docker env files for the detected SDN
    :param ds_opts: deploy options
    :return: list of docker THT env files for an SDN
    """
    docker_services = con.VALID_DOCKER_SERVICES
    tht_dir = con.THT_DOCKER_ENV_DIR
    sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
    for i, sdn_file in enumerate(sdn_env_list):
        sdn_base = os.path.basename(sdn_file)
        if sdn_base in docker_services:
            if docker_services[sdn_base] is not None:
                sdn_env_list[i] = \
                    os.path.join(tht_dir, docker_services[sdn_base])
            else:
                sdn_env_list[i] = os.path.join(tht_dir, sdn_base)
    return sdn_env_list
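
# Example (sketch): for containerized deploys, a puppet THT env file listed
# in con.VALID_DOCKER_SERVICES is swapped for its docker counterpart, e.g.
#
#   get_docker_sdn_files({'sdn_controller': 'opendaylight'})
#   # -> [os.path.join(con.THT_DOCKER_ENV_DIR, 'neutron-opendaylight.yaml')]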


def create_deploy_cmd(ds, ns, inv, tmp_dir,
                      virtual, env_file='opnfv-environment.yaml',
                      net_data=False):

    logging.info("Creating deployment command")
    deploy_options = ['network-environment.yaml']

    ds_opts = ds['deploy_options']

    if ds_opts['containers']:
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'docker.yaml'))

    if ds['global_params']['ha_enabled']:
        if ds_opts['containers']:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'docker-ha.yaml'))
        else:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'puppet-pacemaker.yaml'))

    if env_file:
        deploy_options.append(env_file)

    if ds_opts['containers']:
        deploy_options.append('docker-images.yaml')
        sdn_docker_files = get_docker_sdn_files(ds_opts)
        for sdn_docker_file in sdn_docker_files:
            deploy_options.append(sdn_docker_file)
    else:
        deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)

    for k, v in OTHER_FILE_MAP.items():
        if k in ds_opts and ds_opts[k]:
            if ds_opts['containers']:
                deploy_options.append(os.path.join(con.THT_DOCKER_ENV_DIR,
                                                   "{}.yaml".format(k)))
            else:
                deploy_options.append(os.path.join(con.THT_ENV_DIR, v))

    if ds_opts['ceph'] and 'csit' not in env_file:
        prep_storage_env(ds, ns, virtual, tmp_dir)
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'storage-environment.yaml'))
    if ds_opts['sriov']:
        prep_sriov_env(ds, tmp_dir)

    # Check for 'k8s' here intentionally, as we may support other values
    # such as openstack/openshift for 'vim' option.
    if ds_opts['vim'] == 'k8s':
        deploy_options.append('kubernetes-environment.yaml')

    if virtual:
        deploy_options.append('virtual-environment.yaml')
    else:
        deploy_options.append('baremetal-environment.yaml')

    num_control, num_compute = inv.get_node_counts()
    if num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1
    if platform.machine() == 'aarch64':
        # aarch64 deploys were not completing in the default 90 mins.
        # Not sure if this is related to the hardware the OOO support
        # was developed on or the virtualization support in CentOS.
        # Either way it will probably get better over time as the aarch64
        # support matures in CentOS, and deploy time should be re-tested in
        # the future so this multiplier can be removed.
        con.DEPLOY_TIMEOUT *= 2
    cmd = "openstack overcloud deploy --templates --timeout {} " \
          .format(con.DEPLOY_TIMEOUT)
    # build cmd env args
    for option in deploy_options:
        cmd += " -e {}".format(option)
    cmd += " --ntp-server {}".format(ns['ntp'][0])
    cmd += " --control-scale {}".format(num_control)
    cmd += " --compute-scale {}".format(num_compute)
    cmd += ' --control-flavor control --compute-flavor compute'
    if net_data:
        cmd += ' --networks-file network_data.yaml'
    libvirt_type = 'kvm'
    if virtual:
        with open('/sys/module/kvm_intel/parameters/nested') as f:
            nested_kvm = f.read().strip()
            if nested_kvm != 'Y':
                libvirt_type = 'qemu'
    cmd += ' --libvirt-type {}'.format(libvirt_type)
    logging.info("Deploy command set: {}".format(cmd))

    with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
        fh.write(cmd)
    return cmd
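
# The command written to <tmp_dir>/deploy_command resembles the following
# (illustrative only; the list of -e options varies with deploy settings):
#
#   openstack overcloud deploy --templates --timeout 90 \
#     -e network-environment.yaml -e opnfv-environment.yaml ... \
#     --ntp-server pool.ntp.org --control-scale 1 --compute-scale 1 \
#     --control-flavor control --compute-flavor compute --libvirt-type kvm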


def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
               patches=None):
    """
    Locates SDN image and preps for deployment.
    :param ds: deploy settings
    :param ns: network settings
    :param img: sdn image
    :param tmp_dir: dir to store modified sdn image
    :param root_pw: password to configure for overcloud image
    :param docker_tag: Docker image tag for RDO version (default None)
    :param patches: List of patches to apply to overcloud image
    :return: set of patched container names
    """
    # TODO(trozet): Come up with a better way to organize this logic in this
    # function
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))

    ds_opts = ds['deploy_options']
    virt_cmds = list()
    sdn = ds_opts['sdn_controller']
    patched_containers = set()
    # we need this due to rhbz #1436021
    # fixed in systemd-219-37.el7
    if sdn is False:
        logging.info("Neutron openvswitch-agent disabled")
        virt_cmds.extend([{
            con.VIRT_RUN_CMD:
                "rm -f /etc/systemd/system/multi-user.target.wants/"
                "neutron-openvswitch-agent.service"},
            {
            con.VIRT_RUN_CMD:
                "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
                ".service"
        }])

    if ns.get('http_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'http_proxy={}' >> /etc/environment".format(
                    ns['http_proxy'])})

    if ns.get('https_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'https_proxy={}' >> /etc/environment".format(
                    ns['https_proxy'])})

    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug("Temporary overcloud image stored as: {}".format(
        tmp_oc_image))

    if ds_opts['vpn']:
        oc_builder.inject_quagga(tmp_oc_image, tmp_dir)
        virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
                "/opt/quagga/etc/init.d/zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
                              "zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.local "})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
        logging.info("ZRPCD process started")

    dataplane = ds_opts['dataplane']
    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
        uio_types = {
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
        }
        for mod_file, mod in uio_types.items():
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))

            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
                    mod_file)},
                {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
                                   "{}".format(os.path.basename(mod_file))}
            ])
    if root_pw:
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})

    if dataplane == 'ovs':
        if ds_opts['sfc']:
            oc_builder.inject_ovs_nsh(tmp_oc_image, tmp_dir)
        elif sdn == 'opendaylight':
            # FIXME(trozet) remove this after RDO is updated with fix for
            # https://bugzilla.redhat.com/show_bug.cgi?id=1544892
            ovs_file = os.path.basename(con.CUSTOM_OVS)
            ovs_url = con.CUSTOM_OVS.replace(ovs_file, '')
            utils.fetch_upstream_and_unpack(dest=tmp_dir, url=ovs_url,
                                            targets=[ovs_file])
            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/root/".format(os.path.join(
                    tmp_dir, ovs_file))},
                {con.VIRT_RUN_CMD: "yum downgrade -y /root/{}".format(
                    ovs_file)}
            ])

    if dataplane == 'fdio':
        # Patch neutron with using OVS external interface for router
        # and add generic linux NS interface driver
        virt_cmds.append(
            {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
                               "-p1 < neutron-patch-NSDriver.patch"})
        if sdn is False:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
                {con.VIRT_RUN_CMD: "yum install -y "
                                   "/root/nosdn_vpp_rpms/*.rpm"}
            ])

    undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
        'installer_vm']['ip']
    if sdn == 'opendaylight':
        oc_builder.inject_opendaylight(
            odl_version=ds_opts['odl_version'],
            image=tmp_oc_image,
            tmp_dir=tmp_dir,
            uc_ip=undercloud_admin_ip,
            os_version=ds_opts['os_version'],
            docker_tag=docker_tag,
        )
        if docker_tag:
            patched_containers = patched_containers.union({'opendaylight'})

    if patches:
        if ds_opts['os_version'] == 'master':
            branch = ds_opts['os_version']
        else:
            branch = "stable/{}".format(ds_opts['os_version'])
        logging.info('Adding patches to overcloud')
        patched_containers = patched_containers.union(
            c_builder.add_upstream_patches(patches,
                                           tmp_oc_image, tmp_dir,
                                           branch,
                                           uc_ip=undercloud_admin_ip,
                                           docker_tag=docker_tag))
    # if containers with ceph, and no ceph device we need to use a
    # persistent loop device for Ceph OSDs
    if docker_tag and ds_opts['ceph_device'] == '/dev/loop3':
        tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
        with open(tmp_losetup, 'w') as fh:
            fh.write(LOSETUP_SERVICE)
        virt_cmds.extend([
            {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(
                tmp_losetup)},
            {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size {}'
                .format(LOOP_DEVICE_SIZE)},
            {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
            {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
        ])
    # TODO(trozet) remove this after LP#173474 is fixed
    dhcp_unit = '/usr/lib/systemd/system/dhcp-interface@.service'
    virt_cmds.append(
        {con.VIRT_RUN_CMD: "crudini --del {} Unit "
                           "ConditionPathExists".format(dhcp_unit)})
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")
    return patched_containers
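
# Typical call (sketch; the image path, password and tag are illustrative):
#
#   prep_image(ds, ns, '/home/stack/overcloud-full.qcow2', '/tmp/apex',
#              root_pw='root', docker_tag='latest')
#
# The source image is never modified in place; a copy is customized in
# tmp_dir via virt-customize and the set of patched container names is
# returned.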


def make_ssh_key():
    """
    Creates public and private ssh keys with 1024 bit RSA encryption
    :return: private, public key
    """
    key = rsa.generate_private_key(
        backend=crypto_default_backend(),
        public_exponent=65537,
        key_size=1024
    )

    private_key = key.private_bytes(
        crypto_serialization.Encoding.PEM,
        crypto_serialization.PrivateFormat.PKCS8,
        crypto_serialization.NoEncryption())
    public_key = key.public_key().public_bytes(
        crypto_serialization.Encoding.OpenSSH,
        crypto_serialization.PublicFormat.OpenSSH
    )
    return private_key.decode('utf-8'), public_key.decode('utf-8')
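
# Example (sketch): the decoded strings are later substituted into the
# replace_private_key/replace_public_key placeholders handled by prep_env:
#
#   priv, pub = make_ssh_key()
#   # priv begins with '-----BEGIN PRIVATE KEY-----'
#   # pub begins with 'ssh-rsa '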


def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
    """
    Creates modified opnfv/network environments for deployment
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param opnfv_env: file path for opnfv-environment file
    :param net_env: file path for network-environment file
    :param tmp_dir: Apex tmp dir
    :return: None
    """

    logging.info("Preparing opnfv-environment and network-environment files")
    ds_opts = ds['deploy_options']
    tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
    shutil.copyfile(opnfv_env, tmp_opnfv_env)
    tenant_nic_map = ns['networks']['tenant']['nic_mapping']
    tenant_nic = dict()
    tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
    tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
    external_nic_map = ns['networks']['external'][0]['nic_mapping']
    external_nic = dict()
    external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]

    # SSH keys
    private_key, public_key = make_ssh_key()

    num_control, num_compute = inv.get_node_counts()
    if num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1

    # Make easier/faster variables to index in the file editor
    if 'performance' in ds_opts:
        perf = True
        # vpp
        if 'vpp' in ds_opts['performance']['Compute']:
            perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
        else:
            perf_vpp_comp = None
        if 'vpp' in ds_opts['performance']['Controller']:
            perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
        else:
            perf_vpp_ctrl = None

        # ovs
        if 'ovs' in ds_opts['performance']['Compute']:
            perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
        else:
            perf_ovs_comp = None

        # kernel
        if 'kernel' in ds_opts['performance']['Compute']:
            perf_kern_comp = ds_opts['performance']['Compute']['kernel']
        else:
            perf_kern_comp = None
    else:
        perf = False

    tenant_settings = ns['networks']['tenant']
    tenant_vlan_enabled = 'tenant' in ns.enabled_network_list and \
        ns['networks']['tenant'].get('segmentation_type') == 'vlan'

    # Modify OPNFV environment
    # TODO: Change to build a dict and outputting yaml rather than parsing
    for line in fileinput.input(tmp_opnfv_env, inplace=True):
        line = line.strip('\n')
        output_line = line
        if 'CloudDomain' in line:
            output_line = "  CloudDomain: {}".format(ns['domain_name'])
        elif 'replace_private_key' in line:
            output_line = "    private_key: |\n"
            key_out = ''
            for key_line in private_key.splitlines():
                key_out += "        {}\n".format(key_line)
            output_line += key_out
        elif 'replace_public_key' in line:
            output_line = "    public_key: '{}'".format(public_key)
        elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
                'resource_registry' in line:
            output_line = "resource_registry:\n" \
                          "  OS::TripleO::NodeUserData: first-boot.yaml"
        elif 'ComputeExtraConfigPre' in line and \
                ds_opts['dataplane'] == 'ovs_dpdk':
            output_line = '  OS::TripleO::ComputeExtraConfigPre: ' \
                          './ovs-dpdk-preconfig.yaml'
        elif 'NeutronNetworkVLANRanges' in line:
            vlan_setting = ''
            if tenant_vlan_enabled:
                if ns['networks']['tenant']['overlay_id_range']:
                    vlan_setting = ns['networks']['tenant']['overlay_id_range']
                    if 'datacentre' not in vlan_setting:
                        vlan_setting += ',datacentre:1:1000'
            # SRIOV networks are VLAN based provider networks. In order to
            # simplify the deployment, nfv_sriov will be the default physnet.
            # VLANs are not needed in advance, and the user will have to create
            # the network specifying the segmentation-id.
            if ds_opts['sriov']:
                if vlan_setting:
                    vlan_setting += ",nfv_sriov"
                else:
                    vlan_setting = "datacentre:1:1000,nfv_sriov"
            output_line = "  NeutronNetworkVLANRanges: " + vlan_setting
        elif 'NeutronBridgeMappings' in line and tenant_vlan_enabled:
            if tenant_settings['overlay_id_range']:
                physnets = tenant_settings['overlay_id_range'].split(',')
                output_line = "  NeutronBridgeMappings: "
                for physnet in physnets:
                    physnet_name = physnet.split(':')[0]
                    if physnet_name != 'datacentre':
                        output_line += "{}:br-vlan,".format(physnet_name)
                output_line += "datacentre:br-ex"
        elif 'OpenDaylightProviderMappings' in line and tenant_vlan_enabled \
                and ds_opts['sdn_controller'] == 'opendaylight':
            if tenant_settings['overlay_id_range']:
                physnets = tenant_settings['overlay_id_range'].split(',')
                output_line = "  OpenDaylightProviderMappings: "
                for physnet in physnets:
                    physnet_name = physnet.split(':')[0]
                    if physnet_name != 'datacentre':
                        output_line += "{}:br-vlan,".format(physnet_name)
                output_line += "datacentre:br-ex"
        elif 'NeutronNetworkType' in line and tenant_vlan_enabled:
            output_line = "  NeutronNetworkType: vlan\n" \
                          "  NeutronTunnelTypes: ''"

        if ds_opts['sdn_controller'] == 'opendaylight' and \
                'odl_vpp_routing_node' in ds_opts:
            if 'opendaylight::vpp_routing_node' in line:
                output_line = ("    opendaylight::vpp_routing_node: {}.{}"
                               .format(ds_opts['odl_vpp_routing_node'],
                                       ns['domain_name']))
        elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
            if 'NeutronVPPAgentPhysnets' in line:
                # VPP interface tap0 will be used for external network
                # connectivity.
                output_line = ("  NeutronVPPAgentPhysnets: "
                               "'datacentre:{},external:tap0'"
                               .format(tenant_nic['Controller']))
        elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
                'dvr') is True:
            if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
                output_line = ''
            elif 'NeutronDhcpAgentsPerNetwork' in line:
                if num_compute == 0:
                    num_dhcp_agents = num_control
                else:
                    num_dhcp_agents = num_compute
                output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
                               .format(num_dhcp_agents))
            elif 'ComputeServices' in line:
                output_line = ("  ComputeServices:\n"
                               "    - OS::TripleO::Services::NeutronDhcpAgent")

        if perf:
            for role in 'NovaCompute', 'Controller':
                if role == 'NovaCompute':
                    perf_opts = perf_vpp_comp
                else:
                    perf_opts = perf_vpp_ctrl
                cfg = "{}ExtraConfig".format(role)
                if cfg in line and perf_opts:
                    perf_line = ''
                    if 'main-core' in perf_opts:
                        perf_line += ("\n    fdio::vpp_cpu_main_core: '{}'"
                                      .format(perf_opts['main-core']))
                    if 'corelist-workers' in perf_opts:
                        perf_line += ("\n    "
                                      "fdio::vpp_cpu_corelist_workers: '{}'"
                                      .format(perf_opts['corelist-workers']))
                    if ds_opts['sdn_controller'] == 'opendaylight' and \
                            ds_opts['dataplane'] == 'fdio':
                        if role == 'NovaCompute':
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface',"
                                          "'{}:public-interface']"
                                          .format(tenant_nic[role],
                                                  external_nic[role]))
                        else:
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface']"
                                          .format(tenant_nic[role]))
                    if perf_line:
                        output_line = ("  {}:{}".format(cfg, perf_line))

            if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
                for k, v in OVS_PERF_MAP.items():
                    if k in line and v in perf_ovs_comp:
                        output_line = "  {}: '{}'".format(k, perf_ovs_comp[v])

            # kernel args
            # (FIXME) use compute's kernel settings for all nodes for now.
            if perf_kern_comp:
                if 'NovaSchedulerDefaultFilters' in line:
                    output_line = \
                        "  NovaSchedulerDefaultFilters: 'RamFilter," \
                        "ComputeFilter,AvailabilityZoneFilter," \
                        "ComputeCapabilitiesFilter," \
                        "ImagePropertiesFilter,NUMATopologyFilter'"
                elif 'ComputeKernelArgs' in line:
                    kernel_args = ''
                    for k, v in perf_kern_comp.items():
                        kernel_args += "{}={} ".format(k, v)
                    if kernel_args:
                        output_line = "  ComputeKernelArgs: '{}'".\
                            format(kernel_args)

        print(output_line)

    # Merge compute services into control services if only a single
    # node deployment
    if num_compute == 0:
        logging.info("All in one deployment. Checking if service merging "
                     "required into control services")
        with open(tmp_opnfv_env, 'r') as fh:
            data = yaml.safe_load(fh)
        param_data = data['parameter_defaults']
        # Check to see if any parameters are set for Compute
        for param in param_data.keys():
            if param != 'ComputeServices' and param.startswith('Compute'):
                logging.warning("Compute parameter set, but will not be used "
                                "in deployment: {}. Please use Controller "
                                "based parameters when using All-in-one "
                                "deployments".format(param))
        if ('ControllerServices' in param_data and 'ComputeServices' in
                param_data):
            logging.info("Services detected in environment file. Merging...")
            ctrl_services = param_data['ControllerServices']
            cmp_services = param_data['ComputeServices']
            param_data['ControllerServices'] = list(set().union(
                ctrl_services, cmp_services))
            for dup_service in DUPLICATE_COMPUTE_SERVICES:
                if dup_service in param_data['ControllerServices']:
                    param_data['ControllerServices'].remove(dup_service)
            param_data.pop('ComputeServices')
            logging.debug("Merged controller services: {}".format(
                pprint.pformat(param_data['ControllerServices'])
            ))
            with open(tmp_opnfv_env, 'w') as fh:
                yaml.safe_dump(data, fh, default_flow_style=False)
        else:
            logging.info("No services detected in env file, not merging "
                         "services")

    logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
    with open(tmp_opnfv_env, 'r') as fh:
        logging.debug("opnfv-environment content is : {}".format(
            pprint.pformat(yaml.safe_load(fh.read()))
        ))
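
# Example substitution performed by prep_env (illustrative domain): a
# template line containing 'CloudDomain' is rewritten in place as
#
#   CloudDomain: example.opnfv.org
#
# with the value taken from ns['domain_name'].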


def generate_ceph_key():
    key = os.urandom(16)
    header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
    return base64.b64encode(header + key)
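
# The returned value follows the Ceph keyring binary format: a little-endian
# header of (type=1, created secs, created nsecs=0, key length) followed by
# 16 random bytes, all base64 encoded, suitable for the CephMonKey/
# CephAdminKey/CephClientKey values written below.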


def prep_storage_env(ds, ns, virtual, tmp_dir):
    """
    Creates storage environment file for deployment. Source file is copied by
    undercloud playbook to host.
    :param ds: deploy settings
    :param ns: network settings
    :param virtual: True if virtual deployment
    :param tmp_dir: Apex tmp dir
    :return: None
    """
    ds_opts = ds['deploy_options']
    storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
    if not os.path.isfile(storage_file):
        logging.error("storage-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("storage-environment file not copied from "
                                  "undercloud")
    for line in fileinput.input(storage_file, inplace=True):
        line = line.strip('\n')
        if 'CephClusterFSID' in line:
            print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
        elif 'CephMonKey' in line:
            print("  CephMonKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephAdminKey' in line:
            print("  CephAdminKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephClientKey' in line:
            print("  CephClientKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        else:
            print(line)

    if ds_opts['containers']:
        ceph_params = {}

        # max pgs allowed are calculated as num_mons * 200. Therefore we
        # set number of pgs and pools so that the total will be less:
        # num_pgs * num_pools * num_osds
        ceph_params['CephPoolDefaultSize'] = 2
        ceph_params['CephPoolDefaultPgNum'] = 32
        if virtual:
            ceph_params['CephAnsibleExtraConfig'] = {
                'centos_package_dependencies': [],
                'ceph_osd_docker_memory_limit': '1g',
                'ceph_mds_docker_memory_limit': '1g',
            }
        ceph_device = ds_opts['ceph_device']
        ceph_params['CephAnsibleDisksConfig'] = {
            'devices': [ceph_device],
            'journal_size': 512,
            'osd_scenario': 'collocated'
        }
        utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
    # TODO(trozet): remove following block as we only support containers now
    elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
        with open(storage_file, 'a') as fh:
            fh.write('  ExtraConfig:\n')
            fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
                ds_opts['ceph_device']
            ))
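
# Rough check on the PG sizing above (illustrative; pool count is an
# assumption): a virtual deploy runs a single Ceph monitor, so the cap is
# 1 * 200 = 200 PGs. With CephPoolDefaultPgNum=32, roughly six default
# pools on one OSD give 32 * 6 * 1 = 192, staying under that cap.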


def prep_sriov_env(ds, tmp_dir):
    """
    Creates SRIOV environment file for deployment. Source file is copied by
    undercloud playbook to host.
    :param ds: deploy settings
    :param tmp_dir: Apex tmp dir
    :return: None
    """
    ds_opts = ds['deploy_options']
    sriov_iface = ds_opts['sriov']
    sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
    if not os.path.isfile(sriov_file):
        logging.error("sriov-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("sriov-environment file not copied from "
                                  "undercloud")
    # TODO(rnoriega): Instead of line editing, refactor this code to load
    # yaml file into a dict, edit it and write the file back.
    for line in fileinput.input(sriov_file, inplace=True):
        line = line.strip('\n')
        if 'NovaSchedulerDefaultFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NovaSchedulerAvailableFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NeutronPhysicalDevMappings' in line:
            print("  NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
                  .format(sriov_iface))
        elif 'NeutronSriovNumVFs' in line:
            print("  NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
        elif 'NovaPCIPassthrough' in line:
            print("  NovaPCIPassthrough:")
        elif 'devname' in line:
            print("    - devname: \"{}\"".format(sriov_iface))
        elif 'physical_network' in line:
            print("      physical_network: \"nfv_sriov\"")
        else:
            print(line)


def external_network_cmds(ns, ds):
    """
    Generates external network openstack commands
    :param ns: network settings
    :param ds: deploy settings
    :return: list of commands to configure external network
    """
    ds_opts = ds['deploy_options']
    external_physnet = 'datacentre'
    if ds_opts['dataplane'] == 'fdio' and \
            ds_opts['sdn_controller'] != 'opendaylight':
        external_physnet = 'external'
    if 'external' in ns.enabled_network_list:
        net_config = ns['networks']['external'][0]
        external = True
        pool_start, pool_end = net_config['floating_ip_range']
    else:
        net_config = ns['networks']['admin']
        external = False
        pool_start, pool_end = ns['apex']['networks']['admin'][
            'introspection_range']
    nic_config = net_config['nic_mapping']
    gateway = net_config['gateway']
    cmds = list()
    # create network command
    if nic_config['compute']['vlan'] == 'native':
        ext_type = 'flat'
    else:
        ext_type = "vlan --provider-segment {}".format(nic_config[
            'compute']['vlan'])
    cmds.append("openstack network create external --project service "
                "--external --provider-network-type {} "
                "--provider-physical-network {}"
                .format(ext_type, external_physnet))
    # create subnet command
    cidr = net_config['cidr']
    subnet_cmd = "openstack subnet create external-subnet --project " \
                 "service --network external --no-dhcp --gateway {} " \
                 "--allocation-pool start={},end={} --subnet-range " \
                 "{}".format(gateway, pool_start, pool_end, str(cidr))
    if external and cidr.version == 6:
        subnet_cmd += ' --ip-version 6'
    cmds.append(subnet_cmd)
    logging.debug("Neutron external network commands determined "
                  "as: {}".format(cmds))
    return cmds


def create_congress_cmds(overcloud_file):
    drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
    overcloudrc = parsers.parse_overcloudrc(overcloud_file)
    logging.info("Creating congress commands")
    try:
        ds_cfg = [
            "username={}".format(overcloudrc['OS_USERNAME']),
            "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
            "password={}".format(overcloudrc['OS_PASSWORD']),
            "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
        ]
    except KeyError:
        logging.error("Unable to find all keys required for congress in "
                      "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
                      "OS_PASSWORD, OS_AUTH_URL. Please check overcloudrc "
                      "file: {}".format(overcloud_file))
        raise
    cmds = list()
    ds_cfg = '--config ' + ' --config '.join(ds_cfg)

    for driver in drivers:
        if driver == 'doctor':
            cmd = "{} \"{}\"".format(driver, driver)
        else:
            cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
        if driver == 'nova':
            cmd += ' --config api_version="2.34"'
        logging.debug("Congress command created: {}".format(cmd))
        cmds.append(cmd)
    return cmds
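
# Example datasource command produced for the nova driver (sketch; the
# credential values come from the parsed overcloudrc):
#
#   nova "nova" --config username=admin --config tenant_name=admin
#     --config password=<secret> --config auth_url=http://<keystone>:5000/v3
#     --config api_version="2.34"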