1 ##############################################################################
2 # Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
21 import apex.builders.overcloud_builder as oc_builder
22 import apex.builders.common_builder as c_builder
24 from apex.common import constants as con
25 from apex.common.exceptions import ApexDeployException
26 from apex.common import parsers
27 from apex.common import utils
28 from apex.virtual import utils as virt_utils
29 from cryptography.hazmat.primitives import serialization as \
31 from cryptography.hazmat.primitives.asymmetric import rsa
32 from cryptography.hazmat.backends import default_backend as \
33 crypto_default_backend
# NOTE(review): this listing is an incomplete extraction — each line carries
# its original line number, indentation is stripped, and numbering gaps mean
# source lines are missing (e.g. the dict headers such as
# `SDN_FILE_MAP = {` before line 38 are absent). Comments below document
# what the visible fragments establish; confirm against the full file.
#
# Fragments of a map from SDN controller / feature keys to TripleO Heat
# environment (THT env) file names, consumed by build_sdn_env_list() below.
# 'default' entries are the baseline env file appended when the controller
# matches; feature keys (sfc, vpn, l2gw, ...) add feature-specific overlays.
38 'sfc': 'neutron-sfc-opendaylight.yaml',
39 'vpn': 'neutron-bgpvpn-opendaylight.yaml',
40 'gluon': 'gluon.yaml',
42 'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
43 'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
44 'default': 'neutron-opendaylight-honeycomb.yaml'
46 'l2gw': 'neutron-l2gw-opendaylight.yaml',
47 'sriov': 'neutron-opendaylight-sriov.yaml',
48 'default': 'neutron-opendaylight.yaml',
51 'sfc': 'neutron-onos-sfc.yaml',
52 'default': 'neutron-onos.yaml'
54 'ovn': 'neutron-ml2-ovn.yaml',
56 'vpp': 'neutron-ml2-vpp.yaml',
# Tuple value: build_sdn_env_list() treats (match_value, env_file) pairs as
# "use env_file when ds[key] == match_value" (see its tuple branch below).
57 'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
# Feature-flag -> env file map (OTHER_FILE_MAP, per usage in
# create_deploy_cmd below; the name line itself is missing from the listing).
62 'tacker': 'enable_tacker.yaml',
63 'congress': 'enable_congress.yaml',
64 'barometer': 'enable_barometer.yaml',
65 'rt_kvm': 'enable_rt_kvm.yaml'
# OVS/DPDK Heat parameter name -> deploy-settings performance key
# (OVS_PERF_MAP, iterated in prep_env below).
69 'HostCpusList': 'dpdk_cores',
70 'NeutronDpdkCoreList': 'pmd_cores',
71 'NeutronDpdkSocketMemory': 'socket_memory',
72 'NeutronDpdkMemoryChannels': 'memory_channels'
# Path to a pinned ODL netvirt-VPP RPM on the overcloud image; the
# continuation line (original 76) is missing from this listing.
75 ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
# Size of the loop-device backing file created for Ceph OSDs when
# ceph_device is /dev/loop3 (see prep_image).
78 LOOP_DEVICE_SIZE = "10G"
# systemd unit installed into the overcloud image to attach /srv/data.img
# to /dev/loop3 at boot; interior unit-file lines (82-85, 88-91) are
# missing from this listing.
80 LOSETUP_SERVICE = """[Unit]
81 Description=Setup loop devices
86 ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
87 ExecStop=/sbin/losetup -d /dev/loop3
92 WantedBy=multi-user.target
# Compute-role services that duplicate controller-side agents; removed from
# ControllerServices when merging services for all-in-one deployments
# (see the merge logic at the end of prep_env).
95 DUPLICATE_COMPUTE_SERVICES = [
96 'OS::TripleO::Services::ComputeNeutronCorePlugin',
97 'OS::TripleO::Services::ComputeNeutronMetadataAgent',
98 'OS::TripleO::Services::ComputeNeutronOvsAgent',
99 'OS::TripleO::Services::ComputeNeutronL3Agent'
# Builds the list of SDN THT environment file paths for the deploy command.
# Recursively walks sdn_map: a dict value recurses (appending the 'default'
# file first when the controller key matches), a tuple value is an
# (expected_value, env_file) pair, and a plain value is appended directly.
# NOTE(review): lines are missing from this listing — notably the docstring
# quote fences, the `env_list = []` initialisation (around original 118-120),
# several `else:` lines, and the final `return env_list` (~original 145).
103 def build_sdn_env_list(ds, sdn_map, env_list=None):
105 Builds a list of SDN environment files to be used in the deploy cmd.
107 This function recursively searches an sdn_map. First the sdn controller is
108 matched and then the function looks for enabled features for that
109 controller to determine which environment files should be used. By
110 default the feature will be checked if set to true in deploy settings to be
111 added to the list. If a feature does not have a boolean value, then the
112 key and value pair to compare with are checked as a tuple (k,v).
114 :param ds: deploy settings
115 :param sdn_map: SDN map to recursively search
116 :param env_list: recursive var to hold previously found env_list
117 :return: A list of env files
121 for k, v in sdn_map.items():
# Match either the configured controller or an enabled feature flag.
122 if ds['sdn_controller'] == k or (k in ds and ds[k]):
123 if isinstance(v, dict):
124 # Append default SDN env file first
125 # The assumption is that feature-enabled SDN env files
126 # override and do not conflict with previously set default
128 if ds['sdn_controller'] == k and 'default' in v:
129 env_list.append(os.path.join(con.THT_ENV_DIR,
# Recurse into the nested feature map for this controller.
131 env_list.extend(build_sdn_env_list(ds, v))
132 # check if the value is not a boolean
133 elif isinstance(v, tuple):
# Tuple form: append v[1] (the env file); the comparison of
# ds[k] against v[0] is on a missing line (original 134).
135 env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
137 env_list.append(os.path.join(con.THT_ENV_DIR, v))
# Fall back to the map's top-level 'default' when nothing matched.
138 if len(env_list) == 0:
140 env_list.append(os.path.join(
141 con.THT_ENV_DIR, sdn_map['default']))
# presumably the except/else branch for a missing 'default' key —
# the introducing line (original 142) is absent; confirm in full source.
143 logging.warning("Unable to find default file for SDN")
# Maps the standard SDN env files onto their containerized (docker THT)
# equivalents: for each file produced by build_sdn_env_list(), if its
# basename is a known docker service, substitute the docker env dir path
# (using an override filename when VALID_DOCKER_SERVICES provides one).
# NOTE(review): the listing is missing the assignment target for the
# override case (original 161, presumably `sdn_env_list[i] = \`), the
# docstring quote fences, and the trailing `return sdn_env_list`.
148 def get_docker_sdn_files(ds_opts):
150 Returns docker env file for detected SDN
151 :param ds_opts: deploy options
152 :return: list of docker THT env files for an SDN
154 docker_services = con.VALID_DOCKER_SERVICES
155 tht_dir = con.THT_DOCKER_ENV_DIR
156 sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
157 for i, sdn_file in enumerate(sdn_env_list):
158 sdn_base = os.path.basename(sdn_file)
159 if sdn_base in docker_services:
# Non-None map value = docker-specific filename override.
160 if docker_services[sdn_base] is not None:
162 os.path.join(tht_dir, docker_services[sdn_base])
# Same filename, relocated into the docker env directory.
164 sdn_env_list[i] = os.path.join(tht_dir, sdn_base)
# Assembles the `openstack overcloud deploy` command line: collects THT
# environment files (container/HA/SDN/feature/storage/SRIOV/virtual-vs-
# baremetal), scales controller/compute counts from the inventory, picks
# the libvirt type for virtual deploys, and writes the final command to
# <tmp_dir>/deploy_command.
# NOTE(review): numbering gaps mean several lines are missing (docstring
# fences, some else-branches, the `fh.write(cmd)` body at the end, and the
# filenames passed at originals 179/184). Comments below only describe the
# visible code.
168 def create_deploy_cmd(ds, ns, inv, tmp_dir,
169 virtual, env_file='opnfv-environment.yaml',
172 logging.info("Creating deployment command")
173 deploy_options = ['network-environment.yaml']
175 ds_opts = ds['deploy_options']
# Containerized deploys pull in an extra THT env file (name on the
# missing continuation line, original 179).
177 if ds_opts['containers']:
178 deploy_options.append(os.path.join(con.THT_ENV_DIR,
# HA: pacemaker env file differs between container and non-container.
181 if ds['global_params']['ha_enabled']:
182 if ds_opts['containers']:
183 deploy_options.append(os.path.join(con.THT_ENV_DIR,
186 deploy_options.append(os.path.join(con.THT_ENV_DIR,
187 'puppet-pacemaker.yaml'))
190 deploy_options.append(env_file)
# SDN env files: docker variants for containers, plain THT otherwise.
192 if ds_opts['containers']:
193 deploy_options.append('docker-images.yaml')
194 sdn_docker_files = get_docker_sdn_files(ds_opts)
195 for sdn_docker_file in sdn_docker_files:
196 deploy_options.append(sdn_docker_file)
198 deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)
# Optional features (tacker/congress/barometer/rt_kvm) via OTHER_FILE_MAP.
200 for k, v in OTHER_FILE_MAP.items():
201 if k in ds_opts and ds_opts[k]:
202 if ds_opts['containers']:
203 deploy_options.append(os.path.join(con.THT_DOCKER_ENV_DIR,
204 "{}.yaml".format(k)))
206 deploy_options.append(os.path.join(con.THT_ENV_DIR, v))
# Ceph storage env is prepared/appended except for CSIT scenarios.
208 if ds_opts['ceph'] and 'csit' not in env_file:
209 prep_storage_env(ds, ns, virtual, tmp_dir)
210 deploy_options.append(os.path.join(con.THT_ENV_DIR,
211 'storage-environment.yaml'))
# The guarding `if ds_opts['sriov']:` line (original 212) is missing.
213 prep_sriov_env(ds, tmp_dir)
215 # Check for 'k8s' here intentionally, as we may support other values
216 # such as openstack/openshift for 'vim' option.
217 if ds_opts['vim'] == 'k8s':
218 deploy_options.append('kubernetes-environment.yaml')
# virtual/baremetal split — the `if virtual:`/`else:` lines
# (originals 220/222) are missing from this listing.
221 deploy_options.append('virtual-environment.yaml')
223 deploy_options.append('baremetal-environment.yaml')
225 num_control, num_compute = inv.get_node_counts()
# More than one controller without HA is invalid; the raise on the
# missing line (original 227) presumably follows — confirm in full source.
226 if num_control > 1 and not ds['global_params']['ha_enabled']:
228 if platform.machine() == 'aarch64':
229 # aarch64 deploys were not completing in the default 90 mins.
230 # Not sure if this is related to the hardware the OOO support
231 # was developed on or the virtualization support in CentOS
232 # Either way it will probably get better over time as the aarch
233 # support matures in CentOS and deploy time should be tested in
234 # the future so this multiplier can be removed.
235 con.DEPLOY_TIMEOUT *= 2
236 cmd = "openstack overcloud deploy --templates --timeout {} " \
237 .format(con.DEPLOY_TIMEOUT)
239 for option in deploy_options:
240 cmd += " -e {}".format(option)
241 cmd += " --ntp-server {}".format(ns['ntp'][0])
242 cmd += " --control-scale {}".format(num_control)
243 cmd += " --compute-scale {}".format(num_compute)
244 cmd += ' --control-flavor control --compute-flavor compute'
246 cmd += ' --networks-file network_data.yaml'
# libvirt type: qemu when nested KVM is unavailable, and always on aarch64.
# The default assignment (presumably libvirt_type = 'kvm', original ~247)
# is on a missing line.
248 if virtual and (platform.machine() != 'aarch64'):
249 with open('/sys/module/kvm_intel/parameters/nested') as f:
250 nested_kvm = f.read().strip()
251 if nested_kvm != 'Y':
252 libvirt_type = 'qemu'
253 elif virtual and (platform.machine() == 'aarch64'):
254 libvirt_type = 'qemu'
255 cmd += ' --libvirt-type {}'.format(libvirt_type)
256 if platform.machine() == 'aarch64':
257 cmd += ' --override-ansible-cfg /home/stack/ansible.cfg '
258 logging.info("Deploy command set: {}".format(cmd))
# Persist the command for the undercloud to execute; the write body
# (original ~261) and return are missing from this listing.
260 with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
# Customizes the overcloud qcow2 image before deployment: copies it to
# tmp_dir, queues virt-customize commands (proxy env vars, DPDK kernel
# modules, root password, custom OVS, ODL injection, upstream patches,
# Ceph loop-device unit), then runs them via virt_utils.virt_customize.
# Returns the set of container names that were patched.
# NOTE(review): many lines are missing from this listing (docstring fences,
# `virt_cmds = []` initialisation, several if/elif/else introducers, list
# wrappers around the queued command dicts). Comments describe only what is
# visible; confirm control flow against the full source.
265 def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
268 Locates sdn image and preps for deployment.
269 :param ds: deploy settings
270 :param ns: network settings
271 :param img: sdn image
272 :param tmp_dir: dir to store modified sdn image
273 :param root_pw: password to configure for overcloud image
274 :param docker_tag: Docker image tag for RDO version (default None)
275 :param patches: List of patches to apply to overcloud image
278 # TODO(trozet): Come up with a better way to organize this logic in this
280 logging.info("Preparing image: {} for deployment".format(img))
281 if not os.path.isfile(img):
282 logging.error("Missing SDN image {}".format(img))
283 raise ApexDeployException("Missing SDN image file: {}".format(img))
285 ds_opts = ds['deploy_options']
287 sdn = ds_opts['sdn_controller']
288 patched_containers = set()
289 # we need this due to rhbz #1436021
290 # fixed in systemd-219-37.el7
# Disable the neutron-openvswitch-agent unit files baked into the image
# (the guarding condition line, original ~291, is missing).
292 logging.info("Neutron openvswitch-agent disabled")
295 "rm -f /etc/systemd/system/multi-user.target.wants/"
296 "neutron-openvswitch-agent.service"},
299 "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
# Propagate undercloud proxy settings into the image environment.
303 if ns.get('http_proxy', ''):
306 "echo 'http_proxy={}' >> /etc/environment".format(
309 if ns.get('https_proxy', ''):
312 "echo 'https_proxy={}' >> /etc/environment".format(
# Work on a copy so the source image is never modified in place.
315 tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
316 shutil.copyfile(img, tmp_oc_image)
317 logging.debug("Temporary overcloud image stored as: {}".format(
# BGPVPN/gluon path: inject quagga and arrange zrpcd to start at boot
# (guard condition on a missing line, original ~318-320).
321 oc_builder.inject_quagga(tmp_oc_image, tmp_dir)
322 virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
325 "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
326 "/opt/quagga/etc/init.d/zrpcd_start.sh"})
328 con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
331 con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
332 "init.d/zrpcd_start.sh' /etc/rc.local "})
334 con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
335 "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
336 logging.info("ZRPCD process started")
338 dataplane = ds_opts['dataplane']
339 if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
340 logging.info("Enabling kernel modules for dpdk")
341 # file to module mapping
# uio_types dict header (original 342) is on a missing line.
343 os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
344 os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
# Write a modules script per module and upload it into the image.
346 for mod_file, mod in uio_types.items():
347 with open(mod_file, 'w') as fh:
348 fh.write('#!/bin/bash\n')
349 fh.write('exec /sbin/modprobe {}'.format(mod))
353 {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
355 {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
356 "{}".format(os.path.basename(mod_file))}
# Set the overcloud root password when one was provided (the `if root_pw:`
# guard, original ~358, is on a missing line).
359 pw_op = "password:{}".format(root_pw)
360 virt_cmds.append({con.VIRT_PW: pw_op})
362 if dataplane == 'ovs':
# SFC-with-OVS needs the NSH-enabled OVS build injected (guard on a
# missing line, original 363).
364 oc_builder.inject_ovs_nsh(tmp_oc_image, tmp_dir)
365 elif sdn == 'opendaylight':
366 # FIXME(trozet) remove this after RDO is updated with fix for
367 # https://bugzilla.redhat.com/show_bug.cgi?id=1544892
368 ovs_file = os.path.basename(con.CUSTOM_OVS)
369 ovs_url = con.CUSTOM_OVS.replace(ovs_file, '')
370 utils.fetch_upstream_and_unpack(dest=tmp_dir, url=ovs_url,
373 {con.VIRT_UPLOAD: "{}:/root/".format(os.path.join(tmp_dir,
375 {con.VIRT_RUN_CMD: "yum downgrade -y /root/{}".format(
379 if dataplane == 'fdio':
380 # Patch neutron with using OVS external interface for router
381 # and add generic linux NS interface driver
383 {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
384 "-p1 < neutron-patch-NSDriver.patch"})
# Replace stock vpp-lib with the nosdn VPP RPMs (guard on missing lines,
# originals ~385-386).
387 {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
388 {con.VIRT_RUN_CMD: "yum install -y "
389 "/root/nosdn_vpp_rpms/*.rpm"}
392 undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
393 'installer_vm']['ip']
394 if sdn == 'opendaylight':
395 oc_builder.inject_opendaylight(
396 odl_version=ds_opts['odl_version'],
399 uc_ip=undercloud_admin_ip,
400 os_version=ds_opts['os_version'],
401 docker_tag=docker_tag,
# Record that the opendaylight container was patched, for the caller
# (the guarding `if docker_tag:` style condition is on missing lines).
404 patched_containers = patched_containers.union({'opendaylight'})
# Derive the upstream branch name for patch application (guarded by a
# missing `if patches:` line, original ~406).
407 if ds_opts['os_version'] == 'master':
408 branch = ds_opts['os_version']
410 branch = "stable/{}".format(ds_opts['os_version'])
411 logging.info('Adding patches to overcloud')
412 patched_containers = patched_containers.union(
413 c_builder.add_upstream_patches(patches,
414 tmp_oc_image, tmp_dir,
416 uc_ip=undercloud_admin_ip,
417 docker_tag=docker_tag))
418 # if containers with ceph, and no ceph device we need to use a
419 # persistent loop device for Ceph OSDs
420 if docker_tag and ds_opts['ceph_device'] == '/dev/loop3':
421 tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
422 with open(tmp_losetup, 'w') as fh:
423 fh.write(LOSETUP_SERVICE)
425 {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(tmp_losetup)
427 {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size {}'
428 .format(LOOP_DEVICE_SIZE)},
429 {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
430 {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
432 # TODO(trozet) remove this after LP#173474 is fixed
433 dhcp_unit = '/usr/lib/systemd/system/dhcp-interface@.service'
435 {con.VIRT_RUN_CMD: "crudini --del {} Unit "
436 "ConditionPathExists".format(dhcp_unit)})
# Apply all queued customizations to the image in one pass.
437 virt_utils.virt_customize(virt_cmds, tmp_oc_image)
438 logging.info("Overcloud image customization complete")
439 return patched_containers
# NOTE(review): the `def make_ssh_key():` header (original ~442-443) and the
# docstring quote fences are missing from this listing — what follows is the
# body of that function. It generates an RSA keypair (the docstring says
# 1024-bit; the key_size argument itself, original ~450, is on a missing
# line — confirm, since 2048+ is the modern recommendation) and returns the
# (private, public) pair as UTF-8 strings: PEM/PKCS8 private key and an
# OpenSSH-format public key.
444 Creates public and private ssh keys with 1024 bit RSA encryption
445 :return: private, public key
447 key = rsa.generate_private_key(
448 backend=crypto_default_backend(),
449 public_exponent=65537,
# Serialize the private key unencrypted so it can be embedded in the
# opnfv-environment file by prep_env().
453 private_key = key.private_bytes(
454 crypto_serialization.Encoding.PEM,
455 crypto_serialization.PrivateFormat.PKCS8,
456 crypto_serialization.NoEncryption())
457 public_key = key.public_key().public_bytes(
458 crypto_serialization.Encoding.OpenSSH,
459 crypto_serialization.PublicFormat.OpenSSH
461 return private_key.decode('utf-8'), public_key.decode('utf-8')
# Rewrites the copied opnfv-environment file in place (fileinput inplace
# editing: print() output replaces each line) — injecting SSH keys, VLAN
# ranges, bridge/provider mappings, performance (VPP/OVS-DPDK/kernel)
# settings, and DHCP-agent tuning — then, for all-in-one deployments,
# merges ComputeServices into ControllerServices via a YAML round-trip.
# NOTE(review): numbering gaps mean many lines are missing (docstring
# fences, `tenant_nic = dict()`, `perf = True/False` flags, several
# else/elif introducers, the print(output_line) call, and the all-in-one
# guard condition). Comments below describe only the visible code.
464 def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
466 Creates modified opnfv/network environments for deployment
467 :param ds: deploy settings
468 :param ns: network settings
469 :param inv: node inventory
470 :param opnfv_env: file path for opnfv-environment file
471 :param net_env: file path for network-environment file
472 :param tmp_dir: Apex tmp dir
476 logging.info("Preparing opnfv-environment and network-environment files")
477 ds_opts = ds['deploy_options']
# Work on a copy of the opnfv env file inside tmp_dir.
478 tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
479 shutil.copyfile(opnfv_env, tmp_opnfv_env)
# First member of each role's tenant/external nic_mapping is used as the
# interface name for that role (tenant_nic's dict() init is on a missing
# line, original ~481).
480 tenant_nic_map = ns['networks']['tenant']['nic_mapping']
482 tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
483 tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
484 external_nic_map = ns['networks']['external'][0]['nic_mapping']
485 external_nic = dict()
486 external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]
# Fresh SSH keypair to embed into the environment file below.
489 private_key, public_key = make_ssh_key()
# >1 controller without HA is invalid (the raise, original ~493, is on a
# missing line — confirm against full source).
491 num_control, num_compute = inv.get_node_counts()
492 if num_control > 1 and not ds['global_params']['ha_enabled']:
495 # Make easier/faster variables to index in the file editor
496 if 'performance' in ds_opts:
499 if 'vpp' in ds_opts['performance']['Compute']:
500 perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
503 if 'vpp' in ds_opts['performance']['Controller']:
504 perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
509 if 'ovs' in ds_opts['performance']['Compute']:
510 perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
515 if 'kernel' in ds_opts['performance']['Compute']:
516 perf_kern_comp = ds_opts['performance']['Compute']['kernel']
518 perf_kern_comp = None
522 tenant_settings = ns['networks']['tenant']
523 tenant_vlan_enabled = 'tenant' in ns.enabled_network_list and \
524 ns['networks']['tenant'].get('segmentation_type') == 'vlan'
526 # Modify OPNFV environment
527 # TODO: Change to build a dict and outputting yaml rather than parsing
528 for line in fileinput.input(tmp_opnfv_env, inplace=True):
529 line = line.strip('\n')
531 if 'CloudDomain' in line:
532 output_line = " CloudDomain: {}".format(ns['domain_name'])
533 elif 'replace_private_key' in line:
534 output_line = " private_key: |\n"
# Indent each private-key line for the YAML block scalar
# (key_out's init, original ~535, is on a missing line).
536 for line in private_key.splitlines():
537 key_out += " {}\n".format(line)
538 output_line += key_out
539 elif 'replace_public_key' in line:
540 output_line = " public_key: '{}'".format(public_key)
541 elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
542 'resource_registry' in line:
543 output_line = "resource_registry:\n" \
544 " OS::TripleO::NodeUserData: first-boot.yaml"
545 elif 'ComputeExtraConfigPre' in line and \
546 ds_opts['dataplane'] == 'ovs_dpdk':
547 output_line = ' OS::TripleO::ComputeExtraConfigPre: ' \
548 './ovs-dpdk-preconfig.yaml'
549 elif 'NeutronNetworkVLANRanges' in line:
551 if tenant_vlan_enabled:
552 if ns['networks']['tenant']['overlay_id_range']:
553 vlan_setting = ns['networks']['tenant']['overlay_id_range']
554 if 'datacentre' not in vlan_setting:
555 vlan_setting += ',datacentre:1:1000'
556 # SRIOV networks are VLAN based provider networks. In order to
557 # simplify the deployment, nfv_sriov will be the default physnet.
558 # VLANs are not needed in advance, and the user will have to create
559 # the network specifying the segmentation-id.
562 vlan_setting += ",nfv_sriov"
564 vlan_setting = "datacentre:1:1000,nfv_sriov"
566 output_line = " NeutronNetworkVLANRanges: " + vlan_setting
567 elif 'NeutronBridgeMappings' in line and tenant_vlan_enabled:
568 if tenant_settings['overlay_id_range']:
569 physnets = tenant_settings['overlay_id_range'].split(',')
570 output_line = " NeutronBridgeMappings: "
# Non-datacentre physnets map to br-vlan; datacentre always br-ex.
571 for physnet in physnets:
572 physnet_name = physnet.split(':')[0]
573 if physnet_name != 'datacentre':
574 output_line += "{}:br-vlan,".format(physnet_name)
575 output_line += "datacentre:br-ex"
576 elif 'OpenDaylightProviderMappings' in line and tenant_vlan_enabled \
577 and ds_opts['sdn_controller'] == 'opendaylight':
578 if tenant_settings['overlay_id_range']:
579 physnets = tenant_settings['overlay_id_range'].split(',')
580 output_line = " OpenDaylightProviderMappings: "
581 for physnet in physnets:
582 physnet_name = physnet.split(':')[0]
583 if physnet_name != 'datacentre':
584 output_line += "{}:br-vlan,".format(physnet_name)
585 output_line += "datacentre:br-ex"
586 elif 'NeutronNetworkType' in line and tenant_vlan_enabled:
587 output_line = " NeutronNetworkType: vlan\n" \
588 " NeutronTunnelTypes: ''"
590 if ds_opts['sdn_controller'] == 'opendaylight' and \
591 'odl_vpp_routing_node' in ds_opts:
592 if 'opendaylight::vpp_routing_node' in line:
593 output_line = (" opendaylight::vpp_routing_node: {}.{}"
594 .format(ds_opts['odl_vpp_routing_node'],
596 elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
597 if 'NeutronVPPAgentPhysnets' in line:
598 # VPP interface tap0 will be used for external network
600 output_line = (" NeutronVPPAgentPhysnets: "
601 "'datacentre:{},external:tap0'"
602 .format(tenant_nic['Controller']))
# presumably the dvr option key is on the missing continuation line
# (original 604) — confirm against full source.
603 elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
605 if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
607 elif 'NeutronDhcpAgentsPerNetwork' in line:
609 num_dhcp_agents = num_control
611 num_dhcp_agents = num_compute
612 output_line = (" NeutronDhcpAgentsPerNetwork: {}"
613 .format(num_dhcp_agents))
614 elif 'ComputeServices' in line:
615 output_line = (" ComputeServices:\n"
616 " - OS::TripleO::Services::NeutronDhcpAgent")
# Per-role VPP performance settings -> <Role>ExtraConfig hieradata.
619 for role in 'NovaCompute', 'Controller':
620 if role == 'NovaCompute':
621 perf_opts = perf_vpp_comp
623 perf_opts = perf_vpp_ctrl
624 cfg = "{}ExtraConfig".format(role)
625 if cfg in line and perf_opts:
627 if 'main-core' in perf_opts:
628 perf_line += ("\n fdio::vpp_cpu_main_core: '{}'"
629 .format(perf_opts['main-core']))
630 if 'corelist-workers' in perf_opts:
632 "fdio::vpp_cpu_corelist_workers: '{}'"
633 .format(perf_opts['corelist-workers']))
634 if ds_opts['sdn_controller'] == 'opendaylight' and \
635 ds_opts['dataplane'] == 'fdio':
636 if role == 'NovaCompute':
638 "tripleo::profile::base::neutron::"
639 "agents::honeycomb::"
640 "interface_role_mapping:"
641 " ['{}:tenant-interface',"
642 "'{}:public-interface']"
643 .format(tenant_nic[role],
647 "tripleo::profile::base::neutron::"
648 "agents::honeycomb::"
649 "interface_role_mapping:"
650 " ['{}:tenant-interface']"
651 .format(tenant_nic[role]))
653 output_line = (" {}:{}".format(cfg, perf_line))
# OVS-DPDK perf knobs: OVS_PERF_MAP translates Heat param -> ds key.
655 if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
656 for k, v in OVS_PERF_MAP.items():
657 if k in line and v in perf_ovs_comp:
658 output_line = " {}: '{}'".format(k, perf_ovs_comp[v])
661 # (FIXME) use compute's kernel settings for all nodes for now.
663 if 'NovaSchedulerDefaultFilters' in line:
665 " NovaSchedulerDefaultFilters: 'RamFilter," \
666 "ComputeFilter,AvailabilityZoneFilter," \
667 "ComputeCapabilitiesFilter," \
668 "ImagePropertiesFilter,NUMATopologyFilter'"
669 elif 'ComputeKernelArgs' in line:
# kernel_args init (original ~670) is on a missing line.
671 for k, v in perf_kern_comp.items():
672 kernel_args += "{}={} ".format(k, v)
674 output_line = " ComputeKernelArgs: '{}'".\
679 # Merge compute services into control services if only a single
# All-in-one guard condition (originals ~680-681) is missing.
682 logging.info("All in one deployment. Checking if service merging "
683 "required into control services")
684 with open(tmp_opnfv_env, 'r') as fh:
685 data = yaml.safe_load(fh)
686 param_data = data['parameter_defaults']
687 # Check to see if any parameters are set for Compute
688 for param in param_data.keys():
689 if param != 'ComputeServices' and param.startswith('Compute'):
690 logging.warning("Compute parameter set, but will not be used "
691 "in deployment: {}. Please use Controller "
692 "based parameters when using All-in-one "
693 "deployments".format(param))
694 if ('ControllerServices' in param_data and 'ComputeServices' in
696 logging.info("Services detected in environment file. Merging...")
697 ctrl_services = param_data['ControllerServices']
698 cmp_services = param_data['ComputeServices']
# Union the two service lists, then drop compute-side duplicates.
699 param_data['ControllerServices'] = list(set().union(
700 ctrl_services, cmp_services))
701 for dup_service in DUPLICATE_COMPUTE_SERVICES:
702 if dup_service in param_data['ControllerServices']:
703 param_data['ControllerServices'].remove(dup_service)
704 param_data.pop('ComputeServices')
705 logging.debug("Merged controller services: {}".format(
706 pprint.pformat(param_data['ControllerServices'])
708 with open(tmp_opnfv_env, 'w') as fh:
709 yaml.safe_dump(data, fh, default_flow_style=False)
711 logging.info("No services detected in env file, not merging "
714 logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
715 with open(tmp_opnfv_env, 'r') as fh:
716 logging.debug("opnfv-environment content is : {}".format(
717 pprint.pformat(yaml.safe_load(fh.read()))
# Generates a Ceph secret key: a little-endian struct header (version 1,
# current unix time, 0, key length) prepended to the raw key bytes, then
# base64-encoded. NOTE(review): the line that creates `key` (original 722,
# presumably random bytes of fixed length) is missing from this listing.
721 def generate_ceph_key():
723 header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
724 return base64.b64encode(header + key)
# Prepares storage-environment.yaml in tmp_dir: fills in a fresh Ceph FSID
# and mon/admin/client keys via in-place line editing, then (containers)
# sets ceph-ansible parameters, or (legacy non-container path) appends an
# ExtraConfig OSD device entry.
# NOTE(review): missing lines include the docstring fences, several param
# docs, the `else: print(line)` fall-through of the fileinput loop, parts of
# the ceph_params construction, and the closing of the CephAnsible dicts.
727 def prep_storage_env(ds, ns, virtual, tmp_dir):
729 Creates storage environment file for deployment. Source file is copied by
730 undercloud playbook to host.
737 ds_opts = ds['deploy_options']
738 storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
739 if not os.path.isfile(storage_file):
740 logging.error("storage-environment file is not in tmp directory: {}. "
741 "Check if file was copied from "
742 "undercloud".format(tmp_dir))
743 raise ApexDeployException("storage-environment file not copied from "
# In-place edit: print() replaces each matched line with fresh secrets.
745 for line in fileinput.input(storage_file, inplace=True):
746 line = line.strip('\n')
747 if 'CephClusterFSID' in line:
748 print(" CephClusterFSID: {}".format(str(uuid.uuid4())))
749 elif 'CephMonKey' in line:
750 print(" CephMonKey: {}".format(generate_ceph_key().decode(
752 elif 'CephAdminKey' in line:
753 print(" CephAdminKey: {}".format(generate_ceph_key().decode(
755 elif 'CephClientKey' in line:
756 print(" CephClientKey: {}".format(generate_ceph_key().decode(
# Containerized Ceph: tune ceph-ansible parameters (ceph_params dict
# init, original ~762-763, is on a missing line).
761 if ds_opts['containers']:
764 # max pgs allowed are calculated as num_mons * 200. Therefore we
765 # set number of pgs and pools so that the total will be less:
766 # num_pgs * num_pools * num_osds
767 ceph_params['CephPoolDefaultSize'] = 2
768 ceph_params['CephPoolDefaultPgNum'] = 32
# Virtual-deploy memory limits (guard condition, original ~769,
# is on a missing line — presumably `if virtual:`).
770 ceph_params['CephAnsibleExtraConfig'] = {
771 'centos_package_dependencies': [],
772 'ceph_osd_docker_memory_limit': '1g',
773 'ceph_mds_docker_memory_limit': '1g',
775 ceph_device = ds_opts['ceph_device']
776 ceph_params['CephAnsibleDisksConfig'] = {
777 'devices': [ceph_device],
779 'osd_scenario': 'collocated'
781 utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
782 # TODO(trozet): remove following block as we only support containers now
783 elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
784 with open(storage_file, 'a') as fh:
785 fh.write(' ExtraConfig:\n')
786 fh.write(" ceph::profile::params::osds:{{{}:{{}}}}\n".format(
787 ds_opts['ceph_device']
# Prepares neutron-opendaylight-sriov.yaml in tmp_dir by line editing:
# uncomments the scheduler-filter lines (strips a leading comment prefix
# via line[3:]) and fills in the SRIOV interface name for device mappings,
# VF counts, and PCI passthrough entries.
# NOTE(review): missing lines include the docstring fences and the final
# `else: print(line)` fall-through of the fileinput loop.
791 def prep_sriov_env(ds, tmp_dir):
793 Creates SRIOV environment file for deployment. Source file is copied by
794 undercloud playbook to host.
799 ds_opts = ds['deploy_options']
800 sriov_iface = ds_opts['sriov']
801 sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
802 if not os.path.isfile(sriov_file):
803 logging.error("sriov-environment file is not in tmp directory: {}. "
804 "Check if file was copied from "
805 "undercloud".format(tmp_dir))
806 raise ApexDeployException("sriov-environment file not copied from "
808 # TODO(rnoriega): Instead of line editing, refactor this code to load
809 # yaml file into a dict, edit it and write the file back.
810 for line in fileinput.input(sriov_file, inplace=True):
811 line = line.strip('\n')
# line[3:] drops the template's 3-char comment prefix to activate
# the scheduler filter settings.
812 if 'NovaSchedulerDefaultFilters' in line:
813 print(" {}".format(line[3:]))
814 elif 'NovaSchedulerAvailableFilters' in line:
815 print(" {}".format(line[3:]))
816 elif 'NeutronPhysicalDevMappings' in line:
817 print(" NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
818 .format(sriov_iface))
819 elif 'NeutronSriovNumVFs' in line:
820 print(" NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
821 elif 'NovaPCIPassthrough' in line:
822 print(" NovaPCIPassthrough:")
823 elif 'devname' in line:
824 print(" - devname: \"{}\"".format(sriov_iface))
825 elif 'physical_network' in line:
826 print(" physical_network: \"nfv_sriov\"")
# Builds the `openstack network create` / `openstack subnet create`
# commands for the external network, using the external network config
# when one is enabled and otherwise falling back to the admin network with
# its introspection range as the floating pool.
# NOTE(review): missing lines include the docstring fences, the
# `cmds = []` init, the `external = ...` flag and its if/else lines, the
# flat ext_type assignment (original ~857), and the final `return cmds`.
831 def external_network_cmds(ns, ds):
833 Generates external network openstack commands
834 :param ns: network settings
835 :param ds: deploy settings
836 :return: list of commands to configure external network
838 ds_opts = ds['deploy_options']
839 external_physnet = 'datacentre'
# fdio without ODL uses a dedicated 'external' physnet (matches the
# NeutronVPPAgentPhysnets mapping written in prep_env).
840 if ds_opts['dataplane'] == 'fdio' and \
841 ds_opts['sdn_controller'] != 'opendaylight':
842 external_physnet = 'external'
843 if 'external' in ns.enabled_network_list:
844 net_config = ns['networks']['external'][0]
846 pool_start, pool_end = net_config['floating_ip_range']
# Fallback: reuse the admin network and its introspection range.
848 net_config = ns['networks']['admin']
850 pool_start, pool_end = ns['apex']['networks']['admin'][
851 'introspection_range']
852 nic_config = net_config['nic_mapping']
853 gateway = net_config['gateway']
855 # create network command
# Native VLAN -> flat provider network (assignment on missing line,
# original ~857); otherwise a VLAN provider segment.
856 if nic_config['compute']['vlan'] == 'native':
859 ext_type = "vlan --provider-segment {}".format(nic_config[
861 cmds.append("openstack network create external --project service "
862 "--external --provider-network-type {} "
863 "--provider-physical-network {}"
864 .format(ext_type, external_physnet))
865 # create subnet command
866 cidr = net_config['cidr']
867 subnet_cmd = "openstack subnet create external-subnet --project " \
868 "service --network external --no-dhcp --gateway {} " \
869 "--allocation-pool start={},end={} --subnet-range " \
870 "{}".format(gateway, pool_start, pool_end, str(cidr))
871 if external and cidr.version == 6:
872 subnet_cmd += ' --ip-version 6'
873 cmds.append(subnet_cmd)
874 logging.debug("Neutron external network commands determined "
875 "as: {}".format(cmds))
# Builds congress datasource-create command strings, one per driver, from
# credentials parsed out of the overcloudrc file. The doctor driver takes
# no --config arguments; nova additionally pins api_version 2.34.
# NOTE(review): missing lines include the ds_cfg list header (~884), the
# try/except around the overcloudrc key lookups (the error log at 891-894
# is presumably the except branch, followed by a re-raise), the
# `cmds = []` init, the cmds.append(...) call, and the trailing
# `return cmds` — this block may also be cut off at the end of the listing.
879 def create_congress_cmds(overcloud_file):
880 drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
881 overcloudrc = parsers.parse_overcloudrc(overcloud_file)
882 logging.info("Creating congress commands")
885 "username={}".format(overcloudrc['OS_USERNAME']),
886 "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
887 "password={}".format(overcloudrc['OS_PASSWORD']),
888 "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
891 logging.error("Unable to find all keys required for congress in "
892 "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
893 "OS_PASSWORD, OS_AUTH_URL. Please check overcloudrc "
894 "file: {}".format(overcloud_file))
897 ds_cfg = '--config ' + ' --config '.join(ds_cfg)
899 for driver in drivers:
900 if driver == 'doctor':
901 cmd = "{} \"{}\"".format(driver, driver)
903 cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
# nova datasource requires an explicit API microversion (guard for
# driver == 'nova', original ~904, is on a missing line).
905 cmd += ' --config api_version="2.34"'
906 logging.debug("Congress command created: {}".format(cmd))