1 ##############################################################################
2 # Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
19 import apex.builders.overcloud_builder as oc_builder
20 import apex.builders.common_builder as c_builder
22 from apex.common import constants as con
23 from apex.common.exceptions import ApexDeployException
24 from apex.common import parsers
25 from apex.common import utils
26 from apex.virtual import utils as virt_utils
27 from cryptography.hazmat.primitives import serialization as \
29 from cryptography.hazmat.primitives.asymmetric import rsa
30 from cryptography.hazmat.backends import default_backend as \
31 crypto_default_backend
# NOTE(review): the lines below are fragments of several module-level
# constants; the dict names, opening braces, and some entries appear to be
# elided from this view (embedded line numbers jump). Presumably these are:
#   - an SDN -> THT environment-file map (per-controller feature files),
#   - a map of optional features to their enable_*.yaml env files,
#   - an OVS DPDK parameter-name -> perf-settings-key map,
#   - OVS/ODL RPM file names and a systemd unit template for a loop device.
# TODO(review): confirm against the full source file.
36 'sfc': 'neutron-sfc-opendaylight.yaml',
37 'vpn': 'neutron-bgpvpn-opendaylight.yaml',
38 'gluon': 'gluon.yaml',
40 'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
41 'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
42 'default': 'neutron-opendaylight-honeycomb.yaml'
44 'l2gw': 'neutron-l2gw-opendaylight.yaml',
45 'sriov': 'neutron-opendaylight-sriov.yaml',
46 'default': 'neutron-opendaylight.yaml',
49 'sfc': 'neutron-onos-sfc.yaml',
50 'default': 'neutron-onos.yaml'
52 'ovn': 'neutron-ml2-ovn.yaml',
54 'vpp': 'neutron-ml2-vpp.yaml',
55 'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
60 'tacker': 'enable_tacker.yaml',
61 'congress': 'enable_congress.yaml',
62 'barometer': 'enable_barometer.yaml',
63 'rt_kvm': 'enable_rt_kvm.yaml'
# Heat/TripleO parameter name -> deploy-settings performance key (DPDK tuning).
67 'HostCpusList': 'dpdk_cores',
68 'NeutronDpdkCoreList': 'pmd_cores',
69 'NeutronDpdkSocketMemory': 'socket_memory',
70 'NeutronDpdkMemoryChannels': 'memory_channels'
# RPM names for the NSH-enabled OVS build installed into the overcloud image.
73 OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
74 OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
75 ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
# systemd unit template that attaches /srv/data.img to /dev/loop3 so Ceph
# OSDs have a persistent loop device (used by prep_image).
78 LOSETUP_SERVICE = """[Unit]
79 Description=Setup loop devices
84 ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
85 ExecStop=/sbin/losetup -d /dev/loop3
90 WantedBy=multi-user.target
# Recursively collects THT environment files for the configured SDN
# controller and its enabled features. NOTE(review): interior lines (e.g.
# the env_list initialization and some else-branches) appear elided from
# this view -- verify against the full file.
94 def build_sdn_env_list(ds, sdn_map, env_list=None):
96 Builds a list of SDN environment files to be used in the deploy cmd.
98 This function recursively searches an sdn_map. First the sdn controller is
99 matched and then the function looks for enabled features for that
100 controller to determine which environment files should be used. By
101 default the feature will be checked if set to true in deploy settings to be
102 added to the list. If a feature does not have a boolean value, then the
103 key and value pair to compare with are checked as a tuple (k,v).
105 :param ds: deploy settings
106 :param sdn_map: SDN map to recursively search
107 :param env_list: recursive var to hold previously found env_list
108 :return: A list of env files
112 for k, v in sdn_map.items():
# Match either the selected controller itself or a truthy feature flag.
113 if ds['sdn_controller'] == k or (k in ds and ds[k]):
114 if isinstance(v, dict):
115 # Append default SDN env file first
116 # The assumption is that feature-enabled SDN env files
117 # override and do not conflict with previously set default
119 if ds['sdn_controller'] == k and 'default' in v:
120 env_list.append(os.path.join(con.THT_ENV_DIR,
# Recurse into the nested map to pick up enabled feature files.
122 env_list.extend(build_sdn_env_list(ds, v))
123 # check if the value is not a boolean
124 elif isinstance(v, tuple):
126 env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
128 env_list.append(os.path.join(con.THT_ENV_DIR, v))
# Fall back to the map's top-level default when nothing matched.
129 if len(env_list) == 0:
131 env_list.append(os.path.join(
132 con.THT_ENV_DIR, sdn_map['default']))
134 logging.warning("Unable to find default file for SDN")
# Maps the (single) detected SDN env file onto its docker THT equivalent.
139 def get_docker_sdn_file(ds_opts):
141 Returns docker env file for detected SDN
142 :param ds_opts: deploy options
143 :return: docker THT env file for an SDN
145 # FIXME(trozet): We assume right now there is only one docker SDN file
146 docker_services = con.VALID_DOCKER_SERVICES
# Docker env dir is keyed by OpenStack version in constants.
147 tht_dir = con.THT_DOCKER_ENV_DIR[ds_opts['os_version']]
148 sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
149 for sdn_file in sdn_env_list:
150 sdn_base = os.path.basename(sdn_file)
151 if sdn_base in docker_services:
# A non-None mapping means the docker variant has a different file name;
# None means the same base name is reused under the docker dir.
152 if docker_services[sdn_base] is not None:
153 return os.path.join(tht_dir,
154 docker_services[sdn_base])
156 return os.path.join(tht_dir, sdn_base)
# Assembles the full `openstack overcloud deploy` command string from deploy
# settings, network settings and node inventory, writes it to
# tmp_dir/deploy_command, and returns/records it. NOTE(review): several
# interior lines (env file names passed to os.path.join, else-branches)
# appear elided from this view.
159 def create_deploy_cmd(ds, ns, inv, tmp_dir,
160 virtual, env_file='opnfv-environment.yaml',
163 logging.info("Creating deployment command")
164 deploy_options = ['network-environment.yaml']
166 ds_opts = ds['deploy_options']
168 if ds_opts['containers']:
169 deploy_options.append(os.path.join(con.THT_ENV_DIR,
# HA uses pacemaker; the containerized and puppet variants differ.
172 if ds['global_params']['ha_enabled']:
173 if ds_opts['containers']:
174 deploy_options.append(os.path.join(con.THT_ENV_DIR,
177 deploy_options.append(os.path.join(con.THT_ENV_DIR,
178 'puppet-pacemaker.yaml'))
181 deploy_options.append(env_file)
183 if ds_opts['containers']:
184 deploy_options.append('docker-images.yaml')
185 sdn_docker_file = get_docker_sdn_file(ds_opts)
187 deploy_options.append(sdn_docker_file)
188 deploy_options.append('sdn-images.yaml')
190 deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)
# Optional features (tacker, congress, ...) each contribute one env file.
192 for k, v in OTHER_FILE_MAP.items():
193 if k in ds_opts and ds_opts[k]:
194 if ds_opts['containers']:
195 deploy_options.append(os.path.join(con.THT_DOCKER_ENV_DIR,
196 "{}.yaml".format(k)))
198 deploy_options.append(os.path.join(con.THT_ENV_DIR, v))
200 if ds_opts['ceph'] and 'csit' not in env_file:
201 prep_storage_env(ds, ns, virtual, tmp_dir)
202 deploy_options.append(os.path.join(con.THT_ENV_DIR,
203 'storage-environment.yaml'))
205 prep_sriov_env(ds, tmp_dir)
207 # Check for 'k8s' here intentionally, as we may support other values
208 # such as openstack/openshift for 'vim' option.
209 if ds_opts['vim'] == 'k8s':
210 deploy_options.append('kubernetes-environment.yaml')
213 deploy_options.append('virtual-environment.yaml')
215 deploy_options.append('baremetal-environment.yaml')
217 num_control, num_compute = inv.get_node_counts()
218 if num_control == 0 or num_compute == 0:
219 logging.error("Detected 0 control or compute nodes. Control nodes: "
220 "{}, compute nodes{}".format(num_control, num_compute))
221 raise ApexDeployException("Invalid number of control or computes")
222 elif num_control > 1 and not ds['global_params']['ha_enabled']:
224 if platform.machine() == 'aarch64':
225 # aarch64 deploys were not completing in the default 90 mins.
226 # Not sure if this is related to the hardware the OOO support
227 # was developed on or the virtualization support in CentOS
228 # Either way it will probably get better over time as the aarch
229 # support matures in CentOS and deploy time should be tested in
230 # the future so this multiplier can be removed.
231 con.DEPLOY_TIMEOUT *= 2
232 cmd = "openstack overcloud deploy --templates --timeout {} " \
233 .format(con.DEPLOY_TIMEOUT)
235 for option in deploy_options:
236 cmd += " -e {}".format(option)
237 cmd += " --ntp-server {}".format(ns['ntp'][0])
238 cmd += " --control-scale {}".format(num_control)
239 cmd += " --compute-scale {}".format(num_compute)
240 cmd += ' --control-flavor control --compute-flavor compute'
242 cmd += ' --networks-file network_data.yaml'
# Fall back to qemu when nested KVM is not enabled on the hypervisor --
# presumably only reached on virtual deploys (guard line elided; confirm).
245 with open('/sys/module/kvm_intel/parameters/nested') as f:
246 nested_kvm = f.read().strip()
247 if nested_kvm != 'Y':
248 libvirt_type = 'qemu'
249 cmd += ' --libvirt-type {}'.format(libvirt_type)
250 logging.info("Deploy command set: {}".format(cmd))
# Persist the command so the undercloud playbook can execute it.
252 with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
# Customizes the overcloud qcow2 image (via virt-customize command dicts)
# for the selected SDN/dataplane, then returns the set of container image
# names that were patched. NOTE(review): many interior lines (virt_cmds
# initialization, several guard conditions, list closers) are elided from
# this view -- the commands below are fragments of larger literals.
257 def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
260 Locates sdn image and preps for deployment.
261 :param ds: deploy settings
262 :param ns: network settings
263 :param img: sdn image
264 :param tmp_dir: dir to store modified sdn image
265 :param root_pw: password to configure for overcloud image
266 :param docker_tag: Docker image tag for RDO version (default None)
267 :param patches: List of patches to apply to overcloud image
270 # TODO(trozet): Come up with a better way to organize this logic in this
272 logging.info("Preparing image: {} for deployment".format(img))
273 if not os.path.isfile(img):
274 logging.error("Missing SDN image {}".format(img))
275 raise ApexDeployException("Missing SDN image file: {}".format(img))
277 ds_opts = ds['deploy_options']
279 sdn = ds_opts['sdn_controller']
280 patched_containers = set()
281 # we need this due to rhbz #1436021
282 # fixed in systemd-219-37.el7
284 logging.info("Neutron openvswitch-agent disabled")
287 "rm -f /etc/systemd/system/multi-user.target.wants/"
288 "neutron-openvswitch-agent.service"},
291 "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
# Propagate proxy settings from network settings into the image env.
295 if ns.get('http_proxy', ''):
298 "echo 'http_proxy={}' >> /etc/environment".format(
301 if ns.get('https_proxy', ''):
304 "echo 'https_proxy={}' >> /etc/environment".format(
# zrpcd (Quagga BGP RPC daemon) is started at boot via rc.local --
# presumably only when the 'vpn' feature is enabled (guard elided; confirm).
308 virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
311 "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
312 "/opt/quagga/etc/init.d/zrpcd_start.sh"})
314 con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
317 con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
318 "init.d/zrpcd_start.sh' /etc/rc.local "})
320 con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
321 "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
322 logging.info("ZRPCD process started")
324 dataplane = ds_opts['dataplane']
325 if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
326 logging.info("Enabling kernel modules for dpdk")
327 # file to module mapping
329 os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
330 os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
# Write a modprobe wrapper script per module, upload it into the image,
# and make it executable so it loads at boot.
332 for mod_file, mod in uio_types.items():
333 with open(mod_file, 'w') as fh:
334 fh.write('#!/bin/bash\n')
335 fh.write('exec /sbin/modprobe {}'.format(mod))
339 {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
341 {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
342 "{}".format(os.path.basename(mod_file))}
# Optional root password for the overcloud image (guard elided; confirm).
345 pw_op = "password:{}".format(root_pw)
346 virt_cmds.append({con.VIRT_PW: pw_op})
348 if dataplane == 'ovs':
# Install the NSH-capable OVS kmod and downgrade OVS to the matching build.
351 {con.VIRT_RUN_CMD: "yum -y install "
352 "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
353 "{}".format(OVS_NSH_KMOD_RPM)},
354 {con.VIRT_RUN_CMD: "yum downgrade -y "
355 "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
356 "{}".format(OVS_NSH_RPM)}
358 elif sdn == 'opendaylight':
359 # FIXME(trozet) remove this after RDO is updated with fix for
360 # https://bugzilla.redhat.com/show_bug.cgi?id=1544892
361 ovs_file = os.path.basename(con.CUSTOM_OVS)
362 ovs_url = con.CUSTOM_OVS.replace(ovs_file, '')
363 utils.fetch_upstream_and_unpack(dest=tmp_dir, url=ovs_url,
366 {con.VIRT_UPLOAD: "{}:/root/".format(os.path.join(tmp_dir,
368 {con.VIRT_RUN_CMD: "yum downgrade -y /root/{}".format(
371 if dataplane == 'fdio':
372 # Patch neutron with using OVS external interface for router
373 # and add generic linux NS interface driver
375 {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
376 "-p1 < neutron-patch-NSDriver.patch"})
# Replace vpp-lib with locally-built VPP rpms (condition elided; confirm).
379 {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
380 {con.VIRT_RUN_CMD: "yum install -y "
381 "/root/nosdn_vpp_rpms/*.rpm"}
# Work on a copy so the pristine source image is never modified.
384 tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
385 shutil.copyfile(img, tmp_oc_image)
386 logging.debug("Temporary overcloud image stored as: {}".format(
389 if sdn == 'opendaylight':
390 undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
391 'installer_vm']['ip']
392 oc_builder.inject_opendaylight(
393 odl_version=ds_opts['odl_version'],
396 uc_ip=undercloud_admin_ip,
397 os_version=ds_opts['os_version'],
398 docker_tag=docker_tag,
401 patched_containers = patched_containers.union({'opendaylight'})
# 'master' has no stable/ branch prefix when applying upstream patches.
404 if ds_opts['os_version'] == 'master':
405 branch = ds_opts['os_version']
407 branch = "stable/{}".format(ds_opts['os_version'])
408 logging.info('Adding patches to overcloud')
409 patched_containers = patched_containers.union(
410 c_builder.add_upstream_patches(patches,
411 tmp_oc_image, tmp_dir,
413 uc_ip=undercloud_admin_ip,
414 docker_tag=docker_tag))
415 # if containers with ceph, and no ceph device we need to use a
416 # persistent loop device for Ceph OSDs
417 if docker_tag and ds_opts['ceph_device'] == '/dev/loop3':
418 tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
419 with open(tmp_losetup, 'w') as fh:
420 fh.write(LOSETUP_SERVICE)
422 {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(tmp_losetup)
424 {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size 10G'},
425 {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
426 {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
428 # TODO(trozet) remove this after LP#173474 is fixed
429 dhcp_unit = '/usr/lib/systemd/system/dhcp-interface@.service'
431 {con.VIRT_RUN_CMD: "crudini --del {} Unit "
432 "ConditionPathExists".format(dhcp_unit)})
# Apply all accumulated commands to the image in one virt-customize pass.
433 virt_utils.virt_customize(virt_cmds, tmp_oc_image)
434 logging.info("Overcloud image customization complete")
435 return patched_containers
# NOTE(review): the `def make_ssh_key():` line appears elided just above
# this span; the docstring says 1024-bit but key_size is also elided --
# confirm the actual key_size argument in the full source.
440 Creates public and private ssh keys with 1024 bit RSA encryption
441 :return: private, public key
443 key = rsa.generate_private_key(
444 backend=crypto_default_backend(),
445 public_exponent=65537,
# Serialize private key as PEM/PKCS8 (unencrypted) and public key in
# OpenSSH authorized_keys format.
449 private_key = key.private_bytes(
450 crypto_serialization.Encoding.PEM,
451 crypto_serialization.PrivateFormat.PKCS8,
452 crypto_serialization.NoEncryption())
453 public_key = key.public_key().public_bytes(
454 crypto_serialization.Encoding.OpenSSH,
455 crypto_serialization.PublicFormat.OpenSSH
# Both are returned as text, not bytes.
457 return private_key.decode('utf-8'), public_key.decode('utf-8')
# Line-edits a copy of the opnfv-environment file in place (fileinput with
# inplace=True redirects stdout into the file) substituting SSH keys, VLAN
# ranges, SDN/dataplane and performance settings. NOTE(review): numerous
# interior lines (else-branches, print() calls emitting output_line,
# variable initializations) are elided from this view.
460 def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
462 Creates modified opnfv/network environments for deployment
463 :param ds: deploy settings
464 :param ns: network settings
465 :param inv: node inventory
466 :param opnfv_env: file path for opnfv-environment file
467 :param net_env: file path for network-environment file
468 :param tmp_dir: Apex tmp dir
472 logging.info("Preparing opnfv-environment and network-environment files")
473 ds_opts = ds['deploy_options']
474 tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
475 shutil.copyfile(opnfv_env, tmp_opnfv_env)
476 tenant_nic_map = ns['networks']['tenant']['nic_mapping']
478 tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
479 tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
480 external_nic_map = ns['networks']['external'][0]['nic_mapping']
481 external_nic = dict()
482 external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]
485 private_key, public_key = make_ssh_key()
487 # Make easier/faster variables to index in the file editor
488 if 'performance' in ds_opts:
491 if 'vpp' in ds_opts['performance']['Compute']:
492 perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
495 if 'vpp' in ds_opts['performance']['Controller']:
496 perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
501 if 'ovs' in ds_opts['performance']['Compute']:
502 perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
507 if 'kernel' in ds_opts['performance']['Compute']:
508 perf_kern_comp = ds_opts['performance']['Compute']['kernel']
510 perf_kern_comp = None
514 tenant_settings = ns['networks']['tenant']
515 tenant_vlan_enabled = 'tenant' in ns.enabled_network_list and \
516 ns['networks']['tenant'].get('segmentation_type') == 'vlan'
518 # Modify OPNFV environment
519 # TODO: Change to build a dict and outputting yaml rather than parsing
520 for line in fileinput.input(tmp_opnfv_env, inplace=True):
521 line = line.strip('\n')
523 if 'CloudDomain' in line:
524 output_line = " CloudDomain: {}".format(ns['domain_name'])
525 elif 'replace_private_key' in line:
526 output_line = " private_key: |\n"
# Indent each key line to keep valid YAML block-scalar formatting.
528 for line in private_key.splitlines():
529 key_out += " {}\n".format(line)
530 output_line += key_out
531 elif 'replace_public_key' in line:
532 output_line = " public_key: '{}'".format(public_key)
533 elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
534 'resource_registry' in line:
535 output_line = "resource_registry:\n" \
536 " OS::TripleO::NodeUserData: first-boot.yaml"
537 elif 'ComputeExtraConfigPre' in line and \
538 ds_opts['dataplane'] == 'ovs_dpdk':
539 output_line = ' OS::TripleO::ComputeExtraConfigPre: ' \
540 './ovs-dpdk-preconfig.yaml'
541 elif 'NeutronNetworkVLANRanges' in line:
543 if tenant_vlan_enabled:
544 if ns['networks']['tenant']['overlay_id_range']:
545 vlan_setting = ns['networks']['tenant']['overlay_id_range']
546 if 'datacentre' not in vlan_setting:
547 vlan_setting += ',datacentre:1:1000'
548 # SRIOV networks are VLAN based provider networks. In order to
549 # simplify the deployment, nfv_sriov will be the default physnet.
550 # VLANs are not needed in advance, and the user will have to create
551 # the network specifying the segmentation-id.
554 vlan_setting += ",nfv_sriov"
556 vlan_setting = "datacentre:1:1000,nfv_sriov"
558 output_line = " NeutronNetworkVLANRanges: " + vlan_setting
559 elif 'NeutronBridgeMappings' in line and tenant_vlan_enabled:
560 if tenant_settings['overlay_id_range']:
561 physnets = tenant_settings['overlay_id_range'].split(',')
562 output_line = " NeutronBridgeMappings: "
# Non-datacentre physnets map to br-vlan; datacentre stays on br-ex.
563 for physnet in physnets:
564 physnet_name = physnet.split(':')[0]
565 if physnet_name != 'datacentre':
566 output_line += "{}:br-vlan,".format(physnet_name)
567 output_line += "datacentre:br-ex"
568 elif 'OpenDaylightProviderMappings' in line and tenant_vlan_enabled \
569 and ds_opts['sdn_controller'] == 'opendaylight':
570 if tenant_settings['overlay_id_range']:
571 physnets = tenant_settings['overlay_id_range'].split(',')
572 output_line = " OpenDaylightProviderMappings: "
573 for physnet in physnets:
574 physnet_name = physnet.split(':')[0]
575 if physnet_name != 'datacentre':
576 output_line += "{}:br-vlan,".format(physnet_name)
577 output_line += "datacentre:br-ex"
578 elif 'NeutronNetworkType' in line and tenant_vlan_enabled:
579 output_line = " NeutronNetworkType: vlan\n" \
580 " NeutronTunnelTypes: ''"
582 if ds_opts['sdn_controller'] == 'opendaylight' and \
583 'odl_vpp_routing_node' in ds_opts:
584 if 'opendaylight::vpp_routing_node' in line:
585 output_line = (" opendaylight::vpp_routing_node: {}.{}"
586 .format(ds_opts['odl_vpp_routing_node'],
588 elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
589 if 'NeutronVPPAgentPhysnets' in line:
590 # VPP interface tap0 will be used for external network
592 output_line = (" NeutronVPPAgentPhysnets: "
593 "'datacentre:{},external:tap0'"
594 .format(tenant_nic['Controller']))
595 elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
597 if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
599 elif 'NeutronDhcpAgentsPerNetwork' in line:
600 num_control, num_compute = inv.get_node_counts()
601 output_line = (" NeutronDhcpAgentsPerNetwork: {}"
602 .format(num_compute))
603 elif 'ComputeServices' in line:
604 output_line = (" ComputeServices:\n"
605 " - OS::TripleO::Services::NeutronDhcpAgent")
# Apply per-role VPP performance hiera settings (guard elided; confirm).
608 for role in 'NovaCompute', 'Controller':
609 if role == 'NovaCompute':
610 perf_opts = perf_vpp_comp
612 perf_opts = perf_vpp_ctrl
613 cfg = "{}ExtraConfig".format(role)
614 if cfg in line and perf_opts:
616 if 'main-core' in perf_opts:
617 perf_line += ("\n fdio::vpp_cpu_main_core: '{}'"
618 .format(perf_opts['main-core']))
619 if 'corelist-workers' in perf_opts:
621 "fdio::vpp_cpu_corelist_workers: '{}'"
622 .format(perf_opts['corelist-workers']))
623 if ds_opts['sdn_controller'] == 'opendaylight' and \
624 ds_opts['dataplane'] == 'fdio':
625 if role == 'NovaCompute':
627 "tripleo::profile::base::neutron::"
628 "agents::honeycomb::"
629 "interface_role_mapping:"
630 " ['{}:tenant-interface',"
631 "'{}:public-interface']"
632 .format(tenant_nic[role],
636 "tripleo::profile::base::neutron::"
637 "agents::honeycomb::"
638 "interface_role_mapping:"
639 " ['{}:tenant-interface']"
640 .format(tenant_nic[role]))
642 output_line = (" {}:{}".format(cfg, perf_line))
644 if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
645 for k, v in OVS_PERF_MAP.items():
646 if k in line and v in perf_ovs_comp:
647 output_line = " {}: '{}'".format(k, perf_ovs_comp[v])
650 # (FIXME) use compute's kernel settings for all nodes for now.
652 if 'NovaSchedulerDefaultFilters' in line:
654 " NovaSchedulerDefaultFilters: 'RamFilter," \
655 "ComputeFilter,AvailabilityZoneFilter," \
656 "ComputeCapabilitiesFilter," \
657 "ImagePropertiesFilter,NUMATopologyFilter'"
658 elif 'ComputeKernelArgs' in line:
660 for k, v in perf_kern_comp.items():
661 kernel_args += "{}={} ".format(k, v)
663 output_line = " ComputeKernelArgs: '{}'".\
668 logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
# Builds a base64-encoded Ceph keyring secret: a packed little-endian header
# (type=1, timestamp, 0, key length) followed by the key bytes.
# NOTE(review): the line generating `key` itself is elided from this view --
# presumably random bytes; confirm in the full source.
671 def generate_ceph_key():
673 header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
674 return base64.b64encode(header + key)
# Populates the copied storage-environment.yaml with generated Ceph
# FSID/keys and, for containerized deploys, ceph-ansible parameters.
677 def prep_storage_env(ds, ns, virtual, tmp_dir):
679 Creates storage environment file for deployment. Source file is copied by
680 undercloud playbook to host.
687 ds_opts = ds['deploy_options']
688 storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
689 if not os.path.isfile(storage_file):
690 logging.error("storage-environment file is not in tmp directory: {}. "
691 "Check if file was copied from "
692 "undercloud".format(tmp_dir))
693 raise ApexDeployException("storage-environment file not copied from "
# In-place edit: print() output replaces each matched line in the file.
695 for line in fileinput.input(storage_file, inplace=True):
696 line = line.strip('\n')
697 if 'CephClusterFSID' in line:
698 print(" CephClusterFSID: {}".format(str(uuid.uuid4())))
699 elif 'CephMonKey' in line:
700 print(" CephMonKey: {}".format(generate_ceph_key().decode(
702 elif 'CephAdminKey' in line:
703 print(" CephAdminKey: {}".format(generate_ceph_key().decode(
705 elif 'CephClientKey' in line:
706 print(" CephClientKey: {}".format(generate_ceph_key().decode(
711 if ds_opts['containers']:
714 # max pgs allowed are calculated as num_mons * 200. Therefore we
715 # set number of pgs and pools so that the total will be less:
716 # num_pgs * num_pools * num_osds
717 ceph_params['CephPoolDefaultSize'] = 2
718 ceph_params['CephPoolDefaultPgNum'] = 32
# Memory limits constrain Ceph containers (presumably virtual deploys only;
# guard elided -- confirm).
720 ceph_params['CephAnsibleExtraConfig'] = {
721 'centos_package_dependencies': [],
722 'ceph_osd_docker_memory_limit': '1g',
723 'ceph_mds_docker_memory_limit': '1g',
725 ceph_device = ds_opts['ceph_device']
726 ceph_params['CephAnsibleDisksConfig'] = {
727 'devices': [ceph_device],
729 'osd_scenario': 'collocated'
731 utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
732 # TODO(trozet): remove following block as we only support containers now
733 elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
734 with open(storage_file, 'a') as fh:
735 fh.write(' ExtraConfig:\n')
736 fh.write(" ceph::profile::params::osds:{{{}:{{}}}}\n".format(
737 ds_opts['ceph_device']
# Rewrites the SRIOV THT env file in place, substituting the configured
# SRIOV interface into device mappings / VF counts / PCI passthrough.
741 def prep_sriov_env(ds, tmp_dir):
743 Creates SRIOV environment file for deployment. Source file is copied by
744 undercloud playbook to host.
749 ds_opts = ds['deploy_options']
750 sriov_iface = ds_opts['sriov']
751 sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
752 if not os.path.isfile(sriov_file):
753 logging.error("sriov-environment file is not in tmp directory: {}. "
754 "Check if file was copied from "
755 "undercloud".format(tmp_dir))
756 raise ApexDeployException("sriov-environment file not copied from "
758 # TODO(rnoriega): Instead of line editing, refactor this code to load
759 # yaml file into a dict, edit it and write the file back.
760 for line in fileinput.input(sriov_file, inplace=True):
761 line = line.strip('\n')
# line[3:] uncomments the template line (strips the leading comment marker
# and keeps two-space YAML indent).
762 if 'NovaSchedulerDefaultFilters' in line:
763 print(" {}".format(line[3:]))
764 elif 'NovaSchedulerAvailableFilters' in line:
765 print(" {}".format(line[3:]))
766 elif 'NeutronPhysicalDevMappings' in line:
767 print(" NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
768 .format(sriov_iface))
769 elif 'NeutronSriovNumVFs' in line:
770 print(" NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
771 elif 'NovaPCIPassthrough' in line:
772 print(" NovaPCIPassthrough:")
773 elif 'devname' in line:
774 print(" - devname: \"{}\"".format(sriov_iface))
775 elif 'physical_network' in line:
776 print(" physical_network: \"nfv_sriov\"")
# Builds the `openstack network/subnet create` commands for the external
# (or fallback admin) network. NOTE(review): the cmds initialization and
# the else-branches for the external/admin split are elided from this view.
781 def external_network_cmds(ns, ds):
783 Generates external network openstack commands
784 :param ns: network settings
785 :param ds: deploy settings
786 :return: list of commands to configure external network
788 ds_opts = ds['deploy_options']
789 external_physnet = 'datacentre'
790 if ds_opts['dataplane'] == 'fdio' and \
791 ds_opts['sdn_controller'] != 'opendaylight':
792 external_physnet = 'external'
793 if 'external' in ns.enabled_network_list:
794 net_config = ns['networks']['external'][0]
796 pool_start, pool_end = net_config['floating_ip_range']
# Fall back to the admin network's introspection range when no external
# network is enabled.
798 net_config = ns['networks']['admin']
800 pool_start, pool_end = ns['apex']['networks']['admin'][
801 'introspection_range']
802 nic_config = net_config['nic_mapping']
803 gateway = net_config['gateway']
805 # create network command
806 if nic_config['compute']['vlan'] == 'native':
809 ext_type = "vlan --provider-segment {}".format(nic_config[
811 cmds.append("openstack network create external --project service "
812 "--external --provider-network-type {} "
813 "--provider-physical-network {}"
814 .format(ext_type, external_physnet))
815 # create subnet command
816 cidr = net_config['cidr']
817 subnet_cmd = "openstack subnet create external-subnet --project " \
818 "service --network external --no-dhcp --gateway {} " \
819 "--allocation-pool start={},end={} --subnet-range " \
820 "{}".format(gateway, pool_start, pool_end, str(cidr))
# IPv6 external networks use SLAAC for both RA and address mode.
821 if external and cidr.version == 6:
822 subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
823 '--ipv6-address-mode slaac'
824 cmds.append(subnet_cmd)
825 logging.debug("Neutron external network commands determined "
826 "as: {}".format(cmds))
830 def create_congress_cmds(overcloud_file):
831 drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
832 overcloudrc = parsers.parse_overcloudrc(overcloud_file)
833 logging.info("Creating congress commands")
836 "username={}".format(overcloudrc['OS_USERNAME']),
837 "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
838 "password={}".format(overcloudrc['OS_PASSWORD']),
839 "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
842 logging.error("Unable to find all keys required for congress in "
843 "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
844 "OS_PASSWORD, OS_AUTH_URL. Please check overcloudrc "
845 "file: {}".format(overcloud_file))
848 ds_cfg = '--config ' + ' --config '.join(ds_cfg)
850 for driver in drivers:
851 if driver == 'doctor':
852 cmd = "{} \"{}\"".format(driver, driver)
854 cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
856 cmd += ' --config api_version="2.34"'
857 logging.debug("Congress command created: {}".format(cmd))