1 ##############################################################################
2 # Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
19 import apex.builders.overcloud_builder as oc_builder
20 import apex.builders.common_builder as c_builder
22 from apex.common import constants as con
23 from apex.common.exceptions import ApexDeployException
24 from apex.common import parsers
25 from apex.common import utils
26 from apex.virtual import utils as virt_utils
27 from cryptography.hazmat.primitives import serialization as \
29 from cryptography.hazmat.primitives.asymmetric import rsa
30 from cryptography.hazmat.backends import default_backend as \
31 crypto_default_backend
# NOTE(review): the embedded original line numbers jump in this listing, so
# the assignment names/braces for the mappings below are elided from view.
# Entries map deploy-setting keys to TripleO Heat Template env file names
# (consumed by build_sdn_env_list via SDN_FILE_MAP).
36 'sfc': 'neutron-sfc-opendaylight.yaml',
37 'vpn': 'neutron-bgpvpn-opendaylight.yaml',
38 'gluon': 'gluon.yaml',
40 'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
41 'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
42 'default': 'neutron-opendaylight-honeycomb.yaml'
44 'l2gw': 'neutron-l2gw-opendaylight.yaml',
45 'sriov': 'neutron-opendaylight-sriov.yaml',
46 'default': 'neutron-opendaylight.yaml',
49 'sfc': 'neutron-onos-sfc.yaml',
50 'default': 'neutron-onos.yaml'
52 'ovn': 'neutron-ml2-ovn.yaml',
54 'vpp': 'neutron-ml2-vpp.yaml',
55 'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
# Feature toggles mapped to their enable_* env files (presumably
# OTHER_FILE_MAP, iterated in create_deploy_cmd -- assignment line elided).
60 'tacker': 'enable_tacker.yaml',
61 'congress': 'enable_congress.yaml',
62 'barometer': 'enable_barometer.yaml',
63 'rt_kvm': 'enable_rt_kvm.yaml'
# THT parameter name -> performance-settings key for OVS/DPDK tuning
# (presumably OVS_PERF_MAP, used in prep_env -- assignment line elided).
67 'HostCpusList': 'dpdk_cores',
68 'NeutronDpdkCoreList': 'pmd_cores',
69 'NeutronDpdkSocketMemory': 'socket_memory',
70 'NeutronDpdkMemoryChannels': 'memory_channels'
# RPM names/paths used when customizing the overcloud image in prep_image.
73 OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
74 OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
75 ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
78 LOOP_DEVICE_SIZE = "10G"
# systemd unit (partial in this view) that binds /dev/loop3 to /srv/data.img,
# used by prep_image for Ceph OSDs when ceph_device is /dev/loop3.
80 LOSETUP_SERVICE = """[Unit]
81 Description=Setup loop devices
86 ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
87 ExecStop=/sbin/losetup -d /dev/loop3
92 WantedBy=multi-user.target
96 def build_sdn_env_list(ds, sdn_map, env_list=None):
98 Builds a list of SDN environment files to be used in the deploy cmd.
100 This function recursively searches an sdn_map. First the sdn controller is
101 matched and then the function looks for enabled features for that
102 controller to determine which environment files should be used. By
103 default the feature will be checked if set to true in deploy settings to be
104 added to the list. If a feature does not have a boolean value, then the
105 key and value pair to compare with are checked as a tuple (k,v).
107 :param ds: deploy settings
108 :param sdn_map: SDN map to recursively search
109 :param env_list: recursive var to hold previously found env_list
110 :return: A list of env files
# Match either the active sdn_controller value itself or an enabled
# feature key present in the deploy settings.
114 for k, v in sdn_map.items():
115 if ds['sdn_controller'] == k or (k in ds and ds[k]):
116 if isinstance(v, dict):
117 # Append default SDN env file first
118 # The assumption is that feature-enabled SDN env files
119 # override and do not conflict with previously set default
121 if ds['sdn_controller'] == k and 'default' in v:
122 env_list.append(os.path.join(con.THT_ENV_DIR,
# Recurse into the nested map to pick up enabled features.
124 env_list.extend(build_sdn_env_list(ds, v))
125 # check if the value is not a boolean
126 elif isinstance(v, tuple):
128 env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
130 env_list.append(os.path.join(con.THT_ENV_DIR, v))
# Nothing matched: fall back to the map's top-level 'default' entry;
# warn when the map provides none (branch handling on elided lines).
131 if len(env_list) == 0:
133 env_list.append(os.path.join(
134 con.THT_ENV_DIR, sdn_map['default']))
136 logging.warning("Unable to find default file for SDN")
141 def get_docker_sdn_files(ds_opts):
143 Returns docker env file for detected SDN
144 :param ds_opts: deploy options
145 :return: list of docker THT env files for an SDN
# Known containerized services; values may map a baremetal env file name to
# a differently-named docker env file (None means keep the same basename).
147 docker_services = con.VALID_DOCKER_SERVICES
148 tht_dir = con.THT_DOCKER_ENV_DIR[ds_opts['os_version']]
149 sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
# Rewrite each SDN env path in place to its docker THT equivalent.
150 for i, sdn_file in enumerate(sdn_env_list):
151 sdn_base = os.path.basename(sdn_file)
152 if sdn_base in docker_services:
153 if docker_services[sdn_base] is not None:
# NOTE(review): the assignment-target line is elided here; this RHS
# joins the mapped docker file name onto tht_dir.
155 os.path.join(tht_dir, docker_services[sdn_base])
157 sdn_env_list[i] = os.path.join(tht_dir, sdn_base)
# Builds the 'openstack overcloud deploy' command line from deploy/network
# settings and the node inventory, and writes it to <tmp_dir>/deploy_command.
161 def create_deploy_cmd(ds, ns, inv, tmp_dir,
162 virtual, env_file='opnfv-environment.yaml',
165 logging.info("Creating deployment command")
166 deploy_options = ['network-environment.yaml']
168 ds_opts = ds['deploy_options']
170 if ds_opts['containers']:
171 deploy_options.append(os.path.join(con.THT_ENV_DIR,
# HA deployments add pacemaker; containerized HA uses a different env file
# than the puppet-pacemaker one.
174 if ds['global_params']['ha_enabled']:
175 if ds_opts['containers']:
176 deploy_options.append(os.path.join(con.THT_ENV_DIR,
179 deploy_options.append(os.path.join(con.THT_ENV_DIR,
180 'puppet-pacemaker.yaml'))
183 deploy_options.append(env_file)
# Containerized deploys swap in docker image env files and docker SDN files.
185 if ds_opts['containers']:
186 deploy_options.append('docker-images.yaml')
187 sdn_docker_files = get_docker_sdn_files(ds_opts)
188 for sdn_docker_file in sdn_docker_files:
189 deploy_options.append(sdn_docker_file)
191 deploy_options.append('sdn-images.yaml')
193 deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)
# Append env files for any other enabled features (tacker, congress, ...).
195 for k, v in OTHER_FILE_MAP.items():
196 if k in ds_opts and ds_opts[k]:
197 if ds_opts['containers']:
198 deploy_options.append(os.path.join(con.THT_DOCKER_ENV_DIR,
199 "{}.yaml".format(k)))
201 deploy_options.append(os.path.join(con.THT_ENV_DIR, v))
203 if ds_opts['ceph'] and 'csit' not in env_file:
204 prep_storage_env(ds, ns, virtual, tmp_dir)
205 deploy_options.append(os.path.join(con.THT_ENV_DIR,
206 'storage-environment.yaml'))
208 prep_sriov_env(ds, tmp_dir)
210 # Check for 'k8s' here intentionally, as we may support other values
211 # such as openstack/openshift for 'vim' option.
212 if ds_opts['vim'] == 'k8s':
213 deploy_options.append('kubernetes-environment.yaml')
216 deploy_options.append('virtual-environment.yaml')
218 deploy_options.append('baremetal-environment.yaml')
# Sanity-check node counts before emitting the scale arguments.
220 num_control, num_compute = inv.get_node_counts()
221 if num_control == 0 or num_compute == 0:
222 logging.error("Detected 0 control or compute nodes. Control nodes: "
223 "{}, compute nodes{}".format(num_control, num_compute))
224 raise ApexDeployException("Invalid number of control or computes")
225 elif num_control > 1 and not ds['global_params']['ha_enabled']:
227 if platform.machine() == 'aarch64':
228 # aarch64 deploys were not completing in the default 90 mins.
229 # Not sure if this is related to the hardware the OOO support
230 # was developed on or the virtualization support in CentOS
231 # Either way it will probably get better over time as the aarch
232 # support matures in CentOS and deploy time should be tested in
233 # the future so this multiplier can be removed.
234 con.DEPLOY_TIMEOUT *= 2
235 cmd = "openstack overcloud deploy --templates --timeout {} " \
236 .format(con.DEPLOY_TIMEOUT)
238 for option in deploy_options:
239 cmd += " -e {}".format(option)
240 cmd += " --ntp-server {}".format(ns['ntp'][0])
241 cmd += " --control-scale {}".format(num_control)
242 cmd += " --compute-scale {}".format(num_compute)
243 cmd += ' --control-flavor control --compute-flavor compute'
245 cmd += ' --networks-file network_data.yaml'
# Fall back to qemu when nested KVM is not enabled on the host
# (guarding condition for the virtual case is on elided lines).
248 with open('/sys/module/kvm_intel/parameters/nested') as f:
249 nested_kvm = f.read().strip()
250 if nested_kvm != 'Y':
251 libvirt_type = 'qemu'
252 cmd += ' --libvirt-type {}'.format(libvirt_type)
253 logging.info("Deploy command set: {}".format(cmd))
# Persist the final command for the deploy playbook to execute.
255 with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
# Customizes a copy of the overcloud qcow2 image (SDN packages, kernel
# modules, proxy env, upstream patches) via virt-customize before deploy.
260 def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
263 Locates sdn image and preps for deployment.
264 :param ds: deploy settings
265 :param ns: network settings
266 :param img: sdn image
267 :param tmp_dir: dir to store modified sdn image
268 :param root_pw: password to configure for overcloud image
269 :param docker_tag: Docker image tag for RDO version (default None)
270 :param patches: List of patches to apply to overcloud image
273 # TODO(trozet): Come up with a better way to organize this logic in this
275 logging.info("Preparing image: {} for deployment".format(img))
276 if not os.path.isfile(img):
277 logging.error("Missing SDN image {}".format(img))
278 raise ApexDeployException("Missing SDN image file: {}".format(img))
280 ds_opts = ds['deploy_options']
282 sdn = ds_opts['sdn_controller']
283 patched_containers = set()
284 # we need this due to rhbz #1436021
285 # fixed in systemd-219-37.el7
287 logging.info("Neutron openvswitch-agent disabled")
290 "rm -f /etc/systemd/system/multi-user.target.wants/"
291 "neutron-openvswitch-agent.service"},
294 "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
# Propagate proxy settings from network settings into the image environment.
298 if ns.get('http_proxy', ''):
301 "echo 'http_proxy={}' >> /etc/environment".format(
304 if ns.get('https_proxy', ''):
307 "echo 'https_proxy={}' >> /etc/environment".format(
# Work on a copy in tmp_dir so the source image is left untouched.
310 tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
311 shutil.copyfile(img, tmp_oc_image)
312 logging.debug("Temporary overcloud image stored as: {}".format(
# Quagga injection and zrpcd boot-start setup (guarding condition elided).
316 oc_builder.inject_quagga(tmp_oc_image, tmp_dir)
317 virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
320 "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
321 "/opt/quagga/etc/init.d/zrpcd_start.sh"})
323 con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
326 con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
327 "init.d/zrpcd_start.sh' /etc/rc.local "})
329 con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
330 "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
331 logging.info("ZRPCD process started")
333 dataplane = ds_opts['dataplane']
334 if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
335 logging.info("Enabling kernel modules for dpdk")
336 # file to module mapping
338 os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
339 os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
# Write a modprobe wrapper script per module and upload it into the image.
341 for mod_file, mod in uio_types.items():
342 with open(mod_file, 'w') as fh:
343 fh.write('#!/bin/bash\n')
344 fh.write('exec /sbin/modprobe {}'.format(mod))
348 {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
350 {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
351 "{}".format(os.path.basename(mod_file))}
354 pw_op = "password:{}".format(root_pw)
355 virt_cmds.append({con.VIRT_PW: pw_op})
# Plain-OVS dataplane installs NSH-capable OVS RPMs; ODL instead downgrades
# to a custom OVS build (see FIXME below).
357 if dataplane == 'ovs':
360 {con.VIRT_RUN_CMD: "yum -y install "
361 "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
362 "{}".format(OVS_NSH_KMOD_RPM)},
363 {con.VIRT_RUN_CMD: "yum downgrade -y "
364 "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
365 "{}".format(OVS_NSH_RPM)}
367 elif sdn == 'opendaylight':
368 # FIXME(trozet) remove this after RDO is updated with fix for
369 # https://bugzilla.redhat.com/show_bug.cgi?id=1544892
370 ovs_file = os.path.basename(con.CUSTOM_OVS)
371 ovs_url = con.CUSTOM_OVS.replace(ovs_file, '')
372 utils.fetch_upstream_and_unpack(dest=tmp_dir, url=ovs_url,
375 {con.VIRT_UPLOAD: "{}:/root/".format(os.path.join(tmp_dir,
377 {con.VIRT_RUN_CMD: "yum downgrade -y /root/{}".format(
380 if dataplane == 'fdio':
381 # Patch neutron with using OVS external interface for router
382 # and add generic linux NS interface driver
384 {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
385 "-p1 < neutron-patch-NSDriver.patch"})
388 {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
389 {con.VIRT_RUN_CMD: "yum install -y "
390 "/root/nosdn_vpp_rpms/*.rpm"}
# Inject OpenDaylight into the image and track it as a patched container.
393 if sdn == 'opendaylight':
394 undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
395 'installer_vm']['ip']
396 oc_builder.inject_opendaylight(
397 odl_version=ds_opts['odl_version'],
400 uc_ip=undercloud_admin_ip,
401 os_version=ds_opts['os_version'],
402 docker_tag=docker_tag,
405 patched_containers = patched_containers.union({'opendaylight'})
# Upstream patches: branch name is 'master' or 'stable/<os_version>'.
408 if ds_opts['os_version'] == 'master':
409 branch = ds_opts['os_version']
411 branch = "stable/{}".format(ds_opts['os_version'])
412 logging.info('Adding patches to overcloud')
413 patched_containers = patched_containers.union(
414 c_builder.add_upstream_patches(patches,
415 tmp_oc_image, tmp_dir,
417 uc_ip=undercloud_admin_ip,
418 docker_tag=docker_tag))
419 # if containers with ceph, and no ceph device we need to use a
420 # persistent loop device for Ceph OSDs
421 if docker_tag and ds_opts['ceph_device'] == '/dev/loop3':
422 tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
423 with open(tmp_losetup, 'w') as fh:
424 fh.write(LOSETUP_SERVICE)
426 {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(tmp_losetup)
428 {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size {}'
429 .format(LOOP_DEVICE_SIZE)},
430 {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
431 {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
433 # TODO(trozet) remove this after LP#173474 is fixed
434 dhcp_unit = '/usr/lib/systemd/system/dhcp-interface@.service'
436 {con.VIRT_RUN_CMD: "crudini --del {} Unit "
437 "ConditionPathExists".format(dhcp_unit)})
# Apply every accumulated virt-customize operation to the image copy.
438 virt_utils.virt_customize(virt_cmds, tmp_oc_image)
439 logging.info("Overcloud image customization complete")
440 return patched_containers
445 Creates public and private ssh keys with 1024 bit RSA encryption
446 :return: private, public key
448 key = rsa.generate_private_key(
449 backend=crypto_default_backend(),
450 public_exponent=65537,
# Serialize: PEM/PKCS8 (unencrypted) for the private key, OpenSSH text
# format for the public key.
454 private_key = key.private_bytes(
455 crypto_serialization.Encoding.PEM,
456 crypto_serialization.PrivateFormat.PKCS8,
457 crypto_serialization.NoEncryption())
458 public_key = key.public_key().public_bytes(
459 crypto_serialization.Encoding.OpenSSH,
460 crypto_serialization.PublicFormat.OpenSSH
# Both serialized keys are bytes; return them as utf-8 strings.
462 return private_key.decode('utf-8'), public_key.decode('utf-8')
465 def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
467 Creates modified opnfv/network environments for deployment
468 :param ds: deploy settings
469 :param ns: network settings
470 :param inv: node inventory
471 :param opnfv_env: file path for opnfv-environment file
472 :param net_env: file path for network-environment file
473 :param tmp_dir: Apex tmp dir
477 logging.info("Preparing opnfv-environment and network-environment files")
478 ds_opts = ds['deploy_options']
# Edit a copy of the opnfv-environment file under tmp_dir, not the original.
479 tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
480 shutil.copyfile(opnfv_env, tmp_opnfv_env)
# First NIC member per role for the tenant and external networks.
481 tenant_nic_map = ns['networks']['tenant']['nic_mapping']
483 tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
484 tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
485 external_nic_map = ns['networks']['external'][0]['nic_mapping']
486 external_nic = dict()
487 external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]
# SSH keypair to be templated into the environment file below.
490 private_key, public_key = make_ssh_key()
492 # Make easier/faster variables to index in the file editor
493 if 'performance' in ds_opts:
496 if 'vpp' in ds_opts['performance']['Compute']:
497 perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
500 if 'vpp' in ds_opts['performance']['Controller']:
501 perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
506 if 'ovs' in ds_opts['performance']['Compute']:
507 perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
512 if 'kernel' in ds_opts['performance']['Compute']:
513 perf_kern_comp = ds_opts['performance']['Compute']['kernel']
515 perf_kern_comp = None
519 tenant_settings = ns['networks']['tenant']
520 tenant_vlan_enabled = 'tenant' in ns.enabled_network_list and \
521 ns['networks']['tenant'].get('segmentation_type') == 'vlan'
523 # Modify OPNFV environment
524 # TODO: Change to build a dict and outputting yaml rather than parsing
525 for line in fileinput.input(tmp_opnfv_env, inplace=True):
526 line = line.strip('\n')
528 if 'CloudDomain' in line:
529 output_line = " CloudDomain: {}".format(ns['domain_name'])
530 elif 'replace_private_key' in line:
531 output_line = " private_key: |\n"
533 for line in private_key.splitlines():
534 key_out += " {}\n".format(line)
535 output_line += key_out
536 elif 'replace_public_key' in line:
537 output_line = " public_key: '{}'".format(public_key)
538 elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
539 'resource_registry' in line:
540 output_line = "resource_registry:\n" \
541 " OS::TripleO::NodeUserData: first-boot.yaml"
542 elif 'ComputeExtraConfigPre' in line and \
543 ds_opts['dataplane'] == 'ovs_dpdk':
544 output_line = ' OS::TripleO::ComputeExtraConfigPre: ' \
545 './ovs-dpdk-preconfig.yaml'
546 elif 'NeutronNetworkVLANRanges' in line:
548 if tenant_vlan_enabled:
549 if ns['networks']['tenant']['overlay_id_range']:
550 vlan_setting = ns['networks']['tenant']['overlay_id_range']
551 if 'datacentre' not in vlan_setting:
552 vlan_setting += ',datacentre:1:1000'
553 # SRIOV networks are VLAN based provider networks. In order to
554 # simplify the deployment, nfv_sriov will be the default physnet.
555 # VLANs are not needed in advance, and the user will have to create
556 # the network specifying the segmentation-id.
559 vlan_setting += ",nfv_sriov"
561 vlan_setting = "datacentre:1:1000,nfv_sriov"
563 output_line = " NeutronNetworkVLANRanges: " + vlan_setting
564 elif 'NeutronBridgeMappings' in line and tenant_vlan_enabled:
565 if tenant_settings['overlay_id_range']:
566 physnets = tenant_settings['overlay_id_range'].split(',')
567 output_line = " NeutronBridgeMappings: "
# Non-datacentre physnets map to br-vlan; datacentre stays on br-ex.
568 for physnet in physnets:
569 physnet_name = physnet.split(':')[0]
570 if physnet_name != 'datacentre':
571 output_line += "{}:br-vlan,".format(physnet_name)
572 output_line += "datacentre:br-ex"
573 elif 'OpenDaylightProviderMappings' in line and tenant_vlan_enabled \
574 and ds_opts['sdn_controller'] == 'opendaylight':
575 if tenant_settings['overlay_id_range']:
576 physnets = tenant_settings['overlay_id_range'].split(',')
577 output_line = " OpenDaylightProviderMappings: "
578 for physnet in physnets:
579 physnet_name = physnet.split(':')[0]
580 if physnet_name != 'datacentre':
581 output_line += "{}:br-vlan,".format(physnet_name)
582 output_line += "datacentre:br-ex"
583 elif 'NeutronNetworkType' in line and tenant_vlan_enabled:
584 output_line = " NeutronNetworkType: vlan\n" \
585 " NeutronTunnelTypes: ''"
587 if ds_opts['sdn_controller'] == 'opendaylight' and \
588 'odl_vpp_routing_node' in ds_opts:
589 if 'opendaylight::vpp_routing_node' in line:
590 output_line = (" opendaylight::vpp_routing_node: {}.{}"
591 .format(ds_opts['odl_vpp_routing_node'],
593 elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
594 if 'NeutronVPPAgentPhysnets' in line:
595 # VPP interface tap0 will be used for external network
597 output_line = (" NeutronVPPAgentPhysnets: "
598 "'datacentre:{},external:tap0'"
599 .format(tenant_nic['Controller']))
600 elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
602 if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
604 elif 'NeutronDhcpAgentsPerNetwork' in line:
605 num_control, num_compute = inv.get_node_counts()
606 output_line = (" NeutronDhcpAgentsPerNetwork: {}"
607 .format(num_compute))
608 elif 'ComputeServices' in line:
609 output_line = (" ComputeServices:\n"
610 " - OS::TripleO::Services::NeutronDhcpAgent")
# Per-role ExtraConfig: VPP core pinning and honeycomb interface mapping.
613 for role in 'NovaCompute', 'Controller':
614 if role == 'NovaCompute':
615 perf_opts = perf_vpp_comp
617 perf_opts = perf_vpp_ctrl
618 cfg = "{}ExtraConfig".format(role)
619 if cfg in line and perf_opts:
621 if 'main-core' in perf_opts:
622 perf_line += ("\n fdio::vpp_cpu_main_core: '{}'"
623 .format(perf_opts['main-core']))
624 if 'corelist-workers' in perf_opts:
626 "fdio::vpp_cpu_corelist_workers: '{}'"
627 .format(perf_opts['corelist-workers']))
628 if ds_opts['sdn_controller'] == 'opendaylight' and \
629 ds_opts['dataplane'] == 'fdio':
630 if role == 'NovaCompute':
632 "tripleo::profile::base::neutron::"
633 "agents::honeycomb::"
634 "interface_role_mapping:"
635 " ['{}:tenant-interface',"
636 "'{}:public-interface']"
637 .format(tenant_nic[role],
641 "tripleo::profile::base::neutron::"
642 "agents::honeycomb::"
643 "interface_role_mapping:"
644 " ['{}:tenant-interface']"
645 .format(tenant_nic[role]))
647 output_line = (" {}:{}".format(cfg, perf_line))
# OVS/DPDK tuning parameters copied from compute performance settings.
649 if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
650 for k, v in OVS_PERF_MAP.items():
651 if k in line and v in perf_ovs_comp:
652 output_line = " {}: '{}'".format(k, perf_ovs_comp[v])
655 # (FIXME) use compute's kernel settings for all nodes for now.
657 if 'NovaSchedulerDefaultFilters' in line:
659 " NovaSchedulerDefaultFilters: 'RamFilter," \
660 "ComputeFilter,AvailabilityZoneFilter," \
661 "ComputeCapabilitiesFilter," \
662 "ImagePropertiesFilter,NUMATopologyFilter'"
663 elif 'ComputeKernelArgs' in line:
665 for k, v in perf_kern_comp.items():
666 kernel_args += "{}={} ".format(k, v)
668 output_line = " ComputeKernelArgs: '{}'".\
673 logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
# Returns a base64-encoded Ceph secret: a packed little-endian header
# (type=1, current unix timestamp, 0, key length) followed by the raw key.
# NOTE(review): 'key' is assigned on a line elided from this view
# (presumably random bytes) -- confirm against the full file.
676 def generate_ceph_key():
678 header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
679 return base64.b64encode(header + key)
682 def prep_storage_env(ds, ns, virtual, tmp_dir):
684 Creates storage environment file for deployment. Source file is copied by
685 undercloud playbook to host.
692 ds_opts = ds['deploy_options']
693 storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
694 if not os.path.isfile(storage_file):
695 logging.error("storage-environment file is not in tmp directory: {}. "
696 "Check if file was copied from "
697 "undercloud".format(tmp_dir))
698 raise ApexDeployException("storage-environment file not copied from "
# In-place edit: inject a fresh FSID and freshly generated Ceph keys.
700 for line in fileinput.input(storage_file, inplace=True):
701 line = line.strip('\n')
702 if 'CephClusterFSID' in line:
703 print(" CephClusterFSID: {}".format(str(uuid.uuid4())))
704 elif 'CephMonKey' in line:
705 print(" CephMonKey: {}".format(generate_ceph_key().decode(
707 elif 'CephAdminKey' in line:
708 print(" CephAdminKey: {}".format(generate_ceph_key().decode(
710 elif 'CephClientKey' in line:
711 print(" CephClientKey: {}".format(generate_ceph_key().decode(
# Containerized ceph-ansible deployments add pool sizing and container
# memory limits via parameter_defaults.
716 if ds_opts['containers']:
719 # max pgs allowed are calculated as num_mons * 200. Therefore we
720 # set number of pgs and pools so that the total will be less:
721 # num_pgs * num_pools * num_osds
722 ceph_params['CephPoolDefaultSize'] = 2
723 ceph_params['CephPoolDefaultPgNum'] = 32
725 ceph_params['CephAnsibleExtraConfig'] = {
726 'centos_package_dependencies': [],
727 'ceph_osd_docker_memory_limit': '1g',
728 'ceph_mds_docker_memory_limit': '1g',
730 ceph_device = ds_opts['ceph_device']
731 ceph_params['CephAnsibleDisksConfig'] = {
732 'devices': [ceph_device],
734 'osd_scenario': 'collocated'
736 utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
737 # TODO(trozet): remove following block as we only support containers now
738 elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
739 with open(storage_file, 'a') as fh:
740 fh.write(' ExtraConfig:\n')
741 fh.write(" ceph::profile::params::osds:{{{}:{{}}}}\n".format(
742 ds_opts['ceph_device']
746 def prep_sriov_env(ds, tmp_dir):
748 Creates SRIOV environment file for deployment. Source file is copied by
749 undercloud playbook to host.
754 ds_opts = ds['deploy_options']
# Interface name to expose for SR-IOV, taken from the 'sriov' deploy option.
755 sriov_iface = ds_opts['sriov']
756 sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
757 if not os.path.isfile(sriov_file):
758 logging.error("sriov-environment file is not in tmp directory: {}. "
759 "Check if file was copied from "
760 "undercloud".format(tmp_dir))
761 raise ApexDeployException("sriov-environment file not copied from "
763 # TODO(rnoriega): Instead of line editing, refactor this code to load
764 # yaml file into a dict, edit it and write the file back.
765 for line in fileinput.input(sriov_file, inplace=True):
766 line = line.strip('\n')
767 if 'NovaSchedulerDefaultFilters' in line:
# NOTE(review): drops the first 3 chars of the template line --
# presumably a comment prefix; confirm against the source template.
768 print(" {}".format(line[3:]))
769 elif 'NovaSchedulerAvailableFilters' in line:
770 print(" {}".format(line[3:]))
771 elif 'NeutronPhysicalDevMappings' in line:
772 print(" NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
773 .format(sriov_iface))
774 elif 'NeutronSriovNumVFs' in line:
775 print(" NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
776 elif 'NovaPCIPassthrough' in line:
777 print(" NovaPCIPassthrough:")
778 elif 'devname' in line:
779 print(" - devname: \"{}\"".format(sriov_iface))
780 elif 'physical_network' in line:
781 print(" physical_network: \"nfv_sriov\"")
786 def external_network_cmds(ns, ds):
788 Generates external network openstack commands
789 :param ns: network settings
790 :param ds: deploy settings
791 :return: list of commands to configure external network
793 ds_opts = ds['deploy_options']
# FDIO without ODL uses the 'external' physnet; everything else 'datacentre'.
794 external_physnet = 'datacentre'
795 if ds_opts['dataplane'] == 'fdio' and \
796 ds_opts['sdn_controller'] != 'opendaylight':
797 external_physnet = 'external'
# Prefer the dedicated external network when enabled; otherwise fall back
# to the admin network and its introspection range for the floating pool.
798 if 'external' in ns.enabled_network_list:
799 net_config = ns['networks']['external'][0]
801 pool_start, pool_end = net_config['floating_ip_range']
803 net_config = ns['networks']['admin']
805 pool_start, pool_end = ns['apex']['networks']['admin'][
806 'introspection_range']
807 nic_config = net_config['nic_mapping']
808 gateway = net_config['gateway']
810 # create network command
# Native VLAN maps to a flat provider network; otherwise pass the segment.
811 if nic_config['compute']['vlan'] == 'native':
814 ext_type = "vlan --provider-segment {}".format(nic_config[
816 cmds.append("openstack network create external --project service "
817 "--external --provider-network-type {} "
818 "--provider-physical-network {}"
819 .format(ext_type, external_physnet))
820 # create subnet command
821 cidr = net_config['cidr']
822 subnet_cmd = "openstack subnet create external-subnet --project " \
823 "service --network external --no-dhcp --gateway {} " \
824 "--allocation-pool start={},end={} --subnet-range " \
825 "{}".format(gateway, pool_start, pool_end, str(cidr))
# 'external' flag is set on elided lines above; IPv6 externals get SLAAC.
826 if external and cidr.version == 6:
827 subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
828 '--ipv6-address-mode slaac'
829 cmds.append(subnet_cmd)
830 logging.debug("Neutron external network commands determined "
831 "as: {}".format(cmds))
# Builds congress datasource command strings for each supported driver,
# using credentials parsed from the given overcloudrc file.
# NOTE(review): the tail of this function (appending/returning cmds) falls
# past the end of this view.
835 def create_congress_cmds(overcloud_file):
836 drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
837 overcloudrc = parsers.parse_overcloudrc(overcloud_file)
838 logging.info("Creating congress commands")
# Credential --config values pulled from the parsed overcloudrc; the
# surrounding try/list-building lines are elided from this view.
841 "username={}".format(overcloudrc['OS_USERNAME']),
842 "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
843 "password={}".format(overcloudrc['OS_PASSWORD']),
844 "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
847 logging.error("Unable to find all keys required for congress in "
848 "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
849 "OS_PASSWORD, OS_AUTH_URL. Please check overcloudrc "
850 "file: {}".format(overcloud_file))
# Join the credential list into repeated '--config k=v' arguments.
853 ds_cfg = '--config ' + ' --config '.join(ds_cfg)
# The doctor driver takes no credential config; others append ds_cfg.
855 for driver in drivers:
856 if driver == 'doctor':
857 cmd = "{} \"{}\"".format(driver, driver)
859 cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
# NOTE(review): guarding condition for this api_version line is elided.
861 cmd += ' --config api_version="2.34"'
862 logging.debug("Congress command created: {}".format(cmd))