##############################################################################
# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
import base64
import fileinput
import logging
import os
import platform
import shutil
import struct
import time
import uuid

import apex.builders.overcloud_builder as oc_builder
import apex.builders.common_builder as c_builder
from apex.common import constants as con
from apex.common.exceptions import ApexDeployException
from apex.common import parsers
from apex.common import utils
from apex.virtual import utils as virt_utils
from cryptography.hazmat.primitives import serialization as \
    crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend as \
    crypto_default_backend
SDN_FILE_MAP = {
    'opendaylight': {
        'sfc': 'neutron-sfc-opendaylight.yaml',
        'vpn': 'neutron-bgpvpn-opendaylight.yaml',
        'gluon': 'gluon.yaml',
        'vpp': {
            'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
            'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
            'default': 'neutron-opendaylight-honeycomb.yaml'
        },
        'l2gw': 'neutron-l2gw-opendaylight.yaml',
        'sriov': 'neutron-opendaylight-sriov.yaml',
        'default': 'neutron-opendaylight.yaml',
    },
    'onos': {
        'sfc': 'neutron-onos-sfc.yaml',
        'default': 'neutron-onos.yaml'
    },
    'ovn': 'neutron-ml2-ovn.yaml',
    False: {
        'vpp': 'neutron-ml2-vpp.yaml',
        'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
    }
}

OTHER_FILE_MAP = {
    'tacker': 'enable_tacker.yaml',
    'congress': 'enable_congress.yaml',
    'barometer': 'enable_barometer.yaml',
    'rt_kvm': 'enable_rt_kvm.yaml'
}

OVS_PERF_MAP = {
    'HostCpusList': 'dpdk_cores',
    'NeutronDpdkCoreList': 'pmd_cores',
    'NeutronDpdkSocketMemory': 'socket_memory',
    'NeutronDpdkMemoryChannels': 'memory_channels'
}
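# OVS_PERF_MAP translates THT parameter names to the matching keys found
# under the compute 'ovs' performance options in deploy settings (see the
# OVS-DPDK handling in prep_env below).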

OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \

LOSETUP_SERVICE = """[Unit]
Description=Setup loop devices

[Service]
ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
ExecStop=/sbin/losetup -d /dev/loop3

[Install]
WantedBy=multi-user.target
"""


def build_sdn_env_list(ds, sdn_map, env_list=None):
    """
    Builds a list of SDN environment files to be used in the deploy cmd.

    This function recursively searches an sdn_map. First the sdn controller is
    matched and then the function looks for enabled features for that
    controller to determine which environment files should be used. By
    default a feature is added to the list if it is set to True in the deploy
    settings. If a feature does not have a boolean value, then the
    key and value pair to compare with are checked as a tuple (k, v).

    :param ds: deploy settings
    :param sdn_map: SDN map to recursively search
    :param env_list: recursive var to hold previously found env_list
    :return: A list of env files
    """
    if env_list is None:
        env_list = list()
    for k, v in sdn_map.items():
        if ds['sdn_controller'] == k or (k in ds and ds[k]):
            if isinstance(v, dict):
                # Append default SDN env file first
                # The assumption is that feature-enabled SDN env files
                # override and do not conflict with the previously set default
                if ds['sdn_controller'] == k and 'default' in v:
                    env_list.append(os.path.join(con.THT_ENV_DIR,
                                                 v['default']))
                env_list.extend(build_sdn_env_list(ds, v))
            # check if the value is not a boolean
            elif isinstance(v, tuple):
                if ds[k] == v[0]:
                    env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
            else:
                env_list.append(os.path.join(con.THT_ENV_DIR, v))
    if len(env_list) == 0:
        try:
            env_list.append(os.path.join(
                con.THT_ENV_DIR, sdn_map['default']))
        except KeyError:
            logging.warning("Unable to find default file for SDN")

    return env_list
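
# Illustrative result (assuming OpenDaylight as the SDN controller with the
# 'sfc' feature enabled in deploy settings; the actual prefix comes from
# con.THT_ENV_DIR):
#   build_sdn_env_list(ds, SDN_FILE_MAP)
#   -> ['<THT_ENV_DIR>/neutron-opendaylight.yaml',
#       '<THT_ENV_DIR>/neutron-sfc-opendaylight.yaml']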


def get_docker_sdn_file(ds_opts):
    """
    Returns docker env file for detected SDN
    :param ds_opts: deploy options
    :return: docker THT env file for an SDN
    """
    # FIXME(trozet): We assume right now there is only one docker SDN file
    docker_services = con.VALID_DOCKER_SERVICES
    tht_dir = con.THT_DOCKER_ENV_DIR[ds_opts['os_version']]
    sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
    for sdn_file in sdn_env_list:
        sdn_base = os.path.basename(sdn_file)
        if sdn_base in docker_services:
            if docker_services[sdn_base] is not None:
                return os.path.join(tht_dir,
                                    docker_services[sdn_base])
            else:
                return os.path.join(tht_dir, sdn_base)
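
# Note: VALID_DOCKER_SERVICES is assumed here to map SDN env file basenames to
# their docker-specific variants, with a value of None meaning the docker file
# shares the same basename as the non-docker one.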


def create_deploy_cmd(ds, ns, inv, tmp_dir,
                      virtual, env_file='opnfv-environment.yaml',
                      net_data=False):

    logging.info("Creating deployment command")
    deploy_options = ['network-environment.yaml']

    ds_opts = ds['deploy_options']

    if ds_opts['containers']:
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'docker.yaml'))

    if ds['global_params']['ha_enabled']:
        if ds_opts['containers']:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'docker-ha.yaml'))
        else:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'puppet-pacemaker.yaml'))

    deploy_options.append(env_file)

    if ds_opts['containers']:
        deploy_options.append('docker-images.yaml')
        sdn_docker_file = get_docker_sdn_file(ds_opts)
        if sdn_docker_file:
            deploy_options.append(sdn_docker_file)
            deploy_options.append('sdn-images.yaml')
    else:
        deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)

    for k, v in OTHER_FILE_MAP.items():
        if k in ds_opts and ds_opts[k]:
            if ds_opts['containers']:
                deploy_options.append(os.path.join(con.THT_DOCKER_ENV_DIR,
                                                   "{}.yaml".format(k)))
            else:
                deploy_options.append(os.path.join(con.THT_ENV_DIR, v))

    if ds_opts['ceph'] and 'csit' not in env_file:
        prep_storage_env(ds, ns, virtual, tmp_dir)
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'storage-environment.yaml'))
    if ds_opts['sriov']:
        prep_sriov_env(ds, tmp_dir)

    # Check for 'k8s' here intentionally, as we may support other values
    # such as openstack/openshift for 'vim' option.
    if ds_opts['vim'] == 'k8s':
        deploy_options.append('kubernetes-environment.yaml')

    if virtual:
        deploy_options.append('virtual-environment.yaml')
    else:
        deploy_options.append('baremetal-environment.yaml')

    num_control, num_compute = inv.get_node_counts()
    if num_control == 0 or num_compute == 0:
        logging.error("Detected 0 control or compute nodes. Control nodes: "
                      "{}, compute nodes: {}".format(num_control, num_compute))
        raise ApexDeployException("Invalid number of control or compute nodes")
    elif num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1
    if platform.machine() == 'aarch64':
        # aarch64 deploys were not completing in the default 90 mins.
        # Not sure if this is related to the hardware the OOO support
        # was developed on or the virtualization support in CentOS.
        # Either way it will probably get better over time as the aarch64
        # support matures in CentOS, and deploy time should be tested in
        # the future so this multiplier can be removed.
        con.DEPLOY_TIMEOUT *= 2
    cmd = "openstack overcloud deploy --templates --timeout {} " \
          .format(con.DEPLOY_TIMEOUT)
    for option in deploy_options:
        cmd += " -e {}".format(option)
    cmd += " --ntp-server {}".format(ns['ntp'][0])
    cmd += " --control-scale {}".format(num_control)
    cmd += " --compute-scale {}".format(num_compute)
    cmd += ' --control-flavor control --compute-flavor compute'
    if net_data:
        cmd += ' --networks-file network_data.yaml'
    libvirt_type = 'kvm'
    if virtual:
        with open('/sys/module/kvm_intel/parameters/nested') as f:
            nested_kvm = f.read().strip()
            if nested_kvm != 'Y':
                libvirt_type = 'qemu'
    cmd += ' --libvirt-type {}'.format(libvirt_type)
    logging.info("Deploy command set: {}".format(cmd))

    with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
        fh.write(cmd)
    return cmd
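
# A resulting deploy command resembles the following (flags abridged, values
# illustrative only):
#   openstack overcloud deploy --templates --timeout 90 \
#       -e network-environment.yaml -e opnfv-environment.yaml ... \
#       --ntp-server pool.ntp.org --control-scale 1 --compute-scale 2 \
#       --control-flavor control --compute-flavor compute --libvirt-type kvm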


def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
               patches=None):
    """
    Locates sdn image and preps for deployment.
    :param ds: deploy settings
    :param ns: network settings
    :param img: sdn image
    :param tmp_dir: dir to store modified sdn image
    :param root_pw: password to configure for overcloud image
    :param docker_tag: Docker image tag for RDO version (default None)
    :param patches: List of patches to apply to overcloud image
    """
    # TODO(trozet): Come up with a better way to organize this logic in this
    # function
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))

    ds_opts = ds['deploy_options']
    virt_cmds = list()
    sdn = ds_opts['sdn_controller']
    patched_containers = set()
    # we need this due to rhbz #1436021
    # fixed in systemd-219-37.el7
    if sdn == 'opendaylight':
        logging.info("Neutron openvswitch-agent disabled")
        virt_cmds.extend([
            {con.VIRT_RUN_CMD:
                "rm -f /etc/systemd/system/multi-user.target.wants/"
                "neutron-openvswitch-agent.service"},
            {con.VIRT_RUN_CMD:
                "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
                ".service"}
        ])

    if ns.get('http_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'http_proxy={}' >> /etc/environment".format(
                    ns['http_proxy'])})

    if ns.get('https_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'https_proxy={}' >> /etc/environment".format(
                    ns['https_proxy'])})

    if ds_opts['vpn']:
        virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
                "/opt/quagga/etc/init.d/zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
                              "zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.local "})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
        logging.info("ZRPCD process started")

    dataplane = ds_opts['dataplane']
    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
        uio_types = {
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
        }
        for mod_file, mod in uio_types.items():
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))
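            # Each generated *.modules file is a two-line script, e.g.:
            #   #!/bin/bash
            #   exec /sbin/modprobe vfio_pci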
            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
                    mod_file)},
                {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
                                   "{}".format(os.path.basename(mod_file))}
            ])

    if root_pw:
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})

    if ds_opts['sfc'] and dataplane == 'ovs':
        virt_cmds.extend([
            {con.VIRT_RUN_CMD: "yum -y install "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_KMOD_RPM)},
            {con.VIRT_RUN_CMD: "yum downgrade -y "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_RPM)}
        ])

    if dataplane == 'fdio':
        # Patch neutron to use the OVS external interface for the router
        # and add generic linux NS interface driver
        virt_cmds.append(
            {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
                               "-p1 < neutron-patch-NSDriver.patch"})
        if sdn is False:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
                {con.VIRT_RUN_CMD: "yum install -y "
                                   "/root/nosdn_vpp_rpms/*.rpm"}
            ])

    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug("Temporary overcloud image stored as: {}".format(
        tmp_oc_image))

    if sdn == 'opendaylight':
        undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
            'installer_vm']['ip']
        oc_builder.inject_opendaylight(
            odl_version=ds_opts['odl_version'],
            uc_ip=undercloud_admin_ip,
            os_version=ds_opts['os_version'],
            docker_tag=docker_tag,
        )
        patched_containers = patched_containers.union({'opendaylight'})

    if patches:
        if ds_opts['os_version'] == 'master':
            branch = ds_opts['os_version']
        else:
            branch = "stable/{}".format(ds_opts['os_version'])
        logging.info('Adding patches to overcloud')
        patched_containers = patched_containers.union(
            c_builder.add_upstream_patches(patches,
                                           tmp_oc_image, tmp_dir,
                                           branch,
                                           uc_ip=undercloud_admin_ip,
                                           docker_tag=docker_tag))
    # if containers with ceph, and no ceph device we need to use a
    # persistent loop device for Ceph OSDs
    if docker_tag and ds_opts['ceph_device'] == '/dev/loop3':
        tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
        with open(tmp_losetup, 'w') as fh:
            fh.write(LOSETUP_SERVICE)
        virt_cmds.extend([
            {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(
                tmp_losetup)},
            {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size 10G'},
            {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
            {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
        ])
    # TODO(trozet) remove this after LP#173474 is fixed
    dhcp_unit = '/usr/lib/systemd/system/dhcp-interface@.service'
    virt_cmds.append(
        {con.VIRT_RUN_CMD: "crudini --del {} Unit "
                           "ConditionPathExists".format(dhcp_unit)})
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")
    return patched_containers


def make_ssh_key():
    """
    Creates public and private ssh keys with 1024 bit RSA encryption
    :return: private, public key
    """
    key = rsa.generate_private_key(
        backend=crypto_default_backend(),
        public_exponent=65537,
        key_size=1024
    )

    private_key = key.private_bytes(
        crypto_serialization.Encoding.PEM,
        crypto_serialization.PrivateFormat.PKCS8,
        crypto_serialization.NoEncryption())
    public_key = key.public_key().public_bytes(
        crypto_serialization.Encoding.OpenSSH,
        crypto_serialization.PublicFormat.OpenSSH
    )
    return private_key.decode('utf-8'), public_key.decode('utf-8')
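
# The generated key pair is consumed by prep_env below, which substitutes the
# 'replace_private_key' and 'replace_public_key' markers in the
# opnfv-environment file with the PEM and OpenSSH encoded keys respectively.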


def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
    """
    Creates modified opnfv/network environments for deployment
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param opnfv_env: file path for opnfv-environment file
    :param net_env: file path for network-environment file
    :param tmp_dir: Apex tmp dir
    """
    logging.info("Preparing opnfv-environment and network-environment files")
    ds_opts = ds['deploy_options']
    tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
    shutil.copyfile(opnfv_env, tmp_opnfv_env)
    tenant_nic_map = ns['networks']['tenant']['nic_mapping']
    tenant_nic = dict()
    tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
    tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
    external_nic_map = ns['networks']['external'][0]['nic_mapping']
    external_nic = dict()
    external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]

    private_key, public_key = make_ssh_key()

    # Set up convenience variables so the file-editing loop below is easier
    # to follow
    if 'performance' in ds_opts:
        perf = True
        # vpp
        if 'vpp' in ds_opts['performance']['Compute']:
            perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
        else:
            perf_vpp_comp = None
        if 'vpp' in ds_opts['performance']['Controller']:
            perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
        else:
            perf_vpp_ctrl = None

        # ovs
        if 'ovs' in ds_opts['performance']['Compute']:
            perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
        else:
            perf_ovs_comp = None

        # kernel
        if 'kernel' in ds_opts['performance']['Compute']:
            perf_kern_comp = ds_opts['performance']['Compute']['kernel']
        else:
            perf_kern_comp = None
    else:
        perf = False

    # Modify OPNFV environment
    # TODO: Change to build a dict and outputting yaml rather than parsing
    for line in fileinput.input(tmp_opnfv_env, inplace=True):
        line = line.strip('\n')
        output_line = line
        if 'CloudDomain' in line:
            output_line = " CloudDomain: {}".format(ns['domain_name'])
        elif 'replace_private_key' in line:
            output_line = " private_key: |\n"
            key_out = ''
            for line in private_key.splitlines():
                key_out += " {}\n".format(line)
            output_line += key_out
        elif 'replace_public_key' in line:
            output_line = " public_key: '{}'".format(public_key)
        elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
                'resource_registry' in line:
            output_line = "resource_registry:\n" \
                          " OS::TripleO::NodeUserData: first-boot.yaml"
        elif 'ComputeExtraConfigPre' in line and \
                ds_opts['dataplane'] == 'ovs_dpdk':
            output_line = ' OS::TripleO::ComputeExtraConfigPre: ' \
                          './ovs-dpdk-preconfig.yaml'

        if ds_opts['sdn_controller'] == 'opendaylight' and \
                'odl_vpp_routing_node' in ds_opts:
            if 'opendaylight::vpp_routing_node' in line:
                output_line = (" opendaylight::vpp_routing_node: {}.{}"
                               .format(ds_opts['odl_vpp_routing_node'],
                                       ns['domain_name']))
        elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
            if 'NeutronVPPAgentPhysnets' in line:
                # VPP interface tap0 will be used for external network
                # connectivity
                output_line = (" NeutronVPPAgentPhysnets: "
                               "'datacentre:{},external:tap0'"
                               .format(tenant_nic['Controller']))
        elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
                'dvr') is True:
            if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
                output_line = ''
            elif 'NeutronDhcpAgentsPerNetwork' in line:
                num_control, num_compute = inv.get_node_counts()
                output_line = (" NeutronDhcpAgentsPerNetwork: {}"
                               .format(num_compute))
            elif 'ComputeServices' in line:
                output_line = (" ComputeServices:\n"
                               " - OS::TripleO::Services::NeutronDhcpAgent")

        # SRIOV networks are VLAN based provider networks. In order to
        # simplify the deployment, nfv_sriov will be the default physnet.
        # VLANs are not needed in advance, and the user will have to create
        # the network specifying the segmentation-id.
        if ds_opts['sriov']:
            if 'NeutronNetworkVLANRanges' in line:
                output_line = ("{},nfv_sriov'".format(line[:-1]))

        if perf:
            for role in 'NovaCompute', 'Controller':
                if role == 'NovaCompute':
                    perf_opts = perf_vpp_comp
                else:
                    perf_opts = perf_vpp_ctrl
                cfg = "{}ExtraConfig".format(role)
                if cfg in line and perf_opts:
                    perf_line = ''
                    if 'main-core' in perf_opts:
                        perf_line += ("\n fdio::vpp_cpu_main_core: '{}'"
                                      .format(perf_opts['main-core']))
                    if 'corelist-workers' in perf_opts:
                        perf_line += ("\n "
                                      "fdio::vpp_cpu_corelist_workers: '{}'"
                                      .format(perf_opts['corelist-workers']))
                    if ds_opts['sdn_controller'] == 'opendaylight' and \
                            ds_opts['dataplane'] == 'fdio':
                        if role == 'NovaCompute':
                            perf_line += ("\n "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface',"
                                          "'{}:public-interface']"
                                          .format(tenant_nic[role],
                                                  external_nic[role]))
                        else:
                            perf_line += ("\n "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface']"
                                          .format(tenant_nic[role]))
                    if perf_line:
                        output_line = (" {}:{}".format(cfg, perf_line))

            if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
                for k, v in OVS_PERF_MAP.items():
                    if k in line and v in perf_ovs_comp:
                        output_line = " {}: '{}'".format(k, perf_ovs_comp[v])

            # kernel args
            # (FIXME) use compute's kernel settings for all nodes for now.
            if perf_kern_comp:
                if 'NovaSchedulerDefaultFilters' in line:
                    output_line = \
                        " NovaSchedulerDefaultFilters: 'RamFilter," \
                        "ComputeFilter,AvailabilityZoneFilter," \
                        "ComputeCapabilitiesFilter," \
                        "ImagePropertiesFilter,NUMATopologyFilter'"
                elif 'ComputeKernelArgs' in line:
                    kernel_args = ''
                    for k, v in perf_kern_comp.items():
                        kernel_args += "{}={} ".format(k, v)
                    if kernel_args:
                        output_line = " ComputeKernelArgs: '{}'".\
                            format(kernel_args)

        print(output_line)

    logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
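    # Example of the in-place substitution performed above (domain value is
    # illustrative only): a template line containing 'CloudDomain' is replaced
    # with " CloudDomain: opnfvlf.org".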


def generate_ceph_key():
    # assumed 16 random bytes as the key payload (elided in the original)
    key = os.urandom(16)
    header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
    return base64.b64encode(header + key)
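
# The packed header appears to follow the binary layout Ceph expects when
# base64-decoding a cephx secret: a 2-byte key type (1), the creation time as
# 4-byte seconds plus 4-byte nanoseconds (zeroed here), and the 2-byte key
# length, followed by the raw key bytes.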


def prep_storage_env(ds, ns, virtual, tmp_dir):
    """
    Creates storage environment file for deployment. Source file is copied by
    undercloud playbook to host.
    :param ds: deploy settings
    :param ns: network settings
    :param virtual: True if virtual deployment
    :param tmp_dir: Apex tmp dir
    """
    ds_opts = ds['deploy_options']
    storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
    if not os.path.isfile(storage_file):
        logging.error("storage-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("storage-environment file not copied from "
                                  "undercloud")
    for line in fileinput.input(storage_file, inplace=True):
        line = line.strip('\n')
        if 'CephClusterFSID' in line:
            print(" CephClusterFSID: {}".format(str(uuid.uuid4())))
        elif 'CephMonKey' in line:
            print(" CephMonKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephAdminKey' in line:
            print(" CephAdminKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephClientKey' in line:
            print(" CephClientKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        else:
            print(line)

    if ds_opts['containers']:
        undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
            'installer_vm']['ip']
        ceph_version = con.CEPH_VERSION_MAP[ds_opts['os_version']]
        docker_image = "{}:8787/ceph/daemon:tag-build-master-" \
                       "{}-centos-7".format(undercloud_admin_ip,
                                            ceph_version)
        ceph_params = {
            'DockerCephDaemonImage': docker_image,
        }

        # max pgs allowed are calculated as num_mons * 200. Therefore we
        # set number of pgs and pools so that the total will be less:
        # num_pgs * num_pools * num_osds
        ceph_params['CephPoolDefaultSize'] = 2
        ceph_params['CephPoolDefaultPgNum'] = 32
        if virtual:
            ceph_params['CephAnsibleExtraConfig'] = {
                'centos_package_dependencies': [],
                'ceph_osd_docker_memory_limit': '1g',
                'ceph_mds_docker_memory_limit': '1g',
            }
        ceph_device = ds_opts['ceph_device']
        ceph_params['CephAnsibleDisksConfig'] = {
            'devices': [ceph_device],
            'osd_scenario': 'collocated'
        }
        utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
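        # At this point storage-environment.yaml gains parameter_defaults
        # entries similar to (values illustrative only):
        #   DockerCephDaemonImage: 192.0.2.1:8787/ceph/daemon:tag-build-...
        #   CephPoolDefaultSize: 2
        #   CephPoolDefaultPgNum: 32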
    # TODO(trozet): remove following block as we only support containers now
    elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
        with open(storage_file, 'a') as fh:
            fh.write(' ExtraConfig:\n')
            fh.write(" ceph::profile::params::osds:{{{}:{{}}}}\n".format(
                ds_opts['ceph_device']
            ))


def prep_sriov_env(ds, tmp_dir):
    """
    Creates SRIOV environment file for deployment. Source file is copied by
    undercloud playbook to host.
    :param ds: deploy settings
    :param tmp_dir: Apex tmp dir
    """
    ds_opts = ds['deploy_options']
    sriov_iface = ds_opts['sriov']
    sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
    if not os.path.isfile(sriov_file):
        logging.error("sriov-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("sriov-environment file not copied from "
                                  "undercloud")
    # TODO(rnoriega): Instead of line editing, refactor this code to load
    # yaml file into a dict, edit it and write the file back.
    for line in fileinput.input(sriov_file, inplace=True):
        line = line.strip('\n')
        if 'NovaSchedulerDefaultFilters' in line:
            print(" {}".format(line[3:]))
        elif 'NovaSchedulerAvailableFilters' in line:
            print(" {}".format(line[3:]))
        elif 'NeutronPhysicalDevMappings' in line:
            print(" NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
                  .format(sriov_iface))
        elif 'NeutronSriovNumVFs' in line:
            print(" NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
        elif 'NovaPCIPassthrough' in line:
            print(" NovaPCIPassthrough:")
        elif 'devname' in line:
            print(" - devname: \"{}\"".format(sriov_iface))
        elif 'physical_network' in line:
            print(" physical_network: \"nfv_sriov\"")
        else:
            print(line)


def external_network_cmds(ns, ds):
    """
    Generates external network openstack commands
    :param ns: network settings
    :param ds: deploy settings
    :return: list of commands to configure external network
    """
    ds_opts = ds['deploy_options']
    external_physnet = 'datacentre'
    if ds_opts['dataplane'] == 'fdio' and \
            ds_opts['sdn_controller'] != 'opendaylight':
        external_physnet = 'external'
    if 'external' in ns.enabled_network_list:
        net_config = ns['networks']['external'][0]
        external = True
        pool_start, pool_end = net_config['floating_ip_range']
    else:
        net_config = ns['networks']['admin']
        external = False
        pool_start, pool_end = ns['apex']['networks']['admin'][
            'introspection_range']
    nic_config = net_config['nic_mapping']
    gateway = net_config['gateway']
    cmds = list()
    # create network command
    if nic_config['compute']['vlan'] == 'native':
        ext_type = 'flat'
    else:
        ext_type = "vlan --provider-segment {}".format(nic_config[
            'compute']['vlan'])
    cmds.append("openstack network create external --project service "
                "--external --provider-network-type {} "
                "--provider-physical-network {}"
                .format(ext_type, external_physnet))
    # create subnet command
    cidr = net_config['cidr']
    subnet_cmd = "openstack subnet create external-subnet --project " \
                 "service --network external --no-dhcp --gateway {} " \
                 "--allocation-pool start={},end={} --subnet-range " \
                 "{}".format(gateway, pool_start, pool_end, str(cidr))
    if external and cidr.version == 6:
        subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
                      '--ipv6-address-mode slaac'
    cmds.append(subnet_cmd)
    logging.debug("Neutron external network commands determined "
                  "as: {}".format(cmds))
    return cmds
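
# Example of a generated pair of commands (addresses are illustrative only):
#   openstack network create external --project service --external \
#       --provider-network-type flat --provider-physical-network datacentre
#   openstack subnet create external-subnet --project service \
#       --network external --no-dhcp --gateway 172.30.9.1 \
#       --allocation-pool start=172.30.9.100,end=172.30.9.200 \
#       --subnet-range 172.30.9.0/24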


def create_congress_cmds(overcloud_file):
    drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
    overcloudrc = parsers.parse_overcloudrc(overcloud_file)
    logging.info("Creating congress commands")
    try:
        ds_cfg = [
            "username={}".format(overcloudrc['OS_USERNAME']),
            "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
            "password={}".format(overcloudrc['OS_PASSWORD']),
            "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
        ]
    except KeyError:
        logging.error("Unable to find all keys required for congress in "
                      "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
                      "OS_PASSWORD, OS_AUTH_URL. Please check overcloudrc "
                      "file: {}".format(overcloud_file))
        raise
    cmds = list()
    ds_cfg = '--config ' + ' --config '.join(ds_cfg)

    for driver in drivers:
        if driver == 'doctor':
            cmd = "{} \"{}\"".format(driver, driver)
        else:
            cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
        if driver == 'nova':
            cmd += ' --config api_version="2.34"'
        logging.debug("Congress command created: {}".format(cmd))
        cmds.append(cmd)
    return cmds
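
# Each returned entry is presumably appended to an "openstack congress
# datasource create" style invocation elsewhere in Apex (not shown here). A
# sample entry, with illustrative credentials, would look like:
#   nova "nova" --config username=admin --config tenant_name=admin
#       --config password=secret --config auth_url=http://10.0.0.1:5000/v3
#       --config api_version="2.34"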