##############################################################################
# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
import base64
import fileinput
import logging
import os
import platform
import shutil
import struct
import time
import uuid

import apex.builders.overcloud_builder as oc_builder
import apex.builders.common_builder as c_builder
from apex.common import constants as con
from apex.common.exceptions import ApexDeployException
from apex.common import parsers
from apex.common import utils
from apex.virtual import utils as virt_utils
from cryptography.hazmat.primitives import serialization as \
    crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend as \
    crypto_default_backend

SDN_FILE_MAP = {
    'opendaylight': {
        'sfc': 'neutron-sfc-opendaylight.yaml',
        'vpn': 'neutron-bgpvpn-opendaylight.yaml',
        'gluon': 'gluon.yaml',
        'vpp': {
            'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
            'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
            'default': 'neutron-opendaylight-honeycomb.yaml'
        },
        'l2gw': 'neutron-l2gw-opendaylight.yaml',
        'sriov': 'neutron-opendaylight-sriov.yaml',
        'default': 'neutron-opendaylight.yaml',
    },
    'onos': {
        'sfc': 'neutron-onos-sfc.yaml',
        'default': 'neutron-onos.yaml'
    },
    'ovn': 'neutron-ml2-ovn.yaml',
    False: {
        'vpp': 'neutron-ml2-vpp.yaml',
        'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
    }
}

OTHER_FILE_MAP = {
    'tacker': 'enable_tacker.yaml',
    'congress': 'enable_congress.yaml',
    'barometer': 'enable_barometer.yaml',
    'rt_kvm': 'enable_rt_kvm.yaml'
}
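# e.g. a deploy setting of 'tacker: True' pulls enable_tacker.yaml into the
# deploy command (see create_deploy_cmd below)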

OVS_PERF_MAP = {
    'HostCpusList': 'dpdk_cores',
    'NeutronDpdkCoreList': 'pmd_cores',
    'NeutronDpdkSocketMemory': 'socket_memory',
    'NeutronDpdkMemoryChannels': 'memory_channels'
}

OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
                      ".noarch.rpm"

LOSETUP_SERVICE = """[Unit]
Description=Setup loop devices
Before=docker.service

[Service]
Type=oneshot
ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
ExecStop=/sbin/losetup -d /dev/loop3
TimeoutSec=60
RemainAfterExit=yes

[Install]
WantedBy=multi-user.target
"""
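
# NOTE: prep_image() uploads this unit into the overcloud image and backs
# /dev/loop3 with /srv/data.img, giving containerized Ceph OSDs a
# persistent loop device when no dedicated ceph_device is configured.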


def build_sdn_env_list(ds, sdn_map, env_list=None):
    """
    Builds a list of SDN environment files to be used in the deploy cmd.

    This function recursively searches an sdn_map. First the SDN controller is
    matched, and then the function looks for enabled features for that
    controller to determine which environment files should be used. By
    default, a feature is added to the list if it is set to True in the
    deploy settings. If a feature does not have a boolean value, then the
    key and value pair to compare with are checked as a tuple (k, v).

    :param ds: deploy settings
    :param sdn_map: SDN map to recursively search
    :param env_list: recursive var to hold previously found env_list
    :return: A list of env files
    """
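    # Example (illustrative): deploy settings containing
    # sdn_controller: opendaylight and sfc: True yield
    # [<THT_ENV_DIR>/neutron-opendaylight.yaml,
    #  <THT_ENV_DIR>/neutron-sfc-opendaylight.yaml]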
    if env_list is None:
        env_list = list()
    for k, v in sdn_map.items():
        if ds['sdn_controller'] == k or (k in ds and ds[k]):
            if isinstance(v, dict):
                # Append default SDN env file first
                # The assumption is that feature-enabled SDN env files
                # override and do not conflict with previously set default
                # settings
                if ds['sdn_controller'] == k and 'default' in v:
                    env_list.append(os.path.join(con.THT_ENV_DIR,
                                                 v['default']))
                env_list.extend(build_sdn_env_list(ds, v))
            # check if the value is not a boolean
            elif isinstance(v, tuple):
                if ds[k] == v[0]:
                    env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
            else:
                env_list.append(os.path.join(con.THT_ENV_DIR, v))
    if len(env_list) == 0:
        try:
            env_list.append(os.path.join(
                con.THT_ENV_DIR, sdn_map['default']))
        except KeyError:
            logging.warning("Unable to find default file for SDN")

    return env_list


def get_docker_sdn_file(ds_opts):
    """
    Returns docker env file for detected SDN
    :param ds_opts: deploy options
    :return: docker THT env file for an SDN
    """
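    # Example (illustrative): if 'neutron-opendaylight.yaml' appears in
    # con.VALID_DOCKER_SERVICES, its docker counterpart under
    # THT_DOCKER_ENV_DIR is returned instead of the puppet THT file.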
    # FIXME(trozet): We assume right now there is only one docker SDN file
    docker_services = con.VALID_DOCKER_SERVICES
    sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
    for sdn_file in sdn_env_list:
        sdn_base = os.path.basename(sdn_file)
        if sdn_base in docker_services:
            if docker_services[sdn_base] is not None:
                return os.path.join(con.THT_DOCKER_ENV_DIR,
                                    docker_services[sdn_base])
            else:
                return os.path.join(con.THT_DOCKER_ENV_DIR, sdn_base)


def create_deploy_cmd(ds, ns, inv, tmp_dir,
                      virtual, env_file='opnfv-environment.yaml',
                      net_data=False):

    logging.info("Creating deployment command")
    deploy_options = ['network-environment.yaml']

    ds_opts = ds['deploy_options']

    if ds_opts['containers']:
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'docker.yaml'))

    if ds['global_params']['ha_enabled']:
        if ds_opts['containers']:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'docker-ha.yaml'))
        else:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'puppet-pacemaker.yaml'))

    if env_file:
        deploy_options.append(env_file)

    if ds_opts['containers']:
        deploy_options.append('docker-images.yaml')
        sdn_docker_file = get_docker_sdn_file(ds_opts)
        if sdn_docker_file:
            deploy_options.append(sdn_docker_file)
            deploy_options.append('sdn-images.yaml')
    else:
        deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)

    for k, v in OTHER_FILE_MAP.items():
        if k in ds_opts and ds_opts[k]:
            if ds_opts['containers']:
                deploy_options.append(os.path.join(con.THT_DOCKER_ENV_DIR,
                                                   "{}.yaml".format(k)))
            else:
                deploy_options.append(os.path.join(con.THT_ENV_DIR, v))

    if ds_opts['ceph']:
        prep_storage_env(ds, ns, virtual, tmp_dir)
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'storage-environment.yaml'))
    if ds_opts['sriov']:
        prep_sriov_env(ds, tmp_dir)

    if virtual:
        deploy_options.append('virtual-environment.yaml')
    else:
        deploy_options.append('baremetal-environment.yaml')

    num_control, num_compute = inv.get_node_counts()
    if num_control == 0 or num_compute == 0:
        logging.error("Detected 0 control or compute nodes. Control nodes: "
                      "{}, compute nodes: {}".format(num_control,
                                                     num_compute))
        raise ApexDeployException("Invalid number of control or computes")
    elif num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1
    if platform.machine() == 'aarch64':
        # aarch64 deploys were not completing in the default 90 mins.
        # Not sure if this is related to the hardware the OOO support
        # was developed on or the virtualization support in CentOS.
        # Either way it will probably get better over time as the aarch64
        # support matures in CentOS, and deploy time should be tested in
        # the future so this multiplier can be removed.
        con.DEPLOY_TIMEOUT *= 2
    cmd = "openstack overcloud deploy --templates --timeout {} " \
          .format(con.DEPLOY_TIMEOUT)
    # build cmd env args
    for option in deploy_options:
        cmd += " -e {}".format(option)
    cmd += " --ntp-server {}".format(ns['ntp'][0])
    cmd += " --control-scale {}".format(num_control)
    cmd += " --compute-scale {}".format(num_compute)
    cmd += ' --control-flavor control --compute-flavor compute'
    if net_data:
        cmd += ' --networks-file network_data.yaml'
    libvirt_type = 'kvm'
    if virtual:
        with open('/sys/module/kvm_intel/parameters/nested') as f:
            nested_kvm = f.read().strip()
            if nested_kvm != 'Y':
                libvirt_type = 'qemu'
    cmd += ' --libvirt-type {}'.format(libvirt_type)
    logging.info("Deploy command set: {}".format(cmd))
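    # Example (illustrative) of an assembled command:
    #   openstack overcloud deploy --templates --timeout 90
    #     -e network-environment.yaml -e opnfv-environment.yaml ...
    #     --ntp-server pool.ntp.org --control-scale 1 --compute-scale 1
    #     --control-flavor control --compute-flavor compute
    #     --libvirt-type kvm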

    with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
        fh.write(cmd)
    return cmd


def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
               patches=None, upstream=False):
    """
    Locates sdn image and preps for deployment.
    :param ds: deploy settings
    :param ns: network settings
    :param img: sdn image
    :param tmp_dir: dir to store modified sdn image
    :param root_pw: password to configure for overcloud image
    :param docker_tag: Docker image tag for RDO version (default None)
    :param patches: List of patches to apply to overcloud image
    :param upstream: (boolean) Indicates if upstream deployment or not
    :return: set of patched containers
    """
    # TODO(trozet): Come up with a better way to organize this logic in this
    # function
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))

    ds_opts = ds['deploy_options']
    virt_cmds = list()
    sdn = ds_opts['sdn_controller']
    patched_containers = set()
    # we need this due to rhbz #1436021
    # fixed in systemd-219-37.el7
    if sdn == 'opendaylight':
        logging.info("Neutron openvswitch-agent disabled")
282 "rm -f /etc/systemd/system/multi-user.target.wants/"
283 "neutron-openvswitch-agent.service"},
286 "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"

    if ns.get('http_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'http_proxy={}' >> /etc/environment".format(
                    ns['http_proxy'])})

    if ns.get('https_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'https_proxy={}' >> /etc/environment".format(
                    ns['https_proxy'])})

    if ds_opts['vpn']:
        virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
                "/opt/quagga/etc/init.d/zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
                              "zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.local "})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
        logging.info("ZRPCD process started")

    dataplane = ds_opts['dataplane']
    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
        uio_types = {
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'):
                'uio_pci_generic'
        }
        for mod_file, mod in uio_types.items():
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))

            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
                    mod_file)},
                {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
                                   "{}".format(os.path.basename(mod_file))}
            ])
    if root_pw:
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})
    if ds_opts['sfc'] and dataplane == 'ovs':
        virt_cmds.extend([
            {con.VIRT_RUN_CMD: "yum -y install "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_KMOD_RPM)},
            {con.VIRT_RUN_CMD: "yum downgrade -y "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_RPM)}
        ])
    if dataplane == 'fdio':
        # Patch neutron with using OVS external interface for router
        # and add generic linux NS interface driver
        virt_cmds.append(
            {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
                               "-p1 < neutron-patch-NSDriver.patch"})
        if sdn is False:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
                {con.VIRT_RUN_CMD: "yum install -y "
                                   "/root/nosdn_vpp_rpms/*.rpm"}
            ])

    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug("Temporary overcloud image stored as: {}".format(
        tmp_oc_image))

    # TODO(trozet): remove this if block after Fraser
    if sdn == 'opendaylight' and not upstream:
        if ds_opts['odl_version'] != con.DEFAULT_ODL_VERSION:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
                {con.VIRT_RUN_CMD: "rm -rf /etc/puppet/modules/opendaylight"},
                {con.VIRT_RUN_CMD: "cd /etc/puppet/modules && tar xzf "
                                   "/root/puppet-opendaylight-"
                                   "{}.tar.gz".format(ds_opts['odl_version'])}
            ])
            if ds_opts['odl_version'] == 'master':
                virt_cmds.extend([
                    {con.VIRT_RUN_CMD: "rpm -ivh --nodeps /root/{}/*".format(
                        ds_opts['odl_version'])}
                ])
            else:
                virt_cmds.extend([
                    {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
                        ds_opts['odl_version'])}
                ])
    elif sdn == 'opendaylight' and 'odl_vpp_netvirt' in ds_opts \
            and ds_opts['odl_vpp_netvirt']:
        virt_cmds.extend([
            {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
            {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
                ODL_NETVIRT_VPP_RPM)}
        ])
    elif sdn == 'opendaylight':
        undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
            'installer_vm']['ip']
        oc_builder.inject_opendaylight(
            odl_version=ds_opts['odl_version'],
            image=tmp_oc_image,
            tmp_dir=tmp_dir,
            uc_ip=undercloud_admin_ip,
            os_version=ds_opts['os_version'],
            docker_tag=docker_tag,
        )
        if docker_tag:
            patched_containers = patched_containers.union({'opendaylight'})

    if patches:
        if ds_opts['os_version'] == 'master':
            branch = ds_opts['os_version']
        else:
            branch = "stable/{}".format(ds_opts['os_version'])
        logging.info('Adding patches to overcloud')
        patched_containers = patched_containers.union(
            c_builder.add_upstream_patches(patches,
                                           tmp_oc_image, tmp_dir,
                                           branch,
                                           uc_ip=undercloud_admin_ip,
                                           docker_tag=docker_tag))
    # if containers with ceph, and no ceph device we need to use a
    # persistent loop device for Ceph OSDs
    if docker_tag and not ds_opts.get('ceph_device', None):
        tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
        with open(tmp_losetup, 'w') as fh:
            fh.write(LOSETUP_SERVICE)
        virt_cmds.extend([
            {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(
                tmp_losetup)},
            {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size 10G'},
            {con.VIRT_RUN_CMD: 'mkfs.ext4 -F /srv/data.img'},
            {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
            {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
        ])
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")
    return patched_containers


def make_ssh_key():
    """
    Creates public and private SSH keys with 1024-bit RSA encryption
    :return: private, public key
    """
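    # Example (illustrative):
    #   private_key, public_key = make_ssh_key()
    # where private_key is a PEM/PKCS8 string and public_key is an
    # OpenSSH 'ssh-rsa AAAA...' string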
    key = rsa.generate_private_key(
        backend=crypto_default_backend(),
        public_exponent=65537,
        key_size=1024
    )

    private_key = key.private_bytes(
        crypto_serialization.Encoding.PEM,
        crypto_serialization.PrivateFormat.PKCS8,
        crypto_serialization.NoEncryption())
    public_key = key.public_key().public_bytes(
        crypto_serialization.Encoding.OpenSSH,
        crypto_serialization.PublicFormat.OpenSSH
    )
    return private_key.decode('utf-8'), public_key.decode('utf-8')


def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
    """
    Creates modified opnfv/network environments for deployment
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param opnfv_env: file path for opnfv-environment file
    :param net_env: file path for network-environment file
    :param tmp_dir: Apex tmp dir
    :return: None
    """
    logging.info("Preparing opnfv-environment and network-environment files")
    ds_opts = ds['deploy_options']
    tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
    shutil.copyfile(opnfv_env, tmp_opnfv_env)
    tenant_nic_map = ns['networks']['tenant']['nic_mapping']
    tenant_nic = dict()
    tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
    tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
    external_nic_map = ns['networks']['external'][0]['nic_mapping']
    external_nic = dict()
    external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]

    # SSH keys
    private_key, public_key = make_ssh_key()

    # Make easier/faster variables to index in the file editor
    if 'performance' in ds_opts:
        perf = True
        # vpp
        if 'vpp' in ds_opts['performance']['Compute']:
            perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
        else:
            perf_vpp_comp = None
        if 'vpp' in ds_opts['performance']['Controller']:
            perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
        else:
            perf_vpp_ctrl = None

        # ovs
        if 'ovs' in ds_opts['performance']['Compute']:
            perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
        else:
            perf_ovs_comp = None

        # kernel
        if 'kernel' in ds_opts['performance']['Compute']:
            perf_kern_comp = ds_opts['performance']['Compute']['kernel']
        else:
            perf_kern_comp = None
    else:
        perf = False

    # Modify OPNFV environment
    # TODO: Change to build a dict and outputting yaml rather than parsing
    for line in fileinput.input(tmp_opnfv_env, inplace=True):
        line = line.strip('\n')
        output_line = line
        if 'CloudDomain' in line:
            output_line = "  CloudDomain: {}".format(ns['domain_name'])
        elif 'replace_private_key' in line:
            output_line = "    private_key: |\n"
            key_out = ''
            for line in private_key.splitlines():
                key_out += "        {}\n".format(line)
            output_line += key_out
        elif 'replace_public_key' in line:
            output_line = "    public_key: '{}'".format(public_key)
        elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
                'resource_registry' in line:
            output_line = "resource_registry:\n" \
                          "  OS::TripleO::NodeUserData: first-boot.yaml"
        elif 'ComputeExtraConfigPre' in line and \
                ds_opts['dataplane'] == 'ovs_dpdk':
            output_line = '  OS::TripleO::ComputeExtraConfigPre: ' \
                          './ovs-dpdk-preconfig.yaml'

        if ds_opts['sdn_controller'] == 'opendaylight' and \
                'odl_vpp_routing_node' in ds_opts:
            if 'opendaylight::vpp_routing_node' in line:
                output_line = ("    opendaylight::vpp_routing_node: {}.{}"
                               .format(ds_opts['odl_vpp_routing_node'],
                                       ns['domain_name']))
        elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
            if 'NeutronVPPAgentPhysnets' in line:
                output_line = ("  NeutronVPPAgentPhysnets: "
                               "'datacentre:{}'"
                               .format(tenant_nic['Controller']))
        elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
                'dvr') is True:
            if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
                output_line = ''
            elif 'NeutronDhcpAgentsPerNetwork' in line:
                num_control, num_compute = inv.get_node_counts()
                output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
                               .format(num_compute))
            elif 'ComputeServices' in line:
                output_line = ("  ComputeServices:\n"
                               "    - OS::TripleO::Services::NeutronDhcpAgent")

        # SRIOV networks are VLAN based provider networks. In order to
        # simplify the deployment, nfv_sriov will be the default physnet.
        # VLANs are not needed in advance, and the user will have to create
        # the network specifying the segmentation-id.
        if ds_opts['sriov']:
            if 'NeutronNetworkVLANRanges' in line:
                output_line = ("{},nfv_sriov'".format(line[:-1]))

        if perf:
            for role in 'NovaCompute', 'Controller':
                if role == 'NovaCompute':
                    perf_opts = perf_vpp_comp
                else:
                    perf_opts = perf_vpp_ctrl
                cfg = "{}ExtraConfig".format(role)
                if cfg in line and perf_opts:
                    perf_line = ''
                    if 'main-core' in perf_opts:
                        perf_line += ("\n    fdio::vpp_cpu_main_core: '{}'"
                                      .format(perf_opts['main-core']))
                    if 'corelist-workers' in perf_opts:
                        perf_line += ("\n    "
                                      "fdio::vpp_cpu_corelist_workers: '{}'"
                                      .format(perf_opts['corelist-workers']))
                    if ds_opts['sdn_controller'] == 'opendaylight' and \
                            ds_opts['dataplane'] == 'fdio':
                        if role == 'NovaCompute':
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface',"
                                          "'{}:public-interface']"
                                          .format(tenant_nic[role],
                                                  external_nic[role]))
                        else:
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface']"
                                          .format(tenant_nic[role]))
                    if perf_line:
                        output_line = ("  {}:{}".format(cfg, perf_line))

            if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
                for k, v in OVS_PERF_MAP.items():
                    if k in line and v in perf_ovs_comp:
                        output_line = "  {}: '{}'".format(k, perf_ovs_comp[v])
            # kernel args
            # (FIXME) use compute's kernel settings for all nodes for now.
            if perf_kern_comp:
                if 'NovaSchedulerDefaultFilters' in line:
                    output_line = \
                        "  NovaSchedulerDefaultFilters: 'RamFilter," \
                        "ComputeFilter,AvailabilityZoneFilter," \
                        "ComputeCapabilitiesFilter," \
                        "ImagePropertiesFilter,NUMATopologyFilter'"
                elif 'ComputeKernelArgs' in line:
                    kernel_args = ''
                    for k, v in perf_kern_comp.items():
                        kernel_args += "{}={} ".format(k, v)
                    if kernel_args:
                        output_line = "  ComputeKernelArgs: '{}'".\
                            format(kernel_args)

        print(output_line)

    logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))


def generate_ceph_key():
    key = os.urandom(16)
    header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
    return base64.b64encode(header + key)
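

# NOTE: generate_ceph_key() mirrors the cephx secret format: a little-endian
# header of key type (1), creation time (sec, nsec=0) and secret length,
# followed by the 16 random key bytes, all base64-encoded.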


def prep_storage_env(ds, ns, virtual, tmp_dir):
    """
    Creates storage environment file for deployment. Source file is copied by
    undercloud playbook to host.
    :param ds: deploy settings
    :param ns: network settings
    :param virtual: (boolean) indicates a virtual deployment
    :param tmp_dir: Apex tmp dir
    :return: None
    """
    ds_opts = ds['deploy_options']
    storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
    if not os.path.isfile(storage_file):
        logging.error("storage-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("storage-environment file not copied from "
                                  "undercloud")
    for line in fileinput.input(storage_file, inplace=True):
        line = line.strip('\n')
        if 'CephClusterFSID' in line:
            print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
        elif 'CephMonKey' in line:
            print("  CephMonKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephAdminKey' in line:
            print("  CephAdminKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephClientKey' in line:
            print("  CephClientKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        else:
            print(line)

    if ds_opts['containers']:
        undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
            'installer_vm']['ip']
        ceph_version = con.CEPH_VERSION_MAP[ds_opts['os_version']]
        docker_image = "{}:8787/ceph/daemon:tag-build-master-" \
                       "{}-centos-7".format(undercloud_admin_ip,
                                            ceph_version)
        ceph_params = {
            'DockerCephDaemonImage': docker_image,
        }
        if not ds['global_params']['ha_enabled']:
            ceph_params['CephPoolDefaultSize'] = 1

        if virtual:
            ceph_params['CephAnsibleExtraConfig'] = {
                'centos_package_dependencies': [],
                'ceph_osd_docker_memory_limit': '1g',
                'ceph_mds_docker_memory_limit': '1g',
            }
            ceph_params['CephPoolDefaultPgNum'] = 32
        if 'ceph_device' in ds_opts and ds_opts['ceph_device']:
            ceph_device = ds_opts['ceph_device']
        else:
            # TODO(trozet): make this DS default after Fraser
            ceph_device = '/dev/loop3'

        ceph_params['CephAnsibleDisksConfig'] = {
            'devices': [ceph_device],
            'osd_scenario': 'collocated'
        }
        utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
    elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
        with open(storage_file, 'a') as fh:
            fh.write('  ExtraConfig:\n')
            fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
                ds_opts['ceph_device']
            ))
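

# NOTE (illustrative): with ceph_device set to /dev/sdb the appended hiera
# entry renders as "ceph::profile::params::osds:{/dev/sdb:{}}"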


def prep_sriov_env(ds, tmp_dir):
    """
    Creates SRIOV environment file for deployment. Source file is copied by
    undercloud playbook to host.
    :param ds: deploy settings
    :param tmp_dir: Apex tmp dir
    :return: None
    """
    ds_opts = ds['deploy_options']
    sriov_iface = ds_opts['sriov']
    sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
    if not os.path.isfile(sriov_file):
        logging.error("sriov-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("sriov-environment file not copied from "
                                  "undercloud")
    # TODO(rnoriega): Instead of line editing, refactor this code to load
    # yaml file into a dict, edit it and write the file back.
    for line in fileinput.input(sriov_file, inplace=True):
        line = line.strip('\n')
        if 'NovaSchedulerDefaultFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NovaSchedulerAvailableFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NeutronPhysicalDevMappings' in line:
            print("  NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
                  .format(sriov_iface))
        elif 'NeutronSriovNumVFs' in line:
            print("  NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
        elif 'NovaPCIPassthrough' in line:
            print("  NovaPCIPassthrough:")
        elif 'devname' in line:
            print("    - devname: \"{}\"".format(sriov_iface))
        elif 'physical_network' in line:
            print("      physical_network: \"nfv_sriov\"")
        else:
            print(line)


def external_network_cmds(ns, ds):
    """
    Generates external network openstack commands
    :param ns: network settings
    :param ds: deploy settings
    :return: list of commands to configure external network
    """
    ds_opts = ds['deploy_options']
    external_physnet = 'datacentre'
    if ds_opts['dataplane'] == 'fdio' and \
            ds_opts['sdn_controller'] != 'opendaylight':
        external_physnet = 'external'
    if 'external' in ns.enabled_network_list:
        net_config = ns['networks']['external'][0]
        external = True
        pool_start, pool_end = net_config['floating_ip_range']
    else:
        net_config = ns['networks']['admin']
        external = False
        pool_start, pool_end = ns['apex']['networks']['admin'][
            'introspection_range']
    nic_config = net_config['nic_mapping']
    gateway = net_config['gateway']
    cmds = list()
    # create network command
    if nic_config['compute']['vlan'] == 'native':
        ext_type = 'flat'
    else:
        ext_type = "vlan --provider-segment {}".format(nic_config[
                                                       'compute']['vlan'])
    cmds.append("openstack network create external --project service "
                "--external --provider-network-type {} "
                "--provider-physical-network {}"
                .format(ext_type, external_physnet))
    # create subnet command
    cidr = net_config['cidr']
    subnet_cmd = "openstack subnet create external-subnet --project " \
                 "service --network external --no-dhcp --gateway {} " \
                 "--allocation-pool start={},end={} --subnet-range " \
                 "{}".format(gateway, pool_start, pool_end, str(cidr))
    if external and cidr.version == 6:
        subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
                      '--ipv6-address-mode slaac'
    cmds.append(subnet_cmd)
    logging.debug("Neutron external network commands determined "
                  "as: {}".format(cmds))
    return cmds


def create_congress_cmds(overcloud_file):
    drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
    overcloudrc = parsers.parse_overcloudrc(overcloud_file)
    logging.info("Creating congress commands")
    try:
        ds_cfg = [
            "username={}".format(overcloudrc['OS_USERNAME']),
            "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
            "password={}".format(overcloudrc['OS_PASSWORD']),
            "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
        ]
    except KeyError:
        logging.error("Unable to find all keys required for congress in "
                      "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
                      "OS_PASSWORD, OS_AUTH_URL. Please check overcloudrc "
                      "file: {}".format(overcloud_file))
        raise
    cmds = list()
    ds_cfg = '--config ' + ' --config '.join(ds_cfg)

    for driver in drivers:
        if driver == 'doctor':
            cmd = "{} \"{}\"".format(driver, driver)
        else:
            cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
        if driver == 'nova':
            cmd += ' --config api_version="2.34"'
        logging.debug("Congress command created: {}".format(cmd))
        cmds.append(cmd)
    return cmds
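

# Example (illustrative) congress datasource arguments produced above:
#   nova "nova" --config username=admin --config tenant_name=admin
#     --config password=<secret> --config auth_url=http://<keystone>:5000/v3
#     --config api_version="2.34"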