##############################################################################
# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

import base64
import fileinput
import logging
import os
import platform
import shutil
import struct
import time
import uuid

import apex.builders.overcloud_builder as oc_builder
import apex.builders.common_builder as c_builder
from apex.common import constants as con
from apex.common.exceptions import ApexDeployException
from apex.common import parsers
from apex.common import utils
from apex.virtual import utils as virt_utils
from cryptography.hazmat.primitives import serialization as \
    crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend as \
    crypto_default_backend

SDN_FILE_MAP = {
    'opendaylight': {
        'sfc': 'neutron-sfc-opendaylight.yaml',
        'vpn': 'neutron-bgpvpn-opendaylight.yaml',
        'gluon': 'gluon.yaml',
        'vpp': {
            'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
            'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
            'default': 'neutron-opendaylight-honeycomb.yaml'
        },
        'l2gw': 'neutron-l2gw-opendaylight.yaml',
        'sriov': 'neutron-opendaylight-sriov.yaml',
        'default': 'neutron-opendaylight.yaml',
    },
    'onos': {
        'sfc': 'neutron-onos-sfc.yaml',
        'default': 'neutron-onos.yaml'
    },
    'ovn': 'neutron-ml2-ovn.yaml',
    False: {
        'vpp': 'neutron-ml2-vpp.yaml',
        'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
    }
}
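
# How this map is consumed (see build_sdn_env_list() below): top-level keys
# are matched against ds['sdn_controller'] (False covers the SDN-less case),
# nested dicts map enabled features to env files, and a tuple value such as
# ('ovs_dpdk', 'neutron-ovs-dpdk.yaml') means the file applies when the
# deploy setting equals the tuple's first element.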

OTHER_FILE_MAP = {
    'tacker': 'enable_tacker.yaml',
    'congress': 'enable_congress.yaml',
    'barometer': 'enable_barometer.yaml',
    'rt_kvm': 'enable_rt_kvm.yaml'
}

OVS_PERF_MAP = {
    'HostCpusList': 'dpdk_cores',
    'NeutronDpdkCoreList': 'pmd_cores',
    'NeutronDpdkSocketMemory': 'socket_memory',
    'NeutronDpdkMemoryChannels': 'memory_channels'
}

OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
                      ".noarch.rpm"

LOSETUP_SERVICE = """[Unit]
Description=Setup loop devices
Before=network.target

[Service]
Type=oneshot
ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
ExecStop=/sbin/losetup -d /dev/loop3
TimeoutSec=60
RemainAfterExit=yes

[Install]
WantedBy=multi-user.target
"""
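
# Note: /dev/loop3 must stay in sync with the ceph_device check in
# prep_image() below, which installs this unit and backs the loop device
# with a 10G /srv/data.img when containerized Ceph has no real disk.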


def build_sdn_env_list(ds, sdn_map, env_list=None):
    """
    Builds a list of SDN environment files to be used in the deploy cmd.

    This function recursively searches an sdn_map. First the sdn controller
    is matched, and then the function looks for enabled features for that
    controller to determine which environment files should be used. By
    default a feature is added to the list if it is set to true in the
    deploy settings. If a feature's value is not a boolean, it is treated as
    a (key, value) tuple to compare against.

    :param ds: deploy settings
    :param sdn_map: SDN map to recursively search
    :param env_list: recursive var to hold previously found env_list
    :return: A list of env files
    """
    if env_list is None:
        env_list = list()
    for k, v in sdn_map.items():
        if ds['sdn_controller'] == k or (k in ds and ds[k]):
            if isinstance(v, dict):
                # Append default SDN env file first
                # The assumption is that feature-enabled SDN env files
                # override and do not conflict with previously set default
                # settings
                if ds['sdn_controller'] == k and 'default' in v:
                    env_list.append(os.path.join(con.THT_ENV_DIR,
                                                 v['default']))
                env_list.extend(build_sdn_env_list(ds, v))
            # check if the value is not a boolean
            elif isinstance(v, tuple):
                if ds[k] == v[0]:
                    env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
            else:
                env_list.append(os.path.join(con.THT_ENV_DIR, v))
    if len(env_list) == 0:
        try:
            env_list.append(os.path.join(
                con.THT_ENV_DIR, sdn_map['default']))
        except KeyError:
            logging.warning("Unable to find default file for SDN")

    return env_list
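
# Illustrative example (not executed): with ds['sdn_controller'] set to
# 'opendaylight' and ds['sfc'] True, this returns
#   [<THT_ENV_DIR>/neutron-opendaylight.yaml,
#    <THT_ENV_DIR>/neutron-sfc-opendaylight.yaml]
# (the controller's 'default' file first, then feature files).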


def get_docker_sdn_file(ds_opts):
    """
    Returns docker env file for detected SDN
    :param ds_opts: deploy options
    :return: docker THT env file for an SDN
    """
    # FIXME(trozet): We assume right now there is only one docker SDN file
    docker_services = con.VALID_DOCKER_SERVICES
    tht_dir = con.THT_DOCKER_ENV_DIR[ds_opts['os_version']]
    sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
    for sdn_file in sdn_env_list:
        sdn_base = os.path.basename(sdn_file)
        if sdn_base in docker_services:
            if docker_services[sdn_base] is not None:
                return os.path.join(tht_dir,
                                    docker_services[sdn_base])
            else:
                return os.path.join(tht_dir, sdn_base)
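
# Illustrative behavior (assumed contents of con.VALID_DOCKER_SERVICES): if
# the SDN env list resolves to .../neutron-opendaylight.yaml and that
# basename is a known docker service, the containerized counterpart under
# THT_DOCKER_ENV_DIR for ds_opts['os_version'] is returned instead.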


def create_deploy_cmd(ds, ns, inv, tmp_dir,
                      virtual, env_file='opnfv-environment.yaml',
                      net_data=False):

    logging.info("Creating deployment command")
    deploy_options = ['network-environment.yaml']

    ds_opts = ds['deploy_options']

    if ds_opts['containers']:
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'docker.yaml'))

    if ds['global_params']['ha_enabled']:
        if ds_opts['containers']:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'docker-ha.yaml'))
        else:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'puppet-pacemaker.yaml'))

    if env_file:
        deploy_options.append(env_file)

    if ds_opts['containers']:
        deploy_options.append('docker-images.yaml')
        sdn_docker_file = get_docker_sdn_file(ds_opts)
        if sdn_docker_file:
            deploy_options.append(sdn_docker_file)
            deploy_options.append('sdn-images.yaml')
    else:
        deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)

    for k, v in OTHER_FILE_MAP.items():
        if k in ds_opts and ds_opts[k]:
            if ds_opts['containers']:
                deploy_options.append(os.path.join(con.THT_DOCKER_ENV_DIR,
                                                   "{}.yaml".format(k)))
            else:
                deploy_options.append(os.path.join(con.THT_ENV_DIR, v))

    if ds_opts['ceph'] and 'csit' not in env_file:
        prep_storage_env(ds, ns, virtual, tmp_dir)
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'storage-environment.yaml'))
    if ds_opts['sriov']:
        prep_sriov_env(ds, tmp_dir)

    if virtual:
        deploy_options.append('virtual-environment.yaml')
    else:
        deploy_options.append('baremetal-environment.yaml')

    num_control, num_compute = inv.get_node_counts()
    if num_control == 0 or num_compute == 0:
        logging.error("Detected 0 control or compute nodes. Control nodes: "
                      "{}, compute nodes: {}".format(num_control,
                                                     num_compute))
        raise ApexDeployException("Invalid number of control or computes")
    elif num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1
    if platform.machine() == 'aarch64':
        # aarch64 deploys were not completing in the default 90 mins.
        # Not sure if this is related to the hardware the OOO support
        # was developed on or the virtualization support in CentOS.
        # Either way it will probably get better over time as the aarch64
        # support matures in CentOS, and deploy time should be tested in
        # the future so this multiplier can be removed.
        con.DEPLOY_TIMEOUT *= 2
    cmd = "openstack overcloud deploy --templates --timeout {} " \
          .format(con.DEPLOY_TIMEOUT)
    # build cmd env args
    for option in deploy_options:
        cmd += " -e {}".format(option)
    cmd += " --ntp-server {}".format(ns['ntp'][0])
    cmd += " --control-scale {}".format(num_control)
    cmd += " --compute-scale {}".format(num_compute)
    cmd += ' --control-flavor control --compute-flavor compute'
    if net_data:
        cmd += ' --networks-file network_data.yaml'
    libvirt_type = 'kvm'
    if virtual:
        # fall back to plain QEMU when the host lacks nested KVM support
        with open('/sys/module/kvm_intel/parameters/nested') as f:
            nested_kvm = f.read().strip()
            if nested_kvm != 'Y':
                libvirt_type = 'qemu'
    cmd += ' --libvirt-type {}'.format(libvirt_type)
    logging.info("Deploy command set: {}".format(cmd))

    with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
        fh.write(cmd)
    return cmd
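
# Illustrative result (assumed values, not executed): a containerized HA
# virtual deploy might yield roughly
#   openstack overcloud deploy --templates --timeout 90 \
#     -e network-environment.yaml -e <THT_ENV_DIR>/docker.yaml \
#     -e <THT_ENV_DIR>/docker-ha.yaml -e opnfv-environment.yaml ... \
#     --ntp-server <ntp> --control-scale 3 --compute-scale 2 \
#     --control-flavor control --compute-flavor compute --libvirt-type kvm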


def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
               patches=None, upstream=False):
    """
    Locates sdn image and preps for deployment.
    :param ds: deploy settings
    :param ns: network settings
    :param img: sdn image
    :param tmp_dir: dir to store modified sdn image
    :param root_pw: password to configure for overcloud image
    :param docker_tag: Docker image tag for RDO version (default None)
    :param patches: List of patches to apply to overcloud image
    :param upstream: (boolean) Indicates if upstream deployment or not
    :return: set of container names that were patched
    """
    # TODO(trozet): Come up with a better way to organize this logic in this
    # function
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))

    ds_opts = ds['deploy_options']
    virt_cmds = list()
    sdn = ds_opts['sdn_controller']
    patched_containers = set()
    # we need this due to rhbz #1436021
    # fixed in systemd-219-37.el7
    if sdn == 'opendaylight':
        logging.info("Neutron openvswitch-agent disabled")
        virt_cmds.extend([
            {
                con.VIRT_RUN_CMD:
                    "rm -f /etc/systemd/system/multi-user.target.wants/"
                    "neutron-openvswitch-agent.service"},
            {
                con.VIRT_RUN_CMD:
                    "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
                    ".service"
            }])

    if ns.get('http_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'http_proxy={}' >> /etc/environment".format(
                    ns['http_proxy'])})

    if ns.get('https_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'https_proxy={}' >> /etc/environment".format(
                    ns['https_proxy'])})

    if ds_opts['vpn']:
        virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
                "/opt/quagga/etc/init.d/zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
                              "zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.local "})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
        logging.info("ZRPCD process started")

    dataplane = ds_opts['dataplane']
    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
        uio_types = {
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
        }
        for mod_file, mod in uio_types.items():
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))
            os.chmod(mod_file, 0o755)
            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
                    mod_file)},
                {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
                                   "{}".format(os.path.basename(mod_file))}
            ])
    if root_pw:
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})
    if ds_opts['sfc'] and dataplane == 'ovs':
        virt_cmds.extend([
            {con.VIRT_RUN_CMD: "yum -y install "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_KMOD_RPM)},
            {con.VIRT_RUN_CMD: "yum downgrade -y "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_RPM)}
        ])
    if dataplane == 'fdio':
        # Patch neutron to use the OVS external interface for the router
        # and add the generic linux NS interface driver
        virt_cmds.append(
            {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
                               "-p1 < neutron-patch-NSDriver.patch"})
        if sdn is False:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
                {con.VIRT_RUN_CMD: "yum install -y "
                                   "/root/nosdn_vpp_rpms/*.rpm"}
            ])

    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug("Temporary overcloud image stored as: {}".format(
        tmp_oc_image))

    # TODO (trozet): remove this if block after Fraser
    if sdn == 'opendaylight' and not upstream:
        if ds_opts['odl_version'] != con.DEFAULT_ODL_VERSION:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
                {con.VIRT_RUN_CMD: "rm -rf /etc/puppet/modules/opendaylight"},
                {con.VIRT_RUN_CMD: "cd /etc/puppet/modules && tar xzf "
                                   "/root/puppet-opendaylight-"
                                   "{}.tar.gz".format(ds_opts['odl_version'])}
            ])
            if ds_opts['odl_version'] == 'master':
                virt_cmds.extend([
                    {con.VIRT_RUN_CMD: "rpm -ivh --nodeps /root/{}/*".format(
                        ds_opts['odl_version'])}
                ])
            else:
                virt_cmds.extend([
                    {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
                        ds_opts['odl_version'])}
                ])
    elif sdn == 'opendaylight' and 'odl_vpp_netvirt' in ds_opts \
            and ds_opts['odl_vpp_netvirt']:
        virt_cmds.extend([
            {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
            {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
                ODL_NETVIRT_VPP_RPM)}
        ])
    elif sdn == 'opendaylight':
        undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
            'installer_vm']['ip']
        oc_builder.inject_opendaylight(
            odl_version=ds_opts['odl_version'],
            image=tmp_oc_image,
            tmp_dir=tmp_dir,
            uc_ip=undercloud_admin_ip,
            os_version=ds_opts['os_version'],
            docker_tag=docker_tag,
        )
        if docker_tag:
            patched_containers = patched_containers.union({'opendaylight'})

    if patches:
        if ds_opts['os_version'] == 'master':
            branch = ds_opts['os_version']
        else:
            branch = "stable/{}".format(ds_opts['os_version'])
        logging.info('Adding patches to overcloud')
        # resolve the undercloud admin IP here as well, since the ODL branch
        # above that sets it may not have run
        undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
            'installer_vm']['ip']
        patched_containers = patched_containers.union(
            c_builder.add_upstream_patches(patches,
                                           tmp_oc_image, tmp_dir,
                                           branch,
                                           uc_ip=undercloud_admin_ip,
                                           docker_tag=docker_tag))
    # if containers with ceph, and no ceph device we need to use a
    # persistent loop device for Ceph OSDs
    if docker_tag and ds_opts['ceph_device'] == '/dev/loop3':
        tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
        with open(tmp_losetup, 'w') as fh:
            fh.write(LOSETUP_SERVICE)
        virt_cmds.extend([
            {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(
                tmp_losetup)},
            {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size 10G'},
            {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
            {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
        ])
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")
    return patched_containers
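
# Shape of the virt_cmds entries built in prep_image() above (all three
# forms appear in the function): {con.VIRT_RUN_CMD: "<shell command>"},
# {con.VIRT_UPLOAD: "<local path>:<image dir>"} and
# {con.VIRT_PW: "password:<pw>"}; virt_utils.virt_customize() applies them
# in order to the copied overcloud qcow2.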


def make_ssh_key():
    """
    Creates a public/private SSH key pair using 1024-bit RSA
    :return: private, public key
    """
    key = rsa.generate_private_key(
        backend=crypto_default_backend(),
        public_exponent=65537,
        key_size=1024
    )

    private_key = key.private_bytes(
        crypto_serialization.Encoding.PEM,
        crypto_serialization.PrivateFormat.PKCS8,
        crypto_serialization.NoEncryption())
    public_key = key.public_key().public_bytes(
        crypto_serialization.Encoding.OpenSSH,
        crypto_serialization.PublicFormat.OpenSSH
    )
    return private_key.decode('utf-8'), public_key.decode('utf-8')
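
# Note: the generated pair is consumed by prep_env() below, which swaps it
# in for the 'replace_private_key'/'replace_public_key' markers of the
# opnfv-environment file.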


def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
    """
    Creates modified opnfv/network environments for deployment
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param opnfv_env: file path for opnfv-environment file
    :param net_env: file path for network-environment file
    :param tmp_dir: Apex tmp dir
    :return: None
    """

    logging.info("Preparing opnfv-environment and network-environment files")
    ds_opts = ds['deploy_options']
    tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
    shutil.copyfile(opnfv_env, tmp_opnfv_env)
    tenant_nic_map = ns['networks']['tenant']['nic_mapping']
    tenant_nic = dict()
    tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
    tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
    external_nic_map = ns['networks']['external'][0]['nic_mapping']
    external_nic = dict()
    external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]

    # SSH keys
    private_key, public_key = make_ssh_key()

    # Make easier/faster variables to index in the file editor
    if 'performance' in ds_opts:
        perf = True
        # vpp
        if 'vpp' in ds_opts['performance']['Compute']:
            perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
        else:
            perf_vpp_comp = None
        if 'vpp' in ds_opts['performance']['Controller']:
            perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
        else:
            perf_vpp_ctrl = None

        # ovs
        if 'ovs' in ds_opts['performance']['Compute']:
            perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
        else:
            perf_ovs_comp = None

        # kernel
        if 'kernel' in ds_opts['performance']['Compute']:
            perf_kern_comp = ds_opts['performance']['Compute']['kernel']
        else:
            perf_kern_comp = None
    else:
        perf = False

    # Modify OPNFV environment
    # TODO: Change to build a dict and outputting yaml rather than parsing
    for line in fileinput.input(tmp_opnfv_env, inplace=True):
        line = line.strip('\n')
        output_line = line
        if 'CloudDomain' in line:
            output_line = "  CloudDomain: {}".format(ns['domain_name'])
        elif 'replace_private_key' in line:
            output_line = "    private_key: |\n"
            key_out = ''
            # use a distinct loop variable so 'line' is not clobbered for
            # the checks later in this iteration
            for key_line in private_key.splitlines():
                key_out += "        {}\n".format(key_line)
            output_line += key_out
        elif 'replace_public_key' in line:
            output_line = "    public_key: '{}'".format(public_key)
        elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
                'resource_registry' in line:
            output_line = "resource_registry:\n" \
                          "  OS::TripleO::NodeUserData: first-boot.yaml"
        elif 'ComputeExtraConfigPre' in line and \
                ds_opts['dataplane'] == 'ovs_dpdk':
            output_line = '  OS::TripleO::ComputeExtraConfigPre: ' \
                          './ovs-dpdk-preconfig.yaml'

        if ds_opts['sdn_controller'] == 'opendaylight' and \
                'odl_vpp_routing_node' in ds_opts:
            if 'opendaylight::vpp_routing_node' in line:
                output_line = ("    opendaylight::vpp_routing_node: {}.{}"
                               .format(ds_opts['odl_vpp_routing_node'],
                                       ns['domain_name']))
        elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
            if 'NeutronVPPAgentPhysnets' in line:
                # VPP interface tap0 will be used for external network
                # connectivity
                output_line = ("  NeutronVPPAgentPhysnets: "
                               "'datacentre:{},external:tap0'"
                               .format(tenant_nic['Controller']))
        elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
                'dvr') is True:
            if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
                output_line = ''
            elif 'NeutronDhcpAgentsPerNetwork' in line:
                num_control, num_compute = inv.get_node_counts()
                output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
                               .format(num_compute))
            elif 'ComputeServices' in line:
                output_line = ("  ComputeServices:\n"
                               "    - OS::TripleO::Services::NeutronDhcpAgent")

        # SRIOV networks are VLAN based provider networks. In order to
        # simplify the deployment, nfv_sriov will be the default physnet.
        # VLANs are not needed in advance, and the user will have to create
        # the network specifying the segmentation-id.
        if ds_opts['sriov']:
            if 'NeutronNetworkVLANRanges' in line:
                output_line = ("{},nfv_sriov'".format(line[:-1]))

        if perf:
            for role in 'NovaCompute', 'Controller':
                if role == 'NovaCompute':
                    perf_opts = perf_vpp_comp
                else:
                    perf_opts = perf_vpp_ctrl
                cfg = "{}ExtraConfig".format(role)
                if cfg in line and perf_opts:
                    perf_line = ''
                    if 'main-core' in perf_opts:
                        perf_line += ("\n    fdio::vpp_cpu_main_core: '{}'"
                                      .format(perf_opts['main-core']))
                    if 'corelist-workers' in perf_opts:
                        perf_line += ("\n    "
                                      "fdio::vpp_cpu_corelist_workers: '{}'"
                                      .format(perf_opts['corelist-workers']))
                    if ds_opts['sdn_controller'] == 'opendaylight' and \
                            ds_opts['dataplane'] == 'fdio':
                        if role == 'NovaCompute':
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface',"
                                          "'{}:public-interface']"
                                          .format(tenant_nic[role],
                                                  external_nic[role]))
                        else:
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface']"
                                          .format(tenant_nic[role]))
                    if perf_line:
                        output_line = ("  {}:{}".format(cfg, perf_line))

            if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
                for k, v in OVS_PERF_MAP.items():
                    if k in line and v in perf_ovs_comp:
                        output_line = "  {}: '{}'".format(k, perf_ovs_comp[v])

            # kernel args
            # (FIXME) use compute's kernel settings for all nodes for now.
            if perf_kern_comp:
                if 'NovaSchedulerDefaultFilters' in line:
                    output_line = \
                        "  NovaSchedulerDefaultFilters: 'RamFilter," \
                        "ComputeFilter,AvailabilityZoneFilter," \
                        "ComputeCapabilitiesFilter," \
                        "ImagePropertiesFilter,NUMATopologyFilter'"
                elif 'ComputeKernelArgs' in line:
                    kernel_args = ''
                    for k, v in perf_kern_comp.items():
                        kernel_args += "{}={} ".format(k, v)
                    if kernel_args:
                        output_line = "  ComputeKernelArgs: '{}'".\
                            format(kernel_args)

        print(output_line)

    logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))


def generate_ceph_key():
    key = os.urandom(16)
    header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
    return base64.b64encode(header + key)
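
# Layout note (an inference from the struct format, not from this repo):
# '<hiih' matches the cephx secret header -- 16-bit key type (1 == AES),
# 32-bit creation time in seconds, 32-bit nanoseconds (0 here) and 16-bit
# key length -- followed by the 16 random key bytes, base64-encoded the way
# a Ceph keyring expects.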


def prep_storage_env(ds, ns, virtual, tmp_dir):
    """
    Creates storage environment file for deployment. Source file is copied
    by undercloud playbook to host.
    :param ds: deploy settings
    :param ns: network settings
    :param virtual: (boolean) virtual deployment or not
    :param tmp_dir: location of tmp directory
    :return: None
    """
    ds_opts = ds['deploy_options']
    storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
    if not os.path.isfile(storage_file):
        logging.error("storage-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("storage-environment file not copied from "
                                  "undercloud")
    for line in fileinput.input(storage_file, inplace=True):
        line = line.strip('\n')
        if 'CephClusterFSID' in line:
            print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
        elif 'CephMonKey' in line:
            print("  CephMonKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephAdminKey' in line:
            print("  CephAdminKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephClientKey' in line:
            print("  CephClientKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        else:
            print(line)

    if ds_opts['containers']:
        undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
            'installer_vm']['ip']
        ceph_version = con.CEPH_VERSION_MAP[ds_opts['os_version']]
        docker_image = "{}:8787/ceph/daemon:tag-build-master-" \
                       "{}-centos-7".format(undercloud_admin_ip,
                                            ceph_version)
        ceph_params = {
            'DockerCephDaemonImage': docker_image,
        }

        # max pgs allowed are calculated as num_mons * 200. Therefore we
        # set the number of pgs and pool size so that the total
        # (num_pgs * num_pools * num_osds) stays below that limit.
        ceph_params['CephPoolDefaultSize'] = 2
        ceph_params['CephPoolDefaultPgNum'] = 32
        if virtual:
            ceph_params['CephAnsibleExtraConfig'] = {
                'centos_package_dependencies': [],
                'ceph_osd_docker_memory_limit': '1g',
                'ceph_mds_docker_memory_limit': '1g',
            }
        ceph_device = ds_opts['ceph_device']
        ceph_params['CephAnsibleDisksConfig'] = {
            'devices': [ceph_device],
            'journal_size': 512,
            'osd_scenario': 'collocated'
        }
        utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
    # TODO(trozet): remove following block as we only support containers now
    elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
        with open(storage_file, 'a') as fh:
            fh.write('  ExtraConfig:\n')
            fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
                ds_opts['ceph_device']
            ))
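
# Rough arithmetic behind the PG settings above (illustrative): with a
# single monitor the cap is about 1 * 200 placement groups; six or so
# default pools at CephPoolDefaultPgNum 32 come to ~192 PGs, which stays
# under that ceiling on a small single-OSD virtual deploy.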


def prep_sriov_env(ds, tmp_dir):
    """
    Creates SRIOV environment file for deployment. Source file is copied by
    undercloud playbook to host.
    :param ds: deploy settings
    :param tmp_dir: location of tmp directory
    :return: None
    """
    ds_opts = ds['deploy_options']
    sriov_iface = ds_opts['sriov']
    sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
    if not os.path.isfile(sriov_file):
        logging.error("sriov-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("sriov-environment file not copied from "
                                  "undercloud")
    # TODO(rnoriega): Instead of line editing, refactor this code to load
    # yaml file into a dict, edit it and write the file back.
    for line in fileinput.input(sriov_file, inplace=True):
        line = line.strip('\n')
        if 'NovaSchedulerDefaultFilters' in line:
            # line[3:] presumably drops the '#  ' prefix the template ships
            # with, effectively uncommenting the parameter
            print("  {}".format(line[3:]))
        elif 'NovaSchedulerAvailableFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NeutronPhysicalDevMappings' in line:
            print("  NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
                  .format(sriov_iface))
        elif 'NeutronSriovNumVFs' in line:
            print("  NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
        elif 'NovaPCIPassthrough' in line:
            print("  NovaPCIPassthrough:")
        elif 'devname' in line:
            print("    - devname: \"{}\"".format(sriov_iface))
        elif 'physical_network' in line:
            print("      physical_network: \"nfv_sriov\"")
        else:
            print(line)


def external_network_cmds(ns, ds):
    """
    Generates external network openstack commands
    :param ns: network settings
    :param ds: deploy settings
    :return: list of commands to configure external network
    """
    ds_opts = ds['deploy_options']
    external_physnet = 'datacentre'
    if ds_opts['dataplane'] == 'fdio' and \
            ds_opts['sdn_controller'] != 'opendaylight':
        external_physnet = 'external'
    if 'external' in ns.enabled_network_list:
        net_config = ns['networks']['external'][0]
        external = True
        pool_start, pool_end = net_config['floating_ip_range']
    else:
        net_config = ns['networks']['admin']
        external = False
        pool_start, pool_end = ns['apex']['networks']['admin'][
            'introspection_range']
    nic_config = net_config['nic_mapping']
    gateway = net_config['gateway']
    cmds = list()
    # create network command
    if nic_config['compute']['vlan'] == 'native':
        ext_type = 'flat'
    else:
        ext_type = "vlan --provider-segment {}".format(nic_config[
            'compute']['vlan'])
    cmds.append("openstack network create external --project service "
                "--external --provider-network-type {} "
                "--provider-physical-network {}"
                .format(ext_type, external_physnet))
    # create subnet command
    cidr = net_config['cidr']
    subnet_cmd = "openstack subnet create external-subnet --project " \
                 "service --network external --no-dhcp --gateway {} " \
                 "--allocation-pool start={},end={} --subnet-range " \
                 "{}".format(gateway, pool_start, pool_end, str(cidr))
    if external and cidr.version == 6:
        subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
                      '--ipv6-address-mode slaac'
    cmds.append(subnet_cmd)
    logging.debug("Neutron external network commands determined "
                  "as: {}".format(cmds))
    return cmds


def create_congress_cmds(overcloud_file):
    drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
    overcloudrc = parsers.parse_overcloudrc(overcloud_file)
    logging.info("Creating congress commands")
    try:
        ds_cfg = [
            "username={}".format(overcloudrc['OS_USERNAME']),
            "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
            "password={}".format(overcloudrc['OS_PASSWORD']),
            "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
        ]
    except KeyError:
        logging.error("Unable to find all keys required for congress in "
                      "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
                      "OS_PASSWORD, OS_AUTH_URL. Please check overcloudrc "
                      "file: {}".format(overcloud_file))
        raise
    cmds = list()
    ds_cfg = '--config ' + ' --config '.join(ds_cfg)

    for driver in drivers:
        if driver == 'doctor':
            cmd = "{} \"{}\"".format(driver, driver)
        else:
            cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
        if driver == 'nova':
            cmd += ' --config api_version="2.34"'
        logging.debug("Congress command created: {}".format(cmd))
        cmds.append(cmd)
    return cmds
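
# Illustrative output (credential values elided): one datasource command per
# driver, e.g.
#   nova "nova" --config username=<user> ... --config api_version="2.34"
#   doctor "doctor"
# The doctor driver takes no credential config, and nova additionally pins
# its API version, per the loop above.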