##############################################################################
# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
import base64
import fileinput
import logging
import os
import platform
import shutil
import struct
import time
import uuid

import apex.builders.overcloud_builder as oc_builder
import apex.builders.common_builder as c_builder
from apex.common import constants as con
from apex.common.exceptions import ApexDeployException
from apex.common import parsers
from apex.common import utils
from apex.virtual import utils as virt_utils
from cryptography.hazmat.primitives import serialization as \
    crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend as \
    crypto_default_backend
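
# Maps SDN controllers and their optional features to the TripleO heat
# environment files that enable them; resolved by build_sdn_env_list().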
SDN_FILE_MAP = {
    'opendaylight': {
        'sfc': 'neutron-sfc-opendaylight.yaml',
        'vpn': 'neutron-bgpvpn-opendaylight.yaml',
        'gluon': 'gluon.yaml',
        'vpp': {
            'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
            'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
            'default': 'neutron-opendaylight-honeycomb.yaml'
        },
        'l2gw': 'neutron-l2gw-opendaylight.yaml',
        'sriov': 'neutron-opendaylight-sriov.yaml',
        'default': 'neutron-opendaylight.yaml',
    },
    'onos': {
        'sfc': 'neutron-onos-sfc.yaml',
        'default': 'neutron-onos.yaml'
    },
    'ovn': 'neutron-ml2-ovn.yaml',
    False: {
        'vpp': 'neutron-ml2-vpp.yaml',
        'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
    }
}

OTHER_FILE_MAP = {
    'tacker': 'enable_tacker.yaml',
    'congress': 'enable_congress.yaml',
    'barometer': 'enable_barometer.yaml',
    'rt_kvm': 'enable_rt_kvm.yaml'
}
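
# Maps TripleO heat template parameter names to the corresponding OVS/DPDK
# performance option keys from deploy settings (consumed by prep_env()).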
OVS_PERF_MAP = {
    'HostCpusList': 'dpdk_cores',
    'NeutronDpdkCoreList': 'pmd_cores',
    'NeutronDpdkSocketMemory': 'socket_memory',
    'NeutronDpdkMemoryChannels': 'memory_channels'
}

OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7"

LOSETUP_SERVICE = """[Unit]
Description=Setup loop devices

[Service]
Type=oneshot
ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
ExecStop=/sbin/losetup -d /dev/loop3
RemainAfterExit=yes

[Install]
WantedBy=multi-user.target
"""
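
# prep_image() writes this unit into the overcloud image when containers are
# used with Ceph but no 'ceph_device' is set, so that /dev/loop3 (backed by
# /srv/data.img) provides a persistent loop device for the Ceph OSD.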


def build_sdn_env_list(ds, sdn_map, env_list=None):
    """
    Builds a list of SDN environment files to be used in the deploy cmd.

    This function recursively searches an sdn_map.  First the sdn controller
    is matched, and then the function looks for enabled features for that
    controller to determine which environment files should be used.  By
    default, a feature must be set to true in deploy settings to be added to
    the list.  If a feature does not have a boolean value, then the key and
    value pair to compare with are checked as a tuple (k, v).

    :param ds: deploy settings
    :param sdn_map: SDN map to recursively search
    :param env_list: recursive var to hold previously found env_list
    :return: A list of env files
    """
    if env_list is None:
        env_list = list()
    for k, v in sdn_map.items():
        if ds['sdn_controller'] == k or (k in ds and ds[k]):
            if isinstance(v, dict):
                # Append default SDN env file first
                # The assumption is that feature-enabled SDN env files
                # override and do not conflict with previously set default
                # settings
                if ds['sdn_controller'] == k and 'default' in v:
                    env_list.append(os.path.join(con.THT_ENV_DIR,
                                                 v['default']))
                env_list.extend(build_sdn_env_list(ds, v))
            # check if the value is not a boolean
            elif isinstance(v, tuple):
                if ds[k] == v[0]:
                    env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
            else:
                env_list.append(os.path.join(con.THT_ENV_DIR, v))
    if len(env_list) == 0:
        try:
            env_list.append(os.path.join(
                con.THT_ENV_DIR, sdn_map['default']))
        except KeyError:
            logging.warning("Unable to find default file for SDN")

    return env_list
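
# Illustrative example (hypothetical deploy options, not from the source):
# with ds_opts = {'sdn_controller': 'opendaylight', 'sfc': True}, a call to
# build_sdn_env_list(ds_opts, SDN_FILE_MAP) would return the controller's
# default env file plus the enabled feature file, roughly:
#   [<THT_ENV_DIR>/neutron-opendaylight.yaml,
#    <THT_ENV_DIR>/neutron-sfc-opendaylight.yaml]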


def get_docker_sdn_file(ds_opts):
    """
    Returns docker env file for detected SDN
    :param ds_opts: deploy options
    :return: docker THT env file for an SDN
    """
    # FIXME(trozet): We assume right now there is only one docker SDN file
    docker_services = con.VALID_DOCKER_SERVICES
    sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
    for sdn_file in sdn_env_list:
        sdn_base = os.path.basename(sdn_file)
        if sdn_base in docker_services:
            if docker_services[sdn_base] is not None:
                return os.path.join(con.THT_DOCKER_ENV_DIR,
                                    docker_services[sdn_base])
            else:
                return os.path.join(con.THT_DOCKER_ENV_DIR, sdn_base)


def create_deploy_cmd(ds, ns, inv, tmp_dir,
                      virtual, env_file='opnfv-environment.yaml',
                      net_data=False):

    logging.info("Creating deployment command")
    deploy_options = ['network-environment.yaml']

    ds_opts = ds['deploy_options']

    if ds_opts['containers']:
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'docker.yaml'))

    if ds['global_params']['ha_enabled']:
        if ds_opts['containers']:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'docker-ha.yaml'))
        else:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'puppet-pacemaker.yaml'))

    if env_file:
        deploy_options.append(env_file)

    if ds_opts['containers']:
        deploy_options.append('docker-images.yaml')
        sdn_docker_file = get_docker_sdn_file(ds_opts)
        if sdn_docker_file:
            deploy_options.append(sdn_docker_file)
            deploy_options.append('sdn-images.yaml')
    else:
        deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)

    for k, v in OTHER_FILE_MAP.items():
        if k in ds_opts and ds_opts[k]:
            if ds_opts['containers']:
                deploy_options.append(os.path.join(con.THT_DOCKER_ENV_DIR,
                                                   "{}.yaml".format(k)))
            else:
                deploy_options.append(os.path.join(con.THT_ENV_DIR, v))

    if ds_opts['ceph']:
        prep_storage_env(ds, ns, virtual, tmp_dir)
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'storage-environment.yaml'))
    if ds_opts['sriov']:
        prep_sriov_env(ds, tmp_dir)

    if virtual:
        deploy_options.append('virtual-environment.yaml')
    else:
        deploy_options.append('baremetal-environment.yaml')

    num_control, num_compute = inv.get_node_counts()
    if num_control == 0 or num_compute == 0:
        logging.error("Detected 0 control or compute nodes. Control nodes: "
                      "{}, compute nodes: {}".format(num_control,
                                                     num_compute))
        raise ApexDeployException("Invalid number of control or computes")
    elif num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1
    if platform.machine() == 'aarch64':
        # aarch64 deploys were not completing in the default 90 mins.
        # Not sure if this is related to the hardware the OOO support
        # was developed on or the virtualization support in CentOS.
        # Either way, it will probably get better over time as the aarch64
        # support matures in CentOS, and deploy time should be re-tested in
        # the future so this multiplier can be removed.
        con.DEPLOY_TIMEOUT *= 2
    cmd = "openstack overcloud deploy --templates --timeout {} " \
          .format(con.DEPLOY_TIMEOUT)

    for option in deploy_options:
        cmd += " -e {}".format(option)
    cmd += " --ntp-server {}".format(ns['ntp'][0])
    cmd += " --control-scale {}".format(num_control)
    cmd += " --compute-scale {}".format(num_compute)
    cmd += ' --control-flavor control --compute-flavor compute'
    if net_data:
        cmd += ' --networks-file network_data.yaml'
    libvirt_type = 'kvm'
    if virtual:
        with open('/sys/module/kvm_intel/parameters/nested') as f:
            nested_kvm = f.read().strip()
            if nested_kvm != 'Y':
                libvirt_type = 'qemu'
    cmd += ' --libvirt-type {}'.format(libvirt_type)
    logging.info("Deploy command set: {}".format(cmd))

    with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
        fh.write(cmd)
    return cmd
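
# Illustrative only (paths and values are hypothetical, not from the source):
# a deploy_command generated by create_deploy_cmd() for a virtual,
# containerized HA deploy looks roughly like:
#   openstack overcloud deploy --templates --timeout 90 \
#     -e network-environment.yaml -e <THT_ENV_DIR>/docker.yaml \
#     -e <THT_ENV_DIR>/docker-ha.yaml -e opnfv-environment.yaml ... \
#     --ntp-server pool.ntp.org --control-scale 3 --compute-scale 2 \
#     --control-flavor control --compute-flavor compute --libvirt-type qemu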


def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
               patches=None, upstream=False):
    """
    Locates sdn image and preps for deployment.
    :param ds: deploy settings
    :param ns: network settings
    :param img: sdn image
    :param tmp_dir: dir to store modified sdn image
    :param root_pw: password to configure for overcloud image
    :param docker_tag: Docker image tag for RDO version (default None)
    :param patches: List of patches to apply to overcloud image
    :param upstream: (boolean) Indicates if upstream deployment or not
    :return: None
    """
    # TODO(trozet): Come up with a better way to organize the logic in this
    # function
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))

    ds_opts = ds['deploy_options']
    virt_cmds = list()
    sdn = ds_opts['sdn_controller']
    patched_containers = set()
    # we need this due to rhbz #1436021
    # fixed in systemd-219-37.el7
    if sdn is False:
        logging.info("Neutron openvswitch-agent disabled")
        virt_cmds.extend([
            {con.VIRT_RUN_CMD:
                "rm -f /etc/systemd/system/multi-user.target.wants/"
                "neutron-openvswitch-agent.service"},
            {con.VIRT_RUN_CMD:
                "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
                ".service"}
        ])

    if ns.get('http_proxy', ''):
        virt_cmds.append({con.VIRT_RUN_CMD:
                          "echo 'http_proxy={}' >> /etc/environment".format(
                              ns['http_proxy'])})

    if ns.get('https_proxy', ''):
        virt_cmds.append({con.VIRT_RUN_CMD:
                          "echo 'https_proxy={}' >> /etc/environment".format(
                              ns['https_proxy'])})

    if ds_opts['vpn']:
        virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
                "/opt/quagga/etc/init.d/zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
                              "zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.local "})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
        logging.info("ZRPCD process started")

    dataplane = ds_opts['dataplane']
    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
        uio_types = {
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
        }
        for mod_file, mod in uio_types.items():
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))
            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
                    mod_file)},
                {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
                                   "{}".format(os.path.basename(mod_file))}
            ])
    if root_pw:
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})
    if ds_opts['sfc'] and dataplane == 'ovs':
        virt_cmds.extend([
            {con.VIRT_RUN_CMD: "yum -y install "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_KMOD_RPM)},
            {con.VIRT_RUN_CMD: "yum downgrade -y "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_RPM)}
        ])
    if dataplane == 'fdio':
        # Patch neutron to use the OVS external interface for the router
        # and add the generic Linux namespace interface driver
        virt_cmds.append(
            {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
                               "-p1 < neutron-patch-NSDriver.patch"})
        if sdn is False:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
                {con.VIRT_RUN_CMD: "yum install -y "
                                   "/root/nosdn_vpp_rpms/*.rpm"}
            ])

    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug("Temporary overcloud image stored as: {}".format(
        tmp_oc_image))

    # TODO (trozet): remove this if block after Fraser
    if sdn == 'opendaylight' and not upstream:
        if ds_opts['odl_version'] != con.DEFAULT_ODL_VERSION:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
                {con.VIRT_RUN_CMD: "rm -rf /etc/puppet/modules/opendaylight"},
                {con.VIRT_RUN_CMD: "cd /etc/puppet/modules && tar xzf "
                                   "/root/puppet-opendaylight-"
                                   "{}.tar.gz".format(ds_opts['odl_version'])}
            ])
            if ds_opts['odl_version'] == 'master':
                virt_cmds.extend([
                    {con.VIRT_RUN_CMD: "rpm -ivh --nodeps /root/{}/*".format(
                        ds_opts['odl_version'])}
                ])
            else:
                virt_cmds.extend([
                    {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
                        ds_opts['odl_version'])}
                ])
    elif sdn == 'opendaylight' and 'odl_vpp_netvirt' in ds_opts \
            and ds_opts['odl_vpp_netvirt']:
        virt_cmds.extend([
            {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
            {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
                ODL_NETVIRT_VPP_RPM)}
        ])
    elif sdn == 'opendaylight':
        undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
            'installer_vm']['ip']
        oc_builder.inject_opendaylight(
            odl_version=ds_opts['odl_version'],
            image=tmp_oc_image,
            tmp_dir=tmp_dir,
            uc_ip=undercloud_admin_ip,
            os_version=ds_opts['os_version'],
            docker_tag=docker_tag,
        )
        if docker_tag:
            patched_containers = patched_containers.union({'opendaylight'})

    if sdn == 'ovn':
        virt_cmds.extend([
            {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum update -y "
                               "*openvswitch*"},
            {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum downgrade -y "
                               "*openvswitch*"}
        ])

    if patches:
        if ds_opts['os_version'] == 'master':
            branch = ds_opts['os_version']
        else:
            branch = "stable/{}".format(ds_opts['os_version'])
        logging.info('Adding patches to overcloud')
        patched_containers = patched_containers.union(
            c_builder.add_upstream_patches(patches,
                                           tmp_oc_image, tmp_dir,
                                           branch,
                                           uc_ip=undercloud_admin_ip,
                                           docker_tag=docker_tag))
    # if containers with ceph, and no ceph device we need to use a
    # persistent loop device for Ceph OSDs
    if docker_tag and not ds_opts.get('ceph_device', None):
        tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
        with open(tmp_losetup, 'w') as fh:
            fh.write(LOSETUP_SERVICE)
        virt_cmds.extend([
            {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(
                tmp_losetup)},
            {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size 10G'},
            {con.VIRT_RUN_CMD: 'mkfs.ext4 -F /srv/data.img'},
            {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
            {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
        ])
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")
    return patched_containers


def make_ssh_key():
    """
    Creates public and private ssh keys with 1024 bit RSA encryption
    :return: private, public key
    """
    key = rsa.generate_private_key(
        backend=crypto_default_backend(),
        public_exponent=65537,
        key_size=1024
    )

    private_key = key.private_bytes(
        crypto_serialization.Encoding.PEM,
        crypto_serialization.PrivateFormat.PKCS8,
        crypto_serialization.NoEncryption())
    public_key = key.public_key().public_bytes(
        crypto_serialization.Encoding.OpenSSH,
        crypto_serialization.PublicFormat.OpenSSH
    )
    return private_key.decode('utf-8'), public_key.decode('utf-8')
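
# The key pair returned by make_ssh_key() is injected into the overcloud
# environment by prep_env(), which substitutes the 'replace_private_key' and
# 'replace_public_key' markers in the copied opnfv-environment file.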


def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
    """
    Creates modified opnfv/network environments for deployment
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param opnfv_env: file path for opnfv-environment file
    :param net_env: file path for network-environment file
    :param tmp_dir: Apex tmp dir
    :return: None
    """

    logging.info("Preparing opnfv-environment and network-environment files")
    ds_opts = ds['deploy_options']
    tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
    shutil.copyfile(opnfv_env, tmp_opnfv_env)
    tenant_nic_map = ns['networks']['tenant']['nic_mapping']
    tenant_nic = dict()
    tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
    tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
    external_nic_map = ns['networks']['external'][0]['nic_mapping']
    external_nic = dict()
    external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]

    private_key, public_key = make_ssh_key()

    # Make easier/faster variables to index in the file editor
    if 'performance' in ds_opts:
        perf = True
        # vpp
        if 'vpp' in ds_opts['performance']['Compute']:
            perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
        else:
            perf_vpp_comp = None
        if 'vpp' in ds_opts['performance']['Controller']:
            perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
        else:
            perf_vpp_ctrl = None

        # ovs
        if 'ovs' in ds_opts['performance']['Compute']:
            perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
        else:
            perf_ovs_comp = None

        # kernel
        if 'kernel' in ds_opts['performance']['Compute']:
            perf_kern_comp = ds_opts['performance']['Compute']['kernel']
        else:
            perf_kern_comp = None
    else:
        perf = False

    # Modify OPNFV environment
    # TODO: Change to build a dict and outputting yaml rather than parsing
    for line in fileinput.input(tmp_opnfv_env, inplace=True):
        line = line.strip('\n')
        output_line = line
        if 'CloudDomain' in line:
            output_line = "  CloudDomain: {}".format(ns['domain_name'])
        elif 'replace_private_key' in line:
            output_line = "  private_key: |\n"
            key_out = ''
            for line in private_key.splitlines():
                key_out += "    {}\n".format(line)
            output_line += key_out
        elif 'replace_public_key' in line:
            output_line = "  public_key: '{}'".format(public_key)
        elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
                'resource_registry' in line:
            output_line = "resource_registry:\n" \
                          "  OS::TripleO::NodeUserData: first-boot.yaml"
        elif 'ComputeExtraConfigPre' in line and \
                ds_opts['dataplane'] == 'ovs_dpdk':
            output_line = '  OS::TripleO::ComputeExtraConfigPre: ' \
                          './ovs-dpdk-preconfig.yaml'

        if ds_opts['sdn_controller'] == 'opendaylight' and \
                'odl_vpp_routing_node' in ds_opts:
            if 'opendaylight::vpp_routing_node' in line:
                output_line = ("    opendaylight::vpp_routing_node: {}.{}"
                               .format(ds_opts['odl_vpp_routing_node'],
                                       ns['domain_name']))
        elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
            if 'NeutronVPPAgentPhysnets' in line:
                # VPP interface tap0 will be used for external network
                # connectivity
                output_line = ("  NeutronVPPAgentPhysnets: "
                               "'datacentre:{},external:tap0'"
                               .format(tenant_nic['Controller']))
        elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
                'dvr') is True:
            if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
                output_line = ''
            elif 'NeutronDhcpAgentsPerNetwork' in line:
                num_control, num_compute = inv.get_node_counts()
                output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
                               .format(num_compute))
            elif 'ComputeServices' in line:
                output_line = ("  ComputeServices:\n"
                               "    - OS::TripleO::Services::NeutronDhcpAgent")

        # SRIOV networks are VLAN based provider networks.  In order to
        # simplify the deployment, nfv_sriov will be the default physnet.
        # VLANs are not needed in advance, and the user will have to create
        # the network specifying the segmentation-id.
        if ds_opts['sriov']:
            if 'NeutronNetworkVLANRanges' in line:
                output_line = ("{},nfv_sriov'".format(line[:-1]))

        if perf:
            for role in 'NovaCompute', 'Controller':
                if role == 'NovaCompute':
                    perf_opts = perf_vpp_comp
                else:
                    perf_opts = perf_vpp_ctrl
                cfg = "{}ExtraConfig".format(role)
                if cfg in line and perf_opts:
                    perf_line = ''
                    if 'main-core' in perf_opts:
                        perf_line += ("\n    fdio::vpp_cpu_main_core: '{}'"
                                      .format(perf_opts['main-core']))
                    if 'corelist-workers' in perf_opts:
                        perf_line += ("\n    "
                                      "fdio::vpp_cpu_corelist_workers: '{}'"
                                      .format(perf_opts['corelist-workers']))
                    if ds_opts['sdn_controller'] == 'opendaylight' and \
                            ds_opts['dataplane'] == 'fdio':
                        if role == 'NovaCompute':
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface',"
                                          "'{}:public-interface']"
                                          .format(tenant_nic[role],
                                                  external_nic[role]))
                        else:
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface']"
                                          .format(tenant_nic[role]))
                    if perf_line:
                        output_line = ("  {}:{}".format(cfg, perf_line))

            if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
                for k, v in OVS_PERF_MAP.items():
                    if k in line and v in perf_ovs_comp:
                        output_line = "  {}: '{}'".format(k, perf_ovs_comp[v])

            # (FIXME) use compute's kernel settings for all nodes for now.
            if perf_kern_comp:
                if 'NovaSchedulerDefaultFilters' in line:
                    output_line = \
                        "  NovaSchedulerDefaultFilters: 'RamFilter," \
                        "ComputeFilter,AvailabilityZoneFilter," \
                        "ComputeCapabilitiesFilter," \
                        "ImagePropertiesFilter,NUMATopologyFilter'"
                elif 'ComputeKernelArgs' in line:
                    kernel_args = ''
                    for k, v in perf_kern_comp.items():
                        kernel_args += "{}={} ".format(k, v)
                    if kernel_args:
                        output_line = "  ComputeKernelArgs: '{}'".\
                            format(kernel_args)

        print(output_line)

    logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))


def generate_ceph_key():
    key = os.urandom(16)  # cephx secrets are 16 random bytes
    header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
    return base64.b64encode(header + key)
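
# generate_ceph_key() emits a cephx-style secret: a little-endian header of
# (type=1, creation time in seconds, nanoseconds=0, key length) packed ahead
# of the 16 random key bytes, with the whole blob base64-encoded.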


def prep_storage_env(ds, ns, virtual, tmp_dir):
    """
    Creates storage environment file for deployment.  Source file is copied
    by undercloud playbook to host.
    :param ds: deploy settings
    :param ns: network settings
    :param virtual: True if virtual deployment
    :param tmp_dir: Apex tmp dir
    :return: None
    """
    ds_opts = ds['deploy_options']
    storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
    if not os.path.isfile(storage_file):
        logging.error("storage-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("storage-environment file not copied from "
                                  "undercloud")
    for line in fileinput.input(storage_file, inplace=True):
        line = line.strip('\n')
        if 'CephClusterFSID' in line:
            print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
        elif 'CephMonKey' in line:
            print("  CephMonKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephAdminKey' in line:
            print("  CephAdminKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephClientKey' in line:
            print("  CephClientKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        else:
            print(line)

    if ds_opts['containers']:
        undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
            'installer_vm']['ip']
        ceph_version = con.CEPH_VERSION_MAP[ds_opts['os_version']]
        docker_image = "{}:8787/ceph/daemon:tag-build-master-" \
                       "{}-centos-7".format(undercloud_admin_ip,
                                            ceph_version)
        ceph_params = {
            'DockerCephDaemonImage': docker_image,
        }
        if not ds['global_params']['ha_enabled']:
            ceph_params['CephPoolDefaultSize'] = 1

        if virtual:
            ceph_params['CephAnsibleExtraConfig'] = {
                'centos_package_dependencies': [],
                'ceph_osd_docker_memory_limit': '1g',
                'ceph_mds_docker_memory_limit': '1g',
            }
            ceph_params['CephPoolDefaultPgNum'] = 32
        if 'ceph_device' in ds_opts and ds_opts['ceph_device']:
            ceph_device = ds_opts['ceph_device']
        else:
            # TODO(trozet): make this DS default after Fraser
            ceph_device = '/dev/loop3'

        ceph_params['CephAnsibleDisksConfig'] = {
            'devices': [ceph_device],
            'osd_scenario': 'collocated'
        }
        utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
    elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
        with open(storage_file, 'a') as fh:
            fh.write('  ExtraConfig:\n')
            fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
                ds_opts['ceph_device']
            ))


def prep_sriov_env(ds, tmp_dir):
    """
    Creates SRIOV environment file for deployment.  Source file is copied by
    undercloud playbook to host.
    :param ds: deploy settings
    :param tmp_dir: Apex tmp dir
    :return: None
    """
    ds_opts = ds['deploy_options']
    sriov_iface = ds_opts['sriov']
    sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
    if not os.path.isfile(sriov_file):
        logging.error("sriov-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("sriov-environment file not copied from "
                                  "undercloud")
    # TODO(rnoriega): Instead of line editing, refactor this code to load
    # yaml file into a dict, edit it and write the file back.
    for line in fileinput.input(sriov_file, inplace=True):
        line = line.strip('\n')
        if 'NovaSchedulerDefaultFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NovaSchedulerAvailableFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NeutronPhysicalDevMappings' in line:
            print("  NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
                  .format(sriov_iface))
        elif 'NeutronSriovNumVFs' in line:
            print("  NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
        elif 'NovaPCIPassthrough' in line:
            print("  NovaPCIPassthrough:")
        elif 'devname' in line:
            print("    - devname: \"{}\"".format(sriov_iface))
        elif 'physical_network' in line:
            print("      physical_network: \"nfv_sriov\"")
        else:
            print(line)


def external_network_cmds(ns, ds):
    """
    Generates external network openstack commands
    :param ns: network settings
    :param ds: deploy settings
    :return: list of commands to configure external network
    """
    ds_opts = ds['deploy_options']
    external_physnet = 'datacentre'
    if ds_opts['dataplane'] == 'fdio' and \
            ds_opts['sdn_controller'] != 'opendaylight':
        external_physnet = 'external'
    if 'external' in ns.enabled_network_list:
        net_config = ns['networks']['external'][0]
        external = True
        pool_start, pool_end = net_config['floating_ip_range']
    else:
        net_config = ns['networks']['admin']
        external = False
        pool_start, pool_end = ns['apex']['networks']['admin'][
            'introspection_range']
    nic_config = net_config['nic_mapping']
    gateway = net_config['gateway']
    cmds = list()
    # create network command
    if nic_config['compute']['vlan'] == 'native':
        ext_type = 'flat'
    else:
        ext_type = "vlan --provider-segment {}".format(nic_config[
                                                       'compute']['vlan'])
    cmds.append("openstack network create external --project service "
                "--external --provider-network-type {} "
                "--provider-physical-network {}"
                .format(ext_type, external_physnet))
    # create subnet command
    cidr = net_config['cidr']
    subnet_cmd = "openstack subnet create external-subnet --project " \
                 "service --network external --no-dhcp --gateway {} " \
                 "--allocation-pool start={},end={} --subnet-range " \
                 "{}".format(gateway, pool_start, pool_end, str(cidr))
    if external and cidr.version == 6:
        subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
                      '--ipv6-address-mode slaac'
    cmds.append(subnet_cmd)
    logging.debug("Neutron external network commands determined "
                  "as: {}".format(cmds))
    return cmds
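
# Illustrative only (addresses are hypothetical, not from the source):
# external_network_cmds() typically returns commands like:
#   openstack network create external --project service --external \
#     --provider-network-type flat --provider-physical-network datacentre
#   openstack subnet create external-subnet --project service --network \
#     external --no-dhcp --gateway 192.0.2.1 \
#     --allocation-pool start=192.0.2.100,end=192.0.2.200 \
#     --subnet-range 192.0.2.0/24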


def create_congress_cmds(overcloud_file):
    drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
    overcloudrc = parsers.parse_overcloudrc(overcloud_file)
    logging.info("Creating congress commands")
    try:
        ds_cfg = [
            "username={}".format(overcloudrc['OS_USERNAME']),
            "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
            "password={}".format(overcloudrc['OS_PASSWORD']),
            "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
        ]
    except KeyError:
        logging.error("Unable to find all keys required for congress in "
                      "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
                      "OS_PASSWORD, OS_AUTH_URL. Please check overcloudrc "
                      "file: {}".format(overcloud_file))
        raise
    cmds = list()
    ds_cfg = '--config ' + ' --config '.join(ds_cfg)

    for driver in drivers:
        if driver == 'doctor':
            cmd = "{} \"{}\"".format(driver, driver)
        else:
            cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
        if driver == 'nova':
            cmd += ' --config api_version="2.34"'
        logging.debug("Congress command created: {}".format(cmd))
        cmds.append(cmd)
    return cmds
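
# Illustrative only (credentials are hypothetical, not from the source): each
# returned fragment is a congress datasource name plus its --config options,
# e.g.
#   nova "nova" --config username=admin --config tenant_name=admin
#     --config password=secret --config auth_url=http://203.0.113.10:5000/v3
#     --config api_version="2.34"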