1 ##############################################################################
2 # Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
19 import apex.builders.overcloud_builder as oc_builder
20 import apex.builders.common_builder as c_builder
22 from apex.common import constants as con
23 from apex.common.exceptions import ApexDeployException
24 from apex.common import parsers
25 from apex.common import utils
26 from apex.virtual import utils as virt_utils
27 from cryptography.hazmat.primitives import serialization as \
29 from cryptography.hazmat.primitives.asymmetric import rsa
30 from cryptography.hazmat.backends import default_backend as \
31 crypto_default_backend
# NOTE(review): this file appears to be a partially-sampled, line-numbered
# dump; the assignment lines / opening braces of the maps below fall outside
# the visible span. The entries map deploy-settings keys to TripleO heat
# environment files (joined with con.THT_ENV_DIR elsewhere in this file) --
# TODO confirm grouping against the full source.
36 'sfc': 'neutron-sfc-opendaylight.yaml',
37 'vpn': 'neutron-bgpvpn-opendaylight.yaml',
38 'gluon': 'gluon.yaml',
40 'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
41 'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
42 'default': 'neutron-opendaylight-honeycomb.yaml'
44 'l2gw': 'neutron-l2gw-opendaylight.yaml',
45 'sriov': 'neutron-opendaylight-sriov.yaml',
46 'default': 'neutron-opendaylight.yaml',
49 'sfc': 'neutron-onos-sfc.yaml',
50 'default': 'neutron-onos.yaml'
52 'ovn': 'neutron-ml2-ovn.yaml',
54 'vpp': 'neutron-ml2-vpp.yaml',
55 'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
# Feature toggles (booleans in deploy settings) -> "enable" env files;
# consumed by create_deploy_cmd() via OTHER_FILE_MAP.
60 'tacker': 'enable_tacker.yaml',
61 'congress': 'enable_congress.yaml',
62 'barometer': 'enable_barometer.yaml',
63 'rt_kvm': 'enable_rt_kvm.yaml'
# THT parameter name -> key in the OVS performance deploy settings; used by
# prep_env() when rewriting the opnfv environment file.
67 'HostCpusList': 'dpdk_cores',
68 'NeutronDpdkCoreList': 'pmd_cores',
69 'NeutronDpdkSocketMemory': 'socket_memory',
70 'NeutronDpdkMemoryChannels': 'memory_channels'
# RPM names/paths installed into the overcloud image by prep_image():
# OVS with NSH support (for SFC on the ovs dataplane) and the ODL
# netvirt+VPP build. LOSETUP_SERVICE below is a systemd unit written into
# the image to back containerized Ceph OSDs with a loop device when no
# ceph_device is configured.
73 OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
74 OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
75 ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
78 LOSETUP_SERVICE = """[Unit]
79 Description=Setup loop devices
84 ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
85 ExecStop=/sbin/losetup -d /dev/loop3
90 WantedBy=multi-user.target
def build_sdn_env_list(ds, sdn_map, env_list=None):
    """
    Builds a list of SDN environment files to be used in the deploy cmd.

    This function recursively searches an sdn_map. First the sdn controller
    is matched and then the function looks for enabled features for that
    controller to determine which environment files should be used. By
    default the feature will be checked if set to true in deploy settings to
    be added to the list. If a feature does not have a boolean value, then
    the key and value pair to compare with are checked as a tuple (k, v).

    :param ds: deploy settings
    :param sdn_map: SDN map to recursively search
    :param env_list: recursive var to hold previously found env_list
    :return: A list of env files
    """
    # NOTE(review): reconstructed from a sampled listing; the initialization
    # of env_list, the tuple-value comparison, the try/except around the
    # 'default' lookup and the return were outside the visible span.
    if env_list is None:
        env_list = list()
    for k, v in sdn_map.items():
        if ds['sdn_controller'] == k or (k in ds and ds[k]):
            if isinstance(v, dict):
                # Append default SDN env file first
                # The assumption is that feature-enabled SDN env files
                # override and do not conflict with previously set default
                # settings
                if ds['sdn_controller'] == k and 'default' in v:
                    env_list.append(os.path.join(con.THT_ENV_DIR,
                                                 v['default']))
                env_list.extend(build_sdn_env_list(ds, v))
            # check if the value is not a boolean
            elif isinstance(v, tuple):
                if ds[k] == v[0]:
                    env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
            else:
                env_list.append(os.path.join(con.THT_ENV_DIR, v))
    if len(env_list) == 0:
        try:
            env_list.append(os.path.join(
                con.THT_ENV_DIR, sdn_map['default']))
        except KeyError:
            logging.warning("Unable to find default file for SDN")
    return env_list
def get_docker_sdn_file(ds_opts):
    """
    Returns docker env file for detected SDN
    :param ds_opts: deploy options
    :return: docker THT env file for an SDN, or None if no SDN env file
             matches a known docker service
    """
    # FIXME(trozet): We assume right now there is only one docker SDN file
    docker_services = con.VALID_DOCKER_SERVICES
    sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
    for sdn_file in sdn_env_list:
        sdn_base = os.path.basename(sdn_file)
        if sdn_base in docker_services:
            # a mapped value overrides the filename; None means "same name"
            if docker_services[sdn_base] is not None:
                return os.path.join(con.THT_DOCKER_ENV_DIR,
                                    docker_services[sdn_base])
            else:
                return os.path.join(con.THT_DOCKER_ENV_DIR, sdn_base)
# Builds the "openstack overcloud deploy" command string and writes it to
# <tmp_dir>/deploy_command. Environment files are collected based on deploy
# settings (containers, HA, SDN, other features, storage, SRIOV, virtual vs
# baremetal) and node counts are validated from the inventory.
# NOTE(review): span is sampled -- several original lines are missing, so
# some branches below are visibly incomplete.
158 def create_deploy_cmd(ds, ns, inv, tmp_dir,
159 virtual, env_file='opnfv-environment.yaml',
162 logging.info("Creating deployment command")
163 deploy_options = ['network-environment.yaml']
165 ds_opts = ds['deploy_options']
167 if ds_opts['containers']:
168 deploy_options.append(os.path.join(con.THT_ENV_DIR,
171 if ds['global_params']['ha_enabled']:
172 if ds_opts['containers']:
173 deploy_options.append(os.path.join(con.THT_ENV_DIR,
176 deploy_options.append(os.path.join(con.THT_ENV_DIR,
177 'puppet-pacemaker.yaml'))
180 deploy_options.append(env_file)
182 if ds_opts['containers']:
183 deploy_options.append('docker-images.yaml')
184 sdn_docker_file = get_docker_sdn_file(ds_opts)
186 deploy_options.append(sdn_docker_file)
187 deploy_options.append('sdn-images.yaml')
189 deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)
# Append feature "enable" env files (tacker/congress/barometer/rt_kvm).
191 for k, v in OTHER_FILE_MAP.items():
192 if k in ds_opts and ds_opts[k]:
193 if ds_opts['containers']:
194 deploy_options.append(os.path.join(con.THT_DOCKER_ENV_DIR,
195 "{}.yaml".format(k)))
197 deploy_options.append(os.path.join(con.THT_ENV_DIR, v))
200 prep_storage_env(ds, ns, virtual, tmp_dir)
201 deploy_options.append(os.path.join(con.THT_ENV_DIR,
202 'storage-environment.yaml'))
204 prep_sriov_env(ds, tmp_dir)
207 deploy_options.append('virtual-environment.yaml')
209 deploy_options.append('baremetal-environment.yaml')
# Sanity-check node counts from the inventory before building the command.
211 num_control, num_compute = inv.get_node_counts()
212 if num_control == 0 or num_compute == 0:
213 logging.error("Detected 0 control or compute nodes. Control nodes: "
214 "{}, compute nodes{}".format(num_control, num_compute))
215 raise ApexDeployException("Invalid number of control or computes")
216 elif num_control > 1 and not ds['global_params']['ha_enabled']:
218 if platform.machine() == 'aarch64':
219 # aarch64 deploys were not completing in the default 90 mins.
220 # Not sure if this is related to the hardware the OOO support
221 # was developed on or the virtualization support in CentOS
222 # Either way it will probably get better over time as the aarch
223 # support matures in CentOS and deploy time should be tested in
224 # the future so this multiplier can be removed.
225 con.DEPLOY_TIMEOUT *= 2
226 cmd = "openstack overcloud deploy --templates --timeout {} " \
227 .format(con.DEPLOY_TIMEOUT)
229 for option in deploy_options:
230 cmd += " -e {}".format(option)
231 cmd += " --ntp-server {}".format(ns['ntp'][0])
232 cmd += " --control-scale {}".format(num_control)
233 cmd += " --compute-scale {}".format(num_compute)
234 cmd += ' --control-flavor control --compute-flavor compute'
236 cmd += ' --networks-file network_data.yaml'
# Virtual deploys: fall back to qemu when nested KVM is not enabled on host.
239 with open('/sys/module/kvm_intel/parameters/nested') as f:
240 nested_kvm = f.read().strip()
241 if nested_kvm != 'Y':
242 libvirt_type = 'qemu'
243 cmd += ' --libvirt-type {}'.format(libvirt_type)
244 logging.info("Deploy command set: {}".format(cmd))
246 with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
# Customizes the overcloud qcow2 image (applied at the end via
# virt_utils.virt_customize) for the selected SDN/dataplane: proxy env vars,
# DPDK kernel-module autoload scripts, root password, SFC NSH RPMs,
# FDIO/VPP patches, OpenDaylight install/upgrade, upstream patches, and a
# loop-device systemd unit for containerized Ceph without a ceph_device.
# NOTE(review): span is sampled -- several original lines (e.g. the
# virt_cmds list initialization) are missing from view.
251 def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
252 patches=None, upstream=False):
254 Locates sdn image and preps for deployment.
255 :param ds: deploy settings
256 :param ns: network settings
257 :param img: sdn image
258 :param tmp_dir: dir to store modified sdn image
259 :param root_pw: password to configure for overcloud image
260 :param docker_tag: Docker image tag for RDO version (default None)
261 :param patches: List of patches to apply to overcloud image
262 :param upstream: (boolean) Indicates if upstream deployment or not
265 # TODO(trozet): Come up with a better way to organize this logic in this
267 logging.info("Preparing image: {} for deployment".format(img))
268 if not os.path.isfile(img):
269 logging.error("Missing SDN image {}".format(img))
270 raise ApexDeployException("Missing SDN image file: {}".format(img))
272 ds_opts = ds['deploy_options']
274 sdn = ds_opts['sdn_controller']
275 patched_containers = set()
276 # we need this due to rhbz #1436021
277 # fixed in systemd-219-37.el7
279 logging.info("Neutron openvswitch-agent disabled")
282 "rm -f /etc/systemd/system/multi-user.target.wants/"
283 "neutron-openvswitch-agent.service"},
286 "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
290 if ns.get('http_proxy', ''):
293 "echo 'http_proxy={}' >> /etc/environment".format(
296 if ns.get('https_proxy', ''):
299 "echo 'https_proxy={}' >> /etc/environment".format(
303 virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
306 "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
307 "/opt/quagga/etc/init.d/zrpcd_start.sh"})
309 con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
312 con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
313 "init.d/zrpcd_start.sh' /etc/rc.local "})
315 con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
316 "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
317 logging.info("ZRPCD process started")
# Dataplane-specific tweaks: DPDK module autoload, SFC RPMs, FDIO patches.
319 dataplane = ds_opts['dataplane']
320 if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
321 logging.info("Enabling kernel modules for dpdk")
322 # file to module mapping
324 os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
325 os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
327 for mod_file, mod in uio_types.items():
328 with open(mod_file, 'w') as fh:
329 fh.write('#!/bin/bash\n')
330 fh.write('exec /sbin/modprobe {}'.format(mod))
334 {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
336 {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
337 "{}".format(os.path.basename(mod_file))}
340 pw_op = "password:{}".format(root_pw)
341 virt_cmds.append({con.VIRT_PW: pw_op})
342 if ds_opts['sfc'] and dataplane == 'ovs':
344 {con.VIRT_RUN_CMD: "yum -y install "
345 "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
346 "{}".format(OVS_NSH_KMOD_RPM)},
347 {con.VIRT_RUN_CMD: "yum downgrade -y "
348 "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
349 "{}".format(OVS_NSH_RPM)}
351 if dataplane == 'fdio':
352 # Patch neutron with using OVS external interface for router
353 # and add generic linux NS interface driver
355 {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
356 "-p1 < neutron-patch-NSDriver.patch"})
359 {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
360 {con.VIRT_RUN_CMD: "yum install -y "
361 "/root/nosdn_vpp_rpms/*.rpm"}
# Work on a copy of the image in tmp_dir; the source image is untouched.
364 tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
365 shutil.copyfile(img, tmp_oc_image)
366 logging.debug("Temporary overcloud image stored as: {}".format(
369 # TODO (trozet): remove this if block after Fraser
370 if sdn == 'opendaylight' and not upstream:
371 if ds_opts['odl_version'] != con.DEFAULT_ODL_VERSION:
373 {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
374 {con.VIRT_RUN_CMD: "rm -rf /etc/puppet/modules/opendaylight"},
375 {con.VIRT_RUN_CMD: "cd /etc/puppet/modules && tar xzf "
376 "/root/puppet-opendaylight-"
377 "{}.tar.gz".format(ds_opts['odl_version'])}
379 if ds_opts['odl_version'] == 'master':
381 {con.VIRT_RUN_CMD: "rpm -ivh --nodeps /root/{}/*".format(
382 ds_opts['odl_version'])}
386 {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
387 ds_opts['odl_version'])}
390 elif sdn == 'opendaylight' and 'odl_vpp_netvirt' in ds_opts \
391 and ds_opts['odl_vpp_netvirt']:
393 {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
394 {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
395 ODL_NETVIRT_VPP_RPM)}
397 elif sdn == 'opendaylight':
398 undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
399 'installer_vm']['ip']
400 oc_builder.inject_opendaylight(
401 odl_version=ds_opts['odl_version'],
404 uc_ip=undercloud_admin_ip,
405 os_version=ds_opts['os_version'],
406 docker_tag=docker_tag,
409 patched_containers = patched_containers.union({'opendaylight'})
413 {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum update -y "
415 {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum downgrade -y "
420 if ds_opts['os_version'] == 'master':
421 branch = ds_opts['os_version']
423 branch = "stable/{}".format(ds_opts['os_version'])
424 logging.info('Adding patches to overcloud')
425 patched_containers = patched_containers.union(
426 c_builder.add_upstream_patches(patches,
427 tmp_oc_image, tmp_dir,
429 uc_ip=undercloud_admin_ip,
430 docker_tag=docker_tag))
431 # if containers with ceph, and no ceph device we need to use a
432 # persistent loop device for Ceph OSDs
433 if docker_tag and not ds_opts.get('ceph_device', None):
434 tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
435 with open(tmp_losetup, 'w') as fh:
436 fh.write(LOSETUP_SERVICE)
438 {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(tmp_losetup)
440 {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size 10G'},
441 {con.VIRT_RUN_CMD: 'mkfs.ext4 -F /srv/data.img'},
442 {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
443 {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
# Apply all accumulated libguestfs commands to the copied image in one pass.
445 virt_utils.virt_customize(virt_cmds, tmp_oc_image)
446 logging.info("Overcloud image customization complete")
447 return patched_containers
def make_ssh_key():
    """
    Creates public and private ssh keys with 1024 bit RSA encryption
    :return: private, public key
    """
    # NOTE(review): the def header and key_size argument were outside the
    # visible span of this sampled listing; key_size follows the docstring's
    # stated 1024-bit strength -- confirm against full source.
    key = rsa.generate_private_key(
        backend=crypto_default_backend(),
        public_exponent=65537,
        key_size=1024
    )
    # PEM/PKCS8 private key, OpenSSH-format public key, both as str
    private_key = key.private_bytes(
        crypto_serialization.Encoding.PEM,
        crypto_serialization.PrivateFormat.PKCS8,
        crypto_serialization.NoEncryption())
    public_key = key.public_key().public_bytes(
        crypto_serialization.Encoding.OpenSSH,
        crypto_serialization.PublicFormat.OpenSSH
    )
    return private_key.decode('utf-8'), public_key.decode('utf-8')
# Rewrites a copy of the opnfv-environment file in tmp_dir line by line
# (fileinput inplace mode: print() output replaces the file contents),
# injecting cloud domain, generated SSH keys, performance/VPP/OVS-DPDK
# settings, SDN-specific parameters and SRIOV physnet additions.
# NOTE(review): span is sampled -- several original lines (e.g. perf flag
# initialization and the final print of output_line) are missing from view.
472 def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
474 Creates modified opnfv/network environments for deployment
475 :param ds: deploy settings
476 :param ns: network settings
477 :param inv: node inventory
478 :param opnfv_env: file path for opnfv-environment file
479 :param net_env: file path for network-environment file
480 :param tmp_dir: Apex tmp dir
484 logging.info("Preparing opnfv-environment and network-environment files")
485 ds_opts = ds['deploy_options']
486 tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
487 shutil.copyfile(opnfv_env, tmp_opnfv_env)
488 tenant_nic_map = ns['networks']['tenant']['nic_mapping']
490 tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
491 tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
492 external_nic_map = ns['networks']['external'][0]['nic_mapping']
493 external_nic = dict()
494 external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]
497 private_key, public_key = make_ssh_key()
499 # Make easier/faster variables to index in the file editor
500 if 'performance' in ds_opts:
503 if 'vpp' in ds_opts['performance']['Compute']:
504 perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
507 if 'vpp' in ds_opts['performance']['Controller']:
508 perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
513 if 'ovs' in ds_opts['performance']['Compute']:
514 perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
519 if 'kernel' in ds_opts['performance']['Compute']:
520 perf_kern_comp = ds_opts['performance']['Compute']['kernel']
522 perf_kern_comp = None
526 # Modify OPNFV environment
527 # TODO: Change to build a dict and outputting yaml rather than parsing
528 for line in fileinput.input(tmp_opnfv_env, inplace=True):
529 line = line.strip('\n')
531 if 'CloudDomain' in line:
532 output_line = " CloudDomain: {}".format(ns['domain_name'])
533 elif 'replace_private_key' in line:
534 output_line = " private_key: |\n"
536 for line in private_key.splitlines():
537 key_out += " {}\n".format(line)
538 output_line += key_out
539 elif 'replace_public_key' in line:
540 output_line = " public_key: '{}'".format(public_key)
541 elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
542 'resource_registry' in line:
543 output_line = "resource_registry:\n" \
544 " OS::TripleO::NodeUserData: first-boot.yaml"
545 elif 'ComputeExtraConfigPre' in line and \
546 ds_opts['dataplane'] == 'ovs_dpdk':
547 output_line = ' OS::TripleO::ComputeExtraConfigPre: ' \
548 './ovs-dpdk-preconfig.yaml'
550 if ds_opts['sdn_controller'] == 'opendaylight' and \
551 'odl_vpp_routing_node' in ds_opts:
552 if 'opendaylight::vpp_routing_node' in line:
553 output_line = (" opendaylight::vpp_routing_node: {}.{}"
554 .format(ds_opts['odl_vpp_routing_node'],
556 elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
557 if 'NeutronVPPAgentPhysnets' in line:
558 output_line = (" NeutronVPPAgentPhysnets: 'datacentre:{}'".
559 format(tenant_nic['Controller']))
560 elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
562 if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
564 elif 'NeutronDhcpAgentsPerNetwork' in line:
565 num_control, num_compute = inv.get_node_counts()
566 output_line = (" NeutronDhcpAgentsPerNetwork: {}"
567 .format(num_compute))
568 elif 'ComputeServices' in line:
569 output_line = (" ComputeServices:\n"
570 " - OS::TripleO::Services::NeutronDhcpAgent")
571 # SRIOV networks are VLAN based provider networks. In order to simplify
572 # the deployment, nfv_sriov will be the default physnet. VLANs are not
573 # needed in advance, and the user will have to create the network
574 # specifying the segmentation-id.
576 if 'NeutronNetworkVLANRanges' in line:
577 output_line = ("{},nfv_sriov'".format(line[:-1]))
# VPP/fdio performance options are emitted per role into <Role>ExtraConfig.
580 for role in 'NovaCompute', 'Controller':
581 if role == 'NovaCompute':
582 perf_opts = perf_vpp_comp
584 perf_opts = perf_vpp_ctrl
585 cfg = "{}ExtraConfig".format(role)
586 if cfg in line and perf_opts:
588 if 'main-core' in perf_opts:
589 perf_line += ("\n fdio::vpp_cpu_main_core: '{}'"
590 .format(perf_opts['main-core']))
591 if 'corelist-workers' in perf_opts:
593 "fdio::vpp_cpu_corelist_workers: '{}'"
594 .format(perf_opts['corelist-workers']))
595 if ds_opts['sdn_controller'] == 'opendaylight' and \
596 ds_opts['dataplane'] == 'fdio':
597 if role == 'NovaCompute':
599 "tripleo::profile::base::neutron::"
600 "agents::honeycomb::"
601 "interface_role_mapping:"
602 " ['{}:tenant-interface',"
603 "'{}:public-interface']"
604 .format(tenant_nic[role],
608 "tripleo::profile::base::neutron::"
609 "agents::honeycomb::"
610 "interface_role_mapping:"
611 " ['{}:tenant-interface']"
612 .format(tenant_nic[role]))
614 output_line = (" {}:{}".format(cfg, perf_line))
616 if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
617 for k, v in OVS_PERF_MAP.items():
618 if k in line and v in perf_ovs_comp:
619 output_line = " {}: '{}'".format(k, perf_ovs_comp[v])
622 # (FIXME) use compute's kernel settings for all nodes for now.
624 if 'NovaSchedulerDefaultFilters' in line:
626 " NovaSchedulerDefaultFilters: 'RamFilter," \
627 "ComputeFilter,AvailabilityZoneFilter," \
628 "ComputeCapabilitiesFilter," \
629 "ImagePropertiesFilter,NUMATopologyFilter'"
630 elif 'ComputeKernelArgs' in line:
632 for k, v in perf_kern_comp.items():
633 kernel_args += "{}={} ".format(k, v)
635 output_line = " ComputeKernelArgs: '{}'".\
640 logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
def generate_ceph_key():
    """
    Generates a random cephx secret.

    Layout: a little-endian packed header (type=1, creation timestamp,
    zero, and the 16-byte key length) followed by 16 random bytes,
    base64-encoded.
    :return: base64-encoded key (bytes)
    """
    # NOTE(review): the key initialization line was outside the visible
    # span of this sampled listing; os.urandom(16) matches the 16-byte
    # length packed into the header -- confirm against full source.
    key = os.urandom(16)
    header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
    return base64.b64encode(header + key)
# Rewrites <tmp_dir>/storage-environment.yaml in place with a fresh cluster
# FSID and cephx keys, then (for containerized deploys) appends Ceph docker
# parameters via utils.edit_tht_env, or (non-containerized with an explicit
# ceph_device) appends an ExtraConfig OSD mapping.
# NOTE(review): span is sampled -- several original lines (e.g. the
# ceph_params initialization and else branches) are missing from view.
649 def prep_storage_env(ds, ns, virtual, tmp_dir):
651 Creates storage environment file for deployment. Source file is copied by
652 undercloud playbook to host.
659 ds_opts = ds['deploy_options']
660 storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
661 if not os.path.isfile(storage_file):
662 logging.error("storage-environment file is not in tmp directory: {}. "
663 "Check if file was copied from "
664 "undercloud".format(tmp_dir))
665 raise ApexDeployException("storage-environment file not copied from "
# Regenerate cluster FSID and cephx keys in place (fileinput inplace mode:
# print() output replaces the file contents).
667 for line in fileinput.input(storage_file, inplace=True):
668 line = line.strip('\n')
669 if 'CephClusterFSID' in line:
670 print(" CephClusterFSID: {}".format(str(uuid.uuid4())))
671 elif 'CephMonKey' in line:
672 print(" CephMonKey: {}".format(generate_ceph_key().decode(
674 elif 'CephAdminKey' in line:
675 print(" CephAdminKey: {}".format(generate_ceph_key().decode(
677 elif 'CephClientKey' in line:
678 print(" CephClientKey: {}".format(generate_ceph_key().decode(
683 if ds_opts['containers']:
684 undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
685 'installer_vm']['ip']
686 ceph_version = con.CEPH_VERSION_MAP[ds_opts['os_version']]
687 docker_image = "{}:8787/ceph/daemon:tag-build-master-" \
688 "{}-centos-7".format(undercloud_admin_ip,
691 'DockerCephDaemonImage': docker_image,
693 if not ds['global_params']['ha_enabled']:
694 ceph_params['CephPoolDefaultSize'] = 1
697 ceph_params['CephAnsibleExtraConfig'] = {
698 'centos_package_dependencies': [],
699 'ceph_osd_docker_memory_limit': '1g',
700 'ceph_mds_docker_memory_limit': '1g',
702 ceph_params['CephPoolDefaultPgNum'] = 32
703 if 'ceph_device' in ds_opts and ds_opts['ceph_device']:
704 ceph_device = ds_opts['ceph_device']
706 # TODO(trozet): make this DS default after Fraser
707 ceph_device = '/dev/loop3'
709 ceph_params['CephAnsibleDisksConfig'] = {
710 'devices': [ceph_device],
712 'osd_scenario': 'collocated'
714 utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
715 elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
716 with open(storage_file, 'a') as fh:
717 fh.write(' ExtraConfig:\n')
718 fh.write(" ceph::profile::params::osds:{{{}:{{}}}}\n".format(
719 ds_opts['ceph_device']
def prep_sriov_env(ds, tmp_dir):
    """
    Creates SRIOV environment file for deployment. Source file is copied by
    undercloud playbook to host.

    :param ds: deploy settings
    :param tmp_dir: Apex tmp dir
    :return: None
    """
    ds_opts = ds['deploy_options']
    sriov_iface = ds_opts['sriov']
    sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
    if not os.path.isfile(sriov_file):
        logging.error("sriov-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("sriov-environment file not copied from "
                                  "undercloud")
    # TODO(rnoriega): Instead of line editing, refactor this code to load
    # yaml file into a dict, edit it and write the file back.
    # fileinput inplace mode: print() output replaces the file contents.
    for line in fileinput.input(sriov_file, inplace=True):
        line = line.strip('\n')
        if 'NovaSchedulerDefaultFilters' in line:
            # uncomment the templated line (strip leading comment marker)
            print(" {}".format(line[3:]))
        elif 'NovaSchedulerAvailableFilters' in line:
            print(" {}".format(line[3:]))
        elif 'NeutronPhysicalDevMappings' in line:
            print(" NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
                  .format(sriov_iface))
        elif 'NeutronSriovNumVFs' in line:
            print(" NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
        elif 'NovaPCIPassthrough' in line:
            print(" NovaPCIPassthrough:")
        elif 'devname' in line:
            print(" - devname: \"{}\"".format(sriov_iface))
        elif 'physical_network' in line:
            print(" physical_network: \"nfv_sriov\"")
        else:
            # reconstructed: pass unmatched lines through unchanged
            print(line)
def external_network_cmds(ns):
    """
    Generates external network openstack commands
    :param ns: network settings
    :return: list of commands to configure external network
    """
    # NOTE(review): reconstructed from a sampled listing; the external
    # flag assignments, else branches, cmds initialization and return
    # were outside the visible span.
    if 'external' in ns.enabled_network_list:
        net_config = ns['networks']['external'][0]
        external = True
        pool_start, pool_end = net_config['floating_ip_range']
    else:
        # fall back to the admin network's introspection range
        logging.info("External network not enabled, using admin network")
        net_config = ns['networks']['admin']
        external = False
        pool_start, pool_end = ns['apex']['networks']['admin'][
            'introspection_range']
    nic_config = net_config['nic_mapping']
    gateway = net_config['gateway']
    cmds = list()
    # create network command
    if nic_config['compute']['vlan'] == 'native':
        ext_type = 'flat'
    else:
        ext_type = "vlan --provider-segment {}".format(nic_config[
            'compute']['vlan'])
    cmds.append("openstack network create external --project service "
                "--external --provider-network-type {} "
                "--provider-physical-network datacentre".format(ext_type))
    # create subnet command
    cidr = net_config['cidr']
    subnet_cmd = "openstack subnet create external-subnet --project " \
                 "service --network external --no-dhcp --gateway {} " \
                 "--allocation-pool start={},end={} --subnet-range " \
                 "{}".format(gateway, pool_start, pool_end, str(cidr))
    if external and cidr.version == 6:
        subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
                      '--ipv6-address-mode slaac'
    cmds.append(subnet_cmd)
    logging.debug("Neutron external network commands determined "
                  "as: {}".format(cmds))
    return cmds
# Builds "congress datasource create"-style argument strings, one per
# driver, using credentials parsed from the overcloudrc file.
# NOTE(review): span is sampled -- the try/except around the overcloudrc
# key lookups, the cmds list handling and the condition guarding the
# api_version addition are missing from view.
805 def create_congress_cmds(overcloud_file):
806 drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
807 overcloudrc = parsers.parse_overcloudrc(overcloud_file)
808 logging.info("Creating congress commands")
811 "username={}".format(overcloudrc['OS_USERNAME']),
812 "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
813 "password={}".format(overcloudrc['OS_PASSWORD']),
814 "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
817 logging.error("Unable to find all keys required for congress in "
818 "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
819 "OS_PASSWORD, OS_AUTH_URL. Please check overcloudrc "
820 "file: {}".format(overcloud_file))
823 ds_cfg = '--config ' + ' --config '.join(ds_cfg)
# One command per driver; doctor takes no --config credential arguments.
825 for driver in drivers:
826 if driver == 'doctor':
827 cmd = "{} \"{}\"".format(driver, driver)
829 cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
831 cmd += ' --config api_version="2.34"'
832 logging.debug("Congress command created: {}".format(cmd))