1 ##############################################################################
2 # Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
19 import apex.builders.overcloud_builder as oc_builder
20 import apex.builders.common_builder as c_builder
22 from apex.common import constants as con
23 from apex.common.exceptions import ApexDeployException
24 from apex.common import parsers
25 from apex.common import utils
26 from apex.virtual import utils as virt_utils
27 from cryptography.hazmat.primitives import serialization as \
29 from cryptography.hazmat.primitives.asymmetric import rsa
30 from cryptography.hazmat.backends import default_backend as \
31 crypto_default_backend
# --- Module-level constants (partial listing) ---
# NOTE(review): the embedded original line numbers skip values, so the
# opening lines of these literals (e.g. the "..._MAP = {" assignments) are
# elided from this view. The fragments below appear to map SDN controllers
# and feature flags to tripleo-heat-template environment files — confirm
# against the complete source.
36 'sfc': 'neutron-sfc-opendaylight.yaml',
37 'vpn': 'neutron-bgpvpn-opendaylight.yaml',
38 'gluon': 'gluon.yaml',
40 'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
41 'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
42 'default': 'neutron-opendaylight-honeycomb.yaml'
44 'l2gw': 'neutron-l2gw-opendaylight.yaml',
45 'sriov': 'neutron-opendaylight-sriov.yaml',
46 'default': 'neutron-opendaylight.yaml',
49 'sfc': 'neutron-onos-sfc.yaml',
50 'default': 'neutron-onos.yaml'
52 'ovn': 'neutron-ml2-ovn.yaml',
54 'vpp': 'neutron-ml2-vpp.yaml',
55 'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
# Presumably the OTHER_FILE_MAP entries (iterated in create_deploy_cmd via
# OTHER_FILE_MAP.items()): feature flag -> enable_*.yaml env file.
60 'tacker': 'enable_tacker.yaml',
61 'congress': 'enable_congress.yaml',
62 'barometer': 'enable_barometer.yaml',
63 'rt_kvm': 'enable_rt_kvm.yaml'
# Presumably the OVS_PERF_MAP entries (iterated in prep_env when
# dataplane == 'ovs_dpdk'): THT parameter name -> performance-settings key.
67 'HostCpusList': 'dpdk_cores',
68 'NeutronDpdkCoreList': 'pmd_cores',
69 'NeutronDpdkSocketMemory': 'socket_memory',
70 'NeutronDpdkMemoryChannels': 'memory_channels'
# RPM names installed into the overcloud image for OVS-NSH (SFC on the ovs
# dataplane) and an ODL netvirt-VPP build; see prep_image.
73 OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
74 OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
75 ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
# systemd unit uploaded into the overcloud image so Ceph OSDs can use a
# persistent /dev/loop3 loop device (see prep_image). Triple-quoted string:
# interior lines are runtime data and must not be edited.
78 LOSETUP_SERVICE = """[Unit]
79 Description=Setup loop devices
84 ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
85 ExecStop=/sbin/losetup -d /dev/loop3
90 WantedBy=multi-user.target
# Recursively collects the SDN THT environment files implied by the deploy
# settings; see the (partially elided) docstring below for the matching
# rules. NOTE(review): interior lines are elided from this listing (line
# numbers skip) — including the docstring quotes, the initialization of
# env_list when it is None, and at least one else-branch — so the control
# flow shown here is incomplete.
94 def build_sdn_env_list(ds, sdn_map, env_list=None):
96 Builds a list of SDN environment files to be used in the deploy cmd.
98 This function recursively searches an sdn_map. First the sdn controller is
99 matched and then the function looks for enabled features for that
100 controller to determine which environment files should be used. By
101 default the feature will be checked if set to true in deploy settings to be
102 added to the list. If a feature does not have a boolean value, then the
103 key and value pair to compare with are checked as a tuple (k,v).
105 :param ds: deploy settings
106 :param sdn_map: SDN map to recursively search
107 :param env_list: recursive var to hold previously found env_list
108 :return: A list of env files
112 for k, v in sdn_map.items():
# A key matches when it is the configured sdn_controller, or when it is a
# feature flag that is present and truthy in the deploy settings.
113 if ds['sdn_controller'] == k or (k in ds and ds[k]):
114 if isinstance(v, dict):
115 # Append default SDN env file first
116 # The assumption is that feature-enabled SDN env files
117 # override and do not conflict with previously set default
119 if ds['sdn_controller'] == k and 'default' in v:
120 env_list.append(os.path.join(con.THT_ENV_DIR,
# Recurse into the nested per-controller feature map.
122 env_list.extend(build_sdn_env_list(ds, v))
123 # check if the value is not a boolean
124 elif isinstance(v, tuple):
# Tuple form: v[1] is the env file; the comparison against v[0] is
# presumably on an elided condition line — TODO confirm.
126 env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
128 env_list.append(os.path.join(con.THT_ENV_DIR, v))
# Nothing matched: fall back to the map's top-level 'default' entry,
# warning if the map has none (elided else appears to hold the warning).
129 if len(env_list) == 0:
131 env_list.append(os.path.join(
132 con.THT_ENV_DIR, sdn_map['default']))
134 logging.warning("Unable to find default file for SDN")
# Maps the first matching SDN env file to its docker-services variant.
# NOTE(review): lines are elided here (docstring quotes and an apparent
# else before the final return) — code reproduced verbatim.
139 def get_docker_sdn_file(ds_opts):
141 Returns docker env file for detected SDN
142 :param ds_opts: deploy options
143 :return: docker THT env file for an SDN
145 # FIXME(trozet): We assume right now there is only one docker SDN file
146 docker_services = con.VALID_DOCKER_SERVICES
147 tht_dir = con.THT_DOCKER_ENV_DIR[ds_opts['os_version']]
148 sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
# Return the docker-directory path for the first SDN env file that has a
# docker-services mapping; a non-None mapping supplies a replacement
# filename, otherwise the same basename is reused from the docker THT dir.
149 for sdn_file in sdn_env_list:
150 sdn_base = os.path.basename(sdn_file)
151 if sdn_base in docker_services:
152 if docker_services[sdn_base] is not None:
153 return os.path.join(tht_dir,
154 docker_services[sdn_base])
156 return os.path.join(tht_dir, sdn_base)
# Builds the "openstack overcloud deploy" command line from deploy/network
# settings and the node inventory, writing it to <tmp_dir>/deploy_command.
# NOTE(review): interior lines are elided in this listing (line numbers
# skip): the docstring, several else-branches, the SSL/virtual env-file
# branches, and the tail of the final `with open(...)` block.
159 def create_deploy_cmd(ds, ns, inv, tmp_dir,
160 virtual, env_file='opnfv-environment.yaml',
163 logging.info("Creating deployment command")
164 deploy_options = ['network-environment.yaml']
166 ds_opts = ds['deploy_options']
168 if ds_opts['containers']:
169 deploy_options.append(os.path.join(con.THT_ENV_DIR,
# HA deployments add the pacemaker environment; the containers branch
# presumably appends a docker-HA variant on the elided line.
172 if ds['global_params']['ha_enabled']:
173 if ds_opts['containers']:
174 deploy_options.append(os.path.join(con.THT_ENV_DIR,
177 deploy_options.append(os.path.join(con.THT_ENV_DIR,
178 'puppet-pacemaker.yaml'))
181 deploy_options.append(env_file)
183 if ds_opts['containers']:
184 deploy_options.append('docker-images.yaml')
185 sdn_docker_file = get_docker_sdn_file(ds_opts)
187 deploy_options.append(sdn_docker_file)
188 deploy_options.append('sdn-images.yaml')
# Non-container path: plain SDN env files from the map.
190 deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)
# Feature flags (tacker/congress/...) each add their enable_*.yaml, from
# the docker or puppet THT directory depending on 'containers'.
192 for k, v in OTHER_FILE_MAP.items():
193 if k in ds_opts and ds_opts[k]:
194 if ds_opts['containers']:
195 deploy_options.append(os.path.join(con.THT_DOCKER_ENV_DIR,
196 "{}.yaml".format(k)))
198 deploy_options.append(os.path.join(con.THT_ENV_DIR, v))
200 if ds_opts['ceph'] and 'csit' not in env_file:
201 prep_storage_env(ds, ns, virtual, tmp_dir)
202 deploy_options.append(os.path.join(con.THT_ENV_DIR,
203 'storage-environment.yaml'))
205 prep_sriov_env(ds, tmp_dir)
207 # Check for 'k8s' here intentionally, as we may support other values
208 # such as openstack/openshift for 'vim' option.
209 if ds_opts['vim'] == 'k8s':
210 deploy_options.append('kubernetes-environment.yaml')
213 deploy_options.append('virtual-environment.yaml')
215 deploy_options.append('baremetal-environment.yaml')
# Sanity-check node counts before building the command.
# NOTE(review): the error message below is missing ": " after
# "compute nodes" — a cosmetic log-format defect (cannot be fixed in a
# comments-only edit).
217 num_control, num_compute = inv.get_node_counts()
218 if num_control == 0 or num_compute == 0:
219 logging.error("Detected 0 control or compute nodes. Control nodes: "
220 "{}, compute nodes{}".format(num_control, num_compute))
221 raise ApexDeployException("Invalid number of control or computes")
222 elif num_control > 1 and not ds['global_params']['ha_enabled']:
224 if platform.machine() == 'aarch64':
225 # aarch64 deploys were not completing in the default 90 mins.
226 # Not sure if this is related to the hardware the OOO support
227 # was developed on or the virtualization support in CentOS
228 # Either way it will probably get better over time as the aarch
229 # support matures in CentOS and deploy time should be tested in
230 # the future so this multiplier can be removed.
231 con.DEPLOY_TIMEOUT *= 2
232 cmd = "openstack overcloud deploy --templates --timeout {} " \
233 .format(con.DEPLOY_TIMEOUT)
235 for option in deploy_options:
236 cmd += " -e {}".format(option)
237 cmd += " --ntp-server {}".format(ns['ntp'][0])
238 cmd += " --control-scale {}".format(num_control)
239 cmd += " --compute-scale {}".format(num_compute)
240 cmd += ' --control-flavor control --compute-flavor compute'
242 cmd += ' --networks-file network_data.yaml'
# Virtual deploys fall back to qemu when nested KVM is unavailable on the
# host (the default libvirt_type assignment appears on an elided line).
245 with open('/sys/module/kvm_intel/parameters/nested') as f:
246 nested_kvm = f.read().strip()
247 if nested_kvm != 'Y':
248 libvirt_type = 'qemu'
249 cmd += ' --libvirt-type {}'.format(libvirt_type)
250 logging.info("Deploy command set: {}".format(cmd))
252 with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
# Customizes the overcloud qcow2 image (via virt-customize command dicts)
# according to deploy settings: proxies, dataplane kernel modules, SFC
# RPMs, FDIO patches, OpenDaylight injection, upstream patches and the
# Ceph loop-device unit. Returns the set of container names that were
# patched. NOTE(review): many interior lines are elided in this listing
# (line numbers skip), e.g. the virt_cmds initialization and several
# append calls — code lines are reproduced verbatim.
257 def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
260 Locates sdn image and preps for deployment.
261 :param ds: deploy settings
262 :param ns: network settings
263 :param img: sdn image
264 :param tmp_dir: dir to store modified sdn image
265 :param root_pw: password to configure for overcloud image
266 :param docker_tag: Docker image tag for RDO version (default None)
267 :param patches: List of patches to apply to overcloud image
270 # TODO(trozet): Come up with a better way to organize this logic in this
272 logging.info("Preparing image: {} for deployment".format(img))
273 if not os.path.isfile(img):
274 logging.error("Missing SDN image {}".format(img))
275 raise ApexDeployException("Missing SDN image file: {}".format(img))
277 ds_opts = ds['deploy_options']
279 sdn = ds_opts['sdn_controller']
280 patched_containers = set()
281 # we need this due to rhbz #1436021
282 # fixed in systemd-219-37.el7
284 logging.info("Neutron openvswitch-agent disabled")
287 "rm -f /etc/systemd/system/multi-user.target.wants/"
288 "neutron-openvswitch-agent.service"},
291 "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
# Propagate the jump host's proxy settings into the image environment.
295 if ns.get('http_proxy', ''):
298 "echo 'http_proxy={}' >> /etc/environment".format(
301 if ns.get('https_proxy', ''):
304 "echo 'https_proxy={}' >> /etc/environment".format(
# Zrpcd (Quagga BGP) startup hook for the vpn/bgpvpn feature, launched at
# boot via rc.local.
308 virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
311 "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
312 "/opt/quagga/etc/init.d/zrpcd_start.sh"})
314 con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
317 con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
318 "init.d/zrpcd_start.sh' /etc/rc.local "})
320 con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
321 "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
322 logging.info("ZRPCD process started")
# DPDK/FDIO dataplanes need vfio_pci / uio_pci_generic loaded at boot:
# write modprobe scripts locally, then upload them into the image.
324 dataplane = ds_opts['dataplane']
325 if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
326 logging.info("Enabling kernel modules for dpdk")
327 # file to module mapping
329 os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
330 os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
332 for mod_file, mod in uio_types.items():
333 with open(mod_file, 'w') as fh:
334 fh.write('#!/bin/bash\n')
335 fh.write('exec /sbin/modprobe {}'.format(mod))
339 {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
341 {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
342 "{}".format(os.path.basename(mod_file))}
# Optionally set the image's root password.
345 pw_op = "password:{}".format(root_pw)
346 virt_cmds.append({con.VIRT_PW: pw_op})
# SFC on the plain OVS dataplane needs the NSH-enabled OVS build.
347 if ds_opts['sfc'] and dataplane == 'ovs':
349 {con.VIRT_RUN_CMD: "yum -y install "
350 "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
351 "{}".format(OVS_NSH_KMOD_RPM)},
352 {con.VIRT_RUN_CMD: "yum downgrade -y "
353 "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
354 "{}".format(OVS_NSH_RPM)}
356 if dataplane == 'fdio':
357 # Patch neutron with using OVS external interface for router
358 # and add generic linux NS interface driver
360 {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
361 "-p1 < neutron-patch-NSDriver.patch"})
364 {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
365 {con.VIRT_RUN_CMD: "yum install -y "
366 "/root/nosdn_vpp_rpms/*.rpm"}
# Work on a copy of the image so the pristine source is preserved.
369 tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
370 shutil.copyfile(img, tmp_oc_image)
371 logging.debug("Temporary overcloud image stored as: {}".format(
374 if sdn == 'opendaylight':
375 undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
376 'installer_vm']['ip']
377 oc_builder.inject_opendaylight(
378 odl_version=ds_opts['odl_version'],
381 uc_ip=undercloud_admin_ip,
382 os_version=ds_opts['os_version'],
383 docker_tag=docker_tag,
386 patched_containers = patched_containers.union({'opendaylight'})
# Apply user-supplied upstream patches against the matching branch.
389 if ds_opts['os_version'] == 'master':
390 branch = ds_opts['os_version']
392 branch = "stable/{}".format(ds_opts['os_version'])
393 logging.info('Adding patches to overcloud')
394 patched_containers = patched_containers.union(
395 c_builder.add_upstream_patches(patches,
396 tmp_oc_image, tmp_dir,
398 uc_ip=undercloud_admin_ip,
399 docker_tag=docker_tag))
400 # if containers with ceph, and no ceph device we need to use a
401 # persistent loop device for Ceph OSDs
402 if docker_tag and ds_opts['ceph_device'] == '/dev/loop3':
403 tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
404 with open(tmp_losetup, 'w') as fh:
405 fh.write(LOSETUP_SERVICE)
407 {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(tmp_losetup)
409 {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size 10G'},
410 {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
411 {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
413 # TODO(trozet) remove this after LP#173474 is fixed
414 dhcp_unit = '/usr/lib/systemd/system/dhcp-interface@.service'
416 {con.VIRT_RUN_CMD: "crudini --del {} Unit "
417 "ConditionPathExists".format(dhcp_unit)})
# Execute all accumulated customizations against the image copy.
418 virt_utils.virt_customize(virt_cmds, tmp_oc_image)
419 logging.info("Overcloud image customization complete")
420 return patched_containers
# NOTE(review): the `def make_ssh_key():` line and docstring quotes are
# elided from this listing; the body below generates an RSA key pair and
# returns it PEM/OpenSSH-serialized as decoded strings. The docstring says
# 1024-bit, but the key size argument itself is on an elided line — TODO
# confirm against the complete source (65537 is the standard public
# exponent either way).
425 Creates public and private ssh keys with 1024 bit RSA encryption
426 :return: private, public key
428 key = rsa.generate_private_key(
429 backend=crypto_default_backend(),
430 public_exponent=65537,
# Private key: PEM-encoded PKCS#8, unencrypted (used verbatim inside the
# opnfv-environment file by prep_env).
434 private_key = key.private_bytes(
435 crypto_serialization.Encoding.PEM,
436 crypto_serialization.PrivateFormat.PKCS8,
437 crypto_serialization.NoEncryption())
# Public key: single-line OpenSSH format.
438 public_key = key.public_key().public_bytes(
439 crypto_serialization.Encoding.OpenSSH,
440 crypto_serialization.PublicFormat.OpenSSH
442 return private_key.decode('utf-8'), public_key.decode('utf-8')
# Rewrites a copy of the opnfv-environment YAML line-by-line (fileinput
# inplace) substituting deployment-specific values: cloud domain, SSH
# keys, VLAN/physnet mappings, FDIO/ODL settings and performance options.
# NOTE(review): many interior lines are elided in this listing (line
# numbers skip) — e.g. the docstring quotes, the perf_* default
# assignments, several else-branches, and the final print/close of the
# fileinput loop — code lines are reproduced verbatim.
445 def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
447 Creates modified opnfv/network environments for deployment
448 :param ds: deploy settings
449 :param ns: network settings
450 :param inv: node inventory
451 :param opnfv_env: file path for opnfv-environment file
452 :param net_env: file path for network-environment file
453 :param tmp_dir: Apex tmp dir
457 logging.info("Preparing opnfv-environment and network-environment files")
458 ds_opts = ds['deploy_options']
# Edit a copy in tmp_dir, never the source environment file.
459 tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
460 shutil.copyfile(opnfv_env, tmp_opnfv_env)
# Per-role NIC lookups used in the FDIO/honeycomb substitutions below.
461 tenant_nic_map = ns['networks']['tenant']['nic_mapping']
463 tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
464 tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
465 external_nic_map = ns['networks']['external'][0]['nic_mapping']
466 external_nic = dict()
467 external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]
# Fresh SSH key pair injected into the environment file below.
470 private_key, public_key = make_ssh_key()
472 # Make easier/faster variables to index in the file editor
473 if 'performance' in ds_opts:
476 if 'vpp' in ds_opts['performance']['Compute']:
477 perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
480 if 'vpp' in ds_opts['performance']['Controller']:
481 perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
486 if 'ovs' in ds_opts['performance']['Compute']:
487 perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
492 if 'kernel' in ds_opts['performance']['Compute']:
493 perf_kern_comp = ds_opts['performance']['Compute']['kernel']
495 perf_kern_comp = None
499 tenant_settings = ns['networks']['tenant']
500 tenant_vlan_enabled = 'tenant' in ns.enabled_network_list and \
501 ns['networks']['tenant'].get('segmentation_type') == 'vlan'
503 # Modify OPNFV environment
504 # TODO: Change to build a dict and outputting yaml rather than parsing
505 for line in fileinput.input(tmp_opnfv_env, inplace=True):
506 line = line.strip('\n')
508 if 'CloudDomain' in line:
509 output_line = " CloudDomain: {}".format(ns['domain_name'])
510 elif 'replace_private_key' in line:
511 output_line = " private_key: |\n"
# NOTE(review): the inner loop below rebinds `line`, shadowing the
# fileinput loop variable — later elif tests in this iteration would see
# the last private-key line. Presumably harmless here because this branch
# assigns output_line itself, but worth confirming.
513 for line in private_key.splitlines():
514 key_out += " {}\n".format(line)
515 output_line += key_out
516 elif 'replace_public_key' in line:
517 output_line = " public_key: '{}'".format(public_key)
518 elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
519 'resource_registry' in line:
520 output_line = "resource_registry:\n" \
521 " OS::TripleO::NodeUserData: first-boot.yaml"
522 elif 'ComputeExtraConfigPre' in line and \
523 ds_opts['dataplane'] == 'ovs_dpdk':
524 output_line = ' OS::TripleO::ComputeExtraConfigPre: ' \
525 './ovs-dpdk-preconfig.yaml'
526 elif 'NeutronNetworkVLANRanges' in line:
528 if tenant_vlan_enabled:
529 if ns['networks']['tenant']['overlay_id_range']:
530 vlan_setting = ns['networks']['tenant']['overlay_id_range']
531 if 'datacentre' not in vlan_setting:
532 vlan_setting += ',datacentre:1:1000'
533 # SRIOV networks are VLAN based provider networks. In order to
534 # simplify the deployment, nfv_sriov will be the default physnet.
535 # VLANs are not needed in advance, and the user will have to create
536 # the network specifying the segmentation-id.
539 vlan_setting += ",nfv_sriov"
541 vlan_setting = "datacentre:1:1000,nfv_sriov"
543 output_line = " NeutronNetworkVLANRanges: " + vlan_setting
# Map every non-datacentre physnet to br-vlan; datacentre stays br-ex.
544 elif 'NeutronBridgeMappings' in line and tenant_vlan_enabled:
545 if tenant_settings['overlay_id_range']:
546 physnets = tenant_settings['overlay_id_range'].split(',')
547 output_line = " NeutronBridgeMappings: "
548 for physnet in physnets:
549 physnet_name = physnet.split(':')[0]
550 if physnet_name != 'datacentre':
551 output_line += "{}:br-vlan,".format(physnet_name)
552 output_line += "datacentre:br-ex"
# Same mapping expressed as ODL provider mappings when ODL is the SDN.
553 elif 'OpenDaylightProviderMappings' in line and tenant_vlan_enabled \
554 and ds_opts['sdn_controller'] == 'opendaylight':
555 if tenant_settings['overlay_id_range']:
556 physnets = tenant_settings['overlay_id_range'].split(',')
557 output_line = " OpenDaylightProviderMappings: "
558 for physnet in physnets:
559 physnet_name = physnet.split(':')[0]
560 if physnet_name != 'datacentre':
561 output_line += "{}:br-vlan,".format(physnet_name)
562 output_line += "datacentre:br-ex"
563 elif 'NeutronNetworkType' in line and tenant_vlan_enabled:
564 output_line = " NeutronNetworkType: vlan\n" \
565 " NeutronTunnelTypes: ''"
567 if ds_opts['sdn_controller'] == 'opendaylight' and \
568 'odl_vpp_routing_node' in ds_opts:
569 if 'opendaylight::vpp_routing_node' in line:
570 output_line = (" opendaylight::vpp_routing_node: {}.{}"
571 .format(ds_opts['odl_vpp_routing_node'],
573 elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
574 if 'NeutronVPPAgentPhysnets' in line:
575 # VPP interface tap0 will be used for external network
577 output_line = (" NeutronVPPAgentPhysnets: "
578 "'datacentre:{},external:tap0'"
579 .format(tenant_nic['Controller']))
580 elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
# The elided branch at 582-584 apparently toggles the Neutron DHCP agent
# service; one DHCP agent is scheduled per compute node below.
582 if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
584 elif 'NeutronDhcpAgentsPerNetwork' in line:
585 num_control, num_compute = inv.get_node_counts()
586 output_line = (" NeutronDhcpAgentsPerNetwork: {}"
587 .format(num_compute))
588 elif 'ComputeServices' in line:
589 output_line = (" ComputeServices:\n"
590 " - OS::TripleO::Services::NeutronDhcpAgent")
# Per-role VPP/FDIO performance settings appended to <Role>ExtraConfig.
593 for role in 'NovaCompute', 'Controller':
594 if role == 'NovaCompute':
595 perf_opts = perf_vpp_comp
597 perf_opts = perf_vpp_ctrl
598 cfg = "{}ExtraConfig".format(role)
599 if cfg in line and perf_opts:
601 if 'main-core' in perf_opts:
602 perf_line += ("\n fdio::vpp_cpu_main_core: '{}'"
603 .format(perf_opts['main-core']))
604 if 'corelist-workers' in perf_opts:
606 "fdio::vpp_cpu_corelist_workers: '{}'"
607 .format(perf_opts['corelist-workers']))
608 if ds_opts['sdn_controller'] == 'opendaylight' and \
609 ds_opts['dataplane'] == 'fdio':
610 if role == 'NovaCompute':
612 "tripleo::profile::base::neutron::"
613 "agents::honeycomb::"
614 "interface_role_mapping:"
615 " ['{}:tenant-interface',"
616 "'{}:public-interface']"
617 .format(tenant_nic[role],
621 "tripleo::profile::base::neutron::"
622 "agents::honeycomb::"
623 "interface_role_mapping:"
624 " ['{}:tenant-interface']"
625 .format(tenant_nic[role]))
627 output_line = (" {}:{}".format(cfg, perf_line))
# OVS-DPDK: translate THT parameter names to configured perf values.
629 if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
630 for k, v in OVS_PERF_MAP.items():
631 if k in line and v in perf_ovs_comp:
632 output_line = " {}: '{}'".format(k, perf_ovs_comp[v])
635 # (FIXME) use compute's kernel settings for all nodes for now.
637 if 'NovaSchedulerDefaultFilters' in line:
639 " NovaSchedulerDefaultFilters: 'RamFilter," \
640 "ComputeFilter,AvailabilityZoneFilter," \
641 "ComputeCapabilitiesFilter," \
642 "ImagePropertiesFilter,NUMATopologyFilter'"
643 elif 'ComputeKernelArgs' in line:
645 for k, v in perf_kern_comp.items():
646 kernel_args += "{}={} ".format(k, v)
648 output_line = " ComputeKernelArgs: '{}'".\
653 logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
# Generates a base64-encoded Ceph auth key: a packed little-endian header
# (type=1, current unix timestamp, 0, key length) followed by the secret.
# NOTE(review): the line assigning `key` (presumably random bytes of a
# fixed length) is elided from this listing — confirm in the full source.
656 def generate_ceph_key():
658 header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
659 return base64.b64encode(header + key)
# Populates the storage-environment.yaml copy in tmp_dir with generated
# Ceph FSID/keys and, for container deploys, ceph-ansible parameters.
# NOTE(review): interior lines are elided in this listing (docstring
# quotes, several closing parens/print tails of the fileinput branches,
# and parts of the ceph_params dict) — code reproduced verbatim.
662 def prep_storage_env(ds, ns, virtual, tmp_dir):
664 Creates storage environment file for deployment. Source file is copied by
665 undercloud playbook to host.
672 ds_opts = ds['deploy_options']
673 storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
674 if not os.path.isfile(storage_file):
675 logging.error("storage-environment file is not in tmp directory: {}. "
676 "Check if file was copied from "
677 "undercloud".format(tmp_dir))
678 raise ApexDeployException("storage-environment file not copied from "
# In-place rewrite: each Ceph identity line gets a freshly generated value.
680 for line in fileinput.input(storage_file, inplace=True):
681 line = line.strip('\n')
682 if 'CephClusterFSID' in line:
683 print(" CephClusterFSID: {}".format(str(uuid.uuid4())))
684 elif 'CephMonKey' in line:
685 print(" CephMonKey: {}".format(generate_ceph_key().decode(
687 elif 'CephAdminKey' in line:
688 print(" CephAdminKey: {}".format(generate_ceph_key().decode(
690 elif 'CephClientKey' in line:
691 print(" CephClientKey: {}".format(generate_ceph_key().decode(
# Container deploys pull the ceph/daemon image from the undercloud's local
# registry (port 8787) and configure ceph-ansible parameters.
696 if ds_opts['containers']:
697 undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
698 'installer_vm']['ip']
699 ceph_version = con.CEPH_VERSION_MAP[ds_opts['os_version']]
700 docker_image = "{}:8787/ceph/daemon:tag-build-master-" \
701 "{}-centos-7".format(undercloud_admin_ip,
704 'DockerCephDaemonImage': docker_image,
707 # max pgs allowed are calculated as num_mons * 200. Therefore we
708 # set number of pgs and pools so that the total will be less:
709 # num_pgs * num_pools * num_osds
710 ceph_params['CephPoolDefaultSize'] = 2
711 ceph_params['CephPoolDefaultPgNum'] = 32
713 ceph_params['CephAnsibleExtraConfig'] = {
714 'centos_package_dependencies': [],
715 'ceph_osd_docker_memory_limit': '1g',
716 'ceph_mds_docker_memory_limit': '1g',
718 ceph_device = ds_opts['ceph_device']
719 ceph_params['CephAnsibleDisksConfig'] = {
720 'devices': [ceph_device],
722 'osd_scenario': 'collocated'
724 utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
725 # TODO(trozet): remove following block as we only support containers now
726 elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
727 with open(storage_file, 'a') as fh:
728 fh.write(' ExtraConfig:\n')
729 fh.write(" ceph::profile::params::osds:{{{}:{{}}}}\n".format(
730 ds_opts['ceph_device']
# Uncomments/fills the SRIOV THT environment file in tmp_dir with the
# interface configured under deploy_options['sriov'].
# NOTE(review): some lines are elided in this listing (docstring quotes,
# the raise continuation, and the final else that echoes unmatched lines).
734 def prep_sriov_env(ds, tmp_dir):
736 Creates SRIOV environment file for deployment. Source file is copied by
737 undercloud playbook to host.
742 ds_opts = ds['deploy_options']
743 sriov_iface = ds_opts['sriov']
744 sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
745 if not os.path.isfile(sriov_file):
746 logging.error("sriov-environment file is not in tmp directory: {}. "
747 "Check if file was copied from "
748 "undercloud".format(tmp_dir))
749 raise ApexDeployException("sriov-environment file not copied from "
751 # TODO(rnoriega): Instead of line editing, refactor this code to load
752 # yaml file into a dict, edit it and write the file back.
753 for line in fileinput.input(sriov_file, inplace=True):
754 line = line.strip('\n')
# line[3:] strips a leading comment marker, re-enabling the scheduler
# filter settings that ship commented out in the template — presumably a
# 3-char "# " style prefix; confirm against the template file.
755 if 'NovaSchedulerDefaultFilters' in line:
756 print(" {}".format(line[3:]))
757 elif 'NovaSchedulerAvailableFilters' in line:
758 print(" {}".format(line[3:]))
# Remaining branches rewrite the SRIOV parameters with the configured
# interface; nfv_sriov is the fixed physnet name (see prep_env VLAN logic).
759 elif 'NeutronPhysicalDevMappings' in line:
760 print(" NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
761 .format(sriov_iface))
762 elif 'NeutronSriovNumVFs' in line:
763 print(" NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
764 elif 'NovaPCIPassthrough' in line:
765 print(" NovaPCIPassthrough:")
766 elif 'devname' in line:
767 print(" - devname: \"{}\"".format(sriov_iface))
768 elif 'physical_network' in line:
769 print(" physical_network: \"nfv_sriov\"")
# Builds the "openstack network/subnet create" commands for the overcloud
# external network, falling back to the admin network when no external
# network is enabled. NOTE(review): some lines are elided in this listing
# (docstring quotes, the cmds initialization, the assignment of the
# `external` flag used at line 814, the flat ext_type default, and the
# final return) — code reproduced verbatim.
774 def external_network_cmds(ns, ds):
776 Generates external network openstack commands
777 :param ns: network settings
778 :param ds: deploy settings
779 :return: list of commands to configure external network
781 ds_opts = ds['deploy_options']
# FDIO without ODL uses the dedicated 'external' physnet (matches the
# NeutronVPPAgentPhysnets mapping written by prep_env).
782 external_physnet = 'datacentre'
783 if ds_opts['dataplane'] == 'fdio' and \
784 ds_opts['sdn_controller'] != 'opendaylight':
785 external_physnet = 'external'
# Pick the external network's settings when enabled; otherwise reuse the
# admin network and its introspection range for the allocation pool.
786 if 'external' in ns.enabled_network_list:
787 net_config = ns['networks']['external'][0]
789 pool_start, pool_end = net_config['floating_ip_range']
791 net_config = ns['networks']['admin']
793 pool_start, pool_end = ns['apex']['networks']['admin'][
794 'introspection_range']
795 nic_config = net_config['nic_mapping']
796 gateway = net_config['gateway']
798 # create network command
# Native VLAN means a flat provider network (elided branch); otherwise a
# vlan provider network with the configured segment.
799 if nic_config['compute']['vlan'] == 'native':
802 ext_type = "vlan --provider-segment {}".format(nic_config[
804 cmds.append("openstack network create external --project service "
805 "--external --provider-network-type {} "
806 "--provider-physical-network {}"
807 .format(ext_type, external_physnet))
808 # create subnet command
809 cidr = net_config['cidr']
810 subnet_cmd = "openstack subnet create external-subnet --project " \
811 "service --network external --no-dhcp --gateway {} " \
812 "--allocation-pool start={},end={} --subnet-range " \
813 "{}".format(gateway, pool_start, pool_end, str(cidr))
# IPv6 external networks get SLAAC addressing options appended.
814 if external and cidr.version == 6:
815 subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
816 '--ipv6-address-mode slaac'
817 cmds.append(subnet_cmd)
818 logging.debug("Neutron external network commands determined "
819 "as: {}".format(cmds))
823 def create_congress_cmds(overcloud_file):
824 drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
825 overcloudrc = parsers.parse_overcloudrc(overcloud_file)
826 logging.info("Creating congress commands")
829 "username={}".format(overcloudrc['OS_USERNAME']),
830 "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
831 "password={}".format(overcloudrc['OS_PASSWORD']),
832 "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
835 logging.error("Unable to find all keys required for congress in "
836 "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
837 "OS_PASSWORD, OS_AUTH_URL. Please check overcloudrc "
838 "file: {}".format(overcloud_file))
841 ds_cfg = '--config ' + ' --config '.join(ds_cfg)
843 for driver in drivers:
844 if driver == 'doctor':
845 cmd = "{} \"{}\"".format(driver, driver)
847 cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
849 cmd += ' --config api_version="2.34"'
850 logging.debug("Congress command created: {}".format(cmd))