##############################################################################
# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

import base64
import fileinput
import logging
import os
import platform
import shutil
import struct
import time
import uuid

import apex.builders.overcloud_builder as oc_builder
import apex.builders.common_builder as c_builder
from apex.common import constants as con
from apex.common.exceptions import ApexDeployException
from apex.common import parsers
from apex.common import utils
from apex.virtual import utils as virt_utils
from cryptography.hazmat.primitives import serialization as \
    crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend as \
    crypto_default_backend

SDN_FILE_MAP = {
    'opendaylight': {
        'sfc': 'neutron-sfc-opendaylight.yaml',
        'vpn': 'neutron-bgpvpn-opendaylight.yaml',
        'gluon': 'gluon.yaml',
        'vpp': {
            'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
            'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
            'default': 'neutron-opendaylight-honeycomb.yaml'
        },
        'l2gw': 'neutron-l2gw-opendaylight.yaml',
        'sriov': 'neutron-opendaylight-sriov.yaml',
        'default': 'neutron-opendaylight.yaml',
    },
    'onos': {
        'sfc': 'neutron-onos-sfc.yaml',
        'default': 'neutron-onos.yaml'
    },
    'ovn': 'neutron-ml2-ovn.yaml',
    False: {
        'vpp': 'neutron-ml2-vpp.yaml',
        'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
    }
}

OTHER_FILE_MAP = {
    'tacker': 'enable_tacker.yaml',
    'congress': 'enable_congress.yaml',
    'barometer': 'enable_barometer.yaml',
    'rt_kvm': 'enable_rt_kvm.yaml'
}

OVS_PERF_MAP = {
    'HostCpusList': 'dpdk_cores',
    'NeutronDpdkCoreList': 'pmd_cores',
    'NeutronDpdkSocketMemory': 'socket_memory',
    'NeutronDpdkMemoryChannels': 'memory_channels'
}

OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
                      ".noarch.rpm"

LOOP_DEVICE_SIZE = "10G"

LOSETUP_SERVICE = """[Unit]
Description=Setup loop devices
[Service]
Type=oneshot
ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
ExecStop=/sbin/losetup -d /dev/loop3
RemainAfterExit=yes
[Install]
WantedBy=multi-user.target
"""


def build_sdn_env_list(ds, sdn_map, env_list=None):
    """
    Builds a list of SDN environment files to be used in the deploy cmd.

    This function recursively searches an sdn_map. First the SDN controller
    is matched, and then the function looks for enabled features for that
    controller to determine which environment files should be used. By
    default a feature is added to the list when it is set to true in the
    deploy settings. If a feature does not have a boolean value, then the
    key and value pair to compare against are given as a tuple (k, v).

    :param ds: deploy settings
    :param sdn_map: SDN map to recursively search
    :param env_list: recursive var to hold previously found env_list
    :return: A list of env files
    """
    if env_list is None:
        env_list = list()
    for k, v in sdn_map.items():
        if ds['sdn_controller'] == k or (k in ds and ds[k]):
            if isinstance(v, dict):
                # Append default SDN env file first
                # The assumption is that feature-enabled SDN env files
                # override and do not conflict with previously set default
                # settings
                if ds['sdn_controller'] == k and 'default' in v:
                    env_list.append(os.path.join(con.THT_ENV_DIR,
                                                 v['default']))
                env_list.extend(build_sdn_env_list(ds, v))
            # check if the value is not a boolean
            elif isinstance(v, tuple):
                if ds[k] == v[0]:
                    env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
            else:
                env_list.append(os.path.join(con.THT_ENV_DIR, v))
    if len(env_list) == 0:
        try:
            env_list.append(os.path.join(
                con.THT_ENV_DIR, sdn_map['default']))
        except KeyError:
            logging.warning("Unable to find default file for SDN")

    return env_list
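
# Illustrative sketch (not executed): with deploy options such as
#     {'sdn_controller': 'opendaylight', 'sfc': True}
# the map walk above appends the controller's default env file first and
# then the enabled feature file:
#     build_sdn_env_list(ds_opts, SDN_FILE_MAP)
#     # -> [<THT_ENV_DIR>/neutron-opendaylight.yaml,
#     #     <THT_ENV_DIR>/neutron-sfc-opendaylight.yaml]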


def get_docker_sdn_file(ds_opts):
    """
    Returns docker env file for detected SDN
    :param ds_opts: deploy options
    :return: docker THT env file for an SDN
    """
    # FIXME(trozet): We assume right now there is only one docker SDN file
    docker_services = con.VALID_DOCKER_SERVICES
    tht_dir = con.THT_DOCKER_ENV_DIR[ds_opts['os_version']]
    sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
    for sdn_file in sdn_env_list:
        sdn_base = os.path.basename(sdn_file)
        if sdn_base in docker_services:
            if docker_services[sdn_base] is not None:
                return os.path.join(tht_dir,
                                    docker_services[sdn_base])
            else:
                return os.path.join(tht_dir, sdn_base)
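
# Illustrative sketch (not executed): con.VALID_DOCKER_SERVICES is assumed
# to map a base env file name to a docker-specific replacement, or to None
# when the same file name also exists under the docker THT dir, e.g.:
#     {'neutron-opendaylight.yaml': None}
#     # get_docker_sdn_file(...) would then return
#     # <THT_DOCKER_ENV_DIR[os_version]>/neutron-opendaylight.yaml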


def create_deploy_cmd(ds, ns, inv, tmp_dir,
                      virtual, env_file='opnfv-environment.yaml',
                      net_data=False):
    """
    Creates the overcloud deploy command and writes it to tmp_dir.
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param tmp_dir: Apex tmp dir
    :param virtual: bool indicating virtual deployment
    :param env_file: opnfv environment file to include
    :param net_data: bool indicating composable networks are used
    :return: the deploy command string
    """
    logging.info("Creating deployment command")
    deploy_options = ['network-environment.yaml']

    ds_opts = ds['deploy_options']

    if ds_opts['containers']:
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'docker.yaml'))

    if ds['global_params']['ha_enabled']:
        if ds_opts['containers']:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'docker-ha.yaml'))
        else:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'puppet-pacemaker.yaml'))

    if env_file:
        deploy_options.append(env_file)

    if ds_opts['containers']:
        deploy_options.append('docker-images.yaml')
        sdn_docker_file = get_docker_sdn_file(ds_opts)
        if sdn_docker_file:
            deploy_options.append(sdn_docker_file)
            deploy_options.append('sdn-images.yaml')
    else:
        deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)

    for k, v in OTHER_FILE_MAP.items():
        if k in ds_opts and ds_opts[k]:
            if ds_opts['containers']:
                deploy_options.append(os.path.join(con.THT_DOCKER_ENV_DIR,
                                                   "{}.yaml".format(k)))
            else:
                deploy_options.append(os.path.join(con.THT_ENV_DIR, v))

    if ds_opts['ceph'] and 'csit' not in env_file:
        prep_storage_env(ds, ns, virtual, tmp_dir)
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'storage-environment.yaml'))
    if ds_opts['sriov']:
        prep_sriov_env(ds, tmp_dir)

    # Check for 'k8s' here intentionally, as we may support other values
    # such as openstack/openshift for 'vim' option.
    if ds_opts['vim'] == 'k8s':
        deploy_options.append('kubernetes-environment.yaml')

    if virtual:
        deploy_options.append('virtual-environment.yaml')
    else:
        deploy_options.append('baremetal-environment.yaml')

    num_control, num_compute = inv.get_node_counts()
    if num_control == 0 or num_compute == 0:
        logging.error("Detected 0 control or compute nodes. Control nodes: "
                      "{}, compute nodes: {}".format(num_control,
                                                     num_compute))
        raise ApexDeployException("Invalid number of control or computes")
    elif num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1
    if platform.machine() == 'aarch64':
        # aarch64 deploys were not completing in the default 90 mins.
        # Not sure if this is related to the hardware the OOO support
        # was developed on or the virtualization support in CentOS.
        # Either way it will probably get better over time as the aarch64
        # support matures in CentOS, and deploy time should be re-tested in
        # the future so this multiplier can be removed.
        con.DEPLOY_TIMEOUT *= 2
    cmd = "openstack overcloud deploy --templates --timeout {} " \
          .format(con.DEPLOY_TIMEOUT)
    # build cmd env args
    for option in deploy_options:
        cmd += " -e {}".format(option)
    cmd += " --ntp-server {}".format(ns['ntp'][0])
    cmd += " --control-scale {}".format(num_control)
    cmd += " --compute-scale {}".format(num_compute)
    cmd += ' --control-flavor control --compute-flavor compute'
    if net_data:
        cmd += ' --networks-file network_data.yaml'
    libvirt_type = 'kvm'
    if virtual:
        with open('/sys/module/kvm_intel/parameters/nested') as f:
            nested_kvm = f.read().strip()
            if nested_kvm != 'Y':
                libvirt_type = 'qemu'
    cmd += ' --libvirt-type {}'.format(libvirt_type)
    logging.info("Deploy command set: {}".format(cmd))

    with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
        fh.write(cmd)
    return cmd
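
# Illustrative sketch (not executed): for a containerized virtual HA deploy
# the assembled command takes roughly this shape:
#
#   openstack overcloud deploy --templates --timeout 90 \
#     -e network-environment.yaml -e <THT_ENV_DIR>/docker.yaml \
#     -e <THT_ENV_DIR>/docker-ha.yaml -e opnfv-environment.yaml \
#     -e docker-images.yaml ... --ntp-server <ntp> --control-scale 3 \
#     --compute-scale 2 --control-flavor control --compute-flavor compute \
#     --libvirt-type qemu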


def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
               patches=None):
    """
    Locates sdn image and preps for deployment.
    :param ds: deploy settings
    :param ns: network settings
    :param img: sdn image
    :param tmp_dir: dir to store modified sdn image
    :param root_pw: password to configure for overcloud image
    :param docker_tag: Docker image tag for RDO version (default None)
    :param patches: List of patches to apply to overcloud image
    :return: set of patched container names
    """
    # TODO(trozet): Come up with a better way to organize this logic in this
    # function
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))

    ds_opts = ds['deploy_options']
    virt_cmds = list()
    sdn = ds_opts['sdn_controller']
    patched_containers = set()
    # we need this due to rhbz #1436021
    # fixed in systemd-219-37.el7
    if sdn is False:
        logging.info("Neutron openvswitch-agent disabled")
        virt_cmds.extend([{
            con.VIRT_RUN_CMD:
                "rm -f /etc/systemd/system/multi-user.target.wants/"
                "neutron-openvswitch-agent.service"},
            {
            con.VIRT_RUN_CMD:
                "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
                ".service"
        }])

    if ns.get('http_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'http_proxy={}' >> /etc/environment".format(
                    ns['http_proxy'])})

    if ns.get('https_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'https_proxy={}' >> /etc/environment".format(
                    ns['https_proxy'])})

    if ds_opts['vpn']:
        virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
                "/opt/quagga/etc/init.d/zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
                              "zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.local "})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
        logging.info("ZRPCD startup script configured")

    dataplane = ds_opts['dataplane']
    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
        uio_types = {
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'):
                'uio_pci_generic'
        }
        for mod_file, mod in uio_types.items():
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))

            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
                    mod_file)},
                {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
                                   "{}".format(os.path.basename(mod_file))}
            ])
    if root_pw:
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})
    if ds_opts['sfc'] and dataplane == 'ovs':
        virt_cmds.extend([
            {con.VIRT_RUN_CMD: "yum -y install "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_KMOD_RPM)},
            {con.VIRT_RUN_CMD: "yum downgrade -y "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_RPM)}
        ])
    if dataplane == 'fdio':
        # Patch neutron to use the OVS external interface for the router
        # and add the generic linux NS interface driver
        virt_cmds.append(
            {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
                               "-p1 < neutron-patch-NSDriver.patch"})
        if sdn is False:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
                {con.VIRT_RUN_CMD: "yum install -y "
                                   "/root/nosdn_vpp_rpms/*.rpm"}
            ])

    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug("Temporary overcloud image stored as: {}".format(
        tmp_oc_image))

    # resolve the undercloud admin IP up front; it is needed both for
    # OpenDaylight injection and for patching below
    undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
        'installer_vm']['ip']

    if sdn == 'opendaylight':
        oc_builder.inject_opendaylight(
            odl_version=ds_opts['odl_version'],
            image=tmp_oc_image,
            tmp_dir=tmp_dir,
            uc_ip=undercloud_admin_ip,
            os_version=ds_opts['os_version'],
            docker_tag=docker_tag,
        )
        if docker_tag:
            patched_containers = patched_containers.union({'opendaylight'})

    if patches:
        if ds_opts['os_version'] == 'master':
            branch = ds_opts['os_version']
        else:
            branch = "stable/{}".format(ds_opts['os_version'])
        logging.info('Adding patches to overcloud')
        patched_containers = patched_containers.union(
            c_builder.add_upstream_patches(patches,
                                           tmp_oc_image, tmp_dir,
                                           branch,
                                           uc_ip=undercloud_admin_ip,
                                           docker_tag=docker_tag))
    # if containers with ceph, and no ceph device we need to use a
    # persistent loop device for Ceph OSDs
    if docker_tag and ds_opts['ceph_device'] == '/dev/loop3':
        tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
        with open(tmp_losetup, 'w') as fh:
            fh.write(LOSETUP_SERVICE)
        virt_cmds.extend([
            {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(
                tmp_losetup)},
            {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size {}'
                .format(LOOP_DEVICE_SIZE)},
            {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
            {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
        ])
    # TODO(trozet) remove this after LP#173474 is fixed
    dhcp_unit = '/usr/lib/systemd/system/dhcp-interface@.service'
    virt_cmds.append(
        {con.VIRT_RUN_CMD: "crudini --del {} Unit "
                           "ConditionPathExists".format(dhcp_unit)})
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")
    return patched_containers
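
# Illustrative sketch (not executed): each virt_cmds entry is a single-key
# dict that virt_utils.virt_customize is assumed to translate into the
# matching virt-customize flag, e.g.:
#
#   [{con.VIRT_RUN_CMD: 'systemctl daemon-reload'}]
#   # -> virt-customize --run-command 'systemctl daemon-reload' \
#   #      -a <tmp_dir>/overcloud-full.qcow2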


def make_ssh_key():
    """
    Creates public and private ssh keys with 1024 bit RSA encryption
    :return: private, public key
    """
    key = rsa.generate_private_key(
        backend=crypto_default_backend(),
        public_exponent=65537,
        key_size=1024
    )

    private_key = key.private_bytes(
        crypto_serialization.Encoding.PEM,
        crypto_serialization.PrivateFormat.PKCS8,
        crypto_serialization.NoEncryption())
    public_key = key.public_key().public_bytes(
        crypto_serialization.Encoding.OpenSSH,
        crypto_serialization.PublicFormat.OpenSSH
    )
    return private_key.decode('utf-8'), public_key.decode('utf-8')
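
# Illustrative sketch (not executed): prep_env() below injects the pair
# into the overcloud environment, e.g.:
#
#   priv, pub = make_ssh_key()
#   # priv begins with '-----BEGIN PRIVATE KEY-----' (PEM, PKCS8)
#   # pub begins with 'ssh-rsa ' (single-line OpenSSH format)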


def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
    """
    Creates modified opnfv/network environments for deployment
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param opnfv_env: file path for opnfv-environment file
    :param net_env: file path for network-environment file
    :param tmp_dir: Apex tmp dir
    :return: None
    """

    logging.info("Preparing opnfv-environment and network-environment files")
    ds_opts = ds['deploy_options']
    tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
    shutil.copyfile(opnfv_env, tmp_opnfv_env)
    tenant_nic_map = ns['networks']['tenant']['nic_mapping']
    tenant_nic = dict()
    tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
    tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
    external_nic_map = ns['networks']['external'][0]['nic_mapping']
    external_nic = dict()
    external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]

    # SSH keys
    private_key, public_key = make_ssh_key()

    # Make easier/faster variables to index in the file editor
    if 'performance' in ds_opts:
        perf = True
        # vpp
        if 'vpp' in ds_opts['performance']['Compute']:
            perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
        else:
            perf_vpp_comp = None
        if 'vpp' in ds_opts['performance']['Controller']:
            perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
        else:
            perf_vpp_ctrl = None

        # ovs
        if 'ovs' in ds_opts['performance']['Compute']:
            perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
        else:
            perf_ovs_comp = None

        # kernel
        if 'kernel' in ds_opts['performance']['Compute']:
            perf_kern_comp = ds_opts['performance']['Compute']['kernel']
        else:
            perf_kern_comp = None
    else:
        perf = False

    tenant_settings = ns['networks']['tenant']
    tenant_vlan_enabled = 'tenant' in ns.enabled_network_list and \
        ns['networks']['tenant'].get('segmentation_type') == 'vlan'

    # Modify OPNFV environment
    # TODO: Change to build a dict and outputting yaml rather than parsing
    for line in fileinput.input(tmp_opnfv_env, inplace=True):
        line = line.strip('\n')
        output_line = line
        if 'CloudDomain' in line:
            output_line = "  CloudDomain: {}".format(ns['domain_name'])
        elif 'replace_private_key' in line:
            output_line = "    private_key: |\n"
            key_out = ''
            for line in private_key.splitlines():
                key_out += "        {}\n".format(line)
            output_line += key_out
        elif 'replace_public_key' in line:
            output_line = "    public_key: '{}'".format(public_key)
        elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
                'resource_registry' in line:
            output_line = "resource_registry:\n" \
                          "  OS::TripleO::NodeUserData: first-boot.yaml"
        elif 'ComputeExtraConfigPre' in line and \
                ds_opts['dataplane'] == 'ovs_dpdk':
            output_line = '  OS::TripleO::ComputeExtraConfigPre: ' \
                          './ovs-dpdk-preconfig.yaml'
        elif 'NeutronNetworkVLANRanges' in line:
            vlan_setting = ''
            if tenant_vlan_enabled:
                if ns['networks']['tenant']['overlay_id_range']:
                    vlan_setting = \
                        ns['networks']['tenant']['overlay_id_range']
                    if 'datacentre' not in vlan_setting:
                        vlan_setting += ',datacentre:1:1000'
            # SRIOV networks are VLAN based provider networks. In order to
            # simplify the deployment, nfv_sriov will be the default physnet.
            # VLANs are not needed in advance, and the user will have to
            # create the network specifying the segmentation-id.
            if ds_opts['sriov']:
                if vlan_setting:
                    vlan_setting += ",nfv_sriov"
                else:
                    vlan_setting = "datacentre:1:1000,nfv_sriov"
            if vlan_setting:
                output_line = "  NeutronNetworkVLANRanges: " + vlan_setting
        elif 'NeutronBridgeMappings' in line and tenant_vlan_enabled:
            if tenant_settings['overlay_id_range']:
                physnets = tenant_settings['overlay_id_range'].split(',')
                output_line = "  NeutronBridgeMappings: "
                for physnet in physnets:
                    physnet_name = physnet.split(':')[0]
                    if physnet_name != 'datacentre':
                        output_line += "{}:br-vlan,".format(physnet_name)
                output_line += "datacentre:br-ex"
        elif 'OpenDaylightProviderMappings' in line and tenant_vlan_enabled \
                and ds_opts['sdn_controller'] == 'opendaylight':
            if tenant_settings['overlay_id_range']:
                physnets = tenant_settings['overlay_id_range'].split(',')
                output_line = "  OpenDaylightProviderMappings: "
                for physnet in physnets:
                    physnet_name = physnet.split(':')[0]
                    if physnet_name != 'datacentre':
                        output_line += "{}:br-vlan,".format(physnet_name)
                output_line += "datacentre:br-ex"
        elif 'NeutronNetworkType' in line and tenant_vlan_enabled:
            output_line = "  NeutronNetworkType: vlan\n" \
                          "  NeutronTunnelTypes: ''"

        if ds_opts['sdn_controller'] == 'opendaylight' and \
                'odl_vpp_routing_node' in ds_opts:
            if 'opendaylight::vpp_routing_node' in line:
                output_line = ("    opendaylight::vpp_routing_node: {}.{}"
                               .format(ds_opts['odl_vpp_routing_node'],
                                       ns['domain_name']))
        elif not ds_opts['sdn_controller'] and \
                ds_opts['dataplane'] == 'fdio':
            if 'NeutronVPPAgentPhysnets' in line:
                # VPP interface tap0 will be used for external network
                # connectivity
                output_line = ("  NeutronVPPAgentPhysnets: "
                               "'datacentre:{},external:tap0'"
                               .format(tenant_nic['Controller']))
        elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
                'dvr') is True:
            if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
                output_line = ''
            elif 'NeutronDhcpAgentsPerNetwork' in line:
                num_control, num_compute = inv.get_node_counts()
                output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
                               .format(num_compute))
            elif 'ComputeServices' in line:
                output_line = ("  ComputeServices:\n"
                               "    - OS::TripleO::Services::"
                               "NeutronDhcpAgent")

        if perf:
            for role in 'NovaCompute', 'Controller':
                if role == 'NovaCompute':
                    perf_opts = perf_vpp_comp
                else:
                    perf_opts = perf_vpp_ctrl
                cfg = "{}ExtraConfig".format(role)
                if cfg in line and perf_opts:
                    perf_line = ''
                    if 'main-core' in perf_opts:
                        perf_line += ("\n    fdio::vpp_cpu_main_core: '{}'"
                                      .format(perf_opts['main-core']))
                    if 'corelist-workers' in perf_opts:
                        perf_line += ("\n    "
                                      "fdio::vpp_cpu_corelist_workers: '{}'"
                                      .format(perf_opts['corelist-workers']))
                    if ds_opts['sdn_controller'] == 'opendaylight' and \
                            ds_opts['dataplane'] == 'fdio':
                        if role == 'NovaCompute':
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface',"
                                          "'{}:public-interface']"
                                          .format(tenant_nic[role],
                                                  external_nic[role]))
                        else:
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface']"
                                          .format(tenant_nic[role]))
                    if perf_line:
                        output_line = ("  {}:{}".format(cfg, perf_line))

            if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
                for k, v in OVS_PERF_MAP.items():
                    if k in line and v in perf_ovs_comp:
                        output_line = "  {}: '{}'".format(k,
                                                          perf_ovs_comp[v])

            # (FIXME) use compute's kernel settings for all nodes for now.
            if perf_kern_comp:
                if 'NovaSchedulerDefaultFilters' in line:
                    output_line = \
                        "  NovaSchedulerDefaultFilters: 'RamFilter," \
                        "ComputeFilter,AvailabilityZoneFilter," \
                        "ComputeCapabilitiesFilter," \
                        "ImagePropertiesFilter,NUMATopologyFilter'"
                elif 'ComputeKernelArgs' in line:
                    kernel_args = ''
                    for k, v in perf_kern_comp.items():
                        kernel_args += "{}={} ".format(k, v)
                    if kernel_args:
                        output_line = "  ComputeKernelArgs: '{}'".\
                            format(kernel_args)

        print(output_line)

    logging.info("opnfv-environment file written to {}".format(
        tmp_opnfv_env))
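
# Illustrative sketch (not executed): the fileinput pass above rewrites
# matched template keys in place; e.g. with domain_name 'opnfvlf.org' a
# template line such as
#     CloudDomain: replace_me
# is emitted into tmp_opnfv_env as
#     CloudDomain: opnfvlf.org
# ('replace_me' is a hypothetical placeholder value here).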


def generate_ceph_key():
    key = os.urandom(16)
    header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
    return base64.b64encode(header + key)
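
# Illustrative sketch (not executed): '<hiih' packs a 12-byte little-endian
# header of (type=1, creation secs, nsecs=0, key length=16) that precedes
# the 16 random key bytes, matching the cephx keyring format, e.g.:
#
#   generate_ceph_key()
#   # -> b'AQ...' (base64 of the 12-byte header plus 16-byte key)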


def prep_storage_env(ds, ns, virtual, tmp_dir):
    """
    Creates storage environment file for deployment. The source file is
    copied to the host by the undercloud playbook.
    :param ds: deploy settings
    :param ns: network settings
    :param virtual: bool indicating virtual deployment
    :param tmp_dir: Apex tmp dir
    :return: None
    """
    ds_opts = ds['deploy_options']
    storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
    if not os.path.isfile(storage_file):
        logging.error("storage-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("storage-environment file not copied from "
                                  "undercloud")
    for line in fileinput.input(storage_file, inplace=True):
        line = line.strip('\n')
        if 'CephClusterFSID' in line:
            print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
        elif 'CephMonKey' in line:
            print("  CephMonKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephAdminKey' in line:
            print("  CephAdminKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephClientKey' in line:
            print("  CephClientKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        else:
            print(line)

    if ds_opts['containers']:
        ceph_params = {}
        # max pgs allowed are calculated as num_mons * 200. Therefore we
        # set the number of pgs and pools so that the total will be less:
        # num_pgs * num_pools * num_osds
        ceph_params['CephPoolDefaultSize'] = 2
        ceph_params['CephPoolDefaultPgNum'] = 32
        if virtual:
            ceph_params['CephAnsibleExtraConfig'] = {
                'centos_package_dependencies': [],
                'ceph_osd_docker_memory_limit': '1g',
                'ceph_mds_docker_memory_limit': '1g',
            }
        ceph_device = ds_opts['ceph_device']
        ceph_params['CephAnsibleDisksConfig'] = {
            'devices': [ceph_device],
            'osd_scenario': 'collocated'
        }
        utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
    # TODO(trozet): remove following block as we only support containers now
    elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
        with open(storage_file, 'a') as fh:
            fh.write('  ExtraConfig:\n')
            fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
                ds_opts['ceph_device']
            ))
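
# Illustrative sketch (not executed): utils.edit_tht_env is assumed to
# merge the dict into the named section of the YAML env file, yielding
# roughly:
#
#   parameter_defaults:
#     CephPoolDefaultSize: 2
#     CephPoolDefaultPgNum: 32
#     CephAnsibleDisksConfig:
#       devices: [/dev/loop3]
#       osd_scenario: collocated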


def prep_sriov_env(ds, tmp_dir):
    """
    Creates SRIOV environment file for deployment. The source file is
    copied to the host by the undercloud playbook.
    :param ds: deploy settings
    :param tmp_dir: Apex tmp dir
    :return: None
    """
    ds_opts = ds['deploy_options']
    sriov_iface = ds_opts['sriov']
    sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
    if not os.path.isfile(sriov_file):
        logging.error("sriov-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("sriov-environment file not copied from "
                                  "undercloud")
    # TODO(rnoriega): Instead of line editing, refactor this code to load
    # yaml file into a dict, edit it and write the file back.
    for line in fileinput.input(sriov_file, inplace=True):
        line = line.strip('\n')
        if 'NovaSchedulerDefaultFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NovaSchedulerAvailableFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NeutronPhysicalDevMappings' in line:
            print("  NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
                  .format(sriov_iface))
        elif 'NeutronSriovNumVFs' in line:
            print("  NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
        elif 'NovaPCIPassthrough' in line:
            print("  NovaPCIPassthrough:")
        elif 'devname' in line:
            print("    - devname: \"{}\"".format(sriov_iface))
        elif 'physical_network' in line:
            print("      physical_network: \"nfv_sriov\"")
        else:
            print(line)
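
# Illustrative sketch (not executed): the line[3:] slices above assume the
# scheduler filter parameters ship commented out in the source template, so
# dropping the first three characters re-enables them at two-space YAML
# indent, e.g. (hypothetical template line):
#
#   '#  NovaSchedulerDefaultFilters: [...]'
#   # -> '  NovaSchedulerDefaultFilters: [...]'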


def external_network_cmds(ns, ds):
    """
    Generates external network openstack commands
    :param ns: network settings
    :param ds: deploy settings
    :return: list of commands to configure external network
    """
    ds_opts = ds['deploy_options']
    external_physnet = 'datacentre'
    if ds_opts['dataplane'] == 'fdio' and \
            ds_opts['sdn_controller'] != 'opendaylight':
        external_physnet = 'external'
    if 'external' in ns.enabled_network_list:
        net_config = ns['networks']['external'][0]
        external = True
        pool_start, pool_end = net_config['floating_ip_range']
    else:
        net_config = ns['networks']['admin']
        external = False
        pool_start, pool_end = ns['apex']['networks']['admin'][
            'introspection_range']
    nic_config = net_config['nic_mapping']
    gateway = net_config['gateway']
    cmds = list()
    # create network command
    if nic_config['compute']['vlan'] == 'native':
        ext_type = 'flat'
    else:
        ext_type = "vlan --provider-segment {}".format(nic_config[
            'compute']['vlan'])
    cmds.append("openstack network create external --project service "
                "--external --provider-network-type {} "
                "--provider-physical-network {}"
                .format(ext_type, external_physnet))
    # create subnet command
    cidr = net_config['cidr']
    subnet_cmd = "openstack subnet create external-subnet --project " \
                 "service --network external --no-dhcp --gateway {} " \
                 "--allocation-pool start={},end={} --subnet-range " \
                 "{}".format(gateway, pool_start, pool_end, str(cidr))
    if external and cidr.version == 6:
        subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
                      '--ipv6-address-mode slaac'
    cmds.append(subnet_cmd)
    logging.debug("Neutron external network commands determined "
                  "as: {}".format(cmds))
    return cmds


def create_congress_cmds(overcloud_file):
    drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone',
               'doctor']
    overcloudrc = parsers.parse_overcloudrc(overcloud_file)
    logging.info("Creating congress commands")
    try:
        ds_cfg = [
            "username={}".format(overcloudrc['OS_USERNAME']),
            "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
            "password={}".format(overcloudrc['OS_PASSWORD']),
            "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
        ]
    except KeyError:
        logging.error("Unable to find all keys required for congress in "
                      "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
                      "OS_PASSWORD, OS_AUTH_URL. Please check overcloudrc "
                      "file: {}".format(overcloud_file))
        raise
    cmds = list()
    ds_cfg = '--config ' + ' --config '.join(ds_cfg)

    for driver in drivers:
        if driver == 'doctor':
            cmd = "{} \"{}\"".format(driver, driver)
        else:
            cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
        if driver == 'nova':
            cmd += ' --config api_version="2.34"'
        logging.debug("Congress command created: {}".format(cmd))
        cmds.append(cmd)
    return cmds
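
# Illustrative sketch (not executed): each returned entry is meant to be
# appended to an 'openstack congress datasource create' call, e.g.:
#
#   nova "nova" --config username=admin --config tenant_name=admin \
#     --config password=<secret> --config auth_url=http://<vip>:5000/v3 \
#     --config api_version="2.34"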