##############################################################################
# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
import base64
import fileinput
import logging
import os
import platform
import shutil
import struct
import time
import uuid

import apex.builders.overcloud_builder as oc_builder
import apex.builders.common_builder as c_builder
from apex.common import constants as con
from apex.common.exceptions import ApexDeployException
from apex.common import parsers
from apex.common import utils
from apex.virtual import utils as virt_utils
from cryptography.hazmat.primitives import serialization as \
    crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend as \
    crypto_default_backend
SDN_FILE_MAP = {
    'opendaylight': {
        'sfc': 'neutron-sfc-opendaylight.yaml',
        'vpn': 'neutron-bgpvpn-opendaylight.yaml',
        'gluon': 'gluon.yaml',
        'vpp': {
            'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
            'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
            'default': 'neutron-opendaylight-honeycomb.yaml'
        },
        'l2gw': 'neutron-l2gw-opendaylight.yaml',
        'sriov': 'neutron-opendaylight-sriov.yaml',
        'default': 'neutron-opendaylight.yaml',
    },
    'onos': {
        'sfc': 'neutron-onos-sfc.yaml',
        'default': 'neutron-onos.yaml'
    },
    'ovn': 'neutron-ml2-ovn.yaml',
    False: {
        'vpp': 'neutron-ml2-vpp.yaml',
        'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
    }
}
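# SDN_FILE_MAP above is consumed by build_sdn_env_list() below: the top-level
# key is matched against the 'sdn_controller' deploy setting, nested keys are
# feature flags that pull in additional environment files, 'default' is
# appended first for the matched controller, and a (value, file) tuple means
# the file is only used when the deploy setting equals that value.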
OTHER_FILE_MAP = {
    'tacker': 'enable_tacker.yaml',
    'congress': 'enable_congress.yaml',
    'barometer': 'enable_barometer.yaml',
    'rt_kvm': 'enable_rt_kvm.yaml'
}
OVS_PERF_MAP = {
    'HostCpusList': 'dpdk_cores',
    'NeutronDpdkCoreList': 'pmd_cores',
    'NeutronDpdkSocketMemory': 'socket_memory',
    'NeutronDpdkMemoryChannels': 'memory_channels'
}
OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
                      ".noarch.rpm"
LOSETUP_SERVICE = """[Unit]
Description=Setup loop devices

[Service]
ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
ExecStop=/sbin/losetup -d /dev/loop3

[Install]
WantedBy=multi-user.target
"""
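# The systemd unit above is uploaded into the overcloud image by prep_image()
# when Ceph runs in containers and ceph_device is the default /dev/loop3, so
# the Ceph OSD can be backed by a loop device over /srv/data.img.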
def build_sdn_env_list(ds, sdn_map, env_list=None):
    """
    Builds a list of SDN environment files to be used in the deploy cmd.

    This function recursively searches an sdn_map.  First the sdn controller
    is matched and then the function looks for enabled features for that
    controller to determine which environment files should be used.  By
    default the feature will be checked if set to true in deploy settings to
    be added to the list.  If a feature does not have a boolean value, then
    the key and value pair to compare with are checked as a tuple (k, v).

    :param ds: deploy settings
    :param sdn_map: SDN map to recursively search
    :param env_list: recursive var to hold previously found env_list
    :return: A list of env files
    """
    if env_list is None:
        env_list = list()
    for k, v in sdn_map.items():
        if ds['sdn_controller'] == k or (k in ds and ds[k]):
            if isinstance(v, dict):
                # Append default SDN env file first
                # The assumption is that feature-enabled SDN env files
                # override and do not conflict with previously set default
                # settings
                if ds['sdn_controller'] == k and 'default' in v:
                    env_list.append(os.path.join(con.THT_ENV_DIR,
                                                 v['default']))
                env_list.extend(build_sdn_env_list(ds, v))
            # check if the value is not a boolean
            elif isinstance(v, tuple):
                if ds[k] == v[0]:
                    env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
            else:
                env_list.append(os.path.join(con.THT_ENV_DIR, v))
    if len(env_list) == 0:
        try:
            env_list.append(os.path.join(
                con.THT_ENV_DIR, sdn_map['default']))
        except KeyError:
            logging.warning("Unable to find default file for SDN")

    return env_list
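# Illustrative example (hypothetical, minimal deploy settings):
#   ds = {'sdn_controller': 'opendaylight', 'sfc': True}
#   build_sdn_env_list(ds, SDN_FILE_MAP)
#   -> [<THT_ENV_DIR>/neutron-opendaylight.yaml,
#       <THT_ENV_DIR>/neutron-sfc-opendaylight.yaml]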
def get_docker_sdn_file(ds_opts):
    """
    Returns docker env file for detected SDN
    :param ds_opts: deploy options
    :return: docker THT env file for an SDN
    """
    # FIXME(trozet): We assume right now there is only one docker SDN file
    docker_services = con.VALID_DOCKER_SERVICES
    tht_dir = con.THT_DOCKER_ENV_DIR[ds_opts['os_version']]
    sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
    for sdn_file in sdn_env_list:
        sdn_base = os.path.basename(sdn_file)
        if sdn_base in docker_services:
            if docker_services[sdn_base] is not None:
                return os.path.join(tht_dir,
                                    docker_services[sdn_base])
            else:
                return os.path.join(tht_dir, sdn_base)
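# Note: the lookup above relies on con.VALID_DOCKER_SERVICES; when the matched
# SDN environment file (e.g. neutron-opendaylight.yaml) maps to a containerized
# counterpart that counterpart is returned, otherwise the same file name is
# returned from the docker THT environments directory.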
def create_deploy_cmd(ds, ns, inv, tmp_dir,
                      virtual, env_file='opnfv-environment.yaml',
                      net_data=False):

    logging.info("Creating deployment command")
    deploy_options = ['network-environment.yaml']

    ds_opts = ds['deploy_options']

    if ds_opts['containers']:
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'docker.yaml'))

    if ds['global_params']['ha_enabled']:
        if ds_opts['containers']:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'docker-ha.yaml'))
        else:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'puppet-pacemaker.yaml'))

    deploy_options.append(env_file)

    if ds_opts['containers']:
        deploy_options.append('docker-images.yaml')
        sdn_docker_file = get_docker_sdn_file(ds_opts)
        if sdn_docker_file:
            deploy_options.append(sdn_docker_file)
            deploy_options.append('sdn-images.yaml')
    else:
        deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)

    for k, v in OTHER_FILE_MAP.items():
        if k in ds_opts and ds_opts[k]:
            if ds_opts['containers']:
                deploy_options.append(os.path.join(con.THT_DOCKER_ENV_DIR,
                                                   "{}.yaml".format(k)))
            else:
                deploy_options.append(os.path.join(con.THT_ENV_DIR, v))

    if ds_opts['ceph'] and 'csit' not in env_file:
        prep_storage_env(ds, ns, virtual, tmp_dir)
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'storage-environment.yaml'))
    if ds_opts['sriov']:
        prep_sriov_env(ds, tmp_dir)

    # Check for 'k8s' here intentionally, as we may support other values
    # such as openstack/openshift for 'vim' option.
    if ds_opts['vim'] == 'k8s':
        deploy_options.append('kubernetes-environment.yaml')

    if virtual:
        deploy_options.append('virtual-environment.yaml')
    else:
        deploy_options.append('baremetal-environment.yaml')

    num_control, num_compute = inv.get_node_counts()
    if num_control == 0 or num_compute == 0:
        logging.error("Detected 0 control or compute nodes. Control nodes: "
                      "{}, compute nodes: {}".format(num_control,
                                                     num_compute))
        raise ApexDeployException("Invalid number of control or computes")
    elif num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1
    if platform.machine() == 'aarch64':
        # aarch64 deploys were not completing in the default 90 mins.
        # Not sure if this is related to the hardware the OOO support
        # was developed on or the virtualization support in CentOS.
        # Either way it will probably get better over time as the aarch64
        # support matures in CentOS, and deploy time should be re-tested in
        # the future so this multiplier can be removed.
        con.DEPLOY_TIMEOUT *= 2
    cmd = "openstack overcloud deploy --templates --timeout {} " \
        .format(con.DEPLOY_TIMEOUT)
    for option in deploy_options:
        cmd += " -e {}".format(option)
    cmd += " --ntp-server {}".format(ns['ntp'][0])
    cmd += " --control-scale {}".format(num_control)
    cmd += " --compute-scale {}".format(num_compute)
    cmd += ' --control-flavor control --compute-flavor compute'
    if net_data:
        cmd += ' --networks-file network_data.yaml'
    libvirt_type = 'kvm'
    if virtual:
        with open('/sys/module/kvm_intel/parameters/nested') as f:
            nested_kvm = f.read().strip()
            if nested_kvm != 'Y':
                libvirt_type = 'qemu'
    cmd += ' --libvirt-type {}'.format(libvirt_type)
    logging.info("Deploy command set: {}".format(cmd))

    with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
        fh.write(cmd)
    return cmd
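# For illustration only, a containerized virtual HA deploy might end up with a
# command along these lines (the exact -e files and values depend on settings):
#   openstack overcloud deploy --templates --timeout 90 \
#     -e network-environment.yaml -e <THT_ENV_DIR>/docker.yaml \
#     -e <THT_ENV_DIR>/docker-ha.yaml -e opnfv-environment.yaml \
#     -e docker-images.yaml ... --ntp-server pool.ntp.org \
#     --control-scale 3 --compute-scale 2 \
#     --control-flavor control --compute-flavor compute --libvirt-type qemu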
def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
               patches=None):
    """
    Locates sdn image and preps for deployment.
    :param ds: deploy settings
    :param ns: network settings
    :param img: sdn image
    :param tmp_dir: dir to store modified sdn image
    :param root_pw: password to configure for overcloud image
    :param docker_tag: Docker image tag for RDO version (default None)
    :param patches: List of patches to apply to overcloud image
    """
    # TODO(trozet): Come up with a better way to organize this logic in this
    # function
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))

    ds_opts = ds['deploy_options']
    virt_cmds = list()
    sdn = ds_opts['sdn_controller']
    patched_containers = set()
    # we need this due to rhbz #1436021
    # fixed in systemd-219-37.el7
    if sdn == 'opendaylight':
        logging.info("Neutron openvswitch-agent disabled")
        virt_cmds.extend([
            {con.VIRT_RUN_CMD:
                "rm -f /etc/systemd/system/multi-user.target.wants/"
                "neutron-openvswitch-agent.service"},
            {con.VIRT_RUN_CMD:
                "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
                ".service"}
        ])
    if ns.get('http_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'http_proxy={}' >> /etc/environment".format(
                    ns['http_proxy'])})
    if ns.get('https_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'https_proxy={}' >> /etc/environment".format(
                    ns['https_proxy'])})
    if ds_opts['vpn']:
        virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
                "/opt/quagga/etc/init.d/zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
                              "zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.local "})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
        logging.info("ZRPCD process started")
    dataplane = ds_opts['dataplane']
    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
        uio_types = {
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'):
                'uio_pci_generic'
        }
        for mod_file, mod in uio_types.items():
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))
            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
                    mod_file)},
                {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
                                   "{}".format(os.path.basename(mod_file))}
            ])
    if root_pw:
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})
    if ds_opts['sfc'] and dataplane == 'ovs':
        virt_cmds.extend([
            {con.VIRT_RUN_CMD: "yum -y install "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_KMOD_RPM)},
            {con.VIRT_RUN_CMD: "yum downgrade -y "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_RPM)}
        ])
    if dataplane == 'fdio':
        # Patch neutron with using OVS external interface for router
        # and add generic linux NS interface driver
        virt_cmds.append(
            {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
                               "-p1 < neutron-patch-NSDriver.patch"})
        if sdn is False:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
                {con.VIRT_RUN_CMD: "yum install -y "
                                   "/root/nosdn_vpp_rpms/*.rpm"}
            ])
    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug("Temporary overcloud image stored as: {}".format(
        tmp_oc_image))

    if sdn == 'opendaylight':
        undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
            'installer_vm']['ip']
        oc_builder.inject_opendaylight(
            odl_version=ds_opts['odl_version'],
            image=tmp_oc_image,
            tmp_dir=tmp_dir,
            uc_ip=undercloud_admin_ip,
            os_version=ds_opts['os_version'],
            docker_tag=docker_tag,
        )
        if docker_tag:
            patched_containers = patched_containers.union({'opendaylight'})

    if patches:
        if ds_opts['os_version'] == 'master':
            branch = ds_opts['os_version']
        else:
            branch = "stable/{}".format(ds_opts['os_version'])
        logging.info('Adding patches to overcloud')
        patched_containers = patched_containers.union(
            c_builder.add_upstream_patches(patches,
                                           tmp_oc_image, tmp_dir,
                                           branch,
                                           uc_ip=undercloud_admin_ip,
                                           docker_tag=docker_tag))
    # if containers with ceph, and no ceph device we need to use a
    # persistent loop device for Ceph OSDs
    if docker_tag and ds_opts['ceph_device'] == '/dev/loop3':
        tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
        with open(tmp_losetup, 'w') as fh:
            fh.write(LOSETUP_SERVICE)
        virt_cmds.extend([
            {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(
                tmp_losetup)},
            {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size 10G'},
            {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
            {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
        ])
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")
    return patched_containers
def make_ssh_key():
    """
    Creates public and private ssh keys with 1024 bit RSA encryption
    :return: private, public key
    """
    key = rsa.generate_private_key(
        backend=crypto_default_backend(),
        public_exponent=65537,
        key_size=1024
    )

    private_key = key.private_bytes(
        crypto_serialization.Encoding.PEM,
        crypto_serialization.PrivateFormat.PKCS8,
        crypto_serialization.NoEncryption())
    public_key = key.public_key().public_bytes(
        crypto_serialization.Encoding.OpenSSH,
        crypto_serialization.PublicFormat.OpenSSH
    )
    return private_key.decode('utf-8'), public_key.decode('utf-8')
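# Example usage (the pair is consumed by prep_env() to fill in the
# replace_private_key/replace_public_key placeholders):
#   private_key, public_key = make_ssh_key()
#   # private_key starts with '-----BEGIN PRIVATE KEY-----'
#   # public_key starts with 'ssh-rsa '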
def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
    """
    Creates modified opnfv/network environments for deployment
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param opnfv_env: file path for opnfv-environment file
    :param net_env: file path for network-environment file
    :param tmp_dir: Apex tmp dir
    """

    logging.info("Preparing opnfv-environment and network-environment files")
    ds_opts = ds['deploy_options']
    tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
    shutil.copyfile(opnfv_env, tmp_opnfv_env)
    tenant_nic_map = ns['networks']['tenant']['nic_mapping']
    tenant_nic = dict()
    tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
    tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
    external_nic_map = ns['networks']['external'][0]['nic_mapping']
    external_nic = dict()
    external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]

    private_key, public_key = make_ssh_key()

    # Make easier/faster variables to index in the file editor
    if 'performance' in ds_opts:
        perf = True
        # vpp
        if 'vpp' in ds_opts['performance']['Compute']:
            perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
        else:
            perf_vpp_comp = None
        if 'vpp' in ds_opts['performance']['Controller']:
            perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
        else:
            perf_vpp_ctrl = None

        # ovs
        if 'ovs' in ds_opts['performance']['Compute']:
            perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
        else:
            perf_ovs_comp = None

        # kernel
        if 'kernel' in ds_opts['performance']['Compute']:
            perf_kern_comp = ds_opts['performance']['Compute']['kernel']
        else:
            perf_kern_comp = None
    else:
        perf = False
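    # For reference, the performance block consumed above has roughly this
    # shape in deploy settings (keys and values here are illustrative only):
    #   performance:
    #     Controller:
    #       vpp: {main-core: '1', corelist-workers: '2'}
    #     Compute:
    #       vpp: {main-core: '2', corelist-workers: '3,4'}
    #       ovs: {dpdk_cores: '0,1', pmd_cores: '2,3',
    #             socket_memory: '1024', memory_channels: '4'}
    #       kernel: {hugepagesz: 2M, hugepages: 2048}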
    # Modify OPNFV environment
    # TODO: Change to build a dict and outputting yaml rather than parsing
    for line in fileinput.input(tmp_opnfv_env, inplace=True):
        line = line.strip('\n')
        output_line = line
        if 'CloudDomain' in line:
            output_line = "  CloudDomain: {}".format(ns['domain_name'])
        elif 'replace_private_key' in line:
            output_line = "    private_key: |\n"
            key_out = ''
            for line in private_key.splitlines():
                key_out += "        {}\n".format(line)
            output_line += key_out
        elif 'replace_public_key' in line:
            output_line = "    public_key: '{}'".format(public_key)
        elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
                'resource_registry' in line:
            output_line = "resource_registry:\n" \
                          "  OS::TripleO::NodeUserData: first-boot.yaml"
        elif 'ComputeExtraConfigPre' in line and \
                ds_opts['dataplane'] == 'ovs_dpdk':
            output_line = '  OS::TripleO::ComputeExtraConfigPre: ' \
                          './ovs-dpdk-preconfig.yaml'

        if ds_opts['sdn_controller'] == 'opendaylight' and \
                'odl_vpp_routing_node' in ds_opts:
            if 'opendaylight::vpp_routing_node' in line:
                output_line = ("    opendaylight::vpp_routing_node: {}.{}"
                               .format(ds_opts['odl_vpp_routing_node'],
                                       ns['domain_name']))
        elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
            if 'NeutronVPPAgentPhysnets' in line:
                # VPP interface tap0 will be used for external network
                # connectivity
                output_line = ("  NeutronVPPAgentPhysnets: "
                               "'datacentre:{},external:tap0'"
                               .format(tenant_nic['Controller']))
        elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
                'dvr') is True:
            if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
                output_line = ''
            elif 'NeutronDhcpAgentsPerNetwork' in line:
                num_control, num_compute = inv.get_node_counts()
                output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
                               .format(num_compute))
            elif 'ComputeServices' in line:
                output_line = ("  ComputeServices:\n"
                               "    - OS::TripleO::Services::NeutronDhcpAgent")
        # SRIOV networks are VLAN based provider networks. In order to
        # simplify the deployment, nfv_sriov will be the default physnet.
        # VLANs are not needed in advance, and the user will have to create
        # the network specifying the segmentation-id.
        if ds_opts['sriov']:
            if 'NeutronNetworkVLANRanges' in line:
                output_line = ("{},nfv_sriov'".format(line[:-1]))
        if perf:
            for role in 'NovaCompute', 'Controller':
                if role == 'NovaCompute':
                    perf_opts = perf_vpp_comp
                else:
                    perf_opts = perf_vpp_ctrl
                cfg = "{}ExtraConfig".format(role)
                if cfg in line and perf_opts:
                    perf_line = ''
                    if 'main-core' in perf_opts:
                        perf_line += ("\n    fdio::vpp_cpu_main_core: '{}'"
                                      .format(perf_opts['main-core']))
                    if 'corelist-workers' in perf_opts:
                        perf_line += ("\n    "
                                      "fdio::vpp_cpu_corelist_workers: '{}'"
                                      .format(perf_opts['corelist-workers']))
                    if ds_opts['sdn_controller'] == 'opendaylight' and \
                            ds_opts['dataplane'] == 'fdio':
                        if role == 'NovaCompute':
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface',"
                                          "'{}:public-interface']"
                                          .format(tenant_nic[role],
                                                  external_nic[role]))
                        else:
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface']"
                                          .format(tenant_nic[role]))
                    if perf_line:
                        output_line = ("  {}:{}".format(cfg, perf_line))

            if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
                for k, v in OVS_PERF_MAP.items():
                    if k in line and v in perf_ovs_comp:
                        output_line = "  {}: '{}'".format(k, perf_ovs_comp[v])

            # (FIXME) use compute's kernel settings for all nodes for now.
            if perf_kern_comp:
                if 'NovaSchedulerDefaultFilters' in line:
                    output_line = \
                        "  NovaSchedulerDefaultFilters: 'RamFilter," \
                        "ComputeFilter,AvailabilityZoneFilter," \
                        "ComputeCapabilitiesFilter," \
                        "ImagePropertiesFilter,NUMATopologyFilter'"
                elif 'ComputeKernelArgs' in line:
                    kernel_args = ''
                    for k, v in perf_kern_comp.items():
                        kernel_args += "{}={} ".format(k, v)
                    output_line = "  ComputeKernelArgs: '{}'".\
                        format(kernel_args)

        print(output_line)

    logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
def generate_ceph_key():
    key = os.urandom(16)
    header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
    return base64.b64encode(header + key)
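# The value produced above follows the cephx keyring format: a little-endian
# header (key type 1, creation time, nonce 0, key length) followed by the
# 16-byte secret, base64-encoded so it can be dropped directly into the
# Ceph*Key parameters of storage-environment.yaml (see prep_storage_env).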
def prep_storage_env(ds, ns, virtual, tmp_dir):
    """
    Creates storage environment file for deployment.  Source file is copied
    by undercloud playbook to host.
    :param ds: deploy settings
    :param ns: network settings
    :param virtual: indicates a virtual deployment
    :param tmp_dir: Apex tmp dir
    :return: None
    """
    ds_opts = ds['deploy_options']
    storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
    if not os.path.isfile(storage_file):
        logging.error("storage-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("storage-environment file not copied from "
                                  "undercloud")
    for line in fileinput.input(storage_file, inplace=True):
        line = line.strip('\n')
        if 'CephClusterFSID' in line:
            print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
        elif 'CephMonKey' in line:
            print("  CephMonKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephAdminKey' in line:
            print("  CephAdminKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephClientKey' in line:
            print("  CephClientKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        else:
            print(line)

    if ds_opts['containers']:
        undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
            'installer_vm']['ip']
        ceph_version = con.CEPH_VERSION_MAP[ds_opts['os_version']]
        docker_image = "{}:8787/ceph/daemon:tag-build-master-" \
                       "{}-centos-7".format(undercloud_admin_ip,
                                            ceph_version)
        ceph_params = {
            'DockerCephDaemonImage': docker_image,
        }

        # max pgs allowed are calculated as num_mons * 200. Therefore we
        # set the number of pgs and pools so that the total
        # (num_pgs * num_pools * num_osds) stays below that limit.
        ceph_params['CephPoolDefaultSize'] = 2
        ceph_params['CephPoolDefaultPgNum'] = 32
        if virtual:
            ceph_params['CephAnsibleExtraConfig'] = {
                'centos_package_dependencies': [],
                'ceph_osd_docker_memory_limit': '1g',
                'ceph_mds_docker_memory_limit': '1g',
            }
        ceph_device = ds_opts['ceph_device']
        ceph_params['CephAnsibleDisksConfig'] = {
            'devices': [ceph_device],
            'osd_scenario': 'collocated'
        }
        utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
    # TODO(trozet): remove following block as we only support containers now
    elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
        with open(storage_file, 'a') as fh:
            fh.write('  ExtraConfig:\n')
            fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
                ds_opts['ceph_device']
            ))
def prep_sriov_env(ds, tmp_dir):
    """
    Creates SRIOV environment file for deployment.  Source file is copied by
    undercloud playbook to host.
    :param ds: deploy settings
    :param tmp_dir: Apex tmp dir
    :return: None
    """
    ds_opts = ds['deploy_options']
    sriov_iface = ds_opts['sriov']
    sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
    if not os.path.isfile(sriov_file):
        logging.error("sriov-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("sriov-environment file not copied from "
                                  "undercloud")
    # TODO(rnoriega): Instead of line editing, refactor this code to load
    # yaml file into a dict, edit it and write the file back.
    for line in fileinput.input(sriov_file, inplace=True):
        line = line.strip('\n')
        if 'NovaSchedulerDefaultFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NovaSchedulerAvailableFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NeutronPhysicalDevMappings' in line:
            print("  NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
                  .format(sriov_iface))
        elif 'NeutronSriovNumVFs' in line:
            print("  NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
        elif 'NovaPCIPassthrough' in line:
            print("  NovaPCIPassthrough:")
        elif 'devname' in line:
            print("    - devname: \"{}\"".format(sriov_iface))
        elif 'physical_network' in line:
            print("      physical_network: \"nfv_sriov\"")
        else:
            print(line)
def external_network_cmds(ns, ds):
    """
    Generates external network openstack commands
    :param ns: network settings
    :param ds: deploy settings
    :return: list of commands to configure external network
    """
    ds_opts = ds['deploy_options']
    external_physnet = 'datacentre'
    if ds_opts['dataplane'] == 'fdio' and \
            ds_opts['sdn_controller'] != 'opendaylight':
        external_physnet = 'external'
    if 'external' in ns.enabled_network_list:
        external = True
        net_config = ns['networks']['external'][0]
        pool_start, pool_end = net_config['floating_ip_range']
    else:
        external = False
        net_config = ns['networks']['admin']
        pool_start, pool_end = ns['apex']['networks']['admin'][
            'introspection_range']
    nic_config = net_config['nic_mapping']
    gateway = net_config['gateway']
    cmds = list()
    # create network command
    if nic_config['compute']['vlan'] == 'native':
        ext_type = 'flat'
    else:
        ext_type = "vlan --provider-segment {}".format(nic_config[
            'compute']['vlan'])
    cmds.append("openstack network create external --project service "
                "--external --provider-network-type {} "
                "--provider-physical-network {}"
                .format(ext_type, external_physnet))
    # create subnet command
    cidr = net_config['cidr']
    subnet_cmd = "openstack subnet create external-subnet --project " \
                 "service --network external --no-dhcp --gateway {} " \
                 "--allocation-pool start={},end={} --subnet-range " \
                 "{}".format(gateway, pool_start, pool_end, str(cidr))
    if external and cidr.version == 6:
        subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
                      '--ipv6-address-mode slaac'
    cmds.append(subnet_cmd)
    logging.debug("Neutron external network commands determined "
                  "as: {}".format(cmds))
    return cmds
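# For illustration, with a native-VLAN external network these commands look
# roughly like the following (addresses are made up for the example):
#   openstack network create external --project service --external \
#     --provider-network-type flat --provider-physical-network datacentre
#   openstack subnet create external-subnet --project service \
#     --network external --no-dhcp --gateway 172.30.9.1 \
#     --allocation-pool start=172.30.9.2,end=172.30.9.100 \
#     --subnet-range 172.30.9.0/24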
def create_congress_cmds(overcloud_file):
    drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
    overcloudrc = parsers.parse_overcloudrc(overcloud_file)
    logging.info("Creating congress commands")
    try:
        ds_cfg = [
            "username={}".format(overcloudrc['OS_USERNAME']),
            "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
            "password={}".format(overcloudrc['OS_PASSWORD']),
            "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
        ]
    except KeyError:
        logging.error("Unable to find all keys required for congress in "
                      "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
                      "OS_PASSWORD, OS_AUTH_URL.  Please check overcloudrc "
                      "file: {}".format(overcloud_file))
        raise
    cmds = list()
    ds_cfg = '--config ' + ' --config '.join(ds_cfg)

    for driver in drivers:
        if driver == 'doctor':
            cmd = "{} \"{}\"".format(driver, driver)
        else:
            cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
        if driver == 'nova':
            cmd += ' --config api_version="2.34"'
        logging.debug("Congress command created: {}".format(cmd))
        cmds.append(cmd)
    return cmds
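# For illustration, the argument string built for the 'nova' driver looks
# roughly like (values come from the parsed overcloudrc; those shown here are
# made up for the example):
#   nova "nova" --config username=admin --config tenant_name=admin \
#     --config password=secret --config auth_url=http://192.0.2.10:5000/v3 \
#     --config api_version="2.34"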