##############################################################################
# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

import base64
import fileinput
import logging
import os
import platform
import shutil
import struct
import time
import uuid

import apex.builders.overcloud_builder as oc_builder
import apex.builders.common_builder as c_builder
from apex.common import constants as con
from apex.common.exceptions import ApexDeployException
from apex.common import parsers
from apex.common import utils
from apex.virtual import utils as virt_utils
from cryptography.hazmat.primitives import serialization as \
    crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend as \
    crypto_default_backend


SDN_FILE_MAP = {
    'opendaylight': {
        'sfc': 'neutron-sfc-opendaylight.yaml',
        'vpn': 'neutron-bgpvpn-opendaylight.yaml',
        'gluon': 'gluon.yaml',
        'vpp': {
            'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
            'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
            'default': 'neutron-opendaylight-honeycomb.yaml'
        },
        'l2gw': 'neutron-l2gw-opendaylight.yaml',
        'sriov': 'neutron-opendaylight-sriov.yaml',
        'default': 'neutron-opendaylight.yaml',
    },
    'onos': {
        'sfc': 'neutron-onos-sfc.yaml',
        'default': 'neutron-onos.yaml'
    },
    'ovn': 'neutron-ml2-ovn.yaml',
    False: {
        'vpp': 'neutron-ml2-vpp.yaml',
        'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
    }
}

OTHER_FILE_MAP = {
    'tacker': 'enable_tacker.yaml',
    'congress': 'enable_congress.yaml',
    'barometer': 'enable_barometer.yaml',
    'rt_kvm': 'enable_rt_kvm.yaml'
}

OVS_PERF_MAP = {
    'HostCpusList': 'dpdk_cores',
    'NeutronDpdkCoreList': 'pmd_cores',
    'NeutronDpdkSocketMemory': 'socket_memory',
    'NeutronDpdkMemoryChannels': 'memory_channels'
}

OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
                      ".noarch.rpm"

LOSETUP_SERVICE = """[Unit]
Description=Setup loop devices

[Service]
Type=oneshot
ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
ExecStop=/sbin/losetup -d /dev/loop3
RemainAfterExit=yes

[Install]
WantedBy=multi-user.target
"""
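
# Note: prep_image() below writes LOSETUP_SERVICE to <tmp_dir>/losetup.service
# and uploads it into the overcloud image so that /dev/loop3 is bound to
# /srv/data.img at boot when a loop device is used for Ceph OSDs. A minimal
# sketch of that flow (mirroring the code further down):
#
#     tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
#     with open(tmp_losetup, 'w') as fh:
#         fh.write(LOSETUP_SERVICE)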


def build_sdn_env_list(ds, sdn_map, env_list=None):
    """
    Builds a list of SDN environment files to be used in the deploy cmd.

    This function recursively searches an sdn_map. First the SDN controller
    is matched, and then the function looks for enabled features for that
    controller to determine which environment files should be used. By
    default, a feature is added to the list if it is set to true in the
    deploy settings. If a feature does not have a boolean value, then the
    key and value pair to compare with are checked as a tuple (k, v).

    :param ds: deploy settings
    :param sdn_map: SDN map to recursively search
    :param env_list: recursive var to hold previously found env files
    :return: A list of env files
    """
    if env_list is None:
        env_list = list()
    for k, v in sdn_map.items():
        if ds['sdn_controller'] == k or (k in ds and ds[k]):
            if isinstance(v, dict):
                # Append default SDN env file first
                # The assumption is that feature-enabled SDN env files
                # override and do not conflict with previously set default
                # settings
                if ds['sdn_controller'] == k and 'default' in v:
                    env_list.append(os.path.join(con.THT_ENV_DIR,
                                                 v['default']))
                env_list.extend(build_sdn_env_list(ds, v))
            # check if the value is not a boolean
            elif isinstance(v, tuple):
                if ds[k] == v[0]:
                    env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
            else:
                env_list.append(os.path.join(con.THT_ENV_DIR, v))
    if len(env_list) == 0:
        try:
            env_list.append(os.path.join(
                con.THT_ENV_DIR, sdn_map['default']))
        except KeyError:
            logging.warning("Unable to find default file for SDN")

    return env_list
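
# Example (hypothetical deploy settings, for illustration only): with
#     ds = {'sdn_controller': 'opendaylight', 'sfc': True, 'vpn': False}
# build_sdn_env_list(ds, SDN_FILE_MAP) returns the controller default
# 'neutron-opendaylight.yaml' first, followed by the enabled feature file
# 'neutron-sfc-opendaylight.yaml', both joined to con.THT_ENV_DIR.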


def get_docker_sdn_file(ds_opts):
    """
    Returns docker env file for detected SDN
    :param ds_opts: deploy options
    :return: docker THT env file for an SDN
    """
    # FIXME(trozet): We assume right now there is only one docker SDN file
    docker_services = con.VALID_DOCKER_SERVICES
    tht_dir = con.THT_DOCKER_ENV_DIR[ds_opts['os_version']]
    sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
    for sdn_file in sdn_env_list:
        sdn_base = os.path.basename(sdn_file)
        if sdn_base in docker_services:
            if docker_services[sdn_base] is not None:
                return os.path.join(tht_dir,
                                    docker_services[sdn_base])
            else:
                return os.path.join(tht_dir, sdn_base)
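
# Illustration (hypothetical values): if the SDN env list resolves to
# 'neutron-opendaylight.yaml' and that base name appears in
# con.VALID_DOCKER_SERVICES, the mapped docker variant (or the same base name
# when the mapped value is None) is returned from the os_version-specific
# THT docker env dir, e.g. roughly:
#     get_docker_sdn_file({'sdn_controller': 'opendaylight',
#                          'os_version': 'queens'})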


def create_deploy_cmd(ds, ns, inv, tmp_dir,
                      virtual, env_file='opnfv-environment.yaml',
                      net_data=False):

    logging.info("Creating deployment command")
    deploy_options = ['network-environment.yaml']

    ds_opts = ds['deploy_options']

    if ds_opts['containers']:
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'docker.yaml'))

    if ds['global_params']['ha_enabled']:
        if ds_opts['containers']:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'docker-ha.yaml'))
        else:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'puppet-pacemaker.yaml'))

    if env_file:
        deploy_options.append(env_file)

    if ds_opts['containers']:
        deploy_options.append('docker-images.yaml')
        sdn_docker_file = get_docker_sdn_file(ds_opts)
        if sdn_docker_file:
            deploy_options.append(sdn_docker_file)
            deploy_options.append('sdn-images.yaml')
    else:
        deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)

    for k, v in OTHER_FILE_MAP.items():
        if k in ds_opts and ds_opts[k]:
            if ds_opts['containers']:
                deploy_options.append(os.path.join(con.THT_DOCKER_ENV_DIR,
                                                   "{}.yaml".format(k)))
            else:
                deploy_options.append(os.path.join(con.THT_ENV_DIR, v))

    if ds_opts['ceph'] and 'csit' not in env_file:
        prep_storage_env(ds, ns, virtual, tmp_dir)
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'storage-environment.yaml'))
    if ds_opts['sriov']:
        prep_sriov_env(ds, tmp_dir)

    if virtual:
        deploy_options.append('virtual-environment.yaml')
    else:
        deploy_options.append('baremetal-environment.yaml')

    num_control, num_compute = inv.get_node_counts()
    if num_control == 0 or num_compute == 0:
        logging.error("Detected 0 control or compute nodes. Control nodes: "
                      "{}, compute nodes: {}".format(num_control,
                                                     num_compute))
        raise ApexDeployException("Invalid number of control or computes")
    elif num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1
    if platform.machine() == 'aarch64':
        # aarch64 deploys were not completing in the default 90 mins.
        # Not sure if this is related to the hardware the OOO support
        # was developed on or to the virtualization support in CentOS.
        # Either way it will probably improve over time as the aarch64
        # support matures in CentOS, and deploy time should be re-tested
        # in the future so this multiplier can be removed.
        con.DEPLOY_TIMEOUT *= 2
    cmd = "openstack overcloud deploy --templates --timeout {} " \
          .format(con.DEPLOY_TIMEOUT)
    for option in deploy_options:
        cmd += " -e {}".format(option)
    cmd += " --ntp-server {}".format(ns['ntp'][0])
    cmd += " --control-scale {}".format(num_control)
    cmd += " --compute-scale {}".format(num_compute)
    cmd += ' --control-flavor control --compute-flavor compute'
    if net_data:
        cmd += ' --networks-file network_data.yaml'
    libvirt_type = 'kvm'
    if virtual:
        with open('/sys/module/kvm_intel/parameters/nested') as f:
            nested_kvm = f.read().strip()
            if nested_kvm != 'Y':
                libvirt_type = 'qemu'
    cmd += ' --libvirt-type {}'.format(libvirt_type)
    logging.info("Deploy command set: {}".format(cmd))

    with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
        fh.write(cmd)
    return cmd
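
# For illustration, a generated command (values depend on settings and node
# counts; the NTP server here is an example) looks roughly like:
#     openstack overcloud deploy --templates --timeout 90 \
#         -e network-environment.yaml -e opnfv-environment.yaml ... \
#         --ntp-server pool.ntp.org --control-scale 1 --compute-scale 1 \
#         --control-flavor control --compute-flavor compute --libvirt-type kvm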


def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
               patches=None):
    """
    Locates sdn image and preps for deployment.
    :param ds: deploy settings
    :param ns: network settings
    :param img: sdn image
    :param tmp_dir: dir to store modified sdn image
    :param root_pw: password to configure for overcloud image
    :param docker_tag: Docker image tag for RDO version (default None)
    :param patches: List of patches to apply to overcloud image
    :return: set of container image names that were patched
    """
    # TODO(trozet): Come up with a better way to organize this logic in this
    # function
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))

    ds_opts = ds['deploy_options']
    virt_cmds = list()
    sdn = ds_opts['sdn_controller']
    patched_containers = set()
    # we need this due to rhbz #1436021
    # fixed in systemd-219-37.el7
    if sdn is False:
        logging.info("Neutron openvswitch-agent disabled")
        virt_cmds.extend([
            {con.VIRT_RUN_CMD:
                "rm -f /etc/systemd/system/multi-user.target.wants/"
                "neutron-openvswitch-agent.service"},
            {con.VIRT_RUN_CMD:
                "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
                ".service"}
        ])

    if ns.get('http_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'http_proxy={}' >> /etc/environment".format(
                    ns['http_proxy'])})

    if ns.get('https_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'https_proxy={}' >> /etc/environment".format(
                    ns['https_proxy'])})

    if ds_opts['vpn']:
        virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
                "/opt/quagga/etc/init.d/zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
                              "zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.local"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
        logging.info("ZRPCD process started")

    dataplane = ds_opts['dataplane']
    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
        uio_types = {
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'):
                'uio_pci_generic'
        }
        for mod_file, mod in uio_types.items():
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))
            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
                    mod_file)},
                {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
                                   "{}".format(os.path.basename(mod_file))}
            ])
    if root_pw:
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})
    if ds_opts['sfc'] and dataplane == 'ovs':
        virt_cmds.extend([
            {con.VIRT_RUN_CMD: "yum -y install "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_KMOD_RPM)},
            {con.VIRT_RUN_CMD: "yum downgrade -y "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_RPM)}
        ])
    if dataplane == 'fdio':
        # Patch neutron to use the OVS external interface for the router
        # and add the generic linux NS interface driver
        virt_cmds.append(
            {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
                               "-p1 < neutron-patch-NSDriver.patch"})
        if sdn is False:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
                {con.VIRT_RUN_CMD: "yum install -y "
                                   "/root/nosdn_vpp_rpms/*.rpm"}
            ])

    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug("Temporary overcloud image stored as: {}".format(
        tmp_oc_image))

    # undercloud admin IP is needed by both the ODL injection and the
    # upstream patching paths below
    undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
        'installer_vm']['ip']

    if sdn == 'opendaylight':
        oc_builder.inject_opendaylight(
            odl_version=ds_opts['odl_version'],
            image=tmp_oc_image,
            tmp_dir=tmp_dir,
            uc_ip=undercloud_admin_ip,
            os_version=ds_opts['os_version'],
            docker_tag=docker_tag,
        )
        if docker_tag:
            patched_containers = patched_containers.union({'opendaylight'})

    if patches:
        if ds_opts['os_version'] == 'master':
            branch = ds_opts['os_version']
        else:
            branch = "stable/{}".format(ds_opts['os_version'])
        logging.info('Adding patches to overcloud')
        patched_containers = patched_containers.union(
            c_builder.add_upstream_patches(patches,
                                           tmp_oc_image, tmp_dir,
                                           branch,
                                           uc_ip=undercloud_admin_ip,
                                           docker_tag=docker_tag))
    # if containers with ceph, and no ceph device we need to use a
    # persistent loop device for Ceph OSDs
    if docker_tag and ds_opts['ceph_device'] == '/dev/loop3':
        tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
        with open(tmp_losetup, 'w') as fh:
            fh.write(LOSETUP_SERVICE)
        virt_cmds.extend([
            {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(
                tmp_losetup)},
            {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size 10G'},
            {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
            {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
        ])
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")
    return patched_containers
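
# Typical invocation (sketch; the image path and docker tag are examples,
# not fixed values):
#     patched = prep_image(ds, ns, '/images/overcloud-full.qcow2',
#                          '/tmp/apex', root_pw='secret',
#                          docker_tag='queens-rdo')
# A modified copy is staged as <tmp_dir>/overcloud-full.qcow2 and all queued
# virt-customize operations are applied to it.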


def make_ssh_key():
    """
    Creates public and private ssh keys with 1024 bit RSA encryption
    :return: private, public key
    """
    key = rsa.generate_private_key(
        backend=crypto_default_backend(),
        public_exponent=65537,
        key_size=1024
    )

    private_key = key.private_bytes(
        crypto_serialization.Encoding.PEM,
        crypto_serialization.PrivateFormat.PKCS8,
        crypto_serialization.NoEncryption())
    public_key = key.public_key().public_bytes(
        crypto_serialization.Encoding.OpenSSH,
        crypto_serialization.PublicFormat.OpenSSH
    )
    return private_key.decode('utf-8'), public_key.decode('utf-8')
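
# Example usage (as done in prep_env below); the OpenSSH-encoded public key
# begins with 'ssh-rsa':
#     private_key, public_key = make_ssh_key()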


def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
    """
    Creates modified opnfv/network environments for deployment
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param opnfv_env: file path for opnfv-environment file
    :param net_env: file path for network-environment file
    :param tmp_dir: Apex tmp dir
    :return: None
    """

    logging.info("Preparing opnfv-environment and network-environment files")
    ds_opts = ds['deploy_options']
    tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
    shutil.copyfile(opnfv_env, tmp_opnfv_env)
    tenant_nic_map = ns['networks']['tenant']['nic_mapping']
    tenant_nic = dict()
    tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
    tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
    external_nic_map = ns['networks']['external'][0]['nic_mapping']
    external_nic = dict()
    external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]

    # SSH keys
    private_key, public_key = make_ssh_key()

    # Make easier/faster variables to index in the file editor
    if 'performance' in ds_opts:
        perf = True
        # vpp
        if 'vpp' in ds_opts['performance']['Compute']:
            perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
        else:
            perf_vpp_comp = None
        if 'vpp' in ds_opts['performance']['Controller']:
            perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
        else:
            perf_vpp_ctrl = None

        # ovs
        if 'ovs' in ds_opts['performance']['Compute']:
            perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
        else:
            perf_ovs_comp = None

        # kernel
        if 'kernel' in ds_opts['performance']['Compute']:
            perf_kern_comp = ds_opts['performance']['Compute']['kernel']
        else:
            perf_kern_comp = None
    else:
        perf = False

    # Modify OPNFV environment
    # TODO: Change to build a dict and outputting yaml rather than parsing
    for line in fileinput.input(tmp_opnfv_env, inplace=True):
        line = line.strip('\n')
        output_line = line
        if 'CloudDomain' in line:
            output_line = "  CloudDomain: {}".format(ns['domain_name'])
        elif 'replace_private_key' in line:
            output_line = "    private_key: |\n"
            key_out = ''
            for line in private_key.splitlines():
                key_out += "      {}\n".format(line)
            output_line += key_out
        elif 'replace_public_key' in line:
            output_line = "    public_key: '{}'".format(public_key)
        elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
                'resource_registry' in line:
            output_line = "resource_registry:\n" \
                          "  OS::TripleO::NodeUserData: first-boot.yaml"
        elif 'ComputeExtraConfigPre' in line and \
                ds_opts['dataplane'] == 'ovs_dpdk':
            output_line = '  OS::TripleO::ComputeExtraConfigPre: ' \
                          './ovs-dpdk-preconfig.yaml'

        if ds_opts['sdn_controller'] == 'opendaylight' and \
                'odl_vpp_routing_node' in ds_opts:
            if 'opendaylight::vpp_routing_node' in line:
                output_line = ("    opendaylight::vpp_routing_node: {}.{}"
                               .format(ds_opts['odl_vpp_routing_node'],
                                       ns['domain_name']))
        elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
            if 'NeutronVPPAgentPhysnets' in line:
                # VPP interface tap0 will be used for external network
                # connectivity
                output_line = ("  NeutronVPPAgentPhysnets: "
                               "'datacentre:{},external:tap0'"
                               .format(tenant_nic['Controller']))
        elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
                'dvr') is True:
            if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
                output_line = ''
            elif 'NeutronDhcpAgentsPerNetwork' in line:
                num_control, num_compute = inv.get_node_counts()
                output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
                               .format(num_compute))
            elif 'ComputeServices' in line:
                output_line = ("  ComputeServices:\n"
                               "    - OS::TripleO::Services::NeutronDhcpAgent")

        # SRIOV networks are VLAN based provider networks. In order to
        # simplify the deployment, nfv_sriov will be the default physnet.
        # VLANs are not needed in advance, and the user will have to create
        # the network specifying the segmentation-id.
        if ds_opts['sriov']:
            if 'NeutronNetworkVLANRanges' in line:
                output_line = ("{},nfv_sriov'".format(line[:-1]))

        if perf:
            for role in 'NovaCompute', 'Controller':
                if role == 'NovaCompute':
                    perf_opts = perf_vpp_comp
                else:
                    perf_opts = perf_vpp_ctrl
                cfg = "{}ExtraConfig".format(role)
                if cfg in line and perf_opts:
                    perf_line = ''
                    if 'main-core' in perf_opts:
                        perf_line += ("\n    "
                                      "fdio::vpp_cpu_main_core: '{}'"
                                      .format(perf_opts['main-core']))
                    if 'corelist-workers' in perf_opts:
                        perf_line += ("\n    "
                                      "fdio::vpp_cpu_corelist_workers: '{}'"
                                      .format(perf_opts['corelist-workers']))
                    if ds_opts['sdn_controller'] == 'opendaylight' and \
                            ds_opts['dataplane'] == 'fdio':
                        if role == 'NovaCompute':
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface',"
                                          "'{}:public-interface']"
                                          .format(tenant_nic[role],
                                                  external_nic[role]))
                        else:
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface']"
                                          .format(tenant_nic[role]))
                    if perf_line:
                        output_line = ("  {}:{}".format(cfg, perf_line))

            if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
                for k, v in OVS_PERF_MAP.items():
                    if k in line and v in perf_ovs_comp:
                        output_line = "  {}: '{}'".format(k, perf_ovs_comp[v])

            # kernel args
            # (FIXME) use compute's kernel settings for all nodes for now.
            if perf_kern_comp:
                if 'NovaSchedulerDefaultFilters' in line:
                    output_line = \
                        "  NovaSchedulerDefaultFilters: 'RamFilter," \
                        "ComputeFilter,AvailabilityZoneFilter," \
                        "ComputeCapabilitiesFilter," \
                        "ImagePropertiesFilter,NUMATopologyFilter'"
                elif 'ComputeKernelArgs' in line:
                    kernel_args = ''
                    for k, v in perf_kern_comp.items():
                        kernel_args += "{}={} ".format(k, v)
                    if kernel_args:
                        output_line = "  ComputeKernelArgs: '{}'".\
                            format(kernel_args)

        print(output_line)

    logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
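
# Illustrative rewrites performed by the loop above (values are examples,
# not taken from a real template):
#     'CloudDomain: replace'  ->  '  CloudDomain: opnfvlf.org'
#     'replace_public_key'    ->  "    public_key: 'ssh-rsa AAAA...'"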


def generate_ceph_key():
    key = os.urandom(16)
    header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
    return base64.b64encode(header + key)
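
# The packed header is 12 bytes ('<hiih': type, seconds, nanoseconds, key
# length) followed by 16 random bytes, so the base64 result looks like a
# typical Ceph secret, e.g. starting with 'AQ':
#     generate_ceph_key()  # -> b'AQ...'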


def prep_storage_env(ds, ns, virtual, tmp_dir):
    """
    Creates storage environment file for deployment. Source file is copied
    by undercloud playbook to host.
    :param ds: deploy settings
    :param ns: network settings
    :param virtual: True if virtual deployment
    :param tmp_dir: deployment temp dir
    :return: None
    """
    ds_opts = ds['deploy_options']
    storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
    if not os.path.isfile(storage_file):
        logging.error("storage-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("storage-environment file not copied from "
                                  "undercloud")
    for line in fileinput.input(storage_file, inplace=True):
        line = line.strip('\n')
        if 'CephClusterFSID' in line:
            print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
        elif 'CephMonKey' in line:
            print("  CephMonKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephAdminKey' in line:
            print("  CephAdminKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephClientKey' in line:
            print("  CephClientKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        else:
            print(line)

    if ds_opts['containers']:
        undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
            'installer_vm']['ip']
        ceph_version = con.CEPH_VERSION_MAP[ds_opts['os_version']]
        docker_image = "{}:8787/ceph/daemon:tag-build-master-" \
                       "{}-centos-7".format(undercloud_admin_ip,
                                            ceph_version)
        ceph_params = {
            'DockerCephDaemonImage': docker_image,
        }

        # max pgs allowed are calculated as num_mons * 200, so set the
        # default pool size and pg num so that the total
        # (num_pgs * num_pools * num_osds) stays below that limit
        ceph_params['CephPoolDefaultSize'] = 2
        ceph_params['CephPoolDefaultPgNum'] = 32
        if virtual:
            ceph_params['CephAnsibleExtraConfig'] = {
                'centos_package_dependencies': [],
                'ceph_osd_docker_memory_limit': '1g',
                'ceph_mds_docker_memory_limit': '1g',
            }
        ceph_device = ds_opts['ceph_device']
        ceph_params['CephAnsibleDisksConfig'] = {
            'devices': [ceph_device],
            'osd_scenario': 'collocated'
        }
        utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
    # TODO(trozet): remove following block as we only support containers now
    elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
        with open(storage_file, 'a') as fh:
            fh.write('  ExtraConfig:\n')
            fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
                ds_opts['ceph_device']
            ))
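
# In the containers case the edited storage-environment.yaml ends up with
# parameter_defaults similar to (illustrative values):
#     DockerCephDaemonImage: 192.0.2.1:8787/ceph/daemon:tag-build-master-...
#     CephPoolDefaultSize: 2
#     CephPoolDefaultPgNum: 32
#     CephAnsibleDisksConfig: {devices: [/dev/loop3],
#                              osd_scenario: collocated}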


def prep_sriov_env(ds, tmp_dir):
    """
    Creates SRIOV environment file for deployment. Source file is copied by
    undercloud playbook to host.
    :param ds: deploy settings
    :param tmp_dir: deployment temp dir
    :return: None
    """
    ds_opts = ds['deploy_options']
    sriov_iface = ds_opts['sriov']
    sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
    if not os.path.isfile(sriov_file):
        logging.error("sriov-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("sriov-environment file not copied from "
                                  "undercloud")
    # TODO(rnoriega): Instead of line editing, refactor this code to load
    # yaml file into a dict, edit it and write the file back.
    for line in fileinput.input(sriov_file, inplace=True):
        line = line.strip('\n')
        if 'NovaSchedulerDefaultFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NovaSchedulerAvailableFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NeutronPhysicalDevMappings' in line:
            print("  NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
                  .format(sriov_iface))
        elif 'NeutronSriovNumVFs' in line:
            print("  NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
        elif 'NovaPCIPassthrough' in line:
            print("  NovaPCIPassthrough:")
        elif 'devname' in line:
            print("    - devname: \"{}\"".format(sriov_iface))
        elif 'physical_network' in line:
            print("      physical_network: \"nfv_sriov\"")
        else:
            print(line)
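
# Example rewrites (with a hypothetical sriov interface 'ens4'):
#     NeutronPhysicalDevMappings: "nfv_sriov:ens4"
#     NeutronSriovNumVFs: "ens4:8"
#     - devname: "ens4"
#       physical_network: "nfv_sriov"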


def external_network_cmds(ns, ds):
    """
    Generates external network openstack commands
    :param ns: network settings
    :param ds: deploy settings
    :return: list of commands to configure external network
    """
    ds_opts = ds['deploy_options']
    external_physnet = 'datacentre'
    if ds_opts['dataplane'] == 'fdio' and \
            ds_opts['sdn_controller'] != 'opendaylight':
        external_physnet = 'external'
    if 'external' in ns.enabled_network_list:
        net_config = ns['networks']['external'][0]
        external = True
        pool_start, pool_end = net_config['floating_ip_range']
    else:
        net_config = ns['networks']['admin']
        external = False
        pool_start, pool_end = ns['apex']['networks']['admin'][
            'introspection_range']
    nic_config = net_config['nic_mapping']
    gateway = net_config['gateway']
    cmds = list()
    # create network command
    if nic_config['compute']['vlan'] == 'native':
        ext_type = 'flat'
    else:
        ext_type = "vlan --provider-segment {}".format(nic_config[
            'compute']['vlan'])
    cmds.append("openstack network create external --project service "
                "--external --provider-network-type {} "
                "--provider-physical-network {}"
                .format(ext_type, external_physnet))
    # create subnet command
    cidr = net_config['cidr']
    subnet_cmd = "openstack subnet create external-subnet --project " \
                 "service --network external --no-dhcp --gateway {} " \
                 "--allocation-pool start={},end={} --subnet-range " \
                 "{}".format(gateway, pool_start, pool_end, str(cidr))
    if external and cidr.version == 6:
        subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
                      '--ipv6-address-mode slaac'
    cmds.append(subnet_cmd)
    logging.debug("Neutron external network commands determined "
                  "as: {}".format(cmds))
    return cmds


def create_congress_cmds(overcloud_file):
    drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
    overcloudrc = parsers.parse_overcloudrc(overcloud_file)
    logging.info("Creating congress commands")
    try:
        ds_cfg = [
            "username={}".format(overcloudrc['OS_USERNAME']),
            "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
            "password={}".format(overcloudrc['OS_PASSWORD']),
            "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
        ]
    except KeyError:
        logging.error("Unable to find all keys required for congress in "
                      "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
                      "OS_PASSWORD, OS_AUTH_URL. Please check overcloudrc "
                      "file: {}".format(overcloud_file))
        raise
    cmds = list()
    ds_cfg = '--config ' + ' --config '.join(ds_cfg)

    for driver in drivers:
        if driver == 'doctor':
            cmd = "{} \"{}\"".format(driver, driver)
        else:
            cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
        if driver == 'nova':
            cmd += ' --config api_version="2.34"'
        logging.debug("Congress command created: {}".format(cmd))
        cmds.append(cmd)
    return cmds
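
# Example command string produced for the nova driver (credential values come
# from the parsed overcloudrc; shown here with placeholders):
#     nova "nova" --config username=admin --config tenant_name=admin
#         --config password=<pw> --config auth_url=<keystone-url>
#         --config api_version="2.34"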