##############################################################################
# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
import base64
import fileinput
import logging
import os
import platform
import shutil
import struct
import time
import uuid

import apex.builders.overcloud_builder as oc_builder
import apex.builders.common_builder as c_builder
from apex.common import constants as con
from apex.common.exceptions import ApexDeployException
from apex.common import parsers
from apex.common import utils
from apex.virtual import utils as virt_utils
from cryptography.hazmat.primitives import serialization as \
    crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend as \
    crypto_default_backend


SDN_FILE_MAP = {
    'opendaylight': {
        'sfc': 'neutron-sfc-opendaylight.yaml',
        'vpn': 'neutron-bgpvpn-opendaylight.yaml',
        'gluon': 'gluon.yaml',
        'vpp': {
            'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
            'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
            'default': 'neutron-opendaylight-honeycomb.yaml'
        },
        'l2gw': 'neutron-l2gw-opendaylight.yaml',
        'sriov': 'neutron-opendaylight-sriov.yaml',
        'default': 'neutron-opendaylight.yaml',
    },
    'onos': {
        'sfc': 'neutron-onos-sfc.yaml',
        'default': 'neutron-onos.yaml'
    },
    'ovn': 'neutron-ml2-ovn.yaml',
    False: {
        'vpp': 'neutron-ml2-vpp.yaml',
        'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
    }
}

OTHER_FILE_MAP = {
    'tacker': 'enable_tacker.yaml',
    'congress': 'enable_congress.yaml',
    'barometer': 'enable_barometer.yaml',
    'rt_kvm': 'enable_rt_kvm.yaml'
}

OVS_PERF_MAP = {
    'HostCpusList': 'dpdk_cores',
    'NeutronDpdkCoreList': 'pmd_cores',
    'NeutronDpdkSocketMemory': 'socket_memory',
    'NeutronDpdkMemoryChannels': 'memory_channels'
}

OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
                      ".noarch.rpm"

LOOP_DEVICE_SIZE = "10G"

LOSETUP_SERVICE = """[Unit]
Description=Setup loop devices
Before=network.target

[Service]
Type=oneshot
ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
ExecStop=/sbin/losetup -d /dev/loop3
TimeoutSec=60
RemainAfterExit=yes

[Install]
WantedBy=multi-user.target
"""


def build_sdn_env_list(ds, sdn_map, env_list=None):
    """
    Builds a list of SDN environment files to be used in the deploy cmd.

    This function recursively searches an sdn_map. First the SDN controller
    is matched, and then the function looks for enabled features of that
    controller to determine which environment files should be used. By
    default a feature is added to the list when it is set to True in the
    deploy settings. If a feature's map value is not a boolean but a tuple,
    it is treated as (deploy setting value, env file), and the env file is
    only added when the setting matches.

    :param ds: deploy settings
    :param sdn_map: SDN map to recursively search
    :param env_list: recursive var to hold previously found env_list
    :return: A list of env files
    """
    if env_list is None:
        env_list = list()
    for k, v in sdn_map.items():
        if ds['sdn_controller'] == k or (k in ds and ds[k]):
            if isinstance(v, dict):
                # Append default SDN env file first
                # The assumption is that feature-enabled SDN env files
                # override and do not conflict with previously set default
                # settings
                if ds['sdn_controller'] == k and 'default' in v:
                    env_list.append(os.path.join(con.THT_ENV_DIR,
                                                 v['default']))
                env_list.extend(build_sdn_env_list(ds, v))
            # check if the value is not a boolean
            elif isinstance(v, tuple):
                if ds[k] == v[0]:
                    env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
            else:
                env_list.append(os.path.join(con.THT_ENV_DIR, v))
    if len(env_list) == 0:
        try:
            env_list.append(os.path.join(
                con.THT_ENV_DIR, sdn_map['default']))
        except KeyError:
            logging.warning("Unable to find default file for SDN")

    return env_list
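

# Example (illustrative): with deploy settings such as
#   {'sdn_controller': 'opendaylight', 'sfc': True, ...}
# build_sdn_env_list(ds, SDN_FILE_MAP) resolves to
#   [THT_ENV_DIR/neutron-opendaylight.yaml,
#    THT_ENV_DIR/neutron-sfc-opendaylight.yaml]
# i.e. the controller's default env file first, then one file per enabled
# feature.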


def get_docker_sdn_file(ds_opts):
    """
    Returns docker env file for detected SDN
    :param ds_opts: deploy options
    :return: docker THT env file for an SDN
    """
    # FIXME(trozet): We assume right now there is only one docker SDN file
    docker_services = con.VALID_DOCKER_SERVICES
    tht_dir = con.THT_DOCKER_ENV_DIR[ds_opts['os_version']]
    sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
    for sdn_file in sdn_env_list:
        sdn_base = os.path.basename(sdn_file)
        if sdn_base in docker_services:
            if docker_services[sdn_base] is not None:
                return os.path.join(tht_dir,
                                    docker_services[sdn_base])
            else:
                return os.path.join(tht_dir, sdn_base)


def create_deploy_cmd(ds, ns, inv, tmp_dir,
                      virtual, env_file='opnfv-environment.yaml',
                      net_data=False):

    logging.info("Creating deployment command")
    deploy_options = ['network-environment.yaml']

    ds_opts = ds['deploy_options']

    if ds_opts['containers']:
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'docker.yaml'))

    if ds['global_params']['ha_enabled']:
        if ds_opts['containers']:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'docker-ha.yaml'))
        else:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'puppet-pacemaker.yaml'))

    deploy_options.append(env_file)

    if ds_opts['containers']:
        deploy_options.append('docker-images.yaml')
        sdn_docker_file = get_docker_sdn_file(ds_opts)
        if sdn_docker_file:
            deploy_options.append(sdn_docker_file)
            deploy_options.append('sdn-images.yaml')
    else:
        deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)

    for k, v in OTHER_FILE_MAP.items():
        if k in ds_opts and ds_opts[k]:
            if ds_opts['containers']:
                deploy_options.append(os.path.join(con.THT_DOCKER_ENV_DIR,
                                                   "{}.yaml".format(k)))
            else:
                deploy_options.append(os.path.join(con.THT_ENV_DIR, v))

    if ds_opts['ceph'] and 'csit' not in env_file:
        prep_storage_env(ds, ns, virtual, tmp_dir)
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'storage-environment.yaml'))
    if ds_opts['sriov']:
        prep_sriov_env(ds, tmp_dir)

    # Check for 'k8s' here intentionally, as we may support other values
    # such as openstack/openshift for 'vim' option.
    if ds_opts['vim'] == 'k8s':
        deploy_options.append('kubernetes-environment.yaml')

    if virtual:
        deploy_options.append('virtual-environment.yaml')
    else:
        deploy_options.append('baremetal-environment.yaml')

    num_control, num_compute = inv.get_node_counts()
    if num_control == 0 or num_compute == 0:
        logging.error("Detected 0 control or compute nodes. Control nodes: "
                      "{}, compute nodes: {}".format(num_control,
                                                     num_compute))
        raise ApexDeployException("Invalid number of control or computes")
    elif num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1
    if platform.machine() == 'aarch64':
        # aarch64 deploys were not completing in the default 90 mins.
        # Not sure if this is related to the hardware the OOO support
        # was developed on or the virtualization support in CentOS.
        # Either way it will probably get better over time as the aarch64
        # support matures in CentOS, and deploy time should be retested in
        # the future so this multiplier can be removed.
        con.DEPLOY_TIMEOUT *= 2
    cmd = "openstack overcloud deploy --templates --timeout {} " \
          .format(con.DEPLOY_TIMEOUT)

    for option in deploy_options:
        cmd += " -e {}".format(option)
    cmd += " --ntp-server {}".format(ns['ntp'][0])
    cmd += " --control-scale {}".format(num_control)
    cmd += " --compute-scale {}".format(num_compute)
    cmd += ' --control-flavor control --compute-flavor compute'
    if net_data:
        cmd += ' --networks-file network_data.yaml'
    libvirt_type = 'kvm'
    if virtual:
        with open('/sys/module/kvm_intel/parameters/nested') as f:
            nested_kvm = f.read().strip()
            if nested_kvm != 'Y':
                libvirt_type = 'qemu'
    cmd += ' --libvirt-type {}'.format(libvirt_type)
    logging.info("Deploy command set: {}".format(cmd))

    with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
        fh.write(cmd)
    return cmd
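

# A generated command looks roughly like the following (illustrative only;
# the -e options vary with the deploy settings):
#   openstack overcloud deploy --templates --timeout 90 \
#     -e network-environment.yaml -e opnfv-environment.yaml ... \
#     --ntp-server 0.pool.ntp.org --control-scale 1 --compute-scale 1 \
#     --control-flavor control --compute-flavor compute --libvirt-type kvm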


def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
               patches=None):
    """
    Locates sdn image and preps for deployment.
    :param ds: deploy settings
    :param ns: network settings
    :param img: sdn image
    :param tmp_dir: dir to store modified sdn image
    :param root_pw: password to configure for overcloud image
    :param docker_tag: Docker image tag for RDO version (default None)
    :param patches: List of patches to apply to overcloud image
    :return: set of container names that were patched
    """
    # TODO(trozet): Come up with a better way to organize this logic in this
    # function
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))

    ds_opts = ds['deploy_options']
    virt_cmds = list()
    sdn = ds_opts['sdn_controller']
    patched_containers = set()
    # we need this due to rhbz #1436021
    # fixed in systemd-219-37.el7
    if sdn == 'opendaylight':
        logging.info("Neutron openvswitch-agent disabled")
        virt_cmds.extend([
            {con.VIRT_RUN_CMD:
                "rm -f /etc/systemd/system/multi-user.target.wants/"
                "neutron-openvswitch-agent.service"},
            {con.VIRT_RUN_CMD:
                "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
                ".service"}
        ])

    if ns.get('http_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'http_proxy={}' >> /etc/environment".format(
                    ns['http_proxy'])})

    if ns.get('https_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'https_proxy={}' >> /etc/environment".format(
                    ns['https_proxy'])})

    if ds_opts['vpn']:
        virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
                "/opt/quagga/etc/init.d/zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
                              "zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.local "})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
        logging.info("ZRPCD configured to start at boot")

    dataplane = ds_opts['dataplane']
    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
        uio_types = {
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'):
                'uio_pci_generic'
        }
        for mod_file, mod in uio_types.items():
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))

            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
                    mod_file)},
                {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
                                   "{}".format(os.path.basename(mod_file))}
            ])

    if root_pw:
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})

    if dataplane == 'ovs':
        if ds_opts['sfc']:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum -y install "
                                   "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                                   "{}".format(OVS_NSH_KMOD_RPM)},
                {con.VIRT_RUN_CMD: "yum downgrade -y "
                                   "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                                   "{}".format(OVS_NSH_RPM)}
            ])
        elif sdn == 'opendaylight':
            # FIXME(trozet) remove this after RDO is updated with fix for
            # https://bugzilla.redhat.com/show_bug.cgi?id=1544892
            ovs_file = os.path.basename(con.CUSTOM_OVS)
            ovs_url = con.CUSTOM_OVS.replace(ovs_file, '')
            utils.fetch_upstream_and_unpack(dest=tmp_dir, url=ovs_url,
                                            targets=[ovs_file])
            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/root/".format(os.path.join(
                    tmp_dir, ovs_file))},
                {con.VIRT_RUN_CMD: "yum downgrade -y /root/{}".format(
                    ovs_file)}
            ])

    if dataplane == 'fdio':
        # Patch neutron with using OVS external interface for router
        # and add generic linux NS interface driver
        virt_cmds.append(
            {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
                               "-p1 < neutron-patch-NSDriver.patch"})
        if sdn is False:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
                {con.VIRT_RUN_CMD: "yum install -y "
                                   "/root/nosdn_vpp_rpms/*.rpm"}
            ])

    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug("Temporary overcloud image stored as: {}".format(
        tmp_oc_image))

    if sdn == 'opendaylight':
        undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
            'installer_vm']['ip']
        oc_builder.inject_opendaylight(
            odl_version=ds_opts['odl_version'],
            image=tmp_oc_image,
            tmp_dir=tmp_dir,
            uc_ip=undercloud_admin_ip,
            os_version=ds_opts['os_version'],
            docker_tag=docker_tag,
        )
        if docker_tag:
            patched_containers = patched_containers.union({'opendaylight'})

    if patches:
        if ds_opts['os_version'] == 'master':
            branch = ds_opts['os_version']
        else:
            branch = "stable/{}".format(ds_opts['os_version'])
        logging.info('Adding patches to overcloud')
        patched_containers = patched_containers.union(
            c_builder.add_upstream_patches(patches,
                                           tmp_oc_image, tmp_dir,
                                           branch,
                                           uc_ip=undercloud_admin_ip,
                                           docker_tag=docker_tag))

    # if containers with ceph, and no ceph device we need to use a
    # persistent loop device for Ceph OSDs
    if docker_tag and ds_opts['ceph_device'] == '/dev/loop3':
        tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
        with open(tmp_losetup, 'w') as fh:
            fh.write(LOSETUP_SERVICE)
        virt_cmds.extend([
            {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(
                tmp_losetup)},
            {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size {}'
                .format(LOOP_DEVICE_SIZE)},
            {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
            {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
        ])
    # TODO(trozet) remove this after LP#173474 is fixed
    dhcp_unit = '/usr/lib/systemd/system/dhcp-interface@.service'
    virt_cmds.append(
        {con.VIRT_RUN_CMD: "crudini --del {} Unit "
                           "ConditionPathExists".format(dhcp_unit)})
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")
    return patched_containers


def make_ssh_key():
    """
    Creates public and private ssh keys with 1024 bit RSA encryption
    :return: private, public key
    """
    key = rsa.generate_private_key(
        backend=crypto_default_backend(),
        public_exponent=65537,
        key_size=1024
    )

    private_key = key.private_bytes(
        crypto_serialization.Encoding.PEM,
        crypto_serialization.PrivateFormat.PKCS8,
        crypto_serialization.NoEncryption())
    public_key = key.public_key().public_bytes(
        crypto_serialization.Encoding.OpenSSH,
        crypto_serialization.PublicFormat.OpenSSH
    )
    return private_key.decode('utf-8'), public_key.decode('utf-8')
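

# Usage sketch (illustrative): prep_env() below injects the generated pair
# into the opnfv-environment file:
#   private_key, public_key = make_ssh_key()
#   # private_key fills the 'replace_private_key' marker, public_key fills
#   # the 'replace_public_key' marker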


def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
    """
    Creates modified opnfv/network environments for deployment
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param opnfv_env: file path for opnfv-environment file
    :param net_env: file path for network-environment file
    :param tmp_dir: Apex tmp dir
    :return: None
    """

    logging.info("Preparing opnfv-environment and network-environment files")
    ds_opts = ds['deploy_options']
    tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
    shutil.copyfile(opnfv_env, tmp_opnfv_env)
    tenant_nic_map = ns['networks']['tenant']['nic_mapping']
    tenant_nic = dict()
    tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
    tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
    external_nic_map = ns['networks']['external'][0]['nic_mapping']
    external_nic = dict()
    external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]

    # SSH keys
    private_key, public_key = make_ssh_key()

    # Make easier/faster variables to index in the file editor
    if 'performance' in ds_opts:
        perf = True
        # vpp
        if 'vpp' in ds_opts['performance']['Compute']:
            perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
        else:
            perf_vpp_comp = None
        if 'vpp' in ds_opts['performance']['Controller']:
            perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
        else:
            perf_vpp_ctrl = None

        # ovs
        if 'ovs' in ds_opts['performance']['Compute']:
            perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
        else:
            perf_ovs_comp = None

        # kernel
        if 'kernel' in ds_opts['performance']['Compute']:
            perf_kern_comp = ds_opts['performance']['Compute']['kernel']
        else:
            perf_kern_comp = None
    else:
        perf = False

    tenant_settings = ns['networks']['tenant']
    tenant_vlan_enabled = 'tenant' in ns.enabled_network_list and \
        ns['networks']['tenant'].get('segmentation_type') == 'vlan'

    # Modify OPNFV environment
    # TODO: Change to build a dict and outputting yaml rather than parsing
    for line in fileinput.input(tmp_opnfv_env, inplace=True):
        line = line.strip('\n')
        output_line = line
        if 'CloudDomain' in line:
            output_line = "  CloudDomain: {}".format(ns['domain_name'])
        elif 'replace_private_key' in line:
            output_line = "    private_key: |\n"
            key_out = ''
            for line in private_key.splitlines():
                key_out += "        {}\n".format(line)
            output_line += key_out
        elif 'replace_public_key' in line:
            output_line = "    public_key: '{}'".format(public_key)
        elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
                'resource_registry' in line:
            output_line = "resource_registry:\n" \
                          "  OS::TripleO::NodeUserData: first-boot.yaml"
        elif 'ComputeExtraConfigPre' in line and \
                ds_opts['dataplane'] == 'ovs_dpdk':
            output_line = '  OS::TripleO::ComputeExtraConfigPre: ' \
                          './ovs-dpdk-preconfig.yaml'
        elif 'NeutronNetworkVLANRanges' in line:
            vlan_setting = ''
            if tenant_vlan_enabled:
                if ns['networks']['tenant']['overlay_id_range']:
                    vlan_setting = ns['networks']['tenant']['overlay_id_range']
                    if 'datacentre' not in vlan_setting:
                        vlan_setting += ',datacentre:1:1000'
            # SRIOV networks are VLAN based provider networks. In order to
            # simplify the deployment, nfv_sriov will be the default physnet.
            # VLANs are not needed in advance, and the user will have to
            # create the network specifying the segmentation-id.
            if ds_opts['sriov']:
                if vlan_setting:
                    vlan_setting += ",nfv_sriov"
                else:
                    vlan_setting = "datacentre:1:1000,nfv_sriov"
            if vlan_setting:
                output_line = "  NeutronNetworkVLANRanges: " + vlan_setting
        elif 'NeutronBridgeMappings' in line and tenant_vlan_enabled:
            if tenant_settings['overlay_id_range']:
                physnets = tenant_settings['overlay_id_range'].split(',')
                output_line = "  NeutronBridgeMappings: "
                for physnet in physnets:
                    physnet_name = physnet.split(':')[0]
                    if physnet_name != 'datacentre':
                        output_line += "{}:br-vlan,".format(physnet_name)
                output_line += "datacentre:br-ex"
        elif 'OpenDaylightProviderMappings' in line and tenant_vlan_enabled \
                and ds_opts['sdn_controller'] == 'opendaylight':
            if tenant_settings['overlay_id_range']:
                physnets = tenant_settings['overlay_id_range'].split(',')
                output_line = "  OpenDaylightProviderMappings: "
                for physnet in physnets:
                    physnet_name = physnet.split(':')[0]
                    if physnet_name != 'datacentre':
                        output_line += "{}:br-vlan,".format(physnet_name)
                output_line += "datacentre:br-ex"
        elif 'NeutronNetworkType' in line and tenant_vlan_enabled:
            output_line = "  NeutronNetworkType: vlan\n" \
                          "  NeutronTunnelTypes: ''"

        if ds_opts['sdn_controller'] == 'opendaylight' and \
                'odl_vpp_routing_node' in ds_opts:
            if 'opendaylight::vpp_routing_node' in line:
                output_line = ("    opendaylight::vpp_routing_node: {}.{}"
                               .format(ds_opts['odl_vpp_routing_node'],
                                       ns['domain_name']))
        elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
            if 'NeutronVPPAgentPhysnets' in line:
                # VPP interface tap0 will be used for external network
                # connectivity.
                output_line = ("  NeutronVPPAgentPhysnets: "
                               "'datacentre:{},external:tap0'"
                               .format(tenant_nic['Controller']))
        elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
                'dvr') is True:
            if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
                output_line = ''
            elif 'NeutronDhcpAgentsPerNetwork' in line:
                num_control, num_compute = inv.get_node_counts()
                output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
                               .format(num_compute))
            elif 'ComputeServices' in line:
                output_line = ("  ComputeServices:\n"
                               "    - OS::TripleO::Services::NeutronDhcpAgent")

        if perf:
            for role in 'NovaCompute', 'Controller':
                if role == 'NovaCompute':
                    perf_opts = perf_vpp_comp
                else:
                    perf_opts = perf_vpp_ctrl
                cfg = "{}ExtraConfig".format(role)
                if cfg in line and perf_opts:
                    perf_line = ''
                    if 'main-core' in perf_opts:
                        perf_line += ("\n    fdio::vpp_cpu_main_core: '{}'"
                                      .format(perf_opts['main-core']))
                    if 'corelist-workers' in perf_opts:
                        perf_line += ("\n    "
                                      "fdio::vpp_cpu_corelist_workers: '{}'"
                                      .format(perf_opts['corelist-workers']))
                    if ds_opts['sdn_controller'] == 'opendaylight' and \
                            ds_opts['dataplane'] == 'fdio':
                        if role == 'NovaCompute':
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface',"
                                          "'{}:public-interface']"
                                          .format(tenant_nic[role],
                                                  external_nic[role]))
                        else:
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface']"
                                          .format(tenant_nic[role]))
                    if perf_line:
                        output_line = ("  {}:{}".format(cfg, perf_line))

            if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
                for k, v in OVS_PERF_MAP.items():
                    if k in line and v in perf_ovs_comp:
                        output_line = "  {}: '{}'".format(k, perf_ovs_comp[v])

            # kernel args
            # (FIXME) use compute's kernel settings for all nodes for now.
            if perf_kern_comp:
                if 'NovaSchedulerDefaultFilters' in line:
                    output_line = \
                        "  NovaSchedulerDefaultFilters: 'RamFilter," \
                        "ComputeFilter,AvailabilityZoneFilter," \
                        "ComputeCapabilitiesFilter," \
                        "ImagePropertiesFilter,NUMATopologyFilter'"
                elif 'ComputeKernelArgs' in line:
                    kernel_args = ''
                    for k, v in perf_kern_comp.items():
                        kernel_args += "{}={} ".format(k, v)
                    if kernel_args:
                        output_line = "  ComputeKernelArgs: '{}'".\
                            format(kernel_args)

        print(output_line)

    logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))


def generate_ceph_key():
    key = os.urandom(16)
    header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
    return base64.b64encode(header + key)
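

# The packed header mirrors the binary layout of a cephx secret: little-endian
# type (1), creation time in seconds and nanoseconds (0 here), and key length,
# followed by the 16 random key bytes. The base64 result is what lands in the
# Ceph*Key fields below, e.g. (shape only, not a real key):
#   CephMonKey: AQDLRs1aAAAAABAAZ9beBBmcnWdPD9UTnnBPCw==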


def prep_storage_env(ds, ns, virtual, tmp_dir):
    """
    Creates storage environment file for deployment. Source file is copied by
    undercloud playbook to host.
    :param ds: deploy settings
    :param ns: network settings
    :param virtual: True if virtual deployment
    :param tmp_dir: Apex tmp dir
    :return: None
    """
    ds_opts = ds['deploy_options']
    storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
    if not os.path.isfile(storage_file):
        logging.error("storage-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("storage-environment file not copied from "
                                  "undercloud")
    for line in fileinput.input(storage_file, inplace=True):
        line = line.strip('\n')
        if 'CephClusterFSID' in line:
            print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
        elif 'CephMonKey' in line:
            print("  CephMonKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephAdminKey' in line:
            print("  CephAdminKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephClientKey' in line:
            print("  CephClientKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        else:
            print(line)

    if ds_opts['containers']:
        ceph_params = {}

        # Ceph allows at most num_mons * 200 PGs, so choose the pool size
        # and PG number such that the total (num_pgs * num_pools * num_osds)
        # stays below that limit.
        ceph_params['CephPoolDefaultSize'] = 2
        ceph_params['CephPoolDefaultPgNum'] = 32
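
        # Worked example (assuming a minimal virtual deploy with one mon and
        # one OSD): the ceiling is 1 * 200 = 200 PGs, and 32 PGs across the
        # ~6 default pools on 1 OSD gives 32 * 6 * 1 = 192, just below it.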

        if virtual:
            ceph_params['CephAnsibleExtraConfig'] = {
                'centos_package_dependencies': [],
                'ceph_osd_docker_memory_limit': '1g',
                'ceph_mds_docker_memory_limit': '1g',
            }

        ceph_device = ds_opts['ceph_device']
        ceph_params['CephAnsibleDisksConfig'] = {
            'devices': [ceph_device],
            'journal_size': 512,
            'osd_scenario': 'collocated'
        }
        utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
    # TODO(trozet): remove following block as we only support containers now
    elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
        with open(storage_file, 'a') as fh:
            fh.write('  ExtraConfig:\n')
            fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
                ds_opts['ceph_device']
            ))


def prep_sriov_env(ds, tmp_dir):
    """
    Creates SRIOV environment file for deployment. Source file is copied by
    undercloud playbook to host.
    :param ds: deploy settings
    :param tmp_dir: Apex tmp dir
    :return: None
    """
    ds_opts = ds['deploy_options']
    sriov_iface = ds_opts['sriov']
    sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
    if not os.path.isfile(sriov_file):
        logging.error("sriov-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("sriov-environment file not copied from "
                                  "undercloud")

    # TODO(rnoriega): Instead of line editing, refactor this code to load
    # yaml file into a dict, edit it and write the file back.
    for line in fileinput.input(sriov_file, inplace=True):
        line = line.strip('\n')
        if 'NovaSchedulerDefaultFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NovaSchedulerAvailableFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NeutronPhysicalDevMappings' in line:
            print("  NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
                  .format(sriov_iface))
        elif 'NeutronSriovNumVFs' in line:
            print("  NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
        elif 'NovaPCIPassthrough' in line:
            print("  NovaPCIPassthrough:")
        elif 'devname' in line:
            print("    - devname: \"{}\"".format(sriov_iface))
        elif 'physical_network' in line:
            print("      physical_network: \"nfv_sriov\"")
        else:
            print(line)


def external_network_cmds(ns, ds):
    """
    Generates external network openstack commands
    :param ns: network settings
    :param ds: deploy settings
    :return: list of commands to configure external network
    """
    ds_opts = ds['deploy_options']
    external_physnet = 'datacentre'
    if ds_opts['dataplane'] == 'fdio' and \
            ds_opts['sdn_controller'] != 'opendaylight':
        external_physnet = 'external'
    if 'external' in ns.enabled_network_list:
        net_config = ns['networks']['external'][0]
        external = True
        pool_start, pool_end = net_config['floating_ip_range']
    else:
        net_config = ns['networks']['admin']
        external = False
        pool_start, pool_end = ns['apex']['networks']['admin'][
            'introspection_range']
    nic_config = net_config['nic_mapping']
    gateway = net_config['gateway']
    cmds = list()
    # create network command
    if nic_config['compute']['vlan'] == 'native':
        ext_type = 'flat'
    else:
        ext_type = "vlan --provider-segment {}".format(nic_config[
                                                       'compute']['vlan'])
    cmds.append("openstack network create external --project service "
                "--external --provider-network-type {} "
                "--provider-physical-network {}"
                .format(ext_type, external_physnet))
    # create subnet command
    cidr = net_config['cidr']
    subnet_cmd = "openstack subnet create external-subnet --project " \
                 "service --network external --no-dhcp --gateway {} " \
                 "--allocation-pool start={},end={} --subnet-range " \
                 "{}".format(gateway, pool_start, pool_end, str(cidr))
    if external and cidr.version == 6:
        subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
                      '--ipv6-address-mode slaac'
    cmds.append(subnet_cmd)
    logging.debug("Neutron external network commands determined "
                  "as: {}".format(cmds))
    return cmds


def create_congress_cmds(overcloud_file):
    drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
    overcloudrc = parsers.parse_overcloudrc(overcloud_file)
    logging.info("Creating congress commands")
    try:
        ds_cfg = [
            "username={}".format(overcloudrc['OS_USERNAME']),
            "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
            "password={}".format(overcloudrc['OS_PASSWORD']),
            "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
        ]
    except KeyError:
        logging.error("Unable to find all keys required for congress in "
                      "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
                      "OS_PASSWORD, OS_AUTH_URL. Please check overcloudrc "
                      "file: {}".format(overcloud_file))
        raise
    cmds = list()
    ds_cfg = '--config ' + ' --config '.join(ds_cfg)

    for driver in drivers:
        if driver == 'doctor':
            cmd = "{} \"{}\"".format(driver, driver)
        else:
            cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
        if driver == 'nova':
            cmd += ' --config api_version="2.34"'
        logging.debug("Congress command created: {}".format(cmd))
        cmds.append('openstack congress datasource create {}'.format(cmd))
    return cmds
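

# Example element (illustrative): for the 'nova' driver the generated command
# looks like:
#   openstack congress datasource create nova "nova" --config username=admin
#   --config tenant_name=admin --config password=... --config auth_url=...
#   --config api_version="2.34"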