1 ##############################################################################
2 # Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
20 from apex.common import constants as con
21 from apex.common.exceptions import ApexDeployException
22 from apex.common import parsers
23 from apex.virtual import utils as virt_utils
24 from cryptography.hazmat.primitives import serialization as \
26 from cryptography.hazmat.primitives.asymmetric import rsa
27 from cryptography.hazmat.backends import default_backend as \
28 crypto_default_backend
# NOTE(review): SOURCE is an elided numbered dump; the assignments opening
# these dict literals (presumably SDN_FILE_MAP / OTHER_FILE_MAP /
# OVS_PERF_MAP, given the references at original lines 129/131/487) are
# missing from view -- confirm names against the full file.
# Feature-key -> tripleo env file entries for the OpenDaylight controller
# (filenames suggest ODL; hedged, the enclosing key is elided):
33 'sfc': 'neutron-sfc-opendaylight.yaml',
34 'vpn': 'neutron-bgpvpn-opendaylight.yaml',
35 'gluon': 'gluon.yaml',
37 'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
38 'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
39 'default': 'neutron-opendaylight-honeycomb.yaml'
41 'default': 'neutron-opendaylight.yaml',
# Entries for the ONOS controller (per filenames):
44 'sfc': 'neutron-onos-sfc.yaml',
45 'default': 'neutron-onos.yaml'
# Entries for deployments without an SDN controller (ml2 drivers); the
# 'dataplane' tuple form is matched as (deploy-setting value, env file)
# by build_sdn_env_list's tuple branch below.
47 'ovn': 'neutron-ml2-ovn.yaml',
49 'vpp': 'neutron-ml2-vpp.yaml',
50 'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
# Non-SDN feature toggles -> env files (iterated in create_deploy_cmd):
55 'tacker': 'enable_tacker.yaml',
56 'congress': 'enable_congress.yaml',
57 'barometer': 'enable_barometer.yaml',
58 'rt_kvm': 'enable_rt_kvm.yaml'
# Heat/Neutron DPDK parameter name -> performance-settings key (used by
# prep_env when rewriting OVS-DPDK lines of the opnfv environment):
62 'HostCpusList': 'dpdk_cores',
63 'NeutronDpdkCoreList': 'pmd_cores',
64 'NeutronDpdkSocketMemory': 'socket_memory',
65 'NeutronDpdkMemoryChannels': 'memory_channels'
# NSH-enabled OVS RPM filenames installed into the overcloud image when
# SFC is enabled on the 'ovs' dataplane (see prep_image):
68 OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
69 OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
# Path prefix of the ODL netvirt-VPP RPM; continuation line is elided.
70 ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
# NOTE(review): elided dump -- the docstring delimiters, the
# `env_list is None` initialization guard (between orig lines 88 and 92),
# the tuple-comparison condition (orig 107), and the trailing `return`
# are missing from view; the fragment is not runnable as-is.
74 def build_sdn_env_list(ds, sdn_map, env_list=None):
76     Builds a list of SDN environment files to be used in the deploy cmd.
78     This function recursively searches an sdn_map. First the sdn controller is
79     matched and then the function looks for enabled features for that
80     controller to determine which environment files should be used. By
81     default the feature will be checked if set to true in deploy settings to be
82     added to the list. If a feature does not have a boolean value, then the
83     key and value pair to compare with are checked as a tuple (k,v).
85     :param ds: deploy settings
86     :param sdn_map: SDN map to recursively search
87     :param env_list: recursive var to hold previously found env_list
88     :return: A list of env files
# A map entry applies when it names the active SDN controller, or when
# it is a feature key set to True in the deploy settings.
92     for k, v in sdn_map.items():
93         if ds['sdn_controller'] == k or (k in ds and ds[k] is True):
94             if isinstance(v, dict):
95                 # Append default SDN env file first
96                 # The assumption is that feature-enabled SDN env files
97                 # override and do not conflict with previously set default
99                 if ds['sdn_controller'] == k and 'default' in v:
100                     env_list.append(os.path.join(con.THT_ENV_DIR,
# Recurse into the nested map to collect feature-specific env files.
102                 env_list.extend(build_sdn_env_list(ds, v))
104             env_list.append(os.path.join(con.THT_ENV_DIR, v))
105         # check if the value is not a boolean
# Tuple entries are (expected deploy-setting value, env file); the file
# at v[1] is appended when the setting matches v[0] -- the comparison
# line itself (orig 107) is elided. TODO confirm against full source.
106         elif isinstance(v, tuple):
108                 env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
# Nothing matched: fall back to the map-wide 'default' env file, else
# warn that no default exists for this SDN map.
109     if len(env_list) == 0:
111             env_list.append(os.path.join(
112                 con.THT_ENV_DIR, sdn_map['default']))
114             logging.warning("Unable to find default file for SDN")
# Builds the `openstack overcloud deploy` command string from deploy
# settings (ds), network settings (ns) and the node inventory (inv),
# and writes it to <tmp_dir>/deploy_command.
# NOTE(review): elided dump -- the full parameter list (orig 120-121),
# the docstring, and several branch lines are missing from view.
119 def create_deploy_cmd(ds, ns, inv, tmp_dir,
120                       virtual, env_file='opnfv-environment.yaml',
123     logging.info("Creating deployment command")
124     deploy_options = ['network-environment.yaml']
127     deploy_options.append(env_file)
128     ds_opts = ds['deploy_options']
# SDN env files come from the recursive map search above.
129     deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)
# Feature toggles (tacker, congress, ...) map directly to env files.
131     for k, v in OTHER_FILE_MAP.items():
132         if k in ds_opts and ds_opts[k]:
133             deploy_options.append(os.path.join(con.THT_ENV_DIR, v))
136         prep_storage_env(ds, tmp_dir)
137         deploy_options.append(os.path.join(con.THT_ENV_DIR,
138                                            'storage-environment.yaml'))
139     if ds['global_params']['ha_enabled']:
140         deploy_options.append(os.path.join(con.THT_ENV_DIR,
141                                            'puppet-pacemaker.yaml'))
144         deploy_options.append('virtual-environment.yaml')
146         deploy_options.append('baremetal-environment.yaml')
# Sanity-check node counts before building the command.
148     num_control, num_compute = inv.get_node_counts()
149     if num_control == 0 or num_compute == 0:
# NOTE(review): message is missing ': ' after 'compute nodes' -- renders
# as "compute nodes2". Runtime string; left untouched here.
150         logging.error("Detected 0 control or compute nodes. Control nodes: "
151                       "{}, compute nodes{}".format(num_control, num_compute))
152         raise ApexDeployException("Invalid number of control or computes")
153     elif num_control > 1 and not ds['global_params']['ha_enabled']:
155     if platform.machine() == 'aarch64':
156         # aarch64 deploys were not completing in the default 90 mins.
157         # Not sure if this is related to the hardware the OOO support
158         # was developed on or the virtualization support in CentOS
159         # Either way it will probably get better over time as the aarch
160         # support matures in CentOS and deploy time should be tested in
161         # the future so this multiplier can be removed.
# NOTE(review): mutates the module-level constant in place -- the timeout
# doubles again on every subsequent call in the same process.
162         con.DEPLOY_TIMEOUT *= 2
163     cmd = "openstack overcloud deploy --templates --timeout {} " \
164           .format(con.DEPLOY_TIMEOUT)
166     for option in deploy_options:
167         cmd += " -e {}".format(option)
168     cmd += " --ntp-server {}".format(ns['ntp'][0])
169     cmd += " --control-scale {}".format(num_control)
170     cmd += " --compute-scale {}".format(num_compute)
171     cmd += ' --control-flavor control --compute-flavor compute'
173         cmd += ' --networks-file network_data.yaml'
# Virtual deploys fall back to qemu when nested KVM is unavailable;
# the libvirt_type default assignment (elided) presumably precedes this.
176         with open('/sys/module/kvm_intel/parameters/nested') as f:
177             nested_kvm = f.read().strip()
178             if nested_kvm != 'Y':
179                 libvirt_type = 'qemu'
180         cmd += ' --libvirt-type {}'.format(libvirt_type)
181     logging.info("Deploy command set: {}".format(cmd))
# Persist the command for the undercloud playbook; the fh.write line
# (orig 184) is elided from this dump.
183     with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
# Customizes the overcloud qcow2 image (via virt-customize command dicts)
# according to the deploy settings, then writes the modified copy to
# tmp_dir. NOTE(review): elided dump -- the virt_cmds initialization and
# many virt_cmds.extend(...) wrapper lines are missing from view.
188 def prep_image(ds, img, tmp_dir, root_pw=None):
190     Locates sdn image and preps for deployment.
191     :param ds: deploy settings
192     :param img: sdn image
193     :param tmp_dir: dir to store modified sdn image
194     :param root_pw: password to configure for overcloud image
197     # TODO(trozet): Come up with a better way to organize this logic in this
199     logging.info("Preparing image: {} for deployment".format(img))
200     if not os.path.isfile(img):
201         logging.error("Missing SDN image {}".format(img))
202         raise ApexDeployException("Missing SDN image file: {}".format(img))
204     ds_opts = ds['deploy_options']
206     sdn = ds_opts['sdn_controller']
207     # we need this due to rhbz #1436021
208     # fixed in systemd-219-37.el7
# Remove the neutron-openvswitch-agent unit files inside the image.
210         logging.info("Neutron openvswitch-agent disabled")
213             "rm -f /etc/systemd/system/multi-user.target.wants/"
214             "neutron-openvswitch-agent.service"},
217             "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
222         virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
# Arrange for the quagga zrpcd daemon to start at boot (BGPVPN support,
# per the zrpcd references; enclosing condition is elided).
225             "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
226             "/opt/quagga/etc/init.d/zrpcd_start.sh"})
228             con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
231             con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
232                               "init.d/zrpcd_start.sh' /etc/rc.local "})
234             con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
235                               "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
# NOTE(review): logged before the image is actually customized -- the
# process is only arranged to start at boot, not started here.
236     logging.info("ZRPCD process started")
238     dataplane = ds_opts['dataplane']
239     if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
240         logging.info("Enabling kernel modules for dpdk")
241         # file to module mapping
# uio_types maps a host-side .modules script path to the kernel module it
# loads; the scripts are uploaded into /etc/sysconfig/modules/ below.
243             os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
244             os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
246         for mod_file, mod in uio_types.items():
247             with open(mod_file, 'w') as fh:
248                 fh.write('#!/bin/bash\n')
249                 fh.write('exec /sbin/modprobe {}'.format(mod))
253                 {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
255                 {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
256                                    "{}".format(os.path.basename(mod_file))}
# Optionally set the image root password via virt-customize.
259         pw_op = "password:{}".format(root_pw)
260         virt_cmds.append({con.VIRT_PW: pw_op})
# SFC on plain OVS needs the NSH-patched OVS packages.
261     if ds_opts['sfc'] and dataplane == 'ovs':
263             {con.VIRT_RUN_CMD: "yum -y install "
264                                "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
265                                "{}".format(OVS_NSH_KMOD_RPM)},
266             {con.VIRT_RUN_CMD: "yum downgrade -y "
267                                "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
268                                "{}".format(OVS_NSH_RPM)}
270     if dataplane == 'fdio':
271         # Patch neutron with using OVS external interface for router
272         # and add generic linux NS interface driver
274             {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
275                                "-p1 < neutron-patch-NSDriver.patch"})
277             {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
279             {con.VIRT_RUN_CMD: "yum install -y "
280                                "/root/nosdn_vpp_rpms/*.rpm"}
283     if sdn == 'opendaylight':
# Non-default ODL version: replace the packaged ODL and its puppet module
# with the requested version's artifacts staged under /root.
284         if ds_opts['odl_version'] != con.DEFAULT_ODL_VERSION:
286                 {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
287                 {con.VIRT_RUN_CMD: "rm -rf /etc/puppet/modules/opendaylight"},
288                 {con.VIRT_RUN_CMD: "cd /etc/puppet/modules && tar xzf "
289                                    "/root/puppet-opendaylight-"
290                                    "{}.tar.gz".format(ds_opts['odl_version'])}
292             if ds_opts['odl_version'] == 'master':
294                     {con.VIRT_RUN_CMD: "rpm -ivh --nodeps /root/{}/*".format(
295                         ds_opts['odl_version'])}
299                     {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
300                         ds_opts['odl_version'])}
# NOTE(review): this elif can never fire -- the `if` at orig line 283
# already captures every sdn == 'opendaylight' case, so the netvirt-vpp
# branch below is dead code as written.
303     elif sdn == 'opendaylight' and 'odl_vpp_netvirt' in ds_opts \
304             and ds_opts['odl_vpp_netvirt']:
306             {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
307             {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
308                 ODL_NETVIRT_VPP_RPM)}
# OVS 2.8 update/downgrade commands; enclosing condition is elided.
313         {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum update -y "
315         {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum downgrade -y "
# Work on a copy so the source image is never modified.
319     tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
320     shutil.copyfile(img, tmp_oc_image)
321     logging.debug("Temporary overcloud image stored as: {}".format(
323     virt_utils.virt_customize(virt_cmds, tmp_oc_image)
324     logging.info("Overcloud image customization complete")
# NOTE(review): the `def make_ssh_key():` line and the `key_size=`
# argument of generate_private_key are elided from this dump, so the
# docstring's "1024 bit" claim cannot be verified against the actual
# key size -- TODO confirm in the full source.
329     Creates public and private ssh keys with 1024 bit RSA encryption
330     :return: private, public key
# Generate an RSA key pair with the cryptography library's default
# backend and the standard public exponent.
332     key = rsa.generate_private_key(
333         backend=crypto_default_backend(),
334         public_exponent=65537,
# Serialize: private key as unencrypted PKCS8 PEM, public key in
# OpenSSH authorized_keys format.
338     private_key = key.private_bytes(
339         crypto_serialization.Encoding.PEM,
340         crypto_serialization.PrivateFormat.PKCS8,
341         crypto_serialization.NoEncryption())
342     public_key = key.public_key().public_bytes(
343         crypto_serialization.Encoding.OpenSSH,
344         crypto_serialization.PublicFormat.OpenSSH
# Both values are returned as str (decoded from bytes).
346     return private_key.decode('utf-8'), public_key.decode('utf-8')
# Rewrites the opnfv-environment (and, per the docstring, the network-
# environment) file in tmp_dir line-by-line via fileinput inplace
# filtering, substituting values derived from deploy/network settings.
# NOTE(review): elided dump -- variable initializations (tenant_nic,
# perf flags, kernel_args) and several print(output_line) lines are
# missing from view.
349 def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
351     Creates modified opnfv/network environments for deployment
352     :param ds: deploy settings
353     :param ns: network settings
354     :param inv: node inventory
355     :param opnfv_env: file path for opnfv-environment file
356     :param net_env: file path for network-environment file
357     :param tmp_dir: Apex tmp dir
361     logging.info("Preparing opnfv-environment and network-environment files")
362     ds_opts = ds['deploy_options']
# Work on a copy of the opnfv env file inside tmp_dir.
363     tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
364     shutil.copyfile(opnfv_env, tmp_opnfv_env)
# First member of each role's tenant/external NIC mapping is used for
# interface substitutions below.
365     tenant_nic_map = ns['networks']['tenant']['nic_mapping']
367     tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
368     tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
369     external_nic_map = ns['networks']['external'][0]['nic_mapping']
370     external_nic = dict()
371     external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]
# Fresh SSH keypair injected into the environment file below.
374     private_key, public_key = make_ssh_key()
376     # Make easier/faster variables to index in the file editor
377     if 'performance' in ds_opts:
380         if 'vpp' in ds_opts['performance']['Compute']:
381             perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
384         if 'vpp' in ds_opts['performance']['Controller']:
385             perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
390         if 'ovs' in ds_opts['performance']['Compute']:
391             perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
396         if 'kernel' in ds_opts['performance']['Compute']:
397             perf_kern_comp = ds_opts['performance']['Compute']['kernel']
399         perf_kern_comp = None
403     # Modify OPNFV environment
404     # TODO: Change to build a dict and outputting yaml rather than parsing
405     for line in fileinput.input(tmp_opnfv_env, inplace=True):
406         line = line.strip('\n')
408         if 'CloudDomain' in line:
409             output_line = "  CloudDomain: {}".format(ns['domain_name'])
410         elif 'replace_private_key' in line:
411             output_line = "    private_key: |\n"
# NOTE(review): this inner loop variable `line` shadows the outer
# fileinput loop variable of the same name -- the outer `line` is
# clobbered for the remainder of this iteration.
413             for line in private_key.splitlines():
414                 key_out += "      {}\n".format(line)
415             output_line += key_out
416         elif 'replace_public_key' in line:
417             output_line = "    public_key: '{}'".format(public_key)
# first-boot.yaml NodeUserData hook is needed for kernel args / rt_kvm.
418         elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
419                 'resource_registry' in line:
420             output_line = "resource_registry:\n" \
421                           "  OS::TripleO::NodeUserData: first-boot.yaml"
422         elif 'ComputeExtraConfigPre' in line and \
423                 ds_opts['dataplane'] == 'ovs_dpdk':
424             output_line = '  OS::TripleO::ComputeExtraConfigPre: ' \
425                           './ovs-dpdk-preconfig.yaml'
# ODL + explicit VPP routing node: pin the vpp_routing_node hiera key.
427         if ds_opts['sdn_controller'] == 'opendaylight' and \
428                 'odl_vpp_routing_node' in ds_opts:
429             if 'opendaylight::vpp_routing_node' in line:
430                 output_line = ("    opendaylight::vpp_routing_node: {}.{}"
431                                .format(ds_opts['odl_vpp_routing_node'],
# No SDN + fdio: wire the VPP agent physnet to the controller tenant NIC.
433         elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
434             if 'NeutronVPPAgentPhysnets' in line:
435                 output_line = ("  NeutronVPPAgentPhysnets: 'datacentre:{}'".
436                                format(tenant_nic['Controller']))
# ODL with a dhcp-related option (condition tail elided at orig 438):
# move the DHCP agent to the computes and scale agents per network.
437         elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
439             if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
441             elif 'NeutronDhcpAgentsPerNetwork' in line:
442                 num_control, num_compute = inv.get_node_counts()
443                 output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
444                                .format(num_compute))
445             elif 'ComputeServices' in line:
446                 output_line = ("  ComputeServices:\n"
447                                "    - OS::TripleO::Services::NeutronDhcpAgent")
# Per-role VPP performance options appended to <Role>ExtraConfig.
450             for role in 'NovaCompute', 'Controller':
451                 if role == 'NovaCompute':
452                     perf_opts = perf_vpp_comp
454                     perf_opts = perf_vpp_ctrl
455                 cfg = "{}ExtraConfig".format(role)
456                 if cfg in line and perf_opts:
458                     if 'main-core' in perf_opts:
459                         perf_line += ("\n    fdio::vpp_cpu_main_core: '{}'"
460                                       .format(perf_opts['main-core']))
461                     if 'corelist-workers' in perf_opts:
463                                       "fdio::vpp_cpu_corelist_workers: '{}'"
464                                       .format(perf_opts['corelist-workers']))
465                     if ds_opts['sdn_controller'] == 'opendaylight' and \
466                             ds_opts['dataplane'] == 'fdio':
# Computes map both tenant and public interfaces for honeycomb;
# controllers (else branch) map only the tenant interface.
467                         if role == 'NovaCompute':
469                                           "tripleo::profile::base::neutron::"
470                                           "agents::honeycomb::"
471                                           "interface_role_mapping:"
472                                           " ['{}:tenant-interface',"
473                                           "'{}:public-interface']"
474                                           .format(tenant_nic[role],
478                                           "tripleo::profile::base::neutron::"
479                                           "agents::honeycomb::"
480                                           "interface_role_mapping:"
481                                           " ['{}:tenant-interface']"
482                                           .format(tenant_nic[role]))
484                         output_line = ("  {}:{}".format(cfg, perf_line))
# OVS-DPDK: substitute each mapped parameter from the perf settings.
486             if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
487                 for k, v in OVS_PERF_MAP.items():
488                     if k in line and v in perf_ovs_comp:
489                         output_line = "  {}: '{}'".format(k, perf_ovs_comp[v])
492             # (FIXME) use compute's kernel settings for all nodes for now.
494                 if 'NovaSchedulerDefaultFilters' in line:
496                         "  NovaSchedulerDefaultFilters: 'RamFilter," \
497                         "ComputeFilter,AvailabilityZoneFilter," \
498                         "ComputeCapabilitiesFilter," \
499                         "ImagePropertiesFilter,NUMATopologyFilter'"
500                 elif 'ComputeKernelArgs' in line:
# Fold the kernel perf settings into a space-separated arg string.
502                     for k, v in perf_kern_comp.items():
503                         kernel_args += "{}={} ".format(k, v)
505                     output_line = "  ComputeKernelArgs: '{}'".\
510     logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
# Generates a base64-encoded Ceph keyring secret: a packed header
# (type=1, current timestamp, zero, key length) followed by the key.
# NOTE(review): the line creating `key` (orig 514) is elided from this
# dump -- presumably random bytes; confirm in full source.
513 def generate_ceph_key():
515     header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
516     return base64.b64encode(header + key)
# Populates the storage-environment.yaml (already copied into tmp_dir by
# the undercloud playbook) with a fresh Ceph FSID and generated keys,
# and appends an OSD device override when one is configured.
519 def prep_storage_env(ds, tmp_dir):
521     Creates storage environment file for deployment. Source file is copied by
522     undercloud playbook to host.
527     ds_opts = ds['deploy_options']
528     storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
529     if not os.path.isfile(storage_file):
530         logging.error("storage-environment file is not in tmp directory: {}. "
531                       "Check if file was copied from "
532                       "undercloud".format(tmp_dir))
533         raise ApexDeployException("storage-environment file not copied from "
# In-place rewrite: print() inside a fileinput inplace loop writes to
# the file; matched lines are replaced with generated values.
535     for line in fileinput.input(storage_file, inplace=True):
536         line = line.strip('\n')
537         if 'CephClusterFSID' in line:
538             print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
539         elif 'CephMonKey' in line:
540             print("  CephMonKey: {}".format(generate_ceph_key().decode(
542         elif 'CephAdminKey' in line:
543             print("  CephAdminKey: {}".format(generate_ceph_key().decode(
545         elif 'CephClientKey' in line:
546             print("  CephClientKey: {}".format(generate_ceph_key().decode(
# Optional OSD device mapping appended as hiera ExtraConfig.
550     if 'ceph_device' in ds_opts and ds_opts['ceph_device']:
551         with open(storage_file, 'a') as fh:
552             fh.write('  ExtraConfig:\n')
553             fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
554                 ds_opts['ceph_device']
# Builds the `openstack network/subnet create` commands for the overcloud
# external network, falling back to the admin network (and its
# introspection range) when no external network is enabled.
# NOTE(review): elided dump -- the initializations of `cmds` and of the
# `external` flag tested at orig line 591, and the trailing `return cmds`,
# are missing from view.
558 def external_network_cmds(ns):
560     Generates external network openstack commands
561     :param ns: network settings
562     :return: list of commands to configure external network
564     if 'external' in ns.enabled_network_list:
565         net_config = ns['networks']['external'][0]
567         pool_start, pool_end = net_config['floating_ip_range']
# No external network: reuse admin network with its introspection range.
569         net_config = ns['networks']['admin']
571         pool_start, pool_end = ns['apex']['networks']['admin'][
572             'introspection_range']
573     nic_config = net_config['nic_mapping']
574     gateway = net_config['gateway']
576     # create network command
# Native VLAN -> flat provider network (elided branch); otherwise a vlan
# provider network with the configured segment id.
577     if nic_config['compute']['vlan'] == 'native':
580         ext_type = "vlan --provider-segment {}".format(nic_config[
582     cmds.append("openstack network create external --project service "
583                 "--external --provider-network-type {} "
584                 "--provider-physical-network datacentre".format(ext_type))
585     # create subnet command
586     cidr = net_config['cidr']
587     subnet_cmd = "openstack subnet create external-subnet --project " \
588                  "service --network external --no-dhcp --gateway {} " \
589                  "--allocation-pool start={},end={} --subnet-range " \
590                  "{}".format(gateway, pool_start, pool_end, str(cidr))
# IPv6 external networks get SLAAC addressing flags.
591     if external and cidr.version == 6:
592         subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
593                       '--ipv6-address-mode slaac'
594     cmds.append(subnet_cmd)
595     logging.debug("Neutron external network commands determined "
596                   "as: {}".format(cmds))
# Builds `congress datasource create`-style command fragments for each
# supported driver, using credentials parsed from the overcloudrc file.
# NOTE(review): the definition runs past the visible end of this dump
# (no return statement in view) and the try/except around the
# overcloudrc key lookups appears elided -- do not modify without the
# full source.
600 def create_congress_cmds(overcloud_file):
601     drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
602     overcloudrc = parsers.parse_overcloudrc(overcloud_file)
603     logging.info("Creating congress commands")
# Datasource config options pulled from the parsed overcloudrc.
606             "username={}".format(overcloudrc['OS_USERNAME']),
607             "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
608             "password={}".format(overcloudrc['OS_PASSWORD']),
609             "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
# Missing overcloudrc keys are reported before (presumably) re-raising;
# the raise line is elided from this dump.
612         logging.error("Unable to find all keys required for congress in "
613                       "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
614                       "OS_PASSWORD, OS_AUTH_URL. Please check overcloudrc "
615                       "file: {}".format(overcloud_file))
618     ds_cfg = '--config ' + ' --config '.join(ds_cfg)
620     for driver in drivers:
# The doctor driver takes no datasource config options.
621         if driver == 'doctor':
622             cmd = "{} \"{}\"".format(driver, driver)
624             cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
# Condition for this api_version override (orig 625) is elided.
626             cmd += ' --config api_version="2.34"'
627         logging.debug("Congress command created: {}".format(cmd))