1 ##############################################################################
2 # Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
20 from apex.common import constants as con
21 from apex.common.exceptions import ApexDeployException
22 from apex.common import parsers
23 from apex.virtual import utils as virt_utils
24 from cryptography.hazmat.primitives import serialization as \
26 from cryptography.hazmat.primitives.asymmetric import rsa
27 from cryptography.hazmat.backends import default_backend as \
28 crypto_default_backend
# NOTE(review): the lines below are fragments of several module-level
# constants whose opening assignments (e.g. "SDN_FILE_MAP = {") and some
# closing braces are not visible in this view -- confirm against the full
# source. The names SDN_FILE_MAP, OTHER_FILE_MAP and OVS_PERF_MAP are used
# later in this file (create_deploy_cmd / prep_env).

# --- OpenDaylight feature -> TripleO heat env file entries ---
'sfc': 'neutron-sfc-opendaylight.yaml',
'vpn': 'neutron-bgpvpn-opendaylight.yaml',
'gluon': 'gluon.yaml',
'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
'default': 'neutron-opendaylight-honeycomb.yaml'
'l2gw': 'neutron-l2gw-opendaylight.yaml',
'default': 'neutron-opendaylight.yaml',
# --- ONOS controller entries ---
'sfc': 'neutron-onos-sfc.yaml',
'default': 'neutron-onos.yaml'
# --- entries used when no SDN controller is selected ---
'ovn': 'neutron-ml2-ovn.yaml',
'vpp': 'neutron-ml2-vpp.yaml',
# non-boolean feature: matched as a (value, env file) tuple by
# build_sdn_env_list()
'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
# --- optional service enable files (OTHER_FILE_MAP-style entries) ---
'tacker': 'enable_tacker.yaml',
'congress': 'enable_congress.yaml',
'barometer': 'enable_barometer.yaml',
'rt_kvm': 'enable_rt_kvm.yaml'
# --- heat parameter -> deploy-settings perf key (OVS_PERF_MAP-style) ---
'HostCpusList': 'dpdk_cores',
'NeutronDpdkCoreList': 'pmd_cores',
'NeutronDpdkSocketMemory': 'socket_memory',
'NeutronDpdkMemoryChannels': 'memory_channels'

# RPM artifacts installed into the overcloud image by prep_image()
OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
# NOTE(review): the continuation of this string (after the backslash) is
# not visible in this view of the file.
ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
def build_sdn_env_list(ds, sdn_map, env_list=None):
    """
    Builds a list of SDN environment files to be used in the deploy cmd.

    This function recursively searches an sdn_map. First the sdn controller is
    matched and then the function looks for enabled features for that
    controller to determine which environment files should be used. By
    default the feature will be checked if set to true in deploy settings to be
    added to the list. If a feature does not have a boolean value, then the
    key and value pair to compare with are checked as a tuple (k,v).

    :param ds: deploy settings
    :param sdn_map: SDN map to recursively search
    :param env_list: recursive var to hold previously found env_list
    :return: A list of env files
    """
    # NOTE(review): initialization of env_list when it is None is not
    # visible in this view -- confirm it is created before the loop.
    for k, v in sdn_map.items():
        # match either the selected SDN controller itself, or one of its
        # features enabled (boolean True) in deploy settings
        if ds['sdn_controller'] == k or (k in ds and ds[k] is True):
            if isinstance(v, dict):
                # Append default SDN env file first
                # The assumption is that feature-enabled SDN env files
                # override and do not conflict with previously set default
                if ds['sdn_controller'] == k and 'default' in v:
                    # NOTE(review): the second argument of this append
                    # (presumably v['default']) is not visible in this view
                    env_list.append(os.path.join(con.THT_ENV_DIR,
                # recurse into the nested map for enabled features
                env_list.extend(build_sdn_env_list(ds, v))
                # NOTE(review): the guard for this plain append (likely an
                # else branch for non-dict values) is not visible here
                env_list.append(os.path.join(con.THT_ENV_DIR, v))
            # check if the value is not a boolean
            elif isinstance(v, tuple):
                # NOTE(review): the tuple comparison guard (presumably
                # "if ds[k] == v[0]:") is not visible in this view
                env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
    if len(env_list) == 0:
        # fall back to the map's top-level default env file
        env_list.append(os.path.join(
            con.THT_ENV_DIR, sdn_map['default']))
        # NOTE(review): this warning is likely inside an except branch for
        # a missing 'default' key; the guard and the final return of
        # env_list are not visible in this view
        logging.warning("Unable to find default file for SDN")
def create_deploy_cmd(ds, ns, inv, tmp_dir,
                      virtual, env_file='opnfv-environment.yaml',
    # NOTE(review): the remainder of this signature (and its closing paren)
    # and the function docstring are not visible in this view of the file.
    # Purpose: assemble the "openstack overcloud deploy" command string from
    # deploy settings / network settings / inventory and write it to
    # <tmp_dir>/deploy_command.
    logging.info("Creating deployment command")
    deploy_options = ['network-environment.yaml']
    # NOTE(review): a guard (likely "if env_file:") before this append is
    # not visible in this view
    deploy_options.append(env_file)
    ds_opts = ds['deploy_options']
    # SDN-specific env files derived from deploy settings
    deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)
    # optional service enable files (tacker, congress, ...)
    for k, v in OTHER_FILE_MAP.items():
        if k in ds_opts and ds_opts[k]:
            deploy_options.append(os.path.join(con.THT_ENV_DIR, v))
    # NOTE(review): a ceph guard likely precedes these two lines; it is not
    # visible in this view
    prep_storage_env(ds, tmp_dir)
    deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                       'storage-environment.yaml'))
    if ds['global_params']['ha_enabled']:
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'puppet-pacemaker.yaml'))
    # NOTE(review): "if virtual:" / "else:" guards for these two appends
    # are not visible in this view
    deploy_options.append('virtual-environment.yaml')
    deploy_options.append('baremetal-environment.yaml')

    num_control, num_compute = inv.get_node_counts()
    if num_control == 0 or num_compute == 0:
        # NOTE(review): message lacks a separator after "nodes" -- likely
        # "compute nodes: {}" in the full source; confirm before editing
        logging.error("Detected 0 control or compute nodes. Control nodes: "
                      "{}, compute nodes{}".format(num_control, num_compute))
        raise ApexDeployException("Invalid number of control or computes")
    elif num_control > 1 and not ds['global_params']['ha_enabled']:
        # NOTE(review): the body of this branch (likely forcing
        # num_control = 1 for non-HA deploys) is not visible in this view
    if platform.machine() == 'aarch64':
        # aarch64 deploys were not completing in the default 90 mins.
        # Not sure if this is related to the hardware the OOO support
        # was developed on or the virtualization support in CentOS
        # Either way it will probably get better over time as the aarch
        # support matures in CentOS and deploy time should be tested in
        # the future so this multiplier can be removed.
        con.DEPLOY_TIMEOUT *= 2
    cmd = "openstack overcloud deploy --templates --timeout {} " \
        .format(con.DEPLOY_TIMEOUT)
    for option in deploy_options:
        cmd += " -e {}".format(option)
    cmd += " --ntp-server {}".format(ns['ntp'][0])
    cmd += " --control-scale {}".format(num_control)
    cmd += " --compute-scale {}".format(num_compute)
    cmd += ' --control-flavor control --compute-flavor compute'
    # NOTE(review): a guard (likely "if net_data:") before this append is
    # not visible in this view
    cmd += ' --networks-file network_data.yaml'
    # fall back to qemu when nested KVM is not enabled on the host;
    # NOTE(review): the default assignment of libvirt_type and the
    # "if virtual:" guard are not visible in this view
    with open('/sys/module/kvm_intel/parameters/nested') as f:
        nested_kvm = f.read().strip()
        if nested_kvm != 'Y':
            libvirt_type = 'qemu'
    cmd += ' --libvirt-type {}'.format(libvirt_type)
    logging.info("Deploy command set: {}".format(cmd))

    with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
        # NOTE(review): the write of cmd to the file and the return
        # statement are not visible in this view of the file.
def prep_image(ds, ns, img, tmp_dir, root_pw=None):
    """
    Locates sdn image and preps for deployment.
    :param ds: deploy settings
    :param ns: network settings
    :param img: sdn image
    :param tmp_dir: dir to store modified sdn image
    :param root_pw: password to configure for overcloud image
    """
    # TODO(trozet): Come up with a better way to organize this logic in this
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))

    ds_opts = ds['deploy_options']
    # NOTE(review): initialization of virt_cmds (the list of virt-customize
    # operations built up below) is not visible in this view
    sdn = ds_opts['sdn_controller']
    # we need this due to rhbz #1436021
    # fixed in systemd-219-37.el7
    # NOTE(review): the guard for this agent-disable block (likely keyed on
    # the selected SDN controller) is not visible in this view
    logging.info("Neutron openvswitch-agent disabled")
    # remove the openvswitch agent service files from the image
        "rm -f /etc/systemd/system/multi-user.target.wants/"
        "neutron-openvswitch-agent.service"},
        "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
    # propagate proxy settings from network settings into the image
    if ns.get('http_proxy', ''):
            "echo 'http_proxy={}' >> /etc/environment".format(
    if ns.get('https_proxy', ''):
            "echo 'https_proxy={}' >> /etc/environment".format(
    # NOTE(review): the guard for the zrpcd/VPN setup below (likely
    # "if ds_opts['vpn']:") is not visible in this view
    virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
    # create a startup script for zrpcd and hook it into rc.local
        "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
        "/opt/quagga/etc/init.d/zrpcd_start.sh"})
        con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
        con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
        "init.d/zrpcd_start.sh' /etc/rc.local "})
        con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
        "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
    logging.info("ZRPCD process started")

    dataplane = ds_opts['dataplane']
    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
        # NOTE(review): the opening of this mapping (likely
        # "uio_types = {") is not visible in this view
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
        for mod_file, mod in uio_types.items():
            # write a modules-load script that modprobes the module at boot
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))
            # upload the script into the image and make it executable
                {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
                {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
                                   "{}".format(os.path.basename(mod_file))}
    # NOTE(review): the guard "if root_pw:" is not visible in this view
    pw_op = "password:{}".format(root_pw)
    virt_cmds.append({con.VIRT_PW: pw_op})
    # NSH-capable OVS is required for SFC on the plain ovs dataplane
    if ds_opts['sfc'] and dataplane == 'ovs':
            {con.VIRT_RUN_CMD: "yum -y install "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_KMOD_RPM)},
            {con.VIRT_RUN_CMD: "yum downgrade -y "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_RPM)}
    if dataplane == 'fdio':
        # Patch neutron with using OVS external interface for router
        # and add generic linux NS interface driver
            {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
                               "-p1 < neutron-patch-NSDriver.patch"})
        # NOTE(review): the guard for this vpp rpm swap (likely for
        # no-SDN fdio deploys) is not visible in this view
            {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
            {con.VIRT_RUN_CMD: "yum install -y "
                               "/root/nosdn_vpp_rpms/*.rpm"}

    if sdn == 'opendaylight':
        # swap in a non-default ODL version when requested
        if ds_opts['odl_version'] != con.DEFAULT_ODL_VERSION:
            {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
            {con.VIRT_RUN_CMD: "rm -rf /etc/puppet/modules/opendaylight"},
            {con.VIRT_RUN_CMD: "cd /etc/puppet/modules && tar xzf "
                               "/root/puppet-opendaylight-"
                               "{}.tar.gz".format(ds_opts['odl_version'])}
            if ds_opts['odl_version'] == 'master':
                # master builds are installed with rpm --nodeps
                {con.VIRT_RUN_CMD: "rpm -ivh --nodeps /root/{}/*".format(
                    ds_opts['odl_version'])}
                # NOTE(review): the else guard for this yum install branch
                # is not visible in this view
                {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
                    ds_opts['odl_version'])}
    # NOTE(review): this elif can never match after the preceding
    # "if sdn == 'opendaylight':" -- confirm against the full source
    elif sdn == 'opendaylight' and 'odl_vpp_netvirt' in ds_opts \
            and ds_opts['odl_vpp_netvirt']:
            {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
            {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
                ODL_NETVIRT_VPP_RPM)}
    # NOTE(review): the guard for the ovs28 update/downgrade below (and the
    # continuation of these command strings) is not visible in this view
            {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum update -y "
            {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum downgrade -y "

    # work on a copy so the pristine source image is preserved
    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    # NOTE(review): the format argument / closing parens of this debug call
    # are not visible in this view
    logging.debug("Temporary overcloud image stored as: {}".format(
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")
# NOTE(review): the enclosing "def make_ssh_key():" line and the docstring
# quotes are not visible in this view of the file; the two doc lines below
# were docstring content.
# Creates public and private ssh keys with 1024 bit RSA encryption
# :return: private, public key
key = rsa.generate_private_key(
    backend=crypto_default_backend(),
    public_exponent=65537,
    # NOTE(review): the key_size argument and the closing paren are not
    # visible in this view; the doc above says 1024-bit -- confirm the
    # actual key_size against the full source.
# serialize the private half as unencrypted PEM / PKCS8
private_key = key.private_bytes(
    crypto_serialization.Encoding.PEM,
    crypto_serialization.PrivateFormat.PKCS8,
    crypto_serialization.NoEncryption())
# public half in OpenSSH authorized_keys format
# NOTE(review): the closing paren of this call is not visible in this view
public_key = key.public_key().public_bytes(
    crypto_serialization.Encoding.OpenSSH,
    crypto_serialization.PublicFormat.OpenSSH
# both halves are returned as utf-8 strings
return private_key.decode('utf-8'), public_key.decode('utf-8')
def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
    """
    Creates modified opnfv/network environments for deployment
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param opnfv_env: file path for opnfv-environment file
    :param net_env: file path for network-environment file
    :param tmp_dir: Apex tmp dir
    """
    logging.info("Preparing opnfv-environment and network-environment files")
    ds_opts = ds['deploy_options']
    # edit a copy of the opnfv env file placed in the tmp dir
    tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
    shutil.copyfile(opnfv_env, tmp_opnfv_env)
    tenant_nic_map = ns['networks']['tenant']['nic_mapping']
    # NOTE(review): initialization of tenant_nic (a dict) is not visible
    # in this view
    tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
    tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
    external_nic_map = ns['networks']['external'][0]['nic_mapping']
    external_nic = dict()
    external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]

    # ssh keypair injected into the overcloud environment
    private_key, public_key = make_ssh_key()

    # Make easier/faster variables to index in the file editor
    if 'performance' in ds_opts:
        # NOTE(review): the "perf = True/False" flag assignments and the
        # else branches for the perf_* variables below are not visible in
        # this view of the file.
        if 'vpp' in ds_opts['performance']['Compute']:
            perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
        if 'vpp' in ds_opts['performance']['Controller']:
            perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
        if 'ovs' in ds_opts['performance']['Compute']:
            perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
        if 'kernel' in ds_opts['performance']['Compute']:
            perf_kern_comp = ds_opts['performance']['Compute']['kernel']
        # NOTE(review): the else guard for this assignment is not visible
            perf_kern_comp = None

    # Modify OPNFV environment
    # TODO: Change to build a dict and outputting yaml rather than parsing
    for line in fileinput.input(tmp_opnfv_env, inplace=True):
        line = line.strip('\n')
        if 'CloudDomain' in line:
            output_line = " CloudDomain: {}".format(ns['domain_name'])
        elif 'replace_private_key' in line:
            output_line = " private_key: |\n"
            # NOTE(review): initialization of key_out is not visible here
            for line in private_key.splitlines():
                key_out += " {}\n".format(line)
            output_line += key_out
        elif 'replace_public_key' in line:
            output_line = " public_key: '{}'".format(public_key)
        elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
                'resource_registry' in line:
            output_line = "resource_registry:\n" \
                          " OS::TripleO::NodeUserData: first-boot.yaml"
        elif 'ComputeExtraConfigPre' in line and \
                ds_opts['dataplane'] == 'ovs_dpdk':
            output_line = ' OS::TripleO::ComputeExtraConfigPre: ' \
                          './ovs-dpdk-preconfig.yaml'

        if ds_opts['sdn_controller'] == 'opendaylight' and \
                'odl_vpp_routing_node' in ds_opts:
            if 'opendaylight::vpp_routing_node' in line:
                output_line = (" opendaylight::vpp_routing_node: {}.{}"
                               .format(ds_opts['odl_vpp_routing_node'],
                # NOTE(review): the second format argument (presumably the
                # domain name) is not visible in this view
        elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
            if 'NeutronVPPAgentPhysnets' in line:
                output_line = (" NeutronVPPAgentPhysnets: 'datacentre:{}'".
                               format(tenant_nic['Controller']))
        # NOTE(review): the rest of this condition (the ds_opts.get key and
        # closing paren) is not visible in this view
        elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
            if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
                # NOTE(review): the body of this branch is not visible
            elif 'NeutronDhcpAgentsPerNetwork' in line:
                num_control, num_compute = inv.get_node_counts()
                output_line = (" NeutronDhcpAgentsPerNetwork: {}"
                               .format(num_compute))
            elif 'ComputeServices' in line:
                output_line = (" ComputeServices:\n"
                               " - OS::TripleO::Services::NeutronDhcpAgent")

        # NOTE(review): a guard (likely "if perf:") before this per-role
        # loop is not visible in this view
        for role in 'NovaCompute', 'Controller':
            if role == 'NovaCompute':
                perf_opts = perf_vpp_comp
            # NOTE(review): the else guard for this assignment is missing
                perf_opts = perf_vpp_ctrl
            cfg = "{}ExtraConfig".format(role)
            if cfg in line and perf_opts:
                # NOTE(review): initialization of perf_line is not visible
                if 'main-core' in perf_opts:
                    perf_line += ("\n fdio::vpp_cpu_main_core: '{}'"
                                  .format(perf_opts['main-core']))
                if 'corelist-workers' in perf_opts:
                    # NOTE(review): the start of this += expression is not
                    # visible in this view
                        "fdio::vpp_cpu_corelist_workers: '{}'"
                        .format(perf_opts['corelist-workers']))
                if ds_opts['sdn_controller'] == 'opendaylight' and \
                        ds_opts['dataplane'] == 'fdio':
                    if role == 'NovaCompute':
                        # NOTE(review): the start of this perf_line
                        # expression is not visible in this view
                        "tripleo::profile::base::neutron::"
                        "agents::honeycomb::"
                        "interface_role_mapping:"
                        " ['{}:tenant-interface',"
                        "'{}:public-interface']"
                        .format(tenant_nic[role],
                        # NOTE(review): the second format argument, the
                        # else guard and the start of the second expression
                        # are not visible in this view
                        "tripleo::profile::base::neutron::"
                        "agents::honeycomb::"
                        "interface_role_mapping:"
                        " ['{}:tenant-interface']"
                        .format(tenant_nic[role]))
                # NOTE(review): a guard (likely "if perf_line:") before
                # this assignment is not visible in this view
                output_line = (" {}:{}".format(cfg, perf_line))

        if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
            # map heat parameter names to deploy-settings perf values
            for k, v in OVS_PERF_MAP.items():
                if k in line and v in perf_ovs_comp:
                    output_line = " {}: '{}'".format(k, perf_ovs_comp[v])

        # (FIXME) use compute's kernel settings for all nodes for now.
        # NOTE(review): a guard (likely "if perf_kern_comp:") is not
        # visible before these branches
        if 'NovaSchedulerDefaultFilters' in line:
            # NOTE(review): the assignment start (output_line = ...) is
            # not visible in this view
            " NovaSchedulerDefaultFilters: 'RamFilter," \
            "ComputeFilter,AvailabilityZoneFilter," \
            "ComputeCapabilitiesFilter," \
            "ImagePropertiesFilter,NUMATopologyFilter'"
        elif 'ComputeKernelArgs' in line:
            # NOTE(review): initialization of kernel_args is not visible
            for k, v in perf_kern_comp.items():
                kernel_args += "{}={} ".format(k, v)
            # NOTE(review): the format argument of this assignment and the
            # final print(output_line) of the loop are not visible here
            output_line = " ComputeKernelArgs: '{}'".\
    logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
def generate_ceph_key():
    """Generate a Ceph-compatible secret key.

    Creates a random 16-byte secret and prepends the binary header Ceph
    expects for a keyring entry (little-endian '<hiih': type=1, creation
    timestamp, 0, key length), then base64-encodes the whole blob.

    :return: base64-encoded key as bytes
    """
    # The random secret itself; this assignment was missing in the reviewed
    # block, leaving 'key' undefined (NameError). 16 bytes matches Ceph's
    # default secret length and the len(key) packed into the header below.
    key = os.urandom(16)
    header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
    return base64.b64encode(header + key)
def prep_storage_env(ds, tmp_dir):
    """
    Creates storage environment file for deployment. Source file is copied by
    undercloud playbook to host.
    :param ds: deploy settings
    :param tmp_dir: Apex tmp dir expected to contain storage-environment.yaml
    """
    ds_opts = ds['deploy_options']
    storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
    if not os.path.isfile(storage_file):
        logging.error("storage-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        # NOTE(review): the continuation of this exception message string is
        # not visible in this view
        raise ApexDeployException("storage-environment file not copied from "
    # rewrite the file in place, replacing placeholder keys with freshly
    # generated values
    for line in fileinput.input(storage_file, inplace=True):
        line = line.strip('\n')
        if 'CephClusterFSID' in line:
            print(" CephClusterFSID: {}".format(str(uuid.uuid4())))
        elif 'CephMonKey' in line:
            # NOTE(review): the closing of these three print calls (likely
            # 'utf-8'))) ) and the final else: print(line) passthrough are
            # not visible in this view
            print(" CephMonKey: {}".format(generate_ceph_key().decode(
        elif 'CephAdminKey' in line:
            print(" CephAdminKey: {}".format(generate_ceph_key().decode(
        elif 'CephClientKey' in line:
            print(" CephClientKey: {}".format(generate_ceph_key().decode(
    # append OSD device config when a ceph device is specified
    if 'ceph_device' in ds_opts and ds_opts['ceph_device']:
        with open(storage_file, 'a') as fh:
            fh.write(' ExtraConfig:\n')
            # NOTE(review): the closing parens of this write call are not
            # visible in this view
            fh.write(" ceph::profile::params::osds:{{{}:{{}}}}\n".format(
                ds_opts['ceph_device']
def external_network_cmds(ns):
    """
    Generates external network openstack commands
    :param ns: network settings
    :return: list of commands to configure external network
    """
    if 'external' in ns.enabled_network_list:
        # NOTE(review): a flag assignment (likely "external = True") is
        # not visible in this view
        net_config = ns['networks']['external'][0]
        pool_start, pool_end = net_config['floating_ip_range']
    # NOTE(review): the else guard (and likely "external = False") is not
    # visible; without an external network the admin network is used
        net_config = ns['networks']['admin']
        pool_start, pool_end = ns['apex']['networks']['admin'][
            'introspection_range']
    nic_config = net_config['nic_mapping']
    gateway = net_config['gateway']
    # NOTE(review): initialization of cmds (a list) is not visible here
    # create network command
    if nic_config['compute']['vlan'] == 'native':
        # NOTE(review): the body of this branch (likely ext_type = 'flat'),
        # the else guard, and the rest of the subscript below are not
        # visible in this view
        ext_type = "vlan --provider-segment {}".format(nic_config[
    cmds.append("openstack network create external --project service "
                "--external --provider-network-type {} "
                "--provider-physical-network datacentre".format(ext_type))
    # create subnet command
    cidr = net_config['cidr']
    subnet_cmd = "openstack subnet create external-subnet --project " \
                 "service --network external --no-dhcp --gateway {} " \
                 "--allocation-pool start={},end={} --subnet-range " \
                 "{}".format(gateway, pool_start, pool_end, str(cidr))
    # IPv6 external networks get slaac addressing options
    if external and cidr.version == 6:
        subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
                      '--ipv6-address-mode slaac'
    cmds.append(subnet_cmd)
    logging.debug("Neutron external network commands determined "
                  "as: {}".format(cmds))
    # NOTE(review): the return of cmds is not visible in this view
def create_congress_cmds(overcloud_file):
    # Builds the congress datasource-create command strings from the
    # credentials in the given overcloudrc file.
    # :param overcloud_file: path to overcloudrc file with OS_* variables
    drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
    overcloudrc = parsers.parse_overcloudrc(overcloud_file)
    logging.info("Creating congress commands")
    # NOTE(review): the try guard and the opening of the ds_cfg list are
    # not visible in this view
        "username={}".format(overcloudrc['OS_USERNAME']),
        "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
        "password={}".format(overcloudrc['OS_PASSWORD']),
        "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
    # NOTE(review): the except KeyError guard for this error path, and the
    # re-raise after it, are not visible in this view
        logging.error("Unable to find all keys required for congress in "
                      "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
                      "OS_PASSWORD, OS_AUTH_URL. Please check overcloudrc "
                      "file: {}".format(overcloud_file))
    # join the config entries into repeated --config options
    ds_cfg = '--config ' + ' --config '.join(ds_cfg)

    for driver in drivers:
        # the doctor driver takes no datasource config options
        if driver == 'doctor':
            cmd = "{} \"{}\"".format(driver, driver)
        # NOTE(review): the else guard for this assignment is not visible
            cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
        # NOTE(review): a guard (likely "if driver == 'nova':") before this
        # api_version append is not visible in this view
            cmd += ' --config api_version="2.34"'
        logging.debug("Congress command created: {}".format(cmd))
        # NOTE(review): appending cmd to a result list and returning it are
        # not visible in this view