1 ##############################################################################
2 # Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
20 from apex.common import constants as con
21 from apex.common.exceptions import ApexDeployException
22 from apex.common import parsers
23 from apex.virtual import utils as virt_utils
24 from cryptography.hazmat.primitives import serialization as \
26 from cryptography.hazmat.primitives.asymmetric import rsa
27 from cryptography.hazmat.backends import default_backend as \
28 crypto_default_backend
# NOTE(review): this excerpt is non-contiguous -- the embedded original line
# numbers skip values, so the assignment headers of these literals are not
# visible here.  Usage below (create_deploy_cmd / prep_env) shows they belong
# to SDN_FILE_MAP (sdn controller / feature -> TripleO env file),
# OTHER_FILE_MAP (feature flag -> TripleO env file) and OVS_PERF_MAP
# (TripleO DPDK parameter name -> deploy-settings performance key).
# TODO(review): confirm grouping against the full file.
33 'sfc': 'neutron-sfc-opendaylight.yaml',
34 'vpn': 'neutron-bgpvpn-opendaylight.yaml',
35 'gluon': 'gluon.yaml',
37 'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
38 'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
39 'default': 'neutron-opendaylight-honeycomb.yaml'
41 'l2gw': 'neutron-l2gw-opendaylight.yaml',
42 'default': 'neutron-opendaylight.yaml',
45 'sfc': 'neutron-onos-sfc.yaml',
46 'default': 'neutron-onos.yaml'
48 'ovn': 'neutron-ml2-ovn.yaml',
50 'vpp': 'neutron-ml2-vpp.yaml',
# (k, v) tuple form: feature key plus the value it must equal -- matches the
# isinstance(v, tuple) branch in build_sdn_env_list below.
51 'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
56 'tacker': 'enable_tacker.yaml',
57 'congress': 'enable_congress.yaml',
58 'barometer': 'enable_barometer.yaml',
59 'rt_kvm': 'enable_rt_kvm.yaml'
63 'HostCpusList': 'dpdk_cores',
64 'NeutronDpdkCoreList': 'pmd_cores',
65 'NeutronDpdkSocketMemory': 'socket_memory',
66 'NeutronDpdkMemoryChannels': 'memory_channels'
# RPM names/paths baked into the overcloud image customization (prep_image).
69 OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
70 OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
# Continuation of this path (original line 72+) is not in view.
71 ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
# NOTE(review): interior lines (orig 76, 78, 85, 90-92, 99, 102, 104, 108,
# 111, 114) are missing from this excerpt; the env_list=None -> [] default
# initialization and the else/str branches are presumably among them -- confirm
# against the full file before editing.
75 def build_sdn_env_list(ds, sdn_map, env_list=None):
77 Builds a list of SDN environment files to be used in the deploy cmd.
79 This function recursively searches an sdn_map. First the sdn controller is
80 matched and then the function looks for enabled features for that
81 controller to determine which environment files should be used. By
82 default the feature will be checked if set to true in deploy settings to be
83 added to the list. If a feature does not have a boolean value, then the
84 key and value pair to compare with are checked as a tuple (k,v).
86 :param ds: deploy settings
87 :param sdn_map: SDN map to recursively search
88 :param env_list: recursive var to hold previously found env_list
89 :return: A list of env files
# Match either the configured sdn_controller or an explicitly-true feature.
93 for k, v in sdn_map.items():
94 if ds['sdn_controller'] == k or (k in ds and ds[k] is True):
95 if isinstance(v, dict):
96 # Append default SDN env file first
97 # The assumption is that feature-enabled SDN env files
98 # override and do not conflict with previously set default
100 if ds['sdn_controller'] == k and 'default' in v:
101 env_list.append(os.path.join(con.THT_ENV_DIR,
# Recurse into the nested map to pick up feature-specific env files.
103 env_list.extend(build_sdn_env_list(ds, v))
105 env_list.append(os.path.join(con.THT_ENV_DIR, v))
106 # check if the value is not a boolean
107 elif isinstance(v, tuple):
# Tuple form: v[0] is the expected setting value, v[1] the env file
# (comparison line orig 108 not visible here).
109 env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
# Fall back to the map-level default when nothing matched.
110 if len(env_list) == 0:
112 env_list.append(os.path.join(
113 con.THT_ENV_DIR, sdn_map['default']))
115 logging.warning("Unable to find default file for SDN")
# NOTE(review): excerpt is non-contiguous (orig 122-123 docstring, 126-127,
# 131, 135-136, 143-148, 155, 166, 173-176, 183-188 missing).  The missing
# lines include the storage/virtual condition guards and the final fh.write.
# NOTE(review): log message at orig 152 reads "compute nodes{}" -- missing
# ": " separator; string literals cannot be altered in a doc-only pass.
120 def create_deploy_cmd(ds, ns, inv, tmp_dir,
121 virtual, env_file='opnfv-environment.yaml',
124 logging.info("Creating deployment command")
125 deploy_options = ['network-environment.yaml']
128 deploy_options.append(env_file)
129 ds_opts = ds['deploy_options']
# SDN-specific env files derived from the deploy settings.
130 deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)
# Feature env files (tacker/congress/...) enabled by truthy deploy options.
132 for k, v in OTHER_FILE_MAP.items():
133 if k in ds_opts and ds_opts[k]:
134 deploy_options.append(os.path.join(con.THT_ENV_DIR, v))
137 prep_storage_env(ds, tmp_dir)
138 deploy_options.append(os.path.join(con.THT_ENV_DIR,
139 'storage-environment.yaml'))
140 if ds['global_params']['ha_enabled']:
141 deploy_options.append(os.path.join(con.THT_ENV_DIR,
142 'puppet-pacemaker.yaml'))
145 deploy_options.append('virtual-environment.yaml')
147 deploy_options.append('baremetal-environment.yaml')
# Sanity-check inventory: both roles must be present.
149 num_control, num_compute = inv.get_node_counts()
150 if num_control == 0 or num_compute == 0:
151 logging.error("Detected 0 control or compute nodes. Control nodes: "
152 "{}, compute nodes{}".format(num_control, num_compute))
153 raise ApexDeployException("Invalid number of control or computes")
154 elif num_control > 1 and not ds['global_params']['ha_enabled']:
156 if platform.machine() == 'aarch64':
157 # aarch64 deploys were not completing in the default 90 mins.
158 # Not sure if this is related to the hardware the OOO support
159 # was developed on or the virtualization support in CentOS
160 # Either way it will probably get better over time as the aarch
161 # support matures in CentOS and deploy time should be tested in
162 # the future so this multiplier can be removed.
# NOTE(review): mutates the shared module constant con.DEPLOY_TIMEOUT --
# doubles again on every call; consider a local variable in a future fix.
163 con.DEPLOY_TIMEOUT *= 2
164 cmd = "openstack overcloud deploy --templates --timeout {} " \
165 .format(con.DEPLOY_TIMEOUT)
167 for option in deploy_options:
168 cmd += " -e {}".format(option)
169 cmd += " --ntp-server {}".format(ns['ntp'][0])
170 cmd += " --control-scale {}".format(num_control)
171 cmd += " --compute-scale {}".format(num_compute)
172 cmd += ' --control-flavor control --compute-flavor compute'
174 cmd += ' --networks-file network_data.yaml'
# Nested KVM probe: fall back to qemu emulation when /sys reports nesting off.
# Assumes an Intel host path; guard for its existence is not in view.
177 with open('/sys/module/kvm_intel/parameters/nested') as f:
178 nested_kvm = f.read().strip()
179 if nested_kvm != 'Y':
180 libvirt_type = 'qemu'
181 cmd += ' --libvirt-type {}'.format(libvirt_type)
182 logging.info("Deploy command set: {}".format(cmd))
# Persist the command for the undercloud to execute (write call not in view).
184 with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
# NOTE(review): excerpt is non-contiguous; many virt_cmds.append/extend
# opener lines and condition guards are missing (orig 190, 196-199, 204-213,
# 216-222, 224-228, 230-234, 238, 243-246, 251-259, 263, 270, 274-283, 286,
# 292-303, 306, 310-319, 323).  All comments below describe only what the
# visible lines show.
189 def prep_image(ds, img, tmp_dir, root_pw=None):
191 Locates sdn image and preps for deployment.
192 :param ds: deploy settings
193 :param img: sdn image
194 :param tmp_dir: dir to store modified sdn image
195 :param root_pw: password to configure for overcloud image
198 # TODO(trozet): Come up with a better way to organize this logic in this
200 logging.info("Preparing image: {} for deployment".format(img))
201 if not os.path.isfile(img):
202 logging.error("Missing SDN image {}".format(img))
203 raise ApexDeployException("Missing SDN image file: {}".format(img))
205 ds_opts = ds['deploy_options']
207 sdn = ds_opts['sdn_controller']
208 # we need this due to rhbz #1436021
209 # fixed in systemd-219-37.el7
211 logging.info("Neutron openvswitch-agent disabled")
# Remove the agent's systemd unit links inside the image.
214 "rm -f /etc/systemd/system/multi-user.target.wants/"
215 "neutron-openvswitch-agent.service"},
218 "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
223 virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
# Arrange for the Quagga zrpcd daemon to start at boot (BGPVPN support,
# presumably -- guard condition not in view).
226 "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
227 "/opt/quagga/etc/init.d/zrpcd_start.sh"})
229 con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
232 con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
233 "init.d/zrpcd_start.sh' /etc/rc.local "})
235 con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
236 "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
237 logging.info("ZRPCD process started")
239 dataplane = ds_opts['dataplane']
240 if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
241 logging.info("Enabling kernel modules for dpdk")
242 # file to module mapping
244 os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
245 os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
# Write a modprobe stub script per module, then upload it into the image
# and mark it executable.
247 for mod_file, mod in uio_types.items():
248 with open(mod_file, 'w') as fh:
249 fh.write('#!/bin/bash\n')
250 fh.write('exec /sbin/modprobe {}'.format(mod))
254 {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
256 {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
257 "{}".format(os.path.basename(mod_file))}
# Optionally set the image root password via virt-customize.
260 pw_op = "password:{}".format(root_pw)
261 virt_cmds.append({con.VIRT_PW: pw_op})
# SFC on plain OVS needs the NSH-capable OVS build baked into /root.
262 if ds_opts['sfc'] and dataplane == 'ovs':
264 {con.VIRT_RUN_CMD: "yum -y install "
265 "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
266 "{}".format(OVS_NSH_KMOD_RPM)},
267 {con.VIRT_RUN_CMD: "yum downgrade -y "
268 "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
269 "{}".format(OVS_NSH_RPM)}
271 if dataplane == 'fdio':
272 # Patch neutron with using OVS external interface for router
273 # and add generic linux NS interface driver
275 {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
276 "-p1 < neutron-patch-NSDriver.patch"})
277 {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
279 {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
280 {con.VIRT_RUN_CMD: "yum install -y "
281 "/root/nosdn_vpp_rpms/*.rpm"}
284 if sdn == 'opendaylight':
# Replace the pre-installed ODL + puppet module when a non-default
# version was requested.
285 if ds_opts['odl_version'] != con.DEFAULT_ODL_VERSION:
287 {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
288 {con.VIRT_RUN_CMD: "rm -rf /etc/puppet/modules/opendaylight"},
289 {con.VIRT_RUN_CMD: "cd /etc/puppet/modules && tar xzf "
290 "/root/puppet-opendaylight-"
291 "{}.tar.gz".format(ds_opts['odl_version'])}
293 if ds_opts['odl_version'] == 'master':
295 {con.VIRT_RUN_CMD: "rpm -ivh --nodeps /root/{}/*".format(
296 ds_opts['odl_version'])}
300 {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
301 ds_opts['odl_version'])}
# NOTE(review): this elif repeats the sdn == 'opendaylight' test that the
# if at orig 284 already matched, so as shown it can never be taken --
# known dead-branch pattern; verify against the full file.
304 elif sdn == 'opendaylight' and 'odl_vpp_netvirt' in ds_opts \
305 and ds_opts['odl_vpp_netvirt']:
307 {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
308 {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
309 ODL_NETVIRT_VPP_RPM)}
# OVS 2.8 update/downgrade path (guard + package args at orig 310-319 not
# in view).
314 {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum update -y "
316 {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum downgrade -y "
# Work on a copy so the pristine source image is preserved, then apply all
# accumulated virt-customize operations.
320 tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
321 shutil.copyfile(img, tmp_oc_image)
322 logging.debug("Temporary overcloud image stored as: {}".format(
324 virt_utils.virt_customize(virt_cmds, tmp_oc_image)
325 logging.info("Overcloud image customization complete")
# NOTE(review): the def line (orig ~328) and the key_size argument line
# (orig ~336-338) are not in this excerpt.  The docstring claims 1024-bit
# RSA but the actual key_size cannot be confirmed from the visible lines.
330 Creates public and private ssh keys with 1024 bit RSA encryption
331 :return: private, public key
333 key = rsa.generate_private_key(
334 backend=crypto_default_backend(),
335 public_exponent=65537,
# Serialize: private key as unencrypted PKCS8 PEM, public key in OpenSSH
# authorized_keys format.
339 private_key = key.private_bytes(
340 crypto_serialization.Encoding.PEM,
341 crypto_serialization.PrivateFormat.PKCS8,
342 crypto_serialization.NoEncryption())
343 public_key = key.public_key().public_bytes(
344 crypto_serialization.Encoding.OpenSSH,
345 crypto_serialization.PublicFormat.OpenSSH
# Both are bytes; decode so callers get str.
347 return private_key.decode('utf-8'), public_key.decode('utf-8')
# NOTE(review): excerpt is non-contiguous; missing lines include the
# perf* variable defaults (orig 379-380, 383-390, 393-396, 399-403), several
# output_line assignments (408, 413, 427, 433, 439, 441, 449-450, 454, 458,
# 463, 469, 476-478, 484, 486, 491-496, 502, 505, 507-510) and the final
# print of output_line.  Comments describe visible lines only.
350 def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
352 Creates modified opnfv/network environments for deployment
353 :param ds: deploy settings
354 :param ns: network settings
355 :param inv: node inventory
356 :param opnfv_env: file path for opnfv-environment file
357 :param net_env: file path for network-environment file
358 :param tmp_dir: Apex tmp dir
362 logging.info("Preparing opnfv-environment and network-environment files")
363 ds_opts = ds['deploy_options']
# Edit a copy of the opnfv-environment file inside tmp_dir.
364 tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
365 shutil.copyfile(opnfv_env, tmp_opnfv_env)
# First NIC of each role's tenant/external mapping from network settings.
366 tenant_nic_map = ns['networks']['tenant']['nic_mapping']
368 tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
369 tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
370 external_nic_map = ns['networks']['external'][0]['nic_mapping']
371 external_nic = dict()
372 external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]
375 private_key, public_key = make_ssh_key()
377 # Make easier/faster variables to index in the file editor
378 if 'performance' in ds_opts:
381 if 'vpp' in ds_opts['performance']['Compute']:
382 perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
385 if 'vpp' in ds_opts['performance']['Controller']:
386 perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
391 if 'ovs' in ds_opts['performance']['Compute']:
392 perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
397 if 'kernel' in ds_opts['performance']['Compute']:
398 perf_kern_comp = ds_opts['performance']['Compute']['kernel']
400 perf_kern_comp = None
404 # Modify OPNFV environment
405 # TODO: Change to build a dict and outputting yaml rather than parsing
# fileinput with inplace=True redirects stdout into the file, so printed
# lines (print calls not in view) become the new file contents.
406 for line in fileinput.input(tmp_opnfv_env, inplace=True):
407 line = line.strip('\n')
409 if 'CloudDomain' in line:
410 output_line = " CloudDomain: {}".format(ns['domain_name'])
411 elif 'replace_private_key' in line:
412 output_line = " private_key: |\n"
# NOTE(review): this inner loop reuses (shadows) the outer fileinput
# variable `line`; after it runs, `line` holds the last key line, which
# the later `if ... in line` checks in this same iteration then see.
414 for line in private_key.splitlines():
415 key_out += " {}\n".format(line)
416 output_line += key_out
417 elif 'replace_public_key' in line:
418 output_line = " public_key: '{}'".format(public_key)
# Register the first-boot template when kernel args or rt_kvm need it.
419 elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
420 'resource_registry' in line:
421 output_line = "resource_registry:\n" \
422 " OS::TripleO::NodeUserData: first-boot.yaml"
423 elif 'ComputeExtraConfigPre' in line and \
424 ds_opts['dataplane'] == 'ovs_dpdk':
425 output_line = ' OS::TripleO::ComputeExtraConfigPre: ' \
426 './ovs-dpdk-preconfig.yaml'
428 if ds_opts['sdn_controller'] == 'opendaylight' and \
429 'odl_vpp_routing_node' in ds_opts:
430 if 'opendaylight::vpp_routing_node' in line:
431 output_line = (" opendaylight::vpp_routing_node: {}.{}"
432 .format(ds_opts['odl_vpp_routing_node'],
# No-SDN FDIO: wire the VPP agent physnet to the controller tenant NIC.
434 elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
435 if 'NeutronVPPAgentPhysnets' in line:
436 output_line = (" NeutronVPPAgentPhysnets: 'datacentre:{}'".
437 format(tenant_nic['Controller']))
438 elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
440 if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
442 elif 'NeutronDhcpAgentsPerNetwork' in line:
443 num_control, num_compute = inv.get_node_counts()
444 output_line = (" NeutronDhcpAgentsPerNetwork: {}"
445 .format(num_compute))
446 elif 'ComputeServices' in line:
447 output_line = (" ComputeServices:\n"
448 " - OS::TripleO::Services::NeutronDhcpAgent")
# Per-role VPP performance tuning for the {role}ExtraConfig keys.
451 for role in 'NovaCompute', 'Controller':
452 if role == 'NovaCompute':
453 perf_opts = perf_vpp_comp
455 perf_opts = perf_vpp_ctrl
456 cfg = "{}ExtraConfig".format(role)
457 if cfg in line and perf_opts:
459 if 'main-core' in perf_opts:
460 perf_line += ("\n fdio::vpp_cpu_main_core: '{}'"
461 .format(perf_opts['main-core']))
462 if 'corelist-workers' in perf_opts:
464 "fdio::vpp_cpu_corelist_workers: '{}'"
465 .format(perf_opts['corelist-workers']))
466 if ds_opts['sdn_controller'] == 'opendaylight' and \
467 ds_opts['dataplane'] == 'fdio':
# Compute nodes map both tenant and public interfaces for
# honeycomb; controllers (else branch, opener not in view)
# map only the tenant interface.
468 if role == 'NovaCompute':
470 "tripleo::profile::base::neutron::"
471 "agents::honeycomb::"
472 "interface_role_mapping:"
473 " ['{}:tenant-interface',"
474 "'{}:public-interface']"
475 .format(tenant_nic[role],
479 "tripleo::profile::base::neutron::"
480 "agents::honeycomb::"
481 "interface_role_mapping:"
482 " ['{}:tenant-interface']"
483 .format(tenant_nic[role]))
485 output_line = (" {}:{}".format(cfg, perf_line))
# OVS DPDK tuning: translate deploy-settings keys to TripleO params.
487 if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
488 for k, v in OVS_PERF_MAP.items():
489 if k in line and v in perf_ovs_comp:
490 output_line = " {}: '{}'".format(k, perf_ovs_comp[v])
493 # (FIXME) use compute's kernel settings for all nodes for now.
495 if 'NovaSchedulerDefaultFilters' in line:
497 " NovaSchedulerDefaultFilters: 'RamFilter," \
498 "ComputeFilter,AvailabilityZoneFilter," \
499 "ComputeCapabilitiesFilter," \
500 "ImagePropertiesFilter,NUMATopologyFilter'"
501 elif 'ComputeKernelArgs' in line:
# Flatten the kernel-args dict into "k=v k=v " form.
503 for k, v in perf_kern_comp.items():
504 kernel_args += "{}={} ".format(k, v)
506 output_line = " ComputeKernelArgs: '{}'".\
511 logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
# Generate a Ceph secret in the standard base64 key format: a packed
# binary header (type=1, creation timestamp, key length) followed by the
# raw key bytes.
# NOTE(review): the line assigning `key` (orig 515, presumably
# key = os.urandom(16)) is not in this excerpt -- confirm before editing.
514 def generate_ceph_key():
516 header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
517 return base64.b64encode(header + key)
# NOTE(review): excerpt is non-contiguous (orig 521, 524-527, 535, 542, 545,
# 548-550, 556-557 missing), including the print of unmatched lines and the
# closing of the decode() calls.
520 def prep_storage_env(ds, tmp_dir):
522 Creates storage environment file for deployment. Source file is copied by
523 undercloud playbook to host.
528 ds_opts = ds['deploy_options']
529 storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
530 if not os.path.isfile(storage_file):
531 logging.error("storage-environment file is not in tmp directory: {}. "
532 "Check if file was copied from "
533 "undercloud".format(tmp_dir))
534 raise ApexDeployException("storage-environment file not copied from "
# In-place rewrite: substitute fresh FSID and Ceph keys for the
# placeholder lines (inplace=True sends print output into the file).
536 for line in fileinput.input(storage_file, inplace=True):
537 line = line.strip('\n')
538 if 'CephClusterFSID' in line:
539 print(" CephClusterFSID: {}".format(str(uuid.uuid4())))
540 elif 'CephMonKey' in line:
541 print(" CephMonKey: {}".format(generate_ceph_key().decode(
543 elif 'CephAdminKey' in line:
544 print(" CephAdminKey: {}".format(generate_ceph_key().decode(
546 elif 'CephClientKey' in line:
547 print(" CephClientKey: {}".format(generate_ceph_key().decode(
# Append an OSD device mapping when the deploy settings specify one.
551 if 'ceph_device' in ds_opts and ds_opts['ceph_device']:
552 with open(storage_file, 'a') as fh:
553 fh.write(' ExtraConfig:\n')
554 fh.write(" ceph::profile::params::osds:{{{}:{{}}}}\n".format(
555 ds_opts['ceph_device']
# NOTE(review): excerpt is non-contiguous (orig 560, 564, 567, 569, 571,
# 576, 579-580, 582, 598 missing) -- the cmds list initialization, the
# `external` flag assignment and the return statement are among the missing
# lines.
559 def external_network_cmds(ns):
561 Generates external network openstack commands
562 :param ns: network settings
563 :return: list of commands to configure external network
# Prefer the real external network; otherwise fall back to the admin
# network with its introspection range as the floating pool.
565 if 'external' in ns.enabled_network_list:
566 net_config = ns['networks']['external'][0]
568 pool_start, pool_end = net_config['floating_ip_range']
570 net_config = ns['networks']['admin']
572 pool_start, pool_end = ns['apex']['networks']['admin'][
573 'introspection_range']
574 nic_config = net_config['nic_mapping']
575 gateway = net_config['gateway']
577 # create network command
# Native VLAN -> flat provider network (assignment line not in view);
# otherwise a vlan provider network with an explicit segment id.
578 if nic_config['compute']['vlan'] == 'native':
581 ext_type = "vlan --provider-segment {}".format(nic_config[
583 cmds.append("openstack network create external --project service "
584 "--external --provider-network-type {} "
585 "--provider-physical-network datacentre".format(ext_type))
586 # create subnet command
587 cidr = net_config['cidr']
588 subnet_cmd = "openstack subnet create external-subnet --project " \
589 "service --network external --no-dhcp --gateway {} " \
590 "--allocation-pool start={},end={} --subnet-range " \
591 "{}".format(gateway, pool_start, pool_end, str(cidr))
# IPv6 external networks get SLAAC addressing.
592 if external and cidr.version == 6:
593 subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
594 '--ipv6-address-mode slaac'
595 cmds.append(subnet_cmd)
596 logging.debug("Neutron external network commands determined "
597 "as: {}".format(cmds))
# NOTE(review): excerpt is non-contiguous (orig 605-606, 611-612, 617-618,
# 620, 624, 626, 629-631 missing) -- the ds_cfg list opener, the KeyError
# handler around the overcloudrc lookups, and the cmds accumulation/return
# are among the missing lines.
601 def create_congress_cmds(overcloud_file):
# Congress datasource drivers to register against the overcloud.
602 drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
603 overcloudrc = parsers.parse_overcloudrc(overcloud_file)
604 logging.info("Creating congress commands")
# Common auth --config values pulled from the parsed overcloudrc.
607 "username={}".format(overcloudrc['OS_USERNAME']),
608 "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
609 "password={}".format(overcloudrc['OS_PASSWORD']),
610 "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
613 logging.error("Unable to find all keys required for congress in "
614 "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
615 "OS_PASSWORD, OS_AUTH_URL. Please check overcloudrc "
616 "file: {}".format(overcloud_file))
619 ds_cfg = '--config ' + ' --config '.join(ds_cfg)
# The doctor driver takes no auth config; nova additionally pins its
# API microversion (guard for the nova case at orig 626 not in view).
621 for driver in drivers:
622 if driver == 'doctor':
623 cmd = "{} \"{}\"".format(driver, driver)
625 cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
627 cmd += ' --config api_version="2.34"'
628 logging.debug("Congress command created: {}".format(cmd))