1 ##############################################################################
2 # Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
19 from apex.common import constants as con
20 from apex.common.exceptions import ApexDeployException
21 from apex.common import parsers
22 from apex.virtual import utils as virt_utils
23 from cryptography.hazmat.primitives import serialization as \
25 from cryptography.hazmat.primitives.asymmetric import rsa
26 from cryptography.hazmat.backends import default_backend as \
27 crypto_default_backend
# NOTE(review): this is a sampled, numbered listing -- the embedded original
# line numbers jump (34 -> 36, 49 -> 54, ...), so the assignment headers for
# these dict literals (presumably SDN_FILE_MAP = {...}, OTHER_FILE_MAP = {...},
# OVS_PERF_MAP = {...}) are not visible here. Confirm against the full source.
# Fragment: SDN controller / feature keys mapped to TripleO environment files.
32 'sfc': 'neutron-sfc-opendaylight.yaml',
33 'vpn': 'neutron-bgpvpn-opendaylight.yaml',
34 'gluon': 'gluon.yaml',
36 'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
37 'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
38 'default': 'neutron-opendaylight-honeycomb.yaml'
40 'default': 'neutron-opendaylight.yaml',
43 'sfc': 'neutron-onos-sfc.yaml',
44 'default': 'neutron-onos.yaml'
46 'ovn': 'neutron-ml2-ovn.yaml',
48 'vpp': 'neutron-ml2-vpp.yaml',
# Tuple form: feature key compared against a specific value (see
# build_sdn_env_list below), not a boolean flag.
49 'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
# Fragment: optional feature flags -> their enable_*.yaml environment files.
54 'tacker': 'enable_tacker.yaml',
55 'congress': 'enable_congress.yaml',
56 'barometer': 'enable_barometer.yaml',
57 'rt_kvm': 'enable_rt_kvm.yaml'
# Fragment: Heat parameter name -> deploy-settings performance key, consumed
# by prep_env when dataplane is ovs_dpdk.
61 'HostCpusList': 'dpdk_cores',
62 'NeutronDpdkCoreList': 'pmd_cores',
63 'NeutronDpdkSocketMemory': 'socket_memory',
64 'NeutronDpdkMemoryChannels': 'memory_channels'
# Pinned RPM names/paths used by prep_image during image customization.
# NOTE(review): versions are hard-coded (OVS 2.6.1, ODL 7.0.0 snapshot);
# the continuation of ODL_NETVIRT_VPP_RPM is cut off by the listing gap.
67 OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
68 OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
69 ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
# Recursively resolve SDN-related TripleO environment files from sdn_map.
# NOTE(review): sampled listing -- original lines 74, 76, 83, 88-90, 97, 100,
# 102, 106, 109 and 112 are missing, so the docstring quotes, the env_list
# initialization (presumably `env_list = list()` when None) and several
# else/if lines are not visible here. Do not infer the missing lines; confirm
# against the full source before modifying.
73 def build_sdn_env_list(ds, sdn_map, env_list=None):
75 Builds a list of SDN environment files to be used in the deploy cmd.
77 This function recursively searches an sdn_map. First the sdn controller is
78 matched and then the function looks for enabled features for that
79 controller to determine which environment files should be used. By
80 default the feature will be checked if set to true in deploy settings to be
81 added to the list. If a feature does not have a boolean value, then the
82 key and value pair to compare with are checked as a tuple (k,v).
84 :param ds: deploy settings
85 :param sdn_map: SDN map to recursively search
86 :param env_list: recursive var to hold previously found env_list
87 :return: A list of env files
# A key matches when it names the selected sdn_controller, or when it is a
# deploy-settings key whose value is exactly True.
91 for k, v in sdn_map.items():
92 if ds['sdn_controller'] == k or (k in ds and ds[k] is True):
93 if isinstance(v, dict):
94 # Append default SDN env file first
95 # The assumption is that feature-enabled SDN env files
96 # override and do not conflict with previously set default
98 if ds['sdn_controller'] == k and 'default' in v:
99 env_list.append(os.path.join(con.THT_ENV_DIR,
# Recurse into the nested map for feature-specific env files.
101 env_list.extend(build_sdn_env_list(ds, v))
103 env_list.append(os.path.join(con.THT_ENV_DIR, v))
104 # check if the value is not a boolean
105 elif isinstance(v, tuple):
# Tuple entries pair a deploy-settings value with its env file
# (the comparison line itself is missing from this listing).
107 env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
# Fall back to the map-level 'default' entry when nothing matched; warn if
# no default exists (the surrounding if/else lines are partially missing).
108 if len(env_list) == 0:
110 env_list.append(os.path.join(
111 con.THT_ENV_DIR, sdn_map['default']))
113 logging.warning("Unable to find default file for SDN")
# Build the `openstack overcloud deploy` command string and persist it to
# <tmp_dir>/deploy_command.
# NOTE(review): sampled listing -- the parameter list continuation (line 120),
# docstring, several if/else headers (e.g. the storage / virtual-vs-baremetal
# conditions at 133-134, 141-144) and the final fh.write are missing here.
# NOTE(review): defect observed at original line 150 -- the log format string
# "compute nodes{}" lacks ": " before the placeholder; cannot be fixed in a
# doc-only pass (runtime string).
118 def create_deploy_cmd(ds, ns, inv, tmp_dir,
119 virtual, env_file='opnfv-environment.yaml',
122 logging.info("Creating deployment command")
# Always start with the generated network-environment, then the opnfv env.
123 deploy_options = ['network-environment.yaml']
126 deploy_options.append(env_file)
127 ds_opts = ds['deploy_options']
# SDN-specific env files come from the SDN_FILE_MAP resolution.
128 deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)
# Feature flags (tacker, congress, ...) map to enable_*.yaml files.
130 for k, v in OTHER_FILE_MAP.items():
131 if k in ds_opts and ds_opts[k]:
132 deploy_options.append(os.path.join(con.THT_ENV_DIR, v))
# Storage env is prepared in tmp_dir, then referenced (guard line missing).
135 prep_storage_env(ds, tmp_dir)
136 deploy_options.append(os.path.join(con.THT_ENV_DIR,
137 'storage-environment.yaml'))
138 if ds['global_params']['ha_enabled']:
139 deploy_options.append(os.path.join(con.THT_ENV_DIR,
140 'puppet-pacemaker.yaml'))
# Virtual vs. baremetal branch -- the `if virtual:`/`else:` lines are not
# visible in this listing.
143 deploy_options.append('virtual-environment.yaml')
145 deploy_options.append('baremetal-environment.yaml')
# Sanity-check node counts from the inventory before composing the command.
147 num_control, num_compute = inv.get_node_counts()
148 if num_control == 0 or num_compute == 0:
149 logging.error("Detected 0 control or compute nodes. Control nodes: "
150 "{}, compute nodes{}".format(num_control, num_compute))
151 raise ApexDeployException("Invalid number of control or computes")
152 elif num_control > 1 and not ds['global_params']['ha_enabled']:
154 cmd = "openstack overcloud deploy --templates --timeout {} " \
155 .format(con.DEPLOY_TIMEOUT)
157 for option in deploy_options:
158 cmd += " -e {}".format(option)
159 cmd += " --ntp-server {}".format(ns['ntp'][0])
160 cmd += " --control-scale {}".format(num_control)
161 cmd += " --compute-scale {}".format(num_compute)
162 cmd += ' --control-flavor control --compute-flavor compute'
164 cmd += ' --networks-file network_data.yaml'
# Fall back to qemu when nested KVM is not enabled on the host; the
# initial libvirt_type assignment (presumably 'kvm') is missing here.
167 with open('/sys/module/kvm_intel/parameters/nested') as f:
168 nested_kvm = f.read().strip()
169 if nested_kvm != 'Y':
170 libvirt_type = 'qemu'
171 cmd += ' --libvirt-type {}'.format(libvirt_type)
172 logging.info("Deploy command set: {}".format(cmd))
# Persist for later execution on the undercloud (write call missing below).
174 with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
# Copy the overcloud qcow2 into tmp_dir and customize it (virt-customize)
# according to the deploy options: disable OVS agent for SDN, DPDK kernel
# modules, root password, SFC/NSH RPMs, FDIO patches, ODL version swaps.
# NOTE(review): sampled listing -- many virt_cmds list openers/appends and
# conditional headers are missing (e.g. lines 200-203, 214-215, 233, 241-245,
# 300-309), so the exact grouping of the command dicts cannot be verified here.
179 def prep_image(ds, img, tmp_dir, root_pw=None):
181 Locates sdn image and preps for deployment.
182 :param ds: deploy settings
183 :param img: sdn image
184 :param tmp_dir: dir to store modified sdn image
185 :param root_pw: password to configure for overcloud image
188 # TODO(trozet): Come up with a better way to organize this logic in this
190 logging.info("Preparing image: {} for deployment".format(img))
# Fail fast when the base image is absent.
191 if not os.path.isfile(img):
192 logging.error("Missing SDN image {}".format(img))
193 raise ApexDeployException("Missing SDN image file: {}".format(img))
195 ds_opts = ds['deploy_options']
197 sdn = ds_opts['sdn_controller']
198 # we need this due to rhbz #1436021
199 # fixed in systemd-219-37.el7
201 logging.info("Neutron openvswitch-agent disabled")
# Remove the agent unit files inside the image (enclosing dict lines cut).
204 "rm -f /etc/systemd/system/multi-user.target.wants/"
205 "neutron-openvswitch-agent.service"},
208 "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
213 virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
# zrpcd startup script wired into rc.local (presumably for the bgpvpn
# feature -- the guarding condition is missing from this listing).
216 "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
217 "/opt/quagga/etc/init.d/zrpcd_start.sh"})
219 con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
222 con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
223 "init.d/zrpcd_start.sh' /etc/rc.local "})
225 con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
226 "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
227 logging.info("ZRPCD process started")
229 dataplane = ds_opts['dataplane']
# DPDK/FDIO need vfio_pci / uio_pci_generic loaded at boot: write modprobe
# scripts locally, then upload them into /etc/sysconfig/modules/.
230 if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
231 logging.info("Enabling kernel modules for dpdk")
232 # file to module mapping
234 os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
235 os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
237 for mod_file, mod in uio_types.items():
238 with open(mod_file, 'w') as fh:
239 fh.write('#!/bin/bash\n')
240 fh.write('exec /sbin/modprobe {}'.format(mod))
244 {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
246 {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
247 "{}".format(os.path.basename(mod_file))}
# Optional root password injection (guard `if root_pw:` missing here).
250 pw_op = "password:{}".format(root_pw)
251 virt_cmds.append({con.VIRT_PW: pw_op})
# SFC on plain OVS requires the NSH-enabled OVS build.
252 if ds_opts['sfc'] and dataplane == 'ovs':
254 {con.VIRT_RUN_CMD: "yum -y install "
255 "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
256 "{}".format(OVS_NSH_KMOD_RPM)},
257 {con.VIRT_RUN_CMD: "yum downgrade -y "
258 "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
259 "{}".format(OVS_NSH_RPM)}
261 if dataplane == 'fdio':
262 # Patch neutron with using OVS external interface for router
263 # and add generic linux NS interface driver
265 {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
266 "-p1 < neutron-patch-NSDriver.patch"}) 
269 {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
270 {con.VIRT_RUN_CMD: "yum install -y "
271 "/root/nosdn_vpp_rpms/*.rpm"}
# ODL: when a non-default version is requested, replace both the ODL RPM
# and its puppet module inside the image.
274 if sdn == 'opendaylight':
275 if ds_opts['odl_version'] != con.DEFAULT_ODL_VERSION:
277 {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
278 {con.VIRT_RUN_CMD: "rm -rf /etc/puppet/modules/opendaylight"},
279 {con.VIRT_RUN_CMD: "cd /etc/puppet/modules && tar xzf "
280 "/root/puppet-opendaylight-"
281 "{}.tar.gz".format(ds_opts['odl_version'])}
# 'master' builds install via rpm --nodeps; releases via yum.
283 if ds_opts['odl_version'] == 'master':
285 {con.VIRT_RUN_CMD: "rpm -ivh --nodeps /root/{}/*".format(
286 ds_opts['odl_version'])}
290 {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
291 ds_opts['odl_version'])}
294 elif sdn == 'opendaylight' and 'odl_vpp_netvirt' in ds_opts \
295 and ds_opts['odl_vpp_netvirt']:
297 {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
298 {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
299 ODL_NETVIRT_VPP_RPM)}
# OVS 2.8 update/downgrade commands (enclosing condition missing here).
304 {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum update -y "
306 {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum downgrade -y "
# Work on a copy so the pristine source image is never modified.
310 tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
311 shutil.copyfile(img, tmp_oc_image)
312 logging.debug("Temporary overcloud image stored as: {}".format(
314 virt_utils.virt_customize(virt_cmds, tmp_oc_image)
315 logging.info("Overcloud image customization complete")
# NOTE(review): the enclosing `def make_ssh_key():` line (original ~318) is
# missing from this sampled listing; only docstring and body remain.
# NOTE(review): the docstring says "1024 bit RSA" but the key_size argument
# line (between 325 and 329) is not visible -- verify the actual key size
# against the full source; the docstring may be stale.
320 Creates public and private ssh keys with 1024 bit RSA encryption
321 :return: private, public key
323 key = rsa.generate_private_key(
324 backend=crypto_default_backend(),
325 public_exponent=65537,
# Serialize: private key as unencrypted PKCS8 PEM, public key in OpenSSH
# (authorized_keys) format.
329 private_key = key.private_bytes(
330 crypto_serialization.Encoding.PEM,
331 crypto_serialization.PrivateFormat.PKCS8,
332 crypto_serialization.NoEncryption())
333 public_key = key.public_key().public_bytes(
334 crypto_serialization.Encoding.OpenSSH,
335 crypto_serialization.PublicFormat.OpenSSH
# Both returned as str (utf-8 decoded) for direct templating into YAML.
337 return private_key.decode('utf-8'), public_key.decode('utf-8')
# Rewrite a tmp copy of the opnfv-environment file in place, substituting
# cloud domain, generated SSH keys, SDN/dataplane-specific Heat parameters
# and performance (VPP/OVS-DPDK/kernel) options from deploy settings.
# NOTE(review): sampled listing -- docstring quotes, several variable
# initializations (tenant_nic dict, perf flags defaulting, output_line
# default, perf_line init) and multiple else branches are missing; the
# in-place fileinput print() of output_line (around 497-500) is also absent.
340 def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
342 Creates modified opnfv/network environments for deployment
343 :param ds: deploy settings
344 :param ns: network settings
345 :param inv: node inventory
346 :param opnfv_env: file path for opnfv-environment file
347 :param net_env: file path for network-environment file
348 :param tmp_dir: Apex tmp dir
352 logging.info("Preparing opnfv-environment and network-environment files")
353 ds_opts = ds['deploy_options']
# Edit a copy in tmp_dir; never touch the source environment file.
354 tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
355 shutil.copyfile(opnfv_env, tmp_opnfv_env)
# Role -> first NIC member, per network-settings nic_mapping.
356 tenant_nic_map = ns['networks']['tenant']['nic_mapping']
358 tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
359 tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
360 external_nic_map = ns['networks']['external'][0]['nic_mapping']
361 external_nic = dict()
362 external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]
# Fresh keypair generated per deployment and injected below.
365 private_key, public_key = make_ssh_key()
367 # Make easier/faster variables to index in the file editor
368 if 'performance' in ds_opts:
371 if 'vpp' in ds_opts['performance']['Compute']:
372 perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
375 if 'vpp' in ds_opts['performance']['Controller']:
376 perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
381 if 'ovs' in ds_opts['performance']['Compute']:
382 perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
387 if 'kernel' in ds_opts['performance']['Compute']:
388 perf_kern_comp = ds_opts['performance']['Compute']['kernel']
390 perf_kern_comp = None
394 # Modify OPNFV environment
395 # TODO: Change to build a dict and outputting yaml rather than parsing
396 for line in fileinput.input(tmp_opnfv_env, inplace=True):
397 line = line.strip('\n')
399 if 'CloudDomain' in line:
400 output_line = " CloudDomain: {}".format(ns['domain_name'])
401 elif 'replace_private_key' in line:
402 output_line = " private_key: |\n"
# NOTE(review): inner loop reuses the name `line`, shadowing the outer
# fileinput variable -- works only because output_line is already built.
404 for line in private_key.splitlines():
405 key_out += " {}\n".format(line)
406 output_line += key_out
407 elif 'replace_public_key' in line:
408 output_line = " public_key: '{}'".format(public_key)
# first-boot.yaml is registered for kernel-args / rt_kvm deployments.
409 elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
410 'resource_registry' in line:
411 output_line = "resource_registry:\n" \
412 " OS::TripleO::NodeUserData: first-boot.yaml"
413 elif 'ComputeExtraConfigPre' in line and \
414 ds_opts['dataplane'] == 'ovs_dpdk':
415 output_line = ' OS::TripleO::ComputeExtraConfigPre: ' \
416 './ovs-dpdk-preconfig.yaml'
418 if ds_opts['sdn_controller'] == 'opendaylight' and \
419 'odl_vpp_routing_node' in ds_opts:
420 if 'opendaylight::vpp_routing_node' in line:
421 output_line = (" opendaylight::vpp_routing_node: {}.{}"
422 .format(ds_opts['odl_vpp_routing_node'],
# No-SDN FDIO: point the VPP agent at the controller tenant NIC.
424 elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
425 if 'NeutronVPPAgentPhysnets' in line:
426 output_line = (" NeutronVPPAgentPhysnets: 'datacentre:{}'".
427 format(tenant_nic['Controller']))
# ODL DHCP-on-compute variant (condition's second key is cut at line 429).
428 elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
430 if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
432 elif 'NeutronDhcpAgentsPerNetwork' in line:
433 num_control, num_compute = inv.get_node_counts()
434 output_line = (" NeutronDhcpAgentsPerNetwork: {}"
435 .format(num_compute))
436 elif 'ComputeServices' in line:
437 output_line = (" ComputeServices:\n"
438 " - OS::TripleO::Services::NeutronDhcpAgent")
# Per-role VPP performance options appended to <Role>ExtraConfig.
441 for role in 'NovaCompute', 'Controller':
442 if role == 'NovaCompute':
443 perf_opts = perf_vpp_comp
445 perf_opts = perf_vpp_ctrl
446 cfg = "{}ExtraConfig".format(role)
447 if cfg in line and perf_opts:
449 if 'main-core' in perf_opts:
450 perf_line += ("\n fdio::vpp_cpu_main_core: '{}'"
451 .format(perf_opts['main-core']))
452 if 'corelist-workers' in perf_opts:
454 "fdio::vpp_cpu_corelist_workers: '{}'"
455 .format(perf_opts['corelist-workers']))
456 if ds_opts['sdn_controller'] == 'opendaylight' and \
457 ds_opts['dataplane'] == 'fdio':
# Compute gets tenant + public interface mapping; controller
# only tenant (honeycomb agent interface_role_mapping).
458 if role == 'NovaCompute':
460 "tripleo::profile::base::neutron::"
461 "agents::honeycomb::"
462 "interface_role_mapping:"
463 " ['{}:tenant-interface',"
464 "'{}:public-interface']"
465 .format(tenant_nic[role],
469 "tripleo::profile::base::neutron::"
470 "agents::honeycomb::"
471 "interface_role_mapping:"
472 " ['{}:tenant-interface']"
473 .format(tenant_nic[role]))
475 output_line = (" {}:{}".format(cfg, perf_line))
# OVS-DPDK: map Heat params to deploy-settings values via OVS_PERF_MAP.
477 if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
478 for k, v in OVS_PERF_MAP.items():
479 if k in line and v in perf_ovs_comp:
480 output_line = " {}: '{}'".format(k, perf_ovs_comp[v])
483 # (FIXME) use compute's kernel settings for all nodes for now.
485 if 'NovaSchedulerDefaultFilters' in line:
487 " NovaSchedulerDefaultFilters: 'RamFilter," \
488 "ComputeFilter,AvailabilityZoneFilter," \
489 "ComputeCapabilitiesFilter," \
490 "ImagePropertiesFilter,NUMATopologyFilter'"
491 elif 'ComputeKernelArgs' in line:
493 for k, v in perf_kern_comp.items():
494 kernel_args += "{}={} ".format(k, v)
496 output_line = " ComputeKernelArgs: '{}'".\
501 logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
# Generate a Ceph secret in the cephx keyring wire format:
# base64(struct header + raw key bytes). Returns bytes (base64-encoded).
# NOTE(review): original line 505 (the initialization of `key`, presumably
# random bytes) is missing from this sampled listing -- confirm its source
# (e.g. os.urandom) before relying on this function's security properties.
504 def generate_ceph_key():
506 header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
507 return base64.b64encode(header + key)
# Fill in the storage-environment.yaml already copied into tmp_dir: inject a
# fresh Ceph FSID and mon/admin keys in place, and append an ExtraConfig OSD
# entry when a ceph_device is specified in deploy settings.
# NOTE(review): sampled listing -- the docstring quotes/params, the tail of
# the raise message (line 525), the decode argument (e.g. 'utf-8') on lines
# 532/535 and the else-print of unmatched lines are missing here.
510 def prep_storage_env(ds, tmp_dir):
512 Creates storage environment file for deployment. Source file is copied by
513 undercloud playbook to host.
518 ds_opts = ds['deploy_options']
519 storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
# The undercloud playbook must have copied the file first; hard failure
# otherwise.
520 if not os.path.isfile(storage_file):
521 logging.error("storage-environment file is not in tmp directory: {}. "
522 "Check if file was copied from "
523 "undercloud".format(tmp_dir))
524 raise ApexDeployException("storage-environment file not copied from "
# In-place rewrite: print() output replaces each matched line.
526 for line in fileinput.input(storage_file, inplace=True):
527 line = line.strip('\n')
528 if 'CephClusterFSID' in line:
529 print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
530 elif 'CephMonKey' in line:
531 print("  CephMonKey: {}".format(generate_ceph_key().decode(
533 elif 'CephAdminKey' in line:
534 print("  CephAdminKey: {}".format(generate_ceph_key().decode(
# Escaped braces yield literal "{<device>: {}}" puppet hash syntax.
538 if 'ceph_device' in ds_opts and ds_opts['ceph_device']:
539 with open(storage_file, 'a') as fh:
540 fh.write('  ExtraConfig:\n')
541 fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
542 ds_opts['ceph_device']
# Build the `openstack network/subnet create` commands for the external
# (or fallback admin) network, returning them as a list of strings.
# NOTE(review): sampled listing -- the docstring quotes, the `cmds` list
# initialization, the `external` boolean assignment (used at line 579), the
# else branches (554, 556, 566-567) and the final `return cmds` are missing
# from this view; confirm against the full source.
546 def external_network_cmds(ns):
548 Generates external network openstack commands
549 :param ns: network settings
550 :return: list of commands to configure external network
# Prefer the dedicated external network; fall back to the admin network's
# introspection range when no external network is enabled.
552 if 'external' in ns.enabled_network_list:
553 net_config = ns['networks']['external'][0]
555 pool_start, pool_end = net_config['floating_ip_range']
557 net_config = ns['networks']['admin']
559 pool_start, pool_end = ns['apex']['networks']['admin'][
560 'introspection_range']
561 nic_config = net_config['nic_mapping']
562 gateway = net_config['gateway']
564 # create network command
# Native VLAN -> flat provider net (assignment line missing); otherwise a
# vlan provider net with the configured segment id.
565 if nic_config['compute']['vlan'] == 'native':
568 ext_type = "vlan --provider-segment {}".format(nic_config[
570 cmds.append("openstack network create external --project service "
571 "--external --provider-network-type {} "
572 "--provider-physical-network datacentre".format(ext_type))
573 # create subnet command
574 cidr = net_config['cidr']
575 subnet_cmd = "openstack subnet create external-subnet --project " \
576 "service --network external --no-dhcp --gateway {} " \
577 "--allocation-pool start={},end={} --subnet-range " \
578 "{}".format(gateway, pool_start, pool_end, str(cidr))
# IPv6 external subnets get SLAAC modes appended.
579 if external and cidr.version == 6:
580 subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
581 '--ipv6-address-mode slaac'
582 cmds.append(subnet_cmd)
583 logging.debug("Neutron external network commands determined "
584 "as: {}".format(cmds))
# Build congress datasource-create command fragments, one per driver, using
# credentials parsed from the overcloudrc file.
# NOTE(review): sampled listing -- the try/except around overcloudrc key
# access (the raise after line 603), the `cmds` accumulation and the final
# return are missing from this view.
588 def create_congress_cmds(overcloud_file):
589 drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
590 overcloudrc = parsers.parse_overcloudrc(overcloud_file)
591 logging.info("Creating congress commands")
# Datasource config built from overcloudrc exports (list opener missing).
594 "username={}".format(overcloudrc['OS_USERNAME']),
595 "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
596 "password={}".format(overcloudrc['OS_PASSWORD']),
597 "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
# Missing-key handling: logged here; the accompanying raise is not visible.
600 logging.error("Unable to find all keys required for congress in "
601 "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
602 "OS_PASSWORD, OS_AUTH_URL. Please check overcloudrc "
603 "file: {}".format(overcloud_file))
606 ds_cfg = '--config ' + ' --config '.join(ds_cfg)
608 for driver in drivers:
# 'doctor' takes no credential config; all others get ds_cfg appended.
609 if driver == 'doctor':
610 cmd = "{} \"{}\"".format(driver, driver)
612 cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
# api_version pinned (guarding condition, e.g. driver == 'nova', is cut).
614 cmd += ' --config api_version="2.34"'
615 logging.debug("Congress command created: {}".format(cmd))