##############################################################################
# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

import base64
import fileinput
import logging
import os
import shutil
import struct
import time
import uuid

from apex.common import constants as con
from apex.common.exceptions import ApexDeployException
from apex.common import parsers
from apex.virtual import utils as virt_utils
from cryptography.hazmat.primitives import serialization as \
    crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend as \
    crypto_default_backend


SDN_FILE_MAP = {
    'opendaylight': {
        'sfc': 'neutron-sfc-opendaylight.yaml',
        'vpn': 'neutron-bgpvpn-opendaylight.yaml',
        'gluon': 'gluon.yaml',
        'vpp': {
            'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
            'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
            'default': 'neutron-opendaylight-honeycomb.yaml'
        },
        'default': 'neutron-opendaylight.yaml',
    },
    'onos': {
        'sfc': 'neutron-onos-sfc.yaml',
        'default': 'neutron-onos.yaml'
    },
    'ovn': 'neutron-ml2-ovn.yaml',
    False: {
        'vpp': 'neutron-ml2-vpp.yaml',
        'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
    }
}

OTHER_FILE_MAP = {
    'tacker': 'enable_tacker.yaml',
    'congress': 'enable_congress.yaml',
    'barometer': 'enable_barometer.yaml',
    'rt_kvm': 'enable_rt_kvm.yaml'
}

OVS_PERF_MAP = {
    'HostCpusList': 'dpdk_cores',
    'NeutronDpdkCoreList': 'pmd_cores',
    'NeutronDpdkSocketMemory': 'socket_memory',
    'NeutronDpdkMemoryChannels': 'memory_channels'
}
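
# OVS_PERF_MAP translates THT parameter names (keys) to the option names
# expected under deploy_options['performance']['Compute']['ovs'] in the
# deploy settings (values). For example, a deploy setting of
#   performance: {Compute: {ovs: {socket_memory: '1024,1024'}}}
# causes prep_env() to rewrite the matching opnfv-environment.yaml line to
#   NeutronDpdkSocketMemory: '1024,1024'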

OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
                      ".noarch.rpm"


def build_sdn_env_list(ds, sdn_map, env_list=None):
    if env_list is None:
        env_list = list()
    for k, v in sdn_map.items():
        if ds['sdn_controller'] == k or (k in ds and ds[k] is True):
            if isinstance(v, dict):
                env_list.extend(build_sdn_env_list(ds, v))
            else:
                env_list.append(os.path.join(con.THT_ENV_DIR, v))
        elif isinstance(v, tuple):
            if ds[k] == v[0]:
                env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
    if len(env_list) == 0:
        try:
            env_list.append(os.path.join(
                con.THT_ENV_DIR, sdn_map['default']))
        except KeyError:
            logging.warning("Unable to find default file for SDN")

    return env_list
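
# Example: deploy options of {'sdn_controller': 'opendaylight', 'sfc': True}
# resolve to [<THT_ENV_DIR>/neutron-sfc-opendaylight.yaml], while a bare
# {'sdn_controller': 'opendaylight'} falls back to the nested 'default'
# entry, [<THT_ENV_DIR>/neutron-opendaylight.yaml].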


def create_deploy_cmd(ds, ns, inv, tmp_dir,
                      virtual, env_file='opnfv-environment.yaml'):

    logging.info("Creating deployment command")
    deploy_options = [env_file, 'network-environment.yaml']
    ds_opts = ds['deploy_options']
    deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)

    for k, v in OTHER_FILE_MAP.items():
        if k in ds_opts and ds_opts[k]:
            deploy_options.append(os.path.join(con.THT_ENV_DIR, v))

    if ds_opts['ceph']:
        prep_storage_env(ds, tmp_dir)
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'storage-environment.yaml'))
    if ds['global_params']['ha_enabled']:
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'puppet-pacemaker.yaml'))

    if virtual:
        deploy_options.append('virtual-environment.yaml')
    else:
        deploy_options.append('baremetal-environment.yaml')

    num_control, num_compute = inv.get_node_counts()
    if num_control == 0 or num_compute == 0:
        logging.error("Detected 0 control or compute nodes. Control nodes: "
                      "{}, compute nodes: {}".format(num_control,
                                                     num_compute))
        raise ApexDeployException("Invalid number of control or computes")
    elif num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1
    cmd = "openstack overcloud deploy --templates --timeout {} " \
          "--libvirt-type kvm".format(con.DEPLOY_TIMEOUT)
    # build cmd env args
    for option in deploy_options:
        cmd += " -e {}".format(option)
    cmd += " --ntp-server {}".format(ns['ntp'][0])
    cmd += " --control-scale {}".format(num_control)
    cmd += " --compute-scale {}".format(num_compute)
    cmd += ' --control-flavor control --compute-flavor compute'
    logging.info("Deploy command set: {}".format(cmd))

    with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
        fh.write(cmd)
    return cmd
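
# Example: a virtual HA deployment typically produces a command of the form
#   openstack overcloud deploy --templates --timeout <DEPLOY_TIMEOUT>
#     --libvirt-type kvm -e opnfv-environment.yaml
#     -e network-environment.yaml -e <THT_ENV_DIR>/neutron-opendaylight.yaml
#     -e virtual-environment.yaml --ntp-server <ntp> --control-scale 3
#     --compute-scale 2 --control-flavor control --compute-flavor compute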


def prep_image(ds, img, tmp_dir, root_pw=None):
    """
    Locates sdn image and preps for deployment.
    :param ds: deploy settings
    :param img: sdn image
    :param tmp_dir: dir to store modified sdn image
    :param root_pw: password to configure for overcloud image
    :return: None
    """
    # TODO(trozet): Come up with a better way to organize this logic in this
    # function
154 logging.info("Preparing image: {} for deployment".format(img))
155 if not os.path.isfile(img):
156 logging.error("Missing SDN image {}".format(img))
157 raise ApexDeployException("Missing SDN image file: {}".format(img))
159 ds_opts = ds['deploy_options']
161 sdn = ds_opts['sdn_controller']
162 # we need this due to rhbz #1436021
163 # fixed in systemd-219-37.el7
165 logging.info("Neutron openvswitch-agent disabled")
168 "rm -f /etc/systemd/system/multi-user.target.wants/"
169 "neutron-openvswitch-agent.service"},
172 "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
177 virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
180 "echo '/opt/quagga/etc/init.d/zrpcd start' > "
181 "/opt/quagga/etc/init.d/zrpcd_start.sh"})
183 con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
186 con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
187 "init.d/zrpcd_start.sh' /etc/rc.local "})
188 logging.info("ZRPCD process started")

    dataplane = ds_opts['dataplane']
    if dataplane in ('ovs_dpdk', 'fdio'):
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
        uio_types = {
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'):
                'uio_pci_generic'
        }
        for mod_file, mod in uio_types.items():
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))

            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
                    mod_file)},
                {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
                                   "{}".format(os.path.basename(mod_file))}
            ])
    if root_pw:
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})
    if ds_opts['sfc'] and dataplane == 'ovs':
        virt_cmds.extend([
            {con.VIRT_RUN_CMD: "yum -y install "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_KMOD_RPM)},
            {con.VIRT_RUN_CMD: "yum downgrade -y "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_RPM)}
        ])
    if dataplane == 'fdio':
        # Patch neutron to use the OVS external interface for the router
        # and add the generic Linux namespace interface driver
        virt_cmds.append(
            {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
                               "-p1 < neutron-patch-NSDriver.patch"})
        if sdn is False:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
                {con.VIRT_RUN_CMD: "yum install -y "
                                   "/root/nosdn_vpp_rpms/*.rpm"}
            ])

    if sdn == 'opendaylight':
        if ds_opts['odl_version'] != con.DEFAULT_ODL_VERSION:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
                {con.VIRT_RUN_CMD: "rm -rf /etc/puppet/modules/opendaylight"},
                {con.VIRT_RUN_CMD: "cd /etc/puppet/modules && tar xzf "
                                   "/root/puppet-opendaylight-"
                                   "{}.tar.gz".format(ds_opts['odl_version'])}
            ])
            if ds_opts['odl_version'] == 'master':
                virt_cmds.extend([
                    {con.VIRT_RUN_CMD: "rpm -ivh --nodeps /root/{}/*".format(
                        ds_opts['odl_version'])}
                ])
            else:
                virt_cmds.extend([
                    {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
                        ds_opts['odl_version'])}
                ])
        elif 'odl_vpp_netvirt' in ds_opts and ds_opts['odl_vpp_netvirt']:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
                {con.VIRT_RUN_CMD: "yum -y install {}".format(
                    ODL_NETVIRT_VPP_RPM)}
            ])

    if sdn == 'ovn':
        virt_cmds.extend([
            {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum update -y "
                               "*openvswitch*"},
            {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum downgrade -y "
                               "*openvswitch*"}
        ])

    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug("Temporary overcloud image stored as: {}".format(
        tmp_oc_image))
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")


def make_ssh_key():
    """
    Creates public and private ssh keys with 1024 bit RSA encryption
    :return: private, public key
    """
    key = rsa.generate_private_key(
        backend=crypto_default_backend(),
        public_exponent=65537,
        key_size=1024
    )

    private_key = key.private_bytes(
        crypto_serialization.Encoding.PEM,
        crypto_serialization.PrivateFormat.PKCS8,
        crypto_serialization.NoEncryption())
    public_key = key.public_key().public_bytes(
        crypto_serialization.Encoding.OpenSSH,
        crypto_serialization.PublicFormat.OpenSSH
    )
    return private_key.decode('utf-8'), public_key.decode('utf-8')
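
# Example: prep_env() injects this key pair into opnfv-environment.yaml;
# private_key is a PEM PKCS8 'BEGIN PRIVATE KEY' block and public_key a
# single 'ssh-rsa AAAA...' line suitable for an authorized_keys entry.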


def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
    """
    Creates modified opnfv/network environments for deployment
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param opnfv_env: file path for opnfv-environment file
    :param net_env: file path for network-environment file
    :param tmp_dir: Apex tmp dir
    :return: None
    """

    logging.info("Preparing opnfv-environment and network-environment files")
    ds_opts = ds['deploy_options']
    tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
    shutil.copyfile(opnfv_env, tmp_opnfv_env)
    tenant_nic_map = ns['networks']['tenant']['nic_mapping']
    tenant_nic = dict()
    tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
    tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
    external_nic_map = ns['networks']['external'][0]['nic_mapping']
    external_nic = dict()
    external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]

    # SSH keys
    private_key, public_key = make_ssh_key()

    # Make easier/faster variables to index in the file editor
    if 'performance' in ds_opts:
        perf = True
        # vpp
        if 'vpp' in ds_opts['performance']['Compute']:
            perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
        else:
            perf_vpp_comp = None
        if 'vpp' in ds_opts['performance']['Controller']:
            perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
        else:
            perf_vpp_ctrl = None

        # ovs
        if 'ovs' in ds_opts['performance']['Compute']:
            perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
        else:
            perf_ovs_comp = None

        # kernel
        if 'kernel' in ds_opts['performance']['Compute']:
            perf_kern_comp = ds_opts['performance']['Compute']['kernel']
        else:
            perf_kern_comp = None
    else:
        perf = False

    # Modify OPNFV environment
    # TODO: Change to build a dict and output yaml rather than parsing
    for line in fileinput.input(tmp_opnfv_env, inplace=True):
        line = line.strip('\n')
        output_line = line
        if 'CloudDomain' in line:
            output_line = "  CloudDomain: {}".format(ns['domain_name'])
        elif 'replace_private_key' in line:
            output_line = "    private_key: |\n"
            key_out = ''
            for pk_line in private_key.splitlines():
                key_out += "        {}\n".format(pk_line)
            output_line += key_out
        elif 'replace_public_key' in line:
            output_line = "    public_key: '{}'".format(public_key)

        if ds_opts['sdn_controller'] == 'opendaylight' and \
                'odl_vpp_routing_node' in ds_opts:
            if 'opendaylight::vpp_routing_node' in line:
                output_line = ("    opendaylight::vpp_routing_node: {}.{}"
                               .format(ds_opts['odl_vpp_routing_node'],
                                       ns['domain_name']))
        elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
            if 'NeutronVPPAgentPhysnets' in line:
                output_line = ("  NeutronVPPAgentPhysnets: 'datacentre:{}'".
                               format(tenant_nic['Controller']))
        elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
                'dvr') is True:
            if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
                output_line = ''
            elif 'NeutronDhcpAgentsPerNetwork' in line:
                num_control, num_compute = inv.get_node_counts()
                output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
                               .format(num_compute))
            elif 'ComputeServices' in line:
                output_line = ("  ComputeServices:\n"
                               "    - OS::TripleO::Services::NeutronDhcpAgent")

        if perf:
            for role in 'NovaCompute', 'Controller':
                if role == 'NovaCompute':
                    perf_opts = perf_vpp_comp
                else:
                    perf_opts = perf_vpp_ctrl
                cfg = "{}ExtraConfig".format(role)
                if cfg in line and perf_opts:
                    perf_line = ''
                    if 'main-core' in perf_opts:
                        perf_line += ("\n    fdio::vpp_cpu_main_core: '{}'"
                                      .format(perf_opts['main-core']))
                    if 'corelist-workers' in perf_opts:
                        perf_line += ("\n    "
                                      "fdio::vpp_cpu_corelist_workers: '{}'"
                                      .format(perf_opts['corelist-workers']))
                    if ds_opts['sdn_controller'] == 'opendaylight' and \
                            ds_opts['dataplane'] == 'fdio':
                        if role == 'NovaCompute':
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface',"
                                          "'{}:public-interface']"
                                          .format(tenant_nic[role],
                                                  external_nic[role]))
                        else:
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface']"
                                          .format(tenant_nic[role]))
                    if perf_line:
                        output_line = ("  {}:{}".format(cfg, perf_line))

            # kernel args
            # (FIXME) use compute's kernel settings for all nodes for now.
            if 'ComputeKernelArgs' in line and perf_kern_comp:
                kernel_args = ''
                for k, v in perf_kern_comp.items():
                    kernel_args += "{}={} ".format(k, v)
                if kernel_args:
                    output_line = "  ComputeKernelArgs: '{}'".\
                        format(kernel_args)
            if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
                for k, v in OVS_PERF_MAP.items():
                    if k in line and v in perf_ovs_comp:
                        output_line = "  {}: '{}'".format(k, perf_ovs_comp[v])

        print(output_line)
446 logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
448 # Modify Network environment
449 for line in fileinput.input(net_env, inplace=True):
450 line = line.strip('\n')
451 if 'ComputeExtraConfigPre' in line and \
452 ds_opts['dataplane'] == 'ovs_dpdk':
453 print(' OS::TripleO::ComputeExtraConfigPre: '
454 './ovs-dpdk-preconfig.yaml')
455 elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
456 'resource_registry' in line:
457 print("resource_registry:\n"
458 " OS::TripleO::NodeUserData: first-boot.yaml")
459 elif perf and perf_kern_comp and \
460 'NovaSchedulerDefaultFilters' in line:
461 print(" NovaSchedulerDefaultFilters: 'RamFilter,"
462 "ComputeFilter,AvailabilityZoneFilter,"
463 "ComputeCapabilitiesFilter,ImagePropertiesFilter,"
464 "NUMATopologyFilter'")
468 logging.info("network-environment file written to {}".format(net_env))


def generate_ceph_key():
    key = os.urandom(16)
    header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
    return base64.b64encode(header + key)
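
# The packed header mirrors the cephx on-disk key format: little-endian
# (type=1, created seconds, created nanoseconds=0, key length) followed by
# the 16 random key bytes, all base64-encoded, so generated keys always
# begin with 'AQ'.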


def prep_storage_env(ds, tmp_dir):
    """
    Creates storage environment file for deployment. Source file is copied by
    undercloud playbook to host.
    :param ds: deploy settings
    :param tmp_dir: Apex tmp dir
    :return: None
    """
    ds_opts = ds['deploy_options']
    storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
    if not os.path.isfile(storage_file):
        logging.error("storage-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("storage-environment file not copied from "
                                  "undercloud")
    for line in fileinput.input(storage_file, inplace=True):
        line = line.strip('\n')
        if 'CephClusterFSID' in line:
            print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
        elif 'CephMonKey' in line:
            print("  CephMonKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephAdminKey' in line:
            print("  CephAdminKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        else:
            print(line)
    if 'ceph_device' in ds_opts and ds_opts['ceph_device']:
        with open(storage_file, 'a') as fh:
            fh.write('  ExtraConfig:\n')
            fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
                ds_opts['ceph_device']
            ))
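
# Example: with a deploy option of ceph_device: /dev/sdb the YAML appended
# above expands to
#   ExtraConfig:
#     ceph::profile::params::osds:{/dev/sdb:{}}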


def external_network_cmds(ns):
    """
    Generates external network openstack commands
    :param ns: network settings
    :return: list of commands to configure external network
    """
    if 'external' in ns.enabled_network_list:
        net_config = ns['networks']['external'][0]
        external = True
        pool_start, pool_end = net_config['floating_ip_range']
    else:
        net_config = ns['networks']['admin']
        external = False
        pool_start, pool_end = ns['apex']['networks']['admin'][
            'introspection_range']
    nic_config = net_config['nic_mapping']
    gateway = net_config['gateway']
    cmds = list()
    # create network command
    if nic_config['compute']['vlan'] == 'native':
        ext_type = 'flat'
    else:
        ext_type = "vlan --provider-segment {}".format(nic_config[
            'compute']['vlan'])
    cmds.append("openstack network create external --project service "
                "--external --provider-network-type {} "
                "--provider-physical-network datacentre".format(ext_type))
    # create subnet command
    cidr = net_config['cidr']
    subnet_cmd = "openstack subnet create external-subnet --project " \
                 "service --network external --no-dhcp --gateway {} " \
                 "--allocation-pool start={},end={} --subnet-range " \
                 "{}".format(gateway, pool_start, pool_end, str(cidr))
    if external and cidr.version == 6:
        subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
                      '--ipv6-address-mode slaac'
    cmds.append(subnet_cmd)
    logging.debug("Neutron external network commands determined "
                  "as: {}".format(cmds))
    return cmds


def create_congress_cmds(overcloud_file):
    drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
    overcloudrc = parsers.parse_overcloudrc(overcloud_file)
    logging.info("Creating congress commands")
    try:
        ds_cfg = [
            "username={}".format(overcloudrc['OS_USERNAME']),
            "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
            "password={}".format(overcloudrc['OS_PASSWORD']),
            "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
        ]
    except KeyError:
        logging.error("Unable to find all keys required for congress in "
                      "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
                      "OS_PASSWORD, OS_AUTH_URL. Please check overcloudrc "
                      "file: {}".format(overcloud_file))
        raise
    cmds = list()
    ds_cfg = '--config ' + ' --config '.join(ds_cfg)

    for driver in drivers:
        if driver == 'doctor':
            cmd = "{} \"{}\"".format(driver, driver)
        else:
            cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
        if driver == 'nova':
            cmd += ' --config api_version="2.34"'
        logging.debug("Congress command created: {}".format(cmd))
        cmds.append(cmd)
    return cmds