1 ##############################################################################
2 # Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
19 from apex.common import constants as con
20 from apex.common.exceptions import ApexDeployException
21 from apex.common import parsers
22 from apex.virtual import utils as virt_utils
23 from cryptography.hazmat.primitives import serialization as \
25 from cryptography.hazmat.primitives.asymmetric import rsa
26 from cryptography.hazmat.backends import default_backend as \
27 crypto_default_backend
# NOTE(review): partial listing -- the assignment headers of the maps below
# (referenced later as SDN_FILE_MAP, OTHER_FILE_MAP and OVS_PERF_MAP) fall on
# lines missing from this view. Entries map deploy-option keys to
# tripleo-heat-template environment files.
# OpenDaylight feature variants:
32 'sfc': 'neutron-sfc-opendaylight.yaml',
33 'vpn': 'neutron-bgpvpn-opendaylight.yaml',
34 'gluon': 'gluon.yaml',
36 'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
37 'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
38 'default': 'neutron-opendaylight-honeycomb.yaml'
# Fallback when no feature-specific OpenDaylight file matches:
40 'default': 'neutron-opendaylight.yaml',
# ONOS controller variants:
43 'sfc': 'neutron-onos-sfc.yaml',
44 'default': 'neutron-onos.yaml'
# Controller-less ML2 mechanism drivers:
46 'ovn': 'neutron-ml2-ovn.yaml',
48 'vpp': 'neutron-ml2-vpp.yaml',
# Tuple form -- build_sdn_env_list appends v[1] for tuple values;
# presumably matched against the 'dataplane' value on a missing line.
49 'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
# Optional service enablement files (OTHER_FILE_MAP), keyed by the boolean
# deploy options consumed in create_deploy_cmd:
54 'tacker': 'enable_tacker.yaml',
55 'congress': 'enable_congress.yaml',
56 'barometer': 'enable_barometer.yaml',
57 'rt_kvm': 'enable_rt_kvm.yaml'
# OVS-DPDK heat parameter -> performance-settings key (OVS_PERF_MAP),
# consumed by prep_env when dataplane == 'ovs_dpdk':
61 'HostCpusList': 'dpdk_cores',
62 'NeutronDpdkCoreList': 'pmd_cores',
63 'NeutronDpdkSocketMemory': 'socket_memory',
64 'NeutronDpdkMemoryChannels': 'memory_channels'
# RPM artifacts installed into the overcloud image by prep_image:
67 OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
68 OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
69 ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
73 def build_sdn_env_list(ds, sdn_map, env_list=None):
# Resolve the THT environment files implied by the SDN-related deploy
# settings; recursive over nested maps. NOTE(review): partial listing --
# lines 74-75, 80, 83, 86 and 89+ are missing from this view (presumably
# including the docstring, the env_list default initialization, the else
# branch pairing with line 78, and the tuple-match condition).
76 for k, v in sdn_map.items():
# A key matches when it is the selected SDN controller, or when it is a
# deploy option explicitly set to True.
77 if ds['sdn_controller'] == k or (k in ds and ds[k] is True):
78 if isinstance(v, dict):
# Nested map: recurse and merge the nested matches.
79 env_list.extend(build_sdn_env_list(ds, v))
81 env_list.append(os.path.join(con.THT_ENV_DIR, v))
82 elif isinstance(v, tuple):
# Tuple value: the env file is the second element.
84 env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
# Nothing matched: fall back to the map's 'default' entry when present,
# otherwise warn (guard condition on a missing line).
85 if len(env_list) == 0:
87 env_list.append(os.path.join(
88 con.THT_ENV_DIR, sdn_map['default']))
90 logging.warning("Unable to find default file for SDN")
95 def create_deploy_cmd(ds, ns, inv, tmp_dir,
96 virtual, env_file='opnfv-environment.yaml'):
# Build the 'openstack overcloud deploy' command line from deploy/network
# settings and the node inventory, then persist it to tmp_dir.
# NOTE(review): partial listing -- several lines (97, 102, 106-107,
# 114-115, 117, 119, 126, 129, 137, 139+) are missing from this view,
# including the conditions guarding lines 108, 116 and 118 and the file
# write paired with the open() at line 138.
98 logging.info("Creating deployment command")
99 deploy_options = [env_file, 'network-environment.yaml']
100 ds_opts = ds['deploy_options']
# SDN-driven env files, then optional per-feature enablement files.
101 deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)
103 for k, v in OTHER_FILE_MAP.items():
104 if k in ds_opts and ds_opts[k]:
105 deploy_options.append(os.path.join(con.THT_ENV_DIR, v))
# Storage env is prepared in tmp_dir before being referenced.
108 prep_storage_env(ds, tmp_dir)
109 deploy_options.append(os.path.join(con.THT_ENV_DIR,
110 'storage-environment.yaml'))
111 if ds['global_params']['ha_enabled']:
112 deploy_options.append(os.path.join(con.THT_ENV_DIR,
113 'puppet-pacemaker.yaml'))
# Virtual vs baremetal environment selection (guard on a missing line;
# presumably keyed off the 'virtual' parameter -- confirm upstream).
116 deploy_options.append('virtual-environment.yaml')
118 deploy_options.append('baremetal-environment.yaml')
# Sanity-check node counts before emitting scale arguments.
120 num_control, num_compute = inv.get_node_counts()
121 if num_control == 0 or num_compute == 0:
122 logging.error("Detected 0 control or compute nodes. Control nodes: "
123 "{}, compute nodes{}".format(num_control, num_compute))
124 raise ApexDeployException("Invalid number of control or computes")
125 elif num_control > 1 and not ds['global_params']['ha_enabled']:
127 cmd = "openstack overcloud deploy --templates --timeout {} " \
128 "--libvirt-type kvm".format(con.DEPLOY_TIMEOUT)
# Each collected env file becomes a '-e' argument.
130 for option in deploy_options:
131 cmd += " -e {}".format(option)
132 cmd += " --ntp-server {}".format(ns['ntp'][0])
133 cmd += " --control-scale {}".format(num_control)
134 cmd += " --compute-scale {}".format(num_compute)
135 cmd += ' --control-flavor control --compute-flavor compute'
136 logging.info("Deploy command set: {}".format(cmd))
# Persist the command for later execution on the undercloud.
138 with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
143 def prep_image(ds, img, tmp_dir, root_pw=None):
# NOTE(review): partial listing -- many lines are missing from this view,
# including the virt_cmds list initialization and several guard
# conditions; comments below hedge where the gaps hide the logic.
145 Locates sdn image and preps for deployment.
146 :param ds: deploy settings
147 :param img: sdn image
148 :param tmp_dir: dir to store modified sdn image
149 :param root_pw: password to configure for overcloud image
152 # TODO(trozet): Come up with a better way to organize this logic in this
154 logging.info("Preparing image: {} for deployment".format(img))
155 if not os.path.isfile(img):
156 logging.error("Missing SDN image {}".format(img))
157 raise ApexDeployException("Missing SDN image file: {}".format(img))
159 ds_opts = ds['deploy_options']
161 sdn = ds_opts['sdn_controller']
162 # we need this due to rhbz #1436021
163 # fixed in systemd-219-37.el7
# Remove the neutron-openvswitch-agent systemd units from the image
# (guard condition on a missing line).
165 logging.info("Neutron openvswitch-agent disabled")
168 "rm -f /etc/systemd/system/multi-user.target.wants/"
169 "neutron-openvswitch-agent.service"},
172 "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
# ZRPCD startup hook: create a start script, make it executable and
# append it to rc.local so it runs at boot (guard presumably on a BGPVPN
# related option -- the condition line is missing from this view).
177 virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
180 "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
181 "/opt/quagga/etc/init.d/zrpcd_start.sh"})
183 con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
186 con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
187 "init.d/zrpcd_start.sh' /etc/rc.local "})
189 con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
190 "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
191 logging.info("ZRPCD process started")
193 dataplane = ds_opts['dataplane']
194 if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
195 logging.info("Enabling kernel modules for dpdk")
196 # file to module mapping
# Generate a modules file per kernel module and upload it into the image
# (uio_types dict header is on a missing line).
198 os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
199 os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
201 for mod_file, mod in uio_types.items():
202 with open(mod_file, 'w') as fh:
203 fh.write('#!/bin/bash\n')
204 fh.write('exec /sbin/modprobe {}'.format(mod))
208 {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
210 {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
211 "{}".format(os.path.basename(mod_file))}
# Set root password inside the image when one was supplied (guard on a
# missing line; presumably 'if root_pw:').
214 pw_op = "password:{}".format(root_pw)
215 virt_cmds.append({con.VIRT_PW: pw_op})
# SFC on plain OVS needs the NSH-enabled OVS kernel module and userspace.
216 if ds_opts['sfc'] and dataplane == 'ovs':
218 {con.VIRT_RUN_CMD: "yum -y install "
219 "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
220 "{}".format(OVS_NSH_KMOD_RPM)},
221 {con.VIRT_RUN_CMD: "yum downgrade -y "
222 "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
223 "{}".format(OVS_NSH_RPM)}
225 if dataplane == 'fdio':
226 # Patch neutron with using OVS external interface for router
227 # and add generic linux NS interface driver
229 {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
230 "-p1 < neutron-patch-NSDriver.patch"})
# Swap in the no-SDN VPP package set (condition on a missing line).
233 {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
234 {con.VIRT_RUN_CMD: "yum install -y "
235 "/root/nosdn_vpp_rpms/*.rpm"}
238 if sdn == 'opendaylight':
# Non-default ODL version: remove the shipped ODL and its puppet module,
# then unpack the matching puppet-opendaylight tarball.
239 if ds_opts['odl_version'] != con.DEFAULT_ODL_VERSION:
241 {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
242 {con.VIRT_RUN_CMD: "rm -rf /etc/puppet/modules/opendaylight"},
243 {con.VIRT_RUN_CMD: "cd /etc/puppet/modules && tar xzf "
244 "/root/puppet-opendaylight-"
245 "{}.tar.gz".format(ds_opts['odl_version'])}
# 'master' builds install via rpm --nodeps; releases via yum.
247 if ds_opts['odl_version'] == 'master':
249 {con.VIRT_RUN_CMD: "rpm -ivh --nodeps /root/{}/*".format(
250 ds_opts['odl_version'])}
254 {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
255 ds_opts['odl_version'])}
# NOTE(review): this elif can never fire -- it repeats sdn ==
# 'opendaylight' after the if at line 238 already matched that case.
# Known upstream quirk; left untouched here since surrounding lines
# are missing from this view.
258 elif sdn == 'opendaylight' and 'odl_vpp_netvirt' in ds_opts \
259 and ds_opts['odl_vpp_netvirt']:
261 {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
262 {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
263 ODL_NETVIRT_VPP_RPM)}
# OVS 2.8 update/downgrade path (guard and package args on missing lines).
268 {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum update -y "
270 {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum downgrade -y "
# Copy the image aside and apply all accumulated virt-customize commands.
274 tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
275 shutil.copyfile(img, tmp_oc_image)
276 logging.debug("Temporary overcloud image stored as: {}".format(
278 virt_utils.virt_customize(virt_cmds, tmp_oc_image)
279 logging.info("Overcloud image customization complete")
# NOTE(review): the 'def make_ssh_key():' line and opening docstring quote
# fall on lines missing from this view; the lines below are the function's
# interior. The docstring says 1024-bit RSA but the key_size argument line
# (290) is also missing -- confirm the actual size upstream before relying
# on this claim.
284 Creates public and private ssh keys with 1024 bit RSA encryption
285 :return: private, public key
287 key = rsa.generate_private_key(
288 backend=crypto_default_backend(),
289 public_exponent=65537,
# Serialize: private key as PEM/PKCS8 without encryption, public key in
# OpenSSH authorized_keys format.
293 private_key = key.private_bytes(
294 crypto_serialization.Encoding.PEM,
295 crypto_serialization.PrivateFormat.PKCS8,
296 crypto_serialization.NoEncryption())
297 public_key = key.public_key().public_bytes(
298 crypto_serialization.Encoding.OpenSSH,
299 crypto_serialization.PublicFormat.OpenSSH
# Both returned as UTF-8 text rather than bytes.
301 return private_key.decode('utf-8'), public_key.decode('utf-8')
304 def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
# NOTE(review): partial listing -- many lines are missing from this view
# (e.g. the 'perf' flag and several perf_* default initializations,
# else-branches, and some print/output statements of the fileinput loops).
# Comments below hedge where gaps hide the logic.
306 Creates modified opnfv/network environments for deployment
307 :param ds: deploy settings
308 :param ns: network settings
309 :param inv: node inventory
310 :param opnfv_env: file path for opnfv-environment file
311 :param net_env: file path for network-environment file
312 :param tmp_dir: Apex tmp dir
316 logging.info("Preparing opnfv-environment and network-environment files")
317 ds_opts = ds['deploy_options']
# Work on a copy of the opnfv env file inside tmp_dir.
318 tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
319 shutil.copyfile(opnfv_env, tmp_opnfv_env)
# Map heat role names to the first NIC member of each relevant network.
320 tenant_nic_map = ns['networks']['tenant']['nic_mapping']
322 tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
323 tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
324 external_nic_map = ns['networks']['external'][0]['nic_mapping']
325 external_nic = dict()
326 external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]
# Fresh SSH keypair injected into the environment below.
329 private_key, public_key = make_ssh_key()
331 # Make easier/faster variables to index in the file editor
332 if 'performance' in ds_opts:
# Extract per-role performance sub-settings when present (else-branches
# setting the None defaults are on missing lines).
335 if 'vpp' in ds_opts['performance']['Compute']:
336 perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
339 if 'vpp' in ds_opts['performance']['Controller']:
340 perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
345 if 'ovs' in ds_opts['performance']['Compute']:
346 perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
351 if 'kernel' in ds_opts['performance']['Compute']:
352 perf_kern_comp = ds_opts['performance']['Compute']['kernel']
354 perf_kern_comp = None
358 # Modify OPNFV environment
359 # TODO: Change to build a dict and outputing yaml rather than parsing
# In-place line-by-line rewrite: each branch replaces a recognized line
# with a regenerated YAML line (fileinput inplace redirects stdout to the
# file; the final print of output_line is on a missing line).
360 for line in fileinput.input(tmp_opnfv_env, inplace=True):
361 line = line.strip('\n')
363 if 'CloudDomain' in line:
364 output_line = " CloudDomain: {}".format(ns['domain_name'])
365 elif 'replace_private_key' in line:
# Emit the private key as a YAML block scalar, indented 8 spaces.
366 output_line = " private_key: |\n"
# NOTE(review): this inner loop rebinds 'line', shadowing the outer
# fileinput variable -- fragile but apparently intentional here.
368 for line in private_key.splitlines():
369 key_out += " {}\n".format(line)
370 output_line += key_out
371 elif 'replace_public_key' in line:
372 output_line = " public_key: '{}'".format(public_key)
# ODL + VPP routing node: point opendaylight::vpp_routing_node at the
# configured node (FQDN suffix argument is on a missing line).
374 if ds_opts['sdn_controller'] == 'opendaylight' and \
375 'odl_vpp_routing_node' in ds_opts:
376 if 'opendaylight::vpp_routing_node' in line:
377 output_line = (" opendaylight::vpp_routing_node: {}.{}"
378 .format(ds_opts['odl_vpp_routing_node'],
# No SDN + fdio: wire the VPP agent physnet to the controller tenant NIC.
380 elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
381 if 'NeutronVPPAgentPhysnets' in line:
382 output_line = (" NeutronVPPAgentPhysnets: 'datacentre:{}'".
383 format(tenant_nic['Controller']))
# ODL with DHCP-on-computes option (option key is on a missing line):
# move the neutron DHCP agent onto compute nodes.
384 elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
386 if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
388 elif 'NeutronDhcpAgentsPerNetwork' in line:
389 num_control, num_compute = inv.get_node_counts()
390 output_line = (" NeutronDhcpAgentsPerNetwork: {}"
391 .format(num_compute))
392 elif 'ComputeServices' in line:
393 output_line = (" ComputeServices:\n"
394 " - OS::TripleO::Services::NeutronDhcpAgent")
# Per-role ExtraConfig: VPP core pinning plus honeycomb interface role
# mapping (guard on a missing line; presumably when perf/vpp enabled).
397 for role in 'NovaCompute', 'Controller':
398 if role == 'NovaCompute':
399 perf_opts = perf_vpp_comp
401 perf_opts = perf_vpp_ctrl
402 cfg = "{}ExtraConfig".format(role)
403 if cfg in line and perf_opts:
405 if 'main-core' in perf_opts:
406 perf_line += ("\n fdio::vpp_cpu_main_core: '{}'"
407 .format(perf_opts['main-core']))
408 if 'corelist-workers' in perf_opts:
410 "fdio::vpp_cpu_corelist_workers: '{}'"
411 .format(perf_opts['corelist-workers']))
412 if ds_opts['sdn_controller'] == 'opendaylight' and \
413 ds_opts['dataplane'] == 'fdio':
# Computes map both tenant and public interfaces; controllers
# (missing else branch) map only the tenant interface.
414 if role == 'NovaCompute':
416 "tripleo::profile::base::neutron::"
417 "agents::honeycomb::"
418 "interface_role_mapping:"
419 " ['{}:tenant-interface',"
420 "'{}:public-interface']"
421 .format(tenant_nic[role],
425 "tripleo::profile::base::neutron::"
426 "agents::honeycomb::"
427 "interface_role_mapping:"
428 " ['{}:tenant-interface']"
429 .format(tenant_nic[role]))
431 output_line = (" {}:{}".format(cfg, perf_line))
434 # (FIXME) use compute's kernel settings for all nodes for now.
435 if 'ComputeKernelArgs' in line and perf_kern_comp:
# Flatten the kernel settings dict into a space-separated arg string.
437 for k, v in perf_kern_comp.items():
438 kernel_args += "{}={} ".format(k, v)
440 output_line = " ComputeKernelArgs: '{}'".\
# OVS-DPDK: rewrite each mapped heat parameter from the perf settings.
442 if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
443 for k, v in OVS_PERF_MAP.items():
444 if k in line and v in perf_ovs_comp:
445 output_line = " {}: '{}'".format(k, perf_ovs_comp[v])
449 logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
451 # Modify Network environment
# Same in-place rewrite pattern, applied to net_env directly (the
# pass-through print for unmatched lines is on a missing line).
452 for line in fileinput.input(net_env, inplace=True):
453 line = line.strip('\n')
454 if 'ComputeExtraConfigPre' in line and \
455 ds_opts['dataplane'] == 'ovs_dpdk':
456 print(' OS::TripleO::ComputeExtraConfigPre: '
457 './ovs-dpdk-preconfig.yaml')
# 'perf' flag is set on a line missing from this view.
458 elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
459 'resource_registry' in line:
460 print("resource_registry:\n"
461 " OS::TripleO::NodeUserData: first-boot.yaml")
462 elif perf and perf_kern_comp and \
463 'NovaSchedulerDefaultFilters' in line:
464 print(" NovaSchedulerDefaultFilters: 'RamFilter,"
465 "ComputeFilter,AvailabilityZoneFilter,"
466 "ComputeCapabilitiesFilter,ImagePropertiesFilter,"
467 "NUMATopologyFilter'")
471 logging.info("network-environment file written to {}".format(net_env))
474 def generate_ceph_key():
# Generate a Ceph auth key: a packed binary header (entity type 1,
# creation timestamp, reserved 0, key length) followed by the secret,
# base64-encoded as bytes. NOTE(review): the line defining 'key' (475,
# presumably random secret bytes) is missing from this view.
476 header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
477 return base64.b64encode(header + key)
480 def prep_storage_env(ds, tmp_dir):
# NOTE(review): partial listing -- lines 484-487, 495, 502, 505-507 and
# 513+ are missing from this view (including the raise's message tail and
# the decode() arguments/pass-through print of the rewrite loop).
482 Creates storage environment file for deployment. Source file is copied by
483 undercloud playbook to host.
488 ds_opts = ds['deploy_options']
489 storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
# The file must already have been staged by the undercloud playbook.
490 if not os.path.isfile(storage_file):
491 logging.error("storage-environment file is not in tmp directory: {}. "
492 "Check if file was copied from "
493 "undercloud".format(tmp_dir))
494 raise ApexDeployException("storage-environment file not copied from "
# In-place rewrite: regenerate the Ceph FSID and keys on every deploy.
496 for line in fileinput.input(storage_file, inplace=True):
497 line = line.strip('\n')
498 if 'CephClusterFSID' in line:
499 print(" CephClusterFSID: {}".format(str(uuid.uuid4())))
500 elif 'CephMonKey' in line:
501 print(" CephMonKey: {}".format(generate_ceph_key().decode(
503 elif 'CephAdminKey' in line:
504 print(" CephAdminKey: {}".format(generate_ceph_key().decode(
# Optional dedicated OSD device appended as puppet-ceph ExtraConfig.
508 if 'ceph_device' in ds_opts and ds_opts['ceph_device']:
509 with open(storage_file, 'a') as fh:
510 fh.write(' ExtraConfig:\n')
511 fh.write(" ceph::profile::params::osds:{{{}:{{}}}}\n".format(
512 ds_opts['ceph_device']
516 def external_network_cmds(ns):
# NOTE(review): partial listing -- lines 517, 521, 524, 526, 528, 533,
# 536-537, 539 and 555+ are missing from this view (including the cmds
# list init, the else branches, the native-vlan ext_type assignment and
# the return).
518 Generates external network openstack commands
519 :param ns: network settings
520 :return: list of commands to configure external network
# Prefer the dedicated external network; otherwise fall back to the admin
# network and its introspection range (else on a missing line).
522 if 'external' in ns.enabled_network_list:
523 net_config = ns['networks']['external'][0]
525 pool_start, pool_end = net_config['floating_ip_range']
527 net_config = ns['networks']['admin']
529 pool_start, pool_end = ns['apex']['networks']['admin'][
530 'introspection_range']
531 nic_config = net_config['nic_mapping']
532 gateway = net_config['gateway']
534 # create network command
# Native vlan -> flat provider type (assignment on a missing line),
# otherwise tagged vlan with the segment id.
535 if nic_config['compute']['vlan'] == 'native':
538 ext_type = "vlan --provider-segment {}".format(nic_config[
540 cmds.append("openstack network create external --project service "
541 "--external --provider-network-type {} "
542 "--provider-physical-network datacentre".format(ext_type))
543 # create subnet command
544 cidr = net_config['cidr']
545 subnet_cmd = "openstack subnet create external-subnet --project " \
546 "service --network external --no-dhcp --gateway {} " \
547 "--allocation-pool start={},end={} --subnet-range " \
548 "{}".format(gateway, pool_start, pool_end, str(cidr))
# NOTE(review): 'external' is not defined on any visible line -- this
# looks like a bug (presumably meant to test whether the external
# network branch was taken above, e.g. "'external' in
# ns.enabled_network_list"). Confirm against upstream before fixing.
549 if external and cidr.version == 6:
550 subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
551 '--ipv6-address-mode slaac'
552 cmds.append(subnet_cmd)
553 logging.debug("Neutron external network commands determined "
554 "as: {}".format(cmds))
558 def create_congress_cmds(overcloud_file):
# Build 'congress datasource create'-style command fragments for each
# driver, using credentials parsed from the overcloudrc file.
# NOTE(review): partial listing -- lines 562-563, 568-569, 574-575, 577,
# 581, 583 and 586+ are missing from this view (including the cmds list
# init, the KeyError handler around the overcloudrc lookups, the raise,
# and the final append/return).
559 drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
560 overcloudrc = parsers.parse_overcloudrc(overcloud_file)
561 logging.info("Creating congress commands")
# Datasource config pairs pulled from overcloudrc; missing keys are
# reported below (the try/except wrapping is on missing lines).
564 "username={}".format(overcloudrc['OS_USERNAME']),
565 "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
566 "password={}".format(overcloudrc['OS_PASSWORD']),
567 "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
570 logging.error("Unable to find all keys required for congress in "
571 "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
572 "OS_PASSWORD, OS_AUTH_URL. Please check overcloudrc "
573 "file: {}".format(overcloud_file))
# Join the pairs into repeated '--config k=v' CLI arguments.
576 ds_cfg = '--config ' + ' --config '.join(ds_cfg)
578 for driver in drivers:
# 'doctor' takes no credential config; others get the full set.
579 if driver == 'doctor':
580 cmd = "{} \"{}\"".format(driver, driver)
582 cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
# Pin nova API microversion (guard condition on a missing line).
584 cmd += ' --config api_version="2.34"'
585 logging.debug("Congress command created: {}".format(cmd))