##############################################################################
# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
import base64
import fileinput
import logging
import os
import re
import shutil
import struct
import time
import uuid

from apex.common import constants as con
from apex.common.exceptions import ApexDeployException
from apex.common import parsers
from apex.virtual import utils as virt_utils
from cryptography.hazmat.primitives import serialization as \
    crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend as \
    crypto_default_backend
SDN_FILE_MAP = {
    'opendaylight': {
        'sfc': 'neutron-sfc-opendaylight.yaml',
        'vpn': 'neutron-bgpvpn-opendaylight.yaml',
        'gluon': 'gluon.yaml',
        'vpp': {
            'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
            'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
            'default': 'neutron-opendaylight-honeycomb.yaml'
        },
        'default': 'neutron-opendaylight.yaml',
    },
    'onos': {
        'sfc': 'neutron-onos-sfc.yaml',
        'default': 'neutron-onos.yaml'
    },
    'ovn': 'neutron-ml2-ovn.yaml',
    False: {
        'vpp': 'neutron-ml2-vpp.yaml',
        'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
    }
}

OTHER_FILE_MAP = {
    'tacker': 'enable_tacker.yaml',
    'congress': 'enable_congress.yaml',
    'barometer': 'enable_barometer.yaml',
    'rt_kvm': 'enable_rt_kvm.yaml'
}

OVS_PERF_MAP = {
    'HostCpusList': 'dpdk_cores',
    'NeutronDpdkCoreList': 'pmd_cores',
    'NeutronDpdkSocketMemory': 'socket_memory',
    'NeutronDpdkMemoryChannels': 'memory_channels'
}

OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
                      ".noarch.rpm"


def build_sdn_env_list(ds, sdn_map, env_list=None):
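    """
    Builds the list of SDN environment files to include in the deploy
    command by matching deploy options against sdn_map (e.g. SDN_FILE_MAP).

    Illustrative example: with ds = {'sdn_controller': 'opendaylight',
    'sfc': True, ...} this resolves to
    [os.path.join(con.THT_ENV_DIR, 'neutron-sfc-opendaylight.yaml')].
    :param ds: deploy options dict
    :param sdn_map: map of deploy options to environment files
    :param env_list: list to append to (used for recursion)
    :return: list of environment file paths
    """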
    if env_list is None:
        env_list = list()
    for k, v in sdn_map.items():
        if ds['sdn_controller'] == k or (k in ds and ds[k] is True):
            if isinstance(v, dict):
                # nested maps (e.g. per-feature ODL files) resolve recursively
                env_list.extend(build_sdn_env_list(ds, v))
            else:
                env_list.append(os.path.join(con.THT_ENV_DIR, v))
        elif isinstance(v, tuple):
            if ds[k] == v[0]:
                env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
    if len(env_list) == 0:
        try:
            env_list.append(os.path.join(
                con.THT_ENV_DIR, sdn_map['default']))
        except KeyError:
            logging.warning("Unable to find default file for SDN")

    return env_list


def _get_node_counts(inventory):
    """
    Return numbers of controller and compute nodes in inventory

    :param inventory: node inventory data structure
    :return: number of controller and compute nodes in inventory
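
    Example inventory structure (illustrative):
        {'nodes': [{'capabilities': 'profile:control', ...},
                   {'capabilities': 'profile:compute', ...}]}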
    """
    if not inventory:
        raise ApexDeployException("Empty inventory")

    nodes = inventory['nodes']
    num_control = 0
    num_compute = 0
    for node in nodes:
        if node['capabilities'] == 'profile:control':
            num_control += 1
        elif node['capabilities'] == 'profile:compute':
            num_compute += 1
        else:
            # TODO(trozet) do we want to allow capabilities to not exist?
            logging.error("Every node must include a 'capabilities' key "
                          "tagged with either 'profile:control' or "
                          "'profile:compute'")
            raise ApexDeployException("Node missing capabilities "
                                      "key: {}".format(node))
    return num_control, num_compute


def create_deploy_cmd(ds, ns, inv, tmp_dir,
                      virtual, env_file='opnfv-environment.yaml'):
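    """
    Renders the 'openstack overcloud deploy' command and writes it to
    tmp_dir/deploy_command.

    Illustrative result (exact values depend on settings):
        openstack overcloud deploy --templates --timeout 90
        --libvirt-type kvm -e opnfv-environment.yaml
        -e network-environment.yaml ... --control-scale 1
        --compute-scale 1 --control-flavor control --compute-flavor compute
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param tmp_dir: Apex tmp dir
    :param virtual: True if virtual deployment
    :param env_file: opnfv environment file name
    :return: deploy command string
    """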
    logging.info("Creating deployment command")
    deploy_options = [env_file, 'network-environment.yaml']
    ds_opts = ds['deploy_options']
    deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)

    for k, v in OTHER_FILE_MAP.items():
        if k in ds_opts and ds_opts[k]:
            deploy_options.append(os.path.join(con.THT_ENV_DIR, v))

    if ds_opts['ceph']:
        prep_storage_env(ds, tmp_dir)
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'storage-environment.yaml'))
    if ds['global_params']['ha_enabled']:
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'puppet-pacemaker.yaml'))

    if virtual:
        deploy_options.append('virtual-environment.yaml')
    else:
        deploy_options.append('baremetal-environment.yaml')

    num_control, num_compute = _get_node_counts(inv)
    if num_control == 0 or num_compute == 0:
        logging.error("Detected 0 control or compute nodes. Control nodes: "
                      "{}, compute nodes: {}".format(num_control,
                                                     num_compute))
        raise ApexDeployException("Invalid number of control or computes")
    elif num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1
    cmd = "openstack overcloud deploy --templates --timeout {} " \
          "--libvirt-type kvm".format(con.DEPLOY_TIMEOUT)
    # append each environment file to the command
    for option in deploy_options:
        cmd += " -e {}".format(option)
    cmd += " --ntp-server {}".format(ns['ntp'][0])
    cmd += " --control-scale {}".format(num_control)
    cmd += " --compute-scale {}".format(num_compute)
    cmd += ' --control-flavor control --compute-flavor compute'
    logging.info("Deploy command set: {}".format(cmd))

    with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
        fh.write(cmd)
    return cmd


def prep_image(ds, img, tmp_dir, root_pw=None):
    """
    Locates sdn image and preps for deployment.
    :param ds: deploy settings
    :param img: sdn image
    :param tmp_dir: dir to store modified sdn image
    :param root_pw: password to configure for overcloud image
    :return: None
    """
    # TODO(trozet): Come up with a better way to organize this logic in this
    # function
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))

    ds_opts = ds['deploy_options']
    # each virt_cmds entry is a single-key dict mapping a virt-customize
    # operation (con.VIRT_RUN_CMD, con.VIRT_UPLOAD, con.VIRT_PW) to its
    # argument
    virt_cmds = list()
    sdn = ds_opts['sdn_controller']
    # we need this due to rhbz #1436021
    # fixed in systemd-219-37.el7
    if sdn is not False:
        logging.info("Neutron openvswitch-agent disabled")
        virt_cmds.extend([
            {con.VIRT_RUN_CMD:
                "rm -f /etc/systemd/system/multi-user.target.wants/"
                "neutron-openvswitch-agent.service"},
            {con.VIRT_RUN_CMD:
                "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
                ".service"}
        ])

    if ds_opts['vpn']:
        virt_cmds.append({con.VIRT_RUN_CMD: "systemctl enable zrpcd"})
        logging.info("ZRPC and Quagga enabled")
    dataplane = ds_opts['dataplane']
    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
        uio_types = {
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'):
                'uio_pci_generic'
        }
        for mod_file, mod in uio_types.items():
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))

            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
                    mod_file)},
                {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
                                   "{}".format(os.path.basename(mod_file))}
            ])
    if root_pw:
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})
    if ds_opts['sfc'] and dataplane == 'ovs':
        virt_cmds.extend([
            {con.VIRT_RUN_CMD: "yum -y install "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_KMOD_RPM)},
            {con.VIRT_RUN_CMD: "yum downgrade -y "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_RPM)}
        ])
    if dataplane == 'fdio':
        # Patch neutron to use OVS external interface for router
        # and add generic linux NS interface driver
        virt_cmds.append(
            {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
                               "-p1 < neutron-patch-NSDriver.patch"})
    if sdn == 'opendaylight':
        if ds_opts['odl_version'] != con.DEFAULT_ODL_VERSION:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
                {con.VIRT_RUN_CMD: "rm -rf /etc/puppet/modules/opendaylight"},
                {con.VIRT_RUN_CMD: "cd /etc/puppet/modules && tar xzf "
                                   "/root/puppet-opendaylight-"
                                   "{}.tar.gz".format(ds_opts['odl_version'])}
            ])
            if ds_opts['odl_version'] == 'master':
                virt_cmds.extend([
                    {con.VIRT_RUN_CMD: "rpm -ivh --nodeps /root/{}/*".format(
                        ds_opts['odl_version'])}
                ])
            else:
                virt_cmds.extend([
                    {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
                        ds_opts['odl_version'])}
                ])
        elif 'odl_vpp_netvirt' in ds_opts and ds_opts['odl_vpp_netvirt']:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
                {con.VIRT_RUN_CMD: "yum -y install {}".format(
                    ODL_NETVIRT_VPP_RPM)}
            ])

    if sdn == 'ovn':
        virt_cmds.extend([
            {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum update -y "
                               "*openvswitch*"},
            {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum downgrade -y "
                               "*openvswitch*"}
        ])

    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug("Temporary overcloud image stored as: {}".format(
        tmp_oc_image))
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")
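

# Typical use of prep_image() (illustrative):
#   prep_image(ds, '/home/stack/overcloud-full.qcow2', '/tmp/apex',
#              root_pw=None)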


def make_ssh_key():
    """
    Creates a private/public ssh key pair using 1024 bit RSA
    :return: private, public key
    """
    key = rsa.generate_private_key(
        backend=crypto_default_backend(),
        public_exponent=65537,
        key_size=1024
    )

    private_key = key.private_bytes(
        crypto_serialization.Encoding.PEM,
        crypto_serialization.PrivateFormat.PKCS8,
        crypto_serialization.NoEncryption())
    public_key = key.public_key().public_bytes(
        crypto_serialization.Encoding.OpenSSH,
        crypto_serialization.PublicFormat.OpenSSH
    )
    # strip the leading 'ssh-rsa ' so only the base64 body is returned
    pub_key = re.sub(r'ssh-rsa\s*', '', public_key.decode('utf-8'))
    return private_key.decode('utf-8'), pub_key
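

# Sanity check (illustrative):
#   priv, pub = make_ssh_key()
#   priv starts with '-----BEGIN PRIVATE KEY-----' (PKCS8 PEM) and pub is
#   the bare base64 body of the OpenSSH public key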


def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
    """
    Creates modified opnfv/network environments for deployment
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param opnfv_env: file path for opnfv-environment file
    :param net_env: file path for network-environment file
    :param tmp_dir: Apex tmp dir
    :return: None
    """
    logging.info("Preparing opnfv-environment and network-environment files")
    ds_opts = ds['deploy_options']
    tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
    shutil.copyfile(opnfv_env, tmp_opnfv_env)
    tenant_nic_map = ns['networks']['tenant']['nic_mapping']
    tenant_ctrl_nic = tenant_nic_map['controller']['members'][0]
    tenant_comp_nic = tenant_nic_map['compute']['members'][0]

    # SSH keys
    private_key, public_key = make_ssh_key()

    # Make easier/faster variables to index in the file editor
    if 'performance' in ds_opts:
        perf = True
        # vpp
        if 'vpp' in ds_opts['performance']['Compute']:
            perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
        else:
            perf_vpp_comp = None
        if 'vpp' in ds_opts['performance']['Controller']:
            perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
        else:
            perf_vpp_ctrl = None

        # ovs
        if 'ovs' in ds_opts['performance']['Compute']:
            perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
        else:
            perf_ovs_comp = None

        # kernel
        if 'kernel' in ds_opts['performance']['Compute']:
            perf_kern_comp = ds_opts['performance']['Compute']['kernel']
        else:
            perf_kern_comp = None
    else:
        perf = False
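
    # ('performance' is a per-role dict in deploy settings; illustrative
    # shape: {'Compute': {'kernel': {'hugepages': '1024'},
    #                     'vpp': {'main-core': '1'},
    #                     'ovs': {'socket_memory': '1024'}},
    #         'Controller': {'vpp': {...}}})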

    # Modify OPNFV environment
    # TODO: Change to build a dict and outputting yaml rather than parsing
    for line in fileinput.input(tmp_opnfv_env, inplace=True):
        line = line.strip('\n')
        output_line = line
        if 'CloudDomain' in line:
            output_line = "  CloudDomain: {}".format(ns['domain_name'])
        elif 'replace_private_key' in line:
            output_line = "      key: '{}'".format(private_key)
        elif 'replace_public_key' in line:
            output_line = "      key: '{}'".format(public_key)

        if ds_opts['sdn_controller'] == 'opendaylight' and \
                'odl_vpp_routing_node' in ds_opts:
            if 'opendaylight::vpp_routing_node' in line:
                output_line = ("    opendaylight::vpp_routing_node: {}.{}"
                               .format(ds_opts['odl_vpp_routing_node'],
                                       ns['domain_name']))
            elif 'ControllerExtraConfig' in line:
                output_line = ("  ControllerExtraConfig:\n    "
                               "tripleo::profile::base::neutron::agents::"
                               "honeycomb::interface_role_mapping:"
                               " ['{}:tenant-interface']"
                               .format(tenant_ctrl_nic))
            elif 'NovaComputeExtraConfig' in line:
                output_line = ("  NovaComputeExtraConfig:\n    "
                               "tripleo::profile::base::neutron::agents::"
                               "honeycomb::interface_role_mapping:"
                               " ['{}:tenant-interface']"
                               .format(tenant_comp_nic))
        elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
            if 'NeutronVPPAgentPhysnets' in line:
                output_line = ("  NeutronVPPAgentPhysnets: 'datacentre:{}'".
                               format(tenant_ctrl_nic))
        elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
                'dvr') is True:
            if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
                output_line = ''
            elif 'NeutronDhcpAgentsPerNetwork' in line:
                num_control, num_compute = _get_node_counts(inv)
                output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
                               .format(num_compute))
            elif 'ComputeServices' in line:
                output_line = ("  ComputeServices:\n"
                               "    - OS::TripleO::Services::NeutronDhcpAgent")

        if perf:
            for role in 'NovaCompute', 'Controller':
                if role == 'NovaCompute':
                    perf_opts = perf_vpp_comp
                else:
                    perf_opts = perf_vpp_ctrl
                cfg = "{}ExtraConfig".format(role)
                if cfg in line and perf_opts:
                    perf_line = ''
                    if 'main-core' in perf_opts:
                        perf_line += ("\n    fdio::vpp_cpu_main_core: '{}'"
                                      .format(perf_opts['main-core']))
                    if 'corelist-workers' in perf_opts:
                        perf_line += ("\n    "
                                      "fdio::vpp_cpu_corelist_workers: '{}'"
                                      .format(perf_opts['corelist-workers']))
                    if perf_line:
                        output_line = ("  {}:{}".format(cfg, perf_line))

            # kernel args
            # (FIXME) use compute's kernel settings for all nodes for now.
            if 'ComputeKernelArgs' in line and perf_kern_comp:
                kernel_args = ''
                for k, v in perf_kern_comp.items():
                    kernel_args += "{}={} ".format(k, v)
                if kernel_args:
                    output_line = "  ComputeKernelArgs: '{}'".\
                        format(kernel_args)
            if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
                for k, v in OVS_PERF_MAP.items():
                    if k in line and v in perf_ovs_comp:
                        output_line = "  {}: '{}'".format(k, perf_ovs_comp[v])

        print(output_line)

    logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))

    # Modify Network environment
    for line in fileinput.input(net_env, inplace=True):
        line = line.strip('\n')
        if 'ComputeExtraConfigPre' in line and \
                ds_opts['dataplane'] == 'ovs_dpdk':
            print('  OS::TripleO::ComputeExtraConfigPre: '
                  './ovs-dpdk-preconfig.yaml')
        elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
                'resource_registry' in line:
            print("resource_registry:\n"
                  "  OS::TripleO::NodeUserData: first-boot.yaml")
        elif perf and perf_kern_comp and \
                'NovaSchedulerDefaultFilters' in line:
            print("  NovaSchedulerDefaultFilters: 'RamFilter,"
                  "ComputeFilter,AvailabilityZoneFilter,"
                  "ComputeCapabilitiesFilter,ImagePropertiesFilter,"
                  "NUMATopologyFilter'")
        else:
            print(line)

    logging.info("network-environment file written to {}".format(net_env))


def generate_ceph_key():
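    """
    Generates a cephx-style secret: a 16-byte random key prefixed by a
    binary header (type 1, creation secs, nsecs, key length), base64 encoded.
    :return: base64 encoded ceph key
    """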
    key = os.urandom(16)
    header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
    return base64.b64encode(header + key)


def prep_storage_env(ds, tmp_dir):
    """
    Creates storage environment file for deployment. Source file is copied by
    undercloud playbook to host.
    :param ds: deploy settings
    :param tmp_dir: Apex tmp dir
    :return: None
    """
    ds_opts = ds['deploy_options']
    storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
    if not os.path.isfile(storage_file):
        logging.error("storage-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("storage-environment file not copied from "
                                  "undercloud")
    for line in fileinput.input(storage_file, inplace=True):
        line = line.strip('\n')
        if 'CephClusterFSID' in line:
            print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
        elif 'CephMonKey' in line:
            print("  CephMonKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephAdminKey' in line:
            print("  CephAdminKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        else:
            print(line)
    if 'ceph_device' in ds_opts and ds_opts['ceph_device']:
        with open(storage_file, 'a') as fh:
            fh.write('  ExtraConfig:\n')
            fh.write("    ceph::profile::params::osds: {{{}: {{}}}}\n".format(
                ds_opts['ceph_device']
            ))
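

# With ceph_device set to e.g. /dev/sdb, the appended YAML reads:
#   ExtraConfig:
#     ceph::profile::params::osds: {/dev/sdb: {}}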


def external_network_cmds(ns):
    """
    Generates external network openstack commands
    :param ns: network settings
    :return: list of commands to configure external network
    """
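    # Illustrative output for a flat external network at 172.30.9.0/24:
    #   ["openstack network create external --project service --external "
    #    "--provider-network-type flat "
    #    "--provider-physical-network datacentre",
    #    "openstack subnet create external-subnet --project service "
    #    "--network external --no-dhcp --gateway 172.30.9.1 "
    #    "--allocation-pool start=172.30.9.200,end=172.30.9.220 "
    #    "--subnet-range 172.30.9.0/24"]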
    if 'external' in ns.enabled_network_list:
        net_config = ns['networks']['external'][0]
        external = True
        pool_start, pool_end = net_config['floating_ip_range']
    else:
        # no external network defined; fall back to the admin network and
        # its introspection range
        net_config = ns['networks']['admin']
        external = False
        pool_start, pool_end = ns['apex']['networks']['admin'][
            'introspection_range']
    nic_config = net_config['nic_mapping']
    gateway = net_config['gateway']
    cmds = list()
    # create network command
    if nic_config['compute']['vlan'] == 'native':
        ext_type = 'flat'
    else:
        ext_type = "vlan --provider-segment {}".format(nic_config[
            'compute']['vlan'])
    cmds.append("openstack network create external --project service "
                "--external --provider-network-type {} "
                "--provider-physical-network datacentre".format(ext_type))
    # create subnet command
    cidr = net_config['cidr']
    subnet_cmd = "openstack subnet create external-subnet --project " \
                 "service --network external --no-dhcp --gateway {} " \
                 "--allocation-pool start={},end={} --subnet-range " \
                 "{}".format(gateway, pool_start, pool_end, str(cidr))
    if external and cidr.version == 6:
        subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
                      '--ipv6-address-mode slaac'
    cmds.append(subnet_cmd)
    logging.debug("Neutron external network commands determined "
                  "as: {}".format(cmds))
    return cmds


def create_congress_cmds(overcloud_file):
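    """
    Creates Congress datasource commands, one per driver, using credentials
    parsed from the overcloudrc file.

    Illustrative command string for the nova driver:
        nova "nova" --config username=admin --config tenant_name=admin
        --config password=... --config auth_url=http://...:5000/..
        --config api_version="2.34"
    :param overcloud_file: path to overcloudrc file
    :return: list of congress datasource argument strings
    """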
    drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
    overcloudrc = parsers.parse_overcloudrc(overcloud_file)
    logging.info("Creating congress commands")
    try:
        ds_cfg = [
            "username={}".format(overcloudrc['OS_USERNAME']),
            "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
            "password={}".format(overcloudrc['OS_PASSWORD']),
            "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
        ]
    except KeyError:
        logging.error("Unable to find all keys required for congress in "
                      "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
                      "OS_PASSWORD, OS_AUTH_URL. Please check overcloudrc "
                      "file: {}".format(overcloud_file))
        raise
    cmds = list()
    ds_cfg = '--config ' + ' --config '.join(ds_cfg)

    for driver in drivers:
        if driver == 'doctor':
            cmd = "{} \"{}\"".format(driver, driver)
        else:
            cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
        if driver == 'nova':
            cmd += ' --config api_version="2.34"'
        logging.debug("Congress command created: {}".format(cmd))
        cmds.append(cmd)
    return cmds