##############################################################################
# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
from apex.common import constants as con
from apex.common.exceptions import ApexDeployException
from apex.common import parsers
from apex.virtual import utils as virt_utils
from cryptography.hazmat.primitives import serialization as \
    crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend as \
    crypto_default_backend
        'sfc': 'neutron-sfc-opendaylight.yaml',
        'vpn': 'neutron-bgpvpn-opendaylight.yaml',
        'gluon': 'gluon.yaml',
            'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
            'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
            'default': 'neutron-opendaylight-honeycomb.yaml'
        'default': 'neutron-opendaylight.yaml',
        'sfc': 'neutron-onos-sfc.yaml',
        'default': 'neutron-onos.yaml'
    'ovn': 'neutron-ml2-ovn.yaml',
        'vpp': 'neutron-ml2-vpp.yaml',
        'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
    'tacker': 'enable_tacker.yaml',
    'congress': 'enable_congress.yaml',
    'barometer': 'enable_barometer.yaml',
    'rt_kvm': 'enable_rt_kvm.yaml'
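
# The entries below belong to OVS_PERF_MAP, which maps tripleo-heat-templates
# parameter names to the matching keys under performance/Compute/ovs in the
# deploy settings; prep_env() uses it to fill in OVS DPDK tuning values.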
    'HostCpusList': 'dpdk_cores',
    'NeutronDpdkCoreList': 'pmd_cores',
    'NeutronDpdkSocketMemory': 'socket_memory',
    'NeutronDpdkMemoryChannels': 'memory_channels'
OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
def build_sdn_env_list(ds, sdn_map, env_list=None):
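    """
    Builds the list of SDN environment files to pass to the overcloud deploy.

    Walks sdn_map and, for each key matching the configured 'sdn_controller'
    or an enabled feature flag in the deploy settings, collects the
    corresponding tripleo-heat-templates environment file. Nested dicts are
    resolved recursively and tuple values contribute their second element as
    the file name; the map's 'default' entry is used when nothing matched.
    :param ds: deploy settings (deploy_options section)
    :param sdn_map: mapping of settings keys to environment file names
    :param env_list: list to extend (a new list is used if not provided)
    :return: list of environment file paths
    """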
    for k, v in sdn_map.items():
        if ds['sdn_controller'] == k or (k in ds and ds[k] is True):
            if isinstance(v, dict):
                env_list.extend(build_sdn_env_list(ds, v))
                env_list.append(os.path.join(con.THT_ENV_DIR, v))
        elif isinstance(v, tuple):
                env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
    if len(env_list) == 0:
            env_list.append(os.path.join(
                con.THT_ENV_DIR, sdn_map['default']))
            logging.warning("Unable to find default file for SDN")
def create_deploy_cmd(ds, ns, inv, tmp_dir,
                      virtual, env_file='opnfv-environment.yaml',
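    # Assembles the 'openstack overcloud deploy' command: environment files
    # are gathered from the deploy settings (SDN, other features, storage,
    # HA, virtual vs. baremetal), node counts come from the inventory, and
    # the finished command string is written out under tmp_dir as
    # 'deploy_command'.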
    logging.info("Creating deployment command")
    deploy_options = ['network-environment.yaml']
        deploy_options.append(env_file)
    ds_opts = ds['deploy_options']
    deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)
    for k, v in OTHER_FILE_MAP.items():
        if k in ds_opts and ds_opts[k]:
            deploy_options.append(os.path.join(con.THT_ENV_DIR, v))
        prep_storage_env(ds, tmp_dir)
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'storage-environment.yaml'))
    if ds['global_params']['ha_enabled']:
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'puppet-pacemaker.yaml'))
        deploy_options.append('virtual-environment.yaml')
        deploy_options.append('baremetal-environment.yaml')
    num_control, num_compute = inv.get_node_counts()
    if num_control == 0 or num_compute == 0:
        logging.error("Detected 0 control or compute nodes. Control nodes: "
                      "{}, compute nodes: {}".format(num_control, num_compute))
        raise ApexDeployException("Invalid number of control or computes")
    elif num_control > 1 and not ds['global_params']['ha_enabled']:
    cmd = "openstack overcloud deploy --templates --timeout {} " \
          "--libvirt-type kvm".format(con.DEPLOY_TIMEOUT)
    for option in deploy_options:
        cmd += " -e {}".format(option)
    cmd += " --ntp-server {}".format(ns['ntp'][0])
    cmd += " --control-scale {}".format(num_control)
    cmd += " --compute-scale {}".format(num_compute)
    cmd += ' --control-flavor control --compute-flavor compute'
        cmd += ' --networks-file network_data.yaml'
    logging.info("Deploy command set: {}".format(cmd))
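
    # For reference, the assembled command resembles the following
    # (illustrative values):
    #   openstack overcloud deploy --templates --timeout <DEPLOY_TIMEOUT> \
    #       --libvirt-type kvm -e network-environment.yaml \
    #       -e opnfv-environment.yaml -e <THT_ENV_DIR>/neutron-opendaylight.yaml \
    #       --ntp-server <ntp> --control-scale 1 --compute-scale 1 \
    #       --control-flavor control --compute-flavor compute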
    with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
def prep_image(ds, img, tmp_dir, root_pw=None):
    """
    Locates the SDN image and preps it for deployment.
    :param ds: deploy settings
    :param img: sdn image
    :param tmp_dir: dir to store the modified sdn image
    :param root_pw: password to configure for the overcloud image
    """
    # TODO(trozet): Come up with a better way to organize this logic in this
    # function
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))
    ds_opts = ds['deploy_options']
    sdn = ds_opts['sdn_controller']
    # we need this due to rhbz #1436021
    # fixed in systemd-219-37.el7
        logging.info("Neutron openvswitch-agent disabled")
                    "rm -f /etc/systemd/system/multi-user.target.wants/"
                    "neutron-openvswitch-agent.service"},
                    "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
        virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
                "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
                "/opt/quagga/etc/init.d/zrpcd_start.sh"})
            con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.local "})
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
        logging.info("ZRPCD process started")

    dataplane = ds_opts['dataplane']
    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
        for mod_file, mod in uio_types.items():
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))
                {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
                {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
                                   "{}".format(os.path.basename(mod_file))}
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})
    if ds_opts['sfc'] and dataplane == 'ovs':
            {con.VIRT_RUN_CMD: "yum -y install "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_KMOD_RPM)},
            {con.VIRT_RUN_CMD: "yum downgrade -y "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_RPM)}
    if dataplane == 'fdio':
        # Patch neutron to use the OVS external interface for the router
        # and add the generic linux NS interface driver
            {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
                               "-p1 < neutron-patch-NSDriver.patch"})
                {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
                {con.VIRT_RUN_CMD: "yum install -y "
                                   "/root/nosdn_vpp_rpms/*.rpm"}
    if sdn == 'opendaylight':
        if ds_opts['odl_version'] != con.DEFAULT_ODL_VERSION:
                {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
                {con.VIRT_RUN_CMD: "rm -rf /etc/puppet/modules/opendaylight"},
                {con.VIRT_RUN_CMD: "cd /etc/puppet/modules && tar xzf "
                                   "/root/puppet-opendaylight-"
                                   "{}.tar.gz".format(ds_opts['odl_version'])}
            if ds_opts['odl_version'] == 'master':
                    {con.VIRT_RUN_CMD: "rpm -ivh --nodeps /root/{}/*".format(
                        ds_opts['odl_version'])}
                    {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
                        ds_opts['odl_version'])}

    elif sdn == 'opendaylight' and 'odl_vpp_netvirt' in ds_opts \
            and ds_opts['odl_vpp_netvirt']:
            {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
            {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
                ODL_NETVIRT_VPP_RPM)}

            {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum update -y "
            {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum downgrade -y "

    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug("Temporary overcloud image stored as: {}".format(
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")
def make_ssh_key():
    """
    Creates public and private ssh keys with 1024-bit RSA encryption
    :return: private, public key
    """
    key = rsa.generate_private_key(
        backend=crypto_default_backend(),
        public_exponent=65537,
    private_key = key.private_bytes(
        crypto_serialization.Encoding.PEM,
        crypto_serialization.PrivateFormat.PKCS8,
        crypto_serialization.NoEncryption())
    public_key = key.public_key().public_bytes(
        crypto_serialization.Encoding.OpenSSH,
        crypto_serialization.PublicFormat.OpenSSH
    )
    return private_key.decode('utf-8'), public_key.decode('utf-8')
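
# Illustrative usage (prep_env below substitutes these keys into the
# opnfv-environment file):
#   private_key, public_key = make_ssh_key()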
def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
    """
    Creates modified opnfv/network environment files for deployment
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param opnfv_env: file path for opnfv-environment file
    :param net_env: file path for network-environment file
    :param tmp_dir: Apex tmp dir
    """
    logging.info("Preparing opnfv-environment and network-environment files")
    ds_opts = ds['deploy_options']
    tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
    shutil.copyfile(opnfv_env, tmp_opnfv_env)
    tenant_nic_map = ns['networks']['tenant']['nic_mapping']
    tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
    tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
    external_nic_map = ns['networks']['external'][0]['nic_mapping']
    external_nic = dict()
    external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]
    private_key, public_key = make_ssh_key()
    # Shorthand variables for the performance options referenced repeatedly
    # while editing the environment files below
    if 'performance' in ds_opts:
        if 'vpp' in ds_opts['performance']['Compute']:
            perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
        if 'vpp' in ds_opts['performance']['Controller']:
            perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
        if 'ovs' in ds_opts['performance']['Compute']:
            perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
        if 'kernel' in ds_opts['performance']['Compute']:
            perf_kern_comp = ds_opts['performance']['Compute']['kernel']
        else:
            perf_kern_comp = None
    # Modify OPNFV environment
    # TODO: Change to build a dict and outputting yaml rather than parsing
    for line in fileinput.input(tmp_opnfv_env, inplace=True):
        line = line.strip('\n')
        if 'CloudDomain' in line:
            output_line = " CloudDomain: {}".format(ns['domain_name'])
        elif 'replace_private_key' in line:
            output_line = " private_key: |\n"
            for line in private_key.splitlines():
                key_out += " {}\n".format(line)
            output_line += key_out
        elif 'replace_public_key' in line:
            output_line = " public_key: '{}'".format(public_key)
        elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
                'resource_registry' in line:
            output_line = "resource_registry:\n" \
                          " OS::TripleO::NodeUserData: first-boot.yaml"
        elif 'ComputeExtraConfigPre' in line and \
                ds_opts['dataplane'] == 'ovs_dpdk':
            output_line = ' OS::TripleO::ComputeExtraConfigPre: ' \
                          './ovs-dpdk-preconfig.yaml'
        if ds_opts['sdn_controller'] == 'opendaylight' and \
                'odl_vpp_routing_node' in ds_opts:
            if 'opendaylight::vpp_routing_node' in line:
                output_line = (" opendaylight::vpp_routing_node: {}.{}"
                               .format(ds_opts['odl_vpp_routing_node'],
        elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
            if 'NeutronVPPAgentPhysnets' in line:
                output_line = (" NeutronVPPAgentPhysnets: 'datacentre:{}'".
                               format(tenant_nic['Controller']))
        elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
            if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
            elif 'NeutronDhcpAgentsPerNetwork' in line:
                num_control, num_compute = inv.get_node_counts()
                output_line = (" NeutronDhcpAgentsPerNetwork: {}"
                               .format(num_compute))
            elif 'ComputeServices' in line:
                output_line = (" ComputeServices:\n"
                               " - OS::TripleO::Services::NeutronDhcpAgent")
            for role in 'NovaCompute', 'Controller':
                if role == 'NovaCompute':
                    perf_opts = perf_vpp_comp
                    perf_opts = perf_vpp_ctrl
                cfg = "{}ExtraConfig".format(role)
                if cfg in line and perf_opts:
                    if 'main-core' in perf_opts:
                        perf_line += ("\n fdio::vpp_cpu_main_core: '{}'"
                                      .format(perf_opts['main-core']))
                    if 'corelist-workers' in perf_opts:
                                      "fdio::vpp_cpu_corelist_workers: '{}'"
                                      .format(perf_opts['corelist-workers']))
                    if ds_opts['sdn_controller'] == 'opendaylight' and \
                            ds_opts['dataplane'] == 'fdio':
                        if role == 'NovaCompute':
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface',"
                                          "'{}:public-interface']"
                                          .format(tenant_nic[role],
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface']"
                                          .format(tenant_nic[role]))
                        output_line = (" {}:{}".format(cfg, perf_line))
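
        # For example, with main-core '1' and corelist-workers '2,3' in the
        # Compute performance settings (illustrative values), the emitted
        # lines resemble:
        #   NovaComputeExtraConfig:
        #     fdio::vpp_cpu_main_core: '1'
        #     fdio::vpp_cpu_corelist_workers: '2,3'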
        if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
            for k, v in OVS_PERF_MAP.items():
                if k in line and v in perf_ovs_comp:
                    output_line = " {}: '{}'".format(k, perf_ovs_comp[v])
            # (FIXME) use compute's kernel settings for all nodes for now.
                if 'NovaSchedulerDefaultFilters' in line:
                        " NovaSchedulerDefaultFilters: 'RamFilter," \
                        "ComputeFilter,AvailabilityZoneFilter," \
                        "ComputeCapabilitiesFilter," \
                        "ImagePropertiesFilter,NUMATopologyFilter'"
                elif 'ComputeKernelArgs' in line:
                    for k, v in perf_kern_comp.items():
                        kernel_args += "{}={} ".format(k, v)
                        output_line = " ComputeKernelArgs: '{}'".\

    logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
def generate_ceph_key():
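    """
    Generates a Ceph (cephx) secret key for the storage environment.

    The key material is wrapped in the standard cephx binary header
    (struct '<hiih': type, creation time, nanoseconds, key length) and
    returned base64 encoded, ready to substitute into
    storage-environment.yaml.
    :return: base64-encoded cephx key (bytes)
    """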
    header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
    return base64.b64encode(header + key)
def prep_storage_env(ds, tmp_dir):
    """
    Creates the storage environment file for deployment. The source file is
    copied to the host by the undercloud playbook.
    """
    ds_opts = ds['deploy_options']
    storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
    if not os.path.isfile(storage_file):
        logging.error("storage-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("storage-environment file not copied from "
    for line in fileinput.input(storage_file, inplace=True):
        line = line.strip('\n')
        if 'CephClusterFSID' in line:
            print(" CephClusterFSID: {}".format(str(uuid.uuid4())))
        elif 'CephMonKey' in line:
            print(" CephMonKey: {}".format(generate_ceph_key().decode(
        elif 'CephAdminKey' in line:
            print(" CephAdminKey: {}".format(generate_ceph_key().decode(
    if 'ceph_device' in ds_opts and ds_opts['ceph_device']:
        with open(storage_file, 'a') as fh:
            fh.write(' ExtraConfig:\n')
            fh.write(" ceph::profile::params::osds:{{{}:{{}}}}\n".format(
                ds_opts['ceph_device']
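
    # For example, with ceph_device set to '/dev/sdb' (illustrative), the
    # lines appended to storage-environment.yaml are:
    #   ExtraConfig:
    #     ceph::profile::params::osds:{/dev/sdb:{}}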
def external_network_cmds(ns):
    """
    Generates external network openstack commands
    :param ns: network settings
    :return: list of commands to configure external network
    """
    if 'external' in ns.enabled_network_list:
        net_config = ns['networks']['external'][0]
        pool_start, pool_end = net_config['floating_ip_range']
        net_config = ns['networks']['admin']
        pool_start, pool_end = ns['apex']['networks']['admin'][
            'introspection_range']
    nic_config = net_config['nic_mapping']
    gateway = net_config['gateway']
    # create network command
    if nic_config['compute']['vlan'] == 'native':
        ext_type = "vlan --provider-segment {}".format(nic_config[
    cmds.append("openstack network create external --project service "
                "--external --provider-network-type {} "
                "--provider-physical-network datacentre".format(ext_type))
    # create subnet command
    cidr = net_config['cidr']
    subnet_cmd = "openstack subnet create external-subnet --project " \
                 "service --network external --no-dhcp --gateway {} " \
                 "--allocation-pool start={},end={} --subnet-range " \
                 "{}".format(gateway, pool_start, pool_end, str(cidr))
    if external and cidr.version == 6:
        subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
                      '--ipv6-address-mode slaac'
    cmds.append(subnet_cmd)
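
    # Illustrative subnet command output (example IPv4 values):
    #   openstack subnet create external-subnet --project service \
    #       --network external --no-dhcp --gateway 172.30.9.1 \
    #       --allocation-pool start=172.30.9.2,end=172.30.9.100 \
    #       --subnet-range 172.30.9.0/24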
    logging.debug("Neutron external network commands determined "
                  "as: {}".format(cmds))
def create_congress_cmds(overcloud_file):
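    """
    Builds the Congress datasource commands from overcloud credentials.

    Parses the overcloudrc file for the keystone credentials and turns them
    into --config options that are appended to each datasource driver command
    (the doctor driver takes no config).
    :param overcloud_file: path to the overcloudrc file
    :return: list of congress commands
    """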
    drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
    overcloudrc = parsers.parse_overcloudrc(overcloud_file)
    logging.info("Creating congress commands")
            "username={}".format(overcloudrc['OS_USERNAME']),
            "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
            "password={}".format(overcloudrc['OS_PASSWORD']),
            "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
        logging.error("Unable to find all keys required for congress in "
                      "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
                      "OS_PASSWORD, OS_AUTH_URL. Please check overcloudrc "
                      "file: {}".format(overcloud_file))
    ds_cfg = '--config ' + ' --config '.join(ds_cfg)
    for driver in drivers:
        if driver == 'doctor':
            cmd = "{} \"{}\"".format(driver, driver)
            cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
            cmd += ' --config api_version="2.34"'
        logging.debug("Congress command created: {}".format(cmd))