# TripleO composable-service Heat template for the Nova Compute service.
# NOTE(review): sampled view — the `description:`/`parameters:` container
# keys and several parameter headers/defaults fall on elided lines.
1 heat_template_version: ocata
4 OpenStack Nova Compute service configured with Puppet
# ServiceNetMap parameter description (the parameter header itself is on an
# elided line — presumably `ServiceNetMap:`; confirm against the full file).
9 description: Mapping of service_name -> network name. Typically set
10 via parameter_defaults in the resource registry. This
11 mapping overrides those in ServiceNetMapDefaults.
# EndpointMap parameter description — presumably attached to an elided
# `EndpointMap:` parameter header.
18 description: Mapping of service endpoint -> protocol. Typically set
19 via parameter_defaults in the resource registry.
# Cinder backend toggles: consumed in config_settings below as the
# cinder_nfs_backend and rbd_persistent_storage hieradata values.
27 CinderEnableNfsBackend:
29 description: Whether to enable or not the NFS backend for Cinder
31 CinderEnableRbdBackend:
33 description: Whether to enable or not the Rbd backend for Cinder
# Description for the Nova RBD toggle (header presumably NovaEnableRbdBackend,
# elided); it drives both ephemeral RBD storage and tunnelled live migration
# in config_settings below.
37 description: Whether to enable or not the Rbd backend for Nova
# Wired to nova::compute::neutron::libvirt_vif_driver in config_settings.
39 NovaComputeLibvirtVifDriver:
41 description: Libvirt VIF driver configuration for the network
# PCI passthrough whitelist (example entry follows); passed through to
# nova::compute::pci_passthrough.
45 List of PCI Passthrough whitelist parameters.
50 address: "0000:05:00.0"
51 physical_network: "datacentre"
52 For different formats, refer to the nova.conf documentation for
53 pci_passthrough_whitelist configuration
# vCPU pinning set, e.g. ranges with exclusions; feeds
# nova::compute::vcpu_pin_set.
58 A list or range of physical CPU cores to reserve for virtual machine
60 Ex. NovaVcpuPinSet: ['4-12','^8'] will reserve cores from 4-12 excluding 8
61 type: comma_delimited_list
# Feeds nova::compute::reserved_host_memory (default value is elided here).
63 NovaReservedHostMemory:
65 Reserved RAM for host processes.
# Sensu/monitoring subscription channel name for this service.
70 MonitoringSubscriptionNovaCompute:
71 default: 'overcloud-nova-compute'
# Log collection source (tag + file path) exposed via logging_source below;
# presumably consumed by the fluentd client service — confirm against caller.
73 NovaComputeLoggingSource:
76 tag: openstack.nova.compute
77 path: /var/log/nova/nova-compute.log
# Substituted for LEVEL in the upgrade task that writes
# [upgrade_levels]/compute into /etc/nova/nova.conf.
78 UpgradeLevelNovaCompute:
80 description: Nova Compute upgrade level
# Nested stack pulling in the sibling nova-base template, parameterized with
# this template's net/password/endpoint maps; its role_data config_settings
# are merged into this service's config_settings via get_attr below.
85 type: ./nova-base.yaml
87 ServiceNetMap: {get_param: ServiceNetMap}
88 DefaultPasswords: {get_param: DefaultPasswords}
89 EndpointMap: {get_param: EndpointMap}
# outputs.role_data: the service definition consumed by the TripleO
# deployment (service name, monitoring/logging wiring, hieradata).
93 description: Role data for the Nova Compute service.
95 service_name: nova_compute
96 monitoring_subscription: {get_param: MonitoringSubscriptionNovaCompute}
97 logging_source: {get_param: NovaComputeLoggingSource}
# Base settings from the NovaBase nested stack, combined with the
# compute-specific entries below (presumably via an elided map_merge).
102 - get_attr: [NovaBase, role_data, config_settings]
103 - nova::compute::libvirt::manage_libvirt_services: false
104 nova::compute::pci_passthrough: {get_param: NovaPCIPassthrough}
105 nova::compute::vcpu_pin_set: {get_param: NovaVcpuPinSet}
106 nova::compute::reserved_host_memory: {get_param: NovaReservedHostMemory}
107 # we manage migration in nova common puppet profile
108 nova::compute::libvirt::migration_support: false
109 tripleo::profile::base::nova::manage_migration: true
110 tripleo::profile::base::nova::nova_compute_enabled: true
# Ceph RBD settings; NovaRbdPoolName/CephClientUserName parameters are not
# visible in this chunk — presumably declared on elided parameter lines.
111 nova::compute::rbd::libvirt_images_rbd_pool: {get_param: NovaRbdPoolName}
112 nova::compute::rbd::libvirt_rbd_user: {get_param: CephClientUserName}
113 tripleo::profile::base::nova::compute::cinder_nfs_backend: {get_param: CinderEnableNfsBackend}
114 rbd_persistent_storage: {get_param: CinderEnableRbdBackend}
# Keyring name built from the Ceph client user (the list_join scaffolding
# around this entry is elided — presumably 'client.<CephClientUserName>').
115 nova::compute::rbd::rbd_keyring:
119 - {get_param: CephClientUserName}
# libvirt RBD secret UUID resolved at puppet time from the Ceph FSID hiera key.
120 nova::compute::rbd::libvirt_rbd_secret_uuid: "%{hiera('ceph::profile::params::fsid')}"
# Hourly instance usage auditing (used for notifications/billing — confirm).
121 nova::compute::instance_usage_audit: true
122 nova::compute::instance_usage_audit_period: 'hour'
# Both ephemeral RBD storage and tunnelled live migration (below) are keyed
# off the same NovaEnableRbdBackend toggle.
123 nova::compute::rbd::ephemeral_storage: {get_param: NovaEnableRbdBackend}
124 # TUNNELLED mode provides a security enhancement when using shared
125 # storage but is not supported when not using shared storage.
126 # See https://bugzilla.redhat.com/show_bug.cgi?id=1301986#c12
127 # In future versions of QEMU (2.6, mostly), danpb's native
128 # encryption work will obsolete the need to use TUNNELLED transport
130 nova::migration::live_migration_tunnelled: {get_param: NovaEnableRbdBackend}
131 nova::compute::neutron::libvirt_vif_driver: {get_param: NovaComputeLibvirtVifDriver}
132 # NOTE: bind IP is found in Heat replacing the network name with the
133 # local node IP for the given network; replacement examples
134 # (eg. for internal_api):
136 # internal_api_uri -> [IP]
137 # internal_api_subnet -> IP/CIDR
# VNC wiring: the proxyclient address binds to this node's IP on the
# NovaVncProxyNetwork (per ServiceNetMap); the proxy host/protocol/port come
# from the public Nova/NovaVNCProxy endpoints in EndpointMap.
138 nova::compute::vncserver_proxyclient_address: {get_param: [ServiceNetMap, NovaVncProxyNetwork]}
139 nova::compute::vncproxy_host: {get_param: [EndpointMap, NovaPublic, host_nobrackets]}
140 nova::vncproxy::common::vncproxy_protocol: {get_param: [EndpointMap, NovaVNCProxyPublic, protocol]}
141 nova::vncproxy::common::vncproxy_host: {get_param: [EndpointMap, NovaVNCProxyPublic, host_nobrackets]}
142 nova::vncproxy::common::vncproxy_port: {get_param: [EndpointMap, NovaVNCProxyPublic, port]}
# Puppet step_config (the step_config key itself is presumably on an elided
# line): include the libvirt compute profile directly for now.
144 # TODO(emilien): figure how to deal with libvirt profile.
145 # We'll probably treat it like we do with Neutron plugins.
146 # Until then, just include it in the default nova-compute role.
147 include tripleo::profile::base::nova::compute::libvirt
# Settings applied to other services' roles: enable the collectd virt
# plugin against the local qemu/libvirt daemon.
148 service_config_settings:
150 tripleo.collectd.plugins.nova_compute:
152 collectd::plugins::virt::connection: "qemu:///system"
# Ansible upgrade tasks: stop nova-compute, pin the compute upgrade level,
# then restart. Tasks use the legacy key=value module-arg shorthand.
154 - name: Stop nova-compute service
156 service: name=openstack-nova-compute state=stopped
157 # If not already set by puppet (e.g a pre-ocata version), set the
158 # upgrade_level for compute to "auto"
159 - name: Set compute upgrade level to auto
# LEVEL is substituted from UpgradeLevelNovaCompute (presumably via an
# elided str_replace around this template/params pair) and written as
# [upgrade_levels]/compute in /etc/nova/nova.conf.
163 template: "dest=/etc/nova/nova.conf section=upgrade_levels option=compute value=LEVEL"
165 LEVEL: {get_param: UpgradeLevelNovaCompute}
166 - name: Start nova-compute service
168 service: name=openstack-nova-compute state=started