# Source: apex-tripleo-heat-templates.git / puppet / services / nova-compute.yaml
# Commit: 33b07dedfb20f0ffa14b291d5a40b4e3d9e6efae
heat_template_version: pike

description: >
  OpenStack Nova Compute service configured with Puppet

parameters:
  ServiceNetMap:
    default: {}
    description: Mapping of service_name -> network name. Typically set
                 via parameter_defaults in the resource registry.  This
                 mapping overrides those in ServiceNetMapDefaults.
    type: json
  DefaultPasswords:
    default: {}
    type: json
  RoleName:
    default: ''
    description: Role name on which the service is applied
    type: string
  RoleParameters:
    default: {}
    description: Parameters specific to the role
    type: json
  EndpointMap:
    default: {}
    description: Mapping of service endpoint -> protocol. Typically set
                 via parameter_defaults in the resource registry.
    type: json
  NovaRbdPoolName:
    default: vms
    type: string
  CephClientUserName:
    default: openstack
    type: string
  CephClientKey:
    description: The Ceph client key. Can be created with ceph-authtool --gen-print-key. Currently only used for external Ceph deployments to create the openstack user keyring.
    type: string
    hidden: true
  CephClusterFSID:
    type: string
    description: The Ceph cluster FSID. Must be a UUID.
  CinderEnableNfsBackend:
    default: false
    description: Whether to enable or not the NFS backend for Cinder
    type: boolean
  CinderEnableRbdBackend:
    default: false
    description: Whether to enable or not the Rbd backend for Cinder
    type: boolean
  NovaEnableRbdBackend:
    default: false
    description: Whether to enable or not the Rbd backend for Nova
    type: boolean
  NovaComputeLibvirtVifDriver:
    default: ''
    description: Libvirt VIF driver configuration for the network
    type: string
  NovaPCIPassthrough:
    description: >
      List of PCI Passthrough whitelist parameters.
      Example -
      NovaPCIPassthrough:
        - vendor_id: "8086"
          product_id: "154c"
          address: "0000:05:00.0"
          physical_network: "datacentre"
      For different formats, refer to the nova.conf documentation for
      pci_passthrough_whitelist configuration
    type: json
    default: ''
  NovaVcpuPinSet:
    description: >
      A list or range of physical CPU cores to reserve for virtual machine
      processes.
      Ex. NovaVcpuPinSet: ['4-12','^8'] will reserve cores from 4-12 excluding 8
    type: comma_delimited_list
    default: []
  NovaReservedHostMemory:
    description: >
      Reserved RAM for host processes.
    type: number
    default: 4096
    constraints:
      - range: { min: 512 }
  MonitoringSubscriptionNovaCompute:
    default: 'overcloud-nova-compute'
    type: string
  NovaComputeLoggingSource:
    type: json
    default:
      tag: openstack.nova.compute
      path: /var/log/nova/nova-compute.log
  UpgradeLevelNovaCompute:
    type: string
    description: Nova Compute upgrade level
    default: auto
  MigrationSshKey:
    type: json
    description: >
      SSH key for migration.
      Expects a dictionary with keys 'public_key' and 'private_key'.
      Values should be identical to SSH public/private key files.
    default: {}
resources:
  NovaBase:
    type: ./nova-base.yaml
    properties:
      ServiceNetMap: {get_param: ServiceNetMap}
      DefaultPasswords: {get_param: DefaultPasswords}
      EndpointMap: {get_param: EndpointMap}
      RoleName: {get_param: RoleName}
      RoleParameters: {get_param: RoleParameters}

  # Merging role-specific parameters (RoleParameters) with the default parameters.
  # RoleParameters will have the precedence over the default parameters.
  # The inner map_replace swaps in any per-role override values; the outer
  # map_replace then fills the remaining placeholders from the global defaults.
  RoleParametersValue:
    type: OS::Heat::Value
    properties:
      type: json
      value:
        map_replace:
          - map_replace:
            - nova::compute::vcpu_pin_set: NovaVcpuPinSet
              nova::compute::reserved_host_memory: NovaReservedHostMemory
            - values: {get_param: [RoleParameters]}
          - values:
              NovaVcpuPinSet: {get_param: NovaVcpuPinSet}
              NovaReservedHostMemory: {get_param: NovaReservedHostMemory}
outputs:
  role_data:
    description: Role data for the Nova Compute service.
    value:
      service_name: nova_compute
      monitoring_subscription: {get_param: MonitoringSubscriptionNovaCompute}
      logging_source: {get_param: NovaComputeLoggingSource}
      logging_groups:
        - nova
      config_settings:
        map_merge:
          - get_attr: [NovaBase, role_data, config_settings]
          - get_attr: [RoleParametersValue, value]
          - nova::compute::libvirt::manage_libvirt_services: false
            # Role-specific NovaPCIPassthrough (if set in RoleParameters) wins
            # over the global parameter; the result is serialized via str_replace.
            nova::compute::pci_passthrough:
              str_replace:
                template: "JSON_PARAM"
                params:
                  map_replace:
                    - map_replace:
                      - JSON_PARAM: NovaPCIPassthrough
                      - values: {get_param: [RoleParameters]}
                    - values:
                        NovaPCIPassthrough: {get_param: NovaPCIPassthrough}
            # we manage migration in nova common puppet profile
            nova::compute::libvirt::migration_support: false
            tripleo::profile::base::nova::manage_migration: true
            tripleo::profile::base::nova::migration_ssh_key: {get_param: MigrationSshKey}
            tripleo::profile::base::nova::migration_ssh_localaddrs:
              - "%{hiera('cold_migration_ssh_inbound_addr')}"
              - "%{hiera('live_migration_ssh_inbound_addr')}"
            live_migration_ssh_inbound_addr: {get_param: [ServiceNetMap, NovaLibvirtNetwork]}
            cold_migration_ssh_inbound_addr: {get_param: [ServiceNetMap, NovaColdMigrationNetwork]}
            tripleo::profile::base::nova::nova_compute_enabled: true
            nova::compute::rbd::libvirt_images_rbd_pool: {get_param: NovaRbdPoolName}
            nova::compute::rbd::libvirt_rbd_user: {get_param: CephClientUserName}
            tripleo::profile::base::nova::compute::cinder_nfs_backend: {get_param: CinderEnableNfsBackend}
            rbd_persistent_storage: {get_param: CinderEnableRbdBackend}
            nova::compute::rbd::libvirt_rbd_secret_key: {get_param: CephClientKey}
            nova::compute::rbd::libvirt_rbd_secret_uuid: {get_param: CephClusterFSID}
            nova::compute::instance_usage_audit: true
            nova::compute::instance_usage_audit_period: 'hour'
            nova::compute::rbd::ephemeral_storage: {get_param: NovaEnableRbdBackend}
            # TUNNELLED mode provides a security enhancement when using shared
            # storage but is not supported when not using shared storage.
            # See https://bugzilla.redhat.com/show_bug.cgi?id=1301986#c12
            # In future versions of QEMU (2.6, mostly), danpb's native
            # encryption work will obsolete the need to use TUNNELLED transport
            # mode.
            nova::migration::live_migration_tunnelled: {get_param: NovaEnableRbdBackend}
            nova::compute::neutron::libvirt_vif_driver: {get_param: NovaComputeLibvirtVifDriver}
            # NOTE: bind IP is found in Heat replacing the network name with the
            # local node IP for the given network; replacement examples
            # (eg. for internal_api):
            # internal_api -> IP
            # internal_api_uri -> [IP]
            # internal_api_subnet - > IP/CIDR
            nova::compute::vncserver_proxyclient_address: {get_param: [ServiceNetMap, NovaVncProxyNetwork]}
            nova::compute::vncproxy_host: {get_param: [EndpointMap, NovaPublic, host_nobrackets]}
            nova::vncproxy::common::vncproxy_protocol: {get_param: [EndpointMap, NovaVNCProxyPublic, protocol]}
            nova::vncproxy::common::vncproxy_host: {get_param: [EndpointMap, NovaVNCProxyPublic, host_nobrackets]}
            nova::vncproxy::common::vncproxy_port: {get_param: [EndpointMap, NovaVNCProxyPublic, port]}
      step_config: |
        # TODO(emilien): figure how to deal with libvirt profile.
        # We'll probably treat it like we do with Neutron plugins.
        # Until then, just include it in the default nova-compute role.
        include tripleo::profile::base::nova::compute::libvirt
      service_config_settings:
        collectd:
          tripleo.collectd.plugins.nova_compute:
            - virt
          collectd::plugins::virt::connection: "qemu:///system"
      upgrade_tasks:
        - name: Stop nova-compute service
          tags: step1
          service: name=openstack-nova-compute state=stopped
        # If not already set by puppet (e.g a pre-ocata version), set the
        # upgrade_level for compute to "auto"
        - name: Set compute upgrade level to auto
          tags: step3
          ini_file:
            str_replace:
              template: "dest=/etc/nova/nova.conf section=upgrade_levels option=compute value=LEVEL"
              params:
                LEVEL: {get_param: UpgradeLevelNovaCompute}
        - name: install openstack-nova-migration
          tags: step3
          yum: name=openstack-nova-migration state=latest
        - name: Start nova-compute service
          tags: step6
          service: name=openstack-nova-compute state=started