Merge "Fix bug in docker-toool where values are sometimes empty."
[apex-tripleo-heat-templates.git] / puppet / services / nova-compute.yaml
heat_template_version: pike

description: >
  OpenStack Nova Compute service configured with Puppet

parameters:
  ServiceNetMap:
    default: {}
    description: Mapping of service_name -> network name. Typically set
                 via parameter_defaults in the resource registry. This
                 mapping overrides those in ServiceNetMapDefaults.
    type: json
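  # Illustrative sketch only (not part of the service definition): ServiceNetMap
  # entries are normally overridden from an environment file, e.g.
  #   parameter_defaults:
  #     ServiceNetMap:
  #       NovaVncProxyNetwork: internal_api
  # The network name shown is an assumption for the example.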
  DefaultPasswords:
    default: {}
    type: json
  RoleName:
    default: ''
    description: Role name on which the service is applied
    type: string
  RoleParameters:
    default: {}
    description: Parameters specific to the role
    type: json
  EndpointMap:
    default: {}
    description: Mapping of service endpoint -> protocol. Typically set
                 via parameter_defaults in the resource registry.
    type: json
  NovaRbdPoolName:
    default: vms
    type: string
  CephClientUserName:
    default: openstack
    type: string
  CinderEnableNfsBackend:
    default: false
    description: Whether or not to enable the NFS backend for Cinder
    type: boolean
  CinderEnableRbdBackend:
    default: false
    description: Whether or not to enable the RBD backend for Cinder
    type: boolean
  NovaEnableRbdBackend:
    default: false
    description: Whether or not to enable the RBD backend for Nova
    type: boolean
  NovaComputeLibvirtVifDriver:
    default: ''
    description: Libvirt VIF driver configuration for the network
    type: string
  NovaPCIPassthrough:
    description: >
      List of PCI Passthrough whitelist parameters.
      Example -
      NovaPCIPassthrough:
        - vendor_id: "8086"
          product_id: "154c"
          address: "0000:05:00.0"
          physical_network: "datacentre"
      For different formats, refer to the nova.conf documentation for
      pci_passthrough_whitelist configuration.
    type: json
    default: ''
  NovaVcpuPinSet:
    description: >
      A list or range of physical CPU cores to reserve for virtual machine
      processes.
      Ex. NovaVcpuPinSet: ['4-12','^8'] will reserve cores 4-12, excluding core 8.
    type: comma_delimited_list
    default: []
  NovaReservedHostMemory:
    description: >
      Reserved RAM for host processes.
    type: number
    default: 4096
    constraints:
      - range: { min: 512 }
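  # Illustrative sketch only: the two tuning parameters above are usually set
  # together from an environment file (values here are example assumptions):
  #   parameter_defaults:
  #     NovaVcpuPinSet: ['4-12','^8']
  #     NovaReservedHostMemory: 4096
  # Role-specific overrides go through RoleParameters and are merged by the
  # RoleParametersValue resource below.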
  MonitoringSubscriptionNovaCompute:
    default: 'overcloud-nova-compute'
    type: string
  NovaComputeLoggingSource:
    type: json
    default:
      tag: openstack.nova.compute
      path: /var/log/nova/nova-compute.log
  UpgradeLevelNovaCompute:
    type: string
    description: Nova Compute upgrade level
    default: auto
  MigrationSshKey:
    type: json
    description: >
      SSH key pair for migration.
      Expects a dictionary with keys 'public_key' and 'private_key', whose
      values should be the contents of the SSH public/private key files.
    default: {}
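  # Illustrative sketch only: the expected shape of MigrationSshKey (the key
  # material shown is a placeholder, not real data):
  #   MigrationSshKey:
  #     public_key: 'ssh-rsa AAAA... migration'
  #     private_key: |
  #       -----BEGIN RSA PRIVATE KEY-----
  #       ...
  #       -----END RSA PRIVATE KEY-----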

resources:
  NovaBase:
    type: ./nova-base.yaml
    properties:
      ServiceNetMap: {get_param: ServiceNetMap}
      DefaultPasswords: {get_param: DefaultPasswords}
      EndpointMap: {get_param: EndpointMap}
      RoleName: {get_param: RoleName}
      RoleParameters: {get_param: RoleParameters}

  # Merging role-specific parameters (RoleParameters) with the default parameters.
  # RoleParameters take precedence over the default parameters.
  RoleParametersValue:
    type: OS::Heat::Value
    properties:
      type: json
      value:
        map_replace:
          - map_replace:
            - nova::compute::vcpu_pin_set: NovaVcpuPinSet
              nova::compute::reserved_host_memory: NovaReservedHostMemory
            - values: {get_param: [RoleParameters]}
          - values:
              NovaVcpuPinSet: {get_param: NovaVcpuPinSet}
              NovaReservedHostMemory: {get_param: NovaReservedHostMemory}
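  # Worked example (an assumed input, not rendered output): if RoleParameters
  # contains {NovaVcpuPinSet: ['2-6']}, the inner map_replace swaps that value
  # in, while the untouched placeholder 'NovaReservedHostMemory' is replaced by
  # the outer map_replace with the template-wide parameter, giving:
  #   nova::compute::vcpu_pin_set: ['2-6']
  #   nova::compute::reserved_host_memory: 4096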

outputs:
  role_data:
    description: Role data for the Nova Compute service.
    value:
      service_name: nova_compute
      monitoring_subscription: {get_param: MonitoringSubscriptionNovaCompute}
      logging_source: {get_param: NovaComputeLoggingSource}
      logging_groups:
        - nova
      config_settings:
        map_merge:
          - get_attr: [NovaBase, role_data, config_settings]
          - get_attr: [RoleParametersValue, value]
          - nova::compute::libvirt::manage_libvirt_services: false
            nova::compute::pci_passthrough:
              str_replace:
                template: "JSON_PARAM"
                params:
                  map_replace:
                    - map_replace:
                      - JSON_PARAM: NovaPCIPassthrough
                      - values: {get_param: [RoleParameters]}
                    - values:
                        NovaPCIPassthrough: {get_param: NovaPCIPassthrough}
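            # Sketch of the intended effect (an illustration, not rendered
            # output): assuming no role-specific override and the example value
            # from the NovaPCIPassthrough description, str_replace substitutes
            # the JSON list for JSON_PARAM so hiera receives it as a string,
            # roughly
            #   nova::compute::pci_passthrough: '[{"vendor_id": "8086", ...}]'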
            # Migration is managed in the common nova puppet profile.
            nova::compute::libvirt::migration_support: false
            tripleo::profile::base::nova::manage_migration: true
            tripleo::profile::base::nova::migration_ssh_key: {get_param: MigrationSshKey}
            tripleo::profile::base::nova::migration_ssh_localaddrs:
              - "%{hiera('cold_migration_ssh_inbound_addr')}"
              - "%{hiera('live_migration_ssh_inbound_addr')}"
            live_migration_ssh_inbound_addr: {get_param: [ServiceNetMap, NovaLibvirtNetwork]}
            cold_migration_ssh_inbound_addr: {get_param: [ServiceNetMap, NovaColdMigrationNetwork]}
            tripleo::profile::base::nova::nova_compute_enabled: true
            nova::compute::rbd::libvirt_images_rbd_pool: {get_param: NovaRbdPoolName}
            nova::compute::rbd::libvirt_rbd_user: {get_param: CephClientUserName}
            tripleo::profile::base::nova::compute::cinder_nfs_backend: {get_param: CinderEnableNfsBackend}
            rbd_persistent_storage: {get_param: CinderEnableRbdBackend}
            nova::compute::rbd::rbd_keyring:
              list_join:
              - '.'
              - - 'client'
                - {get_param: CephClientUserName}
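            # With the default CephClientUserName ('openstack'), the list_join
            # above produces the keyring name 'client.openstack'.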
            nova::compute::rbd::libvirt_rbd_secret_uuid: "%{hiera('ceph::profile::params::fsid')}"
            nova::compute::instance_usage_audit: true
            nova::compute::instance_usage_audit_period: 'hour'
            nova::compute::rbd::ephemeral_storage: {get_param: NovaEnableRbdBackend}
            # TUNNELLED mode provides a security enhancement when shared storage
            # is in use, but it is not supported without shared storage.
            # See https://bugzilla.redhat.com/show_bug.cgi?id=1301986#c12
            # In future versions of QEMU (2.6, mostly), danpb's native
            # encryption work will obsolete the need to use TUNNELLED transport
            # mode.
            nova::migration::live_migration_tunnelled: {get_param: NovaEnableRbdBackend}
            nova::compute::neutron::libvirt_vif_driver: {get_param: NovaComputeLibvirtVifDriver}
            # NOTE: the bind IP is resolved in Heat by replacing the network name
            # with the local node IP for the given network; replacement examples
            # (e.g. for internal_api):
            # internal_api -> IP
            # internal_api_uri -> [IP]
            # internal_api_subnet -> IP/CIDR
            nova::compute::vncserver_proxyclient_address: {get_param: [ServiceNetMap, NovaVncProxyNetwork]}
            nova::compute::vncproxy_host: {get_param: [EndpointMap, NovaPublic, host_nobrackets]}
            nova::vncproxy::common::vncproxy_protocol: {get_param: [EndpointMap, NovaVNCProxyPublic, protocol]}
            nova::vncproxy::common::vncproxy_host: {get_param: [EndpointMap, NovaVNCProxyPublic, host_nobrackets]}
            nova::vncproxy::common::vncproxy_port: {get_param: [EndpointMap, NovaVNCProxyPublic, port]}
      step_config: |
        # TODO(emilien): figure out how to deal with the libvirt profile.
        # We'll probably treat it like we do with Neutron plugins.
        # Until then, just include it in the default nova-compute role.
        include tripleo::profile::base::nova::compute::libvirt
      service_config_settings:
        collectd:
          tripleo.collectd.plugins.nova_compute:
            - virt
          collectd::plugins::virt::connection: "qemu:///system"
      upgrade_tasks:
        - name: Stop nova-compute service
          tags: step1
          service: name=openstack-nova-compute state=stopped
        # If not already set by puppet (e.g. a pre-Ocata version), set the
        # upgrade_level for compute to "auto"
        - name: Set compute upgrade level to auto
          tags: step3
          ini_file:
            str_replace:
              template: "dest=/etc/nova/nova.conf section=upgrade_levels option=compute value=LEVEL"
              params:
                LEVEL: {get_param: UpgradeLevelNovaCompute}
        - name: Install openstack-nova-migration
          tags: step3
          yum: name=openstack-nova-migration state=latest
        - name: Start nova-compute service
          tags: step6
          service: name=openstack-nova-compute state=started
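        # The stepN tags above are consumed by the upgrade workflow, which runs
        # the tagged tasks step by step: the service is stopped early (step1),
        # reconfigured and packages installed mid-way (step3), and started
        # again at the end (step6).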