Merge "Add a new role for ComputeOvsDpdk and clean-up parameters"
[apex-tripleo-heat-templates.git] / puppet / services / nova-compute.yaml
heat_template_version: pike

description: >
  OpenStack Nova Compute service configured with Puppet

parameters:
  ServiceData:
    default: {}
    description: Dictionary packing service data
    type: json
  ServiceNetMap:
    default: {}
    description: Mapping of service_name -> network name. Typically set
                 via parameter_defaults in the resource registry. This
                 mapping overrides those in ServiceNetMapDefaults.
    type: json
  DefaultPasswords:
    default: {}
    type: json
  RoleName:
    default: ''
    description: Role name on which the service is applied
    type: string
  RoleParameters:
    default: {}
    description: Parameters specific to the role
    type: json
  EndpointMap:
    default: {}
    description: Mapping of service endpoint -> protocol. Typically set
                 via parameter_defaults in the resource registry.
    type: json
  NovaRbdPoolName:
    default: vms
    type: string
  CephClientUserName:
    default: openstack
    type: string
  CephClientKey:
    description: The Ceph client key. Can be created with ceph-authtool --gen-print-key.
                 Currently only used for external Ceph deployments to create the
                 openstack user keyring.
    type: string
    hidden: true
  CephClusterFSID:
    type: string
    description: The Ceph cluster FSID. Must be a UUID.
  CinderEnableNfsBackend:
    default: false
    description: Whether or not to enable the NFS backend for Cinder
    type: boolean
  CinderEnableRbdBackend:
    default: false
    description: Whether or not to enable the RBD backend for Cinder
    type: boolean
  NovaEnableRbdBackend:
    default: false
    description: Whether or not to enable the RBD backend for Nova
    type: boolean
  NovaComputeLibvirtVifDriver:
    default: ''
    description: Libvirt VIF driver configuration for the network
    type: string
  NovaPCIPassthrough:
    description: >
      List of PCI Passthrough whitelist parameters.
      Example -
      NovaPCIPassthrough:
        - vendor_id: "8086"
          product_id: "154c"
          address: "0000:05:00.0"
          physical_network: "datacentre"
      For different formats, refer to the nova.conf documentation for
      pci_passthrough_whitelist configuration.
    type: json
    default: ''
  NovaVcpuPinSet:
    description: >
      A list or range of physical CPU cores to reserve for virtual machine
      processes.
      Ex. NovaVcpuPinSet: ['4-12','^8'] will reserve cores 4-12, excluding core 8.
    type: comma_delimited_list
    default: []
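  # A hedged note on how the list is consumed (based on typical puppet-nova
  # behaviour, not spelled out in this template): the comma_delimited_list is
  # joined into nova.conf's vcpu_pin_set option, so ['4-12','^8'] would
  # typically end up as vcpu_pin_set=4-12,^8.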
  NovaReservedHostMemory:
    description: >
      Reserved RAM for host processes, in MB.
    type: number
    default: 4096
    constraints:
      - range: { min: 512 }
  MonitoringSubscriptionNovaCompute:
    default: 'overcloud-nova-compute'
    type: string
  NovaComputeLoggingSource:
    type: json
    default:
      tag: openstack.nova.compute
      path: /var/log/nova/nova-compute.log
  UpgradeLevelNovaCompute:
    type: string
    description: Nova Compute upgrade level
    default: auto
  MigrationSshKey:
    type: json
    description: >
      SSH key for migration.
      Expects a dictionary with keys 'public_key' and 'private_key'.
      Values should match the contents of the SSH public/private key files.
    default: {}
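  # A commented, hypothetical example of the expected dictionary shape (the key
  # material shown is illustrative only):
  #   MigrationSshKey:
  #     public_key: 'ssh-rsa AAAA... nova-migration'
  #     private_key: |
  #       -----BEGIN RSA PRIVATE KEY-----
  #       ...
  #       -----END RSA PRIVATE KEY-----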

resources:
  NovaBase:
    type: ./nova-base.yaml
    properties:
      ServiceData: {get_param: ServiceData}
      ServiceNetMap: {get_param: ServiceNetMap}
      DefaultPasswords: {get_param: DefaultPasswords}
      EndpointMap: {get_param: EndpointMap}
      RoleName: {get_param: RoleName}
      RoleParameters: {get_param: RoleParameters}

  # Merging role-specific parameters (RoleParameters) with the default parameters.
  # RoleParameters take precedence over the default parameters.
  RoleParametersValue:
    type: OS::Heat::Value
    properties:
      type: json
      value:
        map_replace:
          - map_replace:
            - nova::compute::vcpu_pin_set: NovaVcpuPinSet
              nova::compute::reserved_host_memory: NovaReservedHostMemory
            - values: {get_param: [RoleParameters]}
          - values:
              NovaVcpuPinSet: {get_param: NovaVcpuPinSet}
              NovaReservedHostMemory: {get_param: NovaReservedHostMemory}

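  # A commented walk-through of the nested map_replace in RoleParametersValue
  # above, assuming a hypothetical role override of NovaVcpuPinSet only:
  #   1. The inner map_replace substitutes the placeholder values
  #      (NovaVcpuPinSet, NovaReservedHostMemory) with any same-named keys
  #      found in RoleParameters, e.g. NovaVcpuPinSet -> ['2-10'].
  #   2. The outer map_replace then substitutes any placeholders that were not
  #      overridden with the template-wide parameter defaults, so
  #      nova::compute::reserved_host_memory still resolves to the
  #      NovaReservedHostMemory default of 4096.
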
outputs:
  role_data:
    description: Role data for the Nova Compute service.
    value:
      service_name: nova_compute
      monitoring_subscription: {get_param: MonitoringSubscriptionNovaCompute}
      logging_source: {get_param: NovaComputeLoggingSource}
      logging_groups:
        - nova
      config_settings:
        map_merge:
          - get_attr: [NovaBase, role_data, config_settings]
          - get_attr: [RoleParametersValue, value]
          - nova::compute::libvirt::manage_libvirt_services: false
            nova::compute::pci_passthrough:
              str_replace:
                template: "JSON_PARAM"
                params:
                  map_replace:
                    - map_replace:
                      - JSON_PARAM: NovaPCIPassthrough
                      - values: {get_param: [RoleParameters]}
                    - values:
                        NovaPCIPassthrough: {get_param: NovaPCIPassthrough}
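            # A hedged note on the str_replace above: it reuses the placeholder
            # pattern, so JSON_PARAM resolves to the role-specific
            # NovaPCIPassthrough from RoleParameters when one is set, otherwise
            # to the template default, and the selected JSON value is rendered
            # into the string that puppet-nova consumes for pci_passthrough.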
            # We manage migration in the nova common puppet profile.
            nova::compute::libvirt::migration_support: false
            tripleo::profile::base::nova::manage_migration: true
            tripleo::profile::base::nova::migration_ssh_key: {get_param: MigrationSshKey}
            tripleo::profile::base::nova::migration_ssh_localaddrs:
              - "%{hiera('cold_migration_ssh_inbound_addr')}"
              - "%{hiera('live_migration_ssh_inbound_addr')}"
            live_migration_ssh_inbound_addr: {get_param: [ServiceNetMap, NovaLibvirtNetwork]}
            cold_migration_ssh_inbound_addr: {get_param: [ServiceNetMap, NovaColdMigrationNetwork]}
            tripleo::profile::base::nova::nova_compute_enabled: true
            nova::compute::rbd::libvirt_images_rbd_pool: {get_param: NovaRbdPoolName}
            nova::compute::rbd::libvirt_rbd_user: {get_param: CephClientUserName}
            tripleo::profile::base::nova::compute::cinder_nfs_backend: {get_param: CinderEnableNfsBackend}
            rbd_persistent_storage: {get_param: CinderEnableRbdBackend}
            nova::compute::rbd::libvirt_rbd_secret_key: {get_param: CephClientKey}
            nova::compute::rbd::libvirt_rbd_secret_uuid: {get_param: CephClusterFSID}
            nova::compute::instance_usage_audit: true
            nova::compute::instance_usage_audit_period: 'hour'
            nova::compute::rbd::ephemeral_storage: {get_param: NovaEnableRbdBackend}
            # TUNNELLED mode provides a security enhancement when using shared
            # storage, but is not supported without shared storage.
            # See https://bugzilla.redhat.com/show_bug.cgi?id=1301986#c12
            # In future versions of QEMU (2.6, mostly), danpb's native
            # encryption work will remove the need for the TUNNELLED transport
            # mode.
            nova::migration::live_migration_tunnelled: {get_param: NovaEnableRbdBackend}
            nova::compute::neutron::libvirt_vif_driver: {get_param: NovaComputeLibvirtVifDriver}
            # NOTE: the bind IP is resolved in Heat by replacing the network name
            # with the local node IP for the given network; replacement examples
            # (e.g. for internal_api):
            # internal_api -> IP
            # internal_api_uri -> [IP]
            # internal_api_subnet -> IP/CIDR
            nova::compute::vncserver_proxyclient_address: {get_param: [ServiceNetMap, NovaVncProxyNetwork]}
            nova::compute::vncproxy_host: {get_param: [EndpointMap, NovaPublic, host_nobrackets]}
            nova::vncproxy::common::vncproxy_protocol: {get_param: [EndpointMap, NovaVNCProxyPublic, protocol]}
            nova::vncproxy::common::vncproxy_host: {get_param: [EndpointMap, NovaVNCProxyPublic, host_nobrackets]}
            nova::vncproxy::common::vncproxy_port: {get_param: [EndpointMap, NovaVNCProxyPublic, port]}
      step_config: |
        # TODO(emilien): figure out how to deal with the libvirt profile.
        # We'll probably treat it like we do with Neutron plugins.
        # Until then, just include it in the default nova-compute role.
        include tripleo::profile::base::nova::compute::libvirt
      service_config_settings:
        collectd:
          tripleo.collectd.plugins.nova_compute:
            - virt
          collectd::plugins::virt::connection: "qemu:///system"
      upgrade_tasks:
        - name: Stop nova-compute service
          tags: step1
          service: name=openstack-nova-compute state=stopped
        # If not already set by puppet (e.g. a pre-Ocata version), set the
        # upgrade_level for compute to "auto".
        - name: Set compute upgrade level to auto
          tags: step3
          ini_file:
            str_replace:
              template: "dest=/etc/nova/nova.conf section=upgrade_levels option=compute value=LEVEL"
              params:
                LEVEL: {get_param: UpgradeLevelNovaCompute}
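        # The str_replace above only builds the argument string for the
        # ini_file module; with the default UpgradeLevelNovaCompute of "auto"
        # it expands to:
        #   dest=/etc/nova/nova.conf section=upgrade_levels option=compute value=auto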
        - name: Install openstack-nova-migration
          tags: step3
          yum: name=openstack-nova-migration state=latest
        - name: Start nova-compute service
          tags: step6
          service: name=openstack-nova-compute state=started
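
# A commented, hypothetical example of how the RoleParameters consumed above
# can be fed from an environment file; the ComputeOvsDpdk role name and the
# values are assumptions for illustration only:
#   parameter_defaults:
#     ComputeOvsDpdkParameters:
#       NovaVcpuPinSet: ['4-12','^8']
#       NovaReservedHostMemory: 4096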