# Change flat network name for nosdn fdio scenario
# [apex-tripleo-heat-templates.git] / puppet / services / nova-compute.yaml
heat_template_version: pike

description: >
  OpenStack Nova Compute service configured with Puppet
parameters:
  ServiceData:
    default: {}
    description: Dictionary packing service data
    type: json
  ServiceNetMap:
    default: {}
    description: Mapping of service_name -> network name. Typically set
                 via parameter_defaults in the resource registry.  This
                 mapping overrides those in ServiceNetMapDefaults.
    type: json
  DefaultPasswords:
    default: {}
    type: json
  RoleName:
    default: ''
    description: Role name on which the service is applied
    type: string
  RoleParameters:
    default: {}
    description: Parameters specific to the role
    type: json
  EndpointMap:
    default: {}
    description: Mapping of service endpoint -> protocol. Typically set
                 via parameter_defaults in the resource registry.
    type: json
  NovaRbdPoolName:
    default: vms
    type: string
  CephClientUserName:
    default: openstack
    type: string
  CephClientKey:
    description: The Ceph client key. Can be created with ceph-authtool --gen-print-key.
    type: string
    hidden: true
  CephClusterFSID:
    type: string
    description: The Ceph cluster FSID. Must be a UUID.
  CinderEnableNfsBackend:
    default: false
    description: Whether to enable or not the NFS backend for Cinder
    type: boolean
  CinderEnableRbdBackend:
    default: false
    description: Whether to enable or not the Rbd backend for Cinder
    type: boolean
  NovaEnableRbdBackend:
    default: false
    description: Whether to enable or not the Rbd backend for Nova
    type: boolean
  NovaComputeLibvirtVifDriver:
    default: ''
    description: Libvirt VIF driver configuration for the network
    type: string
  NovaPCIPassthrough:
    description: >
      List of PCI Passthrough whitelist parameters.
      Example -
      NovaPCIPassthrough:
        - vendor_id: "8086"
          product_id: "154c"
          address: "0000:05:00.0"
          physical_network: "datacentre"
      For different formats, refer to the nova.conf documentation for
      pci_passthrough_whitelist configuration
    type: json
    default: ''
  NovaVcpuPinSet:
    description: >
      A list or range of physical CPU cores to reserve for virtual machine
      processes.
      Ex. NovaVcpuPinSet: ['4-12','^8'] will reserve cores from 4-12 excluding 8
    type: comma_delimited_list
    default: []
  NovaReservedHostMemory:
    description: >
      Reserved RAM for host processes.
    type: number
    default: 4096
    constraints:
      - range: { min: 512 }
  MonitoringSubscriptionNovaCompute:
    default: 'overcloud-nova-compute'
    type: string
  NovaComputeLoggingSource:
    type: json
    default:
      tag: openstack.nova.compute
      path: /var/log/nova/nova-compute.log
  UpgradeLevelNovaCompute:
    type: string
    description: Nova Compute upgrade level
    default: ''
  MigrationSshKey:
    type: json
    description: >
      SSH key for migration.
      Expects a dictionary with keys 'public_key' and 'private_key'.
      Values should be identical to SSH public/private key files.
    default:
      public_key: ''
      private_key: ''
  MigrationSshPort:
    default: 2022
    description: Target port for migration over ssh
    type: number

resources:
  NovaBase:
    type: ./nova-base.yaml
    properties:
      ServiceData: {get_param: ServiceData}
      ServiceNetMap: {get_param: ServiceNetMap}
      DefaultPasswords: {get_param: DefaultPasswords}
      EndpointMap: {get_param: EndpointMap}
      RoleName: {get_param: RoleName}
      RoleParameters: {get_param: RoleParameters}

  # Merging role-specific parameters (RoleParameters) with the default parameters.
  # RoleParameters will have the precedence over the default parameters.
  RoleParametersValue:
    type: OS::Heat::Value
    properties:
      type: json
      value:
        map_replace:
          - map_replace:
            - nova::compute::vcpu_pin_set: NovaVcpuPinSet
              nova::compute::reserved_host_memory: NovaReservedHostMemory
            - values: {get_param: [RoleParameters]}
          - values:
              NovaVcpuPinSet: {get_param: NovaVcpuPinSet}
              NovaReservedHostMemory: {get_param: NovaReservedHostMemory}

outputs:
  role_data:
    description: Role data for the Nova Compute service.
    value:
      service_name: nova_compute
      monitoring_subscription: {get_param: MonitoringSubscriptionNovaCompute}
      logging_source: {get_param: NovaComputeLoggingSource}
      logging_groups:
        - nova
      config_settings:
        map_merge:
          - get_attr: [NovaBase, role_data, config_settings]
          - get_attr: [RoleParametersValue, value]
          - nova::compute::libvirt::manage_libvirt_services: false
            nova::compute::pci_passthrough:
              str_replace:
                template: "JSON_PARAM"
                params:
                  map_replace:
                    - map_replace:
                      - JSON_PARAM: NovaPCIPassthrough
                      - values: {get_param: [RoleParameters]}
                    - values:
                        NovaPCIPassthrough: {get_param: NovaPCIPassthrough}
            # we manage migration in nova common puppet profile
            nova::compute::libvirt::migration_support: false
            tripleo::profile::base::nova::migration::client::nova_compute_enabled: true
            tripleo::profile::base::nova::migration::client::ssh_private_key: {get_param: [ MigrationSshKey, private_key ]}
            tripleo::profile::base::nova::migration::client::ssh_port: {get_param: MigrationSshPort}
            nova::compute::rbd::libvirt_images_rbd_pool: {get_param: NovaRbdPoolName}
            nova::compute::rbd::libvirt_rbd_user: {get_param: CephClientUserName}
            nova::compute::rbd::rbd_keyring:
              list_join:
              - '.'
              - - 'client'
                - {get_param: CephClientUserName}
            tripleo::profile::base::nova::compute::cinder_nfs_backend: {get_param: CinderEnableNfsBackend}
            rbd_persistent_storage: {get_param: CinderEnableRbdBackend}
            nova::compute::rbd::libvirt_rbd_secret_key: {get_param: CephClientKey}
            nova::compute::rbd::libvirt_rbd_secret_uuid: {get_param: CephClusterFSID}
            nova::compute::instance_usage_audit: true
            nova::compute::instance_usage_audit_period: 'hour'
            nova::compute::rbd::ephemeral_storage: {get_param: NovaEnableRbdBackend}
            # TUNNELLED mode provides a security enhancement when using shared
            # storage but is not supported when not using shared storage.
            # See https://bugzilla.redhat.com/show_bug.cgi?id=1301986#c12
            # In future versions of QEMU (2.6, mostly), danpb's native
            # encryption work will obsolete the need to use TUNNELLED transport
            # mode.
            nova::migration::live_migration_tunnelled: {get_param: NovaEnableRbdBackend}
            nova::compute::neutron::libvirt_vif_driver: {get_param: NovaComputeLibvirtVifDriver}
            # NOTE: bind IP is found in Heat replacing the network name with the
            # local node IP for the given network; replacement examples
            # (eg. for internal_api):
            # internal_api -> IP
            # internal_api_uri -> [IP]
            # internal_api_subnet -> IP/CIDR
            nova::compute::vncserver_proxyclient_address: {get_param: [ServiceNetMap, NovaVncProxyNetwork]}
            nova::compute::vncproxy_host: {get_param: [EndpointMap, NovaPublic, host_nobrackets]}
            nova::vncproxy::common::vncproxy_protocol: {get_param: [EndpointMap, NovaVNCProxyPublic, protocol]}
            nova::vncproxy::common::vncproxy_host: {get_param: [EndpointMap, NovaVNCProxyPublic, host_nobrackets]}
            nova::vncproxy::common::vncproxy_port: {get_param: [EndpointMap, NovaVNCProxyPublic, port]}
      step_config: |
        # TODO(emilien): figure how to deal with libvirt profile.
        # We'll probably treat it like we do with Neutron plugins.
        # Until then, just include it in the default nova-compute role.
        include tripleo::profile::base::nova::compute::libvirt
      service_config_settings:
        collectd:
          tripleo.collectd.plugins.nova_compute:
            - virt
          collectd::plugin::virt::connection: 'qemu:///system'
      upgrade_tasks:
        - name: Stop nova-compute service
          tags: step1
          service: name=openstack-nova-compute state=stopped
        # If not already set by puppet (e.g a pre-ocata version), set the
        # upgrade_level for compute to "auto"
        - name: Set compute upgrade level to auto
          tags: step3
          ini_file:
            str_replace:
              template: "dest=/etc/nova/nova.conf section=upgrade_levels option=compute value=LEVEL"
              params:
                LEVEL: {get_param: UpgradeLevelNovaCompute}
        - name: install openstack-nova-migration
          tags: step3
          yum: name=openstack-nova-migration state=latest
        - name: Start nova-compute service
          tags: step6
          service: name=openstack-nova-compute state=started