b1711436935b04da2b88a72e888a0755002eab18
[apex-tripleo-heat-templates.git] / puppet / services / nova-compute.yaml
heat_template_version: ocata

description: >
  OpenStack Nova Compute service configured with Puppet

parameters:
  # ServiceNetMap / DefaultPasswords / EndpointMap are the standard inputs
  # every TripleO service template receives; they are forwarded verbatim to
  # the NovaBase resource below.
  ServiceNetMap:
    default: {}
    description: Mapping of service_name -> network name. Typically set
                 via parameter_defaults in the resource registry.  This
                 mapping overrides those in ServiceNetMapDefaults.
    type: json
  DefaultPasswords:
    default: {}
    type: json
  EndpointMap:
    default: {}
    description: Mapping of service endpoint -> protocol. Typically set
                 via parameter_defaults in the resource registry.
    type: json
  # Ceph/RBD settings, consumed when the RBD backend is enabled below.
  NovaRbdPoolName:
    default: vms
    type: string
  CephClientUserName:
    default: openstack
    type: string
  CinderEnableNfsBackend:
    default: false
    description: Whether to enable or not the NFS backend for Cinder
    type: boolean
  CinderEnableRbdBackend:
    default: false
    description: Whether to enable or not the Rbd backend for Cinder
    type: boolean
  NovaEnableRbdBackend:
    default: false
    description: Whether to enable or not the Rbd backend for Nova
    type: boolean
  NovaComputeLibvirtVifDriver:
    default: ''
    description: Libvirt VIF driver configuration for the network
    type: string
  NovaPCIPassthrough:
    description: >
      List of PCI Passthrough whitelist parameters.
      Example -
      NovaPCIPassthrough:
        - vendor_id: "8086"
          product_id: "154c"
          address: "0000:05:00.0"
          physical_network: "datacentre"
      For different formats, refer to the nova.conf documentation for
      pci_passthrough_whitelist configuration
    type: json
    # NOTE(review): default is an empty string although the declared type is
    # json — presumably an intentional "unset" sentinel; confirm Heat and the
    # consuming puppet class both tolerate '' here.
    default: ''
  NovaVcpuPinSet:
    description: >
      A list or range of physical CPU cores to reserve for virtual machine
      processes.
      Ex. NovaVcpuPinSet: ['4-12','^8'] will reserve cores from 4-12 excluding 8
    type: comma_delimited_list
    default: []
  NovaReservedHostMemory:
    description: >
      Reserved RAM for host processes.
    type: number
    default: 2048
    constraints:
      - range: { min: 512 }
  MonitoringSubscriptionNovaCompute:
    default: 'overcloud-nova-compute'
    type: string
  # Log-source definition (tag + file path) exposed via the role_data
  # logging_source output for the log aggregation service.
  NovaComputeLoggingSource:
    type: json
    default:
      tag: openstack.nova.compute
      path: /var/log/nova/nova-compute.log
  # Written to [upgrade_levels]/compute in nova.conf by the step3 upgrade
  # task in the outputs section.
  UpgradeLevelNovaCompute:
    type: string
    description: Nova Compute upgrade level
    default: auto
  MigrationSshKey:
    type: json
    description: >
      SSH key for migration.
      Expects a dictionary with keys 'public_key' and 'private_key'.
      Values should be identical to SSH public/private key files.
    default: {}
89
resources:
  # Shared Nova configuration; its role_data config_settings are the base
  # layer of the map_merge in this template's outputs.
  NovaBase:
    type: ./nova-base.yaml
    properties:
      ServiceNetMap: {get_param: ServiceNetMap}
      DefaultPasswords: {get_param: DefaultPasswords}
      EndpointMap: {get_param: EndpointMap}
97
outputs:
  role_data:
    description: Role data for the Nova Compute service.
    value:
      service_name: nova_compute
      monitoring_subscription: {get_param: MonitoringSubscriptionNovaCompute}
      logging_source: {get_param: NovaComputeLoggingSource}
      logging_groups:
        - nova
      # Hieradata for the puppet-nova / puppet-tripleo profiles; entries here
      # are merged over (and override) the nova-base.yaml defaults.
      config_settings:
        map_merge:
          - get_attr: [NovaBase, role_data, config_settings]
          - nova::compute::libvirt::manage_libvirt_services: false
            # str_replace serializes the json parameter into the JSON string
            # expected for nova.conf pci_passthrough_whitelist.
            nova::compute::pci_passthrough:
              str_replace:
                template: "JSON_PARAM"
                params:
                  JSON_PARAM: {get_param: NovaPCIPassthrough}
            nova::compute::vcpu_pin_set: {get_param: NovaVcpuPinSet}
            nova::compute::reserved_host_memory: {get_param: NovaReservedHostMemory}
            # we manage migration in nova common puppet profile
            nova::compute::libvirt::migration_support: false
            tripleo::profile::base::nova::manage_migration: true
            tripleo::profile::base::nova::migration_ssh_key: {get_param: MigrationSshKey}
            tripleo::profile::base::nova::nova_compute_enabled: true
            nova::compute::rbd::libvirt_images_rbd_pool: {get_param: NovaRbdPoolName}
            nova::compute::rbd::libvirt_rbd_user: {get_param: CephClientUserName}
            tripleo::profile::base::nova::compute::cinder_nfs_backend: {get_param: CinderEnableNfsBackend}
            rbd_persistent_storage: {get_param: CinderEnableRbdBackend}
            # Builds "client.<CephClientUserName>", e.g. "client.openstack".
            nova::compute::rbd::rbd_keyring:
              list_join:
                - '.'
                - - 'client'
                  - {get_param: CephClientUserName}
            nova::compute::rbd::libvirt_rbd_secret_uuid: "%{hiera('ceph::profile::params::fsid')}"
            nova::compute::instance_usage_audit: true
            nova::compute::instance_usage_audit_period: 'hour'
            nova::compute::rbd::ephemeral_storage: {get_param: NovaEnableRbdBackend}
            # TUNNELLED mode provides a security enhancement when using shared
            # storage but is not supported when not using shared storage.
            # See https://bugzilla.redhat.com/show_bug.cgi?id=1301986#c12
            # In future versions of QEMU (2.6, mostly), danpb's native
            # encryption work will obsolete the need to use TUNNELLED transport
            # mode.
            nova::migration::live_migration_tunnelled: {get_param: NovaEnableRbdBackend}
            nova::compute::neutron::libvirt_vif_driver: {get_param: NovaComputeLibvirtVifDriver}
            # NOTE: bind IP is found in Heat replacing the network name with the
            # local node IP for the given network; replacement examples
            # (eg. for internal_api):
            # internal_api -> IP
            # internal_api_uri -> [IP]
            # internal_api_subnet -> IP/CIDR
            nova::compute::vncserver_proxyclient_address: {get_param: [ServiceNetMap, NovaVncProxyNetwork]}
            nova::compute::vncproxy_host: {get_param: [EndpointMap, NovaPublic, host_nobrackets]}
            nova::vncproxy::common::vncproxy_protocol: {get_param: [EndpointMap, NovaVNCProxyPublic, protocol]}
            nova::vncproxy::common::vncproxy_host: {get_param: [EndpointMap, NovaVNCProxyPublic, host_nobrackets]}
            nova::vncproxy::common::vncproxy_port: {get_param: [EndpointMap, NovaVNCProxyPublic, port]}
      step_config: |
        # TODO(emilien): figure how to deal with libvirt profile.
        # We'll probably treat it like we do with Neutron plugins.
        # Until then, just include it in the default nova-compute role.
        include tripleo::profile::base::nova::compute::libvirt
      service_config_settings:
        collectd:
          tripleo.collectd.plugins.nova_compute:
            - virt
          collectd::plugins::virt::connection: "qemu:///system"
      # Ansible tasks for major upgrades, gated by stepN tags.
      upgrade_tasks:
        - name: Stop nova-compute service
          tags: step1
          service: name=openstack-nova-compute state=stopped
        # If not already set by puppet (e.g a pre-ocata version), set the
        # upgrade_level for compute to "auto"
        - name: Set compute upgrade level to auto
          tags: step3
          ini_file:
            str_replace:
              template: "dest=/etc/nova/nova.conf section=upgrade_levels option=compute value=LEVEL"
              params:
                LEVEL: {get_param: UpgradeLevelNovaCompute}
        - name: Start nova-compute service
          tags: step6
          service: name=openstack-nova-compute state=started