# Merge "Add support for node groups in NetConfigDataLookup"
# [apex-tripleo-heat-templates.git] / puppet / services / nova-compute.yaml
heat_template_version: ocata

description: >
  OpenStack Nova Compute service configured with Puppet

# Input parameters for this service template. ServiceNetMap, DefaultPasswords
# and EndpointMap are the standard trio passed to every composable service.
parameters:
  ServiceNetMap:
    default: {}
    description: Mapping of service_name -> network name. Typically set
                 via parameter_defaults in the resource registry.  This
                 mapping overrides those in ServiceNetMapDefaults.
    type: json
  DefaultPasswords:
    default: {}
    type: json
  EndpointMap:
    default: {}
    description: Mapping of service endpoint -> protocol. Typically set
                 via parameter_defaults in the resource registry.
    type: json
  NovaRbdPoolName:
    default: vms
    type: string
  CephClientUserName:
    default: openstack
    type: string
  CinderEnableNfsBackend:
    default: false
    description: Whether to enable or not the NFS backend for Cinder
    type: boolean
  CinderEnableRbdBackend:
    default: false
    description: Whether to enable or not the Rbd backend for Cinder
    type: boolean
  NovaEnableRbdBackend:
    default: false
    description: Whether to enable or not the Rbd backend for Nova
    type: boolean
  NovaComputeLibvirtVifDriver:
    default: ''
    description: Libvirt VIF driver configuration for the network
    type: string
  NovaPCIPassthrough:
    description: >
      List of PCI Passthrough whitelist parameters.
      Example -
      NovaPCIPassthrough:
        - vendor_id: "8086"
          product_id: "154c"
          address: "0000:05:00.0"
          physical_network: "datacentre"
      For different formats, refer to the nova.conf documentation for
      pci_passthrough_whitelist configuration
    type: json
    default: {}
  NovaVcpuPinSet:
    description: >
      A list or range of physical CPU cores to reserve for virtual machine
      processes.
      Ex. NovaVcpuPinSet: ['4-12','^8'] will reserve cores from 4-12 excluding 8
    type: comma_delimited_list
    default: []
  NovaReservedHostMemory:
    description: >
      Reserved RAM for host processes.
    type: number
    default: 2048
    constraints:
      - range: { min: 512 }
  MonitoringSubscriptionNovaCompute:
    default: 'overcloud-nova-compute'
    type: string
  NovaComputeLoggingSource:
    type: json
    default:
      tag: openstack.nova.compute
      path: /var/log/nova/nova-compute.log
  UpgradeLevelNovaCompute:
    type: string
    description: Nova Compute upgrade level
    default: auto

resources:
  # Nested stack providing the base Nova configuration shared by all
  # Nova services; its config_settings are merged into ours below.
  NovaBase:
    type: ./nova-base.yaml
    properties:
      ServiceNetMap: {get_param: ServiceNetMap}
      DefaultPasswords: {get_param: DefaultPasswords}
      EndpointMap: {get_param: EndpointMap}

outputs:
  role_data:
    description: Role data for the Nova Compute service.
    value:
      service_name: nova_compute
      monitoring_subscription: {get_param: MonitoringSubscriptionNovaCompute}
      logging_source: {get_param: NovaComputeLoggingSource}
      logging_groups:
        - nova
      config_settings:
        map_merge:
          - get_attr: [NovaBase, role_data, config_settings]
          - nova::compute::libvirt::manage_libvirt_services: false
            nova::compute::pci_passthrough: {get_param: NovaPCIPassthrough}
            nova::compute::vcpu_pin_set: {get_param: NovaVcpuPinSet}
            nova::compute::reserved_host_memory: {get_param: NovaReservedHostMemory}
            # we manage migration in nova common puppet profile
            nova::compute::libvirt::migration_support: false
            tripleo::profile::base::nova::manage_migration: true
            tripleo::profile::base::nova::nova_compute_enabled: true
            nova::compute::rbd::libvirt_images_rbd_pool: {get_param: NovaRbdPoolName}
            nova::compute::rbd::libvirt_rbd_user: {get_param: CephClientUserName}
            tripleo::profile::base::nova::compute::cinder_nfs_backend: {get_param: CinderEnableNfsBackend}
            rbd_persistent_storage: {get_param: CinderEnableRbdBackend}
            # Keyring name is "client.<CephClientUserName>", e.g. client.openstack
            nova::compute::rbd::rbd_keyring:
              list_join:
              - '.'
              - - 'client'
                - {get_param: CephClientUserName}
            nova::compute::rbd::libvirt_rbd_secret_uuid: "%{hiera('ceph::profile::params::fsid')}"
            nova::compute::instance_usage_audit: true
            nova::compute::instance_usage_audit_period: 'hour'
            nova::compute::rbd::ephemeral_storage: {get_param: NovaEnableRbdBackend}
            # TUNNELLED mode provides a security enhancement when using shared
            # storage but is not supported when not using shared storage.
            # See https://bugzilla.redhat.com/show_bug.cgi?id=1301986#c12
            # In future versions of QEMU (2.6, mostly), danpb's native
            # encryption work will obsolete the need to use TUNNELLED transport
            # mode.
            nova::migration::live_migration_tunnelled: {get_param: NovaEnableRbdBackend}
            nova::compute::neutron::libvirt_vif_driver: {get_param: NovaComputeLibvirtVifDriver}
            # NOTE: bind IP is found in Heat replacing the network name with the
            # local node IP for the given network; replacement examples
            # (eg. for internal_api):
            # internal_api -> IP
            # internal_api_uri -> [IP]
            # internal_api_subnet - > IP/CIDR
            nova::compute::vncserver_proxyclient_address: {get_param: [ServiceNetMap, NovaVncProxyNetwork]}
            nova::compute::vncproxy_host: {get_param: [EndpointMap, NovaPublic, host_nobrackets]}
            nova::vncproxy::common::vncproxy_protocol: {get_param: [EndpointMap, NovaVNCProxyPublic, protocol]}
            nova::vncproxy::common::vncproxy_host: {get_param: [EndpointMap, NovaVNCProxyPublic, host_nobrackets]}
            nova::vncproxy::common::vncproxy_port: {get_param: [EndpointMap, NovaVNCProxyPublic, port]}
      step_config: |
        # TODO(emilien): figure how to deal with libvirt profile.
        # We'll probably treat it like we do with Neutron plugins.
        # Until then, just include it in the default nova-compute role.
        include tripleo::profile::base::nova::compute::libvirt
      service_config_settings:
        collectd:
          tripleo.collectd.plugins.nova_compute:
            - virt
          collectd::plugins::virt::connection: "qemu:///system"
      upgrade_tasks:
        - name: Stop nova-compute service
          tags: step2
          service: name=openstack-nova-compute state=stopped
        # If not already set by puppet (e.g a pre-ocata version), set the
        # upgrade_level for compute to "auto"
        - name: Set compute upgrade level to auto
          tags: step3
          ini_file:
            str_replace:
              template: "dest=/etc/nova/nova.conf section=upgrade_levels option=compute value=LEVEL"
              params:
                LEVEL: {get_param: UpgradeLevelNovaCompute}
        - name: Start nova-compute service
          tags: step6
          service: name=openstack-nova-compute state=started