Fix race condition with nova privsep utime
fuel.git: mcp/reclass/classes/cluster/mcp-common-ha/openstack_control.yml.j2
##############################################################################
# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---
classes:
  - system.linux.system.repo.mcp.mirror.v1.openstack
  - system.ceilometer.client
  - system.memcached.server.single
  - system.keystone.server.cluster
  - system.keystone.server.wsgi
  - system.glance.control.cluster
  - system.nova.control.cluster
  - system.cinder.control.cluster
  - system.cinder.control.backend.lvm
  - system.heat.server.cluster
  - system.designate.server.cluster
  - system.designate.server.backend.bind
  - system.barbican.server.cluster
  - system.apache.server.site.barbican
  - service.barbican.server.plugin.simple_crypto
  - system.apache.server.single
  - system.bind.server.single
  - system.haproxy.proxy.listen.openstack.placement
  - system.glusterfs.client.cluster
  - system.glusterfs.client.volume.glance
  - system.glusterfs.client.volume.keystone
  - cluster.mcp-common-ha.glusterfs_repo
{%- if not conf.MCP_VCP %}
  # sync from kvm
  - service.keepalived.cluster.single
  - system.glusterfs.server.volume.glance
  - system.glusterfs.server.volume.keystone
  - system.glusterfs.server.cluster
  # NOTE(armband): Disabled for novcp
  # - system.salt.control.virt
  # - system.salt.control.cluster.openstack_control_cluster
  # - system.salt.control.cluster.openstack_proxy_cluster
  # - system.salt.control.cluster.openstack_database_cluster
  # - system.salt.control.cluster.openstack_message_queue_cluster
  # - system.salt.control.cluster.openstack_telemetry_cluster
  # - system.salt.control.cluster.stacklight_server_cluster
  # - system.salt.control.cluster.stacklight_log_cluster
  # - system.salt.control.cluster.stacklight_telemetry_cluster
  - cluster.mcp-common-ha.infra.kvm_pdf
  - cluster.all-mcp-arch-common.opnfv.maas_proxy
  - cluster.all-mcp-arch-common.opnfv.lab_proxy_pdf
{%- endif %}
parameters:
  _param:
{%- if not conf.MCP_VCP %}
    linux_system_codename: xenial  # sync from kvm
    # For NOVCP, we switch keepalived VIPs, to keep cluster_vip_address in ctl
    single_nic: br-ctl  # for keepalived_vip_interface interpolation
    control_nic: ~      # Dummy value to keep reclass 1.5.2 happy
    keepalived_openstack_web_public_vip_address: ${_param:openstack_proxy_address}
    keepalived_openstack_web_public_vip_interface: br-ex
{%- endif %}
    keepalived_vip_interface: ${_param:single_nic}
    keepalived_vip_virtual_router_id: 50
    cluster_vip_address: ${_param:openstack_control_address}
    cluster_local_address: ${_param:single_address}
    cluster_node01_hostname: ${_param:openstack_control_node01_hostname}
    cluster_node01_address: ${_param:openstack_control_node01_address}
    cluster_node02_hostname: ${_param:openstack_control_node02_hostname}
    cluster_node02_address: ${_param:openstack_control_node02_address}
    cluster_node03_hostname: ${_param:openstack_control_node03_hostname}
    cluster_node03_address: ${_param:openstack_control_node03_address}
    nova_vncproxy_url: https://${_param:cluster_public_host}:6080
    barbican_integration_enabled: 'false'
    fernet_rotation_driver: 'shared_filesystem'
    credential_rotation_driver: 'shared_filesystem'
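  # The mapping anchored as &db_conn_recycle_time under nova:controller below
  # is pulled into the other service sections via YAML merge keys, so each
  # '<<: *db_conn_recycle_time' entry (cinder, neutron, keystone, glance, and
  # heat when MCP_VCP is set) is equivalent to repeating:
  #   database:
  #     connection_recycle_time: ${_param:db_connection_recycle_time}
  #   barbican:
  #     enabled: ${_param:barbican_integration_enabled}
  # Keys set explicitly in a section override the merged-in values.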
  nova:
    controller: &db_conn_recycle_time
      database:
        connection_recycle_time: ${_param:db_connection_recycle_time}
      barbican:
        enabled: ${_param:barbican_integration_enabled}
  cinder:
    controller:
      <<: *db_conn_recycle_time
  neutron:
    server:
      <<: *db_conn_recycle_time
      vlan_aware_vms: true
      root_helper_daemon: false
      global_physnet_mtu: ${_param:interface_mtu}
      backend:
        external_mtu: ${_param:interface_mtu}
  keystone:
    server:
      <<: *db_conn_recycle_time
      cacert: /etc/ssl/certs/mcp_os_cacert
      openrc_extra:
        volume_device_name: sdc
  glance:
    server:
      <<: *db_conn_recycle_time
      identity:
        barbican_endpoint: ${barbican:server:host_href}
{%- if conf.MCP_VCP %}
  heat:
    server:
      <<: *db_conn_recycle_time
      metadata:
        host: ${_param:openstack_proxy_control_address}
        port: 8000
        protocol: http
      waitcondition:
        host: ${_param:openstack_proxy_control_address}
        port: 8000
        protocol: http
      watch:
        host: ${_param:openstack_proxy_control_address}
        port: 8003
        protocol: http
{%- else %}
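  # NOVCP: the control nodes are bare-metal hosts, so settings normally
  # carried by the dedicated kvm role are synced here (libvirtd, the br-ex
  # public bridge and the GlusterFS server volumes below).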
  libvirt:
    server:
      service: libvirtd
      config_sys: /etc/default/libvirtd
      unix_sock_group: libvirt
  linux:
    network:
      # Add public IPs here as overrides, no need to fork another kvm_pdf.j2
      interface:
        br-ex:
          address: ${_param:external_address}
          proto: static
  apache:
    server:
      bind:
        listen_default_ports: false
  # sync from common-ha kvm role
  glusterfs:
    server:
      service: glusterd
      volumes:
        nova_instances:
          storage: /srv/glusterfs/nova_instances
          replica: 3
          bricks:
            - ${_param:cluster_node01_address}:/srv/glusterfs/nova_instances
            - ${_param:cluster_node02_address}:/srv/glusterfs/nova_instances
            - ${_param:cluster_node03_address}:/srv/glusterfs/nova_instances
          options:
            cluster.readdir-optimize: 'True'
            nfs.disable: 'True'
            network.remote-dio: 'True'
            cluster.favorite-child-policy: mtime
            diagnostics.client-log-level: WARNING
            diagnostics.brick-log-level: WARNING
{%- endif %}
  haproxy:
    proxy:
      listen:
        heat_cloudwatch_api:
          enabled: false
  barbican:
    server:
      ks_notifications_enable: true
      store:
        software:
          crypto_plugin: simple_crypto
          store_plugin: store_crypto
          global_default: true
      database:
        connection_recycle_time: ${_param:db_connection_recycle_time}
        host: ${_param:openstack_database_address}
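  # BIND's rndc control channel is exposed on port 953 to all three control
  # nodes and secured with the 'designate' key, so the Designate pool targets
  # defined further down can manage zones on the local BIND backend.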
  bind:
    server:
      control:
        mgmt:
          enabled: true
          bind:
            address: ${_param:single_address}
            port: 953
          allow:
            - ${_param:openstack_control_node01_address}
            - ${_param:openstack_control_node02_address}
            - ${_param:openstack_control_node03_address}
          keys:
            - designate
  designate:
    _support:
      sphinx:
        enabled: False  # Workaround broken meta/sphinx.yml in salt-formula-designate
    server:
      pools:
        default:
          description: 'test pool'
          targets:
            default:
              description: 'test target1'
            default1:
              type: ${_param:designate_pool_target_type}
              description: 'test target2'
              masters: ${_param:designate_pool_target_masters}
              options:
                host: ${_param:openstack_control_node02_address}
                port: 53
                rndc_host: ${_param:openstack_control_node02_address}
                rndc_port: 953
                rndc_key_file: /etc/designate/rndc.key
            default2:
              type: ${_param:designate_pool_target_type}
              description: 'test target3'
              masters: ${_param:designate_pool_target_masters}
              options:
                host: ${_param:openstack_control_node03_address}
                port: 53
                rndc_host: ${_param:openstack_control_node03_address}
                rndc_port: 953
                rndc_key_file: /etc/designate/rndc.key