##############################################################################
# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---
classes:
  - system.linux.system.repo.mcp.mirror.v1.openstack
  - system.ceilometer.client
  - system.memcached.server.single
  - system.keystone.server.cluster
  - system.keystone.server.wsgi
  - system.glance.control.cluster
  - system.nova.control.cluster
  - system.cinder.control.cluster
  - system.cinder.control.backend.lvm
  - system.heat.server.cluster
  - system.designate.server.cluster
  - system.designate.server.backend.bind
  - system.barbican.server.cluster
  - system.apache.server.site.barbican
  - service.barbican.server.plugin.simple_crypto
  - system.apache.server.single
  - system.bind.server.single
  - system.haproxy.proxy.listen.openstack.placement
  - system.glusterfs.client.cluster
  - system.glusterfs.client.volume.glance
  - system.glusterfs.client.volume.keystone
  - cluster.mcp-common-ha.glusterfs_repo
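  # When the virtual control plane is disabled (NOVCP), the OpenStack control
  # services run directly on the baremetal hosts, so classes normally applied
  # to the kvm role are pulled in here as well.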
{%- if not conf.MCP_VCP %}
  # sync from kvm
  - service.keepalived.cluster.single
  - system.glusterfs.server.volume.glance
  - system.glusterfs.server.volume.keystone
  - system.glusterfs.server.cluster
  # NOTE(armband): Disabled for novcp
  # - system.salt.control.virt
  # - system.salt.control.cluster.openstack_control_cluster
  # - system.salt.control.cluster.openstack_proxy_cluster
  # - system.salt.control.cluster.openstack_database_cluster
  # - system.salt.control.cluster.openstack_message_queue_cluster
  # - system.salt.control.cluster.openstack_telemetry_cluster
  # - system.salt.control.cluster.stacklight_server_cluster
  # - system.salt.control.cluster.stacklight_log_cluster
  # - system.salt.control.cluster.stacklight_telemetry_cluster
  - cluster.mcp-common-ha.infra.kvm_pdf
  - cluster.all-mcp-arch-common.opnfv.maas_proxy
  - cluster.all-mcp-arch-common.opnfv.lab_proxy_pdf
{%- endif %}
parameters:
  _param:
{%- if not conf.MCP_VCP %}
    linux_system_codename: xenial  # sync from kvm
    # For NOVCP, we switch keepalived VIPs to keep cluster_vip_address on ctl
    single_nic: br-ctl  # for keepalived_vip_interface interpolation
    control_nic: ~      # Dummy value to keep reclass 1.5.2 happy
    keepalived_openstack_web_public_vip_address: ${_param:openstack_proxy_address}
    keepalived_openstack_web_public_vip_interface: br-ex
{%- endif %}
    keepalived_vip_interface: ${_param:single_nic}
    keepalived_vip_virtual_router_id: 50
    cluster_vip_address: ${_param:openstack_control_address}
    cluster_local_address: ${_param:single_address}
    cluster_node01_hostname: ${_param:openstack_control_node01_hostname}
    cluster_node01_address: ${_param:openstack_control_node01_address}
    cluster_node02_hostname: ${_param:openstack_control_node02_hostname}
    cluster_node02_address: ${_param:openstack_control_node02_address}
    cluster_node03_hostname: ${_param:openstack_control_node03_hostname}
    cluster_node03_address: ${_param:openstack_control_node03_address}
    nova_vncproxy_url: https://${_param:cluster_public_host}:6080
    barbican_integration_enabled: 'false'
    fernet_rotation_driver: 'shared_filesystem'
    credential_rotation_driver: 'shared_filesystem'
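  # The &db_conn_recycle_time anchor below is merged into the other service
  # definitions via YAML merge keys (<<:), so the DB connection recycle time
  # and the barbican toggle are defined once and reused.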
  nova:
    controller: &db_conn_recycle_time
      database:
        connection_recycle_time: ${_param:db_connection_recycle_time}
      barbican:
        enabled: ${_param:barbican_integration_enabled}
  cinder:
    controller:
      <<: *db_conn_recycle_time
  neutron:
    server:
      <<: *db_conn_recycle_time
      vlan_aware_vms: true
      root_helper_daemon: false
      global_physnet_mtu: ${_param:interface_mtu}
      backend:
        external_mtu: ${_param:interface_mtu}
  keystone:
    server:
      <<: *db_conn_recycle_time
      cacert: /etc/ssl/certs/mcp_os_cacert
      openrc_extra:
        volume_device_name: sdc
  glance:
    server:
      <<: *db_conn_recycle_time
      identity:
        barbican_endpoint: ${barbican:server:host_href}
{%- if conf.MCP_VCP %}
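  # Under VCP, Heat metadata/waitcondition/watch endpoints point at the
  # OpenStack proxy's control address over plain HTTP.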
  heat:
    server:
      <<: *db_conn_recycle_time
      metadata:
        host: ${_param:openstack_proxy_control_address}
        port: 8000
        protocol: http
      waitcondition:
        host: ${_param:openstack_proxy_control_address}
        port: 8000
        protocol: http
      watch:
        host: ${_param:openstack_proxy_control_address}
        port: 8003
        protocol: http
      stack_domain_admin:
        domain: heat_user_domain
{%- else %}
  libvirt:
    server:
      service: libvirtd
      config_sys: /etc/default/libvirtd
      unix_sock_group: libvirt
  linux:
    network:
      # Add public IPs here as overrides, no need to fork another kvm_pdf.j2
      interface:
        br-ex:
          address: ${_param:external_address}
          proto: static
  apache:
    server:
      bind:
        listen_default_ports: false
  # sync from common-ha kvm role
  glusterfs:
    server:
      service: glusterd
      volumes:
        nova_instances:
          storage: /srv/glusterfs/nova_instances
          replica: 3
          bricks:
            - ${_param:cluster_node01_address}:/srv/glusterfs/nova_instances
            - ${_param:cluster_node02_address}:/srv/glusterfs/nova_instances
            - ${_param:cluster_node03_address}:/srv/glusterfs/nova_instances
          options:
            cluster.readdir-optimize: 'True'
            nfs.disable: 'True'
            network.remote-dio: 'True'
            cluster.favorite-child-policy: mtime
            diagnostics.client-log-level: WARNING
            diagnostics.brick-log-level: WARNING
{%- endif %}
  haproxy:
    proxy:
      listen:
        heat_cloudwatch_api:
          enabled: false
  barbican:
    server:
      ks_notifications_enable: true
      store:
        software:
          crypto_plugin: simple_crypto
          store_plugin: store_crypto
          global_default: true
      database:
        connection_recycle_time: ${_param:db_connection_recycle_time}
        host: ${_param:openstack_database_address}
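  # bind9 management (rndc) channel on port 953, restricted to the three
  # control nodes; Designate uses the 'designate' key to push zone updates.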
  bind:
    server:
      control:
        mgmt:
          enabled: true
          bind:
            address: ${_param:single_address}
            port: 953
          allow:
            - ${_param:openstack_control_node01_address}
            - ${_param:openstack_control_node02_address}
            - ${_param:openstack_control_node03_address}
          keys:
            - designate
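  # Each additional pool target drives the bind9 instance on a control node
  # over rndc, using the key shared with the bind server above.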
  designate:
    _support:
      sphinx:
        enabled: False  # Workaround broken meta/sphinx.yml in salt-formula-designate
    server:
      pools:
        default:
          description: 'test pool'
          targets:
            default:
              description: 'test target1'
            default1:
              type: ${_param:designate_pool_target_type}
              description: 'test target2'
              masters: ${_param:designate_pool_target_masters}
              options:
                host: ${_param:openstack_control_node02_address}
                port: 53
                rndc_host: ${_param:openstack_control_node02_address}
                rndc_port: 953
                rndc_key_file: /etc/designate/rndc.key
            default2:
              type: ${_param:designate_pool_target_type}
              description: 'test target3'
              masters: ${_param:designate_pool_target_masters}
              options:
                host: ${_param:openstack_control_node03_address}
                port: 53
                rndc_host: ${_param:openstack_control_node03_address}
                rndc_port: 953
                rndc_key_file: /etc/designate/rndc.key