Update system reclass
[fuel.git] mcp/reclass/classes/cluster/mcp-common-ha/openstack_control.yml.j2
##############################################################################
# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---
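# OpenStack control-plane (ctl) node role for the mcp-common-ha scenario.
# conf.MCP_VCP toggles between a virtualized control plane and NOVCP, where
# the control services run on the KVM hosts and kvm role settings are synced.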
classes:
  - system.linux.system.repo.mcp.apt_mirantis.openstack
  - system.ceilometer.client
  - system.memcached.server.single
  - system.keystone.server.cluster
  - system.keystone.server.wsgi
  - system.glance.control.cluster
  - system.nova.control.cluster
  - system.cinder.control.cluster
  - system.cinder.control.backend.lvm
  - system.heat.server.cluster
  - system.designate.server.cluster
  - system.designate.server.backend.bind
  - system.barbican.server.cluster
  - system.apache.server.site.barbican
  - service.barbican.server.plugin.simple_crypto
  - system.apache.server.single
  - system.bind.server.single
  - system.haproxy.proxy.listen.openstack.placement
  - system.glusterfs.client.cluster
  - system.glusterfs.client.volume.glance
  - system.glusterfs.client.volume.keystone
  - cluster.mcp-common-ha.glusterfs_repo
{%- if not conf.MCP_VCP %}
  # sync from kvm
  - service.keepalived.cluster.single
  - system.glusterfs.server.volume.glance
  - system.glusterfs.server.volume.keystone
  - system.glusterfs.server.cluster
  # NOTE(armband): Disabled for novcp
  # - system.salt.control.virt
  # - system.salt.control.cluster.openstack_control_cluster
  # - system.salt.control.cluster.openstack_proxy_cluster
  # - system.salt.control.cluster.openstack_database_cluster
  # - system.salt.control.cluster.openstack_message_queue_cluster
  # - system.salt.control.cluster.openstack_telemetry_cluster
  # - system.salt.control.cluster.stacklight_server_cluster
  # - system.salt.control.cluster.stacklight_log_cluster
  # - system.salt.control.cluster.stacklight_telemetry_cluster
  - cluster.mcp-common-ha.infra.kvm_pdf
  - cluster.all-mcp-arch-common.opnfv.maas_proxy
  - cluster.all-mcp-arch-common.opnfv.lab_proxy_pdf
{%- endif %}
parameters:
  _param:
{%- if not conf.MCP_VCP %}
    linux_system_codename: xenial  # sync from kvm
    # For NOVCP, we switch keepalived VIPs to keep cluster_vip_address in ctl
    single_nic: br-ctl  # for keepalived_vip_interface interpolation
    control_nic: ~      # Dummy value to keep reclass 1.5.2 happy
    keepalived_openstack_web_public_vip_address: ${_param:openstack_proxy_address}
    keepalived_openstack_web_public_vip_interface: br-ex
{%- endif %}
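    # VRRP-managed VIP shared by the three control nodes: keepalived holds
    # cluster_vip_address on the interface below (virtual router id 50).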
    keepalived_vip_interface: ${_param:single_nic}
    keepalived_vip_virtual_router_id: 50
    cluster_vip_address: ${_param:openstack_control_address}
    cluster_local_address: ${_param:single_address}
    cluster_node01_hostname: ${_param:openstack_control_node01_hostname}
    cluster_node01_address: ${_param:openstack_control_node01_address}
    cluster_node02_hostname: ${_param:openstack_control_node02_hostname}
    cluster_node02_address: ${_param:openstack_control_node02_address}
    cluster_node03_hostname: ${_param:openstack_control_node03_hostname}
    cluster_node03_address: ${_param:openstack_control_node03_address}
    nova_vncproxy_url: https://${_param:cluster_public_host}:6080
    barbican_integration_enabled: 'false'
    fernet_rotation_driver: 'shared_filesystem'
    credential_rotation_driver: 'shared_filesystem'
  nova:
    controller: &db_conn_recycle_time
      database:
        connection_recycle_time: ${_param:db_connection_recycle_time}
      barbican:
        enabled: ${_param:barbican_integration_enabled}
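  # The anchor defined above (&db_conn_recycle_time) is merged into the other
  # services via "<<: *db_conn_recycle_time", so each of them inherits the
  # same database connection_recycle_time setting.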
  cinder:
    controller:
      <<: *db_conn_recycle_time
  neutron:
    server:
      <<: *db_conn_recycle_time
      vlan_aware_vms: true
      root_helper_daemon: false
      agent_down_time: 300
      global_physnet_mtu: ${_param:interface_mtu}
      backend:
        external_mtu: ${_param:interface_mtu}
  keystone:
    server:
      <<: *db_conn_recycle_time
      cacert: /etc/ssl/certs/mcp_os_cacert
      openrc_extra:
        volume_device_name: sdc
  glance:
    server:
      <<: *db_conn_recycle_time
      identity:
        barbican_endpoint: ${barbican:server:host_href}
{%- if conf.MCP_VCP %}
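  # With a virtualized control plane, Heat's metadata, waitcondition and
  # watch endpoints are served via the proxy nodes over plain http.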
  heat:
    server:
      <<: *db_conn_recycle_time
      metadata:
        host: ${_param:openstack_proxy_control_address}
        port: 8000
        protocol: http
      waitcondition:
        host: ${_param:openstack_proxy_control_address}
        port: 8000
        protocol: http
      watch:
        host: ${_param:openstack_proxy_control_address}
        port: 8003
        protocol: http
{%- else %}
  libvirt:
    server:
      service: libvirtd
      config_sys: /etc/default/libvirtd
      unix_sock_group: libvirt
  linux:
    network:
      # Add public IPs here as overrides, no need to fork another kvm_pdf.j2
      interface:
        br-ex:
          address: ${_param:external_address}
          proto: static
  apache:
    server:
      bind:
        listen_default_ports: false
  # sync from common-ha kvm role
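  # Three-way replicated volume for nova instances, one brick per control
  # node; favorite-child-policy: mtime auto-resolves split-brain conflicts.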
  glusterfs:
    server:
      service: glusterd
      volumes:
        nova_instances:
          storage: /srv/glusterfs/nova_instances
          replica: 3
          bricks:
            - ${_param:cluster_node01_address}:/srv/glusterfs/nova_instances
            - ${_param:cluster_node02_address}:/srv/glusterfs/nova_instances
            - ${_param:cluster_node03_address}:/srv/glusterfs/nova_instances
          options:
            cluster.readdir-optimize: 'True'
            nfs.disable: 'True'
            network.remote-dio: 'True'
            cluster.favorite-child-policy: mtime
            diagnostics.client-log-level: WARNING
            diagnostics.brick-log-level: WARNING
{%- endif %}
  haproxy:
    proxy:
      listen:
        heat_cloudwatch_api:
          enabled: false
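  # Barbican keeps secrets in its own database, encrypted by the
  # simple_crypto plugin pulled in through the class list above.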
  barbican:
    server:
      ks_notifications_enable: true
      store:
        software:
          crypto_plugin: simple_crypto
          store_plugin: store_crypto
          global_default: true
      database:
        connection_recycle_time: ${_param:db_connection_recycle_time}
        host: ${_param:openstack_database_address}
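  # BIND exposes its rndc control channel (port 953) to the three control
  # nodes, letting the Designate pool targets below manage zones via rndc.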
  bind:
    server:
      control:
        mgmt:
          enabled: true
          bind:
            address: ${_param:single_address}
            port: 953
          allow:
            - ${_param:openstack_control_node01_address}
            - ${_param:openstack_control_node02_address}
            - ${_param:openstack_control_node03_address}
          keys:
            - designate
  designate:
    _support:
      sphinx:
        enabled: False  # Workaround broken meta/sphinx.yml in salt-formula-designate
    server:
      pools:
        default:
          description: 'test pool'
          targets:
            default:
              description: 'test target1'
            default1:
              type: ${_param:designate_pool_target_type}
              description: 'test target2'
              masters: ${_param:designate_pool_target_masters}
              options:
                host: ${_param:openstack_control_node02_address}
                port: 53
                rndc_host: ${_param:openstack_control_node02_address}
                rndc_port: 953
                rndc_key_file: /etc/designate/rndc.key
            default2:
              type: ${_param:designate_pool_target_type}
              description: 'test target3'
              masters: ${_param:designate_pool_target_masters}
              options:
                host: ${_param:openstack_control_node03_address}
                port: 53
                rndc_host: ${_param:openstack_control_node03_address}
                rndc_port: 953
                rndc_key_file: /etc/designate/rndc.key