1 ##############################################################################
2 # Copyright (c) 2018 Mirantis Inc., Enea AB and others.
3 # All rights reserved. This program and the accompanying materials
4 # are made available under the terms of the Apache License, Version 2.0
5 # which accompanies this distribution, and is available at
6 # http://www.apache.org/licenses/LICENSE-2.0
7 ##############################################################################
10 - system.linux.system.repo.mcp.mirror.v1.openstack
11 - system.ceilometer.client
12 - system.memcached.server.single
13 - system.keystone.server.cluster
14 - system.keystone.server.wsgi
15 - system.glance.control.cluster
16 - system.nova.control.cluster
17 - system.cinder.control.cluster
18 - system.cinder.control.backend.lvm
19 - system.heat.server.cluster
20 - system.designate.server.cluster
21 - system.designate.server.backend.bind
22 - system.barbican.server.cluster
23 - system.apache.server.site.barbican
24 - service.barbican.server.plugin.simple_crypto
25 - system.apache.server.single
26 - system.bind.server.single
27 - system.haproxy.proxy.listen.openstack.placement
28 - system.glusterfs.client.cluster
29 - system.glusterfs.client.volume.glance
30 - system.glusterfs.client.volume.keystone
31 {%- if not conf.MCP_VCP %}
33 - service.keepalived.cluster.single
34 - system.glusterfs.server.volume.glance
35 - system.glusterfs.server.volume.keystone
36 - system.glusterfs.server.cluster
37 # NOTE(armband): Disabled for novcp
38 # - system.salt.control.virt
39 # - system.salt.control.cluster.openstack_control_cluster
40 # - system.salt.control.cluster.openstack_proxy_cluster
41 # - system.salt.control.cluster.openstack_database_cluster
42 # - system.salt.control.cluster.openstack_message_queue_cluster
43 # - system.salt.control.cluster.openstack_telemetry_cluster
44 # - system.salt.control.cluster.stacklight_server_cluster
45 # - system.salt.control.cluster.stacklight_log_cluster
46 # - system.salt.control.cluster.stacklight_telemetry_cluster
47 - cluster.mcp-common-ha.glusterfs_repo
48 - cluster.mcp-common-ha.infra.kvm_pdf
49 - cluster.all-mcp-arch-common.opnfv.maas_proxy
50 - cluster.all-mcp-arch-common.opnfv.lab_proxy_pdf
# NOVCP-only parameter overrides: on bare-metal control nodes the
# keepalived VIPs are re-pointed so cluster_vip_address stays on ctl.
# NOTE(review): the enclosing 'parameters:'/'_param:' parent keys are
# outside this excerpt — indentation of missing parents unknown.
54 {%- if not conf.MCP_VCP %}
55 linux_system_codename: xenial # sync from kvm
56 # For NOVCP, we switch keepalived VIPs, to keep cluster_vip_address in ctl
57 single_nic: br-ctl # for keepalive_vip_interface interpolation
58 control_nic: ~ # Dummy value to keep reclass 1.5.2 happy
59 keepalived_openstack_web_public_vip_address: ${_param:openstack_proxy_address}
60 keepalived_openstack_web_public_vip_interface: br-ex
62 keepalived_vip_interface: ${_param:single_nic}
63 keepalived_vip_virtual_router_id: 50
# 3-node HA control cluster: VIP plus per-node hostname/address pairs,
# all interpolated from openstack_control_* parameters defined elsewhere.
64 cluster_vip_address: ${_param:openstack_control_address}
65 cluster_local_address: ${_param:single_address}
66 cluster_node01_hostname: ${_param:openstack_control_node01_hostname}
67 cluster_node01_address: ${_param:openstack_control_node01_address}
68 cluster_node02_hostname: ${_param:openstack_control_node02_hostname}
69 cluster_node02_address: ${_param:openstack_control_node02_address}
70 cluster_node03_hostname: ${_param:openstack_control_node03_hostname}
71 cluster_node03_address: ${_param:openstack_control_node03_address}
72 nova_vncproxy_url: https://${_param:cluster_public_host}:6080
73 barbican_integration_enabled: 'false'
# Keystone fernet/credential key rotation via shared filesystem
# (consistent with the glusterfs keystone volume included above).
74 fernet_rotation_driver: 'shared_filesystem'
75 credential_rotation_driver: 'shared_filesystem'
# YAML anchor '&db_conn_recycle_time' defined once on the controller's
# database settings and merged (<<: *db_conn_recycle_time) into the
# database sections of the other services below, so every service shares
# the same SQLAlchemy connection_recycle_time.
# NOTE(review): large line-number gaps here — the service names owning
# each merge key (nova/cinder/heat/etc.) are on elided lines; do not
# re-indent or reorder without the full file.
77 controller: &db_conn_recycle_time
79 connection_recycle_time: ${_param:db_connection_recycle_time}
81 enabled: ${_param:barbican_integration_enabled}
84 <<: *db_conn_recycle_time
87 <<: *db_conn_recycle_time
89 root_helper_daemon: false
92 <<: *db_conn_recycle_time
93 cacert: /etc/ssl/certs/mcp_os_cacert
95 volume_device_name: sdc
98 <<: *db_conn_recycle_time
100 barbican_endpoint: ${barbican:server:host_href}
# VCP-only bind-address overrides: services listen on the proxy control
# address instead of the node-local one.
101 {%- if conf.MCP_VCP %}
104 <<: *db_conn_recycle_time
106 host: ${_param:openstack_proxy_control_address}
110 host: ${_param:openstack_proxy_control_address}
114 host: ${_param:openstack_proxy_control_address}
# libvirt daemon defaults (sync from common-ha kvm role, see comment below).
121 config_sys: /etc/default/libvirtd
122 unix_sock_group: libvirt
125 # Add public IPs here as overrides, no need to fork another kvm_pdf.j2
128 address: ${_param:external_address}
133 listen_default_ports: false
134 # sync from common-ha kvm role
# Replicated glusterfs volume for nova instance storage, one brick per
# control node; tuning options favor availability (favorite-child-policy)
# and quieter logs (WARNING level).
140 storage: /srv/glusterfs/nova_instances
143 - ${_param:cluster_node01_address}:/srv/glusterfs/nova_instances
144 - ${_param:cluster_node02_address}:/srv/glusterfs/nova_instances
145 - ${_param:cluster_node03_address}:/srv/glusterfs/nova_instances
147 cluster.readdir-optimize: 'True'
149 network.remote-dio: 'True'
150 cluster.favorite-child-policy: mtime
151 diagnostics.client-log-level: WARNING
152 diagnostics.brick-log-level: WARNING
# Barbican: keystone notifications enabled; secrets stored via the
# simple_crypto plugin (matches service.barbican.server.plugin.simple_crypto
# included in the classes list above).
161 ks_notifications_enable: true
164 crypto_plugin: simple_crypto
165 store_plugin: store_crypto
# Database endpoint for this section points at the galera/database VIP.
# NOTE(review): owning service key is on an elided line — TODO confirm.
168 connection_recycle_time: ${_param:db_connection_recycle_time}
169 host: ${_param:openstack_database_address}
# Bind/designate: this node listens on its single_address; the three
# control-node addresses below presumably form the allow/masters list
# for zone transfers — parent key elided, verify against full file.
176 address: ${_param:single_address}
179 - ${_param:openstack_control_node01_address}
180 - ${_param:openstack_control_node02_address}
181 - ${_param:openstack_control_node03_address}
187 enabled: False # Workaround broken meta/sphinx.yml in salt-formula-designate
# Designate pool with one BIND9 target per control node (targets 2 and 3
# visible here on node02/node03; target 1 and the pool header are partly
# on elided lines). Each target: type, masters, DNS host and rndc
# host/key for remote zone management.
191 description: 'test pool'
194 description: 'test target1'
196 type: ${_param:designate_pool_target_type}
197 description: 'test target2'
198 masters: ${_param:designate_pool_target_masters}
200 host: ${_param:openstack_control_node02_address}
202 rndc_host: ${_param:openstack_control_node02_address}
204 rndc_key_file: /etc/designate/rndc.key
206 type: ${_param:designate_pool_target_type}
207 description: 'test target3'
208 masters: ${_param:designate_pool_target_masters}
210 host: ${_param:openstack_control_node03_address}
212 rndc_host: ${_param:openstack_control_node03_address}
214 rndc_key_file: /etc/designate/rndc.key