# OSM: use full_install_osm.sh script to install from master
# [releng-xci-scenarios.git] scenarios/os-odl-nofeature/role/os-odl-nofeature/files/ha/openstack_user_config.yml
---
# CIDR ranges from which the dynamic inventory assigns container addresses,
# one per infrastructure network (management, vxlan overlay, storage).
cidr_networks:
  container: 172.29.236.0/22
  tunnel: 172.29.240.0/22
  storage: 172.29.244.0/22
6
# Addresses and ranges the inventory must never hand out to containers:
# the hosts' own statically-assigned addresses plus the internal VIP.
used_ips:
  - "172.29.236.1,172.29.236.50"
  - "172.29.240.1,172.29.240.50"
  - "172.29.244.1,172.29.244.50"
  # NOTE(review): 172.29.248.0/22 has no matching entry in cidr_networks —
  # presumably reserved for an additional (e.g. lbaas) network; confirm.
  - "172.29.248.1,172.29.248.50"
  - "172.29.236.222"  # internal_lb_vip_address, reserved explicitly below
13
global_overrides:
  # VIPs terminated on the haproxy_hosts defined below.
  internal_lb_vip_address: 172.29.236.222
  external_lb_vip_address: 192.168.122.220
  tunnel_bridge: "br-vxlan"
  management_bridge: "br-mgmt"
  provider_networks:
    # Management network: carries SSH and container management traffic.
    - network:
        container_bridge: "br-mgmt"
        container_type: "veth"
        container_interface: "eth1"
        ip_from_q: "container"
        type: "raw"
        group_binds:
          - all_containers
          - hosts
        is_container_address: true
        is_ssh_address: true
    # Tenant overlay network (VXLAN VNIs 1-1000).
    - network:
        container_bridge: "br-vxlan"
        container_type: "veth"
        container_interface: "eth10"
        ip_from_q: "tunnel"
        type: "vxlan"
        range: "1:1000"
        net_name: "vxlan"
        group_binds:
          - neutron_openvswitch_agent
    # Flat (untagged) provider network on br-vlan.
    - network:
        container_bridge: "br-vlan"
        container_type: "veth"
        container_interface: "eth12"
        host_bind_override: "eth12"
        type: "flat"
        net_name: "flat"
        group_binds:
          - neutron_openvswitch_agent
    # VLAN provider network (tags 102-199) on br-vlan.
    - network:
        container_bridge: "br-vlan"
        container_type: "veth"
        container_interface: "eth11"
        # NOTE(review): host_bind_override is "eth12" while the container
        # interface is "eth11" — verify this is intentional (both provider
        # networks sharing the same host interface) and not a copy-paste slip.
        host_bind_override: "eth12"
        type: "vlan"
        range: "102:199"
        net_name: "physnet1"
        group_binds:
          - neutron_openvswitch_agent
    # Storage network: binds only services that move image/volume data.
    - network:
        container_bridge: "br-storage"
        container_type: "veth"
        container_interface: "eth2"
        ip_from_q: "storage"
        type: "raw"
        group_binds:
          - glance_api
          - cinder_api
          - cinder_volume
          - nova_compute
71
# ##
# ## Infrastructure
# ##

# galera, memcache, rabbitmq, utility
shared-infra_hosts:
  controller00:
    ip: 172.29.236.11
  controller01:
    ip: 172.29.236.12
  controller02:
    ip: 172.29.236.13

# repository (apt cache, python packages, etc)
repo-infra_hosts:
  controller00:
    ip: 172.29.236.11
  controller01:
    ip: 172.29.236.12
  controller02:
    ip: 172.29.236.13

# load balancer
# Ideally the load balancer should not use the Infrastructure hosts.
# Dedicated hardware is best for improved performance and security.
haproxy_hosts:
  controller00:
    ip: 172.29.236.11
  controller01:
    ip: 172.29.236.12
  controller02:
    ip: 172.29.236.13

# rsyslog server (disabled; uncomment and point at a log host to enable)
# log_hosts:
#   log1:
#     ip: 172.29.236.14
109
# ##
# ## OpenStack
# ##

# keystone
identity_hosts:
  controller00:
    ip: 172.29.236.11
  controller01:
    ip: 172.29.236.12
  controller02:
    ip: 172.29.236.13

# cinder api services
storage-infra_hosts:
  controller00:
    ip: 172.29.236.11
  controller01:
    ip: 172.29.236.12
  controller02:
    ip: 172.29.236.13

# glance
# The settings here are repeated for each infra host.
# They could instead be applied as global settings in
# user_variables, but are left here to illustrate that
# each container could have different storage targets.
image_hosts:
  controller00:
    ip: 172.29.236.11
    container_vars:
      limit_container_types: glance
      glance_nfs_client:
        - server: "172.29.244.14"
          remote_path: "/images"
          local_path: "/var/lib/glance/images"
          type: "nfs"
          options: "_netdev,auto"
  controller01:
    ip: 172.29.236.12
    container_vars:
      limit_container_types: glance
      glance_nfs_client:
        - server: "172.29.244.14"
          remote_path: "/images"
          local_path: "/var/lib/glance/images"
          type: "nfs"
          options: "_netdev,auto"
  controller02:
    ip: 172.29.236.13
    container_vars:
      limit_container_types: glance
      glance_nfs_client:
        - server: "172.29.244.14"
          remote_path: "/images"
          local_path: "/var/lib/glance/images"
          type: "nfs"
          options: "_netdev,auto"

# nova api, conductor, etc services
compute-infra_hosts:
  controller00:
    ip: 172.29.236.11
  controller01:
    ip: 172.29.236.12
  controller02:
    ip: 172.29.236.13

# heat
orchestration_hosts:
  controller00:
    ip: 172.29.236.11
  controller01:
    ip: 172.29.236.12
  controller02:
    ip: 172.29.236.13

# horizon
dashboard_hosts:
  controller00:
    ip: 172.29.236.11
  controller01:
    ip: 172.29.236.12
  controller02:
    ip: 172.29.236.13

# neutron server, agents (L3, etc)
network_hosts:
  controller00:
    ip: 172.29.236.11
  controller01:
    ip: 172.29.236.12
  controller02:
    ip: 172.29.236.13

# ceilometer
metering-infra_hosts:
  controller00:
    ip: 172.29.236.11
  controller01:
    ip: 172.29.236.12
  controller02:
    ip: 172.29.236.13

# nova hypervisors
compute_hosts:
  compute00:
    ip: 172.29.236.14
  compute01:
    ip: 172.29.236.15
220
# cinder volume hosts (NFS-backed)
# The settings here are repeated for each infra host.
# They could instead be applied as global settings in
# user_variables, but are left here to illustrate that
# each container could have different storage targets.
storage_hosts:
  controller00:
    ip: 172.29.236.11
    container_vars:
      cinder_backends:
        limit_container_types: cinder_volume
        nfs_volume:
          volume_backend_name: NFS_VOLUME1
          volume_driver: cinder.volume.drivers.nfs.NfsDriver
          nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
          nfs_shares_config: /etc/cinder/nfs_shares
          shares:
            - ip: "172.29.244.14"
              share: "/volumes"
  controller01:
    ip: 172.29.236.12
    container_vars:
      cinder_backends:
        limit_container_types: cinder_volume
        nfs_volume:
          volume_backend_name: NFS_VOLUME1
          volume_driver: cinder.volume.drivers.nfs.NfsDriver
          nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
          nfs_shares_config: /etc/cinder/nfs_shares
          shares:
            - ip: "172.29.244.14"
              share: "/volumes"
  controller02:
    ip: 172.29.236.13
    container_vars:
      cinder_backends:
        limit_container_types: cinder_volume
        nfs_volume:
          volume_backend_name: NFS_VOLUME1
          volume_driver: cinder.volume.drivers.nfs.NfsDriver
          nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
          nfs_shares_config: /etc/cinder/nfs_shares
          shares:
            - ip: "172.29.244.14"
              share: "/volumes"