# Source: fuel.git — mcp/reclass/classes/cluster/mcp-pike-common-ha/infra/kvm.yml
# Commit: "[baremetal] cleanup: rm cloud-init iface config"
1 ##############################################################################
2 # Copyright (c) 2017 Mirantis Inc., Enea AB and others.
3 # All rights reserved. This program and the accompanying materials
4 # are made available under the terms of the Apache License, Version 2.0
5 # which accompanies this distribution, and is available at
6 # http://www.apache.org/licenses/LICENSE-2.0
7 ##############################################################################
8 ---
9 classes:
10   - system.linux.system.repo.glusterfs
11   - service.keepalived.cluster.single
12   - system.glusterfs.server.volume.glance
13   - system.glusterfs.server.volume.keystone
14   - system.glusterfs.server.cluster
15   - system.salt.control.virt
16   - system.salt.control.cluster.openstack_control_cluster
17   - system.salt.control.cluster.openstack_proxy_cluster
18   - system.salt.control.cluster.openstack_database_cluster
19   - system.salt.control.cluster.openstack_message_queue_cluster
20   - system.salt.control.cluster.openstack_telemetry_cluster
21   # - system.salt.control.cluster.stacklight_server_cluster
22   # - system.salt.control.cluster.stacklight_log_cluster
23   # - system.salt.control.cluster.stacklight_telemetry_cluster
24   - cluster.mcp-pike-common-ha.infra.kvm_pdf
25   - cluster.mcp-pike-common-ha.include.proxy
26 parameters:
27   _param:
28     linux_system_codename: xenial
29     glusterfs_version: '3.13'
30     cluster_vip_address: ${_param:infra_kvm_address}
31     cluster_node01_address: ${_param:infra_kvm_node01_address}
32     cluster_node02_address: ${_param:infra_kvm_node02_address}
33     cluster_node03_address: ${_param:infra_kvm_node03_address}
34     keepalived_vip_interface: br-ctl
35     keepalived_vip_virtual_router_id: 69
36   linux:
37     system:
38       kernel:
39         boot_options:
40           - spectre_v2=off
41           - nopti
42   libvirt:
43     server:
44       service: libvirtd
45       config_sys: /etc/default/libvirtd
46       unix_sock_group: libvirt
47   salt:
48     control:
49       size:  # RAM 4096,8192,16384,32768,65536
50         # Default production sizing
51         openstack.control:
52           cpu: 4
53           ram: 12288
54           disk_profile: small
55           net_profile: default
56         openstack.database:
57           cpu: 4
58           ram: 6144
59           disk_profile: large
60           net_profile: default
61         openstack.message_queue:
62           cpu: 4
63           ram: 2048
64           disk_profile: small
65           net_profile: default
66         openstack.telemetry:
67           cpu: 2
68           ram: 3072
69           disk_profile: xxlarge
70           net_profile: default
71         # stacklight.log:
72         #   cpu: 2
73         #   ram: 4096
74         #   disk_profile: xxlarge
75         #   net_profile: default
76         # stacklight.server:
77         #   cpu: 2
78         #   ram: 4096
79         #   disk_profile: small
80         #   net_profile: default
81         # stacklight.telemetry:
82         #   cpu: 2
83         #   ram: 4096
84         #   disk_profile: xxlarge
85         #   net_profile: default
86         openstack.proxy:
87           cpu: 2
88           ram: 2048
89           disk_profile: small
90           net_profile: default_ext
91       cluster:
92         internal:
93           node:
94             mdb01:
95               image: ${_param:salt_control_xenial_image}
96             mdb02:
97               image: ${_param:salt_control_xenial_image}
98             mdb03:
99               image: ${_param:salt_control_xenial_image}
100             ctl01:
101               image: ${_param:salt_control_xenial_image}
102             ctl02:
103               image: ${_param:salt_control_xenial_image}
104             ctl03:
105               image: ${_param:salt_control_xenial_image}
106             dbs01:
107               image: ${_param:salt_control_xenial_image}
108             dbs02:
109               image: ${_param:salt_control_xenial_image}
110             dbs03:
111               image: ${_param:salt_control_xenial_image}
112             msg01:
113               image: ${_param:salt_control_xenial_image}
114             msg02:
115               image: ${_param:salt_control_xenial_image}
116             msg03:
117               image: ${_param:salt_control_xenial_image}
118             prx01:
119               image: ${_param:salt_control_xenial_image}
120             prx02:
121               image: ${_param:salt_control_xenial_image}
122               provider: kvm03.${_param:cluster_domain}
123   virt:
124     nic:
125       default:
126         eth1:
127           bridge: br-mgmt
128           model: virtio
129         eth0:
130           bridge: br-ctl
131           model: virtio
132       default_ext:
133         eth2:
134           bridge: br-mgmt
135           model: virtio
136         eth1:
137           bridge: br-ex
138           model: virtio
139         eth0:
140           bridge: br-ctl
141           model: virtio
142   glusterfs:
143     server:
144       service: glusterd
145       volumes:
146         nova_instances:
147           storage: /srv/glusterfs/nova_instances
148           replica: 3
149           bricks:
150             - ${_param:cluster_node01_address}:/srv/glusterfs/nova_instances
151             - ${_param:cluster_node02_address}:/srv/glusterfs/nova_instances
152             - ${_param:cluster_node03_address}:/srv/glusterfs/nova_instances
153           options:
154             cluster.readdir-optimize: 'True'
155             nfs.disable: 'True'
156             network.remote-dio: 'True'
157             cluster.favorite-child-policy: mtime
158             diagnostics.client-log-level: WARNING
159             diagnostics.brick-log-level: WARNING