# [noha] Update OpenStack version to Queens
# [fuel.git] / mcp / reclass / classes / cluster / mcp-pike-common-ha / infra / kvm.yml
##############################################################################
# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---
# Reclass classes mixed into the KVM (infra virtualization host) role.
classes:
  - system.linux.system.repo.glusterfs
  - service.keepalived.cluster.single
  - system.glusterfs.server.volume.glance
  - system.glusterfs.server.volume.keystone
  - system.glusterfs.server.cluster
  - system.salt.control.virt
  - system.salt.control.cluster.openstack_control_cluster
  - system.salt.control.cluster.openstack_proxy_cluster
  - system.salt.control.cluster.openstack_database_cluster
  - system.salt.control.cluster.openstack_message_queue_cluster
  - system.salt.control.cluster.openstack_telemetry_cluster
  # StackLight VMs are not spawned in this deployment; kept for reference.
  # - system.salt.control.cluster.stacklight_server_cluster
  # - system.salt.control.cluster.stacklight_log_cluster
  # - system.salt.control.cluster.stacklight_telemetry_cluster
  - cluster.mcp-pike-common-ha.infra.kvm_pdf
  - cluster.mcp-pike-common-ha.include.maas_proxy
  - cluster.mcp-pike-common-ha.include.lab_proxy_pdf
parameters:
  _param:
    linux_system_codename: xenial
    # Quoted so YAML does not coerce the version to a float (3.13 != '3.13').
    glusterfs_version: '3.13'
    # VIP and per-node addresses used by keepalived and the GlusterFS cluster;
    # the infra_kvm_* values are resolved from other classes in this model.
    cluster_vip_address: ${_param:infra_kvm_address}
    cluster_node01_address: ${_param:infra_kvm_node01_address}
    cluster_node02_address: ${_param:infra_kvm_node02_address}
    cluster_node03_address: ${_param:infra_kvm_node03_address}
    keepalived_vip_interface: br-ctl
    keepalived_vip_virtual_router_id: 69
37   linux:
38     system:
39       kernel:
40         boot_options:
41           - spectre_v2=off
42           - nopti
43   libvirt:
44     server:
45       service: libvirtd
46       config_sys: /etc/default/libvirtd
47       unix_sock_group: libvirt
48   salt:
49     control:
50       size:  # RAM 4096,8192,16384,32768,65536
51         # Default production sizing
52         openstack.control:
53           cpu: 4
54           ram: 12288
55           disk_profile: small
56           net_profile: default
57         openstack.database:
58           cpu: 4
59           ram: 6144
60           disk_profile: large
61           net_profile: default
62         openstack.message_queue:
63           cpu: 4
64           ram: 2048
65           disk_profile: small
66           net_profile: default
67         openstack.telemetry:
68           cpu: 2
69           ram: 3072
70           disk_profile: xxlarge
71           net_profile: default
72         # stacklight.log:
73         #   cpu: 2
74         #   ram: 4096
75         #   disk_profile: xxlarge
76         #   net_profile: default
77         # stacklight.server:
78         #   cpu: 2
79         #   ram: 4096
80         #   disk_profile: small
81         #   net_profile: default
82         # stacklight.telemetry:
83         #   cpu: 2
84         #   ram: 4096
85         #   disk_profile: xxlarge
86         #   net_profile: default
87         openstack.proxy:
88           cpu: 2
89           ram: 2048
90           disk_profile: small
91           net_profile: default_ext
92       cluster:
93         internal:
94           node:
95             mdb01:
96               image: ${_param:salt_control_xenial_image}
97             mdb02:
98               image: ${_param:salt_control_xenial_image}
99             mdb03:
100               image: ${_param:salt_control_xenial_image}
101             ctl01:
102               image: ${_param:salt_control_xenial_image}
103             ctl02:
104               image: ${_param:salt_control_xenial_image}
105             ctl03:
106               image: ${_param:salt_control_xenial_image}
107             dbs01:
108               image: ${_param:salt_control_xenial_image}
109             dbs02:
110               image: ${_param:salt_control_xenial_image}
111             dbs03:
112               image: ${_param:salt_control_xenial_image}
113             msg01:
114               image: ${_param:salt_control_xenial_image}
115             msg02:
116               image: ${_param:salt_control_xenial_image}
117             msg03:
118               image: ${_param:salt_control_xenial_image}
119             prx01:
120               image: ${_param:salt_control_xenial_image}
121             prx02:
122               image: ${_param:salt_control_xenial_image}
123               provider: kvm03.${_param:cluster_domain}
124   virt:
125     nic:
126       default:
127         eth1:
128           bridge: br-mgmt
129           model: virtio
130         eth0:
131           bridge: br-ctl
132           model: virtio
133       default_ext:
134         eth2:
135           bridge: br-mgmt
136           model: virtio
137         eth1:
138           bridge: br-ex
139           model: virtio
140         eth0:
141           bridge: br-ctl
142           model: virtio
143   glusterfs:
144     server:
145       service: glusterd
146       volumes:
147         nova_instances:
148           storage: /srv/glusterfs/nova_instances
149           replica: 3
150           bricks:
151             - ${_param:cluster_node01_address}:/srv/glusterfs/nova_instances
152             - ${_param:cluster_node02_address}:/srv/glusterfs/nova_instances
153             - ${_param:cluster_node03_address}:/srv/glusterfs/nova_instances
154           options:
155             cluster.readdir-optimize: 'True'
156             nfs.disable: 'True'
157             network.remote-dio: 'True'
158             cluster.favorite-child-policy: mtime
159             diagnostics.client-log-level: WARNING
160             diagnostics.brick-log-level: WARNING