Turn off Retpoline and KPTI protection
[fuel.git] / mcp / reclass / classes / cluster / baremetal-mcp-pike-common-ha / infra / kvm.yml
1 ##############################################################################
2 # Copyright (c) 2017 Mirantis Inc., Enea AB and others.
3 # All rights reserved. This program and the accompanying materials
4 # are made available under the terms of the Apache License, Version 2.0
5 # which accompanies this distribution, and is available at
6 # http://www.apache.org/licenses/LICENSE-2.0
7 ##############################################################################
8 ---
9 classes:
10   - system.linux.system.repo.glusterfs
11   - service.keepalived.cluster.single
12   - system.glusterfs.server.volume.glance
13   - system.glusterfs.server.volume.keystone
14   - system.glusterfs.server.cluster
15   - system.salt.control.virt
16   - system.salt.control.cluster.openstack_control_cluster
17   - system.salt.control.cluster.openstack_proxy_cluster
18   - system.salt.control.cluster.openstack_database_cluster
19   - system.salt.control.cluster.openstack_message_queue_cluster
20   - system.salt.control.cluster.openstack_telemetry_cluster
21   # - system.salt.control.cluster.stacklight_server_cluster
22   # - system.salt.control.cluster.stacklight_log_cluster
23   # - system.salt.control.cluster.stacklight_telemetry_cluster
24   - cluster.baremetal-mcp-pike-common-ha.infra.kvm_pdf
25   - cluster.baremetal-mcp-pike-common-ha.include.proxy
26 parameters:
27   _param:
28     linux_system_codename: xenial
29     glusterfs_version: '3.13'
30     cluster_vip_address: ${_param:infra_kvm_address}
31     cluster_node01_address: ${_param:infra_kvm_node01_address}
32     cluster_node02_address: ${_param:infra_kvm_node02_address}
33     cluster_node03_address: ${_param:infra_kvm_node03_address}
34     keepalived_vip_interface: br-ctl
35     keepalived_vip_virtual_router_id: 69
36   linux:
37     network:
38       remove_iface_files:
39         - '/etc/network/interfaces.d/50-cloud-init.cfg'
40     system:
41       kernel:
42         boot_options:
43           - spectre_v2=off  # disable Spectre v2 mitigation (Retpoline)
44           - nopti           # disable kernel page-table isolation (Meltdown/KPTI)
45   libvirt:
46     server:
47       service: libvirtd
48       config_sys: /etc/default/libvirtd
49       unix_sock_group: libvirt
50   salt:
51     control:
52       size:  # VM flavors below; typical RAM steps (MiB): 4096, 8192, 16384, 32768, 65536
53         # Default production sizing
54         openstack.control:
55           cpu: 4
56           ram: 12288
57           disk_profile: small
58           net_profile: default
59         openstack.database:
60           cpu: 4
61           ram: 6144
62           disk_profile: large
63           net_profile: default
64         openstack.message_queue:
65           cpu: 4
66           ram: 2048
67           disk_profile: small
68           net_profile: default
69         openstack.telemetry:
70           cpu: 2
71           ram: 3072
72           disk_profile: xxlarge
73           net_profile: default
74         # stacklight.log:
75         #   cpu: 2
76         #   ram: 4096
77         #   disk_profile: xxlarge
78         #   net_profile: default
79         # stacklight.server:
80         #   cpu: 2
81         #   ram: 4096
82         #   disk_profile: small
83         #   net_profile: default
84         # stacklight.telemetry:
85         #   cpu: 2
86         #   ram: 4096
87         #   disk_profile: xxlarge
88         #   net_profile: default
89         openstack.proxy:
90           cpu: 2
91           ram: 2048
92           disk_profile: small
93           net_profile: default_ext
94       cluster:
95         internal:
96           node:
97             mdb01:
98               image: ${_param:salt_control_xenial_image}
99             mdb02:
100               image: ${_param:salt_control_xenial_image}
101             mdb03:
102               image: ${_param:salt_control_xenial_image}
103             ctl01:
104               image: ${_param:salt_control_xenial_image}
105             ctl02:
106               image: ${_param:salt_control_xenial_image}
107             ctl03:
108               image: ${_param:salt_control_xenial_image}
109             dbs01:
110               image: ${_param:salt_control_xenial_image}
111             dbs02:
112               image: ${_param:salt_control_xenial_image}
113             dbs03:
114               image: ${_param:salt_control_xenial_image}
115             msg01:
116               image: ${_param:salt_control_xenial_image}
117             msg02:
118               image: ${_param:salt_control_xenial_image}
119             msg03:
120               image: ${_param:salt_control_xenial_image}
121             prx01:
122               image: ${_param:salt_control_xenial_image}
123             prx02:
124               image: ${_param:salt_control_xenial_image}
125               provider: kvm03.${_param:cluster_domain}
126   virt:
127     nic:
128       default:
129         eth1:
130           bridge: br-mgmt
131           model: virtio
132         eth0:
133           bridge: br-ctl
134           model: virtio
135       default_ext:
136         eth2:
137           bridge: br-mgmt
138           model: virtio
139         eth1:
140           bridge: br-ex
141           model: virtio
142         eth0:
143           bridge: br-ctl
144           model: virtio
145   glusterfs:
146     server:
147       service: glusterd
148       volumes:
149         nova_instances:
150           storage: /srv/glusterfs/nova_instances
151           replica: 3
152           bricks:
153             - ${_param:cluster_node01_address}:/srv/glusterfs/nova_instances
154             - ${_param:cluster_node02_address}:/srv/glusterfs/nova_instances
155             - ${_param:cluster_node03_address}:/srv/glusterfs/nova_instances
156           options:
157             cluster.readdir-optimize: 'True'
158             nfs.disable: 'True'
159             network.remote-dio: 'True'
160             cluster.favorite-child-policy: mtime
161             diagnostics.client-log-level: WARNING
162             diagnostics.brick-log-level: WARNING