1 # Copyright 2015 Red Hat, Inc.
4 # Licensed under the Apache License, Version 2.0 (the "License"); you may
5 # not use this file except in compliance with the License. You may obtain
6 # a copy of the License at
8 # http://www.apache.org/licenses/LICENSE-2.0
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 # License for the specific language governing permissions and limitations
# --- Node bootstrap: base package/firewall handling and cluster role flags ---
21 include ::tripleo::packages
22 include ::tripleo::firewall
# The node whose hostname matches the hiera bootstrap_nodeid becomes the
# pacemaker master; it performs one-time cluster setup (VIPs, constraints).
# Comparison is case-insensitive via downcase().
24 if $::hostname == downcase(hiera('bootstrap_nodeid')) {
25 $pacemaker_master = true
28 $pacemaker_master = false
# Fencing is only enabled once deployment reaches step 5, and only if the
# operator opted in; 'and' here short-circuits on the str2bool result.
32 $enable_fencing = str2bool(hiera('enable_fencing', false)) and hiera('step') >= 5
33 $enable_load_balancer = hiera('enable_load_balancer', true)
35 # When to start and enable services which haven't been Pacemakerized
36 # FIXME: remove when we start all OpenStack services using Pacemaker
37 # (occurrences of this variable will be gradually replaced with false)
38 $non_pcmk_start = hiera('step') >= 4
# --- Step 1: OS-level prep, load balancer, pacemaker cluster, and service
# --- configuration (services are configured but NOT started yet).
40 if hiera('step') >= 1 {
42 create_resources(kmod::load, hiera('kernel_modules'), {})
43 create_resources(sysctl::value, hiera('sysctl_settings'), {})
# Kernel modules must be loaded before their sysctl knobs are applied.
44 Exec <| tag == 'kmod::load' |> -> Sysctl <| |>
48 if count(hiera('ntp::servers')) > 0 {
52 $controller_node_ips = split(hiera('controller_node_ips'), ',')
53 $controller_node_names = split(downcase(hiera('controller_node_names')), ',')
# HAProxy is configured here but left unmanaged (haproxy_service_manage =>
# false); pacemaker takes over starting it in step 2.
54 if $enable_load_balancer {
55 class { '::tripleo::loadbalancer' :
56 controller_hosts => $controller_node_ips,
57 controller_hosts_names => $controller_node_names,
59 mysql_clustercheck => true,
60 haproxy_service_manage => false,
# pcs expects a space-separated, lower-case member list.
64 $pacemaker_cluster_members = downcase(regsubst(hiera('controller_node_names'), ',', ' ', 'G'))
65 $corosync_ipv6 = str2bool(hiera('corosync_ipv6', false))
67 $cluster_setup_extras = { '--ipv6' => '' }
69 $cluster_setup_extras = {}
74 class { '::pacemaker':
75 hacluster_pwd => hiera('hacluster_pwd'),
# Only the bootstrap node (setup_cluster => $pacemaker_master) runs the
# initial 'pcs cluster setup'; the others just join.
77 class { '::pacemaker::corosync':
78 cluster_members => $pacemaker_cluster_members,
79 setup_cluster => $pacemaker_master,
80 cluster_setup_extras => $cluster_setup_extras,
82 class { '::pacemaker::stonith':
83 disable => !$enable_fencing,
86 include ::tripleo::fencing
88 # enable stonith after all fencing devices have been created
89 Class['tripleo::fencing'] -> Class['pacemaker::stonith']
92 # FIXME(gfidente): sets 200secs as default start timeout op
93 # param; until we can use pcmk global defaults we'll still
94 # need to add it to every resource which redefines op params
95 Pacemaker::Resource::Service {
96 op_params => 'start timeout=200s stop timeout=200s',
99 # Only configure RabbitMQ in this step, don't start it yet to
100 # avoid races where non-master nodes attempt to start without
101 # config (eg. binding on 0.0.0.0)
102 # The module ignores erlang_cookie if cluster_config is false
103 $rabbit_ipv6 = str2bool(hiera('rabbit_ipv6', false))
# On IPv6, erlang must be forced onto the inet6 TCP distribution protocol.
105 $rabbit_env = merge(hiera('rabbitmq_environment'), {
106 'RABBITMQ_SERVER_START_ARGS' => '"-proto_dist inet6_tcp"'
109 $rabbit_env = hiera('rabbitmq_environment')
112 class { '::rabbitmq':
113 service_manage => false,
114 tcp_keepalive => false,
115 config_kernel_variables => hiera('rabbitmq_kernel_variables'),
116 config_variables => hiera('rabbitmq_config_variables'),
117 environment_variables => $rabbit_env,
# All cluster members must share the same erlang cookie to join.
119 file { '/var/lib/rabbitmq/.erlang.cookie':
124 content => hiera('rabbitmq::erlang_cookie'),
128 if downcase(hiera('ceilometer_backend')) == 'mongodb' {
129 include ::mongodb::globals
130 class { '::mongodb::server' :
131 service_manage => false,
136 class {'::memcached' :
137 service_manage => false,
142 service_manage => false,
143 notify_service => false,
# Galera uses a dedicated config file so the plain server.cnf path remains
# usable when galera is disabled.
147 if str2bool(hiera('enable_galera', true)) {
148 $mysql_config_file = '/etc/my.cnf.d/galera.cnf'
150 $mysql_config_file = '/etc/my.cnf.d/server.cnf'
152 $galera_nodes = downcase(hiera('galera_node_names', $::hostname))
153 $galera_nodes_count = count(split($galera_nodes, ','))
155 # FIXME: due to https://bugzilla.redhat.com/show_bug.cgi?id=1298671 we
156 # set bind-address to a hostname instead of an ip address; to move Mysql
157 # from internal_api on another network we'll have to customize both
158 # MysqlNetwork and ControllerHostnameResolveNetwork in ServiceNetMap
159 $mysql_bind_host = hiera('mysql_bind_host')
162 'skip-name-resolve' => '1',
163 'binlog_format' => 'ROW',
164 'default-storage-engine' => 'innodb',
165 'innodb_autoinc_lock_mode' => '2',
166 'innodb_locks_unsafe_for_binlog'=> '1',
167 'query_cache_size' => '0',
168 'query_cache_type' => '0',
169 'bind-address' => $::hostname,
170 'max_connections' => hiera('mysql_max_connections'),
171 'open_files_limit' => '-1',
172 'wsrep_provider' => '/usr/lib64/galera/libgalera_smm.so',
173 'wsrep_cluster_name' => 'galera_cluster',
174 'wsrep_slave_threads' => '1',
175 'wsrep_certify_nonPK' => '1',
176 'wsrep_max_ws_rows' => '131072',
177 'wsrep_max_ws_size' => '1073741824',
178 'wsrep_debug' => '0',
179 'wsrep_convert_LOCK_to_trx' => '0',
180 'wsrep_retry_autocommit' => '1',
181 'wsrep_auto_increment_control' => '1',
182 'wsrep_drupal_282555_workaround'=> '0',
183 'wsrep_causal_reads' => '0',
184 'wsrep_sst_method' => 'rsync',
# Brackets are kept in the listen address so this also works for IPv6.
185 'wsrep_provider_options' => "gmcast.listen_addr=tcp://[${mysql_bind_host}]:4567;",
# Galera is managed by the pacemaker 'galera' OCF resource (created in step
# 2), hence service_manage/service_enabled are false here.
189 class { '::mysql::server':
190 create_root_user => false,
191 create_root_my_cnf => false,
192 config_file => $mysql_config_file,
193 override_options => $mysqld_options,
194 remove_default_accounts => $pacemaker_master,
195 service_manage => false,
196 service_enabled => false,
# --- Step 2: cluster resources. First compute mongodb connection endpoints,
# --- which are needed on all nodes (master and non-master alike).
201 if hiera('step') >= 2 {
203 # NOTE(gfidente): the following vars are needed on all nodes so they
204 # need to stay out of pacemaker_master conditional.
205 # The addresses mangling will hopefully go away when we'll be able to
206 # configure the connection string via hostnames, until then, we need to pass
207 # the list of IPv6 addresses *with* port and without the brackets as 'members'
208 # argument for the 'mongodb_replset' resource.
209 if str2bool(hiera('mongodb::server::ipv6', false)) {
# IPv6: '[addr]:27017' for client connection strings...
210 $mongo_node_ips_with_port_prefixed = prefix(hiera('mongo_node_ips'), '[')
211 $mongo_node_ips_with_port = suffix($mongo_node_ips_with_port_prefixed, ']:27017')
# ...and bracket-less 'addr:27017' for the mongodb_replset members argument.
212 $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017')
# IPv4: both forms are identical (no brackets needed).
214 $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017')
215 $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017')
217 $mongodb_replset = hiera('mongodb::server::replset')
# --- Step 2 (master only): create the openstack-core dummy, the haproxy
# --- clone, and one pacemaker IP resource per VIP. Each VIP gets an order
# --- constraint (VIP starts before haproxy) plus a colocation constraint
# --- (VIP runs where haproxy runs). All non-control VIPs are skipped when
# --- they alias the control VIP to avoid duplicate resources.
219 if $pacemaker_master {
221 if $enable_load_balancer {
223 include ::pacemaker::resource_defaults
225 # Create an openstack-core dummy resource. See RHBZ 1290121
226 pacemaker::resource::ocf { 'openstack-core':
227 ocf_agent_name => 'heartbeat:Dummy',
228 clone_params => true,
230 # FIXME: we should not have to access tripleo::loadbalancer class
231 # parameters here to configure pacemaker VIPs. The configuration
232 # of pacemaker VIPs could move into puppet-tripleo or we should
233 # make use of less specific hiera parameters here for the settings.
234 pacemaker::resource::service { 'haproxy':
235 clone_params => true,
# Control-plane VIP; netmask /64 for IPv6, /32 for IPv4 host routes.
238 $control_vip = hiera('tripleo::loadbalancer::controller_virtual_ip')
239 if is_ipv6_address($control_vip) {
240 $control_vip_netmask = '64'
242 $control_vip_netmask = '32'
244 pacemaker::resource::ip { 'control_vip':
245 ip_address => $control_vip,
246 cidr_netmask => $control_vip_netmask,
248 pacemaker::constraint::base { 'control_vip-then-haproxy':
249 constraint_type => 'order',
250 first_resource => "ip-${control_vip}",
251 second_resource => 'haproxy-clone',
252 first_action => 'start',
253 second_action => 'start',
# kind=Optional: ordering applies only when both start in the same transition.
254 constraint_params => 'kind=Optional',
255 require => [Pacemaker::Resource::Service['haproxy'],
256 Pacemaker::Resource::Ip['control_vip']],
258 pacemaker::constraint::colocation { 'control_vip-with-haproxy':
259 source => "ip-${control_vip}",
260 target => 'haproxy-clone',
262 require => [Pacemaker::Resource::Service['haproxy'],
263 Pacemaker::Resource::Ip['control_vip']],
# Public (external) API VIP.
266 $public_vip = hiera('tripleo::loadbalancer::public_virtual_ip')
267 if is_ipv6_address($public_vip) {
268 $public_vip_netmask = '64'
270 $public_vip_netmask = '32'
272 if $public_vip and $public_vip != $control_vip {
273 pacemaker::resource::ip { 'public_vip':
274 ip_address => $public_vip,
275 cidr_netmask => $public_vip_netmask,
277 pacemaker::constraint::base { 'public_vip-then-haproxy':
278 constraint_type => 'order',
279 first_resource => "ip-${public_vip}",
280 second_resource => 'haproxy-clone',
281 first_action => 'start',
282 second_action => 'start',
283 constraint_params => 'kind=Optional',
284 require => [Pacemaker::Resource::Service['haproxy'],
285 Pacemaker::Resource::Ip['public_vip']],
287 pacemaker::constraint::colocation { 'public_vip-with-haproxy':
288 source => "ip-${public_vip}",
289 target => 'haproxy-clone',
291 require => [Pacemaker::Resource::Service['haproxy'],
292 Pacemaker::Resource::Ip['public_vip']],
# Redis VIP (note: plain 'redis_vip' hiera key, unlike the loadbalancer-
# namespaced keys used by the other VIPs).
296 $redis_vip = hiera('redis_vip')
297 if is_ipv6_address($redis_vip) {
298 $redis_vip_netmask = '64'
300 $redis_vip_netmask = '32'
302 if $redis_vip and $redis_vip != $control_vip {
303 pacemaker::resource::ip { 'redis_vip':
304 ip_address => $redis_vip,
305 cidr_netmask => $redis_vip_netmask,
307 pacemaker::constraint::base { 'redis_vip-then-haproxy':
308 constraint_type => 'order',
309 first_resource => "ip-${redis_vip}",
310 second_resource => 'haproxy-clone',
311 first_action => 'start',
312 second_action => 'start',
313 constraint_params => 'kind=Optional',
314 require => [Pacemaker::Resource::Service['haproxy'],
315 Pacemaker::Resource::Ip['redis_vip']],
317 pacemaker::constraint::colocation { 'redis_vip-with-haproxy':
318 source => "ip-${redis_vip}",
319 target => 'haproxy-clone',
321 require => [Pacemaker::Resource::Service['haproxy'],
322 Pacemaker::Resource::Ip['redis_vip']],
# Internal API VIP.
326 $internal_api_vip = hiera('tripleo::loadbalancer::internal_api_virtual_ip')
327 if is_ipv6_address($internal_api_vip) {
328 $internal_api_vip_netmask = '64'
330 $internal_api_vip_netmask = '32'
332 if $internal_api_vip and $internal_api_vip != $control_vip {
333 pacemaker::resource::ip { 'internal_api_vip':
334 ip_address => $internal_api_vip,
335 cidr_netmask => $internal_api_vip_netmask,
337 pacemaker::constraint::base { 'internal_api_vip-then-haproxy':
338 constraint_type => 'order',
339 first_resource => "ip-${internal_api_vip}",
340 second_resource => 'haproxy-clone',
341 first_action => 'start',
342 second_action => 'start',
343 constraint_params => 'kind=Optional',
344 require => [Pacemaker::Resource::Service['haproxy'],
345 Pacemaker::Resource::Ip['internal_api_vip']],
347 pacemaker::constraint::colocation { 'internal_api_vip-with-haproxy':
348 source => "ip-${internal_api_vip}",
349 target => 'haproxy-clone',
351 require => [Pacemaker::Resource::Service['haproxy'],
352 Pacemaker::Resource::Ip['internal_api_vip']],
# Storage network VIP.
356 $storage_vip = hiera('tripleo::loadbalancer::storage_virtual_ip')
357 if is_ipv6_address($storage_vip) {
358 $storage_vip_netmask = '64'
360 $storage_vip_netmask = '32'
362 if $storage_vip and $storage_vip != $control_vip {
363 pacemaker::resource::ip { 'storage_vip':
364 ip_address => $storage_vip,
365 cidr_netmask => $storage_vip_netmask,
367 pacemaker::constraint::base { 'storage_vip-then-haproxy':
368 constraint_type => 'order',
369 first_resource => "ip-${storage_vip}",
370 second_resource => 'haproxy-clone',
371 first_action => 'start',
372 second_action => 'start',
373 constraint_params => 'kind=Optional',
374 require => [Pacemaker::Resource::Service['haproxy'],
375 Pacemaker::Resource::Ip['storage_vip']],
377 pacemaker::constraint::colocation { 'storage_vip-with-haproxy':
378 source => "ip-${storage_vip}",
379 target => 'haproxy-clone',
381 require => [Pacemaker::Resource::Service['haproxy'],
382 Pacemaker::Resource::Ip['storage_vip']],
# Storage management network VIP.
386 $storage_mgmt_vip = hiera('tripleo::loadbalancer::storage_mgmt_virtual_ip')
387 if is_ipv6_address($storage_mgmt_vip) {
388 $storage_mgmt_vip_netmask = '64'
390 $storage_mgmt_vip_netmask = '32'
392 if $storage_mgmt_vip and $storage_mgmt_vip != $control_vip {
393 pacemaker::resource::ip { 'storage_mgmt_vip':
394 ip_address => $storage_mgmt_vip,
395 cidr_netmask => $storage_mgmt_vip_netmask,
397 pacemaker::constraint::base { 'storage_mgmt_vip-then-haproxy':
398 constraint_type => 'order',
399 first_resource => "ip-${storage_mgmt_vip}",
400 second_resource => 'haproxy-clone',
401 first_action => 'start',
402 second_action => 'start',
403 constraint_params => 'kind=Optional',
404 require => [Pacemaker::Resource::Service['haproxy'],
405 Pacemaker::Resource::Ip['storage_mgmt_vip']],
407 pacemaker::constraint::colocation { 'storage_mgmt_vip-with-haproxy':
408 source => "ip-${storage_mgmt_vip}",
409 target => 'haproxy-clone',
411 require => [Pacemaker::Resource::Service['haproxy'],
412 Pacemaker::Resource::Ip['storage_mgmt_vip']],
# --- Step 2 (master only, continued): pacemaker-managed core services
# --- (memcached, rabbitmq, mongodb, galera, redis) and, once galera is
# --- reachable, creation of all the OpenStack database schemas.
418 pacemaker::resource::service { $::memcached::params::service_name :
419 clone_params => 'interleave=true',
420 require => Class['::memcached'],
# RabbitMQ is clustered by the rabbitmq-cluster OCF agent; the HA policy
# mirrors all non-amq.* queues across nodes.
423 pacemaker::resource::ocf { 'rabbitmq':
424 ocf_agent_name => 'heartbeat:rabbitmq-cluster',
425 resource_params => 'set_policy=\'ha-all ^(?!amq\.).* {"ha-mode":"all"}\'',
426 clone_params => 'ordered=true interleave=true',
427 meta_params => 'notify=true',
428 require => Class['::rabbitmq'],
431 if downcase(hiera('ceilometer_backend')) == 'mongodb' {
# Longer start timeout (370s): mongod can take a while on first start.
432 pacemaker::resource::service { $::mongodb::params::service_name :
433 op_params => 'start timeout=370s stop timeout=200s',
434 clone_params => true,
435 require => Class['::mongodb::server'],
437 # NOTE (spredzy) : The replset can only be run
438 # once all the nodes have joined the cluster.
439 mongodb_conn_validator { $mongo_node_ips_with_port :
441 require => Pacemaker::Resource::Service[$::mongodb::params::service_name],
442 before => Mongodb_replset[$mongodb_replset],
444 mongodb_replset { $mongodb_replset :
445 members => $mongo_node_ips_with_port_nobr,
# Galera runs as a multi-master OCF clone; master-max equals the node count.
449 pacemaker::resource::ocf { 'galera' :
450 ocf_agent_name => 'heartbeat:galera',
451 op_params => 'promote timeout=300s on-fail=block',
453 meta_params => "master-max=${galera_nodes_count} ordered=true",
454 resource_params => "additional_parameters='--open-files-limit=16384' enable_creation=true wsrep_cluster_address='gcomm://${galera_nodes}'",
455 require => Class['::mysql::server'],
456 before => Exec['galera-ready'],
459 pacemaker::resource::ocf { 'redis':
460 ocf_agent_name => 'heartbeat:redis',
462 meta_params => 'notify=true ordered=true interleave=true',
463 resource_params => 'wait_last_known_master=true',
464 require => Class['::redis'],
# Gate: poll clustercheck until galera reports healthy; schema creation
# below depends on this exec.
469 exec { 'galera-ready' :
470 command => '/usr/bin/clustercheck >/dev/null',
474 environment => ['AVAILABLE_WHEN_READONLY=0'],
475 require => File['/etc/sysconfig/clustercheck'],
478 file { '/etc/sysconfig/clustercheck' :
480 content => "MYSQL_USERNAME=root\n
482 MYSQL_HOST=localhost\n",
# Expose clustercheck over xinetd so haproxy can health-check galera.
485 xinetd::service { 'galera-monitor' :
487 server => '/usr/bin/clustercheck',
488 per_source => 'UNLIMITED',
489 log_on_success => '',
490 log_on_failure => 'HOST',
492 service_type => 'UNLISTED',
495 require => File['/etc/sysconfig/clustercheck'],
498 # Create all the database schemas
500 class { '::keystone::db::mysql':
501 require => Exec['galera-ready'],
503 class { '::glance::db::mysql':
504 require => Exec['galera-ready'],
506 class { '::nova::db::mysql':
507 require => Exec['galera-ready'],
509 class { '::nova::db::mysql_api':
510 require => Exec['galera-ready'],
512 class { '::neutron::db::mysql':
513 require => Exec['galera-ready'],
515 class { '::cinder::db::mysql':
516 require => Exec['galera-ready'],
518 class { '::heat::db::mysql':
519 require => Exec['galera-ready'],
522 if downcase(hiera('ceilometer_backend')) == 'mysql' {
523 class { '::ceilometer::db::mysql':
524 require => Exec['galera-ready'],
528 class { '::sahara::db::mysql':
529 require => Exec['galera-ready'],
# --- Step 2 (all nodes): swift pre-install and optional Ceph mon/OSD/client.
533 # pre-install swift here so we can build rings
# Ceph is enabled when there are dedicated ceph storage nodes OR colocated
# ceph storage on controllers was requested.
537 $enable_ceph = hiera('ceph_storage_count', 0) > 0 or hiera('enable_ceph_storage', false)
540 $mon_initial_members = downcase(hiera('ceph_mon_initial_members'))
541 if str2bool(hiera('ceph_ipv6', false)) {
542 $mon_host = hiera('ceph_mon_host_v6')
544 $mon_host = hiera('ceph_mon_host')
546 class { '::ceph::profile::params':
547 mon_initial_members => $mon_initial_members,
548 mon_host => $mon_host,
551 include ::ceph::profile::mon
554 if str2bool(hiera('enable_ceph_storage', false)) {
# ceph-osd historically needs SELinux permissive; make the change both
# persistent (config file) and immediate (setenforce), before the OSDs.
555 if str2bool(hiera('ceph_osd_selinux_permissive', true)) {
556 exec { 'set selinux to permissive on boot':
557 command => "sed -ie 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config",
558 onlyif => "test -f /etc/selinux/config && ! grep '^SELINUX=permissive' /etc/selinux/config",
559 path => ['/usr/bin', '/usr/sbin'],
562 exec { 'set selinux to permissive':
563 command => 'setenforce 0',
564 onlyif => "which setenforce && getenforce | grep -i 'enforcing'",
565 path => ['/usr/bin', '/usr/sbin'],
566 } -> Class['ceph::profile::osd']
570 include ::ceph::profile::osd
# External (unmanaged) ceph cluster: only the client profile is configured.
573 if str2bool(hiera('enable_external_ceph', false)) {
574 if str2bool(hiera('ceph_ipv6', false)) {
575 $mon_host = hiera('ceph_mon_host_v6')
577 $mon_host = hiera('ceph_mon_host')
579 class { '::ceph::profile::params':
580 mon_host => $mon_host,
583 include ::ceph::profile::client
# --- Step 3: configure OpenStack services. Services stay pacemaker-managed
# --- (manage_service => false); keystone bootstrap runs on the master only.
589 if hiera('step') >= 3 {
591 class { '::keystone':
593 manage_service => false,
595 enable_bootstrap => $pacemaker_master,
597 include ::keystone::config
599 #TODO: need a cleanup-keystone-tokens.sh solution here
# PKI signing material is delivered via hiera and laid out under
# /etc/keystone/ssl; keystone is restarted when any of it changes.
601 file { [ '/etc/keystone/ssl', '/etc/keystone/ssl/certs', '/etc/keystone/ssl/private' ]:
602 ensure => 'directory',
605 require => Package['keystone'],
607 file { '/etc/keystone/ssl/certs/signing_cert.pem':
608 content => hiera('keystone_signing_certificate'),
611 notify => Service['keystone'],
612 require => File['/etc/keystone/ssl/certs'],
614 file { '/etc/keystone/ssl/private/signing_key.pem':
615 content => hiera('keystone_signing_key'),
618 notify => Service['keystone'],
619 require => File['/etc/keystone/ssl/private'],
621 file { '/etc/keystone/ssl/certs/ca.pem':
622 content => hiera('keystone_ca_certificate'),
625 notify => Service['keystone'],
626 require => File['/etc/keystone/ssl/certs'],
# Map the hiera backend name onto the glance store class; unknown values
# fail the catalog compile rather than silently misconfiguring glance.
629 $glance_backend = downcase(hiera('glance_backend', 'swift'))
630 case $glance_backend {
631 'swift': { $backend_store = 'glance.store.swift.Store' }
632 'file': { $backend_store = 'glance.store.filesystem.Store' }
633 'rbd': { $backend_store = 'glance.store.rbd.Store' }
634 default: { fail('Unrecognized glance_backend parameter.') }
636 $http_store = ['glance.store.http.Store']
637 $glance_store = concat($http_store, $backend_store)
# Optional pacemaker-managed shared filesystem for the 'file' backend,
# mounted with the glance SELinux context.
639 if $glance_backend == 'file' and hiera('glance_file_pcmk_manage', false) {
640 $secontext = 'context="system_u:object_r:glance_var_lib_t:s0"'
641 pacemaker::resource::filesystem { 'glance-fs':
642 device => hiera('glance_file_pcmk_device'),
643 directory => hiera('glance_file_pcmk_directory'),
644 fstype => hiera('glance_file_pcmk_fstype'),
645 fsoptions => join([$secontext, hiera('glance_file_pcmk_options', '')],','),
650 # TODO: notifications, scrubber, etc.
652 include ::glance::config
653 class { '::glance::api':
654 known_stores => $glance_store,
655 manage_service => false,
658 class { '::glance::registry' :
660 manage_service => false,
663 include ::glance::notify::rabbitmq
# Pull in the backend-specific class, e.g. ::glance::backend::swift.
664 include join(['::glance::backend::', $glance_backend])
# --- Step 3: nova services (all unmanaged; pacemaker starts them later).
666 $nova_ipv6 = hiera('nova::use_ipv6', false)
# memcached endpoints come from a different hiera key under IPv6.
668 $memcached_servers = suffix(hiera('memcache_node_ips_v6'), ':11211')
670 $memcached_servers = suffix(hiera('memcache_node_ips'), ':11211')
674 memcached_servers => $memcached_servers
677 include ::nova::config
679 class { '::nova::api' :
681 sync_db_api => $sync_db,
682 manage_service => false,
685 class { '::nova::cert' :
686 manage_service => false,
689 class { '::nova::conductor' :
690 manage_service => false,
693 class { '::nova::consoleauth' :
694 manage_service => false,
697 class { '::nova::vncproxy' :
698 manage_service => false,
701 include ::nova::scheduler::filter
702 class { '::nova::scheduler' :
703 manage_service => false,
706 include ::nova::network::neutron
# --- Step 3: MidoNet support — zookeeper/cassandra backing services and the
# --- midonet agent/API, only when midonet is the neutron core plugin.
708 if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
710 # TODO(devvesa) provide non-controller ips for these services
711 $zookeeper_node_ips = hiera('neutron_api_node_ips')
712 $cassandra_node_ips = hiera('neutron_api_node_ips')
714 # Run zookeeper in the controller if configured
715 if hiera('enable_zookeeper_on_controller') {
716 class {'::tripleo::cluster::zookeeper':
717 zookeeper_server_ips => $zookeeper_node_ips,
718 # TODO: create a 'bind' hiera key for zookeeper
719 zookeeper_client_ip => hiera('neutron::bind_host'),
720 zookeeper_hostnames => split(hiera('controller_node_names'), ',')
724 # Run cassandra in the controller if configured
725 if hiera('enable_cassandra_on_controller') {
726 class {'::tripleo::cluster::cassandra':
727 cassandra_servers => $cassandra_node_ips,
728 # TODO: create a 'bind' hiera key for cassandra
729 cassandra_ip => hiera('neutron::bind_host'),
733 class {'::tripleo::network::midonet::agent':
734 zookeeper_servers => $zookeeper_node_ips,
735 cassandra_seeds => $cassandra_node_ips
# The midonet API authenticates against keystone through the public VIP.
738 class {'::tripleo::network::midonet::api':
739 zookeeper_servers => $zookeeper_node_ips,
740 vip => hiera('tripleo::loadbalancer::public_virtual_ip'),
741 keystone_ip => hiera('tripleo::loadbalancer::public_virtual_ip'),
742 keystone_admin_token => hiera('keystone::admin_token'),
743 # TODO: create a 'bind' hiera key for api
744 bind_address => hiera('neutron::bind_host'),
745 admin_password => hiera('admin_password')
755 # Neutron class definitions
759 include ::neutron::config
760 class { '::neutron::server' :
762 manage_service => false,
765 include ::neutron::server::notifications
766 if hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' {
767 include ::neutron::plugins::nuage
769 if hiera('neutron::core_plugin') == 'neutron_plugin_contrail.plugins.opencontrail.contrail_plugin.NeutronPluginContrailCoreV2' {
770 include ::neutron::plugins::opencontrail
772 if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
773 class {'::neutron::plugins::midonet':
774 midonet_api_ip => hiera('tripleo::loadbalancer::public_virtual_ip'),
775 keystone_tenant => hiera('neutron::server::auth_tenant'),
776 keystone_password => hiera('neutron::server::auth_password')
779 if hiera('neutron::enable_dhcp_agent',true) {
780 class { '::neutron::agents::dhcp' :
781 manage_service => false,
784 file { '/etc/neutron/dnsmasq-neutron.conf':
785 content => hiera('neutron_dnsmasq_options'),
788 notify => Service['neutron-dhcp-service'],
789 require => Package['neutron'],
792 if hiera('neutron::enable_l3_agent',true) {
793 class { '::neutron::agents::l3' :
794 manage_service => false,
798 if hiera('neutron::enable_metadata_agent',true) {
799 class { '::neutron::agents::metadata':
800 manage_service => false,
804 include ::neutron::plugins::ml2
805 class { '::neutron::agents::ml2::ovs':
806 manage_service => false,
810 if 'cisco_ucsm' in hiera('neutron::plugins::ml2::mechanism_drivers') {
811 include ::neutron::plugins::ml2::cisco::ucsm
813 if 'cisco_nexus' in hiera('neutron::plugins::ml2::mechanism_drivers') {
814 include ::neutron::plugins::ml2::cisco::nexus
815 include ::neutron::plugins::ml2::cisco::type_nexus_vxlan
817 if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
818 include ::neutron::plugins::ml2::cisco::nexus1000v
820 class { '::neutron::agents::n1kv_vem':
821 n1kv_source => hiera('n1kv_vem_source', undef),
822 n1kv_version => hiera('n1kv_vem_version', undef),
826 n1kv_source => hiera('n1kv_vsm_source', undef),
827 n1kv_version => hiera('n1kv_vsm_version', undef),
831 if 'bsn_ml2' in hiera('neutron::plugins::ml2::mechanism_drivers') {
832 include ::neutron::plugins::ml2::bigswitch::restproxy
833 include ::neutron::agents::bigswitch
835 neutron_l3_agent_config {
836 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false);
838 neutron_dhcp_agent_config {
839 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false);
842 'DEFAULT/notification_driver': value => 'messaging';
# --- Step 3: cinder API/scheduler/volume plus the iSCSI and RBD backends.
846 include ::cinder::config
847 include ::tripleo::ssl::cinder_config
848 class { '::cinder::api':
850 manage_service => false,
853 class { '::cinder::scheduler' :
854 manage_service => false,
857 class { '::cinder::volume' :
858 manage_service => false,
861 include ::cinder::glance
862 include ::cinder::ceilometer
# Loopback LVM test volume, sized from hiera (value in MB).
863 class { '::cinder::setup_test_volume':
864 size => join([hiera('cinder_lvm_loop_device_size'), 'M']),
867 $cinder_enable_iscsi = hiera('cinder_enable_iscsi_backend', true)
868 if $cinder_enable_iscsi {
869 $cinder_iscsi_backend = 'tripleo_iscsi'
871 cinder::backend::iscsi { $cinder_iscsi_backend :
872 iscsi_ip_address => hiera('cinder_iscsi_ip_address'),
873 iscsi_helper => hiera('cinder_iscsi_helper'),
# Ceph pools are created with the cluster-wide defaults; the cinder RBD
# backend then requires its pool (only when this node manages ceph).
879 $ceph_pools = hiera('ceph_pools')
880 ceph::pool { $ceph_pools :
881 pg_num => hiera('ceph::profile::params::osd_pool_default_pg_num'),
882 pgp_num => hiera('ceph::profile::params::osd_pool_default_pgp_num'),
883 size => hiera('ceph::profile::params::osd_pool_default_size'),
886 $cinder_pool_requires = [Ceph::Pool[hiera('cinder_rbd_pool_name')]]
889 $cinder_pool_requires = []
892 if hiera('cinder_enable_rbd_backend', false) {
893 $cinder_rbd_backend = 'tripleo_ceph'
895 cinder::backend::rbd { $cinder_rbd_backend :
896 rbd_pool => hiera('cinder_rbd_pool_name'),
897 rbd_user => hiera('ceph_client_user_name'),
898 rbd_secret_uuid => hiera('ceph::profile::params::fsid'),
899 require => $cinder_pool_requires,
# --- Step 3: optional Dell EqualLogic (eqlx) cinder backend.
# All driver settings are passed straight through from hiera under the
# 'cinder::backend::eqlx::*' namespace; each defaults to undef so the
# puppet-cinder module's own defaults apply when a key is unset.
903 if hiera('cinder_enable_eqlx_backend', false) {
904 $cinder_eqlx_backend = hiera('cinder::backend::eqlx::volume_backend_name')
906 cinder::backend::eqlx { $cinder_eqlx_backend :
907 volume_backend_name => hiera('cinder::backend::eqlx::volume_backend_name', undef),
908 san_ip => hiera('cinder::backend::eqlx::san_ip', undef),
909 san_login => hiera('cinder::backend::eqlx::san_login', undef),
910 san_password => hiera('cinder::backend::eqlx::san_password', undef),
911 san_thin_provision => hiera('cinder::backend::eqlx::san_thin_provision', undef),
912 eqlx_group_name => hiera('cinder::backend::eqlx::eqlx_group_name', undef),
913 eqlx_pool => hiera('cinder::backend::eqlx::eqlx_pool', undef),
914 eqlx_use_chap => hiera('cinder::backend::eqlx::eqlx_use_chap', undef),
915 eqlx_chap_login => hiera('cinder::backend::eqlx::eqlx_chap_login', undef),
# BUGFIX: eqlx_chap_password was previously read from the
# '...::eqlx_san_password' hiera key (a copy-paste of the SAN password
# lookup), making it impossible to set a distinct CHAP password. It must
# come from '...::eqlx_chap_password', matching the parameter name and the
# pattern used by every other lookup in this resource.
916 eqlx_chap_password => hiera('cinder::backend::eqlx::eqlx_chap_password', undef),
# --- Step 3: remaining optional cinder backends (Dell SC, NetApp, NFS) and
# --- the final enabled-backends list.
920 if hiera('cinder_enable_dellsc_backend', false) {
921 $cinder_dellsc_backend = hiera('cinder::backend::dellsc_iscsi::volume_backend_name')
923 cinder::backend::dellsc_iscsi{ $cinder_dellsc_backend :
924 volume_backend_name => hiera('cinder::backend::dellsc_iscsi::volume_backend_name', undef),
925 san_ip => hiera('cinder::backend::dellsc_iscsi::san_ip', undef),
926 san_login => hiera('cinder::backend::dellsc_iscsi::san_login', undef),
927 san_password => hiera('cinder::backend::dellsc_iscsi::san_password', undef),
928 dell_sc_ssn => hiera('cinder::backend::dellsc_iscsi::dell_sc_ssn', undef),
929 iscsi_ip_address => hiera('cinder::backend::dellsc_iscsi::iscsi_ip_address', undef),
930 iscsi_port => hiera('cinder::backend::dellsc_iscsi::iscsi_port', undef),
931 dell_sc_api_port => hiera('cinder::backend::dellsc_iscsi::dell_sc_api_port', undef),
932 dell_sc_server_folder => hiera('cinder::backend::dellsc_iscsi::dell_sc_server_folder', undef),
933 dell_sc_volume_folder => hiera('cinder::backend::dellsc_iscsi::dell_sc_volume_folder', undef),
937 if hiera('cinder_enable_netapp_backend', false) {
938 $cinder_netapp_backend = hiera('cinder::backend::netapp::title')
# nfs_shares is a comma-separated string in hiera; split it into the array
# form the netapp backend expects.
940 if hiera('cinder::backend::netapp::nfs_shares', undef) {
941 $cinder_netapp_nfs_shares = split(hiera('cinder::backend::netapp::nfs_shares', undef), ',')
944 cinder::backend::netapp { $cinder_netapp_backend :
945 netapp_login => hiera('cinder::backend::netapp::netapp_login', undef),
946 netapp_password => hiera('cinder::backend::netapp::netapp_password', undef),
947 netapp_server_hostname => hiera('cinder::backend::netapp::netapp_server_hostname', undef),
948 netapp_server_port => hiera('cinder::backend::netapp::netapp_server_port', undef),
949 netapp_size_multiplier => hiera('cinder::backend::netapp::netapp_size_multiplier', undef),
950 netapp_storage_family => hiera('cinder::backend::netapp::netapp_storage_family', undef),
951 netapp_storage_protocol => hiera('cinder::backend::netapp::netapp_storage_protocol', undef),
952 netapp_transport_type => hiera('cinder::backend::netapp::netapp_transport_type', undef),
953 netapp_vfiler => hiera('cinder::backend::netapp::netapp_vfiler', undef),
954 netapp_volume_list => hiera('cinder::backend::netapp::netapp_volume_list', undef),
955 netapp_vserver => hiera('cinder::backend::netapp::netapp_vserver', undef),
956 netapp_partner_backend_name => hiera('cinder::backend::netapp::netapp_partner_backend_name', undef),
957 nfs_shares => $cinder_netapp_nfs_shares,
958 nfs_shares_config => hiera('cinder::backend::netapp::nfs_shares_config', undef),
959 netapp_copyoffload_tool_path => hiera('cinder::backend::netapp::netapp_copyoffload_tool_path', undef),
960 netapp_controller_ips => hiera('cinder::backend::netapp::netapp_controller_ips', undef),
961 netapp_sa_password => hiera('cinder::backend::netapp::netapp_sa_password', undef),
962 netapp_storage_pools => hiera('cinder::backend::netapp::netapp_storage_pools', undef),
963 netapp_eseries_host_type => hiera('cinder::backend::netapp::netapp_eseries_host_type', undef),
964 netapp_webservice_path => hiera('cinder::backend::netapp::netapp_webservice_path', undef),
968 if hiera('cinder_enable_nfs_backend', false) {
969 $cinder_nfs_backend = 'tripleo_nfs'
# SELinux must allow libvirt to use NFS-backed volumes before nfs-utils
# is installed/used by the backend.
971 if str2bool($::selinux) {
972 selboolean { 'virt_use_nfs':
975 } -> Package['nfs-utils']
978 package { 'nfs-utils': } ->
979 cinder::backend::nfs { $cinder_nfs_backend:
980 nfs_servers => hiera('cinder_nfs_servers'),
981 nfs_mount_options => hiera('cinder_nfs_mount_options',''),
982 nfs_shares_config => '/etc/cinder/shares-nfs.conf',
# Collapse the per-backend variables (undef when a backend is disabled)
# and merge in any operator-supplied backends from hiera.
986 $cinder_enabled_backends = delete_undef_values([$cinder_iscsi_backend, $cinder_rbd_backend, $cinder_eqlx_backend, $cinder_dellsc_backend, $cinder_netapp_backend, $cinder_nfs_backend])
987 class { '::cinder::backends' :
988 enabled_backends => union($cinder_enabled_backends, hiera('cinder_user_enabled_backends')),
# --- Step 3: sahara services and swift proxy/storage. Swift services use
# --- $non_pcmk_start (step >= 4) since they are not pacemaker-managed.
994 class { '::sahara::service::api':
995 manage_service => false,
998 class { '::sahara::service::engine':
999 manage_service => false,
1004 class { '::swift::proxy' :
1005 manage_service => $non_pcmk_start,
1006 enabled => $non_pcmk_start,
1008 include ::swift::proxy::proxy_logging
1009 include ::swift::proxy::healthcheck
1010 include ::swift::proxy::cache
1011 include ::swift::proxy::keystone
1012 include ::swift::proxy::authtoken
1013 include ::swift::proxy::staticweb
1014 include ::swift::proxy::ratelimit
1015 include ::swift::proxy::catch_errors
1016 include ::swift::proxy::tempurl
1017 include ::swift::proxy::formpost
1020 if str2bool(hiera('enable_swift_storage', true)) {
1021 class {'::swift::storage::all':
1022 mount_check => str2bool(hiera('swift_mount_check')),
1024 class {'::swift::storage::account':
1025 manage_service => $non_pcmk_start,
1026 enabled => $non_pcmk_start,
1028 class {'::swift::storage::container':
1029 manage_service => $non_pcmk_start,
1030 enabled => $non_pcmk_start,
1032 class {'::swift::storage::object':
1033 manage_service => $non_pcmk_start,
1034 enabled => $non_pcmk_start,
# Guard against a duplicate declaration of /srv/node from swift classes.
1036 if(!defined(File['/srv/node'])) {
1038 ensure => directory,
1041 require => Package['openstack-swift'],
# recon + healthcheck middleware for each of the three storage services.
1044 $swift_components = ['account', 'container', 'object']
1045 swift::storage::filter::recon { $swift_components : }
1046 swift::storage::filter::healthcheck { $swift_components : }
# --- Ceilometer ---------------------------------------------------------
# Pick the metering database connection string based on the configured
# backend: a MySQL conn string from hiera, or a mongodb:// URL built from
# the mongo node list and replica set name. (The case alternatives'
# selector lines are elided from this excerpt.)
1050 case downcase(hiera('ceilometer_backend')) {
1052 $ceilometer_database_connection = hiera('ceilometer_mysql_conn_string')
1055 $mongo_node_string = join($mongo_node_ips_with_port, ',')
1056 $ceilometer_database_connection = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}"
1059 include ::ceilometer
1060 include ::ceilometer::config
# All ceilometer services are left unmanaged by Puppet
# (manage_service => false) — Pacemaker resources for them are declared
# later in this manifest.
1061 class { '::ceilometer::api' :
1062 manage_service => false,
1065 class { '::ceilometer::agent::notification' :
1066 manage_service => false,
1069 class { '::ceilometer::agent::central' :
1070 manage_service => false,
1073 class { '::ceilometer::collector' :
1074 manage_service => false,
1077 include ::ceilometer::expirer
1078 class { '::ceilometer::db' :
1079 database_connection => $ceilometer_database_connection,
1080 sync_db => $sync_db,
1082 include ::ceilometer::agent::auth
# Override the expirer cron command to sleep a random amount (0-86399s,
# derived from /dev/urandom) so all controllers do not expire at once.
1084 Cron <| title == 'ceilometer-expirer' |> { command => "sleep $((\$(od -A n -t d -N 3 /dev/urandom) % 86400)) && ${::ceilometer::params::expirer_command}" }
# --- Heat, Apache/Horizon, SNMP -----------------------------------------
# Heat services configured but not started by Puppet (Pacemaker-managed;
# see the heat pacemaker constraints later in this file).
1087 include ::heat::config
1089 sync_db => $sync_db,
1090 notification_driver => 'messaging',
1092 class { '::heat::api' :
1093 manage_service => false,
1096 class { '::heat::api_cfn' :
1097 manage_service => false,
1100 class { '::heat::api_cloudwatch' :
1101 manage_service => false,
1104 class { '::heat::engine' :
1105 manage_service => false,
1109 # httpd/apache and horizon
1110 # NOTE(gfidente): server-status can be consumed by the pacemaker resource agent
1111 class { '::apache' :
1112 service_enable => false,
1113 # service_manage => false, # <-- not supported with horizon&apache mod_wsgi?
1115 include ::keystone::wsgi::apache
1116 include ::apache::mod::status
# Horizon's neutron panel profile support depends on whether the Cisco
# N1KV mechanism driver is enabled.
1117 if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
1118 $_profile_support = 'cisco'
1120 $_profile_support = 'None'
1122 $neutron_options = {'profile_support' => $_profile_support }
1123 class { '::horizon':
1124 cache_server_ip => hiera('memcache_node_ips', '127.0.0.1'),
1125 neutron_options => $neutron_options,
# Read-only SNMPv3 user plus snmpd configuration (agent addresses,
# access rules, disk/proc monitors).
1128 $snmpd_user = hiera('snmpd_readonly_user_name')
1129 snmp::snmpv3_user { $snmpd_user:
1131 authpass => hiera('snmpd_readonly_user_password'),
1134 agentaddress => ['udp:161','udp6:[::1]:161'],
1135 snmpd_config => [ join(['createUser ', hiera('snmpd_readonly_user_name'), ' MD5 "', hiera('snmpd_readonly_user_password'), '"']), join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ],
# Deployer-supplied extra classes for this role.
1138 hiera_include('controller_classes')
# --- Step 4: periodic database maintenance ------------------------------
# Each service's purge/flush cron is individually controllable through a
# '<service>_enable_db_purge' hiera flag, all defaulting to enabled.
1142 if hiera('step') >= 4 {
1143 $keystone_enable_db_purge = hiera('keystone_enable_db_purge', true)
1144 $nova_enable_db_purge = hiera('nova_enable_db_purge', true)
1145 $cinder_enable_db_purge = hiera('cinder_enable_db_purge', true)
1146 $heat_enable_db_purge = hiera('heat_enable_db_purge', true)
1148 if $keystone_enable_db_purge {
1149 include ::keystone::cron::token_flush
1151 if $nova_enable_db_purge {
1152 include ::nova::cron::archive_deleted_rows
1154 if $cinder_enable_db_purge {
1155 include ::cinder::cron::db_purge
1157 if $heat_enable_db_purge {
1158 include ::heat::cron::purge_deleted
# --- Pacemaker base constraints (bootstrap node only) -------------------
# Constraints are created once, on the node elected bootstrap/master (see
# HEAD: $pacemaker_master). They order the core infrastructure:
#   haproxy / rabbitmq / memcached / galera  -->  openstack-core  -->  httpd
1161 if $pacemaker_master {
1163 if $enable_load_balancer {
# haproxy must be started before openstack-core.
1164 pacemaker::constraint::base { 'haproxy-then-keystone-constraint':
1165 constraint_type => 'order',
1166 first_resource => 'haproxy-clone',
1167 second_resource => 'openstack-core-clone',
1168 first_action => 'start',
1169 second_action => 'start',
1170 require => [Pacemaker::Resource::Service['haproxy'],
1171 Pacemaker::Resource::Ocf['openstack-core']],
# openstack-core before apache (service name resolved from apache params).
1175 pacemaker::constraint::base { 'openstack-core-then-httpd-constraint':
1176 constraint_type => 'order',
1177 first_resource => 'openstack-core-clone',
1178 second_resource => "${::apache::params::service_name}-clone",
1179 first_action => 'start',
1180 second_action => 'start',
1181 require => [Pacemaker::Resource::Service[$::apache::params::service_name],
1182 Pacemaker::Resource::Ocf['openstack-core']],
1184 pacemaker::constraint::base { 'rabbitmq-then-keystone-constraint':
1185 constraint_type => 'order',
1186 first_resource => 'rabbitmq-clone',
1187 second_resource => 'openstack-core-clone',
1188 first_action => 'start',
1189 second_action => 'start',
1190 require => [Pacemaker::Resource::Ocf['rabbitmq'],
1191 Pacemaker::Resource::Ocf['openstack-core']],
1193 pacemaker::constraint::base { 'memcached-then-openstack-core-constraint':
1194 constraint_type => 'order',
1195 first_resource => 'memcached-clone',
1196 second_resource => 'openstack-core-clone',
1197 first_action => 'start',
1198 second_action => 'start',
1199 require => [Pacemaker::Resource::Service['memcached'],
1200 Pacemaker::Resource::Ocf['openstack-core']],
# galera is a master/slave resource: wait for 'promote', not 'start'.
1202 pacemaker::constraint::base { 'galera-then-openstack-core-constraint':
1203 constraint_type => 'order',
1204 first_resource => 'galera-master',
1205 second_resource => 'openstack-core-clone',
1206 first_action => 'promote',
1207 second_action => 'start',
1208 require => [Pacemaker::Resource::Ocf['galera'],
1209 Pacemaker::Resource::Ocf['openstack-core']],
# --- Cinder under Pacemaker ---------------------------------------------
# Resources: api (clone), scheduler (clone), volume (non-cloned — note no
# clone_params and the bare resource name used in constraints below,
# i.e. presumably active on a single node; confirm with the deployment
# architecture docs).
# Chain: keystone -> cinder-api -> cinder-scheduler -> cinder-volume,
# with scheduler colocated with api and volume colocated with scheduler.
1213 pacemaker::resource::service { $::cinder::params::api_service :
1214 clone_params => 'interleave=true',
1215 require => Pacemaker::Resource::Ocf['openstack-core'],
1217 pacemaker::resource::service { $::cinder::params::scheduler_service :
1218 clone_params => 'interleave=true',
1220 pacemaker::resource::service { $::cinder::params::volume_service : }
1222 pacemaker::constraint::base { 'keystone-then-cinder-api-constraint':
1223 constraint_type => 'order',
1224 first_resource => 'openstack-core-clone',
1225 second_resource => "${::cinder::params::api_service}-clone",
1226 first_action => 'start',
1227 second_action => 'start',
1228 require => [Pacemaker::Resource::Ocf['openstack-core'],
1229 Pacemaker::Resource::Service[$::cinder::params::api_service]],
1231 pacemaker::constraint::base { 'cinder-api-then-cinder-scheduler-constraint':
1232 constraint_type => 'order',
1233 first_resource => "${::cinder::params::api_service}-clone",
1234 second_resource => "${::cinder::params::scheduler_service}-clone",
1235 first_action => 'start',
1236 second_action => 'start',
1237 require => [Pacemaker::Resource::Service[$::cinder::params::api_service],
1238 Pacemaker::Resource::Service[$::cinder::params::scheduler_service]],
1240 pacemaker::constraint::colocation { 'cinder-scheduler-with-cinder-api-colocation':
1241 source => "${::cinder::params::scheduler_service}-clone",
1242 target => "${::cinder::params::api_service}-clone",
1243 score => 'INFINITY',
1244 require => [Pacemaker::Resource::Service[$::cinder::params::api_service],
1245 Pacemaker::Resource::Service[$::cinder::params::scheduler_service]],
1247 pacemaker::constraint::base { 'cinder-scheduler-then-cinder-volume-constraint':
1248 constraint_type => 'order',
1249 first_resource => "${::cinder::params::scheduler_service}-clone",
1250 second_resource => $::cinder::params::volume_service,
1251 first_action => 'start',
1252 second_action => 'start',
1253 require => [Pacemaker::Resource::Service[$::cinder::params::scheduler_service],
1254 Pacemaker::Resource::Service[$::cinder::params::volume_service]],
1256 pacemaker::constraint::colocation { 'cinder-volume-with-cinder-scheduler-colocation':
1257 source => $::cinder::params::volume_service,
1258 target => "${::cinder::params::scheduler_service}-clone",
1259 score => 'INFINITY',
1260 require => [Pacemaker::Resource::Service[$::cinder::params::scheduler_service],
1261 Pacemaker::Resource::Service[$::cinder::params::volume_service]],
# --- Sahara under Pacemaker: keystone -> sahara-api; engine cloned too.
1265 pacemaker::resource::service { $::sahara::params::api_service_name :
1266 clone_params => 'interleave=true',
1267 require => Pacemaker::Resource::Ocf['openstack-core'],
1269 pacemaker::resource::service { $::sahara::params::engine_service_name :
1270 clone_params => 'interleave=true',
1272 pacemaker::constraint::base { 'keystone-then-sahara-api-constraint':
1273 constraint_type => 'order',
1274 first_resource => 'openstack-core-clone',
1275 second_resource => "${::sahara::params::api_service_name}-clone",
1276 first_action => 'start',
1277 second_action => 'start',
1278 require => [Pacemaker::Resource::Service[$::sahara::params::api_service_name],
1279 Pacemaker::Resource::Ocf['openstack-core']],
# --- Glance under Pacemaker: keystone -> registry -> api, api colocated
# with registry.
1283 pacemaker::resource::service { $::glance::params::registry_service_name :
1284 clone_params => 'interleave=true',
1285 require => Pacemaker::Resource::Ocf['openstack-core'],
1287 pacemaker::resource::service { $::glance::params::api_service_name :
1288 clone_params => 'interleave=true',
1291 pacemaker::constraint::base { 'keystone-then-glance-registry-constraint':
1292 constraint_type => 'order',
1293 first_resource => 'openstack-core-clone',
1294 second_resource => "${::glance::params::registry_service_name}-clone",
1295 first_action => 'start',
1296 second_action => 'start',
1297 require => [Pacemaker::Resource::Service[$::glance::params::registry_service_name],
1298 Pacemaker::Resource::Ocf['openstack-core']],
1300 pacemaker::constraint::base { 'glance-registry-then-glance-api-constraint':
1301 constraint_type => 'order',
1302 first_resource => "${::glance::params::registry_service_name}-clone",
1303 second_resource => "${::glance::params::api_service_name}-clone",
1304 first_action => 'start',
1305 second_action => 'start',
1306 require => [Pacemaker::Resource::Service[$::glance::params::registry_service_name],
1307 Pacemaker::Resource::Service[$::glance::params::api_service_name]],
1309 pacemaker::constraint::colocation { 'glance-api-with-glance-registry-colocation':
1310 source => "${::glance::params::api_service_name}-clone",
1311 target => "${::glance::params::registry_service_name}-clone",
1312 score => 'INFINITY',
1313 require => [Pacemaker::Resource::Service[$::glance::params::registry_service_name],
1314 Pacemaker::Resource::Service[$::glance::params::api_service_name]],
# --- Neutron under Pacemaker --------------------------------------------
# The exec below runs ONLY at step == 4 to work around a neutron-server
# startup race (see the bugzilla links kept in the original comments).
1317 if hiera('step') == 4 {
1319 # NOTE(gfidente): Neutron will try to populate the database with some data
1320 # as soon as neutron-server is started; to avoid races we want to make this
1321 # happen only on one node, before normal Pacemaker initialization
1322 # https://bugzilla.redhat.com/show_bug.cgi?id=1233061
1323 # NOTE(emilien): we need to run this Exec only at Step 4 otherwise this exec
1324 # will try to start the service while it's already started by Pacemaker
1325 # It would result to a deployment failure since systemd would return 1 to Puppet
1326 # and the overcloud would fail to deploy (6 would be returned).
1327 # This conditional prevents from a race condition during the deployment.
1328 # https://bugzilla.redhat.com/show_bug.cgi?id=1290582
1329 exec { 'neutron-server-systemd-start-sleep' :
1330 command => 'systemctl start neutron-server && /usr/bin/sleep 5',
# Skip when the resource is already known to pcs.
1332 unless => '/sbin/pcs resource show neutron-server',
1334 pacemaker::resource::service { $::neutron::params::server_service:
1335 clone_params => 'interleave=true',
1336 require => Pacemaker::Resource::Ocf['openstack-core']
# NOTE(review): this second, identical neutron-server resource sits in a
# different (elided) branch of the surrounding conditional — the original
# line numbers jump from 1336 to 1339, so the intervening else/closing
# lines are not visible here. Confirm against the full manifest.
1339 pacemaker::resource::service { $::neutron::params::server_service:
1340 clone_params => 'interleave=true',
1341 require => Pacemaker::Resource::Ocf['openstack-core']
# Per-agent clones, each gated by its enable flag from hiera.
1344 if hiera('neutron::enable_l3_agent', true) {
1345 pacemaker::resource::service { $::neutron::params::l3_agent_service:
1346 clone_params => 'interleave=true',
1349 if hiera('neutron::enable_dhcp_agent', true) {
1350 pacemaker::resource::service { $::neutron::params::dhcp_agent_service:
1351 clone_params => 'interleave=true',
1354 if hiera('neutron::enable_ovs_agent', true) {
1355 pacemaker::resource::service { $::neutron::params::ovs_agent_service:
1356 clone_params => 'interleave=true',
# MidoNet deployments additionally run tomcat under Pacemaker.
1359 if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
1360 pacemaker::resource::service {'tomcat':
1361 clone_params => 'interleave=true',
1364 if hiera('neutron::enable_metadata_agent', true) {
1365 pacemaker::resource::service { $::neutron::params::metadata_agent_service:
1366 clone_params => 'interleave=true',
# OVS deployments get OCF cleanup agents (OVSCleanup / NetnsCleanup).
1369 if hiera('neutron::enable_ovs_agent', true) {
1370 pacemaker::resource::ocf { $::neutron::params::ovs_cleanup_service:
1371 ocf_agent_name => 'neutron:OVSCleanup',
1372 clone_params => 'interleave=true',
1374 pacemaker::resource::ocf { 'neutron-netns-cleanup':
1375 ocf_agent_name => 'neutron:NetnsCleanup',
1376 clone_params => 'interleave=true',
1379 # neutron - one chain ovs-cleanup-->netns-cleanup-->ovs-agent
1380 pacemaker::constraint::base { 'neutron-ovs-cleanup-to-netns-cleanup-constraint':
1381 constraint_type => 'order',
1382 first_resource => "${::neutron::params::ovs_cleanup_service}-clone",
1383 second_resource => 'neutron-netns-cleanup-clone',
1384 first_action => 'start',
1385 second_action => 'start',
1386 require => [Pacemaker::Resource::Ocf[$::neutron::params::ovs_cleanup_service],
1387 Pacemaker::Resource::Ocf['neutron-netns-cleanup']],
1389 pacemaker::constraint::colocation { 'neutron-ovs-cleanup-to-netns-cleanup-colocation':
1390 source => 'neutron-netns-cleanup-clone',
1391 target => "${::neutron::params::ovs_cleanup_service}-clone",
1392 score => 'INFINITY',
1393 require => [Pacemaker::Resource::Ocf[$::neutron::params::ovs_cleanup_service],
1394 Pacemaker::Resource::Ocf['neutron-netns-cleanup']],
1396 pacemaker::constraint::base { 'neutron-netns-cleanup-to-openvswitch-agent-constraint':
1397 constraint_type => 'order',
1398 first_resource => 'neutron-netns-cleanup-clone',
1399 second_resource => "${::neutron::params::ovs_agent_service}-clone",
1400 first_action => 'start',
1401 second_action => 'start',
1402 require => [Pacemaker::Resource::Ocf['neutron-netns-cleanup'],
1403 Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]],
1405 pacemaker::constraint::colocation { 'neutron-netns-cleanup-to-openvswitch-agent-colocation':
1406 source => "${::neutron::params::ovs_agent_service}-clone",
1407 target => 'neutron-netns-cleanup-clone',
1408 score => 'INFINITY',
1409 require => [Pacemaker::Resource::Ocf['neutron-netns-cleanup'],
1410 Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]],
# Ordering: keystone -> neutron-server, then server -> agents.
1413 pacemaker::constraint::base { 'keystone-to-neutron-server-constraint':
1414 constraint_type => 'order',
1415 first_resource => 'openstack-core-clone',
1416 second_resource => "${::neutron::params::server_service}-clone",
1417 first_action => 'start',
1418 second_action => 'start',
1419 require => [Pacemaker::Resource::Ocf['openstack-core'],
1420 Pacemaker::Resource::Service[$::neutron::params::server_service]],
1422 if hiera('neutron::enable_ovs_agent',true) {
1423 pacemaker::constraint::base { 'neutron-openvswitch-agent-to-dhcp-agent-constraint':
1424 constraint_type => 'order',
1425 first_resource => "${::neutron::params::ovs_agent_service}-clone",
1426 second_resource => "${::neutron::params::dhcp_agent_service}-clone",
1427 first_action => 'start',
1428 second_action => 'start',
1429 require => [Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service],
1430 Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]],
1433 if hiera('neutron::enable_dhcp_agent',true) and hiera('neutron::enable_ovs_agent',true) {
1434 pacemaker::constraint::base { 'neutron-server-to-openvswitch-agent-constraint':
1435 constraint_type => 'order',
1436 first_resource => "${::neutron::params::server_service}-clone",
1437 second_resource => "${::neutron::params::ovs_agent_service}-clone",
1438 first_action => 'start',
1439 second_action => 'start',
1440 require => [Pacemaker::Resource::Service[$::neutron::params::server_service],
1441 Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]],
1444 pacemaker::constraint::colocation { 'neutron-openvswitch-agent-to-dhcp-agent-colocation':
1445 source => "${::neutron::params::dhcp_agent_service}-clone",
1446 target => "${::neutron::params::ovs_agent_service}-clone",
1447 score => 'INFINITY',
1448 require => [Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service],
1449 Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]],
# DHCP agent -> L3 agent ordering and colocation.
# FIX(review): the guard previously read hiera('l3_agent_service',true) —
# 'l3_agent_service' is not a hiera key used anywhere in this manifest;
# every sibling guard uses 'neutron::enable_l3_agent' (see the resource
# declaration guard at original line 1344 and the l3/metadata guard at
# 1470). With the bogus key the lookup always fell back to the default
# 'true', so disabling the l3 agent still created these constraints
# against a nonexistent resource. Corrected to the real enable flag.
1452 if hiera('neutron::enable_dhcp_agent',true) and hiera('neutron::enable_l3_agent',true) {
1453 pacemaker::constraint::base { 'neutron-dhcp-agent-to-l3-agent-constraint':
1454 constraint_type => 'order',
1455 first_resource => "${::neutron::params::dhcp_agent_service}-clone",
1456 second_resource => "${::neutron::params::l3_agent_service}-clone",
1457 first_action => 'start',
1458 second_action => 'start',
1459 require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
1460 Pacemaker::Resource::Service[$::neutron::params::l3_agent_service]]
1462 pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-l3-agent-colocation':
1463 source => "${::neutron::params::l3_agent_service}-clone",
1464 target => "${::neutron::params::dhcp_agent_service}-clone",
1465 score => 'INFINITY',
1466 require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
1467 Pacemaker::Resource::Service[$::neutron::params::l3_agent_service]]
# L3 agent -> metadata agent ordering/colocation, only when both agents
# are enabled.
1470 if hiera('neutron::enable_l3_agent',true) and hiera('neutron::enable_metadata_agent',true) {
1471 pacemaker::constraint::base { 'neutron-l3-agent-to-metadata-agent-constraint':
1472 constraint_type => 'order',
1473 first_resource => "${::neutron::params::l3_agent_service}-clone",
1474 second_resource => "${::neutron::params::metadata_agent_service}-clone",
1475 first_action => 'start',
1476 second_action => 'start',
1477 require => [Pacemaker::Resource::Service[$::neutron::params::l3_agent_service],
1478 Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]]
1480 pacemaker::constraint::colocation { 'neutron-l3-agent-to-metadata-agent-colocation':
1481 source => "${::neutron::params::metadata_agent_service}-clone",
1482 target => "${::neutron::params::l3_agent_service}-clone",
1483 score => 'INFINITY',
1484 require => [Pacemaker::Resource::Service[$::neutron::params::l3_agent_service],
1485 Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]]
# MidoNet-specific startup chain (no OVS/L3 agents in this topology).
1488 if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
1489 #midonet-chain chain keystone-->neutron-server-->dhcp-->metadata->tomcat
1490 pacemaker::constraint::base { 'neutron-server-to-dhcp-agent-constraint':
1491 constraint_type => 'order',
1492 first_resource => "${::neutron::params::server_service}-clone",
1493 second_resource => "${::neutron::params::dhcp_agent_service}-clone",
1494 first_action => 'start',
1495 second_action => 'start',
1496 require => [Pacemaker::Resource::Service[$::neutron::params::server_service],
1497 Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]],
1499 pacemaker::constraint::base { 'neutron-dhcp-agent-to-metadata-agent-constraint':
1500 constraint_type => 'order',
1501 first_resource => "${::neutron::params::dhcp_agent_service}-clone",
1502 second_resource => "${::neutron::params::metadata_agent_service}-clone",
1503 first_action => 'start',
1504 second_action => 'start',
1505 require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
1506 Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]],
1508 pacemaker::constraint::base { 'neutron-metadata-agent-to-tomcat-constraint':
1509 constraint_type => 'order',
1510 first_resource => "${::neutron::params::metadata_agent_service}-clone",
1511 second_resource => 'tomcat-clone',
1512 first_action => 'start',
1513 second_action => 'start',
1514 require => [Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service],
1515 Pacemaker::Resource::Service['tomcat']],
1517 pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-metadata-agent-colocation':
1518 source => "${::neutron::params::metadata_agent_service}-clone",
1519 target => "${::neutron::params::dhcp_agent_service}-clone",
1520 score => 'INFINITY',
1521 require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
1522 Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]],
# --- Nova under Pacemaker -----------------------------------------------
# Clones for api/conductor/consoleauth/vncproxy/scheduler, then one long
# ordered chain with pairwise colocations:
#   keystone -> consoleauth -> vncproxy -> api -> scheduler -> conductor
1527 pacemaker::resource::service { $::nova::params::api_service_name :
1528 clone_params => 'interleave=true',
1530 pacemaker::resource::service { $::nova::params::conductor_service_name :
1531 clone_params => 'interleave=true',
1533 pacemaker::resource::service { $::nova::params::consoleauth_service_name :
1534 clone_params => 'interleave=true',
1535 require => Pacemaker::Resource::Ocf['openstack-core'],
1537 pacemaker::resource::service { $::nova::params::vncproxy_service_name :
1538 clone_params => 'interleave=true',
1540 pacemaker::resource::service { $::nova::params::scheduler_service_name :
1541 clone_params => 'interleave=true',
1544 pacemaker::constraint::base { 'keystone-then-nova-consoleauth-constraint':
1545 constraint_type => 'order',
1546 first_resource => 'openstack-core-clone',
1547 second_resource => "${::nova::params::consoleauth_service_name}-clone",
1548 first_action => 'start',
1549 second_action => 'start',
1550 require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
1551 Pacemaker::Resource::Ocf['openstack-core']],
1553 pacemaker::constraint::base { 'nova-consoleauth-then-nova-vncproxy-constraint':
1554 constraint_type => 'order',
1555 first_resource => "${::nova::params::consoleauth_service_name}-clone",
1556 second_resource => "${::nova::params::vncproxy_service_name}-clone",
1557 first_action => 'start',
1558 second_action => 'start',
1559 require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
1560 Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]],
1562 pacemaker::constraint::colocation { 'nova-vncproxy-with-nova-consoleauth-colocation':
1563 source => "${::nova::params::vncproxy_service_name}-clone",
1564 target => "${::nova::params::consoleauth_service_name}-clone",
1565 score => 'INFINITY',
1566 require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
1567 Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]],
1569 pacemaker::constraint::base { 'nova-vncproxy-then-nova-api-constraint':
1570 constraint_type => 'order',
1571 first_resource => "${::nova::params::vncproxy_service_name}-clone",
1572 second_resource => "${::nova::params::api_service_name}-clone",
1573 first_action => 'start',
1574 second_action => 'start',
1575 require => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name],
1576 Pacemaker::Resource::Service[$::nova::params::api_service_name]],
1578 pacemaker::constraint::colocation { 'nova-api-with-nova-vncproxy-colocation':
1579 source => "${::nova::params::api_service_name}-clone",
1580 target => "${::nova::params::vncproxy_service_name}-clone",
1581 score => 'INFINITY',
1582 require => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name],
1583 Pacemaker::Resource::Service[$::nova::params::api_service_name]],
1585 pacemaker::constraint::base { 'nova-api-then-nova-scheduler-constraint':
1586 constraint_type => 'order',
1587 first_resource => "${::nova::params::api_service_name}-clone",
1588 second_resource => "${::nova::params::scheduler_service_name}-clone",
1589 first_action => 'start',
1590 second_action => 'start',
1591 require => [Pacemaker::Resource::Service[$::nova::params::api_service_name],
1592 Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]],
1594 pacemaker::constraint::colocation { 'nova-scheduler-with-nova-api-colocation':
1595 source => "${::nova::params::scheduler_service_name}-clone",
1596 target => "${::nova::params::api_service_name}-clone",
1597 score => 'INFINITY',
1598 require => [Pacemaker::Resource::Service[$::nova::params::api_service_name],
1599 Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]],
1601 pacemaker::constraint::base { 'nova-scheduler-then-nova-conductor-constraint':
1602 constraint_type => 'order',
1603 first_resource => "${::nova::params::scheduler_service_name}-clone",
1604 second_resource => "${::nova::params::conductor_service_name}-clone",
1605 first_action => 'start',
1606 second_action => 'start',
1607 require => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name],
1608 Pacemaker::Resource::Service[$::nova::params::conductor_service_name]],
1610 pacemaker::constraint::colocation { 'nova-conductor-with-nova-scheduler-colocation':
1611 source => "${::nova::params::conductor_service_name}-clone",
1612 target => "${::nova::params::scheduler_service_name}-clone",
1613 score => 'INFINITY',
1614 require => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name],
1615 Pacemaker::Resource::Service[$::nova::params::conductor_service_name]],
# --- Ceilometer under Pacemaker -----------------------------------------
# The central agent's dependencies differ by backend: the second branch
# additionally requires the mongodb service. (Case selector lines are
# elided in this excerpt — the original line numbers jump 1619 -> 1621.)
1619 case downcase(hiera('ceilometer_backend')) {
1621 pacemaker::resource::service { $::ceilometer::params::agent_central_service_name:
1622 clone_params => 'interleave=true',
1623 require => Pacemaker::Resource::Ocf['openstack-core'],
1627 pacemaker::resource::service { $::ceilometer::params::agent_central_service_name:
1628 clone_params => 'interleave=true',
1629 require => [Pacemaker::Resource::Ocf['openstack-core'],
1630 Pacemaker::Resource::Service[$::mongodb::params::service_name]],
1634 pacemaker::resource::service { $::ceilometer::params::collector_service_name :
1635 clone_params => 'interleave=true',
1637 pacemaker::resource::service { $::ceilometer::params::api_service_name :
1638 clone_params => 'interleave=true',
1640 pacemaker::resource::service { $::ceilometer::params::agent_notification_service_name :
1641 clone_params => 'interleave=true',
# A heartbeat:Delay OCF resource used as a 10s pause after ceilometer-api.
1643 pacemaker::resource::ocf { 'delay' :
1644 ocf_agent_name => 'heartbeat:Delay',
1645 clone_params => 'interleave=true',
1646 resource_params => 'startdelay=10',
1648 # Fedora doesn't know `require-all` parameter for constraints yet
1649 if $::operatingsystem == 'Fedora' {
1650 $redis_ceilometer_constraint_params = undef
1652 $redis_ceilometer_constraint_params = 'require-all=false'
# redis is master/slave: order on 'promote', not 'start'.
1654 pacemaker::constraint::base { 'redis-then-ceilometer-central-constraint':
1655 constraint_type => 'order',
1656 first_resource => 'redis-master',
1657 second_resource => "${::ceilometer::params::agent_central_service_name}-clone",
1658 first_action => 'promote',
1659 second_action => 'start',
1660 constraint_params => $redis_ceilometer_constraint_params,
1661 require => [Pacemaker::Resource::Ocf['redis'],
1662 Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name]],
# Chain: keystone -> central -> collector -> api -> delay, with the
# api/collector and delay/api pairs colocated.
1664 pacemaker::constraint::base { 'keystone-then-ceilometer-central-constraint':
1665 constraint_type => 'order',
1666 first_resource => 'openstack-core-clone',
1667 second_resource => "${::ceilometer::params::agent_central_service_name}-clone",
1668 first_action => 'start',
1669 second_action => 'start',
1670 require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
1671 Pacemaker::Resource::Ocf['openstack-core']],
1673 pacemaker::constraint::base { 'ceilometer-central-then-ceilometer-collector-constraint':
1674 constraint_type => 'order',
1675 first_resource => "${::ceilometer::params::agent_central_service_name}-clone",
1676 second_resource => "${::ceilometer::params::collector_service_name}-clone",
1677 first_action => 'start',
1678 second_action => 'start',
1679 require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
1680 Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name]],
1682 pacemaker::constraint::base { 'ceilometer-collector-then-ceilometer-api-constraint':
1683 constraint_type => 'order',
1684 first_resource => "${::ceilometer::params::collector_service_name}-clone",
1685 second_resource => "${::ceilometer::params::api_service_name}-clone",
1686 first_action => 'start',
1687 second_action => 'start',
1688 require => [Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name],
1689 Pacemaker::Resource::Service[$::ceilometer::params::api_service_name]],
1691 pacemaker::constraint::colocation { 'ceilometer-api-with-ceilometer-collector-colocation':
1692 source => "${::ceilometer::params::api_service_name}-clone",
1693 target => "${::ceilometer::params::collector_service_name}-clone",
1694 score => 'INFINITY',
1695 require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
1696 Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name]],
1698 pacemaker::constraint::base { 'ceilometer-api-then-ceilometer-delay-constraint':
1699 constraint_type => 'order',
1700 first_resource => "${::ceilometer::params::api_service_name}-clone",
1701 second_resource => 'delay-clone',
1702 first_action => 'start',
1703 second_action => 'start',
1704 require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
1705 Pacemaker::Resource::Ocf['delay']],
1707 pacemaker::constraint::colocation { 'ceilometer-delay-with-ceilometer-api-colocation':
1708 source => 'delay-clone',
1709 target => "${::ceilometer::params::api_service_name}-clone",
1710 score => 'INFINITY',
1711 require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
1712 Pacemaker::Resource::Ocf['delay']],
# Mongo backend only: mongod must start before the central agent.
1714 if downcase(hiera('ceilometer_backend')) == 'mongodb' {
1715 pacemaker::constraint::base { 'mongodb-then-ceilometer-central-constraint':
1716 constraint_type => 'order',
1717 first_resource => "${::mongodb::params::service_name}-clone",
1718 second_resource => "${::ceilometer::params::agent_central_service_name}-clone",
1719 first_action => 'start',
1720 second_action => 'start',
1721 require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
1722 Pacemaker::Resource::Service[$::mongodb::params::service_name]],
1727 pacemaker::resource::service { $::heat::params::api_service_name :
1728 clone_params => 'interleave=true',
1730 pacemaker::resource::service { $::heat::params::api_cloudwatch_service_name :
1731 clone_params => 'interleave=true',
1733 pacemaker::resource::service { $::heat::params::api_cfn_service_name :
1734 clone_params => 'interleave=true',
1736 pacemaker::resource::service { $::heat::params::engine_service_name :
1737 clone_params => 'interleave=true',
1739 pacemaker::constraint::base { 'keystone-then-heat-api-constraint':
1740 constraint_type => 'order',
1741 first_resource => 'openstack-core-clone',
1742 second_resource => "${::heat::params::api_service_name}-clone",
1743 first_action => 'start',
1744 second_action => 'start',
1745 require => [Pacemaker::Resource::Service[$::heat::params::api_service_name],
1746 Pacemaker::Resource::Ocf['openstack-core']],
1748 pacemaker::constraint::base { 'heat-api-then-heat-api-cfn-constraint':
1749 constraint_type => 'order',
1750 first_resource => "${::heat::params::api_service_name}-clone",
1751 second_resource => "${::heat::params::api_cfn_service_name}-clone",
1752 first_action => 'start',
1753 second_action => 'start',
1754 require => [Pacemaker::Resource::Service[$::heat::params::api_service_name],
1755 Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name]],
1757 pacemaker::constraint::colocation { 'heat-api-cfn-with-heat-api-colocation':
1758 source => "${::heat::params::api_cfn_service_name}-clone",
1759 target => "${::heat::params::api_service_name}-clone",
1760 score => 'INFINITY',
1761 require => [Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name],
1762 Pacemaker::Resource::Service[$::heat::params::api_service_name]],
# Order constraint: start heat-api-cfn before heat-api-cloudwatch.
pacemaker::constraint::base { 'heat-api-cfn-then-heat-api-cloudwatch-constraint':
  constraint_type => 'order',
  first_resource  => "${::heat::params::api_cfn_service_name}-clone",
  second_resource => "${::heat::params::api_cloudwatch_service_name}-clone",
  first_action    => 'start',
  second_action   => 'start',
  require         => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name],
                      Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name]],
}
# Colocation: pin heat-api-cloudwatch to the same nodes as heat-api-cfn.
pacemaker::constraint::colocation { 'heat-api-cloudwatch-with-heat-api-cfn-colocation':
  source  => "${::heat::params::api_cloudwatch_service_name}-clone",
  target  => "${::heat::params::api_cfn_service_name}-clone",
  score   => 'INFINITY',
  require => [Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name],
              Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name]],
}
# Order constraint: start heat-api-cloudwatch before heat-engine (last link
# of the heat start chain).
pacemaker::constraint::base { 'heat-api-cloudwatch-then-heat-engine-constraint':
  constraint_type => 'order',
  first_resource  => "${::heat::params::api_cloudwatch_service_name}-clone",
  second_resource => "${::heat::params::engine_service_name}-clone",
  first_action    => 'start',
  second_action   => 'start',
  require         => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name],
                      Pacemaker::Resource::Service[$::heat::params::engine_service_name]],
}
# Colocation: pin heat-engine to the same nodes as heat-api-cloudwatch.
pacemaker::constraint::colocation { 'heat-engine-with-heat-api-cloudwatch-colocation':
  source  => "${::heat::params::engine_service_name}-clone",
  target  => "${::heat::params::api_cloudwatch_service_name}-clone",
  score   => 'INFINITY',
  require => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name],
              Pacemaker::Resource::Service[$::heat::params::engine_service_name]],
}
# Order constraint: start the ceilometer notification agent before heat-api.
pacemaker::constraint::base { 'ceilometer-notification-then-heat-api-constraint':
  constraint_type => 'order',
  first_resource  => "${::ceilometer::params::agent_notification_service_name}-clone",
  second_resource => "${::heat::params::api_service_name}-clone",
  first_action    => 'start',
  second_action   => 'start',
  require         => [Pacemaker::Resource::Service[$::heat::params::api_service_name],
                      Pacemaker::Resource::Service[$::ceilometer::params::agent_notification_service_name]],
}
# Horizon and Keystone
# httpd serves both Horizon and Keystone; verify_on_create checks the
# resource actually started, and the File requires ensure the Keystone PKI
# material exists before Apache comes up.
pacemaker::resource::service { $::apache::params::service_name:
  clone_params     => 'interleave=true',
  verify_on_create => true,
  require          => [File['/etc/keystone/ssl/certs/ca.pem'],
                       File['/etc/keystone/ssl/private/signing_key.pem'],
                       File['/etc/keystone/ssl/certs/signing_cert.pem']],
}
# Cisco Nexus 1000v: when its ML2 mechanism driver is enabled, run the
# primary (and optionally secondary) Virtual Supervisor Module VMs as
# Pacemaker-managed VirtualDomain resources.
if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
  pacemaker::resource::ocf { 'vsm-p' :
    ocf_agent_name  => 'heartbeat:VirtualDomain',
    resource_params => 'force_stop=true config=/var/spool/cisco/vsm/vsm_primary_deploy.xml',
    require         => Class['n1k_vsm'],
    # Stickiness keeps the VSM VM where it is instead of migrating it back
    # after a failover.
    meta_params     => 'resource-stickiness=INFINITY',
  }
  if str2bool(hiera('n1k_vsm::pacemaker_control', true)) {
    pacemaker::resource::ocf { 'vsm-s' :
      ocf_agent_name  => 'heartbeat:VirtualDomain',
      resource_params => 'force_stop=true config=/var/spool/cisco/vsm/vsm_secondary_deploy.xml',
      require         => Class['n1k_vsm'],
      meta_params     => 'resource-stickiness=INFINITY',
    }
    # -INFINITY anti-colocation keeps the primary and secondary VSM VMs on
    # different controllers for HA.
    # NOTE(review): the source/target attributes were missing from the
    # damaged source text; restored from the resource names above -- confirm
    # against upstream history before merging.
    pacemaker::constraint::colocation { 'vsm-colocation-contraint':
      source  => 'vsm-s',
      target  => 'vsm-p',
      score   => '-INFINITY',
      require => [Pacemaker::Resource::Ocf['vsm-p'],
                  Pacemaker::Resource::Ocf['vsm-s']],
    }
  }
}
if hiera('step') >= 5 {
  if $pacemaker_master {
    # Keystone admin role and service endpoints are created exactly once,
    # from the bootstrap node, and only after httpd (which serves Keystone)
    # is up under Pacemaker.
    class {'::keystone::roles::admin' :
      require => Pacemaker::Resource::Service[$::apache::params::service_name],
    }
    class {'::keystone::endpoint' :
      require => Pacemaker::Resource::Service[$::apache::params::service_name],
    }
    include ::heat::keystone::domain
    # The heat domain user can only be created once the admin role exists.
    Class['::keystone::roles::admin'] -> Class['::heat::keystone::domain']
  } else {
    # On non-master controller we don't need to create Keystone resources again
    class { '::heat::keystone::domain':
      manage_domain => false,
      manage_user   => false,
      manage_role   => false,
    }
  }
}
# Record the set of installed packages for this deployment step so that
# later steps and upgrades can diff against it (one manifest file per step).
$package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_controller_pacemaker', hiera('step')])
package_manifest{$package_manifest_name: ensure => present}