# Copyright 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

Pcmk_resource <| |> {
  tries     => 10,
  try_sleep => 3,
}

# TODO(jistr): use pcs resource provider instead of just no-ops
Service <|
  tag == 'aodh-service' or
  tag == 'cinder-service' or
  tag == 'ceilometer-service' or
  tag == 'gnocchi-service' or
  tag == 'heat-service' or
  tag == 'neutron-service' or
  tag == 'nova-service' or
  tag == 'sahara-service'
|> {
  hasrestart => true,
  restart    => '/bin/true',
  start      => '/bin/true',
  stop       => '/bin/true',
}

include ::tripleo::packages
include ::tripleo::firewall

if $::hostname == downcase(hiera('bootstrap_nodeid')) {
  $pacemaker_master = true
  $sync_db = true
} else {
  $pacemaker_master = false
  $sync_db = false
}

$enable_fencing = str2bool(hiera('enable_fencing', false)) and hiera('step') >= 5
$enable_load_balancer = hiera('enable_load_balancer', true)

# When to start and enable services which haven't been Pacemakerized
# FIXME: remove when we start all OpenStack services using Pacemaker
# (occurrences of this variable will be gradually replaced with false)
$non_pcmk_start = hiera('step') >= 5

if hiera('step') >= 1 {

  create_resources(kmod::load, hiera('kernel_modules'), {})
  create_resources(sysctl::value, hiera('sysctl_settings'), {})
  Exec <| tag == 'kmod::load' |> -> Sysctl <| |>

  include ::timezone

  if count(hiera('ntp::servers')) > 0 {
    include ::ntp
  }

  $pacemaker_cluster_members = downcase(regsubst(hiera('controller_node_names'), ',', ' ', 'G'))
  $corosync_ipv6 = str2bool(hiera('corosync_ipv6', false))
  if $corosync_ipv6 {
    $cluster_setup_extras = { '--token' => hiera('corosync_token_timeout', 1000), '--ipv6' => '' }
  } else {
    $cluster_setup_extras = { '--token' => hiera('corosync_token_timeout', 1000) }
  }
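  # For illustration only (assuming puppet-pacemaker passes these extras
  # straight through to `pcs cluster setup`, which is not shown in this
  # file), the IPv6 case corresponds to flags such as:
  #   pcs cluster setup --name tripleo_cluster <members> --token 1000 --ipv6
  # where the cluster name and member list are examples, not values from
  # this manifest.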
  class { '::pacemaker':
    hacluster_pwd => hiera('hacluster_pwd'),
  } ->
  class { '::pacemaker::corosync':
    cluster_members      => $pacemaker_cluster_members,
    setup_cluster        => $pacemaker_master,
    cluster_setup_extras => $cluster_setup_extras,
  }
  class { '::pacemaker::stonith':
    disable => !$enable_fencing,
  }
  if $enable_fencing {
    include ::tripleo::fencing

    # enable stonith after all Pacemaker resources have been created
    Pcmk_resource<||> -> Class['tripleo::fencing']
    Pcmk_constraint<||> -> Class['tripleo::fencing']
    Exec <| tag == 'pacemaker_constraint' |> -> Class['tripleo::fencing']
    # enable stonith after all fencing devices have been created
    Class['tripleo::fencing'] -> Class['pacemaker::stonith']
  }

  # FIXME(gfidente): sets 200s as the default start/stop timeout op
  # params; until we can use pcmk global defaults we'll still need to
  # add them to every resource which redefines op params
  Pacemaker::Resource::Service {
    op_params => 'start timeout=200s stop timeout=200s',
  }
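  # For illustration only (assuming the underlying provider shells out to
  # pcs): a resource that picks up this default would be created with
  # operations roughly like
  #   pcs resource create <service> systemd:<service> \
  #     op start timeout=200s op stop timeout=200s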

  if downcase(hiera('ceilometer_backend')) == 'mongodb' {
    include ::mongodb::globals
    include ::mongodb::client
    class { '::mongodb::server' :
      service_manage => false,
    }
  }

  # Redis
  class { '::redis' :
    service_manage => false,
    notify_service => false,
  }

  # Galera
  if str2bool(hiera('enable_galera', true)) {
    $mysql_config_file = '/etc/my.cnf.d/galera.cnf'
  } else {
    $mysql_config_file = '/etc/my.cnf.d/server.cnf'
  }
  $galera_nodes = downcase(hiera('galera_node_names', $::hostname))
  $galera_nodes_count = count(split($galera_nodes, ','))

  # FIXME: due to https://bugzilla.redhat.com/show_bug.cgi?id=1298671 we
  # set bind-address to a hostname instead of an ip address; to move MySQL
  # from internal_api to another network we'll have to customize both
  # MysqlNetwork and ControllerHostnameResolveNetwork in ServiceNetMap
  $mysql_bind_host = hiera('mysql_bind_host')
  $mysqld_options = {
    'mysqld' => {
      'skip-name-resolve'              => '1',
      'binlog_format'                  => 'ROW',
      'default-storage-engine'         => 'innodb',
      'innodb_autoinc_lock_mode'       => '2',
      'innodb_locks_unsafe_for_binlog' => '1',
      'query_cache_size'               => '0',
      'query_cache_type'               => '0',
      'bind-address'                   => $::hostname,
      'max_connections'                => hiera('mysql_max_connections'),
      'open_files_limit'               => '-1',
      'wsrep_on'                       => 'ON',
      'wsrep_provider'                 => '/usr/lib64/galera/libgalera_smm.so',
      'wsrep_cluster_name'             => 'galera_cluster',
      'wsrep_cluster_address'          => "gcomm://${galera_nodes}",
      'wsrep_slave_threads'            => '1',
      'wsrep_certify_nonPK'            => '1',
      'wsrep_max_ws_rows'              => '131072',
      'wsrep_max_ws_size'              => '1073741824',
      'wsrep_debug'                    => '0',
      'wsrep_convert_LOCK_to_trx'      => '0',
      'wsrep_retry_autocommit'         => '1',
      'wsrep_auto_increment_control'   => '1',
      'wsrep_drupal_282555_workaround' => '0',
      'wsrep_causal_reads'             => '0',
      'wsrep_sst_method'               => 'rsync',
      'wsrep_provider_options'         => "gmcast.listen_addr=tcp://[${mysql_bind_host}]:4567;",
    },
  }
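  # For illustration: with the options above, the managed galera.cnf ends up
  # with entries along the lines of (hostnames are examples only):
  #   [mysqld]
  #   bind-address = overcloud-controller-0.localdomain
  #   wsrep_cluster_address = gcomm://overcloud-controller-0,overcloud-controller-1
  #   wsrep_provider = /usr/lib64/galera/libgalera_smm.so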

  class { '::mysql::server':
    create_root_user        => false,
    create_root_my_cnf      => false,
    config_file             => $mysql_config_file,
    override_options        => $mysqld_options,
    remove_default_accounts => $pacemaker_master,
    service_manage          => false,
    service_enabled         => false,
  }

}

if hiera('step') >= 2 {

  # NOTE(gfidente): the following vars are needed on all nodes so they
  # need to stay out of the pacemaker_master conditional.
  # The address mangling will hopefully go away once we can configure the
  # connection string via hostnames; until then, we need to pass the list of
  # IPv6 addresses *with* port, and without the brackets, as the 'members'
  # argument for the 'mongodb_replset' resource.
  if str2bool(hiera('mongodb::server::ipv6', false)) {
    $mongo_node_ips_with_port_prefixed = prefix(hiera('mongo_node_ips'), '[')
    $mongo_node_ips_with_port = suffix($mongo_node_ips_with_port_prefixed, ']:27017')
    $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017')
  } else {
    $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017')
    $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017')
  }
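  # For example, given mongo_node_ips = ['fd00::10', 'fd00::11'] and IPv6
  # enabled, the prefix/suffix calls above yield:
  #   $mongo_node_ips_with_port      = ['[fd00::10]:27017', '[fd00::11]:27017']
  #   $mongo_node_ips_with_port_nobr = ['fd00::10:27017', 'fd00::11:27017']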
  $mongodb_replset = hiera('mongodb::server::replset')

  if $pacemaker_master {

    include ::pacemaker::resource_defaults

    # Create an openstack-core dummy resource. See RHBZ 1290121
    pacemaker::resource::ocf { 'openstack-core':
      ocf_agent_name => 'heartbeat:Dummy',
      clone_params   => true,
    }

    if downcase(hiera('ceilometer_backend')) == 'mongodb' {
      pacemaker::resource::service { $::mongodb::params::service_name :
        op_params    => 'start timeout=370s stop timeout=200s',
        clone_params => true,
        require      => Class['::mongodb::server'],
      }
      # NOTE(spredzy): the replset can only be run
      # once all the nodes have joined the cluster.
      mongodb_conn_validator { $mongo_node_ips_with_port :
        timeout => '600',
        require => Pacemaker::Resource::Service[$::mongodb::params::service_name],
        before  => Mongodb_replset[$mongodb_replset],
      }
      mongodb_replset { $mongodb_replset :
        members => $mongo_node_ips_with_port_nobr,
      }
    }

    pacemaker::resource::ocf { 'galera' :
      ocf_agent_name  => 'heartbeat:galera',
      op_params       => 'promote timeout=300s on-fail=block',
      master_params   => '',
      meta_params     => "master-max=${galera_nodes_count} ordered=true",
      resource_params => "additional_parameters='--open-files-limit=16384' enable_creation=true wsrep_cluster_address='gcomm://${galera_nodes}'",
      require         => Class['::mysql::server'],
      before          => Exec['galera-ready'],
    }
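    # For illustration only (an assumption about how the puppet-pacemaker
    # provider translates this resource; pcs syntax abbreviated):
    #   pcs resource create galera ocf:heartbeat:galera \
    #     enable_creation=true wsrep_cluster_address='gcomm://<galera_nodes>' \
    #     additional_parameters='--open-files-limit=16384' \
    #     op promote timeout=300s on-fail=block --master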

    pacemaker::resource::ocf { 'redis':
      ocf_agent_name  => 'heartbeat:redis',
      master_params   => '',
      meta_params     => 'notify=true ordered=true interleave=true',
      resource_params => 'wait_last_known_master=true',
      require         => Class['::redis'],
    }

  }
  $mysql_root_password = hiera('mysql::server::root_password')
  $mysql_clustercheck_password = hiera('mysql_clustercheck_password')
  # This step creates a sysconfig clustercheck file with the root user and an
  # empty password on the first install only (later on, the clustercheck db
  # user will be used). We use exec rather than file here to avoid duplicate
  # definition errors in puppet when we later set the file to contain the
  # clustercheck data.
  exec { 'create-root-sysconfig-clustercheck':
    command => "/bin/echo 'MYSQL_USERNAME=root\nMYSQL_PASSWORD=\'\'\nMYSQL_HOST=localhost\n' > /etc/sysconfig/clustercheck",
    unless  => '/bin/test -e /etc/sysconfig/clustercheck && grep -q clustercheck /etc/sysconfig/clustercheck',
  }
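  # The exec above seeds /etc/sysconfig/clustercheck with the root user and
  # an empty password, roughly:
  #   MYSQL_USERNAME=root
  #   MYSQL_PASSWORD=''
  #   MYSQL_HOST=localhost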

  exec { 'galera-ready' :
    command     => '/usr/bin/clustercheck >/dev/null',
    timeout     => 30,
    tries       => 180,
    try_sleep   => 10,
    environment => ['AVAILABLE_WHEN_READONLY=0'],
    require     => Exec['create-root-sysconfig-clustercheck'],
  }

  xinetd::service { 'galera-monitor' :
    port           => '9200',
    server         => '/usr/bin/clustercheck',
    per_source     => 'UNLIMITED',
    log_on_success => '',
    log_on_failure => 'HOST',
    flags          => 'REUSE',
    service_type   => 'UNLISTED',
    user           => 'root',
    group          => 'root',
    require        => Exec['create-root-sysconfig-clustercheck'],
  }
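  # For illustration: xinetd now serves clustercheck's output on port 9200
  # (the conventional Galera health check consumed by HAProxy's httpchk), so
  # a node can be probed manually with, e.g.:
  #   curl http://<controller_ip>:9200/
  # which returns 200 while the local node is synced and 503 otherwise.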
  # We add a clustercheck db user and we will switch /etc/sysconfig/clustercheck
  # to it in a later step. We do this only on one node as it will replicate to
  # the other members. We also make sure that the permissions are the minimum
  # necessary.
  if $pacemaker_master {
    mysql_user { 'clustercheck@localhost':
      ensure        => 'present',
      password_hash => mysql_password($mysql_clustercheck_password),
      require       => Exec['galera-ready'],
    }
    mysql_grant { 'clustercheck@localhost/*.*':
      ensure     => 'present',
      options    => ['GRANT'],
      privileges => ['PROCESS'],
      table      => '*.*',
      user       => 'clustercheck@localhost',
    }
  }
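  # For illustration, the mysql_user/mysql_grant pair above is roughly
  # equivalent to:
  #   CREATE USER 'clustercheck'@'localhost' IDENTIFIED BY '<clustercheck password>';
  #   GRANT PROCESS ON *.* TO 'clustercheck'@'localhost' WITH GRANT OPTION;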

  # Create all the database schemas
  if $sync_db {
    class { '::nova::db::mysql':
      require => Exec['galera-ready'],
    }
    class { '::nova::db::mysql_api':
      require => Exec['galera-ready'],
    }
    class { '::neutron::db::mysql':
      require => Exec['galera-ready'],
    }
    class { '::cinder::db::mysql':
      require => Exec['galera-ready'],
    }
    class { '::heat::db::mysql':
      require => Exec['galera-ready'],
    }

    if downcase(hiera('ceilometer_backend')) == 'mysql' {
      class { '::ceilometer::db::mysql':
        require => Exec['galera-ready'],
      }
    }

    if downcase(hiera('gnocchi_indexer_backend')) == 'mysql' {
      class { '::gnocchi::db::mysql':
        require => Exec['galera-ready'],
      }
    }
    class { '::sahara::db::mysql':
      require => Exec['galera-ready'],
    }
  }

  # pre-install swift here so we can build rings
  include ::swift

  # Ceph
  $enable_ceph = hiera('ceph_storage_count', 0) > 0 or hiera('enable_ceph_storage', false)

  if $enable_ceph {
    $mon_initial_members = downcase(hiera('ceph_mon_initial_members'))
    if str2bool(hiera('ceph_ipv6', false)) {
      $mon_host = hiera('ceph_mon_host_v6')
    } else {
      $mon_host = hiera('ceph_mon_host')
    }
    class { '::ceph::profile::params':
      mon_initial_members => $mon_initial_members,
      mon_host            => $mon_host,
    }
    include ::ceph::conf
    include ::ceph::profile::mon
  }

  if str2bool(hiera('enable_ceph_storage', false)) {
    if str2bool(hiera('ceph_osd_selinux_permissive', true)) {
      exec { 'set selinux to permissive on boot':
        command => "sed -ie 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config",
        onlyif  => "test -f /etc/selinux/config && ! grep '^SELINUX=permissive' /etc/selinux/config",
        path    => ['/usr/bin', '/usr/sbin'],
      }

      exec { 'set selinux to permissive':
        command => 'setenforce 0',
        onlyif  => "which setenforce && getenforce | grep -i 'enforcing'",
        path    => ['/usr/bin', '/usr/sbin'],
      } -> Class['ceph::profile::osd']
    }

    include ::ceph::conf
    include ::ceph::profile::osd
  }

  if str2bool(hiera('enable_external_ceph', false)) {
    if str2bool(hiera('ceph_ipv6', false)) {
      $mon_host = hiera('ceph_mon_host_v6')
    } else {
      $mon_host = hiera('ceph_mon_host')
    }
    class { '::ceph::profile::params':
      mon_host => $mon_host,
    }
    include ::ceph::conf
    include ::ceph::profile::client
  }

} #END STEP 2

if hiera('step') >= 4 or ( hiera('step') >= 3 and $sync_db ) {
  # At this stage we are guaranteed that the clustercheck db user exists
  # so we switch the resource agent to use it.
  file { '/etc/sysconfig/clustercheck' :
    ensure  => file,
    mode    => '0600',
    owner   => 'root',
    group   => 'root',
    content => "MYSQL_USERNAME=clustercheck\n
MYSQL_PASSWORD='${mysql_clustercheck_password}'\n
MYSQL_HOST=localhost\n",
  }

  $nova_ipv6 = hiera('nova::use_ipv6', false)
  if $nova_ipv6 {
    $memcached_servers = suffix(hiera('memcache_node_ips_v6'), ':11211')
  } else {
    $memcached_servers = suffix(hiera('memcache_node_ips'), ':11211')
  }

  class { '::nova' :
    memcached_servers => $memcached_servers,
  }

  include ::nova::config

  class { '::nova::api' :
    sync_db        => $sync_db,
    sync_db_api    => $sync_db,
    manage_service => false,
    enabled        => false,
  }
  class { '::nova::cert' :
    manage_service => false,
    enabled        => false,
  }
  class { '::nova::conductor' :
    manage_service => false,
    enabled        => false,
  }
  class { '::nova::consoleauth' :
    manage_service => false,
    enabled        => false,
  }
  class { '::nova::vncproxy' :
    manage_service => false,
    enabled        => false,
  }
  include ::nova::scheduler::filter
  class { '::nova::scheduler' :
    manage_service => false,
    enabled        => false,
  }
  include ::nova::network::neutron

  if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {

    # TODO(devvesa) provide non-controller ips for these services
    $zookeeper_node_ips = hiera('neutron_api_node_ips')
    $cassandra_node_ips = hiera('neutron_api_node_ips')

    # Run zookeeper in the controller if configured
    if hiera('enable_zookeeper_on_controller') {
      class { '::tripleo::cluster::zookeeper':
        zookeeper_server_ips => $zookeeper_node_ips,
        # TODO: create a 'bind' hiera key for zookeeper
        zookeeper_client_ip  => hiera('neutron::bind_host'),
        zookeeper_hostnames  => split(hiera('controller_node_names'), ','),
      }
    }

    # Run cassandra in the controller if configured
    if hiera('enable_cassandra_on_controller') {
      class { '::tripleo::cluster::cassandra':
        cassandra_servers => $cassandra_node_ips,
        # TODO: create a 'bind' hiera key for cassandra
        cassandra_ip      => hiera('neutron::bind_host'),
      }
    }

    class { '::tripleo::network::midonet::agent':
      zookeeper_servers => $zookeeper_node_ips,
      cassandra_seeds   => $cassandra_node_ips,
    }

    class { '::tripleo::network::midonet::api':
      zookeeper_servers    => $zookeeper_node_ips,
      vip                  => hiera('tripleo::loadbalancer::public_virtual_ip'),
      keystone_ip          => hiera('tripleo::loadbalancer::public_virtual_ip'),
      keystone_admin_token => hiera('keystone::admin_token'),
      # TODO: create a 'bind' hiera key for api
      bind_address         => hiera('neutron::bind_host'),
      admin_password       => hiera('admin_password'),
    }

    # Configure Neutron
    class { '::neutron':
      service_plugins => [],
    }

  } else {
    # Neutron class definitions
    include ::neutron
  }

  include ::neutron::config
  class { '::neutron::server' :
    sync_db        => $sync_db,
    manage_service => false,
    enabled        => false,
  }
  include ::neutron::server::notifications
  if hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' {
    include ::neutron::plugins::nuage
  }
  if hiera('neutron::core_plugin') == 'neutron_plugin_contrail.plugins.opencontrail.contrail_plugin.NeutronPluginContrailCoreV2' {
    include ::neutron::plugins::opencontrail
  }
  if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
    class { '::neutron::plugins::midonet':
      midonet_api_ip    => hiera('tripleo::loadbalancer::public_virtual_ip'),
      keystone_tenant   => hiera('neutron::server::auth_tenant'),
      keystone_password => hiera('neutron::server::auth_password'),
    }
  }
  if hiera('neutron::core_plugin') == 'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2' {
    class { '::neutron::plugins::plumgrid' :
      connection                   => hiera('neutron::server::database_connection'),
      controller_priv_host         => hiera('keystone_admin_api_vip'),
      admin_password               => hiera('admin_password'),
      metadata_proxy_shared_secret => hiera('nova::api::neutron_metadata_proxy_shared_secret'),
    }
  }
  include ::neutron::plugins::ml2
  class { '::neutron::agents::ml2::ovs':
    manage_service => false,
    enabled        => false,
  }

  if 'cisco_ucsm' in hiera('neutron::plugins::ml2::mechanism_drivers') {
    include ::neutron::plugins::ml2::cisco::ucsm
  }
  if 'cisco_nexus' in hiera('neutron::plugins::ml2::mechanism_drivers') {
    include ::neutron::plugins::ml2::cisco::nexus
    include ::neutron::plugins::ml2::cisco::type_nexus_vxlan
  }
  if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
    include ::neutron::plugins::ml2::cisco::nexus1000v

    class { '::neutron::agents::n1kv_vem':
      n1kv_source  => hiera('n1kv_vem_source', undef),
      n1kv_version => hiera('n1kv_vem_version', undef),
    }

    class { '::n1k_vsm':
      n1kv_source  => hiera('n1kv_vsm_source', undef),
      n1kv_version => hiera('n1kv_vsm_version', undef),
    }
  }

  if 'bsn_ml2' in hiera('neutron::plugins::ml2::mechanism_drivers') {
    include ::neutron::plugins::ml2::bigswitch::restproxy
    include ::neutron::agents::bigswitch
  }

  include ::cinder
  include ::cinder::config
  include ::tripleo::ssl::cinder_config
  class { '::cinder::api':
    sync_db        => $sync_db,
    manage_service => false,
    enabled        => false,
  }
  class { '::cinder::scheduler' :
    manage_service => false,
    enabled        => false,
  }
  class { '::cinder::volume' :
    manage_service => false,
    enabled        => false,
  }
  include ::cinder::glance
  include ::cinder::ceilometer
  class { '::cinder::setup_test_volume':
    size => join([hiera('cinder_lvm_loop_device_size'), 'M']),
  }
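  # For example, with cinder_lvm_loop_device_size set to '10280' the join()
  # above produces '10280M' as the loopback test volume size.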

  $cinder_enable_iscsi = hiera('cinder_enable_iscsi_backend', true)
  if $cinder_enable_iscsi {
    $cinder_iscsi_backend = 'tripleo_iscsi'

    cinder::backend::iscsi { $cinder_iscsi_backend :
      iscsi_ip_address => hiera('cinder_iscsi_ip_address'),
      iscsi_helper     => hiera('cinder_iscsi_helper'),
    }
  }

  if $enable_ceph {

    $ceph_pools = hiera('ceph_pools')
    ceph::pool { $ceph_pools :
      pg_num  => hiera('ceph::profile::params::osd_pool_default_pg_num'),
      pgp_num => hiera('ceph::profile::params::osd_pool_default_pgp_num'),
      size    => hiera('ceph::profile::params::osd_pool_default_size'),
    }

    $cinder_pool_requires = [Ceph::Pool[hiera('cinder_rbd_pool_name')]]

  } else {
    $cinder_pool_requires = []
  }

  if hiera('cinder_enable_rbd_backend', false) {
    $cinder_rbd_backend = 'tripleo_ceph'

    cinder::backend::rbd { $cinder_rbd_backend :
      backend_host    => hiera('cinder::host'),
      rbd_pool        => hiera('cinder_rbd_pool_name'),
      rbd_user        => hiera('ceph_client_user_name'),
      rbd_secret_uuid => hiera('ceph::profile::params::fsid'),
      require         => $cinder_pool_requires,
    }
  }

  if hiera('cinder_enable_eqlx_backend', false) {
    $cinder_eqlx_backend = hiera('cinder::backend::eqlx::volume_backend_name')

    cinder::backend::eqlx { $cinder_eqlx_backend :
      volume_backend_name => hiera('cinder::backend::eqlx::volume_backend_name', undef),
      san_ip              => hiera('cinder::backend::eqlx::san_ip', undef),
      san_login           => hiera('cinder::backend::eqlx::san_login', undef),
      san_password        => hiera('cinder::backend::eqlx::san_password', undef),
      san_thin_provision  => hiera('cinder::backend::eqlx::san_thin_provision', undef),
      eqlx_group_name     => hiera('cinder::backend::eqlx::eqlx_group_name', undef),
      eqlx_pool           => hiera('cinder::backend::eqlx::eqlx_pool', undef),
      eqlx_use_chap       => hiera('cinder::backend::eqlx::eqlx_use_chap', undef),
      eqlx_chap_login     => hiera('cinder::backend::eqlx::eqlx_chap_login', undef),
      eqlx_chap_password  => hiera('cinder::backend::eqlx::eqlx_chap_password', undef),
    }
  }

  if hiera('cinder_enable_dellsc_backend', false) {
    $cinder_dellsc_backend = hiera('cinder::backend::dellsc_iscsi::volume_backend_name')

    cinder::backend::dellsc_iscsi { $cinder_dellsc_backend :
      volume_backend_name   => hiera('cinder::backend::dellsc_iscsi::volume_backend_name', undef),
      san_ip                => hiera('cinder::backend::dellsc_iscsi::san_ip', undef),
      san_login             => hiera('cinder::backend::dellsc_iscsi::san_login', undef),
      san_password          => hiera('cinder::backend::dellsc_iscsi::san_password', undef),
      dell_sc_ssn           => hiera('cinder::backend::dellsc_iscsi::dell_sc_ssn', undef),
      iscsi_ip_address      => hiera('cinder::backend::dellsc_iscsi::iscsi_ip_address', undef),
      iscsi_port            => hiera('cinder::backend::dellsc_iscsi::iscsi_port', undef),
      dell_sc_api_port      => hiera('cinder::backend::dellsc_iscsi::dell_sc_api_port', undef),
      dell_sc_server_folder => hiera('cinder::backend::dellsc_iscsi::dell_sc_server_folder', undef),
      dell_sc_volume_folder => hiera('cinder::backend::dellsc_iscsi::dell_sc_volume_folder', undef),
    }
  }

  if hiera('cinder_enable_netapp_backend', false) {
    $cinder_netapp_backend = hiera('cinder::backend::netapp::title')

    if hiera('cinder::backend::netapp::nfs_shares', undef) {
      $cinder_netapp_nfs_shares = split(hiera('cinder::backend::netapp::nfs_shares', undef), ',')
    }

    cinder::backend::netapp { $cinder_netapp_backend :
      netapp_login                 => hiera('cinder::backend::netapp::netapp_login', undef),
      netapp_password              => hiera('cinder::backend::netapp::netapp_password', undef),
      netapp_server_hostname       => hiera('cinder::backend::netapp::netapp_server_hostname', undef),
      netapp_server_port           => hiera('cinder::backend::netapp::netapp_server_port', undef),
      netapp_size_multiplier       => hiera('cinder::backend::netapp::netapp_size_multiplier', undef),
      netapp_storage_family        => hiera('cinder::backend::netapp::netapp_storage_family', undef),
      netapp_storage_protocol      => hiera('cinder::backend::netapp::netapp_storage_protocol', undef),
      netapp_transport_type        => hiera('cinder::backend::netapp::netapp_transport_type', undef),
      netapp_vfiler                => hiera('cinder::backend::netapp::netapp_vfiler', undef),
      netapp_volume_list           => hiera('cinder::backend::netapp::netapp_volume_list', undef),
      netapp_vserver               => hiera('cinder::backend::netapp::netapp_vserver', undef),
      netapp_partner_backend_name  => hiera('cinder::backend::netapp::netapp_partner_backend_name', undef),
      nfs_shares                   => $cinder_netapp_nfs_shares,
      nfs_shares_config            => hiera('cinder::backend::netapp::nfs_shares_config', undef),
      netapp_copyoffload_tool_path => hiera('cinder::backend::netapp::netapp_copyoffload_tool_path', undef),
      netapp_controller_ips        => hiera('cinder::backend::netapp::netapp_controller_ips', undef),
      netapp_sa_password           => hiera('cinder::backend::netapp::netapp_sa_password', undef),
      netapp_storage_pools         => hiera('cinder::backend::netapp::netapp_storage_pools', undef),
      netapp_eseries_host_type     => hiera('cinder::backend::netapp::netapp_eseries_host_type', undef),
      netapp_webservice_path       => hiera('cinder::backend::netapp::netapp_webservice_path', undef),
    }
  }

  if hiera('cinder_enable_nfs_backend', false) {
    $cinder_nfs_backend = 'tripleo_nfs'

    if str2bool($::selinux) {
      selboolean { 'virt_use_nfs':
        value      => on,
        persistent => true,
      } -> Package['nfs-utils']
    }

    package { 'nfs-utils': } ->
    cinder::backend::nfs { $cinder_nfs_backend:
      nfs_servers       => hiera('cinder_nfs_servers'),
      nfs_mount_options => hiera('cinder_nfs_mount_options', ''),
      nfs_shares_config => '/etc/cinder/shares-nfs.conf',
    }
  }

  $cinder_enabled_backends = delete_undef_values([$cinder_iscsi_backend, $cinder_rbd_backend, $cinder_eqlx_backend, $cinder_dellsc_backend, $cinder_netapp_backend, $cinder_nfs_backend])
  class { '::cinder::backends' :
    enabled_backends => union($cinder_enabled_backends, hiera('cinder_user_enabled_backends')),
  }
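  # For example, if only the iSCSI and RBD backends were enabled above, the
  # remaining backend variables stay undef and are dropped, leaving
  # enabled_backends = tripleo_iscsi,tripleo_ceph in cinder.conf (plus any
  # user-enabled backends merged in by union()).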

  class { '::sahara':
    sync_db => $sync_db,
  }
  class { '::sahara::service::api':
    manage_service => false,
    enabled        => false,
  }
  class { '::sahara::service::engine':
    manage_service => false,
    enabled        => false,
  }

  # swift proxy
  class { '::swift::proxy' :
    manage_service => $non_pcmk_start,
    enabled        => $non_pcmk_start,
  }
  include ::swift::proxy::proxy_logging
  include ::swift::proxy::healthcheck
  include ::swift::proxy::cache
  include ::swift::proxy::keystone
  include ::swift::proxy::authtoken
  include ::swift::proxy::staticweb
  include ::swift::proxy::ratelimit
  include ::swift::proxy::catch_errors
  include ::swift::proxy::tempurl
  include ::swift::proxy::formpost

  # swift storage
  if str2bool(hiera('enable_swift_storage', true)) {
    class { '::swift::storage::all':
      mount_check => str2bool(hiera('swift_mount_check')),
    }
    class { '::swift::storage::account':
      manage_service => $non_pcmk_start,
      enabled        => $non_pcmk_start,
    }
    class { '::swift::storage::container':
      manage_service => $non_pcmk_start,
      enabled        => $non_pcmk_start,
    }
    class { '::swift::storage::object':
      manage_service => $non_pcmk_start,
      enabled        => $non_pcmk_start,
    }
    if !defined(File['/srv/node']) {
      file { '/srv/node':
        ensure  => directory,
        owner   => 'swift',
        group   => 'swift',
        require => Package['openstack-swift'],
      }
    }
    $swift_components = ['account', 'container', 'object']
    swift::storage::filter::recon { $swift_components : }
    swift::storage::filter::healthcheck { $swift_components : }
  }

  # Ceilometer
  case downcase(hiera('ceilometer_backend')) {
    /mysql/: {
      $ceilometer_database_connection = hiera('ceilometer_mysql_conn_string')
    }
    default: {
      $mongo_node_string = join($mongo_node_ips_with_port, ',')
      $ceilometer_database_connection = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}"
    }
  }
  include ::ceilometer
  include ::ceilometer::config
  class { '::ceilometer::api' :
    manage_service => false,
    enabled        => false,
  }
  class { '::ceilometer::agent::notification' :
    manage_service => false,
    enabled        => false,
  }
  class { '::ceilometer::agent::central' :
    manage_service => false,
    enabled        => false,
  }
  class { '::ceilometer::collector' :
    manage_service => false,
    enabled        => false,
  }
  include ::ceilometer::expirer
  class { '::ceilometer::db' :
    database_connection => $ceilometer_database_connection,
    sync_db             => $sync_db,
  }
  include ::ceilometer::agent::auth
  include ::ceilometer::dispatcher::gnocchi

  Cron <| title == 'ceilometer-expirer' |> { command => "sleep $((\$(od -A n -t d -N 3 /dev/urandom) % 86400)) && ${::ceilometer::params::expirer_command}" }
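  # The override above prepends a random delay to the expirer cron job:
  # `od -A n -t d -N 3 /dev/urandom` reads three random bytes as a decimal
  # (0..16777215) and `% 86400` maps that to a sleep of up to 24 hours, so
  # the expirer runs are spread out instead of firing on all nodes at once.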

  # Heat
  include ::heat::config
  class { '::heat' :
    sync_db             => $sync_db,
    notification_driver => 'messaging',
  }
  class { '::heat::api' :
    manage_service => false,
    enabled        => false,
  }
  class { '::heat::api_cfn' :
    manage_service => false,
    enabled        => false,
  }
  class { '::heat::api_cloudwatch' :
    manage_service => false,
    enabled        => false,
  }
  class { '::heat::engine' :
    manage_service => false,
    enabled        => false,
  }

  # httpd/apache and horizon
  # NOTE(gfidente): server-status can be consumed by the pacemaker resource agent
  class { '::apache' :
    service_enable => false,
    # service_manage => false, # <-- not supported with horizon&apache mod_wsgi?
  }
  include ::apache::mod::remoteip
  include ::apache::mod::status
  if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
    $_profile_support = 'cisco'
  } else {
    $_profile_support = 'None'
  }
  $neutron_options = { 'profile_support' => $_profile_support }

  $memcached_ipv6 = hiera('memcached_ipv6', false)
  if $memcached_ipv6 {
    $horizon_memcached_servers = hiera('memcache_node_ips_v6', '[::1]')
  } else {
    $horizon_memcached_servers = hiera('memcache_node_ips', '127.0.0.1')
  }

  class { '::horizon':
    cache_server_ip => $horizon_memcached_servers,
    neutron_options => $neutron_options,
  }

  # Aodh
  class { '::aodh' :
    database_connection => $ceilometer_database_connection,
  }
  include ::aodh::config
  include ::aodh::auth
  include ::aodh::client
  include ::aodh::wsgi::apache
  class { '::aodh::api':
    manage_service => false,
    enabled        => false,
    service_name   => 'httpd',
  }
  class { '::aodh::evaluator':
    manage_service => false,
    enabled        => false,
  }
  class { '::aodh::notifier':
    manage_service => false,
    enabled        => false,
  }
  class { '::aodh::listener':
    manage_service => false,
    enabled        => false,
  }

  # Gnocchi
  $gnocchi_database_connection = hiera('gnocchi_mysql_conn_string')
  include ::gnocchi::client
  if $sync_db {
    include ::gnocchi::db::sync
  }
  include ::gnocchi::storage
  $gnocchi_backend = downcase(hiera('gnocchi_backend', 'swift'))
  case $gnocchi_backend {
    'swift': { include ::gnocchi::storage::swift }
    'file': { include ::gnocchi::storage::file }
    'rbd': { include ::gnocchi::storage::ceph }
    default: { fail('Unrecognized gnocchi_backend parameter.') }
  }
  class { '::gnocchi':
    database_connection => $gnocchi_database_connection,
  }
  class { '::gnocchi::api' :
    manage_service => false,
    enabled        => false,
    service_name   => 'httpd',
  }
  class { '::gnocchi::wsgi::apache' :
    ssl => false,
  }
  class { '::gnocchi::metricd' :
    manage_service => false,
    enabled        => false,
  }
  class { '::gnocchi::statsd' :
    manage_service => false,
    enabled        => false,
  }

  $snmpd_user = hiera('snmpd_readonly_user_name')
  snmp::snmpv3_user { $snmpd_user:
    authtype => 'MD5',
    authpass => hiera('snmpd_readonly_user_password'),
  }
  class { '::snmp':
    agentaddress => ['udp:161', 'udp6:[::1]:161'],
    snmpd_config => [
      join(['createUser ', hiera('snmpd_readonly_user_name'), ' MD5 "', hiera('snmpd_readonly_user_password'), '"']),
      join(['rouser ', hiera('snmpd_readonly_user_name')]),
      'proc  cron',
      'includeAllDisks  10%',
      'master agentx',
      'trapsink localhost public',
      'iquerySecName internalUser',
      'rouser internalUser',
      'defaultMonitors yes',
      'linkUpDownNotifications yes',
    ],
  }
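  # For illustration: the snmpd_config entries above land in snmpd.conf
  # verbatim, e.g. (user name is an example, not a value from this file):
  #   createUser ro_snmp_user MD5 "<password>"
  #   rouser ro_snmp_user
  #   master agentx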

  hiera_include('controller_classes')

} #END STEP 4

if hiera('step') >= 5 {
  # We now make sure that the root db password is set to a random one.
  # At first installation /root/.my.cnf will be empty and we connect without a
  # root password. On subsequent runs or updates /root/.my.cnf will already be
  # populated with proper credentials. This step happens on every node because
  # this sql statement does not automatically replicate across nodes.
  exec { 'galera-set-root-password':
    command => "/bin/touch /root/.my.cnf && /bin/echo \"UPDATE mysql.user SET Password = PASSWORD('${mysql_root_password}') WHERE user = 'root'; flush privileges;\" | /bin/mysql --defaults-extra-file=/root/.my.cnf -u root",
  }
  file { '/root/.my.cnf' :
    ensure  => file,
    mode    => '0600',
    owner   => 'root',
    group   => 'root',
    content => "[client]
user=root
password=\"${mysql_root_password}\"

[mysql]
user=root
password=\"${mysql_root_password}\"",
    require => Exec['galera-set-root-password'],
  }

  $nova_enable_db_purge = hiera('nova_enable_db_purge', true)
  $cinder_enable_db_purge = hiera('cinder_enable_db_purge', true)
  $heat_enable_db_purge = hiera('heat_enable_db_purge', true)

  if $nova_enable_db_purge {
    include ::nova::cron::archive_deleted_rows
  }
  if $cinder_enable_db_purge {
    include ::cinder::cron::db_purge
  }
  if $heat_enable_db_purge {
    include ::heat::cron::purge_deleted
  }

  if $pacemaker_master {

    pacemaker::constraint::base { 'openstack-core-then-httpd-constraint':
      constraint_type => 'order',
      first_resource  => 'openstack-core-clone',
      second_resource => "${::apache::params::service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::apache::params::service_name],
                          Pacemaker::Resource::Ocf['openstack-core']],
    }
    pacemaker::constraint::base { 'galera-then-openstack-core-constraint':
      constraint_type => 'order',
      first_resource  => 'galera-master',
      second_resource => 'openstack-core-clone',
      first_action    => 'promote',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Ocf['galera'],
                          Pacemaker::Resource::Ocf['openstack-core']],
    }

    # Cinder
    pacemaker::resource::service { $::cinder::params::api_service :
      clone_params => 'interleave=true',
      require      => Pacemaker::Resource::Ocf['openstack-core'],
    }
    pacemaker::resource::service { $::cinder::params::scheduler_service :
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::cinder::params::volume_service : }

    pacemaker::constraint::base { 'keystone-then-cinder-api-constraint':
      constraint_type => 'order',
      first_resource  => 'openstack-core-clone',
      second_resource => "${::cinder::params::api_service}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Ocf['openstack-core'],
                          Pacemaker::Resource::Service[$::cinder::params::api_service]],
    }
    pacemaker::constraint::base { 'cinder-api-then-cinder-scheduler-constraint':
      constraint_type => 'order',
      first_resource  => "${::cinder::params::api_service}-clone",
      second_resource => "${::cinder::params::scheduler_service}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::cinder::params::api_service],
                          Pacemaker::Resource::Service[$::cinder::params::scheduler_service]],
    }
    pacemaker::constraint::colocation { 'cinder-scheduler-with-cinder-api-colocation':
      source  => "${::cinder::params::scheduler_service}-clone",
      target  => "${::cinder::params::api_service}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::cinder::params::api_service],
                  Pacemaker::Resource::Service[$::cinder::params::scheduler_service]],
    }
    pacemaker::constraint::base { 'cinder-scheduler-then-cinder-volume-constraint':
      constraint_type => 'order',
      first_resource  => "${::cinder::params::scheduler_service}-clone",
      second_resource => $::cinder::params::volume_service,
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::cinder::params::scheduler_service],
                          Pacemaker::Resource::Service[$::cinder::params::volume_service]],
    }
    pacemaker::constraint::colocation { 'cinder-volume-with-cinder-scheduler-colocation':
      source  => $::cinder::params::volume_service,
      target  => "${::cinder::params::scheduler_service}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::cinder::params::scheduler_service],
                  Pacemaker::Resource::Service[$::cinder::params::volume_service]],
    }

    # Sahara
    pacemaker::resource::service { $::sahara::params::api_service_name :
      clone_params => 'interleave=true',
      require      => Pacemaker::Resource::Ocf['openstack-core'],
    }
    pacemaker::resource::service { $::sahara::params::engine_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::constraint::base { 'keystone-then-sahara-api-constraint':
      constraint_type => 'order',
      first_resource  => 'openstack-core-clone',
      second_resource => "${::sahara::params::api_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::sahara::params::api_service_name],
                          Pacemaker::Resource::Ocf['openstack-core']],
    }
    pacemaker::constraint::base { 'sahara-api-then-sahara-engine-constraint':
      constraint_type => 'order',
      first_resource  => "${::sahara::params::api_service_name}-clone",
      second_resource => "${::sahara::params::engine_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::sahara::params::api_service_name],
                          Pacemaker::Resource::Service[$::sahara::params::engine_service_name]],
    }

    if hiera('neutron::enable_ovs_agent', true) {
      pacemaker::resource::service { $::neutron::params::ovs_agent_service:
        clone_params => 'interleave=true',
      }
    }
    if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
      pacemaker::resource::service { 'tomcat':
        clone_params => 'interleave=true',
      }
    }
    if hiera('neutron::enable_ovs_agent', true) {
      pacemaker::resource::ocf { $::neutron::params::ovs_cleanup_service:
        ocf_agent_name => 'neutron:OVSCleanup',
        clone_params   => 'interleave=true',
      }
      pacemaker::resource::ocf { 'neutron-netns-cleanup':
        ocf_agent_name => 'neutron:NetnsCleanup',
        clone_params   => 'interleave=true',
      }

      # neutron - one chain: ovs-cleanup --> netns-cleanup --> ovs-agent
      pacemaker::constraint::base { 'neutron-ovs-cleanup-to-netns-cleanup-constraint':
        constraint_type => 'order',
        first_resource  => "${::neutron::params::ovs_cleanup_service}-clone",
        second_resource => 'neutron-netns-cleanup-clone',
        first_action    => 'start',
        second_action   => 'start',
        require         => [Pacemaker::Resource::Ocf[$::neutron::params::ovs_cleanup_service],
                            Pacemaker::Resource::Ocf['neutron-netns-cleanup']],
      }
      pacemaker::constraint::colocation { 'neutron-ovs-cleanup-to-netns-cleanup-colocation':
        source  => 'neutron-netns-cleanup-clone',
        target  => "${::neutron::params::ovs_cleanup_service}-clone",
        score   => 'INFINITY',
        require => [Pacemaker::Resource::Ocf[$::neutron::params::ovs_cleanup_service],
                    Pacemaker::Resource::Ocf['neutron-netns-cleanup']],
      }
      pacemaker::constraint::base { 'neutron-netns-cleanup-to-openvswitch-agent-constraint':
        constraint_type => 'order',
        first_resource  => 'neutron-netns-cleanup-clone',
        second_resource => "${::neutron::params::ovs_agent_service}-clone",
        first_action    => 'start',
        second_action   => 'start',
        require         => [Pacemaker::Resource::Ocf['neutron-netns-cleanup'],
                            Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]],
      }
      pacemaker::constraint::colocation { 'neutron-netns-cleanup-to-openvswitch-agent-colocation':
        source  => "${::neutron::params::ovs_agent_service}-clone",
        target  => 'neutron-netns-cleanup-clone',
        score   => 'INFINITY',
        require => [Pacemaker::Resource::Ocf['neutron-netns-cleanup'],
                    Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]],
      }
    }
    if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
      # MidoNet chain: keystone --> neutron-server --> dhcp --> metadata --> tomcat
      pacemaker::constraint::base { 'neutron-server-to-dhcp-agent-constraint':
        constraint_type => 'order',
        first_resource  => "${::neutron::params::server_service}-clone",
        second_resource => "${::neutron::params::dhcp_agent_service}-clone",
        first_action    => 'start',
        second_action   => 'start',
        require         => [Pacemaker::Resource::Service[$::neutron::params::server_service],
                            Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]],
      }
      pacemaker::constraint::base { 'neutron-dhcp-agent-to-metadata-agent-constraint':
        constraint_type => 'order',
        first_resource  => "${::neutron::params::dhcp_agent_service}-clone",
        second_resource => "${::neutron::params::metadata_agent_service}-clone",
        first_action    => 'start',
        second_action   => 'start',
        require         => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
                            Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]],
      }
      pacemaker::constraint::base { 'neutron-metadata-agent-to-tomcat-constraint':
        constraint_type => 'order',
        first_resource  => "${::neutron::params::metadata_agent_service}-clone",
        second_resource => 'tomcat-clone',
        first_action    => 'start',
        second_action   => 'start',
        require         => [Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service],
                            Pacemaker::Resource::Service['tomcat']],
      }
      pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-metadata-agent-colocation':
        source  => "${::neutron::params::metadata_agent_service}-clone",
        target  => "${::neutron::params::dhcp_agent_service}-clone",
        score   => 'INFINITY',
        require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
                    Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]],
      }
    }

    # Nova
    pacemaker::resource::service { $::nova::params::api_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::nova::params::conductor_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::nova::params::consoleauth_service_name :
      clone_params => 'interleave=true',
      require      => Pacemaker::Resource::Ocf['openstack-core'],
    }
    pacemaker::resource::service { $::nova::params::vncproxy_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::nova::params::scheduler_service_name :
      clone_params => 'interleave=true',
    }

    pacemaker::constraint::base { 'keystone-then-nova-consoleauth-constraint':
      constraint_type => 'order',
      first_resource  => 'openstack-core-clone',
      second_resource => "${::nova::params::consoleauth_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
                          Pacemaker::Resource::Ocf['openstack-core']],
    }
    pacemaker::constraint::base { 'nova-consoleauth-then-nova-vncproxy-constraint':
      constraint_type => 'order',
      first_resource  => "${::nova::params::consoleauth_service_name}-clone",
      second_resource => "${::nova::params::vncproxy_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
                          Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]],
    }
    pacemaker::constraint::colocation { 'nova-vncproxy-with-nova-consoleauth-colocation':
      source  => "${::nova::params::vncproxy_service_name}-clone",
      target  => "${::nova::params::consoleauth_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
                  Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]],
    }
    pacemaker::constraint::base { 'nova-vncproxy-then-nova-api-constraint':
      constraint_type => 'order',
      first_resource  => "${::nova::params::vncproxy_service_name}-clone",
      second_resource => "${::nova::params::api_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name],
                          Pacemaker::Resource::Service[$::nova::params::api_service_name]],
    }
    pacemaker::constraint::colocation { 'nova-api-with-nova-vncproxy-colocation':
      source  => "${::nova::params::api_service_name}-clone",
      target  => "${::nova::params::vncproxy_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name],
                  Pacemaker::Resource::Service[$::nova::params::api_service_name]],
    }
    pacemaker::constraint::base { 'nova-api-then-nova-scheduler-constraint':
      constraint_type => 'order',
      first_resource  => "${::nova::params::api_service_name}-clone",
      second_resource => "${::nova::params::scheduler_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::nova::params::api_service_name],
                          Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]],
    }
    pacemaker::constraint::colocation { 'nova-scheduler-with-nova-api-colocation':
      source  => "${::nova::params::scheduler_service_name}-clone",
      target  => "${::nova::params::api_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::nova::params::api_service_name],
                  Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]],
    }
    pacemaker::constraint::base { 'nova-scheduler-then-nova-conductor-constraint':
      constraint_type => 'order',
      first_resource  => "${::nova::params::scheduler_service_name}-clone",
      second_resource => "${::nova::params::conductor_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name],
                          Pacemaker::Resource::Service[$::nova::params::conductor_service_name]],
    }
    pacemaker::constraint::colocation { 'nova-conductor-with-nova-scheduler-colocation':
      source  => "${::nova::params::conductor_service_name}-clone",
      target  => "${::nova::params::scheduler_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name],
                  Pacemaker::Resource::Service[$::nova::params::conductor_service_name]],
    }

    # Ceilometer and Aodh
    case downcase(hiera('ceilometer_backend')) {
      /mysql/: {
        pacemaker::resource::service { $::ceilometer::params::agent_central_service_name:
          clone_params => 'interleave=true',
          require      => Pacemaker::Resource::Ocf['openstack-core'],
        }
      }
      default: {
        pacemaker::resource::service { $::ceilometer::params::agent_central_service_name:
          clone_params => 'interleave=true',
          require      => [Pacemaker::Resource::Ocf['openstack-core'],
                           Pacemaker::Resource::Service[$::mongodb::params::service_name]],
        }
      }
    }
    pacemaker::resource::service { $::ceilometer::params::collector_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::ceilometer::params::api_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::ceilometer::params::agent_notification_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::resource::ocf { 'delay' :
      ocf_agent_name  => 'heartbeat:Delay',
      clone_params    => 'interleave=true',
      resource_params => 'startdelay=10',
    }
    # Fedora doesn't know the `require-all` parameter for constraints yet
    if $::operatingsystem == 'Fedora' {
      $redis_ceilometer_constraint_params = undef
      $redis_aodh_constraint_params = undef
    } else {
      $redis_ceilometer_constraint_params = 'require-all=false'
      $redis_aodh_constraint_params = 'require-all=false'
    }
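    # For illustration only (assuming the constraint provider shells out to
    # pcs; the resource name below is a RHEL-style example): the next
    # constraint roughly corresponds to
    #   pcs constraint order promote redis-master then start \
    #     openstack-ceilometer-central-clone require-all=false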
    pacemaker::constraint::base { 'redis-then-ceilometer-central-constraint':
      constraint_type   => 'order',
      first_resource    => 'redis-master',
      second_resource   => "${::ceilometer::params::agent_central_service_name}-clone",
      first_action      => 'promote',
      second_action     => 'start',
      constraint_params => $redis_ceilometer_constraint_params,
      require           => [Pacemaker::Resource::Ocf['redis'],
                            Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name]],
    }
    pacemaker::constraint::base { 'redis-then-aodh-evaluator-constraint':
      constraint_type   => 'order',
      first_resource    => 'redis-master',
      second_resource   => "${::aodh::params::evaluator_service_name}-clone",
      first_action      => 'promote',
      second_action     => 'start',
      constraint_params => $redis_aodh_constraint_params,
      require           => [Pacemaker::Resource::Ocf['redis'],
                            Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name]],
    }
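    # Ordering on 'promote' ties these agents to redis master promotion
    # rather than plain clone start, and require-all=false (where supported)
    # lets them proceed once any one redis instance is up instead of waiting
    # for the whole clone set.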
    pacemaker::constraint::base { 'keystone-then-ceilometer-central-constraint':
      constraint_type => 'order',
      first_resource  => 'openstack-core-clone',
      second_resource => "${::ceilometer::params::agent_central_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
                          Pacemaker::Resource::Ocf['openstack-core']],
    }
    pacemaker::constraint::base { 'keystone-then-ceilometer-notification-constraint':
      constraint_type => 'order',
      first_resource  => 'openstack-core-clone',
      second_resource => "${::ceilometer::params::agent_notification_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::ceilometer::params::agent_notification_service_name],
                          Pacemaker::Resource::Ocf['openstack-core']],
    }
    pacemaker::constraint::base { 'ceilometer-central-then-ceilometer-collector-constraint':
      constraint_type => 'order',
      first_resource  => "${::ceilometer::params::agent_central_service_name}-clone",
      second_resource => "${::ceilometer::params::collector_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
                          Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name]],
    }
    pacemaker::constraint::base { 'ceilometer-collector-then-ceilometer-api-constraint':
      constraint_type => 'order',
      first_resource  => "${::ceilometer::params::collector_service_name}-clone",
      second_resource => "${::ceilometer::params::api_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name],
                          Pacemaker::Resource::Service[$::ceilometer::params::api_service_name]],
    }
    pacemaker::constraint::colocation { 'ceilometer-api-with-ceilometer-collector-colocation':
      source  => "${::ceilometer::params::api_service_name}-clone",
      target  => "${::ceilometer::params::collector_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
                  Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name]],
    }
    pacemaker::constraint::base { 'ceilometer-api-then-ceilometer-delay-constraint':
      constraint_type => 'order',
      first_resource  => "${::ceilometer::params::api_service_name}-clone",
      second_resource => 'delay-clone',
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
                          Pacemaker::Resource::Ocf['delay']],
    }
    pacemaker::constraint::colocation { 'ceilometer-delay-with-ceilometer-api-colocation':
      source  => 'delay-clone',
      target  => "${::ceilometer::params::api_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
                  Pacemaker::Resource::Ocf['delay']],
    }
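    # Net start order for ceilometer: keystone (openstack-core) -> central
    # agent -> collector -> api -> delay, with the api and delay clones kept
    # on the same nodes.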
    # Aodh
    pacemaker::resource::service { $::aodh::params::evaluator_service_name:
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::aodh::params::notifier_service_name:
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::aodh::params::listener_service_name:
      clone_params => 'interleave=true',
    }
    pacemaker::constraint::base { 'aodh-delay-then-aodh-evaluator-constraint':
      constraint_type => 'order',
      first_resource  => 'delay-clone',
      second_resource => "${::aodh::params::evaluator_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
                          Pacemaker::Resource::Ocf['delay']],
    }
    pacemaker::constraint::colocation { 'aodh-evaluator-with-aodh-delay-colocation':
      source  => "${::aodh::params::evaluator_service_name}-clone",
      target  => 'delay-clone',
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
                  Pacemaker::Resource::Ocf['delay']],
    }
    pacemaker::constraint::base { 'aodh-evaluator-then-aodh-notifier-constraint':
      constraint_type => 'order',
      first_resource  => "${::aodh::params::evaluator_service_name}-clone",
      second_resource => "${::aodh::params::notifier_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
                          Pacemaker::Resource::Service[$::aodh::params::notifier_service_name]],
    }
    pacemaker::constraint::colocation { 'aodh-notifier-with-aodh-evaluator-colocation':
      source  => "${::aodh::params::notifier_service_name}-clone",
      target  => "${::aodh::params::evaluator_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
                  Pacemaker::Resource::Service[$::aodh::params::notifier_service_name]],
    }
    pacemaker::constraint::base { 'aodh-evaluator-then-aodh-listener-constraint':
      constraint_type => 'order',
      first_resource  => "${::aodh::params::evaluator_service_name}-clone",
      second_resource => "${::aodh::params::listener_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
                          Pacemaker::Resource::Service[$::aodh::params::listener_service_name]],
    }
    pacemaker::constraint::colocation { 'aodh-listener-with-aodh-evaluator-colocation':
      source  => "${::aodh::params::listener_service_name}-clone",
      target  => "${::aodh::params::evaluator_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
                  Pacemaker::Resource::Service[$::aodh::params::listener_service_name]],
    }
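    # The notifier and listener are each ordered after the evaluator and
    # colocated with it, so the three aodh services always run together.
    # With the (default) MongoDB metering backend, the central agent must
    # additionally start after mongod.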
    if downcase(hiera('ceilometer_backend')) == 'mongodb' {
      pacemaker::constraint::base { 'mongodb-then-ceilometer-central-constraint':
        constraint_type => 'order',
        first_resource  => "${::mongodb::params::service_name}-clone",
        second_resource => "${::ceilometer::params::agent_central_service_name}-clone",
        first_action    => 'start',
        second_action   => 'start',
        require         => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
                            Pacemaker::Resource::Service[$::mongodb::params::service_name]],
      }
    }

    # Gnocchi
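    # gnocchi-statsd is ordered after gnocchi-metricd and colocated with it,
    # so both always run together.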
    pacemaker::resource::service { $::gnocchi::params::metricd_service_name:
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::gnocchi::params::statsd_service_name:
      clone_params => 'interleave=true',
    }
    pacemaker::constraint::base { 'gnocchi-metricd-then-gnocchi-statsd-constraint':
      constraint_type => 'order',
      first_resource  => "${::gnocchi::params::metricd_service_name}-clone",
      second_resource => "${::gnocchi::params::statsd_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::gnocchi::params::metricd_service_name],
                          Pacemaker::Resource::Service[$::gnocchi::params::statsd_service_name]],
    }
    pacemaker::constraint::colocation { 'gnocchi-statsd-with-metricd-colocation':
      source  => "${::gnocchi::params::statsd_service_name}-clone",
      target  => "${::gnocchi::params::metricd_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::gnocchi::params::metricd_service_name],
                  Pacemaker::Resource::Service[$::gnocchi::params::statsd_service_name]],
    }

    # Heat
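    # The Heat services start in a fixed chain (api -> api-cfn ->
    # api-cloudwatch -> engine), with each member colocated with its
    # predecessor so the whole stack runs together; heat-api itself waits
    # for the ceilometer notification agent.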
    pacemaker::resource::service { $::heat::params::api_service_name:
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::heat::params::api_cloudwatch_service_name:
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::heat::params::api_cfn_service_name:
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::heat::params::engine_service_name:
      clone_params => 'interleave=true',
    }
    pacemaker::constraint::base { 'heat-api-then-heat-api-cfn-constraint':
      constraint_type => 'order',
      first_resource  => "${::heat::params::api_service_name}-clone",
      second_resource => "${::heat::params::api_cfn_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::heat::params::api_service_name],
                          Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name]],
    }
    pacemaker::constraint::colocation { 'heat-api-cfn-with-heat-api-colocation':
      source  => "${::heat::params::api_cfn_service_name}-clone",
      target  => "${::heat::params::api_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name],
                  Pacemaker::Resource::Service[$::heat::params::api_service_name]],
    }
    pacemaker::constraint::base { 'heat-api-cfn-then-heat-api-cloudwatch-constraint':
      constraint_type => 'order',
      first_resource  => "${::heat::params::api_cfn_service_name}-clone",
      second_resource => "${::heat::params::api_cloudwatch_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name],
                          Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name]],
    }
    pacemaker::constraint::colocation { 'heat-api-cloudwatch-with-heat-api-cfn-colocation':
      source  => "${::heat::params::api_cloudwatch_service_name}-clone",
      target  => "${::heat::params::api_cfn_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name],
                  Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name]],
    }
    pacemaker::constraint::base { 'heat-api-cloudwatch-then-heat-engine-constraint':
      constraint_type => 'order',
      first_resource  => "${::heat::params::api_cloudwatch_service_name}-clone",
      second_resource => "${::heat::params::engine_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name],
                          Pacemaker::Resource::Service[$::heat::params::engine_service_name]],
    }
    pacemaker::constraint::colocation { 'heat-engine-with-heat-api-cloudwatch-colocation':
      source  => "${::heat::params::engine_service_name}-clone",
      target  => "${::heat::params::api_cloudwatch_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name],
                  Pacemaker::Resource::Service[$::heat::params::engine_service_name]],
    }
    pacemaker::constraint::base { 'ceilometer-notification-then-heat-api-constraint':
      constraint_type => 'order',
      first_resource  => "${::ceilometer::params::agent_notification_service_name}-clone",
      second_resource => "${::heat::params::api_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::heat::params::api_service_name],
                          Pacemaker::Resource::Service[$::ceilometer::params::agent_notification_service_name]],
    }

    # Horizon and Keystone
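    # httpd serves both Horizon and (in this deployment) the Keystone WSGI
    # application; it must not start before the Keystone PKI signing
    # material exists on disk.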
    pacemaker::resource::service { $::apache::params::service_name:
      clone_params     => 'interleave=true',
      verify_on_create => true,
      require          => [File['/etc/keystone/ssl/certs/ca.pem'],
                           File['/etc/keystone/ssl/private/signing_key.pem'],
                           File['/etc/keystone/ssl/certs/signing_cert.pem']],
    }

    # Cisco N1KV Virtual Supervisor Module (VSM)
    if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
      pacemaker::resource::ocf { 'vsm-p':
        ocf_agent_name  => 'heartbeat:VirtualDomain',
        resource_params => 'force_stop=true config=/var/spool/cisco/vsm/vsm_primary_deploy.xml',
        require         => Class['n1k_vsm'],
        meta_params     => 'resource-stickiness=INFINITY',
      }
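      # force_stop makes VirtualDomain destroy the libvirt domain on stop,
      # and INFINITY stickiness pins each VSM VM to the node it starts on.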
      if str2bool(hiera('n1k_vsm::pacemaker_control', true)) {
        pacemaker::resource::ocf { 'vsm-s':
          ocf_agent_name  => 'heartbeat:VirtualDomain',
          resource_params => 'force_stop=true config=/var/spool/cisco/vsm/vsm_secondary_deploy.xml',
          require         => Class['n1k_vsm'],
          meta_params     => 'resource-stickiness=INFINITY',
        }
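        # The -INFINITY colocation keeps the primary and secondary VSM on
        # different controllers for redundancy.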
        pacemaker::constraint::colocation { 'vsm-colocation-constraint':
          source  => 'vsm-p',
          target  => 'vsm-s',
          score   => '-INFINITY',
          require => [Pacemaker::Resource::Ocf['vsm-p'],
                      Pacemaker::Resource::Ocf['vsm-s']],
        }
      }
    }

  }

} #END STEP 5

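# Write a per-step marker under /var/lib/tripleo/installed-packages/,
# presumably recording the packages this manifest manages at the current
# deployment step for later bookkeeping.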
$package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_controller_pacemaker', hiera('step')])
package_manifest { $package_manifest_name: ensure => present }