Merge "Composable OpenContrail compute plugin"
# puppet/manifests/overcloud_controller_pacemaker.pp
# Copyright 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

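# Collector-based defaults for every pcmk resource declared below: each
# underlying pcs call is retried up to 10 times, 3 seconds apart, to ride
# out transient CIB contention while the cluster is settling.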
Pcmk_resource <| |> {
  tries     => 10,
  try_sleep => 3,
}

# TODO(jistr): use pcs resource provider instead of just no-ops
Service <|
  tag == 'aodh-service' or
  tag == 'gnocchi-service'
|> {
  hasrestart => true,
  restart    => '/bin/true',
  start      => '/bin/true',
  stop       => '/bin/true',
}
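# These no-ops keep Puppet from fighting Pacemaker, which owns the start/stop
# lifecycle of the aodh and gnocchi services on the controllers.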

include ::tripleo::packages
include ::tripleo::firewall

if $::hostname == downcase(hiera('bootstrap_nodeid')) {
  $pacemaker_master = true
  $sync_db = true
} else {
  $pacemaker_master = false
  $sync_db = false
}

$enable_fencing = str2bool(hiera('enable_fencing', false)) and hiera('step') >= 5
$enable_load_balancer = hiera('enable_load_balancer', true)

# When to start and enable services which haven't been Pacemakerized
# FIXME: remove when we start all OpenStack services using Pacemaker
# (occurrences of this variable will be gradually replaced with false)
$non_pcmk_start = hiera('step') >= 5

if hiera('step') >= 1 {

  $pacemaker_cluster_members = downcase(regsubst(hiera('controller_node_names'), ',', ' ', 'G'))
  $corosync_ipv6 = str2bool(hiera('corosync_ipv6', false))
  if $corosync_ipv6 {
    $cluster_setup_extras = { '--token' => hiera('corosync_token_timeout', 1000), '--ipv6' => '' }
  } else {
    $cluster_setup_extras = { '--token' => hiera('corosync_token_timeout', 1000) }
  }
  class { '::pacemaker':
    hacluster_pwd => hiera('hacluster_pwd'),
  } ->
  class { '::pacemaker::corosync':
    cluster_members      => $pacemaker_cluster_members,
    setup_cluster        => $pacemaker_master,
    cluster_setup_extras => $cluster_setup_extras,
  }
  class { '::pacemaker::stonith':
    disable => !$enable_fencing,
  }
  if $enable_fencing {
    include ::tripleo::fencing

    # enable stonith after all Pacemaker resources have been created
    Pcmk_resource<||> -> Class['tripleo::fencing']
    Pcmk_constraint<||> -> Class['tripleo::fencing']
    Exec <| tag == 'pacemaker_constraint' |> -> Class['tripleo::fencing']
    # enable stonith after all fencing devices have been created
    Class['tripleo::fencing'] -> Class['pacemaker::stonith']
  }

  # FIXME(gfidente): set a 200s default timeout for start/stop op params;
  # until we can use pcmk global defaults we'll still need to add it to
  # every resource which redefines op params
  Pacemaker::Resource::Service {
    op_params => 'start timeout=200s stop timeout=200s',
  }

  # Galera
  if str2bool(hiera('enable_galera', true)) {
    $mysql_config_file = '/etc/my.cnf.d/galera.cnf'
  } else {
    $mysql_config_file = '/etc/my.cnf.d/server.cnf'
  }
  $galera_nodes = downcase(hiera('galera_node_names', $::hostname))
  $galera_nodes_count = count(split($galera_nodes, ','))

  # FIXME: due to https://bugzilla.redhat.com/show_bug.cgi?id=1298671 we
  # set bind-address to a hostname instead of an ip address; to move MySQL
  # from internal_api onto another network we'll have to customize both
  # MysqlNetwork and ControllerHostnameResolveNetwork in ServiceNetMap
  $mysql_bind_host = hiera('mysql_bind_host')
  $mysqld_options = {
    'mysqld' => {
      'skip-name-resolve'             => '1',
      'binlog_format'                 => 'ROW',
      'default-storage-engine'        => 'innodb',
      'innodb_autoinc_lock_mode'      => '2',
      'innodb_locks_unsafe_for_binlog'=> '1',
      'query_cache_size'              => '0',
      'query_cache_type'              => '0',
      'bind-address'                  => $::hostname,
      'max_connections'               => hiera('mysql_max_connections'),
      'open_files_limit'              => '-1',
      'wsrep_on'                      => 'ON',
      'wsrep_provider'                => '/usr/lib64/galera/libgalera_smm.so',
      'wsrep_cluster_name'            => 'galera_cluster',
      'wsrep_cluster_address'         => "gcomm://${galera_nodes}",
      'wsrep_slave_threads'           => '1',
      'wsrep_certify_nonPK'           => '1',
      'wsrep_max_ws_rows'             => '131072',
      'wsrep_max_ws_size'             => '1073741824',
      'wsrep_debug'                   => '0',
      'wsrep_convert_LOCK_to_trx'     => '0',
      'wsrep_retry_autocommit'        => '1',
      'wsrep_auto_increment_control'  => '1',
      'wsrep_drupal_282555_workaround'=> '0',
      'wsrep_causal_reads'            => '0',
      'wsrep_sst_method'              => 'rsync',
      'wsrep_provider_options'        => "gmcast.listen_addr=tcp://[${mysql_bind_host}]:4567;",
    },
  }
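  # Illustrative only (hypothetical node names): with galera_nodes set to
  # 'overcloud-controller-0,overcloud-controller-1', the rendered config
  # file would contain, among the settings above:
  #
  #   [mysqld]
  #   bind-address = overcloud-controller-0
  #   wsrep_cluster_address = gcomm://overcloud-controller-0,overcloud-controller-1
  #   wsrep_provider_options = gmcast.listen_addr=tcp://[<mysql_bind_host>]:4567;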

  class { '::mysql::server':
    create_root_user        => false,
    create_root_my_cnf      => false,
    config_file             => $mysql_config_file,
    override_options        => $mysqld_options,
    remove_default_accounts => $pacemaker_master,
    service_manage          => false,
    service_enabled         => false,
  }

}

if hiera('step') >= 2 {

  # NOTE(gfidente): the following vars are needed on all nodes so they
  # need to stay out of the pacemaker_master conditional.
  # The address mangling will hopefully go away when we're able to
  # configure the connection string via hostnames; until then, we need to
  # pass the list of IPv6 addresses *with* port and without the brackets
  # as the 'members' argument for the 'mongodb_replset' resource.
  if str2bool(hiera('mongodb::server::ipv6', false)) {
    $mongo_node_ips_with_port_prefixed = prefix(hiera('mongo_node_ips'), '[')
    $mongo_node_ips_with_port = suffix($mongo_node_ips_with_port_prefixed, ']:27017')
    $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017')
  } else {
    $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017')
    $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017')
  }
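  # Example (assumed addresses): with mongo_node_ips = ['fd00::101', 'fd00::102']
  # and IPv6 enabled, the mangling above yields
  #   $mongo_node_ips_with_port      = ['[fd00::101]:27017', '[fd00::102]:27017']
  #   $mongo_node_ips_with_port_nobr = ['fd00::101:27017', 'fd00::102:27017']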
  $mongodb_replset = hiera('mongodb::server::replset')

  if $pacemaker_master {

    include ::pacemaker::resource_defaults

    # Create an openstack-core dummy resource. See RHBZ 1290121
    pacemaker::resource::ocf { 'openstack-core':
      ocf_agent_name => 'heartbeat:Dummy',
      clone_params   => true,
    }

    pacemaker::resource::ocf { 'galera' :
      ocf_agent_name  => 'heartbeat:galera',
      op_params       => 'promote timeout=300s on-fail=block',
      master_params   => '',
      meta_params     => "master-max=${galera_nodes_count} ordered=true",
      resource_params => "additional_parameters='--open-files-limit=16384' enable_creation=true wsrep_cluster_address='gcomm://${galera_nodes}'",
      require         => Class['::mysql::server'],
      before          => Exec['galera-ready'],
    }

    exec { 'galera-ready' :
      command     => '/usr/bin/clustercheck >/dev/null',
      timeout     => 30,
      tries       => 180,
      try_sleep   => 10,
      environment => ['AVAILABLE_WHEN_READONLY=0'],
      require     => Exec['create-root-sysconfig-clustercheck'],
    }

    # We add a clustercheck db user and will switch /etc/sysconfig/clustercheck
    # to it in a later step. We do this on one node only, as it will replicate
    # to the other members. We also make sure the permissions are the minimum
    # necessary.
    mysql_user { 'clustercheck@localhost':
      ensure        => 'present',
      password_hash => mysql_password(hiera('mysql_clustercheck_password')),
      require       => Exec['galera-ready'],
    }

    mysql_grant { 'clustercheck@localhost/*.*':
      ensure     => 'present',
      options    => ['GRANT'],
      privileges => ['PROCESS'],
      table      => '*.*',
      user       => 'clustercheck@localhost',
    }
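    # For reference only: together these amount to roughly the following SQL
    # (the actual statements are issued by the puppetlabs-mysql providers):
    #   CREATE USER 'clustercheck'@'localhost' IDENTIFIED BY PASSWORD '<hash>';
    #   GRANT PROCESS ON *.* TO 'clustercheck'@'localhost' WITH GRANT OPTION;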

    if downcase(hiera('gnocchi_indexer_backend')) == 'mysql' {
      class { '::gnocchi::db::mysql':
        require => Exec['galera-ready'],
      }
    }

    class { '::aodh::db::mysql':
      require => Exec['galera-ready'],
    }
  }
  # This step creates a sysconfig clustercheck file with the root user and an
  # empty password on the first install only (because later on the clustercheck
  # db user will be used). We use exec rather than file in order to avoid
  # duplicate definition errors in puppet when we later set the file to
  # contain the clustercheck data.
  exec { 'create-root-sysconfig-clustercheck':
    command => "/bin/echo 'MYSQL_USERNAME=root\nMYSQL_PASSWORD=\'\'\nMYSQL_HOST=localhost\n' > /etc/sysconfig/clustercheck",
    unless  => '/bin/test -e /etc/sysconfig/clustercheck && grep -q clustercheck /etc/sysconfig/clustercheck',
  }
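  # On a first install the exec above is intended to leave
  # /etc/sysconfig/clustercheck looking roughly like:
  #   MYSQL_USERNAME=root
  #   MYSQL_PASSWORD=''
  #   MYSQL_HOST=localhost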

  xinetd::service { 'galera-monitor' :
    port           => '9200',
    server         => '/usr/bin/clustercheck',
    per_source     => 'UNLIMITED',
    log_on_success => '',
    log_on_failure => 'HOST',
    flags          => 'REUSE',
    service_type   => 'UNLISTED',
    user           => 'root',
    group          => 'root',
    require        => Exec['create-root-sysconfig-clustercheck'],
  }
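  # galera-monitor exposes clustercheck on TCP 9200, so the load balancer
  # can health-check galera with an HTTP check: clustercheck answers 200
  # when the local node is a synced cluster member and 503 otherwise.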

} #END STEP 2

if hiera('step') >= 4 or ( hiera('step') >= 3 and $sync_db ) {
  # At this stage we are guaranteed that the clustercheck db user exists
  # so we switch the resource agent to use it.
  $mysql_clustercheck_password = hiera('mysql_clustercheck_password')
  file { '/etc/sysconfig/clustercheck' :
    ensure  => file,
    mode    => '0600',
    owner   => 'root',
    group   => 'root',
    content => "MYSQL_USERNAME=clustercheck\nMYSQL_PASSWORD='${mysql_clustercheck_password}'\nMYSQL_HOST=localhost\n",
  }

  $nova_ipv6 = hiera('nova::use_ipv6', false)
  if $nova_ipv6 {
    $memcached_servers = suffix(hiera('memcache_node_ips_v6'), ':11211')
  } else {
    $memcached_servers = suffix(hiera('memcache_node_ips'), ':11211')
  }
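  # Example (assumed addresses): memcache_node_ips = ['192.0.2.10', '192.0.2.11']
  # yields ['192.0.2.10:11211', '192.0.2.11:11211']; the v6 hiera key is
  # expected to provide addresses already bracketed for this concatenation.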

  class { '::nova' :
    memcached_servers => $memcached_servers
  }

  include ::nova::config

  # Aodh
  class { '::aodh' :
    database_connection => hiera('aodh_mysql_conn_string'),
  }
  include ::aodh::config
  include ::aodh::auth
  include ::aodh::client
  include ::aodh::wsgi::apache
  class { '::aodh::api':
    manage_service => false,
    enabled        => false,
    service_name   => 'httpd',
  }
  class { '::aodh::evaluator':
    manage_service => false,
    enabled        => false,
  }
  class { '::aodh::notifier':
    manage_service => false,
    enabled        => false,
  }
  class { '::aodh::listener':
    manage_service => false,
    enabled        => false,
  }

  # Gnocchi
  $gnocchi_database_connection = hiera('gnocchi_mysql_conn_string')
  include ::gnocchi::client
  if $sync_db {
    include ::gnocchi::db::sync
  }
  include ::gnocchi::storage
  $gnocchi_backend = downcase(hiera('gnocchi_backend', 'swift'))
  case $gnocchi_backend {
    'swift': { include ::gnocchi::storage::swift }
    'file': { include ::gnocchi::storage::file }
    'rbd': { include ::gnocchi::storage::ceph }
    default: { fail('Unrecognized gnocchi_backend parameter.') }
  }
  class { '::gnocchi':
    database_connection => $gnocchi_database_connection,
  }
  class { '::gnocchi::api' :
    manage_service => false,
    enabled        => false,
    service_name   => 'httpd',
  }
  class { '::gnocchi::wsgi::apache' :
    ssl => false,
  }
  class { '::gnocchi::metricd' :
    manage_service => false,
    enabled        => false,
  }
  class { '::gnocchi::statsd' :
    manage_service => false,
    enabled        => false,
  }

  hiera_include('controller_classes')

} #END STEP 4

if hiera('step') >= 5 {
  # We now make sure that the root db password is set to a random one.
  # At first installation /root/.my.cnf will be empty and we connect without a
  # root password. On subsequent runs or updates /root/.my.cnf will already be
  # populated with the proper credentials. This step happens on every node
  # because this sql statement does not automatically replicate across nodes.
  $mysql_root_password = hiera('mysql::server::root_password')
  exec { 'galera-set-root-password':
    command => "/bin/touch /root/.my.cnf && /bin/echo \"UPDATE mysql.user SET Password = PASSWORD('${mysql_root_password}') WHERE user = 'root'; flush privileges;\" | /bin/mysql --defaults-extra-file=/root/.my.cnf -u root",
  }
  file { '/root/.my.cnf' :
    ensure  => file,
    mode    => '0600',
    owner   => 'root',
    group   => 'root',
    content => "[client]
user=root
password=\"${mysql_root_password}\"

[mysql]
user=root
password=\"${mysql_root_password}\"",
    require => Exec['galera-set-root-password'],
  }
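  # With /root/.my.cnf in place (mode 0600, root-only), subsequent mysql
  # invocations, including re-runs of the exec above, authenticate as root
  # without prompting for credentials.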

  $nova_enable_db_purge = hiera('nova_enable_db_purge', true)

  if $nova_enable_db_purge {
    include ::nova::cron::archive_deleted_rows
  }

  if $pacemaker_master {

    pacemaker::constraint::base { 'openstack-core-then-httpd-constraint':
      constraint_type => 'order',
      first_resource  => 'openstack-core-clone',
      second_resource => "${::apache::params::service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::apache::params::service_name],
                          Pacemaker::Resource::Ocf['openstack-core']],
    }
    pacemaker::constraint::base { 'galera-then-openstack-core-constraint':
      constraint_type => 'order',
      first_resource  => 'galera-master',
      second_resource => 'openstack-core-clone',
      first_action    => 'promote',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Ocf['galera'],
                          Pacemaker::Resource::Ocf['openstack-core']],
    }

    # Nova
    pacemaker::constraint::base { 'keystone-then-nova-consoleauth-constraint':
      constraint_type => 'order',
      first_resource  => 'openstack-core-clone',
      second_resource => "${::nova::params::consoleauth_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
                          Pacemaker::Resource::Ocf['openstack-core']],
    }
    pacemaker::constraint::colocation { 'nova-consoleauth-with-openstack-core':
      source  => "${::nova::params::consoleauth_service_name}-clone",
      target  => 'openstack-core-clone',
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
                  Pacemaker::Resource::Ocf['openstack-core']],
    }
    pacemaker::constraint::base { 'nova-consoleauth-then-nova-vncproxy-constraint':
      constraint_type => 'order',
      first_resource  => "${::nova::params::consoleauth_service_name}-clone",
      second_resource => "${::nova::params::vncproxy_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
                          Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]],
    }
    pacemaker::constraint::colocation { 'nova-vncproxy-with-nova-consoleauth-colocation':
      source  => "${::nova::params::vncproxy_service_name}-clone",
      target  => "${::nova::params::consoleauth_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
                  Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]],
    }
    pacemaker::constraint::base { 'nova-vncproxy-then-nova-api-constraint':
      constraint_type => 'order',
      first_resource  => "${::nova::params::vncproxy_service_name}-clone",
      second_resource => "${::nova::params::api_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name],
                          Pacemaker::Resource::Service[$::nova::params::api_service_name]],
    }
    pacemaker::constraint::colocation { 'nova-api-with-nova-vncproxy-colocation':
      source  => "${::nova::params::api_service_name}-clone",
      target  => "${::nova::params::vncproxy_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name],
                  Pacemaker::Resource::Service[$::nova::params::api_service_name]],
    }
    pacemaker::constraint::base { 'nova-api-then-nova-scheduler-constraint':
      constraint_type => 'order',
      first_resource  => "${::nova::params::api_service_name}-clone",
      second_resource => "${::nova::params::scheduler_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::nova::params::api_service_name],
                          Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]],
    }
    pacemaker::constraint::colocation { 'nova-scheduler-with-nova-api-colocation':
      source  => "${::nova::params::scheduler_service_name}-clone",
      target  => "${::nova::params::api_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::nova::params::api_service_name],
                  Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]],
    }
    pacemaker::constraint::base { 'nova-scheduler-then-nova-conductor-constraint':
      constraint_type => 'order',
      first_resource  => "${::nova::params::scheduler_service_name}-clone",
      second_resource => "${::nova::params::conductor_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name],
                          Pacemaker::Resource::Service[$::nova::params::conductor_service_name]],
    }
    pacemaker::constraint::colocation { 'nova-conductor-with-nova-scheduler-colocation':
      source  => "${::nova::params::conductor_service_name}-clone",
      target  => "${::nova::params::scheduler_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name],
                  Pacemaker::Resource::Service[$::nova::params::conductor_service_name]],
    }
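
    # Net effect of the constraints above: one start chain,
    #   galera (promote) -> openstack-core -> httpd
    #   openstack-core -> nova-consoleauth -> nova-vncproxy -> nova-api
    #     -> nova-scheduler -> nova-conductor
    # with each nova service colocated with its predecessor, so the whole
    # stack is kept together on surviving nodes.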

    # Fedora doesn't support the `require-all` parameter for constraints yet
    if $::operatingsystem == 'Fedora' {
      $redis_aodh_constraint_params = undef
    } else {
      $redis_aodh_constraint_params = 'require-all=false'
    }
    pacemaker::constraint::base { 'redis-then-aodh-evaluator-constraint':
      constraint_type   => 'order',
      first_resource    => 'redis-master',
      second_resource   => "${::aodh::params::evaluator_service_name}-clone",
      first_action      => 'promote',
      second_action     => 'start',
      constraint_params => $redis_aodh_constraint_params,
      require           => [Pacemaker::Resource::Ocf['redis'],
                            Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name]],
    }
    # Aodh
    pacemaker::resource::service { $::aodh::params::evaluator_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::aodh::params::notifier_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::aodh::params::listener_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::constraint::base { 'aodh-evaluator-then-aodh-notifier-constraint':
      constraint_type => 'order',
      first_resource  => "${::aodh::params::evaluator_service_name}-clone",
      second_resource => "${::aodh::params::notifier_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
                          Pacemaker::Resource::Service[$::aodh::params::notifier_service_name]],
    }
    pacemaker::constraint::colocation { 'aodh-notifier-with-aodh-evaluator-colocation':
      source  => "${::aodh::params::notifier_service_name}-clone",
      target  => "${::aodh::params::evaluator_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
                  Pacemaker::Resource::Service[$::aodh::params::notifier_service_name]],
    }
    pacemaker::constraint::base { 'aodh-evaluator-then-aodh-listener-constraint':
      constraint_type => 'order',
      first_resource  => "${::aodh::params::evaluator_service_name}-clone",
      second_resource => "${::aodh::params::listener_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
                          Pacemaker::Resource::Service[$::aodh::params::listener_service_name]],
    }
    pacemaker::constraint::colocation { 'aodh-listener-with-aodh-evaluator-colocation':
      source  => "${::aodh::params::listener_service_name}-clone",
      target  => "${::aodh::params::evaluator_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
                  Pacemaker::Resource::Service[$::aodh::params::listener_service_name]],
    }
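    # Net effect: redis (promote) -> aodh-evaluator, then evaluator ->
    # notifier and evaluator -> listener, with notifier and listener pinned
    # to whichever nodes run the evaluator.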

    # Gnocchi
    pacemaker::resource::service { $::gnocchi::params::metricd_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::gnocchi::params::statsd_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::constraint::base { 'gnocchi-metricd-then-gnocchi-statsd-constraint':
      constraint_type => 'order',
      first_resource  => "${::gnocchi::params::metricd_service_name}-clone",
      second_resource => "${::gnocchi::params::statsd_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::gnocchi::params::metricd_service_name],
                          Pacemaker::Resource::Service[$::gnocchi::params::statsd_service_name]],
    }
    pacemaker::constraint::colocation { 'gnocchi-statsd-with-metricd-colocation':
      source  => "${::gnocchi::params::statsd_service_name}-clone",
      target  => "${::gnocchi::params::metricd_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::gnocchi::params::metricd_service_name],
                  Pacemaker::Resource::Service[$::gnocchi::params::statsd_service_name]],
    }

    # Horizon and Keystone
    pacemaker::resource::service { $::apache::params::service_name:
      clone_params     => 'interleave=true',
      verify_on_create => true,
      require          => [File['/etc/keystone/ssl/certs/ca.pem'],
                           File['/etc/keystone/ssl/private/signing_key.pem'],
                           File['/etc/keystone/ssl/certs/signing_cert.pem']],
    }

    # VSM
    if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
      pacemaker::resource::ocf { 'vsm-p' :
        ocf_agent_name  => 'heartbeat:VirtualDomain',
        resource_params => 'force_stop=true config=/var/spool/cisco/vsm/vsm_primary_deploy.xml',
        require         => Class['n1k_vsm'],
        meta_params     => 'resource-stickiness=INFINITY',
      }
      if str2bool(hiera('n1k_vsm::pacemaker_control', true)) {
        pacemaker::resource::ocf { 'vsm-s' :
          ocf_agent_name  => 'heartbeat:VirtualDomain',
          resource_params => 'force_stop=true config=/var/spool/cisco/vsm/vsm_secondary_deploy.xml',
          require         => Class['n1k_vsm'],
          meta_params     => 'resource-stickiness=INFINITY',
        }
        pacemaker::constraint::colocation { 'vsm-colocation-contraint':
          source  => 'vsm-p',
          target  => 'vsm-s',
          score   => '-INFINITY',
          require => [Pacemaker::Resource::Ocf['vsm-p'],
                      Pacemaker::Resource::Ocf['vsm-s']],
        }
      }
    }

  }

} #END STEP 5

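# Record a per-step package manifest; join() appends the current step number
# to the file name, so each deployment step leaves its own marker under
# /var/lib/tripleo/installed-packages/.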
$package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_controller_pacemaker', hiera('step')])
package_manifest{ $package_manifest_name: ensure => present }