# Source: puppet/manifests/overcloud_controller_pacemaker.pp
# (apex-tripleo-heat-templates; snapshot of commit "Ensure httpd is not
# enabled by puppet on system boot")
1 # Copyright 2015 Red Hat, Inc.
2 # All Rights Reserved.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License"); you may
5 # not use this file except in compliance with the License. You may obtain
6 # a copy of the License at
7 #
8 #     http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 # License for the specific language governing permissions and limitations
14 # under the License.
15
# Default retry behaviour for every Pacemaker resource declared in this
# manifest: retry up to 10 times with a 3s pause, to ride out transient
# cluster churn while nodes join.
Pcmk_resource <| |> {
  tries     => 10,
  try_sleep => 3,
}

# Top-scope anchored for consistency with the other includes in this file
# (::ntp, ::swift, ...).
include ::tripleo::packages

# The bootstrap node drives one-time cluster operations: it sets up the
# Pacemaker cluster and runs the database schema syncs.
if $::hostname == downcase(hiera('bootstrap_nodeid')) {
  $pacemaker_master = true
  $sync_db = true
} else {
  $pacemaker_master = false
  $sync_db = false
}

# Fencing is only turned on at step >= 5, after all fencing devices exist.
$enable_fencing = str2bool(hiera('enable_fencing', 'false')) and hiera('step') >= 5

# When to start and enable services which haven't been Pacemakerized
# FIXME: remove when we start all OpenStack services using Pacemaker
# (occurrences of this variable will be gradually replaced with false)
$non_pcmk_start = hiera('step') >= 4
# Step 1: base configuration on every controller. Services are configured
# but deliberately NOT started/enabled here (service_manage/service_enabled
# false everywhere) — Pacemaker takes ownership of them in step 2.
if hiera('step') >= 1 {

  # Apply the deployment-supplied kernel tunables (hash of sysctl::value).
  create_resources(sysctl::value, hiera('sysctl_settings'), {})

  if count(hiera('ntp::servers')) > 0 {
    include ::ntp
  }

  # HAProxy is configured on all controllers but the service and the VIPs
  # stay unmanaged here; Pacemaker claims them in step 2.
  $controller_node_ips = split(hiera('controller_node_ips'), ',')
  $controller_node_names = split(downcase(hiera('controller_node_names')), ',')
  class { '::tripleo::loadbalancer' :
    controller_hosts       => $controller_node_ips,
    controller_hosts_names => $controller_node_names,
    manage_vip             => false,
    mysql_clustercheck     => true,
    haproxy_service_manage => false,
  }

  # Corosync wants a space-separated, lowercase member list; the chained
  # arrows enforce user -> pacemaker -> corosync ordering. Only the
  # bootstrap node actually creates the cluster (setup_cluster).
  $pacemaker_cluster_members = downcase(regsubst(hiera('controller_node_names'), ',', ' ', 'G'))
  user { 'hacluster':
    ensure => present,
  } ->
  class { '::pacemaker':
    hacluster_pwd => hiera('hacluster_pwd'),
  } ->
  class { '::pacemaker::corosync':
    cluster_members => $pacemaker_cluster_members,
    setup_cluster   => $pacemaker_master,
  }
  class { '::pacemaker::stonith':
    disable => !$enable_fencing,
  }
  if $enable_fencing {
    include ::tripleo::fencing

    # enable stonith after all fencing devices have been created
    Class['tripleo::fencing'] -> Class['pacemaker::stonith']
  }

  # FIXME(gfidente): sets 90secs as default start timeout op
  # param; until we can use pcmk global defaults we'll still
  # need to add it to every resource which redefines op params
  Pacemaker::Resource::Service {
    op_params => 'start timeout=90s',
  }

  # Only configure RabbitMQ in this step, don't start it yet to
  # avoid races where non-master nodes attempt to start without
  # config (eg. binding on 0.0.0.0)
  # The module ignores erlang_cookie if cluster_config is false
  class { '::rabbitmq':
    service_manage          => false,
    tcp_keepalive           => false,
    config_kernel_variables => hiera('rabbitmq_kernel_variables'),
    config_variables        => hiera('rabbitmq_config_variables'),
    environment_variables   => hiera('rabbitmq_environment'),
  } ->
  # All cluster members must share the same Erlang cookie to peer.
  file { '/var/lib/rabbitmq/.erlang.cookie':
    ensure  => 'present',
    owner   => 'rabbitmq',
    group   => 'rabbitmq',
    mode    => '0400',
    content => hiera('rabbitmq::erlang_cookie'),
    replace => true,
  }

  if downcase(hiera('ceilometer_backend')) == 'mongodb' {
    include ::mongodb::globals
    class { '::mongodb::server' :
      service_manage => false,
    }
  }

  # Memcached
  class {'::memcached' :
    service_manage => false,
  }

  # Redis
  class { '::redis' :
    service_manage => false,
    notify_service => false,
  }

  # Galera
  if str2bool(hiera('enable_galera', 'true')) {
    $mysql_config_file = '/etc/my.cnf.d/galera.cnf'
  } else {
    $mysql_config_file = '/etc/my.cnf.d/server.cnf'
  }
  $galera_nodes = downcase(hiera('galera_node_names', $::hostname))
  $galera_nodes_count = count(split($galera_nodes, ','))

  $mysqld_options = {
    'mysqld' => {
      'skip-name-resolve'             => '1',
      'binlog_format'                 => 'ROW',
      'default-storage-engine'        => 'innodb',
      'innodb_autoinc_lock_mode'      => '2',
      'innodb_locks_unsafe_for_binlog'=> '1',
      'query_cache_size'              => '0',
      'query_cache_type'              => '0',
      'bind-address'                  => hiera('mysql_bind_host'),
      'max_connections'               => hiera('mysql_max_connections'),
      'open_files_limit'              => '-1',
      'wsrep_provider'                => '/usr/lib64/galera/libgalera_smm.so',
      'wsrep_cluster_name'            => 'galera_cluster',
      'wsrep_slave_threads'           => '1',
      'wsrep_certify_nonPK'           => '1',
      'wsrep_max_ws_rows'             => '131072',
      'wsrep_max_ws_size'             => '1073741824',
      'wsrep_debug'                   => '0',
      'wsrep_convert_LOCK_to_trx'     => '0',
      'wsrep_retry_autocommit'        => '1',
      'wsrep_auto_increment_control'  => '1',
      'wsrep_drupal_282555_workaround'=> '0',
      'wsrep_causal_reads'            => '0',
      'wsrep_notify_cmd'              => '',
      'wsrep_sst_method'              => 'rsync',
    }
  }

  # MySQL/Galera: configured only; the galera OCF resource agent (step 2)
  # is responsible for starting it.
  class { '::mysql::server':
    create_root_user   => false,
    create_root_my_cnf => false,
    config_file        => $mysql_config_file,
    override_options   => $mysqld_options,
    service_manage     => false,
    service_enabled    => false,
  }

}
170
# Step 2: the bootstrap node creates the Pacemaker resources (haproxy,
# VIPs, memcached, rabbitmq, mongodb, galera, redis); every node then
# waits for Galera to report ready and creates the database schemas.
if hiera('step') >= 2 {

  # NOTE(gfidente): the following vars are needed on all nodes so they
  # need to stay out of pacemaker_master conditional
  $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017')
  $mongodb_replset = hiera('mongodb::server::replset')

  if $pacemaker_master {

    include ::pacemaker::resource_defaults

    # FIXME: we should not have to access tripleo::loadbalancer class
    # parameters here to configure pacemaker VIPs. The configuration
    # of pacemaker VIPs could move into puppet-tripleo or we should
    # make use of less specific hiera parameters here for the settings.
    pacemaker::resource::service { 'haproxy':
      clone_params => true,
    }

    # Each isolated-network VIP below follows the same pattern: create the
    # IPaddr2 resource, order it before the haproxy clone, and colocate it
    # with haproxy. Non-control VIPs are only created when they differ
    # from the control VIP (i.e. network isolation is in use).
    $control_vip = hiera('tripleo::loadbalancer::controller_virtual_ip')
    pacemaker::resource::ip { 'control_vip':
      ip_address => $control_vip,
    }
    pacemaker::constraint::base { 'control_vip-then-haproxy':
      constraint_type   => 'order',
      first_resource    => "ip-${control_vip}",
      second_resource   => 'haproxy-clone',
      first_action      => 'start',
      second_action     => 'start',
      constraint_params => 'kind=Optional',
      require => [Pacemaker::Resource::Service['haproxy'],
                  Pacemaker::Resource::Ip['control_vip']],
    }
    pacemaker::constraint::colocation { 'control_vip-with-haproxy':
      source  => "ip-${control_vip}",
      target  => 'haproxy-clone',
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service['haproxy'],
                  Pacemaker::Resource::Ip['control_vip']],
    }

    $public_vip = hiera('tripleo::loadbalancer::public_virtual_ip')
    if $public_vip and $public_vip != $control_vip {
      pacemaker::resource::ip { 'public_vip':
        ip_address => $public_vip,
      }
      pacemaker::constraint::base { 'public_vip-then-haproxy':
        constraint_type   => 'order',
        first_resource    => "ip-${public_vip}",
        second_resource   => 'haproxy-clone',
        first_action      => 'start',
        second_action     => 'start',
        constraint_params => 'kind=Optional',
        require => [Pacemaker::Resource::Service['haproxy'],
                    Pacemaker::Resource::Ip['public_vip']],
      }
      pacemaker::constraint::colocation { 'public_vip-with-haproxy':
        source  => "ip-${public_vip}",
        target  => 'haproxy-clone',
        score   => 'INFINITY',
        require => [Pacemaker::Resource::Service['haproxy'],
                    Pacemaker::Resource::Ip['public_vip']],
      }
    }

    $redis_vip = hiera('redis_vip')
    if $redis_vip and $redis_vip != $control_vip {
      pacemaker::resource::ip { 'redis_vip':
        ip_address => $redis_vip,
      }
      pacemaker::constraint::base { 'redis_vip-then-haproxy':
        constraint_type   => 'order',
        first_resource    => "ip-${redis_vip}",
        second_resource   => 'haproxy-clone',
        first_action      => 'start',
        second_action     => 'start',
        constraint_params => 'kind=Optional',
        require => [Pacemaker::Resource::Service['haproxy'],
                    Pacemaker::Resource::Ip['redis_vip']],
      }
      pacemaker::constraint::colocation { 'redis_vip-with-haproxy':
        source  => "ip-${redis_vip}",
        target  => 'haproxy-clone',
        score   => 'INFINITY',
        require => [Pacemaker::Resource::Service['haproxy'],
                    Pacemaker::Resource::Ip['redis_vip']],
      }
    }

    $internal_api_vip = hiera('tripleo::loadbalancer::internal_api_virtual_ip')
    if $internal_api_vip and $internal_api_vip != $control_vip {
      pacemaker::resource::ip { 'internal_api_vip':
        ip_address => $internal_api_vip,
      }
      pacemaker::constraint::base { 'internal_api_vip-then-haproxy':
        constraint_type   => 'order',
        first_resource    => "ip-${internal_api_vip}",
        second_resource   => 'haproxy-clone',
        first_action      => 'start',
        second_action     => 'start',
        constraint_params => 'kind=Optional',
        require => [Pacemaker::Resource::Service['haproxy'],
                    Pacemaker::Resource::Ip['internal_api_vip']],
      }
      pacemaker::constraint::colocation { 'internal_api_vip-with-haproxy':
        source  => "ip-${internal_api_vip}",
        target  => 'haproxy-clone',
        score   => 'INFINITY',
        require => [Pacemaker::Resource::Service['haproxy'],
                    Pacemaker::Resource::Ip['internal_api_vip']],
      }
    }

    $storage_vip = hiera('tripleo::loadbalancer::storage_virtual_ip')
    if $storage_vip and $storage_vip != $control_vip {
      pacemaker::resource::ip { 'storage_vip':
        ip_address => $storage_vip,
      }
      pacemaker::constraint::base { 'storage_vip-then-haproxy':
        constraint_type   => 'order',
        first_resource    => "ip-${storage_vip}",
        second_resource   => 'haproxy-clone',
        first_action      => 'start',
        second_action     => 'start',
        constraint_params => 'kind=Optional',
        require => [Pacemaker::Resource::Service['haproxy'],
                    Pacemaker::Resource::Ip['storage_vip']],
      }
      pacemaker::constraint::colocation { 'storage_vip-with-haproxy':
        source  => "ip-${storage_vip}",
        target  => 'haproxy-clone',
        score   => 'INFINITY',
        require => [Pacemaker::Resource::Service['haproxy'],
                    Pacemaker::Resource::Ip['storage_vip']],
      }
    }

    $storage_mgmt_vip = hiera('tripleo::loadbalancer::storage_mgmt_virtual_ip')
    if $storage_mgmt_vip and $storage_mgmt_vip != $control_vip {
      pacemaker::resource::ip { 'storage_mgmt_vip':
        ip_address => $storage_mgmt_vip,
      }
      pacemaker::constraint::base { 'storage_mgmt_vip-then-haproxy':
        constraint_type   => 'order',
        first_resource    => "ip-${storage_mgmt_vip}",
        second_resource   => 'haproxy-clone',
        first_action      => 'start',
        second_action     => 'start',
        constraint_params => 'kind=Optional',
        require => [Pacemaker::Resource::Service['haproxy'],
                    Pacemaker::Resource::Ip['storage_mgmt_vip']],
      }
      pacemaker::constraint::colocation { 'storage_mgmt_vip-with-haproxy':
        source  => "ip-${storage_mgmt_vip}",
        target  => 'haproxy-clone',
        score   => 'INFINITY',
        require => [Pacemaker::Resource::Service['haproxy'],
                    Pacemaker::Resource::Ip['storage_mgmt_vip']],
      }
    }

    pacemaker::resource::service { $::memcached::params::service_name :
      clone_params => true,
      require      => Class['::memcached'],
    }

    # ha-all policy mirrors all queues except auto-generated amq.* ones.
    pacemaker::resource::ocf { 'rabbitmq':
      ocf_agent_name  => 'heartbeat:rabbitmq-cluster',
      resource_params => 'set_policy=\'ha-all ^(?!amq\.).* {"ha-mode":"all"}\'',
      clone_params    => 'ordered=true interleave=true',
      require         => Class['::rabbitmq'],
    }

    if downcase(hiera('ceilometer_backend')) == 'mongodb' {
      pacemaker::resource::service { $::mongodb::params::service_name :
        op_params    => 'start timeout=120s',
        clone_params => true,
        require      => Class['::mongodb::server'],
      }
      # NOTE (spredzy) : The replset can only be run
      # once all the nodes have joined the cluster.
      mongodb_conn_validator { $mongo_node_ips_with_port :
        timeout => '600',
        require => Pacemaker::Resource::Service[$::mongodb::params::service_name],
        before  => Mongodb_replset[$mongodb_replset],
      }
      mongodb_replset { $mongodb_replset :
        members => $mongo_node_ips_with_port,
      }
    }

    # Galera runs as a multi-state (master/slave) OCF resource; master-max
    # equals the node count so every controller is writable.
    pacemaker::resource::ocf { 'galera' :
      ocf_agent_name  => 'heartbeat:galera',
      op_params       => 'promote timeout=300s on-fail=block',
      master_params   => '',
      meta_params     => "master-max=${galera_nodes_count} ordered=true",
      resource_params => "additional_parameters='--open-files-limit=16384' enable_creation=true wsrep_cluster_address='gcomm://${galera_nodes}'",
      require         => Class['::mysql::server'],
      before          => Exec['galera-ready'],
    }

    pacemaker::resource::ocf { 'redis':
      ocf_agent_name  => 'heartbeat:redis',
      master_params   => '',
      meta_params     => 'notify=true ordered=true interleave=true',
      resource_params => 'wait_last_known_master=true',
      require         => Class['::redis'],
    }

  }

  # Poll clustercheck (up to 30 minutes) until Galera reports the local
  # node synced; everything DB-related below depends on this gate.
  exec { 'galera-ready' :
    command     => '/usr/bin/clustercheck >/dev/null',
    timeout     => 30,
    tries       => 180,
    try_sleep   => 10,
    environment => ['AVAILABLE_WHEN_READONLY=0'],
    require     => File['/etc/sysconfig/clustercheck'],
  }

  file { '/etc/sysconfig/clustercheck' :
    ensure  => file,
    # Single "\n" separators only: the original mixed literal "\n" escapes
    # with real line breaks, which rendered stray blank lines into the file.
    content => "MYSQL_USERNAME=root\nMYSQL_PASSWORD=''\nMYSQL_HOST=localhost\n",
  }

  # Expose clustercheck on :9200 for the HAProxy mysql health check.
  xinetd::service { 'galera-monitor' :
    port           => '9200',
    server         => '/usr/bin/clustercheck',
    per_source     => 'UNLIMITED',
    log_on_success => '',
    log_on_failure => 'HOST',
    flags          => 'REUSE',
    service_type   => 'UNLISTED',
    user           => 'root',
    group          => 'root',
    require        => File['/etc/sysconfig/clustercheck'],
  }

  # Create all the database schemas
  if $sync_db {
    class { 'keystone::db::mysql':
      require       => Exec['galera-ready'],
    }
    class { 'glance::db::mysql':
      require       => Exec['galera-ready'],
    }
    class { 'nova::db::mysql':
      require       => Exec['galera-ready'],
    }
    class { 'neutron::db::mysql':
      require       => Exec['galera-ready'],
    }
    class { 'cinder::db::mysql':
      require       => Exec['galera-ready'],
    }
    class { 'heat::db::mysql':
      require       => Exec['galera-ready'],
    }

    if downcase(hiera('ceilometer_backend')) == 'mysql' {
      class { 'ceilometer::db::mysql':
        require       => Exec['galera-ready'],
      }
    }
  }

  # pre-install swift here so we can build rings
  include ::swift

  # Ceph
  # NOTE(review): ceph_storage_count may arrive from hiera as a string;
  # the comparison relies on Puppet's numeric coercion — verify.
  $enable_ceph = hiera('ceph_storage_count', 0) > 0

  if $enable_ceph {
    class { 'ceph::profile::params':
      mon_initial_members => downcase(hiera('ceph_mon_initial_members'))
    }
    include ::ceph::profile::mon
  }

  if str2bool(hiera('enable_ceph_storage', 'false')) {
    if str2bool(hiera('ceph_osd_selinux_permissive', true)) {
      exec { 'set selinux to permissive on boot':
        command => "sed -ie 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config",
        onlyif  => "test -f /etc/selinux/config && ! grep '^SELINUX=permissive' /etc/selinux/config",
        path    => ["/usr/bin", "/usr/sbin"],
      }

      exec { 'set selinux to permissive':
        command => "setenforce 0",
        onlyif  => "which setenforce && getenforce | grep -i 'enforcing'",
        path    => ["/usr/bin", "/usr/sbin"],
      } -> Class['ceph::profile::osd']
    }

    include ::ceph::profile::osd
  }

  if str2bool(hiera('enable_external_ceph', 'false')) {
    include ::ceph::profile::client
  }


} #END STEP 2
476
477 if hiera('step') >= 3 {
478
  # Keystone: configured but not started — Pacemaker manages the service.
  # Schema sync runs only on the bootstrap node ($sync_db).
  class { '::keystone':
    sync_db => $sync_db,
    manage_service => false,
    enabled => false,
  }

  #TODO: need a cleanup-keystone-tokens.sh solution here
  keystone_config {
    'ec2/driver': value => 'keystone.contrib.ec2.backends.sql.Ec2';
  }
  # PKI token signing material is distributed via hiera so all
  # controllers share the same certs/keys.
  file { [ '/etc/keystone/ssl', '/etc/keystone/ssl/certs', '/etc/keystone/ssl/private' ]:
    ensure  => 'directory',
    owner   => 'keystone',
    group   => 'keystone',
    require => Package['keystone'],
  }
  file { '/etc/keystone/ssl/certs/signing_cert.pem':
    content => hiera('keystone_signing_certificate'),
    owner   => 'keystone',
    group   => 'keystone',
    notify  => Service['keystone'],
    require => File['/etc/keystone/ssl/certs'],
  }
  file { '/etc/keystone/ssl/private/signing_key.pem':
    content => hiera('keystone_signing_key'),
    owner   => 'keystone',
    group   => 'keystone',
    notify  => Service['keystone'],
    require => File['/etc/keystone/ssl/private'],
  }
  file { '/etc/keystone/ssl/certs/ca.pem':
    content => hiera('keystone_ca_certificate'),
    owner   => 'keystone',
    group   => 'keystone',
    notify  => Service['keystone'],
    require => File['/etc/keystone/ssl/certs'],
  }
516
  # Glance: map the configured backend to its store class; the HTTP store
  # is always enabled alongside the chosen backend store.
  $glance_backend = downcase(hiera('glance_backend', 'swift'))
  case $glance_backend {
      swift: { $backend_store = 'glance.store.swift.Store' }
      file: { $backend_store = 'glance.store.filesystem.Store' }
      rbd: { $backend_store = 'glance.store.rbd.Store' }
      default: { fail('Unrecognized glance_backend parameter.') }
  }
  $http_store = ['glance.store.http.Store']
  $glance_store = concat($http_store, $backend_store)

  # TODO: notifications, scrubber, etc.
  include ::glance
  class { 'glance::api':
    known_stores => $glance_store,
    manage_service => false,
    enabled => false,
  }
  class { '::glance::registry' :
    sync_db => $sync_db,
    manage_service => false,
    enabled => false,
  }
  # Builds e.g. ::glance::backend::swift from the selected backend name.
  include join(['::glance::backend::', $glance_backend])

  # Nova: all services configured but left to Pacemaker to start.
  class { '::nova' :
    memcached_servers => suffix(hiera('memcache_node_ips'), ':11211'),
  }

  include ::nova::config

  class { '::nova::api' :
    sync_db => $sync_db,
    manage_service => false,
    enabled => false,
  }
  class { '::nova::cert' :
    manage_service => false,
    enabled => false,
  }
  class { '::nova::conductor' :
    manage_service => false,
    enabled => false,
  }
  class { '::nova::consoleauth' :
    manage_service => false,
    enabled => false,
  }
  class { '::nova::vncproxy' :
    manage_service => false,
    enabled => false,
  }
  include ::nova::scheduler::filter
  class { '::nova::scheduler' :
    manage_service => false,
    enabled => false,
  }
  include ::nova::network::neutron
574
  # Neutron class definitions: server and agents configured but not
  # started (Pacemaker-managed); ML2 plugin settings come from hiera.
  include ::neutron
  class { '::neutron::server' :
    sync_db => $sync_db,
    manage_service => false,
    enabled => false,
  }
  class { '::neutron::agents::dhcp' :
    manage_service => false,
    enabled => false,
  }
  class { '::neutron::agents::l3' :
    manage_service => false,
    enabled => false,
  }
  class { 'neutron::agents::metadata':
    manage_service => false,
    enabled => false,
  }
  file { '/etc/neutron/dnsmasq-neutron.conf':
    content => hiera('neutron_dnsmasq_options'),
    owner   => 'neutron',
    group   => 'neutron',
    notify  => Service['neutron-dhcp-service'],
    require => Package['neutron'],
  }
  class { 'neutron::plugins::ml2':
    flat_networks   => split(hiera('neutron_flat_networks'), ','),
    tenant_network_types => [hiera('neutron_tenant_network_type')],
    mechanism_drivers   => [hiera('neutron_mechanism_drivers')],
  }
  class { 'neutron::agents::ml2::ovs':
    manage_service   => false,
    enabled          => false,
    bridge_mappings  => split(hiera('neutron_bridge_mappings'), ','),
    tunnel_types     => split(hiera('neutron_tunnel_types'), ','),
  }

  # Optional Cisco ML2 mechanism drivers, keyed off the driver list.
  if 'cisco_ucsm' in hiera('neutron_mechanism_drivers') {
    include ::neutron::plugins::ml2::cisco::ucsm
  }
  if 'cisco_nexus' in hiera('neutron_mechanism_drivers') {
    include ::neutron::plugins::ml2::cisco::nexus
    include ::neutron::plugins::ml2::cisco::type_nexus_vxlan
  }
  if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') {
    include neutron::plugins::ml2::cisco::nexus1000v

    class { 'neutron::agents::n1kv_vem':
      n1kv_source          => hiera('n1kv_vem_source', undef),
      n1kv_version         => hiera('n1kv_vem_version', undef),
    }

    class { 'n1k_vsm':
      n1kv_source       => hiera('n1kv_vsm_source', undef),
      n1kv_version      => hiera('n1kv_vsm_version', undef),
    }
  }

  if hiera('neutron_enable_bigswitch_ml2', false) {
    include neutron::plugins::ml2::bigswitch::restproxy
  }
  neutron_l3_agent_config {
    'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false);
  }
  neutron_dhcp_agent_config {
    'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false);
  }
643
  # Cinder: API/scheduler/volume configured but Pacemaker-managed; one or
  # more backends (iscsi/rbd/netapp/nfs) are enabled from hiera flags.
  include ::cinder
  class { '::cinder::api':
    sync_db => $sync_db,
    manage_service => false,
    enabled => false,
  }
  class { '::cinder::scheduler' :
    manage_service => false,
    enabled => false,
  }
  class { '::cinder::volume' :
    manage_service => false,
    enabled => false,
  }
  include ::cinder::glance
  class {'cinder::setup_test_volume':
    size => join([hiera('cinder_lvm_loop_device_size'), 'M']),
  }

  $cinder_enable_iscsi = hiera('cinder_enable_iscsi_backend', true)
  if $cinder_enable_iscsi {
    $cinder_iscsi_backend = 'tripleo_iscsi'

    cinder::backend::iscsi { $cinder_iscsi_backend :
      iscsi_ip_address => hiera('cinder_iscsi_ip_address'),
      iscsi_helper     => hiera('cinder_iscsi_helper'),
    }
  }

  if $enable_ceph {

    # Resource defaults applied to every ceph::pool declared below.
    Ceph_pool {
      pg_num  => hiera('ceph::profile::params::osd_pool_default_pg_num'),
      pgp_num => hiera('ceph::profile::params::osd_pool_default_pgp_num'),
      size    => hiera('ceph::profile::params::osd_pool_default_size'),
    }

    $ceph_pools = hiera('ceph_pools')
    ceph::pool { $ceph_pools : }

    $cinder_pool_requires = [Ceph::Pool['volumes']]

  } else {
    $cinder_pool_requires = []
  }

  if hiera('cinder_enable_rbd_backend', false) {
    $cinder_rbd_backend = 'tripleo_ceph'

    cinder::backend::rbd { $cinder_rbd_backend :
      rbd_pool        => 'volumes',
      rbd_user        => 'openstack',
      rbd_secret_uuid => hiera('ceph::profile::params::fsid'),
      require         => $cinder_pool_requires,
    }
  }

  if hiera('cinder_enable_netapp_backend', false) {
    $cinder_netapp_backend = hiera('cinder::backend::netapp::title')

    cinder_config {
      "${cinder_netapp_backend}/host": value => 'hostgroup';
    }

    if hiera('cinder::backend::netapp::nfs_shares', undef) {
      $cinder_netapp_nfs_shares = split(hiera('cinder::backend::netapp::nfs_shares', undef), ',')
    }

    cinder::backend::netapp { $cinder_netapp_backend :
      netapp_login                 => hiera('cinder::backend::netapp::netapp_login', undef),
      netapp_password              => hiera('cinder::backend::netapp::netapp_password', undef),
      netapp_server_hostname       => hiera('cinder::backend::netapp::netapp_server_hostname', undef),
      netapp_server_port           => hiera('cinder::backend::netapp::netapp_server_port', undef),
      netapp_size_multiplier       => hiera('cinder::backend::netapp::netapp_size_multiplier', undef),
      netapp_storage_family        => hiera('cinder::backend::netapp::netapp_storage_family', undef),
      netapp_storage_protocol      => hiera('cinder::backend::netapp::netapp_storage_protocol', undef),
      netapp_transport_type        => hiera('cinder::backend::netapp::netapp_transport_type', undef),
      netapp_vfiler                => hiera('cinder::backend::netapp::netapp_vfiler', undef),
      netapp_volume_list           => hiera('cinder::backend::netapp::netapp_volume_list', undef),
      netapp_vserver               => hiera('cinder::backend::netapp::netapp_vserver', undef),
      netapp_partner_backend_name  => hiera('cinder::backend::netapp::netapp_partner_backend_name', undef),
      nfs_shares                   => $cinder_netapp_nfs_shares,
      nfs_shares_config            => hiera('cinder::backend::netapp::nfs_shares_config', undef),
      netapp_copyoffload_tool_path => hiera('cinder::backend::netapp::netapp_copyoffload_tool_path', undef),
      netapp_controller_ips        => hiera('cinder::backend::netapp::netapp_controller_ips', undef),
      netapp_sa_password           => hiera('cinder::backend::netapp::netapp_sa_password', undef),
      netapp_storage_pools         => hiera('cinder::backend::netapp::netapp_storage_pools', undef),
      netapp_eseries_host_type     => hiera('cinder::backend::netapp::netapp_eseries_host_type', undef),
      netapp_webservice_path       => hiera('cinder::backend::netapp::netapp_webservice_path', undef),
    }
  }

  if hiera('cinder_enable_nfs_backend', false) {
    $cinder_nfs_backend = 'tripleo_nfs'

    # NFS mounts need the virt_use_nfs SELinux boolean when enforcing.
    if ($::selinux != "false") {
      selboolean { 'virt_use_nfs':
          value => on,
          persistent => true,
      } -> Package['nfs-utils']
    }

    package {'nfs-utils': } ->
    cinder::backend::nfs { $cinder_nfs_backend:
      nfs_servers         => hiera('cinder_nfs_servers'),
      nfs_mount_options   => hiera('cinder_nfs_mount_options'),
      nfs_shares_config   => '/etc/cinder/shares-nfs.conf',
    }
  }

  # Only the backends whose flags were set above end up defined;
  # delete_undef_values drops the rest.
  $cinder_enabled_backends = delete_undef_values([$cinder_iscsi_backend, $cinder_rbd_backend, $cinder_netapp_backend, $cinder_nfs_backend])
  class { '::cinder::backends' :
    enabled_backends => $cinder_enabled_backends,
  }
758
  # swift proxy — started only once $non_pcmk_start (step >= 4) is true,
  # since swift services are not Pacemaker-managed.
  class { '::swift::proxy' :
    manage_service => $non_pcmk_start,
    enabled => $non_pcmk_start,
  }
  include ::swift::proxy::proxy_logging
  include ::swift::proxy::healthcheck
  include ::swift::proxy::cache
  include ::swift::proxy::keystone
  include ::swift::proxy::authtoken
  include ::swift::proxy::staticweb
  include ::swift::proxy::ratelimit
  include ::swift::proxy::catch_errors
  include ::swift::proxy::tempurl
  include ::swift::proxy::formpost

  # swift storage
  if str2bool(hiera('enable_swift_storage', 'true')) {
    class {'::swift::storage::all':
      mount_check => str2bool(hiera('swift_mount_check'))
    }
    class {'::swift::storage::account':
      manage_service => $non_pcmk_start,
      enabled => $non_pcmk_start,
    }
    class {'::swift::storage::container':
      manage_service => $non_pcmk_start,
      enabled => $non_pcmk_start,
    }
    class {'::swift::storage::object':
      manage_service => $non_pcmk_start,
      enabled => $non_pcmk_start,
    }
    # Guarded because swift::storage::all may already declare /srv/node.
    if(!defined(File['/srv/node'])) {
      file { '/srv/node':
        ensure  => directory,
        owner   => 'swift',
        group   => 'swift',
        require => Package['openstack-swift'],
      }
    }
    $swift_components = ['account', 'container', 'object']
    swift::storage::filter::recon { $swift_components : }
    swift::storage::filter::healthcheck { $swift_components : }
  }
804
  # Ceilometer — DB connection depends on the selected backend:
  # mysql uses a plain connection string, anything else (mongodb) builds
  # a replica-set URI from the mongo node list.
  $ceilometer_backend = downcase(hiera('ceilometer_backend'))
  case $ceilometer_backend {
    /mysql/ : {
      $ceilometer_database_connection = hiera('ceilometer_mysql_conn_string')
    }
    default : {
      $mongo_node_string = join($mongo_node_ips_with_port, ',')
      $ceilometer_database_connection = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}"
    }
  }
  include ::ceilometer
  include ::ceilometer::config
  class { '::ceilometer::api' :
    manage_service => false,
    enabled => false,
  }
  class { '::ceilometer::agent::notification' :
    manage_service => false,
    enabled => false,
  }
  class { '::ceilometer::agent::central' :
    manage_service => false,
    enabled => false,
  }
  class { '::ceilometer::alarm::notifier' :
    manage_service => false,
    enabled => false,
  }
  class { '::ceilometer::alarm::evaluator' :
    manage_service => false,
    enabled => false,
  }
  class { '::ceilometer::collector' :
    manage_service => false,
    enabled => false,
  }
  include ::ceilometer::expirer
  class { '::ceilometer::db' :
    database_connection => $ceilometer_database_connection,
    sync_db             => $sync_db,
  }
  include ceilometer::agent::auth

  # Jitter the expirer cron by a random delay (0-86399s) so all
  # controllers don't expire samples at the same moment.
  Cron <| title == 'ceilometer-expirer' |> { command => "sleep $((\$(od -A n -t d -N 3 /dev/urandom) % 86400)) && ${::ceilometer::params::expirer_command}" }
850
851   # Heat
852   class { '::heat' :
853     sync_db => $sync_db,
854   }
855   class { '::heat::api' :
856     manage_service => false,
857     enabled => false,
858   }
859   class { '::heat::api_cfn' :
860     manage_service => false,
861     enabled => false,
862   }
863   class { '::heat::api_cloudwatch' :
864     manage_service => false,
865     enabled => false,
866   }
867   class { '::heat::engine' :
868     manage_service => false,
869     enabled => false,
870   }
871
  # httpd/apache and horizon
  # NOTE(gfidente): server-status can be consumed by the pacemaker resource agent
  # service_enable => false keeps httpd out of the systemd boot sequence so
  # that only pacemaker decides when and where it runs. service_manage is
  # left at its default because disabling it appears incompatible with the
  # horizon/mod_wsgi setup (see inline note).
  class { '::apache' :
    service_enable => false,
    # service_manage => false, # <-- not supported with horizon&apache mod_wsgi?
  }
  include ::apache::mod::status
879   if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') {
880     $_profile_support = 'cisco'
881   } else {
882     $_profile_support = 'None'
883   }
884   $neutron_options   = {'profile_support' => $_profile_support }
885   $vhost_params = {
886     add_listen => false,
887     priority   => 10,
888   }
889   class { 'horizon':
890     cache_server_ip    => hiera('memcache_node_ips', '127.0.0.1'),
891     vhost_extra_params => $vhost_params,
892     server_aliases     => $::hostname,
893     neutron_options    => $neutron_options,
894   }
895
896   $snmpd_user = hiera('snmpd_readonly_user_name')
897   snmp::snmpv3_user { $snmpd_user:
898     authtype => 'MD5',
899     authpass => hiera('snmpd_readonly_user_password'),
900   }
901   class { 'snmp':
902     agentaddress => ['udp:161','udp6:[::1]:161'],
903     snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc  cron', 'includeAllDisks  10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ],
904   }
905
  # Pull in any extra, deployment-specific classes listed in hiera.
  hiera_include('controller_classes')
907
908 } #END STEP 3
909
910 if hiera('step') >= 4 {
  # Periodically purge expired keystone tokens via cron on every controller.
  include ::keystone::cron::token_flush
912
913   if $pacemaker_master {
914
915     # Keystone
916     pacemaker::resource::service { $::keystone::params::service_name :
917       clone_params => "interleave=true",
918     }
919
920     pacemaker::constraint::base { 'haproxy-then-keystone-constraint':
921       constraint_type => 'order',
922       first_resource  => "haproxy-clone",
923       second_resource => "${::keystone::params::service_name}-clone",
924       first_action    => 'start',
925       second_action   => 'start',
926       require         => [Pacemaker::Resource::Service['haproxy'],
927                           Pacemaker::Resource::Service[$::keystone::params::service_name]],
928     }
929     pacemaker::constraint::base { 'rabbitmq-then-keystone-constraint':
930       constraint_type => 'order',
931       first_resource  => "rabbitmq-clone",
932       second_resource => "${::keystone::params::service_name}-clone",
933       first_action    => 'start',
934       second_action   => 'start',
935       require         => [Pacemaker::Resource::Ocf['rabbitmq'],
936                           Pacemaker::Resource::Service[$::keystone::params::service_name]],
937     }
938     pacemaker::constraint::base { 'memcached-then-keystone-constraint':
939       constraint_type => 'order',
940       first_resource  => "memcached-clone",
941       second_resource => "${::keystone::params::service_name}-clone",
942       first_action    => 'start',
943       second_action   => 'start',
944       require         => [Pacemaker::Resource::Service['memcached'],
945                           Pacemaker::Resource::Service[$::keystone::params::service_name]],
946     }
947     pacemaker::constraint::base { 'galera-then-keystone-constraint':
948       constraint_type => 'order',
949       first_resource  => "galera-master",
950       second_resource => "${::keystone::params::service_name}-clone",
951       first_action    => 'promote',
952       second_action   => 'start',
953       require         => [Pacemaker::Resource::Ocf['galera'],
954                           Pacemaker::Resource::Service[$::keystone::params::service_name]],
955     }
956
957     # Cinder
958     pacemaker::resource::service { $::cinder::params::api_service :
959       clone_params => "interleave=true",
960       require      => Pacemaker::Resource::Service[$::keystone::params::service_name],
961     }
962     pacemaker::resource::service { $::cinder::params::scheduler_service :
963       clone_params => "interleave=true",
964     }
965     pacemaker::resource::service { $::cinder::params::volume_service : }
966
967     pacemaker::constraint::base { 'keystone-then-cinder-api-constraint':
968       constraint_type => 'order',
969       first_resource  => "${::keystone::params::service_name}-clone",
970       second_resource => "${::cinder::params::api_service}-clone",
971       first_action    => 'start',
972       second_action   => 'start',
973       require         => [Pacemaker::Resource::Service[$::cinder::params::api_service],
974                           Pacemaker::Resource::Service[$::keystone::params::service_name]],
975     }
976     pacemaker::constraint::base { 'cinder-api-then-cinder-scheduler-constraint':
977       constraint_type => "order",
978       first_resource => "${::cinder::params::api_service}-clone",
979       second_resource => "${::cinder::params::scheduler_service}-clone",
980       first_action => "start",
981       second_action => "start",
982       require => [Pacemaker::Resource::Service[$::cinder::params::api_service],
983                   Pacemaker::Resource::Service[$::cinder::params::scheduler_service]],
984     }
985     pacemaker::constraint::colocation { 'cinder-scheduler-with-cinder-api-colocation':
986       source => "${::cinder::params::scheduler_service}-clone",
987       target => "${::cinder::params::api_service}-clone",
988       score => "INFINITY",
989       require => [Pacemaker::Resource::Service[$::cinder::params::api_service],
990                   Pacemaker::Resource::Service[$::cinder::params::scheduler_service]],
991     }
992     pacemaker::constraint::base { 'cinder-scheduler-then-cinder-volume-constraint':
993       constraint_type => "order",
994       first_resource => "${::cinder::params::scheduler_service}-clone",
995       second_resource => "${::cinder::params::volume_service}",
996       first_action => "start",
997       second_action => "start",
998       require => [Pacemaker::Resource::Service[$::cinder::params::scheduler_service],
999                   Pacemaker::Resource::Service[$::cinder::params::volume_service]],
1000     }
1001     pacemaker::constraint::colocation { 'cinder-volume-with-cinder-scheduler-colocation':
1002       source => "${::cinder::params::volume_service}",
1003       target => "${::cinder::params::scheduler_service}-clone",
1004       score => "INFINITY",
1005       require => [Pacemaker::Resource::Service[$::cinder::params::scheduler_service],
1006                   Pacemaker::Resource::Service[$::cinder::params::volume_service]],
1007     }
1008
    # Glance
    # glance-registry and glance-api run as interleaved clones; the registry
    # waits on keystone, the API waits on (and is colocated with) the registry.
    pacemaker::resource::service { $::glance::params::registry_service_name :
      clone_params => "interleave=true",
      require      => Pacemaker::Resource::Service[$::keystone::params::service_name],
    }
    pacemaker::resource::service { $::glance::params::api_service_name :
      clone_params => "interleave=true",
    }

    # Start ordering: keystone -> registry -> api.
    pacemaker::constraint::base { 'keystone-then-glance-registry-constraint':
      constraint_type => 'order',
      first_resource  => "${::keystone::params::service_name}-clone",
      second_resource => "${::glance::params::registry_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::glance::params::registry_service_name],
                          Pacemaker::Resource::Service[$::keystone::params::service_name]],
    }
    pacemaker::constraint::base { 'glance-registry-then-glance-api-constraint':
      constraint_type => "order",
      first_resource  => "${::glance::params::registry_service_name}-clone",
      second_resource => "${::glance::params::api_service_name}-clone",
      first_action    => "start",
      second_action   => "start",
      require => [Pacemaker::Resource::Service[$::glance::params::registry_service_name],
                  Pacemaker::Resource::Service[$::glance::params::api_service_name]],
    }
    # Keep glance-api on the same node(s) as the registry clone.
    pacemaker::constraint::colocation { 'glance-api-with-glance-registry-colocation':
      source  => "${::glance::params::api_service_name}-clone",
      target  => "${::glance::params::registry_service_name}-clone",
      score   => "INFINITY",
      require => [Pacemaker::Resource::Service[$::glance::params::registry_service_name],
                  Pacemaker::Resource::Service[$::glance::params::api_service_name]],
    }
1043
    # Neutron
    # NOTE(gfidente): Neutron will try to populate the database with some data
    # as soon as neutron-server is started; to avoid races we want to make this
    # happen only on one node, before normal Pacemaker initialization
    # https://bugzilla.redhat.com/show_bug.cgi?id=1233061
    # The exec below runs only on the bootstrap node (this whole branch is
    # guarded by $pacemaker_master) and is chained (->) before the pacemaker
    # resource so the one-off start/DB-populate happens first.
    exec { '/usr/bin/systemctl start neutron-server && /usr/bin/sleep 5' : } ->
    pacemaker::resource::service { $::neutron::params::server_service:
      op_params => "start timeout=90",
      clone_params   => "interleave=true",
      require => Pacemaker::Resource::Service[$::keystone::params::service_name]
    }
    # Neutron agents, each cloned (interleaved) across the controllers.
    pacemaker::resource::service { $::neutron::params::l3_agent_service:
      clone_params   => "interleave=true",
    }
    pacemaker::resource::service { $::neutron::params::dhcp_agent_service:
      clone_params   => "interleave=true",
    }
    pacemaker::resource::service { $::neutron::params::ovs_agent_service:
      clone_params => "interleave=true",
    }
    pacemaker::resource::service { $::neutron::params::metadata_agent_service:
      clone_params => "interleave=true",
    }
    # One-shot cleanup agents, managed through the neutron OCF providers.
    pacemaker::resource::ocf { $::neutron::params::ovs_cleanup_service:
      ocf_agent_name => "neutron:OVSCleanup",
      clone_params => "interleave=true",
    }
    pacemaker::resource::ocf { 'neutron-netns-cleanup':
      ocf_agent_name => "neutron:NetnsCleanup",
      clone_params => "interleave=true",
    }
1075
1076     # neutron - one chain ovs-cleanup-->netns-cleanup-->ovs-agent
1077     pacemaker::constraint::base { 'neutron-ovs-cleanup-to-netns-cleanup-constraint':
1078       constraint_type => "order",
1079       first_resource => "${::neutron::params::ovs_cleanup_service}-clone",
1080       second_resource => "neutron-netns-cleanup-clone",
1081       first_action => "start",
1082       second_action => "start",
1083       require => [Pacemaker::Resource::Ocf["${::neutron::params::ovs_cleanup_service}"],
1084                   Pacemaker::Resource::Ocf['neutron-netns-cleanup']],
1085     }
1086     pacemaker::constraint::colocation { 'neutron-ovs-cleanup-to-netns-cleanup-colocation':
1087       source => "neutron-netns-cleanup-clone",
1088       target => "${::neutron::params::ovs_cleanup_service}-clone",
1089       score => "INFINITY",
1090       require => [Pacemaker::Resource::Ocf["${::neutron::params::ovs_cleanup_service}"],
1091                   Pacemaker::Resource::Ocf['neutron-netns-cleanup']],
1092     }
1093     pacemaker::constraint::base { 'neutron-netns-cleanup-to-openvswitch-agent-constraint':
1094       constraint_type => "order",
1095       first_resource => "neutron-netns-cleanup-clone",
1096       second_resource => "${::neutron::params::ovs_agent_service}-clone",
1097       first_action => "start",
1098       second_action => "start",
1099       require => [Pacemaker::Resource::Ocf["neutron-netns-cleanup"],
1100                   Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"]],
1101     }
1102     pacemaker::constraint::colocation { 'neutron-netns-cleanup-to-openvswitch-agent-colocation':
1103       source => "${::neutron::params::ovs_agent_service}-clone",
1104       target => "neutron-netns-cleanup-clone",
1105       score => "INFINITY",
1106       require => [Pacemaker::Resource::Ocf["neutron-netns-cleanup"],
1107                   Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"]],
1108     }
1109
    #another chain keystone-->neutron-server-->ovs-agent-->dhcp-->l3
    # (and finally l3-->metadata); each agent after ovs is also colocated
    # with its predecessor so the whole chain lands on the same node set.
    pacemaker::constraint::base { 'keystone-to-neutron-server-constraint':
      constraint_type => "order",
      first_resource => "${::keystone::params::service_name}-clone",
      second_resource => "${::neutron::params::server_service}-clone",
      first_action => "start",
      second_action => "start",
      require => [Pacemaker::Resource::Service[$::keystone::params::service_name],
                  Pacemaker::Resource::Service[$::neutron::params::server_service]],
    }
    pacemaker::constraint::base { 'neutron-server-to-openvswitch-agent-constraint':
      constraint_type => "order",
      first_resource => "${::neutron::params::server_service}-clone",
      second_resource => "${::neutron::params::ovs_agent_service}-clone",
      first_action => "start",
      second_action => "start",
      require => [Pacemaker::Resource::Service[$::neutron::params::server_service],
                  Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]],
    }
    # ovs-agent -> dhcp-agent (ordered and colocated).
    pacemaker::constraint::base { 'neutron-openvswitch-agent-to-dhcp-agent-constraint':
      constraint_type => "order",
      first_resource => "${::neutron::params::ovs_agent_service}-clone",
      second_resource => "${::neutron::params::dhcp_agent_service}-clone",
      first_action => "start",
      second_action => "start",
      require => [Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"],
                  Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"]],

    }
    pacemaker::constraint::colocation { 'neutron-openvswitch-agent-to-dhcp-agent-colocation':
      source => "${::neutron::params::dhcp_agent_service}-clone",
      target => "${::neutron::params::ovs_agent_service}-clone",
      score => "INFINITY",
      require => [Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"],
                  Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"]],
    }
    # dhcp-agent -> l3-agent (ordered and colocated).
    pacemaker::constraint::base { 'neutron-dhcp-agent-to-l3-agent-constraint':
      constraint_type => "order",
      first_resource => "${::neutron::params::dhcp_agent_service}-clone",
      second_resource => "${::neutron::params::l3_agent_service}-clone",
      first_action => "start",
      second_action => "start",
      require => [Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"],
                  Pacemaker::Resource::Service["${::neutron::params::l3_agent_service}"]]
    }
    pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-l3-agent-colocation':
      source => "${::neutron::params::l3_agent_service}-clone",
      target => "${::neutron::params::dhcp_agent_service}-clone",
      score => "INFINITY",
      require => [Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"],
                  Pacemaker::Resource::Service["${::neutron::params::l3_agent_service}"]]
    }
    # l3-agent -> metadata-agent (ordered and colocated).
    pacemaker::constraint::base { 'neutron-l3-agent-to-metadata-agent-constraint':
      constraint_type => "order",
      first_resource => "${::neutron::params::l3_agent_service}-clone",
      second_resource => "${::neutron::params::metadata_agent_service}-clone",
      first_action => "start",
      second_action => "start",
      require => [Pacemaker::Resource::Service["${::neutron::params::l3_agent_service}"],
                  Pacemaker::Resource::Service["${::neutron::params::metadata_agent_service}"]]
    }
    pacemaker::constraint::colocation { 'neutron-l3-agent-to-metadata-agent-colocation':
      source => "${::neutron::params::metadata_agent_service}-clone",
      target => "${::neutron::params::l3_agent_service}-clone",
      score => "INFINITY",
      require => [Pacemaker::Resource::Service["${::neutron::params::l3_agent_service}"],
                  Pacemaker::Resource::Service["${::neutron::params::metadata_agent_service}"]]
    }
1178
1179     # Nova
1180     pacemaker::resource::service { $::nova::params::api_service_name :
1181       clone_params    => "interleave=true",
1182       op_params       => "start timeout=90s monitor start-delay=10s",
1183     }
1184     pacemaker::resource::service { $::nova::params::conductor_service_name :
1185       clone_params    => "interleave=true",
1186       op_params       => "start timeout=90s monitor start-delay=10s",
1187     }
1188     pacemaker::resource::service { $::nova::params::consoleauth_service_name :
1189       clone_params    => "interleave=true",
1190       op_params       => "start timeout=90s monitor start-delay=10s",
1191       require         => Pacemaker::Resource::Service[$::keystone::params::service_name],
1192     }
1193     pacemaker::resource::service { $::nova::params::vncproxy_service_name :
1194       clone_params    => "interleave=true",
1195       op_params       => "start timeout=90s monitor start-delay=10s",
1196     }
1197     pacemaker::resource::service { $::nova::params::scheduler_service_name :
1198       clone_params    => "interleave=true",
1199       op_params       => "start timeout=90s monitor start-delay=10s",
1200     }
1201
    # Nova start ordering: keystone -> consoleauth -> vncproxy -> api ->
    # scheduler -> conductor; each service after consoleauth is also
    # colocated with its predecessor in the chain.
    pacemaker::constraint::base { 'keystone-then-nova-consoleauth-constraint':
      constraint_type => 'order',
      first_resource  => "${::keystone::params::service_name}-clone",
      second_resource => "${::nova::params::consoleauth_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
                          Pacemaker::Resource::Service[$::keystone::params::service_name]],
    }
    pacemaker::constraint::base { 'nova-consoleauth-then-nova-vncproxy-constraint':
      constraint_type => "order",
      first_resource  => "${::nova::params::consoleauth_service_name}-clone",
      second_resource => "${::nova::params::vncproxy_service_name}-clone",
      first_action    => "start",
      second_action   => "start",
      require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
                  Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]],
    }
    pacemaker::constraint::colocation { 'nova-vncproxy-with-nova-consoleauth-colocation':
      source => "${::nova::params::vncproxy_service_name}-clone",
      target => "${::nova::params::consoleauth_service_name}-clone",
      score => "INFINITY",
      require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
                  Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]],
    }
    pacemaker::constraint::base { 'nova-vncproxy-then-nova-api-constraint':
      constraint_type => "order",
      first_resource  => "${::nova::params::vncproxy_service_name}-clone",
      second_resource => "${::nova::params::api_service_name}-clone",
      first_action    => "start",
      second_action   => "start",
      require => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name],
                  Pacemaker::Resource::Service[$::nova::params::api_service_name]],
    }
    pacemaker::constraint::colocation { 'nova-api-with-nova-vncproxy-colocation':
      source => "${::nova::params::api_service_name}-clone",
      target => "${::nova::params::vncproxy_service_name}-clone",
      score => "INFINITY",
      require => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name],
                  Pacemaker::Resource::Service[$::nova::params::api_service_name]],
    }
    pacemaker::constraint::base { 'nova-api-then-nova-scheduler-constraint':
      constraint_type => "order",
      first_resource  => "${::nova::params::api_service_name}-clone",
      second_resource => "${::nova::params::scheduler_service_name}-clone",
      first_action    => "start",
      second_action   => "start",
      require => [Pacemaker::Resource::Service[$::nova::params::api_service_name],
                  Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]],
    }
    pacemaker::constraint::colocation { 'nova-scheduler-with-nova-api-colocation':
      source => "${::nova::params::scheduler_service_name}-clone",
      target => "${::nova::params::api_service_name}-clone",
      score => "INFINITY",
      require => [Pacemaker::Resource::Service[$::nova::params::api_service_name],
                  Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]],
    }
    pacemaker::constraint::base { 'nova-scheduler-then-nova-conductor-constraint':
      constraint_type => "order",
      first_resource  => "${::nova::params::scheduler_service_name}-clone",
      second_resource => "${::nova::params::conductor_service_name}-clone",
      first_action    => "start",
      second_action   => "start",
      require => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name],
                  Pacemaker::Resource::Service[$::nova::params::conductor_service_name]],
    }
    pacemaker::constraint::colocation { 'nova-conductor-with-nova-scheduler-colocation':
      source => "${::nova::params::conductor_service_name}-clone",
      target => "${::nova::params::scheduler_service_name}-clone",
      score => "INFINITY",
      require => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name],
                  Pacemaker::Resource::Service[$::nova::params::conductor_service_name]],
    }
1275
1276     # Ceilometer
1277     pacemaker::resource::service { $::ceilometer::params::agent_central_service_name :
1278       clone_params => 'interleave=true',
1279       require      => [Pacemaker::Resource::Service[$::keystone::params::service_name],
1280                        Pacemaker::Resource::Service[$::mongodb::params::service_name]],
1281     }
1282     pacemaker::resource::service { $::ceilometer::params::collector_service_name :
1283       clone_params => 'interleave=true',
1284     }
1285     pacemaker::resource::service { $::ceilometer::params::api_service_name :
1286       clone_params => 'interleave=true',
1287     }
1288     pacemaker::resource::service { $::ceilometer::params::alarm_evaluator_service_name :
1289       clone_params => 'interleave=true',
1290     }
1291     pacemaker::resource::service { $::ceilometer::params::alarm_notifier_service_name :
1292       clone_params => 'interleave=true',
1293     }
1294     pacemaker::resource::service { $::ceilometer::params::agent_notification_service_name :
1295       clone_params => 'interleave=true',
1296     }
1297     pacemaker::resource::ocf { 'delay' :
1298       ocf_agent_name  => 'heartbeat:Delay',
1299       clone_params    => 'interleave=true',
1300       resource_params => 'startdelay=10',
1301     }
1302     # Fedora doesn't know `require-all` parameter for constraints yet
1303     if $::operatingsystem == 'Fedora' {
1304       $redis_ceilometer_constraint_params = undef
1305     } else {
1306       $redis_ceilometer_constraint_params = 'require-all=false'
1307     }
    # redis must be promoted (master role) before ceilometer-central starts;
    # require-all=false (where supported, see above) relaxes the clone-wide
    # requirement.
    pacemaker::constraint::base { 'redis-then-ceilometer-central-constraint':
      constraint_type   => 'order',
      first_resource    => "redis-master",
      second_resource   => "${::ceilometer::params::agent_central_service_name}-clone",
      first_action      => 'promote',
      second_action     => 'start',
      constraint_params => $redis_ceilometer_constraint_params,
      require           => [Pacemaker::Resource::Ocf['redis'],
                            Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name]],
    }
    # Start ordering: keystone -> central -> collector -> api, with the API
    # colocated on the collector clone.
    pacemaker::constraint::base { 'keystone-then-ceilometer-central-constraint':
      constraint_type => 'order',
      first_resource  => "${::keystone::params::service_name}-clone",
      second_resource => "${::ceilometer::params::agent_central_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
                          Pacemaker::Resource::Service[$::keystone::params::service_name]],
    }
    pacemaker::constraint::base { 'ceilometer-central-then-ceilometer-collector-constraint':
      constraint_type => 'order',
      first_resource  => "${::ceilometer::params::agent_central_service_name}-clone",
      second_resource => "${::ceilometer::params::collector_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
                          Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name]],
    }
    pacemaker::constraint::base { 'ceilometer-collector-then-ceilometer-api-constraint':
      constraint_type => 'order',
      first_resource  => "${::ceilometer::params::collector_service_name}-clone",
      second_resource => "${::ceilometer::params::api_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name],
                          Pacemaker::Resource::Service[$::ceilometer::params::api_service_name]],
    }
    pacemaker::constraint::colocation { 'ceilometer-api-with-ceilometer-collector-colocation':
      source  => "${::ceilometer::params::api_service_name}-clone",
      target  => "${::ceilometer::params::collector_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
                  Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name]],
    }
1352     pacemaker::constraint::base { 'ceilometer-api-then-ceilometer-delay-constraint':
1353       constraint_type => 'order',
1354       first_resource  => "${::ceilometer::params::api_service_name}-clone",
1355       second_resource => 'delay-clone',
1356       first_action    => 'start',
1357       second_action   => 'start',
1358       require         => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
1359                           Pacemaker::Resource::Ocf['delay']],
1360     }
1361     pacemaker::constraint::colocation { 'ceilometer-delay-with-ceilometer-api-colocation':
1362       source  => 'delay-clone',
1363       target  => "${::ceilometer::params::api_service_name}-clone",
1364       score   => 'INFINITY',
1365       require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
1366                   Pacemaker::Resource::Ocf['delay']],
1367     }
1368     pacemaker::constraint::base { 'ceilometer-delay-then-ceilometer-alarm-evaluator-constraint':
1369       constraint_type => 'order',
1370       first_resource  => 'delay-clone',
1371       second_resource => "${::ceilometer::params::alarm_evaluator_service_name}-clone",
1372       first_action    => 'start',
1373       second_action   => 'start',
1374       require         => [Pacemaker::Resource::Service[$::ceilometer::params::alarm_evaluator_service_name],
1375                           Pacemaker::Resource::Ocf['delay']],
1376     }
1377     pacemaker::constraint::colocation { 'ceilometer-alarm-evaluator-with-ceilometer-delay-colocation':
1378       source  => "${::ceilometer::params::alarm_evaluator_service_name}-clone",
1379       target  => 'delay-clone',
1380       score   => 'INFINITY',
1381       require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
1382                   Pacemaker::Resource::Ocf['delay']],
1383     }
1384     pacemaker::constraint::base { 'ceilometer-alarm-evaluator-then-ceilometer-alarm-notifier-constraint':
1385       constraint_type => 'order',
1386       first_resource  => "${::ceilometer::params::alarm_evaluator_service_name}-clone",
1387       second_resource => "${::ceilometer::params::alarm_notifier_service_name}-clone",
1388       first_action    => 'start',
1389       second_action   => 'start',
1390       require         => [Pacemaker::Resource::Service[$::ceilometer::params::alarm_evaluator_service_name],
1391                           Pacemaker::Resource::Service[$::ceilometer::params::alarm_notifier_service_name]],
1392     }
    # Keep the alarm notifier on the same node set as the alarm evaluator
    # (INFINITY colocation); the matching order constraint above sequences
    # their start-up.
    pacemaker::constraint::colocation { 'ceilometer-alarm-notifier-with-ceilometer-alarm-evaluator-colocation':
      source  => "${::ceilometer::params::alarm_notifier_service_name}-clone",
      target  => "${::ceilometer::params::alarm_evaluator_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::ceilometer::params::alarm_evaluator_service_name],
                  Pacemaker::Resource::Service[$::ceilometer::params::alarm_notifier_service_name]],
    }
    # Start the alarm notifier clone before the notification agent clone;
    # 'order' constraints only sequence start/stop, they do not colocate.
    pacemaker::constraint::base { 'ceilometer-alarm-notifier-then-ceilometer-notification-constraint':
      constraint_type => 'order',
      first_resource  => "${::ceilometer::params::alarm_notifier_service_name}-clone",
      second_resource => "${::ceilometer::params::agent_notification_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::ceilometer::params::agent_notification_service_name],
                          Pacemaker::Resource::Service[$::ceilometer::params::alarm_notifier_service_name]],
    }
    # ...and pin the notification agent to the alarm notifier's nodes so the
    # whole ceilometer alarm chain runs together.
    pacemaker::constraint::colocation { 'ceilometer-notification-with-ceilometer-alarm-notifier-colocation':
      source  => "${::ceilometer::params::agent_notification_service_name}-clone",
      target  => "${::ceilometer::params::alarm_notifier_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_notification_service_name],
                  Pacemaker::Resource::Service[$::ceilometer::params::alarm_notifier_service_name]],
    }
    # When ceilometer stores its data in mongodb, the mongod clone must be up
    # before the central agent starts, otherwise the agent fails on its first
    # database connection.  No such ordering is needed for other backends.
    if downcase(hiera('ceilometer_backend')) == 'mongodb' {
      pacemaker::constraint::base { 'mongodb-then-ceilometer-central-constraint':
        constraint_type => 'order',
        first_resource  => "${::mongodb::params::service_name}-clone",
        second_resource => "${::ceilometer::params::agent_central_service_name}-clone",
        first_action    => 'start',
        second_action   => 'start',
        require         => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
                            Pacemaker::Resource::Service[$::mongodb::params::service_name]],
      }
    }
1427
1428     # Heat
1429     pacemaker::resource::service { $::heat::params::api_service_name :
1430       clone_params => 'interleave=true',
1431     }
1432     pacemaker::resource::service { $::heat::params::api_cloudwatch_service_name :
1433       clone_params => 'interleave=true',
1434     }
1435     pacemaker::resource::service { $::heat::params::api_cfn_service_name :
1436       clone_params => 'interleave=true',
1437     }
1438     pacemaker::resource::service { $::heat::params::engine_service_name :
1439       clone_params => 'interleave=true',
1440     }
1441     pacemaker::constraint::base { 'keystone-then-heat-api-constraint':
1442       constraint_type => 'order',
1443       first_resource  => "${::keystone::params::service_name}-clone",
1444       second_resource => "${::heat::params::api_service_name}-clone",
1445       first_action    => 'start',
1446       second_action   => 'start',
1447       require         => [Pacemaker::Resource::Service[$::heat::params::api_service_name],
1448                           Pacemaker::Resource::Service[$::keystone::params::service_name]],
1449     }
1450     pacemaker::constraint::base { 'heat-api-then-heat-api-cfn-constraint':
1451       constraint_type => 'order',
1452       first_resource  => "${::heat::params::api_service_name}-clone",
1453       second_resource => "${::heat::params::api_cfn_service_name}-clone",
1454       first_action    => 'start',
1455       second_action   => 'start',
1456       require => [Pacemaker::Resource::Service[$::heat::params::api_service_name],
1457                   Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name]],
1458     }
1459     pacemaker::constraint::colocation { 'heat-api-cfn-with-heat-api-colocation':
1460       source  => "${::heat::params::api_cfn_service_name}-clone",
1461       target  => "${::heat::params::api_service_name}-clone",
1462       score   => 'INFINITY',
1463       require => [Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name],
1464                   Pacemaker::Resource::Service[$::heat::params::api_service_name]],
1465     }
1466     pacemaker::constraint::base { 'heat-api-cfn-then-heat-api-cloudwatch-constraint':
1467       constraint_type => 'order',
1468       first_resource  => "${::heat::params::api_cfn_service_name}-clone",
1469       second_resource => "${::heat::params::api_cloudwatch_service_name}-clone",
1470       first_action    => 'start',
1471       second_action   => 'start',
1472       require => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name],
1473                   Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name]],
1474     }
1475     pacemaker::constraint::colocation { 'heat-api-cloudwatch-with-heat-api-cfn-colocation':
1476       source  => "${::heat::params::api_cloudwatch_service_name}-clone",
1477       target  => "${::heat::params::api_cfn_service_name}-clone",
1478       score   => 'INFINITY',
1479       require => [Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name],
1480                   Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name]],
1481     }
1482     pacemaker::constraint::base { 'heat-api-cloudwatch-then-heat-engine-constraint':
1483       constraint_type => 'order',
1484       first_resource  => "${::heat::params::api_cloudwatch_service_name}-clone",
1485       second_resource => "${::heat::params::engine_service_name}-clone",
1486       first_action    => 'start',
1487       second_action   => 'start',
1488       require => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name],
1489                   Pacemaker::Resource::Service[$::heat::params::engine_service_name]],
1490     }
1491     pacemaker::constraint::colocation { 'heat-engine-with-heat-api-cloudwatch-colocation':
1492       source  => "${::heat::params::engine_service_name}-clone",
1493       target  => "${::heat::params::api_cloudwatch_service_name}-clone",
1494       score   => 'INFINITY',
1495       require => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name],
1496                   Pacemaker::Resource::Service[$::heat::params::engine_service_name]],
1497     }
1498     pacemaker::constraint::base { 'ceilometer-notification-then-heat-api-constraint':
1499       constraint_type => 'order',
1500       first_resource  => "${::ceilometer::params::agent_notification_service_name}-clone",
1501       second_resource => "${::heat::params::api_service_name}-clone",
1502       first_action    => 'start',
1503       second_action   => 'start',
1504       require         => [Pacemaker::Resource::Service[$::heat::params::api_service_name],
1505                           Pacemaker::Resource::Service[$::ceilometer::params::agent_notification_service_name]],
1506     }
1507
1508     # Horizon
1509     pacemaker::resource::service { $::horizon::params::http_service:
1510         clone_params => "interleave=true",
1511     }
1512
1513     #VSM
1514     if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') {
1515       pacemaker::resource::ocf { 'vsm-p' :
1516         ocf_agent_name  => 'heartbeat:VirtualDomain',
1517         resource_params => 'force_stop=true config=/var/spool/cisco/vsm/vsm_primary_deploy.xml',
1518         require         => Class['n1k_vsm'],
1519         meta_params     => 'resource-stickiness=INFINITY',
1520       }
1521       if str2bool(hiera('n1k_vsm::pacemaker_control', 'true')) {
1522         pacemaker::resource::ocf { 'vsm-s' :
1523           ocf_agent_name  => 'heartbeat:VirtualDomain',
1524           resource_params => 'force_stop=true config=/var/spool/cisco/vsm/vsm_secondary_deploy.xml',
1525           require         => Class['n1k_vsm'],
1526           meta_params     => 'resource-stickiness=INFINITY',
1527         }
1528         pacemaker::constraint::colocation { 'vsm-colocation-contraint':
1529           source  => "vsm-p",
1530           target  => "vsm-s",
1531           score   => "-INFINITY",
1532           require => [Pacemaker::Resource::Ocf['vsm-p'],
1533                       Pacemaker::Resource::Ocf['vsm-s']],
1534         }
1535       }
1536     }
1537
1538   }
1539
1540 } #END STEP 4
1541
# Record the set of packages installed at this deployment step; the manifest
# file name embeds the current step so each step leaves its own record.
$package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_controller_pacemaker', hiera('step')])
package_manifest { $package_manifest_name:
  ensure => present,
}