Add Nova as Pacemaker resource
puppet/manifests/overcloud_controller_pacemaker.pp (apex-tripleo-heat-templates)
# Copyright 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

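# Resource defaults for every pcmk_resource declared below: retry each pcs
# call up to 10 times, 3 seconds apart, so a cluster that is still settling
# does not fail the whole Puppet run.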
Pcmk_resource <| |> {
  tries     => 10,
  try_sleep => 3,
}

if !str2bool(hiera('enable_package_install', 'false')) {
  case $::osfamily {
    'RedHat': {
      Package { provider => 'norpm' } # provided by tripleo-puppet
    }
    default: {
      warning('enable_package_install option not supported.')
    }
  }
}

if $::hostname == downcase(hiera('bootstrap_nodeid')) {
  $pacemaker_master = true
  $sync_db = true
} else {
  $pacemaker_master = false
  $sync_db = false
}

# Whether to start and enable services which haven't been Pacemakerized yet
# FIXME: remove when we start all OpenStack services using Pacemaker
# (occurrences of this variable will be gradually replaced with false)
$non_pcmk_start = hiera('step') >= 4

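# hiera('step') is raised across successive Puppet runs by the surrounding
# deployment tooling, so each gated block below only runs once the whole
# deployment has reached that step.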
if hiera('step') >= 1 {

  create_resources(sysctl::value, hiera('sysctl_settings'), {})

  if count(hiera('ntp::servers')) > 0 {
    include ::ntp
  }

  $controller_node_ips = split(hiera('controller_node_ips'), ',')
  $controller_node_names = split(downcase(hiera('controller_node_names')), ',')
  class { '::tripleo::loadbalancer' :
    controller_hosts       => $controller_node_ips,
    controller_hosts_names => $controller_node_names,
    manage_vip             => false,
    haproxy_service_manage => false,
  }

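  # Assemble the Pacemaker/Corosync cluster. Only the bootstrap node
  # (setup_cluster => true) creates the cluster; the remaining controllers
  # join it. STONITH is disabled here and left to be configured separately.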
  $pacemaker_cluster_members = downcase(regsubst(hiera('controller_node_names'), ',', ' ', 'G'))
  user { 'hacluster':
    ensure => present,
  } ->
  class { '::pacemaker':
    hacluster_pwd => hiera('hacluster_pwd'),
  } ->
  class { '::pacemaker::corosync':
    cluster_members => $pacemaker_cluster_members,
    setup_cluster   => $pacemaker_master,
  }
  class { '::pacemaker::stonith':
    disable => true,
  }

  # Only configure RabbitMQ in this step, don't start it yet to
  # avoid races where non-master nodes attempt to start without
  # config (eg. binding on 0.0.0.0)
  # The module ignores erlang_cookie if cluster_config is false
  class { '::rabbitmq':
    service_manage          => false,
    tcp_keepalive           => false,
    config_kernel_variables => hiera('rabbitmq_kernel_variables'),
    config_variables        => hiera('rabbitmq_config_variables'),
    environment_variables   => hiera('rabbitmq_environment'),
  } ->
  file { '/var/lib/rabbitmq/.erlang.cookie':
    ensure  => 'present',
    owner   => 'rabbitmq',
    group   => 'rabbitmq',
    mode    => '0400',
    content => hiera('rabbitmq::erlang_cookie'),
    replace => true,
  }
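  # Every controller must hold the same Erlang cookie, otherwise the
  # rabbitmq-cluster OCF agent started in step 2 cannot join the nodes into
  # a single RabbitMQ cluster.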

  if downcase(hiera('ceilometer_backend')) == 'mongodb' {
    include ::mongodb::globals
    # FIXME: replace with service_manage => false on ::mongodb::server
    # when this is merged: https://github.com/puppetlabs/puppetlabs-mongodb/pull/198
    class { '::mongodb::server' :
      service_ensure => undef,
      service_enable => false,
    }
  }

  # Memcached
  class {'::memcached' :
    service_manage => false,
  }

  # Galera
  if str2bool(hiera('enable_galera', 'true')) {
    $mysql_config_file = '/etc/my.cnf.d/galera.cnf'
  } else {
    $mysql_config_file = '/etc/my.cnf.d/server.cnf'
  }
  $galera_nodes = downcase(hiera('galera_node_names', $::hostname))
  $galera_nodes_count = count(split($galera_nodes, ','))

  $mysqld_options = {
    'mysqld' => {
      'skip-name-resolve'              => '1',
      'binlog_format'                  => 'ROW',
      'default-storage-engine'         => 'innodb',
      'innodb_autoinc_lock_mode'       => '2',
      'innodb_locks_unsafe_for_binlog' => '1',
      'query_cache_size'               => '0',
      'query_cache_type'               => '0',
      'bind-address'                   => hiera('controller_host'),
      'max_connections'                => '1024',
      'open_files_limit'               => '-1',
      'wsrep_provider'                 => '/usr/lib64/galera/libgalera_smm.so',
      'wsrep_cluster_name'             => 'galera_cluster',
      'wsrep_slave_threads'            => '1',
      'wsrep_certify_nonPK'            => '1',
      'wsrep_max_ws_rows'              => '131072',
      'wsrep_max_ws_size'              => '1073741824',
      'wsrep_debug'                    => '0',
      'wsrep_convert_LOCK_to_trx'      => '0',
      'wsrep_retry_autocommit'         => '1',
      'wsrep_auto_increment_control'   => '1',
      'wsrep_drupal_282555_workaround' => '0',
      'wsrep_causal_reads'             => '0',
      'wsrep_notify_cmd'               => '',
      'wsrep_sst_method'               => 'rsync',
    }
  }

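  # Galera is bootstrapped and started by the Pacemaker galera OCF agent in
  # step 2, so the mysql module only writes configuration here
  # (service_manage => false) and never starts mysqld itself.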
  class { '::mysql::server':
    create_root_user   => false,
    create_root_my_cnf => false,
    config_file        => $mysql_config_file,
    override_options   => $mysqld_options,
    service_manage     => false,
  }

}

if hiera('step') >= 2 {

  if $pacemaker_master {
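    # The two VIPs below are typically realized as ocf:heartbeat:IPaddr2
    # resources by the pacemaker::resource::ip wrapper; HAProxy binds to
    # them on whichever node currently holds the resource.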
    $control_vip = hiera('tripleo::loadbalancer::controller_virtual_ip')
    pacemaker::resource::ip { 'control_vip':
      ip_address => $control_vip,
    }
    $public_vip = hiera('tripleo::loadbalancer::public_virtual_ip')
    pacemaker::resource::ip { 'public_vip':
      ip_address => $public_vip,
    }
    pacemaker::resource::service { 'haproxy':
      clone_params => true,
    }
    pacemaker::resource::service { $::memcached::params::service_name :
      clone_params => true,
      require      => Class['::memcached'],
    }

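    # The rabbitmq-cluster agent joins all controllers into one RabbitMQ
    # cluster, and set_policy mirrors every queue except the auto-generated
    # 'amq.*' ones (ha-mode: all). The wrapper below corresponds roughly to
    # a pcs invocation such as (illustrative only):
    #   pcs resource create rabbitmq ocf:heartbeat:rabbitmq-cluster \
    #     set_policy='ha-all ^(?!amq\.).* {"ha-mode":"all"}' \
    #     --clone ordered=true interleave=true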
    pacemaker::resource::ocf { 'rabbitmq':
      ocf_agent_name  => 'heartbeat:rabbitmq-cluster',
      resource_params => 'set_policy=\'ha-all ^(?!amq\.).* {"ha-mode":"all"}\'',
      clone_params    => 'ordered=true interleave=true',
      require         => Class['::rabbitmq'],
    }

    if downcase(hiera('ceilometer_backend')) == 'mongodb' {
      pacemaker::resource::service { $::mongodb::params::service_name :
        op_params    => 'start timeout=120s',
        clone_params => true,
        require      => Class['::mongodb::server'],
        before       => Exec['mongodb-ready'],
      }
      # NOTE (spredzy) : The replset can only be configured
      # once all the nodes have joined the cluster.
      $mongo_node_ips = split(hiera('mongo_node_ips'), ',')
      $mongo_node_ips_with_port = suffix($mongo_node_ips, ':27017')
      $mongo_node_string = join($mongo_node_ips_with_port, ',')
      $mongodb_replset = hiera('mongodb::server::replset')
      $mongodb_cluster_ready_command = join(suffix(prefix($mongo_node_ips, '/bin/nc -w1 '), ' 27017 < /dev/null'), ' && ')
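      # With two members this expands to something like (illustrative
      # addresses):
      #   /bin/nc -w1 192.0.2.10 27017 < /dev/null && \
      #   /bin/nc -w1 192.0.2.11 27017 < /dev/null
      # i.e. the exec below only succeeds once every member accepts TCP
      # connections on the MongoDB port.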
      exec { 'mongodb-ready' :
        command   => $mongodb_cluster_ready_command,
        timeout   => 30,
        tries     => 180,
        try_sleep => 10,
      }
      mongodb_replset { $mongodb_replset :
        members => $mongo_node_ips_with_port,
        require => Exec['mongodb-ready'],
      }
    }

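    # Galera runs as a multi-master clone: master-max equals the node count
    # so every controller gets promoted, and wsrep_cluster_address enumerates
    # all members for the gcomm:// bootstrap handled by the OCF agent.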
    pacemaker::resource::ocf { 'galera' :
      ocf_agent_name  => 'heartbeat:galera',
      op_params       => 'promote timeout=300s on-fail=block',
      master_params   => '',
      meta_params     => "master-max=${galera_nodes_count} ordered=true",
      resource_params => "additional_parameters='--open-files-limit=16384' enable_creation=true wsrep_cluster_address='gcomm://${galera_nodes}'",
      require         => Class['::mysql::server'],
      before          => Exec['galera-ready'],
    }
  }

  # Redis
  $redis_node_ips = split(hiera('redis_node_ips'), ',')
  $redis_master_hostname = downcase(hiera('bootstrap_nodeid'))

  if $redis_master_hostname == $::hostname {
    $slaveof = undef
  } else {
    $slaveof = "${redis_master_hostname} 6379"
  }
  class {'::redis' :
    slaveof => $slaveof,
  }

  if count($redis_node_ips) > 1 {
    Class['::tripleo::redis_notification'] -> Service['redis-sentinel']
    include ::redis::sentinel
    class {'::tripleo::redis_notification' :
      haproxy_monitor_ip => hiera('tripleo::loadbalancer::controller_virtual_ip'),
    }
  }

  exec { 'galera-ready' :
    command     => '/usr/bin/clustercheck >/dev/null',
    timeout     => 30,
    tries       => 180,
    try_sleep   => 10,
    environment => ['AVAILABLE_WHEN_READONLY=0'],
    require     => File['/etc/sysconfig/clustercheck'],
  }

  file { '/etc/sysconfig/clustercheck' :
    ensure  => file,
    content => "MYSQL_USERNAME=root\nMYSQL_PASSWORD=''\nMYSQL_HOST=localhost\n",
  }

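  # Expose clustercheck on TCP 9200 through xinetd so HAProxy can health-check
  # Galera with an HTTP check against this port; clustercheck returns 200 only
  # when the local node is synced with the cluster.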
  xinetd::service { 'galera-monitor' :
    port           => '9200',
    server         => '/usr/bin/clustercheck',
    per_source     => 'UNLIMITED',
    log_on_success => '',
    log_on_failure => 'HOST',
    flags          => 'REUSE',
    service_type   => 'UNLISTED',
    user           => 'root',
    group          => 'root',
    require        => File['/etc/sysconfig/clustercheck'],
  }

  # Create all the database schemas
  # Example DSN format: mysql://user:password@host/dbname
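  # Splitting that DSN on the character class [@:/?] yields
  #   ['mysql', '', '', 'user', 'password', 'host', 'dbname']
  # which is why indices 3..6 below map to user, password, host and dbname.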
  if $sync_db {
    $allowed_hosts = ['%', hiera('controller_host')]
    $keystone_dsn = split(hiera('keystone::database_connection'), '[@:/?]')
    class { '::keystone::db::mysql':
      user          => $keystone_dsn[3],
      password      => $keystone_dsn[4],
      host          => $keystone_dsn[5],
      dbname        => $keystone_dsn[6],
      allowed_hosts => $allowed_hosts,
      require       => Exec['galera-ready'],
    }
    $glance_dsn = split(hiera('glance::api::database_connection'), '[@:/?]')
    class { '::glance::db::mysql':
      user          => $glance_dsn[3],
      password      => $glance_dsn[4],
      host          => $glance_dsn[5],
      dbname        => $glance_dsn[6],
      allowed_hosts => $allowed_hosts,
      require       => Exec['galera-ready'],
    }
    $nova_dsn = split(hiera('nova::database_connection'), '[@:/?]')
    class { '::nova::db::mysql':
      user          => $nova_dsn[3],
      password      => $nova_dsn[4],
      host          => $nova_dsn[5],
      dbname        => $nova_dsn[6],
      allowed_hosts => $allowed_hosts,
      require       => Exec['galera-ready'],
    }
    $neutron_dsn = split(hiera('neutron::server::database_connection'), '[@:/?]')
    class { '::neutron::db::mysql':
      user          => $neutron_dsn[3],
      password      => $neutron_dsn[4],
      host          => $neutron_dsn[5],
      dbname        => $neutron_dsn[6],
      allowed_hosts => $allowed_hosts,
      require       => Exec['galera-ready'],
    }
    $cinder_dsn = split(hiera('cinder::database_connection'), '[@:/?]')
    class { '::cinder::db::mysql':
      user          => $cinder_dsn[3],
      password      => $cinder_dsn[4],
      host          => $cinder_dsn[5],
      dbname        => $cinder_dsn[6],
      allowed_hosts => $allowed_hosts,
      require       => Exec['galera-ready'],
    }
    $heat_dsn = split(hiera('heat::database_connection'), '[@:/?]')
    class { '::heat::db::mysql':
      user          => $heat_dsn[3],
      password      => $heat_dsn[4],
      host          => $heat_dsn[5],
      dbname        => $heat_dsn[6],
      allowed_hosts => $allowed_hosts,
      require       => Exec['galera-ready'],
    }
    if downcase(hiera('ceilometer_backend')) == 'mysql' {
      $ceilometer_dsn = split(hiera('ceilometer_mysql_conn_string'), '[@:/?]')
      class { '::ceilometer::db::mysql':
        user          => $ceilometer_dsn[3],
        password      => $ceilometer_dsn[4],
        host          => $ceilometer_dsn[5],
        dbname        => $ceilometer_dsn[6],
        allowed_hosts => $allowed_hosts,
        require       => Exec['galera-ready'],
      }
    }
  }

  # pre-install swift here so we can build rings
  include ::swift

  # Ceph
  $cinder_enable_rbd_backend = hiera('cinder_enable_rbd_backend', false)
  $enable_ceph = $cinder_enable_rbd_backend

  if $enable_ceph {
    class { '::ceph::profile::params':
      mon_initial_members => downcase(hiera('ceph_mon_initial_members')),
    }
    include ::ceph::profile::mon
  }

  if str2bool(hiera('enable_ceph_storage', 'false')) {
    include ::ceph::profile::client
    include ::ceph::profile::osd
  }

} #END STEP 2

if hiera('step') >= 3 {

  class { '::keystone':
    sync_db        => $sync_db,
    manage_service => false,
    enabled        => false,
  }

  #TODO: need a cleanup-keystone-tokens.sh solution here
  keystone_config {
    'ec2/driver': value => 'keystone.contrib.ec2.backends.sql.Ec2';
  }
  file { [ '/etc/keystone/ssl', '/etc/keystone/ssl/certs', '/etc/keystone/ssl/private' ]:
    ensure  => 'directory',
    owner   => 'keystone',
    group   => 'keystone',
    require => Package['keystone'],
  }
  file { '/etc/keystone/ssl/certs/signing_cert.pem':
    content => hiera('keystone_signing_certificate'),
    owner   => 'keystone',
    group   => 'keystone',
    notify  => Service['keystone'],
    require => File['/etc/keystone/ssl/certs'],
  }
  file { '/etc/keystone/ssl/private/signing_key.pem':
    content => hiera('keystone_signing_key'),
    owner   => 'keystone',
    group   => 'keystone',
    notify  => Service['keystone'],
    require => File['/etc/keystone/ssl/private'],
  }
  file { '/etc/keystone/ssl/certs/ca.pem':
    content => hiera('keystone_ca_certificate'),
    owner   => 'keystone',
    group   => 'keystone',
    notify  => Service['keystone'],
    require => File['/etc/keystone/ssl/certs'],
  }

  $glance_backend = downcase(hiera('glance_backend', 'swift'))
  case $glance_backend {
    'swift': { $glance_store = 'glance.store.swift.Store' }
    'file': { $glance_store = 'glance.store.filesystem.Store' }
    'rbd': { $glance_store = 'glance.store.rbd.Store' }
    default: { fail('Unrecognized glance_backend parameter.') }
  }

  # TODO: notifications, scrubber, etc.
  include ::glance
  class { '::glance::api':
    known_stores   => [$glance_store],
    manage_service => false,
    enabled        => false,
  }
  class { '::glance::registry' :
    sync_db        => $sync_db,
    manage_service => false,
    enabled        => false,
  }
  include join(['::glance::backend::', $glance_backend])

  class { '::nova':
    glance_api_servers => join([hiera('glance_protocol'), '://', hiera('controller_virtual_ip'), ':', hiera('glance_port')]),
  }

  class { '::nova::api' :
    sync_db        => $sync_db,
    manage_service => false,
    enabled        => false,
  }
  class { '::nova::cert' :
    manage_service => false,
    enabled        => false,
  }
  class { '::nova::conductor' :
    manage_service => false,
    enabled        => false,
  }
  class { '::nova::consoleauth' :
    manage_service => false,
    enabled        => false,
  }
  class { '::nova::vncproxy' :
    manage_service => false,
    enabled        => false,
  }
  class { '::nova::scheduler' :
    manage_service => false,
    enabled        => false,
  }
  include ::nova::network::neutron

  # Neutron class definitions
  include ::neutron
  class { '::neutron::server' :
    sync_db        => $sync_db,
    manage_service => false,
    enabled        => false,
  }
  class { '::neutron::agents::dhcp' :
    manage_service => false,
    enabled        => false,
  }
  class { '::neutron::agents::l3' :
    manage_service => false,
    enabled        => false,
  }
  class { '::neutron::agents::metadata':
    auth_url       => join(['http://', hiera('controller_virtual_ip'), ':35357/v2.0']),
    manage_service => false,
    enabled        => false,
  }
  file { '/etc/neutron/dnsmasq-neutron.conf':
    content => hiera('neutron_dnsmasq_options'),
    owner   => 'neutron',
    group   => 'neutron',
    notify  => Service['neutron-dhcp-service'],
    require => Package['neutron'],
  }
  class { '::neutron::plugins::ml2':
    flat_networks        => split(hiera('neutron_flat_networks'), ','),
    tenant_network_types => [hiera('neutron_tenant_network_type')],
    type_drivers         => [hiera('neutron_tenant_network_type')],
  }
  class { '::neutron::agents::ml2::ovs':
    # manage_service => false # not implemented
    enabled         => false,
    bridge_mappings => split(hiera('neutron_bridge_mappings'), ','),
    tunnel_types    => split(hiera('neutron_tunnel_types'), ','),
    local_ip        => hiera('controller_host'),
  }


  include ::cinder
  class { '::cinder::api':
    sync_db        => $sync_db,
    manage_service => false,
    enabled        => false,
  }
  class { '::cinder::scheduler' :
    manage_service => false,
    enabled        => false,
  }
  class { '::cinder::volume' :
    manage_service => false,
    enabled        => false,
  }
  include ::cinder::glance
  class { '::cinder::setup_test_volume':
    size => join([hiera('cinder_lvm_loop_device_size'), 'M']),
  }

  $cinder_enable_iscsi = hiera('cinder_enable_iscsi_backend', true)
  if $cinder_enable_iscsi {
    $cinder_iscsi_backend = 'tripleo_iscsi'

    cinder::backend::iscsi { $cinder_iscsi_backend :
      iscsi_ip_address => hiera('cinder_iscsi_ip_address'),
      iscsi_helper     => hiera('cinder_iscsi_helper'),
    }
  }

  if $enable_ceph {

    Ceph_pool {
      pg_num  => hiera('ceph::profile::params::osd_pool_default_pg_num'),
      pgp_num => hiera('ceph::profile::params::osd_pool_default_pgp_num'),
      size    => hiera('ceph::profile::params::osd_pool_default_size'),
    }

    $ceph_pools = hiera('ceph_pools')
    ceph::pool { $ceph_pools : }
  }

  if $cinder_enable_rbd_backend {
    $cinder_rbd_backend = 'tripleo_ceph'

    cinder_config {
      "${cinder_rbd_backend}/host": value => 'hostgroup';
    }

    cinder::backend::rbd { $cinder_rbd_backend :
      rbd_pool        => 'volumes',
      rbd_user        => 'openstack',
      rbd_secret_uuid => hiera('ceph::profile::params::fsid'),
      require         => Ceph::Pool['volumes'],
    }
  }

  $cinder_enabled_backends = delete_undef_values([$cinder_iscsi_backend, $cinder_rbd_backend])
  class { '::cinder::backends' :
    enabled_backends => $cinder_enabled_backends,
  }

  # swift proxy
  class { '::swift::proxy' :
    manage_service => $non_pcmk_start,
    enabled        => $non_pcmk_start,
  }
  include ::swift::proxy::proxy_logging
  include ::swift::proxy::healthcheck
  include ::swift::proxy::cache
  include ::swift::proxy::keystone
  include ::swift::proxy::authtoken
  include ::swift::proxy::staticweb
  include ::swift::proxy::ceilometer
  include ::swift::proxy::ratelimit
  include ::swift::proxy::catch_errors
  include ::swift::proxy::tempurl
  include ::swift::proxy::formpost

  # swift storage
  if str2bool(hiera('enable_swift_storage', 'true')) {
    class {'::swift::storage::all':
      mount_check => str2bool(hiera('swift_mount_check')),
    }
    class {'::swift::storage::account':
      manage_service => $non_pcmk_start,
      enabled        => $non_pcmk_start,
    }
    class {'::swift::storage::container':
      manage_service => $non_pcmk_start,
      enabled        => $non_pcmk_start,
    }
    class {'::swift::storage::object':
      manage_service => $non_pcmk_start,
      enabled        => $non_pcmk_start,
    }
    if !defined(File['/srv/node']) {
      file { '/srv/node':
        ensure  => directory,
        owner   => 'swift',
        group   => 'swift',
        require => Package['openstack-swift'],
      }
    }
    $swift_components = ['account', 'container', 'object']
    swift::storage::filter::recon { $swift_components : }
    swift::storage::filter::healthcheck { $swift_components : }
  }

  # Ceilometer
  $ceilometer_backend = downcase(hiera('ceilometer_backend'))
  case $ceilometer_backend {
    /mysql/: {
      $ceilometer_database_connection = hiera('ceilometer_mysql_conn_string')
    }
    default: {
      $ceilometer_database_connection = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}"
    }
  }
  include ::ceilometer
  class { '::ceilometer::api' :
    manage_service => $non_pcmk_start,
    enabled        => $non_pcmk_start,
  }
  class { '::ceilometer::agent::notification' :
    manage_service => $non_pcmk_start,
    enabled        => $non_pcmk_start,
  }
  class { '::ceilometer::agent::central' :
    manage_service => $non_pcmk_start,
    enabled        => $non_pcmk_start,
  }
  class { '::ceilometer::alarm::notifier' :
    manage_service => $non_pcmk_start,
    enabled        => $non_pcmk_start,
  }
  class { '::ceilometer::alarm::evaluator' :
    manage_service => $non_pcmk_start,
    enabled        => $non_pcmk_start,
  }
  class { '::ceilometer::collector' :
    manage_service => $non_pcmk_start,
    enabled        => $non_pcmk_start,
  }
  include ::ceilometer::expirer
  class { '::ceilometer::db' :
    database_connection => $ceilometer_database_connection,
    sync_db             => $sync_db,
  }
  class { '::ceilometer::agent::auth':
    auth_url => join(['http://', hiera('controller_virtual_ip'), ':5000/v2.0']),
  }

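  # Splay the expirer across the day: od reads three random bytes
  # (0..16777215) and the modulo folds that into a 0..86399s sleep, so the
  # controllers do not all run ceilometer-expirer at the same moment.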
  Cron <| title == 'ceilometer-expirer' |> { command => "sleep $((\$(od -A n -t d -N 3 /dev/urandom) % 86400)) && ${::ceilometer::params::expirer_command}" }

  # Heat
  class { '::heat' :
    sync_db => $sync_db,
  }
  class { '::heat::api' :
    manage_service => $non_pcmk_start,
    enabled        => $non_pcmk_start,
  }
  class { '::heat::api_cfn' :
    manage_service => $non_pcmk_start,
    enabled        => $non_pcmk_start,
  }
  class { '::heat::api_cloudwatch' :
    manage_service => $non_pcmk_start,
    enabled        => $non_pcmk_start,
  }
  class { '::heat::engine' :
    manage_service => $non_pcmk_start,
    enabled        => $non_pcmk_start,
  }

  # Horizon
  $vhost_params = { add_listen => false }
  class { '::horizon':
    cache_server_ip    => split(hiera('memcache_node_ips', '127.0.0.1'), ','),
    vhost_extra_params => $vhost_params,
  }

  $snmpd_user = hiera('snmpd_readonly_user_name')
  snmp::snmpv3_user { $snmpd_user:
    authtype => 'MD5',
    authpass => hiera('snmpd_readonly_user_password'),
  }
  class { '::snmp':
    agentaddress => ['udp:161', 'udp6:[::1]:161'],
    snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc  cron', 'includeAllDisks  10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ],
  }

} #END STEP 3

if hiera('step') >= 4 {
  if $pacemaker_master {

    # Keystone
    pacemaker::resource::service { $::keystone::params::service_name :
      clone_params => 'interleave=true',
    }

    # Cinder
    pacemaker::resource::service { $::cinder::params::api_service :
      clone_params => 'interleave=true',
      require      => Pacemaker::Resource::Service[$::keystone::params::service_name],
    }
    pacemaker::resource::service { $::cinder::params::scheduler_service :
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::cinder::params::volume_service : }

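    # The order/colocation pairs below serialize service startup and pin each
    # dependent service to nodes where its prerequisite is running. Each pair
    # maps roughly onto pcs commands of the form (illustrative):
    #   pcs constraint order start <first>-clone then start <second>-clone
    #   pcs constraint colocation add <second>-clone with <first>-clone INFINITY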
    pacemaker::constraint::base { 'keystone-then-cinder-api-constraint':
      constraint_type => 'order',
      first_resource  => "${::keystone::params::service_name}-clone",
      second_resource => "${::cinder::params::api_service}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::cinder::params::api_service],
                          Pacemaker::Resource::Service[$::keystone::params::service_name]],
    }
    pacemaker::constraint::base { 'cinder-api-then-cinder-scheduler-constraint':
      constraint_type => 'order',
      first_resource  => "${::cinder::params::api_service}-clone",
      second_resource => "${::cinder::params::scheduler_service}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::cinder::params::api_service],
                          Pacemaker::Resource::Service[$::cinder::params::scheduler_service]],
    }
    pacemaker::constraint::colocation { 'cinder-scheduler-with-cinder-api-colocation':
      source  => "${::cinder::params::scheduler_service}-clone",
      target  => "${::cinder::params::api_service}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::cinder::params::api_service],
                  Pacemaker::Resource::Service[$::cinder::params::scheduler_service]],
    }
    pacemaker::constraint::base { 'cinder-scheduler-then-cinder-volume-constraint':
      constraint_type => 'order',
      first_resource  => "${::cinder::params::scheduler_service}-clone",
      second_resource => $::cinder::params::volume_service,
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::cinder::params::scheduler_service],
                          Pacemaker::Resource::Service[$::cinder::params::volume_service]],
    }
    pacemaker::constraint::colocation { 'cinder-volume-with-cinder-scheduler-colocation':
      source  => $::cinder::params::volume_service,
      target  => "${::cinder::params::scheduler_service}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::cinder::params::scheduler_service],
                  Pacemaker::Resource::Service[$::cinder::params::volume_service]],
    }

    # Glance
    pacemaker::resource::service { $::glance::params::registry_service_name :
      clone_params => 'interleave=true',
      require      => Pacemaker::Resource::Service[$::keystone::params::service_name],
    }
    pacemaker::resource::service { $::glance::params::api_service_name :
      clone_params => 'interleave=true',
    }

    pacemaker::constraint::base { 'keystone-then-glance-registry-constraint':
      constraint_type => 'order',
      first_resource  => "${::keystone::params::service_name}-clone",
      second_resource => "${::glance::params::registry_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::glance::params::registry_service_name],
                          Pacemaker::Resource::Service[$::keystone::params::service_name]],
    }
    pacemaker::constraint::base { 'glance-registry-then-glance-api-constraint':
      constraint_type => 'order',
      first_resource  => "${::glance::params::registry_service_name}-clone",
      second_resource => "${::glance::params::api_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::glance::params::registry_service_name],
                          Pacemaker::Resource::Service[$::glance::params::api_service_name]],
    }
    pacemaker::constraint::colocation { 'glance-api-with-glance-registry-colocation':
      source  => "${::glance::params::api_service_name}-clone",
      target  => "${::glance::params::registry_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::glance::params::registry_service_name],
                  Pacemaker::Resource::Service[$::glance::params::api_service_name]],
    }

    # Neutron
    pacemaker::resource::service { $::neutron::params::server_service:
      op_params    => 'start timeout=90',
      clone_params => 'interleave=true',
      require      => Pacemaker::Resource::Service[$::keystone::params::service_name],
    }
    pacemaker::resource::service { $::neutron::params::l3_agent_service:
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::neutron::params::dhcp_agent_service:
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::neutron::params::ovs_agent_service:
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::neutron::params::metadata_agent_service:
      clone_params => 'interleave=true',
    }
    pacemaker::resource::ocf { $::neutron::params::ovs_cleanup_service:
      ocf_agent_name => 'neutron:OVSCleanup',
      clone_params   => 'interleave=true',
    }
    pacemaker::resource::ocf { 'neutron-netns-cleanup':
      ocf_agent_name => 'neutron:NetnsCleanup',
      clone_params   => 'interleave=true',
    }
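    # NeutronScale is a globally-unique clone: each instance gets its own
    # clone number so the agent can assign a distinct host value per
    # controller; clone-max=3 assumes the usual three-node controller cluster.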
    pacemaker::resource::ocf { 'neutron-scale':
      ocf_agent_name => 'neutron:NeutronScale',
      clone_params   => 'globally-unique=true clone-max=3 interleave=true',
    }
    pacemaker::constraint::base { 'keystone-to-neutron-server-constraint':
      constraint_type => 'order',
      first_resource  => "${::keystone::params::service_name}-clone",
      second_resource => "${::neutron::params::server_service}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::keystone::params::service_name],
                          Pacemaker::Resource::Service[$::neutron::params::server_service]],
    }
    pacemaker::constraint::base { 'neutron-server-to-neutron-scale-constraint':
      constraint_type => 'order',
      first_resource  => "${::neutron::params::server_service}-clone",
      second_resource => 'neutron-scale-clone',
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::neutron::params::server_service],
                          Pacemaker::Resource::Ocf['neutron-scale']],
    }
    pacemaker::constraint::base { 'neutron-scale-to-ovs-cleanup-constraint':
      constraint_type => 'order',
      first_resource  => 'neutron-scale-clone',
      second_resource => "${::neutron::params::ovs_cleanup_service}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Ocf['neutron-scale'],
                          Pacemaker::Resource::Ocf[$::neutron::params::ovs_cleanup_service]],
    }
    pacemaker::constraint::colocation { 'neutron-scale-to-ovs-cleanup-colocation':
      source  => "${::neutron::params::ovs_cleanup_service}-clone",
      target  => 'neutron-scale-clone',
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Ocf['neutron-scale'],
                  Pacemaker::Resource::Ocf[$::neutron::params::ovs_cleanup_service]],
    }
    pacemaker::constraint::base { 'neutron-ovs-cleanup-to-netns-cleanup-constraint':
      constraint_type => 'order',
      first_resource  => "${::neutron::params::ovs_cleanup_service}-clone",
      second_resource => 'neutron-netns-cleanup-clone',
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Ocf[$::neutron::params::ovs_cleanup_service],
                          Pacemaker::Resource::Ocf['neutron-netns-cleanup']],
    }
    pacemaker::constraint::colocation { 'neutron-ovs-cleanup-to-netns-cleanup-colocation':
      source  => 'neutron-netns-cleanup-clone',
      target  => "${::neutron::params::ovs_cleanup_service}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Ocf[$::neutron::params::ovs_cleanup_service],
                  Pacemaker::Resource::Ocf['neutron-netns-cleanup']],
    }
    pacemaker::constraint::base { 'neutron-netns-cleanup-to-openvswitch-agent-constraint':
      constraint_type => 'order',
      first_resource  => 'neutron-netns-cleanup-clone',
      second_resource => "${::neutron::params::ovs_agent_service}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Ocf['neutron-netns-cleanup'],
                          Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]],
    }
    pacemaker::constraint::colocation { 'neutron-netns-cleanup-to-openvswitch-agent-colocation':
      source  => "${::neutron::params::ovs_agent_service}-clone",
      target  => 'neutron-netns-cleanup-clone',
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Ocf['neutron-netns-cleanup'],
                  Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]],
    }
    pacemaker::constraint::base { 'neutron-openvswitch-agent-to-dhcp-agent-constraint':
      constraint_type => 'order',
      first_resource  => "${::neutron::params::ovs_agent_service}-clone",
      second_resource => "${::neutron::params::dhcp_agent_service}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service],
                          Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]],
    }
    pacemaker::constraint::colocation { 'neutron-openvswitch-agent-to-dhcp-agent-colocation':
      source  => "${::neutron::params::dhcp_agent_service}-clone",
      target  => "${::neutron::params::ovs_agent_service}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service],
                  Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]],
    }
    pacemaker::constraint::base { 'neutron-dhcp-agent-to-l3-agent-constraint':
      constraint_type => 'order',
      first_resource  => "${::neutron::params::dhcp_agent_service}-clone",
      second_resource => "${::neutron::params::l3_agent_service}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
                          Pacemaker::Resource::Service[$::neutron::params::l3_agent_service]],
    }
    pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-l3-agent-colocation':
      source  => "${::neutron::params::l3_agent_service}-clone",
      target  => "${::neutron::params::dhcp_agent_service}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
                  Pacemaker::Resource::Service[$::neutron::params::l3_agent_service]],
    }
    pacemaker::constraint::base { 'neutron-l3-agent-to-metadata-agent-constraint':
      constraint_type => 'order',
      first_resource  => "${::neutron::params::l3_agent_service}-clone",
      second_resource => "${::neutron::params::metadata_agent_service}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::neutron::params::l3_agent_service],
                          Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]],
    }
    pacemaker::constraint::colocation { 'neutron-l3-agent-to-metadata-agent-colocation':
      source  => "${::neutron::params::metadata_agent_service}-clone",
      target  => "${::neutron::params::l3_agent_service}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::neutron::params::l3_agent_service],
                  Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]],
    }

    # Nova
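    # op_params 'monitor start-delay=10s' defers the first monitor operation
    # so each Nova service has time to initialize before Pacemaker starts
    # health-checking it.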
    pacemaker::resource::service { $::nova::params::api_service_name :
      clone_params => 'interleave=true',
      op_params    => 'monitor start-delay=10s',
    }
    pacemaker::resource::service { $::nova::params::conductor_service_name :
      clone_params => 'interleave=true',
      op_params    => 'monitor start-delay=10s',
    }
    pacemaker::resource::service { $::nova::params::consoleauth_service_name :
      clone_params => 'interleave=true',
      op_params    => 'monitor start-delay=10s',
      require      => Pacemaker::Resource::Service[$::keystone::params::service_name],
    }
    pacemaker::resource::service { $::nova::params::vncproxy_service_name :
      clone_params => 'interleave=true',
      op_params    => 'monitor start-delay=10s',
    }
    pacemaker::resource::service { $::nova::params::scheduler_service_name :
      clone_params => 'interleave=true',
      op_params    => 'monitor start-delay=10s',
    }

    pacemaker::constraint::base { 'keystone-then-nova-consoleauth-constraint':
      constraint_type => 'order',
      first_resource  => "${::keystone::params::service_name}-clone",
      second_resource => "${::nova::params::consoleauth_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
                          Pacemaker::Resource::Service[$::keystone::params::service_name]],
    }
    pacemaker::constraint::base { 'nova-consoleauth-then-nova-vncproxy-constraint':
      constraint_type => 'order',
      first_resource  => "${::nova::params::consoleauth_service_name}-clone",
      second_resource => "${::nova::params::vncproxy_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
                          Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]],
    }
    pacemaker::constraint::colocation { 'nova-vncproxy-with-nova-consoleauth-colocation':
      source  => "${::nova::params::vncproxy_service_name}-clone",
      target  => "${::nova::params::consoleauth_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
                  Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]],
    }
    # FIXME(gfidente): novncproxy will not start unless websockify is updated to 0.6,
    # which is not yet the case on f20 or f21; uncomment when it becomes available.
    #pacemaker::constraint::base { 'nova-vncproxy-then-nova-api-constraint':
    #  constraint_type => 'order',
    #  first_resource  => "${::nova::params::vncproxy_service_name}-clone",
    #  second_resource => "${::nova::params::api_service_name}-clone",
    #  first_action    => 'start',
    #  second_action   => 'start',
    #  require => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name],
    #              Pacemaker::Resource::Service[$::nova::params::api_service_name]],
    #}
    #pacemaker::constraint::colocation { 'nova-api-with-nova-vncproxy-colocation':
    #  source  => "${::nova::params::api_service_name}-clone",
    #  target  => "${::nova::params::vncproxy_service_name}-clone",
    #  score   => 'INFINITY',
    #  require => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name],
    #              Pacemaker::Resource::Service[$::nova::params::api_service_name]],
    #}
    pacemaker::constraint::base { 'nova-api-then-nova-scheduler-constraint':
      constraint_type => 'order',
      first_resource  => "${::nova::params::api_service_name}-clone",
      second_resource => "${::nova::params::scheduler_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::nova::params::api_service_name],
                          Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]],
    }
    pacemaker::constraint::colocation { 'nova-scheduler-with-nova-api-colocation':
      source  => "${::nova::params::scheduler_service_name}-clone",
      target  => "${::nova::params::api_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::nova::params::api_service_name],
                  Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]],
    }
    pacemaker::constraint::base { 'nova-scheduler-then-nova-conductor-constraint':
      constraint_type => 'order',
      first_resource  => "${::nova::params::scheduler_service_name}-clone",
      second_resource => "${::nova::params::conductor_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name],
                          Pacemaker::Resource::Service[$::nova::params::conductor_service_name]],
    }
    pacemaker::constraint::colocation { 'nova-conductor-with-nova-scheduler-colocation':
      source  => "${::nova::params::conductor_service_name}-clone",
      target  => "${::nova::params::scheduler_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name],
                  Pacemaker::Resource::Service[$::nova::params::conductor_service_name]],
    }

  }

} #END STEP 4