url = https://github.com/openstack/fuel-ui.git
branch = stable/newton
ignore = all
+[submodule "mcp/reclass/classes/system"]
+ path = mcp/reclass/classes/system
+ url = https://github.com/Mirantis/reclass-system-salt-model
--- /dev/null
+++ b/mcp/reclass/classes/system
+Subproject commit e1bf38ccfd44662c806fe6c7b652b988bc0d6f24
+++ /dev/null
-classes:
-- service.aodh.server.cluster
-- system.haproxy.proxy.listen.openstack.aodh
-parameters:
-  aodh:
-    server:
-      enabled: true
-      version: ${_param:aodh_version}
-      cluster: true
-      ttl: 86400
-      debug: false
-      verbose: true
-      region: ${_param:openstack_region}
-      database:
-        engine: "mysql+pymysql"
-        host: ${_param:openstack_database_address}
-        port: 3306
-        name: aodh
-        user: aodh
-        password: ${_param:mysql_aodh_password}
-      bind:
-        host: ${_param:cluster_local_address}
-        port: 8042
-      identity:
-        engine: keystone
-        host: ${_param:keystone_service_host}
-        port: 35357
-        tenant: service
-        user: aodh
-        password: ${_param:keystone_aodh_password}
-      message_queue:
-        engine: rabbitmq
-        port: 5672
-        user: openstack
-        password: ${_param:rabbitmq_openstack_password}
-        virtual_host: '/openstack'
-        members:
-        - host: ${_param:openstack_message_queue_node01_address}
-        - host: ${_param:openstack_message_queue_node02_address}
-        - host: ${_param:openstack_message_queue_node03_address}
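
The ${_param:...} references above are reclass interpolation: they resolve against _param values supplied by the consuming cluster-level model at render time. A minimal sketch of such a consumer, assuming the class is addressed as system.aodh.server.cluster once the system model lives in the submodule; every name and value below is an illustrative placeholder, not part of this diff:

# Hypothetical cluster-level class (sketch); all values are placeholders.
classes:
- system.aodh.server.cluster
parameters:
  _param:
    aodh_version: ocata
    openstack_region: RegionOne
    cluster_local_address: 172.16.10.101
    openstack_database_address: 172.16.10.254
    mysql_aodh_password: CHANGE_ME
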
+++ /dev/null
-classes:
-- service.redis.server.single
-parameters:
-  aodh:
-    server:
-      coordination_backend:
-        url: redis://${_param:single_address}:6379/${_param:cluster_node01_address}
+++ /dev/null
-classes:
-- service.aodh.server.single
+++ /dev/null
-classes:
-- service.apache.server.single
+++ /dev/null
-classes:
-- service.ceilometer.agent.cluster
-parameters:
-  ceilometer:
-    agent:
-      region: ${_param:openstack_region}
-      enabled: true
-      version: ${_param:ceilometer_version}
-      secret: ${_param:ceilometer_secret_key}
-      identity:
-        engine: keystone
-        host: ${_param:keystone_service_host}
-        port: 35357
-        tenant: service
-        user: ceilometer
-        password: ${_param:keystone_ceilometer_password}
-      message_queue:
-        engine: rabbitmq
-        members:
-        - host: ${_param:openstack_message_queue_node01_address}
-        - host: ${_param:openstack_message_queue_node02_address}
-        - host: ${_param:openstack_message_queue_node03_address}
-        user: openstack
-        password: ${_param:rabbitmq_openstack_password}
-        virtual_host: '/openstack'
-        ha_queues: true
-        rpc_thread_pool_size: 5
-  nova:
-    compute:
-      notification:
-        driver: messagingv2
+++ /dev/null
-classes:
-- service.ceilometer.agent.single
+++ /dev/null
-parameters:
-  cinder:
-    controller:
-      notification: true
\ No newline at end of file
+++ /dev/null
-parameters:
-  cinder:
-    volume:
-      notification: true
\ No newline at end of file
+++ /dev/null
-parameters:
-  glance:
-    server:
-      notification: true
\ No newline at end of file
+++ /dev/null
-parameters:
-  heat:
-    server:
-      notification: true
\ No newline at end of file
+++ /dev/null
-classes:
-- system.ceilometer.client.cinder_control
-- system.ceilometer.client.keystone
-- system.ceilometer.client.glance
-- system.ceilometer.client.heat
-- system.ceilometer.client.nova_control
\ No newline at end of file
+++ /dev/null
-parameters:
-  keystone:
-    server:
-      notification: true
\ No newline at end of file
+++ /dev/null
-parameters:
-  neutron:
-    server:
-      notification: true
\ No newline at end of file
+++ /dev/null
-parameters:
-  nova:
-    compute:
-      notification:
-        driver: messagingv2
-        notify_on:
-          state_change: vm_and_task_state
\ No newline at end of file
+++ /dev/null
-parameters:
-  nova:
-    controller:
-      notification:
-        driver: messagingv2
+++ /dev/null
-parameters:
-  ceilometer:
-    server:
-      database:
-        influxdb:
-          host: ${_param:stacklight_telemetry_node01_address}
-          port: 8086
-          user: ceilometer
-          password: ${_param:ceilometer_influxdb_password}
-          database: ceilometer
-        elasticsearch:
-          enabled: true
-          host: ${_param:stacklight_log_address}
-          port: 9200
\ No newline at end of file
+++ /dev/null
-parameters:
-  ceilometer:
-    server:
-      database:
-        engine: mongodb
-        members:
-        - host: ${_param:cluster_node01_address}
-          port: 27017
-        - host: ${_param:cluster_node02_address}
-          port: 27017
-        - host: ${_param:cluster_node03_address}
-          port: 27017
-        name: ceilometer
-        user: ceilometer
-        password: ${_param:mongodb_ceilometer_password}
\ No newline at end of file
+++ /dev/null
-classes:
-- service.ceilometer.server.cluster
-- system.haproxy.proxy.listen.openstack.ceilometer
-- system.keepalived.cluster.instance.openstack_telemetry_vip
-- service.haproxy.proxy.single
-parameters:
-  ceilometer:
-    server:
-      enabled: true
-      version: ${_param:ceilometer_version}
-      region: ${_param:openstack_region}
-      cluster: true
-      secret: ${_param:ceilometer_secret_key}
-      ttl: 86400
-      publisher:
-        default:
-      bind:
-        host: ${_param:cluster_local_address}
-        port: 8777
-      identity:
-        engine: keystone
-        host: ${_param:openstack_control_address}
-        port: 35357
-        tenant: service
-        user: ceilometer
-        password: ${_param:keystone_ceilometer_password}
-      message_queue:
-        engine: rabbitmq
-        members:
-        - host: ${_param:openstack_message_queue_node01_address}
-        - host: ${_param:openstack_message_queue_node02_address}
-        - host: ${_param:openstack_message_queue_node03_address}
-        user: openstack
-        password: ${_param:rabbitmq_openstack_password}
-        virtual_host: '/openstack'
-        # Workaround for https://bugs.launchpad.net/ceilometer/+bug/1337715
-        rpc_thread_pool_size: 5
+++ /dev/null
-classes:
-- service.redis.server.single
-parameters:
-  aodh:
-    server:
-      coordination_backend:
-        url: redis://${_param:single_address}:6379/${_param:cluster_node01_address}
+++ /dev/null
-classes:
-- service.ceilometer.server.single
-parameters:
-  ceilometer:
-    server:
-      database:
-        influxdb:
-          host: ${_param:stacklight_monitor_node01_address}
-          port: 8086
-          user: ceilometer
-          password: ${_param:ceilometer_influxdb_password}
-          database: ceilometer
-        elasticsearch:
-          enabled: true
-          host: ${_param:stacklight_monitor_address}
-          port: 9200
+++ /dev/null
-classes:
-- service.ceph.client.single
-parameters:
-  _param:
-    ceph_auth_client_required: cephx
-  ceph:
-    client:
-      config:
-        global:
-          fsid: ${_param:ceph_fsid}
-          mon_initial_members: ${_param:ceph_mon_initial_members}
-          mon_host: ${_param:ceph_mon_host}
-          auth_client_required: ${_param:ceph_auth_client_required}
-          public_network: ${_param:ceph_public_network}
-          cluster_network: ${_param:ceph_cluster_network}
-          osd_fs_mount_options_xfs: rw,noatime
-          osd_fs_type: xfs
-        osd:
-          filestore_xattr_use_omap: True
-          osd_journal_size: 7500
-        mon:
-          mon_debug_dump_transactions: False
-        client:
-          rbd_cache_size: 268435456
-          rbd_cache_max_dirty: 134217728
-          rbd_cache_max_dirty_age: 5
-          rbd_cache: True
-      # TODO: Configure these keys on cluster level
-      # keyring:
-      #   images:
-      #     key:
-      #   object:
-      #     key:
-      #   cinder:
-      #     key:
-      #   nova:
-      #     key:
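
The TODO above leaves the cephx keyrings commented out. If they were moved to the cluster level as the comment suggests, the override would plausibly mirror the same structure; a sketch assuming the salt-formula-ceph ceph:client:keyring pillar shape, with placeholder key material:

# Sketch of a cluster-level keyring override (assumed shape, placeholder keys).
parameters:
  ceph:
    client:
      keyring:
        images:
          key: PLACEHOLDER_CEPHX_KEY
        cinder:
          key: PLACEHOLDER_CEPHX_KEY
        nova:
          key: PLACEHOLDER_CEPHX_KEY
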
+++ /dev/null
-parameters:
-  _param:
-    radosgw_keyring_path: /etc/ceph/ceph.client.radosgw.keyring
-  ceph:
-    radosgw:
-      enabled: true
-    client:
-      config:
-        client.radosgw.gateway:
-          rgw_keystone_accepted_roles: "_member_, Member, admin, swiftoperator"
-          keyring: /etc/ceph/ceph.client.radosgw.keyring
-          rgw_socket_path: /tmp/radosgw.sock
-          rgw_keystone_revocation_interval: 60
-          rgw_keystone_url: ${_param:keystone_service_host}:5000
-          rgw_keystone_admin_token: ${_param:keystone_service_token}
-          host: ${linux:system:name}
-          rgw_dns_name: ${_param:cluster_domain}
-          rgw_print_continue: True
-          rgw_content_length_compat: true
-          user: www-data
+++ /dev/null
-parameters:
-  cinder:
-    controller:
-      default_volume_type: standard-iops
-      backend:
-        ceph:
-          type_name: standard-iops
-          backend: ceph
-          pool: ${_param:cinder_storage_pool}
-          engine: ceph
-          user: ${_param:cinder_storage_user}
-          secret_uuid: ${_param:cinder_storage_secret_uuid}
-          client_cinder_key: ${_param:cinder_storage_client_key}
+++ /dev/null
-parameters:
-  cinder:
-    controller:
-      default_volume_type: lvm-driver
-      backend:
-        lvm-driver:
-          engine: lvm
-          type_name: lvm-driver
-          volume_group: cinder-volume
+++ /dev/null
-parameters:
-  _param:
-    cinder_nfs_local_path: /var/lib/cinder/nfs
-  cinder:
-    controller:
-      default_volume_type: nfs-driver
-      backend:
-        nfs-driver:
-          engine: nfs
-          type_name: nfs-driver
-          volume_group: cinder-volume
-          path: ${_param:cinder_nfs_local_path}
-          devices:
-          - ${_param:cinder_nfs_host}:${_param:cinder_nfs_remote_path}
-          options: ${_param:cinder_nfs_mount_options}
\ No newline at end of file
+++ /dev/null
-classes:
-- service.cinder.control.cluster_control
-- service.haproxy.proxy.single
-- service.keepalived.cluster.single
-- system.haproxy.proxy.listen.openstack.cinder
-parameters:
-  cinder:
-    volume:
-      enabled: false
-      default_volume_type: ""
-      backend: {}
-      logging:
-        heka:
-          enabled: true
-    controller:
-      enabled: true
-      default_volume_type: ""
-      backend: {}
-      version: ${_param:cinder_version}
-      osapi:
-        host: ${_param:cluster_local_address}
-      database:
-        engine: mysql
-        host: ${_param:openstack_database_address}
-        port: 3306
-        name: cinder
-        user: cinder
-        password: ${_param:mysql_cinder_password}
-      identity:
-        engine: keystone
-        region: ${_param:openstack_region}
-        host: ${_param:cluster_vip_address}
-        port: 35357
-        tenant: service
-        user: cinder
-        password: ${_param:keystone_cinder_password}
-      glance:
-        host: ${_param:cluster_vip_address}
-        port: 9292
-      message_queue:
-        engine: rabbitmq
-        members:
-        - host: ${_param:openstack_message_queue_node01_address}
-        - host: ${_param:openstack_message_queue_node02_address}
-        - host: ${_param:openstack_message_queue_node03_address}
-        user: openstack
-        password: ${_param:rabbitmq_openstack_password}
-        virtual_host: '/openstack'
-      cache:
-        engine: memcached
-        members:
-        - host: ${_param:cluster_node01_address}
-          port: 11211
-        - host: ${_param:cluster_node02_address}
-          port: 11211
-        - host: ${_param:cluster_node03_address}
-          port: 11211
-
+++ /dev/null
-parameters:
-  cinder:
-    controller:
-      audit:
-        enabled: true
+++ /dev/null
-parameters:
-  cinder:
-    controller:
-      notification:
-        driver: messagingv2
-        topics: "${_param:openstack_notification_topics}"
+++ /dev/null
-classes:
-- service.cinder.control.single
-parameters:
-  cinder:
-    volume:
-      enabled: false
-    controller:
-      backend: {}
-      default_volume_type: ''
-      database:
-        host: ${_param:single_address}
-
+++ /dev/null
-parameters:
-  cinder:
-    volume:
-      message_queue:
-        ha_queues: true
-      enabled: true
-      notification: true
-      default_volume_type: standard-iops
-      backend:
-        ceph:
-          type_name: standard-iops
-          backend: ceph
-          pool: ${_param:cinder_storage_pool}
-          engine: ceph
-          user: ${_param:cinder_storage_user}
-          secret_uuid: ${_param:cinder_storage_secret_uuid}
-          client_cinder_key: ${_param:cinder_storage_client_key}
+++ /dev/null
-parameters:
-  cinder:
-    volume:
-      default_volume_type: hitachi_vsp
-      enabled: True
-      backend:
-        hitachi_vsp:
-          backend: hitachi_vsp
-          type_name: hitachi_vsp
-          engine: hitachi_vsp
-          version: 1.3
-          user: ${_param:cinder_storage_user}
-          password: ${_param:cinder_storage_password}
-          storage_id: ${_param:cinder_storage_id}
-          thin_pool_id: ${_param:cinder_thin_pool_id}
-          pool_id: ${_param:cinder_pool_id}
-          target_ports: ${_param:cinder_target_ports}
-          compute_target_ports: ${_param:cinder_target_compute_ports}
+++ /dev/null
-parameters:
-  _param:
-    cinder_lvm_devices: [ "/dev/loop0" ]
-  linux:
-    storage:
-      lvm:
-        cinder-vg:
-          enabled: true
-          name: cinder-volume
-          devices: ${_param:cinder_lvm_devices}
-  cinder:
-    volume:
-      default_volume_type: lvm-driver
-      backend:
-        lvm-driver:
-          engine: lvm
-          type_name: lvm-driver
-          volume_group: cinder-volume
+++ /dev/null
-parameters:
-  cinder:
-    volume:
-      default_volume_type: nfs-driver
-      backend:
-        nfs-driver:
-          engine: nfs
-          type_name: nfs-driver
-          volume_group: cinder-volume
\ No newline at end of file
+++ /dev/null
-classes:
-- service.cinder.volume.local
-parameters:
-  cinder:
-    volume:
-      enabled: True
-      database:
-        host: ${_param:single_address}
-      glance:
-        host: ${_param:single_address}
-      message_queue:
-        host: ${_param:single_address}
-      identity:
-        host: ${_param:single_address}
+++ /dev/null
-parameters:
-  cinder:
-    volume:
-      notification:
-        driver: messagingv2
-        topics: "${_param:openstack_notification_topics}"
+++ /dev/null
-classes:
-- service.cinder.volume.single
-parameters:
-  cinder:
-    volume:
-      enabled: True
-      database:
-        host: ${_param:openstack_database_address}
-      glance:
-        host: ${_param:openstack_control_address}
-      message_queue:
-        members:
-        - host: ${_param:openstack_message_queue_node01_address}
-        - host: ${_param:openstack_message_queue_node02_address}
-        - host: ${_param:openstack_message_queue_node03_address}
-      identity:
-        host: ${_param:openstack_control_address}
+++ /dev/null
-classes:
-- service.collectd.client
+++ /dev/null
-classes:
-- service.collectd.client
-parameters:
-  _param:
-    collectd_backend_carbon_host: localhost
-    collectd_backend_carbon_port: 2003
-  collectd:
-    client:
-      enabled: true
-      backend:
-        carbon:
-          engine: carbon
-          host: ${_param:collectd_backend_carbon_host}
-          port: ${_param:collectd_backend_carbon_port}
\ No newline at end of file
+++ /dev/null
-classes:
-- service.collectd.client
-parameters:
-  _param:
-    collectd_metric_collector_host: 127.0.0.1
-    collectd_metric_collector_port: 8325
-  collectd:
-    client:
-      enabled: true
-      backend:
-        metric_collector:
-          engine: http
-          host: ${_param:collectd_metric_collector_host}
-          port: ${_param:collectd_metric_collector_port}
-      read_interval: 10
-      use_fqdn: false
-      syslog_logging: false
+++ /dev/null
-classes:
-- system.collectd.remote_client.output.heka
-- service.collectd.remote_client.cluster
-parameters:
-  collectd:
-    remote_client:
-      automatic_starting: false
-  keepalived:
-    cluster:
-      instance:
-        stacklight_monitor_vip:
-          notify_action:
-            master:
-            - service remote_collectd start
-            backup:
-            - service remote_collectd stop
-            fault:
-            - service remote_collectd stop
+++ /dev/null
-parameters:
-  _param:
-    collectd_remote_collector_port: 8326
-  collectd:
-    remote_client:
-      enabled: true
-      backend:
-        remote_collector:
-          engine: http
-          host: ${_param:collectd_remote_collector_host}
-          port: ${_param:collectd_remote_collector_port}
-          timeout: 5
-      read_interval: 10
-      use_fqdn: false
-      syslog_logging: false
+++ /dev/null
-classes:
-- system.collectd.remote_client.output.heka
-- service.collectd.remote_client.single
-parameters:
-  collectd:
-    remote_client:
-      automatic_starting: true
+++ /dev/null
-parameters:
-  designate:
-    server:
-      backend:
-        bind9:
-          rndc_key: "${_param:designate_bind9_rndc_key}"
-  bind:
-    server:
-      key:
-        designate:
-          secret: "${_param:designate_bind9_rndc_key}"
-          algorithm: hmac-sha512
-      allow_new_zones: true
-      query: true
-      control:
-        local:
-          enabled: true
-          bind:
-            address: 127.0.0.1
-            port: 953
-          allow:
-          - 127.0.0.1
-          - ${_param:single_address}
-          keys:
-          - designate
-    client:
-      enabled: true
-      option:
-        default:
-          server: 127.0.0.1
-          port: 953
-          key: designate
-      key:
-        designate:
-          secret: "${_param:designate_bind9_rndc_key}"
-          algorithm: hmac-sha512
+++ /dev/null
-classes:
-- service.designate.server.cluster
-- service.keepalived.cluster.single
-- system.haproxy.proxy.listen.openstack.designate
-- service.haproxy.proxy.single
-parameters:
-  designate:
-    _support:
-      sensu:
-        enabled: false
-    server:
-      enabled: true
-      local_bind: true
-      region: ${_param:openstack_region}
-      domain_id: ${_param:designate_domain_id}
-      version: ${_param:designate_version}
-      database:
-        engine: mysql
-        host: ${_param:openstack_database_address}
-        port: 3306
-        name:
-          main_database: designate
-          pool_manager: designate_pool_manager
-        user: designate
-        password: ${_param:mysql_designate_password}
-      identity:
-        engine: keystone
-        host: ${_param:openstack_control_address}
-        port: 35357
-        tenant: service
-        user: designate
-        password: ${_param:keystone_designate_password}
-      bind:
-        api:
-          address: ${_param:single_address}
-      message_queue:
-        engine: rabbitmq
-        port: 5672
-        members:
-        - host: ${_param:openstack_message_queue_node01_address}
-        - host: ${_param:openstack_message_queue_node02_address}
-        - host: ${_param:openstack_message_queue_node03_address}
-        user: openstack
-        password: ${_param:rabbitmq_openstack_password}
-        virtual_host: '/openstack'
-      pool:
-        pool_id: ${_param:designate_pool_id}
-        nameservers:
-        - uuid: ${_param:designate_node1_uuid}
-          host: ${_param:cluster_node01_address}
-          port: 53
-        - uuid: ${_param:designate_node2_uuid}
-          host: ${_param:cluster_node02_address}
-          port: 53
-        targets:
-          uuid: ${_param:designate_target_uuid}
-          options: 'port: 53, host: 127.0.0.1'
-          masters: 127.0.0.1:5354
-          type: ${_param:designate_target_type}
+++ /dev/null
-classes:
-- service.designate.server.single
-- service.haproxy.proxy.single
-parameters:
-  designate:
-    server:
-      enabled: true
-      local_bind: true
-      region: ${_param:openstack_region}
-      domain_id: ${_param:designate_domain_id}
-      version: ${_param:designate_version}
-      bind:
-        api:
-          address: ${_param:single_address}
-      database:
-        engine: mysql
-        host: ${_param:openstack_database_address}
-        port: 3306
-        name:
-          main_database: designate
-          pool_manager: designate_pool_manager
-        user: designate
-        password: ${_param:mysql_designate_password}
-      identity:
-        engine: keystone
-        host: ${_param:openstack_control_address}
-        port: 35357
-        tenant: service
-        user: designate
-        password: ${_param:keystone_designate_password}
-      message_queue:
-        engine: rabbitmq
-        host: ${_param:cluster_vip_address}
-        port: 5672
-        user: openstack
-        password: ${_param:rabbitmq_openstack_password}
-        virtual_host: '/openstack'
+++ /dev/null
-classes:
-- service.keepalived.cluster.single
-- service.haproxy.proxy.single
-- system.haproxy.proxy.listen.openstack.galera
\ No newline at end of file
+++ /dev/null
-parameters:
-  mysql:
-    server:
-      database:
-        aodh:
-          encoding: utf8
-          users:
-          - name: aodh
-            password: ${_param:mysql_aodh_password}
-            host: '%'
-            rights: all
-          - name: aodh
-            password: ${_param:mysql_aodh_password}
-            host: ${_param:cluster_vip_address}
-            rights: all
+++ /dev/null
-parameters:
-  mysql:
-    server:
-      database:
-        ceilometer:
-          encoding: utf8
-          users:
-          - name: ceilometer
-            password: ${_param:mysql_ceilometer_password}
-            host: '%'
-            rights: all
-          - name: ceilometer
-            password: ${_param:mysql_ceilometer_password}
-            host: ${_param:cluster_local_address}
-            rights: all
+++ /dev/null
-parameters:
-  mysql:
-    server:
-      database:
-        cinder:
-          encoding: utf8
-          users:
-          - name: cinder
-            password: ${_param:mysql_cinder_password}
-            host: '%'
-            rights: all
-          - name: cinder
-            password: ${_param:mysql_cinder_password}
-            host: ${_param:cluster_local_address}
-            rights: all
+++ /dev/null
-parameters:
-  mysql:
-    server:
-      database:
-        glance:
-          encoding: utf8
-          users:
-          - name: glance
-            password: ${_param:mysql_glance_password}
-            host: '%'
-            rights: all
-          - name: glance
-            password: ${_param:mysql_glance_password}
-            host: ${_param:cluster_local_address}
-            rights: all
+++ /dev/null
-parameters:
-  mysql:
-    server:
-      database:
-        grafana:
-          encoding: utf8
-          users:
-          - name: grafana
-            password: ${_param:mysql_grafana_password}
-            host: '%'
-            rights: all
-          - name: grafana
-            password: ${_param:mysql_grafana_password}
-            host: ${_param:cluster_local_address}
-            rights: all
-
+++ /dev/null
-parameters:
-  mysql:
-    server:
-      database:
-        graphite:
-          encoding: 'utf8'
-          users:
-          - name: 'graphite'
-            password: '${_param:mysql_graphite_password}'
-            host: '%'
-            rights: 'all'
+++ /dev/null
-parameters:
-  mysql:
-    server:
-      database:
-        heat:
-          encoding: utf8
-          users:
-          - name: heat
-            password: ${_param:mysql_heat_password}
-            host: '%'
-            rights: all
-          - name: heat
-            password: ${_param:mysql_heat_password}
-            host: ${_param:cluster_local_address}
-            rights: all
+++ /dev/null
-parameters:
-  mysql:
-    server:
-      database:
-        keystone:
-          encoding: utf8
-          users:
-          - name: keystone
-            password: ${_param:mysql_keystone_password}
-            host: '%'
-            rights: all
-          - name: keystone
-            password: ${_param:mysql_keystone_password}
-            host: ${_param:cluster_local_address}
-            rights: all
+++ /dev/null
-parameters:
-  mysql:
-    server:
-      database:
-        neutron:
-          encoding: 'utf8'
-          users:
-          - name: 'neutron'
-            password: '${_param:mysql_neutron_password}'
-            host: '%'
-            rights: 'all'
-          - name: 'neutron'
-            password: '${_param:mysql_neutron_password}'
-            host: '${_param:cluster_local_address}'
-            rights: 'all'
+++ /dev/null
-parameters:
-  mysql:
-    server:
-      database:
-        nova:
-          encoding: utf8
-          users:
-          - name: nova
-            password: ${_param:mysql_nova_password}
-            host: '%'
-            rights: all
-          - name: nova
-            password: ${_param:mysql_nova_password}
-            host: ${_param:cluster_local_address}
-            rights: all
-        nova_api:
-          encoding: utf8
-          users:
-          - name: nova
-            password: ${_param:mysql_nova_password}
-            host: '%'
-            rights: all
-          - name: nova
-            password: ${_param:mysql_nova_password}
-            host: ${_param:cluster_local_address}
-            rights: all
-        nova_cell0:
-          encoding: utf8
-          users:
-          - name: nova
-            password: ${_param:mysql_nova_password}
-            host: '%'
-            rights: all
-          - name: nova
-            password: ${_param:mysql_nova_password}
-            host: ${_param:cluster_local_address}
-            rights: all
+++ /dev/null
-classes:
-- service.glance.control.cluster
-- service.keepalived.cluster.single
-- service.haproxy.proxy.single
-- system.haproxy.proxy.listen.openstack.glance
-parameters:
-  glance:
-    server:
-      enabled: true
-      version: ${_param:glance_version}
-      workers: 8
-      database:
-        engine: mysql
-        host: ${_param:openstack_database_address}
-        port: 3306
-        name: glance
-        user: glance
-        password: ${_param:mysql_glance_password}
-      registry:
-        host: ${_param:cluster_vip_address}
-        port: 9191
-      bind:
-        address: ${_param:cluster_local_address}
-        port: 9292
-      identity:
-        engine: keystone
-        host: ${_param:cluster_vip_address}
-        port: 35357
-        user: glance
-        password: ${_param:keystone_glance_password}
-        region: ${_param:openstack_region}
-        tenant: service
-      message_queue:
-        engine: rabbitmq
-        port: 5672
-        user: openstack
-        password: ${_param:rabbitmq_openstack_password}
-        virtual_host: '/openstack'
-        members:
-        - host: ${_param:openstack_message_queue_node01_address}
-        - host: ${_param:openstack_message_queue_node02_address}
-        - host: ${_param:openstack_message_queue_node03_address}
-      storage:
-        engine: file
-        images: []
\ No newline at end of file
+++ /dev/null
-parameters:
-  glance:
-    server:
-      audit:
-        enabled: true
+++ /dev/null
-parameters:
-  glance:
-    server:
-      notification:
-        driver: messagingv2
-        topics: "${_param:openstack_notification_topics}"
+++ /dev/null
-classes:
-- service.glance.control.single
-parameters:
-  glance:
-    server:
-      database:
-        host: ${_param:single_address}
+++ /dev/null
-parameters:
-  glance:
-    server:
-      storage:
-        engine: rbd,http
-        user: ${_param:glance_storage_user}
-        pool: ${_param:glance_storage_pool}
-        chunk_size: 8
\ No newline at end of file
+++ /dev/null
-classes:
-- service.glusterfs.client
\ No newline at end of file
+++ /dev/null
-parameters:
-  _param:
-    aptly_glusterfs_service_host: ${_param:glusterfs_service_host}
-    glusterfs_node01_address: ${_param:cluster_node01_address}
-    glusterfs_node02_address: ${_param:cluster_node02_address}
-    glusterfs_node03_address: ${_param:cluster_node03_address}
-  glusterfs:
-    client:
-      volumes:
-        aptly:
-          path: /srv/volumes/aptly
-          server: ${_param:aptly_glusterfs_service_host}
-          opts: "defaults,backup-volfile-servers=${_param:glusterfs_node01_address}:${_param:glusterfs_node02_address}:${_param:glusterfs_node03_address}"
+++ /dev/null
-parameters:
-  _param:
-    artifactory_glusterfs_service_host: ${_param:glusterfs_service_host}
-    glusterfs_node01_address: ${_param:cluster_node01_address}
-    glusterfs_node02_address: ${_param:cluster_node02_address}
-    glusterfs_node03_address: ${_param:cluster_node03_address}
-  glusterfs:
-    client:
-      volumes:
-        artifactory:
-          path: /srv/volumes/artifactory
-          server: ${_param:artifactory_glusterfs_service_host}
-          opts: "defaults,backup-volfile-servers=${_param:glusterfs_node01_address}:${_param:glusterfs_node02_address}:${_param:glusterfs_node03_address}"
+++ /dev/null
-parameters:
-  _param:
-    devops_portal_glusterfs_service_host: ${_param:glusterfs_service_host}
-    glusterfs_node01_address: ${_param:cluster_node01_address}
-    glusterfs_node02_address: ${_param:cluster_node02_address}
-    glusterfs_node03_address: ${_param:cluster_node03_address}
-  glusterfs:
-    client:
-      volumes:
-        devops_portal:
-          path: /srv/volumes/devops_portal
-          server: ${_param:devops_portal_glusterfs_service_host}
-          opts: "defaults,backup-volfile-servers=${_param:glusterfs_node01_address}:${_param:glusterfs_node02_address}:${_param:glusterfs_node03_address}"
+++ /dev/null
-parameters:
-  _param:
-    elasticsearch_glusterfs_service_host: ${_param:glusterfs_service_host}
-    glusterfs_node01_address: ${_param:cluster_node01_address}
-    glusterfs_node02_address: ${_param:cluster_node02_address}
-    glusterfs_node03_address: ${_param:cluster_node03_address}
-  glusterfs:
-    client:
-      volumes:
-        elasticsearch:
-          path: /srv/volumes/elasticsearch
-          server: ${_param:elasticsearch_glusterfs_service_host}
-          opts: "defaults,backup-volfile-servers=${_param:glusterfs_node01_address}:${_param:glusterfs_node02_address}:${_param:glusterfs_node03_address}"
+++ /dev/null
-parameters:
-  _param:
-    gerrit_glusterfs_service_host: ${_param:glusterfs_service_host}
-    glusterfs_node01_address: ${_param:cluster_node01_address}
-    glusterfs_node02_address: ${_param:cluster_node02_address}
-    glusterfs_node03_address: ${_param:cluster_node03_address}
-  glusterfs:
-    client:
-      volumes:
-        gerrit:
-          path: /srv/volumes/gerrit
-          server: ${_param:gerrit_glusterfs_service_host}
-          opts: "defaults,backup-volfile-servers=${_param:glusterfs_node01_address}:${_param:glusterfs_node02_address}:${_param:glusterfs_node03_address}"
-          user: 1000
-          group: 1000
+++ /dev/null
-parameters:
-  _param:
-    glance_glusterfs_service_host: ${_param:glusterfs_service_host}
-    glusterfs_node01_address: ${_param:cluster_node01_address}
-    glusterfs_node02_address: ${_param:cluster_node02_address}
-    glusterfs_node03_address: ${_param:cluster_node03_address}
-  glusterfs:
-    client:
-      volumes:
-        glance:
-          path: /var/lib/glance/images
-          server: ${_param:glance_glusterfs_service_host}
-          user: glance
-          group: glance
-          opts: "defaults,backup-volfile-servers=${_param:glusterfs_node01_address}:${_param:glusterfs_node02_address}:${_param:glusterfs_node03_address}"
+++ /dev/null
-parameters:
-  _param:
-    jenkins_glusterfs_service_host: ${_param:glusterfs_service_host}
-    glusterfs_node01_address: ${_param:cluster_node01_address}
-    glusterfs_node02_address: ${_param:cluster_node02_address}
-    glusterfs_node03_address: ${_param:cluster_node03_address}
-  glusterfs:
-    client:
-      volumes:
-        jenkins:
-          path: /srv/volumes/jenkins
-          server: ${_param:jenkins_glusterfs_service_host}
-          opts: "defaults,backup-volfile-servers=${_param:glusterfs_node01_address}:${_param:glusterfs_node02_address}:${_param:glusterfs_node03_address}"
-          user: 1000
-          group: 1000
+++ /dev/null
-parameters:
-  _param:
-    keystone_glusterfs_service_host: ${_param:glusterfs_service_host}
-    glusterfs_node01_address: ${_param:cluster_node01_address}
-    glusterfs_node02_address: ${_param:cluster_node02_address}
-    glusterfs_node03_address: ${_param:cluster_node03_address}
-  glusterfs:
-    client:
-      volumes:
-        keystone-keys:
-          path: /var/lib/keystone/fernet-keys
-          server: ${_param:keystone_glusterfs_service_host}
-          user: keystone
-          group: keystone
-          opts: "defaults,backup-volfile-servers=${_param:glusterfs_node01_address}:${_param:glusterfs_node02_address}:${_param:glusterfs_node03_address}"
+++ /dev/null
-parameters:
-  _param:
-    mysql_glusterfs_service_host: ${_param:glusterfs_service_host}
-    glusterfs_node01_address: ${_param:cluster_node01_address}
-    glusterfs_node02_address: ${_param:cluster_node02_address}
-    glusterfs_node03_address: ${_param:cluster_node03_address}
-  glusterfs:
-    client:
-      volumes:
-        mysql:
-          path: /srv/volumes/mysql
-          server: ${_param:mysql_glusterfs_service_host}
-          opts: "defaults,backup-volfile-servers=${_param:glusterfs_node01_address}:${_param:glusterfs_node02_address}:${_param:glusterfs_node03_address}"
+++ /dev/null
-parameters:
-  _param:
-    openldap_glusterfs_service_host: ${_param:glusterfs_service_host}
-    glusterfs_node01_address: ${_param:cluster_node01_address}
-    glusterfs_node02_address: ${_param:cluster_node02_address}
-    glusterfs_node03_address: ${_param:cluster_node03_address}
-  glusterfs:
-    client:
-      volumes:
-        openldap:
-          path: /srv/volumes/openldap
-          server: ${_param:openldap_glusterfs_service_host}
-          opts: "defaults,backup-volfile-servers=${_param:glusterfs_node01_address}:${_param:glusterfs_node02_address}:${_param:glusterfs_node03_address}"
+++ /dev/null
-parameters:
-  _param:
-    postgresql_glusterfs_service_host: ${_param:glusterfs_service_host}
-    glusterfs_node01_address: ${_param:cluster_node01_address}
-    glusterfs_node02_address: ${_param:cluster_node02_address}
-    glusterfs_node03_address: ${_param:cluster_node03_address}
-  glusterfs:
-    client:
-      volumes:
-        postgresql:
-          path: /srv/volumes/postgresql
-          server: ${_param:postgresql_glusterfs_service_host}
-          opts: "defaults,backup-volfile-servers=${_param:glusterfs_node01_address}:${_param:glusterfs_node02_address}:${_param:glusterfs_node03_address}"
+++ /dev/null
-parameters:
-  _param:
-    prometheus_glusterfs_service_host: ${_param:glusterfs_service_host}
-    glusterfs_node01_address: ${_param:cluster_node01_address}
-    glusterfs_node02_address: ${_param:cluster_node02_address}
-    glusterfs_node03_address: ${_param:cluster_node03_address}
-  glusterfs:
-    client:
-      volumes:
-        prometheus:
-          path: /srv/volumes/prometheus
-          server: ${_param:prometheus_glusterfs_service_host}
-          opts: "defaults,backup-volfile-servers=${_param:glusterfs_node01_address}:${_param:glusterfs_node02_address}:${_param:glusterfs_node03_address}"
+++ /dev/null
-parameters:
-  _param:
-    pushkin_glusterfs_service_host: ${_param:glusterfs_service_host}
-    glusterfs_node01_address: ${_param:cluster_node01_address}
-    glusterfs_node02_address: ${_param:cluster_node02_address}
-    glusterfs_node03_address: ${_param:cluster_node03_address}
-  glusterfs:
-    client:
-      volumes:
-        pushkin:
-          path: /srv/volumes/pushkin
-          server: ${_param:pushkin_glusterfs_service_host}
-          opts: "defaults,backup-volfile-servers=${_param:glusterfs_node01_address}:${_param:glusterfs_node02_address}:${_param:glusterfs_node03_address}"
+++ /dev/null
-parameters:
-  _param:
-    registry_glusterfs_service_host: ${_param:glusterfs_service_host}
-    glusterfs_node01_address: ${_param:cluster_node01_address}
-    glusterfs_node02_address: ${_param:cluster_node02_address}
-    glusterfs_node03_address: ${_param:cluster_node03_address}
-  glusterfs:
-    client:
-      volumes:
-        registry:
-          path: /srv/volumes/registry
-          server: ${_param:registry_glusterfs_service_host}
-          opts: "defaults,backup-volfile-servers=${_param:glusterfs_node01_address}:${_param:glusterfs_node02_address}:${_param:glusterfs_node03_address}"
+++ /dev/null
-parameters:
-  _param:
-    rundeck_glusterfs_service_host: ${_param:glusterfs_service_host}
-    glusterfs_node01_address: ${_param:cluster_node01_address}
-    glusterfs_node02_address: ${_param:cluster_node02_address}
-    glusterfs_node03_address: ${_param:cluster_node03_address}
-  glusterfs:
-    client:
-      volumes:
-        rundeck:
-          path: /srv/volumes/rundeck
-          server: ${_param:rundeck_glusterfs_service_host}
-          opts: "defaults,backup-volfile-servers=${_param:glusterfs_node01_address}:${_param:glusterfs_node02_address}:${_param:glusterfs_node03_address}"
+++ /dev/null
-parameters:
-  _param:
-    salt_glusterfs_service_host: ${_param:glusterfs_service_host}
-    glusterfs_node01_address: ${_param:cluster_node01_address}
-    glusterfs_node02_address: ${_param:cluster_node02_address}
-    glusterfs_node03_address: ${_param:cluster_node03_address}
-  glusterfs:
-    client:
-      volumes:
-        saltmaster:
-          path: /etc/salt/pki/master
-          server: ${_param:salt_glusterfs_service_host}
-          opts: "defaults,backup-volfile-servers=${_param:glusterfs_node01_address}:${_param:glusterfs_node02_address}:${_param:glusterfs_node03_address}"
+++ /dev/null
-parameters:
-  _param:
-    salt_glusterfs_service_host: ${_param:glusterfs_service_host}
-    glusterfs_node01_address: ${_param:cluster_node01_address}
-    glusterfs_node02_address: ${_param:cluster_node02_address}
-    glusterfs_node03_address: ${_param:cluster_node03_address}
-  glusterfs:
-    client:
-      volumes:
-        salt_pki:
-          path: /srv/salt/pki
-          server: ${_param:salt_glusterfs_service_host}
-          opts: "defaults,backup-volfile-servers=${_param:glusterfs_node01_address}:${_param:glusterfs_node02_address}:${_param:glusterfs_node03_address}"
+++ /dev/null
-parameters:
-  _param:
-    security_monkey_glusterfs_service_host: ${_param:glusterfs_service_host}
-    glusterfs_node01_address: ${_param:cluster_node01_address}
-    glusterfs_node02_address: ${_param:cluster_node02_address}
-    glusterfs_node03_address: ${_param:cluster_node03_address}
-  glusterfs:
-    client:
-      volumes:
-        security_monkey:
-          path: /srv/volumes/security_monkey
-          server: ${_param:security_monkey_glusterfs_service_host}
-          opts: "defaults,backup-volfile-servers=${_param:glusterfs_node01_address}:${_param:glusterfs_node02_address}:${_param:glusterfs_node03_address}"
+++ /dev/null
-classes:
-- service.glusterfs.server
+++ /dev/null
-classes:
-- service.glusterfs.server
-parameters:
-  glusterfs:
-    _support:
-      iptables:
-        enabled: false
-    server:
-      peers:
-      - ${_param:cluster_node01_address}
-      - ${_param:cluster_node02_address}
-      - ${_param:cluster_node03_address}
+++ /dev/null
-parameters:
-  glusterfs:
-    server:
-      volumes:
-        aptly:
-          storage: /srv/glusterfs/aptly
-          replica: 3
-          bricks:
-          - ${_param:cluster_node01_address}:/srv/glusterfs/aptly
-          - ${_param:cluster_node02_address}:/srv/glusterfs/aptly
-          - ${_param:cluster_node03_address}:/srv/glusterfs/aptly
-          options:
-            cluster.readdir-optimize: On
-            nfs.disable: On
-            network.remote-dio: On
-            diagnostics.client-log-level: WARNING
-            diagnostics.brick-log-level: WARNING
+++ /dev/null
-parameters:
-  glusterfs:
-    server:
-      volumes:
-        artifactory:
-          storage: /srv/glusterfs/artifactory
-          replica: 3
-          bricks:
-          - ${_param:cluster_node01_address}:/srv/glusterfs/artifactory
-          - ${_param:cluster_node02_address}:/srv/glusterfs/artifactory
-          - ${_param:cluster_node03_address}:/srv/glusterfs/artifactory
-          options:
-            cluster.readdir-optimize: On
-            nfs.disable: On
-            network.remote-dio: On
-            diagnostics.client-log-level: WARNING
-            diagnostics.brick-log-level: WARNING
+++ /dev/null
-parameters:
-  glusterfs:
-    server:
-      volumes:
-        devops_portal:
-          storage: /srv/glusterfs/devops_portal
-          replica: 3
-          bricks:
-          - ${_param:cluster_node01_address}:/srv/glusterfs/devops_portal
-          - ${_param:cluster_node02_address}:/srv/glusterfs/devops_portal
-          - ${_param:cluster_node03_address}:/srv/glusterfs/devops_portal
-          options:
-            cluster.readdir-optimize: On
-            nfs.disable: On
-            network.remote-dio: On
-            diagnostics.client-log-level: WARNING
-            diagnostics.brick-log-level: WARNING
+++ /dev/null
-parameters:
-  glusterfs:
-    server:
-      volumes:
-        elasticsearch:
-          storage: /srv/glusterfs/elasticsearch
-          replica: 3
-          bricks:
-          - ${_param:cluster_node01_address}:/srv/glusterfs/elasticsearch
-          - ${_param:cluster_node02_address}:/srv/glusterfs/elasticsearch
-          - ${_param:cluster_node03_address}:/srv/glusterfs/elasticsearch
-          options:
-            cluster.readdir-optimize: On
-            nfs.disable: On
-            network.remote-dio: On
-            diagnostics.client-log-level: WARNING
-            diagnostics.brick-log-level: WARNING
+++ /dev/null
-parameters:
-  glusterfs:
-    server:
-      volumes:
-        gerrit:
-          storage: /srv/glusterfs/gerrit
-          replica: 3
-          bricks:
-          - ${_param:cluster_node01_address}:/srv/glusterfs/gerrit
-          - ${_param:cluster_node02_address}:/srv/glusterfs/gerrit
-          - ${_param:cluster_node03_address}:/srv/glusterfs/gerrit
-          options:
-            cluster.readdir-optimize: On
-            nfs.disable: On
-            network.remote-dio: On
-            diagnostics.client-log-level: WARNING
-            diagnostics.brick-log-level: WARNING
+++ /dev/null
-parameters:
-  glusterfs:
-    server:
-      volumes:
-        glance:
-          storage: /srv/glusterfs/glance
-          replica: 3
-          bricks:
-          - ${_param:cluster_node01_address}:/srv/glusterfs/glance
-          - ${_param:cluster_node02_address}:/srv/glusterfs/glance
-          - ${_param:cluster_node03_address}:/srv/glusterfs/glance
-          options:
-            cluster.readdir-optimize: On
-            nfs.disable: On
-            network.remote-dio: On
-            diagnostics.client-log-level: WARNING
-            diagnostics.brick-log-level: WARNING
\ No newline at end of file
+++ /dev/null
-parameters:
-  glusterfs:
-    server:
-      volumes:
-        jenkins:
-          storage: /srv/glusterfs/jenkins
-          replica: 3
-          bricks:
-          - ${_param:cluster_node01_address}:/srv/glusterfs/jenkins
-          - ${_param:cluster_node02_address}:/srv/glusterfs/jenkins
-          - ${_param:cluster_node03_address}:/srv/glusterfs/jenkins
-          options:
-            cluster.readdir-optimize: On
-            nfs.disable: On
-            network.remote-dio: On
-            diagnostics.client-log-level: WARNING
-            diagnostics.brick-log-level: WARNING
+++ /dev/null
-parameters:
-  glusterfs:
-    server:
-      volumes:
-        keystone-keys:
-          storage: /srv/glusterfs/keystone-keys
-          replica: 3
-          bricks:
-          - ${_param:cluster_node01_address}:/srv/glusterfs/keystone-keys
-          - ${_param:cluster_node02_address}:/srv/glusterfs/keystone-keys
-          - ${_param:cluster_node03_address}:/srv/glusterfs/keystone-keys
-          options:
-            cluster.readdir-optimize: On
-            nfs.disable: On
-            network.remote-dio: On
-            diagnostics.client-log-level: WARNING
-            diagnostics.brick-log-level: WARNING
\ No newline at end of file
+++ /dev/null
-parameters:
-  glusterfs:
-    server:
-      volumes:
-        mysql:
-          storage: /srv/glusterfs/mysql
-          replica: 3
-          bricks:
-          - ${_param:cluster_node01_address}:/srv/glusterfs/mysql
-          - ${_param:cluster_node02_address}:/srv/glusterfs/mysql
-          - ${_param:cluster_node03_address}:/srv/glusterfs/mysql
-          options:
-            cluster.readdir-optimize: On
-            nfs.disable: On
-            network.remote-dio: On
-            diagnostics.client-log-level: WARNING
-            diagnostics.brick-log-level: WARNING
\ No newline at end of file
+++ /dev/null
-parameters:
-  glusterfs:
-    server:
-      volumes:
-        openldap:
-          storage: /srv/glusterfs/openldap
-          replica: 3
-          bricks:
-          - ${_param:cluster_node01_address}:/srv/glusterfs/openldap
-          - ${_param:cluster_node02_address}:/srv/glusterfs/openldap
-          - ${_param:cluster_node03_address}:/srv/glusterfs/openldap
-          options:
-            cluster.readdir-optimize: On
-            nfs.disable: On
-            network.remote-dio: On
-            diagnostics.client-log-level: WARNING
-            diagnostics.brick-log-level: WARNING
+++ /dev/null
-parameters:
-  glusterfs:
-    server:
-      volumes:
-        postgresql:
-          storage: /srv/glusterfs/postgresql
-          replica: 3
-          bricks:
-          - ${_param:cluster_node01_address}:/srv/glusterfs/postgresql
-          - ${_param:cluster_node02_address}:/srv/glusterfs/postgresql
-          - ${_param:cluster_node03_address}:/srv/glusterfs/postgresql
-          options:
-            cluster.readdir-optimize: On
-            nfs.disable: On
-            network.remote-dio: On
-            diagnostics.client-log-level: WARNING
-            diagnostics.brick-log-level: WARNING
+++ /dev/null
-parameters:
-  glusterfs:
-    server:
-      volumes:
-        prometheus:
-          storage: /srv/glusterfs/prometheus
-          replica: 3
-          bricks:
-          - ${_param:cluster_node01_address}:/srv/glusterfs/prometheus
-          - ${_param:cluster_node02_address}:/srv/glusterfs/prometheus
-          - ${_param:cluster_node03_address}:/srv/glusterfs/prometheus
-          options:
-            cluster.readdir-optimize: On
-            nfs.disable: On
-            network.remote-dio: On
-            diagnostics.client-log-level: WARNING
-            diagnostics.brick-log-level: WARNING
+++ /dev/null
-parameters:
-  glusterfs:
-    server:
-      volumes:
-        pushkin:
-          storage: /srv/glusterfs/pushkin
-          replica: 3
-          bricks:
-          - ${_param:cluster_node01_address}:/srv/glusterfs/pushkin
-          - ${_param:cluster_node02_address}:/srv/glusterfs/pushkin
-          - ${_param:cluster_node03_address}:/srv/glusterfs/pushkin
-          options:
-            cluster.readdir-optimize: On
-            nfs.disable: On
-            network.remote-dio: On
-            diagnostics.client-log-level: WARNING
-            diagnostics.brick-log-level: WARNING
+++ /dev/null
-parameters:
-  glusterfs:
-    server:
-      volumes:
-        registry:
-          storage: /srv/glusterfs/registry
-          replica: 3
-          bricks:
-          - ${_param:cluster_node01_address}:/srv/glusterfs/registry
-          - ${_param:cluster_node02_address}:/srv/glusterfs/registry
-          - ${_param:cluster_node03_address}:/srv/glusterfs/registry
-          options:
-            cluster.readdir-optimize: On
-            nfs.disable: On
-            network.remote-dio: On
-            diagnostics.client-log-level: WARNING
-            diagnostics.brick-log-level: WARNING
+++ /dev/null
-parameters:
-  glusterfs:
-    server:
-      volumes:
-        rundeck:
-          storage: /srv/glusterfs/rundeck
-          replica: 3
-          bricks:
-          - ${_param:cluster_node01_address}:/srv/glusterfs/rundeck
-          - ${_param:cluster_node02_address}:/srv/glusterfs/rundeck
-          - ${_param:cluster_node03_address}:/srv/glusterfs/rundeck
-          options:
-            cluster.readdir-optimize: On
-            nfs.disable: On
-            network.remote-dio: On
-            diagnostics.client-log-level: WARNING
-            diagnostics.brick-log-level: WARNING
+++ /dev/null
-parameters:
-  glusterfs:
-    server:
-      volumes:
-        saltmaster:
-          storage: /srv/glusterfs/saltmaster
-          replica: 3
-          bricks:
-          - ${_param:cluster_node01_address}:/srv/glusterfs/saltmaster
-          - ${_param:cluster_node02_address}:/srv/glusterfs/saltmaster
-          - ${_param:cluster_node03_address}:/srv/glusterfs/saltmaster
-          options:
-            cluster.readdir-optimize: On
-            nfs.disable: On
-            network.remote-dio: On
-            diagnostics.client-log-level: WARNING
-            diagnostics.brick-log-level: WARNING
+++ /dev/null
-parameters:
-  glusterfs:
-    server:
-      volumes:
-        salt_pki:
-          storage: /srv/glusterfs/salt_pki
-          replica: 3
-          bricks:
-          - ${_param:cluster_node01_address}:/srv/glusterfs/salt_pki
-          - ${_param:cluster_node02_address}:/srv/glusterfs/salt_pki
-          - ${_param:cluster_node03_address}:/srv/glusterfs/salt_pki
-          options:
-            cluster.readdir-optimize: On
-            nfs.disable: On
-            network.remote-dio: On
-            diagnostics.client-log-level: WARNING
-            diagnostics.brick-log-level: WARNING
+++ /dev/null
-parameters:
-  glusterfs:
-    server:
-      volumes:
-        security_monkey:
-          storage: /srv/glusterfs/security_monkey
-          replica: 3
-          bricks:
-          - ${_param:cluster_node01_address}:/srv/glusterfs/security_monkey
-          - ${_param:cluster_node02_address}:/srv/glusterfs/security_monkey
-          - ${_param:cluster_node03_address}:/srv/glusterfs/security_monkey
-          options:
-            cluster.readdir-optimize: On
-            nfs.disable: On
-            network.remote-dio: On
-            diagnostics.client-log-level: WARNING
-            diagnostics.brick-log-level: WARNING
+++ /dev/null
-parameters:
-  _param:
-    haproxy_aptly_api_bind_host: ${_param:haproxy_bind_address}
-    haproxy_aptly_api_bind_port: 8084
-    haproxy_aptly_public_bind_host: ${_param:haproxy_bind_address}
-    haproxy_aptly_public_bind_port: 8085
-  haproxy:
-    proxy:
-      listen:
-        aptly-api:
-          mode: http
-          options:
-          - forwardfor
-          - httpclose
-          - httplog
-          balance: source
-          binds:
-          - address: ${_param:haproxy_aptly_api_bind_host}
-            port: ${_param:haproxy_aptly_api_bind_port}
-          servers:
-          - name: ${_param:cluster_node01_name}
-            host: ${_param:cluster_node01_address}
-            port: 18084
-            params: check
-          - name: ${_param:cluster_node02_name}
-            host: ${_param:cluster_node02_address}
-            port: 18084
-            params: backup check
-          - name: ${_param:cluster_node03_name}
-            host: ${_param:cluster_node03_address}
-            port: 18084
-            params: backup check
-        aptly-public:
-          mode: http
-          options:
-          - forwardfor
-          - httpclose
-          - httplog
-          balance: source
-          binds:
-          - address: ${_param:haproxy_aptly_public_bind_host}
-            port: ${_param:haproxy_aptly_public_bind_port}
-          servers:
-          - name: ${_param:cluster_node01_name}
-            host: ${_param:cluster_node01_address}
-            port: 18085
-            params: check
-          - name: ${_param:cluster_node02_name}
-            host: ${_param:cluster_node02_address}
-            port: 18085
-            params: check
-          - name: ${_param:cluster_node03_name}
-            host: ${_param:cluster_node03_address}
-            port: 18085
-            params: check
+++ /dev/null
-parameters:
-  _param:
-    haproxy_gerrit_bind_host: ${_param:haproxy_bind_address}
-    haproxy_gerrit_bind_port: 8080
-    haproxy_gerrit_ssh_bind_host: ${_param:haproxy_gerrit_bind_host}
-    haproxy_gerrit_ssh_bind_port: 29418
-    haproxy_gerrit_ssl:
-      enabled: false
-  haproxy:
-    proxy:
-      listen:
-        gerrit:
-          mode: http
-          options:
-          - forwardfor
-          - httpchk
-          - httpclose
-          - httplog
-          balance: source
-          http_request:
-          - action: "add-header X-Forwarded-Proto https"
-            condition: "if { ssl_fc }"
-          binds:
-          - address: ${_param:haproxy_gerrit_bind_host}
-            port: ${_param:haproxy_gerrit_bind_port}
-            ssl: ${_param:haproxy_gerrit_ssl}
-          servers:
-          - name: ${_param:cluster_node01_name}
-            host: ${_param:cluster_node01_address}
-            port: 18083
-            params: check
-          - name: ${_param:cluster_node02_name}
-            host: ${_param:cluster_node02_address}
-            port: 18083
-            params: backup check
-          - name: ${_param:cluster_node03_name}
-            host: ${_param:cluster_node03_address}
-            port: 18083
-            params: backup check
-        gerrit_ssh:
-          mode: tcp
-          balance: source
-          binds:
-          - address: ${_param:haproxy_gerrit_ssh_bind_host}
-            port: ${_param:haproxy_gerrit_ssh_bind_port}
-          servers:
-          - name: ${_param:cluster_node01_name}
-            host: ${_param:cluster_node01_address}
-            port: 29417
-            params: check
-          - name: ${_param:cluster_node02_name}
-            host: ${_param:cluster_node02_address}
-            port: 29417
-            params: backup check
-          - name: ${_param:cluster_node03_name}
-            host: ${_param:cluster_node03_address}
-            port: 29417
-            params: backup check
+++ /dev/null
-parameters:
-  _param:
-    haproxy_jenkins_bind_host: ${_param:haproxy_bind_address}
-    haproxy_jenkins_bind_port: 8081
-    haproxy_jenkins_jnlp_bind_host: ${_param:haproxy_jenkins_bind_host}
-    haproxy_jenkins_jnlp_bind_port: 50000
-    haproxy_jenkins_ssl:
-      enabled: false
-  haproxy:
-    proxy:
-      listen:
-        jenkins:
-          mode: http
-          options:
-          - forwardfor
-#          - httpchk
-          - httpclose
-          - httplog
-          balance: source
-          http_request:
-          - action: "add-header X-Forwarded-Proto https"
-            condition: "if { ssl_fc }"
-          http_response:
-          - action: "del-header X-Frame-Options"
-          binds:
-          - address: ${_param:haproxy_jenkins_bind_host}
-            port: ${_param:haproxy_jenkins_bind_port}
-            ssl: ${_param:haproxy_jenkins_ssl}
-          servers:
-          - name: ${_param:cluster_node01_name}
-            host: ${_param:cluster_node01_address}
-            port: 18081
-            params: check
-          - name: ${_param:cluster_node02_name}
-            host: ${_param:cluster_node02_address}
-            port: 18081
-            params: backup check
-          - name: ${_param:cluster_node03_name}
-            host: ${_param:cluster_node03_address}
-            port: 18081
-            params: backup check
-        jenkins_jnlp:
-          mode: tcp
-          balance: source
-          binds:
-          - address: ${_param:haproxy_jenkins_jnlp_bind_host}
-            port: ${_param:haproxy_jenkins_jnlp_bind_port}
-          servers:
-          - name: ${_param:cluster_node01_name}
-            host: ${_param:cluster_node01_address}
-            port: 50001
-            params: check
-          - name: ${_param:cluster_node02_name}
-            host: ${_param:cluster_node02_address}
-            port: 50001
-            params: backup check
-          - name: ${_param:cluster_node03_name}
-            host: ${_param:cluster_node03_address}
-            port: 50001
-            params: backup check
+++ /dev/null
-parameters:
-  _param:
-    haproxy_docker_registry_listen_host: ${_param:haproxy_bind_address}
-    haproxy_docker_registry_listen_port: 5000
-    haproxy_docker_registry_ssl:
-      enabled: false
-  haproxy:
-    proxy:
-      listen:
-        registry:
-          mode: http
-          options:
-          - forwardfor
-          - httpclose
-          - httplog
-          balance: source
-          http_request:
-          - action: "add-header X-Forwarded-Proto https"
-            condition: "if { ssl_fc }"
-          binds:
-          - address: ${_param:haproxy_docker_registry_listen_host}
-            port: ${_param:haproxy_docker_registry_listen_port}
-            ssl: ${_param:haproxy_docker_registry_ssl}
-          servers:
-          - name: ${_param:cluster_node01_name}
-            host: ${_param:cluster_node01_address}
-            port: 15000
-            params: check
-          - name: ${_param:cluster_node02_name}
-            host: ${_param:cluster_node02_address}
-            port: 15000
-            params: check
-          - name: ${_param:cluster_node03_name}
-            host: ${_param:cluster_node03_address}
-            port: 15000
-            params: check
+++ /dev/null
-parameters:
-  _param:
-    haproxy_docker_visualizer_listen_port: 8091
-  haproxy:
-    proxy:
-      listen:
-        visualizer:
-          mode: http
-          options:
-          - forwardfor
-          - httpchk
-          - httpclose
-          - httplog
-          balance: source
-          binds:
-          - address: ${_param:haproxy_bind_address}
-            port: ${_param:haproxy_docker_visualizer_listen_port}
-          servers:
-          - name: ${_param:cluster_node01_name}
-            host: ${_param:cluster_node01_address}
-            port: 18090
-            params: check
-          - name: ${_param:cluster_node02_name}
-            host: ${_param:cluster_node02_address}
-            port: 18090
-            params: check
-          - name: ${_param:cluster_node03_name}
-            host: ${_param:cluster_node03_address}
-            port: 18090
-            params: check
+++ /dev/null
-parameters:
-  haproxy:
-    proxy:
-      listen:
-        elasticsearch:
-          mode: http
-          service_name: elasticsearch
-          options:
-          - forwardfor
-          - httpchk
-          - httpclose
-          - httplog
-          balance: roundrobin
-          binds:
-          - address: ${_param:cluster_vip_address}
-            port: 9200
-          servers:
-          - name: ${_param:cluster_node01_hostname}
-            host: ${_param:cluster_node01_address}
-            port: 9200
-            params: check
-          - name: ${_param:cluster_node02_hostname}
-            host: ${_param:cluster_node02_address}
-            port: 9200
-            params: check
-          - name: ${_param:cluster_node03_hostname}
-            host: ${_param:cluster_node03_address}
-            port: 9200
-            params: check
+++ /dev/null
-parameters:
-  haproxy:
-    proxy:
-      listen:
-        kibana:
-          mode: http
-          service_name: kibana
-          options:
-          - forwardfor
-#          - httpchk
-          - httpclose
-          - httplog
-          balance: source
-          binds:
-          - address: ${_param:cluster_vip_address}
-            port: 80
-          servers:
-          - name: ${_param:cluster_node01_hostname}
-            host: ${_param:cluster_node01_address}
-            port: 5601
-            params: check
-          - name: ${_param:cluster_node02_hostname}
-            host: ${_param:cluster_node02_address}
-            port: 5601
-            params: check
-          - name: ${_param:cluster_node03_hostname}
-            host: ${_param:cluster_node03_address}
-            port: 5601
-            params: check
+++ /dev/null
-parameters:
-  haproxy:
-    proxy:
-      listen:
-        k8s_secure:
-          type: kubernetes
-          options:
-          - ssl-hello-chk
-          binds:
-          - address: ${_param:cluster_vip_address}
-            port: 443
-          servers:
-          - name: ${_param:cluster_node01_hostname}
-            host: ${_param:cluster_node01_address}
-            port: 443
-            params: check
-          - name: ${_param:cluster_node02_hostname}
-            host: ${_param:cluster_node02_address}
-            port: 443
-            params: check
-          - name: ${_param:cluster_node03_hostname}
-            host: ${_param:cluster_node03_address}
-            port: 443
-            params: check
\ No newline at end of file
+++ /dev/null
-parameters:
-  haproxy:
-    proxy:
-      listen:
-        k8s_cluster:
-          type: kubernetes
-          binds:
-          - address: ${_param:cluster_vip_address}
-            port: 8080
-          servers:
-          - name: ${_param:cluster_node01_hostname}
-            host: ${_param:cluster_node01_address}
-            port: 8080
-            params: check
-          - name: ${_param:cluster_node02_hostname}
-            host: ${_param:cluster_node02_address}
-            port: 8080
-            params: check
-          - name: ${_param:cluster_node03_hostname}
-            host: ${_param:cluster_node03_address}
-            port: 8080
-            params: check
-        k8s_cluster_localhost:
-          type: kubernetes
-          binds:
-          - address: localhost
-            port: 8080
-          servers:
-          - name: ${_param:cluster_node01_hostname}
-            host: ${_param:cluster_node01_address}
-            port: 8080
-            params: check
-          - name: ${_param:cluster_node02_hostname}
-            host: ${_param:cluster_node02_address}
-            port: 8080
-            params: check
-          - name: ${_param:cluster_node03_hostname}
-            host: ${_param:cluster_node03_address}
-            port: 8080
-            params: check
\ No newline at end of file
+++ /dev/null
-parameters:
-  haproxy:
-    proxy:
-      listen:
-        etcd_cluster:
-          type: etcd
-          binds:
-          - address: ${_param:cluster_vip_address}
-            port: 4001
-          servers:
-          - name: etc01
-            host: ${_param:cluster_node01_address}
-            port: 4001
-            params: check
-          - name: etc02
-            host: ${_param:cluster_node02_address}
-            port: 4001
-            params: backup check
-          - name: etc03
-            host: ${_param:cluster_node03_address}
-            port: 4001
-            params: backup check
\ No newline at end of file
+++ /dev/null
-classes:
-  - service.haproxy.proxy.single
-  - service.haproxy.proxy.stats
-  # Services
-  - system.haproxy.proxy._kibana
-  - system.haproxy.proxy._elasticsearch
+++ /dev/null
-classes:
-  - service.haproxy.proxy.single
-  - service.haproxy.proxy.stats
-  # Services
-  - system.haproxy.proxy._rabbitmq
-  - system.haproxy.proxy._uchiwa
-  - system.haproxy.proxy._sensu
-  - system.haproxy.proxy._redis
+++ /dev/null
-parameters:
-  _param:
-    haproxy_mysql_bind_port: 3306
-    haproxy_mysql_source_port: 3306
-  haproxy:
-    proxy:
-      listen:
-        mysql:
-          mode: tcp
-          balance: source
-          binds:
-          - address: ${_param:haproxy_bind_address}
-            port: ${_param:haproxy_mysql_bind_port}
-          servers:
-          - name: ${_param:cluster_node01_name}
-            host: ${_param:cluster_node01_address}
-            port: ${_param:haproxy_mysql_source_port}
-            params: check
-          - name: ${_param:cluster_node02_name}
-            host: ${_param:cluster_node02_address}
-            port: ${_param:haproxy_mysql_source_port}
-            params: backup check
-          - name: ${_param:cluster_node03_name}
-            host: ${_param:cluster_node03_address}
-            port: ${_param:haproxy_mysql_source_port}
-            params: backup check
+++ /dev/null
-parameters:
-  haproxy:
-    proxy:
-      listen:
-        contrail_analytics:
-          type: contrail-analytics
-          service_name: contrail
-          binds:
-          - address: ${_param:cluster_vip_address}
-            port: 8081
-          servers:
-          - name: nal01
-            host: ${_param:cluster_node01_address}
-            port: 9081
-            params: check inter 2000 rise 2 fall 3
-          - name: nal02
-            host: ${_param:cluster_node02_address}
-            port: 9081
-            params: check inter 2000 rise 2 fall 3
-          - name: nal03
-            host: ${_param:cluster_node03_address}
-            port: 9081
-            params: check inter 2000 rise 2 fall 3
-        contrail_config_stats:
-          type: contrail-config
-          service_name: contrail
-          check: false
-          format: listen
-          binds:
-          - address: '*'
-            port: 5937
-          user: haproxy
-          password: ${_param:opencontrail_stats_password}
-        contrail_openstack_stats:
-          type: contrail-config
-          service_name: contrail
-          check: false
-          format: listen
-          binds:
-          - address: '*'
-            port: 5936
-          user: haproxy
-          password: ${_param:opencontrail_stats_password}
-        contrail_collector_stats:
-          type: contrail-config
-          service_name: contrail
-          check: false
-          format: listen
-          binds:
-          - address: '*'
-            port: 5938
-          user: haproxy
-          password: ${_param:opencontrail_stats_password}
+++ /dev/null
-parameters:
-  haproxy:
-    proxy:
-      listen:
-        contrail_api:
-          type: contrail-api
-          service_name: contrail
-          binds:
-          - address: ${_param:cluster_vip_address}
-            port: 8082
-          servers:
-          - name: ntw01
-            host: ${_param:cluster_node01_address}
-            port: 9100
-            params: check inter 2000 rise 2 fall 3
-          - name: ntw02
-            host: ${_param:cluster_node02_address}
-            port: 9100
-            params: check inter 2000 rise 2 fall 3
-          - name: ntw03
-            host: ${_param:cluster_node03_address}
-            port: 9100
-            params: check inter 2000 rise 2 fall 3
-        contrail_discovery:
-          type: contrail-api
-          service_name: contrail
-          binds:
-          - address: ${_param:cluster_vip_address}
-            port: 5998
-          servers:
-          - name: ntw01
-            host: ${_param:cluster_node01_address}
-            port: 9110
-            params: check inter 2000 rise 2 fall 3
-          - name: ntw02
-            host: ${_param:cluster_node02_address}
-            port: 9110
-            params: check inter 2000 rise 2 fall 3
-          - name: ntw03
-            host: ${_param:cluster_node03_address}
-            port: 9110
-            params: check inter 2000 rise 2 fall 3
-        contrail_config_stats:
-          type: contrail-config
-          check: false
-          format: listen
-          binds:
-          - address: '*'
-            port: 5937
-          user: haproxy
-          password: ${_param:opencontrail_stats_password}
-        contrail_openstack_stats:
-          type: contrail-config
-          check: false
-          format: listen
-          binds:
-          - address: '*'
-            port: 5936
-          user: haproxy
-          password: ${_param:opencontrail_stats_password}
-        contrail_collector_stats:
-          type: contrail-config
-          check: false
-          format: listen
-          binds:
-          - address: '*'
-            port: 5938
-          user: haproxy
-          password: ${_param:opencontrail_stats_password}
+++ /dev/null
-parameters:
-  _param:
-    haproxy_openldap_bind_host: ${_param:haproxy_bind_address}
-    haproxy_openldap_bind_port: 389
-    haproxy_openldap_ssl_bind_port: 636
-  haproxy:
-    proxy:
-      listen:
-        openldap:
-          mode: tcp
-          balance: source
-          binds:
-          - address: ${_param:haproxy_openldap_bind_host}
-            port: ${_param:haproxy_openldap_bind_port}
-          servers:
-          - name: ${_param:cluster_node01_name}
-            host: ${_param:cluster_node01_address}
-            port: 1389
-            params: check
-          - name: ${_param:cluster_node02_name}
-            host: ${_param:cluster_node02_address}
-            port: 1389
-            params: backup check
-          - name: ${_param:cluster_node03_name}
-            host: ${_param:cluster_node03_address}
-            port: 1389
-            params: backup check
-        openldap_ssl:
-          mode: tcp
-          balance: source
-          binds:
-          - address: ${_param:haproxy_openldap_bind_host}
-            port: ${_param:haproxy_openldap_ssl_bind_port}
-          servers:
-          - name: ${_param:cluster_node01_name}
-            host: ${_param:cluster_node01_address}
-            port: 1636
-            params: check
-          - name: ${_param:cluster_node02_name}
-            host: ${_param:cluster_node02_address}
-            port: 1636
-            params: backup check
-          - name: ${_param:cluster_node03_name}
-            host: ${_param:cluster_node03_address}
-            port: 1636
-            params: backup check
+++ /dev/null
-parameters:
-  haproxy:
-    proxy:
-      listen:
-        aodh-api:
-          type: openstack-service
-          service_name: aodh
-          binds:
-          - address: ${_param:cluster_vip_address}
-            port: 8042
-          servers:
-          - name: ctl01
-            host: ${_param:cluster_node01_address}
-            port: 8042
-            params: check inter 10s fastinter 2s downinter 3s rise 3 fall 3
-          - name: ctl02
-            host: ${_param:cluster_node02_address}
-            port: 8042
-            params: check inter 10s fastinter 2s downinter 3s rise 3 fall 3
-          - name: ctl03
-            host: ${_param:cluster_node03_address}
-            port: 8042
-            params: check inter 10s fastinter 2s downinter 3s rise 3 fall 3
\ No newline at end of file
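
Note: the openstack-service listen classes above only describe the
frontend/backend layout; a cluster model includes one of them and feeds the
addresses in through _param. A minimal sketch, with the class path inferred
from the naming convention used elsewhere in this model and all addresses
illustrative:

classes:
- system.haproxy.proxy.listen.openstack.aodh
parameters:
  _param:
    cluster_vip_address: 172.16.10.254      # illustrative VIP
    cluster_node01_address: 172.16.10.101   # illustrative backends
    cluster_node02_address: 172.16.10.102
    cluster_node03_address: 172.16.10.103
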
+++ /dev/null
-parameters:
- haproxy:
- proxy:
- listen:
- ceilometer_api:
- type: general-service
- check: false
- binds:
- - address: ${_param:cluster_vip_address}
- port: 8777
- servers:
- - name: ${_param:cluster_node01_hostname}
- host: ${_param:cluster_node01_address}
- port: 8777
- params: check
- - name: ${_param:cluster_node02_hostname}
- host: ${_param:cluster_node02_address}
- port: 8777
- params: check
- - name: ${_param:cluster_node03_hostname}
- host: ${_param:cluster_node03_address}
- port: 8777
- params: check
\ No newline at end of file
+++ /dev/null
-parameters:
- haproxy:
- proxy:
- listen:
- cinder_api:
- type: openstack-service
- service_name: cinder
- binds:
- - address: ${_param:cluster_vip_address}
- port: 8776
- servers:
- - name: ctl01
- host: ${_param:cluster_node01_address}
- port: 8776
- params: check inter 10s fastinter 2s downinter 3s rise 3 fall 3
- - name: ctl02
- host: ${_param:cluster_node02_address}
- port: 8776
- params: check inter 10s fastinter 2s downinter 3s rise 3 fall 3
- - name: ctl03
- host: ${_param:cluster_node03_address}
- port: 8776
- params: check inter 10s fastinter 2s downinter 3s rise 3 fall 3
\ No newline at end of file
+++ /dev/null
-parameters:
- haproxy:
- proxy:
- listen:
- designate_api:
- type: openstack-service
- service_name: designate
- binds:
- - address: ${_param:cluster_vip_address}
- port: 9001
- servers:
- - name: ${_param:cluster_node01_hostname}
- host: ${_param:cluster_node01_address}
- port: 9001
- params: check inter 10s fastinter 2s downinter 3s rise 3 fall 3
- - name: ${_param:cluster_node02_hostname}
- host: ${_param:cluster_node02_address}
- port: 9001
- params: check inter 10s fastinter 2s downinter 3s rise 3 fall 3
+++ /dev/null
-parameters:
- haproxy:
- proxy:
- timeout:
- queue: '10s'
- connect: '10s'
- client: '10s'
- server: '10s'
- check: '10s'
- listen:
- mysql_cluster:
- type: mysql
- service_name: mysql
- binds:
- - address: ${_param:cluster_vip_address}
- port: 3306
- servers:
- - name: ${_param:cluster_node01_hostname}
- host: ${_param:cluster_node01_address}
- port: 3306
- params: check inter 20s fastinter 2s downinter 2s rise 3 fall 3
- - name: ${_param:cluster_node02_hostname}
- host: ${_param:cluster_node02_address}
- port: 3306
- params: backup check inter 20s fastinter 2s downinter 2s rise 3 fall 3
- - name: ${_param:cluster_node03_hostname}
- host: ${_param:cluster_node03_address}
- port: 3306
- params: backup check inter 20s fastinter 2s downinter 2s rise 3 fall 3
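
Note: the backup flag on the second and third servers makes HAProxy send all
MySQL traffic to node01 and fail over only when it drops, the usual
single-writer arrangement for a Galera cluster. The short 10s proxy timeouts
set here can be widened through the same override mechanism, e.g. (values
illustrative):

parameters:
  haproxy:
    proxy:
      timeout:
        client: '30m'   # e.g. long-running dumps; illustrative
        server: '30m'
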
+++ /dev/null
-parameters:
- haproxy:
- proxy:
- listen:
- glance_api:
- type: openstack-service
- service_name: glance
- binds:
- - address: ${_param:cluster_vip_address}
- port: 9292
- servers:
- - name: ctl01
- host: ${_param:cluster_node01_address}
- port: 9292
- params: check inter 10s fastinter 2s downinter 3s rise 3 fall 3
- - name: ctl02
- host: ${_param:cluster_node02_address}
- port: 9292
- params: check inter 10s fastinter 2s downinter 3s rise 3 fall 3
- - name: ctl03
- host: ${_param:cluster_node03_address}
- port: 9292
- params: check inter 10s fastinter 2s downinter 3s rise 3 fall 3
- glance_registry_api:
- type: general-service
- service_name: glance
- binds:
- - address: ${_param:cluster_vip_address}
- port: 9191
- servers:
- - name: ctl01
- host: ${_param:cluster_node01_address}
- port: 9191
- params: check
- - name: ctl02
- host: ${_param:cluster_node02_address}
- port: 9191
- params: check
- - name: ctl03
- host: ${_param:cluster_node03_address}
- port: 9191
- params: check
+++ /dev/null
-parameters:
- haproxy:
- proxy:
- listen:
- glare:
- type: general-service
- service_name: glare
- binds:
- - address: ${_param:cluster_vip_address}
- port: 9494
- servers:
- - name: ctl01
- host: ${_param:cluster_node01_address}
- port: 9494
- params: check inter 10s fastinter 2s downinter 3s rise 3 fall 3
- - name: ctl02
- host: ${_param:cluster_node02_address}
- port: 9494
- params: check inter 10s fastinter 2s downinter 3s rise 3 fall 3
- - name: ctl03
- host: ${_param:cluster_node03_address}
- port: 9494
- params: check inter 10s fastinter 2s downinter 3s rise 3 fall 3
+++ /dev/null
-parameters:
- haproxy:
- proxy:
- listen:
- heat_cloudwatch_api:
- type: openstack-service
- service_name: heat
- binds:
- - address: ${_param:cluster_vip_address}
- port: 8000
- servers:
- - name: ctl01
- host: ${_param:cluster_node01_address}
- port: 8000
- params: check inter 10s fastinter 2s downinter 3s rise 3 fall 3
- - name: ctl02
- host: ${_param:cluster_node02_address}
- port: 8000
- params: check inter 10s fastinter 2s downinter 3s rise 3 fall 3
- - name: ctl03
- host: ${_param:cluster_node03_address}
- port: 8000
- params: check inter 10s fastinter 2s downinter 3s rise 3 fall 3
- heat_api:
- type: openstack-service
- service_name: heat
- binds:
- - address: ${_param:cluster_vip_address}
- port: 8004
- servers:
- - name: ctl01
- host: ${_param:cluster_node01_address}
- port: 8004
- params: check inter 10s fastinter 2s downinter 3s rise 3 fall 3
- - name: ctl02
- host: ${_param:cluster_node02_address}
- port: 8004
- params: check inter 10s fastinter 2s downinter 3s rise 3 fall 3
- - name: ctl03
- host: ${_param:cluster_node03_address}
- port: 8004
- params: check inter 10s fastinter 2s downinter 3s rise 3 fall 3
- heat_cfn_api:
- type: openstack-service
- service_name: heat
- binds:
- - address: ${_param:cluster_vip_address}
- port: 8003
- servers:
- - name: ctl01
- host: ${_param:cluster_node01_address}
- port: 8003
- params: check inter 10s fastinter 2s downinter 3s rise 3 fall 3
- - name: ctl02
- host: ${_param:cluster_node02_address}
- port: 8003
- params: check inter 10s fastinter 2s downinter 3s rise 3 fall 3
- - name: ctl03
- host: ${_param:cluster_node03_address}
- port: 8003
- params: check inter 10s fastinter 2s downinter 3s rise 3 fall 3
\ No newline at end of file
+++ /dev/null
-parameters:
- haproxy:
- proxy:
- listen:
- horizon_web:
- type: general-service
- check: false
- binds:
- - address: ${_param:cluster_vip_address}
- port: 8078
- servers:
- - name: ctl01
- host: ${_param:cluster_node01_address}
- port: 8078
- params: check
- - name: ctl02
- host: ${_param:cluster_node02_address}
- port: 8078
- params: check
- - name: ctl03
- host: ${_param:cluster_node03_address}
- port: 8078
- params: check
\ No newline at end of file
+++ /dev/null
-parameters:
- haproxy:
- proxy:
- listen:
- keystone_public_api:
- type: openstack-service
- service_name: keystone
- binds:
- - address: ${_param:cluster_vip_address}
- port: 5000
- servers:
- - name: ctl01
- host: ${_param:cluster_node01_address}
- port: 5000
- params: check inter 10s fastinter 2s downinter 3s rise 3 fall 3
- - name: ctl02
- host: ${_param:cluster_node02_address}
- port: 5000
- params: check inter 10s fastinter 2s downinter 3s rise 3 fall 3
- - name: ctl03
- host: ${_param:cluster_node03_address}
- port: 5000
- params: check inter 10s fastinter 2s downinter 3s rise 3 fall 3
- keystone_admin_api:
- type: openstack-service
- service_name: keystone
- binds:
- - address: ${_param:cluster_vip_address}
- port: 35357
- servers:
- - name: ctl01
- host: ${_param:cluster_node01_address}
- port: 35357
- params: check inter 10s fastinter 2s downinter 3s rise 3 fall 3
- - name: ctl02
- host: ${_param:cluster_node02_address}
- port: 35357
- params: check inter 10s fastinter 2s downinter 3s rise 3 fall 3
- - name: ctl03
- host: ${_param:cluster_node03_address}
- port: 35357
- params: check inter 10s fastinter 2s downinter 3s rise 3 fall 3
\ No newline at end of file
+++ /dev/null
-parameters:
- haproxy:
- proxy:
- listen:
- murano_api:
- type: openstack-service
- check: false
- binds:
- - address: ${_param:cluster_vip_address}
- port: 8082
- servers:
- - name: ${_param:cluster_node01_hostname}
- host: ${_param:cluster_node01_address}
- port: 8082
- params: check
- - name: ${_param:cluster_node02_hostname}
- host: ${_param:cluster_node02_address}
- port: 8082
\ No newline at end of file
+++ /dev/null
-parameters:
- haproxy:
- proxy:
- listen:
- neutron_api:
- type: openstack-service
- service_name: neutron
- binds:
- - address: ${_param:cluster_vip_address}
- port: 9696
- servers:
- - name: ctl01
- host: ${_param:cluster_node01_address}
- port: 9696
- params: check inter 10s fastinter 2s downinter 3s rise 3 fall 3
- - name: ctl02
- host: ${_param:cluster_node02_address}
- port: 9696
- params: check inter 10s fastinter 2s downinter 3s rise 3 fall 3
- - name: ctl03
- host: ${_param:cluster_node03_address}
- port: 9696
- params: check inter 10s fastinter 2s downinter 3s rise 3 fall 3
\ No newline at end of file
+++ /dev/null
-parameters:
- haproxy:
- proxy:
- listen:
- nova_placement_api:
- mode: http
- binds:
- - address: ${_param:cluster_vip_address}
- port: 8778
- options:
- - httpclose
- - httplog
- health-check:
- http:
- options:
- - expect status 401
- servers:
- - name: ctl01
- host: ${_param:cluster_node01_address}
- port: 8778
- params: check inter 10s fastinter 2s downinter 3s rise 3 fall 3
- - name: ctl02
- host: ${_param:cluster_node02_address}
- port: 8778
- params: check inter 10s fastinter 2s downinter 3s rise 3 fall 3
- - name: ctl03
- host: ${_param:cluster_node03_address}
- port: 8778
- params: check inter 10s fastinter 2s downinter 3s rise 3 fall 3
+++ /dev/null
-parameters:
- haproxy:
- proxy:
- listen:
- nova_ec2_api:
- type: general-service
- service_name: nova
- check: false
- binds:
- - address: ${_param:cluster_vip_address}
- port: 8773
- servers:
- - name: ctl01
- host: ${_param:cluster_node01_address}
- port: 8773
- params: check
- - name: ctl02
- host: ${_param:cluster_node02_address}
- port: 8773
- params: check
- - name: ctl03
- host: ${_param:cluster_node03_address}
- port: 8773
- params: check
- nova_api:
- type: openstack-service
- service_name: nova
- binds:
- - address: ${_param:cluster_vip_address}
- port: 8774
- servers:
- - name: ctl01
- host: ${_param:cluster_node01_address}
- port: 8774
- params: check inter 10s fastinter 2s downinter 3s rise 3 fall 3
- - name: ctl02
- host: ${_param:cluster_node02_address}
- port: 8774
- params: check inter 10s fastinter 2s downinter 3s rise 3 fall 3
- - name: ctl03
- host: ${_param:cluster_node03_address}
- port: 8774
- params: check inter 10s fastinter 2s downinter 3s rise 3 fall 3
- nova_metadata_api:
- type: openstack-service
- binds:
- - address: ${_param:cluster_vip_address}
- port: 8775
- servers:
- - name: ctl01
- host: ${_param:cluster_node01_address}
- port: 8775
- params: check inter 10s fastinter 2s downinter 3s rise 3 fall 3
- - name: ctl02
- host: ${_param:cluster_node02_address}
- port: 8775
- params: check inter 10s fastinter 2s downinter 3s rise 3 fall 3
- - name: ctl03
- host: ${_param:cluster_node03_address}
- port: 8775
- params: check inter 10s fastinter 2s downinter 3s rise 3 fall 3
+++ /dev/null
-parameters:
- haproxy:
- proxy:
- listen:
- nova_novnc:
- type: general-service
- service_name: http
- check: true
- binds:
- - address: ${_param:cluster_vip_address}
- port: 6080
- servers:
- - name: ctl01
- host: ${_param:cluster_node01_address}
- port: 6080
- params: check
- - name: ctl02
- host: ${_param:cluster_node02_address}
- port: 6080
- params: check
- - name: ctl03
- host: ${_param:cluster_node03_address}
- port: 6080
- params: check
-
+++ /dev/null
-parameters:
- haproxy:
- proxy:
- listen:
- sahara_api:
- type: openstack-service
- options:
- - httplog
- binds:
- - address: ${_param:cluster_vip_address}
- port: 8386
- servers:
- - name: ctl01
- host: ${_param:cluster_node01_address}
- port: 8386
- params: check inter 10s fastinter 2s downinter 3s rise 3 fall 3
- - name: ctl02
- host: ${_param:cluster_node02_address}
- port: 8386
- params: check inter 10s fastinter 2s downinter 3s rise 3 fall 3
- - name: ctl03
- host: ${_param:cluster_node03_address}
- port: 8386
- params: check inter 10s fastinter 2s downinter 3s rise 3 fall 3
+++ /dev/null
-parameters:
- _param:
- haproxy_devops_portal_bind_host: ${_param:haproxy_bind_address}
- haproxy_devops_portal_bind_port: 8800
- haproxy_devops_portal_ssl:
- enabled: false
- haproxy:
- proxy:
- listen:
- devops_portal:
- mode: http
- options:
- - forwardfor
- - httpchk
- - httpclose
- - httplog
- balance: source
- http_request:
- - action: "add-header X-Forwarded-Proto https"
- condition: "if { ssl_fc }"
- binds:
- - address: ${_param:haproxy_devops_portal_bind_host}
- port: ${_param:haproxy_devops_portal_bind_port}
- ssl: ${_param:haproxy_devops_portal_ssl}
- servers:
- - name: ${_param:cluster_node01_name}
- host: ${_param:cluster_node01_address}
- port: 18800
- params: check
- - name: ${_param:cluster_node02_name}
- host: ${_param:cluster_node02_address}
- port: 18800
- params: backup check
- - name: ${_param:cluster_node03_name}
- host: ${_param:cluster_node03_address}
- port: 18800
- params: backup check
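
Note: the haproxy_*_ssl parameters used by this and the following listeners
are dictionaries handed to the formula's bind handling, with TLS off by
default. Enabling it per deployment is a one-key override; certificate
provisioning sits outside this model and is not shown:

parameters:
  _param:
    haproxy_devops_portal_ssl:
      enabled: true   # illustrative override
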
+++ /dev/null
-parameters:
- _param:
- haproxy_elasticsearch_bind_host: ${_param:haproxy_bind_address}
- haproxy_elasticsearch_bind_port: 9200
- haproxy_elasticsearch_exposed_port: 19200
- haproxy_elasticsearch_ssl:
- enabled: false
- haproxy:
- proxy:
- listen:
- elasticsearch:
- mode: http
- options:
- - forwardfor
- - httpchk
- - httpclose
- - httplog
- balance: source
- http_request:
- - action: "add-header X-Forwarded-Proto https"
- condition: "if { ssl_fc }"
- binds:
- - address: ${_param:haproxy_elasticsearch_bind_host}
- port: ${_param:haproxy_elasticsearch_bind_port}
- ssl: ${_param:haproxy_elasticsearch_ssl}
- servers:
- - name: ${_param:cluster_node01_name}
- host: ${_param:cluster_node01_address}
- port: ${_param:haproxy_elasticsearch_exposed_port}
- params: check
- - name: ${_param:cluster_node02_name}
- host: ${_param:cluster_node02_address}
- port: ${_param:haproxy_elasticsearch_exposed_port}
- params: backup check
- - name: ${_param:cluster_node03_name}
- host: ${_param:cluster_node03_address}
- port: ${_param:haproxy_elasticsearch_exposed_port}
- params: backup check
+++ /dev/null
-parameters:
- _param:
- haproxy_postgresql_bind_host: ${_param:haproxy_bind_address}
- haproxy_postgresql_bind_port: 5432
- haproxy_postgresql_exposed_port: 15432
- haproxy_postgresql_ssl:
- enabled: false
- haproxy:
- proxy:
- listen:
- postgresql:
- mode: tcp
- balance: source
- options:
- - tcp-check
- binds:
- - address: ${_param:haproxy_postgresql_bind_host}
- port: ${_param:haproxy_postgresql_bind_port}
- ssl: ${_param:haproxy_postgresql_ssl}
- servers:
- - name: ${_param:cluster_node01_name}
- host: ${_param:cluster_node01_address}
- port: ${_param:haproxy_postgresql_exposed_port}
- params: check port ${_param:haproxy_postgresql_exposed_port}
- - name: ${_param:cluster_node02_name}
- host: ${_param:cluster_node02_address}
- port: ${_param:haproxy_postgresql_exposed_port}
- params: backup check port ${_param:haproxy_postgresql_exposed_port}
- - name: ${_param:cluster_node03_name}
- host: ${_param:cluster_node03_address}
- port: ${_param:haproxy_postgresql_exposed_port}
- params: backup check port ${_param:haproxy_postgresql_exposed_port}
+++ /dev/null
-parameters:
- _param:
- haproxy_pushkin_bind_host: ${_param:haproxy_bind_address}
- haproxy_pushkin_bind_port: 8887
- haproxy_pushkin_exposed_port: 18887
- haproxy_pushkin_ssl:
- enabled: false
- haproxy:
- proxy:
- listen:
- pushkin:
- mode: http
- options:
- - httpchk GET /apps
- balance: source
- http_request:
- - action: "add-header X-Forwarded-Proto https"
- condition: "if { ssl_fc }"
- sticks:
- - http-check expect status 200
- binds:
- - address: ${_param:haproxy_pushkin_bind_host}
- port: ${_param:haproxy_pushkin_bind_port}
- ssl: ${_param:haproxy_pushkin_ssl}
- servers:
- - name: ${_param:cluster_node01_name}
- host: ${_param:cluster_node01_address}
- port: ${_param:haproxy_pushkin_exposed_port}
- params: check
- - name: ${_param:cluster_node02_name}
- host: ${_param:cluster_node02_address}
- port: ${_param:haproxy_pushkin_exposed_port}
- params: backup check
- - name: ${_param:cluster_node03_name}
- host: ${_param:cluster_node03_address}
- port: ${_param:haproxy_pushkin_exposed_port}
- params: backup check
+++ /dev/null
-parameters:
- _param:
- haproxy_rundeck_bind_host: ${_param:haproxy_bind_address}
- haproxy_rundeck_bind_port: 4440
- haproxy_rundeck_ssl:
- enabled: false
- haproxy:
- proxy:
- listen:
- rundeck:
- mode: http
- options:
- - forwardfor
- - httpchk
- - httpclose
- - httplog
- balance: source
- http_request:
- - action: "add-header X-Forwarded-Proto https"
- condition: "if { ssl_fc }"
- binds:
- - address: ${_param:haproxy_rundeck_bind_host}
- port: ${_param:haproxy_rundeck_bind_port}
- ssl: ${_param:haproxy_rundeck_ssl}
- servers:
- - name: ${_param:cluster_node01_name}
- host: ${_param:cluster_node01_address}
- port: 14440
- params: check
- - name: ${_param:cluster_node02_name}
- host: ${_param:cluster_node02_address}
- port: 14440
- params: backup check
- - name: ${_param:cluster_node03_name}
- host: ${_param:cluster_node03_address}
- port: 14440
- params: backup check
+++ /dev/null
-parameters:
- _param:
- haproxy_security_monkey_bind_host: ${_param:haproxy_bind_address}
- haproxy_security_monkey_bind_port: 5001
- haproxy_security_monkey_exposed_port: 15001
- haproxy_security_monkey_ssl:
- enabled: false
- haproxy:
- proxy:
- listen:
- security_monkey:
- mode: http
- options:
- - httpchk GET /
- balance: source
- http_request:
- - action: "add-header X-Forwarded-Proto https"
- condition: "if { ssl_fc }"
- sticks:
- - http-check expect status 404
- binds:
- - address: ${_param:haproxy_security_monkey_bind_host}
- port: ${_param:haproxy_security_monkey_bind_port}
- ssl: ${_param:haproxy_security_monkey_ssl}
- servers:
- - name: ${_param:cluster_node01_name}
- host: ${_param:cluster_node01_address}
- port: ${_param:haproxy_security_monkey_exposed_port}
- params: check
- - name: ${_param:cluster_node02_name}
- host: ${_param:cluster_node02_address}
- port: ${_param:haproxy_security_monkey_exposed_port}
- params: backup check
- - name: ${_param:cluster_node03_name}
- host: ${_param:cluster_node03_address}
- port: ${_param:haproxy_security_monkey_exposed_port}
- params: backup check
+++ /dev/null
-parameters:
- _param:
- haproxy_phpldapadmin_bind_host: ${_param:haproxy_bind_address}
- haproxy_phpldapadmin_bind_port: 8089
- haproxy:
- proxy:
- listen:
- phpldapadmin:
- mode: http
- options:
- - forwardfor
- - httpclose
- - httplog
- balance: source
- binds:
- - address: ${_param:haproxy_phpldapadmin_bind_host}
- port: ${_param:haproxy_phpldapadmin_bind_port}
- servers:
- - name: ${_param:cluster_node01_name}
- host: ${_param:cluster_node01_address}
- port: 18089
- params: check
- - name: ${_param:cluster_node02_name}
- host: ${_param:cluster_node02_address}
- port: 18089
- params: backup check
- - name: ${_param:cluster_node03_name}
- host: ${_param:cluster_node03_address}
- port: 18089
- params: backup check
+++ /dev/null
-parameters:
- haproxy:
- proxy:
- listen:
- rabbitmq:
- type: rabbitmq
- service_name: rabbitmq
- binds:
- - address: ${_param:cluster_vip_address}
- port: 5672
- servers:
- - name: ${_param:cluster_node01_hostname}
- host: ${_param:cluster_node01_address}
- port: 5672
- params: check
- - name: ${_param:cluster_node02_hostname}
- host: ${_param:cluster_node02_address}
- port: 5672
- params: backup check
- rabbitmq_management:
- type: rabbitmq
- service_name: rabbitmq
- binds:
- - address: ${_param:cluster_vip_address}
- port: 15672
- servers:
- - name: ${_param:cluster_node01_hostname}
- host: ${_param:cluster_node01_address}
- port: 15672
- params: check
- - name: ${_param:cluster_node02_hostname}
- host: ${_param:cluster_node02_address}
- port: 15672
- params: backup check
+++ /dev/null
-parameters:
- _param:
- haproxy_radosgw_bind_port: 8080
- haproxy_radosgw_source_port: 8080
- haproxy:
- proxy:
- listen:
- radosgw:
- mode: tcp
- balance: source
- binds:
- - address: ${_param:cluster_vip_address}
- port: ${_param:haproxy_radosgw_bind_port}
- servers:
- - name: ${_param:cluster_node01_hostname}
- host: ${_param:cluster_node01_address}
- port: ${_param:haproxy_radosgw_source_port}
- params: check
- - name: ${_param:cluster_node02_hostname}
- host: ${_param:cluster_node02_address}
- port: ${_param:haproxy_radosgw_source_port}
- params: backup check
- - name: ${_param:cluster_node03_hostname}
- host: ${_param:cluster_node03_address}
- port: ${_param:haproxy_radosgw_source_port}
- params: backup check
+++ /dev/null
-parameters:
- haproxy:
- proxy:
- listen:
- redis:
- mode: tcp
- service_name: redis
- balance: source
- binds:
- - address: ${_param:cluster_vip_address}
- port: 6379
- servers:
- - name: ${_param:cluster_node01_hostname}
- host: ${_param:cluster_node01_address}
- port: 6379
- params: check
- - name: ${_param:cluster_node02_hostname}
- host: ${_param:cluster_node02_address}
- port: 6379
- params: backup check
+++ /dev/null
-parameters:
- haproxy:
- proxy:
- listen:
- salt:
- mode: http
- options:
- - forwardfor
- - httpchk
- - httpclose
- - httplog
- balance: source
- timeout:
- server: 20m
- client: 20m
- binds:
- - address: ${_param:haproxy_bind_address}
- port: 8000
- servers:
- - name: ${_param:cluster_node01_name}
- host: ${_param:cluster_node01_address}
- port: 8000
- params: check
- - name: ${_param:cluster_node02_name}
- host: ${_param:cluster_node02_address}
- port: 8000
- params: backup check
- - name: ${_param:cluster_node03_name}
- host: ${_param:cluster_node03_address}
- port: 8000
- params: backup check
+++ /dev/null
-parameters:
- haproxy:
- proxy:
- listen:
- sensu:
- mode: http
- options:
- - forwardfor
- - httpchk
- - httpclose
- - httplog
- balance: source
- binds:
- - address: ${_param:cluster_vip_address}
- port: 4567
- servers:
- - name: ${_param:cluster_node01_hostname}
- host: ${_param:cluster_node01_address}
- port: 4567
- params: check
- - name: ${_param:cluster_node02_hostname}
- host: ${_param:cluster_node02_address}
- port: 4567
- params: backup check
+++ /dev/null
-parameters:
- haproxy:
- proxy:
- listen:
- elasticsearch:
- mode: http
- options:
- - httplog
- - http-keep-alive
- - prefer-last-server
- - dontlog-normal
- balance: roundrobin
- binds:
- - address: ${_param:cluster_vip_address}
- port: 9200
- servers:
- - name: ${_param:cluster_node01_hostname}
- host: ${_param:cluster_node01_address}
- port: 9200
- params: 'check inter 10s fastinter 2s downinter 3s rise 3 fall 3'
- - name: ${_param:cluster_node02_hostname}
- host: ${_param:cluster_node02_address}
- port: 9200
- params: 'check inter 10s fastinter 2s downinter 3s rise 3 fall 3'
- - name: ${_param:cluster_node03_hostname}
- host: ${_param:cluster_node03_address}
- port: 9200
- params: 'check inter 10s fastinter 2s downinter 3s rise 3 fall 3'
+++ /dev/null
-parameters:
- haproxy:
- proxy:
- listen:
- grafana:
- mode: http
- options:
- - httplog
- - dontlog-normal
- balance: source
- binds:
- - address: ${_param:cluster_vip_address}
- port: ${_param:cluster_grafana_port}
- servers:
- - name: ${_param:cluster_node01_hostname}
- host: ${_param:cluster_node01_address}
- port: 3000
- params: 'check'
- - name: ${_param:cluster_node02_hostname}
- host: ${_param:cluster_node02_address}
- port: 3000
- params: 'check'
- - name: ${_param:cluster_node03_hostname}
- host: ${_param:cluster_node03_address}
- port: 3000
- params: 'check'
+++ /dev/null
-parameters:
- _param:
- cluster_influxdb_port: ${_param:influxdb_port}
- haproxy:
- proxy:
- listen:
- influxdb:
- mode: http
- options:
- - "httpchk GET /ping"
- - httplog
- - dontlog-normal
- binds:
- - address: ${_param:cluster_vip_address}
- port: ${_param:cluster_influxdb_port}
- servers:
- - name: ${_param:cluster_node01_hostname}
- host: ${_param:cluster_node01_address}
- port: 8086
- params: 'check'
- - name: ${_param:cluster_node02_hostname}
- host: ${_param:cluster_node02_address}
- port: 8086
- params: 'backup check'
- - name: ${_param:cluster_node03_hostname}
- host: ${_param:cluster_node03_address}
- port: 8086
- params: 'backup check'
+++ /dev/null
-parameters:
- haproxy:
- proxy:
- listen:
- kibana:
- mode: http
- options:
- - httplog
- - http-keep-alive
- - prefer-last-server
- - dontlog-normal
- balance: roundrobin
- binds:
- - address: ${_param:cluster_vip_address}
- port: 5601
- servers:
- - name: ${_param:cluster_node01_hostname}
- host: ${_param:cluster_node01_address}
- port: 5601
- params: 'check inter 10s fastinter 2s downinter 3s rise 3 fall 3'
- - name: ${_param:cluster_node02_hostname}
- host: ${_param:cluster_node02_address}
- port: 5601
- params: 'check inter 10s fastinter 2s downinter 3s rise 3 fall 3'
- - name: ${_param:cluster_node03_hostname}
- host: ${_param:cluster_node03_address}
- port: 5601
- params: 'check inter 10s fastinter 2s downinter 3s rise 3 fall 3'
+++ /dev/null
-parameters:
- haproxy:
- proxy:
- listen:
- redis:
- mode: tcp
- check: false
- health-check:
- tcp:
- options:
- - send PING\r\n
- - expect string +PONG
- - send info\ replication\r\n
- - expect string role:master
- - send QUIT\r\n
- - expect string +OK
- binds:
- - address: ${_param:cluster_vip_address}
- port: ${_param:cluster_redis_port}
- servers:
- - name: ${_param:cluster_node01_hostname}
- host: ${_param:cluster_node01_address}
- port: 6379
- params: 'check'
- - name: ${_param:cluster_node02_hostname}
- host: ${_param:cluster_node02_address}
- port: 6379
- params: 'check'
- - name: ${_param:cluster_node03_hostname}
- host: ${_param:cluster_node03_address}
- port: 6379
- params: 'check'
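
Note: this health check scripts a short Redis conversation: PING must answer
+PONG, "info replication" must report role:master, and QUIT must answer +OK,
so only the current master passes the check and HAProxy always routes to it.
Wiring it up needs just the VIP-side port (value illustrative):

parameters:
  _param:
    cluster_redis_port: 6379   # illustrative
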
+++ /dev/null
-parameters:
- haproxy:
- proxy:
- listen:
- sensu:
- mode: http
- options:
- - forwardfor
- - httpchk
- - httpclose
- - httplog
- balance: source
- binds:
- - address: ${_param:cluster_vip_address}
- port: 4567
- servers:
- - name: ${_param:cluster_node01_hostname}
- host: ${_param:cluster_node01_address}
- port: 4567
- params: check
- - name: ${_param:cluster_node02_hostname}
- host: ${_param:cluster_node02_address}
- port: 4567
- params: check
- - name: ${_param:cluster_node03_hostname}
- host: ${_param:cluster_node03_address}
- port: 4567
- params: check
+++ /dev/null
-parameters:
- haproxy:
- proxy:
- listen:
- uchiwa:
- mode: http
- options:
- - forwardfor
- - httpchk
- - httpclose
- - httplog
- balance: source
- binds:
- - address: ${_param:cluster_vip_address}
- port: 3001
- servers:
- - name: ${_param:cluster_node01_hostname}
- host: ${_param:cluster_node01_address}
- port: 3001
- params: check
- - name: ${_param:cluster_node02_hostname}
- host: ${_param:cluster_node02_address}
- port: 3001
- params: check
- - name: ${_param:cluster_node03_hostname}
- host: ${_param:cluster_node03_address}
- port: 3001
- params: check
-
+++ /dev/null
-parameters:
- _param:
- haproxy_stats_port: 9600
- haproxy:
- proxy:
- listen:
- stats:
- type: stats
- check: false
- binds:
- - address: ${_param:haproxy_bind_address}
- port: ${_param:haproxy_stats_port}
+++ /dev/null
-parameters:
- haproxy:
- proxy:
- listen:
- uchiwa:
- mode: http
- options:
- - forwardfor
- - httpchk
- - httpclose
- - httplog
- balance: source
- binds:
- - address: ${_param:cluster_vip_address}
- port: 80
- servers:
- - name: ${_param:cluster_node01_hostname}
- host: ${_param:cluster_node01_address}
- port: 3000
- params: check
- - name: ${_param:cluster_node02_hostname}
- host: ${_param:cluster_node02_address}
- port: 3000
- params: check
+++ /dev/null
-parameters:
- haproxy:
- proxy:
- enabled: true
+++ /dev/null
-parameters:
- _param:
- heat_data_revision: master
- heat:
- client:
- enabled: true
- source:
- engine: git
- address: ${_param:heat_data_repository}
- revision: ${_param:heat_data_revision}
+++ /dev/null
-classes:
-- service.heat.server.cluster
-- service.haproxy.proxy.single
-- service.keepalived.cluster.single
-- system.haproxy.proxy.listen.openstack.heat
-parameters:
- _param:
- cluster_public_protocol: https
- heat:
- server:
- stack_domain_admin:
- name: heat_domain_admin
- password: ${_param:heat_domain_admin_password}
- domain: heat
- enabled: true
- region: ${_param:openstack_region}
- version: ${_param:heat_version}
- bind:
- api_cfn:
- address: ${_param:cluster_local_address}
- api_cloudwatch:
- address: ${_param:cluster_local_address}
- api:
- address: ${_param:cluster_local_address}
- database:
- engine: mysql
- host: ${_param:openstack_database_address}
- port: 3306
- name: heat
- user: heat
- password: ${_param:mysql_heat_password}
- metadata:
- host: ${_param:cluster_public_host}
- port: 8000
- protocol: ${_param:cluster_public_protocol}
- waitcondition:
- host: ${_param:cluster_public_host}
- port: 8000
- protocol: ${_param:cluster_public_protocol}
- watch:
- host: ${_param:cluster_public_host}
- port: 8003
- protocol: ${_param:cluster_public_protocol}
- identity:
- engine: keystone
- host: ${_param:cluster_vip_address}
- port: 35357
- tenant: service
- user: heat
- password: ${_param:keystone_heat_password}
- message_queue:
- engine: rabbitmq
- port: 5672
- user: openstack
- password: ${_param:rabbitmq_openstack_password}
- virtual_host: '/openstack'
- members:
- - host: ${_param:openstack_message_queue_node01_address}
- - host: ${_param:openstack_message_queue_node02_address}
- - host: ${_param:openstack_message_queue_node03_address}
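
Note: beyond the database and message-queue addresses interpolated above, a
cluster model has to supply the Heat-specific secrets and the public endpoint.
A minimal sketch, all values illustrative:

parameters:
  _param:
    heat_version: newton                    # illustrative
    heat_domain_admin_password: changeme    # illustrative secret
    keystone_heat_password: changeme        # illustrative secret
    cluster_public_host: cloud.example.com  # illustrative FQDN
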
+++ /dev/null
-parameters:
- heat:
- server:
- audit:
- enabled: true
+++ /dev/null
-parameters:
- heat:
- server:
- notification:
- driver: messagingv2
- topics: "${_param:openstack_notification_topics}"
+++ /dev/null
-parameters:
- linux:
- system:
- package:
- contrail-heat:
- version: latest
- heat:
- server:
- dir:
- plugins:
- - /usr/lib/python2.7/dist-packages/vnc_api/gen/heat/resources
+++ /dev/null
-parameters:
- linux:
- system:
- package:
- python-heat-salt:
- version: latest
- heat:
- server:
- dir:
- plugins:
- - /usr/lib/python2.7/dist-packages/heat_salt/resources
+++ /dev/null
-classes:
-- service.heat.server.single
-parameters:
- heat:
- server:
- stack_domain_admin:
- name: heat_domain_admin
- password: ${_param:heat_domain_admin_password}
- domain: heat
+++ /dev/null
-classes:
-- service.keepalived.cluster.single
-- service.horizon.server.cluster
-- service.haproxy.proxy.single
-- system.haproxy.proxy.listen.openstack.horizon
-- system.memcached.server.single
-parameters:
- _param:
- horizon_site_branding: "OpenStack Dashboard"
- horizon:
- server:
- branding: ${_param:horizon_site_branding}
- plugin: {}
- session:
- engine: "cache"
-
+++ /dev/null
-parameters:
- horizon:
- server:
- plugin:
- api_mask:
- app: api_mask
- mask_url: ${_param:horizon_api_mask_url}
- source:
- engine: pkg
- name: openstack-dashboard-api-mask
+++ /dev/null
-parameters:
- _param:
- openstack_billing_currency: EUR
- openstack_billing_allocation: False
- openstack_billing_resource_types: []
- horizon:
- server:
- plugin:
- billing:
- metric:
- engine: graphite
- host: ${_param:openstack_billing_address}
- port: 80
- config:
- currency: ${_param:openstack_billing_currency}
- allocation: ${_param:openstack_billing_allocation}
- extra_resource_types: ${_param:openstack_billing_resource_types}
- source:
- engine: pkg
- name: openstack-dashboard-billing
- horizon_overrides:
- overrides:
- - horizon_billing
+++ /dev/null
-parameters:
- horizon:
- server:
- plugin:
- contrail:
- source:
- engine: pkg
- name: openstack-dashboard-contrail-panels
- horizon_overrides:
- overrides:
- - contrail_openstack_dashboard
+++ /dev/null
-parameters:
- horizon:
- server:
- plugin:
- heat:
- source:
- engine: pkg
- name: openstack-dashboard-heat-server-templates
- horizon_overrides:
- overrides:
- - heat_server_templates
+++ /dev/null
-parameters:
- _param:
- horizon_jenkins_url: http://localhost:8080/
- horizon_jenkins_user: admin
- horizon:
- server:
- jenkins_api:
- url: ${_param:horizon_jenkins_url}
- user: ${_param:horizon_jenkins_user}
- password: ${_param:horizon_jenkins_password}
- plugin:
- jenkins:
- source:
- engine: pkg
- name: openstack-dashboard-jenkins
+++ /dev/null
-parameters:
- horizon:
- server:
- plugin:
- monitoring:
- app: horizon_monitoring
- source:
- engine: pkg
- name: openstack-dashboard-sensu
+++ /dev/null
-parameters:
- horizon:
- server:
- plugin:
- horizon_overrides:
- override: true
- app: horizon_overrides
- source:
- engine: pkg
- name: python-horizon-overrides-plugin
+++ /dev/null
-parameters:
- _param:
- horizon_telemetry_engine: graphite
- horizon_telemetry_host: 127.0.0.1
- horizon_telemetry_port: 80
- horizon_telemetry_control_nodes: {}
- horizon:
- server:
- control_nodes: ${_param:horizon_telemetry_control_nodes}
- plugin:
- telemetry:
- metric:
- engine: ${_param:horizon_telemetry_engine}
- host: ${_param:horizon_telemetry_host}
- port: ${_param:horizon_telemetry_port}
- source:
- engine: pkg
- name: openstack-dashboard-telemetry
- horizon_overrides:
- overrides:
- - horizon_telemetry
+++ /dev/null
-parameters:
- _param:
- horizon_dashboard_theme: mirantis
- horizon:
- server:
- plugin:
- horizon_theme:
- app: horizon_theme
- theme_name: ${_param:horizon_dashboard_theme}
- source:
- engine: pkg
- name: openstack-dashboard-${_param:horizon_dashboard_theme}-theme
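
Note: because the package name is templated on horizon_dashboard_theme,
switching themes is a single-parameter override; the class then installs
openstack-dashboard-<theme>-theme and registers the horizon_theme plugin.
Sketch, theme name illustrative:

parameters:
  _param:
    horizon_dashboard_theme: default   # installs openstack-dashboard-default-theme
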
+++ /dev/null
-classes:
-- service.horizon.server.single
-- system.memcached.server.single
-parameters:
- _param:
- horizon_site_branding: "OpenStack Dashboard"
- horizon:
- server:
- branding: ${_param:horizon_site_branding}
- bind:
- address: 0.0.0.0
- port: 8078
- plugin: {}
- session:
- engine: "cache"
-
+++ /dev/null
-applications:
- - keepalived
-classes:
- - service.keepalived.support
-parameters:
- keepalived:
- cluster:
- enabled: true
- instance:
- cicd_control_vip:
- address: ${_param:cluster_vip_address}
- password: ${_param:keepalived_vip_password}
- interface: ${_param:keepalived_vip_interface}
- virtual_router_id: ${_param:keepalived_vip_virtual_router_id}
- priority: ${_param:keepalived_vip_priority}
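
Note: the cicd_control_vip instance delegates every setting to _param, so the
cluster model chooses the NIC and the VRRP identity; virtual_router_id must
stay unique per broadcast domain, as the fixed ids in the following keepalived
classes illustrate. Sketch, values illustrative:

parameters:
  _param:
    keepalived_vip_interface: ens4
    keepalived_vip_virtual_router_id: 140   # must not collide on the L2 segment
    keepalived_vip_priority: 101
    keepalived_vip_password: changeme       # illustrative secret
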
+++ /dev/null
-applications:
-- keepalived
-classes:
-- service.keepalived.support
-parameters:
- _param:
- keepalived_openstack_telemetry_vip_address: ${_param:cluster_vip_address}
- keepalived_openstack_telemetry_vip_password: password
- keepalived_openstack_telemetry_vip_interface: eth1
- keepalived:
- cluster:
- enabled: true
- instance:
- openstack_telemetry_vip:
- address: ${_param:keepalived_openstack_telemetry_vip_address}
- password: ${_param:keepalived_openstack_telemetry_vip_password}
- interface: ${_param:keepalived_openstack_telemetry_vip_interface}
- virtual_router_id: 230
- priority: 101
+++ /dev/null
-applications:
-- keepalived
-classes:
-- service.keepalived.support
-parameters:
- _param:
- keepalived_openstack_web_public_vip_address: ${_param:cluster_vip_address}
- keepalived_openstack_web_public_vip_password: password
- keepalived_openstack_web_public_vip_interface: eth1
- keepalived:
- cluster:
- enabled: true
- instance:
- openstack_web_public_vip:
- address: ${_param:keepalived_openstack_web_public_vip_address}
- password: ${_param:keepalived_openstack_web_public_vip_password}
- interface: ${_param:keepalived_openstack_web_public_vip_interface}
- virtual_router_id: 132
- priority: ${_param:keepalived_vip_priority}
+++ /dev/null
-applications:
-- keepalived
-classes:
-- service.keepalived.support
-parameters:
- _param:
- keepalived_vip_priority: 101
- keepalived:
- cluster:
- enabled: true
- instance:
- prometheus_server_vip:
- address: ${_param:keepalived_prometheus_vip_address}
- password: ${_param:keepalived_prometheus_vip_password}
- interface: ${_param:keepalived_prometheus_vip_interface}
- virtual_router_id: 105
- priority: ${_param:keepalived_vip_priority}
-
+++ /dev/null
-applications:
-- keepalived
-classes:
-- service.keepalived.support
-parameters:
- keepalived:
- cluster:
- enabled: true
- instance:
- stacklight_log_vip:
- address: ${_param:keepalived_stacklight_log_vip_address}
- password: ${_param:keepalived_stacklight_log_vip_password}
- interface: ${_param:keepalived_stacklight_log_vip_interface}
- virtual_router_id: 110
- priority: 101
+++ /dev/null
-applications:
-- keepalived
-classes:
-- service.keepalived.support
-parameters:
- keepalived:
- cluster:
- enabled: true
- instance:
- stacklight_monitor_vip:
- address: ${_param:keepalived_stacklight_monitor_vip_address}
- password: ${_param:keepalived_stacklight_monitor_vip_password}
- interface: ${_param:keepalived_stacklight_monitor_vip_interface}
- virtual_router_id: 100
- priority: 101
+++ /dev/null
-applications:
-- keepalived
-classes:
-- service.keepalived.support
-parameters:
- keepalived:
- cluster:
- enabled: true
- instance:
- stacklight_telemetry_vip:
- address: ${_param:keepalived_stacklight_telemetry_vip_address}
- password: ${_param:keepalived_stacklight_telemetry_vip_password}
- interface: ${_param:keepalived_stacklight_telemetry_vip_interface}
- virtual_router_id: 120
- priority: 101
+++ /dev/null
-parameters:
- _param:
- cluster_public_protocol: https
- keystone:
- client:
- server:
- identity:
- project:
- service:
- user:
- aodh:
- is_admin: true
- password: ${_param:keystone_aodh_password}
- email: ${_param:admin_email}
- service:
- aodh:
- type: alarming
- description: OpenStack Alarming Service
- endpoints:
- - region: ${_param:openstack_region}
- public_address: ${_param:cluster_public_host}
- public_protocol: ${_param:cluster_public_protocol}
- public_port: 8042
- public_path: '/'
- internal_address: ${_param:aodh_service_host}
- internal_port: 8042
- internal_path: '/'
- admin_address: ${_param:aodh_service_host}
- admin_port: 8042
- admin_path: '/'
+++ /dev/null
-parameters:
- _param:
- cluster_public_protocol: https
- keystone:
- client:
- server:
- identity:
- project:
- service:
- user:
- billometer:
- is_admin: true
- password: ${_param:keystone_billometer_password}
- email: ${_param:admin_email}
- service:
- billometer:
- type: billing
- description: OpenStack Billing Service
- endpoints:
- - region: ${_param:openstack_region}
- public_address: ${_param:cluster_public_host}
- public_protocol: ${_param:cluster_public_protocol}
- public_port: 9753
- public_path: '/v1'
- internal_address: ${_param:billometer_service_host}
- internal_port: 9753
- internal_path: '/v1'
- admin_address: ${_param:billometer_service_host}
- admin_port: 9753
- admin_path: '/v1'
+++ /dev/null
-parameters:
- _param:
- cluster_public_protocol: https
- keystone:
- client:
- server:
- identity:
- project:
- service:
- user:
- ceilometer:
- is_admin: true
- password: ${_param:keystone_ceilometer_password}
- email: ${_param:admin_email}
- service:
- ceilometer:
- type: metering
- description: OpenStack Telemetry Service
- endpoints:
- - region: ${_param:openstack_region}
- public_address: ${_param:cluster_public_host}
- public_protocol: ${_param:cluster_public_protocol}
- public_port: 8777
- public_path: '/'
- internal_address: ${_param:ceilometer_service_host}
- internal_port: 8777
- internal_path: '/'
- admin_address: ${_param:ceilometer_service_host}
- admin_port: 8777
- admin_path: '/'
+++ /dev/null
-parameters:
- _param:
- cluster_public_protocol: https
- keystone:
- client:
- server:
- identity:
- project:
- service:
- user:
- cinder:
- is_admin: true
- password: ${_param:keystone_cinder_password}
- email: ${_param:admin_email}
- service:
- cinder:
- type: volume
- description: OpenStack Volume Service
- endpoints:
- - region: ${_param:openstack_region}
- public_address: ${_param:cluster_public_host}
- public_protocol: ${_param:cluster_public_protocol}
- public_port: 8776
- public_path: '/v1/$(project_id)s'
- internal_address: ${_param:cinder_service_host}
- internal_port: 8776
- internal_path: '/v1/$(project_id)s'
- admin_address: ${_param:cinder_service_host}
- admin_port: 8776
- admin_path: '/v1/$(project_id)s'
+++ /dev/null
-parameters:
- _param:
- cluster_public_protocol: https
- keystone:
- client:
- server:
- identity:
- project:
- service:
- user:
- cinder:
- is_admin: true
- password: ${_param:keystone_cinder_password}
- email: ${_param:admin_email}
- service:
- cinderv2:
- type: volumev2
- description: OpenStack Volume Service v2
- endpoints:
- - region: ${_param:openstack_region}
- public_address: ${_param:cluster_public_host}
- public_protocol: ${_param:cluster_public_protocol}
- public_port: 8776
- public_path: '/v2/$(project_id)s'
- internal_address: ${_param:cinder_service_host}
- internal_port: 8776
- internal_path: '/v2/$(project_id)s'
- admin_address: ${_param:cinder_service_host}
- admin_port: 8776
- admin_path: '/v2/$(project_id)s'
+++ /dev/null
-parameters:
- _param:
- cluster_public_protocol: https
- keystone:
- client:
- server:
- identity:
- project:
- service:
- user:
- cinder:
- is_admin: true
- password: ${_param:keystone_cinder_password}
- email: ${_param:admin_email}
- service:
- cinderv3:
- type: volumev3
- description: OpenStack Volume Service v3
- endpoints:
- - region: ${_param:openstack_region}
- public_address: ${_param:cluster_public_host}
- public_protocol: ${_param:cluster_public_protocol}
- public_port: 8776
- public_path: '/v3/$(project_id)s'
- internal_address: ${_param:cinder_service_host}
- internal_port: 8776
- internal_path: '/v3/$(project_id)s'
- admin_address: ${_param:cinder_service_host}
- admin_port: 8776
- admin_path: '/v3/$(project_id)s'
-
+++ /dev/null
-parameters:
- _param:
- cluster_public_protocol: https
- keystone:
- client:
- server:
- identity:
- project:
- service:
- user:
- designate:
- is_admin: true
- password: ${_param:keystone_designate_password}
- email: ${_param:admin_email}
- service:
- designate:
- type: dns
- description: OpenStack DNS service
- endpoints:
- - region: ${_param:openstack_region}
- public_address: ${_param:cluster_public_host}
- public_protocol: ${_param:cluster_public_protocol}
- public_port: 9001
- public_path: '/'
- internal_address: ${_param:designate_service_host}
- internal_port: 9001
- internal_path: '/'
- admin_address: ${_param:designate_service_host}
- admin_port: 9001
- admin_path: '/'
+++ /dev/null
-parameters:
- _param:
- cluster_public_protocol: https
- keystone:
- client:
- server:
- identity:
- project:
- service:
- user:
- glance:
- is_admin: true
- password: ${_param:keystone_glance_password}
- email: ${_param:admin_email}
- service:
- glance:
- type: image
- description: OpenStack Image Service
- endpoints:
- - region: ${_param:openstack_region}
- public_address: ${_param:cluster_public_host}
- public_protocol: ${_param:cluster_public_protocol}
- public_port: 9292
- public_path: ''
- internal_address: ${_param:glance_service_host}
- internal_port: 9292
- internal_path: ''
- admin_address: ${_param:glance_service_host}
- admin_port: 9292
- admin_path: ''
+++ /dev/null
-parameters:
- _param:
- cluster_public_protocol: https
- keystone:
- client:
- server:
- identity:
- project:
- service:
- user:
- glance:
- is_admin: true
- password: ${_param:keystone_glance_password}
- email: ${_param:admin_email}
- service:
- glare:
- type: artifact
- description: OpenStack Image Artifact Service
- endpoints:
- - region: ${_param:openstack_region}
- public_address: ${_param:cluster_public_host}
- public_protocol: ${_param:cluster_public_protocol}
- public_port: 9494
- public_path: ''
- internal_address: ${_param:glance_service_host}
- internal_port: 9494
- internal_path: ''
- admin_address: ${_param:glance_service_host}
- admin_port: 9494
- admin_path: ''
-
+++ /dev/null
-parameters:
- _param:
- cluster_public_protocol: https
- keystone:
- client:
- server:
- identity:
- service:
- heat-cfn:
- type: cloudformation
- description: OpenStack CloudFormation Service
- endpoints:
- - region: ${_param:openstack_region}
- public_address: ${_param:cluster_public_host}
- public_protocol: ${_param:cluster_public_protocol}
- public_port: 8000
- public_path: '/v1'
- internal_address: ${_param:heat_service_host}
- internal_port: 8000
- internal_path: '/v1'
- admin_address: ${_param:heat_service_host}
- admin_port: 8000
- admin_path: '/v1'
+++ /dev/null
-parameters:
- _param:
- cluster_public_protocol: https
- keystone:
- client:
- server:
- identity:
- roles:
- - heat_stack_user
- - heat_stack_owner
- project:
- service:
- user:
- heat:
- is_admin: true
- password: ${_param:keystone_heat_password}
- email: ${_param:admin_email}
- service:
- heat:
- type: orchestration
- description: OpenStack Orchestration Service
- endpoints:
- - region: ${_param:openstack_region}
- public_address: ${_param:cluster_public_host}
- public_protocol: ${_param:cluster_public_protocol}
- public_port: 8004
- public_path: '/v1/%(project_id)s'
- internal_address: ${_param:heat_service_host}
- internal_port: 8004
- internal_path: '/v1/%(project_id)s'
- admin_address: ${_param:heat_service_host}
- admin_port: 8004
- admin_path: '/v1/%(project_id)s'
+++ /dev/null
-parameters:
- _param:
- cluster_public_protocol: https
- keystone:
- client:
- server:
- identity:
- service:
- keystone:
- type: identity
- description: OpenStack Identity Service
- endpoints:
- - region: ${_param:openstack_region}
- public_address: ${_param:cluster_public_host}
- public_protocol: ${_param:cluster_public_protocol}
- public_port: 5000
- public_path: '/v2.0'
- internal_address: ${_param:keystone_service_host}
- internal_port: 5000
- internal_path: '/v2.0'
- admin_address: ${_param:keystone_service_host}
- admin_port: 35357
- admin_path: '/v2.0'
+++ /dev/null
-parameters:
- _param:
- cluster_public_protocol: https
- keystone:
- client:
- server:
- identity:
- service:
- keystone3:
- type: identity
- description: OpenStack Identity Service v3
- endpoints:
- - region: ${_param:openstack_region}
- public_address: ${_param:cluster_public_host}
- public_protocol: ${_param:cluster_public_protocol}
- public_port: 5000
- public_path: '/v3'
- internal_address: ${_param:keystone_service_host}
- internal_port: 5000
- internal_path: '/v3'
- admin_address: ${_param:keystone_service_host}
- admin_port: 35357
- admin_path: '/v3'
+++ /dev/null
-parameters:
- _param:
- cluster_public_protocol: https
- keystone:
- client:
- server:
- identity:
- project:
- service:
- user:
- murano:
- is_admin: true
- password: ${_param:keystone_murano_password}
- email: ${_param:admin_email}
- service:
- murano:
- type: application-catalog
- description: Application Catalog for OpenStack
- endpoints:
- - region: ${_param:openstack_region}
- public_address: ${_param:cluster_public_host}
- public_protocol: ${_param:cluster_public_protocol}
- public_port: 8082
- public_path: ''
- internal_address: ${_param:murano_service_host}
- internal_port: 8082
- internal_path: ''
- admin_address: ${_param:murano_service_host}
- admin_port: 8082
- admin_path: ''
+++ /dev/null
-parameters:
- _param:
- cluster_public_protocol: https
- keystone:
- client:
- server:
- identity:
- project:
- service:
- user:
- neutron:
- is_admin: true
- password: ${_param:keystone_neutron_password}
- email: ${_param:admin_email}
- service:
- neutron:
- type: network
- description: OpenStack Networking Service
- endpoints:
- - region: ${_param:openstack_region}
- public_address: ${_param:cluster_public_host}
- public_protocol: ${_param:cluster_public_protocol}
- public_port: 9696
- public_path: '/'
- internal_address: ${_param:neutron_service_host}
- internal_port: 9696
- internal_path: '/'
- admin_address: ${_param:neutron_service_host}
- admin_port: 9696
- admin_path: '/'
+++ /dev/null
-parameters:
- _param:
- cluster_public_protocol: https
- keystone:
- client:
- server:
- identity:
- service:
- nova-ec2:
- type: ec2
- description: OpenStack EC2 Service
- endpoints:
- - region: ${_param:openstack_region}
- public_address: ${_param:cluster_public_host}
- public_protocol: ${_param:cluster_public_protocol}
- public_port: 8773
- public_path: '/services/Cloud'
- internal_address: ${_param:nova_service_host}
- internal_port: 8773
- internal_path: '/services/Cloud'
- admin_address: ${_param:nova_service_host}
- admin_port: 8773
- admin_path: '/services/Admin'
+++ /dev/null
-parameters:
- _param:
- cluster_public_protocol: https
- keystone:
- client:
- server:
- identity:
- service:
- placement:
- type: placement
- description: OpenStack Placement API
- endpoints:
- - region: ${_param:openstack_region}
- public_address: ${_param:cluster_public_host}
- public_protocol: ${_param:cluster_public_protocol}
- public_port: '8778'
- public_path: ''
- internal_address: ${_param:nova_service_host}
- internal_port: '8778'
- internal_path: ''
- admin_address: ${_param:nova_service_host}
- admin_port: '8778'
- admin_path: ''
+++ /dev/null
-parameters:
- _param:
- cluster_public_protocol: https
- keystone:
- client:
- server:
- identity:
- project:
- service:
- user:
- nova:
- is_admin: true
- password: ${_param:keystone_nova_password}
- email: ${_param:admin_email}
- service:
- nova:
- type: compute
- description: OpenStack Compute Service
- endpoints:
- - region: ${_param:openstack_region}
- public_address: ${_param:cluster_public_host}
- public_protocol: ${_param:cluster_public_protocol}
- public_port: 8774
- public_path: '/v2/$(project_id)s'
- internal_address: ${_param:nova_service_host}
- internal_port: 8774
- internal_path: '/v2/$(project_id)s'
- admin_address: ${_param:nova_service_host}
- admin_port: 8774
- admin_path: '/v2/$(project_id)s'
+++ /dev/null
-parameters:
- _param:
- cluster_public_protocol: https
- keystone:
- client:
- server:
- identity:
- project:
- service:
- user:
- nova:
- is_admin: true
- password: ${_param:keystone_nova_password}
- email: ${_param:admin_email}
- service:
- nova20:
- type: compute_legacy
- description: OpenStack Compute Service
- endpoints:
- - region: ${_param:openstack_region}
- public_address: ${_param:cluster_public_host}
- public_protocol: ${_param:cluster_public_protocol}
- public_port: 8774
- public_path: '/v2/$(project_id)s'
- internal_address: ${_param:nova_service_host}
- internal_port: 8774
- internal_path: '/v2/$(project_id)s'
- admin_address: ${_param:nova_service_host}
- admin_port: 8774
- admin_path: '/v2/$(project_id)s'
- nova:
- type: compute
- description: OpenStack Compute Service v2.1
- endpoints:
- - region: ${_param:openstack_region}
- public_address: ${_param:cluster_public_host}
- public_protocol: ${_param:cluster_public_protocol}
- public_port: 8774
- public_path: '/v2.1/$(project_id)s'
- internal_address: ${_param:nova_service_host}
- internal_port: 8774
- internal_path: '/v2.1/$(project_id)s'
- admin_address: ${_param:nova_service_host}
- admin_port: 8774
- admin_path: '/v2.1/$(project_id)s'
+++ /dev/null
-parameters:
- _param:
- cluster_public_protocol: https
- radosgw_s3_cluster_port: 8081
- keystone:
- client:
- server:
- identity:
- project:
- service:
- user:
- swift:
- is_admin: true
- password: ${_param:keystone_swift_password}
- email: ${_param:admin_email}
- service:
- radosgw-s3:
- type: s3
- description: S3 Service (radosgw)
- endpoints:
- - region: ${_param:openstack_region}
- public_address: ${_param:cluster_public_host}
- public_protocol: ${_param:cluster_public_protocol}
- public_port: ${_param:radosgw_s3_cluster_port}
- public_path: '/' # /$(project_id)s
- internal_address: ${_param:radosgw_service_host}
- internal_port: ${_param:radosgw_s3_cluster_port}
- internal_path: '/'
- admin_address: ${_param:radosgw_service_host}
- admin_port: ${_param:radosgw_s3_cluster_port}
- admin_path: '/'
+++ /dev/null
-parameters:
- _param:
- cluster_public_protocol: https
- keystone:
- client:
- server:
- identity:
- project:
- service:
- user:
- swift:
- is_admin: true
- password: ${_param:keystone_swift_password}
- email: ${_param:admin_email}
- service:
- radosgw-swift:
- type: object-store
- description: Swift Service (radosgw)
- endpoints:
- - region: ${_param:openstack_region}
- public_address: ${_param:cluster_public_host}
- public_protocol: ${_param:cluster_public_protocol}
- public_port: 8081
- public_path: '/swift/v1' # /$(project_id)s
- internal_address: ${_param:radosgw_service_host}
- internal_port: 8081
- internal_path: '/swift/v1'
- admin_address: ${_param:radosgw_service_host}
- admin_port: 8081
- admin_path: '/swift/v1'
+++ /dev/null
-parameters:
- _param:
- cluster_public_protocol: https
- keystone:
- client:
- server:
- identity:
- project:
- service:
- user:
- sahara:
- is_admin: true
- password: ${_param:keystone_sahara_password}
- email: ${_param:admin_email}
- service:
- sahara:
- type: data-processing
- description: Sahara Data Processing
- endpoints:
- - region: ${_param:openstack_region}
- public_address: ${_param:cluster_public_host}
- public_protocol: ${_param:cluster_public_protocol}
- public_port: 8386
- public_path: '/v1.1/%(project_id)s'
- internal_address: ${_param:sahara_service_host}
- internal_port: 8386
- internal_path: '/v1.1/%(project_id)s'
- admin_address: ${_param:sahara_service_host}
- admin_port: 8386
- admin_path: '/v1.1/%(project_id)s'
+++ /dev/null
-parameters:
- _param:
- cluster_public_protocol: https
- keystone:
- client:
- server:
- identity:
- project:
- service:
- user:
- swift:
- is_admin: true
- password: ${_param:keystone_swift_password}
- email: ${_param:admin_email}
- service:
- swift-s3:
- type: object-store
- description: S3 Service (swift)
- region: ${_param:openstack_region}
- endpoints:
- - region: ${_param:openstack_region}
- public_address: ${_param:cluster_public_host}
- public_protocol: ${_param:cluster_public_protocol}
- public_port: 8080
- public_path: '/v1/AUTH_%(project_id)s'
- internal_address: ${_param:swift_service_host}
- internal_port: 8080
- internal_path: '/v1/AUTH_%(project_id)s'
- admin_address: ${_param:swift_service_host}
- admin_port: 8080
- admin_path: '/'
+++ /dev/null
-parameters:
- _param:
- cluster_public_protocol: https
- keystone:
- client:
- server:
- identity:
- project:
- service:
- user:
- swift:
- is_admin: true
- password: ${_param:keystone_swift_password}
- email: ${_param:admin_email}
- service:
- swift:
- type: object-store
- description: Swift Service
- endpoints:
- - region: ${_param:openstack_region}
- public_address: ${_param:cluster_public_host}
- public_protocol: ${_param:cluster_public_protocol}
- public_port: 8080
- public_path: '/v1/AUTH_%(project_id)s'
- internal_address: ${_param:swift_service_host}
- internal_port: 8080
- internal_path: '/v1/AUTH_%(project_id)s'
- admin_address: ${_param:swift_service_host}
- admin_port: 8080
- admin_path: '/'
+++ /dev/null
-classes:
-- system.keystone.client.service.cinder
-- system.keystone.client.service.cinder2
-- system.keystone.client.service.glance
-- system.keystone.client.service.heat
-- system.keystone.client.service.heat-cfn
-- system.keystone.client.service.keystone
-- system.keystone.client.service.neutron
-- system.keystone.client.service.nova-ec2
-parameters:
- linux:
- system:
- job:
- keystone_job_rotate:
- command: '/usr/bin/keystone-manage fernet_rotate --keystone-user keystone --keystone-group keystone >> /var/log/key_rotation_log 2>> /var/log/key_rotation_log'
- enabled: true
- user: root
- minute: 0
- keystone:
- client:
- enabled: true
- server:
- identity:
- admin:
- host: ${_param:keystone_service_host}
- port: 35357
- token: ${_param:keystone_service_token}
- roles:
- - admin
- - Member
- project:
- service:
- description: "OpenStack Service tenant"
- admin:
- description: "OpenStack Admin tenant"
- user:
- admin:
- is_admin: true
- password: ${_param:keystone_admin_password}
- email: ${_param:admin_email}
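
Note: the client class above registers only the core services; telemetry or
storage endpoints are added by stacking more of the service classes defined
earlier, with their service hosts pointed at the control VIP. Sketch, class
paths following the convention of the class list above:

classes:
- system.keystone.client.service.aodh
- system.keystone.client.service.ceilometer
parameters:
  _param:
    aodh_service_host: ${_param:cluster_vip_address}
    ceilometer_service_host: ${_param:cluster_vip_address}
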
+++ /dev/null
-classes:
-- service.keystone.server.cluster
-- service.keepalived.cluster.single
-- system.haproxy.proxy.listen.openstack.keystone
-parameters:
- keystone:
- server:
- enabled: true
- version: ${_param:keystone_version}
- service_token: ${_param:keystone_service_token}
- service_tenant: service
- admin_tenant: admin
- admin_name: admin
- admin_password: ${_param:keystone_admin_password}
- admin_email: ${_param:admin_email}
- bind:
- address: ${_param:cluster_local_address}
- private_address: ${_param:cluster_vip_address}
- private_port: 35357
- public_address: ${_param:cluster_vip_address}
- public_port: 5000
- region: ${_param:openstack_region}
- database:
- engine: mysql
- host: ${_param:openstack_database_address}
- name: keystone
- password: ${_param:mysql_keystone_password}
- user: keystone
- tokens:
- engine: fernet
- expiration: 3600
- max_active_keys: 3
- location: /var/lib/keystone/fernet-keys
- message_queue:
- engine: rabbitmq
- members:
- - host: ${_param:openstack_message_queue_node01_address}
- - host: ${_param:openstack_message_queue_node02_address}
- - host: ${_param:openstack_message_queue_node03_address}
- user: openstack
- password: ${_param:rabbitmq_openstack_password}
- virtual_host: '/openstack'
- ha_queues: true
- auth_methods:
- - password
- - token
+++ /dev/null
-parameters:
- keystone:
- server:
- notification_format: cadf
+++ /dev/null
-parameters:
- keystone:
- server:
- notification:
- driver: messagingv2
- topics: "${_param:openstack_notification_topics}"
+++ /dev/null
-classes:
-- service.keystone.server.single
-parameters:
- _param:
- keystone_service_token: token
- keystone_admin_password: password
- mysql_admin_user: root
- mysql_admin_password: password
- mysql_keystone_password: password
- keystone:
- server:
- enabled: true
- version: ${_param:keystone_version}
- service_token: ${_param:keystone_service_token}
- service_tenant: service
- admin_tenant: admin
- admin_name: admin
- admin_password: ${_param:keystone_admin_password}
- admin_email: ${_param:admin_email}
- bind:
- address: ${_param:single_address}
- private_address: ${_param:single_address}
- private_port: 35357
- public_address: ${_param:single_address}
- public_port: 5000
- region: ${_param:openstack_region}
- database:
- engine: mysql
- host: ${_param:single_address}
- name: keystone
- password: ${_param:mysql_keystone_password}
- user: keystone
- tokens:
- engine: fernet
- expiration: 3600
- max_active_keys: 3
- location: /var/lib/keystone/fernet-keys
- message_queue:
- engine: rabbitmq
- host: ${_param:single_address}
- user: openstack
- password: ${_param:rabbitmq_openstack_password}
- virtual_host: '/openstack'
- ha_queues: true
- roles:
- - admin
- - Member
- - image_manager
- auth_methods:
- - password
- - token
- database:
- host: 127.0.0.1
+++ /dev/null
-classes:
-- service.shibboleth.server.cluster
-parameters:
- keystone:
- server:
- websso:
- protocol: saml2
- remote_id_attribute: Shib-Identity-Provider
- federation_driver: keystone.contrib.federation.backends.sql.Federation
- trusted_dashboard:
- - https://${_param:cluster_public_host}/auth/websso/
- - https://${_param:proxy_vip_host}/auth/websso/
- auth_methods:
- - saml2
- - external
+++ /dev/null
-classes:
-- system.apache.server.single
-parameters:
- keystone:
- server:
- service_name: apache2
- apache:
- server:
- enabled: true
- default_mpm: event
- site:
- keystone:
- enabled: true
- type: keystone
- name: wsgi
- host:
- name: ${linux:network:fqdn}
- modules:
- - wsgi
+++ /dev/null
-parameters:
- linux:
- network:
- interface:
- primary_interface:
- enabled: true
- name: ${_param:primary_interface}
- type: eth
- proto: dhcp
+++ /dev/null
-parameters:
- _param:
- primary_interface: eth1
- tenant_interface: eth2
- external_interface: eth3
- interface_mtu: 9000
- linux:
- network:
- bridge: openvswitch
- interface:
- primary_interface:
- enabled: true
- name: ${_param:primary_interface}
- type: eth
- mtu: ${_param:interface_mtu}
- proto: manual
- tenant_interface:
- enabled: true
- name: ${_param:tenant_interface}
- type: eth
- mtu: ${_param:interface_mtu}
- proto: manual
- external_interface:
- enabled: true
- name: ${_param:external_interface}
- type: eth
- mtu: ${_param:interface_mtu}
- proto: manual
- br-int:
- enabled: true
- mtu: ${_param:interface_mtu}
- type: ovs_bridge
- br-floating:
- enabled: true
- mtu: ${_param:interface_mtu}
- type: ovs_bridge
- float-to-ex:
- enabled: true
- type: ovs_port
- mtu: 65000
- bridge: br-floating
- br-mgmt:
- enabled: true
- type: bridge
- mtu: ${_param:interface_mtu}
- address: ${_param:single_address}
- netmask: 255.255.255.0
- use_interfaces:
- - ${_param:primary_interface}
- br-mesh:
- enabled: true
- type: bridge
- mtu: ${_param:interface_mtu}
- address: ${_param:tenant_address}
- netmask: 255.255.255.0
- use_interfaces:
- - ${_param:tenant_interface}
- br-ex:
- enabled: true
- type: bridge
- mtu: ${_param:interface_mtu}
- address: ${_param:external_address}
- netmask: 255.255.255.0
- use_interfaces:
- - ${_param:external_interface}
- use_ovs_ports:
- - float-to-ex
\ No newline at end of file
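
The `_param` block at the top of this file only provides defaults; a node or cluster class can override the NIC names and MTU for a different hardware profile without restating the bridge layout. A brief sketch (interface names and MTU illustrative):

parameters:
  _param:
    primary_interface: ens3    # illustrative names for hosts with predictable NIC naming
    tenant_interface: ens4
    external_interface: ens5
    interface_mtu: 1500        # e.g. when the fabric does not support jumbo frames
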
+++ /dev/null
-parameters:
- linux:
- network:
- interface:
- primary_interface:
- enabled: true
- name: ${_param:primary_interface}
- type: eth
- proto: manual
- address: ${_param:single_address}
\ No newline at end of file
+++ /dev/null
-parameters:
- linux:
- storage:
- enabled: true
- loopback:
- loop0:
- file: "/srv/disk0"
- size: ${_param:loopback_device_size}G
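
This class leaves `loopback_device_size` undefined, so the including model must set it. A one-line sketch (size illustrative, in gigabytes given the `G` suffix above):

parameters:
  _param:
    loopback_device_size: 20   # illustrative: backs /srv/disk0 with a 20G file
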
+++ /dev/null
-parameters:
- linux:
- system:
- haveged:
- enabled: true
+++ /dev/null
-parameters:
- linux:
- storage:
- swap:
- swap01:
- enabled: true
- engine: file
- device: /var/tmp/swap01
- size: 2048
-# opencontrail:
-# database:
-# max_heap_size: "1G"
-# heap_newsize: "200M"
-# rabbitmq:
-# server:
-# memory:
-# vm_high_watermark: 0.2
+++ /dev/null
-parameters:
- linux:
- system:
- motd:
- - warning: |
- #!/bin/sh
- printf "WARNING: This is private network.\n"
- printf " Unauthorized access is strictly prohibited.\n"
- printf "\n"
- - info: |
- #!/bin/sh
- printf -- "------------------------------------------------------\n"
- printf " Hostname | $(hostname)\n"
- printf " Domain | $(hostname -d)\n"
- printf " System | %s\n" "$(lsb_release -s -d)"
- printf " Kernel | %s\n" "$(uname -r)"
- printf " Uptime | %s\n" "$(uptime -p)"
- printf " Load Average | %s\n" "$(cat /proc/loadavg | awk '{print $1", "$2", "$3}')"
- printf -- "------------------------------------------------------\n"
+++ /dev/null
-parameters:
- linux:
- system:
- motd: |
- WARNING: This is a private network
- Unauthorized access is strictly prohibited
-
- ------------------------------------------------------
- Hostname | ${linux:system:name}
- Domain | ${linux:system:domain}
- ------------------------------------------------------
-
+++ /dev/null
-parameters:
- linux:
- system:
- prompt:
- default: \\n\\[\\033[0;37m\\]\\D{%y/%m/%d %H:%M:%S} ${linux:system:name}.${linux:system:domain}\\[\\e[0m\\]\\n\\[\\e[1;39m\\][\\u@\\h:\\w]\\[\\e[0m\\]
- bash:
- preserve_history: true
+++ /dev/null
-classes:
- - system.linux.system.prompt
-parameters:
- linux:
- system:
- prompt:
- default: \\n\\[\\033[0;37m\\]\\D{%y/%m/%d %H:%M:%S} ${linux:system:name}.${linux:system:domain}\\[\\e[0m\\]\\n\\[\\e[1;31m\\][\\u@\\h:\\w]\\[\\e[0m\\]
+++ /dev/null
-parameters:
- _param:
- linux_repo_cassandra_component: 21x
- linux:
- system:
- repo:
- cassandra:
- source: "deb http://www.apache.org/dist/cassandra/debian/ ${_param:linux_repo_cassandra_component} main"
- architectures: amd64
- key_url: "https://www.apache.org/dist/cassandra/KEYS"
+++ /dev/null
-parameters:
- linux:
- system:
- repo:
- docker:
- source: "deb https://apt.dockerproject.org/repo ubuntu-${_param:linux_system_codename} main"
- architectures: amd64
- key_id: 58118E89F3A912897C070ADBF76221572C52609D
- key_server: hkp://p80.pool.sks-keyservers.net:80
+++ /dev/null
-parameters:
- linux:
- system:
- repo:
- elasticsearch:
- source: "deb http://packages.elastic.co/elasticsearch/2.x/debian stable main"
- architectures: amd64
- key_url: "https://packages.elastic.co/GPG-KEY-elasticsearch"
\ No newline at end of file
+++ /dev/null
-parameters:
- _param:
- glusterfs_version: 3.8
- linux:
- system:
- repo:
- glusterfs-ppa:
- source: "deb http://ppa.launchpad.net/gluster/glusterfs-${_param:glusterfs_version}/ubuntu ${_param:linux_system_codename} main"
- architectures: amd64
- key_id: 3FE869A9
- key_server: keyserver.ubuntu.com
+++ /dev/null
-parameters:
- linux:
- system:
- repo:
- grafana:
- enabled: true
- source: 'deb https://packagecloud.io/grafana/stable/debian/ jessie main'
- key_url: 'https://packagecloud.io/gpg.key'
+++ /dev/null
-parameters:
- linux:
- system:
- repo:
- influxdb:
- source: 'deb [arch=amd64] https://repos.influxdata.com/ubuntu ${_param:linux_system_codename} stable'
- architectures: amd64
- key_url: 'https://repos.influxdata.com/influxdb.key'
\ No newline at end of file
+++ /dev/null
-parameters:
- linux:
- system:
- repo:
- kibana:
- source: "deb https://packages.elastic.co/kibana/4.6/debian stable main"
- key_url: "https://packages.elastic.co/GPG-KEY-elasticsearch"
+++ /dev/null
-parameters:
- _param:
- apt_mk_version: stable
- linux:
- system:
- repo:
- mcp_extra:
- source: "deb [arch=amd64] http://apt-mk.mirantis.com/${_param:linux_system_codename}/ ${_param:apt_mk_version} extra"
- architectures: amd64
- key_url: "http://apt-mk.mirantis.com/public.gpg"
- clean_file: true
- pin:
- - pin: 'release a=${_param:apt_mk_version}'
- priority: 1100
- package: '*'
+++ /dev/null
-parameters:
- _param:
- apt_mk_version: stable
- linux:
- system:
- repo:
- mirantis_openstack:
- source: "deb http://mirror.fuel-infra.org/mcp-repos/${_param:openstack_version}/${_param:linux_system_codename} ${_param:openstack_version} main"
- architectures: amd64
- key_url: "http://mirror.fuel-infra.org/mcp-repos/${_param:openstack_version}/${_param:linux_system_codename}/archive-mcp${_param:openstack_version}.key"
- pin:
- - pin: 'release a=${_param:openstack_version}'
- priority: 1100
- package: '*'
- mirantis_openstack_hotfix:
- source: "deb http://mirror.fuel-infra.org/mcp-repos/${_param:openstack_version}/${_param:linux_system_codename} ${_param:openstack_version}-hotfix main"
- architectures: amd64
- key_url: "http://mirror.fuel-infra.org/mcp-repos/${_param:openstack_version}/${_param:linux_system_codename}/archive-mcp${_param:openstack_version}.key"
- pin:
- - pin: 'release a=${_param:openstack_version}-hotfix'
- priority: 1100
- package: '*'
- mirantis_openstack_security:
- source: "deb http://mirror.fuel-infra.org/mcp-repos/${_param:openstack_version}/${_param:linux_system_codename} ${_param:openstack_version}-security main"
- architectures: amd64
- key_url: "http://mirror.fuel-infra.org/mcp-repos/${_param:openstack_version}/${_param:linux_system_codename}/archive-mcp${_param:openstack_version}.key"
- pin:
- - pin: 'release a=${_param:openstack_version}-security'
- priority: 1100
- package: '*'
- mirantis_openstack_updates:
- source: "deb http://mirror.fuel-infra.org/mcp-repos/${_param:openstack_version}/${_param:linux_system_codename} ${_param:openstack_version}-updates main"
- architectures: amd64
- key_url: "http://mirror.fuel-infra.org/mcp-repos/${_param:openstack_version}/${_param:linux_system_codename}/archive-mcp${_param:openstack_version}.key"
- pin:
- pin: 'release a=${_param:openstack_version}-updates'
- priority: 1100
- package: '*'
- mirantis_openstack_holdback:
- source: "deb http://mirror.fuel-infra.org/mcp-repos/${_param:openstack_version}/${_param:linux_system_codename} ${_param:openstack_version}-holdback main"
- architectures: amd64
- key_url: "http://mirror.fuel-infra.org/mcp-repos/${_param:openstack_version}/${_param:linux_system_codename}/archive-mcp${_param:openstack_version}.key"
- pin:
- - pin: 'release a=${_param:openstack_version}-holdback'
- priority: 1100
- package: '*'
- mk_openstack:
- source: "deb [arch=amd64] http://apt-mk.mirantis.com/${_param:linux_system_codename}/ ${_param:apt_mk_version} ${_param:openstack_version}"
- architectures: amd64
- key_url: "http://apt-mk.mirantis.com/public.gpg"
- pin:
- - pin: 'release a=${_param:apt_mk_version}'
- priority: 1100
- package: '*'
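
All of these sources pin their packages at apt priority 1100, which outranks the default priority of 500 given to the stock Ubuntu archives, so the MCP mirrors win version comparisons. The consuming model only needs to resolve the interpolated parameters; a sketch with illustrative values:

parameters:
  _param:
    openstack_version: ocata         # illustrative OpenStack release name
    linux_system_codename: xenial    # illustrative Ubuntu codename
    apt_mk_version: stable
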
+++ /dev/null
-parameters:
- _param:
- apt_mk_version: stable
- linux:
- system:
- repo:
- mcp_salt:
- source: "deb [arch=amd64] http://apt-mk.mirantis.com/${_param:linux_system_codename}/ ${_param:apt_mk_version} salt"
- architectures: amd64
- key_url: "http://apt-mk.mirantis.com/public.gpg"
- clean_file: true
- pin:
- - pin: 'release a=${_param:apt_mk_version}'
- priority: 1100
- package: '*'
+++ /dev/null
-parameters:
- _param:
- salt_version: 2016.3
- linux:
- system:
- repo:
- salt:
- source: "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/${_param:salt_version} xenial main"
- architectures: amd64
- key_url: "http://repo.saltstack.com/apt/ubuntu/16.04/amd64/${_param:salt_version}/SALTSTACK-GPG-KEY.pub"
\ No newline at end of file
+++ /dev/null
-parameters:
- linux:
- system:
- repo:
- sensu:
- source: "deb https://sensu.global.ssl.fastly.net/apt ${_param:linux_system_codename} main"
- architectures: amd64
- key_url: "https://sensu.global.ssl.fastly.net/apt/pubkey.gpg"
\ No newline at end of file
+++ /dev/null
-parameters:
- linux:
- system:
- repo:
- ubuntu:
- source: "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ ${_param:linux_system_codename} main restricted universe"
- architectures: amd64
- default: true
- key_id: 437D05B5
- key_server: keyserver.ubuntu.com
- ubuntu_updates:
- source: "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ ${_param:linux_system_codename}-updates main restricted universe"
- architectures: amd64
- default: true
- key_id: 437D05B5
- key_server: keyserver.ubuntu.com
- ubuntu_security:
- source: "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ ${_param:linux_system_codename}-security main restricted universe"
- architectures: amd64
- default: true
- key_id: 437D05B5
- key_server: keyserver.ubuntu.com
\ No newline at end of file
+++ /dev/null
-classes:
-- service.linux.system
-- service.salt.minion.master
-- system.ntp.client.single
-parameters:
- linux:
- system:
- package:
- python-msgpack:
- version: latest
- cloud-init:
- version: purged
- mcelog:
- version: latest
- kernel:
- modules:
- - nf_conntrack
- sysctl:
- net.ipv4.tcp_keepalive_intvl: 3
- net.ipv4.tcp_keepalive_time: 30
- net.ipv4.tcp_keepalive_probes: 8
- fs.file-max: 124165
- net.core.somaxconn: 4096
- vm.swappiness: 10
- net.nf_conntrack_max: 1048576
- net.ipv4.tcp_retries2: 5
- net.ipv4.tcp_max_syn_backlog: 8192
- net.ipv4.neigh.default.gc_thresh1: 4096
- net.ipv4.neigh.default.gc_thresh2: 8192
- net.ipv4.neigh.default.gc_thresh3: 16384
- net.core.netdev_max_backlog: 261144
- kernel.panic: 60
- cpu:
- governor: performance
- timezone: UTC
- locale:
- en_US.UTF-8:
- enabled: true
- default: true
- cs_CZ.UTF-8:
- enabled: true
- limit:
- default:
- enabled: true
- domain: "*"
- limits:
- - type: hard
- item: nofile
- value: 307200
- - type: soft
- item: nofile
- value: 307200
- - type: soft
- item: nproc
- value: 307200
- - type: hard
- item: nproc
- value: 307200
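
Because reclass deep-merges pillar data, later classes can override any single key in this baseline without restating the rest. A minimal sketch of a role-specific tweak (class path assumed, value illustrative):

classes:
- system.linux.system              # assuming this file is exposed under that name
parameters:
  linux:
    system:
      sysctl:
        vm.swappiness: 1           # overrides the baseline value of 10 for this role
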
+++ /dev/null
-parameters:
- _param:
- sudo_shells:
- - /bin/sh
- - /bin/ksh
- - /bin/bash
- - /bin/rbash
- - /bin/dash
- - /bin/zsh
- - /bin/csh
- - /bin/fish
- - /bin/tcsh
- - /usr/bin/login
- - /usr/bin/su
- - /usr/su
- sudo_restricted_su:
- - /bin/vi* /etc/sudoers*
- - /bin/nano /etc/sudoers*
- - /bin/emacs /etc/sudoers*
- - /bin/su - root
- - /bin/su -
- - /bin/su
- - /usr/sbin/visudo
- sudo_coreutils_safe:
- - /usr/bin/less
- sudo_rabbitmq_safe:
- - /usr/sbin/rabbitmqctl status
- - /usr/sbin/rabbitmqctl cluster_status
- - /usr/sbin/rabbitmqctl list_queues*
- sudo_salt_safe:
- - /usr/bin/salt * state*
- - /usr/bin/salt * service*
- - /usr/bin/salt * pillar*
- - /usr/bin/salt * grains*
- - /usr/bin/salt * saltutil*
- - /usr/bin/salt * test.ping
- - /usr/bin/salt-call state*
- - /usr/bin/salt-call service*
- - /usr/bin/salt-call pillar*
- - /usr/bin/salt-call grains*
- - /usr/bin/salt-call saltutil*
- sudo_salt_trusted:
- - /usr/bin/salt*
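
These lists are plain `_param` building blocks and have no effect until a sudoers definition references them. A sketch of how they might be wired up, assuming the linux formula's `linux:system:sudo` schema (the alias and user names are illustrative):

parameters:
  linux:
    system:
      sudo:
        enabled: true
        aliases:
          command:
            SUDO_SALT_SAFE: ${_param:sudo_salt_safe}
            SUDO_RESTRICTED_SU: ${_param:sudo_restricted_su}
        users:
          operator:                # hypothetical unprivileged operator account
            hosts:
            - ALL
            commands:
            - SUDO_SALT_SAFE
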
+++ /dev/null
-classes:
- - system.maas.region.single
- - system.keepalived.server.cluster_maas
-parameters:
- _param:
- cluster_vip_address: 10.0.175.80
- maas:
- cluster:
- enabled: true
- role: ${_param:maas_cluster_role}
- region:
- host: ${_param:cluster_vip_address}
\ No newline at end of file
+++ /dev/null
-classes:
- - service.maas.region.single
- - service.maas.region.vendor_repo
-parameters:
- maas:
- region:
- theme: mirantis
- enable_iframe: True
- bind:
- host: ${_param:cluster_vip_address}
- database:
- engine: postgresql
- host: 10.0.175.10
- name: maasdb
- password: password
- username: maas
\ No newline at end of file
+++ /dev/null
-classes:
-- service.memcached.server.single
+++ /dev/null
-classes:
-- service.mongodb.server.cluster
-parameters:
- _param:
- mongodb_server_replica_set: mongodb
- mongodb_master: mdb01
- mongodb_server_members:
- - host: mdb01
- priority: 2
- - host: mdb02
- - host: mdb03
+++ /dev/null
-parameters:
- mongodb:
- server:
- database:
- ceilometer:
- enabled: true
- password: ${_param:mongodb_ceilometer_password}
- users:
- - name: ceilometer
- password: ${_param:mongodb_ceilometer_password}
+++ /dev/null
-classes:
-- service.mongodb.server.single
-parameters:
- _param:
- mongodb_server_replica_set: ceilometer
+++ /dev/null
-parameters:
- mysql:
- client:
- server:
- database:
- database:
- aodh:
- encoding: utf8
- users:
- - name: aodh
- password: ${_param:mysql_aodh_password}
- host: '%'
- rights: all
- - name: aodh
- password: ${_param:mysql_aodh_password}
- host: ${_param:single_address}
- rights: all
\ No newline at end of file
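
Each of the database definitions that follow repeats this shape: the mysql client creates the schema with the given encoding and grants the listed users full rights, once for the `'%'` wildcard host and once for the node's own address. Extending the pattern to a new service is mechanical; a sketch for a hypothetical `barbican` database (the password parameter is likewise hypothetical):

parameters:
  mysql:
    client:
      server:
        database:
          database:
            barbican:
              encoding: utf8
              users:
              - name: barbican
                password: ${_param:mysql_barbican_password}   # hypothetical parameter
                host: '%'
                rights: all
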
+++ /dev/null
-parameters:
- mysql:
- client:
- server:
- database:
- database:
- ceilometer:
- encoding: utf8
- users:
- - name: ceilometer
- password: ${_param:mysql_ceilometer_password}
- host: '%'
- rights: all
- - name: ceilometer
- password: ${_param:mysql_ceilometer_password}
- host: ${_param:single_address}
- rights: all
+++ /dev/null
-parameters:
- mysql:
- client:
- server:
- database:
- database:
- cinder:
- encoding: utf8
- users:
- - name: cinder
- password: ${_param:mysql_cinder_password}
- host: '%'
- rights: all
- - name: cinder
- password: ${_param:mysql_cinder_password}
- host: ${_param:single_address}
- rights: all
+++ /dev/null
-parameters:
- mysql:
- client:
- server:
- database:
- database:
- designate:
- encoding: utf8
- users:
- - name: designate
- password: ${_param:mysql_designate_password}
- host: '%'
- rights: all
- - name: designate
- password: ${_param:mysql_designate_password}
- host: ${_param:single_address}
- rights: all
\ No newline at end of file
+++ /dev/null
-parameters:
- mysql:
- client:
- server:
- database:
- database:
- designate_pool_manager:
- encoding: utf8
- users:
- - name: designate
- password: ${_param:mysql_designate_password}
- host: '%'
- rights: all
- - name: designate
- password: ${_param:mysql_designate_password}
- host: ${_param:single_address}
- rights: all
+++ /dev/null
-parameters:
- mysql:
- client:
- server:
- database:
- database:
- glance:
- encoding: utf8
- users:
- - name: glance
- password: ${_param:mysql_glance_password}
- host: '%'
- rights: all
- - name: glance
- password: ${_param:mysql_glance_password}
- host: ${_param:single_address}
- rights: all
+++ /dev/null
-parameters:
- mysql:
- client:
- server:
- database:
- database:
- grafana:
- encoding: utf8
- users:
- - name: grafana
- password: ${_param:mysql_grafana_password}
- host: '%'
- rights: all
- - name: grafana
- password: ${_param:mysql_grafana_password}
- host: ${_param:single_address}
- rights: all
+++ /dev/null
-parameters:
- mysql:
- client:
- server:
- database:
- database:
- heat:
- encoding: utf8
- users:
- - name: heat
- password: ${_param:mysql_heat_password}
- host: '%'
- rights: all
- - name: heat
- password: ${_param:mysql_heat_password}
- host: ${_param:single_address}
- rights: all
+++ /dev/null
-parameters:
- mysql:
- client:
- server:
- database:
- database:
- keystone:
- encoding: utf8
- users:
- - name: keystone
- password: ${_param:mysql_keystone_password}
- host: '%'
- rights: all
- - name: keystone
- password: ${_param:mysql_keystone_password}
- host: ${_param:single_address}
- rights: all
+++ /dev/null
-parameters:
- mysql:
- client:
- server:
- database:
- database:
- murano:
- encoding: utf8
- users:
- - name: murano
- password: ${_param:mysql_murano_password}
- host: '%'
- rights: all
- - name: murano
- password: ${_param:mysql_murano_password}
- host: ${_param:single_address}
- rights: all
\ No newline at end of file
+++ /dev/null
-parameters:
- mysql:
- client:
- server:
- database:
- database:
- neutron:
- encoding: utf8
- users:
- - name: neutron
- password: ${_param:mysql_neutron_password}
- host: '%'
- rights: all
- - name: neutron
- password: ${_param:mysql_neutron_password}
- host: ${_param:single_address}
- rights: all
+++ /dev/null
-parameters:
- mysql:
- client:
- server:
- database:
- database:
- nova:
- encoding: utf8
- users:
- - name: nova
- password: ${_param:mysql_nova_password}
- host: '%'
- rights: all
- - name: nova
- password: ${_param:mysql_nova_password}
- host: ${_param:single_address}
- rights: all
+++ /dev/null
-parameters:
- mysql:
- client:
- server:
- database:
- database:
- nova_api:
- encoding: utf8
- users:
- - name: nova
- password: ${_param:mysql_nova_password}
- host: '%'
- rights: all
- - name: nova
- password: ${_param:mysql_nova_password}
- host: ${_param:single_address}
- rights: all
- nova_cell0:
- encoding: utf8
- users:
- - name: nova
- password: ${_param:mysql_nova_password}
- host: '%'
- rights: all
- - name: nova
- password: ${_param:mysql_nova_password}
- host: ${_param:single_address}
- rights: all
+++ /dev/null
-parameters:
- mysql:
- client:
- server:
- database:
- database:
- sahara:
- encoding: utf8
- users:
- - name: sahara
- password: ${_param:mysql_sahara_password}
- host: '%'
- rights: all
- - name: sahara
- password: ${_param:mysql_sahara_password}
- host: ${_param:single_address}
- rights: all
+++ /dev/null
-classes:
-- system.mysql.client.database.aodh
-parameters:
- mysql:
- client:
- enabled: true
- server:
- database:
- database:
- aodh:
- initial_data:
- engine: backupninja
- source: ${_param:backupninja_backup_host}
- host: ${linux:network:fqdn}
- database: aodh
+++ /dev/null
-classes:
-- system.mysql.client.database.ceilometer
-parameters:
- mysql:
- client:
- enabled: true
- server:
- database:
- database:
- ceilometer:
- initial_data:
- engine: backupninja
- source: ${_param:backupninja_backup_host}
- host: ${linux:network:fqdn}
- database: ceilometer
+++ /dev/null
-classes:
-- system.mysql.client.database.cinder
-parameters:
- mysql:
- client:
- enabled: true
- server:
- database:
- database:
- cinder:
- initial_data:
- engine: backupninja
- source: ${_param:backupninja_backup_host}
- host: ${linux:network:fqdn}
- database: cinder
+++ /dev/null
-classes:
-- system.mysql.client.database.designate
-parameters:
- mysql:
- client:
- enabled: true
- server:
- database:
- database:
- designate:
- initial_data:
- engine: backupninja
- source: ${_param:backupninja_backup_host}
- host: ${linux:network:fqdn}
- database: designate
+++ /dev/null
-classes:
-- system.mysql.client.database.designate_pool_manager
-parameters:
- mysql:
- client:
- enabled: true
- server:
- database:
- database:
- designate_pool_manager:
- initial_data:
- engine: backupninja
- source: ${_param:backupninja_backup_host}
- host: ${linux:network:fqdn}
- database: designate_pool_manager
+++ /dev/null
-classes:
-- system.mysql.client.database.glance
-parameters:
- mysql:
- client:
- enabled: true
- server:
- database:
- database:
- glance:
- initial_data:
- engine: backupninja
- source: ${_param:backupninja_backup_host}
- host: ${linux:network:fqdn}
- database: glance
+++ /dev/null
-classes:
-- system.mysql.client.database.grafana
-parameters:
- mysql:
- client:
- enabled: true
- server:
- database:
- database:
- grafana:
- initial_data:
- engine: backupninja
- source: ${_param:backupninja_backup_host}
- host: ${linux:network:fqdn}
- database: grafana
+++ /dev/null
-classes:
-- system.mysql.client.database.heat
-parameters:
- mysql:
- client:
- enabled: true
- server:
- database:
- database:
- heat:
- initial_data:
- engine: backupninja
- source: ${_param:backupninja_backup_host}
- host: ${linux:network:fqdn}
- database: heat
+++ /dev/null
-classes:
-- system.mysql.client.database.keystone
-parameters:
- mysql:
- client:
- enabled: true
- server:
- database:
- database:
- keystone:
- initial_data:
- engine: backupninja
- source: ${_param:backupninja_backup_host}
- host: ${linux:network:fqdn}
- database: keystone
+++ /dev/null
-classes:
-- system.mysql.client.database.murano
-parameters:
- mysql:
- client:
- enabled: true
- server:
- database:
- database:
- murano:
- initial_data:
- engine: backupninja
- source: ${_param:backupninja_backup_host}
- host: ${linux:network:fqdn}
- database: murano
+++ /dev/null
-classes:
-- system.mysql.client.database.neutron
-parameters:
- mysql:
- client:
- enabled: true
- server:
- database:
- database:
- neutron:
- initial_data:
- engine: backupninja
- source: ${_param:backupninja_backup_host}
- host: ${linux:network:fqdn}
- database: neutron
+++ /dev/null
-classes:
-- system.mysql.client.database.nova
-parameters:
- mysql:
- client:
- enabled: true
- server:
- database:
- database:
- nova:
- initial_data:
- engine: backupninja
- source: ${_param:backupninja_backup_host}
- host: ${linux:network:fqdn}
- database: nova
+++ /dev/null
-classes:
-- system.mysql.client.database.nova_api
-parameters:
- mysql:
- client:
- enabled: true
- server:
- database:
- database:
- nova_api:
- initial_data:
- engine: backupninja
- source: ${_param:backupninja_backup_host}
- host: ${linux:network:fqdn}
- database: nova_api
- nova_cell0:
- initial_data:
- engine: backupninja
- source: ${_param:backupninja_backup_host}
- host: ${linux:network:fqdn}
- database: nova_cell0
+++ /dev/null
-classes:
-- system.mysql.client.database.sahara
-parameters:
- mysql:
- client:
- enabled: true
- server:
- database:
- database:
- sahara:
- initial_data:
- engine: backupninja
- source: ${_param:backupninja_backup_host}
- host: ${linux:network:fqdn}
- database: sahara
+++ /dev/null
-parameters:
- mysql:
- client:
- server:
- database:
- database:
- aodh_upgrade:
- encoding: utf8
- users:
- - name: aodh
- password: ${_param:mysql_aodh_password}
- host: '%'
- rights: all
- - name: aodh
- password: ${_param:mysql_aodh_password}
- host: ${_param:single_address}
- rights: all
- initial_data:
- engine: backupninja
- source: ${_param:backupninja_backup_host}
- host: ${linux:network:fqdn}
- database: aodh
-
+++ /dev/null
-parameters:
- mysql:
- client:
- server:
- database:
- database:
- ceilometer_upgrade:
- encoding: utf8
- users:
- - name: ceilometer
- password: ${_param:mysql_ceilometer_password}
- host: '%'
- rights: all
- - name: ceilometer
- password: ${_param:mysql_ceilometer_password}
- host: ${_param:single_address}
- rights: all
- initial_data:
- engine: backupninja
- source: ${_param:backupninja_backup_host}
- host: ${linux:network:fqdn}
- database: ceilometer
-
+++ /dev/null
-parameters:
- mysql:
- client:
- server:
- database:
- database:
- cinder_upgrade:
- encoding: utf8
- users:
- - name: cinder
- password: ${_param:mysql_cinder_password}
- host: '%'
- rights: all
- - name: cinder
- password: ${_param:mysql_cinder_password}
- host: ${_param:single_address}
- rights: all
- initial_data:
- engine: backupninja
- source: ${_param:backupninja_backup_host}
- host: ${linux:network:fqdn}
- database: cinder
\ No newline at end of file
+++ /dev/null
-parameters:
- mysql:
- client:
- server:
- database:
- database:
- designate_upgrade:
- encoding: utf8
- users:
- - name: designate
- password: ${_param:mysql_designate_password}
- host: '%'
- rights: all
- - name: designate
- password: ${_param:mysql_designate_password}
- host: ${_param:single_address}
- rights: all
- initial_data:
- engine: backupninja
- source: ${_param:backupninja_backup_host}
- host: ${linux:network:fqdn}
- database: designate
\ No newline at end of file
+++ /dev/null
-parameters:
- mysql:
- client:
- server:
- database:
- database:
- designate_pool_manager_upgrade:
- encoding: utf8
- users:
- - name: designate
- password: ${_param:mysql_designate_password}
- host: '%'
- rights: all
- - name: designate
- password: ${_param:mysql_designate_password}
- host: ${_param:single_address}
- rights: all
- initial_data:
- engine: backupninja
- source: ${_param:backupninja_backup_host}
- host: ${linux:network:fqdn}
- database: designate_pool_manager
\ No newline at end of file
+++ /dev/null
-parameters:
- mysql:
- client:
- server:
- database:
- database:
- glance_upgrade:
- encoding: utf8
- users:
- - name: glance
- password: ${_param:mysql_glance_password}
- host: '%'
- rights: all
- - name: glance
- password: ${_param:mysql_glance_password}
- host: ${_param:single_address}
- rights: all
- initial_data:
- engine: backupninja
- source: ${_param:backupninja_backup_host}
- host: ${linux:network:fqdn}
- database: glance
+++ /dev/null
-parameters:
- mysql:
- client:
- server:
- database:
- database:
- grafana_upgrade:
- encoding: utf8
- users:
- - name: grafana
- password: ${_param:mysql_grafana_password}
- host: '%'
- rights: all
- - name: grafana
- password: ${_param:mysql_grafana_password}
- host: ${_param:single_address}
- rights: all
- initial_data:
- engine: backupninja
- source: ${_param:backupninja_backup_host}
- host: ${linux:network:fqdn}
- database: grafana
\ No newline at end of file
+++ /dev/null
-parameters:
- mysql:
- client:
- server:
- database:
- database:
- heat_upgrade:
- encoding: utf8
- users:
- - name: heat
- password: ${_param:mysql_heat_password}
- host: '%'
- rights: all
- - name: heat
- password: ${_param:mysql_heat_password}
- host: ${_param:single_address}
- rights: all
- initial_data:
- engine: backupninja
- source: ${_param:backupninja_backup_host}
- host: ${linux:network:fqdn}
- database: heat
\ No newline at end of file
+++ /dev/null
-parameters:
- mysql:
- client:
- server:
- database:
- database:
- keystone_upgrade:
- encoding: utf8
- users:
- - name: keystone
- password: ${_param:mysql_keystone_password}
- host: '%'
- rights: all
- - name: keystone
- password: ${_param:mysql_keystone_password}
- host: ${_param:single_address}
- rights: all
- initial_data:
- engine: backupninja
- source: ${_param:backupninja_backup_host}
- host: ${linux:network:fqdn}
- database: keystone
\ No newline at end of file
+++ /dev/null
-parameters:
- mysql:
- client:
- server:
- database:
- database:
- murano_upgrade:
- encoding: utf8
- users:
- - name: murano
- password: ${_param:mysql_murano_password}
- host: '%'
- rights: all
- - name: murano
- password: ${_param:mysql_murano_password}
- host: ${_param:single_address}
- rights: all
- initial_data:
- engine: backupninja
- source: ${_param:backupninja_backup_host}
- host: ${linux:network:fqdn}
- database: murano
\ No newline at end of file
+++ /dev/null
-parameters:
- mysql:
- client:
- server:
- database:
- database:
- neutron_upgrade:
- encoding: utf8
- users:
- - name: neutron
- password: ${_param:mysql_neutron_password}
- host: '%'
- rights: all
- - name: neutron
- password: ${_param:mysql_neutron_password}
- host: ${_param:single_address}
- rights: all
- initial_data:
- engine: backupninja
- source: ${_param:backupninja_backup_host}
- host: ${linux:network:fqdn}
- database: neutron
\ No newline at end of file
+++ /dev/null
-parameters:
- mysql:
- client:
- server:
- database:
- database:
- nova_upgrade:
- encoding: utf8
- users:
- - name: nova
- password: ${_param:mysql_nova_password}
- host: '%'
- rights: all
- - name: nova
- password: ${_param:mysql_nova_password}
- host: ${_param:single_address}
- rights: all
- initial_data:
- engine: backupninja
- source: ${_param:backupninja_backup_host}
- host: ${linux:network:fqdn}
- database: nova
\ No newline at end of file
+++ /dev/null
-parameters:
- mysql:
- client:
- server:
- database:
- database:
- nova_upgrade_api:
- encoding: utf8
- users:
- - name: nova
- password: ${_param:mysql_nova_password}
- host: '%'
- rights: all
- - name: nova
- password: ${_param:mysql_nova_password}
- host: ${_param:single_address}
- rights: all
- initial_data:
- engine: backupninja
- source: ${_param:backupninja_backup_host}
- host: ${linux:network:fqdn}
- database: nova_api
- nova_upgrade_cell0:
- encoding: utf8
- users:
- - name: nova
- password: ${_param:mysql_nova_password}
- host: '%'
- rights: all
- - name: nova
- password: ${_param:mysql_nova_password}
- host: ${_param:single_address}
- rights: all
- initial_data:
- engine: backupninja
- source: ${_param:backupninja_backup_host}
- host: ${linux:network:fqdn}
- database: nova_cell0
\ No newline at end of file
+++ /dev/null
-parameters:
- mysql:
- client:
- server:
- database:
- database:
- sahara_upgrade:
- encoding: utf8
- users:
- - name: sahara
- password: ${_param:mysql_sahara_password}
- host: '%'
- rights: all
- - name: sahara
- password: ${_param:mysql_sahara_password}
- host: ${_param:single_address}
- rights: all
- initial_data:
- engine: backupninja
- source: ${_param:backupninja_backup_host}
- host: ${linux:network:fqdn}
- database: sahara
\ No newline at end of file
+++ /dev/null
-parameters:
- _param:
- mysql_client_host: localhost
- mysql:
- client:
- enabled: true
- server:
- database:
- admin:
- host: ${_param:mysql_client_host}
- port: 3306
- user: ${_param:mysql_admin_user}
- password: ${_param:mysql_admin_password}
- encoding: utf8
+++ /dev/null
-classes:
-- system.mysql.client
-- system.mysql.client.database.aodh
-- system.mysql.client.database.ceilometer
-- system.mysql.client.database.cinder
-- system.mysql.client.database.glance
-- system.mysql.client.database.heat
-- system.mysql.client.database.keystone
-- system.mysql.client.database.nova
-- system.mysql.client.database.nova_api
-- system.mysql.client.database.neutron
+++ /dev/null
-classes:
-- system.mysql.client.database_init.aodh
-- system.mysql.client.database_init.ceilometer
-- system.mysql.client.database_init.cinder
-- system.mysql.client.database_init.glance
-- system.mysql.client.database_init.heat
-- system.mysql.client.database_init.keystone
-- system.mysql.client.database_init.nova
-- system.mysql.client.database_init.nova_api
-- system.mysql.client.database_init.neutron
-parameters:
- _param:
- mysql_client_host: localhost
- mysql:
- client:
- enabled: true
- server:
- database:
- admin:
- host: ${_param:mysql_client_host}
- port: 3306
- user: ${_param:mysql_admin_user}
- password: ${_param:mysql_admin_password}
- encoding: utf8
+++ /dev/null
-classes:
-- system.mysql.client.database_upgrade.aodh
-- system.mysql.client.database_upgrade.ceilometer
-- system.mysql.client.database_upgrade.cinder
-- system.mysql.client.database_upgrade.glance
-- system.mysql.client.database_upgrade.heat
-- system.mysql.client.database_upgrade.keystone
-- system.mysql.client.database_upgrade.nova
-- system.mysql.client.database_upgrade.nova_api
-- system.mysql.client.database_upgrade.neutron
-- service.mysql.client.single
-parameters:
- _param:
- mysql_client_host: localhost
- mysql:
- client:
- enabled: true
- server:
- database:
- admin:
- host: ${_param:mysql_client_host}
- port: 3306
- user: ${_param:mysql_admin_user}
- password: ${_param:mysql_admin_password}
- encoding: utf8
+++ /dev/null
-classes:
-- service.neutron.compute.single
-parameters:
- neutron:
- compute:
- dvr: ${_param:neutron_compute_dvr}
- agent_mode: ${_param:neutron_compute_agent_mode}
- external_access: ${_param:neutron_compute_external_access}
- backend:
- tenant_network_types: ${_param:neutron_tenant_network_types}"
- message_queue:
- members:
- - host: ${_param:openstack_message_queue_node01_address}
- - host: ${_param:openstack_message_queue_node02_address}
- - host: ${_param:openstack_message_queue_node03_address}
\ No newline at end of file
+++ /dev/null
-parameters:
- _param:
- compute_dpdk_driver: uio
- linux:
- network:
- bridge: openvswitch
- dpdk:
- enabled: true
- driver: "${_param:compute_dpdk_driver}"
- openvswitch:
- pmd_cpu_mask: "${_param:compute_ovs_pmd_cpu_mask}"
- dpdk_socket_mem: "${_param:compute_ovs_dpdk_socket_mem}"
- dpdk_lcore_mask: "${_param:compute_ovs_dpdk_lcore_mask}"
- memory_channels: "${_param:compute_ovs_memory_channels}"
- neutron:
- compute:
- dpdk: True
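
This class interpolates four OVS-DPDK tuning parameters without defining them, so the including cluster model has to supply values sized to the host's CPU and NUMA layout. A sketch (all masks and sizes illustrative):

parameters:
  _param:
    compute_ovs_pmd_cpu_mask: "0x6"           # illustrative: PMD threads on cores 1-2
    compute_ovs_dpdk_socket_mem: "1024,1024"  # illustrative: 1 GB of hugepages per NUMA socket
    compute_ovs_dpdk_lcore_mask: "0x1"        # illustrative: DPDK lcore thread on core 0
    compute_ovs_memory_channels: "2"          # illustrative: memory channels per socket
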
+++ /dev/null
-parameters:
- neutron:
- compute:
- backend:
- sriov:
- sriov_nic01:
- devname: ${_param:sriov_nic01_device_name}
- physical_network: ${_param:sriov_nic01_physical_network}
+++ /dev/null
-parameters:
- neutron:
- compute:
- notification:
- driver: messagingv2
- topics: "${_param:openstack_notification_topics}"
+++ /dev/null
-classes:
-- service.keepalived.cluster.single
-- service.haproxy.proxy.single
-- service.neutron.control.cluster
-- system.haproxy.proxy.listen.openstack.neutron
-parameters:
- mysql:
- server:
- database:
- neutron:
- encoding: utf8
- users:
- - name: neutron
- password: ${_param:mysql_neutron_password}
- host: '%'
- rights: all
- - name: neutron
- password: ${_param:mysql_neutron_password}
- host: ${_param:cluster_local_address}
- rights: all
- haproxy:
- proxy:
- listen:
- neutron_api:
- type: openstack-service
- service_name: neutron
- binds:
- - address: ${_param:cluster_vip_address}
- port: 9696
- servers:
- - name: ctl01
- host: ${_param:cluster_node01_address}
- port: 9696
- params: check inter 10s fastinter 2s downinter 3s rise 3 fall 3
- - name: ctl02
- host: ${_param:cluster_node02_address}
- port: 9696
- params: check inter 10s fastinter 2s downinter 3s rise 3 fall 3
- - name: ctl03
- host: ${_param:cluster_node03_address}
- port: 9696
- params: check inter 10s fastinter 2s downinter 3s rise 3 fall 3
- neutron:
- server:
- plugin: contrail
+++ /dev/null
-parameters:
- neutron:
- server:
- audit:
- enabled: true
+++ /dev/null
-parameters:
- neutron:
- server:
- notification:
- driver: messagingv2
- topics: "${_param:openstack_notification_topics}"
+++ /dev/null
-classes:
-- service.keepalived.cluster.single
-- service.haproxy.proxy.single
-- service.neutron.control.cluster
-- system.haproxy.proxy.listen.openstack.neutron
-parameters:
- mysql:
- server:
- database:
- neutron:
- encoding: utf8
- users:
- - name: neutron
- password: ${_param:mysql_neutron_password}
- host: '%'
- rights: all
- - name: neutron
- password: ${_param:mysql_neutron_password}
- host: ${_param:cluster_local_address}
- rights: all
- neutron:
- server:
- dns_domain: ${_param:cluster_domain}
- database:
- host: ${_param:openstack_database_address}
- identity:
- region: ${_param:openstack_region}
- host: ${_param:openstack_control_address}
- message_queue:
- members:
- - host: ${_param:openstack_message_queue_node01_address}
- - host: ${_param:openstack_message_queue_node02_address}
- - host: ${_param:openstack_message_queue_node03_address}
- compute:
- host: ${_param:openstack_control_address}
- region: ${_param:openstack_region}
- backend:
- engine: contrail
- host: ${_param:opencontrail_control_address}
- port: 8082
- user: admin
- password: ${_param:keystone_admin_password}
- tenant: admin
- token: ${_param:keystone_service_token}
\ No newline at end of file
+++ /dev/null
-classes:
-- service.keepalived.cluster.single
-- service.haproxy.proxy.single
-- service.neutron.control.cluster
-parameters:
- _param:
- neutron_control_dvr: True
- neutron_l3_ha: False
- neutron_global_physnet_mtu: 1500
- neutron_external_mtu: 1500
- neutron_tenant_network_types: "flat,vxlan"
- neutron:
- server:
- plugin: ml2
- global_physnet_mtu: ${_param:neutron_global_physnet_mtu}
- l3_ha: ${_param:neutron_l3_ha}
- dvr: ${_param:neutron_control_dvr}
- backend:
- engine: ml2
- tenant_network_types: "${_param:neutron_tenant_network_types}"
- external_mtu: ${_param:neutron_external_mtu}
- mechanism:
- ovs:
- driver: openvswitch
- compute:
- region: ${_param:openstack_region}
- database:
- host: ${_param:openstack_database_address}
- identity:
- region: ${_param:openstack_region}
- message_queue:
- members:
- - host: ${_param:openstack_message_queue_node01_address}
- - host: ${_param:openstack_message_queue_node02_address}
- - host: ${_param:openstack_message_queue_node03_address}
- mysql:
- server:
- database:
- neutron:
- encoding: utf8
- users:
- - name: neutron
- password: ${_param:mysql_neutron_password}
- host: '%'
- rights: all
- - name: neutron
- password: ${_param:mysql_neutron_password}
- host: ${_param:cluster_local_address}
- rights: all
- haproxy:
- proxy:
- listen:
- neutron_api:
- type: openstack-service
- service_name: neutron
- binds:
- - address: ${_param:cluster_vip_address}
- port: 9696
- servers:
- - name: ctl01
- host: ${_param:cluster_node01_address}
- port: 9696
- params: check inter 10s fastinter 2s downinter 3s rise 3 fall 3
- - name: ctl02
- host: ${_param:cluster_node02_address}
- port: 9696
- params: check inter 10s fastinter 2s downinter 3s rise 3 fall 3
- - name: ctl03
- host: ${_param:cluster_node03_address}
- port: 9696
- params: check inter 10s fastinter 2s downinter 3s rise 3 fall 3
+++ /dev/null
-parameters:
- neutron:
- server:
- dpdk: True
\ No newline at end of file
+++ /dev/null
-classes:
-- service.neutron.control.single
-parameters:
- _param:
- neutron_control_dvr: True
- neutron_l3_ha: False
- neutron_global_physnet_mtu: 1500
- neutron_external_mtu: 1500
- neutron_tenant_network_types: "flat,vxlan"
- neutron:
- server:
- plugin: ml2
- global_physnet_mtu: ${_param:neutron_global_physnet_mtu}
- l3_ha: ${_param:neutron_l3_ha}
- dvr: ${_param:neutron_control_dvr}
- backend:
- engine: ml2
- tenant_network_types: "${_param:neutron_tenant_network_types}"
- external_mtu: ${_param:neutron_external_mtu}
- mechanism:
- ovs:
- driver: openvswitch
- compute:
- region: ${_param:openstack_region}
- database:
- host: ${_param:openstack_database_address}
- identity:
- region: ${_param:openstack_region}
- message_queue:
- members:
- - host: ${_param:openstack_message_queue_node01_address}
- mysql:
- server:
- database:
- neutron:
- encoding: utf8
- users:
- - name: neutron
- password: ${_param:mysql_neutron_password}
- host: '%'
- rights: all
- - name: neutron
- password: ${_param:mysql_neutron_password}
- host: ${_param:cluster_local_address}
- rights: all
\ No newline at end of file
+++ /dev/null
-parameters:
- neutron:
- server:
- backend:
- mechanism:
- sriov:
- driver: sriovnicswitch
+++ /dev/null
-classes:
-- service.neutron.control.single
-parameters:
- neutron:
- server:
- database:
- host: ${_param:single_address}
- mysql:
- server:
- database:
- neutron:
- encoding: utf8
- users:
- - name: neutron
- password: ${_param:mysql_neutron_password}
- host: '%'
- rights: all
- - name: neutron
- password: ${_param:mysql_neutron_password}
- host: ${_param:single_address}
- rights: all
-
+++ /dev/null
-classes:
-- service.neutron.gateway.single
-parameters:
- neutron:
- gateway:
- dvr: ${_param:neutron_gateway_dvr}
- agent_mode: ${_param:neutron_gateway_agent_mode}
- backend:
- tenant_network_types: ${_param:neutron_tenant_network_types}"
- message_queue:
- members:
- - host: ${_param:openstack_message_queue_node01_address}
- - host: ${_param:openstack_message_queue_node02_address}
- - host: ${_param:openstack_message_queue_node03_address}
+++ /dev/null
-parameters:
- neutron:
- gateway:
- notification:
- driver: messagingv2
- topics: "${_param:openstack_notification_topics}"
+++ /dev/null
-classes:
-- service.nova.compute.kvm
-parameters:
- _param:
- nova_vncproxy_url: https://${_param:cluster_public_host}:6080
- nova_compute_virtualization: kvm
- nova_compute_avail_zone:
- nova_aggregates: []
- nova_compute_ssh_public: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCltIn93BcTMzNK/n2eBze6PyTkmIgdDkeXNR9X4DqE48Va80ojv2pq8xuaBxiNITJzyl+4p4UvTTXo+HmuX8qbHvqgMGXvuPUCpndEfb2r67f6vpMqPwMgBrUg2ZKgN4OsSDHU+H0dia0cEaTjz5pvbUy9lIsSyhrqOUVF9reJq+boAvVEedm8fUqiZuiejAw2D27+rRtdEPgsKMnh3626YEsr963q4rjU/JssV/iKMNu7mk2a+koOrJ+aHvcVU8zJjfA0YghoeVT/I3GLU/MB/4tD/RyR8GM+UYbI4sgAC7ZOCdQyHdJgnEzx3SJIwcS65U0T2XYvn2qXHXqJ9iGZ root@mirantis.com
- nova_compute_ssh_private: |
- -----BEGIN RSA PRIVATE KEY-----
- MIIEpAIBAAKCAQEApbSJ/dwXEzMzSv59ngc3uj8k5JiIHQ5HlzUfV+A6hOPFWvNK
- I79qavMbmgcYjSEyc8pfuKeFL0016Ph5rl/Kmx76oDBl77j1AqZ3RH29q+u3+r6T
- Kj8DIAa1INmSoDeDrEgx1Ph9HYmtHBGk48+ab21MvZSLEsoa6jlFRfa3iavm6AL1
- RHnZvH1KombonowMNg9u/q0bXRD4LCjJ4d+tumBLK/et6uK41PybLFf4ijDbu5pN
- mvpKDqyfmh73FVPMyY3wNGIIaHlU/yNxi1PzAf+LQ/0ckfBjPlGGyOLIAAu2TgnU
- Mh3SYJxM8d0iSMHEuuVNE9l2L59qlx16ifYhmQIDAQABAoIBAQCYpqbwvE5tm59H
- GQb0C8Ykx4LfLD1INx1wiLmlJKYEQihPTw0fvXj1qZvl21+cs9ZcoTRpUbn6B3EA
- e9bs8sYc/P75j1x46LSdimkZKZUPygkk72d3ZbElUciOyKCxBDNDBQcTIQ9xpKFa
- 2E5Ep72npNMrWqp71r/Qwo20lEIkikIgAFPBgraxn5xIEdo59vzXNZsvyoIRi5p4
- ayH9nWSAXdF1YU3p3ljtHD8o2G/0d2TWGmjrd9vztc6tgXjp0PF60vDNgcJiudBg
- oNLDK/e5a44GJxlVDdJ84ESb7GprRStYmddl22xnI1SXlg87+t0QQwzR0CCtWXrz
- neXkicHhAoGBANkG9tOZfErhSL/jmsElQTNPcMNQkPiJzEmOIpr6jgSzCusPT/QD
- PnVwB42GC5+Zhd4e88BsTzECxPXmKk7r1cBKeJTg/ejgsrSfVAZqMsfhbp3mGOiH
- jymF+zC6Urj5q/Zkof8pEFICtyA5zlHvZmsQL9PDiqXIWALki2JvIDPdAoGBAMN2
- O+LWOM9qqwgSMaFY8VUdDdbmLx/ZMGWQ//Tx42WM8SU+cCpGTLDHHR0qC0gnRsV7
- V63DySEwiHn4I1cQ/AMijRxuw4Dkgk2YMRlgsAbVWO7aIlECWjSg+pRjNeA7If4D
- 5L/gu6wZIv1vu8/fvOwRpPUzhWjGN5Z0RyvYc7btAoGALNnrmL9XmIIGbuGy0cfJ
- OblpLHQyAas4tNrS/ARb5Uy7LOj1NRCWj96fMPhK3qjzqXvsFBBOLWrNGaR/id/j
- ROIfGWWGE+KcDAgBbXH1HKnSGn+7FhMt2v79coyPG/s9NqaFdB4gaVJ2VgqcQQKg
- v++QcssulCRbS/2/cJBWr2ECgYAJFCDL9G9HEwlGorGzcNIkxeiyppZhwFDDJuz8
- j4+kU9uPg0rqa8F8JINxq1ZCz7A10/jKlWFuLTbpk2Dw1lUeQCiVvX9PKU30FLGT
- IC6M4rPyxCb75EQUVbXN1p3WAGkfx0aEsweEgtZhNyNeEGJSBK/Iw8/agfpq/pOf
- sboOMQKBgQClKmrAYKWnwdPPka3msyjl/AXDruR4XFvMlOPKbs3nYstolE7eR94F
- 7xDyBz85icFU0rceYQetwFH2p5tRL0GcUQhJmJFgIL0OXdCQvRNJrT3iS00N1aUo
- SG9MrLHCd5l60aCUQg0UA5ed7Hd6SA314k+HwxJno9/wJ+voBeacMg==
- -----END RSA PRIVATE KEY-----
- openssh:
- client:
- enabled: True
- user: {}
- stricthostkeychecking: False
- nova:
- compute:
- version: ${_param:nova_version}
- enabled: true
- virtualization: ${_param:nova_compute_virtualization}
- availability_zone: ${_param:nova_compute_avail_zone}
- aggregates: ${_param:nova_aggregates}
- heal_instance_info_cache_interval: 60
- vncproxy_url: ${_param:nova_vncproxy_url}
- bind:
- vnc_address: ${_param:cluster_local_address}
- vnc_port: 6080
- vnc_name: 0.0.0.0
- database:
- engine: mysql
- host: ${_param:openstack_database_address}
- port: 3306
- name: nova
- user: nova
- password: ${_param:mysql_nova_password}
- identity:
- engine: keystone
- region: ${_param:openstack_region}
- host: ${_param:cluster_vip_address}
- port: 35357
- user: nova
- password: ${_param:keystone_nova_password}
- tenant: service
- message_queue:
- engine: rabbitmq
- port: 5672
- user: openstack
- password: ${_param:rabbitmq_openstack_password}
- virtual_host: '/openstack'
- members:
- - host: ${_param:openstack_message_queue_node01_address}
- - host: ${_param:openstack_message_queue_node02_address}
- - host: ${_param:openstack_message_queue_node03_address}
- image:
- engine: glance
- host: ${_param:cluster_vip_address}
- port: 9292
- network:
- engine: neutron
- region: ${_param:openstack_region}
- host: ${_param:neutron_service_host}
- port: 9696
- user: neutron
- tenant: service
- password: ${_param:keystone_neutron_password}
- user:
- public_key: ${_param:nova_compute_ssh_public}
- private_key: ${_param:nova_compute_ssh_private}
+++ /dev/null
-parameters:
- nova:
- compute:
- vcpu_pin_set: ${_param:nova_cpu_pinning}
- linux:
- system:
- kernel:
- isolcpu: ${_param:compute_kernel_isolcpu}
\ No newline at end of file
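
Here too the interpolated parameters (`nova_cpu_pinning`, `compute_kernel_isolcpu`) are left to the consumer; typically both name the same core range, so instances are pinned exactly to the cores isolated from the host scheduler. A sketch (core ranges illustrative):

parameters:
  _param:
    nova_cpu_pinning: "2-23"         # illustrative: vCPUs placed on cores 2-23
    compute_kernel_isolcpu: "2-23"   # illustrative: the same cores isolated via isolcpus=
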
+++ /dev/null
-parameters:
- _param:
- compute_hugepages_size: 1G
- compute_hugepages_mount: /mnt/hugepages_1G
- linux:
- system:
- kernel:
- hugepages:
- large:
- default: true
- size: ${_param:compute_hugepages_size}
- count: ${_param:compute_hugepages_count}
- mount_point: ${_param:compute_hugepages_mount}
- nova:
- compute:
- hugepages:
- mount_points:
- - path: ${_param:compute_hugepages_mount}
\ No newline at end of file
+++ /dev/null
-parameters:
- _param:
- sriov_nic01_physical_network: physnet1
- sriov_unsafe_interrupts: False
- nova:
- compute:
- sriov:
- sriov_nic01:
- devname: ${_param:sriov_nic01_device_name}
- physical_network: ${_param:sriov_nic01_physical_network}
- linux:
- system:
- kernel:
- sriov: True
- unsafe_interrupts: ${_param:sriov_unsafe_interrupts}
- rc:
- local: |
- #!/bin/sh -e
- # Enabling ${_param:sriov_nic01_numvfs} VFs on ${_param:sriov_nic01_device_name} PF
- echo ${_param:sriov_nic01_numvfs} > /sys/class/net/${_param:sriov_nic01_device_name}/device/sriov_numvfs; sleep 2; ip link set ${_param:sriov_nic01_device_name} up
- exit 0
\ No newline at end of file
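
As with the DPDK and CPU-pinning classes, the SR-IOV definition consumes parameters it never sets; `sriov_nic01_device_name` and `sriov_nic01_numvfs` come from the hardware profile of the including model. A sketch (device name and VF count illustrative):

parameters:
  _param:
    sriov_nic01_device_name: enp5s0f1   # illustrative physical function
    sriov_nic01_numvfs: 7               # illustrative number of virtual functions
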
+++ /dev/null
-parameters:
- nova:
- compute:
- notification:
- driver: messagingv2
- topics: "${_param:openstack_notification_topics}"
- notify_on:
- state_change: vm_and_task_state
+++ /dev/null
-applications:
-- nova
-classes:
-- service.nova.compute.kvm
-parameters:
- _param:
- nova_vncproxy_url: https://${_param:cluster_public_host}:6080
- nova_compute_ssh_public: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCltIn93BcTMzNK/n2eBze6PyTkmIgdDkeXNR9X4DqE48Va80ojv2pq8xuaBxiNITJzyl+4p4UvTTXo+HmuX8qbHvqgMGXvuPUCpndEfb2r67f6vpMqPwMgBrUg2ZKgN4OsSDHU+H0dia0cEaTjz5pvbUy9lIsSyhrqOUVF9reJq+boAvVEedm8fUqiZuiejAw2D27+rRtdEPgsKMnh3626YEsr963q4rjU/JssV/iKMNu7mk2a+koOrJ+aHvcVU8zJjfA0YghoeVT/I3GLU/MB/4tD/RyR8GM+UYbI4sgAC7ZOCdQyHdJgnEzx3SJIwcS65U0T2XYvn2qXHXqJ9iGZ root@mirantis.com
- nova_compute_avail_zone:
- nova_compute_ssh_private: |
- -----BEGIN RSA PRIVATE KEY-----
- MIIEpAIBAAKCAQEApbSJ/dwXEzMzSv59ngc3uj8k5JiIHQ5HlzUfV+A6hOPFWvNK
- I79qavMbmgcYjSEyc8pfuKeFL0016Ph5rl/Kmx76oDBl77j1AqZ3RH29q+u3+r6T
- Kj8DIAa1INmSoDeDrEgx1Ph9HYmtHBGk48+ab21MvZSLEsoa6jlFRfa3iavm6AL1
- RHnZvH1KombonowMNg9u/q0bXRD4LCjJ4d+tumBLK/et6uK41PybLFf4ijDbu5pN
- mvpKDqyfmh73FVPMyY3wNGIIaHlU/yNxi1PzAf+LQ/0ckfBjPlGGyOLIAAu2TgnU
- Mh3SYJxM8d0iSMHEuuVNE9l2L59qlx16ifYhmQIDAQABAoIBAQCYpqbwvE5tm59H
- GQb0C8Ykx4LfLD1INx1wiLmlJKYEQihPTw0fvXj1qZvl21+cs9ZcoTRpUbn6B3EA
- e9bs8sYc/P75j1x46LSdimkZKZUPygkk72d3ZbElUciOyKCxBDNDBQcTIQ9xpKFa
- 2E5Ep72npNMrWqp71r/Qwo20lEIkikIgAFPBgraxn5xIEdo59vzXNZsvyoIRi5p4
- ayH9nWSAXdF1YU3p3ljtHD8o2G/0d2TWGmjrd9vztc6tgXjp0PF60vDNgcJiudBg
- oNLDK/e5a44GJxlVDdJ84ESb7GprRStYmddl22xnI1SXlg87+t0QQwzR0CCtWXrz
- neXkicHhAoGBANkG9tOZfErhSL/jmsElQTNPcMNQkPiJzEmOIpr6jgSzCusPT/QD
- PnVwB42GC5+Zhd4e88BsTzECxPXmKk7r1cBKeJTg/ejgsrSfVAZqMsfhbp3mGOiH
- jymF+zC6Urj5q/Zkof8pEFICtyA5zlHvZmsQL9PDiqXIWALki2JvIDPdAoGBAMN2
- O+LWOM9qqwgSMaFY8VUdDdbmLx/ZMGWQ//Tx42WM8SU+cCpGTLDHHR0qC0gnRsV7
- V63DySEwiHn4I1cQ/AMijRxuw4Dkgk2YMRlgsAbVWO7aIlECWjSg+pRjNeA7If4D
- 5L/gu6wZIv1vu8/fvOwRpPUzhWjGN5Z0RyvYc7btAoGALNnrmL9XmIIGbuGy0cfJ
- OblpLHQyAas4tNrS/ARb5Uy7LOj1NRCWj96fMPhK3qjzqXvsFBBOLWrNGaR/id/j
- ROIfGWWGE+KcDAgBbXH1HKnSGn+7FhMt2v79coyPG/s9NqaFdB4gaVJ2VgqcQQKg
- v++QcssulCRbS/2/cJBWr2ECgYAJFCDL9G9HEwlGorGzcNIkxeiyppZhwFDDJuz8
- j4+kU9uPg0rqa8F8JINxq1ZCz7A10/jKlWFuLTbpk2Dw1lUeQCiVvX9PKU30FLGT
- IC6M4rPyxCb75EQUVbXN1p3WAGkfx0aEsweEgtZhNyNeEGJSBK/Iw8/agfpq/pOf
- sboOMQKBgQClKmrAYKWnwdPPka3msyjl/AXDruR4XFvMlOPKbs3nYstolE7eR94F
- 7xDyBz85icFU0rceYQetwFH2p5tRL0GcUQhJmJFgIL0OXdCQvRNJrT3iS00N1aUo
- SG9MrLHCd5l60aCUQg0UA5ed7Hd6SA314k+HwxJno9/wJ+voBeacMg==
- -----END RSA PRIVATE KEY-----
- openssh:
- client:
- enabled: True
- user: {}
- stricthostkeychecking: False
- nova:
- compute:
- version: ${_param:nova_version}
- disable_flow_collection: true
- enabled: true
- virtualization: kvm
- availability_zone: ${_param:nova_compute_avail_zone}
- vncproxy_url: ${_param:nova_vncproxy_url}
- bind:
- vnc_address: ${_param:control_address}
- vnc_port: 6080
- vnc_name: 0.0.0.0
- database:
- engine: mysql
- host: ${_param:control_address}
- port: 3306
- name: nova
- user: nova
- password: ${_param:mysql_nova_password}
- identity:
- engine: keystone
- host: ${_param:control_address}
- port: 35357
- user: nova
- password: ${_param:keystone_nova_password}
- tenant: service
- message_queue:
- engine: rabbitmq
- host: ${_param:control_address}
- port: 5672
- user: openstack
- password: ${_param:rabbitmq_openstack_password}
- virtual_host: '/openstack'
- image:
- engine: glance
- host: ${_param:control_address}
- port: 9292
- network:
- engine: neutron
- host: ${_param:control_address}
- port: 9696
- cache:
- engine: memcached
- members:
- - host: 127.0.0.1
- port: 11211
- user:
- public_key: ${_param:nova_compute_ssh_public}
- private_key: ${_param:nova_compute_ssh_private}
+++ /dev/null
-parameters:
- nova:
- compute:
- ceph:
- enabled: true
- ephemeral: yes
- rbd_pool: ${_param:nova_storage_pool}
- secret_uuid: ${_param:nova_storage_secret_uuid}
- client_cinder_key: ${_param:nova_storage_client_key}
- rbd_user: ${_param:nova_storage_user}
\ No newline at end of file
+++ /dev/null
-classes:
-- service.haproxy.proxy.single
-- service.nova.control.cluster
-- service.keepalived.cluster.single
-- system.haproxy.proxy.listen.openstack.nova
-- system.haproxy.proxy.listen.openstack.novnc
-parameters:
- _param:
- nova_vncproxy_url: http://${_param:cluster_vip_address}:6080
- nova_cpu_allocation_ratio: 16.0
- nova_ram_allocation_ratio: 1.5
- nova_disk_allocation_ratio: 1.0
- metadata_password: metadataPass
- nova:
- controller:
- enabled: true
- version: ${_param:nova_version}
- vncproxy_url: ${_param:nova_vncproxy_url}
- security_group: false
- dhcp_domain: novalocal
- scheduler_default_filters: "DifferentHostFilter,RetryFilter,AvailabilityZoneFilter,RamFilter,CoreFilter,DiskFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,PciPassthroughFilter,NUMATopologyFilter,AggregateInstanceExtraSpecsFilter"
- cpu_allocation_ratio: ${_param:nova_cpu_allocation_ratio}
- ram_allocation_ratio: ${_param:nova_ram_allocation_ratio}
- disk_allocation_ratio: ${_param:nova_disk_allocation_ratio}
- workers: 8
- logging:
- - engine: syslog
- facility: local0
- heka:
- enabled: true
- bind:
- private_address: ${_param:cluster_local_address}
- public_address: ${_param:cluster_vip_address}
- public_name: ${_param:cluster_vip_address}
- novncproxy_address: ${_param:cluster_local_address}
- novncproxy_port: 6080
- database:
- engine: mysql
- host: ${_param:openstack_database_address}
- port: 3306
- name: nova
- user: nova
- password: ${_param:mysql_nova_password}
- identity:
- engine: keystone
- region: ${_param:openstack_region}
- host: ${_param:cluster_vip_address}
- port: 35357
- user: nova
- password: ${_param:keystone_nova_password}
- tenant: service
- message_queue:
- engine: rabbitmq
- port: 5672
- user: openstack
- password: ${_param:rabbitmq_openstack_password}
- virtual_host: '/openstack'
- members:
- - host: ${_param:openstack_message_queue_node01_address}
- - host: ${_param:openstack_message_queue_node02_address}
- - host: ${_param:openstack_message_queue_node03_address}
- glance:
- host: ${_param:cluster_vip_address}
- port: 9292
- network:
- engine: neutron
- region: ${_param:openstack_region}
- host: ${_param:neutron_service_host}
- user: neutron
- password: ${_param:keystone_neutron_password}
- port: 9696
- mtu: 1500
- tenant: service
- metadata:
- password: ${_param:metadata_password}
+++ /dev/null
-parameters:
- nova:
- controller:
- audit:
- enabled: true
+++ /dev/null
-parameters:
- nova:
- controller:
- notification:
- driver: messagingv2
- topics: "${_param:openstack_notification_topics}"
+++ /dev/null
-classes:
-- service.nova.control.single
-parameters:
- nova:
- controller:
- database:
- host: ${_param:single_address}
+++ /dev/null
-classes:
-- service.ntp.client
-parameters:
- ntp:
- client:
- mode7: true
+++ /dev/null
-classes:
-- service.rabbitmq.server.cluster
-- service.keepalived.cluster.single
-- service.haproxy.proxy.single
\ No newline at end of file
+++ /dev/null
-classes:
-- service.rabbitmq.server.single
+++ /dev/null
-parameters:
- rabbitmq:
- server:
- host:
- '/murano':
- enabled: true
- user: openstack
- password: ${_param:rabbitmq_murano_agent_password}
- policies:
- - name: HA
- pattern: '^(?!amq\.).*'
- definition: '{"ha-mode": "all", "message-ttl": 120000}'
- admin:
- name: admin
- password: zeQuooQu47eed8esahpie2Lai8En9ohp
- bind:
- address: ${_param:single_address}
- management:
- bind:
- address: ${_param:single_address}
+++ /dev/null
-parameters:
- rabbitmq:
- server:
- host:
- '/monitor':
- enabled: true
- user: monitor
- password: ${_param:rabbitmq_monitor_password}
+++ /dev/null
-parameters:
- rabbitmq:
- server:
- host:
- '/':
- enabled: true
- user: guest
- password: guest
- policies:
- - name: HA
- pattern: '^(?!amq\.).*'
- definition: '{"ha-mode": "all", "message-ttl": 120000}'
- '/openstack':
- enabled: true
- user: openstack
- password: ${_param:rabbitmq_openstack_password}
- policies:
- - name: HA
- pattern: '^(?!amq\.).*'
- definition: '{"ha-mode": "all", "message-ttl": 120000}'
+++ /dev/null
-classes:
-- service.reclass.storage.salt
-- service.git.client
-parameters:
- reclass:
- storage:
- data_source:
- engine: git
- address: '${_param:reclass_data_repository}'
- branch: ${_param:reclass_data_revision}
- salt:
- master:
- pillar:
- engine: reclass
- data_dir: /srv/salt/reclass
-
+++ /dev/null
-parameters:
- _param:
- cicd_control_node01_hostname: cid01
- cicd_control_node02_hostname: cid02
- cicd_control_node03_hostname: cid03
- reclass:
- storage:
- node:
- cicd_control_node01:
- name: ${_param:cicd_control_node01_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.cicd.control.leader
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: xenial
- single_address: ${_param:cicd_control_node01_address}
- keepalived_vip_priority: 103
- cicd_database_id: 1
- cicd_control_node02:
- name: ${_param:cicd_control_node02_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.cicd.control.manager
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: xenial
- single_address: ${_param:cicd_control_node02_address}
- keepalived_vip_priority: 102
- cicd_database_id: 2
- cicd_control_node03:
- name: ${_param:cicd_control_node03_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.cicd.control.manager
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: xenial
- single_address: ${_param:cicd_control_node03_address}
- keepalived_vip_priority: 101
- cicd_database_id: 3
+++ /dev/null
-parameters:
- _param:
- infra_idm_node01_hostname: idm01
- infra_idm_node02_hostname: idm02
- infra_idm_node03_hostname: idm03
- reclass:
- storage:
- node:
- infra_idm_node01:
- name: ${_param:infra_idm_node01_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.infra.idm
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: centos
- single_address: ${_param:infra_idm_node01_address}
- infra_idm_node02:
- name: ${_param:infra_idm_node02_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.infra.idm
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: centos
- single_address: ${_param:infra_idm_node02_address}
- infra_idm_node03:
- name: ${_param:infra_idm_node03_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.infra.idm
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: centos
- single_address: ${_param:infra_idm_node03_address}
\ No newline at end of file
+++ /dev/null
-parameters:
- _param:
- infra_proxy_node01_hostname: prx01
- infra_proxy_node02_hostname: prx02
- reclass:
- storage:
- node:
- infra_proxy_node01:
- name: ${_param:infra_proxy_node01_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.openstack.proxy
- - cluster.${_param:cluster_name}.stacklight.proxy
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: trusty
- single_address: ${_param:infra_proxy_node01_address}
- keepalived_vip_priority: 102
- infra_proxy_node02:
- name: ${_param:infra_proxy_node02_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.openstack.proxy
- - cluster.${_param:cluster_name}.stacklight.proxy
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: trusty
- single_address: ${_param:infra_proxy_node02_address}
- keepalived_vip_priority: 101
+++ /dev/null
-parameters:
- _param:
- openstack_proxy_node01_hostname: prx01
- reclass:
- storage:
- node:
- openstack_proxy_node01:
- name: ${_param:openstack_proxy_node01_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.openstack.proxy
- - cluster.${_param:cluster_name}.stacklight.proxy
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: trusty
- single_address: ${_param:openstack_proxy_node01_address}
+++ /dev/null
-parameters:
- _param:
- kubernetes_control_node01_hostname: ctl01
- kubernetes_control_node02_hostname: ctl02
- kubernetes_control_node03_hostname: ctl03
- reclass:
- storage:
- node:
- kubernetes_control_node01:
- name: ${_param:kubernetes_control_node01_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.kubernetes.control
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: xenial
- single_address: ${_param:kubernetes_control_node01_address}
- keepalived_vip_priority: 103
- kubernetes_control_node02:
- name: ${_param:kubernetes_control_node02_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.kubernetes.control
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: xenial
- single_address: ${_param:kubernetes_control_node02_address}
- keepalived_vip_priority: 102
- kubernetes_control_node03:
- name: ${_param:kubernetes_control_node03_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.kubernetes.control
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: xenial
- single_address: ${_param:kubernetes_control_node03_address}
- keepalived_vip_priority: 101
\ No newline at end of file
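
Throughout these cluster definitions keepalived_vip_priority descends 103/102/101, so node01 wins the VRRP election and holds the cluster VIP, with the others taking over in priority order on failure. A sketch of the kind of per-node override a cluster model could layer on to prefer a different VIP holder (the value is illustrative):

    parameters:
      reclass:
        storage:
          node:
            kubernetes_control_node02:
              params:
                keepalived_vip_priority: 105   # now highest, so node02 holds the VIP
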
+++ /dev/null
-parameters:
- _param:
- kubernetes_control_node01_hostname: ctl01
- reclass:
- storage:
- node:
- kubernetes_control_node01:
- name: ${_param:kubernetes_control_node01_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.kubernetes.control
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: xenial
- single_address: ${_param:kubernetes_control_node01_address}
\ No newline at end of file
+++ /dev/null
-parameters:
- _param:
- monitor_node01_hostname: mon01
- monitor_node02_hostname: mon02
- monitor_node03_hostname: mon03
- reclass:
- storage:
- node:
- monitoring_service_node01:
- name: ${_param:monitor_node01_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.monitoring.server
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: xenial
- single_address: ${_param:monitoring_service_node01_address}
- monitoring_service_node02:
- name: ${_param:monitor_node02_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.monitoring.server
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: xenial
- single_address: ${_param:monitoring_service_node02_address}
- monitoring_service_node03:
- name: ${_param:monitor_node03_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.monitoring.server
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: xenial
- single_address: ${_param:monitoring_service_node03_address}
+++ /dev/null
-parameters:
- _param:
- monitor_node01_hostname: mon01
- reclass:
- storage:
- node:
- monitoring_service_node01:
- name: ${_param:monitor_node01_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.monitoring.server
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: xenial
- single_address: ${_param:monitoring_service_node01_address}
+++ /dev/null
-parameters:
- _param:
- opencontrail_analytics_node01_hostname: nal01
- opencontrail_analytics_node02_hostname: nal02
- opencontrail_analytics_node03_hostname: nal03
- reclass:
- storage:
- node:
- opencontrail_analytics_node01:
- name: ${_param:opencontrail_analytics_node01_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.opencontrail.analytics
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: trusty
- single_address: ${_param:opencontrail_analytics_node01_address}
- keepalived_vip_priority: 103
- opencontrail_database_id: 1
- opencontrail_analytics_node02:
- name: ${_param:opencontrail_analytics_node02_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.opencontrail.analytics
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: trusty
- single_address: ${_param:opencontrail_analytics_node02_address}
- keepalived_vip_priority: 102
- opencontrail_database_id: 2
- opencontrail_analytics_node03:
- name: ${_param:opencontrail_analytics_node03_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.opencontrail.analytics
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: trusty
- single_address: ${_param:opencontrail_analytics_node03_address}
- keepalived_vip_priority: 101
- opencontrail_database_id: 3
+++ /dev/null
-parameters:
- _param:
- opencontrail_control_node01_hostname: ntw01
- opencontrail_control_node02_hostname: ntw02
- opencontrail_control_node03_hostname: ntw03
- reclass:
- storage:
- node:
- opencontrail_control_node01:
- name: ${_param:opencontrail_control_node01_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.opencontrail.control
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: trusty
- single_address: ${_param:opencontrail_control_node01_address}
- keepalived_vip_priority: 103
- opencontrail_database_id: 1
- opencontrail_control_node02:
- name: ${_param:opencontrail_control_node02_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.opencontrail.control
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: trusty
- single_address: ${_param:opencontrail_control_node02_address}
- keepalived_vip_priority: 102
- opencontrail_database_id: 2
- opencontrail_control_node03:
- name: ${_param:opencontrail_control_node03_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.opencontrail.control
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: trusty
- single_address: ${_param:opencontrail_control_node03_address}
- keepalived_vip_priority: 101
- opencontrail_database_id: 3
+++ /dev/null
-parameters:
- _param:
- opencontrail_gateway_hostname: gtw01
- reclass:
- storage:
- node:
- opencontrail_gateway_node01:
- name: ${_param:opencontrail_gateway_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.opencontrail.gateway
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: trusty
- single_address: ${_param:opencontrail_gateway_address}
+++ /dev/null
-parameters:
- _param:
- openstack_benchmark_node01_hostname: bmk01
- reclass:
- storage:
- node:
- openstack_benchmark_node01:
- name: ${_param:openstack_benchmark_node01_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.openstack.benchmark
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: xenial
- single_address: ${_param:openstack_benchmark_node01_address}
+++ /dev/null
-parameters:
- _param:
- openstack_billing_hostname: bil01
- reclass:
- storage:
- node:
- openstack_billing_node01:
- name: ${_param:openstack_billing_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.openstack.billing
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: trusty
- single_address: ${_param:openstack_billing_address}
+++ /dev/null
-parameters:
- _param:
- openstack_catalog_node01_hostname: asc01
- openstack_catalog_node02_hostname: asc02
- reclass:
- storage:
- node:
- openstack_catalog_node01:
- name: ${_param:openstack_catalog_node01_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.openstack.catalog
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: ${_param:linux_system_codename}
- single_address: ${_param:openstack_catalog_node01_address}
- keepalived_vip_priority: 102
- openstack_catalog_node02:
- name: ${_param:openstack_catalog_node02_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.openstack.catalog
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: ${_param:linux_system_codename}
- single_address: ${_param:openstack_catalog_node02_address}
- keepalived_vip_priority: 101
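
Unlike most definitions in this diff, the catalog nodes pass linux_system_codename through as a parameter reference instead of pinning a codename, so the cluster model must define it. A sketch of the expected cluster-level default (the value is an assumption):

    parameters:
      _param:
        linux_system_codename: xenial   # consumed by the catalog node templates above
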
+++ /dev/null
-parameters:
- _param:
- openstack_catalog_node01_hostname: asc01
- reclass:
- storage:
- node:
- openstack_catalog_node01:
- name: ${_param:openstack_catalog_node01_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.openstack.catalog
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: ${_param:linux_system_codename}
- single_address: ${_param:openstack_catalog_node01_address}
+++ /dev/null
-parameters:
- _param:
- openstack_compute_node01_hostname: cmp01
- openstack_compute_node02_hostname: cmp02
- reclass:
- storage:
- node:
- openstack_compute_node01:
- name: ${_param:openstack_compute_node01_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.openstack.compute
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: ${_param:linux_system_codename}
- single_address: 172.16.10.105
- openstack_compute_node02:
- name: ${_param:openstack_compute_node02_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.openstack.compute
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: ${_param:linux_system_codename}
- single_address: 172.16.10.106
+++ /dev/null
-parameters:
- _param:
- openstack_compute_node01_hostname: cmp01
- reclass:
- storage:
- node:
- openstack_compute_node01:
- name: ${_param:openstack_compute_node01_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.openstack.compute
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: ${_param:linux_system_codename}
- single_address: 172.16.10.105
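
The compute definitions above hardcode 172.16.10.105/106 where sibling files use address parameters. A hedged sketch of the parameterized form, assuming an openstack_compute_node01_address parameter defined elsewhere in the cluster model (it does not appear in this diff):

    parameters:
      reclass:
        storage:
          node:
            openstack_compute_node01:
              params:
                single_address: ${_param:openstack_compute_node01_address}   # assumed parameter
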
+++ /dev/null
-parameters:
- _param:
- openstack_control_node01_hostname: ctl01
- openstack_control_node02_hostname: ctl02
- openstack_control_node03_hostname: ctl03
- reclass:
- storage:
- node:
- openstack_control_node01:
- name: ${_param:openstack_control_node01_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.openstack.control
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: trusty
- single_address: ${_param:openstack_control_node01_address}
- keepalived_vip_priority: 103
- opencontrail_database_id: 1
- rabbitmq_cluster_role: master
- openstack_control_node02:
- name: ${_param:openstack_control_node02_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.openstack.control
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: trusty
- single_address: ${_param:openstack_control_node02_address}
- keepalived_vip_priority: 102
- opencontrail_database_id: 2
- rabbitmq_cluster_role: slave
- openstack_control_node03:
- name: ${_param:openstack_control_node03_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.openstack.control
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: trusty
- single_address: ${_param:openstack_control_node03_address}
- keepalived_vip_priority: 101
- opencontrail_database_id: 3
- rabbitmq_cluster_role: slave
+++ /dev/null
-parameters:
- _param:
- openstack_upgrade_node01_hostname: upg01
- reclass:
- storage:
- node:
- openstack_upgrade_node01:
- name: ${_param:openstack_upgrade_node01_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.openstack.upgrade
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: xenial
- single_address: ${_param:openstack_upgrade_node01_address}
+++ /dev/null
-parameters:
- _param:
- openstack_proxy_hostname: prx01
- reclass:
- storage:
- node:
- openstack_proxy_node01:
- name: ${_param:openstack_proxy_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.openstack.dashboard
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: trusty
- single_address: 172.16.10.121
+++ /dev/null
-parameters:
- _param:
- openstack_database_node01_hostname: dbs01
- openstack_database_node02_hostname: dbs02
- openstack_database_node03_hostname: dbs03
- reclass:
- storage:
- node:
- openstack_database_node01:
- name: ${_param:openstack_database_node01_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - service.galera.master.cluster
- - cluster.${_param:cluster_name}.openstack.database
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: trusty
- single_address: ${_param:openstack_database_node01_address}
- keepalived_vip_priority: 103
- mysql_cluster_role: master
- openstack_database_node02:
- name: ${_param:openstack_database_node02_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - service.galera.slave.cluster
- - cluster.${_param:cluster_name}.openstack.database
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: trusty
- single_address: ${_param:openstack_database_node02_address}
- keepalived_vip_priority: 102
- mysql_cluster_role: slave
- openstack_database_node03:
- name: ${_param:openstack_database_node03_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - service.galera.slave.cluster
- - cluster.${_param:cluster_name}.openstack.database
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: trusty
- single_address: ${_param:openstack_database_node03_address}
- keepalived_vip_priority: 101
- mysql_cluster_role: slave
+++ /dev/null
-parameters:
- _param:
- openstack_dns_node01_hostname: dns01
- openstack_dns_node02_hostname: dns02
- reclass:
- storage:
- node:
- openstack_dns_node01:
- name: ${_param:openstack_dns_node01_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.openstack.dns
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: trusty
- single_address: ${_param:openstack_dns_node01_address}
- keepalived_vip_priority: 110
- openstack_dns_node02:
- name: ${_param:openstack_dns_node02_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.openstack.dns
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: trusty
- single_address: ${_param:openstack_dns_node02_address}
- keepalived_vip_priority: 111
+++ /dev/null
-parameters:
- _param:
- openstack_gateway_node01_hostname: gtw01
- openstack_gateway_node02_hostname: gtw02
- openstack_gateway_node03_hostname: gtw03
-
- openstack_gateway_node01_tenant_address: ${_param:single_address}
- openstack_gateway_node02_tenant_address: ${_param:single_address}
- openstack_gateway_node03_tenant_address: ${_param:single_address}
- reclass:
- storage:
- node:
- openstack_gateway_node01:
- name: ${_param:openstack_gateway_node01_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.openstack.gateway
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: ${_param:linux_system_codename}
- single_address: ${_param:openstack_gateway_node01_address}
- tenant_address: ${_param:openstack_gateway_node01_tenant_address}
- openstack_gateway_node02:
- name: ${_param:openstack_gateway_node02_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.openstack.gateway
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: ${_param:linux_system_codename}
- single_address: ${_param:openstack_gateway_node02_address}
- tenant_address: ${_param:openstack_gateway_node02_tenant_address}
- openstack_gateway_node03:
- name: ${_param:openstack_gateway_node03_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.openstack.gateway
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: ${_param:linux_system_codename}
- single_address: ${_param:openstack_gateway_node03_address}
- tenant_address: ${_param:openstack_gateway_node03_tenant_address}
-
-
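
All three tenant addresses above default to ${_param:single_address}, collapsing the tenant overlay network onto the management address. On a deployment with a dedicated tenant network, only those aliases need overriding; a sketch (the addresses are placeholders):

    parameters:
      _param:
        openstack_gateway_node01_tenant_address: 10.1.0.6   # placeholder
        openstack_gateway_node02_tenant_address: 10.1.0.7   # placeholder
        openstack_gateway_node03_tenant_address: 10.1.0.8   # placeholder
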
+++ /dev/null
-parameters:
- _param:
- openstack_gateway_node01_hostname: gtw01
- reclass:
- storage:
- node:
- openstack_gateway_node01:
- name: ${_param:openstack_gateway_node01_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.openstack.gateway
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: ${_param:linux_system_codename}
- single_address: ${_param:openstack_gateway_address}
\ No newline at end of file
+++ /dev/null
-parameters:
- _param:
- openstack_message_queue_node01_hostname: msg01
- openstack_message_queue_node02_hostname: msg02
- openstack_message_queue_node03_hostname: msg03
- reclass:
- storage:
- node:
- openstack_message_queue_node01:
- name: ${_param:openstack_message_queue_node01_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.openstack.message_queue
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: trusty
- single_address: ${_param:openstack_message_queue_node01_address}
- keepalived_vip_priority: 103
- rabbitmq_cluster_role: master
- openstack_message_queue_node02:
- name: ${_param:openstack_message_queue_node02_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.openstack.message_queue
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: trusty
- single_address: ${_param:openstack_message_queue_node02_address}
- keepalived_vip_priority: 102
- rabbitmq_cluster_role: slave
- openstack_message_queue_node03:
- name: ${_param:openstack_message_queue_node03_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.openstack.message_queue
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: trusty
- single_address: ${_param:openstack_message_queue_node03_address}
- keepalived_vip_priority: 101
- rabbitmq_cluster_role: slave
+++ /dev/null
-parameters:
- _param:
- openstack_proxy_node01_hostname: prx01
- openstack_proxy_node02_hostname: prx02
- reclass:
- storage:
- node:
- openstack_proxy_node01:
- name: ${_param:openstack_proxy_node01_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.openstack.proxy
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: trusty
- single_address: ${_param:openstack_proxy_node01_address}
- keepalived_vip_priority: 102
- openstack_proxy_node02:
- name: ${_param:openstack_proxy_node02_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.openstack.proxy
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: trusty
- single_address: ${_param:openstack_proxy_node02_address}
- keepalived_vip_priority: 101
+++ /dev/null
-parameters:
- _param:
- openstack_proxy_node01_hostname: prx01
- reclass:
- storage:
- node:
- openstack_proxy_node01:
- name: ${_param:openstack_proxy_node01_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.openstack.proxy
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: trusty
- single_address: ${_param:openstack_proxy_node01_address}
+++ /dev/null
-parameters:
- _param:
- openstack_telemetry_node01_hostname: mdb01
- openstack_telemetry_node02_hostname: mdb02
- openstack_telemetry_node03_hostname: mdb03
- reclass:
- storage:
- node:
- openstack_telemetry_node01:
- name: ${_param:openstack_telemetry_node01_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.openstack.telemetry
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: trusty
- single_address: ${_param:openstack_telemetry_node01_address}
- keepalived_vip_priority: 103
- openstack_telemetry_node02:
- name: ${_param:openstack_telemetry_node02_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.openstack.telemetry
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: trusty
- single_address: ${_param:openstack_telemetry_node02_address}
- keepalived_vip_priority: 102
- openstack_telemetry_node03:
- name: ${_param:openstack_telemetry_node03_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.openstack.telemetry
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: trusty
- single_address: ${_param:openstack_telemetry_node03_address}
- keepalived_vip_priority: 101
+++ /dev/null
-parameters:
- _param:
- infra_kvm_node01_hostname: kvm01
- infra_kvm_node02_hostname: kvm02
- infra_kvm_node03_hostname: kvm03
-
- infra_kvm_node01_deploy_address: ${_param:infra_kvm_node01_address}
- infra_kvm_node02_deploy_address: ${_param:infra_kvm_node02_address}
- infra_kvm_node03_deploy_address: ${_param:infra_kvm_node03_address}
-
- infra_kvm_node01_storage_address: ${_param:infra_kvm_node01_address}
- infra_kvm_node02_storage_address: ${_param:infra_kvm_node02_address}
- infra_kvm_node03_storage_address: ${_param:infra_kvm_node03_address}
-
- infra_kvm_node01_public_address: ${_param:infra_kvm_node01_address}
- infra_kvm_node02_public_address: ${_param:infra_kvm_node02_address}
- infra_kvm_node03_public_address: ${_param:infra_kvm_node03_address}
-
- reclass:
- storage:
- node:
- infra_kvm_node01:
- name: ${_param:infra_kvm_node01_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.infra.kvm
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: trusty
- single_address: ${_param:infra_kvm_node01_address}
- deploy_address: ${_param:infra_kvm_node01_deploy_address}
- public_address: ${_param:infra_kvm_node01_public_address}
- storage_address: ${_param:infra_kvm_node01_storage_address}
- infra_kvm_node02:
- name: ${_param:infra_kvm_node02_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.infra.kvm
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: trusty
- single_address: ${_param:infra_kvm_node02_address}
- deploy_address: ${_param:infra_kvm_node02_deploy_address}
- public_address: ${_param:infra_kvm_node02_public_address}
- storage_address: ${_param:infra_kvm_node02_storage_address}
- infra_kvm_node03:
- name: ${_param:infra_kvm_node03_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.infra.kvm
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: trusty
- single_address: ${_param:infra_kvm_node03_address}
- deploy_address: ${_param:infra_kvm_node03_deploy_address}
- public_address: ${_param:infra_kvm_node03_public_address}
- storage_address: ${_param:infra_kvm_node03_storage_address}
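
The deploy, storage, and public addresses above all alias each node's primary address, the single-network default. With a separate storage fabric, only the aliases change while the node templates stay untouched; a sketch (the subnet is purely illustrative):

    parameters:
      _param:
        infra_kvm_node01_storage_address: 10.2.0.11   # placeholder storage-net address
        infra_kvm_node02_storage_address: 10.2.0.12
        infra_kvm_node03_storage_address: 10.2.0.13
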
+++ /dev/null
-parameters:
- _param:
- infra_kvm_node01_hostname: kvm01
- infra_kvm_node02_hostname: kvm02
- infra_kvm_node03_hostname: kvm03
- infra_kvm_node04_hostname: kvm04
- infra_kvm_node05_hostname: kvm05
- infra_kvm_node06_hostname: kvm06
- infra_kvm_node07_hostname: kvm07
- infra_kvm_node08_hostname: kvm08
- infra_kvm_node09_hostname: kvm09
-
- infra_kvm_node01_deploy_address: ${_param:infra_kvm_node01_address}
- infra_kvm_node02_deploy_address: ${_param:infra_kvm_node02_address}
- infra_kvm_node03_deploy_address: ${_param:infra_kvm_node03_address}
- infra_kvm_node04_deploy_address: ${_param:infra_kvm_node04_address}
- infra_kvm_node05_deploy_address: ${_param:infra_kvm_node05_address}
- infra_kvm_node06_deploy_address: ${_param:infra_kvm_node06_address}
- infra_kvm_node07_deploy_address: ${_param:infra_kvm_node07_address}
- infra_kvm_node08_deploy_address: ${_param:infra_kvm_node08_address}
- infra_kvm_node09_deploy_address: ${_param:infra_kvm_node09_address}
-
- infra_kvm_node01_storage_address: ${_param:infra_kvm_node01_address}
- infra_kvm_node02_storage_address: ${_param:infra_kvm_node02_address}
- infra_kvm_node03_storage_address: ${_param:infra_kvm_node03_address}
- infra_kvm_node04_storage_address: ${_param:infra_kvm_node04_address}
- infra_kvm_node05_storage_address: ${_param:infra_kvm_node05_address}
- infra_kvm_node06_storage_address: ${_param:infra_kvm_node06_address}
- infra_kvm_node07_storage_address: ${_param:infra_kvm_node07_address}
- infra_kvm_node08_storage_address: ${_param:infra_kvm_node08_address}
- infra_kvm_node09_storage_address: ${_param:infra_kvm_node09_address}
-
- infra_kvm_node01_public_address: ${_param:infra_kvm_node01_address}
- infra_kvm_node02_public_address: ${_param:infra_kvm_node02_address}
- infra_kvm_node03_public_address: ${_param:infra_kvm_node03_address}
- infra_kvm_node04_public_address: ${_param:infra_kvm_node04_address}
- infra_kvm_node05_public_address: ${_param:infra_kvm_node05_address}
- infra_kvm_node06_public_address: ${_param:infra_kvm_node06_address}
- infra_kvm_node07_public_address: ${_param:infra_kvm_node07_address}
- infra_kvm_node08_public_address: ${_param:infra_kvm_node08_address}
- infra_kvm_node09_public_address: ${_param:infra_kvm_node09_address}
-
- reclass:
- storage:
- node:
- infra_kvm_node01:
- name: ${_param:infra_kvm_node01_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.infra.kvm
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: trusty
- single_address: ${_param:infra_kvm_node01_address}
- deploy_address: ${_param:infra_kvm_node01_deploy_address}
- public_address: ${_param:infra_kvm_node01_public_address}
- storage_address: ${_param:infra_kvm_node01_storage_address}
- infra_kvm_node02:
- name: ${_param:infra_kvm_node02_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.infra.kvm
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: trusty
- single_address: ${_param:infra_kvm_node02_address}
- deploy_address: ${_param:infra_kvm_node02_deploy_address}
- public_address: ${_param:infra_kvm_node02_public_address}
- storage_address: ${_param:infra_kvm_node02_storage_address}
- infra_kvm_node03:
- name: ${_param:infra_kvm_node03_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.infra.kvm
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: trusty
- single_address: ${_param:infra_kvm_node03_address}
- deploy_address: ${_param:infra_kvm_node03_deploy_address}
- public_address: ${_param:infra_kvm_node03_public_address}
- storage_address: ${_param:infra_kvm_node03_storage_address}
- infra_kvm_node04:
- name: ${_param:infra_kvm_node04_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.infra.kvm
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: trusty
- single_address: ${_param:infra_kvm_node04_address}
- deploy_address: ${_param:infra_kvm_node04_deploy_address}
- public_address: ${_param:infra_kvm_node04_public_address}
- storage_address: ${_param:infra_kvm_node04_storage_address}
- infra_kvm_node05:
- name: ${_param:infra_kvm_node05_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.infra.kvm
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: trusty
- single_address: ${_param:infra_kvm_node05_address}
- deploy_address: ${_param:infra_kvm_node05_deploy_address}
- public_address: ${_param:infra_kvm_node05_public_address}
- storage_address: ${_param:infra_kvm_node05_storage_address}
- infra_kvm_node06:
- name: ${_param:infra_kvm_node06_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.infra.kvm
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: trusty
- single_address: ${_param:infra_kvm_node06_address}
- deploy_address: ${_param:infra_kvm_node06_deploy_address}
- public_address: ${_param:infra_kvm_node06_public_address}
- storage_address: ${_param:infra_kvm_node06_storage_address}
- infra_kvm_node07:
- name: ${_param:infra_kvm_node07_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.infra.kvm
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: trusty
- single_address: ${_param:infra_kvm_node07_address}
- deploy_address: ${_param:infra_kvm_node07_deploy_address}
- public_address: ${_param:infra_kvm_node07_public_address}
- storage_address: ${_param:infra_kvm_node07_storage_address}
- infra_kvm_node08:
- name: ${_param:infra_kvm_node08_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.infra.kvm
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: trusty
- single_address: ${_param:infra_kvm_node08_address}
- deploy_address: ${_param:infra_kvm_node08_deploy_address}
- public_address: ${_param:infra_kvm_node08_public_address}
- storage_address: ${_param:infra_kvm_node08_storage_address}
- infra_kvm_node09:
- name: ${_param:infra_kvm_node09_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.infra.kvm
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: trusty
- single_address: ${_param:infra_kvm_node09_address}
- deploy_address: ${_param:infra_kvm_node09_deploy_address}
- public_address: ${_param:infra_kvm_node09_public_address}
- storage_address: ${_param:infra_kvm_node09_storage_address}
+++ /dev/null
-parameters:
- _param:
- infra_kvm_node01_hostname: kvm01
- infra_kvm_node01_deploy_address: ${_param:infra_kvm_node01_address}
- infra_kvm_node01_storage_address: ${_param:infra_kvm_node01_address}
- infra_kvm_node01_public_address: ${_param:infra_kvm_node01_address}
- reclass:
- storage:
- node:
- infra_kvm_node01:
- name: ${_param:infra_kvm_node01_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.infra.kvm
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: trusty
- single_address: ${_param:infra_kvm_node01_address}
- deploy_address: ${_param:infra_kvm_node01_deploy_address}
- public_address: ${_param:infra_kvm_node01_public_address}
- storage_address: ${_param:infra_kvm_node01_storage_address}
+++ /dev/null
-parameters:
- _param:
- rsyslog_node01_hostname: rsl01
- reclass:
- storage:
- node:
- rsyslog_node01:
- name: ${_param:rsyslog_node01_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.infra.rsyslog
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: xenial
- single_address: ${_param:rsyslog_node01_address}
+++ /dev/null
-parameters:
- _param:
- stacklight_log_node01_hostname: log01
- stacklight_log_node02_hostname: log02
- stacklight_log_node03_hostname: log03
- reclass:
- storage:
- node:
- stacklight_log_node01:
- name: ${_param:stacklight_log_node01_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.stacklight.log
- - cluster.${_param:cluster_name}.stacklight.log_curator
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: xenial
- single_address: ${_param:stacklight_log_node01_address}
- keepalived_vip_priority: 103
- stacklight_log_node02:
- name: ${_param:stacklight_log_node02_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.stacklight.log
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: xenial
- single_address: ${_param:stacklight_log_node02_address}
- keepalived_vip_priority: 102
- stacklight_log_node03:
- name: ${_param:stacklight_log_node03_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.stacklight.log
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: xenial
- single_address: ${_param:stacklight_log_node03_address}
- keepalived_vip_priority: 101
+++ /dev/null
-parameters:
- _param:
- stacklight_monitor_node01_hostname: mon01
- stacklight_monitor_node02_hostname: mon02
- stacklight_monitor_node03_hostname: mon03
- reclass:
- storage:
- node:
- stacklight_monitor_node01:
- name: ${_param:stacklight_monitor_node01_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.stacklight.monitor
- params:
- redis_cluster_role: 'master'
- rabbitmq_cluster_role: 'master'
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: xenial
- single_address: ${_param:stacklight_monitor_node01_address}
- keepalived_vip_priority: 103
- stacklight_monitor_node02:
- name: ${_param:stacklight_monitor_node02_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.stacklight.monitor
- params:
- redis_cluster_role: 'slave'
- rabbitmq_cluster_role: 'slave'
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: xenial
- single_address: ${_param:stacklight_monitor_node02_address}
- keepalived_vip_priority: 102
- stacklight_monitor_node03:
- name: ${_param:stacklight_monitor_node03_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.stacklight.monitor
- params:
- redis_cluster_role: 'slave'
- rabbitmq_cluster_role: 'slave'
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: xenial
- single_address: ${_param:stacklight_monitor_node03_address}
- keepalived_vip_priority: 101
+++ /dev/null
-parameters:
- _param:
- stacklight_monitor_node01_hostname: mon01
- stacklight_monitor_node02_hostname: mon02
- stacklight_monitor_node03_hostname: mon03
- reclass:
- storage:
- node:
- stacklight_server_node01:
- name: ${_param:stacklight_monitor_node01_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.stacklight.server
- - system.elasticsearch.client.single
- - system.grafana.client.single
- - system.kibana.client.single
- params:
- redis_cluster_role: 'master'
- rabbitmq_cluster_role: 'master'
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: xenial
- single_address: ${_param:stacklight_monitor_node01_address}
- keepalived_vip_priority: 103
- stacklight_server_node02:
- name: ${_param:stacklight_monitor_node02_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.stacklight.server
- params:
- redis_cluster_role: 'slave'
- rabbitmq_cluster_role: 'slave'
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: xenial
- single_address: ${_param:stacklight_monitor_node02_address}
- keepalived_vip_priority: 102
- stacklight_server_node03:
- name: ${_param:stacklight_monitor_node03_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.stacklight.server
- params:
- redis_cluster_role: 'slave'
- rabbitmq_cluster_role: 'slave'
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: xenial
- single_address: ${_param:stacklight_monitor_node03_address}
- keepalived_vip_priority: 101
+++ /dev/null
-parameters:
- _param:
- stacklight_monitor_node01_hostname: mon01
- reclass:
- storage:
- node:
- stacklight_server_node01:
- name: ${_param:stacklight_monitor_node01_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.stacklight.server
- - system.elasticsearch.client.single
- - system.grafana.client.single
- - system.kibana.client.single
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: xenial
- single_address: ${_param:stacklight_monitor_node01_address}
+++ /dev/null
-parameters:
- _param:
- stacklight_telemetry_node01_hostname: mtr01
- stacklight_telemetry_node02_hostname: mtr02
- stacklight_telemetry_node03_hostname: mtr03
- reclass:
- storage:
- node:
- stacklight_telemetry_node01:
- name: ${_param:stacklight_telemetry_node01_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.stacklight.telemetry
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: xenial
- single_address: ${_param:stacklight_telemetry_node01_address}
- keepalived_vip_priority: 103
- stacklight_telemetry_node02:
- name: ${_param:stacklight_telemetry_node02_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.stacklight.telemetry
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: xenial
- single_address: ${_param:stacklight_telemetry_node02_address}
- keepalived_vip_priority: 102
- stacklight_telemetry_node03:
- name: ${_param:stacklight_telemetry_node03_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.stacklight.telemetry
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: xenial
- single_address: ${_param:stacklight_telemetry_node03_address}
- keepalived_vip_priority: 101
+++ /dev/null
-parameters:
- _param:
- stacklight_telemetry_node01_hostname: mtr01
- reclass:
- storage:
- node:
- stacklight_telemetry_node01:
- name: ${_param:stacklight_telemetry_node01_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.stacklight.telemetry
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: xenial
- single_address: ${_param:stacklight_telemetry_node01_address}
- keepalived_vip_priority: 103
\ No newline at end of file
+++ /dev/null
-parameters:
- _param:
- stacklight_monitor_node01_hostname: mon01
- stacklight_monitor_node02_hostname: mon02
- stacklight_monitor_node03_hostname: mon03
- reclass:
- storage:
- node:
- stacklight_server_node01:
- name: ${_param:stacklight_monitor_node01_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - system.docker.swarm.master
- - cluster.${_param:cluster_name}.stacklight.server
- - cluster.${_param:cluster_name}.stacklight.client
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: xenial
- single_address: ${_param:stacklight_monitor_node01_address}
- keepalived_vip_priority: 103
- stacklight_server_node02:
- name: ${_param:stacklight_monitor_node02_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - system.docker.swarm.manager
- - cluster.${_param:cluster_name}.stacklight.server
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: xenial
- single_address: ${_param:stacklight_monitor_node02_address}
- keepalived_vip_priority: 102
- stacklight_server_node03:
- name: ${_param:stacklight_monitor_node03_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - system.docker.swarm.manager
- - cluster.${_param:cluster_name}.stacklight.server
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: xenial
- single_address: ${_param:stacklight_monitor_node03_address}
- keepalived_vip_priority: 101
+++ /dev/null
-classes:
-- service.rsyslog.client.single
+++ /dev/null
-parameters:
- salt:
- control:
- size:
- cicd.control:
- cpu: 8
- ram: 32768
- disk_profile: large
- net_profile: default
- cluster:
- internal:
- domain: ${_param:cluster_domain}
- engine: virt
- node:
- cid01:
- name: ${_param:cicd_control_node01_hostname}
- provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_xenial_image}
- size: cicd.control
- cid02:
- name: ${_param:cicd_control_node02_hostname}
- provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_xenial_image}
- size: cicd.control
- cid03:
- name: ${_param:cicd_control_node03_hostname}
- provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_xenial_image}
- size: cicd.control
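
The salt:control tree pairs a named size (CPU, RAM, disk profile) with per-VM placement: each node picks a provider hypervisor, an image, and a size. A condensed sketch of the same mechanics for one hypothetical VM (the flavor name is illustrative):

    parameters:
      salt:
        control:
          size:
            example.small:          # hypothetical flavor
              cpu: 2
              ram: 4096
              disk_profile: small
              net_profile: default
          cluster:
            internal:
              domain: ${_param:cluster_domain}
              engine: virt
              node:
                vm01:
                  provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
                  image: ${_param:salt_control_xenial_image}
                  size: example.small
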
+++ /dev/null
-parameters:
- salt:
- control:
- size:
- infra.idm:
- cpu: 4
- ram: 8192
- disk_profile: large
- net_profile: default
- cluster:
- internal:
- domain: ${_param:cluster_domain}
- engine: virt
- node:
- idm01:
- provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_centos7_image}
- size: infra.idm
- idm02:
- provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_centos7_image}
- size: infra.idm
- idm03:
- provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_centos7_image}
- size: infra.idm
+++ /dev/null
-parameters:
- salt:
- control:
- size:
- infra.integration:
- cpu: 4
- ram: 8192
- disk_profile: small
- net_profile: default
- cluster:
- internal:
- domain: ${_param:cluster_domain}
- engine: virt
- node:
- cid01:
- name: ${_param:cicd_control_node01_hostname}
- provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_xenial_image}
- size: infra.integration
+++ /dev/null
-parameters:
- salt:
- control:
- size:
- infra.proxy:
- cpu: 32
- ram: 65536
- disk_profile: small
- net_profile: default
- cluster:
- internal:
- domain: ${_param:cluster_domain}
- engine: virt
- node:
- prx01:
- name: ${_param:openstack_proxy_node01_hostname}
- provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_trusty_image}
- size: infra.proxy
- prx02:
- name: ${_param:openstack_proxy_node02_hostname}
- provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_trusty_image}
- size: infra.proxy
+++ /dev/null
-parameters:
- salt:
- control:
- size:
- infra.proxy:
- cpu: 32
- ram: 65536
- disk_profile: small
- net_profile: default
- cluster:
- internal:
- domain: ${_param:cluster_domain}
- engine: virt
- node:
- prx01:
- name: ${_param:openstack_proxy_node01_hostname}
- provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_trusty_image}
- size: infra.proxy
+++ /dev/null
-parameters:
- salt:
- control:
- size:
- infra.storage:
- cpu: 4
- ram: 8192
- disk_profile: xxxlarge
- net_profile: default
- cluster:
- internal:
- domain: ${_param:cluster_domain}
- engine: virt
- node:
- sto01:
- provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_xenial_image}
- size: infra.storage
+++ /dev/null
-parameters:
- salt:
- control:
- size:
- infra.version_control:
- cpu: 4
- ram: 8192
- disk_profile: small
- net_profile: default
- cluster:
- internal:
- domain: ${_param:cluster_domain}
- engine: virt
- node:
- git01:
- provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_xenial_image}
- size: infra.version_control
+++ /dev/null
-parameters:
- salt:
- control:
- size:
- opencontrail.analytics:
- cpu: 32
- ram: 65536
- disk_profile: small
- net_profile: default
- cluster:
- internal:
- domain: ${_param:cluster_domain}
- engine: virt
- node:
- nal01:
- name: ${_param:opencontrail_analytics_node01_hostname}
- provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_trusty_image}
- size: opencontrail.analytics
- nal02:
- name: ${_param:opencontrail_analytics_node02_hostname}
- provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_trusty_image}
- size: opencontrail.analytics
- nal03:
- name: ${_param:opencontrail_analytics_node03_hostname}
- provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_trusty_image}
- size: opencontrail.analytics
+++ /dev/null
-parameters:
- salt:
- control:
- size:
- opencontrail.control:
- cpu: 32
- ram: 65536
- disk_profile: small
- net_profile: default
- cluster:
- internal:
- domain: ${_param:cluster_domain}
- engine: virt
- node:
- ntw01:
- name: ${_param:opencontrail_control_node01_hostname}
- provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_trusty_image}
- size: opencontrail.control
- ntw02:
- name: ${_param:opencontrail_control_node02_hostname}
- provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_trusty_image}
- size: opencontrail.control
- ntw03:
- name: ${_param:opencontrail_control_node03_hostname}
- provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_trusty_image}
- size: opencontrail.control
+++ /dev/null
-parameters:
- salt:
- control:
- size:
- openstack.benchmark:
- cpu: 2
- ram: 4096
- disk_profile: small
- net_profile: default
- cluster:
- internal:
- domain: ${_param:cluster_domain}
- engine: virt
- node:
- bmk01:
- provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_trusty_image}
- size: openstack.benchmark
+++ /dev/null
-parameters:
- salt:
- control:
- size:
- openstack.billing:
- cpu: 4
- ram: 8192
- disk_profile: large
- net_profile: default
- cluster:
- internal:
- domain: ${_param:cluster_domain}
- engine: virt
- node:
- bil01:
- provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_trusty_image}
- size: openstack.billing
+++ /dev/null
-parameters:
- salt:
- control:
- size:
- openstack.control:
- cpu: 32
- ram: 65536
- disk_profile: small
- net_profile: default
- cluster:
- internal:
- domain: ${_param:cluster_domain}
- engine: virt
- node:
- ctl01:
- name: ${_param:openstack_control_node01_hostname}
- provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_trusty_image}
- size: openstack.control
- ctl02:
- name: ${_param:openstack_control_node02_hostname}
- provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_trusty_image}
- size: openstack.control
- ctl03:
- name: ${_param:openstack_control_node03_hostname}
- provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_trusty_image}
- size: openstack.control
+++ /dev/null
-parameters:
- salt:
- control:
- size:
- openstack.database:
- cpu: 32
- ram: 65536
- disk_profile: small
- net_profile: default
- cluster:
- internal:
- domain: ${_param:cluster_domain}
- engine: virt
- node:
- dbs01:
- name: ${_param:openstack_database_node01_hostname}
- provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_trusty_image}
- size: openstack.database
- dbs02:
- name: ${_param:openstack_database_node02_hostname}
- provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_trusty_image}
- size: openstack.database
- dbs03:
- name: ${_param:openstack_database_node03_hostname}
- provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_trusty_image}
- size: openstack.database
+++ /dev/null
-parameters:
- salt:
- control:
- size:
- openstack.dns:
- cpu: 2
- ram: 4096
- disk_profile: small
- net_profile: default
- cluster:
- internal:
- domain: ${_param:cluster_domain}
- engine: virt
- node:
- dns01:
- provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_xenial_image}
- size: openstack.dns
- dns02:
- provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_xenial_image}
- size: openstack.dns
+++ /dev/null
-parameters:
- salt:
- control:
- size:
- openstack.gateway:
- cpu: 32
- ram: 65536
- disk_profile: small
- net_profile: default
- cluster:
- internal:
- domain: ${_param:cluster_domain}
- engine: virt
- node:
- gtw01:
- provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_xenial_image}
- size: openstack.gateway
- gtw02:
- provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_xenial_image}
- size: openstack.gateway
- gtw03:
- provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_xenial_image}
- size: openstack.gateway
+++ /dev/null
-parameters:
- salt:
- control:
- size:
- openstack.gateway:
- cpu: 32
- ram: 65536
- disk_profile: small
- net_profile: default
- cluster:
- internal:
- domain: ${_param:cluster_domain}
- engine: virt
- node:
- gtw01:
- provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_xenial_image}
- size: openstack.gateway
+++ /dev/null
-parameters:
- salt:
- control:
- size:
- openstack.message_queue:
- cpu: 32
- ram: 65536
- disk_profile: small
- net_profile: default
- cluster:
- internal:
- domain: ${_param:cluster_domain}
- engine: virt
- node:
- msg01:
- name: ${_param:openstack_message_queue_node01_hostname}
- provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_trusty_image}
- size: openstack.message_queue
- msg02:
- name: ${_param:openstack_message_queue_node02_hostname}
- provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_trusty_image}
- size: openstack.message_queue
- msg03:
- name: ${_param:openstack_message_queue_node03_hostname}
- provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_trusty_image}
- size: openstack.message_queue
+++ /dev/null
-parameters:
- salt:
- control:
- size:
- openstack.proxy:
- cpu: 32
- ram: 65536
- disk_profile: small
- net_profile: default
- cluster:
- internal:
- domain: ${_param:cluster_domain}
- engine: virt
- node:
- prx01:
- name: ${_param:openstack_proxy_node01_hostname}
- provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_trusty_image}
- size: openstack.proxy
- prx02:
- name: ${_param:openstack_proxy_node02_hostname}
- provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_trusty_image}
- size: openstack.proxy
-
+++ /dev/null
-parameters:
- salt:
- control:
- size:
- openstack.proxy:
- cpu: 32
- ram: 65536
- disk_profile: small
- net_profile: default
- cluster:
- internal:
- domain: ${_param:cluster_domain}
- engine: virt
- node:
- prx01:
- name: ${_param:openstack_proxy_node01_hostname}
- provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_trusty_image}
- size: openstack.proxy
+++ /dev/null
-parameters:
- salt:
- control:
- size:
- openstack.telemetry:
- cpu: 4
- ram: 8192
- disk_profile: large
- net_profile: default
- cluster:
- internal:
- domain: ${_param:cluster_domain}
- engine: virt
- node:
- mdb01:
- name: ${_param:openstack_telemetry_node01_hostname}
- provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_trusty_image}
- size: openstack.telemetry
- mdb02:
- name: ${_param:openstack_telemetry_node02_hostname}
- provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_trusty_image}
- size: openstack.telemetry
- mdb03:
- name: ${_param:openstack_telemetry_node03_hostname}
- provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_trusty_image}
- size: openstack.telemetry
+++ /dev/null
-parameters:
- salt:
- control:
- size:
- openstack.upgrade:
- cpu: 32
- ram: 65536
- disk_profile: medium
- net_profile: default
- cluster:
- internal:
- domain: ${_param:cluster_domain}
- engine: virt
- node:
- upg01:
- provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_xenial_image}
- size: openstack.upgrade
+++ /dev/null
-parameters:
- salt:
- control:
- size:
- infra.rsyslog:
- cpu: 8
- ram: 8192
- disk_profile: xxlarge
- net_profile: default
- cluster:
- internal:
- domain: ${_param:cluster_domain}
- engine: virt
- node:
- rsl01:
- provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_xenial_image}
- size: infra.rsyslog
+++ /dev/null
-parameters:
- salt:
- control:
- size:
- stacklight.log:
- cpu: 32
- ram: 65536
- disk_profile: small
- net_profile: default
- cluster:
- internal:
- domain: ${_param:cluster_domain}
- engine: virt
- node:
- log01:
- name: ${_param:stacklight_log_node01_hostname}
- provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_xenial_image}
- size: stacklight.log
- log02:
- name: ${_param:stacklight_log_node02_hostname}
- provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_xenial_image}
- size: stacklight.log
- log03:
- name: ${_param:stacklight_log_node03_hostname}
- provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_xenial_image}
- size: stacklight.log
+++ /dev/null
-parameters:
- salt:
- control:
- size:
- stacklight.server:
- cpu: 32
- ram: 65536
- disk_profile: small
- net_profile: default
- cluster:
- internal:
- domain: ${_param:cluster_domain}
- engine: virt
- node:
- mon01:
- name: ${_param:stacklight_monitor_node01_hostname}
- provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_xenial_image}
- size: stacklight.server
- mon02:
- name: ${_param:stacklight_monitor_node02_hostname}
- provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_xenial_image}
- size: stacklight.server
- mon03:
- name: ${_param:stacklight_monitor_node03_hostname}
- provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_xenial_image}
- size: stacklight.server
+++ /dev/null
-parameters:
- salt:
- control:
- size:
- stacklight.telemetry:
- cpu: 32
- ram: 65536
- disk_profile: small
- net_profile: default
- cluster:
- internal:
- domain: ${_param:cluster_domain}
- engine: virt
- node:
- mtr01:
- name: ${_param:stacklight_telemetry_node01_hostname}
- provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_xenial_image}
- size: stacklight.telemetry
- mtr02:
- name: ${_param:stacklight_telemetry_node02_hostname}
- provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_xenial_image}
- size: stacklight.telemetry
- mtr03:
- name: ${_param:stacklight_telemetry_node03_hostname}
- provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_xenial_image}
- size: stacklight.telemetry
+++ /dev/null
-parameters:
- salt:
- control:
- size:
- stacklight.telemetry:
- cpu: 32
- ram: 65536
- disk_profile: small
- net_profile: default
- cluster:
- internal:
- domain: ${_param:cluster_domain}
- engine: virt
- node:
- mtr01:
- name: ${_param:stacklight_telemetry_node01_hostname}
- provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_xenial_image}
- size: stacklight.telemetry
+++ /dev/null
-classes:
- - service.libvirt.server.kvm
-parameters:
- salt:
- control:
- enabled: True
- virt_enabled: True
- virt:
- nic:
- default:
- eth1:
- bridge: br0
- model: virtio
- eth0:
- bridge: br1
- model: virtio
- disk:
- default:
- - system:
- size: 50000
- xxxsmall:
- - system:
- size: 8000
- xxsmall:
- - system:
- size: 15000
- xsmall:
- - system:
- size: 30000
- small:
- - system:
- size: 50000
- medium:
- - system:
- size: 80000
- large:
- - system:
- size: 100000
- xlarge:
- - system:
- size: 150000
- xxlarge:
- - system:
- size: 300000
- xxxlarge:
- - system:
- size: 500000
+++ /dev/null
-parameters:
- _param:
- salt_master_api_port: 6969
- salt_master_api_permissions:
- - '.*'
- - '@local'
- - '@wheel' # to allow access to all wheel modules
- - '@runner' # to allow access to all runner modules
- - '@jobs' # to allow access to the jobs runner and/or wheel modules
- salt:
- api:
- enabled: true
- bind:
- address: 0.0.0.0
- port: ${_param:salt_master_api_port}
- master:
- command_timeout: 600
- user:
- salt:
- permissions: ${_param:salt_master_api_permissions}
- linux:
- system:
- user:
- salt:
- enabled: true
- name: salt
- password: ${_param:salt_api_password_hash}
- home: /var/tmp/salt
- sudo: false
- system: true
- shell: /bin/false
+++ /dev/null
-parameters:
- salt:
- master:
- environment:
- dev:
- formula:
- ccp:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-ccp.git'
- revision: ${_param:salt_master_environment_revision}
+++ /dev/null
-parameters:
- salt:
- master:
- environment:
- dev:
- formula:
- aptly:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-aptly.git'
- revision: ${_param:salt_master_environment_revision}
- bind:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-bind.git'
- revision: ${_param:salt_master_environment_revision}
- gerrit:
- module:
- gerrit.py:
- enabled: true
- state:
- gerrit.py:
- enabled: true
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-gerrit.git'
- revision: ${_param:salt_master_environment_revision}
- jenkins:
- module:
- jenkins_common.py:
- enabled: true
- state:
- jenkins_credential.py:
- enabled: true
- jenkins_job.py:
- enabled: true
- jenkins_lib.py:
- enabled: true
- jenkins_node.py:
- enabled: true
- jenkins_plugin.py:
- enabled: true
- jenkins_security.py:
- enabled: true
- jenkins_slack.py:
- enabled: true
- jenkins_smtp.py:
- enabled: true
- jenkins_user.py:
- enabled: true
- jenkins_view.py:
- enabled: true
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-jenkins.git'
- revision: ${_param:salt_master_environment_revision}
- openldap:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-openldap.git'
- revision: ${_param:salt_master_environment_revision}
+++ /dev/null
-parameters:
- salt:
- master:
- environment:
- dev:
- formula:
- kubernetes:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-kubernetes.git'
- revision: ${_param:salt_master_environment_revision}
- etcd:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-etcd.git'
- revision: ${_param:salt_master_environment_revision}
- bird:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-bird.git'
- revision: ${_param:salt_master_environment_revision}
- docker:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-docker.git'
- revision: ${_param:salt_master_environment_revision}
\ No newline at end of file
+++ /dev/null
-parameters:
- salt:
- master:
- environment:
- dev:
- formula:
- prometheus:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-prometheus.git'
- revision: ${_param:salt_master_environment_revision}
- telegraf:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-telegraf.git'
- revision: ${_param:salt_master_environment_revision}
+++ /dev/null
-parameters:
- salt:
- master:
- environment:
- dev:
- formula:
- aodh:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-aodh.git'
- revision: ${_param:salt_master_environment_revision}
- #avinetworks:
- # source: git
- # address: '${_param:salt_master_environment_repository}/salt-formula-avinetworks.git'
- # revision: ${_param:salt_master_environment_revision}
- billometer:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-billometer.git'
- revision: ${_param:salt_master_environment_revision}
- ceilometer:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-ceilometer.git'
- revision: ${_param:salt_master_environment_revision}
- ceph:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-ceph.git'
- revision: ${_param:salt_master_environment_revision}
- cinder:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-cinder.git'
- revision: ${_param:salt_master_environment_revision}
- designate:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-designate.git'
- revision: ${_param:salt_master_environment_revision}
- galera:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-galera.git'
- revision: ${_param:salt_master_environment_revision}
- glance:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-glance.git'
- revision: ${_param:salt_master_environment_revision}
- glusterfs:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-glusterfs.git'
- revision: ${_param:salt_master_environment_revision}
- haproxy:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-haproxy.git'
- revision: ${_param:salt_master_environment_revision}
- heat:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-heat.git'
- revision: ${_param:salt_master_environment_revision}
- horizon:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-horizon.git'
- revision: ${_param:salt_master_environment_revision}
- keepalived:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-keepalived.git'
- revision: ${_param:salt_master_environment_revision}
- keystone:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-keystone.git'
- revision: ${_param:salt_master_environment_revision}
- memcached:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-memcached.git'
- revision: ${_param:salt_master_environment_revision}
- mongodb:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-mongodb.git'
- revision: ${_param:salt_master_environment_revision}
- mysql:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-mysql.git'
- revision: ${_param:salt_master_environment_revision}
- murano:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-murano.git'
- revision: ${_param:salt_master_environment_revision}
- neutron:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-neutron.git'
- revision: ${_param:salt_master_environment_revision}
- nginx:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-nginx.git'
- revision: ${_param:salt_master_environment_revision}
- nova:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-nova.git'
- revision: ${_param:salt_master_environment_revision}
- opencontrail:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-opencontrail.git'
- revision: ${_param:salt_master_environment_revision}
- python:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-python.git'
- revision: ${_param:salt_master_environment_revision}
- rabbitmq:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-rabbitmq.git'
- revision: ${_param:salt_master_environment_revision}
- sahara:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-sahara.git'
- revision: ${_param:salt_master_environment_revision}
- statsd:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-statsd.git'
- revision: ${_param:salt_master_environment_revision}
- supervisor:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-supervisor.git'
- revision: ${_param:salt_master_environment_revision}
- swift:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-swift.git'
- revision: ${_param:salt_master_environment_revision}
+++ /dev/null
-parameters:
- _param:
- salt_master_oss_repository: https://gerrit.mcp.mirantis.net/salt-formulas
- salt_master_oss_revision: master
- salt:
- master:
- environment:
- dev:
- formula:
- devops_portal:
- module:
- devops_utils.py:
- enabled: true
- source: git
- address: '${_param:salt_master_oss_repository}/devops-portal.git'
- revision: ${_param:salt_master_oss_revision}
- rundeck:
- module:
- rundeck.py:
- enabled: true
- state:
- rundeck_project.py:
- enabled: true
- source: git
- address: '${_param:salt_master_oss_repository}/rundeck.git'
- revision: ${_param:salt_master_oss_revision}
+++ /dev/null
-parameters:
- salt:
- master:
- environment:
- dev:
- formula:
- backupninja:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-backupninja.git'
- revision: ${_param:salt_master_environment_revision}
- git:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-git.git'
- revision: ${_param:salt_master_environment_revision}
- iptables:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-iptables.git'
- revision: ${_param:salt_master_environment_revision}
- libvirt:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-libvirt.git'
- revision: ${_param:salt_master_environment_revision}
- linux:
- module:
- linux_netlink.py:
- enabled: true
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-linux.git'
- revision: ${_param:salt_master_environment_revision}
- ntp:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-ntp.git'
- revision: ${_param:salt_master_environment_revision}
- openssh:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-openssh.git'
- revision: ${_param:salt_master_environment_revision}
- reclass:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-reclass.git'
- revision: ${_param:salt_master_environment_revision}
- salt:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-salt.git'
- revision: ${_param:salt_master_environment_revision}
- sphinx:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-sphinx.git'
- revision: ${_param:salt_master_environment_revision}
+++ /dev/null
-parameters:
- salt:
- master:
- environment:
- dev:
- formula:
- apache:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-apache.git'
- revision: ${_param:salt_master_environment_revision}
- collectd:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-collectd.git'
- revision: ${_param:salt_master_environment_revision}
- elasticsearch:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-elasticsearch.git'
- revision: ${_param:salt_master_environment_revision}
- grafana:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-grafana.git'
- revision: ${_param:salt_master_environment_revision}
- state:
- grafana3_datasource.py:
- enabled: true
- grafana3_dashboard.py:
- enabled: true
- graphite:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-graphite.git'
- revision: ${_param:salt_master_environment_revision}
- heka:
- module:
- heka_alarming.py:
- enabled: true
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-heka.git'
- revision: ${_param:salt_master_environment_revision}
- influxdb:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-influxdb.git'
- revision: ${_param:salt_master_environment_revision}
- java:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-java.git'
- revision: ${_param:salt_master_environment_revision}
- kibana:
- state:
- kibana_object.py:
- enabled: true
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-kibana.git'
- revision: ${_param:salt_master_environment_revision}
- nagios:
- module:
- nagios_alarming.py:
- enabled: true
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-nagios.git'
- revision: ${_param:salt_master_environment_revision}
- postgresql:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-postgresql.git'
- revision: ${_param:salt_master_environment_revision}
- rabbitmq:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-rabbitmq.git'
- revision: ${_param:salt_master_environment_revision}
- redis:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-redis.git'
- revision: ${_param:salt_master_environment_revision}
- rsyslog:
- module:
- rsyslog_util.py:
- enabled: true
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-rsyslog.git'
- revision: ${_param:salt_master_environment_revision}
- sensu:
- source: git
- address: '${_param:salt_master_environment_repository}/salt-formula-sensu.git'
- revision: ${_param:salt_master_environment_revision}
+++ /dev/null
-parameters:
- salt:
- master:
- environment:
- prd:
- formula:
- ccp:
- source: pkg
- name: salt-formula-ccp
+++ /dev/null
-parameters:
- salt:
- master:
- environment:
- prd:
- formula:
- aptcacher:
- source: pkg
- name: salt-formula-aptcacher
- aptly:
- source: pkg
- name: salt-formula-aptly
- bind:
- source: pkg
- name: salt-formula-bind
- gerrit:
- source: pkg
- name: salt-formula-gerrit
- jenkins:
- source: pkg
- name: salt-formula-jenkins
- freeipa:
- source: pkg
- name: salt-formula-freeipa
- maas:
- source: pkg
- name: salt-formula-maas
- openldap:
- source: pkg
- name: salt-formula-openldap
- lldp:
- source: pkg
- name: salt-formula-lldp
+++ /dev/null
-parameters:
- salt:
- master:
- environment:
- prd:
- formula:
- kubernetes:
- source: pkg
- name: salt-formula-kubernetes
- etcd:
- source: pkg
- name: salt-formula-etcd
- bird:
- source: pkg
- name: salt-formula-bird
- docker:
- source: pkg
- name: salt-formula-docker
\ No newline at end of file
+++ /dev/null
-parameters:
- salt:
- master:
- environment:
- prd:
- formula:
- prometheus:
- source: pkg
- name: salt-formula-prometheus
- telegraf:
- source: pkg
- name: salt-formula-telegraf
+++ /dev/null
-parameters:
- salt:
- master:
- environment:
- prd:
- formula:
- aodh:
- source: pkg
- name: salt-formula-aodh
- #avinetworks:
- # source: pkg
- # name: salt-formula-avinetworks
- billometer:
- source: pkg
- name: salt-formula-billometer
- ceilometer:
- source: pkg
- name: salt-formula-ceilometer
- ceph:
- source: pkg
- name: salt-formula-ceph
- cinder:
- source: pkg
- name: salt-formula-cinder
- galera:
- source: pkg
- name: salt-formula-galera
- glance:
- source: pkg
- name: salt-formula-glance
- glusterfs:
- source: pkg
- name: salt-formula-glusterfs
- designate:
- source: pkg
- name: salt-formula-designate
- haproxy:
- source: pkg
- name: salt-formula-haproxy
- heat:
- source: pkg
- name: salt-formula-heat
- horizon:
- source: pkg
- name: salt-formula-horizon
- keepalived:
- source: pkg
- name: salt-formula-keepalived
- keystone:
- source: pkg
- name: salt-formula-keystone
- memcached:
- source: pkg
- name: salt-formula-memcached
- mongodb:
- source: pkg
- name: salt-formula-mongodb
- mysql:
- source: pkg
- name: salt-formula-mysql
- murano:
- source: pkg
- name: salt-formula-murano
- neutron:
- source: pkg
- name: salt-formula-neutron
- nginx:
- source: pkg
- name: salt-formula-nginx
- nova:
- source: pkg
- name: salt-formula-nova
- opencontrail:
- source: pkg
- name: salt-formula-opencontrail
- python:
- source: pkg
- name: salt-formula-python
- rabbitmq:
- source: pkg
- name: salt-formula-rabbitmq
- sahara:
- source: pkg
- name: salt-formula-sahara
- statsd:
- source: pkg
- name: salt-formula-statsd
- supervisor:
- source: pkg
- name: salt-formula-supervisor
- swift:
- source: pkg
- name: salt-formula-swift
+++ /dev/null
-parameters:
- salt:
- master:
- environment:
- prd:
- formula:
- devops_portal:
- source: pkg
- name: salt-formula-devops-portal
- rundeck:
- source: pkg
- name: salt-formula-rundeck
+++ /dev/null
-parameters:
- salt:
- master:
- environment:
- prd:
- formula:
- backupninja:
- source: pkg
- name: salt-formula-backupninja
- git:
- source: pkg
- name: salt-formula-git
- iptables:
- source: pkg
- name: salt-formula-iptables
- libvirt:
- source: pkg
- name: salt-formula-libvirt
- linux:
- source: pkg
- name: salt-formula-linux
- nginx:
- source: pkg
- name: salt-formula-nginx
- ntp:
- source: pkg
- name: salt-formula-ntp
- openssh:
- source: pkg
- name: salt-formula-openssh
- reclass:
- source: pkg
- name: salt-formula-reclass
- salt:
- source: pkg
- name: salt-formula-salt
- sphinx:
- source: pkg
- name: salt-formula-sphinx
+++ /dev/null
-parameters:
- salt:
- master:
- environment:
- prd:
- formula:
- apache:
- source: pkg
- name: salt-formula-apache
- collectd:
- source: pkg
- name: salt-formula-collectd
- elasticsearch:
- source: pkg
- name: salt-formula-elasticsearch
- grafana:
- source: pkg
- name: salt-formula-grafana
- graphite:
- source: pkg
- name: salt-formula-graphite
- heka:
- source: pkg
- name: salt-formula-heka
- influxdb:
- source: pkg
- name: salt-formula-influxdb
- java:
- source: pkg
- name: salt-formula-java
- kibana:
- source: pkg
- name: salt-formula-kibana
- #nagios:
- # source: pkg
- # name: salt-formula-nagios
- postgresql:
- source: pkg
- name: salt-formula-postgresql
- rabbitmq:
- source: pkg
- name: salt-formula-rabbitmq
- redis:
- source: pkg
- name: salt-formula-redis
- rsyslog:
- source: pkg
- name: salt-formula-rsyslog
- sensu:
- source: pkg
- name: salt-formula-sensu
- nagios:
- source: pkg
- name: salt-formula-nagios
+++ /dev/null
-classes:
-- system.salt.master.single
-- system.salt.master.formula.git.ccp
-- system.salt.master.formula.git.foundation
-- system.salt.master.formula.git.kubernetes
-- system.salt.master.formula.git.openstack
-- system.salt.master.formula.git.oss
-- system.salt.master.formula.git.saltstack
-- system.salt.master.formula.git.stacklight
-- system.salt.master.formula.git.monitoring
-parameters:
- _param:
- salt_master_environment_repository: "https://github.com/salt-formulas"
- salt_master_environment_revision: master
+++ /dev/null
-classes:
-- system.salt.master.single
-- system.salt.master.formula.pkg.ccp
-- system.salt.master.formula.pkg.foundation
-- system.salt.master.formula.pkg.kubernetes
-- system.salt.master.formula.pkg.openstack
-- system.salt.master.formula.pkg.oss
-- system.salt.master.formula.pkg.saltstack
-- system.salt.master.formula.pkg.stacklight
-- system.salt.master.formula.pkg.monitoring
-- system.linux.system.repo.mcp.salt
+++ /dev/null
-classes:
-- service.git.client
-- service.salt.master.single
-parameters:
- linux:
- system:
- sysctl:
- net.core.rmem_max: 16777216
- net.core.wmem_max: 16777216
- net.ipv4.tcp_rmem: 4096 87380 16777216
- net.ipv4.tcp_wmem: 4096 87380 16777216
- salt:
- master:
- accept_policy: auto_accept
- worker_threads: 40
- command_timeout: 10
- peer:
- '.*':
- - x509.sign_remote_certificate
+++ /dev/null
-parameters:
- _param:
- salt_minion_ca_common_name: Salt Master CA
- salt_minion_ca_country: cz
- salt_minion_ca_locality: Prague
- salt_minion_ca_organization: Mirantis
- salt_minion_ca_days_valid_authority: 3650
- salt_minion_ca_days_valid_certificate: 365
- salt:
- minion:
- ca:
- salt_master_ca:
- common_name: ${_param:salt_minion_ca_common_name}
- country: ${_param:salt_minion_ca_country}
- locality: ${_param:salt_minion_ca_locality}
- organization: ${_param:salt_minion_ca_organization}
- signing_policy:
- cert_server:
- type: v3_edge_cert_server
- minions: '*'
- cert_client:
- type: v3_edge_cert_client
- minions: '*'
- cert_open:
- type: v3_edge_cert_open
- minions: '*'
- days_valid:
- authority: ${_param:salt_minion_ca_days_valid_authority}
- certificate: ${_param:salt_minion_ca_days_valid_certificate}
+++ /dev/null
-parameters:
- _param:
- salt_minion_ca_authority: salt_master_ca
- salt:
- minion:
- cert:
- ceph:
- host: ${_param:salt_minion_ca_host}
- signing_policy: cert_server
- authority: ${_param:salt_minion_ca_authority}
- common_name: ${_param:cluster_public_host}
-
+++ /dev/null
-classes:
-- system.salt.minion.cert.ceph
-parameters:
- _param:
- salt_pki_ceph_alt_names: IP:${_param:cluster_public_host},DNS:${_param:cluster_public_host}
- salt:
- minion:
- cert:
- ceph:
- common_name: ceph
- alternative_names: IP:127.0.0.1,${_param:salt_pki_ceph_alt_names}
+++ /dev/null
-parameters:
- salt:
- minion:
- cert:
- ceph:
- key_file: /srv/salt/pki/${_param:cluster_name}/${salt:minion:cert:ceph:common_name}.key
- cert_file: /srv/salt/pki/${_param:cluster_name}/${salt:minion:cert:ceph:common_name}.crt
- all_file: /srv/salt/pki/${_param:cluster_name}/${salt:minion:cert:ceph:common_name}-chain-with-key.pem
+++ /dev/null
-parameters:
- salt:
- minion:
- cert:
- etcd_client:
- host: ${_param:salt_minion_ca_host}
- authority: ${_param:salt_minion_ca_authority}
- common_name: ${linux:system:name}
- signing_policy: cert_open
- alternative_names: IP:${_param:cluster_local_address},DNS:${linux:system:name},DNS:${linux:network:fqdn}
- extended_key_usage: clientAuth
- key_usage: "digitalSignature,nonRepudiation,keyEncipherment"
- key_file: /var/lib/etcd/etcd-client.key
- cert_file: /var/lib/etcd/etcd-client.crt
- all_file: /var/lib/etcd/etcd-client.pem
- ca_file: /var/lib/etcd/ca.pem
- user: etcd
- group: etcd
+++ /dev/null
-parameters:
- salt:
- minion:
- cert:
- etcd_server:
- host: ${_param:salt_minion_ca_host}
- authority: ${_param:salt_minion_ca_authority}
- common_name: ${linux:system:name}
- signing_policy: cert_open
- alternative_names: IP:127.0.0.1,IP:${_param:cluster_vip_address},IP:${_param:cluster_local_address},DNS:${linux:system:name},DNS:${linux:network:fqdn}
- extended_key_usage: serverAuth,clientAuth
- key_usage: "digitalSignature,nonRepudiation,keyEncipherment"
- key_file: /var/lib/etcd/etcd-server.key
- cert_file: /var/lib/etcd/etcd-server.crt
- all_file: /var/lib/etcd/etcd-server.pem
- ca_file: /var/lib/etcd/ca.pem
- user: etcd
- group: etcd
+++ /dev/null
-parameters:
- salt:
- minion:
- cert:
- k8s_client:
- host: ${_param:salt_minion_ca_host}
- authority: ${_param:salt_minion_ca_authority}
- key_file: /etc/kubernetes/ssl/kubelet-client.key
- cert_file: /etc/kubernetes/ssl/kubelet-client.crt
- ca_file: /etc/kubernetes/ssl/ca-kubernetes.crt
- common_name: kubelet-client
- signing_policy: cert_client
- alternative_names: IP:${_param:cluster_vip_address},IP:${_param:cluster_node01_address},IP:${_param:cluster_node02_address},IP:${_param:cluster_node03_address},IP:${_param:kubernetes_internal_api_address}
\ No newline at end of file
+++ /dev/null
-parameters:
- salt:
- minion:
- cert:
- k8s_client:
- host: ${_param:salt_minion_ca_host}
- authority: ${_param:salt_minion_ca_authority}
- key_file: /etc/kubernetes/ssl/kubelet-client.key
- cert_file: /etc/kubernetes/ssl/kubelet-client.crt
- ca_file: /etc/kubernetes/ssl/ca-kubernetes.crt
- common_name: kubelet-client
- signing_policy: cert_client
- alternative_names: IP:${_param:control_address},IP:${_param:kubernetes_internal_api_address}
\ No newline at end of file
+++ /dev/null
-parameters:
- salt:
- minion:
- cert:
- k8s_server:
- host: ${_param:salt_minion_ca_host}
- authority: ${_param:salt_minion_ca_authority}
- common_name: kubernetes-server
- key_file: /srv/salt/env/${_param:salt_master_base_environment}/_certs/kubernetes/kubernetes-server.key
- cert_file: /srv/salt/env/${_param:salt_master_base_environment}/_certs/kubernetes/kubernetes-server.crt
- all_file: /srv/salt/env/${_param:salt_master_base_environment}/_certs/kubernetes/kubernetes-server.pem
- signing_policy: cert_server
- alternative_names: IP:${_param:cluster_vip_address},IP:${_param:cluster_node01_address},IP:${_param:cluster_node02_address},IP:${_param:cluster_node03_address},IP:${_param:kubernetes_internal_api_address},DNS:kubernetes.default,DNS:kubernetes.default.svc
+++ /dev/null
-parameters:
- salt:
- minion:
- cert:
- k8s_server:
- host: ${_param:salt_minion_ca_host}
- authority: ${_param:salt_minion_ca_authority}
- common_name: kubernetes-server
- key_file: /srv/salt/env/${_param:salt_master_base_environment}/_certs/kubernetes/kubernetes-server.key
- cert_file: /srv/salt/env/${_param:salt_master_base_environment}/_certs/kubernetes/kubernetes-server.crt
- all_file: /srv/salt/env/${_param:salt_master_base_environment}/_certs/kubernetes/kubernetes-server.pem
- signing_policy: cert_server
- alternative_names: IP:${_param:control_address},IP:${_param:kubernetes_internal_api_address}
+++ /dev/null
-parameters:
- salt:
- minion:
- cert:
- prometheus_server:
- host: ${_param:salt_minion_ca_host}
- authority: ${_param:salt_minion_ca_authority}
- key_file: ${prometheus:server:dir:config}/prometheus-server.key
- cert_file: ${prometheus:server:dir:config}/prometheus-server.crt
- common_name: prometheus-server
- signing_policy: cert_client
- alternative_names: IP:${_param:cluster_vip_address},IP:${_param:cluster_node01_address},IP:${_param:cluster_node02_address},IP:${_param:cluster_node03_address},IP:${_param:kubernetes_internal_api_address}
- mode: '0444'
+++ /dev/null
-classes:
-- system.salt.minion.cert.proxy
-parameters:
- salt:
- minion:
- cert:
- proxy:
- alternative_names: "DNS:${_param:cluster_public_host}, DNS:*.${_param:cluster_public_host}, IP:${_param:control_vip_address}, IP:${_param:single_address}"
- key_file: /etc/haproxy/ssl/${_param:cluster_public_host}.key
- cert_file: /etc/haproxy/ssl/${_param:cluster_public_host}.crt
- all_file: /etc/haproxy/ssl/${_param:cluster_public_host}-all.pem
- ca_file: /etc/haproxy/ssl/${_param:salt_minion_ca_authority}-ca.crt
- user: root
- group: haproxy
- mode: 640
\ No newline at end of file
+++ /dev/null
-parameters:
- _param:
- salt_minion_ca_authority: salt_master_ca
- salt:
- minion:
- cert:
- proxy:
- host: ${_param:salt_minion_ca_host}
- signing_policy: cert_server
- authority: ${_param:salt_minion_ca_authority}
- common_name: ${_param:cluster_public_host}
+++ /dev/null
-classes:
-- system.salt.minion.cert.proxy
-parameters:
- _param:
- salt_pki_proxy_alt_names: IP:${_param:cluster_public_host},DNS:${_param:cluster_public_host},DNS:proxy.${_param:cluster_public_host},DNS:horizon.${_param:cluster_public_host}
- salt:
- minion:
- cert:
- proxy:
- common_name: proxy
- alternative_names: IP:127.0.0.1,${_param:salt_pki_proxy_alt_names}
+++ /dev/null
-parameters:
- salt:
- minion:
- cert:
- proxy:
- key_file: /srv/salt/pki/${_param:cluster_name}/${salt:minion:cert:proxy:common_name}.key
- cert_file: /srv/salt/pki/${_param:cluster_name}/${salt:minion:cert:proxy:common_name}.crt
- all_file: /srv/salt/pki/${_param:cluster_name}/${salt:minion:cert:proxy:common_name}-chain-with-key.pem
+++ /dev/null
-parameters:
- _param:
- salt_minion_ca_authority: salt_master_ca
- salt:
- minion:
- cert:
- swift:
- host: ${_param:salt_minion_ca_host}
- signing_policy: cert_server
- authority: ${_param:salt_minion_ca_authority}
- common_name: ${_param:cluster_public_host}
+++ /dev/null
-classes:
-- system.salt.minion.cert.swift
-parameters:
- _param:
- salt_pki_swift_alt_names: IP:${_param:cluster_public_host},DNS:${_param:cluster_public_host}
- salt:
- minion:
- cert:
- swift:
- common_name: swift
- alternative_names: IP:127.0.0.1,${_param:salt_pki_swift_alt_names}
+++ /dev/null
-parameters:
- salt:
- minion:
- cert:
- swift:
- key_file: /srv/salt/pki/${_param:cluster_name}/${salt:minion:cert:swift:common_name}.key
- cert_file: /srv/salt/pki/${_param:cluster_name}/${salt:minion:cert:swift:common_name}.crt
- all_file: /srv/salt/pki/${_param:cluster_name}/${salt:minion:cert:swift:common_name}-chain-with-key.pem
+++ /dev/null
-parameters:
- _param:
- salt_minion_ca_authority: salt_master_ca
- salt_pki_wildcard_alt_names: IP:${_param:cluster_public_host},DNS:${_param:cluster_public_host},DNS:*.${_param:cluster_public_host},DNS:${_param:cluster_domain},DNS:*.${_param:cluster_domain}
- salt:
- minion:
- cert:
- proxy:
- host: ${_param:salt_minion_ca_host}
- signing_policy: cert_server
- authority: ${_param:salt_minion_ca_authority}
- common_name: wildcard
- alternative_names: IP:127.0.0.1,${_param:salt_pki_wildcard_alt_names}
- key_file: /srv/salt/pki/${_param:cluster_name}/${salt:minion:cert:wildcard:common_name}.key
- cert_file: /srv/salt/pki/${_param:cluster_name}/${salt:minion:cert:wildcard:common_name}.crt
- all_file: /srv/salt/pki/${_param:cluster_name}/${salt:minion:cert:wildcard:common_name}-chain-with-key.pem
+++ /dev/null
-classes:
-- service.salt.minion.master
+++ /dev/null
-parameters:
- salt:
- minion:
- master_type: failover
- masters:
- - host: ${_param:infra_config_deploy_address}
- - host: ${_param:infra_config_address}
BASE_IMAGE=https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
declare -A NODES=( [cfg01]=4096 [ctl01]=6144 [ctl02]=6144 [ctl03]=6144 [gtw01]=2048 [cmp01]=2048 )
+# install required packages
+apt-get install -y mkisofs curl virtinst cpu-checker qemu-kvm
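+# mkisofs for building VM config ISOs, virtinst/qemu-kvm for defining and
+# running the VMs, cpu-checker for the kvm-ok virtualization check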
+
+# generate ssh key if it does not exist yet
[ -f $SSH_KEY ] || ssh-keygen -f $SSH_KEY -N ''
# get base image
CONNECTION_ATTEMPTS=20
SLEEP=15
+# remove any stale salt master host key from known_hosts
+ssh-keygen -R $SALT_MASTER
+
# wait until ssh on Salt master is available
echo "Attempting to ssh to Salt master ..."
ATTEMPT=1
(*) echo "${ATTEMPT}/${CONNECTION_ATTEMPTS}> ssh server ain't ready yet, waiting for ${SLEEP} seconds ..." ;;
esac
sleep $SLEEP
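+ # record the master's current ECDSA host key so later ssh calls stay
+ # non-interactive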
+ ssh-keyscan -t ecdsa $SALT_MASTER >> ~/.ssh/known_hosts
((ATTEMPT+=1))
done
apt-get install -y git curl subversion
svn export --force https://github.com/salt-formulas/salt-formulas/trunk/deploy/scripts /srv/salt/scripts
- git clone --depth=1 https://git.opnfv.org/fuel
- ln -s fuel/mcp/reclass /srv/salt/reclass
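+# clone with submodules so the reclass system-level classes are checked out;
+# symlink by absolute path so the link resolves from any working directory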
+ git clone --depth=1 --recurse-submodules https://git.opnfv.org/fuel
+ ln -s $(pwd)/fuel/mcp/reclass /srv/salt/reclass
cd /srv/salt/scripts
MASTER_HOSTNAME=cfg01.virtual-mcp-ocata-ovs.local ./salt-master-init.sh