## A Heat environment file which can be used to set up storage
## backends. Defaults to Ceph used as a backend for Cinder, Glance and
## Nova ephemeral storage.
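+## The resource_registry entries below enable the composable CephMon,
+## CephOSD and CephClient services. A typical deployment includes this
+## file on the deploy command line, e.g. (illustrative path):
+##   openstack overcloud deploy --templates \
+##     -e environments/storage-environment.yaml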
+resource_registry:
+ OS::TripleO::Services::CephMon: ../puppet/services/ceph-mon.yaml
+ OS::TripleO::Services::CephOSD: ../puppet/services/ceph-osd.yaml
+ OS::TripleO::Services::CephClient: ../puppet/services/ceph-client.yaml
+
parameter_defaults:
#### BACKEND SELECTION ####
## Whether to enable iscsi backend for Cinder.
CinderEnableIscsiBackend: false
## Whether to enable rbd (Ceph) backend for Cinder.
CinderEnableRbdBackend: true
+ ## Cinder Backup backend can be either 'ceph' or 'swift'.
+ CinderBackupBackend: ceph
## Whether to enable NFS backend for Cinder.
# CinderEnableNfsBackend: false
## Whether to enable rbd (Ceph) backend for Nova ephemeral storage.
NovaEnableRbdBackend: true
- #### GLANCE FILE BACKEND PACEMAKER SETTINGS (used for mounting NFS) ####
+ #### GLANCE NFS SETTINGS ####
- ## Whether to make Glance 'file' backend a mount managed by Pacemaker
- # GlanceFilePcmkManage: false
- ## File system type of the mount
- # GlanceFilePcmkFstype: nfs
- ## Pacemaker mount point, e.g. '192.168.122.1:/export/glance' for NFS
- # GlanceFilePcmkDevice: ''
- ## Options for the mount managed by Pacemaker
- # GlanceFilePcmkOptions: ''
+ ## Make sure to set `GlanceBackend: file` when enabling NFS
+ ##
+ ## Whether to make Glance 'file' backend an NFS mount
+ # GlanceNfsEnabled: false
+ ## NFS share for image storage, e.g. '192.168.122.1:/export/glance'
+ ## (If using IPv6, use both double- and single-quotes,
+ ## e.g. "'[fdd0::1]:/export/glance'")
+ # GlanceNfsShare: ''
+ ## Mount options for the NFS image storage mount point
+ # GlanceNfsOptions: 'intr,context=system_u:object_r:glance_var_lib_t:s0'
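+ ##
+ ## Illustrative example (values are placeholders): to store Glance images
+ ## on NFS instead of Ceph, set e.g.:
+ ##   GlanceBackend: file
+ ##   GlanceNfsEnabled: true
+ ##   GlanceNfsShare: '192.168.122.1:/export/glance'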
#### CEPH SETTINGS ####
- ## Whether to deploy Ceph OSDs on the controller nodes. By default
- ## OSDs are deployed on dedicated ceph-storage nodes only.
- # ControllerEnableCephStorage: false
-
## When deploying Ceph Nodes through the oscplugin CLI, the following
## parameters are set automatically by the CLI. When deploying via
## heat stack-create or ceph on the controller nodes only,
## they need to be provided manually.
## Ceph monitor key
# CephMonKey: ''
## Ceph admin key, e.g. 'AQDLOh1VgEp6FRAAFzT7Zw+Y9V6JJExQAsRnRQ=='
# CephAdminKey: ''
+ ## Ceph client key, e.g. 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw=='
+ # CephClientKey: ''
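+ ##
+ ## Note: as an example, and assuming the ceph-common package is available
+ ## on the host where the keys are generated, each CephX key above can be
+ ## created with:
+ ##   ceph-authtool --gen-print-key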