X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?a=blobdiff_plain;f=ci%2Fansible%2Fgroup_vars%2Fceph%2Fall.yml;fp=ci%2Fansible%2Fgroup_vars%2Fceph%2Fall.yml;h=9594d33dbb62a5747feb7cac2c11280443594c14;hb=64df7bc3bc70d49153409436b411fb327691a4d5;hp=0000000000000000000000000000000000000000;hpb=0786fde30eba926b097617dea9ca4683ac2fa1b7;p=stor4nfv.git

diff --git a/ci/ansible/group_vars/ceph/all.yml b/ci/ansible/group_vars/ceph/all.yml
new file mode 100644
index 0000000..9594d33
--- /dev/null
+++ b/ci/ansible/group_vars/ceph/all.yml
@@ -0,0 +1,501 @@
---
# Variables here are applicable to all host groups, NOT to roles

# This sample file was generated by generate_group_vars_sample.sh

# Dummy variable to avoid an error, because Ansible does not recognize
# the file as a valid configuration file when it contains no variables.
dummy:

# You can override these vars by using host or group vars

###########
# GENERAL #
###########

######################################
# Releases name to number dictionary #
######################################
#ceph_release_num:
# dumpling: 0.67
# emperor: 0.72
# firefly: 0.80
# giant: 0.87
# hammer: 0.94
# infernalis: 9
# jewel: 10
# kraken: 11
# luminous: 12
# mimic: 13

# Directory to fetch the cluster fsid, keys, etc. into
#fetch_directory: fetch/

# The 'cluster' variable determines the name of the cluster.
# Changing the default value to something else means that you will
# need to change all the command line calls as well, for example if
# your cluster name is 'foo':
# "ceph health" will become "ceph --cluster foo health"
#
# An easier way to handle this is to use the environment variable CEPH_ARGS.
# So run: export CEPH_ARGS="--cluster foo"
# With that you will be able to run "ceph health" normally
#cluster: ceph

# Inventory host group variables
#mon_group_name: mons
#osd_group_name: osds
#rgw_group_name: rgws
#mds_group_name: mdss
#nfs_group_name: nfss
#restapi_group_name: restapis
#rbdmirror_group_name: rbdmirrors
#client_group_name: clients
#iscsi_gw_group_name: iscsi-gws
#mgr_group_name: mgrs

# If check_firewall is true, then ansible will try to determine if the
# Ceph ports are blocked by a firewall. If the machine running ansible
# cannot reach the Ceph ports for some other reason, you may need or
# want to set this to False to skip those checks.
#check_firewall: False


############
# PACKAGES #
############
#debian_package_dependencies:
# - python-pycurl
# - hdparm

#centos_package_dependencies:
# - python-pycurl
# - hdparm
# - epel-release
# - python-setuptools
# - libselinux-python

#redhat_package_dependencies:
# - python-pycurl
# - hdparm
# - python-setuptools

# Whether or not to install the ceph-test package.
#ceph_test: false

# Enable the ntp service by default to avoid clock skew on
# ceph nodes
#ntp_service_enabled: true

# Set uid/gid to the default '64045' for bootstrap directories.
# '64045' is used for Debian-based distros. It must be set to 167 for RHEL-based distros.
# These values have to be set according to the base OS used by the container image, NOT the host.
#bootstrap_dirs_owner: "64045"
#bootstrap_dirs_group: "64045"

# This variable determines if ceph packages can be updated. If False, the
# package resources will use "state=present". If True, they will use
# "state=latest".
#upgrade_ceph_packages: False
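
# The flag above can also be flipped at run time instead of editing this file.
# A minimal sketch (assuming the stock ceph-ansible site.yml playbook; adjust
# the playbook name to whatever this CI actually invokes):
#   ansible-playbook site.yml -e upgrade_ceph_packages=True
# This makes the package resources use "state=latest" for that run only.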

#ceph_use_distro_backports: false # DEBIAN ONLY


###########
# INSTALL #
###########
#ceph_rhcs_cdn_install: False # backward compatibility with stable-2.2, will disappear in stable 3.1
#ceph_repository_type: "{{ 'cdn' if ceph_rhcs_cdn_install else 'iso' if ceph_rhcs_iso_install else 'dummy' }}" # backward compatibility with stable-2.2, will disappear in stable 3.1
#ceph_rhcs_iso_install: False # backward compatibility with stable-2.2, will disappear in stable 3.1
#ceph_rhcs: False # backward compatibility with stable-2.2, will disappear in stable 3.1
#ceph_stable: False # backward compatibility with stable-2.2, will disappear in stable 3.1
#ceph_dev: False # backward compatibility with stable-2.2, will disappear in stable 3.1
#ceph_stable_uca: False # backward compatibility with stable-2.2, will disappear in stable 3.1
#ceph_custom: False # backward compatibility with stable-2.2, will disappear in stable 3.1

# ORIGIN SOURCE
#
# Choose between:
# - 'repository' means that ceph will be installed from a new repository; further
#   below, choose between 'community', 'rhcs', 'dev', 'uca' or 'custom'
# - 'distro' means that no separate repo file will be added and you will get
#   whatever version of Ceph is included in your Linux distro
# - 'local' means that the ceph binaries will be copied over from the local machine
#ceph_origin: "{{ 'repository' if ceph_rhcs or ceph_stable or ceph_dev or ceph_stable_uca or ceph_custom else 'dummy' }}" # backward compatibility with stable-2.2, will disappear in stable 3.1
#valid_ceph_origins:
# - repository
# - distro
# - local
ceph_origin: repository
ceph_repository: community

#ceph_repository: "{{ 'community' if ceph_stable else 'rhcs' if ceph_rhcs else 'dev' if ceph_dev else 'uca' if ceph_stable_uca else 'custom' if ceph_custom else 'dummy' }}" # backward compatibility with stable-2.2, will disappear in stable 3.1
#valid_ceph_repository:
# - community
# - rhcs
# - dev
# - uca
# - custom


# REPOSITORY: COMMUNITY VERSION
#
# Enabled when ceph_repository == 'community'
#
#ceph_mirror: http://download.ceph.com
#ceph_stable_key: https://download.ceph.com/keys/release.asc
ceph_stable_release: luminous
#ceph_stable_repo: "{{ ceph_mirror }}/debian-{{ ceph_stable_release }}"

#nfs_ganesha_stable: true # use stable repos for nfs-ganesha
#nfs_ganesha_stable_branch: V2.5-stable
#nfs_ganesha_stable_deb_repo: "{{ ceph_mirror }}/nfs-ganesha/deb-{{ nfs_ganesha_stable_branch }}/{{ ceph_stable_release }}"


# Use the option below to specify your applicable package tree, e.g. when using non-LTS Ubuntu versions
# For a list of available Debian distributions, visit http://download.ceph.com/debian-{{ ceph_stable_release }}/dists/
# For more info read: https://github.com/ceph/ceph-ansible/issues/305
#ceph_stable_distro_source: "{{ ansible_lsb.codename }}"

# This option is needed for _both_ stable and dev versions, so please always fill in the right version
# For supported distros, see http://download.ceph.com/rpm-{{ ceph_stable_release }}/
#ceph_stable_redhat_distro: el7
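
# Illustrative override only (not part of this deployment): when the nodes
# cannot reach download.ceph.com, the community repository above can be
# pointed at an internal mirror; "mirror.example.com" is a placeholder.
#ceph_mirror: http://mirror.example.com/ceph
#ceph_stable_key: http://mirror.example.com/ceph/keys/release.asc
#ceph_stable_repo: "{{ ceph_mirror }}/debian-{{ ceph_stable_release }}"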


# REPOSITORY: RHCS VERSION RED HAT STORAGE (from 1.3)
#
# Enabled when ceph_repository == 'rhcs'
#
# This version is only supported on RHEL >= 7.1
# As of RHEL 7.1, libceph.ko and rbd.ko are now included in Red Hat's kernel
# packages natively. The RHEL 7.1 kernel packages are more stable and secure than
# using these 3rd-party kmods with RHEL 7.0. Please update your systems to RHEL
# 7.1 or later if you want to use the kernel RBD client.
#
# The CephFS kernel client is undergoing rapid development upstream, and we do
# not recommend running the CephFS kernel module on RHEL 7's 3.10 kernel at this
# time. Please use ELRepo's latest upstream 4.x kernels if you want to run CephFS
# on RHEL 7.
#
#
#ceph_rhcs_version: "{{ ceph_stable_rh_storage_version | default(2) }}"
#valid_ceph_repository_type:
# - cdn
# - iso
#ceph_rhcs_iso_path: "{{ ceph_stable_rh_storage_iso_path | default('') }}"
#ceph_rhcs_mount_path: "{{ ceph_stable_rh_storage_mount_path | default('/tmp/rh-storage-mount') }}"
#ceph_rhcs_repository_path: "{{ ceph_stable_rh_storage_repository_path | default('/tmp/rh-storage-repo') }}" # where to copy the ISO's content

# RHCS installation on Debian systems
#ceph_rhcs_cdn_debian_repo: https://customername:customerpasswd@rhcs.download.redhat.com
#ceph_rhcs_cdn_debian_repo_version: "/3-release/" # for GA, later for updates use /3-updates/


# REPOSITORY: UBUNTU CLOUD ARCHIVE
#
# Enabled when ceph_repository == 'uca'
#
# This allows the installation of Ceph from the Ubuntu Cloud Archive. The Ubuntu Cloud Archive
# usually has newer Ceph releases than the normal distro repository.
#
#
#ceph_stable_repo_uca: "http://ubuntu-cloud.archive.canonical.com/ubuntu"
#ceph_stable_openstack_release_uca: liberty
#ceph_stable_release_uca: "{{ansible_lsb.codename}}-updates/{{ceph_stable_openstack_release_uca}}"


# REPOSITORY: DEV
#
# Enabled when ceph_repository == 'dev'
#
#ceph_dev_branch: master # development branch you would like to use, e.g. master, wip-hack
#ceph_dev_sha1: latest # distinct sha1 to use, defaults to 'latest' (as in latest built)

#nfs_ganesha_dev: false # use development repos for nfs-ganesha

# Set this to choose the version of ceph dev libraries used in the nfs-ganesha packages from shaman
# flavors so far include: ceph_master, ceph_jewel, ceph_kraken, ceph_luminous
#nfs_ganesha_flavor: "ceph_master"

#ceph_iscsi_config_dev: true # special repo for deploying iSCSI gateways


# REPOSITORY: CUSTOM
#
# Enabled when ceph_repository == 'custom'
#
# Use a custom repository to install ceph. For RPM, ceph_custom_repo should be
# a URL to the .repo file to be installed on the targets. For deb,
# ceph_custom_repo should be the URL to the repo base.
#
#ceph_custom_repo: https://server.domain.com/ceph-custom-repo


# ORIGIN: LOCAL CEPH INSTALLATION
#
# Enabled when ceph_origin == 'local'
#
# Path to the DESTDIR of the ceph install
#ceph_installation_dir: "/path/to/ceph_installation/"
# Whether or not to use the installer script rundep_installer.sh
# This script takes in rundep and installs the packages line by line onto the machine
# If this is set to false then it is assumed that the machine ceph is being copied onto
# already has all runtime dependencies installed
#use_installer: false
# Root directory for ceph-ansible
#ansible_dir: "/path/to/ceph-ansible"


######################
# CEPH CONFIGURATION #
######################

## Ceph options
#
# Each cluster requires a unique, consistent filesystem ID. By
# default, the playbook generates one for you and stores it in a file
# in `fetch_directory`. If you want to customize how the fsid is
# generated, you may find it useful to disable fsid generation to
# avoid cluttering up your ansible repo. If you set `generate_fsid` to
# false, you *must* generate `fsid` in another way.
# ACTIVATE THE FSID VARIABLE FOR NON-VAGRANT DEPLOYMENT
#fsid: "{{ cluster_uuid.stdout }}"
#generate_fsid: true
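
# Illustrative sketch only: one way to satisfy the requirement above is to
# create a UUID once (e.g. with `uuidgen`) and pin it here; the UUID below is
# a placeholder, not a value used by this deployment.
#generate_fsid: false
#fsid: 4a158d27-f750-41d5-9e7f-26ce4c9d2d45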

#ceph_conf_key_directory: /etc/ceph

#cephx: true

## Client options
#
#rbd_cache: "true"
#rbd_cache_writethrough_until_flush: "true"
#rbd_concurrent_management_ops: 20

#rbd_client_directories: true # this will create rbd_client_log_path and rbd_client_admin_socket_path directories with proper permissions

# Permissions for the rbd_client_log_path and
# rbd_client_admin_socket_path. Depending on your use case for Ceph
# you may want to change these values. The default, which is used if
# any of the variables are unset or set to a false value (like `null`
# or `false`), is to automatically determine what is appropriate for
# the Ceph version with non-OpenStack workloads -- ceph:ceph and 0770
# for infernalis releases, and root:root and 1777 for pre-infernalis
# releases.
#
# For other use cases, including running Ceph with OpenStack, you'll
# want to set these differently:
#
# For OpenStack on RHEL, you'll want:
# rbd_client_directory_owner: "qemu"
# rbd_client_directory_group: "libvirtd" (or "libvirt", depending on your version of libvirt)
# rbd_client_directory_mode: "0755"
#
# For OpenStack on Ubuntu or Debian, set:
# rbd_client_directory_owner: "libvirt-qemu"
# rbd_client_directory_group: "kvm"
# rbd_client_directory_mode: "0755"
#
# If you set rbd_client_directory_mode, you must use a string (e.g.,
# 'rbd_client_directory_mode: "0755"', *not*
# 'rbd_client_directory_mode: 0755'), or Ansible will complain: mode
# must be in octal or symbolic form
#rbd_client_directory_owner: null
#rbd_client_directory_group: null
#rbd_client_directory_mode: null

#rbd_client_log_path: /var/log/ceph
#rbd_client_log_file: "{{ rbd_client_log_path }}/qemu-guest-$pid.log" # must be writable by QEMU and allowed by SELinux or AppArmor
#rbd_client_admin_socket_path: /var/run/ceph # must be writable by QEMU and allowed by SELinux or AppArmor

## Monitor options
#
# You must define either monitor_interface, monitor_address or monitor_address_block.
# These variables must be defined at least in all.yml and overridden if needed (inventory host file or group_vars/*.yml).
# E.g. if you want to specify which address each monitor will bind to, you can set 'monitor_address' in your **inventory host file**.
# Preference will go to monitor_address if both monitor_address and monitor_interface are defined.
# To use an IPv6 address, use the monitor_address setting instead (and set ip_version to ipv6)
monitor_interface: ens3
#monitor_address: 0.0.0.0
#monitor_address_block: subnet
# set to either ipv4 or ipv6, whichever your network is using
#ip_version: ipv4
#mon_use_fqdn: false # if set to true, the MON name used will be the fqdn in the ceph.conf
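
# Illustrative inventory snippet only (hostnames and addresses are
# placeholders, not part of this deployment): per-monitor addresses can be
# supplied from the inventory host file instead of this group_vars file.
# [mons]
# mon1 monitor_address=192.168.10.11
# mon2 monitor_address=192.168.10.12
# mon3 monitor_address=192.168.10.13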

## OSD options
#
journal_size: 100 # OSD journal size in MB
public_network: 100.64.128.40/24
cluster_network: "{{ public_network }}"
#osd_mkfs_type: xfs
#osd_mkfs_options_xfs: -f -i size=2048
#osd_mount_options_xfs: noatime,largeio,inode64,swalloc
#osd_objectstore: filestore

# xattrs. By default, 'filestore xattr use omap' is set to 'true' if
# 'osd_mkfs_type' is set to 'ext4'; otherwise it isn't set. This can
# be set to 'true' or 'false' to explicitly override those
# defaults. Leave it 'null' to use the default for your chosen mkfs
# type.
#filestore_xattr_use_omap: null

## MDS options
#
#mds_use_fqdn: false # if set to true, the MDS name used will be the fqdn in the ceph.conf
#mds_allow_multimds: false
#mds_max_mds: 3

## Rados Gateway options
#
#radosgw_dns_name: your.subdomain.tld # subdomains used by radosgw. See http://ceph.com/docs/master/radosgw/config/#enabling-subdomain-s3-calls
#radosgw_resolve_cname: false # enable for radosgw to resolve DNS CNAME based bucket names
#radosgw_civetweb_port: 8080
#radosgw_civetweb_num_threads: 100
# For additional civetweb configuration options available such as SSL, logging,
# keepalive, and timeout settings, please see the civetweb docs at
# https://github.com/civetweb/civetweb/blob/master/docs/UserManual.md
#radosgw_civetweb_options: "num_threads={{ radosgw_civetweb_num_threads }}"
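# Illustrative only: additional civetweb settings can be carried in the same
# option string; the extra values below are placeholders, not recommendations.
#radosgw_civetweb_options: "num_threads={{ radosgw_civetweb_num_threads }} request_timeout_ms=30000 error_log_file=/var/log/ceph/civetweb.error.log"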
# You must define either radosgw_interface or radosgw_address.
# These variables must be defined at least in all.yml and overridden if needed (inventory host file or group_vars/*.yml).
# E.g. if you want to specify which address each radosgw node will bind to, you can set 'radosgw_address' in your **inventory host file**.
# Preference will go to radosgw_address if both radosgw_address and radosgw_interface are defined.
# To use an IPv6 address, use the radosgw_address setting instead (and set ip_version to ipv6)
#radosgw_interface: interface
#radosgw_address: "{{ '0.0.0.0' if rgw_containerized_deployment else 'address' }}" # backward compatibility with stable-2.2, will disappear in stable 3.1
#radosgw_address_block: subnet
#radosgw_keystone: false # activate OpenStack Keystone options, full detail here: http://ceph.com/docs/master/radosgw/keystone/
# Rados Gateway options
#email_address: foo@bar.com

## REST API options
#
#restapi_interface: "{{ monitor_interface }}"
#restapi_address: "{{ monitor_address }}"
#restapi_port: 5000

## Testing mode
# enable this mode _only_ when you have a single node
# if you don't want it, keep the option commented
#common_single_host_mode: true

## Handlers - restarting daemons after a config change
# If for whatever reason the content of your ceph configuration changes,
# ceph daemons will be restarted as well. At the moment, we can not detect
# which config option changed so all the daemons will be restarted. Although
# this restart will be serialized for each node, in between a health check
# will be performed so we make sure we don't move on to the next node until
# ceph is healthy again.
# Obviously between the checks (for monitors to be in quorum and for OSD PGs
# to be clean) we have to wait. These retries and delays are configurable
# for both monitors and osds.
#
# Monitor handler checks
#handler_health_mon_check_retries: 5
#handler_health_mon_check_delay: 10
#
# OSD handler checks
#handler_health_osd_check_retries: 40
#handler_health_osd_check_delay: 30
#handler_health_osd_check: true
#
# MDS handler checks
#handler_health_mds_check_retries: 5
#handler_health_mds_check_delay: 10
#
# RGW handler checks
#handler_health_rgw_check_retries: 5
#handler_health_rgw_check_delay: 10

# NFS handler checks
#handler_health_nfs_check_retries: 5
#handler_health_nfs_check_delay: 10

# RBD MIRROR handler checks
#handler_health_rbd_mirror_check_retries: 5
#handler_health_rbd_mirror_check_delay: 10

# MGR handler checks
#handler_health_mgr_check_retries: 5
#handler_health_mgr_check_delay: 10

###############
# NFS-GANESHA #
###############

# Configure the type of NFS gateway access. At least one must be enabled for an
# NFS role to be useful
#
# Set this to true to enable File access via NFS. Requires an MDS role.
#nfs_file_gw: false
# Set this to true to enable Object access via NFS. Requires an RGW role.
#nfs_obj_gw: true

###################
# CONFIG OVERRIDE #
###################

# Ceph configuration file override.
# This allows you to specify additional configuration options
# using an INI-style format.
# The following sections are supported: [global], [mon], [osd], [mds], [rgw]
#
# Example:
# ceph_conf_overrides:
#   global:
#     foo: 1234
#     bar: 5678
#
#ceph_conf_overrides: {}
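
# A slightly more concrete illustration (the option names are valid ceph.conf
# settings, but the values are arbitrary examples, not recommendations for
# this deployment):
# ceph_conf_overrides:
#   global:
#     osd_pool_default_size: 3
#     osd_pool_default_pg_num: 128
#   mon:
#     mon_allow_pool_delete: false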


#############
# OS TUNING #
#############

#disable_transparent_hugepage: true
#os_tuning_params:
# - { name: kernel.pid_max, value: 4194303 }
# - { name: fs.file-max, value: 26234859 }
# - { name: vm.zone_reclaim_mode, value: 0 }
# - { name: vm.swappiness, value: 10 }
# - { name: vm.min_free_kbytes, value: "{{ vm_min_free_kbytes }}" }

# For Debian & Red Hat/CentOS installs, set TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES.
# Set this to a byte value (e.g. 134217728).
# A value of 0 will leave the package default.
#ceph_tcmalloc_max_total_thread_cache: 0


##########
# DOCKER #
##########
#docker_exec_cmd:
#docker: false
#ceph_docker_image: "ceph/daemon"
#ceph_docker_image_tag: latest
#ceph_docker_registry: docker.io
#ceph_docker_enable_centos_extra_repo: false
#ceph_docker_on_openstack: false
#ceph_mon_docker_interface: "{{ monitor_interface }}" # backward compatibility with stable-2.2, will disappear in stable 3.1
#ceph_mon_docker_subnet: "{{ public_network }}" # backward compatibility with stable-2.2, will disappear in stable 3.1
#mon_containerized_deployment: False # backward compatibility with stable-2.2, will disappear in stable 3.1
#osd_containerized_deployment: False # backward compatibility with stable-2.2, will disappear in stable 3.1
#mds_containerized_deployment: False # backward compatibility with stable-2.2, will disappear in stable 3.1
#rgw_containerized_deployment: False # backward compatibility with stable-2.2, will disappear in stable 3.1
#containerized_deployment: "{{ True if mon_containerized_deployment or osd_containerized_deployment or mds_containerized_deployment or rgw_containerized_deployment else False }}" # backward compatibility with stable-2.2, will disappear in stable 3.1


############
# KV store #
############
#containerized_deployment_with_kv: false
#mon_containerized_default_ceph_conf_with_kv: false
#kv_type: etcd
#kv_endpoint: 127.0.0.1
#kv_port: 2379


# this is only here for usage with the rolling_update.yml playbook
# do not ever change this here
#rolling_update: false
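
# Illustrative sketch for the DOCKER section above (not part of this
# deployment, which keeps the non-containerized defaults): a containerized
# deployment would typically enable containerized_deployment and pin the
# image tag instead of tracking 'latest'; the tag below is a placeholder.
#containerized_deployment: true
#ceph_docker_image: "ceph/daemon"
#ceph_docker_image_tag: tag-build-master-luminous-ubuntu-16.04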