---
# Cleaner tasks: stop OpenSDS services (daemon or containerized) and remove
# every installed artifact (release files, config, logs, LVM/cinder backends).

- name: kill osdslet daemon service
  shell: killall osdslet
  # killall exits non-zero when no osdslet process is running; the play
  # must keep going so the remaining cleanup still runs.
  ignore_errors: true
  when: container_enabled == false

- name: kill osdslet containerized service
  # NOTE(review): the module line was lost in extraction; the bare `image:` key
  # matches the legacy `docker` module with state `stopped` — confirm against
  # repo history before relying on this reconstruction.
  docker:
    image: opensdsio/opensds-controller:latest
    state: stopped
  when: container_enabled == true

- name: kill osdsdock daemon service
  shell: killall osdsdock
  # killall exits non-zero when no osdsdock process is running; don't abort
  # the cleanup play because of that.
  ignore_errors: true
  when: container_enabled == false

- name: kill osdsdock containerized service
  # NOTE(review): module line lost in extraction; reconstructed as the legacy
  # `docker` module stopping the dock container — confirm against repo history.
  docker:
    image: opensdsio/opensds-dock:latest
    state: stopped
  when: container_enabled == true

- name: kill etcd daemon service
  # NOTE(review): the shell line was lost in extraction; reconstructed from the
  # task name and the pattern of the sibling kill tasks — TODO confirm.
  shell: killall etcd
  ignore_errors: true
  when: db_driver == "etcd" and container_enabled == false

- name: kill etcd containerized service
  # NOTE(review): module line lost in extraction; reconstructed as the legacy
  # `docker` module stopping the etcd container — confirm against repo history.
  docker:
    image: quay.io/coreos/etcd:latest
    state: stopped
  when: db_driver == "etcd" and container_enabled == true

- name: remove etcd service data
  # Delete the etcd data directory (the `file:`/`state:` lines were lost in
  # extraction; `path:` at module-arg indent implies the `file` module).
  file:
    path: "{{ etcd_dir }}"
    state: absent
  ignore_errors: true
  when: db_driver == "etcd"

- name: remove etcd tarball
  # Delete the downloaded etcd tarball from /opt.
  file:
    path: "/opt/{{ etcd_tarball }}"
    state: absent
  ignore_errors: true
  when: db_driver == "etcd"

- name: clean opensds release files
  # Remove the unpacked OpenSDS release directory.
  file:
    path: "{{ opensds_dir }}"
    state: absent
  ignore_errors: true

- name: clean opensds release tarball file
  # Remove the release tarball. NOTE(review): the variable is named *_url but
  # is used here as a local path — presumably it holds the downloaded file's
  # location; verify against the installer's vars.
  file:
    path: "{{ opensds_tarball_url }}"
    state: absent
  ignore_errors: true

- name: clean opensds flexvolume plugins binary file
  # Remove the flexvolume plugin binary directory, only when that plugin
  # type was deployed.
  file:
    path: "{{ flexvolume_plugin_dir }}"
    state: absent
  ignore_errors: true
  when: nbp_plugin_type == "flexvolume"

- name: clean nbp release files
  # Remove the unpacked north-bound-plugin release directory.
  file:
    path: "{{ nbp_dir }}"
    state: absent
  ignore_errors: true

- name: clean nbp release tarball file
  # Remove the nbp tarball. NOTE(review): variable named *_url but used as a
  # local path — presumably the downloaded file's location; verify.
  file:
    path: "{{ nbp_tarball_url }}"
    state: absent
  ignore_errors: true

- name: clean all opensds configuration files
  # Remove the OpenSDS configuration directory.
  file:
    path: "{{ opensds_config_dir }}"
    state: absent
  ignore_errors: true

- name: clean all opensds log files
  # Remove the OpenSDS log directory.
  file:
    path: "{{ opensds_log_dir }}"
    state: absent
  ignore_errors: true

- name: check if it existed before cleaning a volume group
  shell: vgdisplay {{ vg_name }}
  # vgdisplay exits non-zero when the VG does not exist; the play must not
  # fail here because the next task inspects vg_existed.rc to decide.
  ignore_errors: true
  register: vg_existed
  when: enabled_backend == "lvm"

- name: remove a volume group if lvm backend specified
  # Only runs when the previous vgdisplay check found the VG (rc == 0).
  # NOTE(review): module line lost in extraction; the `vg:` key implies the
  # `lvg` module with state absent — confirm against repo history.
  lvg:
    vg: "{{ vg_name }}"
    state: absent
  when: enabled_backend == "lvm" and vg_existed.rc == 0

- name: remove physical volumes if lvm backend specified
  # Wipe each backing physical volume used by the LVM backend.
  shell: pvremove {{ item }}
  with_items: "{{ pv_devices }}"
  when: enabled_backend == "lvm"

- name: stop cinder-standalone service
  # Tear down the cinder block-box compose stack from its checkout directory.
  shell: docker-compose down
  args:
    chdir: "{{ cinder_data_dir }}/cinder/contrib/block-box"
  when: enabled_backend == "cinder"

- name: clean the volume group of cinder
  # Bash helpers (DevStack-style) that remove the cinder VG's logical
  # volumes, the VG itself, and — when it was loop-backed — its backing file.
  # NOTE(review): the `shell: |` header, `local` declarations and closing
  # braces/`fi` lines were lost in extraction and are reconstructed here to
  # match the visible function bodies — confirm against repo history.
  shell: |
    # _clean_lvm_volume_group removes all default LVM volumes
    #
    # Usage: _clean_lvm_volume_group $vg
    function _clean_lvm_volume_group {
        local vg=$1

        # Clean out existing volumes
        sudo lvremove -f $vg
    }

    # _remove_lvm_volume_group removes the volume group
    #
    # Usage: _remove_lvm_volume_group $vg
    function _remove_lvm_volume_group {
        local vg=$1

        # Remove the volume group
        sudo vgremove -f $vg
    }

    # _clean_lvm_backing_file removes the backing file of the
    # volume group
    #
    # Usage: _clean_lvm_backing_file $backing_file
    function _clean_lvm_backing_file {
        local backing_file=$1

        # If the backing physical device is a loop device, it was probably setup by DevStack
        if [[ -n "$backing_file" ]] && [[ -e "$backing_file" ]]; then
            local vg_dev
            vg_dev=$(sudo losetup -j $backing_file | awk -F':' '/'.img'/ { print $1}')
            if [[ -n "$vg_dev" ]]; then
                sudo losetup -d $vg_dev
            fi
            rm -f $backing_file
        fi
    }

    # clean_lvm_volume_group cleans up the volume group and removes the
    # backing file
    #
    # Usage: clean_lvm_volume_group $vg
    function clean_lvm_volume_group {
        local vg=$1

        _clean_lvm_volume_group $vg
        _remove_lvm_volume_group $vg
        # if there is no logical volume left, it's safe to attempt a cleanup
        # of the backing file
        if [[ -z "$(sudo lvs --noheadings -o lv_name $vg 2>/dev/null)" ]]; then
            _clean_lvm_backing_file {{ cinder_data_dir }}/${vg}.img
        fi
    }

    clean_lvm_volume_group {{cinder_volume_group}}
  args:
    executable: /bin/bash
  when: enabled_backend == "cinder"