----\r
+# Copyright (c) 2018 Huawei Technologies Co., Ltd. All Rights Reserved.\r
+#\r
+# Licensed under the Apache License, Version 2.0 (the "License");\r
+# you may not use this file except in compliance with the License.\r
+# You may obtain a copy of the License at\r
+#\r
+# http://www.apache.org/licenses/LICENSE-2.0\r
+#\r
+# Unless required by applicable law or agreed to in writing, software\r
+# distributed under the License is distributed on an "AS IS" BASIS,\r
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+# See the License for the specific language governing permissions and\r
+# limitations under the License.\r
+\r
- name: kill osdslet daemon service\r
- shell: killall osdslet\r
- ignore_errors: yes\r
- when: container_enabled == false\r
+ shell: killall osdslet osdsdock\r
+ when: install_from != "container"\r
+ ignore_errors: true\r
\r
- name: kill osdslet containerized service\r
- docker:\r
- image: opensdsio/opensds-controller:latest\r
+ docker_container:\r
+ name: osdslet\r
+ image: "{{ controller_docker_image }}"\r
state: stopped\r
- when: container_enabled == true\r
-\r
-- name: kill osdsdock daemon service\r
- shell: killall osdsdock\r
- ignore_errors: yes\r
- when: container_enabled == false\r
+ when: install_from == "container"\r
\r
- name: kill osdsdock containerized service\r
- docker:\r
- image: opensdsio/opensds-dock:latest\r
+ docker_container:\r
+ name: osdsdock\r
+ image: "{{ dock_docker_image }}"\r
state: stopped\r
- when: container_enabled == true\r
+ when: install_from == "container"\r
\r
-- name: kill etcd daemon service\r
- shell: killall etcd\r
- ignore_errors: yes\r
- when: db_driver == "etcd" and container_enabled == false\r
-\r
-- name: kill etcd containerized service\r
- docker:\r
- image: "{{ etcd_docker_image }}"\r
+- name: stop dashboard containerized service
+ docker_container:\r
+ name: dashboard\r
+ image: "{{ dashboard_docker_image }}"\r
state: stopped\r
- when: db_driver == "etcd" and container_enabled == true\r
-\r
-- name: remove etcd service data\r
- file:\r
- path: "{{ etcd_dir }}"\r
- state: absent\r
- force: yes\r
- ignore_errors: yes\r
- when: db_driver == "etcd"\r
-\r
-- name: remove etcd tarball\r
- file:\r
- path: "/opt/{{ etcd_tarball }}"\r
- state: absent\r
- force: yes\r
- ignore_errors: yes\r
- when: db_driver == "etcd"\r
-\r
-- name: clean opensds release files\r
- file:\r
- path: "{{ opensds_dir }}"\r
- state: absent\r
- force: yes\r
- ignore_errors: yes\r
+ when: dashboard_installation_type == "container"\r
\r
-- name: clean opensds release tarball file\r
- file:\r
- path: "{{ opensds_tarball_url }}"\r
- state: absent\r
- force: yes\r
- ignore_errors: yes\r
-\r
-- name: clean opensds flexvolume plugins binary file\r
+- name: clean opensds flexvolume plugins binary file if flexvolume specified\r
file:\r
path: "{{ flexvolume_plugin_dir }}"\r
state: absent\r
ignore_errors: yes\r
when: nbp_plugin_type == "flexvolume"\r
\r
-- name: clean nbp release files\r
- file:\r
- path: "{{ nbp_dir }}"\r
- state: absent\r
- force: yes\r
- ignore_errors: yes\r
-\r
-- name: clean nbp release tarball file\r
- file:\r
- path: "{{ nbp_tarball_url }}"\r
- state: absent\r
- force: yes\r
+- name: clean opensds external provisioner plugin if flexvolume specified\r
+ shell: |\r
+ . /etc/profile\r
+ kubectl delete -f deploy/\r
+ args:\r
+ chdir: "{{ nbp_work_dir }}/provisioner"\r
ignore_errors: yes\r
+ when: nbp_plugin_type == "flexvolume"\r
\r
-- name: clean all opensds configuration files\r
- file:\r
- path: "{{ opensds_config_dir }}"\r
- state: absent\r
- force: yes\r
+- name: clean opensds csi plugin if csi plugin specified\r
+ shell: |\r
+ . /etc/profile\r
+ kubectl delete -f deploy/kubernetes\r
+ args:\r
+ chdir: "{{ nbp_work_dir }}/csi"\r
ignore_errors: yes\r
+ when: nbp_plugin_type == "csi"\r
\r
-- name: clean all opensds log files\r
+- name: clean all configuration and log files in opensds and nbp work directory\r
file:\r
- path: "{{ opensds_log_dir }}"\r
+ path: "{{ item }}"\r
state: absent\r
force: yes\r
+ with_items:\r
+ - "{{ opensds_work_dir }}"\r
+ - "{{ nbp_work_dir }}"\r
+ - "{{ opensds_config_dir }}"\r
+ - "{{ opensds_log_dir }}"\r
ignore_errors: yes\r
\r
-- name: check if it existed before cleaning a volume group\r
- shell: vgdisplay {{ vg_name }}\r
- ignore_errors: yes\r
- register: vg_existed\r
- when: enabled_backend == "lvm"\r
+- name: include scenarios/auth-keystone.yml when keystone auth is specified
+ include_tasks: scenarios/auth-keystone.yml\r
+ when: opensds_auth_strategy == "keystone"\r
\r
-- name: remove a volume group if lvm backend specified\r
- lvg:\r
- vg: "{{ vg_name }}"\r
- state: absent\r
- when: enabled_backend == "lvm" and vg_existed.rc == 0\r
+- name: include scenarios/repository.yml if installed from repository or dashboard built from source code
+ include_tasks: scenarios/repository.yml\r
+ when: install_from == "repository" or dashboard_installation_type == "source_code"\r
\r
-- name: remove physical volumes if lvm backend specified\r
- shell: pvremove {{ item }}\r
- with_items: "{{ pv_devices }}"\r
- when: enabled_backend == "lvm"\r
+- name: include scenarios/release.yml if installed from release\r
+ include_tasks: scenarios/release.yml\r
+ when: install_from == "release"\r
\r
-- name: stop cinder-standalone service\r
- shell: docker-compose down\r
- become: true\r
- args:\r
- chdir: "{{ cinder_data_dir }}/cinder/contrib/block-box"\r
- when: enabled_backend == "cinder"\r
-\r
-- name: clean the volume group of cinder\r
- shell:\r
- _raw_params: |\r
-\r
- # _clean_lvm_volume_group removes all default LVM volumes\r
- #\r
- # Usage: _clean_lvm_volume_group $vg\r
- function _clean_lvm_volume_group {\r
- local vg=$1\r
-\r
- # Clean out existing volumes\r
- sudo lvremove -f $vg\r
- }\r
-\r
- # _remove_lvm_volume_group removes the volume group\r
- #\r
- # Usage: _remove_lvm_volume_group $vg\r
- function _remove_lvm_volume_group {\r
- local vg=$1\r
-\r
- # Remove the volume group\r
- sudo vgremove -f $vg\r
- }\r
-\r
- # _clean_lvm_backing_file() removes the backing file of the\r
- # volume group\r
- #\r
- # Usage: _clean_lvm_backing_file() $backing_file\r
- function _clean_lvm_backing_file {\r
- local backing_file=$1\r
-\r
- # If the backing physical device is a loop device, it was probably setup by DevStack\r
- if [[ -n "$backing_file" ]] && [[ -e "$backing_file" ]]; then\r
- local vg_dev\r
- vg_dev=$(sudo losetup -j $backing_file | awk -F':' '/'.img'/ { print $1}')\r
- if [[ -n "$vg_dev" ]]; then\r
- sudo losetup -d $vg_dev\r
- fi\r
- rm -f $backing_file\r
- fi\r
- }\r
-\r
- # clean_lvm_volume_group() cleans up the volume group and removes the\r
- # backing file\r
- #\r
- # Usage: clean_lvm_volume_group $vg\r
- function clean_lvm_volume_group {\r
- local vg=$1\r
-\r
- _clean_lvm_volume_group $vg\r
- _remove_lvm_volume_group $vg\r
- # if there is no logical volume left, it's safe to attempt a cleanup\r
- # of the backing file\r
- if [[ -z "$(sudo lvs --noheadings -o lv_name $vg 2>/dev/null)" ]]; then\r
- _clean_lvm_backing_file {{ cinder_data_dir }}/${vg}.img\r
- fi\r
- }\r
-\r
- clean_lvm_volume_group {{cinder_volume_group}}\r
-\r
- args:\r
- executable: /bin/bash\r
- become: true\r
- when: enabled_backend == "cinder"\r
+- name: include scenarios/backend.yml for cleaning up storage backend service\r
+ include_tasks: scenarios/backend.yml\r