Update stor4nfv install scripts according to the OpenSDS Aruba release
[stor4nfv.git] / ci / ansible / roles / cleaner / tasks / main.yml
old mode 100755 (executable)
new mode 100644 (file)
index c1c465c..8399b08
----
-- name: remove golang tarball
-  file:
-    path: "/opt/{{ golang_tarball }}"
-    state: absent
-    force: yes
-  ignore_errors: yes
-
-- name: kill etcd daemon service
-  shell: killall etcd
-  ignore_errors: yes
-  when: db_driver == "etcd" and container_enabled == false
-
-- name: kill etcd containerized service
-  docker:
-    image: quay.io/coreos/etcd:latest
-    state: stopped
-  when: container_enabled == true
-
-- name: remove etcd service data
-  file:
-    path: "{{ etcd_dir }}"
-    state: absent
-    force: yes
-  ignore_errors: yes
-  when: db_driver == "etcd"
-
-- name: remove etcd tarball
-  file:
-    path: "/opt/{{ etcd_tarball }}"
-    state: absent
-    force: yes
-  ignore_errors: yes
-  when: db_driver == "etcd"
-
-- name: kill osdslet daemon service
-  shell: killall osdslet
-  ignore_errors: yes
-  when: container_enabled == false
-
-- name: kill osdslet containerized service
-  docker:
-    image: opensdsio/opensds-controller:latest
-    state: stopped
-  when: container_enabled == true
-
-- name: kill osdsdock daemon service
-  shell: killall osdsdock
-  ignore_errors: yes
-  when: container_enabled == false
-
-- name: kill osdsdock containerized service
-  docker:
-    image: opensdsio/opensds-dock:latest
-    state: stopped
-  when: container_enabled == true
-
-- name: clean all opensds build files
-  shell: . /etc/profile; make clean
-  args:
-    chdir: "{{ opensds_root_dir }}"
-
-- name: clean all opensds configuration files
-  file:
-    path: "{{ opensds_config_dir }}"
-    state: absent
-    force: yes
-  ignore_errors: yes
-
-- name: clean all opensds log files
-  file:
-    path: "{{ opensds_log_dir }}"
-    state: absent
-    force: yes
-  ignore_errors: yes
-
-- name: check if it existed before cleaning a volume group
-  shell: vgdisplay {{ vg_name }}
-  ignore_errors: yes
-  register: vg_existed
-  when: enabled_backend == "lvm"
-
-- name: remove a volume group if lvm backend specified
-  shell: vgremove {{ vg_name }}
-  when: enabled_backend == "lvm" and vg_existed.rc == 0
-
-- name: check if it existed before cleaning a physical volume
-  shell: pvdisplay {{ pv_device }}
-  ignore_errors: yes
-  register: pv_existed
-  when: enabled_backend == "lvm"
-
-- name: remove a physical volume if lvm backend specified
-  shell: pvremove {{ pv_device }}
-  when: enabled_backend == "lvm" and pv_existed.rc == 0
-
-- name: stop cinder-standalone service
-  shell: docker-compose down
-  become: true
-  args:
-    chdir: "{{ cinder_data_dir }}/cinder/contrib/block-box"
-  when: enabled_backend == "cinder"
-
-- name: clean the volume group of cinder
-  shell:
-    _raw_params: |
-
-      # _clean_lvm_volume_group removes all default LVM volumes
-      #
-      # Usage: _clean_lvm_volume_group $vg
-      function _clean_lvm_volume_group {
-          local vg=$1
-
-          # Clean out existing volumes
-          sudo lvremove -f $vg
-      }
-
-      # _remove_lvm_volume_group removes the volume group
-      #
-      # Usage: _remove_lvm_volume_group $vg
-      function _remove_lvm_volume_group {
-          local vg=$1
-
-          # Remove the volume group
-          sudo vgremove -f $vg
-      }
-
-      # _clean_lvm_backing_file() removes the backing file of the
-      # volume group
-      #
-      # Usage: _clean_lvm_backing_file() $backing_file
-      function _clean_lvm_backing_file {
-          local backing_file=$1
-
-          # If the backing physical device is a loop device, it was probably setup by DevStack
-          if [[ -n "$backing_file" ]] && [[ -e "$backing_file" ]]; then
-              local vg_dev
-              vg_dev=$(sudo losetup -j $backing_file | awk -F':' '/'.img'/ { print $1}')
-              if [[ -n "$vg_dev" ]]; then
-                  sudo losetup -d $vg_dev
-              fi
-              rm -f $backing_file
-          fi
-      }
-
-      # clean_lvm_volume_group() cleans up the volume group and removes the
-      # backing file
-      #
-      # Usage: clean_lvm_volume_group $vg
-      function clean_lvm_volume_group {
-          local vg=$1
-
-          _clean_lvm_volume_group $vg
-          _remove_lvm_volume_group $vg
-          # if there is no logical volume left, it's safe to attempt a cleanup
-          # of the backing file
-          if [[ -z "$(sudo lvs --noheadings -o lv_name $vg 2>/dev/null)" ]]; then
-              _clean_lvm_backing_file {{ cinder_data_dir }}/${vg}.img
-          fi
-      }
-
-      clean_lvm_volume_group {{cinder_volume_group}}
-
-  args:
-    executable: /bin/bash
-  become: true
-  when: enabled_backend == "cinder"
+# Copyright (c) 2018 Huawei Technologies Co., Ltd. All Rights Reserved.\r
+#\r
+# Licensed under the Apache License, Version 2.0 (the "License");\r
+# you may not use this file except in compliance with the License.\r
+# You may obtain a copy of the License at\r
+#\r
+#     http://www.apache.org/licenses/LICENSE-2.0\r
+#\r
+# Unless required by applicable law or agreed to in writing, software\r
+# distributed under the License is distributed on an "AS IS" BASIS,\r
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+# See the License for the specific language governing permissions and\r
+# limitations under the License.\r
+\r
+# Stop both OpenSDS daemons (controller and dock) when running directly on\r
+# the host, i.e. not containerized. killall may fail if they are not running,\r
+# so errors are ignored to keep the cleanup idempotent.\r
+- name: kill osdslet and osdsdock daemon services\r
+  shell: killall osdslet osdsdock\r
+  when: not container_enabled\r
+  ignore_errors: true\r
+\r
+# Stop the containerized osdslet (controller) service when deployed in containers.\r
+- name: kill osdslet containerized service\r
+  docker_container:\r
+    name: osdslet\r
+    image: "{{ controller_docker_image }}"\r
+    state: stopped\r
+  when: container_enabled\r
+\r
+# Stop the containerized osdsdock (dock) service when deployed in containers.\r
+- name: kill osdsdock containerized service\r
+  docker_container:\r
+    name: osdsdock\r
+    image: "{{ dock_docker_image }}"\r
+    state: stopped\r
+  when: container_enabled\r
+\r
+# Stop the dashboard container only when the dashboard was installed as a\r
+# container (as opposed to being built from source_code).\r
+- name: stop container where dashboard is located\r
+  docker_container:\r
+    name: dashboard\r
+    image: "{{ dashboard_docker_image }}"\r
+    state: stopped\r
+  when: dashboard_installation_type == "container"\r
+\r
+# Remove the FlexVolume plugin binary directory when that north-bound plugin\r
+# type was installed. Errors are ignored in case the path never existed.\r
+- name: clean opensds flexvolume plugins binary file if flexvolume specified\r
+  file:\r
+    path: "{{ flexvolume_plugin_dir }}"\r
+    state: absent\r
+    force: true\r
+  ignore_errors: true\r
+  when: nbp_plugin_type == "flexvolume"\r
+\r
+# Tear down the CSI plugin's Kubernetes resources when the CSI north-bound\r
+# plugin type was installed. /etc/profile is sourced so kubectl is on PATH;\r
+# errors are ignored since the resources may already be gone.\r
+- name: clean opensds csi plugin if csi plugin specified\r
+  shell: |\r
+    . /etc/profile\r
+    kubectl delete -f deploy/kubernetes\r
+  args:\r
+    chdir: "{{ nbp_work_dir }}/csi"\r
+  ignore_errors: true\r
+  when: nbp_plugin_type == "csi"\r
+\r
+# Recursively remove the OpenSDS/NBP work, configuration and log directories.\r
+# Errors are ignored so a partially-installed environment can still be cleaned.\r
+- name: clean all configuration and log files in opensds and nbp work directory\r
+  file:\r
+    path: "{{ item }}"\r
+    state: absent\r
+    force: true\r
+  with_items:\r
+    - "{{ opensds_work_dir }}"\r
+    - "{{ nbp_work_dir }}"\r
+    - "{{ opensds_config_dir }}"\r
+    - "{{ opensds_log_dir }}"\r
+  ignore_errors: true\r
+\r
+# Clean up the keystone authentication service, if that auth strategy was used.\r
+- name: include scenarios/auth-keystone.yml when specifies keystone\r
+  include_tasks: scenarios/auth-keystone.yml\r
+  when: opensds_auth_strategy == "keystone"\r
+\r
+# Clean up artifacts from a source/repository-based install (also covers a\r
+# dashboard built from source_code).\r
+- name: include scenarios/repository.yml if installed from repository\r
+  include_tasks: scenarios/repository.yml\r
+  when: install_from == "repository" or dashboard_installation_type == "source_code"\r
+\r
+# Clean up artifacts from a release-tarball install.\r
+- name: include scenarios/release.yml if installed from release\r
+  include_tasks: scenarios/release.yml\r
+  when: install_from == "release"\r
+\r
+# Always clean up the configured storage backend service (lvm/ceph/cinder...).\r
+- name: include scenarios/backend.yml for cleaning up storage backend service\r
+  include_tasks: scenarios/backend.yml\r