\r
### Run ceph-ansible playbook to clean ceph cluster if ceph is deployed\r
```bash\r
-cd /tmp/ceph-ansible\r
+cd /opt/ceph-ansible\r
sudo ansible-playbook infrastructure-playbooks/purge-cluster.yml -i ceph.hosts\r
```\r
\r
### Remove ceph-ansible source code (optional)\r
```bash\r
cd ..\r
-sudo rm -rf /tmp/ceph-ansible\r
+sudo rm -rf /opt/ceph-ansible\r
```\r
# GENERAL #\r
###########\r
\r
-workplace: /home/krej # Change this field according to your username\r
+workplace: /home/krej # Change this field according to your username, use '/root' if you login as root.\r
\r
# These fields are NOT suggested to be modified\r
remote_url: https://github.com/opensds/opensds.git\r
opensds_build_dir: "{{ opensds_root_dir }}/build"\r
opensds_config_dir: /etc/opensds\r
opensds_log_dir: /var/log/opensds\r
+\r
+###########\r
+# GOLANG #\r
+###########\r
+\r
+golang_release: 1.9.2\r
+\r
+# These fields are NOT suggested to be modified\r
+golang_tarball: go{{ golang_release }}.linux-amd64.tar.gz\r
+golang_download_url: https://storage.googleapis.com/golang/{{ golang_tarball }}\r
# These fields are not suggested to be modified\r
etcd_tarball: etcd-{{ etcd_release }}-linux-amd64.tar.gz\r
etcd_download_url: https://github.com/coreos/etcd/releases/download/{{ etcd_release }}/{{ etcd_tarball }}\r
-etcd_dir: /tmp/etcd-{{ etcd_release }}-linux-amd64\r
+etcd_dir: /opt/etcd-{{ etcd_release }}-linux-amd64\r
---\r
+- name: remove golang tarball
+  # Delete the downloaded Go tarball from /opt; state=absent removes the file if present.
+  file:
+    path: "/opt/{{ golang_tarball }}"
+    state: absent
+    force: yes
+  # ignore_errors is a task-level keyword (not a file-module argument),
+  # so it must sit at the same indent as the module name.
+  ignore_errors: yes
+\r
- name: kill etcd daemon service\r
shell: killall etcd\r
ignore_errors: yes\r
\r
- name: remove etcd tarball
  # Delete the downloaded etcd tarball from /opt (path updated from /tmp, which
  # is wiped on reboot and unsuitable for persisted artifacts).
  file:
    path: "/opt/{{ etcd_tarball }}"
    state: absent
    force: yes
  # Was duplicated (two identical 'ignore_errors: yes' lines) — duplicate YAML keys
  # are invalid and silently collapse to last-wins in most parsers; keep exactly one.
  # ignore_errors is a task-level keyword, so it belongs at task indent, not under file:.
  ignore_errors: yes
\r
- name: clean all opensds build files\r
- file:\r
- path: "{{ opensds_build_dir }}"\r
- state: absent\r
- force: yes\r
- ignore_errors: yes\r
+ shell: . /etc/profile; make clean\r
+ args:\r
+ chdir: "{{ opensds_root_dir }}"\r
\r
- name: clean all opensds configuration files\r
file:\r
set -e\r
set -x\r
\r
- wget https://storage.googleapis.com/golang/go1.9.linux-amd64.tar.gz\r
- tar xvf go1.9.linux-amd64.tar.gz -C /usr/local/\r
+ wget {{ golang_download_url }} -P /opt/\r
+ tar xvf /opt/{{ golang_tarball }} -C /usr/local/\r
cat >> /etc/profile <<GOLANG__CONFIG_DOC\r
export GOROOT=/usr/local/go\r
export GOPATH=\$HOME/gopath\r
- name: download etcd\r
get_url:\r
url={{ etcd_download_url }}\r
- dest=/tmp/{{ etcd_tarball }}\r
+ dest=/opt/{{ etcd_tarball }}\r
when:\r
- etcdexisted.stat.exists is undefined or etcdexisted.stat.exists == false\r
\r
- name: extract the etcd tarball\r
unarchive:\r
- src=/tmp/{{ etcd_tarball }}\r
- dest=/tmp/\r
+ src=/opt/{{ etcd_tarball }}\r
+ dest=/opt/\r
when:\r
- etcdexisted.stat.exists is undefined or etcdexisted.stat.exists == false\r
\r
\r
- name: check for ceph-ansible source code existed\r
stat:\r
- path: /tmp/ceph-ansible\r
+ path: /opt/ceph-ansible\r
ignore_errors: yes\r
register: cephansibleexisted\r
\r
- name: download ceph-ansible source code\r
git:\r
repo: https://github.com/ceph/ceph-ansible.git\r
- dest: /tmp/ceph-ansible\r
+ dest: /opt/ceph-ansible\r
when:\r
- cephansibleexisted.stat.exists is undefined or cephansibleexisted.stat.exists == false\r
\r
- name: copy ceph inventory host into ceph-ansible directory\r
copy:\r
src: ../../../group_vars/ceph/ceph.hosts\r
- dest: /tmp/ceph-ansible/ceph.hosts\r
+ dest: /opt/ceph-ansible/ceph.hosts\r
\r
- name: copy ceph all.yml file into ceph-ansible group_vars directory\r
copy:\r
src: ../../../group_vars/ceph/all.yml\r
- dest: /tmp/ceph-ansible/group_vars/all.yml\r
+ dest: /opt/ceph-ansible/group_vars/all.yml\r
\r
- name: copy ceph osds.yml file into ceph-ansible group_vars directory\r
copy:\r
src: ../../../group_vars/ceph/osds.yml\r
- dest: /tmp/ceph-ansible/group_vars/osds.yml\r
+ dest: /opt/ceph-ansible/group_vars/osds.yml\r
\r
- name: copy site.yml.sample to site.yml in ceph-ansible\r
copy:\r
- src: /tmp/ceph-ansible/site.yml.sample\r
- dest: /tmp/ceph-ansible/site.yml\r
+ src: /opt/ceph-ansible/site.yml.sample\r
+ dest: /opt/ceph-ansible/site.yml\r
\r
- name: ping all hosts\r
shell: ansible all -m ping -i ceph.hosts\r
become: true\r
args:\r
- chdir: /tmp/ceph-ansible\r
+ chdir: /opt/ceph-ansible\r
\r
- name: run ceph-ansible playbook\r
shell: ansible-playbook site.yml -i ceph.hosts\r
become: true\r
args:\r
- chdir: /tmp/ceph-ansible\r
+ chdir: /opt/ceph-ansible\r
\r
- name: Check if ceph osd is running\r
shell: ps aux | grep ceph-osd | grep -v grep\r
sed -i "s/TAG ?= debian-cinder:latest/TAG ?= {{ cinder_image_tag }}:latest/g" Makefile\r
\r
sed -i "s/image: debian-cinder/image: {{ cinder_image_tag }}/g" docker-compose.yml\r
- sed -i "s/image: lvm-debian-cinder/image: {{ cinder_image_tag }}/g" docker-compose.yml\r
+ sed -i "s/image: lvm-debian-cinder/image: lvm-{{ cinder_image_tag }}/g" docker-compose.yml\r
\r
sed -i "s/volume_group = cinder-volumes /volume_group = {{ cinder_volume_group }}/g" etc/cinder.conf\r
become: true\r
ssh-copy-id -i ~/.ssh/id_rsa.pub <ip_address> # IP address of the target machine of the installation\r
```\r
\r
-### Install docker\r
-If use a standalone cinder as backend, you also need to install docker to run cinder service. Please see the [docker installation document](https://docs.docker.com/engine/installation/linux/docker-ce/ubuntu/) for details.\r
-\r
### Install ansible tool\r
```bash\r
sudo add-apt-repository ppa:ansible/ansible # This step is needed to upgrade ansible to version 2.4.2 which is required for the ceph backend.\r
ansible --version # Ansible version 2.4.2 or higher is required for ceph; 2.0.0.2 or higher is needed for other backends.\r
```\r
\r
+### Configure nbp plugin variable\r
+##### Common environment:\r
+Configure the ```nbp_plugin_type``` in `group_vars/common.yml` according to your environment:\r
+```yaml\r
+nbp_plugin_type: flexvolume # flexvolume is the default integration way, but you can change it from 'csi', 'flexvolume'\r
+```\r
+\r
### Check if the hosts can be reached\r
```bash\r
sudo ansible all -m ping -i nbp.hosts\r
# GENERAL #\r
###########\r
\r
-# These fields are not suggested to be modified\r
-nbp_download_url: https://github.com/opensds/nbp/releases/download/v0.1.0/opensds-k8s-linux-amd64.tar.gz\r
-nbp_tarball_url: /opt/opensds-k8s-linux-amd64.tar.gz\r
-nbp_dir: /opt/opensds-k8s-linux-amd64\r
+nbp_release: v0.1.0\r
+\r
+# These fields are not suggested to be modified \r
+nbp_download_url: https://github.com/opensds/nbp/releases/download/{{ nbp_release }}/opensds-k8s-{{ nbp_release }}-linux-amd64.tar.gz\r
+nbp_tarball_url: /opt/opensds-k8s-{{ nbp_release }}-linux-amd64.tar.gz\r
+nbp_dir: /opt/opensds-k8s-{{ nbp_release }}-linux-amd64\r
+\r
+\r
+###########\r
+# PLUGIN #\r
+###########\r
+\r
+nbp_plugin_type: flexvolume # flexvolume is the default integration way, but you can change it from 'csi', 'flexvolume'\r
\r
flexvolume_plugin_dir: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/opensds.io~opensds\r
+\r
state: absent\r
force: yes\r
ignore_errors: yes\r
+ when: nbp_plugin_type == "flexvolume"\r
\r
- name: clean nbp release files\r
file:\r
--- /dev/null
+---\r
+- name: include scenarios/flexvolume.yml\r
+ include: scenarios/flexvolume.yml\r
+ when: nbp_plugin_type == "flexvolume"\r
+\r
+- name: include scenarios/csi.yml\r
+ include: scenarios/csi.yml\r
+ when: nbp_plugin_type == "csi"\r
become: True\r
roles:\r
- common\r
- - flexvolume\r
+ - installer\r
--- /dev/null
+## Prerequisite ##\r
+\r
+### ubuntu\r
+* Version information\r
+\r
+ ```\r
+ root@proxy:~# cat /etc/issue\r
+ Ubuntu 16.04.2 LTS \n \l\r
+ ```\r
+\r
+### docker\r
+* Version information\r
+\r
+ ```\r
+ root@proxy:~# docker version\r
+ Client:\r
+ Version: 1.12.6\r
+ API version: 1.24\r
+ Go version: go1.6.2\r
+ Git commit: 78d1802\r
+ Built: Tue Jan 31 23:35:14 2017\r
+ OS/Arch: linux/amd64\r
+ \r
+ Server:\r
+ Version: 1.12.6\r
+ API version: 1.24\r
+ Go version: go1.6.2\r
+ Git commit: 78d1802\r
+ Built: Tue Jan 31 23:35:14 2017\r
+ OS/Arch: linux/amd64\r
+ ```\r
+\r
+### [kubernetes](https://github.com/kubernetes/kubernetes) local cluster\r
+* You can start up the latest k8s local cluster by executing the commands below:
+\r
+ ```\r
+ cd $HOME\r
+ git clone https://github.com/kubernetes/kubernetes.git\r
+ cd $HOME/kubernetes\r
+ make\r
+ echo alias kubectl='$HOME/kubernetes/cluster/kubectl.sh' >> /etc/profile\r
+ ALLOW_PRIVILEGED=true FEATURE_GATES=CSIPersistentVolume=true,MountPropagation=true RUNTIME_CONFIG="storage.k8s.io/v1alpha1=true" LOG_LEVEL=5 hack/local-up-cluster.sh\r
+ ```\r
+\r
+### [opensds](https://github.com/opensds/opensds) local cluster\r
+* For testing purposes you can deploy OpenSDS by referring to the [OpenSDS Cluster Installation through Ansible](https://github.com/opensds/opensds/wiki/OpenSDS-Cluster-Installation-through-Ansible) wiki. Besides, you need to deploy the opensds csi plugin referring to ```nbp-ansible/README.md```.
+\r
+## Testing steps ##\r
+\r
+* Change the workplace\r
+\r
+ ```\r
+ cd /opt/opensds-k8s-v0.1.0-linux-amd64\r
+ ```\r
+\r
+* Configure opensds endpoint IP\r
+\r
+ ```\r
+ vim csi/deploy/kubernetes/csi-configmap-opensdsplugin.yaml\r
+ ```\r
+\r
+ The IP (127.0.0.1) should be replaced with the opensds actual endpoint IP.\r
+ ```yaml\r
+ kind: ConfigMap\r
+ apiVersion: v1\r
+ metadata:\r
+ name: csi-configmap-opensdsplugin\r
+ data:\r
+ opensdsendpoint: http://127.0.0.1:50040\r
+ ```\r
+\r
+* Create opensds CSI pods.\r
+\r
+ ```\r
+ kubectl create -f csi/deploy/kubernetes\r
+ ```\r
+\r
+ After this, three pods can be found by ```kubectl get pods``` like below:
+\r
+ - csi-provisioner-opensdsplugin\r
+ - csi-attacher-opensdsplugin\r
+ - csi-nodeplugin-opensdsplugin\r
+\r
+ You can find more design details from\r
+ [CSI Volume Plugins in Kubernetes Design Doc](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/storage/container-storage-interface.md)\r
+\r
+* Create example nginx application\r
+\r
+ ```\r
+ kubectl create -f csi/examples/kubernetes/nginx.yaml\r
+ ```\r
+\r
+ This example will mount an opensds volume into ```/var/lib/www/html```.
+\r
+ You can use the following command to inspect into nginx container to verify it.\r
+\r
+ ```\r
+ docker exec -it <nginx container id> /bin/bash\r
+ ```\r
+\r
+## Clean up steps ##\r
+\r
+Clean up example nginx application and opensds CSI pods by the following commands.\r
+\r
+```\r
+kubectl delete -f csi/examples/kubernetes/nginx.yaml\r
+kubectl delete -f csi/deploy/kubernetes\r
+```\r
## Prerequisite ##\r
+\r
### ubuntu\r
* Version information\r
\r
root@proxy:~# cat /etc/issue\r
Ubuntu 16.04.2 LTS \n \l\r
```\r
+\r
### docker\r
* Version information\r
\r
\r
* Create service account, role and bind them.\r
```\r
- cd /opt/opensds-k8s-linux-amd64/provisioner\r
+ cd /opt/opensds-k8s-{release version}-linux-amd64/provisioner\r
kubectl create -f serviceaccount.yaml\r
kubectl create -f clusterrole.yaml\r
kubectl create -f clusterrolebinding.yaml\r
```\r
\r
* Change the opensds endpoint IP in pod-provisioner.yaml\r
-The IP (192.168.56.106) should be replaced with the OpenSDS osdslet actual endpoint IP.\r
+The IP ```192.168.56.106``` should be replaced with the OpenSDS osdslet actual endpoint IP.\r
```yaml\r
kind: Pod\r
apiVersion: v1\r
serviceAccount: opensds-provisioner\r
containers:\r
- name: opensds-provisioner\r
- image: opensdsio/opensds-provisioner\r
+ image: opensdsio/opensds-provisioner:latest\r
securityContext:\r
args:\r
- "-endpoint=http://192.168.56.106:50040" # should be replaced\r