fix hanging due to nested sudo
author    grakiss <grakiss.wanglei@huawei.com>
Tue, 13 Oct 2015 06:38:31 +0000 (14:38 +0800)
committer grakiss <grakiss.wanglei@huawei.com>
Tue, 13 Oct 2015 12:04:12 +0000 (20:04 +0800)
JIRA: COMPASS-93
  - remove all nested sudo invocations from the deploy scripts (see the repro sketch after the file list)

Change-Id: I2d750858658b09de17cc32df5359ef04263ff1f4
Signed-off-by: grakiss <grakiss.wanglei@huawei.com>
deploy/adapters/ansible/roles/ceph-deploy/files/create_osd.sh
deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_openstack_conf.yml
deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_openstack_pre.yml
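
For context: the hang happens because an inner sudo is invoked in a session
that can never answer a password prompt. The playbooks reach remote nodes
over ssh, and even with `ssh -t` there is no real terminal when Ansible
drives the connection, so a sudo that wants a password blocks forever. A
minimal repro sketch, assuming a host "node1" whose sudo is password
protected (both are placeholders, not part of this change):

    # No usable TTY exists for the remote command, so when sudo tries to
    # prompt for a password it has nowhere to prompt and simply hangs.
    ssh -o StrictHostKeyChecking=no node1 "sudo service glance-api restart"

    # The fix applied throughout this change: the remote side already runs
    # with root privileges, so the inner sudo is dropped.
    ssh -o StrictHostKeyChecking=no node1 "service glance-api restart"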

diff --git a/deploy/adapters/ansible/roles/ceph-deploy/files/create_osd.sh b/deploy/adapters/ansible/roles/ceph-deploy/files/create_osd.sh
index 13e5fd8..f2a4066 100644
@@ -33,9 +33,9 @@ fi
 losetup -d /dev/loop0
 
 echo "vgcreate"
-vgcreate -y ceph-volumes $(sudo losetup --show -f /ceph/images/ceph-volumes.img)
+vgcreate -y ceph-volumes $(losetup --show -f /ceph/images/ceph-volumes.img)
 echo "lvcreate"
-sudo lvcreate -l 100%FREE -nceph0 ceph-volumes
+lvcreate -l 100%FREE -nceph0 ceph-volumes
 echo "mkfs"
 mkfs.xfs -f /dev/ceph-volumes/ceph0
 
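A note on the script above: create_osd.sh loop-mounts
/ceph/images/ceph-volumes.img, builds the ceph-volumes VG on it, and carves
a single LV for the OSD. If a failed run leaves the loop device attached,
a teardown along these lines restores a clean state (a sketch only; the
/dev/loop0 path matches the `losetup -d` call above, but confirm the real
device with `losetup -a`):

    # Remove the LV and VG created by the script, then detach the backing
    # loop device so the image file can be attached again cleanly.
    lvremove -f /dev/ceph-volumes/ceph0
    vgremove -f ceph-volumes
    losetup -d /dev/loop0
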
diff --git a/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_openstack_conf.yml b/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_openstack_conf.yml
index b437d4a..cb95cf0 100644
@@ -1,21 +1,21 @@
 ---
 
 - name: modify glance-api.conf for ceph
-  shell: ssh -o StrictHostKeyChecking=no {{ item }} -t "sed -i 's/^\(default_store\).*/\1 = rbd/' /etc/glance/glance-api.conf && sed -i '/^\[glance_store/a stores = rbd\nrbd_store_pool = images\nrbd_store_user = glance\nrbd_store_ceph_conf = /etc/ceph/ceph.conf\nrbd_store_chunk_size = 8\nshow_image_direct_url=True' /etc/glance/glance-api.conf && sudo glance-control api restart"
+  shell: ssh -o StrictHostKeyChecking=no {{ item }} -t "sed -i 's/^\(default_store\).*/\1 = rbd/' /etc/glance/glance-api.conf && sed -i '/^\[glance_store/a stores = rbd\nrbd_store_pool = images\nrbd_store_user = glance\nrbd_store_ceph_conf = /etc/ceph/ceph.conf\nrbd_store_chunk_size = 8\nshow_image_direct_url=True' /etc/glance/glance-api.conf && glance-control api restart"
   with_items:
     - "{{ groups['controller'] }}"
   tags:
     - ceph_conf_glance
 
 - name: modify cinder.conf for ceph
-  shell: ssh -o StrictHostKeyChecking=no {{ item }} -t "sed -i 's/^\(volume_driver\).*/\1 = cinder.volume.drivers.rbd.RBDDriver/' /etc/cinder/cinder.conf && sed -i '/^\[DEFAULT/a rbd_pool = volumes\nrbd_ceph_conf = /etc/ceph/ceph.conf\nrbd_flatten_volume_from_snapshot = false\nrbd_max_clone_depth = 5\nrbd_store_chunk_size = 4\nrados_connect_timeout = -1\nglance_api_version = 2\nrbd_user = cinder\nrbd_secret_uuid = {{ ceph_uuid.stdout_lines[0] }}' /etc/cinder/cinder.conf && sudo service {{ cinder_service }} restart"
+  shell: ssh -o StrictHostKeyChecking=no {{ item }} -t "sed -i 's/^\(volume_driver\).*/\1 = cinder.volume.drivers.rbd.RBDDriver/' /etc/cinder/cinder.conf && sed -i '/^\[DEFAULT/a rbd_pool = volumes\nrbd_ceph_conf = /etc/ceph/ceph.conf\nrbd_flatten_volume_from_snapshot = false\nrbd_max_clone_depth = 5\nrbd_store_chunk_size = 4\nrados_connect_timeout = -1\nglance_api_version = 2\nrbd_user = cinder\nrbd_secret_uuid = {{ ceph_uuid.stdout_lines[0] }}' /etc/cinder/cinder.conf && service {{ cinder_service }} restart"
   with_items:
     - "{{ groups['compute'] }}"
   tags:
     - ceph_conf_cinder
 
 - name: modify nova.conf for ceph
-  shell: ssh -o StrictHostKeyChecking=no {{ item }} -t "sed -i 's/^\(images_type\).*/\1 = rbd/' /etc/nova/nova-compute.conf && sed -i '/^\[libvirt/a images_rbd_pool = vms\nimages_rbd_ceph_conf = /etc/ceph/ceph.conf\nrbd_user = cinder\nrbd_secret_uuid = {{ ceph_uuid.stdout_lines[0] }}\ndisk_cachemodes=\"network=writeback\"\nlive_migration_flag=\"VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST,VIR_MIGRATE_TUNNELLED\"' /etc/nova/nova-compute.conf && sudo service  {{ nova_service }} restart"
+  shell: ssh -o StrictHostKeyChecking=no {{ item }} -t "sed -i 's/^\(images_type\).*/\1 = rbd/' /etc/nova/nova-compute.conf && sed -i '/^\[libvirt/a images_rbd_pool = vms\nimages_rbd_ceph_conf = /etc/ceph/ceph.conf\nrbd_user = cinder\nrbd_secret_uuid = {{ ceph_uuid.stdout_lines[0] }}\ndisk_cachemodes=\"network=writeback\"\nlive_migration_flag=\"VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST,VIR_MIGRATE_TUNNELLED\"' /etc/nova/nova-compute.conf && service  {{ nova_service }} restart"
   with_items:
     - "{{ groups['compute'] }}"
   tags:
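
All three tasks follow the same pattern: sed rewrites the service's config
file in place over ssh, then the service is restarted directly, relying on
the ssh login user already holding root privileges instead of an inner
sudo. A hedged spot-check that the edit and the restart took effect (host
and service names are placeholders for the real inventory):

    # Confirm the rbd settings landed and the service came back up.
    ssh -o StrictHostKeyChecking=no node1 \
        "grep -A2 '^\[glance_store' /etc/glance/glance-api.conf"
    ssh -o StrictHostKeyChecking=no node1 "service glance-api status"
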
diff --git a/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_openstack_pre.yml b/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_openstack_pre.yml
index a2ff030..52e54cb 100644
   shell: ceph auth get-or-create client.cinder mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rx pool=images' && ceph auth get-or-create client.glance mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=images'
 
 - name: send glance key to controller nodes
-  shell: ceph auth get-or-create client.glance | ssh {{ item }} sudo tee /etc/ceph/ceph.client.glance.keyring && ssh {{ item }} sudo chown glance:glance /etc/ceph/ceph.client.glance.keyring
+  shell: ceph auth get-or-create client.glance | ssh {{ item }} tee /etc/ceph/ceph.client.glance.keyring && ssh {{ item }} chown glance:glance /etc/ceph/ceph.client.glance.keyring
   with_items:
     - "{{ groups['controller'] }}"
 
 - name: send cinder key to compute nodes
-  shell: ceph auth get-or-create client.cinder | ssh {{ item }} sudo tee /etc/ceph/ceph.client.cinder.keyring && ssh {{ item }} sudo chown cinder:cinder /etc/ceph/ceph.client.cinder.keyring
+  shell: ceph auth get-or-create client.cinder | ssh {{ item }} tee /etc/ceph/ceph.client.cinder.keyring && ssh {{ item }} chown cinder:cinder /etc/ceph/ceph.client.cinder.keyring
   with_items:
     - "{{ groups['compute'] }}"
   tags:
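
The keyring tasks show why the nested sudo was especially harmful: in
`ceph auth get-or-create ... | ssh {{ item }} sudo tee ...`, the remote
sudo shares stdin with tee, so a password prompt would either block the
pipeline or swallow part of the keyring data. The sudo-free pattern now in
use, sketched with a placeholder host:

    # Generate the key locally and stream it into the remote keyring file;
    # nothing in the pipeline can stop to ask for a password.
    ceph auth get-or-create client.glance \
        | ssh node1 "tee /etc/ceph/ceph.client.glance.keyring"
    # Fix ownership in a separate ssh call, again without sudo.
    ssh node1 "chown glance:glance /etc/ceph/ceph.client.glance.keyring"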