bugfix: cinder scheduler service is not restarted when configuration is updated 93/5293/1
author 汉 徐 <hanxu@carey.local>
Mon, 28 Dec 2015 06:24:45 +0000 (14:24 +0800)
committer 汉 徐 <hanxu@carey.local>
Mon, 28 Dec 2015 06:24:45 +0000 (14:24 +0800)
JIRA: COMPASS-222
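
The restart is driven by Ansible handlers notified from the configuration
tasks; adding "- meta: flush_handlers" at the end of each role forces those
handlers to run immediately instead of waiting until the end of the play.
A minimal sketch of the pattern (role layout, file and service names are
illustrative, not the exact ones used in this change):

    # tasks/main.yml (illustrative)
    - name: update cinder configuration
      template: src=cinder.conf dest=/etc/cinder/cinder.conf backup=yes
      notify:
        - restart cinder services

    # run any notified handlers now, not at the end of the play
    - meta: flush_handlers

    # handlers/main.yml (illustrative)
    - name: restart cinder services
      service: name=cinder-scheduler state=restarted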

Change-Id: I43bb9ccc1bfe8e2cce9d0d82eccf97337d1fc07a
Signed-off-by: 汉 徐 <hanxu@carey.local>
22 files changed:
deploy/adapters/ansible/roles/ceph-deploy/tasks/main.yml
deploy/adapters/ansible/roles/cinder-controller/tasks/main.yml
deploy/adapters/ansible/roles/cinder-controller/templates/cinder.conf
deploy/adapters/ansible/roles/cinder-volume/tasks/main.yml
deploy/adapters/ansible/roles/cinder-volume/templates/cinder.conf
deploy/adapters/ansible/roles/common/tasks/main.yml
deploy/adapters/ansible/roles/database/tasks/main.yml
deploy/adapters/ansible/roles/glance/tasks/main.yml
deploy/adapters/ansible/roles/ha/tasks/main.yml
deploy/adapters/ansible/roles/heat/tasks/main.yml
deploy/adapters/ansible/roles/keystone/tasks/main.yml
deploy/adapters/ansible/roles/memcached/tasks/main.yml
deploy/adapters/ansible/roles/monitor/tasks/main.yml
deploy/adapters/ansible/roles/mq/tasks/main.yml
deploy/adapters/ansible/roles/neutron-compute/tasks/main.yml
deploy/adapters/ansible/roles/neutron-controller/tasks/main.yml
deploy/adapters/ansible/roles/neutron-network/tasks/main.yml
deploy/adapters/ansible/roles/nova-compute/tasks/main.yml
deploy/adapters/ansible/roles/nova-controller/tasks/main.yml
deploy/adapters/ansible/roles/secgroup/tasks/main.yml
deploy/adapters/ansible/roles/setup-network/tasks/main.yml
deploy/adapters/ansible/roles/storage/tasks/main.yml

index 75ed1da..1b04f3c 100644 (file)
@@ -28,3 +28,5 @@
     - ceph_deploy
     - ceph_openstack_conf
     - ceph_openstack
+
+- meta: flush_handlers
index 3714aa5..8ecda64 100644 (file)
@@ -3,7 +3,7 @@ rootwrap_config = /etc/cinder/rootwrap.conf
 api_paste_confg = /etc/cinder/api-paste.ini
 iscsi_helper = tgtadm
 volume_name_template = volume-%s
-volume_group = cinder-volumes
+volume_group = storage-volumes
 verbose = {{ VERBOSE }}
 debug = {{ DEBUG }}
 auth_strategy = keystone
@@ -39,7 +39,6 @@ volume_name_template = volume-%s
 snapshot_name_template = snapshot-%s
 
 max_gigabytes=10000
-volume_group=cinder-volumes
 
 volume_clear=zero
 volume_clear_size=10
index 3765071..c0da998 100644 (file)
@@ -14,3 +14,5 @@
             backup=yes
   notify:
      - restart cinder-volume services
+
+- meta: flush_handlers
index 7528894..4d8e49c 100644 (file)
@@ -38,7 +38,6 @@ volume_name_template = volume-%s
 snapshot_name_template = snapshot-%s
 
 max_gigabytes=10000
-volume_group=cinder-volumes
 
 volume_clear=zero
 volume_clear_size=10
index 1276429..b9c52c3 100644 (file)
@@ -70,3 +70,5 @@
 - name: kill daemon for accelerate
   shell: lsof -ni :5099|grep LISTEN|awk '{print $2}'|xargs kill -9
   ignore_errors: true
+
+- meta: flush_handlers
index bfdcb75..314a85b 100644 (file)
@@ -13,3 +13,5 @@
 - include: mongodb_config.yml
   when:
     - inventory_hostname == haproxy_hosts.keys()[0]
+
+- meta: flush_handlers
index f60740a..8eb9f1b 100644 (file)
@@ -84,3 +84,5 @@
 
 - name: restart rsyslog
   shell: service rsyslog restart
+
+- meta: flush_handlers
index 8b955cb..58a7ae3 100644 (file)
@@ -11,3 +11,5 @@
 - name: restart services
   service: name={{ item }} state=restarted enabled=yes
   with_items: services| union(services_noarch)
+
+- meta: flush_handlers
index 6e44694..1cc21fe 100644 (file)
@@ -10,4 +10,5 @@
 - name: restart cron
   service: name={{ cron }} state=restarted
 
+- meta: flush_handlers
 
index 84d4a94..521f599 100644 (file)
@@ -5,3 +5,5 @@
 
 - include: rabbitmq_config.yml
   when: inventory_hostname == haproxy_hosts.keys()[0]
+
+- meta: flush_handlers
index f3474ea..0d7bb2d 100644 (file)
@@ -46,4 +46,3 @@
 
 - include: ../../neutron-network/tasks/odl.yml
   when: "'opendaylight' in {{ NEUTRON_MECHANISM_DRIVERS }}"
-
index 45dab3f..6c30f25 100644 (file)
@@ -23,7 +23,7 @@
   lineinfile: dest=/opt/service create=yes line='{{ item }}'
   with_items: services | union(services_noarch)
 
-- meta: flush_handlers
-
 - name: remove nova sqlite db
   shell: rm /var/lib/nova/nova.sqlite || touch nova.sqlite.removed
+
+- meta: flush_handlers
index c26af4b..6419208 100644 (file)
@@ -8,3 +8,5 @@
 - include: secgroup.yml
   when: '{{ enable_secgroup }} == False'
   tags: secgroup
+
+- meta: flush_handlers
index ee4c97e..4185dc9 100755 (executable)
@@ -16,3 +16,5 @@
   when: status.stat.exists == True and status.stat.isblk == True
   tags:
     - storage
+
+- meta: flush_handlers