Support deployment of OpenStack Juno with Ansible for Compass on CentOS 7.1 25/1125/3
author    baigk <baiguoku@huawei.com>
Thu, 6 Aug 2015 06:37:33 +0000 (02:37 -0400)
committer baigk <baiguoku@huawei.com>
Mon, 10 Aug 2015 02:47:56 +0000 (22:47 -0400)
JIRA: COMPASS-6

Change-Id: I8ef865e8acfe29c3ff5c7e9030e4cebabed3457b
Signed-off-by: baigk <baiguoku@huawei.com>
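The recurring pattern in this change: each role keeps distro-neutral package and service names in vars/main.yml (packages_noarch / services_noarch), adds the Ubuntu- and CentOS-specific names in vars/Debian.yml and vars/RedHat.yml, and the tasks merge the two lists with Jinja2's union filter after include_vars picks the file matching ansible_os_family. A minimal sketch of that layering for a hypothetical "example" role (role and package names illustrative; the file layout is the one used below):

    # roles/example/vars/main.yml -- shared across distros (auto-loaded with the role)
    ---
    packages_noarch:
      - python-exampleclient
    services_noarch: []

    # roles/example/vars/RedHat.yml -- layered on top on CentOS 7.1
    ---
    packages:
      - openstack-example
    services:
      - openstack-example-api

    # roles/example/tasks/main.yml
    ---
    - include_vars: "{{ ansible_os_family }}.yml"

    - name: install example packages
      action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
      with_items: packages | union(packages_noarch)

    - name: restart example services
      service: name={{ item }} state=restarted enabled=yes
      with_items: services | union(services_noarch)

On Debian the same tasks run unchanged; only the included vars file differs, which is why the per-service handlers below can be collapsed into a single looped handler per role.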
87 files changed:
deploy/adapters/ansible/roles/cinder-controller/handlers/main.yml
deploy/adapters/ansible/roles/cinder-controller/tasks/cinder_config.yml
deploy/adapters/ansible/roles/cinder-controller/tasks/cinder_install.yml
deploy/adapters/ansible/roles/cinder-controller/vars/Debian.yml
deploy/adapters/ansible/roles/cinder-controller/vars/RedHat.yml [new file with mode: 0644]
deploy/adapters/ansible/roles/cinder-controller/vars/main.yml [new file with mode: 0644]
deploy/adapters/ansible/roles/cinder-volume/handlers/main.yml
deploy/adapters/ansible/roles/cinder-volume/tasks/main.yml
deploy/adapters/ansible/roles/cinder-volume/vars/Debian.yml
deploy/adapters/ansible/roles/cinder-volume/vars/RedHat.yml [new file with mode: 0644]
deploy/adapters/ansible/roles/cinder-volume/vars/main.yml [new file with mode: 0644]
deploy/adapters/ansible/roles/common/tasks/main.yml
deploy/adapters/ansible/roles/common/templates/hosts
deploy/adapters/ansible/roles/common/templates/pip.conf
deploy/adapters/ansible/roles/common/vars/Debian.yml
deploy/adapters/ansible/roles/common/vars/RedHat.yml [new file with mode: 0644]
deploy/adapters/ansible/roles/common/vars/main.yml [new file with mode: 0644]
deploy/adapters/ansible/roles/dashboard/tasks/main.yml
deploy/adapters/ansible/roles/dashboard/vars/Debian.yml
deploy/adapters/ansible/roles/dashboard/vars/RedHat.yml [new file with mode: 0644]
deploy/adapters/ansible/roles/dashboard/vars/main.yml [new file with mode: 0644]
deploy/adapters/ansible/roles/database/files/my.cnf
deploy/adapters/ansible/roles/database/tasks/mariadb.yml
deploy/adapters/ansible/roles/database/tasks/mysql.yml
deploy/adapters/ansible/roles/database/templates/my-huge.cnf [new file with mode: 0644]
deploy/adapters/ansible/roles/database/templates/server.cnf [new file with mode: 0644]
deploy/adapters/ansible/roles/database/templates/wsrep.cnf
deploy/adapters/ansible/roles/database/vars/Debian.yml
deploy/adapters/ansible/roles/database/vars/RedHat.yml [new file with mode: 0644]
deploy/adapters/ansible/roles/database/vars/main.yml [new file with mode: 0644]
deploy/adapters/ansible/roles/glance/handlers/main.yml
deploy/adapters/ansible/roles/glance/tasks/glance_config.yml
deploy/adapters/ansible/roles/glance/tasks/glance_install.yml
deploy/adapters/ansible/roles/glance/tasks/nfs.yml
deploy/adapters/ansible/roles/glance/templates/glance-api.conf
deploy/adapters/ansible/roles/glance/vars/Debian.yml
deploy/adapters/ansible/roles/glance/vars/RedHat.yml [new file with mode: 0644]
deploy/adapters/ansible/roles/glance/vars/main.yml [new file with mode: 0644]
deploy/adapters/ansible/roles/ha/tasks/main.yml
deploy/adapters/ansible/roles/ha/templates/failover.j2
deploy/adapters/ansible/roles/ha/templates/keepalived.conf
deploy/adapters/ansible/roles/ha/vars/Debian.yml
deploy/adapters/ansible/roles/ha/vars/RedHat.yml [new file with mode: 0644]
deploy/adapters/ansible/roles/ha/vars/main.yml [new file with mode: 0644]
deploy/adapters/ansible/roles/keystone/tasks/keystone_install.yml
deploy/adapters/ansible/roles/keystone/vars/Debian.yml
deploy/adapters/ansible/roles/keystone/vars/RedHat.yml [new file with mode: 0644]
deploy/adapters/ansible/roles/keystone/vars/main.yml [new file with mode: 0644]
deploy/adapters/ansible/roles/mq/tasks/rabbitmq.yml
deploy/adapters/ansible/roles/mq/vars/Debian.yml
deploy/adapters/ansible/roles/mq/vars/RedHat.yml [new file with mode: 0644]
deploy/adapters/ansible/roles/mq/vars/main.yml [new file with mode: 0644]
deploy/adapters/ansible/roles/neutron-common/handlers/main.yml
deploy/adapters/ansible/roles/neutron-compute/handlers/main.yml
deploy/adapters/ansible/roles/neutron-compute/tasks/main.yml
deploy/adapters/ansible/roles/neutron-compute/templates/nova.conf
deploy/adapters/ansible/roles/neutron-compute/vars/Debian.yml
deploy/adapters/ansible/roles/neutron-compute/vars/RedHat.yml [new file with mode: 0644]
deploy/adapters/ansible/roles/neutron-compute/vars/main.yml [new file with mode: 0644]
deploy/adapters/ansible/roles/neutron-controller/handlers/main.yml
deploy/adapters/ansible/roles/neutron-controller/tasks/neutron_config.yml
deploy/adapters/ansible/roles/neutron-controller/tasks/neutron_install.yml
deploy/adapters/ansible/roles/neutron-controller/templates/nova.conf
deploy/adapters/ansible/roles/neutron-controller/vars/Debian.yml
deploy/adapters/ansible/roles/neutron-controller/vars/RedHat.yml [new file with mode: 0644]
deploy/adapters/ansible/roles/neutron-controller/vars/main.yml [new file with mode: 0644]
deploy/adapters/ansible/roles/neutron-network/files/xorp [new file with mode: 0644]
deploy/adapters/ansible/roles/neutron-network/handlers/main.yml
deploy/adapters/ansible/roles/neutron-network/tasks/igmp-router.yml
deploy/adapters/ansible/roles/neutron-network/tasks/main.yml
deploy/adapters/ansible/roles/neutron-network/templates/nova.conf
deploy/adapters/ansible/roles/neutron-network/vars/Debian.yml
deploy/adapters/ansible/roles/neutron-network/vars/RedHat.yml [new file with mode: 0644]
deploy/adapters/ansible/roles/neutron-network/vars/main.yml [new file with mode: 0644]
deploy/adapters/ansible/roles/nova-compute/handlers/main.yml
deploy/adapters/ansible/roles/nova-compute/tasks/main.yml
deploy/adapters/ansible/roles/nova-compute/templates/nova.conf
deploy/adapters/ansible/roles/nova-compute/vars/Debian.yml
deploy/adapters/ansible/roles/nova-compute/vars/RedHat.yml [new file with mode: 0644]
deploy/adapters/ansible/roles/nova-compute/vars/main.yml [new file with mode: 0644]
deploy/adapters/ansible/roles/nova-controller/handlers/main.yml
deploy/adapters/ansible/roles/nova-controller/tasks/nova_config.yml
deploy/adapters/ansible/roles/nova-controller/tasks/nova_install.yml
deploy/adapters/ansible/roles/nova-controller/templates/nova.conf
deploy/adapters/ansible/roles/nova-controller/vars/Debian.yml
deploy/adapters/ansible/roles/nova-controller/vars/RedHat.yml [new file with mode: 0644]
deploy/adapters/ansible/roles/nova-controller/vars/main.yml [new file with mode: 0644]

index ef671dd..21dde84 100644 (file)
@@ -1,6 +1,5 @@
 ---
-- name: restart cinder-scheduler
-  service: name=cinder-scheduler state=restarted enabled=yes
-- name: restart cinder-api
-  service: name=cinder-api state=restarted enabled=yes
+- name: restart cinder control service
+  service: name={{ item }} state=restarted enabled=yes
+  with_items: services | union(services_noarch)
 
index 7796cf7..fe9f7dc 100644 (file)
@@ -1,4 +1,7 @@
 ---
+- name: upload cinder conf
+  template: src=cinder.conf dest=/etc/cinder/cinder.conf
+
 - name: sync cinder db
   shell: su -s /bin/sh -c "cinder-manage db sync" cinder && cinder
   register: result
@@ -6,8 +9,7 @@
   retries: 5
   delay: 3
   notify:
-    - restart cinder-scheduler
-    - restart cinder-api
+    - restart cinder control service
 
 - meta: flush_handlers
 
index bd4a460..24981e9 100644 (file)
@@ -1,15 +1,9 @@
 ---
 - name: install cinder packages
-  action: "{{ ansible_pkg_mgr }} name={{ item }} state=present force=yes"
-  with_items: packages
+  action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+  with_items: packages | union(packages_noarch)
 
-- name: generate cinder service list
+- name: generate common cinder service list
   shell: echo {{ item }} >> /opt/service
-  with_items: services
-
-- name: upload cinder conf
-  template: src=cinder.conf dest=/etc/cinder/cinder.conf
-  notify:
-    - restart cinder-scheduler
-    - restart cinder-api
+  with_items: services | union(services_noarch)
 
index e1c096e..a04fe8f 100644 (file)
@@ -1,9 +1,9 @@
 ---
+
 packages:
   - cinder-api
   - cinder-scheduler
-  - python-cinderclient
 
 services:
-  - "cinder-api"
-  - "cinder-scheduler"
+  - cinder-api
+  - cinder-scheduler
diff --git a/deploy/adapters/ansible/roles/cinder-controller/vars/RedHat.yml b/deploy/adapters/ansible/roles/cinder-controller/vars/RedHat.yml
new file mode 100644 (file)
index 0000000..e059c27
--- /dev/null
@@ -0,0 +1,8 @@
+---
+packages:
+  - openstack-cinder
+  - python-oslo-db
+
+services:
+  - openstack-cinder-api
+  - openstack-cinder-scheduler
diff --git a/deploy/adapters/ansible/roles/cinder-controller/vars/main.yml b/deploy/adapters/ansible/roles/cinder-controller/vars/main.yml
new file mode 100644 (file)
index 0000000..561952c
--- /dev/null
@@ -0,0 +1,6 @@
+---
+packages_noarch:
+  - python-cinderclient
+
+services_noarch: []
+
index ad917ce..55fbc2c 100644 (file)
@@ -1,3 +1,4 @@
 ---
-- name: restart cinder-volume
-  service: name=cinder-volume state=restarted enabled=yes
+- name: restart cinder-volume services
+  service: name={{ item }} state=restarted enabled=yes
+  with_items: services | union(services_noarch)
index d727b66..3700bcd 100644 (file)
@@ -2,24 +2,24 @@
 - include_vars: "{{ ansible_os_family }}.yml"
 
 - name: install cinder-volume and lvm2 packages
-  action: "{{ ansible_pkg_mgr }} name={{ item }} state=present force=yes"
-  with_items: packages
+  action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+  with_items: packages | union(packages_noarch)
 
 - name: generate cinder volume service list
   shell: echo {{ item }} >> /opt/service
-  with_items: services
+  with_items: services | union(services_noarch)
 
 - name: check if physical device exists
   stat: path={{ physical_device }}
-  register: st
+  register: status
 
-- name: repace physical_device if st is false
+- name: replace physical_device if status is false
   local_action: copy src=loop.yml dest=/tmp/loop.yml
-  when: st.stat.exists == False
+  when: status.stat.exists == False
 
 - name: load loop.yml
   include_vars: /tmp/loop.yml
-  when: st.stat.exists == False
+  when: status.stat.exists == False
 
 - name: check if cinder-volumes is mounted
   shell: ls /mnt
@@ -35,7 +35,7 @@
   when: cindervolumes.stdout != 'cinder-volumes'
 
 - name: get first lo device
-  shell: ls /dev/loop* | egrep 'loop[0-9]+'|sed -n 1p
+  shell: losetup -f
   register: first_lo
   when: cindervolumes.stdout != 'cinder-volumes'
 
@@ -51,4 +51,4 @@
   template: src=cinder.conf dest=/etc/cinder/cinder.conf
             backup=yes
   notify:
-     - restart cinder-volume
+     - restart cinder-volume services
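losetup -f asks losetup for the first unused loop device, which keeps working when /dev/loop* has no entries yet, unlike the old ls | egrep pipeline. A minimal sketch of how such a fallback is typically wired when the physical device is absent; /var/cinder.img is an illustrative backing file, the role's real values come from loop.yml:

    - name: check if physical device exists
      stat: path={{ physical_device }}
      register: status

    - name: create a sparse backing file
      command: truncate -s 2G /var/cinder.img creates=/var/cinder.img
      when: status.stat.exists == False

    - name: get first lo device
      shell: losetup -f
      register: first_lo
      when: status.stat.exists == False

    - name: attach the backing file to that loop device
      command: losetup {{ first_lo.stdout }} /var/cinder.img
      when: status.stat.exists == False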
diff --git a/deploy/adapters/ansible/roles/cinder-volume/vars/RedHat.yml b/deploy/adapters/ansible/roles/cinder-volume/vars/RedHat.yml
new file mode 100644 (file)
index 0000000..123d457
--- /dev/null
@@ -0,0 +1,11 @@
+---
+packages:
+  - openstack-cinder
+  - targetcli
+  - python-oslo-db
+  - MySQL-python
+  - lvm2
+
+services:
+  - openstack-cinder-volume
+  - lvm2-lvmetad
diff --git a/deploy/adapters/ansible/roles/cinder-volume/vars/main.yml b/deploy/adapters/ansible/roles/cinder-volume/vars/main.yml
new file mode 100644 (file)
index 0000000..036740c
--- /dev/null
@@ -0,0 +1,6 @@
+---
+packages_noarch:
+  - lvm2
+
+services_noarch: []
+
index 452f768..1b20c0f 100644 (file)
@@ -2,11 +2,11 @@
 - include_vars: "{{ ansible_os_family }}.yml"
 
 - name: first update pkgs
-  apt: update_cache=yes
+  shell: "{{ ansible_pkg_mgr }}  update"
 
 - name: install packages
   action: "{{ ansible_pkg_mgr }} name={{ item }} state=latest"
-  with_items: packages
+  with_items: packages | union(packages_noarch)
 
 - name: update hosts files to all hosts
   template: src=hosts
   shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf
   register: ip
 
-- name: update compass-core name and ip to  hosts files
-  shell: echo "# compass\n"{{ ip.stdout_lines[0] }} {{ name.stdout_lines[0] }} >> /etc/hosts
+- name: update compass-core name and ip to hosts files
+  shell: |
+    echo "# compass" >> /etc/hosts
+    echo {{ ip.stdout_lines[0] }} {{ name.stdout_lines[0] }} >> /etc/hosts
 
 - name: create pip config directory
   file: path=~/.pip state=directory
 - name: update pip.conf
   template: src=pip.conf dest=~/.pip/pip.conf
 
-- name: restart ntp
-  command: su -s /bin/sh -c "service ntp stop; ntpd -gq; hwclock --systohc"
+- name: sync system clock with hardware clock
+  command: su -s /bin/sh -c "service {{ ntp_service }} stop; ntpd -gq; hwclock --systohc"
   ignore_errors: True
 
 - name: update ntp conf
   template: src=ntp.conf dest=/etc/ntp.conf backup=yes
 
 - name: restart ntp
-  service: name=ntp state=restarted enabled=yes
+  service: name={{ ntp_service }} state=restarted enabled=yes
index 9d27c0a..bb770d5 100644 (file)
@@ -1,22 +1,9 @@
-# compute-controller
-10.145.89.136 host-136
-# database
-10.145.89.136 host-136
-# messaging
-10.145.89.136 host-136
-# storage-controller
-10.145.89.138 host-138
-# image
-10.145.89.138 host-138
-# identity
-10.145.89.136 host-136
-# network-server
-10.145.89.138 host-138
-# dashboard
-10.145.89.136 host-136
-# storage-volume
-10.145.89.139 host-139
-# network-worker
-10.145.89.139 host-139
-# compute-worker
-10.145.89.137 host-137
+# localhost
+127.0.0.1 localhost
+# controller
+10.1.0.50 host1
+10.1.0.51 host2
+10.1.0.52 host3
+# compute
+10.1.0.53 host4
+10.1.0.54 host5
index 4e1309a..7bb3e43 100644 (file)
@@ -1,3 +1,5 @@
 [global]
 find-links = http://{{ COMPASS_SERVER.stdout_lines[0] }}/pip
 no-index = true
+[install]
+trusted-host={{ COMPASS_SERVER.stdout_lines[0] }}
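Newer pip releases refuse to fetch from a plain-HTTP source unless the host is whitelisted, so the compass server has to appear under [install] trusted-host as well as in find-links. A rendered example of this template, assuming the registered COMPASS_SERVER address came back as 10.1.0.1 (illustrative):

    [global]
    find-links = http://10.1.0.1/pip
    no-index = true
    [install]
    trusted-host=10.1.0.1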
index c763efd..af6130c 100644 (file)
@@ -1,7 +1,6 @@
 ---
 packages:
   - ubuntu-cloud-keyring
-  - python-pip
   - python-dev
-  - python-mysqldb
-  - ntp
+
+ntp_service: ntp
diff --git a/deploy/adapters/ansible/roles/common/vars/RedHat.yml b/deploy/adapters/ansible/roles/common/vars/RedHat.yml
new file mode 100644 (file)
index 0000000..f75c253
--- /dev/null
@@ -0,0 +1,5 @@
+---
+packages:
+  - python-devel
+
+ntp_service: ntpd
diff --git a/deploy/adapters/ansible/roles/common/vars/main.yml b/deploy/adapters/ansible/roles/common/vars/main.yml
new file mode 100644 (file)
index 0000000..2c5b567
--- /dev/null
@@ -0,0 +1,6 @@
+---
+packages_noarch:
+  - python-pip
+  - ntp
+
+services_noarch: []
index e43b6fd..2cad117 100644 (file)
@@ -2,26 +2,17 @@
 - include_vars: "{{ ansible_os_family }}.yml"
 
 - name: install dashboard packages
-  action: "{{ ansible_pkg_mgr }} name={{ item }} state=present force=yes"
-  with_items: packages
+  action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+  with_items: packages | union(packages_noarch)
 
 - name: remove ubuntu theme
   action: "{{ ansible_pkg_mgr }} name=openstack-dashboard-ubuntu-theme state=absent"
 
-## horizon configuration is already enabled in apache2/conf-enabled
-## by openstack-dashboard package deploy script.
-#- name: update dashboard conf
-#  template: src=openstack-dashboard.conf
-#            dest=/etc/apache2/sites-available/openstack-dashboard.conf
-#            backup=yes
-
 - name: update horizon settings
   template: src=local_settings.py
             dest=/etc/openstack-dashboard/local_settings.py
             backup=yes
 
-- name: restart apache2
-  service: name=apache2 state=restarted enabled=yes
-
-- name: restart memcached
-  service: name=memcached state=restarted enabled=yes
+- name: restart dashboard services
+  service: name={{ item }} state=restarted enabled=yes
+  with_items: services | union(services_noarch)
index e6986f4..fee64c3 100644 (file)
@@ -1,6 +1,8 @@
 ---
 packages:
   - apache2
-  - memcached
   - libapache2-mod-wsgi
-  - openstack-dashboard
+
+services:
+  - apache2
+  - memcached
diff --git a/deploy/adapters/ansible/roles/dashboard/vars/RedHat.yml b/deploy/adapters/ansible/roles/dashboard/vars/RedHat.yml
new file mode 100644 (file)
index 0000000..f0acce9
--- /dev/null
@@ -0,0 +1,8 @@
+---
+packages:
+  - httpd
+  - mod_wsgi
+  - python-memcached
+
+services:
+  - httpd
diff --git a/deploy/adapters/ansible/roles/dashboard/vars/main.yml b/deploy/adapters/ansible/roles/dashboard/vars/main.yml
new file mode 100644 (file)
index 0000000..b6a965c
--- /dev/null
@@ -0,0 +1,6 @@
+---
+packages_noarch:
+  - memcached
+  - openstack-dashboard
+
+services_noarch: []
index 8575d97..e4a23d9 100644 (file)
 # escpecially if they contain "#" chars...
 # Remember to edit /etc/mysql/debian.cnf when changing the socket location.
 [client]
-port    = 3306
-socket  = /var/run/mysqld/mysqld.sock
+port   = 3306
+socket = /var/run/mysqld/mysqld.sock
 
 # Here is entries for some specific programs
 # The following values assume you have at least 32M ram
 
 # This was formally known as [safe_mysqld]. Both versions are currently parsed.
 [mysqld_safe]
-socket  = /var/run/mysqld/mysqld.sock
-nice    = 0
+socket = /var/run/mysqld/mysqld.sock
+nice   = 0
 
 [mysqld]
 #
 # * Basic Settings
 #
-user      = mysql
-pid-file  = /var/run/mysqld/mysqld.pid
-socket    = /var/run/mysqld/mysqld.sock
-port      = 3306
-basedir   = /usr
-datadir   = /var/lib/mysql
-tmpdir    = /tmp
-lc-messages-dir  = /usr/share/mysql
+user            = mysql
+pid-file        = /var/run/mysqld/mysqld.pid
+socket          = /var/run/mysqld/mysqld.sock
+port            = 3306
+basedir         = /usr
+datadir         = /var/lib/mysql
+tmpdir          = /tmp
+lc-messages-dir = /usr/share/mysql
 skip-external-locking
 #
 # Instead of skip-networking the default is now to listen only on
 # localhost which is more compatible and is not less secure.
-bind-address = 0.0.0.0
+bind-address    = 0.0.0.0
 #
 # * Fine Tuning
 #
-key_buffer         = 16M
+key_buffer      = 16M
 max_allowed_packet = 16M
 thread_stack       = 192K
 thread_cache_size  = 8
@@ -57,7 +57,7 @@ thread_cache_size  = 8
 myisam-recover      = BACKUP
 #max_connections    = 100
 #table_cache        = 64
-#thread_concurrenc  = 10
+#thread_concurrency = 10
 #
 # * Query Cache Configuration
 #
@@ -69,27 +69,27 @@ query_cache_size    = 16M
 # Both location gets rotated by the cronjob.
 # Be aware that this log type is a performance killer.
 # As of 5.1 you can enable the log at runtime!
-#general_log_file        = /var/log/mysql/mysql.log
-#general_log             = 1
+#general_log_file   = /var/log/mysql/mysql.log
+#general_log        = 1
 #
 # Error log - should be very few entries.
 #
 log_error = /var/log/mysql/error.log
 #
 # Here you can see queries with especially long duration
-#log_slow_queries    = /var/log/mysql/mysql-slow.log
-#long_query_time = 2
+#log_slow_queries  = /var/log/mysql/mysql-slow.log
+#long_query_time   = 2
 #log-queries-not-using-indexes
 #
 # The following can be used as easy to replay backup logs or for replication.
 # note: if you are setting up a replication slave, see README.Debian about
 #       other settings you may need to change.
-#server-id        = 1
-#log_bin            = /var/log/mysql/mysql-bin.log
-expire_logs_days    = 10
-max_binlog_size     = 100M
-#binlog_do_db       = include_database_name
-#binlog_ignore_db   = include_database_name
+#server-id         = 1
+#log_bin           = /var/log/mysql/mysql-bin.log
+expire_logs_days   = 10
+max_binlog_size    = 100M
+#binlog_do_db      = include_database_name
+#binlog_ignore_db  = include_database_name
 #
 # * InnoDB
 #
@@ -108,20 +108,20 @@ max_binlog_size     = 100M
 # ssl-key=/etc/mysql/server-key.pem
 default-storage-engine = innodb
 innodb_file_per_table
-collation-server = utf8_general_ci
-init-connect = 'SET NAMES utf8'
-character-set-server = utf8
+collation-server       = utf8_general_ci
+init-connect           = 'SET NAMES utf8'
+character-set-server   = utf8
 
 [mysqldump]
 quick
 quote-names
-max_allowed_packet    = 16M
+max_allowed_packet     = 16M
 
 [mysql]
 #no-auto-rehash    # faster start of mysql but no tab completition
 
 [isamchk]
-key_buffer        = 16M
+key_buffer             = 16M
 
 #
 # * IMPORTANT: Additional settings that can override those from this file!
index 50618bf..dbd76c6 100644 (file)
@@ -1,16 +1,17 @@
 ---
 - name: install python-mysqldb
-  action: "{{ ansible_pkg_mgr }} name={{ item }} state=present force=yes"
-  with_items: maridb_packages
+  action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+  with_items: maridb_packages | union(packages_noarch)
 
 - name: create mysql log directy
   file: path=/var/log/mysql state=directory owner=mysql group=mysql mode=0755
 
-- name: update mariadb my.cnf
-  template: src=my.cnf dest=/etc/mysql/my.cnf backup=yes
+- name: update mariadb config file
+  template: src={{ item }} dest={{ mysql_config_file_path }}/{{ item }} backup=yes
+  with_items: mysql_config_file_name
 
 - name: update galera wsrep.cnf
-  template: src=wsrep.cnf dest=/etc/mysql/conf.d/wsrep.cnf backup=yes
+  template: src=wsrep.cnf dest={{ wsrep_config_file_path }}/wsrep.cnf backup=yes
 
 - name: update wsrep_sst_rsync uid
   lineinfile: dest=/usr/bin/wsrep_sst_rsync state=absent regexp="\s*uid = \$MYUID$"  backup=yes
@@ -29,8 +30,7 @@
 
 - name: generate mysql service list
   shell: echo {{ item }} >> /opt/service
-  with_items:
-   - mysql
+  with_items: services_noarch
 
 - name: create database/user
   shell: /opt/data.sh
index 8005292..809d617 100644 (file)
@@ -6,10 +6,9 @@
 - name: create mysql log directy
   file: path=/var/log/mysql state=directory owner=mysql group=mysql mode=0755
 
-- name: update mysql my.cnf
-  copy: src=my.cnf
-        dest=/etc/mysql/my.cnf
-        backup=yes
+- name: update mysql config file
+  copy: src={{ item }} dest={{ mysql_config_file_path }}/{{ item }} backup=yes
+  with_items: mysql_config_file_name
 
 - name: manually restart mysql server
   shell: service mysql restart
diff --git a/deploy/adapters/ansible/roles/database/templates/my-huge.cnf b/deploy/adapters/ansible/roles/database/templates/my-huge.cnf
new file mode 100644 (file)
index 0000000..c561858
--- /dev/null
@@ -0,0 +1,151 @@
+# Example MariaDB config file for very large systems.
+#
+# This is for a large system with memory of 1G-2G where the system runs mainly
+# MariaDB.
+#
+# MariaDB programs look for option files in a set of
+# locations which depend on the deployment platform.
+# You can copy this option file to one of those
+# locations. For information about these locations, do:
+# 'my_print_defaults --help' and see what is printed under
+# Default options are read from the following files in the given order:
+# More information at: http://dev.mysql.com/doc/mysql/en/option-files.html
+#
+# In this file, you can use all long options that a program supports.
+# If you want to know which options a program supports, run the program
+# with the "--help" option.
+
+# The following options will be passed to all MySQL clients
+[client]
+#password = your_password
+port      = 3306
+socket    = /var/lib/mysql/mysql.sock
+
+# Here follows entries for some specific programs
+
+# The MySQL server
+[mysqld]
+port      = 3306
+socket    = /var/lib/mysql/mysql.sock
+skip-external-locking
+key_buffer_size    = 384M
+max_allowed_packet = 1M
+table_open_cache   = 512
+sort_buffer_size   = 2M
+read_buffer_size   = 2M
+read_rnd_buffer_size    = 8M
+myisam_sort_buffer_size = 64M
+thread_cache_size  = 8
+query_cache_size   = 32M
+# Try number of CPU's*2 for thread_concurrency
+thread_concurrency = 8
+
+# Point the following paths to a dedicated disk
+#tmpdir        = /tmp/
+
+# Don't listen on a TCP/IP port at all. This can be a security enhancement,
+# if all processes that need to connect to mysqld run on the same host.
+# All interaction with mysqld must be made via Unix sockets or named pipes.
+# Note that using this option without enabling named pipes on Windows
+# (via the "enable-named-pipe" option) will render mysqld useless!
+#
+#skip-networking
+
+# Replication Master Server (default)
+# binary logging is required for replication
+log-bin=mysql-bin
+
+# required unique id between 1 and 2^32 - 1
+# defaults to 1 if master-host is not set
+# but will not function as a master if omitted
+server-id = 1
+
+# Replication Slave (comment out master section to use this)
+#
+# To configure this host as a replication slave, you can choose between
+# two methods :
+#
+# 1) Use the CHANGE MASTER TO command (fully described in our manual) -
+#    the syntax is:
+#
+#    CHANGE MASTER TO MASTER_HOST=<host>, MASTER_PORT=<port>,
+#    MASTER_USER=<user>, MASTER_PASSWORD=<password> ;
+#
+#    where you replace <host>, <user>, <password> by quoted strings and
+#    <port> by the master's port number (3306 by default).
+#
+#    Example:
+#
+#    CHANGE MASTER TO MASTER_HOST='125.564.12.1', MASTER_PORT=3306,
+#    MASTER_USER='joe', MASTER_PASSWORD='secret';
+#
+# OR
+#
+# 2) Set the variables below. However, in case you choose this method, then
+#    start replication for the first time (even unsuccessfully, for example
+#    if you mistyped the password in master-password and the slave fails to
+#    connect), the slave will create a master.info file, and any later
+#    change in this file to the variables' values below will be ignored and
+#    overridden by the content of the master.info file, unless you shutdown
+#    the slave server, delete master.info and restart the slaver server.
+#    For that reason, you may want to leave the lines below untouched
+#    (commented) and instead use CHANGE MASTER TO (see above)
+#
+# required unique id between 2 and 2^32 - 1
+# (and different from the master)
+# defaults to 2 if master-host is set
+# but will not function as a slave if omitted
+#server-id       = 2
+#
+# The replication master for this slave - required
+#master-host     =   <hostname>
+#
+# The username the slave will use for authentication when connecting
+# to the master - required
+#master-user     =   <username>
+#
+# The password the slave will authenticate with when connecting to
+# the master - required
+#master-password =   <password>
+#
+# The port the master is listening on.
+# optional - defaults to 3306
+#master-port     =  <port>
+#
+# binary logging - not required for slaves, but recommended
+#log-bin=mysql-bin
+#
+# binary logging format - mixed recommended
+#binlog_format=mixed
+
+# Uncomment the following if you are using InnoDB tables
+#innodb_data_home_dir = /var/lib/mysql
+#innodb_data_file_path = ibdata1:2000M;ibdata2:10M:autoextend
+#innodb_log_group_home_dir = /var/lib/mysql
+# You can set .._buffer_pool_size up to 50 - 80 %
+# of RAM but beware of setting memory usage too high
+#innodb_buffer_pool_size = 384M
+#innodb_additional_mem_pool_size = 20M
+# Set .._log_file_size to 25 % of buffer pool size
+#innodb_log_file_size = 100M
+#innodb_log_buffer_size = 8M
+#innodb_flush_log_at_trx_commit = 1
+#innodb_lock_wait_timeout = 50
+
+[mysqldump]
+quick
+max_allowed_packet = 16M
+
+[mysql]
+no-auto-rehash
+# Remove the next comment character if you are not familiar with SQL
+#safe-updates
+
+[myisamchk]
+key_buffer_size  = 256M
+sort_buffer_size = 256M
+read_buffer      = 2M
+write_buffer     = 2M
+
+[mysqlhotcopy]
+interactive-timeout
diff --git a/deploy/adapters/ansible/roles/database/templates/server.cnf b/deploy/adapters/ansible/roles/database/templates/server.cnf
new file mode 100644 (file)
index 0000000..e0893c0
--- /dev/null
@@ -0,0 +1,47 @@
+#
+# These groups are read by MariaDB server.
+# Use it for options that only the server (but not clients) should see
+#
+# See the examples of server my.cnf files in /usr/share/mysql/
+#
+
+# this is read by the standalone daemon and embedded servers
+[server]
+
+# this is only for the mysqld standalone daemon
+[mysqld]
+log_error = /var/log/mysql/error.log
+max_connections    = 2000
+max_connect_errors = 8000
+skip-host-cache
+skip-name-resolve
+bind-address       = {{ HA_VIP }}
+#
+# * Galera-related settings
+#
+[galera]
+# Mandatory settings
+#wsrep_provider=
+#wsrep_cluster_address=
+#binlog_format=row
+#default_storage_engine=InnoDB
+#innodb_autoinc_lock_mode=2
+#bind-address=0.0.0.0
+#
+# Optional setting
+#wsrep_slave_threads=1
+#innodb_flush_log_at_trx_commit=0
+
+# this is only for embedded server
+[embedded]
+
+# This group is only read by MariaDB servers, not by MySQL.
+# If you use the same .cnf file for MySQL and MariaDB,
+# you can put MariaDB-only options here
+[mariadb]
+
+# This group is only read by MariaDB-10.0 servers.
+# If you use the same .cnf file for MariaDB of different versions,
+# use this group for options that older servers don't understand
+[mariadb-10.0]
+
index b9e9424..8169979 100644 (file)
@@ -39,7 +39,7 @@ bind-address={{ HA_VIP }}
 ##
 
 # Full path to wsrep provider library or 'none'
-wsrep_provider=/usr/lib/galera/libgalera_smm.so
+wsrep_provider={{ wsrep_provider_file }}
 
 # Provider specific configuration options
 #wsrep_provider_options=
index d23c18d..5c014b4 100644 (file)
@@ -1,4 +1,5 @@
 ---
+
 mysql_packages:
   - python-mysqldb
   - mysql-server
@@ -10,3 +11,10 @@ maridb_packages:
   - python-mysqldb
   - mysql-server-wsrep
   - galera
+
+services: []
+
+mysql_config_file_path: "/etc/mysql"
+mysql_config_file_name: "my.cnf"
+wsrep_config_file_path: "/etc/mysql/conf.d"
+wsrep_provider_file: "/usr/lib/galera/libgalera_smm.so"
diff --git a/deploy/adapters/ansible/roles/database/vars/RedHat.yml b/deploy/adapters/ansible/roles/database/vars/RedHat.yml
new file mode 100644 (file)
index 0000000..1e4b427
--- /dev/null
@@ -0,0 +1,16 @@
+---
+mysql_packages:
+  - python-mysqldb
+  - mysql-server
+
+maridb_packages:
+  - MariaDB-client
+  - MariaDB-server
+  - galera
+
+services: []
+
+mysql_config_file_path: "/etc/my.cnf.d"
+mysql_config_file_name: ["my-huge.cnf", "server.cnf"]
+wsrep_config_file_path: "/etc/my.cnf.d"
+wsrep_provider_file: "/usr/lib64/galera/libgalera_smm.so"
diff --git a/deploy/adapters/ansible/roles/database/vars/main.yml b/deploy/adapters/ansible/roles/database/vars/main.yml
new file mode 100644 (file)
index 0000000..e497cd9
--- /dev/null
@@ -0,0 +1,5 @@
+---
+packages_noarch: []
+
+services_noarch:
+  - mysql
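mysql_config_file_name is a bare string on Debian ("my.cnf") but a two-element list on RedHat, and both feed the same task because older Ansible releases treat a scalar with_items value as a single-item loop. A tiny sketch of that behaviour (the debug task is illustrative only):

    - name: show which config files will be rendered
      debug: msg="{{ item }}"
      with_items: mysql_config_file_name
      # Debian: one pass  -> my.cnf into /etc/mysql
      # RedHat: two passes -> my-huge.cnf and server.cnf into /etc/my.cnf.d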
index d8eaa44..7fdaea5 100644 (file)
@@ -1,6 +1,4 @@
 ---
-- name: restart glance-api
-  service: name=glance-api state=restarted enabled=yes
-
-- name: restart glance-registry
-  service: name=glance-registry state=restarted enabled=yes
+- name: restart glance services
+  service: name={{ item }} state=restarted enabled=yes
+  with_items: services | union(services_noarch)
index 008595d..0960acf 100644 (file)
@@ -1,6 +1,7 @@
 ---
 - name: init glance db version
   shell: glance-manage db_version_control 0
+  when: ansible_os_family == "Debian"
 
 - name: sync glance db
   shell: sleep 15; su -s /bin/sh -c "glance-manage db_sync" glance
@@ -9,8 +10,7 @@
   retries: 5
   delay: 3
   notify:
-    - restart glance-registry
-    - restart glance-api
+    - restart glance services
 
 - meta: flush_handlers
 
index d9d15db..294b35e 100644 (file)
@@ -1,13 +1,11 @@
 ---
-- include_vars: "{{ ansible_os_family }}.yml"
-
 - name: install glance packages
-  action: "{{ ansible_pkg_mgr }} name={{ item }} state=latest force=yes"
-  with_items: glance_packages
+  action: "{{ ansible_pkg_mgr }} name={{ item }} state=latest"
+  with_items: packages | union(packages_noarch)
 
 - name: generate glance service list
   shell: echo {{ item }} >> /opt/service
-  with_items: glance_services
+  with_items: services | union(services_noarch)
 
 - name: update glance conf
   template: src={{ item }} dest=/etc/glance/{{ item }}
@@ -16,8 +14,7 @@
     - glance-api.conf
     - glance-registry.conf
   notify:
-    - restart glance-registry
-    - restart glance-api
+    - restart glance services
 
 - name: remove default sqlite db
   shell: rm /var/lib/glance/glance.sqlite || touch glance.sqllite.db.removed
index 70ed91d..dc1be97 100644 (file)
@@ -1,7 +1,7 @@
 ---
 - name: install packages
-  action: "{{ ansible_pkg_mgr }} name=nfs-common state=present"
-  with_items: nfs-packages
+  action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+  with_items: nfs_packages
 
 - name: install nfs
   local_action: yum  name=nfs-utils state=present
index 2ce7479..3046ab3 100644 (file)
@@ -1,9 +1,9 @@
 [DEFAULT]
 # Show more verbose log output (sets INFO log level output)
-#verbose = False
+verbose = {{ VERBOSE }}
 
 # Show debugging output in logs (sets DEBUG log level output)
-#debug = False
+debug = {{ DEBUG }}
 
 # Which backend scheme should Glance use by default is not specified
 # in a request to add a new image to Glance? Known schemes are determined
index feacea1..b295ff2 100644 (file)
@@ -1,11 +1,11 @@
 ---
-glance_packages:
+
+packages:
   - glance
-  - python-glanceclient
 
-glance_services:
+nfs_packages:
+  - nfs-common
+
+services:
   - glance-registry
   - glance-api
-
-nfs-packages:
-  - nfs-common
diff --git a/deploy/adapters/ansible/roles/glance/vars/RedHat.yml b/deploy/adapters/ansible/roles/glance/vars/RedHat.yml
new file mode 100644 (file)
index 0000000..e26b703
--- /dev/null
@@ -0,0 +1,11 @@
+---
+packages:
+  - openstack-glance
+
+nfs_packages:
+  - nfs-utils
+  - rpcbind
+
+services:
+  - openstack-glance-api
+  - openstack-glance-registry
diff --git a/deploy/adapters/ansible/roles/glance/vars/main.yml b/deploy/adapters/ansible/roles/glance/vars/main.yml
new file mode 100644 (file)
index 0000000..83e90ff
--- /dev/null
@@ -0,0 +1,5 @@
+---
+packages_noarch:
+  - python-glanceclient
+
+services_noarch: []
index eeaf592..edd5e6d 100644 (file)
@@ -3,14 +3,14 @@
 
 - name: install keepalived xinet haproxy
   action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
-  with_items: packages
+  with_items: packages | union(packages_noarch)
 
 - name: generate ha service list
   shell: echo {{ item }} >> /opt/service
-  with_items: services
+  with_items: services | union(services_noarch)
 
 - name: install pexpect
-  pip: name=pexpect state=present
+  pip: name=pexpect state=present extra_args='--pre'
 
 - name: activate ip_nonlocal_bind
   sysctl: name=net.ipv4.ip_nonlocal_bind value=1
@@ -37,6 +37,7 @@
               regexp="ENABLED=*"
               line="ENABLED=1"
   notify: restart haproxy
+  when: ansible_os_family == "Debian"
 
 - name: set haproxy log
   lineinfile: dest=/etc/rsyslog.conf state=present
@@ -76,6 +77,7 @@
   lineinfile: dest=/etc/default/keepalived state=present
               regexp="^DAEMON_ARGS=*"
               line="DAEMON_ARGS=\"-D -d -S 1\""
+  when: ansible_os_family == "Debian"
 
 - name: set keepalived log
   lineinfile: dest=/etc/rsyslog.conf state=present
index e3877e6..ebfa65f 100644 (file)
@@ -28,7 +28,7 @@ def ssh_get_hostname(ip):
         return result.split(os.linesep)[1]
     except pxssh.ExceptionPxssh as e:
         LOG.error("pxssh failed on login.")
-    raise
+        raise
 
 def failover(mode):
     config = ConfigParser.ConfigParser()
index 02c5532..f9f9191 100644 (file)
@@ -15,7 +15,6 @@ global_defs {
 }
 
 vrrp_instance VI_1 {
-
     interface {{ INTERNAL_INTERFACE }}
     virtual_router_id 51
     state BACKUP
index 263c2ea..041cf47 100644 (file)
@@ -1,10 +1,3 @@
 ---
-packages:
-  - keepalived
-  - xinetd
-  - haproxy
-
-services:
-  - keepalived
-  - xinetd
-  - haproxy
+services: []
+packages: []
diff --git a/deploy/adapters/ansible/roles/ha/vars/RedHat.yml b/deploy/adapters/ansible/roles/ha/vars/RedHat.yml
new file mode 100644 (file)
index 0000000..041cf47
--- /dev/null
@@ -0,0 +1,3 @@
+---
+services: []
+packages: []
diff --git a/deploy/adapters/ansible/roles/ha/vars/main.yml b/deploy/adapters/ansible/roles/ha/vars/main.yml
new file mode 100644 (file)
index 0000000..3928b78
--- /dev/null
@@ -0,0 +1,10 @@
+---
+packages_noarch:
+  - keepalived
+  - xinetd
+  - haproxy
+
+services_noarch:
+  - keepalived
+  - xinetd
+  - haproxy
index 568c040..32d2b6b 100644 (file)
@@ -2,11 +2,12 @@
 - include_vars: "{{ ansible_os_family }}.yml"
 
 - name: install keystone packages
-  action: "{{ ansible_pkg_mgr }} name=keystone state=present force=yes"
+  action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+  with_items: packages | union(packages_noarch)
 
 - name: generate keystone service list
   shell: echo {{ item }} >> /opt/service
-  with_items: services
+  with_items: services | union(services_noarch)
 
 - name: update keystone conf
   template: src=keystone.conf dest=/etc/keystone/keystone.conf backup=yes
   shell: rm /var/lib/keystone/keystone.db || echo sqllite database already removed
 
 - name: cron job to purge expired tokens hourly
-  shell: (crontab -l -u keystone 2>&1 | grep -q token_flush) || echo '@hourly /usr/bin/keystone-manage token_flush > /var/log/keystone/keystone-tokenflush.log 2>&1' >> /var/spool/cron/crontabs/keystone
+  shell: (crontab -l -u keystone 2>&1 | grep -q token_flush) || echo '@hourly /usr/bin/keystone-manage token_flush > /var/log/keystone/keystone-tokenflush.log 2>&1' >> {{ cron_path }}/keystone
 
 - name: modify keystone cron rights
-  file: path=/var/spool/cron/crontabs/keystone mode=0600
+  file: path={{ cron_path }}/keystone mode=0600
 
 - name: keystone source files
   template: src={{ item }} dest=/opt/{{ item }}
@@ -26,5 +27,6 @@
     - admin-openrc.sh
     - demo-openrc.sh
 
-- name: manually start keystone
-  service: name=keystone state=restarted enabled=yes
+- name: restart keystone services
+  service: name={{ item }} state=restarted enabled=yes
+  with_items: services | union(services_noarch)
index 101dc46..e8998a4 100644 (file)
@@ -1,6 +1,9 @@
 ---
+
+cron_path: "/var/spool/cron/crontabs"
+
 packages:
   - keystone
 
 services:
-  - "keystone"
+  - keystone
diff --git a/deploy/adapters/ansible/roles/keystone/vars/RedHat.yml b/deploy/adapters/ansible/roles/keystone/vars/RedHat.yml
new file mode 100644 (file)
index 0000000..91d1217
--- /dev/null
@@ -0,0 +1,8 @@
+---
+cron_path: "/var/spool/cron"
+
+packages:
+  - openstack-keystone
+
+services:
+  - openstack-keystone
diff --git a/deploy/adapters/ansible/roles/keystone/vars/main.yml b/deploy/adapters/ansible/roles/keystone/vars/main.yml
new file mode 100644 (file)
index 0000000..cc24916
--- /dev/null
@@ -0,0 +1,6 @@
+---
+packages_noarch:
+  - python-keystoneclient
+
+services_noarch: []
+
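The token_flush cron line is written straight into {{ cron_path }}/keystone, which is why the path has to differ per distro (Debian keeps per-user crontabs in /var/spool/cron/crontabs, RedHat in /var/spool/cron). An equivalent sketch using Ansible's cron module, which hides that path difference; this is an alternative, not what the change above does:

    - name: cron job to purge expired tokens hourly
      cron: name="keystone token flush" user=keystone special_time=hourly
            job="/usr/bin/keystone-manage token_flush > /var/log/keystone/keystone-tokenflush.log 2>&1"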
index 9e574c1..edfc8a7 100644 (file)
@@ -7,7 +7,7 @@
 
 - name: install rabbitmq-server
   action: "{{ ansible_pkg_mgr }} name=rabbitmq-server state=present"
-  with_items: packages
+  with_items: packages | union(packages_noarch)
 
 - name: stop rabbitmq-server
   service: name=rabbitmq-server
@@ -27,7 +27,7 @@
 
 - name: generate mq service list
   shell: echo {{ item }} >> /opt/service
-  with_items: services
+  with_items: services_noarch
 
 - name: modify rabbitmq password
   command: rabbitmqctl change_password guest {{ RABBIT_PASS }}
index a9a9ac4..c233bda 100644 (file)
@@ -1,6 +1,4 @@
 ---
-packages:
-  - rabbitmq-server
+services: []
+packages: []
 
-services:
-  - "rabbitmq-server"
diff --git a/deploy/adapters/ansible/roles/mq/vars/RedHat.yml b/deploy/adapters/ansible/roles/mq/vars/RedHat.yml
new file mode 100644 (file)
index 0000000..c233bda
--- /dev/null
@@ -0,0 +1,4 @@
+---
+services: []
+packages: []
+
diff --git a/deploy/adapters/ansible/roles/mq/vars/main.yml b/deploy/adapters/ansible/roles/mq/vars/main.yml
new file mode 100644 (file)
index 0000000..2c48a64
--- /dev/null
@@ -0,0 +1,6 @@
+---
+packages_noarch:
+  - rabbitmq-server
+
+services_noarch:
+  - rabbitmq-server
index 36d779d..f19c937 100644 (file)
@@ -1,13 +1,3 @@
 ---
 - name: restart neutron-plugin-openvswitch-agent
-  service: name=neutron-plugin-openvswitch-agent state=restarted enabled=yes
-  when: "'opendaylight' not in {{ NEUTRON_MECHANISM_DRIVERS }}"
-
-- name: restart neutron-l3-agent
-  service: name=neutron-l3-agent state=restarted enabled=yes
-
-- name: restart neutron-dhcp-agent
-  service: name=neutron-dhcp-agent state=restarted enabled=yes
-
-- name: restart neutron-metadata-agent
-  service: name=neutron-metadata-agent state=restarted enabled=yes
+  service: name={{ neutron_plugin_openvswitch_agent_services }} state=restarted enabled=yes
index 36d779d..25d8004 100644 (file)
@@ -1,13 +1,4 @@
 ---
-- name: restart neutron-plugin-openvswitch-agent
-  service: name=neutron-plugin-openvswitch-agent state=restarted enabled=yes
-  when: "'opendaylight' not in {{ NEUTRON_MECHANISM_DRIVERS }}"
-
-- name: restart neutron-l3-agent
-  service: name=neutron-l3-agent state=restarted enabled=yes
-
-- name: restart neutron-dhcp-agent
-  service: name=neutron-dhcp-agent state=restarted enabled=yes
-
-- name: restart neutron-metadata-agent
-  service: name=neutron-metadata-agent state=restarted enabled=yes
+- name: restart neutron compute service
+  service: name={{ item }} state=restarted enabled=yes
+  with_items: services | union(services_noarch)
index 3a811ec..fbc4138 100644 (file)
           value=0 state=present reload=yes
 
 - name: install compute-related neutron packages
-  action: "{{ ansible_pkg_mgr }} name={{ item }} state=present force=yes"
-  with_items: packages
+  action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+  with_items: packages | union(packages_noarch)
 
-- name: install neutron openvswitch agent
-  action: "{{ ansible_pkg_mgr }} name={{ item }} state=present force=yes"
-  with_items: neutron_plugin_openvswitch_agent_packages
-  when: "'opendaylight' not in {{ NEUTRON_MECHANISM_DRIVERS }}"
+- name: fix openstack neutron plugin config file
+  shell: |
+    sed -i 's,plugins/openvswitch/ovs_neutron_plugin.ini,plugin.ini,g' /usr/lib/systemd/system/neutron-openvswitch-agent.service
+    systemctl daemon-reload
+  when: ansible_os_family == 'RedHat'
 
-- name: generate neutron computer service list
+- name: generate neutron compute service list
   shell: echo {{ item }} >> /opt/service
-  with_items: neutron-plugin-openvswitch-agent-services
-  when: "'opendaylight' not in {{ NEUTRON_MECHANISM_DRIVERS }}"
-
-- name: config neutron
-  template: src=neutron-network.conf
-            dest=/etc/neutron/neutron.conf backup=yes
-  notify:
-    - restart neutron-plugin-openvswitch-agent
+  with_items: services | union(services_noarch)
 
 - name: config ml2 plugin
   template: src=ml2_conf.ini
             dest=/etc/neutron/plugins/ml2/ml2_conf.ini
             backup=yes
+
+- name: ln plugin.ini
+  file: src=/etc/neutron/plugins/ml2/ml2_conf.ini dest=/etc/neutron/plugin.ini state=link
+
+- name: config neutron
+  template: src=neutron-network.conf
+            dest=/etc/neutron/neutron.conf backup=yes
   notify:
-    - restart neutron-plugin-openvswitch-agent
+    - restart neutron compute service
+    - restart nova-compute services
+
+- meta: flush_handlers
 
 - name: add br-int
   openvswitch_bridge: bridge=br-int state=present
-  notify:
-    - restart neutron-plugin-openvswitch-agent
-    - restart nova-compute
 
 - include: ../../neutron-network/tasks/odl.yml
   when: "'opendaylight' in {{ NEUTRON_MECHANISM_DRIVERS }}"
 
-- meta: flush_handlers
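On CentOS the stock neutron-openvswitch-agent unit file still points at plugins/openvswitch/ovs_neutron_plugin.ini, so the task above rewrites it to plugin.ini (the /etc/neutron/plugin.ini symlink created a few tasks later) and reloads systemd. The same fix could be expressed with the replace module instead of sed; a sketch, not what this change actually does:

    - name: point the ovs agent unit file at plugin.ini
      replace: dest=/usr/lib/systemd/system/neutron-openvswitch-agent.service
               regexp='plugins/openvswitch/ovs_neutron_plugin\.ini'
               replace='plugin.ini'
      when: ansible_os_family == 'RedHat'

    - name: reload systemd unit files
      command: systemctl daemon-reload
      when: ansible_os_family == 'RedHat'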
index 4988cb0..4706d1d 100644 (file)
@@ -3,7 +3,7 @@ dhcpbridge_flagfile=/etc/nova/nova.conf
 dhcpbridge=/usr/bin/nova-dhcpbridge
 logdir=/var/log/nova
 state_path=/var/lib/nova
-lock_path=/var/lock/nova
+lock_path=/var/lib/nova/tmp
 force_dhcp_release=True
 iscsi_helper=tgtadm
 libvirt_use_virtio_for_bridges=True
index cea31b3..9c58b36 100644 (file)
@@ -1,12 +1,11 @@
 ---
+
 packages:
   - neutron-common
   - neutron-plugin-ml2
   - openvswitch-datapath-dkms
   - openvswitch-switch
-
-neutron_plugin_openvswitch_agent_packages:
   - neutron-plugin-openvswitch-agent
 
-neutron-plugin-openvswitch-agent-services:
-  - "neutron-plugin-openvswitch-agent"
+services:
+  - neutron-plugin-openvswitch-agent
diff --git a/deploy/adapters/ansible/roles/neutron-compute/vars/RedHat.yml b/deploy/adapters/ansible/roles/neutron-compute/vars/RedHat.yml
new file mode 100644 (file)
index 0000000..cf88215
--- /dev/null
@@ -0,0 +1,9 @@
+---
+packages:
+  - openstack-neutron-ml2
+  - openstack-neutron-openvswitch
+
+services:
+  - openvswitch
+  - neutron-openvswitch-agent
+  - libvirtd
diff --git a/deploy/adapters/ansible/roles/neutron-compute/vars/main.yml b/deploy/adapters/ansible/roles/neutron-compute/vars/main.yml
new file mode 100644 (file)
index 0000000..d43b835
--- /dev/null
@@ -0,0 +1,4 @@
+---
+packages_noarch: []
+
+services_noarch: []
index b4c1585..a9d9bd1 100644 (file)
@@ -1,24 +1,4 @@
 ---
-- name: restart nova-api
-  service: name=nova-api state=restarted enabled=yes
-
-- name: restart nova-cert
-  service: name=nova-cert state=restarted enabled=yes
-
-- name: restart nova-consoleauth
-  service: name=nova-consoleauth state=restarted enabled=yes
-
-- name: restart nova-scheduler
-  service: name=nova-scheduler state=restarted enabled=yes
-
-- name: restart nova-conductor
-  service: name=nova-conductor state=restarted enabled=yes
-
-- name: restart nova-novncproxy
-  service: name=nova-novncproxy state=restarted enabled=yes
-
-- name: remove nova-sqlite-db
-  shell: rm /var/lib/nova/nova.sqlite || touch nova.sqlite.db.removed
-
-- name: restart neutron-server
-  service: name=neutron-server state=restarted enabled=yes
+- name: restart neutron control services
+  service: name={{ item }} state=restarted enabled=yes
+  with_items: services | union(services_noarch)
index 77cc29a..c575b32 100644 (file)
@@ -6,5 +6,6 @@
   retries: 5
   delay: 3
   notify:
-    - restart neutron-server
+    - restart neutron control services
 
+- meta: flush_handlers
index 08a8ce6..028419b 100644 (file)
@@ -2,12 +2,12 @@
 - include_vars: "{{ ansible_os_family }}.yml"
 
 - name: install controller-related neutron packages
-  action: "{{ ansible_pkg_mgr }} name={{ item }} state=present force=yes"
-  with_items: packages
+  action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+  with_items: packages | union(packages_noarch)
 
-- name: generate neutron controll service list
+- name: generate neutron control service list
   shell: echo {{ item }} >> /opt/service
-  with_items: services
+  with_items: services | union(services_noarch)
 
 - name: get tenant id to fill neutron.conf
   shell: keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 tenant-get service | grep id | awk '{print $4}'
 
 - name: update neutron conf
   template: src=neutron.conf dest=/etc/neutron/neutron.conf backup=yes
-  notify:
-    - restart neutron-server
 
 - name: update ml2 plugin conf
   template: src=ml2_conf.ini dest=/etc/neutron/plugins/ml2/ml2_conf.ini backup=yes
-  notify:
-    - restart neutron-server
-
-- meta: flush_handlers
 
+- name: ln plugin.ini
+  file: src=/etc/neutron/plugins/ml2/ml2_conf.ini dest=/etc/neutron/plugin.ini state=link
index 9587073..2b2bd9b 100644 (file)
@@ -3,7 +3,7 @@ dhcpbridge_flagfile=/etc/nova/nova.conf
 dhcpbridge=/usr/bin/nova-dhcpbridge
 logdir=/var/log/nova
 state_path=/var/lib/nova
-lock_path=/var/lock/nova
+lock_path=/var/lib/nova/tmp
 force_dhcp_release=True
 iscsi_helper=tgtadm
 libvirt_use_virtio_for_bridges=True
index c010e9a..55800f7 100644 (file)
@@ -2,7 +2,5 @@
 packages:
   - neutron-server
   - neutron-plugin-ml2
-services:
-  - neutron-server
-  - neutron-plugin-ml2
 
+services: []
diff --git a/deploy/adapters/ansible/roles/neutron-controller/vars/RedHat.yml b/deploy/adapters/ansible/roles/neutron-controller/vars/RedHat.yml
new file mode 100644 (file)
index 0000000..14eb730
--- /dev/null
@@ -0,0 +1,7 @@
+---
+packages:
+  - openstack-neutron
+  - openstack-neutron-ml2
+
+services:
+  - neutron-server
diff --git a/deploy/adapters/ansible/roles/neutron-controller/vars/main.yml b/deploy/adapters/ansible/roles/neutron-controller/vars/main.yml
new file mode 100644 (file)
index 0000000..453c1d5
--- /dev/null
@@ -0,0 +1,6 @@
+---
+packages_noarch: []
+
+services_noarch:
+  - neutron-server
+
diff --git a/deploy/adapters/ansible/roles/neutron-network/files/xorp b/deploy/adapters/ansible/roles/neutron-network/files/xorp
new file mode 100644 (file)
index 0000000..5a48610
--- /dev/null
@@ -0,0 +1,23 @@
+# Defaults for xorp initscript
+# sourced by /etc/init.d/xorp
+# installed at /etc/default/xorp by the maintainer scripts
+
+#
+# This is a POSIX shell fragment
+#
+
+# Master system-wide xorp switch. The initscript
+# will not run if it is not set to yes.
+
+RUN="yes"
+
+
+# Additional options that are passed to the rtrmgr Daemon.
+# e.g. :
+# -a <allowed host> Host allowed by the finder
+# -n <allowed net>  Subnet allowed by the finder
+# -v        Print verbose information
+# -b <file> Specify boot file
+# -d        Run as a daemon, detach from tty
+
+DAEMON_OPTS="-b /etc/xorp/config.boot "
index d6c5cc8..7e67b76 100644 (file)
@@ -1,21 +1,19 @@
 ---
-- name: restart neutron-plugin-openvswitch-agent
-  service: name=neutron-plugin-openvswitch-agent state=restarted enabled=yes
-  when: "'opendaylight' not in {{ NEUTRON_MECHANISM_DRIVERS }}"
+- name: restart common neutron network relation service
+  service: name={{ item }} state=restarted enabled=yes
+  with_items: services_noarch
 
-- name: restart neutron-l3-agent
-  service: name=neutron-l3-agent state=restarted enabled=yes
+- name: restart neutron network relation service
+  service: name={{ item }} state=restarted enabled=yes
+  with_items: services
+
+- name: restart openvswitch agent service
+  service: name=neutron-openvswitch-agent state=restarted enabled=yes
 
 - name: kill dnsmasq
   command: killall dnsmasq
   ignore_errors: True
 
-- name: restart neutron-dhcp-agent
-  service: name=neutron-dhcp-agent state=restarted enabled=yes
-
-- name: restart neutron-metadata-agent
-  service: name=neutron-metadata-agent state=restarted enabled=yes
-
 - name: restart xorp
   service: name=xorp state=restarted enabled=yes sleep=10
   ignore_errors: True
index d6f38a0..c9406ca 100644 (file)
@@ -1,6 +1,10 @@
 ---
 - name: Install XORP to provide IGMP router functionality
-  apt: pkg=xorp
+  action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+  with_items: xorp_packages
+
+- name: create xorp directory
+  file: path=/etc/xorp state=directory
 
 - name: configure xorp
   template: src=etc/xorp/config.boot dest=/etc/xorp/config.boot
@@ -8,7 +12,7 @@
     - restart xorp
 
 - name: set xorp defaults
-  lineinfile: dest=/etc/default/xorp regexp=^RUN= line=RUN=yes
+  copy: src=xorp dest=/etc/default/xorp
   notify:
     - restart xorp
 
index f0953e0..758f320 100644 (file)
           value=0 state=present reload=yes
 
 - name: install neutron network related packages
-  action: "{{ ansible_pkg_mgr }} name={{ item }} state=present force=yes"
-  with_items: packages
+  action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+  with_items: packages | union(packages_noarch)
 
-- name: install neutron openvswitch agent
-  action: "{{ ansible_pkg_mgr }} name={{ item }} state=present force=yes"
-  with_items: neutron_plugin_openvswitch_agent_packages
-  when: "'openvswitch' in {{ NEUTRON_MECHANISM_DRIVERS }}"
-
-- name: generate neutron service list
+- name: generate neutron network service list
   shell: echo {{ item }} >> /opt/service
-  with_items: services
+  with_items: services | union(services_noarch)
 
-- name: generate neutron-plugin-openvswitch-agent service list
-  shell: echo {{ item }} >> /opt/service
-  with_items: neutron-plugin-openvswitch-agent-services
-  when: "'openvswitch' in {{ NEUTRON_MECHANISM_DRIVERS }}"
-
-- name: config neutron
-  template: src=neutron-network.conf
-            dest=/etc/neutron/neutron.conf backup=yes
-  notify:
-    - restart neutron-plugin-openvswitch-agent
-    - restart neutron-l3-agent
-    - kill dnsmasq
-    - restart neutron-dhcp-agent
-    - restart neutron-metadata-agent
+- name: fix openstack neutron plugin config file
+  shell: |
+    sed -i 's,plugins/openvswitch/ovs_neutron_plugin.ini,plugin.ini,g' /usr/lib/systemd/system/neutron-openvswitch-agent.service
+    systemctl daemon-reload
+  when: ansible_os_family == 'RedHat'
 
 - name: config l3 agent
   template: src=l3_agent.ini dest=/etc/neutron/l3_agent.ini
             backup=yes
-  notify:
-    - restart neutron-l3-agent
 
 - name: config dhcp agent
   template: src=dhcp_agent.ini dest=/etc/neutron/dhcp_agent.ini
             backup=yes
-  notify:
-    - kill dnsmasq
-    - restart neutron-dhcp-agent
 
 - name: update dnsmasq-neutron.conf
   template: src=dnsmasq-neutron.conf
             dest=/etc/neutron/dnsmasq-neutron.conf
-  notify:
-    - kill dnsmasq
-    - restart neutron-dhcp-agent
 
 - name: config metadata agent
   template: src=metadata_agent.ini
             dest=/etc/neutron/metadata_agent.ini backup=yes
-  notify:
-    - restart neutron-metadata-agent
 
 - name: config ml2 plugin
   template: src=ml2_conf.ini
             dest=/etc/neutron/plugins/ml2/ml2_conf.ini
             backup=yes
+
+- name: config neutron
+  template: src=neutron-network.conf
+            dest=/etc/neutron/neutron.conf backup=yes
   notify:
-    - restart neutron-plugin-openvswitch-agent
+    - restart common neutron network relation service
+    - restart neutron network relation service
+    - kill dnsmasq
 
 - meta: flush_handlers
 
   openvswitch_bridge: bridge=br-ex state=present
   when: "'openvswitch' in {{ NEUTRON_MECHANISM_DRIVERS }}"
 
+- name: ln plugin.ini
+  file: src=/etc/neutron/plugins/ml2/ml2_conf.ini dest=/etc/neutron/plugin.ini state=link
+
+- name: restart openvswitch-agent service
+  service: name={{ openvswitch_agent }} state=restarted enabled=yes
+
+- meta: flush_handlers
+
 - name: assign a port to br-ex for physical ext interface
   openvswitch_port: bridge=br-ex port={{ INTERFACE_NAME }}
                     state=present
   when: "'openvswitch' in {{ NEUTRON_MECHANISM_DRIVERS }}"
 
 - include: igmp-router.yml
-  when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES }}"
+  when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES }} and ansible_os_family == 'Debian'"
 
 - name: assert kernel support for vxlan
   command: modinfo -F version vxlan
 
 - include: odl.yml
   when: "'opendaylight' in {{ NEUTRON_MECHANISM_DRIVERS }}"
-
-- name: restart ovs service
-  service: name=openvswitch-switch state=restarted enabled=yes
-
-- meta: flush_handlers
index 9587073..2b2bd9b 100644 (file)
@@ -3,7 +3,7 @@ dhcpbridge_flagfile=/etc/nova/nova.conf
 dhcpbridge=/usr/bin/nova-dhcpbridge
 logdir=/var/log/nova
 state_path=/var/lib/nova
-lock_path=/var/lock/nova
+lock_path=/var/lib/nova/tmp
 force_dhcp_release=True
 iscsi_helper=tgtadm
 libvirt_use_virtio_for_bridges=True
index 0350c42..ee511a2 100644 (file)
@@ -5,16 +5,13 @@ packages:
   - openvswitch-switch
   - neutron-l3-agent
   - neutron-dhcp-agent
+  - neutron-plugin-openvswitch-agent
 
 services:
-  - neutron-plugin-ml2
-  - openvswitch-datapath-dkms
   - openvswitch-switch
-  - neutron-l3-agent
-  - neutron-dhcp-agent
-
-neutron_plugin_openvswitch_agent_packages:
   - neutron-plugin-openvswitch-agent
 
-neutron-plugin-openvswitch-agent-services:
-  - neutron-plugin-openvswitch-agent
+openvswitch_agent: neutron-plugin-openvswitch-agent
+
+xorp_packages:
+  - xorp
diff --git a/deploy/adapters/ansible/roles/neutron-network/vars/RedHat.yml b/deploy/adapters/ansible/roles/neutron-network/vars/RedHat.yml
new file mode 100644 (file)
index 0000000..4be67bd
--- /dev/null
@@ -0,0 +1,14 @@
+---
+packages:
+  - openstack-neutron-ml2
+  - openstack-neutron-openvswitch
+
+services:
+  - openvswitch
+  - neutron-openvswitch-agent
+
+openvswitch_agent: neutron-openvswitch-agent
+
+xorp_packages:
+  - openssl098e
+  - xorp
diff --git a/deploy/adapters/ansible/roles/neutron-network/vars/main.yml b/deploy/adapters/ansible/roles/neutron-network/vars/main.yml
new file mode 100644 (file)
index 0000000..77698b1
--- /dev/null
@@ -0,0 +1,7 @@
+---
+packages_noarch: []
+
+services_noarch:
+  - neutron-l3-agent
+  - neutron-dhcp-agent
+  - neutron-metadata-agent
index c135003..6910c81 100644 (file)
@@ -1,3 +1,4 @@
 ---
-- name: restart nova-compute
-  service: name=nova-compute state=restarted enabled=yes
+- name: restart nova-compute services
+  service: name={{ item }} state=restarted enabled=yes
+  with_items: services | union(services_noarch)
index 6c42689..14c5450 100644 (file)
@@ -2,8 +2,8 @@
 - include_vars: "{{ ansible_os_family }}.yml"
 
 - name: install nova-compute related packages
-  action: "{{ ansible_pkg_mgr }} name={{ item }} state=present force=yes"
-  with_items: packages
+  action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+  with_items: packages | union(packages_noarch)
 
 - name: update nova-compute conf
   template: src={{ item }} dest=/etc/nova/{{ item }}
     - nova.conf
     - nova-compute.conf
   notify:
-    - restart nova-compute
+    - restart nova-compute services
 
-- name: generate neutron controll service list
+- name: generate neutron control service list
   shell: echo {{ item }} >> /opt/service
-  with_items: services
+  with_items: services | union(services_noarch)
 
 - meta: flush_handlers
 
index 4988cb0..4706d1d 100644 (file)
@@ -3,7 +3,7 @@ dhcpbridge_flagfile=/etc/nova/nova.conf
 dhcpbridge=/usr/bin/nova-dhcpbridge
 logdir=/var/log/nova
 state_path=/var/lib/nova
-lock_path=/var/lock/nova
+lock_path=/var/lib/nova/tmp
 force_dhcp_release=True
 iscsi_helper=tgtadm
 libvirt_use_virtio_for_bridges=True
diff --git a/deploy/adapters/ansible/roles/nova-compute/vars/RedHat.yml b/deploy/adapters/ansible/roles/nova-compute/vars/RedHat.yml
new file mode 100644 (file)
index 0000000..7f5d388
--- /dev/null
@@ -0,0 +1,8 @@
+---
+packages:
+  - openstack-nova-compute
+  - sysfsutils
+
+services:
+  - libvirtd
+  - openstack-nova-compute
diff --git a/deploy/adapters/ansible/roles/nova-compute/vars/main.yml b/deploy/adapters/ansible/roles/nova-compute/vars/main.yml
new file mode 100644 (file)
index 0000000..d43b835
--- /dev/null
@@ -0,0 +1,4 @@
+---
+packages_noarch: []
+
+services_noarch: []
index 0f92294..37b106a 100644 (file)
@@ -1,21 +1,7 @@
 ---
-- name: restart nova-api
-  service: name=nova-api state=restarted enabled=yes
-
-- name: restart nova-cert
-  service: name=nova-cert state=restarted enabled=yes
-
-- name: restart nova-consoleauth
-  service: name=nova-consoleauth state=restarted enabled=yes
-
-- name: restart nova-scheduler
-  service: name=nova-scheduler state=restarted enabled=yes
-
-- name: restart nova-conductor
-  service: name=nova-conductor state=restarted enabled=yes
-
-- name: restart nova-novncproxy
-  service: name=nova-novncproxy state=restarted enabled=yes
+- name: restart nova service
+  service: name={{ item }} state=restarted enabled=yes
+  with_items: services | union(services_noarch)
 
 - name: remove nova-sqlite-db
   shell: rm /var/lib/nova/nova.sqlite || touch nova.sqlite.db.removed
index 62351fa..fbf8e77 100644 (file)
@@ -6,11 +6,6 @@
   retries: 5
   delay: 3
   notify:
-    - restart nova-api
-    - restart nova-cert
-    - restart nova-consoleauth
-    - restart nova-scheduler
-    - restart nova-conductor
-    - restart nova-novncproxy
+    - restart nova service
 
 - meta: flush_handlers
index 83bc287..bb1dbac 100644 (file)
@@ -2,22 +2,17 @@
 - include_vars: "{{ ansible_os_family }}.yml"
 
 - name: install nova related packages
-  action: "{{ ansible_pkg_mgr }} name={{ item }} state=present force=yes"
-  with_items: packages
+  action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+  with_items: packages | union(packages_noarch)
 
-- name: generate nova controll service list
+- name: generate nova control service list
   shell: echo {{ item }} >> /opt/service
-  with_items: services
+  with_items: services | union(services_noarch)
 
 - name: update nova conf
   template: src=nova.conf
             dest=/etc/nova/nova.conf
             backup=yes
   notify:
-    - restart nova-api
-    - restart nova-cert
-    - restart nova-consoleauth
-    - restart nova-scheduler
-    - restart nova-conductor
-    - restart nova-novncproxy
+    - restart nova service
     - remove nova-sqlite-db
index 4c837e3..9b4280c 100644 (file)
@@ -3,7 +3,7 @@ dhcpbridge_flagfile=/etc/nova/nova.conf
 dhcpbridge=/usr/bin/nova-dhcpbridge
 logdir=/var/log/nova
 state_path=/var/lib/nova
-lock_path=/var/lock/nova
+lock_path=/var/lib/nova/tmp
 force_dhcp_release=True
 iscsi_helper=tgtadm
 libvirt_use_virtio_for_bridges=True
index 7cd81ca..e96a1b1 100644 (file)
@@ -6,7 +6,6 @@ packages:
   - nova-consoleauth
   - nova-novncproxy
   - nova-scheduler
-  - python-novaclient
   - python-oslo.rootwrap
 
 services:
diff --git a/deploy/adapters/ansible/roles/nova-controller/vars/RedHat.yml b/deploy/adapters/ansible/roles/nova-controller/vars/RedHat.yml
new file mode 100644 (file)
index 0000000..e2709a3
--- /dev/null
@@ -0,0 +1,16 @@
+---
+packages:
+  - openstack-nova-api
+  - openstack-nova-cert
+  - openstack-nova-conductor
+  - openstack-nova-console
+  - openstack-nova-novncproxy
+  - openstack-nova-scheduler
+
+services:
+  - openstack-nova-api
+  - openstack-nova-cert
+  - openstack-nova-conductor
+  - openstack-nova-consoleauth
+  - openstack-nova-novncproxy
+  - openstack-nova-scheduler
diff --git a/deploy/adapters/ansible/roles/nova-controller/vars/main.yml b/deploy/adapters/ansible/roles/nova-controller/vars/main.yml
new file mode 100644 (file)
index 0000000..d43b835
--- /dev/null
@@ -0,0 +1,4 @@
+---
+packages_noarch: []
+
+services_noarch: []
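Worked through for nova-controller on CentOS 7.1: services_noarch is empty, so services | union(services_noarch) is exactly the six openstack-nova-* units from vars/RedHat.yml, and the single "restart nova service" handler now covers what used to be six separate handlers. A throwaway playbook to print the merged list (group name and relative paths illustrative):

    ---
    - hosts: controller
      vars_files:
        - roles/nova-controller/vars/main.yml
        - roles/nova-controller/vars/RedHat.yml
      tasks:
        - name: show the services the restart handler will loop over
          debug: msg="{{ item }}"
          with_items: services | union(services_noarch)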