- authorized_key:
user: root
- key: "{{ lookup('file', 'item') }}"
+ key: "{{ lookup('file', item) }}"
with_fileglob:
- /tmp/ssh-keys-*
max_fail_percentage: 0
- hosts: all
remote_user: root
- accelerate: true
max_fail_percentage: 0
roles:
- setup-network
- hosts: ha
remote_user: root
- accelerate: true
max_fail_percentage: 0
roles:
- ha
- hosts: controller
remote_user: root
- accelerate: true
max_fail_percentage: 0
roles:
- memcached
- hosts: all
remote_user: root
- accelerate: true
max_fail_percentage: 0
roles:
- storage
- hosts: compute
remote_user: root
- accelerate: true
max_fail_percentage: 0
roles:
- nova-compute
#- hosts: all
# remote_user: root
-# accelerate: true
-# max_fail_percentage: 0
+## max_fail_percentage: 0
# roles:
# - moon
- hosts: all
remote_user: root
- accelerate: true
max_fail_percentage: 0
roles:
- secgroup
- hosts: ceph_adm
remote_user: root
- accelerate: true
max_fail_percentage: 0
roles: []
# - ceph-deploy
- hosts: ceph
remote_user: root
- accelerate: true
max_fail_percentage: 0
roles:
- ceph-purge
- hosts: ceph_mon
remote_user: root
- accelerate: true
max_fail_percentage: 0
roles:
- ceph-mon
- hosts: ceph_osd
remote_user: root
- accelerate: true
max_fail_percentage: 0
roles:
- ceph-osd
- hosts: ceph
remote_user: root
- accelerate: true
max_fail_percentage: 0
roles:
- ceph-openstack
- hosts: all
remote_user: root
- accelerate: true
max_fail_percentage: 0
roles:
- monitor
- hosts: all
remote_user: root
- accelerate: true
max_fail_percentage: 0
tasks:
- name: set bash to nova
- authorized_key:
user: nova
- key: "{{ lookup('file', 'item') }}"
+ key: "{{ lookup('file', item) }}"
with_fileglob:
- /tmp/ssh-keys-*
- hosts: all
remote_user: root
- accelerate: true
max_fail_percentage: 0
roles:
- odl_cluster
- hosts: all
remote_user: root
- accelerate: true
max_fail_percentage: 0
roles:
- onos_cluster
- hosts: all
remote_user: root
- accelerate: true
serial: 1
max_fail_percentage: 0
roles:
- hosts: all
remote_user: root
- accelerate: true
max_fail_percentage: 0
roles:
- odl_cluster_post
- hosts: controller
remote_user: root
- accelerate: true
max_fail_percentage: 0
roles:
- ext-network
- hosts: controller
remote_user: root
- accelerate: true
max_fail_percentage: 0
roles:
- boot-recovery
- hosts: controller
remote_user: root
- accelerate: true
max_fail_percentage: 0
roles:
- controller-recovery
- hosts: compute
remote_user: root
- accelerate: true
max_fail_percentage: 0
roles:
- compute-recovery
---
- name: restart aodh services
service: name={{ item }} state=restarted enabled=yes
- with_items: services | union(services_noarch)
+ with_items: "{{ services | union(services_noarch) }}"
- name: install aodh packages
action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: packages | union(packages_noarch)
+ with_items: "{{ packages | union(packages_noarch) }}"
- name: update aodh conf
template: src={{ item }} dest=/etc/aodh/aodh.conf backup=yes
- name: write services to monitor list
lineinfile: dest=/opt/service create=yes line='{{ item }}'
- with_items: services | union(services_noarch)
+ with_items: "{{ services | union(services_noarch) }}"
- name: remove default sqlite db
shell: rm /var/lib/aodh/aodh.sqlite || touch aodh.sqllite.db.removed
---
- name: restart apache related services
service: name={{ item }} state=restarted enabled=yes
- with_items: services| union(services_noarch)
+ with_items: "{{ services| union(services_noarch) }}"
- name: install packages
action: "{{ ansible_pkg_mgr }} name={{ item }} state=latest update_cache=yes"
- with_items: packages | union(packages_noarch)
+ with_items: "{{ packages | union(packages_noarch) }}"
- name: assure listen port exist
template:
- name: stop controller services
service: name={{ item }} state=stopped enabled=yes
- with_items: controller_services | union(controller_services_noarch)
+ with_items: "{{ controller_services | union(controller_services_noarch) }}"
when: RECOVERY_ENV
tags:
- recovery-stop-service
---
- name: restart ceilometer service
service: name={{ item }} state=restarted enabled=yes
- with_items: ceilometer_services
+ with_items: "{{ ceilometer_services }}"
- name: restart nova service
service: name={{ item }} state=restarted enabled=yes
- with_items: nova_services
+ with_items: "{{ nova_services }}"
- name: write services to monitor list
lineinfile: dest=/opt/service create=yes line='{{ item }}'
- with_items: ceilometer_services
+ with_items: "{{ ceilometer_services }}"
- meta: flush_handlers
- name: install ceilometer packages
action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: ceilometer_packages | union(packages_noarch)
+ with_items: "{{ ceilometer_packages | union(packages_noarch) }}"
- name: enable auto start
file:
---
- name: restart ceilometer service
service: name={{ item }} state=restarted enabled=yes
- with_items: ceilometer_services
+ with_items: "{{ ceilometer_services }}"
- name: restart glance_cinder service
service: name={{ item }} state=restarted enabled=yes
- with_items: glance_cinder_services
+ with_items: "{{ glance_cinder_services }}"
- name: reload apache server
service: name=apache2 state=reloaded
- name: write services to monitor list
lineinfile: dest=/opt/service create=yes line='{{ item }}'
- with_items: ceilometer_services
+ with_items: "{{ ceilometer_services }}"
- meta: flush_handlers
- name: install ceilometer packages
action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: ceilometer_packages | union(packages_noarch)
+ with_items: "{{ ceilometer_packages | union(packages_noarch) }}"
- name: enable auto start
file:
---
- name: restart cinder control service
service: name={{ item }} state=restarted enabled=yes
- with_items: services | union(services_noarch)
+ with_items: "{{ services | union(services_noarch) }}"
- name: install cinder packages
action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: packages | union(packages_noarch)
+ with_items: "{{ packages | union(packages_noarch) }}"
- name: enable auto start
file:
- name: generate common cinder service list
lineinfile: dest=/opt/service create=yes line='{{ item }}'
- with_items: services | union(services_noarch)
+ with_items: "{{ services | union(services_noarch) }}"
- name: upload cinder conf
template: src=cinder.conf dest=/etc/cinder/cinder.conf
---
- name: restart cinder-volume services
service: name={{ item }} state=restarted enabled=yes
- with_items: services | union(services_noarch)
+ with_items: "{{ services | union(services_noarch) }}"
- name: install cinder-volume and lvm2 packages
action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: packages | union(packages_noarch)
+ with_items: "{{ packages | union(packages_noarch) }}"
- name: enable auto start
file:
- name: generate cinder volume service list
lineinfile: dest=/opt/service create=yes line='{{ item }}'
- with_items: services | union(services_noarch)
+ with_items: "{{ services | union(services_noarch) }}"
- name: upload cinder-volume configuration
template: src=cinder.conf dest=/etc/cinder/cinder.conf
- name: install packages
action: "{{ ansible_pkg_mgr }} name={{ item }} state=latest update_cache=yes"
- with_items: packages | union(packages_noarch)
+ with_items: "{{ packages | union(packages_noarch) }}"
- name: make config template dir exist
file: path=/opt/os_templates state=directory mode=0755
- name: install pip packages
pip: name={{ item }} state=present extra_args='--pre'
- with_items: pip_packages
+ with_items: "{{ pip_packages }}"
- name: install keyczar for accelerate
pip: name=python-keyczar state=present extra_args='--pre'
- name: restart services
service: name={{ item }} state=restarted enabled=yes
- with_items: services| union(services_noarch)
+ with_items: "{{ services| union(services_noarch) }}"
- name: write services to monitor list
lineinfile: dest=/opt/service create=yes line='{{ item }}'
- with_items: services| union(services_noarch)
+ with_items: "{{ services| union(services_noarch) }}"
- name: kill daemon for accelerate
shell: lsof -ni :5099|grep LISTEN|awk '{print $2}'|xargs kill -9
- name: restart compute services
service: name={{ item }} state=restarted enabled=yes
- with_items: compute_services | union(compute_services_noarch)
+ with_items: "{{ compute_services | union(compute_services_noarch) }}"
when: RECOVERY_ENV
tags:
- recovery
- name: restart controller services
service: name={{ item }} state=restarted enabled=yes
- with_items: controller_services | union(controller_services_noarch)
+ with_items: "{{ controller_services | union(controller_services_noarch) }}"
when: RECOVERY_ENV
tags:
- recovery
---
- name: restart dashboard services
service: name={{ item }} state=restarted enabled=yes
- with_items: services | union(services_noarch)
+ with_items: "{{ services | union(services_noarch) }}"
- name: install dashboard packages
action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: packages | union(packages_noarch)
+ with_items: "{{ packages | union(packages_noarch) }}"
- name: enable auto start
file:
- name: install python-mysqldb
action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: maridb_packages | union(packages_noarch)
+ with_items: "{{ maridb_packages | union(packages_noarch) }}"
- name: create conf dir for wsrep
file: path=/etc/my.cnf.d state=directory mode=0755
dest: '{{ item.dest }}'
backup: yes
mode: 0644
- with_items: mysql_config
+ with_items: "{{ mysql_config }}"
- name: bugfix for rsync version 3.1
lineinfile:
- debug: msg='{{ servers.stdout |int }}'
+- name: Add admin user
+ mongodb_user:
+ login_host: "{{ internal_vip.ip }}"
+ database: admin
+ name: root
+ password: root
+ roles: 'root'
+ state: present
+
- name: create mongodb user and db
mongodb_user:
login_host: "{{ internal_vip.ip }}"
+ login_user: root
+ login_password: root
database: ceilometer
name: ceilometer
password: "{{ CEILOMETER_DBPASS }}"
- name: grant user privilege
mongodb_user:
login_host: "{{ internal_vip.ip }}"
+ login_user: root
+ login_password: root
database: ceilometer
name: ceilometer
password: "{{ CEILOMETER_DBPASS }}"
---
- name: install mongodb packages
action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: mongodb_packages | union(packages_noarch)
+ with_items: "{{ mongodb_packages | union(packages_noarch) }}"
- name: install pymongod packages
pip: name={{ item }} state=present extra_args='--pre'
- with_items: pip_packages
+ with_items: "{{ pip_packages }}"
- name: copy ceilometer configs
template: src=mongodb.conf dest=/opt/os_templates backup=yes
neutron subnet-create \
--name {{ public_net_info.subnet }} \
--gateway {{ public_net_info.external_gw }} \
+ --disable-dhcp \
--allocation-pool \
start={{ public_net_info.floating_ip_start }},end={{ public_net_info.floating_ip_end }} \
{{ public_net_info.network }} {{ public_net_info.floating_ip_cidr }}
---
- name: restart glance services
service: name={{ item }} state=restarted enabled=yes
- with_items: services | union(services_noarch)
+ with_items: "{{ services | union(services_noarch) }}"
---
- name: install glance packages
action: "{{ ansible_pkg_mgr }} name={{ item }} state=latest"
- with_items: packages | union(packages_noarch)
+ with_items: "{{ packages | union(packages_noarch) }}"
- name: generate glance service list
lineinfile: dest=/opt/service create=yes line='{{ item }}'
- with_items: services | union(services_noarch)
+ with_items: "{{ services | union(services_noarch) }}"
- name: update glance conf
template: src={{ item }} dest=/etc/glance/{{ item }}
---
- name: install nfs packages
action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: nfs_packages
+ with_items: "{{ nfs_packages }}"
- name: install nfs
local_action: yum name={{ item }} state=present
- name: install keepalived xinet haproxy
action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: packages | union(packages_noarch)
+ with_items: "{{ packages | union(packages_noarch) }}"
- name: generate ha service list
lineinfile: dest=/opt/service create=yes line='{{ item }}'
- with_items: services | union(services_noarch)
+ with_items: "{{ services | union(services_noarch) }}"
- name: install pexpect
pip: name=pexpect state=present extra_args='--pre'
---
- name: restart heat service
service: name={{ item }} state=restarted enabled=yes
- with_items: services | union(services_noarch)
+ with_items: "{{ services | union(services_noarch) }}"
- name: remove heat-sqlite-db
shell: rm /var/lib/heat/heat.sqlite || touch heat.sqlite.db.removed
- name: install heat related packages
action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: packages | union(packages_noarch)
+ with_items: "{{ packages | union(packages_noarch) }}"
- name: generate heat service list
lineinfile: dest=/opt/service create=yes line='{{ item }}'
- with_items: services | union(services_noarch)
-
-# '
+ with_items: "{{ services | union(services_noarch) }}"
- name: create heat user domain
shell: >
---
- name: restart keystone services
service: name={{ item }} state=restarted enabled=yes
- with_items: services | union(services_noarch)
+ with_items: "{{ services | union(services_noarch) }}"
--delete \
/etc/keystone/fernet-keys \
root@{{ hostvars[ item ].ansible_eth0.ipv4.address }}:/etc/keystone/
- with_items: groups['controller'][1:]
+ with_items: "{{ groups['controller'][1:] }}"
notify:
- restart keystone services
--delete \
/etc/keystone/credential-keys \
root@{{ hostvars[ item ].ansible_eth0.ipv4.address }}:/etc/keystone/
- with_items: groups['controller'][1:]
+ with_items: "{{ groups['controller'][1:] }}"
notify:
- restart keystone services
--url {{ item.adminurl }} \
$(openstack endpoint list | grep keystone | grep admin | awk '{print $2}');
with_items: "{{ os_services[0:1] }}"
+ register: result
+ until: result.rc == 0
+ retries: 10
+ delay: 5
- name: add service
shell:
--description "{{ item.description }}" \
{{ item.type }}
with_items: "{{ os_services[1:] }}"
+ register: result
+ until: result.rc == 0
+ retries: 10
+ delay: 5
- name: add project
shell:
. /opt/admin-openrc.sh;
openstack project create --description "Service Project" service;
openstack project create --domain default --description "Demo Project" demo;
+ register: result
+ until: result.rc == 0
+ retries: 10
+ delay: 5
- name: set admin user
shell:
{{ item.user }}
with_items: "{{ os_users }}"
when: item["user"] == "admin"
+ register: result
+ until: result.rc == 0
+ retries: 10
+ delay: 5
- name: add user
shell:
--password "{{ item.password }}" \
{{ item.user }}
with_items: "{{ os_users[1:] }}"
+ register: result
+ until: result.rc == 0
+ retries: 10
+ delay: 5
- name: add roles
shell:
openstack role create {{ item.role }}
with_items: "{{ os_users }}"
when: item["user"] == "demo"
+ register: result
+ until: result.rc == 0
+ retries: 10
+ delay: 5
- name: grant roles
shell:
--user "{{ item.user }}" \
{{ item.role }}
with_items: "{{ os_users }}"
+ register: result
+ until: result.rc == 0
+ retries: 10
+ delay: 5
- name: add endpoints
shell:
--region {{ item.region }} \
{{ item.name }} admin {{ item.adminurl }};
with_items: "{{ os_services[1:] }}"
+ register: result
+ until: result.rc == 0
+ retries: 10
+ delay: 5
+
- name: install keystone packages
action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: packages | union(packages_noarch)
+ with_items: "{{ packages | union(packages_noarch) }}"
- name: enable auto start
file:
- name: generate keystone service list
lineinfile: dest=/opt/service create=yes line='{{ item }}'
- with_items: services | union(services_noarch)
+ with_items: "{{ services | union(services_noarch) }}"
- name: delete sqlite database
file:
internalurl: "http://{{ internal_vip.ip }}:8000/v1"
adminurl: "http://{{ internal_vip.ip }}:8000/v1"
- - name: congress
- type: policy
- region: RegionOne
- description: "OpenStack Policy Service"
- publicurl: "http://{{ public_vip.ip }}:1789"
- internalurl: "http://{{ internal_vip.ip }}:1789"
- adminurl: "http://{{ internal_vip.ip }}:1789"
+# - name: congress
+# type: policy
+# region: RegionOne
+# description: "OpenStack Policy Service"
+# publicurl: "http://{{ public_vip.ip }}:1789"
+# internalurl: "http://{{ internal_vip.ip }}:1789"
+# adminurl: "http://{{ internal_vip.ip }}:1789"
# - name: swift
# type: object-store
tenant: service
tenant_description: "Service Tenant"
- - user: congress
- password: "{{ CONGRESS_PASS }}"
- email: congress@admin.com
- role: admin
- tenant: service
- tenant_description: "Service Tenant"
+# - user: congress
+# password: "{{ CONGRESS_PASS }}"
+# email: congress@admin.com
+# role: admin
+# tenant: service
+# tenant_description: "Service Tenant"
- user: demo
password: "{{ DEMO_PASS }}"
# role: admin
# tenant: service
# tenant_description: "Service Tenant"
+
---
- name: restart memcached services
service: name={{ item }} state=restarted enabled=yes
- with_items: services| union(services_noarch)
+ with_items: "{{ services| union(services_noarch) }}"
- name: install packages
action: "{{ ansible_pkg_mgr }} name={{ item }} state=latest update_cache=yes"
- with_items: packages | union(packages_noarch)
+ with_items: "{{ packages | union(packages_noarch) }}"
- name: enable auto start
file:
- name: install rabbitmq-server
action: "{{ ansible_pkg_mgr }} name=rabbitmq-server state=present"
- with_items: packages | union(packages_noarch)
+ with_items: "{{ packages | union(packages_noarch) }}"
- name: enable auto start
file:
- name: generate mq service list
shell: echo {{ item }} >> /opt/service
- with_items: services_noarch
+ with_items: "{{ services_noarch }}"
---
- name: restart neutron compute service
service: name={{ item }} state=restarted enabled=yes
- with_items: services | union(services_noarch)
+ with_items: "{{ services | union(services_noarch) }}"
- name: restart nova-compute services
service: name=nova-compute state=restarted enabled=yes
- name: install compute-related neutron packages
action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: packages | union(packages_noarch)
+ with_items: "{{ packages | union(packages_noarch) }}"
- name: enable auto start
file:
- name: generate neutron compute service list
lineinfile: dest=/opt/service create=yes line='{{ item }}'
- with_items: services | union(services_noarch)
+ with_items: "{{ services | union(services_noarch) }}"
- name: config ml2 plugin
template: src=templates/ml2_conf.ini
- meta: flush_handlers
- include: ../../neutron-network/tasks/odl.yml
- when: "'opendaylight' in {{ NEUTRON_MECHANISM_DRIVERS }}"
+ when: "'opendaylight' in {{ NEUTRON_MECHANISM_DRIVERS | to_json }}"
---
- name: restart neutron control services
service: name={{ item }} state=restarted enabled=yes
- with_items: services | union(services_noarch)
+ with_items: "{{ services | union(services_noarch) }}"
when: item != "neutron-server"
register: result
run_once: True
until: result.rc == 0
- retries: 5
- delay: 3
+ retries: 10
+ delay: 5
notify:
- restart neutron control services
- name: install controller-related neutron packages
action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: packages | union(packages_noarch)
+ with_items: "{{ packages | union(packages_noarch) }}"
- name: enable auto start
file:
- name: generate neutron control service list
lineinfile: dest=/opt/service create=yes line='{{ item }}'
- with_items: services | union(services_noarch)
+ with_items: "{{ services | union(services_noarch) }}"
- name: get tenant id to fill neutron.conf
shell:
- name: restart neutron network relation service
service: name={{ item }} state=restarted enabled=yes
with_flattened:
- - services_noarch
- - services
+ - "{{ services_noarch }}"
+ - "{{ services }}"
- name: restart openvswitch agent service
service: name=neutron-openvswitch-agent state=restarted enabled=yes
- name: install firewall packages
action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: firewall_packages
+ with_items: "{{ firewall_packages }}"
- name: enable auto start
file:
---
- name: Install XORP to provide IGMP router functionality
action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: xorp_packages
+ with_items: "{{ xorp_packages }}"
- name: create xorp directory
file: path=/etc/xorp state=directory
- name: assert kernel support for vxlan
command: modinfo -F version vxlan
- when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES }}"
+ when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES | to_json }}"
- name: assert iproute2 support for vxlan
command: ip link add type vxlan help
register: iproute_out
failed_when: iproute_out.rc == 255
- when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES }}"
+ when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES | to_json }}"
- name: disable auto start
copy:
- name: install neutron network related packages
action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: packages | union(packages_noarch)
+ with_items: "{{ packages | union(packages_noarch) }}"
- name: enable auto start
file:
- name: generate neutron network service list
lineinfile: dest=/opt/service create=yes line='{{ item }}'
- with_items: services | union(services_noarch)
+ with_items: "{{ services | union(services_noarch) }}"
- name: fix openstack neutron plugin config file
shell: |
dest: /etc/neutron/dnsmasq-neutron.conf
regexp: '^dhcp-option-force'
line: 'dhcp-option-force=26,1450'
- when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES }}"
+ when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES | to_json }}"
- include: firewall.yml
when: enable_fwaas == True
when: enable_vpnaas == True
- include: odl.yml
- when: "'opendaylight' in {{ NEUTRON_MECHANISM_DRIVERS }}"
+ when: "'opendaylight' in {{ NEUTRON_MECHANISM_DRIVERS | to_json }}"
- name: restart neutron network relation service
service: name={{ item }} state=restarted enabled=yes
with_flattened:
- - services_noarch
- - services
+ - "{{ services_noarch }}"
+ - "{{ services }}"
- meta: flush_handlers
- name: install vpn packages
action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: vpn_packages
+ with_items: "{{ vpn_packages }}"
- name: enable auto start
file:
openvswitch_agent: neutron-plugin-openvswitch-agent
+firewall_packages:
+
+vpn_packages:
+
xorp_packages:
- xorp
---
- name: restart nova-compute services
service: name={{ item }} state=restarted enabled=yes
- with_items: services | union(services_noarch)
+ with_items: "{{ services | union(services_noarch) }}"
- name: install nova-compute related packages
action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: packages | union(packages_noarch)
+ with_items: "{{ packages | union(packages_noarch) }}"
- name: restart virtlogd
service: name=virtlogd state=started enabled=yes
- name: generate neutron control service list
lineinfile: dest=/opt/service create=yes line='{{ item }}'
- with_items: services | union(services_noarch)
+ with_items: "{{ services | union(services_noarch) }}"
#'
- name: remove nova sqlite db
shell: rm /var/lib/nova/nova.sqlite || touch nova.sqlite.removed
---
- name: restart nova service
service: name={{ item}} state=restarted enabled=yes
- with_items: services | union(services_noarch)
+ with_items: "{{ services | union(services_noarch) }}"
- name: remove nova-sqlite-db
shell: rm /var/lib/nova/nova.sqlite || touch nova.sqlite.db.removed
- name: install nova related packages
action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: packages | union(packages_noarch)
+ with_items: "{{ packages | union(packages_noarch) }}"
- name: enable auto start
file:
- name: generate nova control service list
lineinfile: dest=/opt/service create=yes line='{{ item }}'
- with_items: services | union(services_noarch)
+ with_items: "{{ services | union(services_noarch) }}"
- name: update nova conf
template: src=templates/nova.conf
- { regexp: "^\\s*config.storageManager.ip", line: "config.storageManager.ip = '{{ contrail_keystone_address }}';" }
- { regexp: "^\\s*config.cnfg.server_ip", line: "config.cnfg.server_ip = '{{ contrail_haproxy_address }}';" }
- { regexp: "^\\s*config.analytics.server_ip", line: "config.analytics.server_ip = '{{ contrail_haproxy_address }}';" }
- - { regexp: "^\\s*config.cassandra.server_ips", line: "config.cassandra.server_ips = [{{ cassandra_addrs }}];" }
+# TODO: when I update ansible version to 2.2, this playbook can't pass the test. ERROR log: "'cassandra_addrs' is undefined".
+# - { regexp: "^\\s*config.cassandra.server_ips", line: "config.cassandra.server_ips = [{{ cassandra_addrs }}];" }
- name: "modify webui userauth js"
lineinfile:
- name: restart controller relation service
service: name={{ item }} state=restarted enabled=yes
ignore_errors: True
- with_items: controller_services
+ with_items: "{{ controller_services }}"
- name: restart compute relation service
service: name={{ item }} state=restarted enabled=yes
ignore_errors: True
- with_items: compute_services
+ with_items: "{{ compute_services }}"
tags: secgroup
- name: copy configs
- template: src={{ item.src}} dest=/opt/os_templates
- with_items: "{{ configs_templates }}"
+ template: src={{ item }} dest=/opt/os_templates
+ with_items:
+ - nova.j2
+ - neutron.j2
tags: secgroup
- name: update controller configs
- shell: '[ -f {{ item.1 }} ] && crudini --merge {{ item.1 }} < /opt/os_templates/{{ item.0.src }} || /bin/true'
+ shell: "[ -f '{{ item.1 }}' ] && crudini --merge '{{ item.1 }}' < /opt/os_templates/{{ item.0.src }} || /bin/true"
tags: secgroup
with_subelements:
- - configs_templates
+ - "{{ configs_templates }}"
- dest
notify: restart controller relation service
when: inventory_hostname in "{{ groups['controller'] }}"
- name: update compute configs
- shell: '[ -f {{ item.1 }} ] && crudini --merge {{ item.1 }} < /opt/os_templates/{{ item.0.src }} || /bin/true'
+ shell: "[ -f '{{ item.1 }}' ] && crudini --merge '{{ item.1 }}' < /opt/os_templates/{{ item.0.src }} || /bin/true"
tags: secgroup
with_subelements:
- - configs_templates
+ - "{{ configs_templates }}"
- dest
notify: restart compute relation service
when: inventory_hostname in "{{ groups['compute'] }}"
+
shell: >
python /opt/setup_networks/check_network.py \
"{{ inventory_hostname }}" \
- "{{ ip_settings }}"
+ "{{ ip_settings | to_json }}"
tags:
- network_check
retries: 3
delay: 2
- name: add to boot scripts
- service: name=net_init enabled=yes
+ shell: update-rc.d net_init defaults
- meta: flush_handlers
-bond_mappings: {{ network_cfg["bond_mappings"] }}
-ip_settings: {{ ip_settings[inventory_hostname] }}
-sys_intf_mappings: {{ sys_intf_mappings }}
-provider_net_mappings: {{ network_cfg["provider_net_mappings"] }}
+bond_mappings: {{ network_cfg["bond_mappings"] | to_json }}
+ip_settings: {{ ip_settings[inventory_hostname] | to_json }}
+sys_intf_mappings: {{ sys_intf_mappings | to_json }}
+provider_net_mappings: {{ network_cfg["provider_net_mappings"] | to_json }}
register: part_size
- name: create image file if not exist
- script: create_img.sh \"{{ part_size.stdout }}\"
+ script: create_img.sh "{{ part_size.stdout }}"
- name: do a losetup on storage volumes
script: losetup.sh
- name: load loop.yml
include: loop.yml
- when: status.stat.exists == False or status.stat.isblk == False
+ when: status.stat.exists == False or status.stat.isblk == False
tags:
- storage
- name: enable service
- service: name=storage enabled=yes
+ shell: update-rc.d storage defaults
tags:
- storage
[defaults]
log_path = /var/ansible/run/openstack_newton-$cluster_name/ansible.log
host_key_checking = False
+callback_whitelist = playbook_done, status_callback
callback_plugins = /opt/compass/bin/ansible_callbacks
-pipelining=True
library = /opt/openstack-ansible-modules
+forks=100
+
+[ssh_connection]
+pipelining=True
export AYNC_TIMEOUT=20
ssh $ssh_args root@${MGMT_IP} mkdir -p /opt/compass/bin/ansible_callbacks
scp $ssh_args -r ${COMPASS_DIR}/deploy/status_callback.py root@${MGMT_IP}:/opt/compass/bin/ansible_callbacks/status_callback.py
+ scp $ssh_args -r ${COMPASS_DIR}/deploy/playbook_done.py root@${MGMT_IP}:/opt/compass/bin/ansible_callbacks/playbook_done.py
# avoid nodes rebooting too fast, otherwise cobbler cannot give a response
(sleep $AYNC_TIMEOUT; rename_nics; reboot_hosts) &
--- /dev/null
+#!/usr/bin/env python
+#
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Ansible playbook callback after a playbook run has completed."""
+import os
+import sys
+
+from ansible.plugins.callback import CallbackBase
+
+current_dir = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(current_dir + '/..')
+
+
+import switch_virtualenv # noqa
+from compass.apiclient.restful import Client # noqa: E402
+from compass.utils import flags # noqa: E402
+
+
+flags.add('compass_server',
+ help='compass server url',
+ default='http://127.0.0.1/api')
+flags.add('compass_user_email',
+ help='compass user email',
+ default='admin@huawei.com')
+flags.add('compass_user_password',
+ help='compass user password',
+ default='admin')
+
+
+class CallbackModule(CallbackBase):
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'notification'
+ CALLBACK_NAME = 'playbook_done'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ def __init__(self):
+ super(CallbackModule, self).__init__()
+
+ self.play = None
+ self.loader = None
+ self.disabled = False
+ try:
+ self.client = self._get_client()
+ except Exception:
+ self.disabled = True
+ self._display.error("No compass server found"
+ "disabling this plugin")
+
+ def _get_client(self):
+ return Client(flags.OPTIONS.compass_server)
+
+ def _login(self, client):
+ """get apiclient token."""
+ status, resp = client.get_token(
+ flags.OPTIONS.compass_user_email,
+ flags.OPTIONS.compass_user_password
+ )
+ self._display.warning(
+ 'login status: %s, resp: %s' %
+ (status, resp)
+ )
+ if status >= 400:
+ raise Exception(
+ 'failed to login %s with user %s',
+ flags.OPTIONS.compass_server,
+ flags.OPTIONS.compass_user_email
+ )
+ return resp['token']
+
+ def v2_playbook_on_play_start(self, play):
+ self.play = play
+ self.loader = self.play.get_loader()
+ return
+
+ def v2_playbook_on_stats(self, stats):
+ all_vars = self.play.get_variable_manager().get_vars(self.loader)
+ host_vars = all_vars["hostvars"]
+ hosts = sorted(stats.processed.keys())
+ cluster_name = host_vars[hosts[0]]['cluster_name']
+ self._display.warning("cluster_name %s" % cluster_name)
+
+ failures = False
+ unreachable = False
+
+ for host in hosts:
+ summary = stats.summarize(host)
+
+ if summary['failures'] > 0:
+ failures = True
+ if summary['unreachable'] > 0:
+ unreachable = True
+
+ if failures or unreachable:
+ return
+
+ self._login(self.client)
+
+ for host in hosts:
+ clusterhost_name = host + "." + cluster_name
+ self.client.clusterhost_ready(clusterhost_name)
import httplib
import json
-import sys
-import logging
+import sys # noqa:F401
+from ansible.plugins.callback import CallbackBase
-def task_error(host, data):
- logging.info("task_error: host=%s,data=%s" % (host, data))
+
+def task_error(display, host, data):
+ display.display("task_error: host=%s,data=%s" % (host, data))
# if isinstance(data, dict):
# invocation = data.pop('invocation', {})
- notify_host("localhost", host, "failed")
+ notify_host(display, "localhost", host, "failed")
-class CallbackModule(object):
+class CallbackModule(CallbackBase):
"""
logs playbook results, per host, in /var/log/ansible/hosts
"""
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'notification'
+ CALLBACK_NAME = 'status_callback'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ def __init__(self):
+ super(CallbackModule, self).__init__()
- def on_any(self, *args, **kwargs):
+ def v2_on_any(self, *args, **kwargs):
pass
- def runner_on_failed(self, host, res, ignore_errors=False):
- task_error(host, res)
+ def v2_runner_on_failed(self, host, res, ignore_errors=False):
+ task_error(self._display, host, res)
- def runner_on_ok(self, host, res):
+ def v2_runner_on_ok(self, host, res):
pass
- def runner_on_skipped(self, host, item=None):
+ def v2_runner_on_skipped(self, host, item=None):
pass
- def runner_on_unreachable(self, host, res):
+ def v2_runner_on_unreachable(self, host, res):
pass
- def runner_on_no_hosts(self):
+ def v2_runner_on_no_hosts(self):
pass
- def runner_on_async_poll(self, host, res, jid, clock):
+ def v2_runner_on_async_poll(self, host, res, jid, clock):
pass
- def runner_on_async_ok(self, host, res, jid):
+ def v2_runner_on_async_ok(self, host, res, jid):
pass
- def runner_on_async_failed(self, host, res, jid):
- task_error(host, res)
+ def v2_runner_on_async_failed(self, host, res, jid):
+ task_error(self._display, host, res)
- def playbook_on_start(self):
+ def v2_playbook_on_start(self):
pass
- def playbook_on_notify(self, host, handler):
+ def v2_playbook_on_notify(self, host, handler):
pass
- def playbook_on_no_hosts_matched(self):
+ def v2_playbook_on_no_hosts_matched(self):
pass
- def playbook_on_no_hosts_remaining(self):
+ def v2_playbook_on_no_hosts_remaining(self):
pass
- def playbook_on_task_start(self, name, is_conditional):
+ def v2_playbook_on_task_start(self, name, is_conditional):
pass
- def playbook_on_vars_prompt(self, varname, private=True, prompt=None,
+ def v2_playbook_on_vars_prompt(self, varname, private=True, prompt=None,
encrypt=None, confirm=False, salt_size=None, salt=None, default=None): # noqa
pass
- def playbook_on_setup(self):
+ def v2_playbook_on_setup(self):
pass
- def playbook_on_import_for_host(self, host, imported_file):
+ def v2_playbook_on_import_for_host(self, host, imported_file):
pass
- def playbook_on_not_import_for_host(self, host, missing_file):
+ def v2_playbook_on_not_import_for_host(self, host, missing_file):
pass
- def playbook_on_play_start(self, name):
- pass
+ def v2_playbook_on_play_start(self, play):
+ self.play = play
+ self.loader = self.play.get_loader()
+ return
- def playbook_on_stats(self, stats):
- logging.info("playbook_on_stats enter")
+ def v2_playbook_on_stats(self, stats):
+ self._display.display("playbook_on_stats enter")
+ all_vars = self.play.get_variable_manager().get_vars(self.loader)
+ host_vars = all_vars["hostvars"]
hosts = sorted(stats.processed.keys())
- host_vars = self.playbook.inventory.get_variables(hosts[0])
- cluster_name = host_vars['cluster_name']
+ cluster_name = host_vars[hosts[0]]['cluster_name']
failures = False
unreachable = False
if failures or unreachable:
for host in hosts:
- notify_host("localhost", host, "error")
+ notify_host(self._display, "localhost", host, "error")
return
for host in hosts:
clusterhost_name = host + "." + cluster_name
- notify_host("localhost", clusterhost_name, "succ")
+ notify_host(self._display, "localhost", clusterhost_name, "succ")
def raise_for_status(resp):
return json.loads(resp.read())["token"]
-def notify_host(compass_host, host, status):
+def notify_host(display, compass_host, host, status):
if status == "succ":
body = {"ready": True}
url = "/api/clusterhosts/%s/state_internal" % host
host = host.strip("host")
url = "/api/clusterhosts/%s/state" % host
else:
- logging.error("notify_host: host %s with status %s is not supported"
+ display.error("notify_host: host %s with status %s is not supported"
% (host, status))
return
conn = httplib.HTTPConnection(compass_host, 80)
token = auth(conn)
headers["X-Auth-Token"] = token
- logging.info("host=%s,url=%s,body=%s,headers=%s" %
- (compass_host, url, json.dumps(body), headers))
+ display.display("host=%s,url=%s,body=%s,headers=%s" %
+ (compass_host, url, json.dumps(body), headers))
conn.request("POST", url, json.dumps(body), headers)
resp = conn.getresponse()
try:
raise_for_status(resp)
- logging.info(
+ display.display(
"notify host status success!!! status=%s, body=%s" %
(resp.status, resp.read()))
except Exception as e:
- logging.error("http request failed %s" % str(e))
+ display.error("http request failed %s" % str(e))
raise
finally:
conn.close()
-
-if __name__ == "__main__":
- if len(sys.argv) != 3:
- logging.error("params: host, status is need")
- sys.exit(1)
-
- host = sys.argv[1]
- status = sys.argv[2]
- notify_host(host, status)
export PIP_PACKAGE="https://pypi.python.org/packages/0d/af/8ccfb73834a6ddf9d57ecac61466557b7ca0722620bbb16d2d069ce312db/networking-odl-2.0.0.tar.gz \
https://pypi.python.org/packages/90/4f/74b730294de1db393e3e82211b5d2115f9a763849abca7d014348a550d2a/oslosphinx-4.5.0.tar.gz \
+ https://pypi.python.org/packages/48/da/5e51cf931e4c7849ba698654877e2951ade8f842f28f0c904453a1d317d7/ansible-2.2.0.0.tar.gz \
https://pypi.python.org/packages/74/f0/386f7f73aa6628c1bef53874c5d453b556356d77732add69000aa53b353b/policy2tosca-1.0.tar.gz "