Merge "speed up the installation of compass"
author Tim Rozet <trozet@redhat.com>
Tue, 28 Jul 2015 15:39:49 +0000 (15:39 +0000)
committer Gerrit Code Review <gerrit@172.30.200.206>
Tue, 28 Jul 2015 15:39:49 +0000 (15:39 +0000)
130 files changed:
compass/deploy/ansible/openstack_juno/roles/keystone/handlers/main.yml [deleted file]
compass/deploy/ansible/openstack_juno/roles/repo/tasks/main.yml [deleted file]
compass/deploy/ansible/roles/cinder-controller/handlers/main.yml [moved from compass/deploy/ansible/openstack_juno/roles/cinder-controller/handlers/main.yml with 100% similarity]
compass/deploy/ansible/roles/cinder-controller/tasks/cinder_config.yml [moved from compass/deploy/ansible/openstack_juno/roles/cinder-controller/tasks/cinder_config.yml with 100% similarity]
compass/deploy/ansible/roles/cinder-controller/tasks/cinder_install.yml [moved from compass/deploy/ansible/openstack_juno/roles/cinder-controller/tasks/cinder_install.yml with 100% similarity]
compass/deploy/ansible/roles/cinder-controller/tasks/main.yml [moved from compass/deploy/ansible/openstack_juno/roles/cinder-controller/tasks/main.yml with 100% similarity]
compass/deploy/ansible/roles/cinder-controller/templates/api-paste.ini [moved from compass/deploy/ansible/openstack_juno/roles/cinder-controller/templates/api-paste.ini with 100% similarity]
compass/deploy/ansible/roles/cinder-controller/templates/cinder.conf [moved from compass/deploy/ansible/openstack_juno/roles/cinder-controller/templates/cinder.conf with 100% similarity]
compass/deploy/ansible/roles/cinder-controller/templates/cinder_init.sh [moved from compass/deploy/ansible/openstack_juno/roles/cinder-controller/templates/cinder_init.sh with 100% similarity]
compass/deploy/ansible/roles/cinder-volume/files/loop.yml [moved from compass/deploy/ansible/openstack_juno/roles/cinder-volume/files/loop.yml with 100% similarity]
compass/deploy/ansible/roles/cinder-volume/handlers/main.yml [moved from compass/deploy/ansible/openstack_juno/roles/cinder-volume/handlers/main.yml with 98% similarity]
compass/deploy/ansible/roles/cinder-volume/tasks/main.yml [moved from compass/deploy/ansible/openstack_juno/roles/cinder-volume/tasks/main.yml with 100% similarity]
compass/deploy/ansible/roles/cinder-volume/templates/cinder.conf [moved from compass/deploy/ansible/openstack_juno/roles/cinder-volume/templates/cinder.conf with 100% similarity]
compass/deploy/ansible/roles/common/files/sources.list.d/cloudarchive-juno.list [moved from compass/deploy/ansible/openstack_juno/roles/common/files/sources.list.d/cloudarchive-juno.list with 100% similarity]
compass/deploy/ansible/roles/common/tasks/main.yml [moved from compass/deploy/ansible/openstack_juno/roles/common/tasks/main.yml with 100% similarity]
compass/deploy/ansible/roles/common/templates/hosts [moved from compass/deploy/ansible/openstack_juno/roles/common/templates/hosts with 100% similarity]
compass/deploy/ansible/roles/common/templates/ntp.conf [moved from compass/deploy/ansible/openstack_juno/roles/common/templates/ntp.conf with 100% similarity]
compass/deploy/ansible/roles/dashboard/tasks/main.yml [moved from compass/deploy/ansible/openstack_juno/roles/dashboard/tasks/main.yml with 100% similarity]
compass/deploy/ansible/roles/dashboard/templates/local_settings.py [moved from compass/deploy/ansible/openstack_juno/roles/dashboard/templates/local_settings.py with 100% similarity]
compass/deploy/ansible/roles/dashboard/templates/openstack-dashboard.conf [moved from compass/deploy/ansible/openstack_juno/roles/dashboard/templates/openstack-dashboard.conf with 100% similarity]
compass/deploy/ansible/roles/database/files/my.cnf [moved from compass/deploy/ansible/openstack_juno/roles/database/files/my.cnf with 100% similarity]
compass/deploy/ansible/roles/database/tasks/main.yml [moved from compass/deploy/ansible/openstack_juno/roles/database/tasks/main.yml with 100% similarity]
compass/deploy/ansible/roles/database/tasks/mariadb.yml [moved from compass/deploy/ansible/openstack_juno/roles/database/tasks/mariadb.yml with 72% similarity]
compass/deploy/ansible/roles/database/tasks/mysql.yml [moved from compass/deploy/ansible/openstack_juno/roles/database/tasks/mysql.yml with 100% similarity]
compass/deploy/ansible/roles/database/templates/data.j2 [moved from compass/deploy/ansible/openstack_juno/roles/database/templates/data.j2 with 100% similarity]
compass/deploy/ansible/roles/database/templates/my.cnf [moved from compass/deploy/ansible/openstack_juno/roles/database/templates/my.cnf with 100% similarity]
compass/deploy/ansible/roles/database/templates/wsrep.cnf [moved from compass/deploy/ansible/openstack_juno/roles/database/templates/wsrep.cnf with 100% similarity]
compass/deploy/ansible/roles/glance/handlers/main.yml [moved from compass/deploy/ansible/openstack_juno/roles/glance/handlers/main.yml with 100% similarity]
compass/deploy/ansible/roles/glance/tasks/glance_config.yml [moved from compass/deploy/ansible/openstack_juno/roles/glance/tasks/glance_config.yml with 100% similarity]
compass/deploy/ansible/roles/glance/tasks/glance_install.yml [moved from compass/deploy/ansible/openstack_juno/roles/glance/tasks/glance_install.yml with 100% similarity]
compass/deploy/ansible/roles/glance/tasks/main.yml [moved from compass/deploy/ansible/openstack_juno/roles/glance/tasks/main.yml with 100% similarity]
compass/deploy/ansible/roles/glance/tasks/nfs.yml [moved from compass/deploy/ansible/openstack_juno/roles/glance/tasks/nfs.yml with 100% similarity]
compass/deploy/ansible/roles/glance/templates/glance-api.conf [moved from compass/deploy/ansible/openstack_juno/roles/glance/templates/glance-api.conf with 100% similarity]
compass/deploy/ansible/roles/glance/templates/glance-registry.conf [moved from compass/deploy/ansible/openstack_juno/roles/glance/templates/glance-registry.conf with 100% similarity]
compass/deploy/ansible/roles/glance/templates/image_upload.sh [moved from compass/deploy/ansible/openstack_juno/roles/glance/templates/image_upload.sh with 100% similarity]
compass/deploy/ansible/roles/ha/files/galera_chk [moved from compass/deploy/ansible/openstack_juno/roles/ha/files/galera_chk with 100% similarity]
compass/deploy/ansible/roles/ha/files/mysqlchk [moved from compass/deploy/ansible/openstack_juno/roles/ha/files/mysqlchk with 100% similarity]
compass/deploy/ansible/roles/ha/files/notify.sh [moved from compass/deploy/ansible/openstack_juno/roles/ha/files/notify.sh with 100% similarity]
compass/deploy/ansible/roles/ha/handlers/main.yml [moved from compass/deploy/ansible/openstack_juno/roles/ha/handlers/main.yml with 100% similarity]
compass/deploy/ansible/roles/ha/tasks/main.yml [moved from compass/deploy/ansible/openstack_juno/roles/ha/tasks/main.yml with 100% similarity]
compass/deploy/ansible/roles/ha/templates/failover.j2 [moved from compass/deploy/ansible/openstack_juno/roles/ha/templates/failover.j2 with 100% similarity]
compass/deploy/ansible/roles/ha/templates/haproxy.cfg [moved from compass/deploy/ansible/openstack_juno/roles/ha/templates/haproxy.cfg with 100% similarity]
compass/deploy/ansible/roles/ha/templates/keepalived.conf [moved from compass/deploy/ansible/openstack_juno/roles/ha/templates/keepalived.conf with 100% similarity]
compass/deploy/ansible/roles/keystone/tasks/keystone_config.yml [moved from compass/deploy/ansible/openstack_juno/roles/keystone/tasks/keystone_config.yml with 100% similarity]
compass/deploy/ansible/roles/keystone/tasks/keystone_install.yml [moved from compass/deploy/ansible/openstack_juno/roles/keystone/tasks/keystone_install.yml with 91% similarity]
compass/deploy/ansible/roles/keystone/tasks/main.yml [moved from compass/deploy/ansible/openstack_juno/roles/keystone/tasks/main.yml with 100% similarity]
compass/deploy/ansible/roles/keystone/templates/admin-openrc.sh [moved from compass/deploy/ansible/openstack_juno/roles/keystone/templates/admin-openrc.sh with 100% similarity]
compass/deploy/ansible/roles/keystone/templates/demo-openrc.sh [moved from compass/deploy/ansible/openstack_juno/roles/keystone/templates/demo-openrc.sh with 100% similarity]
compass/deploy/ansible/roles/keystone/templates/keystone.conf [moved from compass/deploy/ansible/openstack_juno/roles/keystone/templates/keystone.conf with 99% similarity]
compass/deploy/ansible/roles/keystone/templates/keystone_init [moved from compass/deploy/ansible/openstack_juno/roles/keystone/templates/keystone_init with 100% similarity]
compass/deploy/ansible/roles/monitor/files/check_service.sh [moved from compass/deploy/ansible/openstack_juno/roles/monitor/files/check_service.sh with 100% similarity]
compass/deploy/ansible/roles/monitor/files/root [moved from compass/deploy/ansible/openstack_juno/roles/monitor/files/root with 100% similarity]
compass/deploy/ansible/roles/monitor/tasks/main.yml [moved from compass/deploy/ansible/openstack_juno/roles/monitor/tasks/main.yml with 100% similarity]
compass/deploy/ansible/roles/mq/tasks/main.yml [moved from compass/deploy/ansible/openstack_juno/roles/mq/tasks/main.yml with 100% similarity]
compass/deploy/ansible/roles/mq/tasks/rabbitmq.yml [moved from compass/deploy/ansible/openstack_juno/roles/mq/tasks/rabbitmq.yml with 100% similarity]
compass/deploy/ansible/roles/mq/tasks/rabbitmq_cluster.yml [moved from compass/deploy/ansible/openstack_juno/roles/mq/tasks/rabbitmq_cluster.yml with 100% similarity]
compass/deploy/ansible/roles/mq/templates/.erlang.cookie [moved from compass/deploy/ansible/openstack_juno/roles/mq/templates/.erlang.cookie with 100% similarity]
compass/deploy/ansible/roles/mq/templates/rabbitmq-env.conf [moved from compass/deploy/ansible/openstack_juno/roles/mq/templates/rabbitmq-env.conf with 100% similarity]
compass/deploy/ansible/roles/neutron-common/handlers/main.yml [moved from compass/deploy/ansible/openstack_juno/roles/neutron-common/handlers/main.yml with 100% similarity]
compass/deploy/ansible/roles/neutron-compute/defaults/main.yml [moved from compass/deploy/ansible/openstack_juno/roles/neutron-compute/defaults/main.yml with 100% similarity]
compass/deploy/ansible/roles/neutron-compute/handlers/main.yml [moved from compass/deploy/ansible/openstack_juno/roles/neutron-compute/handlers/main.yml with 100% similarity]
compass/deploy/ansible/roles/neutron-compute/tasks/main.yml [moved from compass/deploy/ansible/openstack_juno/roles/neutron-compute/tasks/main.yml with 100% similarity]
compass/deploy/ansible/roles/neutron-compute/templates/dhcp_agent.ini [moved from compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/dhcp_agent.ini with 100% similarity]
compass/deploy/ansible/roles/neutron-compute/templates/dnsmasq-neutron.conf [moved from compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/dnsmasq-neutron.conf with 100% similarity]
compass/deploy/ansible/roles/neutron-compute/templates/etc/xorp/config.boot [moved from compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/etc/xorp/config.boot with 100% similarity]
compass/deploy/ansible/roles/neutron-compute/templates/l3_agent.ini [moved from compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/l3_agent.ini with 100% similarity]
compass/deploy/ansible/roles/neutron-compute/templates/metadata_agent.ini [moved from compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/metadata_agent.ini with 100% similarity]
compass/deploy/ansible/roles/neutron-compute/templates/ml2_conf.ini [moved from compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/ml2_conf.ini with 100% similarity]
compass/deploy/ansible/roles/neutron-compute/templates/neutron-network.conf [moved from compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/neutron-network.conf with 100% similarity]
compass/deploy/ansible/roles/neutron-compute/templates/neutron.conf [moved from compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/neutron.conf with 100% similarity]
compass/deploy/ansible/roles/neutron-compute/templates/neutron_init.sh [moved from compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/neutron_init.sh with 100% similarity]
compass/deploy/ansible/roles/neutron-compute/templates/nova.conf [moved from compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/nova.conf with 100% similarity]
compass/deploy/ansible/roles/neutron-controller/handlers/main.yml [moved from compass/deploy/ansible/openstack_juno/roles/neutron-controller/handlers/main.yml with 100% similarity]
compass/deploy/ansible/roles/neutron-controller/tasks/main.yml [moved from compass/deploy/ansible/openstack_juno/roles/neutron-controller/tasks/main.yml with 100% similarity]
compass/deploy/ansible/roles/neutron-controller/tasks/neutron_config.yml [moved from compass/deploy/ansible/openstack_juno/roles/neutron-controller/tasks/neutron_config.yml with 100% similarity]
compass/deploy/ansible/roles/neutron-controller/tasks/neutron_install.yml [moved from compass/deploy/ansible/openstack_juno/roles/neutron-controller/tasks/neutron_install.yml with 100% similarity]
compass/deploy/ansible/roles/neutron-controller/templates/dhcp_agent.ini [moved from compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/dhcp_agent.ini with 100% similarity]
compass/deploy/ansible/roles/neutron-controller/templates/dnsmasq-neutron.conf [moved from compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/dnsmasq-neutron.conf with 100% similarity]
compass/deploy/ansible/roles/neutron-controller/templates/etc/xorp/config.boot [moved from compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/etc/xorp/config.boot with 100% similarity]
compass/deploy/ansible/roles/neutron-controller/templates/l3_agent.ini [moved from compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/l3_agent.ini with 100% similarity]
compass/deploy/ansible/roles/neutron-controller/templates/metadata_agent.ini [moved from compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/metadata_agent.ini with 100% similarity]
compass/deploy/ansible/roles/neutron-controller/templates/ml2_conf.ini [moved from compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/ml2_conf.ini with 100% similarity]
compass/deploy/ansible/roles/neutron-controller/templates/neutron-network.conf [moved from compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/neutron-network.conf with 100% similarity]
compass/deploy/ansible/roles/neutron-controller/templates/neutron.conf [moved from compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/neutron.conf with 100% similarity]
compass/deploy/ansible/roles/neutron-controller/templates/neutron_init.sh [moved from compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/neutron_init.sh with 100% similarity]
compass/deploy/ansible/roles/neutron-controller/templates/nova.conf [moved from compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/nova.conf with 100% similarity]
compass/deploy/ansible/roles/neutron-network/handlers/main.yml [moved from compass/deploy/ansible/openstack_juno/roles/neutron-network/handlers/main.yml with 100% similarity]
compass/deploy/ansible/roles/neutron-network/tasks/igmp-router.yml [moved from compass/deploy/ansible/openstack_juno/roles/neutron-network/tasks/igmp-router.yml with 100% similarity]
compass/deploy/ansible/roles/neutron-network/tasks/main.yml [moved from compass/deploy/ansible/openstack_juno/roles/neutron-network/tasks/main.yml with 100% similarity]
compass/deploy/ansible/roles/neutron-network/tasks/odl.yml [moved from compass/deploy/ansible/openstack_juno/roles/neutron-network/tasks/odl.yml with 100% similarity]
compass/deploy/ansible/roles/neutron-network/templates/dhcp_agent.ini [moved from compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/dhcp_agent.ini with 100% similarity]
compass/deploy/ansible/roles/neutron-network/templates/dnsmasq-neutron.conf [moved from compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/dnsmasq-neutron.conf with 100% similarity]
compass/deploy/ansible/roles/neutron-network/templates/etc/xorp/config.boot [moved from compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/etc/xorp/config.boot with 100% similarity]
compass/deploy/ansible/roles/neutron-network/templates/l3_agent.ini [moved from compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/l3_agent.ini with 100% similarity]
compass/deploy/ansible/roles/neutron-network/templates/metadata_agent.ini [moved from compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/metadata_agent.ini with 100% similarity]
compass/deploy/ansible/roles/neutron-network/templates/ml2_conf.ini [moved from compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/ml2_conf.ini with 100% similarity]
compass/deploy/ansible/roles/neutron-network/templates/neutron-network.conf [moved from compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/neutron-network.conf with 100% similarity]
compass/deploy/ansible/roles/neutron-network/templates/neutron.conf [moved from compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/neutron.conf with 100% similarity]
compass/deploy/ansible/roles/neutron-network/templates/neutron_init.sh [moved from compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/neutron_init.sh with 100% similarity]
compass/deploy/ansible/roles/neutron-network/templates/nova.conf [moved from compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/nova.conf with 100% similarity]
compass/deploy/ansible/roles/nova-compute/handlers/main.yml [moved from compass/deploy/ansible/openstack_juno/roles/nova-compute/handlers/main.yml with 100% similarity]
compass/deploy/ansible/roles/nova-compute/tasks/main.yml [moved from compass/deploy/ansible/openstack_juno/roles/nova-compute/tasks/main.yml with 100% similarity]
compass/deploy/ansible/roles/nova-compute/templates/nova-compute.conf [moved from compass/deploy/ansible/openstack_juno/roles/nova-compute/templates/nova-compute.conf with 100% similarity]
compass/deploy/ansible/roles/nova-compute/templates/nova.conf [moved from compass/deploy/ansible/openstack_juno/roles/nova-compute/templates/nova.conf with 100% similarity]
compass/deploy/ansible/roles/nova-controller/handlers/main.yml [moved from compass/deploy/ansible/openstack_juno/roles/nova-controller/handlers/main.yml with 100% similarity]
compass/deploy/ansible/roles/nova-controller/tasks/main.yml [moved from compass/deploy/ansible/openstack_juno/roles/nova-controller/tasks/main.yml with 100% similarity]
compass/deploy/ansible/roles/nova-controller/tasks/nova_config.yml [moved from compass/deploy/ansible/openstack_juno/roles/nova-controller/tasks/nova_config.yml with 100% similarity]
compass/deploy/ansible/roles/nova-controller/tasks/nova_install.yml [moved from compass/deploy/ansible/openstack_juno/roles/nova-controller/tasks/nova_install.yml with 100% similarity]
compass/deploy/ansible/roles/nova-controller/templates/dhcp_agent.ini [moved from compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/dhcp_agent.ini with 100% similarity]
compass/deploy/ansible/roles/nova-controller/templates/dnsmasq-neutron.conf [moved from compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/dnsmasq-neutron.conf with 100% similarity]
compass/deploy/ansible/roles/nova-controller/templates/etc/xorp/config.boot [moved from compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/etc/xorp/config.boot with 100% similarity]
compass/deploy/ansible/roles/nova-controller/templates/l3_agent.ini [moved from compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/l3_agent.ini with 100% similarity]
compass/deploy/ansible/roles/nova-controller/templates/metadata_agent.ini [moved from compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/metadata_agent.ini with 100% similarity]
compass/deploy/ansible/roles/nova-controller/templates/ml2_conf.ini [moved from compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/ml2_conf.ini with 100% similarity]
compass/deploy/ansible/roles/nova-controller/templates/neutron-network.conf [moved from compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/neutron-network.conf with 100% similarity]
compass/deploy/ansible/roles/nova-controller/templates/neutron.conf [moved from compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/neutron.conf with 100% similarity]
compass/deploy/ansible/roles/nova-controller/templates/neutron_init.sh [moved from compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/neutron_init.sh with 100% similarity]
compass/deploy/ansible/roles/nova-controller/templates/nova.conf [moved from compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/nova.conf with 100% similarity]
compass/deploy/ansible/roles/repo/tasks/main.yml [new file with mode: 0644]
compass/deploy/ansible/roles/repo/templates/sources.list [moved from compass/deploy/ansible/openstack_juno/roles/repo/templates/sources.list with 100% similarity]
foreman/ci/Vagrantfile
foreman/ci/bootstrap.sh
foreman/ci/clean.sh
foreman/ci/deploy.sh
foreman/ci/inventory/lf_pod2_ksgen_settings.yml
foreman/ci/opnfv_ksgen_settings.yml
foreman/ci/opnfv_ksgen_settings_no_HA.yml [new file with mode: 0644]
foreman/ci/reload_playbook.yml
foreman/ci/vm_nodes_provision.sh
foreman/docs/src/installation-instructions.rst

diff --git a/compass/deploy/ansible/openstack_juno/roles/keystone/handlers/main.yml b/compass/deploy/ansible/openstack_juno/roles/keystone/handlers/main.yml
deleted file mode 100644 (file)
index 9c0084e..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- name: restart keystone
-  service: name=keystone state=restarted enabled=yes
diff --git a/compass/deploy/ansible/openstack_juno/roles/repo/tasks/main.yml b/compass/deploy/ansible/openstack_juno/roles/repo/tasks/main.yml
deleted file mode 100644 (file)
index 21f4ef0..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- name: copy local sources.list
-  template: src=sources.list dest=/etc/apt/sources.list backup=yes
-  when: LOCAL_REPO is defined
-
-- name: copy deb packages
-  shell: cp -rf /opt/repo/pool/main/ /var/cache/apt/archive/
-  ignore_errors: True
-
-- name: add juno cloudarchive
-  apt_repository: repo="{{ juno_cloud_archive }}" state=present
-
-- name: first update pkgs
-  apt: update_cache=yes
diff --git a/compass/deploy/ansible/openstack_juno/roles/database/tasks/mariadb.yml b/compass/deploy/ansible/roles/database/tasks/mariadb.yml
@@ -7,22 +7,8 @@
       #- mariadb-client-5.5
       - mysql-client-5.5
       - python-mysqldb
-
-- name: download mariadb and galera deb package
-  get_url: url={{ item.url }} dest=/opt/{{ item.filename }}
-  register: result
-  until: result|success
-  retries: 5
-  delay: 3
-  with_items:
-    - { url:  "{{ MARIADB_URL }}", filename: "{{ MARIADB }}" }
-    - { url:  "{{ GALERA_URL }}", filename: "{{ GALERA }}" }
-
-- name: install mariadb  and galera packages
-  command: dpkg -i /opt/{{ item }}
-  with_items:
-    - "{{ MARIADB }}"
-    - "{{ GALERA }}"
+      - mysql-server-wsrep
+      - galera
 
 - name: create mysql log directy
   file: path=/var/log/mysql state=directory owner=mysql group=mysql mode=0755
@@ -53,7 +39,6 @@
   with_items:
    - mysql
 
-
 - name: create database/user
   shell: /opt/data.sh
   when: HA_CLUSTER[inventory_hostname] == ''
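
Most of the install-time saving in the database role appears to come from the mariadb change above: instead of fetching the MariaDB and Galera .deb archives per host with get_url (with retries) and installing them through dpkg, the role now installs mysql-server-wsrep and galera straight from the configured apt repositories. A rough shell equivalent of the two flows, outside Ansible (minimal sketch; MARIADB_URL, MARIADB, GALERA_URL and GALERA stand for the playbook variables of the same names and are not defined here):

    # old flow: download each archive to /opt, then install it with dpkg
    wget -O "/opt/$MARIADB" "$MARIADB_URL" && dpkg -i "/opt/$MARIADB"
    wget -O "/opt/$GALERA"  "$GALERA_URL"  && dpkg -i "/opt/$GALERA"

    # new flow: a single apt transaction against the repositories set up by the repo role
    apt-get install -y mysql-server-wsrep galera
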
diff --git a/compass/deploy/ansible/openstack_juno/roles/keystone/tasks/keystone_install.yml b/compass/deploy/ansible/roles/keystone/tasks/keystone_install.yml
@@ -9,7 +9,6 @@
 
 - name: update keystone conf
   template: src=keystone.conf dest=/etc/keystone/keystone.conf backup=yes
-  notify: restart keystone
 
 - name: delete sqlite database
   shell: rm /var/lib/keystone/keystone.db || echo sqllite database already removed
@@ -25,3 +24,6 @@
   with_items:
     - admin-openrc.sh
     - demo-openrc.sh
+
+- name: manually start keystone
+  service: name=keystone state=restarted enabled=yes
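
With the keystone handlers file deleted (first diff above), the "update keystone conf" task no longer notifies a handler; instead the service is restarted explicitly at the end of the install tasks. If a manual check is wanted after the play, a sketch could be (assumes an Ubuntu host where the service is simply named keystone, as in the task above):

    service keystone status      # confirm the daemon is running
    service keystone restart     # what the new "manually start keystone" task does
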
diff --git a/compass/deploy/ansible/openstack_juno/roles/keystone/templates/keystone.conf b/compass/deploy/ansible/roles/keystone/templates/keystone.conf
@@ -355,11 +355,11 @@ rabbit_password={{ RABBIT_PASS }}
 
 # Print debugging output (set logging level to DEBUG instead
 # of default WARNING level). (boolean value)
-#debug=false
+debug={{ DEBUG }}
 
 # Print more verbose output (set logging level to INFO instead
 # of default WARNING level). (boolean value)
-#verbose=false
+verbose={{ VERBOSE }}
 
 # Log output to standard error (boolean value)
 #use_stderr=true
diff --git a/compass/deploy/ansible/roles/repo/tasks/main.yml b/compass/deploy/ansible/roles/repo/tasks/main.yml
new file mode 100644 (file)
index 0000000..9476f80
--- /dev/null
@@ -0,0 +1,6 @@
+---
+- name: add juno cloudarchive
+  apt_repository: repo="{{ juno_cloud_archive }}" state=present
+
+- name: first update pkgs
+  apt: update_cache=yes
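
The repo role is now just two tasks: register the Juno cloud archive and refresh the apt cache; copying a local sources.list and pre-seeding the deb cache were dropped together with the old openstack_juno copy of the role. A plain-shell sketch of what the remaining tasks amount to (juno_cloud_archive is a playbook variable, assumed here to hold an ordinary apt source line or cloud-archive shortcut):

    add-apt-repository -y "$juno_cloud_archive"
    apt-get update
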
diff --git a/foreman/ci/Vagrantfile b/foreman/ci/Vagrantfile
index 100e12d..a01da70 100644 (file)
@@ -41,6 +41,9 @@ Vagrant.configure(2) do |config|
   default_gw = ""
   nat_flag = false
 
+  # Disable dhcp flag
+  disable_dhcp_flag = false
+
   # Share an additional folder to the guest VM. The first argument is
   # the path on the host to the actual folder. The second argument is
   # the path on the guest to mount the folder. And the optional third
@@ -90,4 +93,8 @@ Vagrant.configure(2) do |config|
     config.vm.provision :shell, path: "nat_setup.sh"
   end
   config.vm.provision :shell, path: "bootstrap.sh"
+  if disable_dhcp_flag
+    config.vm.provision :shell, :inline => "systemctl stop dhcpd"
+    config.vm.provision :shell, :inline => "systemctl disable dhcpd"
+  end
 end
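
disable_dhcp_flag defaults to false, so nothing changes unless deploy.sh rewrites it. For virtual deployments where -enable_virtual_dhcp is not given, deploy.sh flips the flag so that the provisioned VM stops and disables its dhcpd service. The toggle is a single sed over this Vagrantfile (copied from the deploy.sh hunk further below):

    sed -i 's/^.*disable_dhcp_flag =.*$/  disable_dhcp_flag = true/' Vagrantfile
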
diff --git a/foreman/ci/bootstrap.sh b/foreman/ci/bootstrap.sh
index 4bc22ed..c98f00e 100755 (executable)
@@ -25,8 +25,7 @@ green=`tput setaf 2`
 yum install -y epel-release-7*
 
 # Install other required packages
-# Major version is pinned to force some consistency for Arno
-if ! yum -y install python-pip-1* python-virtualenv-1* gcc-4* git-1* sshpass-1* ansible-1* python-requests-1*; then
+if ! yum -y install python-pip python-virtualenv gcc git sshpass ansible python-requests; then
   printf '%s\n' 'bootstrap.sh: failed to install required packages' >&2
   exit 1
 fi
@@ -36,7 +35,7 @@ cd /opt
 echo "Cloning khaleesi to /opt"
 
 if [ ! -d khaleesi ]; then
-  if ! git clone -b v1.0 https://github.com/trozet/khaleesi.git; then
+  if ! git clone -b opnfv https://github.com/trozet/khaleesi.git; then
     printf '%s\n' 'bootstrap.sh: Unable to git clone khaleesi' >&2
     exit 1
   fi
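
Dropping the "-1*" version pins lets yum install whatever current package versions the repositories carry, and khaleesi is now cloned from the opnfv branch rather than the v1.0 branch. A small sanity check after bootstrap.sh finishes (sketch; exact versions will vary now that nothing is pinned):

    ansible --version
    (cd /opt/khaleesi && git rev-parse --abbrev-ref HEAD)   # expect: opnfv
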
diff --git a/foreman/ci/clean.sh b/foreman/ci/clean.sh
index f61ac93..1a16efd 100755 (executable)
@@ -5,7 +5,7 @@
 #
 #Uses Vagrant and VirtualBox
 #
-#Destroys Vagrant VM running in /tmp/bgs_vagrant
+#Destroys Vagrant VMs running in $vm_dir/
 #Shuts down all nodes found in Khaleesi settings
 #Removes hypervisor kernel modules (VirtualBox)
 
@@ -14,6 +14,8 @@ reset=`tput sgr0`
 blue=`tput setaf 4`
 red=`tput setaf 1`
 green=`tput setaf 2`
+
+vm_dir=/var/opt/opnfv
 ##END VARS
 
 ##FUNCTIONS
@@ -85,7 +87,7 @@ node_counter=0
 output=`grep bmc_pass $base_config | sed 's/\s*bmc_pass:\s*//'`
 for line in ${output} ; do
   bmc_pass[$node_counter]=$line
-  ((node_counter++)) 
+  ((node_counter++))
 done
 
 for mynode in `seq 0 $max_nodes`; do
@@ -106,35 +108,78 @@ else
   skip_vagrant=1
 fi
 
+###legacy VM location check
+###remove me later
+if [ -d /tmp/bgs_vagrant ]; then
+  cd /tmp/bgs_vagrant
+  vagrant destroy -f
+  rm -rf /tmp/bgs_vagrant
+fi
+
 ###destroy vagrant
 if [ $skip_vagrant -eq 0 ]; then
-  cd /tmp/bgs_vagrant
-  if vagrant destroy -f; then
-    echo "${blue}Successfully destroyed Foreman VM ${reset}"
+  if [ -d $vm_dir ]; then
+    ##all vm directories
+    for vm in $( ls $vm_dir ); do
+      cd $vm_dir/$vm
+      if vagrant destroy -f; then
+        echo "${blue}Successfully destroyed $vm Vagrant VM ${reset}"
+      else
+        echo "${red}Unable to destroy $vm Vagrant VM! Attempting to killall vagrant if process is hung ${reset}"
+        killall vagrant
+        echo "${blue}Checking if vagrant was already destroyed and no process is active...${reset}"
+        if ps axf | grep vagrant; then
+          echo "${red}Vagrant process still exists after kill...exiting ${reset}"
+          exit 1
+        else
+          echo "${blue}Vagrant process doesn't exist.  Moving on... ${reset}"
+        fi
+      fi
+
+      ##Vagrant boxes appear as VboxHeadless processes
+      ##try to gracefully destroy the VBox VM if it still exists
+      if vboxmanage list runningvms | grep $vm; then
+        echo "${red} $vm VBoxHeadless process still exists...Removing${reset}"
+        vbox_id=$(vboxmanage list runningvms | grep $vm | awk '{print $1}' | sed 's/"//g')
+        vboxmanage controlvm $vbox_id poweroff
+        if vboxmanage unregistervm --delete $vbox_id; then
+          echo "${blue}$vm VM is successfully deleted! ${reset}"
+        else
+          echo "${red} Unable to delete VM $vm ...Exiting ${reset}"
+          exit 1
+        fi
+      else
+        echo "${blue}$vm VM is successfully deleted! ${reset}"
+      fi
+    done
   else
-    echo "${red}Unable to destroy Foreman VM ${reset}"
-    echo "${blue}Checking if vagrant was already destroyed and no process is active...${reset}"
-    if ps axf | grep vagrant; then
-      echo "${red}Vagrant VM still exists...exiting ${reset}"
-      exit 1
-    else
-      echo "${blue}Vagrant process doesn't exist.  Moving on... ${reset}"
-    fi
+    echo "${blue}${vm_dir} doesn't exist, no VMs in OPNFV directory to destroy! ${reset}"
   fi
 
+  echo "${blue}Checking for any remaining virtual box processes...${reset}"
   ###kill virtualbox
-  echo "${blue}Killing VirtualBox ${reset}"
-  killall virtualbox
-  killall VBoxHeadless
+  if ps axf | grep virtualbox; then
+    echo "${blue}virtualbox processes are still running. Killing any remaining VirtualBox processes...${reset}"
+    killall virtualbox
+  fi
+
+  ###kill any leftover VMs (brute force)
+  if ps axf | grep VBoxHeadless; then
+    echo "${blue}VBoxHeadless processes are still running. Killing any remaining VBoxHeadless processes...${reset}"
+    killall VBoxHeadless
+  fi
 
   ###remove virtualbox
-  echo "${blue}Removing VirtualBox ${reset}"
+  echo "${blue}Removing VirtualBox... ${reset}"
   yum -y remove $vboxpkg
 
 else
-  echo "${blue}Skipping Vagrant destroy + Vbox Removal as VirtualBox package is already removed ${reset}"
+  echo "${blue}Skipping Vagrant destroy + VBox Removal as VirtualBox package is already removed ${reset}"
 fi
 
+###remove working vm directory
+echo "${blue}Removing working VM directory: $vm_dir ${reset}"
+rm -rf $vm_dir
 
 ###remove kernel modules
 echo "${blue}Removing kernel modules ${reset}"
diff --git a/foreman/ci/deploy.sh b/foreman/ci/deploy.sh
index 46ba80e..a05b3de 100755 (executable)
@@ -25,6 +25,11 @@ red=`tput setaf 1`
 green=`tput setaf 2`
 
 declare -A interface_arr
+declare -A controllers_ip_arr
+declare -A admin_ip_arr
+declare -A public_ip_arr
+
+vm_dir=/var/opt/opnfv
 ##END VARS
 
 ##FUNCTIONS
@@ -35,6 +40,28 @@ display_usage() {
   echo -e "\n   -no_parse : No variable parsing into config. Flag. \n"
   echo -e "\n   -base_config : Full path of settings file to parse. Optional.  Will provide a new base settings file rather than the default.  Example:  -base_config /opt/myinventory.yml \n"
   echo -e "\n   -virtual : Node virtualization instead of baremetal. Flag. \n"
+  echo -e "\n   -enable_virtual_dhcp : Run dhcp server instead of using static IPs.  Use this with -virtual only. \n"
+  echo -e "\n   -static_ip_range : static IP range to define when using virtual and when dhcp is not being used (default), must at least a 20 IP block.  Format: '192.168.1.1,192.168.1.20' \n"
+  echo -e "\n   -ping_site : site to use to verify IP connectivity from the VM when -virtual is used.  Format: -ping_site www.blah.com \n"
+  echo -e "\n   -floating_ip_count : number of IP address from the public range to be used for floating IP. Default is 20.\n"
+}
+
+##verify vm dir exists
+##params: none
+function verify_vm_dir {
+  if [ -d "$vm_dir" ]; then
+    echo -e "\n\n${red}ERROR: VM Directory: $vm_dir already exists.  Environment not clean.  Please use clean.sh.  Exiting${reset}\n\n"
+    exit 1
+  else
+    mkdir -p $vm_dir
+  fi
+
+  chmod 700 $vm_dir
+
+  if [ ! -d $vm_dir ]; then
+    echo -e "\n\n${red}ERROR: Unable to create VM Directory: $vm_dir  Exiting${reset}\n\n"
+    exit -1
+  fi
 }
 
 ##find ip of interface
@@ -51,6 +78,41 @@ function find_subnet {
   printf "%d.%d.%d.%d\n" "$((i1 & m1))" "$((i2 & m2))" "$((i3 & m3))" "$((i4 & m4))"
 }
 
+##verify subnet has at least n IPs
+##params: subnet mask, n IPs
+function verify_subnet_size {
+  IFS=. read -r i1 i2 i3 i4 <<< "$1"
+  num_ips_required=$2
+
+  ##this function assumes you would never need more than 254
+  ##we check here to make sure
+  if [ "$num_ips_required" -ge 254 ]; then
+    echo -e "\n\n${red}ERROR: allocating more than 254 IPs is unsupported...Exiting${reset}\n\n"
+    return 1
+  fi
+
+  ##we just return if 3rd octet is not 255
+  ##because we know the subnet is big enough
+  if [ "$i3" -ne 255 ]; then
+    return 0
+  elif [ $((254-$i4)) -ge "$num_ips_required" ]; then
+    return 0
+  else
+    echo -e "\n\n${red}ERROR: Subnet is too small${reset}\n\n"
+    return 1
+  fi
+}
+
+##finds last usable ip (broadcast minus 1) of a subnet from an IP and netmask
+## Warning: This function only works for IPv4 at the moment.
+##params: ip, netmask
+function find_last_ip_subnet {
+  IFS=. read -r i1 i2 i3 i4 <<< "$1"
+  IFS=. read -r m1 m2 m3 m4 <<< "$2"
+  IFS=. read -r s1 s2 s3 s4 <<< "$((i1 & m1)).$((i2 & m2)).$((i3 & m3)).$((i4 & m4))"
+  printf "%d.%d.%d.%d\n" "$((255 - $m1 + $s1))" "$((255 - $m2 + $s2))" "$((255 - $m3 + $s3))" "$((255 - $m4 + $s4 - 1))"
+}
+
 ##increments subnet by a value
 ##params: ip, value
 ##assumes low value
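
verify_subnet_size and find_last_ip_subnet operate purely on the dotted-quad octets, so they are easy to check by hand. A worked illustration, assuming the functions above are sourced into the current shell (masks and addresses are arbitrary examples):

    verify_subnet_size 255.255.255.224 25    # /27 mask: 254-224 = 30 usable, returns 0
    verify_subnet_size 255.255.255.240 25    # /28 mask: 254-240 = 14, prints the error, returns 1
    find_last_ip_subnet 192.168.1.10 255.255.255.0
    # -> 192.168.1.254  (network 192.168.1.0, broadcast .255, minus one)
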
@@ -87,6 +149,19 @@ function next_ip {
   echo $baseaddr.$lsv
 }
 
+##subtracts a value from an IP address
+##params: last ip, ip_count
+##assumes ip_count is less than the last octect of the address
+subtract_ip() {
+  IFS=. read -r i1 i2 i3 i4 <<< "$1"
+  ip_count=$2
+  if [ $i4 -lt $ip_count ]; then
+    echo -e "\n\n${red}ERROR: Can't subtract $ip_count from IP address $1  Exiting${reset}\n\n"
+    exit 1
+  fi
+  printf "%d.%d.%d.%d\n" "$i1" "$i2" "$i3" "$((i4 - $ip_count ))"
+}
+
 ##removes the network interface config from Vagrantfile
 ##params: interface
 ##assumes you are in the directory of Vagrantfile
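
subtract_ip only adjusts the final octet, which is why the comment above requires ip_count to be smaller than that octet. Worked examples, again assuming the function is sourced (addresses are illustrative):

    subtract_ip 192.168.1.254 20    # -> 192.168.1.234
    subtract_ip 192.168.1.10 20     # 10 < 20: prints the error and exits
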
@@ -178,12 +253,47 @@ parse_cmdline() {
                 virtual="TRUE"
                 shift 1
             ;;
+        -enable_virtual_dhcp)
+                enable_virtual_dhcp="TRUE"
+                shift 1
+            ;;
+        -static_ip_range)
+                static_ip_range=$2
+                shift 2
+            ;;
+        -ping_site)
+                ping_site=$2
+                shift 2
+            ;;
+        -floating_ip_count)
+                floating_ip_count=$2
+                shift 2
+            ;;
         *)
                 display_usage
                 exit 1
             ;;
     esac
   done
+
+  if [ ! -z "$enable_virtual_dhcp" ] && [ ! -z "$static_ip_range" ]; then
+    echo -e "\n\n${red}ERROR: Incorrect Usage.  Static IP range cannot be set when using DHCP!.  Exiting${reset}\n\n"
+    exit 1
+  fi
+
+  if [ -z "$virtual" ]; then
+    if [ ! -z "$enable_virtual_dhcp" ]; then
+      echo -e "\n\n${red}ERROR: Incorrect Usage.  enable_virtual_dhcp can only be set when using -virtual!.  Exiting${reset}\n\n"
+      exit 1
+    elif [ ! -z "$static_ip_range" ]; then
+      echo -e "\n\n${red}ERROR: Incorrect Usage.  static_ip_range can only be set when using -virtual!.  Exiting${reset}\n\n"
+      exit 1
+    fi
+  fi
+
+  if [ -z "$floating_ip_count" ]; then
+    floating_ip_count=20
+  fi
 }
 
 ##disable selinux
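
The argument checks above make -enable_virtual_dhcp and -static_ip_range mutually exclusive, require -virtual for either of them, and default floating_ip_count to 20. A typical static-IP virtual invocation might therefore look like this (sketch; the range, count and ping site are placeholder values):

    ./deploy.sh -virtual -static_ip_range '192.168.1.100,192.168.1.120' \
                -floating_ip_count 20 -ping_site www.google.com
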
@@ -253,7 +363,7 @@ EOM
 
 ##install Ansible using yum
 ##params: none
-##usage: install_anible()
+##usage: install_ansible()
 install_ansible() {
   if ! yum list installed | grep -i ansible; then
     if ! yum -y install ansible-1*; then
@@ -301,27 +411,31 @@ install_vagrant() {
 ##params: none
 ##usage: clean_tmp()
 clean_tmp() {
-  rm -rf /tmp/bgs_vagrant
+  rm -rf $vm_dir/foreman_vm
 }
 
-##clone bgs vagrant version 1.0 using git
+##clone genesis and move to node vm dir
 ##params: none
 ##usage: clone_bgs
 clone_bgs() {
   cd /tmp/
+  rm -rf /tmp/genesis/
 
-  ##will change this to be opnfv repo when commit is done
-  if ! git clone -b v1.0 https://github.com/trozet/bgs_vagrant.git; then
-    printf '%s\n' 'deploy.sh: Unable to clone vagrant repo' >&2
+  ##clone artifacts and move into foreman_vm dir
+  if ! GIT_SSL_NO_VERIFY=true git clone https://gerrit.opnfv.org/gerrit/genesis.git; then
+    printf '%s\n' 'deploy.sh: Unable to clone genesis repo' >&2
     exit 1
   fi
+
+  mv -f /tmp/genesis/foreman/ci $vm_dir/foreman_vm
+  rm -rf /tmp/genesis/
 }
 
-##validates the netork settings and update VagrantFile with network settings
+##validates the network settings and update VagrantFile with network settings
 ##params: none
 ##usage: configure_network()
 configure_network() {
-  cd /tmp/bgs_vagrant
+  cd $vm_dir/foreman_vm
 
   echo "${blue}Detecting network configuration...${reset}"
   ##detect host 1 or 3 interface configuration
@@ -333,52 +447,126 @@ configure_network() {
     exit 1
   fi
 
-  ##find number of interfaces with ip and substitute in VagrantFile
-  if_counter=0
-  for interface in ${output}; do
+  ##virtual we only find 1 interface
+  if [ $virtual ]; then
+    ##find interface with default gateway
+    this_default_gw=$(ip route | grep default | awk '{print $3}')
+    echo "${blue}Default Gateway: $this_default_gw ${reset}"
+    this_default_gw_interface=$(ip route get $this_default_gw | awk '{print $3}')
 
-    if [ "$if_counter" -ge 4 ]; then
-      break
-    fi
-    interface_ip=$(find_ip $interface)
+    ##find interface IP, make sure its valid
+    interface_ip=$(find_ip $this_default_gw_interface)
     if [ ! "$interface_ip" ]; then
-      continue
+      echo "${red}Interface ${this_default_gw_interface} does not have an IP: $interface_ip ! Exiting ${reset}"
+      exit 1
     fi
-    new_ip=$(next_usable_ip $interface_ip)
-    if [ ! "$new_ip" ]; then
-      continue
+
+    ##set variable info
+    if [ ! -z "$static_ip_range" ]; then
+      new_ip=$(echo $static_ip_range | cut -d , -f1)
+    else
+      new_ip=$(next_usable_ip $interface_ip)
+      if [ ! "$new_ip" ]; then
+        echo "${red} Cannot find next IP on interface ${this_default_gw_interface} new_ip: $new_ip ! Exiting ${reset}"
+        exit 1
+      fi
     fi
-    interface_arr[$interface]=$if_counter
-    interface_ip_arr[$if_counter]=$new_ip
+    interface=$this_default_gw_interface
+    public_interface=$interface
+    interface_arr[$interface]=2
+    interface_ip_arr[2]=$new_ip
     subnet_mask=$(find_netmask $interface)
-    if [ "$if_counter" -eq 1 ]; then
-      private_subnet_mask=$subnet_mask
-      private_short_subnet_mask=$(find_short_netmask $interface)
-    fi
-    if [ "$if_counter" -eq 2 ]; then
-      public_subnet_mask=$subnet_mask
-      public_short_subnet_mask=$(find_short_netmask $interface)
-    fi
-    if [ "$if_counter" -eq 3 ]; then
-      storage_subnet_mask=$subnet_mask
+    public_subnet_mask=$subnet_mask
+    public_short_subnet_mask=$(find_short_netmask $interface)
+
+    if ! verify_subnet_size $public_subnet_mask 25; then
+      echo "${red} Not enough IPs in public subnet: $interface_ip_arr[2] ${public_subnet_mask}.  Need at least 25 IPs.  Please resize subnet! Exiting ${reset}"
+      exit 1
     fi
-    sed -i 's/^.*eth_replace'"$if_counter"'.*$/  config.vm.network "public_network", ip: '\""$new_ip"\"', bridge: '\'"$interface"\'', netmask: '\""$subnet_mask"\"'/' Vagrantfile
-    ((if_counter++))
-  done
 
+    ##set that interface to be public
+    sed -i 's/^.*eth_replace2.*$/  config.vm.network "public_network", ip: '\""$new_ip"\"', bridge: '\'"$interface"\'', netmask: '\""$subnet_mask"\"'/' Vagrantfile
+    if_counter=1
+  else
+    ##find number of interfaces with ip and substitute in VagrantFile
+    if_counter=0
+    for interface in ${output}; do
+
+      if [ "$if_counter" -ge 4 ]; then
+        break
+      fi
+      interface_ip=$(find_ip $interface)
+      if [ ! "$interface_ip" ]; then
+        continue
+      fi
+      new_ip=$(next_usable_ip $interface_ip)
+      if [ ! "$new_ip" ]; then
+        continue
+      fi
+      interface_arr[$interface]=$if_counter
+      interface_ip_arr[$if_counter]=$new_ip
+      subnet_mask=$(find_netmask $interface)
+      if [ "$if_counter" -eq 0 ]; then
+        admin_subnet_mask=$subnet_mask
+        if ! verify_subnet_size $admin_subnet_mask 5; then
+          echo "${red} Not enough IPs in admin subnet: ${interface_ip_arr[$if_counter]} ${admin_subnet_mask}.  Need at least 5 IPs.  Please resize subnet! Exiting ${reset}"
+          exit 1
+        fi
+
+      elif [ "$if_counter" -eq 1 ]; then
+        private_subnet_mask=$subnet_mask
+        private_short_subnet_mask=$(find_short_netmask $interface)
+
+        if ! verify_subnet_size $private_subnet_mask 15; then
+          echo "${red} Not enough IPs in private subnet: ${interface_ip_arr[$if_counter]} ${private_subnet_mask}.  Need at least 15 IPs.  Please resize subnet! Exiting ${reset}"
+          exit 1
+        fi
+      elif [ "$if_counter" -eq 2 ]; then
+        public_subnet_mask=$subnet_mask
+        public_short_subnet_mask=$(find_short_netmask $interface)
+
+        if ! verify_subnet_size $public_subnet_mask 25; then
+          echo "${red} Not enough IPs in public subnet: ${interface_ip_arr[$if_counter]} ${public_subnet_mask}.  Need at least 25 IPs.  Please resize subnet! Exiting ${reset}"
+          exit 1
+        fi
+      elif [ "$if_counter" -eq 3 ]; then
+        storage_subnet_mask=$subnet_mask
+
+        if ! verify_subnet_size $storage_subnet_mask 10; then
+          echo "${red} Not enough IPs in storage subnet: ${interface_ip_arr[$if_counter]} ${storage_subnet_mask}.  Need at least 10 IPs.  Please resize subnet! Exiting ${reset}"
+          exit 1
+        fi
+      else
+        echo "${red}ERROR: interface counter outside valid range of 0 to 3: $if_counter ! ${reset}"
+        exit 1
+      fi
+      sed -i 's/^.*eth_replace'"$if_counter"'.*$/  config.vm.network "public_network", ip: '\""$new_ip"\"', bridge: '\'"$interface"\'', netmask: '\""$subnet_mask"\"'/' Vagrantfile
+      ((if_counter++))
+    done
+  fi
   ##now remove interface config in Vagrantfile for 1 node
   ##if 1, 3, or 4 interfaces set deployment type
   ##if 2 interfaces remove 2nd interface and set deployment type
-  if [ "$if_counter" == 1 ]; then
-    deployment_type="single_network"
-    remove_vagrant_network eth_replace1
-    remove_vagrant_network eth_replace2
-    remove_vagrant_network eth_replace3
-  elif [ "$if_counter" == 2 ]; then
-    deployment_type="single_network"
-    second_interface=`echo $output | awk '{print $2}'`
-    remove_vagrant_network $second_interface
-    remove_vagrant_network eth_replace2
+  if [[ "$if_counter" == 1 || "$if_counter" == 2 ]]; then
+    if [ $virtual ]; then
+      deployment_type="single_network"
+      echo "${blue}Single network detected for Virtual deployment...converting to three_network with internal networks! ${reset}"
+      private_internal_ip=155.1.2.2
+      admin_internal_ip=156.1.2.2
+      private_subnet_mask=255.255.255.0
+      private_short_subnet_mask=/24
+      interface_ip_arr[1]=$private_internal_ip
+      interface_ip_arr[0]=$admin_internal_ip
+      admin_subnet_mask=255.255.255.0
+      admin_short_subnet_mask=/24
+      sed -i 's/^.*eth_replace1.*$/  config.vm.network "private_network", virtualbox__intnet: "my_private_network", ip: '\""$private_internal_ip"\"', netmask: '\""$private_subnet_mask"\"'/' Vagrantfile
+      sed -i 's/^.*eth_replace0.*$/  config.vm.network "private_network", virtualbox__intnet: "my_admin_network", ip: '\""$admin_internal_ip"\"', netmask: '\""$private_subnet_mask"\"'/' Vagrantfile
+      remove_vagrant_network eth_replace3
+      deployment_type=three_network
+    else
+       echo "${blue}Single network or 2 network detected for baremetal deployment.  This is unsupported! Exiting. ${reset}"
+       exit 1
+    fi
   elif [ "$if_counter" == 3 ]; then
     deployment_type="three_network"
     remove_vagrant_network eth_replace3
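
For -virtual deployments the script no longer walks every interface: the interface that owns the default route becomes the single public interface, and the private and admin networks are turned into VirtualBox internal networks further down. The two commands that pick the public interface can be tried standalone (sketch; addresses and interface name are example output):

    ip route | grep default | awk '{print $3}'    # -> 192.168.1.1  (default gateway)
    ip route get 192.168.1.1 | awk '{print $3}'   # -> eth0         (interface that reaches it)
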
@@ -388,6 +576,28 @@ configure_network() {
 
   echo "${blue}Network detected: ${deployment_type}! ${reset}"
 
+  if [ $virtual ]; then
+    if [ -z "$enable_virtual_dhcp" ]; then
+      sed -i 's/^.*disable_dhcp_flag =.*$/  disable_dhcp_flag = true/' Vagrantfile
+      if [ $static_ip_range ]; then
+        ##verify static range is at least 20 IPs
+        static_ip_range_begin=$(echo $static_ip_range | cut -d , -f1)
+        static_ip_range_end=$(echo $static_ip_range | cut -d , -f2)
+        ##verify range is at least 20 ips
+        ##assumes less than 255 range pool
+        begin_octet=$(echo $static_ip_range_begin | cut -d . -f4)
+        end_octet=$(echo $static_ip_range_end | cut -d . -f4)
+        ip_count=$((end_octet-begin_octet+1))
+        if [ "$ip_count" -lt 20 ]; then
+          echo "${red}Static range is less than 20 ips: ${ip_count}, exiting  ${reset}"
+          exit 1
+        else
+          echo "${blue}Static IP range is size $ip_count ${reset}"
+        fi
+      fi
+    fi
+  fi
+
   if route | grep default; then
     echo "${blue}Default Gateway Detected ${reset}"
     host_default_gw=$(ip route | grep default | awk '{print $3}')
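
The static range size check compares only the last octets, so the range is expected to stay inside one /24, as the comment above notes. Two quick examples of the arithmetic:

    # '192.168.1.100,192.168.1.120'  ->  120 - 100 + 1 = 21 IPs  -> accepted (>= 20)
    # '192.168.1.100,192.168.1.110'  ->  110 - 100 + 1 = 11 IPs  -> rejected as too small
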
@@ -476,17 +686,56 @@ configure_network() {
       ##required for controllers_ip_array global param
       next_private_ip=${interface_ip_arr[1]}
       type=_private
+      control_count=0
       for node in controller1 controller2 controller3; do
         next_private_ip=$(next_usable_ip $next_private_ip)
         if [ ! "$next_private_ip" ]; then
-           printf '%s\n' 'deploy.sh: Unable to find next ip for private network for control nodes' >&2
-           exit 1
+          printf '%s\n' 'deploy.sh: Unable to find next ip for private network for control nodes' >&2
+          exit 1
         fi
         sed -i 's/'"$node$type"'/'"$next_private_ip"'/g' opnfv_ksgen_settings.yml
         controller_ip_array=$controller_ip_array$next_private_ip,
+        controllers_ip_arr[$control_count]=$next_private_ip
+        ((control_count++))
       done
 
-      ##replace global param for contollers_ip_array
+      next_public_ip=${interface_ip_arr[2]}
+      foreman_ip=$next_public_ip
+
+      ##if no dhcp, find all the Admin IPs for nodes in advance
+      if [ $virtual ]; then
+        if [ -z "$enable_virtual_dhcp" ]; then
+          sed -i 's/^.*no_dhcp:.*$/no_dhcp: true/' opnfv_ksgen_settings.yml
+          nodes=`sed -nr '/nodes:/{:start /workaround/!{N;b start};//p}' opnfv_ksgen_settings.yml | sed -n '/^  [A-Za-z0-9]\+:$/p' | sed 's/\s*//g' | sed 's/://g'`
+          compute_nodes=`echo $nodes | tr " " "\n" | grep -v controller | tr "\n" " "`
+          controller_nodes=`echo $nodes | tr " " "\n" | grep controller | tr "\n" " "`
+          nodes=${controller_nodes}${compute_nodes}
+          next_admin_ip=${interface_ip_arr[0]}
+          type=_admin
+          for node in ${nodes}; do
+            next_admin_ip=$(next_ip $next_admin_ip)
+            if [ ! "$next_admin_ip" ]; then
+              echo "${red} Unable to find an unused IP in admin_network for $node ! ${reset}"
+              exit 1
+            else
+              admin_ip_arr[$node]=$next_admin_ip
+              sed -i 's/'"$node$type"'/'"$next_admin_ip"'/g' opnfv_ksgen_settings.yml
+            fi
+          done
+
+          ##allocate node public IPs
+          for node in ${nodes}; do
+            next_public_ip=$(next_usable_ip $next_public_ip)
+            if [ ! "$next_public_ip" ]; then
+              echo "${red} Unable to find an unused IP in admin_network for $node ! ${reset}"
+              exit 1
+            else
+              public_ip_arr[$node]=$next_public_ip
+            fi
+          done
+        fi
+      fi
+      ##replace global param for controllers_ip_array
       controller_ip_array=${controller_ip_array%?}
       sed -i 's/^.*controllers_ip_array:.*$/  controllers_ip_array: '"$controller_ip_array"'/' opnfv_ksgen_settings.yml
 
@@ -495,28 +744,49 @@ configure_network() {
       ##therefore we increment the ip by 10 to make sure we have a safe buffer
       next_private_ip=$(increment_ip $next_private_ip 10)
 
-      grep -E '*private_vip|loadbalancer_vip|db_vip|amqp_vip|*admin_vip' opnfv_ksgen_settings.yml | while read -r line ; do
-        sed -i 's/^.*'"$line"'.*$/  '"$line $next_private_ip"'/' opnfv_ksgen_settings.yml
-        next_private_ip=$(next_usable_ip $next_private_ip)
-        if [ ! "$next_private_ip" ]; then
-          printf '%s\n' 'deploy.sh: Unable to find next ip for private network for vip replacement' >&2
-          exit 1
+      private_output=$(grep -E '*private_vip|loadbalancer_vip|db_vip|amqp_vip|*admin_vip' opnfv_ksgen_settings.yml)
+      if [ ! -z "$private_output" ]; then
+        while read -r line; do
+          sed -i 's/^.*'"$line"'.*$/  '"$line $next_private_ip"'/' opnfv_ksgen_settings.yml
+          next_private_ip=$(next_usable_ip $next_private_ip)
+          if [ ! "$next_private_ip" ]; then
+            printf '%s\n' 'deploy.sh: Unable to find next ip for private network for vip replacement' >&2
+            exit 1
           fi
-      done
+        done <<< "$private_output"
+      fi
+
+      ##replace odl_control_ip (non-HA only)
+      odl_control_ip=${controllers_ip_arr[0]}
+      sed -i 's/^.*odl_control_ip:.*$/  odl_control_ip: '"$odl_control_ip"'/' opnfv_ksgen_settings.yml
+
+      ##replace controller_ip (non-HA only)
+      sed -i 's/^.*controller_ip:.*$/  controller_ip: '"$odl_control_ip"'/' opnfv_ksgen_settings.yml
 
       ##replace foreman site
-      next_public_ip=${interface_ip_arr[2]}
-      sed -i 's/^.*foreman_url:.*$/  foreman_url:'" https:\/\/$next_public_ip"'\/api\/v2\//' opnfv_ksgen_settings.yml
+      sed -i 's/^.*foreman_url:.*$/  foreman_url:'" https:\/\/$foreman_ip"'\/api\/v2\//' opnfv_ksgen_settings.yml
       ##replace public vips
-      next_public_ip=$(increment_ip $next_public_ip 10)
-      grep -E '*public_vip' opnfv_ksgen_settings.yml | while read -r line ; do
-        sed -i 's/^.*'"$line"'.*$/  '"$line $next_public_ip"'/' opnfv_ksgen_settings.yml
+      ##no need to do this if no dhcp
+      if [[ -z "$enable_virtual_dhcp" && ! -z "$virtual" ]]; then
         next_public_ip=$(next_usable_ip $next_public_ip)
-        if [ ! "$next_public_ip" ]; then
-          printf '%s\n' 'deploy.sh: Unable to find next ip for public network for vip replcement' >&2
-          exit 1
-        fi
-      done
+      else
+        next_public_ip=$(increment_ip $next_public_ip 10)
+      fi
+
+      public_output=$(grep -E '*public_vip' opnfv_ksgen_settings.yml)
+      if [ ! -z "$public_output" ]; then
+        while read -r line; do
+          if echo $line | grep horizon_public_vip; then
+            horizon_public_vip=$next_public_ip
+          fi
+          sed -i 's/^.*'"$line"'.*$/  '"$line $next_public_ip"'/' opnfv_ksgen_settings.yml
+          next_public_ip=$(next_usable_ip $next_public_ip)
+          if [ ! "$next_public_ip" ]; then
+            printf '%s\n' 'deploy.sh: Unable to find next ip for public network for vip replcement' >&2
+            exit 1
+          fi
+        done <<< "$public_output"
+      fi
 
       ##replace public_network param
       public_subnet=$(find_subnet $next_public_ip $public_subnet_mask)
@@ -553,11 +823,28 @@ configure_network() {
 
       ##we have to define an allocation range of the public subnet to give
       ##to neutron to use as floating IPs
-      ##we should control this subnet, so this range should work .150-200
-      ##but generally this is a bad idea and we are assuming at least a /24 subnet here
+      ##if static ip range, then we take the difference of the end range and current ip
+      ## to be the allocation pool
+      ##if not static ip, we will use the last 20 IP from the subnet
+      ## note that this is not a really good idea because the subnet must be at least a /27 for this to work...
       public_subnet=$(find_subnet $next_public_ip $public_subnet_mask)
-      public_allocation_start=$(increment_subnet $public_subnet 150)
-      public_allocation_end=$(increment_subnet $public_subnet 200)
+      if [ ! -z "$static_ip_range" ]; then
+        begin_octet=$(echo $next_public_ip | cut -d . -f4)
+        end_octet=$(echo $static_ip_range_end | cut -d . -f4)
+        ip_diff=$((end_octet-begin_octet))
+        if [ $ip_diff -le 0 ]; then
+          echo "${red}ip range left for floating range is less than or equal to 0! $ipdiff ${reset}"
+          exit 1
+        else
+          public_allocation_start=$(next_ip $next_public_ip)
+          public_allocation_end=$static_ip_range_end
+        fi
+      else
+        last_ip_subnet=$(find_last_ip_subnet $next_public_ip $public_subnet_mask)
+        public_allocation_start=$(subtract_ip $last_ip_subnet $floating_ip_count )
+        public_allocation_end=${last_ip_subnet}
+      fi
+      echo "${blue}Neutron Floating IP range: $public_allocation_start to $public_allocation_end ${reset}"
 
       sed -i 's/^.*public_allocation_start:.*$/  public_allocation_start:'" $public_allocation_start"'/' opnfv_ksgen_settings.yml
       sed -i 's/^.*public_allocation_end:.*$/  public_allocation_end:'" $public_allocation_end"'/' opnfv_ksgen_settings.yml
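
When no static range is given, the floating pool is carved from the top of the public subnet: from the last usable address minus floating_ip_count up to the last usable address. Worked example with the default count of 20 (illustrative addresses):

    # next_public_ip=192.168.1.37, public_subnet_mask=255.255.255.0
    # find_last_ip_subnet 192.168.1.37 255.255.255.0  -> 192.168.1.254
    # subtract_ip 192.168.1.254 20                    -> 192.168.1.234
    # => public_allocation_start=192.168.1.234, public_allocation_end=192.168.1.254
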
@@ -582,7 +869,7 @@ configure_virtual() {
   fi
 }
 
-##Starts for forement VM with Vagrant
+##Starts Foreman VM with Vagrant
 ##params: none
 ##usage: start_vagrant()
 start_foreman() {
@@ -590,18 +877,18 @@ start_foreman() {
 
   ##stand up vagrant
   if ! vagrant up; then
-    printf '%s\n' 'deploy.sh: Unable to start vagrant' >&2
+    printf '%s\n' 'deploy.sh: Unable to complete Foreman VM install' >&2
     exit 1
   else
     echo "${blue}Foreman VM is up! ${reset}"
   fi
 }
 
-##start the VM if this is a virtual installaion
+##start the VM if this is a virtual installation
 ##this function does nothing if baremetal servers are being used
 ##params: none
 ##usage: start_virtual_nodes()
-start_virutal_nodes() {
+start_virtual_nodes() {
   if [ $virtual ]; then
 
     ##Bring up VM nodes
@@ -613,25 +900,30 @@ start_virutal_nodes() {
     compute_nodes=`echo $nodes | tr " " "\n" | grep -v controller | tr "\n" " "`
     controller_nodes=`echo $nodes | tr " " "\n" | grep controller | tr "\n" " "`
     nodes=${controller_nodes}${compute_nodes}
+    controller_count=0
+    compute_wait_completed=false
 
     for node in ${nodes}; do
-      cd /tmp
+      cd /tmp/
 
       ##remove VM nodes incase it wasn't cleaned up
-      rm -rf /tmp/$node
+      rm -rf $vm_dir/$node
+      rm -rf /tmp/genesis/
 
-      ##clone bgs vagrant
-      ##will change this to be opnfv repo when commit is done
-      if ! git clone -b v1.0 https://github.com/trozet/bgs_vagrant.git $node; then
+      ##clone genesis and move into node folder
+      if ! GIT_SSL_NO_VERIFY=true git clone https://gerrit.opnfv.org/gerrit/genesis.git; then
         printf '%s\n' 'deploy.sh: Unable to clone vagrant repo' >&2
         exit 1
       fi
 
-      cd $node
+      mv -f /tmp/genesis/foreman/ci $vm_dir/$node
+      rm -rf /tmp/genesis/
+
+      cd $vm_dir/$node
 
       if [ $base_config ]; then
         if ! cp -f $base_config opnfv_ksgen_settings.yml; then
-          echo "{red}ERROR: Unable to copy $base_config to opnfv_ksgen_settings.yml${reset}"
+          echo "${red}ERROR: Unable to copy $base_config to opnfv_ksgen_settings.yml${reset}"
           exit 1
         fi
       fi
@@ -642,6 +934,13 @@ start_virutal_nodes() {
       node_type=config_nodes_${node}_type
       node_type=$(eval echo \$$node_type)
 
+      ##trozet test make compute nodes wait 20 minutes
+      if [ "$compute_wait_completed" = false ] && [ "$node_type" != "controller" ]; then
+        echo "${blue}Waiting 20 minutes for Control nodes to install before continuing with Compute nodes..."
+        compute_wait_completed=true
+        sleep 1400
+      fi
+
       ##find number of interfaces with ip and substitute in VagrantFile
       output=`ifconfig | grep -E "^[a-zA-Z0-9]+:"| grep -Ev "lo|tun|virbr|vboxnet" | awk '{print $1}' | sed 's/://'`
 
@@ -650,11 +949,14 @@ start_virutal_nodes() {
         exit 1
       fi
 
-
       if_counter=0
       for interface in ${output}; do
 
-        if [ "$if_counter" -ge 4 ]; then
+        if [ -z "$enable_virtual_dhcp" ]; then
+          if [ "$if_counter" -ge 1 ]; then
+            break
+          fi
+        elif [ "$if_counter" -ge 4 ]; then
           break
         fi
         interface_ip=$(find_ip $interface)
@@ -690,30 +992,66 @@ start_virutal_nodes() {
             mac_addr=$(echo $mac_addr | sed 's/:\|-//g')
             ;;
         esac
-          sed -i 's/^.*eth_replace'"$if_counter"'.*$/  config.vm.network "public_network", bridge: '\'"$interface"\'', :mac => '\""$mac_addr"\"'/' Vagrantfile
-          ((if_counter++))
+        this_admin_ip=${admin_ip_arr[$node]}
+        sed -i 's/^.*eth_replace'"$if_counter"'.*$/  config.vm.network "private_network", virtualbox__intnet: "my_admin_network", ip: '\""$this_admin_ip"\"', netmask: '\""$admin_subnet_mask"\"', :mac => '\""$mac_addr"\"'/' Vagrantfile
+        ((if_counter++))
       done
-
       ##now remove interface config in Vagrantfile for 1 node
       ##if 1, 3, or 4 interfaces set deployment type
       ##if 2 interfaces remove 2nd interface and set deployment type
-      if [ "$if_counter" == 1 ]; then
+      if [[ "$if_counter" == 1 || "$if_counter" == 2 ]]; then
         deployment_type="single_network"
-        remove_vagrant_network eth_replace1
-        remove_vagrant_network eth_replace2
+        if [ "$node_type" == "controller" ]; then
+            mac_string=config_nodes_${node}_private_mac
+            mac_addr=$(eval echo \$$mac_string)
+            if [ -z "$mac_addr" ]; then
+              echo "${red} Unable to find private_mac for $node! ${reset}"
+              exit 1
+            fi
+        else
+            ##generate random mac
+            mac_addr=$(echo -n 00-60-2F; dd bs=1 count=3 if=/dev/random 2>/dev/null |hexdump -v -e '/1 "-%02X"')
+        fi
+        mac_addr=$(echo $mac_addr | sed 's/:\|-//g')
+        if [ "$node_type" == "controller" ]; then
+          new_node_ip=${controllers_ip_arr[$controller_count]}
+          if [ ! "$new_node_ip" ]; then
+            echo "{red}ERROR: Empty node ip for controller $controller_count ${reset}"
+            exit 1
+          fi
+          ((controller_count++))
+        else
+          next_private_ip=$(next_ip $next_private_ip)
+          if [ ! "$next_private_ip" ]; then
+            echo "{red}ERROR: Could not find private ip for $node ${reset}"
+            exit 1
+          fi
+          new_node_ip=$next_private_ip
+        fi
+        sed -i 's/^.*eth_replace1.*$/  config.vm.network "private_network", virtualbox__intnet: "my_private_network", :mac => '\""$mac_addr"\"', ip: '\""$new_node_ip"\"', netmask: '\""$private_subnet_mask"\"'/' Vagrantfile
+        ##replace host_ip in vm_nodes_provision with private ip
+        sed -i 's/^host_ip=REPLACE/host_ip='$new_node_ip'/' vm_nodes_provision.sh
+        ##replace ping site
+        if [ ! -z "$ping_site" ]; then
+          sed -i 's/www.google.com/'$ping_site'/' vm_nodes_provision.sh
+        fi
+        ##find public ip info
+        mac_addr=$(echo -n 00-60-2F; dd bs=1 count=3 if=/dev/random 2>/dev/null |hexdump -v -e '/1 "-%02X"')
+        mac_addr=$(echo $mac_addr | sed 's/:\|-//g')
+        this_public_ip=${public_ip_arr[$node]}
+
+        if [ -z "$enable_virtual_dhcp" ]; then
+          sed -i 's/^.*eth_replace2.*$/  config.vm.network "public_network", bridge: '\'"$public_interface"\'', :mac => '\""$mac_addr"\"', ip: '\""$this_public_ip"\"', netmask: '\""$public_subnet_mask"\"'/' Vagrantfile
+        else
+          sed -i 's/^.*eth_replace2.*$/  config.vm.network "public_network", bridge: '\'"$public_interface"\'', :mac => '\""$mac_addr"\"'/' Vagrantfile
+        fi
         remove_vagrant_network eth_replace3
-      elif [ "$if_counter" == 2 ]; then
-        deployment_type="single_network"
-        second_interface=`echo $output | awk '{print $2}'`
-        remove_vagrant_network $second_interface
-        remove_vagrant_network eth_replace2
       elif [ "$if_counter" == 3 ]; then
         deployment_type="three_network"
         remove_vagrant_network eth_replace3
       else
         deployment_type="multi_network"
       fi
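+      ##deployment_type mapping (per the logic above): 1-2 usable interfaces -> single_network,
+      ##3 -> three_network, otherwise multi_network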
-
       ##modify provisioning to do puppet install, config, and foreman check-in
       ##substitute host_name and dns_server in the provisioning script
       host_string=config_nodes_${node}_hostname
@@ -721,19 +1059,17 @@ start_virutal_nodes() {
       sed -i 's/^host_name=REPLACE/host_name='$host_name'/' vm_nodes_provision.sh
       ##dns server should be the foreman server
       sed -i 's/^dns_server=REPLACE/dns_server='${interface_ip_arr[0]}'/' vm_nodes_provision.sh
-
       ## remove bootstrap and NAT provisioning
       sed -i '/nat_setup.sh/d' Vagrantfile
       sed -i 's/bootstrap.sh/vm_nodes_provision.sh/' Vagrantfile
-
       ## modify default_gw to be node_default_gw
       sed -i 's/^.*default_gw =.*$/  default_gw = '\""$node_default_gw"\"'/' Vagrantfile
-
       ## modify VM memory to be 4gig
-      sed -i 's/^.*vb.memory =.*$/     vb.memory = 4096/' Vagrantfile
-
+      ##if node type is controller
+      if [ "$node_type" == "controller" ]; then
+        sed -i 's/^.*vb.memory =.*$/     vb.memory = 4096/' Vagrantfile
+      fi
       echo "${blue}Starting Vagrant Node $node! ${reset}"
-
       ##stand up vagrant
       if ! vagrant up; then
         echo "${red} Unable to start $node ${reset}"
@@ -741,11 +1077,33 @@ start_virutal_nodes() {
       else
         echo "${blue} $node VM is up! ${reset}"
       fi
-
     done
-
     echo "${blue} All VMs are UP! ${reset}"
-
+    echo "${blue} Waiting for puppet to complete on the nodes... ${reset}"
+    ##check puppet is complete
+    ##ssh into foreman server, run check to verify puppet is complete
+    pushd $vm_dir/foreman_vm
+    if ! vagrant ssh -c "/opt/khaleesi/run.sh --no-logs --use /vagrant/opnfv_ksgen_settings.yml /opt/khaleesi/playbooks/validate_opnfv-vm.yml"; then
+      echo "${red} Failed to validate puppet completion on nodes ${reset}"
+      exit 1
+    else
+      echo "{$blue} Puppet complete on all nodes! ${reset}"
+    fi
+    popd
+    ##add routes back to nodes
+    for node in ${nodes}; do
+      pushd $vm_dir/$node
+      if ! vagrant ssh -c "route | grep default | grep $this_default_gw"; then
+        echo "${blue} Adding public route back to $node! ${reset}"
+        vagrant ssh -c "route add default gw $this_default_gw"
+      fi
+      popd
+    done
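+    ##note: the default route on any node can also be spot-checked by hand, e.g. for a
+    ##hypothetical node "compute": cd $vm_dir/compute && vagrant ssh -c "ip route show default"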
+    if [ ! -z "$horizon_public_vip" ]; then
+      echo "${blue} Virtual deployment SUCCESS!! Foreman URL:  http://${foreman_ip}, Horizon URL: http://${horizon_public_vip} ${reset}"
+    else
+      echo "${blue} Virtual deployment SUCCESS!! Foreman URL:  http://${foreman_ip}, Horizon URL: http://${odl_control_ip} ${reset}"
+    fi
   fi
 }
 
@@ -759,11 +1117,12 @@ main() {
   install_ansible
   install_vagrant
   clean_tmp
+  verify_vm_dir
   clone_bgs
   configure_network
   configure_virtual
   start_foreman
-  start_virutal_nodes
+  start_virtual_nodes
 }
 
 main "$@"
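A minimal sketch of invoking the updated script for a virtual deployment, assuming the
$virtual and $base_config variables used above are populated by -virtual and -base_config
command-line flags defined earlier in deploy.sh (not shown in this hunk):

  ./deploy.sh -virtual -base_config /path/to/opnfv_ksgen_settings.yml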
index 72935c9..2c146a0 100644 (file)
@@ -105,9 +105,9 @@ nodes:
     type: compute
     host_type: baremetal
     hostgroup: Compute
-    mac_address: "00:25:b5:a0:00:5e"
-    bmc_ip: 172.30.8.74
-    bmc_mac: "74:a2:e6:a4:14:9c"
+    mac_address: "00:25:B5:A0:00:2A"
+    bmc_ip: 172.30.8.75
+    bmc_mac: "a8:9d:21:c9:8b:56"
     bmc_user: admin
     bmc_pass: octopus
     ansible_ssh_pass: "Op3nStack"
@@ -125,9 +125,9 @@ nodes:
     type: compute
     host_type: baremetal
     hostgroup: Compute
-    mac_address: "00:25:b5:a0:00:3e"
-    bmc_ip: 172.30.8.73
-    bmc_mac: "a8:9d:21:a0:15:9c"
+    mac_address: "00:25:B5:A0:00:3A"
+    bmc_ip: 172.30.8.65
+    bmc_mac: "a8:9d:21:c9:4d:26"
     bmc_user: admin
     bmc_pass: octopus
     ansible_ssh_pass: "Op3nStack"
@@ -145,13 +145,13 @@ nodes:
     type: controller
     host_type: baremetal
     hostgroup: Controller_Network_ODL
-    mac_address: "00:25:b5:a0:00:af"
-    bmc_ip: 172.30.8.66
-    bmc_mac: "a8:9d:21:c9:8b:56"
+    mac_address: "00:25:B5:A0:00:4A"
+    bmc_ip: 172.30.8.74
+    bmc_mac: "a8:9d:21:c9:3a:92"
     bmc_user: admin
     bmc_pass: octopus
     private_ip: controller1_private
-    private_mac: "00:25:b5:b0:00:1f"
+    private_mac: "00:25:B5:A0:00:4B"
     ansible_ssh_pass: "Op3nStack"
     admin_password: "octopus"
     groups:
@@ -167,13 +167,13 @@ nodes:
     type: controller
     host_type: baremetal
     hostgroup: Controller_Network
-    mac_address: "00:25:b5:a0:00:9e"
-    bmc_ip: 172.30.8.75
-    bmc_mac: "a8:9d:21:c9:4d:26"
+    mac_address: "00:25:B5:A0:00:5A"
+    bmc_ip: 172.30.8.73
+    bmc_mac: "74:a2:e6:a4:14:9c"
     bmc_user: admin
     bmc_pass: octopus
     private_ip: controller2_private
-    private_mac: "00:25:b5:b0:00:de"
+    private_mac: "00:25:B5:A0:00:5B"
     ansible_ssh_pass: "Op3nStack"
     admin_password: "octopus"
     groups:
@@ -189,13 +189,13 @@ nodes:
     type: controller
     host_type: baremetal
     hostgroup: Controller_Network
-    mac_address: "00:25:b5:a0:00:7e"
-    bmc_ip: 172.30.8.65
-    bmc_mac: "a8:9d:21:c9:3a:92"
+    mac_address: "00:25:B5:A0:00:6A"
+    bmc_ip: 172.30.8.72
+    bmc_mac: "a8:9d:21:a0:15:9c"
     bmc_user: admin
     bmc_pass: octopus
     private_ip: controller3_private
-    private_mac: "00:25:b5:b0:00:be"
+    private_mac: "00:25:B5:A0:00:6B"
     ansible_ssh_pass: "Op3nStack"
     admin_password: "octopus"
     groups:
index 21840dd..b41a41b 100644 (file)
@@ -44,6 +44,7 @@ global_params:
   deployment_type:
 network_type: multi_network
 default_gw:
+no_dhcp: false
 foreman:
   seed_values:
     - { name: heat_cfn, oldvalue: true, newvalue: false }
@@ -110,6 +111,7 @@ nodes:
     bmc_mac: "10:23:45:67:88:AB"
     bmc_user: root
     bmc_pass: root
+    admin_ip: compute_admin
     ansible_ssh_pass: "Op3nStack"
     admin_password: ""
     groups:
@@ -130,6 +132,7 @@ nodes:
     bmc_mac: "10:23:45:67:88:AC"
     bmc_user: root
     bmc_pass: root
+    admin_ip: controller1_admin
     private_ip: controller1_private
     private_mac: "10:23:45:67:87:AC"
     ansible_ssh_pass: "Op3nStack"
@@ -152,6 +155,7 @@ nodes:
     bmc_mac: "10:23:45:67:88:AD"
     bmc_user: root
     bmc_pass: root
+    admin_ip: controller2_admin
     private_ip: controller2_private
     private_mac: "10:23:45:67:87:AD"
     ansible_ssh_pass: "Op3nStack"
@@ -174,6 +178,7 @@ nodes:
     bmc_mac: "10:23:45:67:88:AE"
     bmc_user: root
     bmc_pass: root
+    admin_ip: controller3_admin
     private_ip: controller3_private
     private_mac: "10:23:45:67:87:AE"
     ansible_ssh_pass: "Op3nStack"
diff --git a/foreman/ci/opnfv_ksgen_settings_no_HA.yml b/foreman/ci/opnfv_ksgen_settings_no_HA.yml
new file mode 100644 (file)
index 0000000..79db257
--- /dev/null
@@ -0,0 +1,264 @@
+global_params:
+  admin_email: opnfv@opnfv.com
+  ha_flag: "false"
+  odl_flag: "true"
+  odl_control_ip:
+  private_network:
+  storage_network:
+  public_network:
+  private_subnet:
+  deployment_type:
+  controller_ip:
+network_type: multi_network
+default_gw:
+no_dhcp: false
+foreman:
+  seed_values:
+    - { name: heat_cfn, oldvalue: true, newvalue: false }
+workaround_puppet_version_lock: false
+opm_branch: master
+installer:
+  name: puppet
+  short_name: pupt
+  network:
+    auto_assign_floating_ip: false
+    variant:
+      short_name: m2vx
+    plugin:
+      name: neutron
+workaround_openstack_packstack_rpm: false
+tempest:
+  repo:
+    Fedora:
+      '19': http://REPLACE_ME/~REPLACE_ME/openstack-tempest-icehouse/fedora-19/
+      '20': http://REPLACE_ME/~REPLACE_ME/openstack-tempest-icehouse/fedora-20/
+    RedHat:
+       '7.0': https://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-7/
+  use_virtual_env: false
+  public_allocation_end: 10.2.84.71
+  skip:
+    files: null
+    tests: null
+  public_allocation_start: 10.2.84.51
+  physnet: physnet1
+  use_custom_repo: false
+  public_subnet_cidr: 10.2.84.0/24
+  public_subnet_gateway: 10.2.84.1
+  additional_default_settings:
+  - section: compute
+    option: flavor_ref
+    value: 1
+  cirros_image_file: cirros-0.3.1-x86_64-disk.img
+  setup_method: tempest/rpm
+  test_name: all
+  rdo:
+     version: juno
+     rpm: http://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm
+  rpm:
+    version: 20141201
+  dir: ~{{ nodes.tempest.remote_user }}/tempest-dir
+tmp:
+  node_prefix: '{{ node.prefix | reject("none") | join("-") }}-'
+  anchors:
+  - https://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm
+  - http://repos.fedorapeople.org/repos/openstack/openstack-juno/
+opm_repo: https://github.com/redhat-openstack/openstack-puppet-modules.git
+workaround_vif_plugging: false
+openstack_packstack_rpm: http://REPLACE_ME/brewroot/packages/openstack-puppet-modules/2013.2/9.el6ost/noarch/openstack-puppet-modules-2013.2-9.el6ost.noarch.rpm
+nodes:
+  compute:
+    name: oscompute11.opnfv.com
+    hostname: oscompute11.opnfv.com
+    short_name: oscompute11
+    type: compute
+    host_type: baremetal
+    hostgroup: Compute
+    mac_address: "10:23:45:67:89:AB"
+    bmc_ip: 10.4.17.2
+    bmc_mac: "10:23:45:67:88:AB"
+    bmc_user: root
+    bmc_pass: root
+    admin_ip: compute_admin
+    ansible_ssh_pass: "Op3nStack"
+    admin_password: ""
+    groups:
+    - compute
+    - foreman_nodes
+    - puppet
+    - rdo
+    - neutron
+  controller1:
+    name: oscontroller1.opnfv.com
+    hostname: oscontroller1.opnfv.com
+    short_name: oscontroller1
+    type: controller
+    host_type: baremetal
+    hostgroup: Controller_Network_ODL
+    mac_address: "10:23:45:67:89:AC"
+    bmc_ip: 10.4.17.3
+    bmc_mac: "10:23:45:67:88:AC"
+    bmc_user: root
+    bmc_pass: root
+    private_ip: controller1_private
+    admin_ip: controller1_admin
+    private_mac: "10:23:45:67:87:AC"
+    ansible_ssh_pass: "Op3nStack"
+    admin_password: "octopus"
+    groups:
+    - controller
+    - foreman_nodes
+    - puppet
+    - rdo
+    - neutron
+workaround_mysql_centos7: true
+distro:
+  name: centos
+  centos:
+    '7.0':
+      repos: []
+  short_name: c
+  short_version: 70
+  version: '7.0'
+  rhel:
+    '7.0':
+      kickstart_url: http://REPLACE_ME/released/RHEL-7/7.0/Server/x86_64/os/
+      repos:
+      - section: rhel7-server-rpms
+        name: Packages for RHEL 7 - $basearch
+        baseurl: http://REPLACE_ME/rel-eng/repos/rhel-7.0/x86_64/
+        gpgcheck: 0
+      - section: rhel-7-server-update-rpms
+        name: Update Packages for Enterprise Linux 7 - $basearch
+        baseurl: http://REPLACE_ME/rel-eng/repos/rhel-7.0-z/x86_64/
+        gpgcheck: 0
+      - section: rhel-7-server-optional-rpms
+        name: Optional Packages for Enterprise Linux 7 - $basearch
+        baseurl: http://REPLACE_ME/released/RHEL-7/7.0/Server-optional/x86_64/os/
+        gpgcheck: 0
+      - section: rhel-7-server-extras-rpms
+        name: Optional Packages for Enterprise Linux 7 - $basearch
+        baseurl: http://REPLACE_ME/rel-eng/EXTRAS-7.0-RHEL-7-20140610.0/compose/Server/x86_64/os/
+        gpgcheck: 0
+    '6.5':
+      kickstart_url: http://REPLACE_ME/released/RHEL-6/6.5/Server/x86_64/os/
+      repos:
+      - section: rhel6.5-server-rpms
+        name: Packages for RHEL 6.5 - $basearch
+        baseurl: http://REPLACE_ME.REPLACE_ME/released/RHEL-6/6.5/Server/$basearch/os/Server
+        gpgcheck: 0
+      - section: rhel-6.5-server-update-rpms
+        name: Update Packages for Enterprise Linux 6.5 - $basearch
+        baseurl: http://REPLACE_ME.REPLACE_ME/rel-eng/repos/RHEL-6.5-Z/$basearch/
+        gpgcheck: 0
+      - section: rhel-6.5-server-optional-rpms
+        name: Optional Packages for Enterprise Linux 6.5 - $basearch
+        baseurl: http://REPLACE_ME.REPLACE_ME/released/RHEL-6/6.5/Server/optional/$basearch/os
+        gpgcheck: 0
+      - section: rhel6.5-server-rpms-32bit
+        name: Packages for RHEL 6.5 - i386
+        baseurl: http://REPLACE_ME.REPLACE_ME/released/RHEL-6/6.5/Server/i386/os/Server
+        gpgcheck: 0
+        enabled: 1
+      - section: rhel-6.5-server-update-rpms-32bit
+        name: Update Packages for Enterprise Linux 6.5 - i686
+        baseurl: http://REPLACE_ME.REPLACE_ME/rel-eng/repos/RHEL-6.5-Z/i686/
+        gpgcheck: 0
+        enabled: 1
+      - section: rhel-6.5-server-optional-rpms-32bit
+        name: Optional Packages for Enterprise Linux 6.5 - i386
+        baseurl: http://REPLACE_ME.REPLACE_ME/released/RHEL-6/6.5/Server/optional/i386/os
+        gpgcheck: 0
+        enabled: 1
+    subscription:
+      username: REPLACE_ME
+      password: HWj8TE28Qi0eP2c
+      pool: 8a85f9823e3d5e43013e3ddd4e2a0977
+  config:
+    selinux: permissive
+    ntp_server: 0.pool.ntp.org
+    dns_servers:
+    - 10.4.1.1
+    - 10.4.0.2
+    reboot_delay: 1
+    initial_boot_timeout: 180
+node:
+  prefix:
+  - rdo
+  - pupt
+  - ffqiotcxz1
+  - null
+product:
+  repo_type: production
+  name: rdo
+  short_name: rdo
+  rpm:
+    CentOS: https://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm
+    Fedora: https://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm
+    RedHat: https://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm
+  short_version: ju
+  repo:
+    production:
+      CentOS:
+        7.0.1406: http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-7
+        '6.5': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-6
+        '7.0': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-7
+      Fedora:
+        '20': http://repos.fedorapeople.org/repos/openstack/openstack-juno/fedora-20
+        '21': http://repos.fedorapeople.org/repos/openstack/openstack-juno/fedora-21
+      RedHat:
+        '6.6': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-6
+        '6.5': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-6
+        '7.0': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-7
+  version: juno
+  config:
+    enable_epel: y
+  short_repo: prod
+tester:
+  name: tempest
+distro_reboot_options: '--no-wall '' Reboot is triggered by Ansible'' '
+job:
+  verbosity: 1
+  archive:
+  - '{{ tempest.dir }}/etc/tempest.conf'
+  - '{{ tempest.dir }}/etc/tempest.conf.sample'
+  - '{{ tempest.dir }}/*.log'
+  - '{{ tempest.dir }}/*.xml'
+  - /root/
+  - /var/log/
+  - /etc/nova
+  - /etc/ceilometer
+  - /etc/cinder
+  - /etc/glance
+  - /etc/keystone
+  - /etc/neutron
+  - /etc/ntp
+  - /etc/puppet
+  - /etc/qpid
+  - /etc/qpidd.conf
+  - /root
+  - /etc/yum.repos.d
+  - /etc/yum.repos.d
+topology:
+  name: multinode
+  short_name: mt
+workaround_neutron_ovs_udev_loop: true
+workaround_glance_table_utf8: false
+verbosity:
+  debug: 0
+  info: 1
+  warning: 2
+  warn: 2
+  errors: 3
+provisioner:
+  username: admin
+  network:
+    type: nova
+    name: external
+  skip: skip_provision
+  foreman_url: https://10.2.84.2/api/v2/
+  password: octopus
+  type: foreman
+workaround_nova_compute_fix: false
+workarounds:
+  enabled: true
+
index 9e3d053..9b3a4d4 100644 (file)
@@ -14,3 +14,4 @@
                     delay=60
                     timeout=180
       sudo: false
+    - pause: minutes=1
index d0bba64..e64c0ad 100755 (executable)
@@ -18,6 +18,7 @@ green=`tput setaf 2`
 
 host_name=REPLACE
 dns_server=REPLACE
+host_ip=REPLACE
 ##END VARS
 
 ##set hostname
@@ -31,27 +32,52 @@ if ! grep 'PEERDNS=no' /etc/sysconfig/network-scripts/ifcfg-enp0s3; then
   systemctl restart NetworkManager
 fi
 
-if ! ping www.google.com -c 5; then 
+##modify /etc/resolv.conf to point to foreman
+echo "${blue} Configuring resolv.conf with DNS: $dns_server ${reset}"
+cat > /etc/resolv.conf << EOF
+search ci.com opnfv.com
+nameserver $dns_server
+nameserver 8.8.8.8
+
+EOF
+
+##modify /etc/hosts to add own IP for rabbitmq workaround
+host_short_name=`echo $host_name | cut -d . -f 1`
+echo "${blue} Configuring hosts with: $host_name $host_ip ${reset}"
+cat > /etc/hosts << EOF
+$host_ip  $host_short_name $host_name
+127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
+::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
+EOF
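+##e.g. for a hypothetical host oscompute11.opnfv.com with host_ip 10.4.9.2 this produces:
+##  10.4.9.2  oscompute11 oscompute11.opnfv.com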
+
+if ! ping www.google.com -c 5; then
   echo "${red} No internet connection, check your route and DNS setup ${reset}"
   exit 1
 fi
 
-# Install EPEL repo for access to many other yum repos
-# Major version is pinned to force some consistency for Arno
-yum install -y epel-release-7*
+##install EPEL
+if ! yum repolist | grep "epel/"; then
+  if ! rpm -Uvh http://dl.fedoraproject.org/pub/epel/7/x86_64/e/epel-release-7-5.noarch.rpm; then
+    printf '%s\n' 'vm_nodes_provision.sh: Unable to configure EPEL repo' >&2
+    exit 1
+  fi
+else
+  printf '%s\n' 'vm_nodes_provision.sh: Skipping EPEL repo as it is already configured.'
+fi
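+##note: the epel-release-7-5 rpm URL above is pinned; EPEL rotates point releases, so the
+##exact filename may need updating if the mirror no longer carries that version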
 
-# Update device-mapper-libs, needed for libvirtd on compute nodes
-# Major version is pinned to force some consistency for Arno
-if ! yum -y upgrade device-mapper-libs-1*; then
+##install device-mapper-libs
+##needed for libvirtd on compute nodes
+if ! yum -y upgrade device-mapper-libs; then
    echo "${red} WARN: Unable to upgrade device-mapper-libs...nova-compute may not function ${reset}"
 fi
 
-# Install other required packages
-# Major version is pinned to force some consistency for Arno
 echo "${blue} Installing Puppet ${reset}"
-if ! yum install -y puppet-3*; then
-  printf '%s\n' 'vm_nodes_provision.sh: failed to install required packages' >&2
-  exit 1
+##install puppet
+if ! yum list installed | grep -i puppet; then
+  if ! yum -y install puppet; then
+    printf '%s\n' 'vm_nodes_provision.sh: Unable to install puppet package' >&2
+    exit 1
+  fi
 fi
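+##note: the "yum list installed | grep -i puppet" guard also matches related packages
+##(e.g. puppetlabs-release), so the agent install may be skipped if such a package is present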
 
 echo "${blue} Configuring puppet ${reset}"
index 19c526b..20ea983 100644 (file)
@@ -213,8 +213,6 @@ Follow the steps below to execute:
 
 4.  Your nodes will take 40-60 minutes to re-install CentOS 7 and install/configure OPNFV.  When complete you will see "Finished: SUCCESS"
 
-.. _setup_verify:
-
 Verifying the Setup
 -------------------
 
@@ -236,9 +234,7 @@ Now that the installer has finished it is a good idea to check and make sure thi
 
 7.  Now go to your web browser and insert the Horizon public VIP.  The login will be "admin"/"octopus".
 
-8.  You are now able to follow the `OpenStack Verification <openstack_verify_>`_ section.
-
-.. _openstack_verify:
+8.  You are now able to follow the `OpenStack Verification`_ section.
 
 OpenStack Verification
 ----------------------
@@ -316,14 +312,14 @@ Follow the steps below to execute:
 Verifying the Setup - VMs
 -------------------------
 
-Follow the instructions in the `Verifying the Setup <setup_verify_>`_ section.
+Follow the instructions in the `Verifying the Setup`_ section.
 
 Also, for VM deployment you are able to easily access your nodes by going to ``/tmp/<node name>`` and then ``vagrant ssh`` (password is "vagrant").  You can use this to go to a controller and check OpenStack services, OpenDaylight, etc.
 
 OpenStack Verification - VMs
 ----------------------------
 
-Follow the steps in `OpenStack Verification <openstack_verify_>`_ section.
+Follow the steps in the `OpenStack Verification`_ section.
 
 Frequently Asked Questions
 ==========================