Moving tag arno.2015.1.0 from genesis to fuel/stable/arno 31/2231/1 arno.2015.1.0
author Jonas Bjurel <jonas.bjurel@ericsson.com>
Sat, 3 Oct 2015 14:54:43 +0000 (16:54 +0200)
committer Jonas Bjurel <jonas.bjurel@ericsson.com>
Sat, 3 Oct 2015 14:54:43 +0000 (16:54 +0200)
Change-Id: I8bb3e28a814e04ad15e8a4b24b40bd7685600f46
Signed-off-by: Jonas Bjurel <jonas.bjurel@ericsson.com>
227 files changed:
INFO
LICENSE [deleted file]
LICENSE.rst
ceph.md [new file with mode: 0644]
common/README.md [new file with mode: 0644]
common/docs/user-guide.rst [new file with mode: 0644]
common/manifests/README.md [new file with mode: 0644]
common/puppet-opnfv/manifests/ceph_deploy.pp [new file with mode: 0644]
common/puppet-opnfv/manifests/compute.pp [new file with mode: 0644]
common/puppet-opnfv/manifests/controller.pp [new file with mode: 0644]
common/puppet-opnfv/manifests/controller_networker.pp [new file with mode: 0644]
common/puppet-opnfv/manifests/external_net_presetup.pp [new file with mode: 0644]
common/puppet-opnfv/manifests/external_net_setup.pp [new file with mode: 0644]
common/puppet-opnfv/manifests/init.pp [new file with mode: 0644]
common/puppet-opnfv/manifests/network.pp [new file with mode: 0644]
common/puppet-opnfv/manifests/ntp.pp [new file with mode: 0644]
common/puppet-opnfv/manifests/odl_docker.pp [new file with mode: 0644]
common/puppet-opnfv/manifests/odl_service.pp [new file with mode: 0644]
common/puppet-opnfv/manifests/repo.pp [new file with mode: 0644]
common/puppet-opnfv/manifests/resolver.pp [new file with mode: 0644]
common/puppet-opnfv/manifests/tempest.pp [new file with mode: 0644]
common/puppet-opnfv/manifests/templates/dockerfile/Dockerfile [new file with mode: 0644]
common/puppet-opnfv/manifests/templates/dockerfile/container_scripts/check_feature.sh [new file with mode: 0644]
common/puppet-opnfv/manifests/templates/dockerfile/container_scripts/speak.sh [new file with mode: 0644]
common/puppet-opnfv/manifests/templates/dockerfile/container_scripts/start_odl_docker_container.sh [new file with mode: 0644]
common/puppet-opnfv/manifests/templates/ntp.conf.compute.erb [new file with mode: 0644]
common/puppet-opnfv/manifests/templates/ntp.conf.controller.erb [new file with mode: 0644]
common/puppet-opnfv/templates/br_ex.erb [new file with mode: 0644]
common/tools/README.md [new file with mode: 0644]
compass/build/Makefile [new file with mode: 0755]
compass/build/cache.mk [new file with mode: 0755]
compass/build/config.mk [new file with mode: 0755]
compass/build/install.sh [new file with mode: 0755]
compass/ci/build.sh [new file with mode: 0755]
compass/ci/deploy.sh [new file with mode: 0755]
compass/deploy/conf/base.conf [new file with mode: 0644]
compass/deploy/conf/five.conf [new file with mode: 0644]
compass/deploy/deploy-vm.sh [new file with mode: 0644]
compass/deploy/func.sh [new file with mode: 0644]
compass/deploy/mac_generator.sh [new file with mode: 0755]
compass/deploy/prepare.sh [new file with mode: 0644]
compass/deploy/setup-env.sh [new file with mode: 0644]
foreman/build/Makefile [new file with mode: 0644]
foreman/build/c7-opnfv-x86_64-comps.xml [new file with mode: 0644]
foreman/build/cache.mk [new file with mode: 0644]
foreman/build/config.mk [new file with mode: 0644]
foreman/build/isolinux.cfg [new file with mode: 0644]
foreman/build/opnfv-genesis.spec [new file with mode: 0644]
foreman/ci/README.md [new file with mode: 0644]
foreman/ci/Vagrantfile [new file with mode: 0644]
foreman/ci/bootstrap.sh [new file with mode: 0755]
foreman/ci/build.sh [new file with mode: 0755]
foreman/ci/clean.sh [new file with mode: 0755]
foreman/ci/deploy.sh [new file with mode: 0755]
foreman/ci/inventory/lf_pod2_ksgen_settings.yml [new file with mode: 0644]
foreman/ci/nat_setup.sh [new file with mode: 0755]
foreman/ci/opnfv_ksgen_settings.yml [new file with mode: 0644]
foreman/ci/reload_playbook.yml [new file with mode: 0644]
foreman/ci/vm_nodes_provision.sh [new file with mode: 0755]
foreman/docs/src/installation-instructions.rst [new file with mode: 0644]
foreman/docs/src/release-notes.rst [new file with mode: 0644]
foreman/include/build.sh.debug [new file with mode: 0644]
fuel/.DS_Store [deleted file]
fuel/TODO
fuel/build/Makefile
fuel/build/README
fuel/build/cache.mk
fuel/build/config.mk
fuel/build/docker/ubuntu-builder/Dockerfile
fuel/build/docker/ubuntu-builder/install.sh [new file with mode: 0755]
fuel/build/f_isoroot/Makefile
fuel/build/f_isoroot/f_bootstrap/bootstrap_admin_node.sh
fuel/build/f_isoroot/f_bootstrap/bootstrap_admin_node.sh.orig
fuel/build/f_isoroot/f_bootstrap/post-scripts/01_fix_iommubug.sh [new file with mode: 0755]
fuel/build/f_isoroot/f_bootstrap/post-scripts/02_fix_console_speed.sh [new file with mode: 0755]
fuel/build/f_isoroot/f_bootstrap/post-scripts/03_install_repo.sh [deleted file]
fuel/build/f_isoroot/f_kscfg/ks.cfg [changed mode: 0644->0755]
fuel/build/f_isoroot/f_kscfg/ks.cfg.orig
fuel/build/f_isoroot/f_odlpluginbuild/Makefile [deleted file]
fuel/build/f_isoroot/f_predeployment/Makefile [new file with mode: 0644]
fuel/build/f_isoroot/f_predeployment/README [new file with mode: 0644]
fuel/build/f_isoroot/f_predeployment/pre-deploy.sh [new file with mode: 0755]
fuel/build/f_isoroot/f_predeployment/sysinfo.sh [new file with mode: 0755]
fuel/build/f_isoroot/f_predeployment/transform_yaml.py [new file with mode: 0755]
fuel/build/f_isoroot/f_repobuild/Makefile [deleted file]
fuel/build/f_l23network/Makefile [new file with mode: 0644]
fuel/build/f_l23network/README [new file with mode: 0644]
fuel/build/f_l23network/puppet/modules/l23network/lib/puppet/parser/functions/extras_to_hosts.rb [new file with mode: 0644]
fuel/build/f_l23network/puppet/modules/l23network/manifests/hosts_file.pp [new file with mode: 0644]
fuel/build/f_l23network/puppet/modules/l23network/manifests/hosts_file.pp.orig [new file with mode: 0644]
fuel/build/f_l23network/testing/README [new file with mode: 0644]
fuel/build/f_l23network/testing/fake_init.pp [new file with mode: 0644]
fuel/build/f_ntp/Makefile [new file with mode: 0644]
fuel/build/f_ntp/README [new file with mode: 0644]
fuel/build/f_ntp/puppet/modules/opnfv/manifests/ntp.pp [new file with mode: 0644]
fuel/build/f_ntp/puppet/modules/opnfv/templates/ntp.conf.compute.erb [new file with mode: 0644]
fuel/build/f_ntp/puppet/modules/opnfv/templates/ntp.conf.controller.erb [new file with mode: 0644]
fuel/build/f_ntp/testing/README [new file with mode: 0644]
fuel/build/f_ntp/testing/fake_init.pp [moved from fuel/deploy/environments/__init__.py with 72% similarity]
fuel/build/f_odl_docker/Makefile [new file with mode: 0755]
fuel/build/f_odl_docker/dockerfile/Dockerfile [new file with mode: 0755]
fuel/build/f_odl_docker/dockerfile/container_scripts/check_feature.sh [new file with mode: 0755]
fuel/build/f_odl_docker/dockerfile/container_scripts/speak.sh [new file with mode: 0755]
fuel/build/f_odl_docker/dockerfile/container_scripts/start_odl_docker.sh [new file with mode: 0755]
fuel/build/f_odl_docker/puppet/modules/opnfv/manifests/odl_docker.pp [new file with mode: 0644]
fuel/build/f_odl_docker/puppet/modules/opnfv/scripts/change.sh [new file with mode: 0644]
fuel/build/f_odl_docker/puppet/modules/opnfv/scripts/config_net_odl.sh [new file with mode: 0755]
fuel/build/f_odl_docker/puppet/modules/opnfv/scripts/stage_odl.sh [new file with mode: 0755]
fuel/build/f_odl_docker/puppet/modules/opnfv/scripts/start_odl_container.sh [new file with mode: 0755]
fuel/build/f_odl_docker/scripts/config_net_odl.sh [new file with mode: 0644]
fuel/build/f_odl_docker/scripts/config_neutron_for_odl.sh [new file with mode: 0644]
fuel/build/f_odl_docker/scripts/prep_nets_for_odl.sh [new file with mode: 0755]
fuel/build/f_odl_docker/scripts/setup_ovs_for_odl.sh [new file with mode: 0644]
fuel/build/f_opnfv_puppet/Makefile [new file with mode: 0644]
fuel/build/f_opnfv_puppet/README [new file with mode: 0644]
fuel/build/f_opnfv_puppet/puppet/modules/opnfv/manifests/add_packages.pp [new file with mode: 0644]
fuel/build/f_opnfv_puppet/puppet/modules/opnfv/manifests/init.pp [new file with mode: 0644]
fuel/build/f_opnfv_puppet/puppet/modules/opnfv/manifests/opncheck.pp [new file with mode: 0644]
fuel/build/f_osnaily/Makefile [new file with mode: 0644]
fuel/build/f_osnaily/puppet/modules/osnailyfacter/examples/site.pp [new file with mode: 0644]
fuel/build/f_osnaily/puppet/modules/osnailyfacter/examples/site.pp.orig [new file with mode: 0644]
fuel/build/f_resolvconf/Makefile [new file with mode: 0644]
fuel/build/f_resolvconf/README [new file with mode: 0644]
fuel/build/f_resolvconf/puppet/modules/opnfv/manifests/resolver.pp [new file with mode: 0644]
fuel/build/f_resolvconf/puppet/modules/opnfv/templates/resolv.conf.erb [new file with mode: 0644]
fuel/build/f_resolvconf/testing/README [new file with mode: 0644]
fuel/build/f_resolvconf/testing/fake_init.pp [new file with mode: 0644]
fuel/build/fuel-agent_1.patch [deleted file]
fuel/build/fuel-main_1.patch [new file with mode: 0644]
fuel/build/fuel-main_2.patch [new file with mode: 0644]
fuel/build/fuel-main_3.patch [deleted file]
fuel/build/fuel-main_5.patch [deleted file]
fuel/build/install/apt-ftparchive-deb.conf
fuel/build/install/apt-ftparchive-release.conf
fuel/build/install/apt-ftparchive-udeb.conf
fuel/build/install/install.sh
fuel/build/install/uninstall.sh
fuel/build/opendaylight/Makefile [new file with mode: 0644]
fuel/build/opendaylight/README [new file with mode: 0644]
fuel/build/opendaylight/f_odl/Makefile [new file with mode: 0644]
fuel/build/opendaylight/f_odl/README [new file with mode: 0644]
fuel/build/opendaylight/f_odl/puppet/modules/opnfv/manifests/odl.pp [new file with mode: 0644]
fuel/build/opendaylight/f_odl/testing/README [new file with mode: 0644]
fuel/build/opendaylight/f_odl/testing/fake_init.pp [new file with mode: 0644]
fuel/build/opendaylight/make-odl-deb.sh [new file with mode: 0755]
fuel/build/opendaylight/odl_maven/settings.xml [new file with mode: 0644]
fuel/build/patch-packages/Makefile
fuel/build/patch-packages/debootstrap/Makefile [new file with mode: 0644]
fuel/build/patch-packages/debootstrap/debootstrap.patch [new file with mode: 0644]
fuel/build/patch-packages/neutron-common/Makefile [new file with mode: 0644]
fuel/build/patch-packages/neutron-common/quota.patch [new file with mode: 0644]
fuel/build/patch-packages/novnc/Makefile [new file with mode: 0644]
fuel/build/patch-packages/novnc/fix-missing.sh [new file with mode: 0755]
fuel/ci/README
fuel/ci/build.sh
fuel/ci/deploy.sh
fuel/deploy/README [deleted file]
fuel/deploy/README.txt [new file with mode: 0644]
fuel/deploy/__init__.py
fuel/deploy/baremetal/dea.yaml [new file with mode: 0644]
fuel/deploy/baremetal/dha.yaml [new file with mode: 0644]
fuel/deploy/baremetal/vm/vFuel [moved from fuel/deploy/templates/hardware_environment/vms/fuel.xml with 77% similarity]
fuel/deploy/cloud/configure_environment.py
fuel/deploy/cloud/configure_network.py
fuel/deploy/cloud/configure_nodes.py
fuel/deploy/cloud/configure_settings.py
fuel/deploy/cloud/deploy.py
fuel/deploy/cloud/deployment.py
fuel/deploy/common.py
fuel/deploy/dea.py
fuel/deploy/deploy.py
fuel/deploy/deploy_env.py
fuel/deploy/dha.py
fuel/deploy/dha_adapters/__init__.py
fuel/deploy/dha_adapters/hardware_adapter.py
fuel/deploy/dha_adapters/hp_adapter.py
fuel/deploy/dha_adapters/ipmi_adapter.py
fuel/deploy/dha_adapters/libvirt_adapter.py
fuel/deploy/environments/execution_environment.py [deleted file]
fuel/deploy/environments/libvirt_environment.py [deleted file]
fuel/deploy/environments/virtual_fuel.py [deleted file]
fuel/deploy/execution_environment.py [deleted file]
fuel/deploy/fuel_patch/ks.cfg.patch [deleted file]
fuel/deploy/install-ubuntu-packages.sh [new file with mode: 0755]
fuel/deploy/install_fuel_master.py
fuel/deploy/libvirt/dea.yaml [moved from fuel/deploy/templates/hardware_environment/conf/linux_foundation_lab/pod2/dea.yaml with 50% similarity]
fuel/deploy/libvirt/dha.yaml [new file with mode: 0644]
fuel/deploy/libvirt/networks/fuel1 [moved from fuel/deploy/templates/virtual_environment/networks/fuel1.xml with 100% similarity]
fuel/deploy/libvirt/networks/fuel2 [moved from fuel/deploy/templates/virtual_environment/networks/fuel2.xml with 100% similarity]
fuel/deploy/libvirt/networks/fuel3 [moved from fuel/deploy/templates/virtual_environment/networks/fuel3.xml with 100% similarity]
fuel/deploy/libvirt/networks/fuel4 [moved from fuel/deploy/templates/virtual_environment/networks/fuel4.xml with 100% similarity]
fuel/deploy/libvirt/vms/compute [moved from fuel/deploy/templates/virtual_environment/vms/compute.xml with 98% similarity]
fuel/deploy/libvirt/vms/controller [moved from fuel/deploy/templates/virtual_environment/vms/controller.xml with 94% similarity]
fuel/deploy/libvirt/vms/fuel-master [moved from fuel/deploy/templates/virtual_environment/vms/fuel.xml with 99% similarity]
fuel/deploy/reap.py [deleted file]
fuel/deploy/setup_environment.py [new file with mode: 0644]
fuel/deploy/setup_vfuel.py [new file with mode: 0644]
fuel/deploy/ssh_client.py
fuel/deploy/templates/hardware_environment/conf/ericsson_montreal_lab/dea.yaml [deleted file]
fuel/deploy/templates/hardware_environment/conf/ericsson_montreal_lab/dha.yaml [deleted file]
fuel/deploy/templates/hardware_environment/conf/linux_foundation_lab/pod1/dea.yaml [deleted file]
fuel/deploy/templates/hardware_environment/conf/linux_foundation_lab/pod1/dha.yaml [deleted file]
fuel/deploy/templates/hardware_environment/conf/linux_foundation_lab/pod2/dha.yaml [deleted file]
fuel/deploy/templates/hardware_environment/conf/opnfv_box/dea.yaml [deleted file]
fuel/deploy/templates/hardware_environment/conf/opnfv_box/dha.yaml [deleted file]
fuel/deploy/templates/virtual_environment/conf/dea.yaml [deleted file]
fuel/deploy/templates/virtual_environment/conf/dha.yaml [deleted file]
fuel/deploy/transplant_fuel_settings.py
fuel/docs/.DS_Store [deleted file]
fuel/docs/src/build-instructions.rst
fuel/docs/src/installation-instructions.rst
fuel/docs/src/release-notes.rst
fuel/prototypes/auto-deploy/configs/lf_pod1/dea_ha.yaml
fuel/prototypes/auto-deploy/configs/lf_pod1/dea_no-ha.yaml
fuel/prototypes/auto-deploy/examples/libvirt/conf/vms/compute4
fuel/prototypes/auto-deploy/examples/libvirt/conf/vms/compute5
fuel/prototypes/auto-deploy/examples/libvirt/conf/vms/controller1
fuel/prototypes/auto-deploy/examples/libvirt/conf/vms/fuel-master
juju/ci/build.sh [new file with mode: 0644]
juju/ci/deploy.sh [new file with mode: 0644]
opensteak/ci/build.sh [new file with mode: 0644]
opensteak/ci/deploy.sh [new file with mode: 0644]
puppet.md [new file with mode: 0644]
puppet/opnfv/manifests/compute.pp [new file with mode: 0644]
puppet/opnfv/manifests/controller.pp [new file with mode: 0644]
puppet/opnfv/manifests/init.pp [new file with mode: 0644]
puppet/opnfv/manifests/network.pp [new file with mode: 0644]

diff --git a/INFO b/INFO
index 251e9ea..dffc3ad 100644 (file)
--- a/INFO
+++ b/INFO
@@ -1,23 +1,35 @@
-Project: Fuel based OPNFV installer (Fuel@OPNFV)
-Project Creation Date: 2015.07.07
-Project Category: Integration and testing
+Project: Bootstrap/Get started (Genesis)
+Project Creation Date: December 9, 2014
+Project Category: Integration & Testing
 Lifecycle State: Incubation
-Primary Contact:  jonas.bjurel@ericsson.com
-Project Lead: jonas.bjurel@ericsson.com
-Jira Project Name: Fuel based OPNFV installer
-Jira Project Prefix: FUEL
-Mailing list tag: [Fuel]
-IRC: Server:freenode.net Channel:#opnfv-fuel
-Repository: fuel
+Primary Contact: Frank Brockners
+Project Lead: Frank Brockners
+Jira Project Name: Bootstrap Get started project
+Jira Project Prefix: BGS
+Mailing list tag: [bgs]
+IRC: Server:freenode.net Channel:#opnfv-bgs
+Repository: genesis
 
 Committers:
+dradez@redhat.com
+fbrockne@cisco.com
+iawells@cisco.com
 jonas.bjurel@ericsson.com
+sudha.kumari@hp.com
+cm-r@hp.com
+pals@cisco.com
+peter.hladky@pantheon.sk
+rapenno@gmail.com
 stefan.k.berg@ericsson.com
+prakash.ramchandran@huawei.com
+whayutin@redhat.com
+zhangxiong7@huawei.com
+joseph.gasparakis@intel.com
+pbandzi@cisco.com
 daniel.smith@ericsson.com
-szilard.cserey@ericsson.com
-mskalski@mirantis.com
-ruijing.guo@intel.com
+trozet@redhat.com
 
-<<<<<<< HEAD
-Link to TSC approval of the project: http://meetbot.opnfv.org/meetings/opnfv-meeting/2015/opnfv-meeting.2015-07-07-13.59.log.html
+Link to TSC approval of the project: http://meetbot.opnfv.org/meetings/opnfv-meeting/2014/opnfv-meeting.2014-12-09-15.02.html
 Link(s) to approval of additional committers:
+* Addition of Daniel Smith: Email vote: http://lists.opnfv.org/pipermail/opnfv-tech-discuss/2015-March/001801.html
+* Addition of Tim Rozet: Email vote: http://lists.opnfv.org/pipermail/opnfv-tsc/2015-May/000845.html
diff --git a/LICENSE b/LICENSE
deleted file mode 100644 (file)
index 143e209..0000000
--- a/LICENSE
+++ /dev/null
@@ -1,13 +0,0 @@
-Copyright 2015 Open Platform for NFV Project, Inc. and its contributors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
diff --git a/LICENSE.rst b/LICENSE.rst
index efee103..30e94bf 100644 (file)
--- a/LICENSE.rst
+++ b/LICENSE.rst
@@ -64,6 +64,15 @@ Licenses as listed below:
 | Puppet         | Apache License 2.0                                  |
 |                | https://www.apache.org/licenses/LICENSE-2.0         |
 +----------------+-----------------------------------------------------+
+
+Other applicable upstream project Licenses used by Fuel ISO
+==================================================================
+You may not use the content of this software bundle except in compliance with the
+Licenses as listed below:
+
++----------------+-----------------------------------------------------+
+| **Component**  | **Licence**                                         |
++----------------+-----------------------------------------------------+
 | Docker         | Apache License 2.0                                  |
 |                | https://www.apache.org/licenses/LICENSE-2.0         |
 +----------------+-----------------------------------------------------+
@@ -82,3 +91,26 @@ Licenses as listed below:
 | Astute         | Apache License 2.0                                  |
 |                | https://www.apache.org/licenses/LICENSE-2.0         |
 +----------------+-----------------------------------------------------+
+
+Other applicable upstream project Licenses used by Foreman ISO
+==================================================================
+You may not use the content of this software bundle except in compliance with the
+Licenses as listed below:
+
++----------------+-----------------------------------------------------+
+| **Component**  | **Licence**                                         |
++----------------+-----------------------------------------------------+
+| Foreman        | Creative Commons Attribution-ShareAlike 3.0         |
+|                | http://creativecommons.org/licenses/by-sa/3.0/      |
++----------------+-----------------------------------------------------+
+| VirtualBox     | GPL v2                                              |
+|                | https://www.gnu.org/licenses/gpl-2.0.html           |
++----------------+-----------------------------------------------------+
+| Vagrant        | The MIT License                                     |
++----------------+-----------------------------------------------------+
+| Ansible        | GPL v3                                              |
+|                | https://www.gnu.org/licenses/gpl-3.0.html           |
++----------------+-----------------------------------------------------+
+| Khaleesi       | GPL v3                                              |
+|                | https://www.gnu.org/licenses/gpl-3.0.html           |
++----------------+-----------------------------------------------------+
diff --git a/ceph.md b/ceph.md
new file mode 100644 (file)
index 0000000..216aa32
--- /dev/null
+++ b/ceph.md
@@ -0,0 +1,217 @@
+# Ceph Installation
+
+---
+## Intro
+Ceph is used to build a storage system across all machines.
+
+## Architecture
+We consider the following architecture:
+
+    TODO: add schema (4 machines: ceph-admin, 3 ceph-nodes: opensteak9{2,3,4})
+
+Networks:
+```
+192.168.0.0/24 is the cluster network (used for storage)
+192.168.1.0/24 is the management network (used for admin tasks)
+```
+
+
+## Ceph-admin machine preparation
+
+This is done on an Ubuntu 14.04 64-bit server.
+
+### Install ceph-deploy
+```bash
+wget -q -O- 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc' | sudo apt-key add -
+echo deb http://ceph.com/debian-firefly/ $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph.list
+sudo apt-get update && sudo apt-get install ceph-deploy
+```
+
+### Install ntp
+```bash
+sudo apt-get install ntp
+sudo service ntp restart
+```
+
+### Create a ceph user on each node (ceph-admin included)
+```bash
+sudo useradd -d /home/ceph -m ceph
+sudo passwd ceph
+```
+
+Add sudo rights:
+```bash
+echo "ceph ALL = (root) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/ceph
+sudo chmod 0440 /etc/sudoers.d/ceph
+```
+
+* *Note: if you think this can be a security threat, remove the ceph user from sudoers after the installation is complete*
+
+* *Note 2: the ceph documentation asks for this user: http://ceph.com/docs/master/rados/deployment/preflight-checklist/?highlight=sudoers*
+
+
+### Add each node to the hosts file (ceph-admin included)
+```bash
+sudo bash -c ' cat << EOF >> /etc/hosts
+192.168.1.200 ceph-admin
+192.168.1.92 opensteak92
+192.168.1.93 opensteak93
+192.168.1.94 opensteak94
+EOF'
+```
+
+### Create and copy a passwordless ssh key to each node
+```bash
+ssh-keygen
+ssh-copy-id ceph@ceph-admin
+ssh-copy-id ceph@opensteak92
+ssh-copy-id ceph@opensteak93
+ssh-copy-id ceph@opensteak94
+```
+
+### Create a .ssh/config file to connect automatically
+```bash
+cat << EOF >> .ssh/config
+Host ceph-admin
+  Hostname ceph-admin
+  User ceph
+Host opensteak92
+  Hostname opensteak92
+  User ceph
+Host opensteak93
+  Hostname opensteak93
+  User ceph
+Host opensteak94
+  Hostname opensteak94
+  User ceph
+EOF
+```
+
+## Ceph storage cluster
+All of the following commands must be run on the ceph-admin machine as a regular user.
+
+### Prepare folder
+```bash
+mkdir ceph-cluster
+cd ceph-cluster/
+```
+
+### Deploy initial monitor on first node
+```bash
+ceph-deploy new opensteak92
+```
+
+### Configure ceph
+We set default pool size to 2 and public/cluster networks:
+
+```bash
+cat << EOF >> ceph.conf
+osd pool default size = 2
+public network = 192.168.1.0/24
+cluster network = 192.168.0.0/24
+EOF
+```
+
+### Install ceph on all nodes
+```bash
+ceph-deploy --username ceph install ceph-admin opensteak92 opensteak93 opensteak94
+```
+
+### Create initial monitor and gather the keys
+```bash
+ceph-deploy --username ceph mon create-initial
+```
+
+### Create and add OSD
+We will use a hard disk (/dev/sdb) for storage: http://docs.ceph.com/docs/master/rados/deployment/ceph-deploy-osd/
+
+```bash
+ceph-deploy --username ceph osd create opensteak93:sdb
+ceph-deploy --username ceph osd create opensteak94:sdb
+```
+
+### Prepare all nodes to administer the cluster
+Provide all nodes with the ceph.conf and ceph.client.admin.keyring files so that they can administer the cluster:
+
+```bash
+ceph-deploy admin ceph-admin opensteak92 opensteak93 opensteak94
+sudo chmod +r /etc/ceph/ceph.client.admin.keyring
+```
+
+### Add a metadata server on the first node
+```bash
+ceph-deploy --username ceph mds create opensteak92
+```
+
+## Extend
+### Extend the OSD pool
+We decided to extend the OSD pool by adding the first node as well:
+
+```bash
+ceph-deploy --username ceph osd create opensteak92:sdb
+```
+
+### Extend the monitors
+In the same spirit, extend the monitors by adding the last two nodes and check the status:
+```bash
+ceph-deploy --username ceph mon create opensteak93 opensteak94
+ceph quorum_status --format json-pretty
+```
+
+## Check status
+```bash
+ceph health
+```
+
+## Create a file system
+Check osd pools:
+```bash
+ceph osd lspools
+```
+
+If you don't have the data and metadata pools, create them:
+```bash
+ceph osd pool create cephfs_data 64
+ceph osd pool create cephfs_metadata 64
+```
+
+Then create the file system on top of these pools:
+```bash
+ceph fs new cephfs cephfs_metadata cephfs_data
+```
+
+And check again:
+```bash
+ceph osd lspools
+```
+
+Should produce:
+```bash
+0 rbd,1 cephfs_data,2 cephfs_metadata,
+```
+
+You can check as well with:
+```bash
+$ ceph fs ls
+name: cephfs, metadata pool: cephfs_metadata, data pools: [cephfs_data ]
+
+$ ceph mds stat
+e5: 1/1/1 up {0=opensteak92=up:active}
+```
+
+## Mount file system
+For each node on which you want to mount Ceph at **/mnt/cephfs/**, run:
+```bash
+ssh opensteak9x "cat /etc/ceph/ceph.client.admin.keyring |grep key|awk '{print \$3}'|sudo tee /etc/ceph/ceph.client.admin.key"
+
+ssh opensteak9x "sudo mkdir /mnt/cephfs"
+
+ssh opensteak9x "echo '192.168.1.92:6789:/ /mnt/cephfs ceph name=admin,secretfile=/etc/ceph/ceph.client.admin.key,noatime 0 2' | sudo tee --append /etc/fstab && sudo mount /mnt/cephfs"
+```
+
+This adds a line to fstab so the file system is mounted automatically at boot.
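+
+Since `opensteak9x` is a placeholder, the same three steps can be scripted for all storage nodes; a minimal sketch using the node names from the architecture section:
+```bash
+for node in opensteak92 opensteak93 opensteak94; do
+  ssh $node "grep key /etc/ceph/ceph.client.admin.keyring | awk '{print \$3}' | sudo tee /etc/ceph/ceph.client.admin.key"
+  ssh $node "sudo mkdir -p /mnt/cephfs"
+  ssh $node "echo '192.168.1.92:6789:/ /mnt/cephfs ceph name=admin,secretfile=/etc/ceph/ceph.client.admin.key,noatime 0 2' | sudo tee --append /etc/fstab && sudo mount /mnt/cephfs"
+done
+```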
+
+## TODO
+
+* create a python/bash script that will install & check that the cluster is well configured (do all of this automatically); a minimal check sketch follows below
+* create a conf file that will be used by the above script to describe the architecture?
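+
+As a starting point, a minimal sketch of such a check script (hypothetical; assumes it runs on ceph-admin with the admin keyring in place) could look like:
+```bash
+#!/bin/bash
+# Verify overall cluster health, the CephFS pools and an active metadata server.
+ceph health | grep -q HEALTH_OK || { echo "cluster not healthy"; exit 1; }
+ceph osd lspools | grep -q cephfs_data || { echo "cephfs_data pool missing"; exit 1; }
+ceph mds stat | grep -q "up:active" || { echo "no active metadata server"; exit 1; }
+echo "ceph cluster looks correctly configured"
+```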
diff --git a/common/README.md b/common/README.md
new file mode 100644 (file)
index 0000000..537c8ea
--- /dev/null
@@ -0,0 +1,30 @@
+<!---
+Copyright 2015 Open Platform for NFV Project, Inc. and its contributors
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+-->
+# Common
+This directory contains those files which belong to the "OPNFV-Installation and Maintenance" phase of the installation process.
+
+The OPNFV install process consists of two main phases:
+* **BASE-INSTALLATION:** Installation of plain-vanilla VM-manager (for BGS, OpenStack will be used as VM-Manager)
+ * (repeatable) install of a plain vanilla VM-manager (for BGS this is OpenStack) that deploys to bare metal and supports a HA-setup of the VM-manager
+ * the installation is performed with an installer “i” that creates a system in state BASE(i).
+ * Files which are specific to an installer process are found in the directory of the associated installer approach (e.g. "fuel", "foreman", "opensteak", etc.)
+ * Once the installation of the plain vanilla environment is complete, the installer i is terminated. The system is left in state BASE(i) and handed over to the second phase.
+* **OPNFV-INSTALLATION and MAINTENANCE:** Installation of OPNFV specific modules, maintenance of the overall OPNFV installation
+ * the system state for this second phase is called OPNFV(x) - where x is determined by a particular OPNFV release item.
+ * install deltas to state BASE(i) to reach the desired state OPNFV(x). Deltas would be defined as a set of scripts/manifests. Given that the state BASE(i) differs by installer used, the scripts could also be different. That said, it is a clear objective to make these scripts as generic and independent from the installer used as possible.
+ * maintain the system in state OPNFV(x)
+ * decouple device configuration from orchestration; allow for different tool chains to be used for device configuration and orchestration. I.e. rather than couple device config and orchestration with a single tool such as puppet in master-agent mode, enable a single tool to be focused on config (e.g. puppet in master-less mode) and another one for orchestration (e.g. Ansible/Salt driving the upgrade of components, download of particular manifests to the nodes, etc.); a brief illustration follows this list.
+
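+As an illustration only (not a defined OPNFV workflow; the node name and manifest below are hypothetical), a delta applied with puppet in master-less mode could be as simple as copying a manifest to the node and applying it locally:
+```bash
+# copy a hypothetical delta manifest to the target node and apply it without a puppet master
+scp opnfv-delta.pp root@node01:/tmp/opnfv-delta.pp
+ssh root@node01 "puppet apply /tmp/opnfv-delta.pp"
+```
+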
diff --git a/common/docs/user-guide.rst b/common/docs/user-guide.rst
new file mode 100644 (file)
index 0000000..8e02224
--- /dev/null
@@ -0,0 +1,159 @@
+=====================================
+OPNFV User Guide for the Arno release
+=====================================
+
+Abstract
+========
+
+This document provides an overview of how to use the Arno release of OPNFV once the system has been successfully deployed to a Pharos compliant infrastructure.
+
+License
+=======
+
+OPNFV User Guide for the Arno release (c) by Christopher Price (christopher.price@ericsson.com)
+
+OPNFV User Guide for the Arno release is licensed under a Creative Commons Attribution 4.0 International License. You should have received a copy of the license along with this. If not, see <http://creativecommons.org/licenses/by/4.0/>.
+
+Version history
+===================
+
++--------------------+--------------------+--------------------+--------------------+
+| **Date**           | **Ver.**           | **Author**         | **Comment**        |
+|                    |                    |                    |                    |
++--------------------+--------------------+--------------------+--------------------+
+| 2015-05-28         | 0.0.1              | Christopher Price  | Initial version    |
+|                    |                    | (Ericsson AB)      |                    |
++--------------------+--------------------+--------------------+--------------------+
+| 2015-06-02         | 0.0.2              | Christopher Price  | Minor Updates      |
+|                    |                    | (Ericsson AB)      |                    |
++--------------------+--------------------+--------------------+--------------------+
+
+
+.. contents:: Table of Contents
+   :backlinks: none
+
+
+Introduction
+============
+
+This document provides a rudimentary user guide for the OPNFV Arno release.  The Arno release is the initial iteration of the OPNFV project and should be used as a developmental platform if you are interested in participating in the community and providing feedback.
+
+The Arno release is not intended to be used in production environments.  It is an initial, experimental release intended to provide a foundation on which the OPNFV project can evaluate platform capabilities and develop further capabilities.  It is possible that some expected features in the platform may not perform as desired. If you find these types of issues in the platform, please report them to support@opnfv.org so that we can resolve them in future releases.
+
+Preface
+=======
+
+The Arno release of OPNFV is derived exclusively from open source development activities.  As such, the functions, capabilities and user interfaces of the platform derive directly from those upstream sources as well.  The OPNFV project intends to work cooperatively and in conjunction with these projects; awareness of the components and their roles in the platform is important when attempting to use the platform.
+
+ - In the Arno release OpenStack Juno is used as the virtual infrastructure management component.  When operating the Arno release you will most often be interacting with the provided OpenStack interfaces.
+ - Application deployment on the stack can be performed directly on the OpenStack interfaces or by using the Heat interfaces provided.
+ - If you intend to perform actions on the networking component of the stack, note that the Arno release uses OpenDaylight Helium.  The interfaces for OpenDaylight are available for checking topology and interacting with the OpenDaylight SDN controller.
+ - Additional interfaces are provided by the Linux operating systems.  The Arno release supports either CentOS 7.1 or Ubuntu 14.04 operating systems.
+
+Details of operating these interfaces are explained later in the document.
+
+Prerequisites
+=============
+
+Hardware Requirements
+---------------------
+
+The Arno release of OPNFV is intended to be run as a baremetal deployment on a "Pharos compliant" lab infrastructure.  The Pharos project in OPNFV is a community activity to provide guidance and establish requirements on hardware platforms supporting the Arno virtualisation platform.
+
+Prior to deploying the OPNFV platform it is important that the hardware infrastructure be configured according to the Pharos specification: http://artifacts.opnfv.org/pharos/docs/spec.html
+
+Arno Platform Deployment
+------------------------
+
+The Arno platform supports installation and deployment using two deployment tools: a Foreman based deployment toolchain and a Fuel based deployment toolchain.
+
+In order to deploy the Arno release on a Pharos compliant lab using the Foreman deployment toolchain you should follow the Foreman installation guide: http://artifacts.opnfv.org/genesis/foreman/docs/installation-instructions.html
+
+In order to deploy the Arno release on a Pharos compliant lab using the Fuel deployment toolchain you should follow the Fuel installation guide: http://artifacts.opnfv.org/genesis/fuel/docs/installation-instructions.html
+
+Enabling or disabling OpenDaylight and the native Neutron driver
+----------------------------------------------------------------
+
+You may find that you wish to adjust the system by enabling or disabling the native OpenStack Neutron driver, depending on the tasks you are trying to achieve with the platform.  Each of the deployment tools has the option to deploy with or without OpenDaylight enabled.  Details of the available deployment options can be found in the associated installation instructions; please note that the platform validation procedures expect a fully deployed platform, and results may vary depending on the options selected.
+
+Deployment Validation
+---------------------
+
+Once installed you should validate the deployment completed successfully by executing the automated basic platform validation routines outlined in the Arno testing documentation: http://artifacts.opnfv.org/functest/docs/functest.html
+
+Operating the Arno platform
+===========================
+
+The Arno release provides a platform for deploying software on virtual infrastructure.  The majority of operations to be executed on the platform revolve around deploying, managing and removing software (applications) on the platform itself.  Application deployment is covered in the following sections; however, some platform operations you may want to perform first include setting up a tenant (in OpenStack tenants are also known as projects; in this document we will refer to them as tenants) and associated users for that tenant.
+
+OpenStack provides a good overview of how to create your first tenant for deploying your applications.  You should create a tenant for your applications, associate users with the tenant and assign quotas.
+ - Open the OpenStack console (Horizon); you should find this by logging into your control node. For example, to access the console of POD1 of the OPNFV lab you would browse to <172.30.9.70:80>
+ - Create your tenant and users by following the instructions at: http://docs.openstack.org/openstack-ops/content/projects_users.html (a CLI sketch follows this list)
+
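+For example, the same can be done from the command line on a controller node, assuming admin credentials have been sourced (the credential file, tenant, user, password and role names below are hypothetical placeholders and vary by deployment)::
+
+    source keystonerc_admin
+    keystone tenant-create --name demo --description "Demo tenant"
+    keystone user-create --name alice --pass secret
+    keystone user-role-add --user alice --role _member_ --tenant demo
+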
+Further actions and activities for checking logs and status can be found in other areas of the operations document: http://docs.openstack.org/openstack-ops/content/openstack-ops_preface.html
+
+
+Deploying your applications on Arno
+===================================
+
+Most actions you will want to perform can be executed from the OpenStack dashboard.  When deploying your application on Arno a good reference is the user guide, which describes uploading, managing and deploying your application images.
+
+ - Make sure you have established your tenant, associated users and quotas (see the CLI sketch after this list)
+ - Follow the guidelines for managing and deploying your images in the following user-guide: http://docs.openstack.org/user-guide/dashboard.html
+
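+If you prefer the command line over the dashboard, a minimal sketch (the image file, image name and flavor below are hypothetical; run with your tenant's credentials sourced)::
+
+    glance image-create --name cirros --disk-format qcow2 --container-format bare --file cirros-0.3.4-x86_64-disk.img
+    nova boot --flavor m1.small --image cirros testvm
+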
+
+Frequently Asked Questions
+==========================
+
+Does OPNFV provide support for the Arno release?
+------------------------------------------------
+
+The Arno release of OPNFV is intended to be a developmental release and is not considered suitable for production deployment or at scale testing activities.  As a developmental release, and in the spirit of collaborative development, we want as much feedback from the community as possible on your experiences with the platform and how the release can be improved.
+
+Support for Arno is provided in two ways:
+
+You can engage with the community to help us improve and further develop the OPNFV platform by raising Jira Bugs or Tasks, and pushing correction patches to our repositories.
+
+ - To access Jira for issue reporting or improvement proposals head to: https://jira.opnfv.org/
+ - To get started helping out developing the platform head to: https://wiki.opnfv.org/developer
+
+Alternatively, if you are intending to invest your time as a user of the platform, you can ask questions and request help from our mailing list at: mailto://support@opnfv.org
+
+License
+=======
+
+All Arno entities are protected by the `Apache 2.0 License <http://www.apache.org/licenses/>`_.
+Arno platform components and their licences are described in their respective Release Notes: http://artifacts.opnfv.org/genesis/foreman/docs/release-notes.html and http://artifacts.opnfv.org/genesis/fuel/docs/release-notes.html
+
+References
+==========
+
+OpenStack
+---------
+
+`OpenStack Admin User Guide <http://docs.openstack.org/user-guide-admin/>`_
+
+OpenDaylight
+------------
+
+`OpenDaylight User Guide <https://www.opendaylight.org/sites/opendaylight/files/User-Guide-Helium-SR2.pdf>`_
+
+Foreman
+-------
+
+`Foreman User Manual <http://theforeman.org/manuals/1.7/index.html>`_
+
+Fuel
+----
+
+`Fuel User Guide <http://docs.fuel-infra.org/openstack/fuel/fuel-6.0/user-guide.html>`_
+
+:Authors: Christopher Price (christopher.price@ericsson.com)
+:Version: 0.0.2
+
+**Documentation tracking**
+
+Revision: _sha1_
+
+Build date:  _date_
+
diff --git a/common/manifests/README.md b/common/manifests/README.md
new file mode 100644 (file)
index 0000000..2c34e19
--- /dev/null
@@ -0,0 +1,17 @@
+<!---
+Copyright 2015 Open Platform for NFV Project, Inc. and its contributors
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+# Manifests
+Puppet manifests which are common to OPNFV (i.e. independent from the various installer approaches).
diff --git a/common/puppet-opnfv/manifests/ceph_deploy.pp b/common/puppet-opnfv/manifests/ceph_deploy.pp
new file mode 100644 (file)
index 0000000..57202aa
--- /dev/null
@@ -0,0 +1,102 @@
+#Copyright 2015 Open Platform for NFV Project, Inc. and its contributors
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+#Class installs and configures a ceph cluster
+#Creates a single OSD per host and configures host as a monitor
+#Inserts authentication keyrings for volumes and images users
+#Creates OSD pools for volumes and images (needed by OpenStack)
+#Depends on puppet module: https://github.com/stackforge/puppet-ceph/
+
+class opnfv::ceph_deploy (
+  $fsid                      = '904c8491-5c16-4dae-9cc3-6ce633a7f4cc',
+  $osd_pool_default_pg_num   = '128',
+  $osd_pool_default_size     = '1',
+  $osd_pool_default_min_size = '1',
+  $mon_initial_members       = '',
+  $mon_host                  = '',
+  $cluster_network           = "10.4.8.0/21",
+  $public_network            = "10.4.8.0/21",
+  $osd_journal_size          = '1000',
+  $osd_ip                    = '',
+  $mon_key                   = 'AQDcvhVV+H08DBAA5/0GGcfBQxz+/eKAdbJdTQ==',
+  $admin_key                 = 'AQDcvhVV+H08DBAA5/0GGcfBQxz+/eKAdbJdTQ==',
+  $images_key                = 'AQAfHBdUKLnUFxAAtO7WPKQZ8QfEoGqH0CLd7A==',
+  $volumes_key               = 'AQAfHBdUsFPTHhAAfqVqPq31FFCvyyO7oaOQXw==',
+  $boostrap_key              = 'AQDcvhVV+H08DBAA5/0GGcfBQxz+/eKAdbJdTQ==',
+) {
+
+  class { 'ceph':
+     fsid                      => $fsid,
+     osd_pool_default_pg_num   => $osd_pool_default_pg_num,
+     osd_pool_default_size     => $osd_pool_default_size,
+     osd_pool_default_min_size => $osd_pool_default_min_size,
+     mon_initial_members       => $mon_initial_members,
+     mon_host                  => $mon_host,
+     cluster_network           => $cluster_network,
+     public_network            => $public_network,
+  }
+  ->
+  ceph_config {
+    'global/osd_journal_size': value => $osd_journal_size;
+  }
+  ->
+  ceph::mon { $::hostname:
+     public_addr  => $osd_ip,
+     key          => $mon_key,
+  }
+
+  Ceph::Key {
+        inject         => true,
+        inject_as_id   => 'mon.',
+        inject_keyring => "/var/lib/ceph/mon/ceph-${::hostname}/keyring",
+  }
+
+  ceph::key { 'client.admin':
+        secret  => $admin_key,
+        cap_mon => 'allow *',
+        cap_osd => 'allow *',
+        cap_mds => 'allow',
+        mode    => '0644',
+  }
+  ceph::key { 'client.images':
+        secret  => $images_key,
+        cap_mon => 'allow r',
+        cap_osd => 'allow class-read object_prefix rbd_children, allow rwx pool=images',
+        inject  => true,
+        mode    => '0644',
+  }
+
+  ceph::key { 'client.volumes':
+        secret  => $volumes_key,
+        cap_mon => 'allow r',
+        cap_osd => 'allow class-read object_prefix rbd_children, allow rwx pool=volumes',
+        inject  => true,
+        mode    => '0644',
+  }
+  ceph::key { 'client.bootstrap-osd':
+        secret  => $boostrap_key,
+        cap_mon => 'allow profile bootstrap-osd',
+        keyring_path => '/var/lib/ceph/bootstrap-osd/ceph.keyring',
+  }
+  ->
+  ceph::osd { '/osd0': }
+  ->
+  exec { 'create volumes pool':
+        command => "/usr/bin/ceph osd pool create volumes $osd_pool_default_pg_num",
+  }
+  ->
+  exec { 'create images pool':
+        command => "/usr/bin/ceph osd pool create images $osd_pool_default_pg_num",
+  }
+}
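+
+# Editorial usage sketch (not part of this repository's toolchain; all parameter values
+# below are hypothetical): with the stackforge puppet-ceph module on the modulepath, the
+# class could be exercised standalone with something like:
+#   puppet apply -e "class { 'opnfv::ceph_deploy': mon_initial_members => 'node1', mon_host => '10.4.8.11', osd_ip => '10.4.8.11' }"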
diff --git a/common/puppet-opnfv/manifests/compute.pp b/common/puppet-opnfv/manifests/compute.pp
new file mode 100644 (file)
index 0000000..0b81757
--- /dev/null
@@ -0,0 +1,163 @@
+#Copyright 2015 Open Platform for NFV Project, Inc. and its contributors
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+#Provides a manifest to configure OpenStack compute node in HA or non-HA
+#environment, with Ceph configured as Cinder backend storage.
+#ha_flag set to true will use virtual IP addresses (VIPs provided by
+#global params) as the provider to the compute node for HA
+
+class opnfv::compute {
+  if ($odl_flag != '') and str2bool($odl_flag) {
+     $ml2_mech_drivers = ['opendaylight']
+     $this_agent = 'opendaylight'
+  }
+  else {
+    $ml2_mech_drivers = ['openvswitch','l2population']
+    $this_agent = 'ovs'
+  }
+
+  ##Common Parameters
+  if !$rbd_secret_uuid { $rbd_secret_uuid = '3b519746-4021-4f72-957e-5b9d991723be' }
+  if !$private_subnet { fail('private_subnet is empty')}
+  if !$ceph_public_network { $ceph_public_network = $private_subnet }
+  if !$ceph_fsid { $ceph_fsid = '904c8491-5c16-4dae-9cc3-6ce633a7f4cc' }
+  if !$ceph_images_key { $ceph_images_key = 'AQAfHBdUKLnUFxAAtO7WPKQZ8QfEoGqH0CLd7A==' }
+  if !$ceph_osd_journal_size { $ceph_osd_journal_size = '1000' }
+  if !$ceph_osd_pool_size { $ceph_osd_pool_size = '1' }
+  if !$ceph_volumes_key { $ceph_volumes_key = 'AQAfHBdUsFPTHhAAfqVqPq31FFCvyyO7oaOQXw==' }
+
+
+  ##Most users will only care about a single user/password for all services
+  ##so lets create one variable that can be used instead of separate usernames/passwords
+  if !$single_username { $single_username = 'octopus' }
+  if !$single_password { $single_password = 'octopus' }
+
+  if !$admin_password { $admin_password = $single_password }
+  if !$neutron_db_password  { $neutron_db_password = $single_password }
+  if !$neutron_user_password  { $neutron_user_password = $single_password }
+
+  if !$ceilometer_user_password { $ceilometer_user_password = $single_password }
+  if !$ceilometer_metering_secret { $ceilometer_metering_secret = $single_password }
+
+  ##HA Global params
+  if $ha_flag {
+     if $private_network == '' { fail('private_network is empty') }
+     if !$keystone_private_vip { fail('keystone_private_vip is empty') }
+     if !$glance_private_vip { fail('glance_private_vip is empty') }
+     if !$nova_private_vip { fail('nova_private_vip is empty') }
+     if !$nova_db_password { $nova_db_password = $single_password }
+     if !$nova_user_password { $nova_user_password = $single_password }
+     if !$controllers_ip_array { fail('controllers_ip_array is empty') }
+     if !$controllers_hostnames_array { fail('controllers_hostnames_array is empty') }
+     $controllers_ip_array = split($controllers_ip_array, ',')
+     $controllers_hostnames_array = split($controllers_hostnames_array, ',')
+     if !$odl_control_ip  { $odl_control_ip =  $controllers_ip_array[0] }
+     if !$db_vip { fail('db_vip is empty') }
+     $mysql_ip = $db_vip
+     if !$amqp_vip { fail('amqp_vip is empty') }
+     $amqp_ip = $amqp_vip
+     if !$amqp_username { $amqp_username = $single_username }
+     if !$amqp_password { $amqp_password = $single_password }
+     if !$ceph_mon_initial_members { $ceph_mon_initial_members = $controllers_hostnames_array }
+     if !$ceph_mon_host { $ceph_mon_host = $controllers_ip_array }
+     if !$neutron_private_vip { fail('neutron_private_vip is empty') }
+
+    ##Find private interface
+    $ovs_tunnel_if = get_nic_from_network("$private_network")
+
+  } else {
+  ##non HA params
+     if $ovs_tunnel_if == '' { fail('ovs_tunnel_if is empty') }
+     if !$private_ip { fail('private_ip is empty') }
+     $keystone_private_vip = $private_ip
+     $glance_private_vip   = $private_ip
+     $nova_private_vip     = $private_ip
+     $neutron_private_vip  = $private_ip
+     if !$nova_db_password { fail('nova_db_password is empty') }
+     if !$nova_user_password { fail('nova_user_password is empty') }
+     if !$odl_control_ip { $odl_control_ip = $private_ip }
+     if !$mysql_ip { $mysql_ip = $private_ip }
+     if !$amqp_ip { $amqp_ip = $private_ip }
+     if !$amqp_username { $amqp_username = 'guest' }
+     if !$amqp_password { $amqp_password = 'guest' }
+     if !$ceph_mon_host { $ceph_mon_host= ["$private_ip"] }
+     if !$ceph_mon_initial_members { $ceph_mon_initial_members = ["$::hostname"] }
+  }
+
+  class { "quickstack::neutron::compute":
+    auth_host                    => $keystone_private_vip,
+    glance_host                  => $glance_private_vip,
+    libvirt_images_rbd_pool      => 'volumes',
+    libvirt_images_rbd_ceph_conf => '/etc/ceph/ceph.conf',
+    libvirt_inject_password      => 'false',
+    libvirt_inject_key           => 'false',
+    libvirt_images_type          => 'rbd',
+    nova_host                    => $nova_private_vip,
+    nova_db_password             => $nova_db_password,
+    nova_user_password           => $nova_user_password,
+    private_network              => '',
+    private_iface                => $ovs_tunnel_if,
+    private_ip                   => '',
+    rbd_user                     => 'volumes',
+    rbd_secret_uuid              => $rbd_secret_uuid,
+    network_device_mtu           => $quickstack::params::network_device_mtu,
+
+    admin_password               => $admin_password,
+    ssl                          => false,
+
+    mysql_host                   => $mysql_ip,
+    mysql_ca                     =>  '/etc/ipa/ca.crt',
+    amqp_host                    => $amqp_ip,
+    amqp_username                => $amqp_username,
+    amqp_password                => $amqp_password,
+
+    ceilometer                   => 'false',
+    ceilometer_metering_secret   => $ceilometer_metering_secret,
+    ceilometer_user_password     => $ceilometer_user_password,
+
+    cinder_backend_gluster       => $quickstack::params::cinder_backend_gluster,
+    cinder_backend_rbd           => 'true',
+    glance_backend_rbd           => 'true',
+    ceph_cluster_network         => $ceph_public_network,
+    ceph_fsid                    => $ceph_fsid,
+    ceph_images_key              => $ceph_images_key,
+    ceph_mon_host                => $ceph_mon_host,
+    ceph_mon_initial_members     => $ceph_mon_initial_members,
+    ceph_osd_pool_default_size   => $ceph_osd_pool_size,
+    ceph_osd_journal_size        => $ceph_osd_journal_size,
+    ceph_volumes_key             => $ceph_volumes_key,
+
+    agent_type                   => $this_agent,
+    enable_tunneling             => true,
+
+    ml2_mechanism_drivers        => $ml2_mech_drivers,
+    odl_controller_ip            => $odl_control_ip,
+
+    neutron_db_password          => $neutron_db_password,
+    neutron_user_password        => $neutron_user_password,
+    neutron_host                 => $neutron_private_vip,
+
+    ovs_tunnel_iface             => $ovs_tunnel_if,
+    ovs_tunnel_network           => '',
+    ovs_l2_population            => 'false',
+
+    tenant_network_type          => 'vxlan',
+    tunnel_id_ranges             => '1:1000',
+    ovs_tunnel_types             => ['vxlan'],
+
+    verbose                      => 'true',
+    security_group_api           => 'neutron',
+
+  }
+}
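+
+# Editorial usage sketch (hypothetical, not the project's documented workflow): the
+# unqualified top-scope variables checked above can be supplied as facts, e.g. through
+# FACTER_* environment variables, when exercising the non-HA branch of this class locally:
+#   FACTER_private_ip=10.4.8.11 FACTER_private_subnet=10.4.8.0/21 FACTER_ovs_tunnel_if=eth1 \
+#   FACTER_nova_db_password=secret FACTER_nova_user_password=secret \
+#   puppet apply -e "include opnfv::compute"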
diff --git a/common/puppet-opnfv/manifests/controller.pp b/common/puppet-opnfv/manifests/controller.pp
new file mode 100644 (file)
index 0000000..97b0181
--- /dev/null
@@ -0,0 +1,135 @@
+#Copyright 2015 Open Platform for NFV Project, Inc. and its contributors
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+
+
+class opnfv::controller {
+  ###use 8081 as a default work around swift service
+  if $odl_rest_port == '' {$odl_rest_port = '8081'}
+
+  if ($odl_flag != '') and str2bool($odl_flag) {
+     $ml2_mech_drivers = ['opendaylight']
+  }
+  else {
+    $ml2_mech_drivers = ['openvswitch','l2population']
+  }
+
+
+  if $admin_email == '' { fail('admin_email is empty') }
+  if $admin_password == '' { fail('admin_password is empty') }
+
+  if $public_ip == '' { fail('public_ip is empty') }
+  if $private_ip == '' { fail('private_ip is empty') }
+
+  if $odl_control_ip == '' { fail('odl_control_ip is empty, should be the IP of your network node private interface') }
+
+  if $mysql_ip == '' { fail('mysql_ip is empty') }
+  if $mysql_root_password == '' { fail('mysql_root_password is empty') }
+  if $amqp_ip == '' { fail('amqp_ip is empty') }
+
+  if $memcache_ip == '' { fail('memcache_ip is empty') }
+  if $neutron_ip == '' { fail('neutron_ip is empty') }
+
+  if $keystone_admin_token == '' { fail('keystone_admin_token is empty') }
+  if $keystone_db_password == '' { fail('keystone_db_password is empty') }
+
+  if $horizon_secret_key == '' { fail('horizon_secret_key is empty') }
+  #if $trystack_db_password == '' { fail('trystack_db_password is empty') }
+
+  if $nova_user_password == '' { fail('nova_user_password is empty') }
+  if $nova_db_password == '' { fail('nova_db_password is empty') }
+
+  if $cinder_user_password == '' { fail('cinder_user_password is empty') }
+  if $cinder_db_password == '' { fail('cinder_db_password is empty') }
+
+  if $glance_user_password == '' { fail('glance_user_password is empty') }
+  if $glance_db_password == '' { fail('glance_db_password is empty') }
+
+  if $neutron_user_password == '' { fail('neutron_user_password is empty') }
+  if $neutron_db_password == '' { fail('neutron_db_password is empty') }
+  if $neutron_metadata_shared_secret == '' { fail('neutron_metadata_shared_secret is empty') }
+
+  if $ceilometer_user_password == '' { fail('ceilometer_user_password is empty') }
+  if $ceilometer_metering_secret == '' { fail('ceilometer_metering_secret is empty') }
+
+  if $heat_user_password == '' { fail('heat_user_password is empty') }
+  if $heat_db_password == '' { fail('heat_db_password is empty') }
+  if $heat_auth_encrypt_key == '' { fail('heat_auth_encrypt_key is empty') }
+
+  if $swift_user_password == '' { fail('swift_user_password is empty') }
+  if $swift_shared_secret == '' { fail('swift_shared_secret is empty') }
+  if $swift_admin_password == '' { fail('swift_admin_password is empty') }
+
+  class { "quickstack::neutron::controller":
+    admin_email                   => $admin_email,
+    admin_password                => $admin_password,
+    controller_admin_host         => $private_ip,
+    controller_priv_host          => $private_ip,
+    controller_pub_host           => $public_ip,
+    ssl                           => false,
+    #support_profile               => $quickstack::params::support_profile,
+    #freeipa                       => $quickstack::params::freeipa,
+
+    mysql_host                    => $mysql_ip,
+    mysql_root_password           => $mysql_root_password,
+    #amqp_provider                 => $amqp_provider,
+    amqp_host                     => $amqp_ip,
+    amqp_username                 => 'guest',
+    amqp_password                 => 'guest',
+    #amqp_nssdb_password           => $quickstack::params::amqp_nssdb_password,
+
+    keystone_admin_token          => $keystone_admin_token,
+    keystone_db_password          => $keystone_db_password,
+
+    ceilometer_metering_secret    => $ceilometer_metering_secret,
+    ceilometer_user_password      => $ceilometer_user_password,
+
+    cinder_backend_gluster        => $quickstack::params::cinder_backend_gluster,
+    cinder_backend_gluster_name   => $quickstack::params::cinder_backend_gluster_name,
+    cinder_gluster_shares         => $quickstack::params::cinder_gluster_shares,
+    cinder_user_password          => $cinder_user_password,
+    cinder_db_password            => $cinder_db_password,
+
+    glance_db_password            => $glance_db_password,
+    glance_user_password          => $glance_user_password,
+
+    heat_cfn                      => true,
+    heat_cloudwatch               => true,
+    heat_db_password              => $heat_db_password,
+    heat_user_password            => $heat_user_password,
+    heat_auth_encrypt_key         => $heat_auth_encrypt_key,
+
+    horizon_secret_key            => $horizon_secret_key,
+    horizon_ca                    => $quickstack::params::horizon_ca,
+    horizon_cert                  => $quickstack::params::horizon_cert,
+    horizon_key                   => $quickstack::params::horizon_key,
+
+    ml2_mechanism_drivers         => $ml2_mech_drivers,
+    #neutron                       => true,
+    neutron_metadata_proxy_secret => $neutron_metadata_shared_secret,
+    neutron_db_password           => $neutron_db_password,
+    neutron_user_password         => $neutron_user_password,
+
+    nova_db_password              => $nova_db_password,
+    nova_user_password            => $nova_user_password,
+    odl_controller_ip             => $odl_control_ip,
+    odl_controller_port           => $odl_rest_port,
+
+    swift_shared_secret           => $swift_shared_secret,
+    swift_admin_password          => $swift_admin_password,
+    swift_ringserver_ip           => '192.168.203.1',
+    swift_storage_ips             => ["192.168.203.2","192.168.203.3","192.168.203.4"],
+    swift_storage_device          => 'device1',
+  }
+
+}
diff --git a/common/puppet-opnfv/manifests/controller_networker.pp b/common/puppet-opnfv/manifests/controller_networker.pp
new file mode 100644 (file)
index 0000000..157bc8f
--- /dev/null
@@ -0,0 +1,438 @@
+#Copyright 2015 Open Platform for NFV Project, Inc. and its contributors
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+#Provides HA or non-HA setup for an OpenStack Controller with ODL integration
+#Mandatory common and HA variables are needed to set up each Controller
+#ha_flag set to true will provide OpenStack HA of the following services:
+#rabbitmq, galera mariadb, keystone, glance, nova, cinder, horizon, neutron,
+#including all sub-services of those features (e.g. neutron-server, neutron-l3-agent, etc.)
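+#
+#A minimal usage sketch (the values below are purely illustrative; in practice
+#they arrive as top-scope variables set by the deployment tooling):
+#
+#  $ha_flag                     = 'true'
+#  $admin_email                 = 'admin@example.com'
+#  $controllers_ip_array        = '192.168.1.11,192.168.1.12,192.168.1.13'
+#  $controllers_hostnames_array = 'oscontroller1,oscontroller2,oscontroller3'
+#  $private_network             = '192.168.1.0'
+#  include opnfv::controller_networker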
+
+class opnfv::controller_networker {
+  if $odl_rest_port == '' { $odl_rest_port = '8081' }
+  if ($odl_flag != '') and str2bool($odl_flag) {
+    $ml2_mech_drivers = ['opendaylight']
+    $this_agent = 'opendaylight'
+  } else {
+    $ml2_mech_drivers = ['openvswitch','l2population']
+    $this_agent = 'ovs'
+  }
+
+  ##Mandatory Common variables
+  if $admin_email == '' { fail('admin_email is empty') }
+
+  ##Most users will only care about a single user/password for all services,
+  ##so let's create one variable that can be used instead of separate usernames/passwords
+  if !$single_username { $single_username = 'octopus' }
+  if !$single_password { $single_password = 'octopus' }
+
+  if !$keystone_admin_token { $keystone_admin_token = $single_password }
+  if !$neutron_metadata_shared_secret { $neutron_metadata_shared_secret = $single_password }
+  if !$mysql_root_password { $mysql_root_password = $single_password }
+  if !$admin_password { $admin_password = $single_password }
+
+  ##Check for HA, if not leave old functionality alone
+  if $ha_flag and str2bool($ha_flag) {
+    ##Mandatory HA variables
+    if !$controllers_ip_array { fail('controllers_ip_array is empty') }
+    $controllers_ip_array_str = $controllers_ip_array
+    $controllers_ip_array = split($controllers_ip_array, ',')
+    if !$controllers_hostnames_array { fail('controllers_hostnames_array is empty') }
+    $controllers_hostnames_array_str = $controllers_hostnames_array
+    $controllers_hostnames_array = split($controllers_hostnames_array, ',')
+    if !$amqp_vip { fail('amqp_vip is empty') }
+    if !$private_subnet { fail('private_subnet is empty')}
+    if !$cinder_admin_vip { fail('cinder_admin_vip is empty') }
+    if !$cinder_private_vip { fail('cinder_private_vip is empty') }
+    if !$cinder_public_vip { fail('cinder_public_vip is empty') }
+    if !$db_vip { fail('db_vip is empty') }
+    if !$glance_admin_vip { fail('glance_admin_vip is empty') }
+    if !$glance_private_vip { fail('glance_private_vip is empty') }
+    if !$glance_public_vip { fail('glance_public_vip is empty') }
+    if !$horizon_admin_vip { fail('horizon_admin_vip is empty') }
+    if !$horizon_private_vip { fail('horizon_private_vip is empty') }
+    if !$horizon_public_vip { fail('horizon_public_vip is empty') }
+    if !$keystone_admin_vip { fail('keystone_admin_vip is empty') }
+    if !$keystone_private_vip { fail('keystone_private_vip is empty') }
+    if !$keystone_public_vip { fail('keystone_public_vip is empty') }
+    if !$loadbalancer_vip { fail('loadbalancer_vip is empty') }
+    if !$neutron_admin_vip { fail('neutron_admin_vip is empty') }
+    if !$neutron_private_vip { fail('neutron_private_vip is empty') }
+    if !$neutron_public_vip { fail('neutron_public_vip is empty') }
+    if !$nova_admin_vip { fail('nova_admin_vip is empty') }
+    if !$nova_private_vip { fail('nova_private_vip is empty') }
+    if !$nova_public_vip { fail('nova_public_vip is empty') }
+    if $private_network == '' { fail('private_network is empty') }
+    if !$heat_admin_vip { fail('heat_admin_vip is empty') }
+    if !$heat_private_vip { fail('heat_private_vip is empty') }
+    if !$heat_public_vip { fail('heat_public_vip is empty') }
+    if !$heat_cfn_admin_vip { fail('heat_cfn_admin_vip is empty') }
+    if !$heat_cfn_private_vip { fail('heat_cfn_private_vip is empty') }
+    if !$heat_cfn_public_vip { fail('heat_cfn_public_vip is empty') }
+
+    ##Find private interface
+    $ovs_tunnel_if = get_nic_from_network("$private_network")
+
+    ##Optional HA variables
+    if !$amqp_username  { $amqp_username = $single_username }
+    if !$amqp_password  { $amqp_password = $single_password }
+    if !$ceph_fsid { $ceph_fsid = '904c8491-5c16-4dae-9cc3-6ce633a7f4cc' }
+    if !$ceph_images_key { $ceph_images_key = 'AQAfHBdUKLnUFxAAtO7WPKQZ8QfEoGqH0CLd7A==' }
+    if !$ceph_mon_host { $ceph_mon_host= $controllers_ip_array }
+    if !$ceph_mon_initial_members { $ceph_mon_initial_members = $controllers_hostnames_array}
+    if !$ceph_osd_journal_size { $ceph_osd_journal_size = '1000' }
+    if !$ceph_osd_pool_size { $ceph_osd_pool_size = '1' }
+    if !$ceph_public_network { $ceph_public_network = $private_subnet }
+    if !$ceph_volumes_key { $ceph_volumes_key = 'AQAfHBdUsFPTHhAAfqVqPq31FFCvyyO7oaOQXw==' }
+    if !$cinder_db_password { $cinder_db_password = $single_password }
+    if !$cinder_user_password { $cinder_user_password = $single_password }
+    if !$cluster_control_ip { $cluster_control_ip = $controllers_ip_array[0] }
+    if !$horizon_secret { $horizon_secret = $single_password }
+    if !$glance_db_password { $glance_db_password = $single_password }
+    if !$glance_user_password { $glance_user_password = $single_password }
+    if !$keystone_db_password { $keystone_db_password = $single_password }
+    if !$keystone_user_password { $keystone_user_password = $single_password }
+    if !$lb_backend_server_addrs { $lb_backend_server_addrs = $controllers_ip_array }
+    if !$lb_backend_server_names { $lb_backend_server_names = $controllers_hostnames_array }
+    if !$neutron_db_password  { $neutron_db_password = $single_password }
+    if !$neutron_user_password  { $neutron_user_password = $single_password }
+    if !$neutron_metadata_proxy_secret { $neutron_metadata_proxy_secret = $single_password }
+    if !$nova_db_password { $nova_db_password = $single_password }
+    if !$nova_user_password { $nova_user_password = $single_password }
+    if !$pcmk_server_addrs {$pcmk_server_addrs = $controllers_ip_array}
+    if !$pcmk_server_names {$pcmk_server_names = ["pcmk-${controllers_hostnames_array[0]}", "pcmk-${controllers_hostnames_array[1]}", "pcmk-${controllers_hostnames_array[2]}"] }
+    if !$rbd_secret_uuid { $rbd_secret_uuid = '3b519746-4021-4f72-957e-5b9d991723be' }
+    if !$heat_user_password  { $heat_user_password = $single_password }
+    if !$heat_db_password  { $heat_db_password = $single_password }
+    if !$heat_cfn_user_password  { $heat_cfn_user_password = $single_password }
+    if !$heat_auth_encryption_key  { $heat_auth_encryption_key = 'octopus1octopus1' }
+    if !$storage_network {
+          $storage_iface = $ovs_tunnel_if
+    } else {
+          $storage_iface = get_nic_from_network("$storage_network")
+    }
+
+    ##we assume here that if not provided, the first controller is where ODL will reside
+    ##this is fine for now as we will replace ODL with ODL HA when it is ready
+    if $odl_control_ip == '' { $odl_control_ip =  $controllers_ip_array[0] }
+
+    ###find interface ip of storage network
+    $osd_ip = find_ip("",
+                      "$storage_iface",
+                      "")
+
+    if ($external_network_flag != '') and str2bool($external_network_flag) {
+      class { "opnfv::external_net_presetup":
+        stage   => presetup,
+        require => Class['opnfv::repo'],
+      }
+    }
+
+    class { "opnfv::ceph_deploy":
+      fsid                     => $ceph_fsid,
+      osd_pool_default_size    => $ceph_osd_pool_size,
+      osd_journal_size         => $ceph_osd_journal_size,
+      mon_initial_members      => $controllers_hostnames_array_str,
+      mon_host                 => $controllers_ip_array_str,
+      osd_ip                   => $osd_ip,
+      public_network           => $ceph_public_network,
+      cluster_network          => $ceph_public_network,
+      images_key               => $ceph_images_key,
+      volumes_key              => $ceph_volumes_key,
+    }
+    ->
+    class { "quickstack::openstack_common": }
+    ->
+    class { "quickstack::pacemaker::params":
+      amqp_password            => $amqp_password,
+      amqp_username            => $amqp_username,
+      amqp_vip                 => $amqp_vip,
+      ceph_cluster_network     => $private_subnet,
+      ceph_fsid                => $ceph_fsid,
+      ceph_images_key          => $ceph_images_key,
+      ceph_mon_host            => $ceph_mon_host,
+      ceph_mon_initial_members => $ceph_mon_initial_members,
+      ceph_osd_journal_size    => $ceph_osd_journal_size,
+      ceph_osd_pool_size       => $ceph_osd_pool_size,
+      ceph_public_network      => $ceph_public_network,
+      ceph_volumes_key         => $ceph_volumes_key,
+      cinder_admin_vip         => $cinder_admin_vip,
+      cinder_db_password       => $cinder_db_password,
+      cinder_private_vip       => $cinder_private_vip,
+      cinder_public_vip        => $cinder_public_vip,
+      cinder_user_password     => $cinder_user_password,
+      cluster_control_ip       => $cluster_control_ip,
+      db_vip                   => $db_vip,
+      glance_admin_vip         => $glance_admin_vip,
+      glance_db_password       => $glance_db_password,
+      glance_private_vip       => $glance_private_vip,
+      glance_public_vip        => $glance_public_vip,
+      glance_user_password     => $glance_user_password,
+      heat_auth_encryption_key => $heat_auth_encryption_key,
+      heat_cfn_admin_vip       => $heat_cfn_admin_vip,
+      heat_cfn_private_vip     => $heat_cfn_private_vip,
+      heat_cfn_public_vip      => $heat_cfn_public_vip,
+      heat_cfn_user_password   => $heat_cfn_user_password,
+      heat_cloudwatch_enabled  => 'true',
+      heat_cfn_enabled         => 'true',
+      heat_db_password         => $heat_db_password,
+      heat_admin_vip           => $heat_admin_vip,
+      heat_private_vip         => $heat_private_vip,
+      heat_public_vip          => $heat_public_vip,
+      heat_user_password       => $heat_user_password,
+      horizon_admin_vip        => $horizon_admin_vip,
+      horizon_private_vip      => $horizon_private_vip,
+      horizon_public_vip       => $horizon_public_vip,
+      include_ceilometer       => 'false',
+      include_cinder           => 'true',
+      include_glance           => 'true',
+      include_heat             => 'true',
+      include_horizon          => 'true',
+      include_keystone         => 'true',
+      include_neutron          => 'true',
+      include_nosql            => 'false',
+      include_nova             => 'true',
+      include_swift            => 'false',
+      keystone_admin_vip       => $keystone_admin_vip,
+      keystone_db_password     => $keystone_db_password,
+      keystone_private_vip     => $keystone_private_vip,
+      keystone_public_vip      => $keystone_public_vip,
+      keystone_user_password   => $keystone_user_password,
+      lb_backend_server_addrs  => $lb_backend_server_addrs,
+      lb_backend_server_names  => $lb_backend_server_names,
+      loadbalancer_vip         => $loadbalancer_vip,
+      neutron                  => 'true',
+      neutron_admin_vip        => $neutron_admin_vip,
+      neutron_db_password      => $neutron_db_password,
+      neutron_metadata_proxy_secret  => $neutron_metadata_proxy_secret,
+      neutron_private_vip      => $neutron_private_vip,
+      neutron_public_vip       => $neutron_public_vip,
+      neutron_user_password    => $neutron_user_password,
+      nova_admin_vip           => $nova_admin_vip,
+      nova_db_password         => $nova_db_password,
+      nova_private_vip         => $nova_private_vip,
+      nova_public_vip          => $nova_public_vip,
+      nova_user_password       => $nova_user_password,
+      pcmk_iface               => $ovs_tunnel_if,
+      pcmk_server_addrs        => $pcmk_server_addrs,
+      pcmk_server_names        => $pcmk_server_names,
+      private_iface            => $ovs_tunnel_if,
+    }
+    ->
+    class { "quickstack::pacemaker::common": }
+    ->
+    class { "quickstack::pacemaker::load_balancer": }
+    ->
+    class { "quickstack::pacemaker::galera":
+      mysql_root_password     => $mysql_root_password,
+      wsrep_cluster_members   => $controllers_ip_array,
+    }
+    ->
+     class { "quickstack::pacemaker::qpid": }
+    ->
+    class { "quickstack::pacemaker::rabbitmq": }
+    ->
+    class { "quickstack::pacemaker::keystone":
+      admin_email         =>  $admin_email,
+      admin_password      =>  $admin_password,
+      admin_token         =>  $keystone_admin_token,
+      cinder              =>  'true',
+      heat                =>  'true',
+      heat_cfn            =>  'true',
+      keystonerc          =>  'true',
+      use_syslog          =>  'true',
+      verbose             =>  'true',
+    }
+    ->
+    class { "quickstack::pacemaker::swift": }
+    ->
+    class { "quickstack::pacemaker::glance":
+      backend         => 'rbd',
+      debug           => true,
+      pcmk_fs_manage  => 'false',
+      use_syslog      => true,
+      verbose         => true
+    }
+    ->
+    class { "quickstack::pacemaker::nova":
+      neutron_metadata_proxy_secret => $neutron_metadata_shared_secret,
+    }
+    ->
+    class { "quickstack::pacemaker::cinder":
+      backend_rbd     => true,
+      rbd_secret_uuid => $rbd_secret_uuid,
+      use_syslog      => true,
+      verbose         => true,
+      volume          => true,
+    }
+    ->
+    class { "quickstack::pacemaker::heat":
+      use_syslog      => true,
+      verbose         => true,
+    }
+    ->
+    class { "quickstack::pacemaker::constraints": }
+
+    class { "quickstack::pacemaker::nosql": }
+
+    class { "quickstack::pacemaker::memcached": }
+
+    class { "quickstack::pacemaker::ceilometer":
+      ceilometer_metering_secret => $single_password,
+    }
+
+    class { "quickstack::pacemaker::horizon":
+      horizon_ca       =>  '/etc/ipa/ca.crt',
+      horizon_cert     =>  '/etc/pki/tls/certs/PUB_HOST-horizon.crt',
+      horizon_key      =>  '/etc/pki/tls/private/PUB_HOST-horizon.key',
+      secret_key       =>  $horizon_secret,
+      verbose          =>  'true',
+    }
+
+    class { "quickstack::pacemaker::neutron":
+      agent_type               =>  $this_agent,
+      enable_tunneling         =>  'true',
+      ml2_mechanism_drivers    =>  $ml2_mech_drivers,
+      ml2_network_vlan_ranges  =>  ["physnet1:10:50"],
+      odl_controller_ip        =>  $odl_control_ip,
+      odl_controller_port      =>  $odl_rest_port,
+      ovs_tunnel_iface         =>  $ovs_tunnel_if,
+      ovs_tunnel_types         =>  ["vxlan"],
+      verbose                  =>  'true',
+    }
+
+    if ($external_network_flag != '') and str2bool($external_network_flag) {
+      class { "opnfv::external_net_setup": }
+    }
+
+  } else {
+    if $ovs_tunnel_if == '' { fail('ovs_tunnel_if is empty') }
+    if $public_ip == '' { fail('public_ip is empty') }
+    if $private_ip == '' { fail('private_ip is empty') }
+
+    if $odl_control_ip == '' { $odl_control_ip = $private_ip }
+
+    if $mysql_ip == '' { fail('mysql_ip is empty') }
+    if $mysql_root_password == '' { fail('mysql_root_password is empty') }
+    if $amqp_ip == '' { fail('amqp_ip is empty') }
+
+    if $memcache_ip == '' { fail('memcache_ip is empty') }
+    if $neutron_ip == '' { fail('neutron_ip is empty') }
+
+    if $keystone_db_password == '' { fail('keystone_db_password is empty') }
+
+    if $horizon_secret_key == '' { fail('horizon_secret_key is empty') }
+
+    if $nova_user_password == '' { fail('nova_user_password is empty') }
+    if $nova_db_password == '' { fail('nova_db_password is empty') }
+
+    if $cinder_user_password == '' { fail('cinder_user_password is empty') }
+    if $cinder_db_password == '' { fail('cinder_db_password is empty') }
+
+    if $glance_user_password == '' { fail('glance_user_password is empty') }
+    if $glance_db_password == '' { fail('glance_db_password is empty') }
+
+    if $neutron_user_password == '' { fail('neutron_user_password is empty') }
+    if $neutron_db_password == '' { fail('neutron_db_password is empty') }
+    if $neutron_metadata_shared_secret == '' { fail('neutron_metadata_shared_secret is empty') }
+
+    if $ceilometer_user_password == '' { fail('ceilometer_user_password is empty') }
+    if $ceilometer_metering_secret == '' { fail('ceilometer_metering_secret is empty') }
+
+    if $heat_user_password == '' { fail('heat_user_password is empty') }
+    if $heat_db_password == '' { fail('heat_db_password is empty') }
+    if $heat_auth_encrypt_key == '' { fail('heat_auth_encrypt_key is empty') }
+
+    if $swift_user_password == '' { fail('swift_user_password is empty') }
+    if $swift_shared_secret == '' { fail('swift_shared_secret is empty') }
+    if $swift_admin_password == '' { fail('swift_admin_password is empty') }
+
+    if !$amqp_username { $amqp_username = $single_username }
+    if !$amqp_password { $amqp_password = $single_password }
+
+
+    class { "quickstack::neutron::controller_networker":
+      admin_email                   => $admin_email,
+      admin_password                => $admin_password,
+      agent_type                    => $this_agent,
+      enable_tunneling              => true,
+      ovs_tunnel_iface              => $ovs_tunnel_if,
+      ovs_tunnel_network            => '',
+      ovs_tunnel_types              => ['vxlan'],
+      ovs_l2_population             => 'True',
+      external_network_bridge       => 'br-ex',
+      tenant_network_type           => 'vxlan',
+      tunnel_id_ranges              => '1:1000',
+      controller_admin_host         => $private_ip,
+      controller_priv_host          => $private_ip,
+      controller_pub_host           => $public_ip,
+      ssl                           => false,
+      #support_profile               => $quickstack::params::support_profile,
+      #freeipa                       => $quickstack::params::freeipa,
+
+      mysql_host                    => $mysql_ip,
+      mysql_root_password           => $mysql_root_password,
+      #amqp_provider                 => $amqp_provider,
+      amqp_host                     => $amqp_ip,
+      amqp_username                 => $amqp_username,
+      amqp_password                 => $amqp_password,
+      #amqp_nssdb_password           => $quickstack::params::amqp_nssdb_password,
+
+      keystone_admin_token          => $keystone_admin_token,
+      keystone_db_password          => $keystone_db_password,
+
+      ceilometer_metering_secret    => $ceilometer_metering_secret,
+      ceilometer_user_password      => $ceilometer_user_password,
+
+      cinder_backend_gluster        => $quickstack::params::cinder_backend_gluster,
+      cinder_backend_gluster_name   => $quickstack::params::cinder_backend_gluster_name,
+      cinder_gluster_shares         => $quickstack::params::cinder_gluster_shares,
+      cinder_user_password          => $cinder_user_password,
+      cinder_db_password            => $cinder_db_password,
+
+      glance_db_password            => $glance_db_password,
+      glance_user_password          => $glance_user_password,
+
+      heat_cfn                      => true,
+      heat_cloudwatch               => true,
+      heat_db_password              => $heat_db_password,
+      heat_user_password            => $heat_user_password,
+      heat_auth_encrypt_key         => $heat_auth_encrypt_key,
+
+      horizon_secret_key            => $horizon_secret_key,
+      horizon_ca                    => $quickstack::params::horizon_ca,
+      horizon_cert                  => $quickstack::params::horizon_cert,
+      horizon_key                   => $quickstack::params::horizon_key,
+
+      ml2_mechanism_drivers         => $ml2_mech_drivers,
+
+      #neutron                       => true,
+      neutron_metadata_proxy_secret => $neutron_metadata_shared_secret,
+      neutron_db_password           => $neutron_db_password,
+      neutron_user_password         => $neutron_user_password,
+
+      nova_db_password              => $nova_db_password,
+      nova_user_password            => $nova_user_password,
+
+      odl_controller_ip             => $odl_control_ip,
+      odl_controller_port           => $odl_rest_port,
+
+      swift_shared_secret           => $swift_shared_secret,
+      swift_admin_password          => $swift_admin_password,
+      swift_ringserver_ip           => '192.168.203.1',
+      swift_storage_ips             => ["192.168.203.2","192.168.203.3","192.168.203.4"],
+      swift_storage_device          => 'device1',
+    }
+
+  }
+}
diff --git a/common/puppet-opnfv/manifests/external_net_presetup.pp b/common/puppet-opnfv/manifests/external_net_presetup.pp
new file mode 100644 (file)
index 0000000..b7c7c5f
--- /dev/null
@@ -0,0 +1,102 @@
+#Copyright 2015 Open Platform for NFV Project, Inc. and its contributors
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+
+class opnfv::external_net_presetup {
+
+  if $public_gateway == '' { fail('public_gateway is empty') }
+  if $public_dns == '' { fail('public_dns is empty') }
+  if $public_network == '' { fail('public_network is empty') }
+  if $public_subnet == '' { fail('public_subnet is empty') }
+  if $public_allocation_start == '' { fail('public_allocation_start is empty') }
+  if $public_allocation_end == '' { fail('public_allocation_end is empty') }
+  if !$controllers_hostnames_array { fail('controllers_hostnames_array is empty') }
+  $controllers_hostnames_array_str = $controllers_hostnames_array
+  $controllers_hostnames_array = split($controllers_hostnames_array, ',')
+
+  #find public NIC
+  $public_nic = get_nic_from_network("$public_network")
+  $public_nic_ip = get_ip_from_nic("$public_nic")
+  $public_nic_netmask = get_netmask_from_nic("$public_nic")
+
+ if ($public_nic == '') or ($public_nic_ip == '') or ($public_nic == "br-ex") or ($public_nic == "br_ex") {
+  notify {"Skipping augeas, public_nic ${public_nic}, public_nic_ip ${public_nic_ip}":} 
+
+  exec {'ovs-vsctl -t 10 -- --may-exist add-br br-ex':
+       path         => ["/usr/sbin/", "/usr/bin/"],
+       unless       => 'ip addr show br-ex | grep "inet "',
+       before       => Exec['restart-network-public-nic-ip'],
+  }
+  ~>
+  exec {'systemctl restart network':
+       path         => ["/usr/sbin/", "/usr/bin/"],
+       refreshonly  => 'true',
+  }
+
+  exec {'restart-network-public-nic-ip':
+       command      => 'systemctl restart network',
+       path         => ["/usr/sbin/", "/usr/bin/"],
+       onlyif       => 'ip addr show | grep $(ip addr show br-ex | grep -Eo "inet [\.0-9]+" | cut -d " " -f2) | grep -v br-ex',
+  }
+
+ } else {
+  #reconfigure public interface to be ovsport
+  augeas { "main-$public_nic":
+        context => "/files/etc/sysconfig/network-scripts/ifcfg-$public_nic",
+        changes => [
+                "rm IPADDR",
+                "rm NETMASK",
+                "rm GATEWAY",
+                "rm DNS1",
+                "rm BOOTPROTO",
+                "rm DEFROUTE",
+                "rm IPV6_DEFROUTE",
+                "rm IPV6_PEERDNS",
+                "rm IPV6_PEERROUTES",
+                "rm PEERROUTES",
+                "set PEERDNS no",
+                "set BOOTPROTO static",
+                "set IPV6INIT no",
+                "set IPV6_AUTOCONF no",
+                "set ONBOOT yes",
+                "set TYPE OVSPort",
+                "set OVS_BRIDGE br-ex",
+                "set PROMISC yes"
+
+        ],
+        before  => Class["quickstack::pacemaker::params"],
+        require => Service["openvswitch"],
+  }
+
+  ->
+  #create br-ex interface
+  file { 'external_bridge':
+        path => '/etc/sysconfig/network-scripts/ifcfg-br-ex',
+        owner   => 'root',
+        group   => 'root',
+        mode    => '0644',
+        content => template('trystack/br_ex.erb'),
+        before  => Class["quickstack::pacemaker::params"],
+  }
+  ->
+  exec {'ovs-vsctl -t 10 -- --may-exist add-br br-ex':
+       path         => ["/usr/sbin/", "/usr/bin/"],
+  }
+  ~>
+  exec {'systemctl restart network':
+       path         => ["/usr/sbin/", "/usr/bin/"],
+       refreshonly  => 'true',
+  }
+
+ }
+}
diff --git a/common/puppet-opnfv/manifests/external_net_setup.pp b/common/puppet-opnfv/manifests/external_net_setup.pp
new file mode 100644 (file)
index 0000000..af00f20
--- /dev/null
@@ -0,0 +1,85 @@
+#Copyright 2015 Open Platform for NFV Project, Inc. and its contributors
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+
+class opnfv::external_net_setup {
+
+  if $public_gateway == '' { fail('public_gateway is empty') }
+  if $public_dns == '' { fail('public_dns is empty') }
+  if $public_network == '' { fail('public_network is empty') }
+  if $public_subnet == '' { fail('public_subnet is empty') }
+  if $public_allocation_start == '' { fail('public_allocation_start is empty') }
+  if $public_allocation_end == '' { fail('public_allocation_end is empty') }
+  if !$controllers_hostnames_array { fail('controllers_hostnames_array is empty') }
+  $controllers_hostnames_array_str = $controllers_hostnames_array
+  $controllers_hostnames_array = split($controllers_hostnames_array, ',')
+
+  #find public NIC
+  $public_nic = get_nic_from_network("$public_network")
+  $public_nic_ip = get_ip_from_nic("$public_nic")
+  $public_nic_netmask = get_netmask_from_nic("$public_nic")
+
+  Anchor[ 'neutron configuration anchor end' ]
+  ->
+  #update bridge-mappings to physnet1
+  file_line { 'ovs':
+    ensure  => present,
+    path    => '/etc/neutron/plugin.ini',
+    line    => '[ovs]',
+  }
+  ->
+  #update bridge-mappings to physnet1
+  file_line { 'bridge_mapping':
+    ensure  => present,
+    path    => '/etc/neutron/plugin.ini',
+    line    => 'bridge_mappings = physnet1:br-ex',
+  }
+  ->
+  Exec["pcs-neutron-server-set-up"]
+
+##this way we only let controller1 create the neutron resources
+##controller1 should be the active neutron-server at provisioning time
+
+ if $hostname == $controllers_hostnames_array[0] {
+  Exec["all-neutron-nodes-are-up"]
+  ->
+  neutron_network { 'provider_network':
+    ensure                    => present,
+    name                      => 'provider_network',
+    admin_state_up            => true,
+    provider_network_type     => 'flat',
+    provider_physical_network => 'physnet1',
+    router_external           => true,
+    tenant_name               => 'admin',
+  }
+  ->
+  neutron_subnet { 'provider_subnet':
+    ensure            => present,
+    name              => 'provider_subnet',
+    cidr              => $public_subnet,
+    gateway_ip        => $public_gateway,
+    allocation_pools  => [ "start=${public_allocation_start},end=${public_allocation_end}" ],
+    dns_nameservers   => $public_dns,
+    network_name      => 'provider_network',
+    tenant_name       => 'admin',
+  }
+  ->
+  neutron_router { 'provider_router':
+    ensure               => present,
+    name                 => 'provider_router',
+    admin_state_up       => true,
+    gateway_network_name => 'provider_network',
+    tenant_name          => 'admin',
+  }
+ }
+}
diff --git a/common/puppet-opnfv/manifests/init.pp b/common/puppet-opnfv/manifests/init.pp
new file mode 100644 (file)
index 0000000..7b68df5
--- /dev/null
@@ -0,0 +1,44 @@
+#Copyright 2015 Open Platform for NFV Project, Inc. and its contributors
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+
+
+class opnfv {
+     if $::osfamily == 'Fuel' {
+       include opnfv::resolver
+       include opnfv::ntp
+       include opnfv::add_packages
+       include opnfv::odl_docker
+       include opnfv::opncheck
+    }
+
+   if $::osfamily == 'RedHat' {
+
+       include stdlib
+       stage { 'presetup':
+               before => Stage['setup'],
+       }
+
+      class { '::ntp':
+        stage => presetup,
+      }
+
+       class { "opnfv::repo":
+               stage => presetup,
+       }
+      ->
+      package { "python-rados":
+        ensure => latest,
+      }
+   }
+}
diff --git a/common/puppet-opnfv/manifests/network.pp b/common/puppet-opnfv/manifests/network.pp
new file mode 100644 (file)
index 0000000..91e7693
--- /dev/null
@@ -0,0 +1,77 @@
+#Copyright 2015 Open Platform for NFV Project, Inc. and its contributors
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+
+
+class opnfv::network {
+  ###use 8081 as the default to work around OpenStack Swift, which uses port 8080
+  if $odl_rest_port == '' { $odl_rest_port = '8081' }
+
+  if ($odl_flag != '') and str2bool($odl_flag) {
+    $ml2_mech_drivers = ['opendaylight']
+    $this_agent = 'opendaylight'
+    class {"opendaylight":
+      odl_rest_port  => $odl_rest_port,
+      extra_features => ['odl-base-all', 'odl-aaa-authn', 'odl-restconf', 'odl-nsf-all', 'odl-adsal-northbound', 'odl-mdsal-apidocs', 'odl-ovsdb-openstack', 'odl-ovsdb-northbound', 'odl-dlux-core'],
+    }
+  } else {
+    $ml2_mech_drivers = ['openvswitch','l2population']
+    $this_agent = 'ovs'
+  }
+
+
+
+  if $ovs_tunnel_if == '' { fail('ovs_tunnel_if is empty') }
+  if $private_ip == '' { fail('private_ip is empty') }
+
+  if $odl_control_ip == '' { fail('odl_control_ip is empty, should be the IP of your network node private interface') }
+
+  if $mysql_ip == '' { fail('mysql_ip is empty') }
+  if $amqp_ip == '' { fail('amqp_ip is empty') }
+
+  if $nova_user_password == '' { fail('nova_user_password is empty') }
+  if $nova_db_password == '' { fail('nova_db_password is empty') }
+
+  if $neutron_user_password == '' { fail('neutron_user_password is empty') }
+  if $neutron_db_password == '' { fail('neutron_db_password is empty') }
+  if $neutron_metadata_shared_secret == '' { fail('neutron_metadata_shared_secret is empty') }
+
+  class { "quickstack::neutron::networker":
+    agent_type                    => $this_agent,
+    neutron_metadata_proxy_secret => $neutron_metadata_shared_secret,
+    neutron_db_password           => $neutron_db_password,
+    neutron_user_password         => $neutron_user_password,
+    nova_db_password              => $nova_db_password,
+    nova_user_password            => $nova_user_password,
+
+    controller_priv_host          => $private_ip,
+
+    enable_tunneling              => true,
+    ovs_tunnel_iface              => $ovs_tunnel_if,
+    ovs_tunnel_network            => '',
+    ovs_l2_population             => 'True',
+    ovs_tunnel_types              => ['vxlan'],
+    external_network_bridge       => 'br-ex',
+    tenant_network_type           => 'vxlan',
+    tunnel_id_ranges              => '1:1000',
+
+    mysql_host                    => $mysql_ip,
+    amqp_host                     => $amqp_ip,
+    amqp_username                 => 'guest',
+    amqp_password                 => 'guest',
+
+    ml2_mechanism_drivers        => $ml2_mech_drivers,
+    odl_controller_ip            => $odl_control_ip,
+  }
+}
diff --git a/common/puppet-opnfv/manifests/ntp.pp b/common/puppet-opnfv/manifests/ntp.pp
new file mode 100644 (file)
index 0000000..c27175e
--- /dev/null
@@ -0,0 +1,72 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Class: opnfv::ntp
+#
+# Add NTP content passed through astute.yaml into ntp.conf depending on the role
+#
+# Suitable yaml content:
+# <begin>
+# opnfv:
+#   ntp:
+#     controller: |
+#       line 1
+#       line 2
+#     compute: |
+#       line 1
+#       line 2
+# <end>
+#
+#
+#
+
+class opnfv::ntp(
+  $file='/etc/ntp.conf'
+) {
+
+  if $::fuel_settings['role'] {
+    if ($::fuel_settings['opnfv'] and
+    $::fuel_settings['opnfv']['ntp']) {
+      case $::fuel_settings['role'] {
+        /controller/: {
+          if $::fuel_settings['opnfv']['ntp']['controller'] {
+            $template = 'opnfv/ntp.conf.controller.erb'
+            $file_content = $::fuel_settings['opnfv']['ntp']['controller']
+          }
+        }
+        /compute/:    {
+          if $::fuel_settings['opnfv']['ntp']['compute'] {
+            $template = 'opnfv/ntp.conf.compute.erb'
+            $file_content = $::fuel_settings['opnfv']['ntp']['compute']
+          }
+        }
+      }
+    }
+  }
+
+  if $file_content {
+    package { 'ntp':
+      ensure => installed,
+    }
+
+    file { $file:
+      content => template($template),
+      notify  => Service['ntp'],
+    }
+
+    service { 'ntp':
+      ensure  => running,
+      enable  => true,
+      require => [ Package['ntp'], File[$file]]
+    }
+  }
+}
+
+
diff --git a/common/puppet-opnfv/manifests/odl_docker.pp b/common/puppet-opnfv/manifests/odl_docker.pp
new file mode 100644 (file)
index 0000000..6e70ba0
--- /dev/null
@@ -0,0 +1,50 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# daniel.smith@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+class opnfv::odl_docker
+{
+    case $::fuel_settings['role'] {
+      /controller/: {
+
+        file { "/opt":
+                ensure => "directory",
+             }
+
+        file { "/opt/opnfv":
+                ensure => "directory",
+                owner => "root",
+                group => "root",
+                mode => '0777',
+             }
+
+        file { "/opt/opnfv/odl":
+                ensure => "directory",
+             }
+
+        file { "/opt/opnfv/odl/odl_docker_image.tar":
+                ensure => present,
+                source => "/etc/puppet/modules/opnfv/odl_docker/odl_docker_image.tar",
+                mode => '0750',
+             }
+
+        file { "/opt/opnfv/odl/docker-latest":
+                ensure => present,
+                source => "/etc/puppet/modules/opnfv/odl_docker/docker-latest",
+                mode => '0750',
+             }
+
+        file { "/opt/opnfv/odl/start_odl_conatiner.sh":
+                ensure => present,
+                source => "/etc/puppet/modules/opnfv/scripts/start_odl_container.sh",
+                mode => 750,
+             }
+  }
+ }
+}
+
diff --git a/common/puppet-opnfv/manifests/odl_service.pp b/common/puppet-opnfv/manifests/odl_service.pp
new file mode 100644 (file)
index 0000000..bbe8218
--- /dev/null
@@ -0,0 +1,24 @@
+#Copyright 2015 Open Platform for NFV Project, Inc. and its contributors
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+#This class installs OpenDaylight with a default REST port of 8081.
+#This works around OpenStack Swift, which also uses the common port 8080.
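+#
+#A minimal usage sketch (8081 is simply the default noted above; a different
+#value would normally arrive as a top-scope variable from the deployment tooling):
+#
+#  $odl_rest_port = '8081'
+#  include opnfv::odl_service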
+
+class opnfv::odl_service {
+     if !$odl_rest_port { $odl_rest_port = '8081'}
+     class {"opendaylight":
+       extra_features => ['odl-base-all', 'odl-aaa-authn', 'odl-restconf', 'odl-nsf-all', 'odl-adsal-northbound', 'odl-mdsal-apidocs', 'odl-ovsdb-openstack', 'odl-ovsdb-northbound', 'odl-dlux-core'],
+       odl_rest_port  => $odl_rest_port,
+     }
+}
diff --git a/common/puppet-opnfv/manifests/repo.pp b/common/puppet-opnfv/manifests/repo.pp
new file mode 100644 (file)
index 0000000..fe89305
--- /dev/null
@@ -0,0 +1,67 @@
+#Copyright 2015 Open Platform for NFV Project, Inc. and its contributors
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+
+
+class opnfv::repo {
+  if $::osfamily == 'RedHat' {
+    if $proxy_address != '' {
+      $myline= "proxy=${proxy_address}"
+      include stdlib
+      file_line { 'yumProxy':
+        ensure => present,
+        path   => '/etc/yum.conf',
+        line   => $myline,
+        before => Yumrepo['openstack-juno'],
+      }
+    }
+
+    yumrepo { "openstack-juno":
+      baseurl => "http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-7/",
+      descr => "RDO Community repository",
+      enabled => 1,
+      gpgcheck => 0,
+    }
+
+    exec {'disable selinux':
+        command => '/usr/sbin/setenforce 0',
+        unless => '/usr/sbin/getenforce | grep Permissive',
+    }
+    ->
+    service { "network":
+      ensure => "running",
+      enable => "true",
+      hasrestart => true,
+      restart => '/usr/bin/systemctl restart network',
+    }
+    ->
+    service { 'NetworkManager':
+      ensure => "stopped",
+      enable => "false",
+    }
+    ~>
+    exec { 'restart-network-presetup':
+      command => 'systemctl restart network',
+      path         => ["/usr/sbin/", "/usr/bin/"],
+      refreshonly  => 'true',
+    }
+    ->
+    package { 'openvswitch':
+     ensure  => installed,
+    }
+    ->
+    service {'openvswitch':
+     ensure  => 'running',
+    }
+  }
+}
diff --git a/common/puppet-opnfv/manifests/resolver.pp b/common/puppet-opnfv/manifests/resolver.pp
new file mode 100644 (file)
index 0000000..2951f7e
--- /dev/null
@@ -0,0 +1,62 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Class: opnfv::resolver
+#
+# Add resolver content passed through astute.yaml into resolv.conf
+# depending on the role
+#
+# Suitable yaml content:
+# <begin>
+# opnfv:
+#  dns:
+#    compute:
+#    - 100.100.100.2
+#    - 100.100.100.3
+#    controller:
+#    - 100.100.100.102
+#    - 100.100.100.104
+# <end>
+#
+#
+#
+
+class opnfv::resolver()
+{
+  if $::fuel_settings['role'] {
+    if $::fuel_settings['role']  == 'primary-controller' {
+      $role = 'controller'
+    } else {
+      $role = $::fuel_settings['role']
+    }
+
+    if ($::fuel_settings['opnfv']
+        and $::fuel_settings['opnfv']['dns']
+        and $::fuel_settings['opnfv']['dns'][$role]) {
+      $nameservers=$::fuel_settings['opnfv']['dns'][$role]
+
+      file { '/etc/resolv.conf':
+            owner   => root,
+            group   => root,
+            mode    => '0644',
+            content => template('opnfv/resolv.conf.erb'),
+      }
+# /etc/resolv.conf is re-generated at each boot by resolvconf, so we
+# need to store there as well.
+      file { '/etc/resolvconf/resolv.conf.d/head':
+            owner   => root,
+            group   => root,
+            mode    => '0644',
+            content => template('opnfv/resolv.conf.erb'),
+      }
+    }
+  }
+}
+
diff --git a/common/puppet-opnfv/manifests/tempest.pp b/common/puppet-opnfv/manifests/tempest.pp
new file mode 100644 (file)
index 0000000..86f4212
--- /dev/null
@@ -0,0 +1,27 @@
+#Copyright 2015 Open Platform for NFV Project, Inc. and its contributors
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+
+
+#The required package for tempest is missing in Khaleesi, along with EPEL for CentOS.
+#This is a workaround for now, since we require EPEL with Foreman/Puppet.
+#This is also a good place to put anything additional that we wish to install on the tempest node.
+
+class opnfv::tempest {
+
+  if $::osfamily == 'RedHat' {
+    package { 'subunit-filters':
+      ensure    => present,
+    }
+  }
+}
diff --git a/common/puppet-opnfv/manifests/templates/dockerfile/Dockerfile b/common/puppet-opnfv/manifests/templates/dockerfile/Dockerfile
new file mode 100644 (file)
index 0000000..80a92d8
--- /dev/null
@@ -0,0 +1,82 @@
+####################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# daniel.smith@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+#
+#  DOCKERFILE TO CREATE ODL IN CONTAINER AND EXPOSE DLUX AND OVSDB TO ODL
+#
+#############################################################################
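+#
+#  A minimal build/run sketch (the image tag and published ports below are
+#  illustrative only, not mandated by this Dockerfile):
+#
+#      docker build -t opnfv/odl .
+#      docker run -d -p 8181:8181 -p 6633:6633 -p 6640:6640 opnfv/odl
+#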
+
+
+#Set the base image - note: the current Karaf release uses JDK 7 and much of 12.04, so we use Ubuntu 12.04 rather than 14.04 instead of backporting a ton of stuff
+FROM ubuntu:12.04
+
+# Maintainer Info
+MAINTAINER Daniel Smith
+
+
+#Run apt-get update on start just to check for updates when building
+RUN echo "Updating APT"
+RUN apt-get update
+RUN echo "Adding wget"
+RUN apt-get install -y wget
+RUN apt-get install -y net-tools
+RUN apt-get install -y openjdk-7-jre
+RUN apt-get install -y openjdk-7-jdk
+RUN apt-get install -y openssh-server
+RUN apt-get install -y vim
+RUN apt-get install -y expect
+RUN apt-get install -y daemontools
+RUN mkdir -p /opt/odl_source
+RUN bash -c 'echo "export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64" >> ~/.bashrc'
+
+
+
+#Now let's go and fetch the ODL distribution
+RUN echo "Fetching ODL"
+RUN wget https://nexus.opendaylight.org/content/groups/public/org/opendaylight/integration/distribution-karaf/0.2.3-Helium-SR3/distribution-karaf-0.2.3-Helium-SR3.tar.gz -O /opt/odl_source/distribution-karaf-0.2.3-Helium-SR3.tar.gz
+
+RUN echo "Untarring ODL inplace"
+RUN mkdir -p /opt/odl
+RUN tar zxvf /opt/odl_source/distribution-karaf-0.2.3-Helium-SR3.tar.gz -C /opt/odl
+
+RUN echo "Installing DLUX and other features into ODL"
+#COPY dockerfile/container_scripts/start_odl_docker.sh /etc/init.d/start_odl_docker.sh
+COPY container_scripts/start_odl_docker_container.sh /etc/init.d/
+COPY container_scripts/speak.sh /etc/init.d/
+#COPY dockerfile/container_scripts/speak.sh /etc/init.d/speak.sh
+RUN chmod 777 /etc/init.d/start_odl_docker_container.sh
+RUN chmod 777 /etc/init.d/speak.sh
+
+
+
+# Expose the ports
+
+# PORTS FOR BASE SYSTEM AND DLUX
+EXPOSE 8101
+EXPOSE 6633
+EXPOSE 1099
+EXPOSE 43506
+EXPOSE 8181
+EXPOSE 8185
+EXPOSE 9000
+EXPOSE 39378
+EXPOSE 33714
+EXPOSE 44444
+EXPOSE 6653
+
+# PORTS FOR OVSDB AND ODL CONTROL
+EXPOSE 12001
+EXPOSE 6640
+EXPOSE 8080
+EXPOSE 7800
+EXPOSE 55130
+EXPOSE 52150
+EXPOSE 36826
+
+# Set the default command (CMD) - this lets us run the container as an executable
+CMD ["/etc/init.d/start_odl_docker_container.sh"]
diff --git a/common/puppet-opnfv/manifests/templates/dockerfile/container_scripts/check_feature.sh b/common/puppet-opnfv/manifests/templates/dockerfile/container_scripts/check_feature.sh
new file mode 100644 (file)
index 0000000..533942e
--- /dev/null
@@ -0,0 +1,18 @@
+#!/usr/bin/expect
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# daniel.smith@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+spawn /opt/odl/distribution-karaf-0.2.3-Helium-SR3/bin/client
+expect "root>"
+send "feature:list | grep -i odl-restconf\r"
+send "\r\r\r"
+expect "root>"
+send "logout\r"
+
+
diff --git a/common/puppet-opnfv/manifests/templates/dockerfile/container_scripts/speak.sh b/common/puppet-opnfv/manifests/templates/dockerfile/container_scripts/speak.sh
new file mode 100644 (file)
index 0000000..95bbaf4
--- /dev/null
@@ -0,0 +1,20 @@
+#!/usr/bin/expect
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# daniel.smith@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+#
+# Simple expect script to start up ODL client and load feature set for DLUX and OVSDB
+#  NOTE: THIS WILL BE REPLACED WITH A PROGRAMMATIC METHOD SHORTLY
+#################################################################################
+
+spawn /opt/odl/distribution-karaf-0.2.3-Helium-SR3/bin/client
+expect "root>"
+send "feature:install odl-base-all odl-aaa-authn odl-restconf odl-nsf-all odl-adsal-northbound odl-mdsal-apidocs  odl-ovsdb-openstack odl-ovsdb-northbound odl-dlux-core"
+send "\r\r\r"
+expect "root>"
+send "logout\r"
diff --git a/common/puppet-opnfv/manifests/templates/dockerfile/container_scripts/start_odl_docker_container.sh b/common/puppet-opnfv/manifests/templates/dockerfile/container_scripts/start_odl_docker_container.sh
new file mode 100644 (file)
index 0000000..8ae05f7
--- /dev/null
@@ -0,0 +1,48 @@
+#!/bin/bash
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# daniel.smith@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+#
+#  Start up script for calling karaf / ODL inside a docker container.
+#  It also calls a couple of expect scripts to load the feature set that we want.
+#  NOTE: THIS WILL BE REPLACED WITH A PROGRAMMATIC METHOD SHORTLY
+#################################################################################
+
+
+#ENV
+export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64
+
+#MAIN
+echo "Starting up the da Sheilds..."
+/opt/odl/distribution-karaf-0.2.3-Helium-SR3/bin/karaf server &
+echo "Sleeping 5 bad hack"
+sleep 10
+echo "should see stuff listening now"
+netstat -na
+echo " should see proess running for karaf"
+ps -efa
+echo " Starting the packages we want"
+/etc/init.d/speak.sh
+echo "Printout the status - if its right, you should see 8181 appear now"
+netstat -na
+ps -efa
+
+
+
+## This is a loop that keeps our container running; it prints the karaf status to the docker logs every minute
+## Cheap - but effective
+while true;
+do
+        echo "Checking status of ODL:"
+        /opt/odl/distribution-karaf-0.2.3-Helium-SR3/bin/status
+        sleep 60
+done
+
+
diff --git a/common/puppet-opnfv/manifests/templates/ntp.conf.compute.erb b/common/puppet-opnfv/manifests/templates/ntp.conf.compute.erb
new file mode 100644 (file)
index 0000000..ac65293
--- /dev/null
@@ -0,0 +1,22 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+tinker panic 0
+driftfile /var/lib/ntp/ntp.drift
+statistics loopstats peerstats clockstats
+filegen loopstats file loopstats type day enable
+filegen peerstats file peerstats type day enable
+filegen clockstats file clockstats type day enable
+restrict -4 default kod notrap nomodify nopeer noquery
+restrict -6 default kod notrap nomodify nopeer noquery
+restrict 127.0.0.1
+restrict ::1
+<%= @file_content %>
+
diff --git a/common/puppet-opnfv/manifests/templates/ntp.conf.controller.erb b/common/puppet-opnfv/manifests/templates/ntp.conf.controller.erb
new file mode 100644 (file)
index 0000000..ac65293
--- /dev/null
@@ -0,0 +1,22 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+tinker panic 0
+driftfile /var/lib/ntp/ntp.drift
+statistics loopstats peerstats clockstats
+filegen loopstats file loopstats type day enable
+filegen peerstats file peerstats type day enable
+filegen clockstats file clockstats type day enable
+restrict -4 default kod notrap nomodify nopeer noquery
+restrict -6 default kod notrap nomodify nopeer noquery
+restrict 127.0.0.1
+restrict ::1
+<%= @file_content %>
+
diff --git a/common/puppet-opnfv/templates/br_ex.erb b/common/puppet-opnfv/templates/br_ex.erb
new file mode 100644 (file)
index 0000000..6c0e7e7
--- /dev/null
@@ -0,0 +1,10 @@
+DEVICE=br-ex
+DEVICETYPE=ovs
+IPADDR=<%= @public_nic_ip %>
+NETMASK=<%= @public_nic_netmask %>
+GATEWAY=<%= @public_gateway %>
+BOOTPROTO=static
+ONBOOT=yes
+TYPE=OVSBridge
+PROMISC=yes
+PEERDNS=no
diff --git a/common/tools/README.md b/common/tools/README.md
new file mode 100644 (file)
index 0000000..e407dd2
--- /dev/null
@@ -0,0 +1,17 @@
+<!---
+Copyright 2015 Open Platform for NFV Project, Inc. and its contributors
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+# Tools
+This directory contains tools which are common to OPNFV (i.e. independent of the various installer approaches).
diff --git a/compass/build/Makefile b/compass/build/Makefile
new file mode 100755 (executable)
index 0000000..7448dc4
--- /dev/null
@@ -0,0 +1,117 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# dradez@redhat.com
+# chigang@huawei.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+SHELL = /bin/bash
+############################################################################
+# BEGIN of variables to customize
+#
+#Input args
+export UNIT_TEST = FALSE
+export INTERACTIVE = TRUE
+export ISOSRC = file:$(shell pwd)/ubuntu
+export ISOCACHE = $(shell pwd)/$(shell basename $(ISOSRC))
+export PRODNO = "OPNFV_BGS"
+export REVSTATE = "P0000"
+export RELEASE_DIR = $(shell pwd)/release
+
+# Note! Invoke with "make REVSTATE=RXXXX all" to make release build!
+# Invoke with ISOCACHE=/full/path/to/iso if the cached ISO is in a non-standard location.
+
+#Build variables
+export BUILD_BASE := $(shell pwd)
+export CACHE_DIR := $(BUILD_BASE)/cache
+export INSTALL_DIR := $(BUILD_BASE)
+export VERSION_FILE := $(BUILD_BASE)/.versions
+export TOPDIR := $(shell pwd)
+
+export OLDISO_DIR := $(TOPDIR)/oldiso
+export NEWISO_DIR := $(TOPDIR)/newiso
+export NEWIMAGE_DIR := $(TOPDIR)/newiso/image
+export NEWFILESYSTEM := $(TOPDIR)/newiso/filesystem
+export MANIFEST_DIR = $(shell find $(NEWISO_DIR) -name filesystem.manifest)
+export SQUASHFS_DIR = $(shell find $(NEWISO_DIR) -name filesystem.squashfs)
+export FSSIZE_DIR = $(shell find $(NEWISO_DIR) -name filesystem.size)
+
+#
+# END of variables to customize
+#############################################################################
+
+.PHONY: all
+all: iso
+       @echo "Versions of cached build results built by" $(shell hostname) "at" $(shell date -u) > $(VERSION_FILE)
+       @echo "cache.mk" $(shell md5sum $(BUILD_BASE)/cache.mk | cut -f1 -d " ") >> $(VERSION_FILE)
+       @echo "config.mk" $(shell md5sum $(BUILD_BASE)/config.mk | cut -f1 -d " ") >> $(VERSION_FILE)
+
+############################################################################
+# BEGIN of Include definitions
+#
+include config.mk
+include cache.mk
+#
+# END Include definitions
+#############################################################################
+
+.PHONY: prepare-cache
+prepare-cache: 
+       @echo "prepare-cache to be done"
+
+.PHONY: mount-ubuntuiso
+mount-ubuntuiso:
+       @echo "===Mounting ubuntu ISO in $(OLDISO_DIR)"
+       -mkdir -p $(OLDISO_DIR) $(NEWIMAGE_DIR)
+       @fuseiso $(ISOCACHE)/*.iso $(OLDISO_DIR)
+       cp $(OLDISO_DIR)/. $(NEWIMAGE_DIR) -rp
+
+.PHONY: umount-ubuntuiso
+umount-ubuntuiso:
+       @set +e
+       @echo "===Unmounting ubuntu ISO from $(OLDISO_DIR)"
+       @fusermount -u $(OLDISO_DIR)
+       @set -e
+
+.PHONY: install-package
+install-package:
+       @echo "===uncompress file system to add new files"
+       @find $(NEWISO_DIR) -name "filesystem.squashfs" |xargs unsquashfs
+       @mv squashfs-root $(NEWFILESYSTEM)
+       cp -f /etc/resolv.conf $(NEWFILESYSTEM)/run/resolvconf/
+       cp /etc/hosts $(NEWFILESYSTEM)/etc/
+       cp $(INSTALL_DIR)/install.sh $(NEWFILESYSTEM)/
+       @echo "===install package on filesystem for newiso"
+       #@chroot $(NEWFILESYSTEM) sh ./install.sh
+       @chmod +w $(MANIFEST_DIR)
+       @chroot $(NEWFILESYSTEM) dpkg-query -W --showformat='$${Package} $${Version}\n' | tee ${MANIFEST_DIR}
+       @rm $(SQUASHFS_DIR)
+       @mksquashfs $(NEWFILESYSTEM) $(SQUASHFS_DIR)
+       @chmod +w $(FSSIZE_DIR)
+       cd $(NEWISO_DIR); \
+       (du -sx --block-size=1 $(NEWFILESYSTEM) | cut -f1 ) | tee ${FSSIZE_DIR}
+       cd $(NEWIMAGE_DIR); \
+       find . -type f -print0 | xargs -0 md5sum | grep -v "\./md5sum.txt" | tee ./md5sum.txt
+
+.PHONY: make-iso
+make-iso:
+       @echo "===Building OPNFV iso"
+       cd $(NEWIMAGE_DIR); \
+       mkisofs -r -V "OPNFV" -cache-inodes -J -l -b isolinux/isolinux.bin -c isolinux/boot.cat -no-emul-boot -boot-load-size 4 -boot-info-table -o ../ubuntu-14.04-amd64-opnfv.iso .
+
+.PHONY: build-clean
+build-clean:
+       -rm -Rf $(OLDISO_DIR)
+       -rm -Rf $(NEWISO_DIR)
+       -rm -Rf $(RELEASE_DIR)
+
+.PHONY: iso
+iso: build-clean mount-ubuntuiso umount-ubuntuiso install-package make-iso
+       -mkdir $(RELEASE_DIR)
+       @mv $(NEWISO_DIR)/*.iso $(RELEASE_DIR)
+       @printf "\n\nISO is built successfully!\n\n"
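
As a hedged illustration of the note near the top of this Makefile, typical invocations would look roughly like this (paths are placeholders):

    # release build tagged R0001, as suggested by the note above
    make REVSTATE=R0001 all
    # point the build at an ISO cached outside the default location
    make ISOCACHE=/full/path/to/iso all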
diff --git a/compass/build/cache.mk b/compass/build/cache.mk
new file mode 100755 (executable)
index 0000000..e69de29
diff --git a/compass/build/config.mk b/compass/build/config.mk
new file mode 100755 (executable)
index 0000000..e69de29
diff --git a/compass/build/install.sh b/compass/build/install.sh
new file mode 100755 (executable)
index 0000000..4a8b893
--- /dev/null
@@ -0,0 +1,27 @@
+#!/bin/bash
+#####################################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd.
+# chigang@huawei.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+#####################################################################################
+
+# some packages or tools may use the filesystems below
+mount -t proc none /proc
+mount -t sysfs none /sys
+mount -t devpts none /dev/pts
+
+# install/remove packages
+sudo apt-get update
+sudo apt-get -y upgrade
+sudo apt-get -y dist-upgrade
+sudo apt-get install libxslt-dev libxml2-dev libvirt-dev build-essential qemu-utils qemu-kvm libvirt-bin virtinst -y
+
+#rm  /etc/resolv.conf
+#rm -rf /tmp/*
+
+umount /proc
+umount /sys
+umount /dev/pts
\ No newline at end of file
diff --git a/compass/ci/build.sh b/compass/ci/build.sh
new file mode 100755 (executable)
index 0000000..4e5b87b
--- /dev/null
@@ -0,0 +1,392 @@
+#!/bin/bash
+set -e
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# chigang@huawei.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+trap 'echo "Exiting ..."; \
+if [ -f ${LOCK_FILE} ]; then \
+   if [ $(cat ${LOCK_FILE}) -eq $$ ]; then \
+      rm -f ${LOCK_FILE}; \
+   fi; \
+fi;' EXIT
+
+############################################################################
+# BEGIN of usage description
+#
+usage ()
+{
+cat << EOF
+$0 Builds the Compass@OPNFV stack
+
+usage: $0 [-s spec-file] [-c cache-URI] [-l log-file] [-f Flags] build-directory
+
+OPTIONS:
+  -s spec-file ($BUILD_SPEC), defines the build-spec file, default ../build/config.mk
+  -c cache base URI ($BUILD_CACHE_URI), specifies the base URI to a build cache to be used/updated - the name is automatically generated from the md5sum of the spec-file; http://, ftp:// and file://[absolute path] are supported.
+
+  -l log-file ($BUILD_LOG), specifies the output log-file (stdout and stderr); if not specified, logs are output to the console as normal
+  -v version tag to be applied to the build result
+  -r alternative remote access method script/program. curl is default.
+  -t run small build-script unit test.
+  -T run large build-script unit test.
+  -f build flags ($BUILD_FLAGS):
+     o s: Do nothing, succeed
+     o f: Do nothing, fail
+     o t: run build unit tests
+     o i: run interactive (-t flag to docker run)
+     o P: Populate a new local cache and push it to the (-c cache-URI) cache artifactory if the -c option is present; currently file://, http:// and ftp:// are supported
+     o d: Detach - NOT YET SUPPORTED
+
+  build-directory ($BUILD_DIR), specifies the directory for the output artifacts (.iso file).
+
+  -h help, prints this help text
+
+Description:
+build.sh builds the opnfv .iso artifact.
+To reduce build time it uses a build cache at a local or remote location. The cache is rebuilt and uploaded if any of the conditions below are met:
+1) The P(opulate) flag is set and the -c cache-base-URI is provided; if -c is not provided the cache stays local.
+2) If the cache is invalidated by one of the following conditions:
+   - The config spec md5sum does not match the md5sum of the spec the cache was built from.
+   - The git Commit-Id on the remote repos/HEAD defined in the spec file does not correspond to the Commit-Id the cache was built with.
+3) A valid cache does not exist on the specified -c cache-base-URI.
+
+The cache URI object name is compass_cache-"md5sum(spec file)"
+
+Logging goes to the console by default, but can be directed elsewhere with the -l option, in which case both stdout and stderr are redirected to that destination.
+
+Built-in unit testing of components is enabled by adding the t(est) flag.
+
+Return codes:
+ - 0 Success!
+ - 1-99 Unspecified build error
+ - 100-199 Build system internal error (not the build itself)
+   o 101 Build system instance busy
+ - 200 Build failure
+
+Examples:
+build -c http://opnfv.org/artifactory/compass/cache -d ~/jenkins/genesis/compass/ci/output -f ti
+NOTE: Currently the build scope is set to the git root of the repository; -d destination locations outside that scope will not work
+EOF
+}
+#
+# END of usage description
+############################################################################
+
+############################################################################
+# BEGIN of variables to customize
+#
+BUILD_BASE=$(readlink -e ../build/)
+RESULT_DIR="${BUILD_BASE}/release"
+BUILD_SPEC="${BUILD_BASE}/config.mk"
+CACHE_DIR="cache"
+LOCAL_CACHE_ARCH_NAME="compass-cache"
+REMOTE_CACHE_ARCH_NAME="compass_cache-$(md5sum ${BUILD_SPEC}| cut -f1 -d " ")"
+REMOTE_ACCESS_METHD=curl
+INCLUDE_DIR=../include
+#
+# END of variables to customize
+############################################################################
+
+############################################################################
+# BEGIN of script assigned variables
+#
+SCRIPT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
+LOCK_FILE="${SCRIPT_DIR}/.build.lck"
+CACHE_TMP="${SCRIPT_DIR}/tmp"
+TEST_SUCCEED=0
+TEST_FAIL=0
+UNIT_TEST=0
+UPDATE_CACHE=0
+POPULATE_CACHE=0
+RECURSIV=0
+DETACH=0
+DEBUG=0
+INTEGRATION_TEST=0
+FULL_INTEGRATION_TEST=0
+INTERACTIVE=0
+BUILD_CACHE_URI=
+BUILD_SPEC=
+BUILD_DIR=
+BUILD_LOG=
+BUILD_VERSION=
+MAKE_ARGS=
+#
+# END of script assigned variables
+############################################################################
+
+############################################################################
+# BEGIN of include pragmas
+#
+source ${INCLUDE_DIR}/build.sh.debug
+#
+# END of include
+############################################################################
+
+############################################################################
+# BEGIN of main
+#
+while getopts "s:c:v:f:l:r:RtTh" OPTION
+do
+    case $OPTION in
+       h)
+           usage
+           rc=0
+           exit $rc
+           ;;
+
+       s)
+           BUILD_SPEC=${OPTARG}
+           ;;
+
+       c)
+           BUILD_CACHE_URI=${OPTARG}
+           ;;
+
+       l)
+           BUILD_LOG=${OPTARG}
+           ;;
+
+       v)
+           BUILD_VERSION=${OPTARG}
+           ;;
+
+       f)
+           BUILD_FLAGS=${OPTARG}
+           ;;
+
+       r)  REMOTE_ACCESS_METHD=${OPTARG}
+           ;;
+
+       R)
+           RECURSIVE=1
+           ;;
+
+       t)
+           INTEGRATION_TEST=1
+           ;;
+
+       T)
+           INTEGRATION_TEST=1
+           FULL_INTEGRATION_TEST=1
+           ;;
+
+       *)
+           echo "${OPTION} is not a valid argument"
+           rc=100
+           exit $rc
+           ;;
+    esac
+done
+
+if [ -z $BUILD_DIR ]; then
+    BUILD_DIR=$(echo $@ | cut -d ' ' -f ${OPTIND})
+fi
+
+for ((i=0; i<${#BUILD_FLAGS};i++)); do
+    case ${BUILD_FLAGS:$i:1} in
+       s)
+           rc=0
+           exit $rc
+           ;;
+
+       f)
+           rc=1
+           exit $rc
+           ;;
+
+       t)
+           UNIT_TEST=1
+           ;;
+
+       i)
+           INTERACTIVE=1
+           ;;
+
+       P)
+           POPULATE_CACHE=1
+           ;;
+
+       d)
+           DETACH=1
+           echo "Detach is not yet supported - exiting ...."
+           rc=100
+           exit $rc
+           ;;
+
+       D)
+           DEBUG=1
+           ;;
+
+       *)
+           echo "${BUILD_FLAGS:$i:1} is not a valid build flag - exiting ...."
+           rc=100
+           exit $rc
+           ;;
+    esac
+done
+
+if [ ${INTEGRATION_TEST} -eq 1 ]; then
+    integration-test
+    rc=0
+    exit $rc
+fi
+
+if [ ! -f ${BUILD_SPEC} ]; then
+    echo "spec file does not exist: $BUILD_SPEC - exiting ...."
+    rc=100
+    exit $rc
+fi
+
+if [ -z ${BUILD_DIR} ]; then
+    echo "Missing build directory - exiting ...."
+    rc=100
+    exit $rc
+fi
+
+if [ ! -z ${BUILD_LOG} ]; then
+    if [[ ${RECURSIVE} -ne 1 ]]; then
+       set +e
+       eval $0 -R $@ > ${BUILD_LOG} 2>&1
+       rc=$?
+       set -e
+       if [ $rc -ne 0 ]; then
+           exit $rc
+       fi
+    fi
+fi
+
+if [ ${TEST_SUCCEED} -eq 1 ]; then
+    sleep 1
+    rc=0
+    exit $rc
+fi
+
+if [ ${TEST_FAIL} -eq 1 ]; then
+    sleep 1
+    rc=1
+    exit $rc
+fi
+
+if [ -e ${LOCK_FILE} ]; then
+    echo "A build job is already running, exiting....."
+    rc=101
+    exit $rc
+fi
+
+echo $$ > ${LOCK_FILE}
+
+if [ ! -z ${BUILD_CACHE_URI} ]; then
+    if [ ${POPULATE_CACHE} -ne 1 ]; then
+       rm -rf ${CACHE_TMP}/cache
+       mkdir -p ${CACHE_TMP}/cache
+       echo "Downloading cach file ${BUILD_CACHE_URI}/${REMOTE_CACHE_ARCH_NAME} ..."
+       set +e
+       ${REMOTE_ACCESS_METHD} -o ${CACHE_TMP}/cache/${LOCAL_CACHE_ARCH_NAME}.tgz ${BUILD_CACHE_URI}/${REMOTE_CACHE_ARCH_NAME}.tgz
+       rc=$?
+       set -e
+       if [ $rc -ne 0 ]; then
+               echo "Remote cache does not exist, or is not accessible - a new cache will be built ..."
+               POPULATE_CACHE=1
+       else
+           echo "Unpacking cache file ..."
+           tar -C ${CACHE_TMP}/cache -xvf ${CACHE_TMP}/cache/${LOCAL_CACHE_ARCH_NAME}.tgz
+           cp ${CACHE_TMP}/cache/cache/.versions ${BUILD_BASE}/.
+           set +e
+                   make -C ${BUILD_BASE} validate-cache;
+           rc=$?
+           set -e
+
+           if [ $rc -ne 0 ]; then
+               echo "Cache invalid - a new cache will be built "
+               POPULATE_CACHE=1
+           else
+               cp -rf ${CACHE_TMP}/cache/cache/. ${BUILD_BASE}
+           fi
+           rm -rf ${CACHE_TMP}/cache
+       fi
+    fi
+fi
+
+if [ ${POPULATE_CACHE} -eq 1 ]; then
+    if [ ${DEBUG} -eq 0 ]; then
+       set +e
+       cd ${BUILD_BASE} && make clean
+       rc=$?
+       set -e
+       if [ $rc -ne 0 ]; then
+           echo "Build - make clean failed, exiting ..."
+           rc=100
+           exit $rc
+       fi
+    fi
+fi
+
+if [ ! -z ${BUILD_VERSION} ]; then
+    MAKE_ARGS+="REVSTATE=${BUILD_VERSION} "
+fi
+
+if [ ${UNIT_TEST} -eq 1 ]; then
+    MAKE_ARGS+="UNIT_TEST=TRUE "
+else
+    MAKE_ARGS+="UNIT_TEST=FALSE "
+fi
+
+if [ ${INTERACTIVE} -eq 1 ]; then
+    MAKE_ARGS+="INTERACTIVE=TRUE "
+else
+    MAKE_ARGS+="INTERACTIVE=FALSE "
+fi
+
+MAKE_ARGS+=all
+
+if [ ${DEBUG} -eq 0 ]; then
+    set +e
+    cd ${BUILD_BASE} && make ${MAKE_ARGS}
+    rc=$?
+    set -e
+    if [ $rc -gt 0 ]; then
+       echo "Build: make all failed, exiting ..."
+       rc=200
+       exit $rc
+    fi
+else
+debug_make
+fi
+set +e
+make -C ${BUILD_BASE} prepare-cache
+rc=$?
+set -e
+
+if [ $rc -gt 0 ]; then
+    echo "Build: make prepare-cache failed - exiting ..."
+    rc=100
+    exit $rc
+fi
+echo "Copying built OPNFV .iso file to target directory ${BUILD_DIR} ..."
+rm -rf ${BUILD_DIR}
+mkdir -p ${BUILD_DIR}
+cp ${BUILD_BASE}/.versions ${BUILD_DIR}
+cp ${RESULT_DIR}/*.iso* ${BUILD_DIR}
+
+if [ $POPULATE_CACHE -eq 1 ]; then
+    if [ ! -z ${BUILD_CACHE_URI} ]; then
+       echo "Building cache ..."
+       tar --dereference -C ${BUILD_BASE} -caf ${BUILD_BASE}/${LOCAL_CACHE_ARCH_NAME}.tgz ${CACHE_DIR}
+       echo "Uploading cache ${BUILD_CACHE_URI}/${REMOTE_CACHE_ARCH_NAME}"
+       ${REMOTE_ACCESS_METHD} -T ${BUILD_BASE}/${LOCAL_CACHE_ARCH_NAME}.tgz ${BUILD_CACHE_URI}/${REMOTE_CACHE_ARCH_NAME}.tgz
+       rm ${BUILD_BASE}/${LOCAL_CACHE_ARCH_NAME}.tgz
+    fi
+fi
+echo "Success!!!"
+exit 0
+#
+# END of main
+############################################################################
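
A hedged example of how this script might typically be invoked on a CI slave (the cache URI and output directory are placeholders, not project defaults):

    # build with a remote cache, populate it if missing, and drop the ISO in ./output
    ./build.sh -c http://artifacts.example.org/compass/cache -f P output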
diff --git a/compass/ci/deploy.sh b/compass/ci/deploy.sh
new file mode 100755 (executable)
index 0000000..fe754aa
--- /dev/null
@@ -0,0 +1,5 @@
+SCRIPT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
+CONF_NAME=$1
+source ${SCRIPT_DIR}/../deploy/prepare.sh || exit $?
+source ${SCRIPT_DIR}/../deploy/setup-env.sh || exit $?
+source ${SCRIPT_DIR}/../deploy/deploy-vm.sh || exit $?
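
Since CONF_NAME selects a file under ../deploy/conf/, a minimal usage sketch with the five-node configuration shipped alongside this wrapper would be:

    cd compass/ci
    ./deploy.sh five    # setup-env.sh then sources ../deploy/conf/five.conf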
diff --git a/compass/deploy/conf/base.conf b/compass/deploy/conf/base.conf
new file mode 100644 (file)
index 0000000..8362b9a
--- /dev/null
@@ -0,0 +1,60 @@
+export COMPASS_SERVER_URL="http://10.1.0.12/api"
+export COMPASS_USER_EMAIL="admin@huawei.com"
+export COMPASS_USER_PASSWORD="admin"
+export CLUSTER_NAME="opnfv2"
+export LANGUAGE="EN"
+export TIMEZONE="America/Los_Angeles"
+export NTP_SERVER="10.1.0.12"
+export NAMESERVERS="10.1.0.12"
+export DOMAIN="ods.com"
+export PARTITIONS="/home=5%,/tmp=5%,/var=20%"
+export SUBNETS="10.1.0.0/24,172.16.2.0/24,172.16.3.0/24,172.16.4.0/24"
+export MANAGEMENT_IP_START=${MANAGEMENT_IP_START:-'10.1.0.50'}
+export TENANT_IP_START=${TENANT_IP_START:-'172.16.2.50'}
+export PUBLIC_IP_START=${PUBLIC_IP_START:-'172.16.3.50'}
+export STORAGE_IP_START=${STORAGE_IP_START:-'172.16.4.50'}
+export MANAGEMENT_INTERFACE=${MANAGEMENT_INTERFACE:-eth0}
+export TENANT_INTERFACE=${TENANT_INTERFACE:-eth1}
+export STORAGE_INTERFACE=${STORAGE_INTERFACE:-eth3}
+export PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-eth2}
+
+function next_ip {
+    ip_addr=$1
+    ip_base="$(echo $ip_addr | cut -d. -f1-3)"
+    ip_last="$(echo $ip_addr | cut -d. -f4)"
+    let ip_last_next=$ip_last+1
+    echo "${ip_base}.${ip_last_next}"
+}
+
+if [ -z "$HOST_NETWORKS" ]; then
+    IFS=, read -a HOSTNAME_LIST <<< "$HOSTNAMES"
+    MANAGE_IP=${MANAGEMENT_IP_START}
+    TENANT_IP=${TENANT_IP_START}
+    PUBLIC_IP=${PUBLIC_IP_START}
+    STORAGE_IP=${STORAGE_IP_START}
+    for HOSTNAME in ${HOSTNAME_LIST[@]}; do
+        if [ -z "$HOST_NETWORKS" ]; then
+            HOST_NETWORKS="${HOSTNAME}:${MANAGEMENT_INTERFACE}=${MANAGE_IP}|is_mgmt,${TENANT_INTERFACE}=${TENANT_IP},${PUBLIC_INTERFACE}=${PUBLIC_IP}|is_promiscuous,${STORAGE_INTERFACE}=${STORAGE_IP}"
+        else
+            HOST_NETWORKS="${HOST_NETWORKS};${HOSTNAME}:${MANAGEMENT_INTERFACE}=${MANAGE_IP}|is_mgmt,${TENANT_INTERFACE}=${TENANT_IP},${PUBLIC_INTERFACE}=${PUBLIC_IP}|is_promiscuous,${STORAGE_INTERFACE}=${STORAGE_IP}"
+        fi
+        MANAGE_IP=$(next_ip ${MANAGE_IP})
+        TENANT_IP=$(next_ip ${TENANT_IP})
+        PUBLIC_IP=$(next_ip ${PUBLIC_IP})
+        STORAGE_IP=$(next_ip ${STORAGE_IP})
+    done
+    export HOST_NETWORKS
+fi
+
+export NETWORK_MAPPING=${NETWORK_MAPPING:-"management=${MANAGEMENT_INTERFACE},tenant=${TENANT_INTERFACE},storage=${STORAGE_INTERFACE},external=${PUBLIC_INTERFACE}"}
+
+export PROXY=""
+export IGNORE_PROXY=""
+export SEARCH_PATH="ods.com"
+export GATEWAY="10.1.0.1"
+export SERVER_CREDENTIAL="root=root"
+export LOCAL_REPO_URL=""
+export OS_CONFIG_FILENAME=""
+export SERVICE_CREDENTIALS="image:service=service,compute:service=service,dashboard:service=service,identity:service=service,metering:service=service,rabbitmq:service=service,volume:service=service,mysql:service=service"
+export CONSOLE_CREDENTIALS="admin:console=console,compute:console=console,dashboard:console=console,image:console=console,metering:console=console,network:console=console,object-store:console=console,volume:console=console"
+export PACKAGE_CONFIG_FILENAME=""
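
To make the HOST_NETWORKS string built by the loop above more concrete, this is roughly what it expands to for the first two hosts with the default interfaces and start addresses (illustrative only):

    host1:eth0=10.1.0.50|is_mgmt,eth1=172.16.2.50,eth2=172.16.3.50|is_promiscuous,eth3=172.16.4.50;host2:eth0=10.1.0.51|is_mgmt,eth1=172.16.2.51,eth2=172.16.3.51|is_promiscuous,eth3=172.16.4.51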
diff --git a/compass/deploy/conf/five.conf b/compass/deploy/conf/five.conf
new file mode 100644 (file)
index 0000000..e63e514
--- /dev/null
@@ -0,0 +1,19 @@
+export VIRT_NUMBER=5
+export VIRT_CPUS=4
+export VIRT_MEM=4096
+export VIRT_DISK=30G
+export 'ADAPTER_OS_PATTERN=(?i)ubuntu-14\.04.*'
+#export 'ADAPTER_OS_PATTERN=(?i)centos-6\.5.*'
+export ADAPTER_NAME="openstack_juno"
+export ADAPTER_TARGET_SYSTEM_PATTERN="^openstack$"
+export ADAPTER_FLAVOR_PATTERN="single-controller"
+export HOSTNAMES="host1,host2,host3,host4,host5"
+export HOST_ROLES="host1=controller,network;host2=compute,storage;host3=compute,storage;host4=compute,storage;host5=compute,storage"
+export DEFAULT_ROLES=""
+export SWITCH_IPS="1.1.1.1"
+export SWITCH_CREDENTIAL="version=2c,community=public"
+export DEPLOYMENT_TIMEOUT="90"
+export POLL_SWITCHES_FLAG="nopoll_switches"
+export DASHBOARD_URL=""
+export REGTEST_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
+source ${REGTEST_DIR}/base.conf
diff --git a/compass/deploy/deploy-vm.sh b/compass/deploy/deploy-vm.sh
new file mode 100644 (file)
index 0000000..18857cd
--- /dev/null
@@ -0,0 +1,45 @@
+cd ..
+rm -rf compass-core
+git clone http://git.openstack.org/stackforge/compass-core -b dev/experimental
+cd compass-core
+virtualenv venv
+source venv/bin/activate
+pip install -i http://pypi.douban.com/simple -e .
+if [[ ! -d /var/log/compass ]]; then
+    sudo mkdir /var/log/compass
+    sudo chmod -R 777 /var/log/compass
+fi
+if [[ ! -d /etc/compass ]]; then
+    sudo mkdir /etc/compass
+    sudo cp -rf conf/setting /etc/compass/.
+fi
+cp bin/switch_virtualenv.py.template bin/switch_virtualenv.py
+sed -i "s|\$PythonHome|$VIRTUAL_ENV|g" bin/switch_virtualenv.py
+#source ../compass-install/ci/allinone.conf
+bin/client.py --logfile= --loglevel=debug --logdir= --compass_server="${COMPASS_SERVER_URL}" \
+--compass_user_email="${COMPASS_USER_EMAIL}" --compass_user_password="${COMPASS_USER_PASSWORD}" \
+--cluster_name="${CLUSTER_NAME}" --language="${LANGUAGE}" --timezone="${TIMEZONE}" \
+--hostnames="${HOSTNAMES}" --partitions="${PARTITIONS}" --subnets="${SUBNETS}" \
+--adapter_os_pattern="${ADAPTER_OS_PATTERN}" --adapter_name="${ADAPTER_NAME}" \
+--adapter_target_system_pattern="${ADAPTER_TARGET_SYSTEM_PATTERN}" \
+--adapter_flavor_pattern="${ADAPTER_FLAVOR_PATTERN}" \
+--http_proxy="${PROXY}" --https_proxy="${PROXY}" --no_proxy="${IGNORE_PROXY}" \
+--ntp_server="${NTP_SERVER}" --dns_servers="${NAMESERVERS}" --domain="${DOMAIN}" \
+--search_path="${SEARCH_PATH}" --default_gateway="${GATEWAY}" \
+--server_credential="${SERVER_CREDENTIAL}" --local_repo_url="${LOCAL_REPO_URL}" \
+--os_config_json_file="${OS_CONFIG_FILENAME}" --service_credentials="${SERVICE_CREDENTIALS}" \
+--console_credentials="${CONSOLE_CREDENTIALS}" --host_networks="${HOST_NETWORKS}" \
+--network_mapping="${NETWORK_MAPPING}" --package_config_json_file="${PACKAGE_CONFIG_FILENAME}" \
+--host_roles="${HOST_ROLES}" --default_roles="${DEFAULT_ROLES}" --switch_ips="${SWITCH_IPS}" \
+--machines=${machines//\'} --switch_credential="${SWITCH_CREDENTIAL}" \
+--deployment_timeout="${DEPLOYMENT_TIMEOUT}" --${POLL_SWITCHES_FLAG} --dashboard_url="${DASHBOARD_URL}"
+deploy_result=$?
+tear_down_machines
+cd ../compass-install
+sudo vagrant destroy compass_nodocker
+if [[ $deploy_result != 0 ]]; then
+    echo "deployment failed"
+    exit 1
+else
+    echo "deployment complete"
+fi
diff --git a/compass/deploy/func.sh b/compass/deploy/func.sh
new file mode 100644 (file)
index 0000000..29c2c23
--- /dev/null
@@ -0,0 +1,20 @@
+function tear_down_machines() {
+    virtmachines=$(virsh list --name |grep pxe)
+    for virtmachine in $virtmachines; do
+        echo "destroy $virtmachine"
+        virsh destroy $virtmachine
+        if [[ "$?" != "0" ]]; then
+            echo "destroy instance $virtmachine failed"
+            exit 1
+        fi
+    done
+    virtmachines=$(virsh list --all --name |grep pxe)
+    for virtmachine in $virtmachines; do
+        echo "undefine $virtmachine"
+        virsh undefine $virtmachine
+        if [[ "$?" != "0" ]]; then
+            echo "undefine instance $virtmachine failed"
+            exit 1
+        fi
+    done
+}
diff --git a/compass/deploy/mac_generator.sh b/compass/deploy/mac_generator.sh
new file mode 100755 (executable)
index 0000000..ca898cb
--- /dev/null
@@ -0,0 +1,23 @@
+#!/bin/bash
+function mac_address_part() {
+    hex_number=$(printf '%02x' $RANDOM)
+    number_length=${#hex_number}
+    number_start=$(expr $number_length - 2)
+    echo ${hex_number:$number_start:2}
+}
+
+function mac_address() {
+    echo "'00:00:$(mac_address_part):$(mac_address_part):$(mac_address_part):$(mac_address_part)'"
+}
+
+machines=''
+for i in `seq $1`; do
+  mac=$(mac_address)
+
+  if [[ -z $machines ]]; then
+    machines="${mac}"
+  else
+    machines="${machines} ${mac}"
+  fi 
+done
+echo ${machines}
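
As a usage sketch, generating five random MAC addresses for the PXE-booted VMs would look roughly like this (the addresses shown are illustrative):

    ./mac_generator.sh 5
    # prints five quoted, space-separated addresses, e.g. '00:00:3a:1f:07:c2' '00:00:5b:0e:91:44' ...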
diff --git a/compass/deploy/prepare.sh b/compass/deploy/prepare.sh
new file mode 100644 (file)
index 0000000..2086c5d
--- /dev/null
@@ -0,0 +1,35 @@
+sudo apt-get update -y
+sudo apt-get install git python-pip python-dev -y
+vagrant --version
+if [[ $? != 0 ]]; then
+    vagrant_pkg_url=https://dl.bintray.com/mitchellh/vagrant/vagrant_1.7.2_x86_64.deb
+    wget ${vagrant_pkg_url}
+    sudo dpkg -i $(basename ${vagrant_pkg_url})
+else
+    echo "vagrant is already installed"
+fi
+sudo apt-get install libxslt-dev libxml2-dev libvirt-dev build-essential qemu-utils qemu-kvm libvirt-bin virtinst libmysqld-dev -y
+sudo service libvirt-bin restart
+
+for plugin in vagrant-libvirt vagrant-mutate; do
+    vagrant plugin list |grep $plugin
+    if [[ $? != 0 ]]; then
+        vagrant plugin install $plugin --plugin-source https://ruby.taobao.org
+    else
+        echo "$plugin plugin is already installed"
+    fi
+done
+sudo pip install --upgrade ansible virtualenv
+#precise_box_vb_url=https://cloud-images.ubuntu.com/vagrant/precise/current/precise-server-cloudimg-amd64-vagrant-disk1.box
+#precise_box_vb_filename=$(basename ${precise_box_vb_url})
+centos65_box_vb_url=https://developer.nrel.gov/downloads/vagrant-boxes/CentOS-6.5-x86_64-v20140504.box
+centos65_box_vb_filename=$(basename ${centos65_box_vb_url})
+#wget ${precise_box_vb_url}
+vagrant box list |grep centos65
+if [[ $? != 0 ]]; then
+    wget ${centos65_box_vb_url}
+    mv ${centos65_box_vb_filename} centos65.box
+    vagrant mutate centos65.box libvirt
+else
+    echo "centos65 box already exists"
+fi
diff --git a/compass/deploy/setup-env.sh b/compass/deploy/setup-env.sh
new file mode 100644 (file)
index 0000000..ffa9aa5
--- /dev/null
@@ -0,0 +1,61 @@
+rm -rf compass-install
+git clone http://git.openstack.org/stackforge/compass-install
+cd compass-install
+
+function join { local IFS="$1"; shift; echo "$*"; }
+source ${SCRIPT_DIR}/../deploy/conf/${CONF_NAME}.conf
+source ${SCRIPT_DIR}/../deploy/func.sh
+if [[ ! -z $VIRT_NUMBER ]]; then
+    mac_array=$(${SCRIPT_DIR}/../deploy/mac_generator.sh $VIRT_NUMBER)
+    mac_list=$(join , $mac_array)
+    echo "pxe_boot_macs: [${mac_list}]" >> install/group_vars/all
+    echo "test: true" >> install/group_vars/all
+fi
+virsh list |grep compass
+if [[ $? == 0 ]]; then
+    compass_old=`virsh list |grep compass|awk '{print$2}'`
+    virsh destroy ${compass_old}
+    virsh undefine ${compass_old}
+fi
+sudo vagrant up compass_nodocker
+if [[ $? != 0 ]]; then
+    echo "installation of compass failed"
+    sudo vagrant destroy compass_nodocker
+    exit 1
+fi
+echo "compass is up"
+
+tear_down_machines
+if [[ -n $mac_array ]]; then
+    echo "bringing up pxe boot vms"
+    i=0
+    for mac in $mac_array; do
+        echo "creating vm disk for instance pxe${i}"
+        sudo qemu-img create -f raw /home/pxe${i}.raw ${VIRT_DISK}
+        sudo virt-install --accelerate --hvm --connect qemu:///system \
+             --name pxe$i --ram=$VIRT_MEM --pxe --disk /home/pxe$i.raw,format=raw \
+             --vcpus=$VIRT_CPUS --graphics vnc,listen=0.0.0.0 \
+             --network=bridge:virbr2,mac=$mac \
+             --network=bridge:virbr2 \
+             --network=bridge:virbr2 \
+             --network=bridge:virbr2 \
+             --noautoconsole --autostart --os-type=linux --os-variant=rhel6
+        if [[ $? != 0 ]]; then
+            echo "launching pxe${i} failed"
+            exit 1
+        fi
+        echo "checking pxe${i} state"
+        state=$(virsh domstate pxe${i})
+        if [[ "$state" == "running" ]]; then
+            echo "pxe${i} is running"
+            sudo virsh destroy pxe${i}
+        fi
+        echo "add network boot option and make pxe${i} reboot if failing"
+        sudo sed -i "/<boot dev='hd'\/>/ a\    <boot dev='network'\/>" /etc/libvirt/qemu/pxe${i}.xml
+        sudo sed -i "/<boot dev='network'\/>/ a\    <bios useserial='yes' rebootTimeout='0'\/>" /etc/libvirt/qemu/pxe${i}.xml
+        sudo virsh define /etc/libvirt/qemu/pxe${i}.xml
+        sudo virsh start pxe${i}
+        let i=i+1
+    done
+fi
+machines=${mac_list}
diff --git a/foreman/build/Makefile b/foreman/build/Makefile
new file mode 100644 (file)
index 0000000..8b87ce6
--- /dev/null
@@ -0,0 +1,133 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# dradez@redhat.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+SHELL = /bin/bash
+############################################################################
+# BEGIN of variables to customize
+#
+#Input args
+export UNIT_TEST = FALSE
+export INTERACTIVE = TRUE
+export CENTDNLD = http://mirrors.cat.pdx.edu/centos/7.1.1503/isos/x86_64/CentOS-7-x86_64-DVD-1503-01.iso
+export ISOSRC = file:$(shell pwd)/CentOS-7-x86_64-DVD-1503-01.iso
+export ISOCACHE = $(shell pwd)/$(shell basename $(ISOSRC))
+export PRODNO = "OPNFV_BGS"
+export REVSTATE = "P0000"
+export NEWISO = $(shell pwd)/release/OPNFV-CentOS-7-x86_64-${REVSTATE}.iso
+export VBOXDNLD = http://download.virtualbox.org/virtualbox/rpm/el/7.1/x86_64/VirtualBox-4.3-4.3.26_98988_el7-1.x86_64.rpm
+export VBOXRPM = $(shell pwd)/VirtualBox-4.3-4.3.26_98988_el7-1.x86_64.rpm
+export VAGRANTDNLD = https://dl.bintray.com/mitchellh/vagrant/vagrant_1.7.2_x86_64.rpm
+export VAGRANTRPM = $(shell pwd)/vagrant_1.7.2_x86_64.rpm
+export GENESISRPM = $(shell pwd)/x86_64/opnfv-genesis-0.1-1.x86_64.rpm
+
+# Note! Invoke with "make REVSTATE=RXXXX all" to make release build!
+# Invoke with ISOCACHE=/full/path/to/iso if the cached ISO is in a non-standard location.
+
+#Build variables
+export BUILD_BASE := $(shell pwd)
+export CACHE_DIR := $(BUILD_BASE)/cache
+export VERSION_FILE := $(BUILD_BASE)/.versions
+export TOPDIR := $(shell pwd)
+
+CENTDIR := $(TOPDIR)/centiso
+#
+# END of variables to customize
+#############################################################################
+
+SUBCLEAN = $(addsuffix .clean,$(SUBDIRS))
+
+
+.PHONY: all
+all: iso
+       @echo "Versions of cached build results built by" $(shell hostname) "at" $(shell date -u) > $(VERSION_FILE)
+       @echo "cache.mk" $(shell md5sum $(BUILD_BASE)/cache.mk | cut -f1 -d " ") >> $(VERSION_FILE)
+       @echo "config.mk" $(shell md5sum $(BUILD_BASE)/config.mk | cut -f1 -d " ") >> $(VERSION_FILE)
+
+############################################################################
+# BEGIN of Include definitions
+#
+include config.mk
+include cache.mk
+#
+# END Include definitions
+#############################################################################
+
+$(ISOCACHE):
+       test -s $(ISOCACHE) || { wget -nv $(CENTDNLD) ; }
+
+$(VBOXRPM):
+       test -s $(VBOXRPM) || { wget -nv $(VBOXDNLD) ; }
+
+$(VAGRANTRPM):
+       test -s $(VAGRANTRPM) || { wget -nv $(VAGRANTDNLD) ; }
+
+.PHONY: mount-centiso umount-centiso
+mount-centiso: $(ISOCACHE)
+       @echo "Mounting CentOS ISO in $(CENTDIR)"
+       @mkdir -p $(CENTDIR)
+       @fuseiso $(ISOCACHE) $(CENTDIR)
+
+umount-centiso:
+       @set +e
+       @echo "Unmounting CentOS ISO from $(CENTDIR)"
+       @fusermount -u $(CENTDIR)
+       @rmdir $(CENTDIR)
+       @set -e
+
+.PHONY: build-clean $(SUBCLEAN)
+build-clean: $(SUBCLEAN)
+       @rm -Rf centos
+       @rm -Rf release
+       @rm -Rf newiso
+       @rm -f $(NEWISO)
+
+.PHONY: clean $(SUBCLEAN)
+clean:  clean-cache $(SUBCLEAN)
+       @rm -f *.iso
+       @rm -Rf release
+       @rm -Rf newiso
+       @rm -f $(NEWISO)
+       @rm -f $(BUILD_BASE)/.versions
+
+$(SUBCLEAN): %.clean:
+       $(MAKE) -C $* -f Makefile clean
+
+.PHONY: rpm-clean
+rpm-clean:
+       rpmbuild --clean opnfv-genesis.spec -D '_topdir %(echo `pwd`)' -D '_builddir %(echo `pwd`)' -D '_sourcedir %(echo `pwd`)' -D '_rpmdir %(echo `pwd`)' -D '_specdir %(echo `pwd`)' -D '_srcrpmdir %(echo `pwd`)'
+
+.PHONY: rpm
+rpm:
+       pushd ../../ && git archive --format=tar --prefix=opnfv-genesis-0.1/ HEAD | gzip > foreman/build/opnfv-genesis.tar.gz
+       rpmbuild -ba opnfv-genesis.spec -D '_topdir %(echo `pwd`)' -D '_builddir %(echo `pwd`)' -D '_sourcedir %(echo `pwd`)' -D '_rpmdir %(echo `pwd`)' -D '_specdir %(echo `pwd`)' -D '_srcrpmdir %(echo `pwd`)' 
+       @make rpm-clean
+
+
+# Todo: Make things smarter - we shouldn't need to clean everything
+# between make invocations.
+.PHONY: iso
+iso:   build-clean $(ISOCACHE) $(VBOXRPM) $(VAGRANTRPM) rpm
+       @make mount-centiso
+       @mkdir centos release
+       cp -r $(CENTDIR)/* centos
+       @make umount-centiso
+       # modify the installer iso's contents
+       @cp -f isolinux.cfg centos/isolinux/isolinux.cfg
+       @cp $(VBOXRPM) centos/Packages
+       @cp $(VAGRANTRPM) centos/Packages
+       @cp $(GENESISRPM) centos/Packages
+       # regenerate yum repo data
+       @echo "Generating new yum metadata"
+       createrepo --update -g ../c7-opnfv-x86_64-comps.xml centos
+       # build the iso
+       @echo "Building OPNFV iso"
+       mkisofs -b isolinux/isolinux.bin -no-emul-boot -boot-load-size 4 -boot-info-table -V "OPNFV CentOS 7 x86_64" -R -J -v -T -o $(NEWISO) centos
+       @printf "\n\nISO is built at $(NEWISO)\n\n"
diff --git a/foreman/build/c7-opnfv-x86_64-comps.xml b/foreman/build/c7-opnfv-x86_64-comps.xml
new file mode 100644 (file)
index 0000000..1e384e6
--- /dev/null
@@ -0,0 +1,293 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<!DOCTYPE comps PUBLIC "-//CentOS//DTD Comps info//EN" "comps.dtd">
+<comps>
+  
+  <group>
+   <id>core</id>
+   <name>Core</name>
+   <name xml:lang='af'>Kern</name>
+   <name xml:lang='am'>ማዕከላዊ ቦታ</name>
+   <name xml:lang='ar'>اللبّ</name>
+   <name xml:lang='as'>ভিত্তি</name>
+   <name xml:lang='bal'>هستگ</name>
+   <name xml:lang='be'>Падмурак</name>
+   <name xml:lang='bg'>Основа</name>
+   <name xml:lang='bn'>কোর</name>
+   <name xml:lang='bn_IN'>কোর</name>
+   <name xml:lang='bs'>Jezgra</name>
+   <name xml:lang='ca'>Nucli</name>
+   <name xml:lang='cs'>Úplný základ</name>
+   <name xml:lang='cy'>Craidd</name>
+   <name xml:lang='da'>Grundlæggende</name>
+   <name xml:lang='de'>Kern</name>
+   <name xml:lang='el'>Πυρήνας</name>
+   <name xml:lang='en_GB'>Core</name>
+   <name xml:lang='es'>Núcleo</name>
+   <name xml:lang='et'>Tuum</name>
+   <name xml:lang='fa'>اصل</name>
+   <name xml:lang='fi'>Keskeiset</name>
+   <name xml:lang='fr'>Core</name>
+   <name xml:lang='gl'>Núcleo</name>
+   <name xml:lang='gu'>મૂળ</name>
+   <name xml:lang='he'>ליבה</name>
+   <name xml:lang='hi'>कोर</name>
+   <name xml:lang='hr'>Jezgra</name>
+   <name xml:lang='hu'>Mag</name>
+   <name xml:lang='hy'>Հիմք</name>
+   <name xml:lang='ia'>Nucleo</name>
+   <name xml:lang='id'>Inti</name>
+   <name xml:lang='ilo'>Bugas</name>
+   <name xml:lang='is'>Lágmarkskerfi</name>
+   <name xml:lang='it'>Principale</name>
+   <name xml:lang='ja'>コア</name>
+   <name xml:lang='ka'>ბირთვი</name>
+   <name xml:lang='kn'>ಅಂತಸ್ಸಾರ</name>
+   <name xml:lang='ko'>핵심</name>
+   <name xml:lang='lv'>Pamatsistēma</name>
+   <name xml:lang='mai'>कोर</name>
+   <name xml:lang='mk'>Основни</name>
+   <name xml:lang='ml'>കോറ്‍</name>
+   <name xml:lang='mr'>कोर</name>
+   <name xml:lang='ms'>Teras</name>
+   <name xml:lang='nb'>Kjerne</name>
+   <name xml:lang='ne'>कोर</name>
+   <name xml:lang='nl'>Kern</name>
+   <name xml:lang='no'>Kjerne</name>
+   <name xml:lang='nso'>Bogare</name>
+   <name xml:lang='or'>ପ୍ରମୂଖ</name>
+   <name xml:lang='pa'>ਮੂਲ</name>
+   <name xml:lang='pl'>Rdzeń</name>
+   <name xml:lang='pt'>Núcleo</name>
+   <name xml:lang='pt_BR'>Núcleo</name>
+   <name xml:lang='ro'>Nucleu</name>
+   <name xml:lang='ru'>Основа</name>
+   <name xml:lang='si'>න්‍යෂ්ඨිය</name>
+   <name xml:lang='sk'>Jadro</name>
+   <name xml:lang='sl'>Jedro</name>
+   <name xml:lang='sq'>Bërthama</name>
+   <name xml:lang='sr'>Срж</name>
+   <name xml:lang='sr@latin'>Srž</name>
+   <name xml:lang='sr@Latn'>Srž</name>
+   <name xml:lang='sv'>Grund</name>
+   <name xml:lang='ta'>கோர்</name>
+   <name xml:lang='te'>అంతర్భాగం</name>
+   <name xml:lang='tg'>Система</name>
+   <name xml:lang='th'>แกนหลัก</name>
+   <name xml:lang='tr'>Çekirdek</name>
+   <name xml:lang='uk'>Основа</name>
+   <name xml:lang='ur'>مرکز</name>
+   <name xml:lang='vi'>Lõi</name>
+   <name xml:lang='zh_CN'>核心</name>
+   <name xml:lang='zh_TW'>核心</name>
+   <name xml:lang='zu'>Okuyikhona</name>
+   <description>Smallest possible installation.</description>
+   <description xml:lang='as'>ন্যূনতম ইনস্টল।</description>
+   <description xml:lang='bn'>ন্যূনতম ইনস্টলেশন।</description>
+   <description xml:lang='bn_IN'>ন্যূনতম ইনস্টলেশন।</description>
+   <description xml:lang='cs'>Nejmenší možná instalace.</description>
+   <description xml:lang='de'>Kleinstmögliche Installation.</description>
+   <description xml:lang='es'>La instalación más pequeña posible.</description>
+   <description xml:lang='fr'>Plus petite installation possible.</description>
+   <description xml:lang='gu'>નાનામાં નાના શક્ય સ્થાપન.</description>
+   <description xml:lang='hi'>लघुतम संभावित संस्थापन.</description>
+   <description xml:lang='ia'>Le minime possibile installation.</description>
+   <description xml:lang='it'>Minima installazione possibile.</description>
+   <description xml:lang='ja'>最小限のインストール</description>
+   <description xml:lang='kn'>ಅತ್ಯಲ್ಪಸಾಧ್ಯ ಅನುಸ್ಥಾಪನೆ.</description>
+   <description xml:lang='ko'>가능한 최소 설치</description>
+   <description xml:lang='ml'>സാധ്യമായ ഏറ്റവും ചെറിയ ഇന്‍സ്റ്റലേഷന്‍.</description>
+   <description xml:lang='mr'>शक्यतया सर्वात लहान प्रतिष्ठापन.</description>
+   <description xml:lang='or'>କ୍ଷୁଦ୍ରତମ ସମ୍ଭାବ୍ଯ ସ୍ଥାପନା।</description>
+   <description xml:lang='pa'>ਘੱਟੋ-ਘੱਟ ਸੰਭਵ ਇੰਸਟਾਲੇਸ਼ਨ।</description>
+   <description xml:lang='pl'>Najmniejsza możliwa instalacja.</description>
+   <description xml:lang='pt_BR'>Menor instalação possível</description>
+   <description xml:lang='ru'>Минимально возможная установка</description>
+   <description xml:lang='sv'>Minsta möjliga installation</description>
+   <description xml:lang='ta'>மிகச் சிறிய செயல்படுத்தக்கூடிய நிறுவல்.</description>
+   <description xml:lang='te'>సాధ్యమగు అతిచిన్న సంస్థాపన.</description>
+   <description xml:lang='uk'>Мінімально можливе встановлення.</description>
+   <description xml:lang='zh_CN'>最小可能安装。</description>
+   <description xml:lang='zh_TW'>最小型安裝。</description>
+   <default>false</default>
+   <uservisible>false</uservisible>
+   <packagelist>
+      <packagereq type="default">aic94xx-firmware</packagereq>
+      <packagereq type="default">alsa-firmware</packagereq>
+      <packagereq type="default">bfa-firmware</packagereq>
+      <packagereq type="default">dracut-config-rescue</packagereq>
+      <packagereq type="default">ivtv-firmware</packagereq>
+      <packagereq type="default">iwl1000-firmware</packagereq>
+      <packagereq type="default">iwl100-firmware</packagereq>
+      <packagereq type="default">iwl105-firmware</packagereq>
+      <packagereq type="default">iwl135-firmware</packagereq>
+      <packagereq type="default">iwl2000-firmware</packagereq>
+      <packagereq type="default">iwl2030-firmware</packagereq>
+      <packagereq type="default">iwl3160-firmware</packagereq>
+      <packagereq type="default">iwl3945-firmware</packagereq>
+      <packagereq type="default">iwl4965-firmware</packagereq>
+      <packagereq type="default">iwl5000-firmware</packagereq>
+      <packagereq type="default">iwl5150-firmware</packagereq>
+      <packagereq type="default">iwl6000-firmware</packagereq>
+      <packagereq type="default">iwl6000g2a-firmware</packagereq>
+      <packagereq type="default">iwl6000g2b-firmware</packagereq>
+      <packagereq type="default">iwl6050-firmware</packagereq>
+      <packagereq type="default">iwl7260-firmware</packagereq>
+      <packagereq type="default">kernel-tools</packagereq>
+      <packagereq type="default">libertas-sd8686-firmware</packagereq>
+      <packagereq type="default">libertas-sd8787-firmware</packagereq>
+      <packagereq type="default">libertas-usb8388-firmware</packagereq>
+      <packagereq type="default">linux-firmware</packagereq>
+      <packagereq type="default">microcode_ctl</packagereq>
+      <packagereq type="default">NetworkManager</packagereq>
+      <packagereq type="default">NetworkManager-tui</packagereq>
+      <packagereq type="default">postfix</packagereq>
+      <packagereq type="default">ql2100-firmware</packagereq>
+      <packagereq type="default">ql2200-firmware</packagereq>
+      <packagereq type="default">ql23xx-firmware</packagereq>
+      <packagereq type="mandatory">audit</packagereq>
+      <packagereq type="mandatory">basesystem</packagereq>
+      <packagereq type="mandatory">bash</packagereq>
+      <packagereq type="mandatory">biosdevname</packagereq>
+      <packagereq type="mandatory">btrfs-progs</packagereq>
+      <packagereq type="mandatory">coreutils</packagereq>
+      <packagereq type="mandatory">cpp</packagereq>
+      <packagereq type="mandatory">cronie</packagereq>
+      <packagereq type="mandatory">curl</packagereq>
+      <packagereq type="mandatory">dhclient</packagereq>
+      <packagereq type="mandatory">dkms</packagereq>
+      <packagereq type="mandatory">e2fsprogs</packagereq>
+      <packagereq type="mandatory">filesystem</packagereq>
+      <packagereq type="mandatory">gcc</packagereq>
+      <packagereq type="mandatory">glibc</packagereq>
+      <packagereq type="mandatory">glibc-devel</packagereq>
+      <packagereq type="mandatory">glibc-headers</packagereq>
+      <packagereq type="mandatory">git</packagereq>
+      <packagereq type="mandatory">hostname</packagereq>
+      <packagereq type="mandatory">initscripts</packagereq>
+      <packagereq type="mandatory">iproute</packagereq>
+      <packagereq type="mandatory">iprutils</packagereq>
+      <packagereq type="mandatory">iptables</packagereq>
+      <packagereq type="mandatory">iputils</packagereq>
+      <packagereq type="mandatory">irqbalance</packagereq>
+      <packagereq type="mandatory">kbd</packagereq>
+      <packagereq type="mandatory">kernel-devel</packagereq>
+      <packagereq type="mandatory">kernel-headers</packagereq>
+      <packagereq type="mandatory">kexec-tools</packagereq>
+      <packagereq type="mandatory">less</packagereq>
+      <packagereq type="mandatory">libmpc</packagereq>
+      <packagereq type="mandatory">mpfr</packagereq>
+      <packagereq type="mandatory">man-db</packagereq>
+      <packagereq type="mandatory">ncurses</packagereq>
+      <packagereq type="mandatory">net-tools</packagereq>
+      <packagereq type="mandatory">less</packagereq>
+      <packagereq type="mandatory">man-db</packagereq>
+      <packagereq type="mandatory">ncurses</packagereq>
+      <packagereq type="mandatory">openssh-clients</packagereq>
+      <packagereq type="mandatory">openssh-server</packagereq>
+      <packagereq type="mandatory">opnfv-genesis</packagereq>
+      <packagereq type="mandatory">parted</packagereq>
+      <packagereq type="mandatory">passwd</packagereq>
+      <packagereq type="mandatory">patch</packagereq>
+      <packagereq type="mandatory">plymouth</packagereq>
+      <packagereq type="mandatory">policycoreutils</packagereq>
+      <packagereq type="mandatory">procps-ng</packagereq>
+      <packagereq type="mandatory">rootfiles</packagereq>
+      <packagereq type="mandatory">rpm</packagereq>
+      <packagereq type="mandatory">rsyslog</packagereq>
+      <packagereq type="mandatory">selinux-policy-targeted</packagereq>
+      <packagereq type="mandatory">setup</packagereq>
+      <packagereq type="mandatory">shadow-utils</packagereq>
+      <packagereq type="mandatory">sudo</packagereq>
+      <packagereq type="mandatory">systemd</packagereq>
+      <packagereq type="mandatory">tar</packagereq>
+      <packagereq type="mandatory">tuned</packagereq>
+      <packagereq type="mandatory">util-linux</packagereq>
+      <packagereq type="mandatory">vagrant</packagereq>
+      <packagereq type="mandatory">vim-minimal</packagereq>
+      <packagereq type="mandatory">VirtualBox-4.3</packagereq>
+      <packagereq type="mandatory">xfsprogs</packagereq>
+      <packagereq type="mandatory">yum</packagereq>
+      <packagereq type="optional">dracut-config-generic</packagereq>
+      <packagereq type="optional">dracut-fips-aesni</packagereq>
+      <packagereq type="optional">dracut-fips</packagereq>
+      <packagereq type="optional">dracut-network</packagereq>
+      <packagereq type="optional">openssh-keycat</packagereq>
+      <packagereq type="optional">selinux-policy-mls</packagereq>
+      <packagereq type="optional">tboot</packagereq>
+   </packagelist>
+  </group>
+
+  <environment>
+   <id>opnfv_provisioning</id>
+   <name>OPNFV Provisioning Server Install</name>
+   <name xml:lang='as'>নূন্যতম ইনস্টল</name>
+   <name xml:lang='bn_IN'>ন্যূনতম ইনস্টল</name>
+   <name xml:lang='cs'>Minimální instalace</name>
+   <name xml:lang='de'>Minimale Installation</name>
+   <name xml:lang='es'>Instalación mínima</name>
+   <name xml:lang='fr'>Installation minimale</name>
+   <name xml:lang='gu'>ન્યૂનતમ સ્થાપન</name>
+   <name xml:lang='hi'>न्यूनतम संस्थापन</name>
+   <name xml:lang='it'>Installazione minima</name>
+   <name xml:lang='ja'>最小限のインストール</name>
+   <name xml:lang='kn'>ಕನಿಷ್ಟ ಅನುಸ್ಥಾಪನೆ</name>
+   <name xml:lang='ko'>최소 설치</name>
+   <name xml:lang='ml'>ഏറ്റവും കുറഞ്ഞ ഇന്‍സ്റ്റോള്‍</name>
+   <name xml:lang='mr'>किमान इंस्टॉल</name>
+   <name xml:lang='or'>ସର୍ବନିମ୍ନ ସ୍ଥାପନ</name>
+   <name xml:lang='pa'>ਘੱਟ ਤੋਂ ਘੱਟ ਇੰਸਟਾਲ</name>
+   <name xml:lang='pl'>Minimalna instalacja</name>
+   <name xml:lang='pt_BR'>Instalações Mínimas</name>
+   <name xml:lang='ru'>Минимальная установка</name>
+   <name xml:lang='ta'>குறைந்தபட்ச நிறுவல்</name>
+   <name xml:lang='te'>కనీసపు సంస్థాపన</name>
+   <name xml:lang='uk'>Мінімальна система</name>
+   <name xml:lang='zh_CN'>最小安装</name>
+   <name xml:lang='zh_TW'>最小型安裝</name>
+   <description>Installs an OPNFV Provisioning Server</description>
+   <description xml:lang='as'>মৌলি কাৰ্য্যকৰীতা।</description>
+   <description xml:lang='bn_IN'>প্রাথমিক বৈশিষ্ট্য।</description>
+   <description xml:lang='cs'>Základní funkcionalita.</description>
+   <description xml:lang='de'>Grundlegende Funktionalität.</description>
+   <description xml:lang='es'>Funcionalidad básica.</description>
+   <description xml:lang='fr'>Fonctionnalité de base.</description>
+   <description xml:lang='gu'>મૂળભૂત વિધેય.</description>
+   <description xml:lang='hi'>मौलिक प्रकार्यात्मकता.</description>
+   <description xml:lang='it'>Funzione di base.</description>
+   <description xml:lang='ja'>基本的な機能です。</description>
+   <description xml:lang='kn'>ಮೂಲಭೂತ ಕ್ರಿಯಾಶೀಲತೆ.</description>
+   <description xml:lang='ko'>기본적인 기능입니다.</description>
+   <description xml:lang='ml'>അടിസ്ഥാന പ്രവൃത്തിവിശേഷണം.</description>
+   <description xml:lang='mr'>मूळ कार्यक्षमता.</description>
+   <description xml:lang='or'>ସାଧାରଣ କାର୍ଯ୍ୟକାରିତା।</description>
+   <description xml:lang='pa'>ਮੁੱਢਲੀ ਕਾਰਜਸ਼ੀਲਤਾ।</description>
+   <description xml:lang='pl'>Podstawowa funkcjonalność.</description>
+   <description xml:lang='pt_BR'>Função básica</description>
+   <description xml:lang='ru'>Базовая функциональность.</description>
+   <description xml:lang='ta'>அடிப்படை செயலம்சம்.</description>
+   <description xml:lang='te'>ప్రాథమిక ఫంక్షనాలిటి.</description>
+   <description xml:lang='uk'>Основні можливості.</description>
+   <description xml:lang='zh_CN'>基本功能。</description>
+   <description xml:lang='zh_TW'>基本功能。</description>
+   <display_order>5</display_order>
+   <grouplist>
+         <groupid>core</groupid>
+   </grouplist>
+   <optionlist>
+   
+   </optionlist>
+  </environment>
+  <langpacks>
+      <match install="autocorr-%s" name="autocorr-en"/>
+      <match install="firefox-langpack-%s" name="firefox"/>
+      <match install="gimp-help-%s" name="gimp-help"/>
+      <match install="gnome-getting-started-docs-%s" name="gnome-getting-started-docs"/>
+      <match install="hunspell-%s" name="hunspell"/>
+      <match install="hyphen-%s" name="hyphen"/>
+      <match install="kde-l10n-%s" name="kdelibs"/>
+      <match install="libreoffice-langpack-%s" name="libreoffice-core"/>
+      <match install="man-pages-%s" name="man-pages"/>
+      <match install="mythes-%s" name="mythes"/>
+    </langpacks> 
+
+</comps>
diff --git a/foreman/build/cache.mk b/foreman/build/cache.mk
new file mode 100644 (file)
index 0000000..fdfd003
--- /dev/null
@@ -0,0 +1,78 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+SHELL = /bin/bash
+CACHEVALIDATE := $(addsuffix .validate,$(SUBDIRS))
+CACHECLEAN := $(addsuffix .clean,$(CACHEFILES) $(CACHEDIRS))
+
+############################################################################
+# BEGIN of variables to customize
+#
+CACHEFILES += .versions
+CACHEFILES += $(shell basename $(ISOSRC))
+#
+# END of variables to customize
+############################################################################
+
+.PHONY: prepare-cache
+prepare-cache: make-cache-dir $(CACHEDIRS) $(CACHEFILES)
+
+.PHONY: make-cache-dir
+make-cache-dir:
+       @rm -rf ${CACHE_DIR}
+       @mkdir ${CACHE_DIR}
+
+.PHONY: clean-cache
+clean-cache: $(CACHECLEAN)
+       @rm -rf ${CACHE_DIR}
+
+.PHONY: $(CACHEDIRS)
+$(CACHEDIRS):
+       @mkdir -p $(dir $(CACHE_DIR)/$@)
+       @if [ ! -d $(BUILD_BASE)/$@ ]; then\
+          mkdir -p $(BUILD_BASE)/$@;\
+       fi
+       @ln -s $(BUILD_BASE)/$@ $(CACHE_DIR)/$@
+
+.PHONY: $(CACHEFILES)
+$(CACHEFILES):
+       @mkdir -p $(dir $(CACHE_DIR)/$@)
+       @if [ ! -d $(dir $(BUILD_BASE)/$@) ]; then\
+          mkdir -p $(dir $(BUILD_BASE)/$@);\
+       fi
+
+       @if [ ! -f $(BUILD_BASE)/$@ ]; then\
+          echo " " > $(BUILD_BASE)/$@;\
+          ln -s $(BUILD_BASE)/$@ $(CACHE_DIR)/$@;\
+          rm -f $(BUILD_BASE)/$@;\
+       else\
+          ln -s $(BUILD_BASE)/$@ $(CACHE_DIR)/$@;\
+       fi
+
+.PHONY: validate-cache
+validate-cache: $(CACHEVALIDATE)
+       @if [[ $(shell md5sum $(BUILD_BASE)/config.mk | cut -f1 -d " ") != $(shell cat $(VERSION_FILE) | grep config.mk | awk '{print $$NF}') ]]; then\
+          echo "Cache does not match current config.mk definition, cache must be rebuilt";\
+          exit 1;\
+       fi;
+
+       @if [[ $(shell md5sum $(BUILD_BASE)/cache.mk | cut -f1 -d " ") != $(shell cat $(VERSION_FILE) | grep cache.mk | awk '{print $$NF}') ]]; then\
+          echo "Cache does not match current cache.mk definition, cache must be rebuilt";\
+          exit 1;\
+       fi;
+
+.PHONY: $(CACHEVALIDATE)
+$(CACHEVALIDATE): %.validate:
+       @echo VALIDATE $(CACHEVALIDATE)
+       $(MAKE) -C $* -f Makefile validate-cache
+
+.PHONY: $(CACHECLEAN)
+$(CACHECLEAN): %.clean:
+       rm -rf ${CACHE_DIR}/$*
diff --git a/foreman/build/config.mk b/foreman/build/config.mk
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/foreman/build/isolinux.cfg b/foreman/build/isolinux.cfg
new file mode 100644 (file)
index 0000000..12848de
--- /dev/null
@@ -0,0 +1,120 @@
+default vesamenu.c32
+timeout 600
+
+display boot.msg
+
+# Clear the screen when exiting the menu, instead of leaving the menu displayed.
+# For vesamenu, this means the graphical background is still displayed without
+# the menu itself for as long as the screen remains in graphics mode.
+menu clear
+menu background splash.png
+menu title CentOS 7
+menu vshift 8
+menu rows 18
+menu margin 8
+#menu hidden
+menu helpmsgrow 15
+menu tabmsgrow 13
+
+# Border Area
+menu color border * #00000000 #00000000 none
+
+# Selected item
+menu color sel 0 #ffffffff #00000000 none
+
+# Title bar
+menu color title 0 #ff7ba3d0 #00000000 none
+
+# Press [Tab] message
+menu color tabmsg 0 #ff3a6496 #00000000 none
+
+# Unselected menu item
+menu color unsel 0 #84b8ffff #00000000 none
+
+# Selected hotkey
+menu color hotsel 0 #84b8ffff #00000000 none
+
+# Unselected hotkey
+menu color hotkey 0 #ffffffff #00000000 none
+
+# Help text
+menu color help 0 #ffffffff #00000000 none
+
+# A scrollbar of some type? Not sure.
+menu color scrollbar 0 #ffffffff #ff355594 none
+
+# Timeout msg
+menu color timeout 0 #ffffffff #00000000 none
+menu color timeout_msg 0 #ffffffff #00000000 none
+
+# Command prompt text
+menu color cmdmark 0 #84b8ffff #00000000 none
+menu color cmdline 0 #ffffffff #00000000 none
+
+# Do not display the actual menu unless the user presses a key. All that is displayed is a timeout message.
+
+menu tabmsg Press Tab for full configuration options on menu items.
+
+menu separator # insert an empty line
+menu separator # insert an empty line
+
+label linux
+  menu label ^Install OPNFV CentOS 7
+  menu default
+  kernel vmlinuz
+  append initrd=initrd.img inst.stage2=hd:LABEL=OPNFV\x20CentOS\x207\x20x86_64
+
+#label check
+#  menu label Test this ^media & install CentOS 7
+#  kernel vmlinuz
+#  append initrd=initrd.img inst.stage2=hd:LABEL=CentOS\x207\x20x86_64 rd.live.check quiet
+
+menu separator # insert an empty line
+
+# utilities submenu
+menu begin ^Troubleshooting
+  menu title Troubleshooting
+
+label vesa
+  menu indent count 5
+  menu label Install CentOS 7 in ^basic graphics mode
+  text help
+       Try this option out if you're having trouble installing
+       CentOS 7.
+  endtext
+  kernel vmlinuz
+  append initrd=initrd.img inst.stage2=hd:LABEL=OPNFV\x20CentOS\x207\x20x86_64 xdriver=vesa nomodeset quiet
+
+label rescue
+  menu indent count 5
+  menu label ^Rescue a CentOS system
+  text help
+       If the system will not boot, this lets you access files
+       and edit config files to try to get it booting again.
+  endtext
+  kernel vmlinuz
+  append initrd=initrd.img inst.stage2=hd:LABEL=CentOS\x207\x20x86_64 rescue quiet
+
+label memtest
+  menu label Run a ^memory test
+  text help
+       If your system is having issues, a problem with your
+       system's memory may be the cause. Use this utility to
+       see if the memory is working correctly.
+  endtext
+  kernel memtest
+
+menu separator # insert an empty line
+
+label local
+  menu label Boot from ^local drive
+  localboot 0xffff
+
+menu separator # insert an empty line
+menu separator # insert an empty line
+
+label returntomain
+  menu label Return to ^main menu
+  menu exit
+
+menu end
diff --git a/foreman/build/opnfv-genesis.spec b/foreman/build/opnfv-genesis.spec
new file mode 100644 (file)
index 0000000..674760f
--- /dev/null
@@ -0,0 +1,33 @@
+Name:          opnfv-genesis
+Version:       0.1
+Release:       1
+Summary:       The files from the OPNFV genesis repo
+
+Group:         System Environment
+License:       Apache 2.0
+URL:           https://gerrit.opnfv.org/gerrit/genesis.git
+Source0:       opnfv-genesis.tar.gz
+
+#BuildRequires:        
+Requires:      vagrant, VirtualBox-4.3
+
+%description
+The files from the OPNFV genesis repo
+
+%prep
+%setup -q
+
+
+%build
+
+%install
+mkdir -p %{buildroot}/usr/bin/
+cp foreman/ci/deploy.sh %{buildroot}/usr/bin/
+
+%files
+/usr/bin/deploy.sh
+
+
+%changelog
+* Fri Apr 24 2015 Dan Radez <dradez@redhat.com> - 0.1-1
+- Initial Packaging
diff --git a/foreman/ci/README.md b/foreman/ci/README.md
new file mode 100644 (file)
index 0000000..9417ee5
--- /dev/null
@@ -0,0 +1,86 @@
+# Foreman/QuickStack Automatic Deployment README
+
+A simple bash script (deploy.sh) will provision a Foreman/QuickStack VM server and 4-5 other baremetal or VM nodes into an OpenStack HA + OpenDaylight environment.
+
+##Pre-Requisites
+####Baremetal:
+* At least 5 baremetal servers, with 3 interfaces minimum, all connected to separate VLANs
+* DHCP should not be running in any VLAN. Foreman will act as a DHCP server.
+* On the baremetal server that will be your JumpHost, you need to have the 3 interfaces configured with IP addresses
+* On baremetal JumpHost you will need an RPM based linux (CentOS 7 will do) with the kernel up to date (yum update kernel) + at least 2GB of RAM
+* Nodes must be set to PXE boot first in boot priority, off their first NIC, which is connected to the same VLAN as NIC 1 of your JumpHost
+* Nodes need BMC/OOB management configured via IPMI (a quick connectivity check is shown after this list)
+* Internet access via first (Admin) or third interface (Public)
+* No other hypervisors should be running on JumpHost
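+
+A quick, illustrative way to confirm IPMI access to a node's BMC before deploying (the address and credentials below are just the sample values from the settings file included in this change - substitute your own):
+
+```bash
+# Should report the chassis power state if BMC connectivity and credentials are correct
+ipmitool -I lanplus -H 172.30.8.66 -U admin -P octopus chassis power status
+```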
+
+####VM Nodes:
+* JumpHost with 3 interfaces, configured with IPs, connected to separate VLANs
+* DHCP should not be running in any VLAN.  Foreman will act as a DHCP Server
+* On the baremetal JumpHost you will need an RPM-based Linux (CentOS 7 will do) with an up-to-date kernel (yum update kernel) and at least 24GB of RAM
+* Internet access via the first (Admin) or third interface (Public)
+* No other hypervisors should be running on JumpHost
+
+##How It Works
+
+###deploy.sh:
+
+* Detects your network configuration (3 or 4 usable interfaces)
+* Modifies a “ksgen.yml” settings file and the Vagrantfile with the necessary network info (a condensed sketch follows this list)
+* Installs Vagrant and dependencies
+* Downloads the CentOS 7 Vagrant base box and issues a “vagrant up” to start the VM
+* The Vagrantfile points to bootstrap.sh as the provisioner to take over the rest of the install
+
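+As a rough, illustrative sketch (not the full logic), the kind of Vagrantfile substitution deploy.sh performs for each detected interface looks like this - the interface name and addresses here are made-up examples:
+
+```bash
+interface=em1            # hypothetical interface detected on the JumpHost
+new_ip=10.4.1.6          # next free IP found on that interface's subnet
+netmask=255.255.255.0
+# Rewrite the matching eth_replaceN placeholder line in the Vagrantfile
+sed -i "s|^.*eth_replace0.*$|  config.vm.network \"public_network\", ip: \"${new_ip}\", bridge: '${interface}', netmask: \"${netmask}\"|" Vagrantfile
+```
+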
+###bootstrap.sh:
+
+* Runs inside the VM once it is up
+* Installs Khaleesi, Ansible, and Python dependencies
+* Makes a call to Khaleesi to start a playbook: opnfv.yml + the “ksgen.yml” settings file (the exact call is shown below)
+
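+The handoff to Khaleesi is a single call, shown here exactly as it appears in bootstrap.sh later in this change:
+
+```bash
+# Run the opnfv playbook against the ksgen settings shared into the VM via /vagrant
+./run.sh --no-logs --use /vagrant/opnfv_ksgen_settings.yml playbooks/opnfv.yml
+```
+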
+###Khaleesi (Ansible):
+
+* Runs through the playbook to install Foreman/QuickStack inside of the VM
+* Configures services needed for a JumpHost: DHCP, TFTP, DNS
+* Uses info from “ksgen.yml” file to add your nodes into Foreman and set them to Build mode
+
+####Baremetal Only:
+* Issues an API call to Foreman to rebuild all nodes
+* Ansible then waits to make sure nodes come back via ssh checks
+* Ansible then waits for puppet to run on each node and complete
+
+####VM Only:
+* deploy.sh then brings up 5 more Vagrant VMs
+* Checks into Foreman and tells Foreman nodes are built
+* Configures and starts puppet on each node
+
+##Execution Instructions
+
+* On your JumpHost, as root, clone the repo into /root/: 'git clone https://github.com/trozet/bgs_vagrant.git'
+
+####Baremetal Only:
+* Edit opnfv_ksgen_settings.yml → “nodes” section (a way to double-check your edits is shown at the end of this subsection):
+
+  * For each node (compute, controller1..3):
+    * mac_address - change to the MAC address of that node's Admin NIC (1st NIC)
+    * bmc_ip - change to the node's BMC (out-of-band management) IP
+    * bmc_mac - the MAC address of that BMC interface
+    * bmc_user - IPMI username
+    * bmc_pass - IPMI password
+
+  * For each controller node:
+    * private_mac - change to the MAC address of that node's Private NIC (2nd NIC)
+
+* Execute deploy.sh via: ./deploy.sh -base_config /root/bgs_vagrant/opnfv_ksgen_settings.yml
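+
+An illustrative sanity check (not part of deploy.sh) to confirm the per-node BMC and MAC fields were filled in before deploying:
+
+```bash
+grep -E 'mac_address|bmc_ip|bmc_mac|bmc_user|bmc_pass|private_mac' \
+  /root/bgs_vagrant/opnfv_ksgen_settings.yml
+```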
+
+####VM Only:
+* Execute deploy.sh via: ./deploy.sh -virtual
+* Install directory for each VM will be in /tmp (for example /tmp/compute, /tmp/controller1)
+
+####Both Approaches:
+* Install directory for foreman-server is /tmp/bgs_vagrant/ - This is where vagrant will be launched from automatically
+* To access the VM you can 'cd /tmp/bgs_vagrant' and type 'vagrant ssh'
+* To access Foreman enter the IP address shown in 'cat /tmp/bgs_vagrant/opnfv_ksgen_settings.yml | grep foreman_url'
+* The default user/pass is admin/octopus
+
+##Redeploying
+For a baremetal deployment, make sure you run ./clean.sh with your opnfv_ksgen_settings.yml file as "-base_config".  This ensures that your nodes are powered off and that the Foreman VM is destroyed ("vagrant destroy" in the /tmp/bgs_vagrant directory).
+For a VM deployment, also run "vagrant destroy" in each /tmp/<node> directory before redeploying.  To check that no VMs are still running on your JumpHost, use "vboxmanage list runningvms".  A typical sequence is sketched below.
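+
+A typical cleanup sequence might look like the following (illustrative only; adjust paths to your environment):
+
+```bash
+# Baremetal: power nodes off and destroy the Foreman VM
+./clean.sh -base_config /root/bgs_vagrant/opnfv_ksgen_settings.yml
+
+# VM deployments: also destroy each node VM
+for node in /tmp/compute /tmp/controller1 /tmp/controller2 /tmp/controller3; do
+  (cd "$node" && vagrant destroy -f)
+done
+
+# Confirm nothing is left running
+vboxmanage list runningvms
+```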
diff --git a/foreman/ci/Vagrantfile b/foreman/ci/Vagrantfile
new file mode 100644 (file)
index 0000000..100e12d
--- /dev/null
@@ -0,0 +1,93 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+# All Vagrant configuration is done below. The "2" in Vagrant.configure
+# configures the configuration version (we support older styles for
+# backwards compatibility). Please don't change it unless you know what
+# you're doing.
+Vagrant.configure(2) do |config|
+  # The most common configuration options are documented and commented below.
+  # For a complete reference, please see the online documentation at
+  # https://docs.vagrantup.com.
+
+  # Every Vagrant development environment requires a box. You can search for
+  # boxes at https://atlas.hashicorp.com/search.
+  config.vm.box = "chef/centos-7.0"
+
+  # Disable automatic box update checking. If you disable this, then
+  # boxes will only be checked for updates when the user runs
+  # `vagrant box outdated`. This is not recommended.
+  # config.vm.box_check_update = false
+
+  # Create a forwarded port mapping which allows access to a specific port
+  # within the machine from a port on the host machine. In the example below,
+  # accessing "localhost:8080" will access port 80 on the guest machine.
+  # config.vm.network "forwarded_port", guest: 80, host: 8080
+  # Create a private network, which allows host-only access to the machine
+  # using a specific IP.
+  # config.vm.network "private_network", ip: "192.168.33.10"
+
+  # Create a public network, which generally matches to a bridged network.
+  # Bridged networks make the machine appear as another physical device on
+  # your network.
+  # config.vm.network "public_network"
+  config.vm.network "public_network", ip: "10.4.1.2", bridge: 'eth_replace0'
+  config.vm.network "public_network", ip: "10.4.9.2", bridge: 'eth_replace1'
+  config.vm.network "public_network", ip: "10.2.84.2", bridge: 'eth_replace2'
+  config.vm.network "public_network", ip: "10.3.84.2", bridge: 'eth_replace3'
+
+  # IP address of your LAN's router
+  default_gw = ""
+  nat_flag = false
+
+  # Share an additional folder to the guest VM. The first argument is
+  # the path on the host to the actual folder. The second argument is
+  # the path on the guest to mount the folder. And the optional third
+  # argument is a set of non-required options.
+  # config.vm.synced_folder "../data", "/vagrant_data"
+
+  # Provider-specific configuration so you can fine-tune various
+  # backing providers for Vagrant. These expose provider-specific options.
+  # Example for VirtualBox:
+  #
+   config.vm.provider "virtualbox" do |vb|
+  #   # Display the VirtualBox GUI when booting the machine
+  #   vb.gui = true
+  #
+  #   # Customize the amount of memory on the VM:
+     vb.memory = 2048
+     vb.cpus = 2
+   end
+  #
+  # View the documentation for the provider you are using for more
+  # information on available options.
+
+  # Define a Vagrant Push strategy for pushing to Atlas. Other push strategies
+  # such as FTP and Heroku are also available. See the documentation at
+  # https://docs.vagrantup.com/v2/push/atlas.html for more information.
+  # config.push.define "atlas" do |push|
+  #   push.app = "YOUR_ATLAS_USERNAME/YOUR_APPLICATION_NAME"
+  # end
+
+  # Enable provisioning with a shell script. Additional provisioners such as
+  # Puppet, Chef, Ansible, Salt, and Docker are also available. Please see the
+  # documentation for more information about their specific syntax and use.
+  # config.vm.provision "shell", inline: <<-SHELL
+  #   sudo apt-get update
+  #   sudo apt-get install -y apache2
+  # SHELL
+  
+  config.ssh.username = 'root'
+  config.ssh.password = 'vagrant'
+  config.ssh.insert_key = 'true'
+  config.vm.provision "ansible" do |ansible|
+     ansible.playbook = "reload_playbook.yml"
+  end
+  config.vm.provision :shell, :inline => "mount -t vboxsf vagrant /vagrant"
+  config.vm.provision :shell, :inline => "route add default gw #{default_gw}"
+  if nat_flag
+    config.vm.provision :shell, path: "nat_setup.sh"
+  end
+  config.vm.provision :shell, path: "bootstrap.sh"
+end
diff --git a/foreman/ci/bootstrap.sh b/foreman/ci/bootstrap.sh
new file mode 100755 (executable)
index 0000000..4bc22ed
--- /dev/null
@@ -0,0 +1,55 @@
+#!/usr/bin/env bash
+
+#bootstrap script for installing/running Khaleesi in Foreman/QuickStack VM
+#author: Tim Rozet (trozet@redhat.com)
+#
+#Uses Vagrant and VirtualBox
+#Vagrantfile uses bootstrap.sh which installs Khaleesi
+#Khaleesi will install and configure Foreman/QuickStack
+#
+#Pre-requisites:
+#Target system should be CentOS 7
+#Ensure the host's kernel is up to date (yum update)
+
+##VARS
+reset=`tput sgr0`
+blue=`tput setaf 4`
+red=`tput setaf 1`
+green=`tput setaf 2`
+
+##END VARS
+
+
+# Install EPEL repo for access to many other yum repos
+# Major version is pinned to force some consistency for Arno
+yum install -y epel-release-7*
+
+# Install other required packages
+# Major version is pinned to force some consistency for Arno
+if ! yum -y install python-pip-1* python-virtualenv-1* gcc-4* git-1* sshpass-1* ansible-1* python-requests-1*; then
+  printf '%s\n' 'bootstrap.sh: failed to install required packages' >&2
+  exit 1
+fi
+
+cd /opt
+
+echo "Cloning khaleesi to /opt"
+
+if [ ! -d khaleesi ]; then
+  if ! git clone -b v1.0 https://github.com/trozet/khaleesi.git; then
+    printf '%s\n' 'bootstrap.sh: Unable to git clone khaleesi' >&2
+    exit 1
+  fi
+fi
+
+cd khaleesi
+
+cp ansible.cfg.example ansible.cfg
+
+echo "Completed Installing Khaleesi"
+
+cd /opt/khaleesi/
+
+ansible localhost -m setup -i local_hosts
+
+./run.sh --no-logs --use /vagrant/opnfv_ksgen_settings.yml playbooks/opnfv.yml
diff --git a/foreman/ci/build.sh b/foreman/ci/build.sh
new file mode 100755 (executable)
index 0000000..7a1ef52
--- /dev/null
@@ -0,0 +1,398 @@
+#!/bin/bash
+set -e
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# dradez@redhat.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+trap 'echo "Exiting ..."; \
+if [ -f ${LOCK_FILE} ]; then \
+   if [ $(cat ${LOCK_FILE}) -eq $$ ]; then \
+      rm -f ${LOCK_FILE}; \
+   fi; \
+fi;' EXIT
+
+############################################################################
+# BEGIN of usage description
+#
+usage ()
+{
+cat << EOF
+$0 Builds the Foreman OPNFV Deployment ISO
+
+usage: $0 [-s spec-file] [-c cache-URI] [-l log-file] [-f Flags] build-directory
+
+OPTIONS:
+  -s spec-file ($BUILD_SPEC), define the build-spec file, default ../build/config.mk
+  -c cache base URI ($BUILD_CACHE_URI), specifies the base URI to a build cache to be used/updated - the name is automatically generated from the md5sum of the spec-file; http://, ftp://, and file://[absolute path] are supported.
+
+  -l log-file ($BUILD_LOG), specifies the output log-file (stdout and stderr), if not specified logs are output to console as normal
+  -v version tag to be applied to the build result
+  -r alternative remote access method script/program. curl is default.
+  -t run small build-script unit test.
+  -T run large build-script unit test.
+  -f build flags ($BUILD_FLAGS):
+     o s: Do nothing, succeed
+     o f: Do nothing, fail
+     o t: run build unit tests
+     o i: run interactive (-t flag to docker run)
+     o P: Populate a new local cache and push it to the (-c cache-URI) cache artifactory if -c option is present, currently file://, http:// and ftp:// are supported
+     o d: Detach - NOT YET SUPPORTED
+
+  build-directory ($BUILD_DIR), specifies the directory for the output artifacts (.iso file).
+
+  -h help, prints this help text
+
+Description:
+build.sh builds opnfv .iso artifact.
+To reduce build time it uses a build cache in a local or remote location. The cache is rebuilt and uploaded if any of the conditions below are met:
+1) The P(opulate) flag is set and the -c cache-base-URI is provided; if -c is not provided the cache will stay local.
+2) The cache is invalidated by one of the following conditions:
+   - The config spec md5sum does not match the md5sum of the spec the cache was built from.
+   - The git Commit-Id of the remote repos/HEAD defined in the spec file does not correspond to the Commit-Id the cache was built with.
+3) A valid cache does not exist on the specified -c cache-base-URI.
+
+The cache URI object name is foreman_cache-"md5sum(spec file)"
+
+Logging goes to the console by default, but can be directed elsewhere with the -l option, in which case both stdout and stderr are redirected to that destination.
+
+Built-in unit testing of components is enabled by adding the t(est) flag.
+
+Return codes:
+ - 0 Success!
+ - 1-99 Unspecified build error
+ - 100-199 Build system internal error (not the build itself)
+   o 101 Build system instance busy
+ - 200 Build failure
+
+Examples:
+build -c http://opnfv.org/artifactory/foreman/cache -d ~/jenkins/genesis/foreman/ci/output -f ti
+NOTE: Currently the build scope is limited to the git root of the repository; -d destination locations outside that scope will not work
+EOF
+}
+#
+# END of usage description
+############################################################################
+
+############################################################################
+# BEGIN of variables to customize
+#
+BUILD_BASE=$(readlink -e ../build/)
+RESULT_DIR="${BUILD_BASE}/release"
+BUILD_SPEC="${BUILD_BASE}/config.mk"
+CACHE_DIR="cache"
+LOCAL_CACHE_ARCH_NAME="foreman-cache"
+REMOTE_CACHE_ARCH_NAME="foreman_cache-$(md5sum ${BUILD_SPEC}| cut -f1 -d " ")"
+REMOTE_ACCESS_METHD=curl
+INCLUDE_DIR=../include
+#
+# END of variables to customize
+############################################################################
+
+############################################################################
+# BEGIN of script assigned variables
+#
+SCRIPT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
+LOCK_FILE="${SCRIPT_DIR}/.build.lck"
+CACHE_TMP="${SCRIPT_DIR}/tmp"
+TEST_SUCCEED=0
+TEST_FAIL=0
+UNIT_TEST=0
+UPDATE_CACHE=0
+POPULATE_CACHE=0
+RECURSIVE=0
+DETACH=0
+DEBUG=0
+INTEGRATION_TEST=0
+FULL_INTEGRATION_TEST=0
+INTERACTIVE=0
+BUILD_CACHE_URI=
+BUILD_SPEC=
+BUILD_DIR=
+BUILD_LOG=
+BUILD_VERSION=
+MAKE_ARGS=
+#
+# END of script assigned variables
+############################################################################
+
+############################################################################
+# BEGIN of include pragmas
+#
+source ${INCLUDE_DIR}/build.sh.debug
+#
+# END of include
+############################################################################
+
+############################################################################
+# BEGIN of main
+#
+while getopts "s:c:d:v:f:l:r:RtTh" OPTION
+do
+    case $OPTION in
+       h)
+           usage
+           rc=0
+           exit $rc
+           ;;
+
+       s)
+           BUILD_SPEC=${OPTARG}
+           ;;
+
+       c)
+           BUILD_CACHE_URI=${OPTARG}
+           ;;
+
+       d)
+           BUILD_DIR=${OPTARG}
+           ;;
+
+       l)
+           BUILD_LOG=${OPTARG}
+           ;;
+
+       v)
+           BUILD_VERSION=${OPTARG}
+           ;;
+
+       f)
+           BUILD_FLAGS=${OPTARG}
+           ;;
+
+       r)  REMOTE_ACCESS_METHD=${OPTARG}
+           ;;
+
+       R)
+           RECURSIVE=1
+           ;;
+
+       t)
+           INTEGRATION_TEST=1
+           ;;
+
+       T)
+           INTEGRATION_TEST=1
+           FULL_INTEGRATION_TEST=1
+           ;;
+
+       *)
+           echo "${OPTION} is not a valid argument"
+           rc=100
+           exit $rc
+           ;;
+    esac
+done
+
+if [ -z $BUILD_DIR ]; then
+    BUILD_DIR=$(echo $@ | cut -d ' ' -f ${OPTIND})
+fi
+
+for ((i=0; i<${#BUILD_FLAGS};i++)); do
+    case ${BUILD_FLAGS:$i:1} in
+       s)
+           rc=0
+           exit $rc
+           ;;
+
+       f)
+           rc=1
+           exit $rc
+           ;;
+
+       t)
+           UNIT_TEST=1
+           ;;
+
+       i)
+           INTERACTIVE=1
+           ;;
+
+       P)
+           POPULATE_CACHE=1
+           ;;
+
+       d)
+           DETACH=1
+           echo "Detach is not yet supported - exiting ...."
+           rc=100
+           exit $rc
+           ;;
+
+       D)
+           DEBUG=1
+           ;;
+
+       *)
+           echo "${BUILD_FLAGS:$i:1} is not a valid build flag - exiting ...."
+           rc=100
+           exit $rc
+           ;;
+    esac
+done
+
+shift $((OPTIND-1))
+
+if [ ${INTEGRATION_TEST} -eq 1 ]; then
+    integration-test
+    rc=0
+    exit $rc
+fi
+
+if [ ! -f ${BUILD_SPEC} ]; then
+    echo "spec file does not exist: $BUILD_SPEC - exiting ...."
+    rc=100
+    exit $rc
+fi
+
+if [ -z ${BUILD_DIR} ]; then
+    echo "Missing build directory - exiting ...."
+    rc=100
+    exit $rc
+fi
+
+if [ ! -z ${BUILD_LOG} ]; then
+    if [[ ${RECURSIVE} -ne 1 ]]; then
+       set +e
+       eval $0 -R $@ > ${BUILD_LOG} 2>&1
+       rc=$?
+       set -e
+       if [ $rc -ne 0 ]; then
+           exit $rc
+       fi
+    fi
+fi
+
+if [ ${TEST_SUCCEED} -eq 1 ]; then
+    sleep 1
+    rc=0
+    exit $rc
+fi
+
+if [ ${TEST_FAIL} -eq 1 ]; then
+    sleep 1
+    rc=1
+    exit $rc
+fi
+
+if [ -e ${LOCK_FILE} ]; then
+    echo "A build job is already running, exiting....."
+    rc=101
+    exit $rc
+fi
+
+echo $$ > ${LOCK_FILE}
+
+if [ ! -z ${BUILD_CACHE_URI} ]; then
+    if [ ${POPULATE_CACHE} -ne 1 ]; then
+       rm -rf ${CACHE_TMP}/cache
+       mkdir -p ${CACHE_TMP}/cache
+       echo "Downloading cach file ${BUILD_CACHE_URI}/${REMOTE_CACHE_ARCH_NAME} ..."
+       set +e
+       ${REMOTE_ACCESS_METHD} -o ${CACHE_TMP}/cache/${LOCAL_CACHE_ARCH_NAME}.tgz ${BUILD_CACHE_URI}/${REMOTE_CACHE_ARCH_NAME}.tgz
+       rc=$?
+       set -e
+       if [ $rc -ne 0 ]; then
+               echo "Remote cache does not exist, or is not accessible - a new cache will be built ..."
+               POPULATE_CACHE=1
+       else
+           echo "Unpacking cache file ..."
+           tar -C ${CACHE_TMP}/cache -xvf ${CACHE_TMP}/cache/${LOCAL_CACHE_ARCH_NAME}.tgz
+           cp ${CACHE_TMP}/cache/cache/.versions ${BUILD_BASE}/.
+           set +e
+                   make -C ${BUILD_BASE} validate-cache;
+           rc=$?
+           set -e
+
+           if [ $rc -ne 0 ]; then
+               echo "Cache invalid - a new cache will be built "
+               POPULATE_CACHE=1
+           else
+               cp -rf ${CACHE_TMP}/cache/cache/. ${BUILD_BASE}
+           fi
+           rm -rf ${CACHE_TMP}/cache
+       fi
+    fi
+fi
+
+if [ ${POPULATE_CACHE} -eq 1 ]; then
+    if [ ${DEBUG} -eq 0 ]; then
+       set +e
+       cd ${BUILD_BASE} && make clean
+       rc=$?
+       set -e
+       if [ $rc -ne 0 ]; then
+           echo "Build - make clean failed, exiting ..."
+           rc=100
+           exit $rc
+       fi
+    fi
+fi
+
+if [ ! -z ${BUILD_VERSION} ]; then
+    MAKE_ARGS+="REVSTATE=${BUILD_VERSION} "
+fi
+
+if [ ${UNIT_TEST} -eq 1 ]; then
+    MAKE_ARGS+="UNIT_TEST=TRUE "
+else
+    MAKE_ARGS+="UNIT_TEST=FALSE "
+fi
+
+if [ ${INTERACTIVE} -eq 1 ]; then
+    MAKE_ARGS+="INTERACTIVE=TRUE "
+else
+    MAKE_ARGS+="INTERACTIVE=FALSE "
+fi
+
+MAKE_ARGS+=all
+
+if [ ${DEBUG} -eq 0 ]; then
+    set +e
+    cd ${BUILD_BASE} && make ${MAKE_ARGS}
+    rc=$?
+    set -e
+    if [ $rc -gt 0 ]; then
+       echo "Build: make all failed, exiting ..."
+       rc=200
+       exit $rc
+    fi
+else
+debug_make
+fi
+set +e
+make -C ${BUILD_BASE} prepare-cache
+rc=$?
+set -e
+
+if [ $rc -gt 0 ]; then
+    echo "Build: make prepare-cache failed - exiting ..."
+    rc=100
+    exit $rc
+fi
+echo "Copying built OPNFV .iso file to target directory ${BUILD_DIR} ..."
+rm -rf ${BUILD_DIR}
+mkdir -p ${BUILD_DIR}
+cp ${BUILD_BASE}/.versions ${BUILD_DIR}
+cp ${RESULT_DIR}/*.iso* ${BUILD_DIR}
+
+if [ $POPULATE_CACHE -eq 1 ]; then
+    if [ ! -z ${BUILD_CACHE_URI} ]; then
+       echo "Building cache ..."
+       tar --dereference -C ${BUILD_BASE} -caf ${BUILD_BASE}/${LOCAL_CACHE_ARCH_NAME}.tgz ${CACHE_DIR}
+       echo "Uploading cache ${BUILD_CACHE_URI}/${REMOTE_CACHE_ARCH_NAME}"
+       ${REMOTE_ACCESS_METHD} -T ${BUILD_BASE}/${LOCAL_CACHE_ARCH_NAME}.tgz ${BUILD_CACHE_URI}/${REMOTE_CACHE_ARCH_NAME}.tgz
+       rm ${BUILD_BASE}/${LOCAL_CACHE_ARCH_NAME}.tgz
+    fi
+fi
+echo "Success!!!"
+exit 0
+#
+# END of main
+############################################################################
diff --git a/foreman/ci/clean.sh b/foreman/ci/clean.sh
new file mode 100755 (executable)
index 0000000..f61ac93
--- /dev/null
@@ -0,0 +1,152 @@
+#!/usr/bin/env bash
+
+#Clean script to uninstall provisioning server for Foreman/QuickStack
+#author: Tim Rozet (trozet@redhat.com)
+#
+#Uses Vagrant and VirtualBox
+#
+#Destroys Vagrant VM running in /tmp/bgs_vagrant
+#Shuts down all nodes found in Khaleesi settings
+#Removes hypervisor kernel modules (VirtualBox)
+
+##VARS
+reset=`tput sgr0`
+blue=`tput setaf 4`
+red=`tput setaf 1`
+green=`tput setaf 2`
+##END VARS
+
+##FUNCTIONS
+display_usage() {
+  echo -e "\n\n${blue}This script is used to uninstall Foreman/QuickStack Installer and Clean OPNFV Target System${reset}\n\n"
+  echo -e "\nUsage:\n$0 [arguments] \n"
+  echo -e "\n   -no_parse : No variable parsing into config. Flag. \n"
+  echo -e "\n   -base_config : Full path of ksgen settings file to parse. Required.  Will provide BMC info to shutdown hosts.  Example:  -base_config /opt/myinventory.yml \n"
+}
+
+##END FUNCTIONS
+
+if [[ ( $1 == "--help") ||  $1 == "-h" ]]; then
+    display_usage
+    exit 0
+fi
+
+echo -e "\n\n${blue}This script is used to uninstall Foreman/QuickStack Installer and Clean OPNFV Target System${reset}\n\n"
+echo "Use -h to display help"
+sleep 2
+
+while [ "`echo $1 | cut -c1`" = "-" ]
+do
+    echo $1
+    case "$1" in
+        -base_config)
+                base_config=$2
+                shift 2
+            ;;
+        *)
+                display_usage
+                exit 1
+            ;;
+esac
+done
+
+
+# Install ipmitool
+# Major version is pinned to force some consistency for Arno
+if ! yum list installed | grep -i ipmitool; then
+  if ! yum -y install ipmitool-1*; then
+    echo "${red}Unable to install ipmitool!${reset}"
+    exit 1
+  fi
+else
+  echo "${blue}Skipping ipmitool as it is already installed!${reset}"
+fi
+
+###find all the bmc IPs and number of nodes
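+### e.g. a settings line "    bmc_ip: 172.30.8.74" yields 172.30.8.74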
+node_counter=0
+output=`grep bmc_ip $base_config | grep -Eo '[0-9]+.[0-9]+.[0-9]+.[0-9]+'`
+for line in ${output} ; do
+  bmc_ip[$node_counter]=$line
+  ((node_counter++))
+done
+
+max_nodes=$((node_counter-1))
+
+###find bmc_users per node
+node_counter=0
+output=`grep bmc_user $base_config | sed 's/\s*bmc_user:\s*//'`
+for line in ${output} ; do
+  bmc_user[$node_counter]=$line
+  ((node_counter++))
+done
+
+###find bmc_pass per node
+node_counter=0
+output=`grep bmc_pass $base_config | sed 's/\s*bmc_pass:\s*//'`
+for line in ${output} ; do
+  bmc_pass[$node_counter]=$line
+  ((node_counter++)) 
+done
+
+for mynode in `seq 0 $max_nodes`; do
+  echo "${blue}Node: ${bmc_ip[$mynode]} ${bmc_user[$mynode]} ${bmc_pass[$mynode]} ${reset}"
+  if ipmitool -I lanplus -P ${bmc_pass[$mynode]} -U ${bmc_user[$mynode]} -H ${bmc_ip[$mynode]} chassis power off; then
+    echo "${blue}Node: $mynode, ${bmc_ip[$mynode]} powered off!${reset}"
+  else
+    echo "${red}Error: Unable to power off $mynode, ${bmc_ip[$mynode]} ${reset}"
+    exit 1
+  fi
+done
+
+###check to see if vbox is installed
+vboxpkg=`rpm -qa | grep VirtualBox`
+if [ $? -eq 0 ]; then
+  skip_vagrant=0
+else
+  skip_vagrant=1
+fi
+
+###destroy vagrant
+if [ $skip_vagrant -eq 0 ]; then
+  cd /tmp/bgs_vagrant
+  if vagrant destroy -f; then
+    echo "${blue}Successfully destroyed Foreman VM ${reset}"
+  else
+    echo "${red}Unable to destroy Foreman VM ${reset}"
+    echo "${blue}Checking if vagrant was already destroyed and no process is active...${reset}"
    if ps axf | grep vagrant | grep -v grep; then
+      echo "${red}Vagrant VM still exists...exiting ${reset}"
+      exit 1
+    else
+      echo "${blue}Vagrant process doesn't exist.  Moving on... ${reset}"
+    fi
+  fi
+
+  ###kill virtualbox
+  echo "${blue}Killing VirtualBox ${reset}"
+  killall virtualbox
+  killall VBoxHeadless
+
+  ###remove virtualbox
+  echo "${blue}Removing VirtualBox ${reset}"
+  yum -y remove $vboxpkg
+
+else
+  echo "${blue}Skipping Vagrant destroy + Vbox Removal as VirtualBox package is already removed ${reset}"
+fi
+
+
+###remove kernel modules
+echo "${blue}Removing kernel modules ${reset}"
+for kernel_mod in vboxnetadp vboxnetflt vboxpci vboxdrv; do
+  if ! rmmod $kernel_mod; then
+    if rmmod $kernel_mod 2>&1 | grep -i 'not currently loaded'; then
+      echo "${blue} $kernel_mod is not currently loaded! ${reset}"
+    else
+      echo "${red}Error trying to remove Kernel Module: $kernel_mod ${reset}"
+      exit 1
+    fi
+  else
+    echo "${blue}Removed Kernel Module: $kernel_mod ${reset}"
+  fi
+done
diff --git a/foreman/ci/deploy.sh b/foreman/ci/deploy.sh
new file mode 100755 (executable)
index 0000000..86f03a7
--- /dev/null
@@ -0,0 +1,694 @@
+#!/usr/bin/env bash
+
+#Deploy script to install provisioning server for Foreman/QuickStack
+#author: Tim Rozet (trozet@redhat.com)
+#
+#Uses Vagrant and VirtualBox
+#Vagrantfile uses bootstrap.sh which installs Khaleesi
+#Khaleesi will install and configure Foreman/QuickStack
+#
+#Pre-requisites:
+#Supports 3 or 4 network interface configuration
+#Target system must be RPM based
+#Ensure the host's kernel is up to date (yum update)
+#Provisioned nodes are expected to have the following order of network connections (note: not all have to exist, but the order is maintained):
+#eth0- admin network
+#eth1- private network (+storage network in 3 NIC config)
+#eth2- public network
+#eth3- storage network
+#script assumes /24 subnet mask
+
+##VARS
+reset=`tput sgr0`
+blue=`tput setaf 4`
+red=`tput setaf 1`
+green=`tput setaf 2`
+
+declare -A interface_arr
+##END VARS
+
+##FUNCTIONS
+display_usage() {
+  echo -e "\n\n${blue}This script is used to deploy Foreman/QuickStack Installer and Provision OPNFV Target System${reset}\n\n"
+  echo -e "\n${green}Make sure you have the latest kernel installed before running this script! (yum update kernel +reboot)${reset}\n"
+  echo -e "\nUsage:\n$0 [arguments] \n"
+  echo -e "\n   -no_parse : No variable parsing into config. Flag. \n"
+  echo -e "\n   -base_config : Full path of settings file to parse. Optional.  Will provide a new base settings file rather than the default.  Example:  -base_config /opt/myinventory.yml \n"
+  echo -e "\n   -virtual : Node virtualization instead of baremetal. Flag. \n"
+}
+
+##find ip of interface
+##params: interface name
+function find_ip {
+  ip addr show $1 | grep -Eo '^\s+inet\s+[\.0-9]+' | awk '{print $2}'
+}
+
+##finds subnet of ip and netmask
+##params: ip, netmask
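+##example: find_subnet 10.4.1.12 255.255.255.0 -> 10.4.1.0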
+function find_subnet {
+  IFS=. read -r i1 i2 i3 i4 <<< "$1"
+  IFS=. read -r m1 m2 m3 m4 <<< "$2"
+  printf "%d.%d.%d.%d\n" "$((i1 & m1))" "$((i2 & m2))" "$((i3 & m3))" "$((i4 & m4))"
+}
+
+##increments subnet by a value
+##params: ip, value
+##assumes low value
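+##example: increment_subnet 10.2.84.0 150 -> 10.2.84.150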
+function increment_subnet {
+  IFS=. read -r i1 i2 i3 i4 <<< "$1"
+  printf "%d.%d.%d.%d\n" "$i1" "$i2" "$i3" "$((i4 | $2))"
+}
+
+
+##finds netmask of interface
+##params: interface
+##returns long format 255.255.x.x
+function find_netmask {
+  ifconfig $1 | grep -Eo 'netmask\s+[\.0-9]+' | awk '{print $2}'
+}
+
+##finds short netmask of interface
+##params: interface
+##returns short format, ex: /21
+function find_short_netmask {
+  echo "/$(ip addr show $1 | grep -Eo '^\s+inet\s+[\/\.0-9]+' | awk '{print $2}' | cut -d / -f2)"
+}
+
+##increments next IP
+##params: ip
+##assumes a /24 subnet
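+##example: next_ip 10.4.1.12 -> 10.4.1.13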
+function next_ip {
+  baseaddr="$(echo $1 | cut -d. -f1-3)"
+  lsv="$(echo $1 | cut -d. -f4)"
+  if [ "$lsv" -ge 254 ]; then
+    return 1
+  fi
+  ((lsv++))
+  echo $baseaddr.$lsv
+}
+
+##removes the network interface config from Vagrantfile
+##params: interface
+##assumes you are in the directory of Vagrantfile
+function remove_vagrant_network {
+  sed -i 's/^.*'"$1"'.*$//' Vagrantfile
+}
+
+##check if IP is in use
+##params: ip
+##ping ip to get arp entry, then check arp
+function is_ip_used {
+  ping -c 5 $1 > /dev/null 2>&1
+  arp -n | grep "$1 " | grep -iv incomplete > /dev/null 2>&1
+}
+
+##find next usable IP
+##params: ip
+function next_usable_ip {
+  new_ip=$(next_ip $1)
+  while [ "$new_ip" ]; do
+    if ! is_ip_used $new_ip; then
+      echo $new_ip
+      return 0
+    fi
+    new_ip=$(next_ip $new_ip)
+  done
+  return 1
+}
+
+##increment ip by value
+##params: ip, amount to increment by
+##increment_ip $next_private_ip 10
+function increment_ip {
+  baseaddr="$(echo $1 | cut -d. -f1-3)"
+  lsv="$(echo $1 | cut -d. -f4)"
+  incrval=$2
+  lsv=$((lsv+incrval))
+  if [ "$lsv" -ge 254 ]; then
+    return 1
+  fi
+  echo $baseaddr.$lsv
+}
+
+##translates yaml into variables
+##params: filename, prefix (ex. "config_")
+##usage: parse_yaml opnfv_ksgen_settings.yml "config_"
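+##example: a line "  admin_email: x" nested under "global_params:" becomes config_global_params_admin_email="x"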
+parse_yaml() {
+   local prefix=$2
+   local s='[[:space:]]*' w='[a-zA-Z0-9_]*' fs=$(echo @|tr @ '\034')
+   sed -ne "s|^\($s\)\($w\)$s:$s\"\(.*\)\"$s\$|\1$fs\2$fs\3|p" \
+        -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p"  $1 |
+   awk -F$fs '{
+      indent = length($1)/2;
+      vname[indent] = $2;
+      for (i in vname) {if (i > indent) {delete vname[i]}}
+      if (length($3) > 0) {
+         vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
+         printf("%s%s%s=\"%s\"\n", "'$prefix'",vn, $2, $3);
+      }
+   }'
+}
+
+##END FUNCTIONS
+
+if [[ ( $1 == "--help") ||  $1 == "-h" ]]; then
+    display_usage
+    exit 0
+fi
+
+echo -e "\n\n${blue}This script is used to deploy Foreman/QuickStack Installer and Provision OPNFV Target System${reset}\n\n"
+echo "Use -h to display help"
+sleep 2
+
+while [ "`echo $1 | cut -c1`" = "-" ]
+do
+    echo $1
+    case "$1" in
+        -base_config)
+                base_config=$2
+                shift 2
+            ;;
+        -no_parse)
+                no_parse="TRUE"
+                shift 1
+            ;;
+        -virtual)
+                virtual="TRUE"
+                shift 1
+            ;;
+        *)
+                display_usage
+                exit 1
+            ;;
+esac
+done
+
+##disable selinux
+/sbin/setenforce 0
+
+# Install EPEL repo for access to many other yum repos
+# Major version is pinned to force some consistency for Arno
+yum install -y epel-release-7*
+
+# Install other required packages
+# Major versions are pinned to force some consistency for Arno
+if ! yum install -y binutils-2* gcc-4* make-3* patch-2* libgomp-4* glibc-headers-2* glibc-devel-2* kernel-headers-3* kernel-devel-3* dkms-2* psmisc-22*; then
+  printf '%s\n' 'deploy.sh: Unable to install dependency packages' >&2
+  exit 1
+fi
+
+##install VirtualBox repo
+if cat /etc/*release | grep -i "Fedora release"; then
+  vboxurl=http://download.virtualbox.org/virtualbox/rpm/fedora/\$releasever/\$basearch
+else
+  vboxurl=http://download.virtualbox.org/virtualbox/rpm/el/\$releasever/\$basearch
+fi
+
+cat > /etc/yum.repos.d/virtualbox.repo << EOM
+[virtualbox]
+name=Oracle Linux / RHEL / CentOS-\$releasever / \$basearch - VirtualBox
+baseurl=$vboxurl
+enabled=1
+gpgcheck=1
+gpgkey=https://www.virtualbox.org/download/oracle_vbox.asc
+skip_if_unavailable = 1
+keepcache = 0
+EOM
+
+##install VirtualBox
+if ! yum list installed | grep -i virtualbox; then
+  if ! yum -y install VirtualBox-4.3; then
+    printf '%s\n' 'deploy.sh: Unable to install virtualbox package' >&2
+    exit 1
+  fi
+fi
+
+##install kmod-VirtualBox
+if ! lsmod | grep vboxdrv; then
+  if ! sudo /etc/init.d/vboxdrv setup; then
+    printf '%s\n' 'deploy.sh: Unable to install kernel module for virtualbox' >&2
+    exit 1
+  fi
+else
+  printf '%s\n' 'deploy.sh: Skipping kernel module for virtualbox.  Already Installed'
+fi
+
+##install Ansible
+if ! yum list installed | grep -i ansible; then
+  if ! yum -y install ansible-1*; then
+    printf '%s\n' 'deploy.sh: Unable to install Ansible package' >&2
+    exit 1
+  fi
+fi
+
+##install Vagrant
+if ! rpm -qa | grep vagrant; then
+  if ! rpm -Uvh https://dl.bintray.com/mitchellh/vagrant/vagrant_1.7.2_x86_64.rpm; then
+    printf '%s\n' 'deploy.sh: Unable to install vagrant package' >&2
+    exit 1
+  fi
+else
+  printf '%s\n' 'deploy.sh: Skipping Vagrant install as it is already installed.'
+fi
+
+##add centos 7 box to vagrant
+if ! vagrant box list | grep chef/centos-7.0; then
+  if ! vagrant box add chef/centos-7.0 --provider virtualbox; then
+    printf '%s\n' 'deploy.sh: Unable to download centos7 box for Vagrant' >&2
+    exit 1
+  fi
+else
+  printf '%s\n' 'deploy.sh: Skipping Vagrant box add as centos-7.0 is already installed.'
+fi
+
+##install workaround for centos7
+if ! vagrant plugin list | grep vagrant-centos7_fix; then
+  if ! vagrant plugin install vagrant-centos7_fix; then
+    printf '%s\n' 'deploy.sh: Warning: unable to install vagrant centos7 workaround' >&2
+  fi
+else
+  printf '%s\n' 'deploy.sh: Skipping Vagrant plugin as centos7 workaround is already installed.'
+fi
+
+cd /tmp/
+
+##remove bgs vagrant incase it wasn't cleaned up
+rm -rf /tmp/bgs_vagrant
+
+##clone bgs vagrant
+##will change this to be opnfv repo when commit is done
+if ! git clone -b v1.0 https://github.com/trozet/bgs_vagrant.git; then
+  printf '%s\n' 'deploy.sh: Unable to clone vagrant repo' >&2
+  exit 1
+fi
+
+cd bgs_vagrant
+
+echo "${blue}Detecting network configuration...${reset}"
+##detect host 1 or 3 interface configuration
+#output=`ip link show | grep -E "^[0-9]" | grep -Ev ": lo|tun|virbr|vboxnet" | awk '{print $2}' | sed 's/://'`
+output=`ifconfig | grep -E "^[a-zA-Z0-9]+:"| grep -Ev "lo|tun|virbr|vboxnet" | awk '{print $1}' | sed 's/://'`
+
+if [ ! "$output" ]; then
+  printf '%s\n' 'deploy.sh: Unable to detect interfaces to bridge to' >&2
+  exit 1
+fi
+
+##find number of interfaces with ip and substitute in VagrantFile
+if_counter=0
+for interface in ${output}; do
+
+  if [ "$if_counter" -ge 4 ]; then
+    break
+  fi
+  interface_ip=$(find_ip $interface)
+  if [ ! "$interface_ip" ]; then
+    continue
+  fi
+  new_ip=$(next_usable_ip $interface_ip)
+  if [ ! "$new_ip" ]; then
+    continue
+  fi
+  interface_arr[$interface]=$if_counter
+  interface_ip_arr[$if_counter]=$new_ip
+  subnet_mask=$(find_netmask $interface)
+  if [ "$if_counter" -eq 1 ]; then
+    private_subnet_mask=$subnet_mask
+    private_short_subnet_mask=$(find_short_netmask $interface)
+  fi
+  if [ "$if_counter" -eq 2 ]; then
+    public_subnet_mask=$subnet_mask
+    public_short_subnet_mask=$(find_short_netmask $interface)
+  fi
+  if [ "$if_counter" -eq 3 ]; then
+    storage_subnet_mask=$subnet_mask
+  fi
+  sed -i 's/^.*eth_replace'"$if_counter"'.*$/  config.vm.network "public_network", ip: '\""$new_ip"\"', bridge: '\'"$interface"\'', netmask: '\""$subnet_mask"\"'/' Vagrantfile
+  ((if_counter++))
+done
+
+##now remove interface config in Vagrantfile for 1 node
+##if 1, 3, or 4 interfaces set deployment type
+##if 2 interfaces remove 2nd interface and set deployment type
+if [ "$if_counter" == 1 ]; then
+  deployment_type="single_network"
+  remove_vagrant_network eth_replace1
+  remove_vagrant_network eth_replace2
+  remove_vagrant_network eth_replace3
+elif [ "$if_counter" == 2 ]; then
+  deployment_type="single_network"
+  second_interface=`echo $output | awk '{print $2}'`
+  remove_vagrant_network $second_interface
+  remove_vagrant_network eth_replace2
+elif [ "$if_counter" == 3 ]; then
+  deployment_type="three_network"
+  remove_vagrant_network eth_replace3
+else
+  deployment_type="multi_network"
+fi
+
+echo "${blue}Network detected: ${deployment_type}! ${reset}"
+
+if route | grep default; then
+  echo "${blue}Default Gateway Detected ${reset}"
+  host_default_gw=$(ip route | grep default | awk '{print $3}')
+  echo "${blue}Default Gateway: $host_default_gw ${reset}"
+  default_gw_interface=$(ip route get $host_default_gw | awk '{print $3}')
+  case "${interface_arr[$default_gw_interface]}" in
+           0)
+             echo "${blue}Default Gateway Detected on Admin Interface!${reset}"
+             sed -i 's/^.*default_gw =.*$/  default_gw = '\""$host_default_gw"\"'/' Vagrantfile
+             node_default_gw=$host_default_gw
+             ;;
+           1)
+             echo "${red}Default Gateway Detected on Private Interface!${reset}"
+             echo "${red}Private subnet should be private and not have Internet access!${reset}"
+             exit 1
+             ;;
+           2)
+             echo "${blue}Default Gateway Detected on Public Interface!${reset}"
+             sed -i 's/^.*default_gw =.*$/  default_gw = '\""$host_default_gw"\"'/' Vagrantfile
+             echo "${blue}Will setup NAT from Admin -> Public Network on VM!${reset}"
+             sed -i 's/^.*nat_flag =.*$/  nat_flag = true/' Vagrantfile
+             echo "${blue}Setting node gateway to be VM Admin IP${reset}"
+             node_default_gw=${interface_ip_arr[0]}
+             public_gateway=$host_default_gw
+             ;;
+           3)
+             echo "${red}Default Gateway Detected on Storage Interface!${reset}"
+             echo "${red}Storage subnet should be private and not have Internet access!${reset}"
+             exit 1
+             ;;
+           *)
+             echo "${red}Unable to determine which interface default gateway is on..Exiting!${reset}"
+             exit 1
+             ;;
+  esac
+else
+  #assumes 24 bit mask
+  defaultgw=`echo ${interface_ip_arr[0]} | cut -d. -f1-3`
+  firstip=.1
+  defaultgw=$defaultgw$firstip
+  echo "${blue}Unable to find default gateway.  Assuming it is $defaultgw ${reset}"
+  sed -i 's/^.*default_gw =.*$/  default_gw = '\""$defaultgw"\"'/' Vagrantfile
+  node_default_gw=$defaultgw
+fi
+
+if [ $base_config ]; then
+  if ! cp -f $base_config opnfv_ksgen_settings.yml; then
+    echo "{red}ERROR: Unable to copy $base_config to opnfv_ksgen_settings.yml${reset}"
+    exit 1
+  fi
+fi
+
+if [ $no_parse ]; then
+echo "${blue}Skipping parsing variables into settings file as no_parse flag is set${reset}"
+
+else
+
+echo "${blue}Gathering network parameters for Target System...this may take a few minutes${reset}"
+##Edit the ksgen settings appropriately
+##ksgen settings will be stored in /vagrant on the vagrant machine
+##if single node deployment all the variables will have the same ip
+##interface names will be enp0s3, enp0s8, enp0s9 in chef/centos7
+
+sed -i 's/^.*default_gw:.*$/default_gw:'" $node_default_gw"'/' opnfv_ksgen_settings.yml
+
+##replace private interface parameter
+##private interface will be of hosts, so we need to know the provisioned host interface name
+##we add biosdevname=0, net.ifnames=0 to the kickstart to use regular interface naming convention on hosts
+##replace IP for parameters with next IP that will be given to controller
+if [ "$deployment_type" == "single_network" ]; then
+  ##we also need to assign IP addresses to nodes
+  ##for single node, foreman is managing the single network, so we can't reserve them
+  ##not supporting single network anymore for now
+  echo "{blue}Single Network type is unsupported right now.  Please check your interface configuration.  Exiting. ${reset}"
+  exit 0
+
+elif [[ "$deployment_type" == "multi_network" || "$deployment_type" == "three_network" ]]; then
+
+  if [ "$deployment_type" == "three_network" ]; then
+    sed -i 's/^.*network_type:.*$/network_type: three_network/' opnfv_ksgen_settings.yml
+  fi
+
+  sed -i 's/^.*deployment_type:.*$/  deployment_type: '"$deployment_type"'/' opnfv_ksgen_settings.yml
+
+  ##get ip addresses for private network on controllers to make dhcp entries
+  ##required for controllers_ip_array global param
+  next_private_ip=${interface_ip_arr[1]}
+  type=_private
+  for node in controller1 controller2 controller3; do
+    next_private_ip=$(next_usable_ip $next_private_ip)
+    if [ ! "$next_private_ip" ]; then
+       printf '%s\n' 'deploy.sh: Unable to find next ip for private network for control nodes' >&2
+       exit 1
+    fi
+    sed -i 's/'"$node$type"'/'"$next_private_ip"'/g' opnfv_ksgen_settings.yml
+    controller_ip_array=$controller_ip_array$next_private_ip,
+  done
+
+  ##replace global param for controllers_ip_array
+  controller_ip_array=${controller_ip_array%?}
+  sed -i 's/^.*controllers_ip_array:.*$/  controllers_ip_array: '"$controller_ip_array"'/' opnfv_ksgen_settings.yml
+
+  ##now replace all the VIP variables.  admin//private can be the same IP
+  ##we have to use IP's here that won't be allocated to hosts at provisioning time
+  ##therefore we increment the ip by 10 to make sure we have a safe buffer
+  next_private_ip=$(increment_ip $next_private_ip 10)
+
+  grep -E '*private_vip|loadbalancer_vip|db_vip|amqp_vip|*admin_vip' opnfv_ksgen_settings.yml | while read -r line ; do
+    sed -i 's/^.*'"$line"'.*$/  '"$line $next_private_ip"'/' opnfv_ksgen_settings.yml
+    next_private_ip=$(next_usable_ip $next_private_ip)
+    if [ ! "$next_private_ip" ]; then
+       printf '%s\n' 'deploy.sh: Unable to find next ip for private network for vip replacement' >&2
+       exit 1
+    fi
+  done
+
+  ##replace foreman site
+  next_public_ip=${interface_ip_arr[2]}
+  sed -i 's/^.*foreman_url:.*$/  foreman_url:'" https:\/\/$next_public_ip"'\/api\/v2\//' opnfv_ksgen_settings.yml
+  ##replace public vips
+  next_public_ip=$(increment_ip $next_public_ip 10)
+  grep -E '*public_vip' opnfv_ksgen_settings.yml | while read -r line ; do
+    sed -i 's/^.*'"$line"'.*$/  '"$line $next_public_ip"'/' opnfv_ksgen_settings.yml
+    next_public_ip=$(next_usable_ip $next_public_ip)
+    if [ ! "$next_public_ip" ]; then
+       printf '%s\n' 'deploy.sh: Unable to find next ip for public network for vip replacement' >&2
+       exit 1
+    fi
+  done
+
+  ##replace public_network param
+  public_subnet=$(find_subnet $next_public_ip $public_subnet_mask)
+  sed -i 's/^.*public_network:.*$/  public_network:'" $public_subnet"'/' opnfv_ksgen_settings.yml
+  ##replace private_network param
+  private_subnet=$(find_subnet $next_private_ip $private_subnet_mask)
+  sed -i 's/^.*private_network:.*$/  private_network:'" $private_subnet"'/' opnfv_ksgen_settings.yml
+  ##replace storage_network
+  if [ "$deployment_type" == "three_network" ]; then
+    sed -i 's/^.*storage_network:.*$/  storage_network:'" $private_subnet"'/' opnfv_ksgen_settings.yml
+  else
+    next_storage_ip=${interface_ip_arr[3]}
+    storage_subnet=$(find_subnet $next_storage_ip $storage_subnet_mask)
+    sed -i 's/^.*storage_network:.*$/  storage_network:'" $storage_subnet"'/' opnfv_ksgen_settings.yml
+  fi
+
+  ##replace public_subnet param
+  public_subnet=$public_subnet'\'$public_short_subnet_mask
+  sed -i 's/^.*public_subnet:.*$/  public_subnet:'" $public_subnet"'/' opnfv_ksgen_settings.yml
+  ##replace private_subnet param
+  private_subnet=$private_subnet'\'$private_short_subnet_mask
+  sed -i 's/^.*private_subnet:.*$/  private_subnet:'" $private_subnet"'/' opnfv_ksgen_settings.yml
+
+  ##replace public_dns param to be foreman server
+  sed -i 's/^.*public_dns:.*$/  public_dns: '${interface_ip_arr[2]}'/' opnfv_ksgen_settings.yml
+
+  ##replace public_gateway
+  if [ -z "$public_gateway" ]; then
+    ##if unset then we assume its the first IP in the public subnet
+    public_subnet=$(find_subnet $next_public_ip $public_subnet_mask)
+    public_gateway=$(increment_subnet $public_subnet 1)
+  fi
+  sed -i 's/^.*public_gateway:.*$/  public_gateway:'" $public_gateway"'/' opnfv_ksgen_settings.yml
+
+  ##we have to define an allocation range of the public subnet to give
+  ##to neutron to use as floating IPs
+  ##we should control this subnet, so this range should work .150-200
+  ##but generally this is a bad idea and we are assuming at least a /24 subnet here
+  public_subnet=$(find_subnet $next_public_ip $public_subnet_mask)
+  public_allocation_start=$(increment_subnet $public_subnet 150)
+  public_allocation_end=$(increment_subnet $public_subnet 200)
+
+  sed -i 's/^.*public_allocation_start:.*$/  public_allocation_start:'" $public_allocation_start"'/' opnfv_ksgen_settings.yml
+  sed -i 's/^.*public_allocation_end:.*$/  public_allocation_end:'" $public_allocation_end"'/' opnfv_ksgen_settings.yml
+
+else
+  printf '%s\n' "deploy.sh: Unknown network type: $deployment_type" >&2
+  exit 1
+fi
+
+echo "${blue}Parameters Complete.  Settings have been set for Foreman. ${reset}"
+
+fi
+
+if [ $virtual ]; then
+  echo "${blue} Virtual flag detected, setting Khaleesi playbook to be opnfv-vm.yml ${reset}"
+  sed -i 's/opnfv.yml/opnfv-vm.yml/' bootstrap.sh
+fi
+
+echo "${blue}Starting Vagrant! ${reset}"
+
+##stand up vagrant
+if ! vagrant up; then
+  printf '%s\n' 'deploy.sh: Unable to start vagrant' >&2
+  exit 1
+else
+  echo "${blue}Foreman VM is up! ${reset}"
+fi
+
+if [ $virtual ]; then
+
+##Bring up VM nodes
+echo "${blue}Setting VMs up... ${reset}"
+nodes=`sed -nr '/nodes:/{:start /workaround/!{N;b start};//p}' opnfv_ksgen_settings.yml | sed -n '/^  [A-Za-z0-9]\+:$/p' | sed 's/\s*//g' | sed 's/://g'`
+##due to ODL Helium bug of OVS connecting to ODL too early, we need controllers to install first
+##this is fix kind of assumes more than I would like to, but for now it should be OK as we always have
+##3 static controllers
+compute_nodes=`echo $nodes | tr " " "\n" | grep -v controller | tr "\n" " "`
+controller_nodes=`echo $nodes | tr " " "\n" | grep controller | tr "\n" " "`
+nodes=${controller_nodes}${compute_nodes}
+
+for node in ${nodes}; do
+  cd /tmp
+
+  ##remove VM nodes incase it wasn't cleaned up
+  rm -rf /tmp/$node
+
+  ##clone bgs vagrant
+  ##will change this to be opnfv repo when commit is done
+  if ! git clone -b v1.0 https://github.com/trozet/bgs_vagrant.git $node; then
+    printf '%s\n' 'deploy.sh: Unable to clone vagrant repo' >&2
+    exit 1
+  fi
+
+  cd $node
+
+  if [ $base_config ]; then
+    if ! cp -f $base_config opnfv_ksgen_settings.yml; then
+      echo "{red}ERROR: Unable to copy $base_config to opnfv_ksgen_settings.yml${reset}"
+      exit 1
+    fi
+  fi
+
+  ##parse yaml into variables
+  eval $(parse_yaml opnfv_ksgen_settings.yml "config_")
+  ##find node type
+  node_type=config_nodes_${node}_type
+  node_type=$(eval echo \$$node_type)
+
+  ##find number of interfaces with ip and substitute in VagrantFile
+  output=`ifconfig | grep -E "^[a-zA-Z0-9]+:"| grep -Ev "lo|tun|virbr|vboxnet" | awk '{print $1}' | sed 's/://'`
+
+  if [ ! "$output" ]; then
+    printf '%s\n' 'deploy.sh: Unable to detect interfaces to bridge to' >&2
+    exit 1
+  fi
+
+
+  if_counter=0
+  for interface in ${output}; do
+
+    if [ "$if_counter" -ge 4 ]; then
+      break
+    fi
+    interface_ip=$(find_ip $interface)
+    if [ ! "$interface_ip" ]; then
+      continue
+    fi
+    case "${if_counter}" in
+           0)
+             mac_string=config_nodes_${node}_mac_address
+             mac_addr=$(eval echo \$$mac_string)
+             mac_addr=$(echo $mac_addr | sed 's/:\|-//g')
+             if [ -z "$mac_addr" ]; then
+                 echo "${red} Unable to find mac_address for $node! ${reset}"
+                 exit 1
+             fi
+             ;;
+           1)
+             if [ "$node_type" == "controller" ]; then
+               mac_string=config_nodes_${node}_private_mac
+               mac_addr=$(eval echo \$$mac_string)
+               if [ -z "$mac_addr" ]; then
+                 echo "${red} Unable to find private_mac for $node! ${reset}"
+                 exit 1
+               fi
+             else
+               ##generate random mac
+               mac_addr=$(echo -n 00-60-2F; dd bs=1 count=3 if=/dev/random 2>/dev/null |hexdump -v -e '/1 "-%02X"')
+             fi
+             mac_addr=$(echo $mac_addr | sed 's/:\|-//g')
+             ;;
+           *)
+             mac_addr=$(echo -n 00-60-2F; dd bs=1 count=3 if=/dev/random 2>/dev/null |hexdump -v -e '/1 "-%02X"')
+             mac_addr=$(echo $mac_addr | sed 's/:\|-//g')
+             ;;
+    esac
+    sed -i 's/^.*eth_replace'"$if_counter"'.*$/  config.vm.network "public_network", bridge: '\'"$interface"\'', :mac => '\""$mac_addr"\"'/' Vagrantfile
+    ((if_counter++))
+  done
+
+  ##now remove interface config in Vagrantfile for 1 node
+  ##if 1, 3, or 4 interfaces set deployment type
+  ##if 2 interfaces remove 2nd interface and set deployment type
+  if [ "$if_counter" == 1 ]; then
+    deployment_type="single_network"
+    remove_vagrant_network eth_replace1
+    remove_vagrant_network eth_replace2
+    remove_vagrant_network eth_replace3
+  elif [ "$if_counter" == 2 ]; then
+    deployment_type="single_network"
+    second_interface=`echo $output | awk '{print $2}'`
+    remove_vagrant_network $second_interface
+    remove_vagrant_network eth_replace2
+  elif [ "$if_counter" == 3 ]; then
+    deployment_type="three_network"
+    remove_vagrant_network eth_replace3
+  else
+    deployment_type="multi_network"
+  fi
+
+  ##modify provisioning to do puppet install, config, and foreman check-in
+  ##substitute host_name and dns_server in the provisioning script
+  host_string=config_nodes_${node}_hostname
+  host_name=$(eval echo \$$host_string)
+  sed -i 's/^host_name=REPLACE/host_name='$host_name'/' vm_nodes_provision.sh
+  ##dns server should be the foreman server
+  sed -i 's/^dns_server=REPLACE/dns_server='${interface_ip_arr[0]}'/' vm_nodes_provision.sh
+
+  ## remove bootstrap and NAT provisioning
+  sed -i '/nat_setup.sh/d' Vagrantfile
+  sed -i 's/bootstrap.sh/vm_nodes_provision.sh/' Vagrantfile
+
+  ## modify default_gw to be node_default_gw
+  sed -i 's/^.*default_gw =.*$/  default_gw = '\""$node_default_gw"\"'/' Vagrantfile
+
+  ## modify VM memory to be 4gig
+  sed -i 's/^.*vb.memory =.*$/     vb.memory = 4096/' Vagrantfile
+
+  echo "${blue}Starting Vagrant Node $node! ${reset}"
+
+  ##stand up vagrant
+  if ! vagrant up; then
+    echo "${red} Unable to start $node ${reset}"
+    exit 1
+  else
+    echo "${blue} $node VM is up! ${reset}"
+  fi
+
+done
+
+ echo "${blue} All VMs are UP! ${reset}"
+
+fi
diff --git a/foreman/ci/inventory/lf_pod2_ksgen_settings.yml b/foreman/ci/inventory/lf_pod2_ksgen_settings.yml
new file mode 100644 (file)
index 0000000..72935c9
--- /dev/null
@@ -0,0 +1,357 @@
+global_params:
+  admin_email: opnfv@opnfv.com
+  ha_flag: "true"
+  odl_flag: "true"
+  private_network:
+  storage_network:
+  controllers_hostnames_array: oscontroller1,oscontroller2,oscontroller3
+  controllers_ip_array:
+  amqp_vip:
+  private_subnet:
+  cinder_admin_vip:
+  cinder_private_vip:
+  cinder_public_vip:
+  db_vip:
+  glance_admin_vip:
+  glance_private_vip:
+  glance_public_vip:
+  heat_admin_vip:
+  heat_private_vip:
+  heat_public_vip:
+  heat_cfn_admin_vip:
+  heat_cfn_private_vip:
+  heat_cfn_public_vip:
+  horizon_admin_vip:
+  horizon_private_vip:
+  horizon_public_vip:
+  keystone_admin_vip:
+  keystone_private_vip:
+  keystone_public_vip:
+  loadbalancer_vip:
+  neutron_admin_vip:
+  neutron_private_vip:
+  neutron_public_vip:
+  nova_admin_vip:
+  nova_private_vip:
+  nova_public_vip:
+  external_network_flag: "true"
+  public_gateway:
+  public_dns:
+  public_network:
+  public_subnet:
+  public_allocation_start:
+  public_allocation_end:
+  deployment_type:
+network_type: multi_network
+default_gw:
+foreman:
+  seed_values:
+    - { name: heat_cfn, oldvalue: true, newvalue: false }
+workaround_puppet_version_lock: false
+opm_branch: master
+installer:
+  name: puppet
+  short_name: pupt
+  network:
+    auto_assign_floating_ip: false
+    variant:
+      short_name: m2vx
+    plugin:
+      name: neutron
+workaround_openstack_packstack_rpm: false
+tempest:
+  repo:
+    Fedora:
+      '19': http://REPLACE_ME/~REPLACE_ME/openstack-tempest-icehouse/fedora-19/
+      '20': http://REPLACE_ME/~REPLACE_ME/openstack-tempest-icehouse/fedora-20/
+    RedHat:
+       '7.0': https://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-7/
+  use_virtual_env: false
+  public_allocation_end: 10.2.84.71
+  skip:
+    files: null
+    tests: null
+  public_allocation_start: 10.2.84.51
+  physnet: physnet1
+  use_custom_repo: false
+  public_subnet_cidr: 10.2.84.0/24
+  public_subnet_gateway: 10.2.84.1
+  additional_default_settings:
+  - section: compute
+    option: flavor_ref
+    value: 1
+  cirros_image_file: cirros-0.3.1-x86_64-disk.img
+  setup_method: tempest/rpm
+  test_name: all
+  rdo:
+     version: juno
+     rpm: http://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm
+  rpm:
+    version: 20141201
+  dir: ~{{ nodes.tempest.remote_user }}/tempest-dir
+tmp:
+  node_prefix: '{{ node.prefix | reject("none") | join("-") }}-'
+  anchors:
+  - https://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm
+  - http://repos.fedorapeople.org/repos/openstack/openstack-juno/
+opm_repo: https://github.com/redhat-openstack/openstack-puppet-modules.git
+workaround_vif_plugging: false
+openstack_packstack_rpm: http://REPLACE_ME/brewroot/packages/openstack-puppet-modules/2013.2/9.el6ost/noarch/openstack-puppet-modules-2013.2-9.el6ost.noarch.rpm
+nodes:
+  compute1:
+    name: oscompute11.opnfv.com
+    hostname: oscompute11.opnfv.com
+    short_name: oscompute11
+    type: compute
+    host_type: baremetal
+    hostgroup: Compute
+    mac_address: "00:25:b5:a0:00:5e"
+    bmc_ip: 172.30.8.74
+    bmc_mac: "74:a2:e6:a4:14:9c"
+    bmc_user: admin
+    bmc_pass: octopus
+    ansible_ssh_pass: "Op3nStack"
+    admin_password: ""
+    groups:
+    - compute
+    - foreman_nodes
+    - puppet
+    - rdo
+    - neutron
+  compute2:
+    name: oscompute12.opnfv.com
+    hostname: oscompute12.opnfv.com
+    short_name: oscompute12
+    type: compute
+    host_type: baremetal
+    hostgroup: Compute
+    mac_address: "00:25:b5:a0:00:3e"
+    bmc_ip: 172.30.8.73
+    bmc_mac: "a8:9d:21:a0:15:9c"
+    bmc_user: admin
+    bmc_pass: octopus
+    ansible_ssh_pass: "Op3nStack"
+    admin_password: ""
+    groups:
+    - compute
+    - foreman_nodes
+    - puppet
+    - rdo
+    - neutron
+  controller1:
+    name: oscontroller1.opnfv.com
+    hostname: oscontroller1.opnfv.com
+    short_name: oscontroller1
+    type: controller
+    host_type: baremetal
+    hostgroup: Controller_Network_ODL
+    mac_address: "00:25:b5:a0:00:af"
+    bmc_ip: 172.30.8.66
+    bmc_mac: "a8:9d:21:c9:8b:56"
+    bmc_user: admin
+    bmc_pass: octopus
+    private_ip: controller1_private
+    private_mac: "00:25:b5:b0:00:1f"
+    ansible_ssh_pass: "Op3nStack"
+    admin_password: "octopus"
+    groups:
+    - controller
+    - foreman_nodes
+    - puppet
+    - rdo
+    - neutron
+  controller2:
+    name: oscontroller2.opnfv.com
+    hostname: oscontroller2.opnfv.com
+    short_name: oscontroller2
+    type: controller
+    host_type: baremetal
+    hostgroup: Controller_Network
+    mac_address: "00:25:b5:a0:00:9e"
+    bmc_ip: 172.30.8.75
+    bmc_mac: "a8:9d:21:c9:4d:26"
+    bmc_user: admin
+    bmc_pass: octopus
+    private_ip: controller2_private
+    private_mac: "00:25:b5:b0:00:de"
+    ansible_ssh_pass: "Op3nStack"
+    admin_password: "octopus"
+    groups:
+    - controller
+    - foreman_nodes
+    - puppet
+    - rdo
+    - neutron
+  controller3:
+    name: oscontroller3.opnfv.com
+    hostname: oscontroller3.opnfv.com
+    short_name: oscontroller3
+    type: controller
+    host_type: baremetal
+    hostgroup: Controller_Network
+    mac_address: "00:25:b5:a0:00:7e"
+    bmc_ip: 172.30.8.65
+    bmc_mac: "a8:9d:21:c9:3a:92"
+    bmc_user: admin
+    bmc_pass: octopus
+    private_ip: controller3_private
+    private_mac: "00:25:b5:b0:00:be"
+    ansible_ssh_pass: "Op3nStack"
+    admin_password: "octopus"
+    groups:
+    - controller
+    - foreman_nodes
+    - puppet
+    - rdo
+    - neutron
+workaround_mysql_centos7: true
+distro:
+  name: centos
+  centos:
+    '7.0':
+      repos: []
+  short_name: c
+  short_version: 70
+  version: '7.0'
+  rhel:
+    '7.0':
+      kickstart_url: http://REPLACE_ME/released/RHEL-7/7.0/Server/x86_64/os/
+      repos:
+      - section: rhel7-server-rpms
+        name: Packages for RHEL 7 - $basearch
+        baseurl: http://REPLACE_ME/rel-eng/repos/rhel-7.0/x86_64/
+        gpgcheck: 0
+      - section: rhel-7-server-update-rpms
+        name: Update Packages for Enterprise Linux 7 - $basearch
+        baseurl: http://REPLACE_ME/rel-eng/repos/rhel-7.0-z/x86_64/
+        gpgcheck: 0
+      - section: rhel-7-server-optional-rpms
+        name: Optional Packages for Enterprise Linux 7 - $basearch
+        baseurl: http://REPLACE_ME/released/RHEL-7/7.0/Server-optional/x86_64/os/
+        gpgcheck: 0
+      - section: rhel-7-server-extras-rpms
+        name: Optional Packages for Enterprise Linux 7 - $basearch
+        baseurl: http://REPLACE_ME/rel-eng/EXTRAS-7.0-RHEL-7-20140610.0/compose/Server/x86_64/os/
+        gpgcheck: 0
+    '6.5':
+      kickstart_url: http://REPLACE_ME/released/RHEL-6/6.5/Server/x86_64/os/
+      repos:
+      - section: rhel6.5-server-rpms
+        name: Packages for RHEL 6.5 - $basearch
+        baseurl: http://REPLACE_ME.REPLACE_ME/released/RHEL-6/6.5/Server/$basearch/os/Server
+        gpgcheck: 0
+      - section: rhel-6.5-server-update-rpms
+        name: Update Packages for Enterprise Linux 6.5 - $basearch
+        baseurl: http://REPLACE_ME.REPLACE_ME/rel-eng/repos/RHEL-6.5-Z/$basearch/
+        gpgcheck: 0
+      - section: rhel-6.5-server-optional-rpms
+        name: Optional Packages for Enterprise Linux 6.5 - $basearch
+        baseurl: http://REPLACE_ME.REPLACE_ME/released/RHEL-6/6.5/Server/optional/$basearch/os
+        gpgcheck: 0
+      - section: rhel6.5-server-rpms-32bit
+        name: Packages for RHEL 6.5 - i386
+        baseurl: http://REPLACE_ME.REPLACE_ME/released/RHEL-6/6.5/Server/i386/os/Server
+        gpgcheck: 0
+        enabled: 1
+      - section: rhel-6.5-server-update-rpms-32bit
+        name: Update Packages for Enterprise Linux 6.5 - i686
+        baseurl: http://REPLACE_ME.REPLACE_ME/rel-eng/repos/RHEL-6.5-Z/i686/
+        gpgcheck: 0
+        enabled: 1
+      - section: rhel-6.5-server-optional-rpms-32bit
+        name: Optional Packages for Enterprise Linux 6.5 - i386
+        baseurl: http://REPLACE_ME.REPLACE_ME/released/RHEL-6/6.5/Server/optional/i386/os
+        gpgcheck: 0
+        enabled: 1
+    subscription:
+      username: REPLACE_ME
+      password: HWj8TE28Qi0eP2c
+      pool: 8a85f9823e3d5e43013e3ddd4e2a0977
+  config:
+    selinux: permissive
+    ntp_server: 0.pool.ntp.org
+    dns_servers:
+    - 10.4.1.1
+    - 10.4.0.2
+    reboot_delay: 1
+    initial_boot_timeout: 180
+node:
+  prefix:
+  - rdo
+  - pupt
+  - ffqiotcxz1
+  - null
+product:
+  repo_type: production
+  name: rdo
+  short_name: rdo
+  rpm:
+    CentOS: https://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm
+    Fedora: https://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm
+    RedHat: https://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm
+  short_version: ju
+  repo:
+    production:
+      CentOS:
+        7.0.1406: http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-7
+        '6.5': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-6
+        '7.0': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-7
+      Fedora:
+        '20': http://repos.fedorapeople.org/repos/openstack/openstack-juno/fedora-20
+        '21': http://repos.fedorapeople.org/repos/openstack/openstack-juno/fedora-21
+      RedHat:
+        '6.6': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-6
+        '6.5': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-6
+        '7.0': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-7
+  version: juno
+  config:
+    enable_epel: y
+  short_repo: prod
+tester:
+  name: tempest
+distro_reboot_options: '--no-wall '' Reboot is triggered by Ansible'' '
+job:
+  verbosity: 1
+  archive:
+  - '{{ tempest.dir }}/etc/tempest.conf'
+  - '{{ tempest.dir }}/etc/tempest.conf.sample'
+  - '{{ tempest.dir }}/*.log'
+  - '{{ tempest.dir }}/*.xml'
+  - /root/
+  - /var/log/
+  - /etc/nova
+  - /etc/ceilometer
+  - /etc/cinder
+  - /etc/glance
+  - /etc/keystone
+  - /etc/neutron
+  - /etc/ntp
+  - /etc/puppet
+  - /etc/qpid
+  - /etc/qpidd.conf
+  - /root
+  - /etc/yum.repos.d
+  - /etc/yum.repos.d
+topology:
+  name: multinode
+  short_name: mt
+workaround_neutron_ovs_udev_loop: true
+workaround_glance_table_utf8: false
+verbosity:
+  debug: 0
+  info: 1
+  warning: 2
+  warn: 2
+  errors: 3
+provisioner:
+  username: admin
+  network:
+    type: nova
+    name: external
+  skip: skip_provision
+  foreman_url: https://10.2.84.2/api/v2/
+  password: octopus
+  type: foreman
+workaround_nova_compute_fix: false
+workarounds:
+  enabled: true
diff --git a/foreman/ci/nat_setup.sh b/foreman/ci/nat_setup.sh
new file mode 100755 (executable)
index 0000000..349e416
--- /dev/null
@@ -0,0 +1,44 @@
+#!/usr/bin/env bash
+
+#NAT setup script to set up NAT from the Admin -> Public interface
+#on a Vagrant VM
+#Called by Vagrantfile in conjunction with deploy.sh
+#author: Tim Rozet (trozet@redhat.com)
+#
+#Uses Vagrant and VirtualBox
+#VagrantFile uses nat_setup.sh which sets up NAT
+#
+
+##make sure firewalld is stopped and disabled
+if ! systemctl stop firewalld; then
+  printf '%s\n' 'nat_setup.sh: Unable to stop firewalld' >&2
+  exit 1
+fi
+
+systemctl disable firewalld
+
+# Install iptables
+# Major version is pinned to force some consistency for Arno
+if ! yum -y install iptables-services-1*; then
+  printf '%s\n' 'nat_setup.sh: Unable to install iptables-services' >&2
+  exit 1
+fi
+
+##start and enable iptables service
+if ! systemctl start iptables; then
+  printf '%s\n' 'nat_setup.sh: Unable to start iptables-services' >&2
+  exit 1
+fi
+
+systemctl enable iptables
+
+##enable IP forwarding
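+# (takes effect immediately but does not persist across reboots)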
+echo 1 > /proc/sys/net/ipv4/ip_forward
+
+##Configure iptables
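+# enp0s10 is assumed to be the public-facing interface and enp0s8 the admin
+# interface of the Vagrant VM (adjust if your interface naming differs):
+# masquerade traffic leaving via the public interface, allow forwarding between
+# the admin and public interfaces, and accept all INPUT/OUTPUT traffic.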
+/sbin/iptables -t nat -I POSTROUTING -o enp0s10 -j MASQUERADE
+/sbin/iptables -I FORWARD 1 -i enp0s10 -o enp0s8 -m state --state RELATED,ESTABLISHED -j ACCEPT
+/sbin/iptables -I FORWARD 1 -i enp0s8 -o enp0s10 -j ACCEPT
+/sbin/iptables -I INPUT 1 -j ACCEPT
+/sbin/iptables -I OUTPUT 1 -j ACCEPT
+
diff --git a/foreman/ci/opnfv_ksgen_settings.yml b/foreman/ci/opnfv_ksgen_settings.yml
new file mode 100644 (file)
index 0000000..21840dd
--- /dev/null
@@ -0,0 +1,338 @@
+global_params:
+  admin_email: opnfv@opnfv.com
+  ha_flag: "true"
+  odl_flag: "true"
+  private_network:
+  storage_network:
+  controllers_hostnames_array: oscontroller1,oscontroller2,oscontroller3
+  controllers_ip_array:
+  amqp_vip:
+  private_subnet:
+  cinder_admin_vip:
+  cinder_private_vip:
+  cinder_public_vip:
+  db_vip:
+  glance_admin_vip:
+  glance_private_vip:
+  glance_public_vip:
+  heat_admin_vip:
+  heat_private_vip:
+  heat_public_vip:
+  heat_cfn_admin_vip:
+  heat_cfn_private_vip:
+  heat_cfn_public_vip:
+  horizon_admin_vip:
+  horizon_private_vip:
+  horizon_public_vip:
+  keystone_admin_vip:
+  keystone_private_vip:
+  keystone_public_vip:
+  loadbalancer_vip:
+  neutron_admin_vip:
+  neutron_private_vip:
+  neutron_public_vip:
+  nova_admin_vip:
+  nova_private_vip:
+  nova_public_vip:
+  external_network_flag: "true"
+  public_gateway:
+  public_dns:
+  public_network:
+  public_subnet:
+  public_allocation_start:
+  public_allocation_end:
+  deployment_type:
+network_type: multi_network
+default_gw:
+foreman:
+  seed_values:
+    - { name: heat_cfn, oldvalue: true, newvalue: false }
+workaround_puppet_version_lock: false
+opm_branch: master
+installer:
+  name: puppet
+  short_name: pupt
+  network:
+    auto_assign_floating_ip: false
+    variant:
+      short_name: m2vx
+    plugin:
+      name: neutron
+workaround_openstack_packstack_rpm: false
+tempest:
+  repo:
+    Fedora:
+      '19': http://REPLACE_ME/~REPLACE_ME/openstack-tempest-icehouse/fedora-19/
+      '20': http://REPLACE_ME/~REPLACE_ME/openstack-tempest-icehouse/fedora-20/
+    RedHat:
+       '7.0': https://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-7/
+  use_virtual_env: false
+  public_allocation_end: 10.2.84.71
+  skip:
+    files: null
+    tests: null
+  public_allocation_start: 10.2.84.51
+  physnet: physnet1
+  use_custom_repo: false
+  public_subnet_cidr: 10.2.84.0/24
+  public_subnet_gateway: 10.2.84.1
+  additional_default_settings:
+  - section: compute
+    option: flavor_ref
+    value: 1
+  cirros_image_file: cirros-0.3.1-x86_64-disk.img
+  setup_method: tempest/rpm
+  test_name: all
+  rdo:
+     version: juno
+     rpm: http://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm
+  rpm:
+    version: 20141201
+  dir: ~{{ nodes.tempest.remote_user }}/tempest-dir
+tmp:
+  node_prefix: '{{ node.prefix | reject("none") | join("-") }}-'
+  anchors:
+  - https://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm
+  - http://repos.fedorapeople.org/repos/openstack/openstack-juno/
+opm_repo: https://github.com/redhat-openstack/openstack-puppet-modules.git
+workaround_vif_plugging: false
+openstack_packstack_rpm: http://REPLACE_ME/brewroot/packages/openstack-puppet-modules/2013.2/9.el6ost/noarch/openstack-puppet-modules-2013.2-9.el6ost.noarch.rpm
+nodes:
+  compute:
+    name: oscompute11.opnfv.com
+    hostname: oscompute11.opnfv.com
+    short_name: oscompute11
+    type: compute
+    host_type: baremetal
+    hostgroup: Compute
+    mac_address: "10:23:45:67:89:AB"
+    bmc_ip: 10.4.17.2
+    bmc_mac: "10:23:45:67:88:AB"
+    bmc_user: root
+    bmc_pass: root
+    ansible_ssh_pass: "Op3nStack"
+    admin_password: ""
+    groups:
+    - compute
+    - foreman_nodes
+    - puppet
+    - rdo
+    - neutron
+  controller1:
+    name: oscontroller1.opnfv.com
+    hostname: oscontroller1.opnfv.com
+    short_name: oscontroller1
+    type: controller
+    host_type: baremetal
+    hostgroup: Controller_Network_ODL
+    mac_address: "10:23:45:67:89:AC"
+    bmc_ip: 10.4.17.3
+    bmc_mac: "10:23:45:67:88:AC"
+    bmc_user: root
+    bmc_pass: root
+    private_ip: controller1_private
+    private_mac: "10:23:45:67:87:AC"
+    ansible_ssh_pass: "Op3nStack"
+    admin_password: "octopus"
+    groups:
+    - controller
+    - foreman_nodes
+    - puppet
+    - rdo
+    - neutron
+  controller2:
+    name: oscontroller2.opnfv.com
+    hostname: oscontroller2.opnfv.com
+    short_name: oscontroller2
+    type: controller
+    host_type: baremetal
+    hostgroup: Controller_Network
+    mac_address: "10:23:45:67:89:AD"
+    bmc_ip: 10.4.17.4
+    bmc_mac: "10:23:45:67:88:AD"
+    bmc_user: root
+    bmc_pass: root
+    private_ip: controller2_private
+    private_mac: "10:23:45:67:87:AD"
+    ansible_ssh_pass: "Op3nStack"
+    admin_password: "octopus"
+    groups:
+    - controller
+    - foreman_nodes
+    - puppet
+    - rdo
+    - neutron
+  controller3:
+    name: oscontroller3.opnfv.com
+    hostname: oscontroller3.opnfv.com
+    short_name: oscontroller3
+    type: controller
+    host_type: baremetal
+    hostgroup: Controller_Network
+    mac_address: "10:23:45:67:89:AE"
+    bmc_ip: 10.4.17.5
+    bmc_mac: "10:23:45:67:88:AE"
+    bmc_user: root
+    bmc_pass: root
+    private_ip: controller3_private
+    private_mac: "10:23:45:67:87:AE"
+    ansible_ssh_pass: "Op3nStack"
+    admin_password: "octopus"
+    groups:
+    - controller
+    - foreman_nodes
+    - puppet
+    - rdo
+    - neutron
+workaround_mysql_centos7: true
+distro:
+  name: centos
+  centos:
+    '7.0':
+      repos: []
+  short_name: c
+  short_version: 70
+  version: '7.0'
+  rhel:
+    '7.0':
+      kickstart_url: http://REPLACE_ME/released/RHEL-7/7.0/Server/x86_64/os/
+      repos:
+      - section: rhel7-server-rpms
+        name: Packages for RHEL 7 - $basearch
+        baseurl: http://REPLACE_ME/rel-eng/repos/rhel-7.0/x86_64/
+        gpgcheck: 0
+      - section: rhel-7-server-update-rpms
+        name: Update Packages for Enterprise Linux 7 - $basearch
+        baseurl: http://REPLACE_ME/rel-eng/repos/rhel-7.0-z/x86_64/
+        gpgcheck: 0
+      - section: rhel-7-server-optional-rpms
+        name: Optional Packages for Enterprise Linux 7 - $basearch
+        baseurl: http://REPLACE_ME/released/RHEL-7/7.0/Server-optional/x86_64/os/
+        gpgcheck: 0
+      - section: rhel-7-server-extras-rpms
+        name: Optional Packages for Enterprise Linux 7 - $basearch
+        baseurl: http://REPLACE_ME/rel-eng/EXTRAS-7.0-RHEL-7-20140610.0/compose/Server/x86_64/os/
+        gpgcheck: 0
+    '6.5':
+      kickstart_url: http://REPLACE_ME/released/RHEL-6/6.5/Server/x86_64/os/
+      repos:
+      - section: rhel6.5-server-rpms
+        name: Packages for RHEL 6.5 - $basearch
+        baseurl: http://REPLACE_ME.REPLACE_ME/released/RHEL-6/6.5/Server/$basearch/os/Server
+        gpgcheck: 0
+      - section: rhel-6.5-server-update-rpms
+        name: Update Packages for Enterprise Linux 6.5 - $basearch
+        baseurl: http://REPLACE_ME.REPLACE_ME/rel-eng/repos/RHEL-6.5-Z/$basearch/
+        gpgcheck: 0
+      - section: rhel-6.5-server-optional-rpms
+        name: Optional Packages for Enterprise Linux 6.5 - $basearch
+        baseurl: http://REPLACE_ME.REPLACE_ME/released/RHEL-6/6.5/Server/optional/$basearch/os
+        gpgcheck: 0
+      - section: rhel6.5-server-rpms-32bit
+        name: Packages for RHEL 6.5 - i386
+        baseurl: http://REPLACE_ME.REPLACE_ME/released/RHEL-6/6.5/Server/i386/os/Server
+        gpgcheck: 0
+        enabled: 1
+      - section: rhel-6.5-server-update-rpms-32bit
+        name: Update Packages for Enterprise Linux 6.5 - i686
+        baseurl: http://REPLACE_ME.REPLACE_ME/rel-eng/repos/RHEL-6.5-Z/i686/
+        gpgcheck: 0
+        enabled: 1
+      - section: rhel-6.5-server-optional-rpms-32bit
+        name: Optional Packages for Enterprise Linux 6.5 - i386
+        baseurl: http://REPLACE_ME.REPLACE_ME/released/RHEL-6/6.5/Server/optional/i386/os
+        gpgcheck: 0
+        enabled: 1
+    subscription:
+      username: REPLACE_ME
+      password: HWj8TE28Qi0eP2c
+      pool: 8a85f9823e3d5e43013e3ddd4e2a0977
+  config:
+    selinux: permissive
+    ntp_server: 0.pool.ntp.org
+    dns_servers:
+    - 10.4.1.1
+    - 10.4.0.2
+    reboot_delay: 1
+    initial_boot_timeout: 180
+node:
+  prefix:
+  - rdo
+  - pupt
+  - ffqiotcxz1
+  - null
+product:
+  repo_type: production
+  name: rdo
+  short_name: rdo
+  rpm:
+    CentOS: https://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm
+    Fedora: https://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm
+    RedHat: https://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm
+  short_version: ju
+  repo:
+    production:
+      CentOS:
+        7.0.1406: http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-7
+        '6.5': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-6
+        '7.0': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-7
+      Fedora:
+        '20': http://repos.fedorapeople.org/repos/openstack/openstack-juno/fedora-20
+        '21': http://repos.fedorapeople.org/repos/openstack/openstack-juno/fedora-21
+      RedHat:
+        '6.6': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-6
+        '6.5': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-6
+        '7.0': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-7
+  version: juno
+  config:
+    enable_epel: y
+  short_repo: prod
+tester:
+  name: tempest
+distro_reboot_options: '--no-wall '' Reboot is triggered by Ansible'' '
+job:
+  verbosity: 1
+  archive:
+  - '{{ tempest.dir }}/etc/tempest.conf'
+  - '{{ tempest.dir }}/etc/tempest.conf.sample'
+  - '{{ tempest.dir }}/*.log'
+  - '{{ tempest.dir }}/*.xml'
+  - /root/
+  - /var/log/
+  - /etc/nova
+  - /etc/ceilometer
+  - /etc/cinder
+  - /etc/glance
+  - /etc/keystone
+  - /etc/neutron
+  - /etc/ntp
+  - /etc/puppet
+  - /etc/qpid
+  - /etc/qpidd.conf
+  - /root
+  - /etc/yum.repos.d
+  - /etc/yum.repos.d
+topology:
+  name: multinode
+  short_name: mt
+workaround_neutron_ovs_udev_loop: true
+workaround_glance_table_utf8: false
+verbosity:
+  debug: 0
+  info: 1
+  warning: 2
+  warn: 2
+  errors: 3
+provisioner:
+  username: admin
+  network:
+    type: nova
+    name: external
+  skip: skip_provision
+  foreman_url: https://10.2.84.2/api/v2/
+  password: octopus
+  type: foreman
+workaround_nova_compute_fix: false
+workarounds:
+  enabled: true
+
diff --git a/foreman/ci/reload_playbook.yml b/foreman/ci/reload_playbook.yml
new file mode 100644 (file)
index 0000000..9e3d053
--- /dev/null
@@ -0,0 +1,16 @@
+---
+- hosts: all
+  tasks:
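+    # async 1 / poll 0 fires the shutdown and returns immediately, so the play
+    # does not hang on the SSH connection that is dropped when the host reboots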
+    - name: restart machine
+      shell: sleep 2 && shutdown -r now "Ansible updates triggered"
+      async: 1
+      poll: 0
+      ignore_errors: true
+
+    - name: waiting for server to come back
+      local_action: wait_for host="{{ ansible_ssh_host }}"
+                    port="{{ ansible_ssh_port }}"
+                    state=started
+                    delay=60
+                    timeout=180
+      sudo: false
diff --git a/foreman/ci/vm_nodes_provision.sh b/foreman/ci/vm_nodes_provision.sh
new file mode 100755 (executable)
index 0000000..d0bba64
--- /dev/null
@@ -0,0 +1,91 @@
+#!/usr/bin/env bash
+
+#bootstrap script for VM OPNFV nodes
+#author: Tim Rozet (trozet@redhat.com)
+#
+#Uses Vagrant and VirtualBox
+#VagrantFile uses vm_nodes_provision.sh which configures linux on nodes
+#Depends on Foreman being up to be able to register and apply puppet
+#
+#Pre-requisites:
+#Target system should be a CentOS 7 Vagrant VM
+
+##VARS
+reset=`tput sgr0`
+blue=`tput setaf 4`
+red=`tput setaf 1`
+green=`tput setaf 2`
+
+host_name=REPLACE
+dns_server=REPLACE
+##END VARS
+
+##set hostname
+echo "${blue} Setting Hostname ${reset}"
+hostnamectl set-hostname $host_name
+
+##remove NAT DNS
+echo "${blue} Removing DNS server on first interface ${reset}"
+if ! grep 'PEERDNS=no' /etc/sysconfig/network-scripts/ifcfg-enp0s3; then
+  echo "PEERDNS=no" >> /etc/sysconfig/network-scripts/ifcfg-enp0s3
+  systemctl restart NetworkManager
+fi
+
+if ! ping www.google.com -c 5; then 
+  echo "${red} No internet connection, check your route and DNS setup ${reset}"
+  exit 1
+fi
+
+# Install EPEL repo for access to many other yum repos
+# Major version is pinned to force some consistency for Arno
+yum install -y epel-release-7*
+
+# Update device-mapper-libs, needed for libvirtd on compute nodes
+# Major version is pinned to force some consistency for Arno
+if ! yum -y upgrade device-mapper-libs-1*; then
+   echo "${red} WARN: Unable to upgrade device-mapper-libs...nova-compute may not function ${reset}"
+fi
+
+# Install other required packages
+# Major version is pinned to force some consistency for Arno
+echo "${blue} Installing Puppet ${reset}"
+if ! yum install -y puppet-3*; then
+  printf '%s\n' 'vm_nodes_provision.sh: failed to install required packages' >&2
+  exit 1
+fi
+
+echo "${blue} Configuring puppet ${reset}"
+cat > /etc/puppet/puppet.conf << EOF
+
+[main]
+vardir = /var/lib/puppet
+logdir = /var/log/puppet
+rundir = /var/run/puppet
+ssldir = \$vardir/ssl
+
+[agent]
+pluginsync      = true
+report          = true
+ignoreschedules = true
+daemon          = false
+ca_server       = foreman-server.opnfv.com
+certname        = $host_name
+environment     = production
+server          = foreman-server.opnfv.com
+runinterval     = 600
+
+EOF
+
+# Setup puppet to run on system reboot
+/sbin/chkconfig --level 345 puppet on
+
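+# One-time agent run against a tag that matches nothing: this registers the
+# node with the puppet master (certificate request/signing) without applying
+# any configuration yet.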
+/usr/bin/puppet agent --config /etc/puppet/puppet.conf -o --tags no_such_tag --server foreman-server.opnfv.com --no-daemonize
+
+sync
+
+# Inform the build system that we are done.
+echo "Informing Foreman that we are built"
+wget -q -O /dev/null --no-check-certificate http://foreman-server.opnfv.com:80/unattended/built
+
+echo "Starting puppet"
+systemctl start puppet
diff --git a/foreman/docs/src/installation-instructions.rst b/foreman/docs/src/installation-instructions.rst
new file mode 100644 (file)
index 0000000..2ac872d
--- /dev/null
@@ -0,0 +1,371 @@
+=======================================================================================================
+OPNFV Installation instructions for the Arno release of OPNFV when using Foreman as a deployment tool
+=======================================================================================================
+
+
+.. contents:: Table of Contents
+   :backlinks: none
+
+
+Abstract
+========
+
+This document describes how to install the Arno release of OPNFV when using Foreman/QuickStack as a deployment tool, covering its limitations, dependencies and required system resources.
+
+License
+=======
+Arno release of OPNFV when using Foreman as a deployment tool Docs (c) by Tim Rozet (RedHat)
+
+Arno release of OPNFV when using Foreman as a deployment tool Docs are licensed under a Creative Commons Attribution 4.0 International License. You should have received a copy of the license along with this. If not, see <http://creativecommons.org/licenses/by/4.0/>.
+
+Version history
+===================
+
++--------------------+--------------------+--------------------+--------------------+
+| **Date**           | **Ver.**           | **Author**         | **Comment**        |
+|                    |                    |                    |                    |
++--------------------+--------------------+--------------------+--------------------+
+| 2015-05-07         | 0.0.1              | Tim Rozet          | First draft        |
+|                    |                    | (RedHat)           |                    |
++--------------------+--------------------+--------------------+--------------------+
+| 2015-05-27         | 0.0.2              | Christopher Price  | Minor changes &    |
+|                    |                    | (Ericsson AB)      | formatting         |
++--------------------+--------------------+--------------------+--------------------+
+| 2015-06-02         | 0.0.3              | Christopher Price  | Minor changes &    |
+|                    |                    | (Ericsson AB)      | formatting         |
++--------------------+--------------------+--------------------+--------------------+
+| 2015-06-03         | 0.0.4              | Ildiko Vancsa      | Minor changes      |
+|                    |                    | (Ericsson)         |                    |
++--------------------+--------------------+--------------------+--------------------+
+
+
+Introduction
+============
+
+This document describes the steps to install an OPNFV Arno reference platform, as defined by the Bootstrap/Getting-Started (BGS) Project using the Foreman/QuickStack installer.
+
+The audience is assumed to have a good background in networking and Linux administration.
+
+Preface
+=======
+
+Foreman/QuickStack uses the Foreman Open Source project as a server management tool, which in turn manages and executes Genesis/QuickStack.  Genesis/QuickStack consists of layers of Puppet modules that are capable of provisioning the OPNFV Target System (3 controllers, n number of compute nodes).
+
+The Genesis repo contains the necessary tools to install and deploy an OPNFV target system using Foreman/QuickStack.  These tools consist of the Foreman/QuickStack bootable ISO (``arno.2015.1.0.foreman.iso``), and the automatic deployment script (``deploy.sh``).
+
+An OPNFV install requires a "Jumphost" in order to operate.  The bootable ISO will allow you to install a customized CentOS 7 release to the Jumphost, which then gives you the required packages needed to run ``deploy.sh``.  If you already have a Jumphost with CentOS 7 installed, you may choose to ignore the ISO step and instead move directly to running ``deploy.sh``.  In this case, ``deploy.sh`` will install the necessary packages for you in order to execute.
+
+``deploy.sh`` installs Foreman/QuickStack VM server using Vagrant with VirtualBox as its provider.  This VM is then used to provision the OPNFV target system (3 controllers, n compute nodes).  These nodes can be either virtual or bare metal. This guide contains instructions for installing both.
+
+Setup Requirements
+==================
+
+Jumphost Requirements
+---------------------
+
+The Jumphost requirements are outlined below:
+
+1.     CentOS 7 (from ISO or self-installed).
+
+2.     Root access.
+
+3.     libvirt or other hypervisors disabled (no kernel modules loaded).
+
+4.     3-4 NICs, untagged (no 802.1Q tagging), with IP addresses.
+
+5.     Internet access for downloading packages, with a default gateway configured.
+
+6.     4 GB of RAM for a bare metal deployment, 24 GB of RAM for a VM deployment.
+
+Network Requirements
+--------------------
+
+Network requirements include:
+
+1.     No DHCP or TFTP server running on networks used by OPNFV.
+
+2.     3-4 separate VLANs (untagged) with connectivity between Jumphost and nodes (bare metal deployment only).  These make up the admin, private, public and optional storage networks.
+
+3.     Lights-out (OOB) network access from the Jumphost, with IPMI enabled on the nodes (bare metal deployment only).
+
+4.     Admin or public network has Internet access, meaning a gateway and DNS availability.
+
+*Note: Storage network will be consolidated to the private network if only 3 networks are used.*
+
+Bare Metal Node Requirements
+----------------------------
+
+Bare metal nodes require:
+
+1.     IPMI enabled on OOB interface for power control.
+
+2.     BIOS boot priority should be PXE first then local hard disk.
+
+3.     BIOS PXE interface should include the admin network mentioned above.
+
+Execution Requirements (Bare Metal Only)
+----------------------------------------
+
+In order to execute a deployment, one must gather the following information:
+
+1.     IPMI IP addresses for the nodes.
+
+2.     IPMI login information for the nodes (user/pass).
+
+3.     MAC address of admin interfaces on nodes.
+
+4.     MAC address of private interfaces on 3 nodes that will be controllers.
+
+
+Installation High-Level Overview - Bare Metal Deployment
+========================================================
+
+The setup presumes that you have 6 bare metal servers and have already set up connectivity on at least 3 interfaces for all servers via a TOR switch or other network implementation.
+
+The physical TOR switches are **not** automatically configured from the OPNFV reference platform.  All the networks involved in the OPNFV infrastructure, as well as the provider networks and the private tenant VLANs, need to be manually configured.
+
+The Jumphost can be installed using the bootable ISO.  The Jumphost should then be configured with an IP gateway on its admin or public interface and configured with a working DNS server.  The Jumphost should also have routable access to the lights out network.
+
+``deploy.sh`` is then executed in order to install the Foreman/QuickStack Vagrant VM.  ``deploy.sh`` uses a YAML-formatted configuration file to determine how to install and provision the OPNFV target system.  The information gathered under section `Execution Requirements (Bare Metal Only)`_ is put into this configuration file.
+
+``deploy.sh`` brings up a CentOS 7 Vagrant VM, provided by VirtualBox.  The VM then executes an Ansible project called Khaleesi in order to install Foreman and QuickStack.  Once the Foreman/QuickStack VM is up, Foreman will be configured with the nodes' information.  This includes MAC address, IPMI, OpenStack type (controller, compute, OpenDaylight controller) and other information.  At this point Khaleesi makes a REST API call to Foreman to instruct it to provision the hardware.
+
+Foreman will then reboot the nodes via IPMI.  The nodes should already be set to PXE boot first off the admin interface.  Foreman will then allow the nodes to PXE and install CentOS 7 as well as Puppet.  Foreman/QuickStack VM server runs a Puppet Master and the nodes query this master to get their appropriate OPNFV configuration.  The nodes will then reboot one more time and once back up, will DHCP on their private, public and storage NICs to gain IP addresses.  The nodes will now check in via Puppet and start installing OPNFV.
+
+Khaleesi will wait until these nodes are fully provisioned and then return a success or failure based on the outcome of the Puppet application.
+
+Installation High-Level Overview - VM Deployment
+================================================
+
+The VM nodes deployment operates almost the same way as the bare metal deployment, with a few differences.  ``deploy.sh`` still installs the Foreman/QuickStack VM the exact same way; however, the part of the Khaleesi Ansible playbook which IPMI reboots/PXE boots the servers is skipped.  Instead, ``deploy.sh`` brings up N additional Vagrant VMs (where N is 3 control nodes + n compute nodes).  These VMs already come up with CentOS 7, so instead of re-provisioning the entire VM, ``deploy.sh`` initiates a small Bash script that signals to Foreman that those nodes are built and installs/configures Puppet on them.
+
+To Foreman these nodes look like they have just been built, and they register the same way as bare metal nodes.
+
+Installation Guide - Bare Metal Deployment
+==========================================
+
+This section goes step-by-step on how to correctly install and provision the OPNFV target system to bare metal nodes.
+
+Install Bare Metal Jumphost
+---------------------------
+
+1.  If your Jumphost does not have CentOS 7 already on it, or you would like to do a fresh install, then download the Foreman/QuickStack bootable ISO from <http://artifacts.opnfv.org/arno.2015.1.0/foreman/arno.2015.1.0.foreman.iso>.
+
+2.  Boot the ISO off of a USB or other installation media and walk through installing OPNFV CentOS 7.
+
+3.  After the OS is installed, log in to your Jumphost as root.
+
+4.  Configure IP addresses on 3-4 interfaces that you have selected as your admin, private, public, and storage (optional) networks.
+
+5.  Configure the IP gateway to the Internet, preferably on the public interface.
+
+6.  Configure your ``/etc/resolv.conf`` to point to a DNS server (8.8.8.8 is provided by Google).
+
+7.  Disable selinux:
+
+    - ``setenforce 0``
+    - ``sed -i 's/SELINUX=.*/SELINUX=permissive/' /etc/selinux/config``
+
+8.  Disable firewalld:
+
+    - ``systemctl stop firewalld``
+    - ``systemctl disable firewalld``
+
+Creating an Inventory File
+--------------------------
+
+You now need to take the MAC address/IPMI info gathered in section `Execution Requirements (Bare Metal Only)`_ and create the YAML inventory (also known as configuration) file for ``deploy.sh``.
+
+1.  Copy the ``opnfv_ksgen_settings.yml`` file from ``/root/bgs_vagrant/`` to another directory and rename it as you like, for example: ``/root/my_ksgen_settings.yml``
+
+2.  Edit the file in your favorite editor.  There is a lot of information in this file, but you really only need to be concerned with the "nodes:" dictionary.
+
+3.  The nodes dictionary contains each bare metal host you want to deploy.  You can have 1 or more compute nodes and must have 3 controller nodes (these are already defined for you).  It is optional at this point to add more compute nodes into the dictionary.  You must use a different name, hostname, short_name and dictionary keyname for each node.
+
+4.  Once you have decided on your node definitions, you now need to modify the MAC address/IPMI info to match your hardware (an example node entry is shown after this list).  Edit the following values for each node:
+
+    - ``mac_address``: change to MAC address of that node's admin NIC (defaults to 1st NIC)
+    - ``bmc_ip``: change to IP Address of BMC (out-of-band)/IPMI IP
+    - ``bmc_mac``: same as above, but MAC address
+    - ``bmc_user``: IPMI username
+    - ``bmc_pass``: IPMI password
+
+5.  Also edit the following for only controller nodes:
+
+    - ``private_mac`` - change to MAC address of node's private NIC (defaults to 2nd NIC)
+
+6.  Save your changes.
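+
+For reference, a minimal edited compute node entry might look like the following (the MAC addresses, BMC address and credentials below are placeholders for illustration only and must be replaced with the values gathered for your own hardware)::
+
+    compute:
+      name: oscompute11.opnfv.com
+      hostname: oscompute11.opnfv.com
+      short_name: oscompute11
+      type: compute
+      host_type: baremetal
+      hostgroup: Compute
+      mac_address: "00:11:22:33:44:55"
+      bmc_ip: 192.168.20.11
+      bmc_mac: "00:11:22:33:44:56"
+      bmc_user: admin
+      bmc_pass: password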
+
+Running ``deploy.sh``
+---------------------
+
+You are now ready to deploy OPNFV!  ``deploy.sh`` will use your ``/tmp/`` directory to store its Vagrant VMs.  Your Foreman/QuickStack Vagrant VM will be running out of ``/tmp/bgs_vagrant``.
+
+It is also recommended that you power off your nodes before running ``deploy.sh``.  If there are DHCP servers or other network services running on those nodes, they may conflict with the installation.
+
+Follow the steps below to execute:
+
+1.  ``cd /root/bgs_vagrant``
+
+2.  ``./deploy.sh -base_config </root/my_ksgen_settings.yml>``
+
+3.  It will take about 20-25 minutes to install the Foreman/QuickStack VM.  If something goes wrong during this part of the process, it is most likely a problem with the setup of your Jumphost.  You will also notice different outputs in your shell.  When you see messages that say "TASK:" or "PLAY:", this is Khaleesi running and installing Foreman/QuickStack inside of your VM or deploying your nodes.  Look for "PLAY [Deploy Nodes]" as a sign that Foreman/QuickStack is finished installing and your nodes are now being rebuilt.
+
+4.  Your nodes will take 40-60 minutes to re-install CentOS 7 and install/configure OPNFV.  When complete you will see "Finished: SUCCESS".
+
+.. _setup_verify:
+
+Verifying the Setup
+-------------------
+
+Now that the installer has finished it is a good idea to check and make sure things are working correctly.  To access your Foreman/QuickStack VM:
+
+1.  ``cd /tmp/bgs_vagrant``
+
+2.  ``vagrant ssh`` (password is "vagrant")
+
+3.  You are now in the VM and can check the status of Foreman service, etc.  For example: ``systemctl status foreman``
+
+4.  Type "exit" and leave the Vagrant VM.  Now execute: ``cat /tmp/bgs_vagrant/opnfv_ksgen_settings.yml | grep foreman_url``
+
+5.  This is your Foreman URL on your public interface.  You can go to your web browser, ``http://<foreman_ip>``, login will be "admin"/"octopus".  This way you can look around in Foreman and check that your hosts are in a good state, etc.
+
+6.  In the Foreman GUI, you can now go to Infrastructure -> Global Parameters.  This is a list of all the variables being handed to Puppet for configuring OPNFV.  Look for ``horizon_public_vip``.  This is the IP address of the Horizon GUI.
+
+    **Note: You can find out more about how to use Foreman by going to http://www.theforeman.org/ or by watching a walkthrough video here: https://bluejeans.com/s/89gb/**
+
+7.  Now go to your web browser and insert the Horizon public VIP.  The login will be "admin"/"octopus".
+
+8.  You are now able to follow the `OpenStack Verification <openstack_verify_>`_ section.
+
+.. _openstack_verify:
+
+OpenStack Verification
+----------------------
+
+Now that you have Horizon access, let's make sure that OpenStack and the OPNFV target system are working correctly (an equivalent command-line sketch follows this list):
+
+1.  In Horizon, click Project -> Compute -> Volumes, Create Volume
+
+2.  Make a volume "test_volume" of size 1 GB
+
+3.  Now in the left pane, click Compute -> Images, click Create Image
+
+4.  Insert a name "cirros", Insert an Image Location ``http://download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img``
+
+5.  Select format "QCOW2", select Public, then hit Create Image
+
+6.  Now click Project -> Network -> Networks, click Create Network
+
+7.  Enter a name "test_network", click Next
+
+8.  Enter a subnet name "test_subnet", and enter Network Address ``10.0.0.0/24``, click Next
+
+9.  Enter ``10.0.0.5,10.0.0.9`` under Allocation Pools, then hit Create
+
+10. Now go to Project -> Compute -> Instances, click Launch Instance
+
+11. Enter Instance Name "cirros1", select Instance Boot Source "Boot from image", and then select Image Name "cirros"
+
+12. Click Launch, status should show "Spawning" while it is being built
+
+13. You can now repeat steps 11 and 12, but create a "cirros2" named instance
+
+14. Once both instances are up you can see their IP addresses on the Instances page.  Click the Instance Name of cirros1.
+
+15. Now click the "Console" tab and login as "cirros"/"cubswin" :)
+
+16. Verify you can ping the IP address of cirros2
+
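+The same verification can also be sketched from the command line on a controller node, assuming the Juno python clients are installed and admin credentials have been sourced (the image URL and names match the Horizon steps above; the network ID for ``nova boot`` comes from ``neutron net-list``)::
+
+    cinder create --display-name test_volume 1
+    glance image-create --name cirros --disk-format qcow2 --container-format bare \
+        --is-public True --copy-from http://download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img
+    neutron net-create test_network
+    neutron subnet-create --name test_subnet --allocation-pool start=10.0.0.5,end=10.0.0.9 \
+        test_network 10.0.0.0/24
+    nova boot --flavor 1 --image cirros --nic net-id=<test_network_id> cirros1
+    nova boot --flavor 1 --image cirros --nic net-id=<test_network_id> cirros2
+    nova list
+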
+Congratulations, you have successfully installed OPNFV!
+
+Installation Guide - VM Deployment
+==================================
+
+This section goes step-by-step on how to correctly install and provision the OPNFV target system to VM nodes.
+
+Install Jumphost
+----------------
+
+Follow the instructions in the `Install Bare Metal Jumphost`_ section.
+
+Running ``deploy.sh``
+---------------------------
+
+You are now ready to deploy OPNFV!  ``deploy.sh`` will use your ``/tmp/`` directory to store its Vagrant VMs.  Your Foreman/QuickStack Vagrant VM will run out of ``/tmp/bgs_vagrant``.  Your compute and subsequent controller nodes will run in:
+
+- ``/tmp/compute``
+- ``/tmp/controller1``
+- ``/tmp/controller2``
+- ``/tmp/controller3``
+
+Each VM will be brought up and bridged to your Jumphost NICs.  ``deploy.sh`` will first bring up your Foreman/QuickStack Vagrant VM and afterwards it will bring up each of the nodes listed above, in order.
+
+Follow the steps below to execute:
+
+1.  ``cd /root/bgs_vagrant``
+
+2.  ``./deploy.sh -virtual``
+
+3.  It will take about 20-25 minutes to install the Foreman/QuickStack VM.  If something goes wrong during this part of the process, it is most likely a problem with the setup of your Jumphost.  You will also notice different outputs in your shell.  When you see messages that say "TASK:" or "PLAY:", this is Khaleesi running and installing Foreman/QuickStack inside of your VM or deploying your nodes.  When you see "Foreman is up!", deploy will move on to bringing up your other nodes.
+
+4.  ``deploy.sh`` will now bring up your other nodes.  Look for logging messages like "Starting Vagrant Node <node name>" and "<node name> VM is up!"  These indicate how far along in the process you are.  ``deploy.sh`` will start each Vagrant VM, then run provisioning scripts to inform Foreman they are built and to initiate Puppet.
+
+5.  The speed at which nodes are provisioned is totally dependent on your Jumphost server specs.  When complete you will see "All VMs are UP!"
+
+Verifying the Setup - VMs
+-------------------------
+
+Follow the instructions in the `Verifying the Setup <setup_verify_>`_ section.
+
+Also, for VM deployment you are able to easily access your nodes by going to ``/tmp/<node name>`` and then ``vagrant ssh`` (password is "vagrant").  You can use this to go to a controller and check OpenStack services, OpenDaylight, etc.
+
+OpenStack Verification - VMs
+----------------------------
+
+Follow the steps in `OpenStack Verification <openstack_verify_>`_ section.
+
+Frequently Asked Questions
+==========================
+
+License
+=======
+
+All Foreman/QuickStack and "common" entities are protected by the `Apache 2.0 License <http://www.apache.org/licenses/>`_.
+
+References
+==========
+
+OPNFV
+-----
+
+`OPNFV Home Page <http://www.opnfv.org>`_
+
+`OPNFV Genesis project page <https://wiki.opnfv.org/get_started>`_
+
+OpenStack
+---------
+
+`OpenStack Juno Release artifacts <http://www.openstack.org/software/juno>`_
+
+`OpenStack documentation <http://docs.openstack.org>`_
+
+OpenDaylight
+------------
+
+`OpenDaylight artifacts <http://www.opendaylight.org/software/downloads>`_
+
+Foreman
+-------
+
+`Foreman documentation <http://theforeman.org/documentation.html>`_
+
+:Authors: Tim Rozet (trozet@redhat.com)
+:Version: 0.0.3
+
+**Documentation tracking**
+
+Revision: _sha1_
+
+Build date:  _date_
+
diff --git a/foreman/docs/src/release-notes.rst b/foreman/docs/src/release-notes.rst
new file mode 100644 (file)
index 0000000..f9fcb37
--- /dev/null
@@ -0,0 +1,193 @@
+===========================================================================================
+OPNFV Release Note for  the Arno release of OPNFV when using Foreman as a deployment tool
+===========================================================================================
+
+
+.. contents:: Table of Contents
+   :backlinks: none
+
+
+Abstract
+========
+
+This document provides the release notes for the Arno release with the Foreman/QuickStack deployment toolchain.
+
+License
+=======
+
+All Foreman/QuickStack and "common" entities are protected by the Apache License ( http://www.apache.org/licenses/ )
+
+
+Version history
+===============
+
++--------------------+--------------------+--------------------+--------------------+
+| **Date**           | **Ver.**           | **Author**         | **Comment**        |
+|                    |                    |                    |                    |
++--------------------+--------------------+--------------------+--------------------+
+| 2015-04-16         | 0.1.0              | Tim Rozet          | First draft        |
+|                    |                    |                    |                    |
++--------------------+--------------------+--------------------+--------------------+
+| 2015-06-02         | 0.1.1              | Chris Price        | Minor Edits        |
+|                    |                    |                    |                    |
++--------------------+--------------------+--------------------+--------------------+
+| 2015-06-03         | 0.1.2              | Tim Rozet          | Minor Edits        |
+|                    |                    |                    |                    |
++--------------------+--------------------+--------------------+--------------------+
+
+Important notes
+===============
+
+This is the initial OPNFV Arno release that implements the deploy stage of the OPNFV CI pipeline.
+
+Carefully follow the installation instructions, which guide a user on how to deploy OPNFV using the Foreman/QuickStack installer.
+
+Summary
+=======
+
+The Arno release with the Foreman/QuickStack deployment toolchain will establish an OPNFV target system on a Pharos compliant lab infrastructure.  The current definition of an OPNFV target system is an OpenStack Juno version combined with OpenDaylight version Helium.  The system is deployed with OpenStack High Availability (HA) for most OpenStack services.  OpenDaylight is deployed in non-HA form as HA is not available for the Arno release.  Ceph storage is used as the Cinder backend, and is the only supported storage for Arno.  Ceph is set up as 3 OSDs and 3 Monitors, one OSD+Mon per Controller node.
+
+- Documentation is built by Jenkins
+- .iso image is built by Jenkins
+- Jenkins deploys an Arno release with the Foreman/QuickStack deployment toolchain on bare metal, which includes 3 control+network nodes and 2 compute nodes.
+
+Release Data
+============
+
++--------------------------------------+--------------------------------------+
+| **Project**                          | genesis                              |
+|                                      |                                      |
++--------------------------------------+--------------------------------------+
+| **Repo/tag**                         | genesis/arno.2015.1.0                |
+|                                      |                                      |
++--------------------------------------+--------------------------------------+
+| **Release designation**              | arno.2015.1.0                        |
+|                                      |                                      |
++--------------------------------------+--------------------------------------+
+| **Release date**                     | 2015-06-04                           |
+|                                      |                                      |
++--------------------------------------+--------------------------------------+
+| **Purpose of the delivery**          | OPNFV Arno release                   |
+|                                      |                                      |
++--------------------------------------+--------------------------------------+
+
+Version change
+--------------
+
+Module version changes
+~~~~~~~~~~~~~~~~~~~~~~
+This is the first tracked version of the Arno release with the Foreman/QuickStack deployment toolchain. It is based on the following upstream versions:
+
+- OpenStack (Juno release)
+
+- OpenDaylight Helium-SR3
+
+- CentOS 7
+
+Document version changes
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+This is the first tracked version of the Arno release with the Foreman/QuickStack deployment toolchain. The following documentation is provided with this release:
+
+- OPNFV Installation instructions for the Arno release with the Foreman/QuickStack deployment toolchain - ver. 1.0.0
+- OPNFV Release Notes for the Arno release with the Foreman/QuickStack deployment toolchain - ver. 1.0.0 (this document)
+
+Feature additions
+~~~~~~~~~~~~~~~~~
+
++--------------------------------------+--------------------------------------+
+| **JIRA REFERENCE**                   | **SLOGAN**                           |
+|                                      |                                      |
++--------------------------------------+--------------------------------------+
+| JIRA: BGS-4                          | OPNFV base system install            |
+|                                      | using Foreman/Quickstack.            |
++--------------------------------------+--------------------------------------+
+
+Bug corrections
+~~~~~~~~~~~~~~~
+
+**JIRA TICKETS:**
+
++--------------------------------------+--------------------------------------+
+| **JIRA REFERENCE**                   | **SLOGAN**                           |
+|                                      |                                      |
++--------------------------------------+--------------------------------------+
+|                                      |                                      |
+|                                      |                                      |
++--------------------------------------+--------------------------------------+
+
+Deliverables
+------------
+
+Software deliverables
+~~~~~~~~~~~~~~~~~~~~~
+- Foreman/QuickStack@OPNFV .iso file
+- deploy.sh - Automatically deploys Target OPNFV System to Bare Metal
+
+Documentation deliverables
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+- OPNFV Installation instructions for the Arno release with the Foreman/QuickStack deployment toolchain - ver. 1.0.0
+- OPNFV Release Notes for the Arno release with the Foreman/QuickStack deployment toolchain - ver. 1.0.0 (this document)
+
+Known Limitations, Issues and Workarounds
+=========================================
+
+System Limitations
+------------------
+
+**Max number of blades:**   1 Foreman/QuickStack master, 3 Controllers, 20 Compute blades
+
+**Min number of blades:**   1 Foreman/QuickStack master, 1 Controller, 1 Compute blade
+
+**Storage:**    Ceph is the only supported storage configuration.
+
+**Min master requirements:** At least 2048 MB of RAM
+
+
+Known issues
+------------
+
+**JIRA TICKETS:**
+
++--------------------------------------+--------------------------------------+
+| **JIRA REFERENCE**                   | **SLOGAN**                           |
+|                                      |                                      |
++--------------------------------------+--------------------------------------+
+| JIRA: BGS-13                         | bridge br-ex is not auto configured  |
+|                                      | by puppet                            |
++--------------------------------------+--------------------------------------+
+
+Workarounds
+-----------
+**-**
+
+
+Test Result
+===========
+
+The Arno release with the Foreman/QuickStack deployment toolchain has undergone QA test runs with the following results:
+
++--------------------------------------+--------------------------------------+
+| **TEST-SUITE**                       | **Results:**                         |
+|                                      |                                      |
++--------------------------------------+--------------------------------------+
+| **-**                                | **-**                                |
++--------------------------------------+--------------------------------------+
+
+
+References
+==========
+
+For more information on the OPNFV Arno release, please see:
+
+http://wiki.opnfv.org/release/arno
+
+:Authors: Tim Rozet (trozet@redhat.com)
+:Version: 0.2
+
+**Documentation tracking**
+
+Revision: _sha1_
+
+Build date:  _date_
+
diff --git a/foreman/include/build.sh.debug b/foreman/include/build.sh.debug
new file mode 100644 (file)
index 0000000..8b13789
--- /dev/null
@@ -0,0 +1 @@
+
diff --git a/fuel/.DS_Store b/fuel/.DS_Store
deleted file mode 100644 (file)
index b1bc858..0000000
Binary files a/fuel/.DS_Store and /dev/null differ
index e2d1b37..7aa42d2 100644 (file)
--- a/fuel/TODO
+++ b/fuel/TODO
@@ -3,3 +3,8 @@
 # jonas.bjurel@ericsson.com 0.2 2015.04.14
 #########################################################################
 Following items needs to be done to achieve an OPNFV/BGS ARNO Fuel Stack:
+1) Add support for CentOS 6.5 - REMAINING
+2) Add Local GIT repo mirror
+3) Add Auto-deployment for Linux-Foundation Lab.
+4) Dry-run Functest (Jenkins/Robot/etc.)
+5) Finalize Documentation
\ No newline at end of file
index ba4beaf..5f63120 100644 (file)
@@ -15,7 +15,7 @@ SHELL = /bin/bash
 #Input args
 export UNIT_TEST = FALSE
 export INTERACTIVE = TRUE
-export ISOSRC = file:$(shell pwd)/fuel-6.1.iso
+export ISOSRC = file:$(shell pwd)/fuel-6.0.1.iso
 export ISOCACHE = $(shell pwd)/$(shell basename $(ISOSRC))
 export PRODNO = "OPNFV_BGS"
 export REVSTATE = "P0000"
@@ -37,6 +37,13 @@ export TOPDIR := $(shell pwd)
 #Build subclasses
 
 SUBDIRS := f_isoroot
+SUBDIRS += f_opnfv_puppet
+SUBDIRS += f_osnaily
+SUBDIRS += f_l23network
+SUBDIRS += f_resolvconf
+SUBDIRS += f_ntp
+SUBDIRS += f_odl_docker
+#SUBDIRS += f_odl
 
 # f_example is only an example of how to generate a .deb package and
 # should not be enabled in official builds.
@@ -56,6 +63,7 @@ all:
        @echo "Versions of cached build results built by" $(shell hostname) "at" $(shell date -u) > $(VERSION_FILE)
        @echo "cache.mk" $(shell md5sum $(BUILD_BASE)/cache.mk | cut -f1 -d " ") >> $(VERSION_FILE)
        @echo "config.mk" $(shell md5sum $(BUILD_BASE)/config.mk | cut -f1 -d " ") >> $(VERSION_FILE)
+       $(MAKE) -C f_odl_docker -f Makefile all
        @make -C docker
        @docker/runcontext $(DOCKERIMG) $(MAKE) $(MAKEFLAGS) iso
 
@@ -75,25 +83,16 @@ $(ISOCACHE):
        @echo "fuel" `git -C /tmp/fuel-main show | grep commit | head -1 | cut -d " " -f2` >> $(VERSION_FILE)
        # Setup cgroups for docker-in-docker
        sudo /root/enable_dockerx2
-       # Temporary patch to accomodate for new Ubuntu trusty devops keys not yet
-       # backported to fuel 6.0 or 6.1
-       cd /tmp/fuel-main && patch -p0 < $(TOPDIR)/fuel-main_3.patch
-       # Patch for adding dosfstools, as Fuel 6.1 is running mkfs.vfat
-       cd /tmp/fuel-main && patch -p0 < $(TOPDIR)/fuel-main_5.patch
+       # Patch to fix race condition when doing "Docker-in-Docker" build
+       cd /tmp/fuel-main && patch -p1 < $(TOPDIR)/fuel-main_1.patch
+       # Patch to make the sandbox chroot in Fuel succeed with package
+       # installation in a Docker build
+       cd /tmp/fuel-main && patch -p1 < $(TOPDIR)/fuel-main_2.patch
        # Remove Docker optimizations, otherwise multistrap will fail during
        # Fuel build.
        sudo rm -f /etc/apt/apt.conf.d/docker*
        #
        cd /tmp/fuel-main && ./prepare-build-env.sh
-       cd /tmp/fuel-main && make repos
-       # Patch for speeding up image creation in virtual environments,
-       # https://review.openstack.org/#/c/197943/
-       cd /tmp/fuel-main && patch -p0 < $(TOPDIR)/fuel-agent_1.patch
-       cd /tmp/fuel-main/build/repos/nailgun && git config --global user.email "build$opnfv.org"
-       cd /tmp/fuel-main/build/repos/nailgun && git config --global user.name "OPNFV build"
-       cd /tmp/fuel-main/build/repos/nailgun && git add -u .
-       cd /tmp/fuel-main/build/repos/nailgun && git commit -m "Added patch"
-       #
        cd /tmp/fuel-main && make iso
        mv /tmp/fuel-main/build/artifacts/fuel*.iso .
 
@@ -117,6 +116,14 @@ $(SUBDIRS):
 patch-packages:
        ORIGISO=$(ISOCACHE) REVSTATE=$(REVSTATE) $(MAKE) -C $@ -f Makefile release
 
+.PHONY: prepare
+prepare:
+       #$(MAKE) -C opendaylight -f Makefile setup
+
+.PHONY: odl
+odl:
+       #$(MAKE) -C opendaylight -f Makefile
+
 .PHONY: build-clean $(SUBCLEAN)
 build-clean: $(SUBCLEAN)
        $(MAKE) -C patch-packages -f Makefile clean
@@ -125,11 +132,13 @@ build-clean: $(SUBCLEAN)
        @rm -f $(NEWISO)
 
 .PHONY: clean $(SUBCLEAN)
-clean:  clean-cache $(SUBCLEAN)
+clean:  clean-cache prepare $(SUBCLEAN)
        $(MAKE) -C patch-packages -f Makefile clean
+       #$(MAKE) -C opendaylight -f Makefile clean
        @rm -f *.iso
        @rm -Rf release
        @rm -Rf newiso
+       @rm -f f_odl
        @rm -f $(NEWISO)
        @rm -f $(BUILD_BASE)/.versions
 
@@ -139,12 +148,6 @@ $(SUBCLEAN): %.clean:
 # Todo: Make things smarter - we shouldn't need to clean everything
 # betwen make invocations.
 .PHONY: iso
-iso:   build-clean $(ISOCACHE) $(SUBDIRS) patch-packages
+iso:   prepare build-clean odl $(ISOCACHE) $(SUBDIRS) patch-packages
        install/install.sh iso $(ISOCACHE) $(NEWISO) $(PRODNO) $(REVSTATE)
        @printf "\n\nProduct ISO is $(NEWISO)\n\n"
-
-# Start a bash shell in docker for Makefile debugging
-.PHONY: debug
-debug:
-       @docker version >/dev/null 2>&1 || (echo 'No Docker installation available'; exit 1)
-       docker/runcontext $(DOCKERIMG) bash
index a6e1569..9692f34 100644 (file)
@@ -18,5 +18,6 @@ This purpose of this framework is to:
         - Apply patches to the baseline.
         - Etc.
 3)      Re factor/rebuild the .iso image for deployment (also builds in a container, for the same reason as mentioned above)
+4)      Through a pre-deployment script, settings that are not part of the pristine Fuel build can be applied
 
 For detailed instructions on how to add content, configuration, build and deply - please see: DOC/
index b88ac2f..cc98f68 100644 (file)
@@ -1,4 +1,4 @@
-#############################################################################
+##############################################################################
 # Copyright (c) 2015 Ericsson AB and others.
 # stefan.k.berg@ericsson.com
 # jonas.bjurel@ericsson.com
@@ -15,8 +15,10 @@ CACHECLEAN := $(addsuffix .clean,$(CACHEFILES) $(CACHEDIRS))
 ############################################################################
 # BEGIN of variables to customize
 #
-#CACHEDIRS := foo/bar
+#CACHEDIRS := opendaylight/f_odl/package
 
+#CACHEFILES := opendaylight/.odl-build-history
+#CACHEFILES += opendaylight/.odl-build.log
 CACHEFILES += .versions
 CACHEFILES += $(shell basename $(ISOSRC))
 #
@@ -52,20 +54,20 @@ $(CACHEFILES):
 
        @if [ ! -f $(BUILD_BASE)/$@ ]; then\
           echo " " > $(BUILD_BASE)/$@;\
-          ln -s $(BUILD_BASE)/$@ $(CACHE_DIR)/$@;\
+          ln -s $(BUILD_BASE)/$@ $(CACHE_DIR)/$@;\
           rm -f $(BUILD_BASE)/$@;\
        else\
           ln -s $(BUILD_BASE)/$@ $(CACHE_DIR)/$@;\
        fi
 
 .PHONY: validate-cache
-validate-cache: $(CACHEVALIDATE)
-       @if [ "$(shell md5sum $(BUILD_BASE)/config.mk | cut -f1 -d " ")" != "$(shell cat $(VERSION_FILE) | grep config.mk | awk '{print $$NF}')" ]; then\
+validate-cache: prepare $(CACHEVALIDATE)
+       @if [[ $(shell md5sum $(BUILD_BASE)/config.mk | cut -f1 -d " ") != $(shell cat $(VERSION_FILE) | grep config.mk | awk '{print $$NF}') ]]; then\
           echo "Cache does not match current config.mk definition, cache must be rebuilt";\
           exit 1;\
        fi;
 
-       @if [ "$(shell md5sum $(BUILD_BASE)/cache.mk | cut -f1 -d " ")" != "$(shell cat $(VERSION_FILE) | grep cache.mk | awk '{print $$NF}')" ]; then\
+       @if [[ $(shell md5sum $(BUILD_BASE)/cache.mk | cut -f1 -d " ") != $(shell cat $(VERSION_FILE) | grep cache.mk | awk '{print $$NF}') ]]; then\
           echo "Cache does not match current cache.mk definition, cache must be rebuilt";\
           exit 1;\
        fi;
@@ -78,12 +80,14 @@ validate-cache: $(CACHEVALIDATE)
        then \
           REMOTE_ID=$(shell git ls-remote $(FUEL_MAIN_REPO) $(FUEL_MAIN_TAG) | awk '{print $$(NF-1)}'); \
        fi; \
-       if [[ $$REMOTE_ID != $(shell cat $(VERSION_FILE) | grep fuel | awk '{print $$NF}') ]]; \
+       if [ $$REMOTE_ID != $(shell cat $(VERSION_FILE) | grep fuel | awk '{print $$NF}') ]; \
        then \
           echo "Cache does not match upstream Fuel, cache must be rebuilt!"; \
           exit 1; \
        fi
 
+       #$(MAKE) -C opendaylight validate-cache
+
 .PHONY: $(CACHEVALIDATE)
 $(CACHEVALIDATE): %.validate:
        @echo VALIDATE $(CACHEVALIDATE)
index e9a5320..19f502d 100644 (file)
@@ -8,12 +8,19 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 
+ODL_MAIN_REPO := https://git.opendaylight.org/gerrit/p/controller.git
+ODL_MAIN_TAG := release/helium
+
 FUEL_MAIN_REPO := https://github.com/stackforge/fuel-main
-FUEL_MAIN_TAG = stable/6.1
+FUEL_MAIN_TAG = stable/6.0
 
 DOCKER_REPO := http://get.docker.com/builds/Linux/x86_64
 DOCKER_TAG := docker-latest
 
+.PHONY: get-odl-repo
+get-odl-repo:
+       @echo $(ODL_MAIN_REPO) $(ODL_MAIN_TAG)
+
 .PHONY: get-fuel-repo
 get-fuel-repo:
        @echo $(FUEL_MAIN_REPO) $(FUEL_MAIN_TAG)
index 81cdc43..b4e1b4e 100644 (file)
@@ -14,7 +14,7 @@ RUN apt-get update
 RUN apt-get install -y software-properties-common python-software-properties \
     make python-setuptools python-all dpkg-dev debhelper \
     fuseiso git genisoimage bind9-host wget curl lintian tmux lxc iptables \
-    ca-certificates sudo apt-utils lsb-release dosfstools
+    ca-certificates sudo apt-utils lsb-release
 
 RUN echo "ALL ALL=NOPASSWD: ALL" > /etc/sudoers.d/open-sudo
 RUN chmod 0440 /etc/sudoers.d/open-sudo
@@ -24,5 +24,8 @@ ADD ./setcontext /root/setcontext
 RUN chmod +x /root/setcontext
 ADD ./enable_dockerx2 /root/enable_dockerx2
 RUN chmod +x /root/enable_dockerx2
+ADD ./install.sh /root/install.sh
+RUN chmod +x /root/install.sh
+RUN /root/install.sh
 
 VOLUME /var/lib/docker
diff --git a/fuel/build/docker/ubuntu-builder/install.sh b/fuel/build/docker/ubuntu-builder/install.sh
new file mode 100755 (executable)
index 0000000..df1af72
--- /dev/null
@@ -0,0 +1,25 @@
+#!/bin/bash
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+#Install Oracle Java 7 jdk
+echo "Installing JAVA 7"
+apt-get update
+add-apt-repository ppa:webupd8team/java -y
+apt-get update
+echo debconf shared/accepted-oracle-license-v1-1 select true | debconf-set-selections
+apt-get install oracle-java7-installer -y
+
+#Install Maven 3
+echo deb http://ppa.launchpad.net/natecarlson/maven3/ubuntu precise main >> /etc/apt/sources.list
+echo deb-src http://ppa.launchpad.net/natecarlson/maven3/ubuntu precise main >> /etc/apt/sources.list
+apt-get update || exit 1
+sudo apt-get install -y --force-yes maven3 || exit 1
+ln -s /usr/share/maven3/bin/mvn /usr/bin/mvn
index a9b12d9..bde8e64 100644 (file)
@@ -8,7 +8,7 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 
-SUBDIRS = f_kscfg f_bootstrap f_repobuild f_odlpluginbuild
+SUBDIRS = f_predeployment f_kscfg f_bootstrap
 SUBCLEAN = $(addsuffix .clean,$(SUBDIRS))
 
 .PHONY: all
index 8bdf566..348ce3c 100755 (executable)
@@ -1,5 +1,4 @@
 #!/bin/bash
-FUEL_RELEASE=$(grep release: /etc/fuel/version.yaml | cut -d: -f2 | tr -d '" ')
 
 function countdown() {
   local i
@@ -48,6 +47,9 @@ if [[ "$showmenu" == "yes" || "$showmenu" == "YES" ]]; then
     { kill "$pid"; wait $!; } 2>/dev/null
     case "$key" in
       $'\e')  echo "Skipping Fuel Setup.."
+              echo -n "Applying default Fuel settings..."
+              fuelmenu --save-only --iface=eth0
+              echo "Done!"
               ;;
       *)      echo -e "\nEntering Fuel Setup..."
               fuelmenu
@@ -55,51 +57,30 @@ if [[ "$showmenu" == "yes" || "$showmenu" == "YES" ]]; then
     esac
   fi
 fi
-
-
 #Reread /etc/sysconfig/network to inform puppet of changes
 . /etc/sysconfig/network
 hostname "$HOSTNAME"
 
-service docker start
-
-if [ -f /root/.build_images ]; then
-  #Fail on all errors
-  set -e
-  trap fail EXIT
+### docker stuff
+images_dir="/var/www/nailgun/docker/images"
 
-  echo "Loading Fuel base image for Docker..."
-  docker load -i /var/www/nailgun/docker/images/fuel-images.tar
+# extract docker images
+mkdir -p $images_dir $sources_dir
+rm -f $images_dir/*tar
+pushd $images_dir &>/dev/null
 
-  echo "Building Fuel Docker images..."
-  WORKDIR=$(mktemp -d /tmp/docker-buildXXX)
-  SOURCE=/var/www/nailgun/docker
-  REPO_CONT_ID=$(docker -D run -d -p 80 -v /var/www/nailgun:/var/www/nailgun fuel/centos sh -c 'mkdir /var/www/html/os;ln -sf /var/www/nailgun/centos/x86_64 /var/www/html/os/x86_64;/usr/sbin/apachectl -DFOREGROUND')
-  RANDOM_PORT=$(docker port $REPO_CONT_ID 80 | cut -d':' -f2)
-
-  for imagesource in /var/www/nailgun/docker/sources/*; do
-    if ! [ -f "$imagesource/Dockerfile" ]; then
-      echo "Skipping ${imagesource}..."
-      continue
-    fi
-    image=$(basename "$imagesource")
-    cp -R "$imagesource" $WORKDIR/$image
-    mkdir -p $WORKDIR/$image/etc
-    cp -R /etc/puppet /etc/fuel $WORKDIR/$image/etc
-    sed -e "s/_PORT_/${RANDOM_PORT}/" -i $WORKDIR/$image/Dockerfile
-    sed -e 's/production:.*/production: "docker-build"/' -i $WORKDIR/$image/etc/fuel/version.yaml
-    docker build -t fuel/${image}_${FUEL_RELEASE} $WORKDIR/$image
-  done
-  docker rm -f $REPO_CONT_ID
-  rm -rf "$WORKDIR"
+echo "Extracting and loading docker images. (This may take a while)"
+lrzip -d -o fuel-images.tar fuel-images.tar.lrz && tar -xf fuel-images.tar && rm -f fuel-images.tar
+popd &>/dev/null
+service docker start
 
-  #Remove trap for normal deployment
-  trap - EXIT
-  set +e
-else
-  echo "Loading docker images. (This may take a while)"
-  docker load -i /var/www/nailgun/docker/images/fuel-images.tar
-fi
+# load docker images
+for image in $images_dir/*tar ; do
+    echo "Loading docker image ${image}..."
+    docker load -i "$image"
+    # clean up extracted image
+    rm -f "$image"
+done
 
 # apply puppet
 puppet apply --detailed-exitcodes -d -v /etc/puppet/modules/nailgun/examples/host-only.pp
@@ -121,52 +102,4 @@ done
 shopt -u nullglob
 ### OPNFV addition END
 
-# Enable updates repository
-cat > /etc/yum.repos.d/mos${FUEL_RELEASE}-updates.repo << EOF
-[mos${FUEL_RELEASE}-updates]
-name=mos${FUEL_RELEASE}-updates
-baseurl=http://mirror.fuel-infra.org/mos/centos-6/mos${FUEL_RELEASE}/updates/
-gpgcheck=0
-skip_if_unavailable=1
-EOF
-
-# Enable security repository
-cat > /etc/yum.repos.d/mos${FUEL_RELEASE}-security.repo << EOF
-[mos${FUEL_RELEASE}-security]
-name=mos${FUEL_RELEASE}-security
-baseurl=http://mirror.fuel-infra.org/mos/centos-6/mos${FUEL_RELEASE}/security/
-gpgcheck=0
-skip_if_unavailable=1
-EOF
-
-#Check if repo is accessible
-echo "Checking for access to updates repository..."
-repourl=$(grep baseurl /etc/yum.repos.d/*updates* 2>/dev/null | cut -d'=' -f2- | head -1)
-if urlaccesscheck check "$repourl" ; then
-  UPDATE_ISSUES=0
-else
-  UPDATE_ISSUES=1
-fi
-
-if [ $UPDATE_ISSUES -eq 1 ]; then
-  warning="WARNING: There are issues connecting to Fuel update repository.\
-\nPlease fix your connection and update this node with \`yum update\`\
-\nThen run \`dockerctl destroy all; bootstrap_admin_node.sh;\`\
-\nto repeat bootstrap on Fuel Master with the latest updates.\
-\nFor more information, check out Fuel documentation at:\
-\nhttp://docs.mirantis.com/fuel"
-else
-  warning="WARNING: There may be updates available for Fuel.\
-\nYou should update this node with \`yum update\`. If there are available\
-\n updates, run \`dockerctl destroy all; bootstrap_admin_node.sh;\`\
-\nto repeat bootstrap on Fuel Master with the latest updates.\
-\nFor more information, check out Fuel documentation at:\
-\nhttp://docs.mirantis.com/fuel"
-fi
-echo
-echo "*************************************************"
-echo -e "$warning"
-echo "*************************************************"
-echo "Sending notification to Fuel UI..."
-fuel notify --topic warning --send "$warning"
 echo "Fuel node deployment complete!"
index 8d21c1e..7b6e6bd 100755 (executable)
@@ -1,5 +1,4 @@
 #!/bin/bash
-FUEL_RELEASE=$(grep release: /etc/fuel/version.yaml | cut -d: -f2 | tr -d '" ')
 
 function countdown() {
   local i
@@ -38,6 +37,9 @@ if [[ "$showmenu" == "yes" || "$showmenu" == "YES" ]]; then
     { kill "$pid"; wait $!; } 2>/dev/null
     case "$key" in
       $'\e')  echo "Skipping Fuel Setup.."
+              echo -n "Applying default Fuel settings..."
+              fuelmenu --save-only --iface=eth0
+              echo "Done!"
               ;;
       *)      echo -e "\nEntering Fuel Setup..."
               fuelmenu
@@ -45,51 +47,30 @@ if [[ "$showmenu" == "yes" || "$showmenu" == "YES" ]]; then
     esac
   fi
 fi
-
-
 #Reread /etc/sysconfig/network to inform puppet of changes
 . /etc/sysconfig/network
 hostname "$HOSTNAME"
 
-service docker start
-
-if [ -f /root/.build_images ]; then
-  #Fail on all errors
-  set -e
-  trap fail EXIT
+### docker stuff
+images_dir="/var/www/nailgun/docker/images"
 
-  echo "Loading Fuel base image for Docker..."
-  docker load -i /var/www/nailgun/docker/images/fuel-images.tar
+# extract docker images
+mkdir -p $images_dir $sources_dir
+rm -f $images_dir/*tar
+pushd $images_dir &>/dev/null
 
-  echo "Building Fuel Docker images..."
-  WORKDIR=$(mktemp -d /tmp/docker-buildXXX)
-  SOURCE=/var/www/nailgun/docker
-  REPO_CONT_ID=$(docker -D run -d -p 80 -v /var/www/nailgun:/var/www/nailgun fuel/centos sh -c 'mkdir /var/www/html/os;ln -sf /var/www/nailgun/centos/x86_64 /var/www/html/os/x86_64;/usr/sbin/apachectl -DFOREGROUND')
-  RANDOM_PORT=$(docker port $REPO_CONT_ID 80 | cut -d':' -f2)
-
-  for imagesource in /var/www/nailgun/docker/sources/*; do
-    if ! [ -f "$imagesource/Dockerfile" ]; then
-      echo "Skipping ${imagesource}..."
-      continue
-    fi
-    image=$(basename "$imagesource")
-    cp -R "$imagesource" $WORKDIR/$image
-    mkdir -p $WORKDIR/$image/etc
-    cp -R /etc/puppet /etc/fuel $WORKDIR/$image/etc
-    sed -e "s/_PORT_/${RANDOM_PORT}/" -i $WORKDIR/$image/Dockerfile
-    sed -e 's/production:.*/production: "docker-build"/' -i $WORKDIR/$image/etc/fuel/version.yaml
-    docker build -t fuel/${image}_${FUEL_RELEASE} $WORKDIR/$image
-  done
-  docker rm -f $REPO_CONT_ID
-  rm -rf "$WORKDIR"
+echo "Extracting and loading docker images. (This may take a while)"
+lrzip -d -o fuel-images.tar fuel-images.tar.lrz && tar -xf fuel-images.tar && rm -f fuel-images.tar
+popd &>/dev/null
+service docker start
 
-  #Remove trap for normal deployment
-  trap - EXIT
-  set +e
-else
-  echo "Loading docker images. (This may take a while)"
-  docker load -i /var/www/nailgun/docker/images/fuel-images.tar
-fi
+# load docker images
+for image in $images_dir/*tar ; do
+    echo "Loading docker image ${image}..."
+    docker load -i "$image"
+    # clean up extracted image
+    rm -f "$image"
+done
 
 # apply puppet
 puppet apply --detailed-exitcodes -d -v /etc/puppet/modules/nailgun/examples/host-only.pp
@@ -100,53 +81,4 @@ rmdir /var/log/remote && ln -s /var/log/docker-logs/remote /var/log/remote
 
 dockerctl check || fail
 bash /etc/rc.local
-
-# Enable updates repository
-cat > /etc/yum.repos.d/mos${FUEL_RELEASE}-updates.repo << EOF
-[mos${FUEL_RELEASE}-updates]
-name=mos${FUEL_RELEASE}-updates
-baseurl=http://mirror.fuel-infra.org/mos/centos-6/mos${FUEL_RELEASE}/updates/
-gpgcheck=0
-skip_if_unavailable=1
-EOF
-
-# Enable security repository
-cat > /etc/yum.repos.d/mos${FUEL_RELEASE}-security.repo << EOF
-[mos${FUEL_RELEASE}-security]
-name=mos${FUEL_RELEASE}-security
-baseurl=http://mirror.fuel-infra.org/mos/centos-6/mos${FUEL_RELEASE}/security/
-gpgcheck=0
-skip_if_unavailable=1
-EOF
-
-#Check if repo is accessible
-echo "Checking for access to updates repository..."
-repourl=$(grep baseurl /etc/yum.repos.d/*updates* 2>/dev/null | cut -d'=' -f2- | head -1)
-if urlaccesscheck check "$repourl" ; then
-  UPDATE_ISSUES=0
-else
-  UPDATE_ISSUES=1
-fi
-
-if [ $UPDATE_ISSUES -eq 1 ]; then
-  warning="WARNING: There are issues connecting to Fuel update repository.\
-\nPlease fix your connection and update this node with \`yum update\`\
-\nThen run \`dockerctl destroy all; bootstrap_admin_node.sh;\`\
-\nto repeat bootstrap on Fuel Master with the latest updates.\
-\nFor more information, check out Fuel documentation at:\
-\nhttp://docs.mirantis.com/fuel"
-else
-  warning="WARNING: There may be updates available for Fuel.\
-\nYou should update this node with \`yum update\`. If there are available\
-\n updates, run \`dockerctl destroy all; bootstrap_admin_node.sh;\`\
-\nto repeat bootstrap on Fuel Master with the latest updates.\
-\nFor more information, check out Fuel documentation at:\
-\nhttp://docs.mirantis.com/fuel"
-fi
-echo
-echo "*************************************************"
-echo -e "$warning"
-echo "*************************************************"
-echo "Sending notification to Fuel UI..."
-fuel notify --topic warning --send "$warning"
 echo "Fuel node deployment complete!"
diff --git a/fuel/build/f_isoroot/f_bootstrap/post-scripts/01_fix_iommubug.sh b/fuel/build/f_isoroot/f_bootstrap/post-scripts/01_fix_iommubug.sh
new file mode 100755 (executable)
index 0000000..79aa31a
--- /dev/null
@@ -0,0 +1,9 @@
+#!/bin/sh
+echo "Setting intel_iommu=off in bootstrap profile - a fix for the Dell systems"
+echo "Old settings"
+dockerctl shell cobbler cobbler profile report --name bootstrap
+echo "Modifying"
+dockerctl shell cobbler cobbler profile edit --name bootstrap --kopts "intel_iommu=off" --in-place
+echo "New settings"
+dockerctl shell cobbler cobbler profile report --name bootstrap
+
diff --git a/fuel/build/f_isoroot/f_bootstrap/post-scripts/02_fix_console_speed.sh b/fuel/build/f_isoroot/f_bootstrap/post-scripts/02_fix_console_speed.sh
new file mode 100755 (executable)
index 0000000..bf7591b
--- /dev/null
@@ -0,0 +1,15 @@
+#!/bin/sh
+echo "Changing console speed to 115200 (std is 9600) on bootstrap"
+echo "Old settings"
+dockerctl shell cobbler cobbler profile report --name bootstrap
+echo "Modifying"
+dockerctl shell cobbler cobbler profile edit --name bootstrap --kopts "console=tty0 console=ttyS0,115200" --in-place
+echo "New settings"
+dockerctl shell cobbler cobbler profile report --name bootstrap
+echo "Setting console speed to 115200 on ubuntu_1204_x86_64 (std is no serial console)"
+echo "Old settings"
+dockerctl shell cobbler cobbler profile report --name ubuntu_1204_x86_64
+echo "Modifying"
+dockerctl shell cobbler cobbler profile edit --name ubuntu_1204_x86_64 --kopts "console=tty0 console=ttyS0,115200" --in-place
+echo "New settings"
+dockerctl shell cobbler cobbler profile report --name ubuntu_1204_x86_64
diff --git a/fuel/build/f_isoroot/f_bootstrap/post-scripts/03_install_repo.sh b/fuel/build/f_isoroot/f_bootstrap/post-scripts/03_install_repo.sh
deleted file mode 100755 (executable)
index 427a55a..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-#/bin/sh
-echo "Installing pre-build repo"
-if [ ! -d /opt/opnfv/nailgun ]; then
-  echo "Error - found no repo!"
-  exit 1
-fi
-
-mkdir -p /var/www/nailgun
-mv /opt/opnfv/nailgun/* /var/www/nailgun
-if [ $? -ne 0 ]; then
-  echo "Error moving repos to their correct location!"
-  exit 1
-fi
-rmdir /opt/opnfv/nailgun
-if [ $? -ne 0 ]; then
-  echo "Error removing /opt/opnfv/nailgun directory!"
-  exit 1
-fi
-echo "Done installing pre-build repo"
old mode 100644 (file)
new mode 100755 (executable)
index 12cd1ab..508f044
@@ -26,12 +26,10 @@ skipx
 drives=""
 removable_drives=""
 for drv in `ls -1 /sys/block | grep "sd\|hd\|vd\|cciss"`; do
-    if !(blkid | grep -q "${drv}.*Fuel"); then
-      if (grep -q 0 /sys/block/${drv}/removable); then
-          drives="${drives} ${drv}"
-      else
-          removable_drives="${removable_drives} ${drv}"
-      fi
+    if (grep -q 0 /sys/block/${drv}/removable); then
+        drives="${drives} ${drv}"
+    else
+        removable_drives="${removable_drives} ${drv}"
     fi
 done
 default_drive=`echo ${drives} ${removable_drives} | awk '{print $1}'`
@@ -154,9 +152,9 @@ if [ "$format_confirmed" != "yes" ] ; then
   chvt 1
 fi
 
-# verify tgtdrive is at least 41GB
+# verify tgtdrive is at least 30GB
 tgtdrivesize=$(( $(cat "/sys/class/block/${tgtdrive}/size") / 2 / 1024 ))
-if [ $tgtdrivesize -lt 41984 ]; then
+if [ $tgtdrivesize -lt 30720 ]; then
     exec < /dev/tty3 > /dev/tty3 2>&1
     chvt 3
     clear
@@ -164,7 +162,7 @@ if [ $tgtdrivesize -lt 41984 ]; then
     echo '********************************************************************'
     echo '*                            E R R O R                             *'
     echo '*                                                                  *'
-    echo '*  Your disk is under 41GB in size. Installation cannot continue.  *'
+    echo '*  Your disk is under 30GB in size. Installation cannot continue.  *'
     echo '*             Restart installation with a larger disk.             *'
     echo '*                                                                  *'
     echo '********************************************************************'
@@ -177,9 +175,7 @@ fi
 tgtdrive=$(echo $tgtdrive | sed -e 's/!/\//')
 
 # source
-if test -e /dev/disk/by-label/"OpenStack_Fuel"; then
-    echo "harddrive --partition=LABEL="OpenStack_Fuel" --dir=/" > /tmp/source.ks
-elif test -e /dev/disk/by-uuid/will_be_substituted_with_actual_uuid; then
+if test -e /dev/disk/by-uuid/will_be_substituted_with_actual_uuid; then
     echo "harddrive --partition=UUID=will_be_substituted_with_actual_uuid --dir=/" > /tmp/source.ks
 else
     echo "cdrom" > /tmp/source.ks
@@ -208,18 +204,16 @@ else
 fi
 echo > /tmp/partition.ks
 echo "partition /boot --onpart=/dev/${bootdev}3" >> /tmp/partition.ks
-echo "partition /boot/efi --onpart=/dev/${bootdev}2" >> /tmp/partition.ks
-echo "partition pv.001 --ondisk=${tgtdrive} --size=41000 --grow" >> /tmp/partition.ks
+echo "partition pv.001 --ondisk=${tgtdrive} --size=30000 --grow" >> /tmp/partition.ks
 echo "volgroup os pv.001" >> /tmp/partition.ks
 echo "logvol swap --vgname=os --recommended --name=swap" >> /tmp/partition.ks
 echo "logvol / --vgname=os --size=10000 --name=root --fstype=ext4" >> /tmp/partition.ks
-echo "logvol /var --vgname=os --size=10000 --percent 30 --grow --name=var --fstype=ext4" >> /tmp/partition.ks
-echo "logvol /var/lib/docker --vgname=os --size=17000  --percent 20 --grow --name=varlibdocker --fstype=ext4" >> /tmp/partition.ks
-echo "logvol /var/log --vgname=os --size=4096 --percent 50 --grow --name=varlog --fstype=ext4" >> /tmp/partition.ks
+echo "logvol /var --vgname=os --size=10000 --percent 60 --grow --name=var --fstype=ext4" >> /tmp/partition.ks
+echo "logvol /var/log --vgname=os --size=4096 --percent 40 --grow --name=varlog --fstype=ext4" >> /tmp/partition.ks
 
 
 # bootloader
-echo "bootloader --location=partition --driveorder=${tgtdrive} --append=' biosdevname=0 crashkernel=none'" > /tmp/bootloader.ks
+echo "bootloader --location=mbr --driveorder=${tgtdrive} --append=' biosdevname=0 crashkernel=none'" > /tmp/bootloader.ks
 
 # Anaconda can not install grub 0.97 on disks which are >4T.
 # The reason is that grub does not support such large geometries
@@ -239,9 +233,6 @@ echo "cat /tmp/grub.script | chroot /mnt/sysimage /sbin/grub --no-floppy --batch
 
 %packages --nobase --excludedocs
 @Core
-fuel
-fuel-library
-fuel-dockerctl
 authconfig
 bind-utils
 cronie
@@ -250,12 +241,7 @@ curl
 daemonize
 dhcp
 docker-io
-fuel-bootstrap-image
-fuel-createmirror
-fuel-target-centos-images
-fuel-package-updates
 fuelmenu
-fuel-docker-images
 gdisk
 lrzip
 lsof
@@ -263,10 +249,8 @@ man
 mlocate
 nmap-ncat
 ntp
-ntpdate
 openssh-clients
 policycoreutils
-python-daemon
 rsync
 ruby21-puppet
 ruby21-rubygem-netaddr
@@ -282,7 +266,6 @@ vim-enhanced
 virt-what
 wget
 yum
-yum-plugin-priorities
 
 %include /tmp/post_partition.ks
 
@@ -302,8 +285,6 @@ echo -e "* soft core unlimited\n* hard core unlimited" >> /etc/security/limits.c
 %post --nochroot --log=/mnt/sysimage/root/anaconda-post-before-chroot.log
 #!/bin/sh
 
-set -x
-
 SOURCE="/mnt/sysimage/tmp/source"
 
 for I in `cat /proc/cmdline`; do case "$I" in *=*) eval $I;; esac ; done
@@ -361,7 +342,6 @@ function save_cfg {
     else
         echo GATEWAY=$gw >> /etc/sysconfig/network
     fi
-    [ -n "$build_images" -a "$build_images" != "0" ] && echo -e "$build_images" > /root/.build_images
 }
 
 # Default FQDN
@@ -376,7 +356,6 @@ gw=$gw
 device="eth0"
 hwaddr=`ifconfig $device | grep -i hwaddr | sed -e 's#^.*hwaddr[[:space:]]*##I'`
 dhcp_interface=$dhcp_interface
-build_images=$build_images
 save_cfg
 
 # Mounting installation source
@@ -387,9 +366,7 @@ echo
 mkdir -p ${SOURCE}
 mkdir -p ${FS}
 
-if test -e /dev/disk/by-label/"OpenStack_Fuel"; then
-    mount /dev/disk/by-label/"OpenStack_Fuel" ${SOURCE}
-elif test -e /dev/disk/by-uuid/will_be_substituted_with_actual_uuid; then
+if test -e /dev/disk/by-uuid/will_be_substituted_with_actual_uuid; then
     mount /dev/disk/by-uuid/will_be_substituted_with_actual_uuid ${FS}
     mount -o loop ${FS}/nailgun.iso ${SOURCE}
 fi
@@ -413,47 +390,61 @@ cp ${SOURCE}/.treeinfo ${repodir}/centos/x86_64
 
 # Copying Ubuntu files
 mkdir -p ${repodir}/ubuntu/x86_64/images
+cp -r ${SOURCE}/ubuntu/conf ${repodir}/ubuntu/x86_64
+cp -r ${SOURCE}/ubuntu/db ${repodir}/ubuntu/x86_64
 cp -r ${SOURCE}/ubuntu/dists ${repodir}/ubuntu/x86_64
+cp -r ${SOURCE}/ubuntu/indices ${repodir}/ubuntu/x86_64
 cp -r ${SOURCE}/ubuntu/pool ${repodir}/ubuntu/x86_64
-
-# We do not ship debian-installer kernel and initrd on ISO.
-# But we still need to be able to create ubuntu cobbler distro
-# which requires kernel and initrd to be available. So, we
-# just touch these files to work around cobbler's limitation.
-touch ${repodir}/ubuntu/x86_64/images/linux
-touch ${repodir}/ubuntu/x86_64/images/initrd.gz
+cp -r ${SOURCE}/ubuntu/installer-amd64/current/images/netboot/ubuntu-installer/amd64/linux ${repodir}/ubuntu/x86_64/images
+cp -r ${SOURCE}/ubuntu/installer-amd64/current/images/netboot/ubuntu-installer/amd64/initrd.gz ${repodir}/ubuntu/x86_64/images
 
 # make links for backward compatibility
 ln -s ${repodir}/centos ${wwwdir}/centos
 ln -s ${repodir}/ubuntu ${wwwdir}/ubuntu
 
+# Copying bootstrap image
+mkdir -p ${wwwdir}/bootstrap
+cp -r ${SOURCE}/bootstrap/initramfs.img ${wwwdir}/bootstrap
+cp -r ${SOURCE}/bootstrap/linux ${wwwdir}/bootstrap
+
+# Copying target images
+cp -r ${SOURCE}/targetimages ${wwwdir}
+
+mkdir -p /root/.ssh
+chmod 700 /root/.ssh
+cp ${SOURCE}/bootstrap/bootstrap.rsa /root/.ssh
+chmod 600 /root/.ssh/bootstrap.rsa
+
 # --------------------------
 # UNPACKING PUPPET MANIFESTS
 # --------------------------
 
 # create folders
-#mkdir -p /etc/puppet/${OPENSTACK_VERSION}/manifests/
-#mkdir -p /etc/puppet/${OPENSTACK_VERSION}/modules/
-#rm -rf /etc/puppet/modules/
+mkdir -p /etc/puppet/${OPENSTACK_VERSION}/manifests/
+mkdir -p /etc/puppet/${OPENSTACK_VERSION}/modules/
+rm -rf /etc/puppet/modules/
 
 # TODO(ikalnitsky): investigate why we need this
-#cp ${SOURCE}/puppet-slave.tgz ${wwwdir}/
+cp ${SOURCE}/puppet-slave.tgz ${wwwdir}/
 
 # place modules and manifests
-#tar zxf ${SOURCE}/puppet-slave.tgz -C /etc/puppet/${OPENSTACK_VERSION}/modules
-#cp /etc/puppet/${OPENSTACK_VERSION}/modules/osnailyfacter/examples/site.pp /etc/puppet/${OPENSTACK_VERSION}/manifests/site.pp
+tar zxf ${SOURCE}/puppet-slave.tgz -C /etc/puppet/${OPENSTACK_VERSION}/modules
+cp /etc/puppet/${OPENSTACK_VERSION}/modules/osnailyfacter/examples/site.pp /etc/puppet/${OPENSTACK_VERSION}/manifests/site.pp
 cp ${SOURCE}/centos-versions.yaml ${SOURCE}/ubuntu-versions.yaml /etc/puppet/${OPENSTACK_VERSION}/manifests/
 
 # make links for backward compatibility
-#pushd /etc/puppet
-#ln -s ${OPENSTACK_VERSION}/manifests/ /etc/puppet/manifests
-#ln -s ${OPENSTACK_VERSION}/modules/ /etc/puppet/modules
-#popd
+pushd /etc/puppet
+ln -s ${OPENSTACK_VERSION}/manifests/ /etc/puppet/manifests
+ln -s ${OPENSTACK_VERSION}/modules/ /etc/puppet/modules
+popd
 
 cp ${SOURCE}/send2syslog.py /bin/send2syslog.py
 mkdir -p /var/lib/hiera
 touch /var/lib/hiera/common.yaml /etc/puppet/hiera.yaml
 
+# Deploy docker images and ctl tools if the ISO was built with docker container support
+[ -d "${SOURCE}/docker" ] && cp -r ${SOURCE}/docker ${wwwdir}/docker
+
 # Prepare local repository specification
 rm /etc/yum.repos.d/CentOS*.repo
 cat > /etc/yum.repos.d/nailgun.repo << EOF
@@ -470,24 +461,6 @@ sed -i 's/^enabled.*/enabled=0/' /etc/yum/pluginconf.d/subscription-manager.conf
 # Disable GSSAPI in ssh server config
 sed -i -e "/^\s*GSSAPICleanupCredentials yes/d" -e "/^\s*GSSAPIAuthentication yes/d" /etc/ssh/sshd_config
 
-# Enable MOTD banner in sshd
-sed -i -e "s/^\s*PrintMotd no/PrintMotd yes/g" /etc/ssh/sshd_config
-
-# Add note regarding local repos creation to MOTD
-cat >> /etc/motd << EOF
-
-All environments use online repositories by default.
-Use the following commands to create local repositories
-on master node and change default repository settings:
-
-* CentOS: fuel-package-updates (see --help for options)
-* Ubuntu: fuel-createmirror (see --help for options)
-
-Please refer to the following guide for more information:
-https://docs.mirantis.com/openstack/fuel/fuel-6.1/reference-architecture.html#fuel-rep-mirror
-
-EOF
-
 # Copying bootstrap_admin_node.sh, chmod it and
 # adding /etc/init/bootstrap_admin_node.conf
 cp ${SOURCE}/bootstrap_admin_node.sh /usr/local/sbin/bootstrap_admin_node.sh
@@ -573,7 +546,10 @@ rm -rf ${SOURCE}
 umount -f ${FS} || true
 rm -rf ${FS}
 
-echo "tos orphan 7" >> /etc/ntp.conf
+# Enabling/configuring NTPD and ntpdate services
+echo "server 127.127.1.0"            >> /etc/ntp.conf
+echo "fudge  127.127.1.0 stratum 10" >> /etc/ntp.conf
+echo "tos    orphan 7"               >> /etc/ntp.conf
 
 # Do not show error message on ntpdate failure. Customers should not be confused
 # if admin node does not have access to the internet time servers.
@@ -595,4 +571,4 @@ cp -f /etc/skel/.bash* /root/
 # Blacklist i2c_piix4 module for VirtualBox so it does not create kernel errors
 [[ $(virt-what) = "virtualbox" ]] && echo "blacklist i2c_piix4" > /etc/modprobe.d/blacklist-i2c-piix4.conf
 
-%end
\ No newline at end of file
+%end
index cf8cf80..bddf99c 100644 (file)
@@ -26,12 +26,10 @@ skipx
 drives=""
 removable_drives=""
 for drv in `ls -1 /sys/block | grep "sd\|hd\|vd\|cciss"`; do
-    if !(blkid | grep -q "${drv}.*Fuel"); then
-      if (grep -q 0 /sys/block/${drv}/removable); then
-          drives="${drives} ${drv}"
-      else
-          removable_drives="${removable_drives} ${drv}"
-      fi
+    if (grep -q 0 /sys/block/${drv}/removable); then
+        drives="${drives} ${drv}"
+    else
+        removable_drives="${removable_drives} ${drv}"
     fi
 done
 default_drive=`echo ${drives} ${removable_drives} | awk '{print $1}'`
@@ -154,9 +152,9 @@ if [ "$format_confirmed" != "yes" ] ; then
   chvt 1
 fi
 
-# verify tgtdrive is at least 41GB
+# verify tgtdrive is at least 30GB
 tgtdrivesize=$(( $(cat "/sys/class/block/${tgtdrive}/size") / 2 / 1024 ))
-if [ $tgtdrivesize -lt 41984 ]; then
+if [ $tgtdrivesize -lt 30720 ]; then
     exec < /dev/tty3 > /dev/tty3 2>&1
     chvt 3
     clear
@@ -164,7 +162,7 @@ if [ $tgtdrivesize -lt 41984 ]; then
     echo '********************************************************************'
     echo '*                            E R R O R                             *'
     echo '*                                                                  *'
-    echo '*  Your disk is under 41GB in size. Installation cannot continue.  *'
+    echo '*  Your disk is under 30GB in size. Installation cannot continue.  *'
     echo '*             Restart installation with a larger disk.             *'
     echo '*                                                                  *'
     echo '********************************************************************'
@@ -177,9 +175,7 @@ fi
 tgtdrive=$(echo $tgtdrive | sed -e 's/!/\//')
 
 # source
-if test -e /dev/disk/by-label/"OpenStack_Fuel"; then
-    echo "harddrive --partition=LABEL="OpenStack_Fuel" --dir=/" > /tmp/source.ks
-elif test -e /dev/disk/by-uuid/will_be_substituted_with_actual_uuid; then
+if test -e /dev/disk/by-uuid/will_be_substituted_with_actual_uuid; then
     echo "harddrive --partition=UUID=will_be_substituted_with_actual_uuid --dir=/" > /tmp/source.ks
 else
     echo "cdrom" > /tmp/source.ks
@@ -208,18 +204,16 @@ else
 fi
 echo > /tmp/partition.ks
 echo "partition /boot --onpart=/dev/${bootdev}3" >> /tmp/partition.ks
-echo "partition /boot/efi --onpart=/dev/${bootdev}2" >> /tmp/partition.ks
-echo "partition pv.001 --ondisk=${tgtdrive} --size=41000 --grow" >> /tmp/partition.ks
+echo "partition pv.001 --ondisk=${tgtdrive} --size=30000 --grow" >> /tmp/partition.ks
 echo "volgroup os pv.001" >> /tmp/partition.ks
 echo "logvol swap --vgname=os --recommended --name=swap" >> /tmp/partition.ks
 echo "logvol / --vgname=os --size=10000 --name=root --fstype=ext4" >> /tmp/partition.ks
-echo "logvol /var --vgname=os --size=10000 --percent 30 --grow --name=var --fstype=ext4" >> /tmp/partition.ks
-echo "logvol /var/lib/docker --vgname=os --size=17000  --percent 20 --grow --name=varlibdocker --fstype=ext4" >> /tmp/partition.ks
-echo "logvol /var/log --vgname=os --size=4096 --percent 50 --grow --name=varlog --fstype=ext4" >> /tmp/partition.ks
+echo "logvol /var --vgname=os --size=10000 --percent 60 --grow --name=var --fstype=ext4" >> /tmp/partition.ks
+echo "logvol /var/log --vgname=os --size=4096 --percent 40 --grow --name=varlog --fstype=ext4" >> /tmp/partition.ks
 
 
 # bootloader
-echo "bootloader --location=partition --driveorder=${tgtdrive} --append=' biosdevname=0 crashkernel=none'" > /tmp/bootloader.ks
+echo "bootloader --location=mbr --driveorder=${tgtdrive} --append=' biosdevname=0 crashkernel=none'" > /tmp/bootloader.ks
 
 # Anaconda can not install grub 0.97 on disks which are >4T.
 # The reason is that grub does not support such large geometries
@@ -239,9 +233,6 @@ echo "cat /tmp/grub.script | chroot /mnt/sysimage /sbin/grub --no-floppy --batch
 
 %packages --nobase --excludedocs
 @Core
-fuel
-fuel-library
-fuel-dockerctl
 authconfig
 bind-utils
 cronie
@@ -250,12 +241,7 @@ curl
 daemonize
 dhcp
 docker-io
-fuel-bootstrap-image
-fuel-createmirror
-fuel-target-centos-images
-fuel-package-updates
 fuelmenu
-fuel-docker-images
 gdisk
 lrzip
 lsof
@@ -263,10 +249,8 @@ man
 mlocate
 nmap-ncat
 ntp
-ntpdate
 openssh-clients
 policycoreutils
-python-daemon
 rsync
 ruby21-puppet
 ruby21-rubygem-netaddr
@@ -282,7 +266,6 @@ vim-enhanced
 virt-what
 wget
 yum
-yum-plugin-priorities
 
 %include /tmp/post_partition.ks
 
@@ -302,8 +285,6 @@ echo -e "* soft core unlimited\n* hard core unlimited" >> /etc/security/limits.c
 %post --nochroot --log=/mnt/sysimage/root/anaconda-post-before-chroot.log
 #!/bin/sh
 
-set -x
-
 SOURCE="/mnt/sysimage/tmp/source"
 
 for I in `cat /proc/cmdline`; do case "$I" in *=*) eval $I;; esac ; done
@@ -361,7 +342,6 @@ function save_cfg {
     else
         echo GATEWAY=$gw >> /etc/sysconfig/network
     fi
-    [ -n "$build_images" -a "$build_images" != "0" ] && echo -e "$build_images" > /root/.build_images
 }
 
 # Default FQDN
@@ -376,7 +356,6 @@ gw=$gw
 device="eth0"
 hwaddr=`ifconfig $device | grep -i hwaddr | sed -e 's#^.*hwaddr[[:space:]]*##I'`
 dhcp_interface=$dhcp_interface
-build_images=$build_images
 save_cfg
 
 # Mounting installation source
@@ -387,9 +366,7 @@ echo
 mkdir -p ${SOURCE}
 mkdir -p ${FS}
 
-if test -e /dev/disk/by-label/"OpenStack_Fuel"; then
-    mount /dev/disk/by-label/"OpenStack_Fuel" ${SOURCE}
-elif test -e /dev/disk/by-uuid/will_be_substituted_with_actual_uuid; then
+if test -e /dev/disk/by-uuid/will_be_substituted_with_actual_uuid; then
     mount /dev/disk/by-uuid/will_be_substituted_with_actual_uuid ${FS}
     mount -o loop ${FS}/nailgun.iso ${SOURCE}
 fi
@@ -413,47 +390,61 @@ cp ${SOURCE}/.treeinfo ${repodir}/centos/x86_64
 
 # Copying Ubuntu files
 mkdir -p ${repodir}/ubuntu/x86_64/images
+cp -r ${SOURCE}/ubuntu/conf ${repodir}/ubuntu/x86_64
+cp -r ${SOURCE}/ubuntu/db ${repodir}/ubuntu/x86_64
 cp -r ${SOURCE}/ubuntu/dists ${repodir}/ubuntu/x86_64
+cp -r ${SOURCE}/ubuntu/indices ${repodir}/ubuntu/x86_64
 cp -r ${SOURCE}/ubuntu/pool ${repodir}/ubuntu/x86_64
-
-# We do not ship debian-installer kernel and initrd on ISO.
-# But we still need to be able to create ubuntu cobbler distro
-# which requires kernel and initrd to be available. So, we
-# just touch these files to work around cobbler's limitation.
-touch ${repodir}/ubuntu/x86_64/images/linux
-touch ${repodir}/ubuntu/x86_64/images/initrd.gz
+cp -r ${SOURCE}/ubuntu/installer-amd64/current/images/netboot/ubuntu-installer/amd64/linux ${repodir}/ubuntu/x86_64/images
+cp -r ${SOURCE}/ubuntu/installer-amd64/current/images/netboot/ubuntu-installer/amd64/initrd.gz ${repodir}/ubuntu/x86_64/images
 
 # make links for backward compatibility
 ln -s ${repodir}/centos ${wwwdir}/centos
 ln -s ${repodir}/ubuntu ${wwwdir}/ubuntu
 
+# Copying bootstrap image
+mkdir -p ${wwwdir}/bootstrap
+cp -r ${SOURCE}/bootstrap/initramfs.img ${wwwdir}/bootstrap
+cp -r ${SOURCE}/bootstrap/linux ${wwwdir}/bootstrap
+
+# Copying target images
+cp -r ${SOURCE}/targetimages ${wwwdir}
+
+mkdir -p /root/.ssh
+chmod 700 /root/.ssh
+cp ${SOURCE}/bootstrap/bootstrap.rsa /root/.ssh
+chmod 600 /root/.ssh/bootstrap.rsa
+
 # --------------------------
 # UNPACKING PUPPET MANIFESTS
 # --------------------------
 
 # create folders
-#mkdir -p /etc/puppet/${OPENSTACK_VERSION}/manifests/
-#mkdir -p /etc/puppet/${OPENSTACK_VERSION}/modules/
-#rm -rf /etc/puppet/modules/
+mkdir -p /etc/puppet/${OPENSTACK_VERSION}/manifests/
+mkdir -p /etc/puppet/${OPENSTACK_VERSION}/modules/
+rm -rf /etc/puppet/modules/
 
 # TODO(ikalnitsky): investigate why we need this
-#cp ${SOURCE}/puppet-slave.tgz ${wwwdir}/
+cp ${SOURCE}/puppet-slave.tgz ${wwwdir}/
 
 # place modules and manifests
-#tar zxf ${SOURCE}/puppet-slave.tgz -C /etc/puppet/${OPENSTACK_VERSION}/modules
-#cp /etc/puppet/${OPENSTACK_VERSION}/modules/osnailyfacter/examples/site.pp /etc/puppet/${OPENSTACK_VERSION}/manifests/site.pp
+tar zxf ${SOURCE}/puppet-slave.tgz -C /etc/puppet/${OPENSTACK_VERSION}/modules
+cp /etc/puppet/${OPENSTACK_VERSION}/modules/osnailyfacter/examples/site.pp /etc/puppet/${OPENSTACK_VERSION}/manifests/site.pp
 cp ${SOURCE}/centos-versions.yaml ${SOURCE}/ubuntu-versions.yaml /etc/puppet/${OPENSTACK_VERSION}/manifests/
 
 # make links for backward compatibility
-#pushd /etc/puppet
-#ln -s ${OPENSTACK_VERSION}/manifests/ /etc/puppet/manifests
-#ln -s ${OPENSTACK_VERSION}/modules/ /etc/puppet/modules
-#popd
+pushd /etc/puppet
+ln -s ${OPENSTACK_VERSION}/manifests/ /etc/puppet/manifests
+ln -s ${OPENSTACK_VERSION}/modules/ /etc/puppet/modules
+popd
 
 cp ${SOURCE}/send2syslog.py /bin/send2syslog.py
 mkdir -p /var/lib/hiera
 touch /var/lib/hiera/common.yaml /etc/puppet/hiera.yaml
 
+# Deploy docker images and ctl tools if the ISO was built with docker container support
+[ -d "${SOURCE}/docker" ] && cp -r ${SOURCE}/docker ${wwwdir}/docker
+
 # Prepare local repository specification
 rm /etc/yum.repos.d/CentOS*.repo
 cat > /etc/yum.repos.d/nailgun.repo << EOF
@@ -470,24 +461,6 @@ sed -i 's/^enabled.*/enabled=0/' /etc/yum/pluginconf.d/subscription-manager.conf
 # Disable GSSAPI in ssh server config
 sed -i -e "/^\s*GSSAPICleanupCredentials yes/d" -e "/^\s*GSSAPIAuthentication yes/d" /etc/ssh/sshd_config
 
-# Enable MOTD banner in sshd
-sed -i -e "s/^\s*PrintMotd no/PrintMotd yes/g" /etc/ssh/sshd_config
-
-# Add note regarding local repos creation to MOTD
-cat >> /etc/motd << EOF
-
-All environments use online repositories by default.
-Use the following commands to create local repositories
-on master node and change default repository settings:
-
-* CentOS: fuel-package-updates (see --help for options)
-* Ubuntu: fuel-createmirror (see --help for options)
-
-Please refer to the following guide for more information:
-https://docs.mirantis.com/openstack/fuel/fuel-6.1/reference-architecture.html#fuel-rep-mirror
-
-EOF
-
 # Copying bootstrap_admin_node.sh, chmod it and
 # adding /etc/init/bootstrap_admin_node.conf
 cp ${SOURCE}/bootstrap_admin_node.sh /usr/local/sbin/bootstrap_admin_node.sh
@@ -567,7 +540,10 @@ rm -rf ${SOURCE}
 umount -f ${FS} || true
 rm -rf ${FS}
 
-echo "tos orphan 7" >> /etc/ntp.conf
+# Enabling/configuring NTPD and ntpdate services
+echo "server 127.127.1.0"            >> /etc/ntp.conf
+echo "fudge  127.127.1.0 stratum 10" >> /etc/ntp.conf
+echo "tos    orphan 7"               >> /etc/ntp.conf
 
 # Do not show error message on ntpdate failure. Customers should not be confused
 # if admin node does not have access to the internet time servers.
diff --git a/fuel/build/f_isoroot/f_odlpluginbuild/Makefile b/fuel/build/f_isoroot/f_odlpluginbuild/Makefile
deleted file mode 100644 (file)
index ce9cd73..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# mskalski@mirantis.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-TOP := $(shell pwd)
-DOCKNAME = fuelrepo
-DOCKVERSION = 1.0
-ODL_BRANCH="juno/lithium-sr1"
-ODL_REPO="https://github.com/stackforge/fuel-plugin-opendaylight.git"
-
-.PHONY: all
-all: .odlbuild
-
-.PHONY: clean
-clean:
-       # Deliberately not cleaning nailgun directory to speed up multiple builds
-       @rm -f ../release/opnfv/opendaylight*.rpm
-
-.PHONY: release
-release:.odlbuild
-       @rm -f ../release/opnfv/opendaylight*.rpm
-       @mkdir -p ../release/opnfv
-       @cp  opendaylight*.rpm ../release/opnfv/
-
-.odlbuild:
-       rm -rf fuel-plugin-opendaylight
-       sudo apt-get -y install build-essential ruby-dev rubygems-integration python-pip git rpm createrepo dpkg-dev
-       sudo gem install fpm
-       sudo pip install fuel-plugin-builder
-       git clone -b ${ODL_BRANCH} ${ODL_REPO}
-       INCLUDE_DEPENDENCIES=true fpb --debug --build fuel-plugin-opendaylight/
-       mv fuel-plugin-opendaylight/opendaylight*.rpm .
-       rm -rf fuel-plugin-opendaylight
diff --git a/fuel/build/f_isoroot/f_predeployment/Makefile b/fuel/build/f_isoroot/f_predeployment/Makefile
new file mode 100644 (file)
index 0000000..a5252df
--- /dev/null
@@ -0,0 +1,28 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+TOP := $(shell pwd)
+
+.PHONY: all
+all:
+       @mkdir -p release/opnfv
+       @cp pre-deploy.sh release/opnfv
+       @cp sysinfo.sh release/opnfv
+       @cp transform_yaml.py release/opnfv
+       @chmod 755 release/opnfv/*
+
+.PHONY: clean
+clean:
+       @rm -rf release
+
+
+.PHONY: release
+release:clean all
+       @cp -Rvp release/* ../release
diff --git a/fuel/build/f_isoroot/f_predeployment/README b/fuel/build/f_isoroot/f_predeployment/README
new file mode 100644 (file)
index 0000000..3eef9f2
--- /dev/null
@@ -0,0 +1,18 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+This is the start of the interactive frontend that will add OPNFV configuration to
+the astute.yaml of the nodes. It is currently just a test setup: prepare an installation
+up to the point of "deploy changes", but run "./pre-deploy.sh <envid> fragment.yaml"
+before actually hitting deploy, which will make sure the example fragment is added to
+the nodes.
+
+Note that at this time the hosts section is the only part of fragment.yaml that is
+actually acted on.
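
A minimal usage sketch on the Fuel master (assuming the scripts end up under /opt/opnfv,
as the release target of the Makefile suggests, and that the prepared environment has
id 1):

    # prepare the environment in the Fuel GUI or CLI up to, but not including,
    # "Deploy changes", then:
    cd /opt/opnfv
    ./pre-deploy.sh 1     # answers the DNS/hosts/NTP prompts and uploads the fragment
    # finally trigger the deployment from the Fuel GUI ("Deploy changes")

The answers are kept in /var/lib/opnfv/fragment.yaml.<env-id> and are reused on
subsequent runs of the script.
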
diff --git a/fuel/build/f_isoroot/f_predeployment/pre-deploy.sh b/fuel/build/f_isoroot/f_predeployment/pre-deploy.sh
new file mode 100755 (executable)
index 0000000..c5c6c42
--- /dev/null
@@ -0,0 +1,401 @@
+#!/bin/bash -e
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+
+error_exit () {
+    echo "$@" >&2
+    exit 1
+}
+
+get_env() {
+   local env_id=${1:-""}
+
+   if [ -z $env_id ]; then
+      local n_envs=$(fuel env --list | grep -v -E "^id|^--|^ *$" | wc -l)
+      if [ $n_envs -ne 1 ]; then
+          echo "Usage: $0 [<env-id>]" >&2
+          error_exit "If only a single environment is present it can be left" \
+                     "out. Otherwise the environment must be selected"
+      fi
+      env_id=$(fuel env --list | grep -v -E "^id|^--" | awk '{print $1}')
+   else
+      if ! fuel --env $env_id environment 2>/dev/null | grep -v -E "^id|^--" | \
+           grep -q ^$env_id; then
+         error_exit "No such environment ID: $env_id"
+      fi
+   fi
+   echo $env_id
+}
+
+get_node_uid () {
+    cat $1 | grep "^uid: " | sed "s/^uid: '//" | sed "s/'$//"
+}
+
+get_node_role () {
+    cat $1 | grep "^role: " | sed "s/^role: //"
+}
+
+get_next_cic () {
+    file=$1
+
+    last=`cat $file | sed 's/.*://' | grep "cic-" | sed 's/cic\-.*sl//' | sort -n | tail -1`
+    if [ -z "$last" ]; then
+        next=1
+    else
+        next=$[$last + 2]
+    fi
+    echo $next
+}
+
+get_next_compute () {
+    file=$1
+
+    last=`cat $file | sed 's/.*://' | grep "cmp-" | sed 's/cmp\-.*sl//' | sort -n | tail -1`
+    if [ -z "$last" ]; then
+        next=7
+    else
+        next=$[$last + 2]
+    fi
+    echo $next
+}
+
+modify_hostnames () {
+    env=$1
+    file=$2
+    for line in `cat $file`
+    do
+        old=`echo $line | sed 's/:.*//'`
+        new=`echo $line | sed 's/.*://'`
+        echo "Applying: $old -> $new"
+
+        for dfile in deployment_$env/*.yaml
+        do
+            sed -i "s/$old/$new/g" $dfile
+        done
+
+        for pfile in provisioning_$env/*.yaml
+        do
+            sed -i "s/$old/$new/g" $pfile
+        done
+    done
+}
+
+setup_hostnames () {
+    ENV=$1
+    cd ${CONFIGDIR}
+    touch hostnames.$ENV
+
+    for dfile in deployment_$ENV/*.yaml
+    do
+        uid=`get_node_uid $dfile`
+        hostname=`grep "^node-$uid:" hostnames.$ENV | sed 's/.*://'`
+        if [ -z $hostname ]; then
+
+            pfile=provisioning_$ENV/node-$uid.yaml
+            role=`get_node_role $dfile`
+
+            case $role in
+                primary-controller)
+                    hostname="cic-pod0-sh0-sl`get_next_cic hostnames.$ENV`"
+                    ;;
+                controller)
+                    hostname="cic-pod0-sh0-sl`get_next_cic hostnames.$ENV`"
+                    ;;
+                compute)
+                    hostname="cmp-pod0-sh0-sl`get_next_compute hostnames.$ENV`"
+                    ;;
+                *)
+                    echo "Unknown node type for UID $uid"
+                    exit 1
+                    ;;
+            esac
+
+            echo "node-$uid:$hostname" >> hostnames.$ENV
+        else
+            echo "Already got hostname $hostname for node-$uid"
+
+        fi
+    done
+
+    rm -f hostnames.$ENV.old
+    mv hostnames.$ENV hostnames.$ENV.old
+    sort hostnames.$ENV.old | uniq > hostnames.$ENV
+    modify_hostnames $ENV hostnames.$ENV
+}
+
+
+
+get_provisioning_info () {
+    ENV=$1
+    mkdir -p ${CONFIGDIR}
+    cd ${CONFIGDIR}
+    rm -Rf provisioning_$ENV
+    echo "Getting provisioning info..."
+    fuel --env $ENV provisioning --default
+    if [ $? -ne 0 ]; then
+        echo "Error: Could not get provisioning info for env $ENV">&2
+        exit 1
+    fi
+}
+
+get_deployment_info () {
+    ENV=$1
+    mkdir -p ${CONFIGDIR}
+    cd ${CONFIGDIR}
+    rm -Rf deployment_$ENV
+    echo "Getting deployment info..."
+    fuel --env $ENV deployment --default
+    if [ $? -ne 0 ]; then
+        echo "Error: Could not get deployment info for env $ENV">&2
+        exit 1
+    fi
+}
+
+transform_yaml () {
+    ENV=$1
+    cd ${CONFIGDIR}
+    for dfile in deployment_$ENV/*.yaml
+    do
+        /opt/opnfv/transform_yaml.py $dfile
+    done
+}
+
+commit_changes () {
+    ENV=$1
+    cd ${CONFIGDIR}
+
+    fuel --env $ENV deployment --upload
+    fuel --env $ENV provisioning --upload
+}
+
+add_yaml_fragment () {
+    ENV=$1
+    FRAGMENT=${CONFIGDIR}/fragment.yaml.$ENV
+
+    cd ${CONFIGDIR}
+    for dfile in deployment_$ENV/*.yaml
+    do
+        cnt=`grep "^opnfv:" $dfile | wc -l `
+        if [ $cnt -eq 0 ]; then
+            echo "Adding fragment to $dfile"
+            cat $FRAGMENT >> $dfile
+       else
+            echo "Already have fragment in $dfile"
+       fi
+    done
+}
+
+
+ip_valid() {
+    IP_ADDRESS="$1"
+    # Check if the format looks right
+    echo "$IP_ADDRESS" | egrep -qE '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' || return 1
+    # Check that each octet is less than or equal to 255:
+    echo $IP_ADDRESS | awk -F'.' '$1 <=255 && $2 <= 255 && $3 <=255 && $4 <= 255 {print "Y" } ' | grep -q Y || return 1
+    return 0
+}
+
+
+generate_ntp_entry() {
+    FILE=$1
+    read -p "NTP server:" NTP_SERVER
+    if [ -z "$NTP_SERVER" ]; then
+        return 1
+    elif confirm_yes "Are you sure you want to add this entry (y/n): "; then
+        echo "Confirmed"
+        echo "      server $NTP_SERVER" >> $FILE
+    fi
+}
+
+generate_hostfile_entry() {
+    FILE=$1
+    read -p "Name:" HOST_NAME
+    if [ -z "$HOST_NAME" ]; then
+        return 1
+    else
+        read -p "FQDN:" HOST_FQDN
+        read -p "IP:  " HOST_IP
+        while ! ip_valid "$HOST_IP"
+        do
+            echo "This is not a valid IP! Try again."
+            read -p "IP:  " HOST_IP
+        done
+    fi
+    if confirm_yes "Are you sure you want to add this entry (y/n): "; then
+        echo "Confirmed"
+        echo "  - name: $HOST_NAME" >> $FILE
+        echo "    address: $HOST_IP" >> $FILE
+        echo "    fqdn: $HOST_FQDN" >> $FILE
+    else
+        echo "Not confirmed"
+    fi
+    return 0
+}
+
+generate_dns_entry() {
+    FILE=$1
+    PROMPT=$2
+    read -p "${PROMPT}:" DNS_IP
+    if [ -z "$DNS_IP" ]; then
+        return 1
+    else
+        while ! ip_valid "$DNS_IP"
+        do
+            echo "This is not a valid IP! Try again."
+            read -p "${PROMPT}: " DNS_IP
+        done
+    fi
+    if confirm_yes "Are you sure you want to add this entry (y/n): "; then
+        echo "Confirmed"
+        echo "    - $DNS_IP" >> $FILE
+    else
+        echo "Not confirmed"
+    fi
+    return 0
+}
+
+confirm_yes() {
+    prompt=$1
+    while true
+    do
+        read -p "$prompt" YESNO
+        case $YESNO in
+            [Yy])
+                return 0
+                ;;
+            [Nn])
+                return 1
+                ;;
+        esac
+    done
+}
+
+generate_yaml_fragment() {
+    ENV=$1
+    FRAGMENT=${CONFIGDIR}/fragment.yaml.$ENV
+
+    if [ -f $FRAGMENT ]; then
+        echo "Manual configuration already performed, reusing previous data from $FRAGMENT."
+        echo "Press return to continue or ^C to stop."
+        read ans
+        return
+    fi
+
+    echo "opnfv:" > ${FRAGMENT}
+
+    clear
+    echo -e "\n\nPre-deployment configuration\n\n"
+
+    echo -e "\n\nIPs for the DNS servers to go into /etc/resolv.conf. You will be"
+    echo -e "prompted for one IP at a time. Press return on an empty line"
+    echo -e "to complete your input. If no DNS server is specified, the IP of"
+    echo -e "the Fuel master will be used instead.\n"
+
+    DNSCICYAML=${CONFIGDIR}/cicdns.yaml.$ENV
+    rm -f $DNSCICYAML
+
+    echo -e "\n\n"
+
+    while generate_dns_entry $DNSCICYAML "IP for CIC name servers"
+    do
+        :
+    done
+
+    if [ -f $DNSCICYAML ]; then
+        echo "  dns:" >> $FRAGMENT
+        echo "    controller:" >> $FRAGMENT
+        cat $DNSCICYAML >> $FRAGMENT
+    fi
+
+
+    DNSCMPYAML=${CONFIGDIR}/cmpdns.yaml.$ENV
+    rm -f $DNSCMPYAML
+
+    echo -e "\n\n"
+
+    while generate_dns_entry $DNSCMPYAML "IP for compute node name servers"
+    do
+        :
+    done
+
+
+    if [ -f $DNSCMPYAML ]; then
+        if [ ! -f $DNSCICYAML ]; then
+            echo "  dns:" >> $FRAGMENT
+        fi
+        echo "    compute:" >> $FRAGMENT
+        cat $DNSCMPYAML >> $FRAGMENT
+    fi
+
+    echo -e "\n\nHosts file additions for controllers and compute nodes. You will be"
+    echo -e "prompted for name, FQDN and IP for each entry. Press return when prompted"
+    echo -e "for a name when you have completed your input.\n"
+
+
+    HOSTYAML=${CONFIGDIR}/hosts.yaml.$ENV
+    rm -f $HOSTYAML
+    while generate_hostfile_entry $HOSTYAML
+    do
+        :
+    done
+
+    if [ -f $HOSTYAML ]; then
+        echo "  hosts:" >> $FRAGMENT
+        cat $HOSTYAML >> $FRAGMENT
+    fi
+
+    echo -e "\n\nNTP upstream configuration for controllers. You will be"
+    echo -e "prompted for an NTP server for each entry. Press return when prompted"
+    echo -e "for an NTP server when you have completed your input.\n"
+
+
+    NTPYAML=${CONFIGDIR}/ntp.yaml.$ENV
+    rm -f $NTPYAML
+    while generate_ntp_entry $NTPYAML
+    do
+        :
+    done
+
+    if [ -f $NTPYAML ]; then
+        echo "  ntp:" >> $FRAGMENT
+        echo "    controller: |" >> $FRAGMENT
+        cat $NTPYAML >> $FRAGMENT
+
+        echo "    compute: |" >> $FRAGMENT
+        for ctl in `find $CONFIGDIR/deployment_$ENV -name '*controller*.yaml'`
+        do
+           fqdn=`grep "^fqdn:" $ctl | sed 's/fqdn: *//'`
+           echo "      server $fqdn" >> $FRAGMENT
+        done
+    fi
+
+    # If nothing added make sure we get an empty opnfv hash
+    # instead of a NULL hash.
+    if [ $(wc -l $FRAGMENT | awk '{print $1}') -le 1 ]; then
+        echo "opnfv: {}" >$FRAGMENT
+    fi
+}
+
+ENV=$(get_env "$@")
+
+CONFIGDIR="/var/lib/opnfv"
+mkdir -p $CONFIGDIR
+
+get_deployment_info $ENV
+# Uncomment the below to enable the control_bond example
+#transform_yaml $ENV
+get_provisioning_info $ENV
+generate_yaml_fragment $ENV
+# The feature to change hostnames from node-<n> to cmp- or cic- is disabled.
+# To turn it on, uncomment the following line.
+#setup_hostnames $ENV
+add_yaml_fragment $ENV
+commit_changes $ENV
diff --git a/fuel/build/f_isoroot/f_predeployment/sysinfo.sh b/fuel/build/f_isoroot/f_predeployment/sysinfo.sh
new file mode 100755 (executable)
index 0000000..e99cac0
--- /dev/null
@@ -0,0 +1,12 @@
+#!/bin/sh
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+dockerctl shell cobbler cobbler system list | grep -v default | xargs -n 1 host  | sort | sed 's/\..* /\t/'
diff --git a/fuel/build/f_isoroot/f_predeployment/transform_yaml.py b/fuel/build/f_isoroot/f_predeployment/transform_yaml.py
new file mode 100755 (executable)
index 0000000..14eec4c
--- /dev/null
@@ -0,0 +1,68 @@
+#!/usr/bin/python
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Remove control and management network transformations from file.
+# Only to be used together with f_control_bond_example (enable in
+# pre-deploy.sh)
+
+import yaml
+import re
+import sys
+import os
+
+if len(sys.argv) != 2:
+    sys.stderr.write("Usage: "+sys.argv[0]+" <filename>\n")
+    sys.exit(1)
+
+filename = sys.argv[1]
+if not os.path.exists(filename):
+    sys.stderr.write("ERROR: The file "+filename+" could not be opened\n")
+    sys.exit(1)
+
+ignore_values = [ "eth0", "eth1", "br-mgmt", "br-fw-admin" ]
+
+infile = open(filename, 'r')
+doc = yaml.load(infile)
+infile.close()
+
+out={}
+
+for scheme in doc:
+    if scheme == "network_scheme":
+        mytransformation = {}
+        for operation in doc[scheme]:
+            if operation == "transformations":
+                # We need the base bridges for l23network to be happy,
+                # remove everything else.
+                mytrans = [ { "action": "add-br", "name": "br-mgmt" },
+                            { "action": "add-br", "name": "br-fw-admin" } ]
+                for trans in doc[scheme][operation]:
+                    delete = 0
+                    for ignore in ignore_values:
+                        matchObj = re.search(ignore,str(trans))
+                        if matchObj:
+                            delete = 1
+                    if delete == 0:
+                        mytrans.append(trans)
+                    else:
+                        pass
+                        #print "Deleted", trans
+
+                mytransformation[operation] = mytrans
+            else:
+                mytransformation[operation] = doc[scheme][operation]
+        out[scheme] = mytransformation
+    else:
+        out[scheme] = doc[scheme]
+
+outfile = open(filename, 'w')
+outfile.write(yaml.dump(out, default_flow_style=False))
+outfile.close()
diff --git a/fuel/build/f_isoroot/f_repobuild/Makefile b/fuel/build/f_isoroot/f_repobuild/Makefile
deleted file mode 100644 (file)
index 6bfbd35..0000000
+++ /dev/null
@@ -1,56 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-SHELL := /bin/bash
-TOP := $(shell pwd)
-DOCKNAME = fuelrepo
-DOCKVERSION = 1.0
-
-# try to choose close ubuntu mirror which support rsync protocol
-# https://bugs.launchpad.net/fuel/+bug/1459252
-MIRROR_URLS := $(shell curl -s http://mirrors.ubuntu.com/mirrors.txt)
-MIRROR_HOSTS := $(shell for url in ${MIRROR_URLS}; do echo $$url | cut -d'/' -f3; done)
-RSYNC_HOST := $(shell for host in ${MIRROR_HOSTS}; do rsync -4 --contimeout 5 --no-motd --list-only "$${host}::ubuntu/." &> /dev/null && echo $$host && break; done)
-
-.PHONY: all
-all: .nailgun
-
-.nailgun:
-       sudo apt-get update
-       sudo apt-get upgrade -y
-       sudo apt-get install -y rsync python python-yaml dpkg-dev openssl
-       rm -rf tmpiso tmpdir
-       mkdir tmpiso
-       fuseiso ${ISOCACHE} tmpiso
-       cp tmpiso/ubuntu/pool/main/f/fuel-createmirror/fuel-createmirror_6.1*.deb .
-       fusermount -u tmpiso
-       rm -rf tmpiso
-       sudo dpkg -i fuel-createmirror_6.1*.deb
-       sudo sed -i 's/DOCKER_MODE=true/DOCKER_MODE=false/' /etc/fuel-createmirror/common.cfg
-       sudo sed -i 's/DEBUG="no"/DEBUG="yes"/' /etc/fuel-createmirror/ubuntu.cfg
-       sudo sed -i 's/MIRROR_UBUNTU_HOST="archive.ubuntu.com"/MIRROR_UBUNTU_HOST="${RSYNC_HOST}"/' /etc/fuel-createmirror/common.cfg
-       rm -Rf nailgun
-       sudo mkdir -p /var/www
-       sudo su - -c /opt/fuel-createmirror-6.1/fuel-createmirror
-       sudo chmod -R 755 /var/www/nailgun
-       cp -Rp /var/www/nailgun .
-       touch .nailgun
-
-.PHONY: clean
-clean:
-       # Deliberately not cleaning nailgun directory to speed up multiple builds
-       @rm -rf ../release/opnfv/nailgun fuel-createmirror_6.1*.deb
-
-.PHONY: release
-release:.nailgun
-       @rm -Rf ../release/opnfv/nailgun
-       @mkdir -p ../release/opnfv
-       @cp -Rp nailgun ../release/opnfv/nailgun
-
diff --git a/fuel/build/f_l23network/Makefile b/fuel/build/f_l23network/Makefile
new file mode 100644 (file)
index 0000000..0949737
--- /dev/null
@@ -0,0 +1,28 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+TOP := $(shell pwd)
+
+.PHONY: all
+all:
+
+.PHONY: clean
+clean:
+       @rm -rf tmp
+       @rm -rf release
+
+.PHONY: validate-cache
+validate-cache:
+       @echo "No cache validation schema available for $(shell pwd)"
+       @echo "Continuing ..."
+
+.PHONY: release
+release:
+       @cp -Rvp puppet/modules/* $(PUPPET_DEST)
diff --git a/fuel/build/f_l23network/README b/fuel/build/f_l23network/README
new file mode 100644 (file)
index 0000000..9aa4718
--- /dev/null
@@ -0,0 +1,35 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+Addition to add entries to /etc/hosts through Astute.
+
+The astute.yaml file should contain entries like those below to have them picked up during deployment:
+
+opnfv:
+  hosts:
+  - name: test1
+    address: 192.168.100.100
+    fqdn: test1.opnfv.org
+  - name: test2
+    address: 192.168.100.101
+    fqdn: test2.opnfv.org
+  - name: test3
+    address: 192.168.100.102
+    fqdn: test3.opnfv.org
+
+The suggested method for adding this information is to prepare for deployment with the Fuel GUI or CLI,
+but before actually deploying:
+
+1. Download the current deployment for all hosts: fuel --env 1 deployment --default
+2. Iterate through the hosts in "deployment_1" and add the hosts configuration in the above format to
+   their respective yaml files.
+3. Upload the modified deployment information: fuel --env 1 deployment --upload
+
+After deploying, the additions will be included in /etc/astute.yaml of each host.
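+
+For example, the full sequence could look like the following (the file name under
+"deployment_1" is illustrative and depends on your node roles and IDs):
+
+  fuel --env 1 deployment --default
+  vi deployment_1/controller_1.yaml    # add the "opnfv: hosts:" block shown above
+  fuel --env 1 deployment --upload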
diff --git a/fuel/build/f_l23network/puppet/modules/l23network/lib/puppet/parser/functions/extras_to_hosts.rb b/fuel/build/f_l23network/puppet/modules/l23network/lib/puppet/parser/functions/extras_to_hosts.rb
new file mode 100644 (file)
index 0000000..33bfad8
--- /dev/null
@@ -0,0 +1,21 @@
+#
+# extras_to_hosts.rb
+#
+
+module Puppet::Parser::Functions
+  newfunction(:extras_to_hosts, :type => :rvalue, :doc => <<-EOS
+              convert extras array passed from Astute into
+              hash for puppet `host` create_resources call
+    EOS
+  ) do |args|
+    hosts=Hash.new
+    extras=args[0]
+    extras.each do |extra|
+      hosts[extra['name']]={:ip=>extra['address'],:host_aliases=>[extra['fqdn']]}
+      notice("Generating extras host entry #{extra['name']} #{extra['address']} #{extra['fqdn']}")
+    end
+    return hosts
+  end
+end
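+
+# Illustrative example (not executed): given the Astute input
+#   [{'name' => 'test1', 'address' => '192.168.100.100', 'fqdn' => 'test1.opnfv.org'}]
+# the function returns
+#   {'test1' => {:ip => '192.168.100.100', :host_aliases => ['test1.opnfv.org']}}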
+
+# vim: set ts=2 sw=2 et :
diff --git a/fuel/build/f_l23network/puppet/modules/l23network/manifests/hosts_file.pp b/fuel/build/f_l23network/puppet/modules/l23network/manifests/hosts_file.pp
new file mode 100644 (file)
index 0000000..05cff8d
--- /dev/null
@@ -0,0 +1,18 @@
+class l23network::hosts_file (
+  $nodes,
+  $extras=[],
+  $hosts_file = "/etc/hosts"
+) {
+
+  # OPNFV addition: Add additional lines in /etc/hosts through Astute additions
+
+  $host_resources = nodes_to_hosts($nodes)
+  $extras_host_resources = extras_to_hosts($extras)
+  Host {
+    ensure => present,
+    target => $hosts_file
+  }
+
+  create_resources(host, $host_resources)
+  create_resources(host, $extras_host_resources)
+}
diff --git a/fuel/build/f_l23network/puppet/modules/l23network/manifests/hosts_file.pp.orig b/fuel/build/f_l23network/puppet/modules/l23network/manifests/hosts_file.pp.orig
new file mode 100644 (file)
index 0000000..2295e3f
--- /dev/null
@@ -0,0 +1,16 @@
+class l23network::hosts_file (
+  $nodes,
+  $hosts_file = "/etc/hosts"
+) {
+
+  #Move original hosts file
+
+  $host_resources = nodes_to_hosts($nodes)
+
+  Host {
+    ensure => present,
+    target => $hosts_file
+  }
+
+  create_resources(host, $host_resources)
+}
diff --git a/fuel/build/f_l23network/testing/README b/fuel/build/f_l23network/testing/README
new file mode 100644 (file)
index 0000000..b68eddf
--- /dev/null
@@ -0,0 +1,12 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+In order to test the functionality without performing a full deployment, run "puppet apply" on
+fake_init.pp, which will call only the l23network::hosts_file class.
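+
+A minimal invocation (the module path shown is illustrative and depends on where the module
+is staged on the node) would be:
+
+  puppet apply --modulepath=/etc/puppet/modules fake_init.pp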
diff --git a/fuel/build/f_l23network/testing/fake_init.pp b/fuel/build/f_l23network/testing/fake_init.pp
new file mode 100644 (file)
index 0000000..bc6b163
--- /dev/null
@@ -0,0 +1,13 @@
+$fuel_settings = parseyaml($astute_settings_yaml)
+
+if $::fuel_settings['nodes'] {
+  $nodes_hash = $::fuel_settings['nodes']
+  $extras_hash = $::fuel_settings['opnfv']['hosts']
+
+  class {'l23network::hosts_file':
+    nodes  => $nodes_hash,
+    extras => $extras_hash
+  }
+
+  include l23network::hosts_file
+}
diff --git a/fuel/build/f_ntp/Makefile b/fuel/build/f_ntp/Makefile
new file mode 100644 (file)
index 0000000..0949737
--- /dev/null
@@ -0,0 +1,28 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+TOP := $(shell pwd)
+
+.PHONY: all
+all:
+
+.PHONY: clean
+clean:
+       @rm -rf tmp
+       @rm -rf release
+
+.PHONY: validate-cache
+validate-cache:
+       @echo "No cache validation schema available for $(shell pwd)"
+       @echo "Continuing ..."
+
+.PHONY: release
+release:
+       @cp -Rvp puppet/modules/* $(PUPPET_DEST)
diff --git a/fuel/build/f_ntp/README b/fuel/build/f_ntp/README
new file mode 100644 (file)
index 0000000..2bade72
--- /dev/null
@@ -0,0 +1,33 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+Addition to add ntp.conf separately for compute hosts and controller hosts through Astute.
+
+The astute.yaml file should contain entries like those below to have them picked up during deployment:
+
+opnfv:
+  ntp:
+    controller: |
+      line 1
+      line 2
+    compute: |
+      line 1
+      line 2
+
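+Here "line 1" and "line 2" stand for ordinary ntp.conf directives, for example (server
+names are placeholders only):
+
+    controller: |
+      server ntp1.example.org iburst
+      server ntp2.example.org iburst
+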
+The suggested method for adding this information is to prepare for deployment with the Fuel GUI or CLI,
+but before actually deploying:
+
+1. Download the current deployment for all hosts: fuel --env 1 deployment --default
+2. Iterate through the hosts in "deployment_1" and add hosts configuration in the above format to their
+   respective yaml file.
+3. Upload the modifed deployment information: fuel --env 1 deployment --upload
+
+After deploying, the additions will be included in /etc/astute.yaml of each host.
+
diff --git a/fuel/build/f_ntp/puppet/modules/opnfv/manifests/ntp.pp b/fuel/build/f_ntp/puppet/modules/opnfv/manifests/ntp.pp
new file mode 100644 (file)
index 0000000..c5dce1b
--- /dev/null
@@ -0,0 +1,80 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Class: Ntp
+#
+# Add Ntp content passed through astute.yaml into ntp.conf depending on the role
+#
+# Suitable yaml content:
+# <begin>
+# opnfv:
+#   ntp:
+#     controller: |
+#      line 1
+#      line 2
+#    compute: |
+#      line 1
+#      line 2
+# <end>
+#
+#
+#
+
+class opnfv::ntp(
+  $file='/etc/ntp.conf'
+) {
+
+  case $::operatingsystem {
+        centos, redhat: {
+          $service_name = 'ntpd'
+        }
+        debian, ubuntu: {
+          $service_name = 'ntp'
+        }
+  }
+
+  if $::fuel_settings['role'] {
+    if ($::fuel_settings['opnfv'] and
+    $::fuel_settings['opnfv']['ntp']) {
+      case $::fuel_settings['role'] {
+        /controller/: {
+          if $::fuel_settings['opnfv']['ntp']['controller'] {
+            $template = 'opnfv/ntp.conf.controller.erb'
+            $file_content = $::fuel_settings['opnfv']['ntp']['controller']
+          }
+        }
+        /compute/:    {
+          if $::fuel_settings['opnfv']['ntp']['compute'] {
+            $template = 'opnfv/ntp.conf.compute.erb'
+            $file_content = $::fuel_settings['opnfv']['ntp']['compute']
+          }
+        }
+      }
+    }
+  }
+
+  if $file_content {
+    package { 'ntp':
+      ensure => installed,
+    }
+
+    file { $file:
+      content => template($template),
+      notify  => Service['ntp'],
+    }
+
+    service { 'ntp':
+      ensure  => running,
+      name    => $service_name,
+      enable  => true,
+      require => [ Package['ntp'], File[$file]]
+    }
+  }
+}
diff --git a/fuel/build/f_ntp/puppet/modules/opnfv/templates/ntp.conf.compute.erb b/fuel/build/f_ntp/puppet/modules/opnfv/templates/ntp.conf.compute.erb
new file mode 100644 (file)
index 0000000..37ecfd7
--- /dev/null
@@ -0,0 +1,21 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+tinker panic 0
+driftfile /var/lib/ntp/ntp.drift
+statistics loopstats peerstats clockstats
+filegen loopstats file loopstats type day enable
+filegen peerstats file peerstats type day enable
+filegen clockstats file clockstats type day enable
+restrict -4 default kod notrap nomodify nopeer noquery
+restrict -6 default kod notrap nomodify nopeer noquery
+restrict 127.0.0.1
+restrict ::1
+<%= @file_content %>
diff --git a/fuel/build/f_ntp/puppet/modules/opnfv/templates/ntp.conf.controller.erb b/fuel/build/f_ntp/puppet/modules/opnfv/templates/ntp.conf.controller.erb
new file mode 100644 (file)
index 0000000..37ecfd7
--- /dev/null
@@ -0,0 +1,21 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+tinker panic 0
+driftfile /var/lib/ntp/ntp.drift
+statistics loopstats peerstats clockstats
+filegen loopstats file loopstats type day enable
+filegen peerstats file peerstats type day enable
+filegen clockstats file clockstats type day enable
+restrict -4 default kod notrap nomodify nopeer noquery
+restrict -6 default kod notrap nomodify nopeer noquery
+restrict 127.0.0.1
+restrict ::1
+<%= @file_content %>
diff --git a/fuel/build/f_ntp/testing/README b/fuel/build/f_ntp/testing/README
new file mode 100644 (file)
index 0000000..6d80b0a
--- /dev/null
@@ -0,0 +1,12 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+In order to test the functionality without performing a full deployment, run "puppet apply" on
+fake_init.pp, which will call only the opnfv::ntp class.
similarity index 72%
rename from fuel/deploy/environments/__init__.py
rename to fuel/build/f_ntp/testing/fake_init.pp
index fb73157..b9af218 100644 (file)
@@ -1,8 +1,13 @@
-###############################################################################
+##############################################################################
 # Copyright (c) 2015 Ericsson AB and others.
-# szilard.cserey@ericsson.com
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
 # All rights reserved. This program and the accompanying materials
 # are made available under the terms of the Apache License, Version 2.0
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
-###############################################################################
+##############################################################################
+
+$fuel_settings = parseyaml($astute_settings_yaml)
+
+include opnfv::ntp
diff --git a/fuel/build/f_odl_docker/Makefile b/fuel/build/f_odl_docker/Makefile
new file mode 100755 (executable)
index 0000000..6135e71
--- /dev/null
@@ -0,0 +1,51 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+TOP := $(shell pwd)
+BUILDTAG := loving_daniel
+
+# Edit this to match the GENESIS / OPNFV in your environment
+export OPNFV_PUPPET := $(BUILD_BASE)/../../common/puppet-opnfv
+include ../config.mk
+
+.PHONY: all
+all:
+       @mkdir -p puppet/modules/opnfv/odl_docker
+       @rm -rf tmp
+       @mkdir -p tmp
+       @cp -Rvp ${OPNFV_PUPPET}/manifests/templates/dockerfile tmp/.
+       @docker build -t ${BUILDTAG} tmp/dockerfile/.
+       @docker save ${BUILDTAG} > puppet/modules/opnfv/odl_docker/odl_docker_image.tar
+       @wget ${DOCKER_REPO}/${DOCKER_TAG} -O  puppet/modules/opnfv/odl_docker/docker-latest
+	@echo "OPNFV_PUPPET is: ${OPNFV_PUPPET}"
+       @cp -Rvp ${OPNFV_PUPPET}/manifests/templates/dockerfile/container_scripts  puppet/modules/opnfv
+
+.PHONY: clean
+clean:
+       @rm -rf tmp
+       @rm -rf release
+
+.PHONY: build-clean
+build-clean:
+       @rm -rf tmp
+       @rm -rf release
+       @rm -rf puppet/modules/opnfv/odl_docker/odl_docker_image.tar
+       @rm -rf puppet/modules/opnfv/odl_docker/docker-latest
+
+.PHONY: validate-cache
+validate-cache:
+       @echo "No cache validation schema available for $(shell pwd)"
+       @echo "Continuing ..."
+
+.PHONY: release
+release:
+       # Fetch PP from OPNFV Common
+       @cp -Rvp ${OPNFV_PUPPET}/manifests/odl_docker.pp ${PUPPET_DEST}
+       @cp -Rvp puppet/modules/* $(PUPPET_DEST)
diff --git a/fuel/build/f_odl_docker/dockerfile/Dockerfile b/fuel/build/f_odl_docker/dockerfile/Dockerfile
new file mode 100755 (executable)
index 0000000..e3c7ee5
--- /dev/null
@@ -0,0 +1,72 @@
+####################################################################
+#
+#   Dockerfile to build a ODL (Karaf) Docker Container
+#
+#   Copyright daniel.smith@ericsson.com
+#   License: Apache GPL
+#
+####################################################################
+
+
+# Set the base image. Note: the current Karaf release uses JDK 7 and much of the 12.04 tooling, so we use 12.04 rather than 14.04 to avoid backporting a lot of packages.
+FROM ubuntu:12.04
+
+# Maintainer Info
+MAINTAINER Daniel Smith
+
+# Run apt-get update on start just to check for updates when building
+RUN echo "Updating APT"
+RUN apt-get update
+RUN echo "Adding wget"
+RUN apt-get install -y wget
+RUN apt-get install -y net-tools
+RUN apt-get install -y openjdk-7-jre
+RUN apt-get install -y openjdk-7-jdk
+RUN apt-get install -y openssh-server
+RUN apt-get install -y vim
+RUN apt-get install -y expect
+RUN apt-get install -y daemontools
+RUN mkdir -p /opt/odl_source
+RUN bash -c 'echo "export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64" >> ~/.bashrc'
+
+
+# Now let's go and fetch the ODL distribution
+RUN echo "Fetching ODL"
+RUN wget https://nexus.opendaylight.org/content/groups/public/org/opendaylight/integration/distribution-karaf/0.2.3-Helium-SR3/distribution-karaf-0.2.3-Helium-SR3.tar.gz -O /opt/odl_source/distribution-karaf-0.2.3-Helium-SR3.tar.gz
+
+RUN echo "Untarring ODL inplace"
+RUN mkdir -p /opt/odl
+RUN tar zxvf /opt/odl_source/distribution-karaf-0.2.3-Helium-SR3.tar.gz -C /opt/odl
+
+RUN echo "Installing DLUX and other features into ODL"
+COPY tmp/dockerfile/container_scripts/start_odl_docker.sh /etc/init.d/start_odl_docker.sh
+COPY tmp/dockerfile/container_scripts/speak.sh /etc/init.d/speak.sh
+RUN chmod 777 /etc/init.d/start_odl_docker.sh
+RUN chmod 777 /etc/init.d/speak.sh
+
+
+# Expose the ports
+# PORTS FOR BASE SYSTEM AND DLUX
+EXPOSE 8101
+EXPOSE 6633
+EXPOSE 1099
+EXPOSE 43506
+EXPOSE 8181
+EXPOSE 8185
+EXPOSE 9000
+EXPOSE 39378
+EXPOSE 33714
+EXPOSE 44444
+EXPOSE 6653
+
+# PORTS FOR OVSDB AND ODL CONTROL
+EXPOSE 12001
+EXPOSE 6640
+EXPOSE 8080
+EXPOSE 7800
+EXPOSE 55130
+EXPOSE 52150
+EXPOSE 36826
+
+# Set the default command - this allows us to run this container as an executable
+CMD ["/etc/init.d/start_odl_docker.sh"]
diff --git a/fuel/build/f_odl_docker/dockerfile/container_scripts/check_feature.sh b/fuel/build/f_odl_docker/dockerfile/container_scripts/check_feature.sh
new file mode 100755 (executable)
index 0000000..3e5d0b2
--- /dev/null
@@ -0,0 +1,8 @@
+#!/usr/bin/expect
+spawn /opt/odl/distribution-karaf-0.2.3-Helium-SR3/bin/client
+expect "root>"
+send "feature:list | grep -i odl-restconf\r"
+send "\r\r\r"
+expect "root>"
+send "logout\r"
+
diff --git a/fuel/build/f_odl_docker/dockerfile/container_scripts/speak.sh b/fuel/build/f_odl_docker/dockerfile/container_scripts/speak.sh
new file mode 100755 (executable)
index 0000000..3ba07a8
--- /dev/null
@@ -0,0 +1,17 @@
+#!/usr/bin/expect
+# Ericsson Research Canada
+#
+# Author: Daniel Smith <daniel.smith@ericsson.com>
+#
+# Simple expect script to start up ODL client and load feature set for DLUX and OVSDB
+#
+#  NOTE: THIS WILL BE REPLACED WITH A PROGRAMMATIC METHOD SHORTLY
+#  DEPRECATED AFTER ARNO
+
+spawn /opt/odl/distribution-karaf-0.2.3-Helium-SR3/bin/client
+expect "root>"
+send "feature:install odl-base-all odl-aaa-authn odl-restconf odl-nsf-all odl-adsal-northbound odl-mdsal-apidocs  odl-ovsdb-openstack odl-ovsdb-northbound odl-dlux-core"
+send "\r\r\r"
+expect "root>"
+send "logout\r"
+
diff --git a/fuel/build/f_odl_docker/dockerfile/container_scripts/start_odl_docker.sh b/fuel/build/f_odl_docker/dockerfile/container_scripts/start_odl_docker.sh
new file mode 100755 (executable)
index 0000000..1c72dda
--- /dev/null
@@ -0,0 +1,38 @@
+#!/bin/bash
+#  Ericsson Research Canada
+#
+#  Author: Daniel Smith <daniel.smith@ericsson.com>
+#
+#  Start up script for calling karaf / ODL inside a docker container.
+#
+#  This script will also call a couple expect scripts to load the feature set that we want
+
+
+#ENV
+export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64
+
+#MAIN
+echo "Starting up the da Sheilds..."
+/opt/odl/distribution-karaf-0.2.3-Helium-SR3/bin/karaf server &
+echo "Sleeping 5 bad hack"
+sleep 10
+echo "should see stuff listening now"
+netstat -na
+echo " should see proess running for karaf"
+ps -efa
+echo " Starting the packages we want"
+/etc/init.d/speak.sh
+echo "Printout the status - if its right, you should see 8181 appear now"
+netstat -na
+ps -efa
+
+
+
+## This is a loop that keeps our container going currently, printing the "status of karaf" to the docker logs every minute
+## Cheap - but effective
+while true;
+do
+       echo "Checking status of ODL:"
+       /opt/odl/distribution-karaf-0.2.3-Helium-SR3/bin/status
+       sleep 60
+done
diff --git a/fuel/build/f_odl_docker/puppet/modules/opnfv/manifests/odl_docker.pp b/fuel/build/f_odl_docker/puppet/modules/opnfv/manifests/odl_docker.pp
new file mode 100644 (file)
index 0000000..c286127
--- /dev/null
@@ -0,0 +1,77 @@
+class opnfv::odl_docker
+{
+  case $::fuel_settings['role'] {
+    /controller/: {
+
+      file { '/opt':
+        ensure => 'directory',
+      }
+
+      file { '/opt/opnfv':
+        ensure => 'directory',
+        owner  => 'root',
+        group  => 'root',
+        mode   => 777,
+      }
+
+      file { '/opt/opnfv/odl':
+        ensure => 'directory',
+      }
+
+      file { '/opt/opnfv/odl/odl_docker_image.tar':
+        ensure => present,
+        source => '/etc/puppet/modules/opnfv/odl_docker/odl_docker_image.tar',
+        mode   => 750,
+      }
+
+      file { '/opt/opnfv/odl/docker-latest':
+        ensure => present,
+        source => '/etc/puppet/modules/opnfv/odl_docker/docker-latest',
+        mode   => 750,
+      }
+
+      file { '/opt/opnfv/odl/start_odl_conatiner.sh':
+        ensure => present,
+        source => '/etc/puppet/modules/opnfv/scripts/start_odl_container.sh',
+        mode   => 750,
+      }
+      file { '/opt/opnfv/odl/stage_odl.sh':
+        ensure => present,
+        source => '/etc/puppet/modules/opnfv/scripts/stage_odl.sh',
+        mode   => 750,
+      }
+      file { '/opt/opnfv/odl/config_net_odl.sh':
+        ensure => present,
+        source => '/etc/puppet/modules/opnfv/scripts/config_net_odl.sh',
+        mode   => 750,
+      }
+      file { '/opt/opnfv/odl/change.sh':
+        ensure => present,
+        source => '/etc/puppet/modules/opnfv/scripts/change.sh',
+        mode   => 750,
+      }
+
+
+      # fix failed to find the cgroup root issue
+      # https://github.com/docker/docker/issues/8791
+      case $::operatingsystem {
+        'ubuntu': {
+          package {'cgroup-lite':
+            ensure => present,
+          }
+
+          service {'cgroup-lite':
+            ensure  => running,
+            enable  => true,
+            require => Package['cgroup-lite'],
+          }
+        }
+        'centos': {
+          package {'docker-io':
+            ensure => latest,
+          }
+        }
+      }
+    }
+  }
+}
diff --git a/fuel/build/f_odl_docker/puppet/modules/opnfv/scripts/change.sh b/fuel/build/f_odl_docker/puppet/modules/opnfv/scripts/change.sh
new file mode 100644 (file)
index 0000000..f7f3d6e
--- /dev/null
@@ -0,0 +1,219 @@
+#!/bin/bash
+# script to remove bridges and reset networking for ODL
+
+
+#VARS
+MODE=0
+DNS=8.8.8.8
+
+#ENV
+source ~/openrc
+
+# GET IPS for that node
+function get_ips {
+       BR_MGMT=`grep address /etc/network/ifcfg_backup/ifcfg-br-mgmt | awk -F" " '{print $2}'`
+       BR_STORAGE=`grep address /etc/network/ifcfg_backup/ifcfg-br-storage | awk -F" " '{print $2}'`
+       BR_FW_ADMIN=`grep address /etc/network/ifcfg_backup/ifcfg-br-fw-admin | awk -F" " '{print $2}'`
+       BR_EX=`grep address /etc/network/ifcfg_backup/ifcfg-br-ex | awk -F" " '{print $2}'`
+       DEF_NETMASK=255.255.255.0
+       DEF_GW=172.30.9.1
+}
+
+function backup_ifcfg {
+        echo " backing up "
+        mkdir -p /etc/network/ifcfg_backup
+        mv /etc/network/interfaces.d/ifcfg-br-ex /etc/network/ifcfg_backup/.
+        mv /etc/network/interfaces.d/ifcfg-br-fw-admin /etc/network/ifcfg_backup/.
+        mv /etc/network/interfaces.d/ifcfg-br-mgmt /etc/network/ifcfg_backup/.
+        mv /etc/network/interfaces.d/ifcfg-br-storage /etc/network/ifcfg_backup/.
+        mv /etc/network/interfaces.d/ifcfg-br-prv /etc/network/ifcfg_backup/.
+        mv /etc/network/interfaces.d/ifcfg-eth0 /etc/network/ifcfg_backup/.
+        mv /etc/network/interfaces.d/ifcfg-eth1 /etc/network/ifcfg_backup/.
+        rm -rf /etc/network/interfaces.d/ifcfg-eth1.300
+        rm -rf /etc/network/interfaces.d/ifcfg-eth1.301
+        rm -rf /etc/network/interfaces.d/ifcfg-eth1
+        rm -rf /etc/network/interfaces.d/ifcfg-eth0
+
+}
+
+
+function create_ifcfg_br_mgmt {
+        echo "migrating br_mgmt"
+        echo "auto eth1.300" >> /etc/network/interfaces.d/ifcfg-eth1.300
+        echo "iface eth1.300 inet static" >> /etc/network/interfaces.d/ifcfg-eth1.300
+        echo "     address $BR_MGMT" >> /etc/network/interfaces.d/ifcfg-eth1.300
+        echo "     netmask $DEF_NETMASK" >> /etc/network/interfaces.d/ifcfg-eth1.300
+}
+
+function create_ifcfg_br_storage {
+        echo "migrating br_storage"
+        echo "auto eth1.301" >> /etc/network/interfaces.d/ifcfg-eth1.301
+        echo "iface eth1.301 inet static" >> /etc/network/interfaces.d/ifcfg-eth1.301
+        echo "     address $BR_STORAGE" >> /etc/network/interfaces.d/ifcfg-eth1.301
+        echo "     netmask $DEF_NETMASK" >> /etc/network/interfaces.d/ifcfg-eth1.301
+}
+
+function create_ifcfg_br_fw_admin {
+        echo " migrating br_fw_admin"
+        echo "auto eth1" >> /etc/network/interfaces.d/ifcfg-eth1
+        echo "iface eth1 inet static" >> /etc/network/interfaces.d/ifcfg-eth1
+        echo "     address $BR_FW_ADMIN" >> /etc/network/interfaces.d/ifcfg-eth1
+        echo "     netmask $DEF_NETMASK" >> /etc/network/interfaces.d/ifcfg-eth1
+}
+
+function create_ifcfg_eth0 {
+        echo "migrating br-ex to eth0 - temporarily"
+        echo "auto eth0" >> /etc/network/interfaces.d/ifcfg-eth0
+        echo "iface eth0 inet static" >> /etc/network/interfaces.d/ifcfg-eth0
+        echo "     address $BR_EX" >> /etc/network/interfaces.d/ifcfg-eth0
+        echo "     netmask $DEF_NETMASK" >> /etc/network/interfaces.d/ifcfg-eth0
+        echo "     gateway $DEF_GW" >> /etc/network/interfaces.d/ifcfg-eth0
+}
+
+function set_mode {
+       if [ -d "/var/lib/glance/images" ]
+       then 
+               echo " controller "
+               MODE=0
+       else 
+               echo " compute "
+               MODE=1
+       fi
+}
+
+
+function stop_ovs {
+        echo "Stopping OpenVSwitch"
+        service openvswitch-switch stop
+
+}
+
+function start_ovs {
+        echo "Starting OVS"
+        service openvswitch-switch start
+        ovs-vsctl show
+}
+
+
+function clean_ovs {
+        echo "cleaning OVS DB"
+        stop_ovs
+        rm -rf /var/log/openvswitch/*
+        mkdir -p /opt/opnfv/odl/ovs_back
+        cp -pr /etc/openvswitch/* /opt/opnfv/odl/ovs_back/.
+        rm -rf /etc/openvswitch/conf.db
+        echo "restarting OVS - you should see Nothing there"
+        start_ovs
+}
+
+
+
+function reboot_me {
+        reboot
+}
+
+function allow_challenge {
+       sed -i -e 's/ChallengeResponseAuthentication no/ChallengeResponseAuthentication yes/g' /etc/ssh/sshd_config
+       service ssh restart
+}
+
+function clean_neutron {
+       subnets=( `neutron subnet-list | awk -F" " '{print $2}' | grep -v id | sed '/^$/d'` )
+       networks=( `neutron net-list | awk -F" " '{print $2}' | grep -v id | sed '/^$/d'` )
+       ports=( `neutron port-list | awk -F" " '{print $2}' | grep -v id | sed '/^$/d'` )
+       routers=( `neutron router-list | awk -F" " '{print $2}' | grep -v id | sed '/^$/d'` )
+
+       #display all elements
+       echo "SUBNETS: ${subnets[@]} "
+       echo "NETWORKS: ${networks[@]} "
+       echo "PORTS: ${ports[@]} "
+       echo "ROUTERS: ${routers[@]} "
+       
+       
+       # get port and subnet for each router
+       for i in "${routers[@]}"
+       do
+               routerport=( `neutron router-port-list $i | awk -F" " '{print $2}' | grep -v id |  sed '/^$/d' `)
+               routersnet=( `neutron router-port-list $i | awk -F" " '{print $8}' | grep -v fixed |  sed '/^$/d' | sed 's/,$//' | sed -e 's/^"//'  -e 's/"$//' `)
+       done
+
+       echo "ROUTER PORTS: ${routerport[@]} "
+       echo "ROUTER SUBNET: ${routersnet[@]} "
+       
+       #remove router subnets
+       echo "router-interface-delete"
+       for i in "${routersnet[@]}"
+       do
+               neutron router-interface-delete ${routers[0]} $i
+       done
+
+       #remove subnets
+       echo "subnet-delete"
+       for i in "${subnets[@]}"
+       do
+               neutron subnet-delete $i
+       done
+
+       #remove nets
+       echo "net-delete"
+       for i in "${networks[@]}"
+       do
+               neutron net-delete $i
+       done
+
+       #remove routers
+       echo "router-delete"
+       for i in "${routers[@]}"
+       do
+               neutron router-delete $i
+       done
+
+       #remove ports
+       echo "port-delete"
+       for i in "${ports[@]}"
+       do
+               neutron port-delete $i
+       done
+
+       #remove subnets
+       echo "subnet-delete second pass"
+       for i in "${subnets[@]}"
+       do
+               neutron subnet-delete $i
+       done
+
+}
+
+function set_dns {
+	sed -i -e "s/nameserver 10.20.0.2/nameserver $DNS/g" /etc/resolv.conf
+}
+
+
+#OUTPUT
+
+function check {
+       echo $BR_MGMT
+       echo $BR_STORAGE
+       echo $BR_FW_ADMIN
+       echo $BR_EX
+}
+
+### MAIN
+
+
+set_mode
+backup_ifcfg
+get_ips
+create_ifcfg_br_mgmt
+create_ifcfg_br_storage
+create_ifcfg_br_fw_admin
+if [ $MODE == "0" ]
+then
+        create_ifcfg_eth0
+fi
+allow_challenge
+clean_ovs
+check
+reboot_me
+
+
diff --git a/fuel/build/f_odl_docker/puppet/modules/opnfv/scripts/config_net_odl.sh b/fuel/build/f_odl_docker/puppet/modules/opnfv/scripts/config_net_odl.sh
new file mode 100755 (executable)
index 0000000..145da80
--- /dev/null
@@ -0,0 +1,192 @@
+#!/bin/bash
+#
+# Author: Daniel Smith (Ericsson)
+#
+# Script to update neutron configuration for OVSDB/ODL integration
+#
+#  Usage - Set / pass CONTROL_HOST to your needs
+#
+### SET THIS VALUE TO MATCH YOUR SYSTEM
+CONTROL_HOST=192.168.0.2
+BR_EX_IP=172.30.9.70
+
+# ENV
+source ~/openrc
+# VARS
+ML2_CONF=/etc/neutron/plugins/ml2/ml2_conf.ini
+MODE=0
+
+
+# FUNCTIONS
+# Update ml2_conf.ini
+function update_ml2conf {
+        echo "Backing up and modifying ml2_conf.ini"
+        cp $ML2_CONF $ML2_CONF.bak
+        sed -i -e 's/mechanism_drivers =openvswitch/mechanism_drivers = opendaylight/g' $ML2_CONF
+        sed -i -e 's/tenant_network_types = flat,vlan,gre,vxlan/tenant_network_types = vxlan/g' $ML2_CONF
+        sed -i -e 's/bridge_mappings=physnet2:br-prv/bridge_mappings=physnet1:br-ex/g' $ML2_CONF
+        echo "[ml2_odl]" >> $ML2_CONF
+        echo "password = admin" >> $ML2_CONF
+        echo "username = admin" >> $ML2_CONF
+        echo "url = http://${CONTROL_HOST}:8080/controller/nb/v2/neutron" >> $ML2_CONF
+}
+
+function reset_neutrondb {
+        echo "Resetting DB"
+        mysql -e "drop database if exists neutron_ml2;"
+        mysql -e "create database neutron_ml2 character set utf8;"
+        mysql -e "grant all on neutron_ml2.* to 'neutron'@'%';"
+        neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini upgrade head
+}
+
+function restart_neutron {
+        echo "Restarting Neutron Server"
+        service neutron-server restart
+        echo "Should see Neutron running now"
+        service neutron-server status
+        echo "Shouldn't be any nets, but should work (return empty)"
+        neutron net-list
+}
+
+function stop_neutron {
+        echo "Stopping Neutron / OVS components"
+        service  neutron-plugin-openvswitch-agent stop
+        if [ $MODE == "0" ]
+        then
+                service neutron-server stop
+        fi
+}
+
+function disable_agent {
+       echo "Disabling Neutron Plugin Agents from running"
+       service neutron-plugin-openvswitch-agent stop
+       echo 'manual' > /etc/init/neutron-plugin-openvswitch-agent.override
+}
+
+
+
+function verify_ML2_working {
+        echo "checking that we can talk via ML2 properly"
+        curl -u admin:admin http://${CONTROL_HOST}:8080/controller/nb/v2/neutron/networks > /tmp/check_ml2
+        if grep "network" /tmp/check_ml2
+        then
+                echo "Success - ML2 to ODL is working"
+        else
+                echo "im sorry Jim, but its dead"
+        fi
+
+}
+
+
+function set_mode {
+        if [ -d "/var/lib/glance/images" ]
+        then
+                echo "Controller Mode"
+                MODE=0
+        else
+                echo "Compute Mode"
+                MODE=1
+        fi
+}
+
+function stop_ovs {
+        echo "Stopping OpenVSwitch"
+        service openvswitch-switch stop
+
+}
+
+function start_ovs {
+       echo "Starting OVS"
+	service openvswitch-switch start
+       ovs-vsctl show
+}
+
+
+function control_setup {
+        echo "Modifying Controller"
+        stop_neutron
+        stop_ovs
+       disable_agent
+        rm -rf /var/log/openvswitch/*
+        mkdir -p /opt/opnfv/odl/ovs_back
+        mv /etc/openvswitch/conf.db /opt/opnfv/odl/ovs_back/.
+        mv /etc/openvswitch/.conf*lock* /opt/opnfv/odl/ovs_back/.
+       rm -rf /etc/openvswitch/conf.db
+       rm -rf /etc/openvswitch/.conf*
+        service openvswitch-switch start
+        ovs-vsctl add-br br-ex
+        ovs-vsctl add-port br-ex eth0
+        ovs-vsctl set interface br-ex type=external
+        ifconfig br-ex ${BR_EX_IP}/24 up
+        service neutron-server restart
+
+        echo "setting up networks"
+        ip link add link eth1 name br-mgmt type vlan id 300
+       ifconfig br-mgmt `grep address /etc/network/interfaces.d/ifcfg-br-mgmt | awk -F" " '{print $2}'`/24 up arp
+        ip link add link eth1 name br-storage type vlan id 301
+       ip link add link eth1 name br-prv type vlan id 1000
+       ifconfig br-storage `grep address /etc/network/interfaces.d/ifcfg-br-storage | awk -F" " '{print $2}'`/24 up arp
+       ifconfig eth1 `grep address /etc/network/interfaces.d/ifcfg-br-fw-admin | awk -F" " '{print $2}'`/24 up arp
+
+       echo "Setting ODL Manager IP"
+        ovs-vsctl set-manager tcp:192.168.0.2:6640
+
+        echo "Verifying ODL ML2 plugin is working"
+        verify_ML2_working
+
+       # BAD HACK - Should be parameterized - this is to catch up 
+       route add default gw 172.30.9.1
+
+}
+
+function clean_ovs {
+       echo "cleaning OVS DB"
+       stop_ovs
+       rm -rf /var/log/openvswitch/*
+       mkdir -p /opt/opnfv/odl/ovs_back
+       cp -pr /etc/openvswitch/* /opt/opnfv/odl/ovs_back/.
+       rm -rf /etc/openvswitch/conf.db
+       echo "restarting OVS - you should see Nothing there"
+       start_ovs
+}
+
+function compute_setup {
+        echo "Modifying Compute"
+        echo "Disabling neutron openvswitch plugin"
+        stop_neutron
+       disable_agent
+        ip link add link eth1 name br-mgmt type vlan id 300
+        ifconfig br-mgmt `grep address /etc/network/interfaces.d/ifcfg-br-mgmt | awk -F" " '{print $2}'`/24 up arp
+        ip link add link eth1 name br-storage type vlan id 301
+       ip link add link eth1 name br-prv type vlan id 1000
+        ifconfig br-storage `grep address /etc/network/interfaces.d/ifcfg-br-storage | awk -F" " '{print $2}'`/24 up arp
+        ifconfig eth1 `grep address /etc/network/interfaces.d/ifcfg-br-fw-admin | awk -F" " '{print $2}'`/24 up arp
+
+        echo "set manager, and route for ODL controller"
+        ovs-vsctl set-manager tcp:192.168.0.2:6640
+        route add 172.17.0.1 gw 192.168.0.2
+        verify_ML2_working
+}
+
+
+# MAIN
+echo "Starting to make call"
+update_ml2conf
+echo "Check Mode"
+set_mode
+
+if [ $MODE == "0" ];
+then
+        echo "Calling control setup"
+        control_setup
+elif [ $MODE == "1" ];
+then
+        echo "Calling compute setup"
+        compute_setup
+
+else
+        echo "Something is bad - call for help"
+        exit
+fi
+
+
diff --git a/fuel/build/f_odl_docker/puppet/modules/opnfv/scripts/stage_odl.sh b/fuel/build/f_odl_docker/puppet/modules/opnfv/scripts/stage_odl.sh
new file mode 100755 (executable)
index 0000000..fa14b47
--- /dev/null
@@ -0,0 +1,54 @@
+#!/bin/bash
+#   Author: Daniel Smith (Ericsson)
+#   Stages the ODL Controller
+#   Inputs:  odl_docker_image.tar
+#   Usage:  ./stage_odl.sh
+
+# ENVS
+source ~/.bashrc
+source ~/openrc
+
+LOCALPATH=/opt/opnfv/odl
+DOCKERBIN=docker-latest
+ODLIMGNAME=odl_docker_image.tar
+DNS=8.8.8.8
+HOST_IP=`ifconfig br-ex | grep -i "inet addr" | awk -F":" '{print $2}' | awk -F" " '{print $1}'`
+
+
+
+# DEBUG ECHOS
+echo $LOCALPATH
+echo $DOCKERBIN
+echo $ODLIMGNAME
+echo $DNS
+echo $HOST_IP
+
+
+# Set DNS to something external and default GW - ODL requires a connection to the internet
+sed -i -e 's/nameserver 10.20.0.2/nameserver 8.8.8.8/g' /etc/resolv.conf
+route delete default gw 10.20.0.2
+route add default gw 172.30.9.1
+
+# Start the Docker daemon in the background
+echo "Starting Docker"
+chmod +x $LOCALPATH/$DOCKERBIN
+$LOCALPATH/$DOCKERBIN -d &
+#courtesy sleep for virtual env
+sleep 2
+
+# Import the ODL Container
+echo "Importing ODL Container"
+$LOCALPATH/$DOCKERBIN load -i $LOCALPATH/$ODLIMGNAME
+
+# Start ODL, load DLUX and OVSDB modules
+echo "Removing any old install found - file not found is ok here"
+$LOCALPATH/$DOCKERBIN rm odl_docker
+echo "Starting up ODL controller in Daemon mode - no shell possible"
+$LOCALPATH/$DOCKERBIN  run --name odl_docker -p 8181:8181 -p 8185:8185 -p 9000:9000 -p 1099:1099 -p 8101:8101 -p 6633:6633 -p 43506:43506 -p 44444:44444 -p 6653:6653 -p 12001:12001 -p 6400:6400 -p 6640:6640 -p 8080:8080 -p 7800:7800 -p 55130:55130 -p 52150:52150 -p 36826:26826 -i -d -t loving_daniel
+
+# Following, you should see the docker ps listed and a port opened
+echo " you should reach ODL controller at http://HOST_IP:8181/dlux/index.html"
+$LOCALPATH/$DOCKERBINNAME ps -a
+netstat -lnt
+
+
diff --git a/fuel/build/f_odl_docker/puppet/modules/opnfv/scripts/start_odl_container.sh b/fuel/build/f_odl_docker/puppet/modules/opnfv/scripts/start_odl_container.sh
new file mode 100755 (executable)
index 0000000..347ac74
--- /dev/null
@@ -0,0 +1,95 @@
+#!/bin/bash
+#  Ericsson Canada Inc.
+#  Author: Daniel Smith
+#
+#   A helper script to install and set up the ODL docker container on the controller
+#
+#
+#   Inputs:  odl_docker_image.tar
+#
+#   Usage:  ./start_odl_docker.sh
+echo "DEPRECATED - USE stage_odl.sh instead  - this will be removed shortly once automated deployment is working - SR1"
+
+
+# ENVS
+source ~/.bashrc
+source ~/openrc
+
+# VARS
+
+# Switch for Dev mode - uses apt-get on control to cheat and get docker installed locally rather than from puppet source
+
+DEV=1
+
+# Switch for 1:1 port mapping of EXPOSED ports in Docker to the host; if set to 0, random ports will be used. NOTE: this doesn't work for all web services (X port on Host --> Y port in Container),
+# especially for SSL/HTTPS cases. Be aware.
+
+MATCH_PORT=1
+
+LOCALPATH=/opt/opnfv/odl
+DOCKERBINNAME=docker-latest
+DOCKERIMAGENAME=odl_docker_image.tar
+DNS=8.8.8.8
+HOST_IP=`ifconfig br-fw-admin  | grep -i "inet addr" | awk -F":" '{print $2}' | awk -F" " '{print $1}'`
+
+
+# Set this to "1" if you want to have your docker container startup into a shell
+
+
+ENABLE_SHELL=1
+
+
+echo " Fetching Docker "
+if [ "$DEV" -eq "1" ];
+# If testing Locally (on a control node) you can set DEV=1 to enable apt-get based install on the control node (not desired target, but good for testing).
+then
+        echo "Dev Mode - Fetching from Internet";
+        echo " this wont work in production builds";
+        apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9
+        mkdir -p $LOCALPATH
+        wget https://get.docker.com/builds/Linux/x86_64/docker-latest -O $LOCALPATH/$DOCKERBINNAME
+        wget http://ftp.us.debian.org/debian/pool/main/d/docker.io/docker.io_1.3.3~dfsg1-2_amd64.deb
+        chmod 777 $LOCALPATH/$DOCKERBINNAME
+        echo "done ";
+else
+        echo "Using Binaries delivered from Puppet"
+       echo "Starting Docker in Daemon mode"
+       chmod +x $LOCALPATH/$DOCKERBINNAME
+       $LOCALPATH/$DOCKERBINNAME -d &
+
+  # wait until docker will be fully initialized
+  # before any further action against just started docker
+  sleep 5
+fi
+
+
+# We need to perform some cleanup of the Openstack Environment
+echo "TODO -- This should be automated in the Fuel deployment at some point"
+echo "However, the timing should come after basic tests are running, since this "
+echo " part will remove the subnet router association that is deployed automativally"
+echo " via fuel. Refer to the ODL + Openstack Integration Page "
+
+# Import the ODL container into docker
+
+echo "Importing ODL container into docker"
+$LOCALPATH/$DOCKERBINNAME load -i $LOCALPATH/$DOCKERIMAGENAME
+
+echo " starting up ODL - DLUX and Mapping Ports"
+if [ "$MATCH_PORT" -eq "1" ]
+then
+        echo "Starting up Docker..."
+        $LOCALPATH/$DOCKERBINNAME rm odl_docker
+fi
+
+if [ "$ENABLE_SHELL" -eq "1" ];
+then
+        echo "Starting Container in Interactive Mode (/bin/bash will be provided, you will need to run ./start_odl_docker.sh inside the container yourself)"
+        $LOCALPATH/$DOCKERBINNAME run --name odl_docker -p 8181:8181 -p 8185:8185 -p 9000:9000 -p 1099:1099 -p 8101:8101 -p 6633:6633 -p 43506:43506 -p 44444:44444 -p 6653:6653 -p 12001:12001 -p 6400:6400 -p 6640:6640 -p 8080:8080 -p 7800:7800 -p 55130:55130 -p 52150:52150 -p 36826:26826 -i -t loving_daniel  /bin/bash
+else
+        echo "Starting Container in Daemon mode - no shell will be provided (docker attach will not provide a shell)"
+        $LOCALPATH/$DOCKERBINNAME run --name odl_docker -p 8181:8181 -p 8185:8185 -p 9000:9000 -p 1099:1099 -p 8101:8101 -p 6633:6633 -p 43506:43506 -p 44444:44444 -p 6653:6653 -p 12001:12001 -p 6400:6400 -p 6640:6640 -p 8080:8080 -p 7800:7800 -p 55130:55130 -p 52150:52150 -p 36826:26826 -i -d -t loving_daniel
+        echo "should see the process listed here in docker ps -a"
+        $LOCALPATH/$DOCKERBINNAME ps -a;
+        echo "Match Port  enabled, you can reach the DLUX login at: "
+        echo "http://$HOST_IP:8181/dlux.index.html"
+fi
diff --git a/fuel/build/f_odl_docker/scripts/config_net_odl.sh b/fuel/build/f_odl_docker/scripts/config_net_odl.sh
new file mode 100644 (file)
index 0000000..d292acd
--- /dev/null
@@ -0,0 +1,164 @@
+#!/bin/bash
+#
+# Author: Daniel Smith (Ericsson)
+#
+# Script to update neutron configuration for OVSDB/ODL integration
+#
+#  Usage - Set / pass CONTROL_HOST to your needs
+#
+CONTROL_HOST=172.30.9.70
+
+# ENV
+source ~/openrc
+
+# VARS
+ML2_CONF=/etc/neutron/plugins/ml2/ml2_conf.ini
+MODE=0
+
+
+# FUNCTIONS
+
+# Update ml2_conf.ini
+function update_ml2conf {
+        echo "Backing up and modifying ml2_conf.ini"
+        cp $ML2_CONF $ML2_CONF.bak
+        sed -i -e 's/mechanism_drivers =openvswitch/mechanism_drivers = opendaylight/g' $ML2_CONF
+        sed -i -e 's/tenant_network_types = flat,vlan,gre,vxlan/tenant_network_types = vxlan/g' $ML2_CONF
+        echo "[ml2_odl]" >> $ML2_CONF
+        echo "password = admin" >> $ML2_CONF
+        echo "username = admin" >> $ML2_CONF
+        echo "url = http://${CONTROL_HOST}:8080/controller/nb/v2/neutron" >> $ML2_CONF
+}
+
+function reset_neutrondb {
+        echo "Resetting DB"
+        mysql -e "drop database if exists neutron_ml2;"
+        mysql -e "create database neutron_ml2 character set utf8;"
+        mysql -e "grant all on neutron_ml2.* to 'neutron'@'%';"
+        neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini upgrade head
+}
+
+function restart_neutron {
+        echo "Restarting Neutron Server"
+        service neutron-server restart
+        echo "Should see Neutron running now"
+        service neutron-server status
+        echo "Shouldn't be any nets, but should work (return empty)"
+        neutron net-list
+}
+
+function stop_neutron {
+        echo "Stopping Neutron / OVS components"
+        service  neutron-plugin-openvswitch-agent stop
+        if [ $MODE == "0" ]
+        then
+                service neutron-server stop
+        fi
+}
+
+
+
+function verify_ML2_working {
+        echo "checking that we can talk via ML2 properly"
+        curl -u admin:admin http://${CONTROL_HOST}:8080/controller/nb/v2/neutron/networks > /tmp/check_ml2
+        if grep "network" /tmp/check_ml2
+        then
+                echo "Success - ML2 to ODL is working"
+        else
+                echo "im sorry Jim, but its dead"
+        fi
+
+}
+
+
+function set_mode {
+        if ls -l /var/lib/glance/images
+        then
+                echo "Controller Mode"
+                MODE=0
+        else
+                echo "Compute Mode"
+                MODE=1
+        fi
+}
+
+function stop_ovs {
+        echo "Stopping OpenVSwitch"
+        service openvswitch-switch stop
+
+}
+
+function control_setup {
+        echo "Modifying Controller"
+        stop_neutron
+        stop_ovs
+        rm -rf /var/log/openvswitch/*
+        mkdir -p /opt/opnfv/odl/ovs_back
+        mv /etc/openvswitch/conf.db /opt/opnfv/odl/ovs_back/.
+        mv /etc/openvswitch/.conf*lock* /opt/opnfv/odl/ovs_back/.
+        service openvswitch-switch start
+        ovs-vsctl set-manager tcp:172.30.9.70:6640
+        ovs-vsctl add-br br-eth0
+        ovs-vsctl add-br br-ex
+        ovs-vsctl add-port br-eth0 eth0
+        ovs-vsctl add-port br-eth0 br-eth0--br-ex
+        ovs-vsctl add-port br-ex br-ex--br-eth0
+        ovs-vsctl set interface br-ex--br-eth0 type=patch
+        ovs-vsctl set interface br-eth0--br-ex type=patch
+        ovs-vsctl set interface br-ex--br-eth0 options:peer=br-eth0--br-ex
+        ovs-vsctl set interface br-eth0--br-ex options:peer=br-ex--br-eth0
+        ifconfig br-ex 172.30.9.70/24 up
+        service neutron-server restart
+
+        echo "setting up networks"
+        ip link add link eth1 name br-mgmt type vlan id 300
+        ip link add link eth1 name br-storage type vlan id 301
+        /etc/init.d/networking restart
+
+
+        echo "Reset Neutron DB"
+        #reset_neutrondb
+        echo "Restarting Neutron Components"
+        #restart_neutron
+        echo "Verifying ODL ML2 plugin is working"
+        verify_ML2_working
+
+}
+
+function compute_setup {
+        echo "do compute stuff here"
+        echo "stopping neutron openvswitch plugin"
+        stop_neutron
+        ip link add link eth1 name br-mgmt type vlan id 300
+        ifconfig br-mgmt `grep address /etc/network/interfaces.d/ifcfg-br-mgmt | awk -F" " '{print $2}'`/24
+        ip link add link eth1 name br-storage type vlan id 301
+        ifconfig br-storage `grep address /etc/network/interfaces.d/ifcfg-br-storage | awk -F" " '{print $2}'`/24
+        ifconfig eth1 `grep address /etc/network/interfaces.d/ifcfg-br-fw-mgmt | awk -F" " '{print $2}'`/24
+        echo "set manager, and route for ODL controller"
+        ovs-vsctl set-manager tcp:192.168.0.2:6640
+        route add 172.17.0.1 gw 192.168.0.2
+        verify_ML2_working
+}
+
+
+# MAIN
+echo "Starting to make call"
+update_ml2conf
+echo "Check Mode"
+set_mode
+
+if [ $MODE == "0" ];
+then
+        echo "Calling control setup"
+        control_setup
+elif [ $MODE == "1" ];
+then
+        echo "Calling compute setup"
+        compute_setup
+
+else
+        echo "Something is bad - call for help"
+        exit
+fi
+
+
diff --git a/fuel/build/f_odl_docker/scripts/config_neutron_for_odl.sh b/fuel/build/f_odl_docker/scripts/config_neutron_for_odl.sh
new file mode 100644 (file)
index 0000000..3b688ae
--- /dev/null
@@ -0,0 +1,146 @@
+#!/bin/bash
+CONTROL_HOST=172.17.0.3
+
+# ENV
+source ~/openrc
+
+
+
+# VARS
+ML2_CONF=/etc/neutron/plugins/ml2/ml2_conf.ini
+MODE=0
+
+
+# FUNCTIONS
+
+
+# Update ml2_conf.ini
+function update_ml2conf {
+        echo "Backing up and modifying ml2_conf.ini"
+        cp $ML2_CONF $ML2_CONF.bak
+        sed -i -e 's/mechanism_drivers =openvswitch/mechanism_drivers = opendaylight/g' $ML2_CONF
+        sed -i -e 's/tenant_network_types = flat,vlan,gre,vxlan/tenant_network_types = vxlan/g' $ML2_CONF
+        echo "[ml2_odl]" >> $ML2_CONF
+        echo "password = admin" >> $ML2_CONF
+        echo "username = admin" >> $ML2_CONF
+        echo "url = http://${CONTROL_HOST}:8080/controller/nb/v2/neutron" >> $ML2_CONF
+}
+
+function reset_neutrondb {
+        echo "Resetting DB"
+        mysql -e "drop database if exists neutron_ml2;"
+        mysql -e "create database neutron_ml2 character set utf8;"
+        mysql -e "grant all on neutron_ml2.* to 'neutron'@'%';"
+        neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini upgrade head
+}
+
+function restart_neutron {
+        echo "Restarting Neutron Server"
+        service neutron-server restart
+        echo "Should see Neutron running now"
+        service neutron-server status
+        echo "Shouldn't be any nets, but should work (return empty)"
+        neutron net-list
+}
+
+function stop_neutron {
+        echo "Stopping Neutron / OVS components"
+        service  neutron-plugin-openvswitch-agent stop
+        if [ $MODE == "0" ]
+        then
+                service neutron-server stop
+        fi
+}
+
+
+
+function verify_ML2_working {
+        echo "checking that we can talk via ML2 properly"
+        curl -u admin:admin http://${CONTROL_HOST}:8080/controller/nb/v2/neutron/networks > /tmp/check_ml2
+        if grep "network" /tmp/check_ml2
+        then
+                echo "Success - ML2 to ODL is working"
+        else
+                echo "im sorry Jim, but its dead"
+        fi
+
+}
+
+
+function set_mode {
+        if df -k | grep glance
+        then
+                echo "Controller Mode"
+                MODE=0
+        else
+                echo "Compute Mode"
+                MODE=1
+        fi
+}
+
+function stop_ovs {
+        echo "Stopping OpenVSwitch"
+        service openvswitch-switch stop
+
+}
+
+function control_setup {
+        echo "do control stuff here"
+        echo "Reset Neutron DB"
+        #reset_neutrondb
+        echo "Restarting Neutron Components"
+        #restart_neutron
+        echo "Verifying ODL ML2 plugin is working"
+        verify_ML2_working
+
+}
+
+function compute_setup {
+        echo "do compute stuff here"
+        stop_neutron
+        verify_ML2_working
+}
+
+
+# MAIN
+echo "Starting to make call"
+#update_ml2conf
+echo "Check Mode"
+set_mode
+
+if [ $MODE == "0" ];
+then
+        echo "Calling control setup"
+        control_setup
+elif [ $MODE == "1" ];
+then
+        echo "Calling compute setup"
+        compute_setup
+
+else
+        echo "Something is bad - call for help"
+        exit
+fi
+
+
diff --git a/fuel/build/f_odl_docker/scripts/prep_nets_for_odl.sh b/fuel/build/f_odl_docker/scripts/prep_nets_for_odl.sh
new file mode 100755 (executable)
index 0000000..dd4fc9f
--- /dev/null
@@ -0,0 +1,90 @@
+#!/bin/bash
+# a "cheat" way to install docker on the controller
+# can only be used if you have a connection out to the internet
+
+# Usage: ./install_docker.sh <ip of default route to remove> <ip of default gw to add>
+
+OLDGW=$1
+NEWGW=$2
+IMAGEPATH=/opt/opnfv
+IMAGENAME=odl_docker_image.tar
+SOURCES=/etc/apt/sources.list
+
+
+if [ "$#" -ne 2]; then
+        echo "Two args not provided, will not touch networking"
+else
+
+        # Fix routes
+        echo "Fixing routes"
+        #DEBUG
+        netstat -rn
+
+        echo "delete old def route"
+        route delete default gw $1
+        echo "adding new def route"
+        route add default gw $2
+
+        echo " you should see a good nslookup now"
+        nslookup www.google.ca
+fi
+
+
+if egrep "mirrors.txt" $SOURCES
+then
+        echo "Sources was already updated, not touching"
+else
+        echo "adding the closest mirrors and the docker mirror to the mix"
+        echo "deb mirror://mirrors.ubuntu.com/mirrors.txt precise main restricted universe multiverse" >> /etc/apt/sources.list
+        echo "deb mirror://mirrors.ubuntu.com/mirrors.txt precise-updates main restricted universe multiverse" >> /etc/apt/sources.list
+        echo "deb mirror://mirrors.ubuntu.com/mirrors.txt precise-backports main restricted universe multiverse" >> /etc/apt/sources.list
+        echo "deb mirror://mirrors.ubuntu.com/mirrors.txt precise-security main restricted universe multiverse" >> /etc/apt/sources.list
+        apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9
+        echo "deb https://get.docker.com/ubuntu docker main " > /etc/apt/sources.list.d/docker.list
+fi
+
+echo "Updating"
+apt-get update
+echo "Installing Docker"
+apt-get install -y lxc-docker
+
+echo "Loading ODL Docker Image"
+docker load -i $IMAGEPATH/$IMAGENAME
+
+
diff --git a/fuel/build/f_odl_docker/scripts/setup_ovs_for_odl.sh b/fuel/build/f_odl_docker/scripts/setup_ovs_for_odl.sh
new file mode 100644 (file)
index 0000000..42c9451
--- /dev/null
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+
+
+# ok .. so they created br-int
+
+# so let's add a physical nic to it
+
+
+# First - Removal all the bridges you find
+
+for i in $(ovs-vsctl list-br)
+do
+       if [ "$i" == "br-int" ];
+       then    
+               echo "skipped br-int"
+	elif [ "$i" == "br-prv" ];
+	then
+		echo "skipped br-prv"
+       else
+               ovs-vsctl del-br $i
+       fi
+done
diff --git a/fuel/build/f_opnfv_puppet/Makefile b/fuel/build/f_opnfv_puppet/Makefile
new file mode 100644 (file)
index 0000000..0949737
--- /dev/null
@@ -0,0 +1,28 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+TOP := $(shell pwd)
+
+.PHONY: all
+all:
+
+.PHONY: clean
+clean:
+       @rm -rf tmp
+       @rm -rf release
+
+.PHONY: validate-cache
+validate-cache:
+       @echo "No cache validation schema available for $(shell pwd)"
+       @echo "Continuing ..."
+
+.PHONY: release
+release:
+       @cp -Rvp puppet/modules/* $(PUPPET_DEST)
diff --git a/fuel/build/f_opnfv_puppet/README b/fuel/build/f_opnfv_puppet/README
new file mode 100644 (file)
index 0000000..35bea5a
--- /dev/null
@@ -0,0 +1,12 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+This is the top-level "OPNFV" Puppet class which (hopefully) will only be used
+to include an appropriate set of sub-classes, each of which is self-contained.
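+
+As noted in the opnfv class itself, a node manifest then only needs a single
+statement to pull in all OPNFV additions, e.g. from site.pp:
+
+  include opnfv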
diff --git a/fuel/build/f_opnfv_puppet/puppet/modules/opnfv/manifests/add_packages.pp b/fuel/build/f_opnfv_puppet/puppet/modules/opnfv/manifests/add_packages.pp
new file mode 100644 (file)
index 0000000..ccb3939
--- /dev/null
@@ -0,0 +1,9 @@
+# Class: opnfv::add_packages
+#
+# Ensure that packages added by OPNFV are installed.
+#
+
+class opnfv::add_packages {
+  if $::osfamily == 'Debian' {
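+    # Currently empty; a (hypothetical) package resource added here would
+    # look like:  package { 'some-package': ensure => installed }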
+  }
+}
diff --git a/fuel/build/f_opnfv_puppet/puppet/modules/opnfv/manifests/init.pp b/fuel/build/f_opnfv_puppet/puppet/modules/opnfv/manifests/init.pp
new file mode 100644 (file)
index 0000000..54f1c86
--- /dev/null
@@ -0,0 +1,28 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# == Class: opnfv
+#
+# This class is used to perform OPNFV inclusions and settings on top of
+# the vanilla Fuel installation.
+#
+# Currently all logic is self contained, i.e. it is sufficient to
+# "include opnfv" from site.pp.
+
+class opnfv {
+  # Configure resolv.conf if parameters passed through astute
+  include opnfv::resolver
+  # Setup OPNFV style NTP config
+  include opnfv::ntp
+  # Make sure all added packages are installed
+  include opnfv::add_packages
+  # Setup OpenDaylight
+  include opnfv::odl_docker
+}
diff --git a/fuel/build/f_opnfv_puppet/puppet/modules/opnfv/manifests/opncheck.pp b/fuel/build/f_opnfv_puppet/puppet/modules/opnfv/manifests/opncheck.pp
new file mode 100644 (file)
index 0000000..0822f02
--- /dev/null
@@ -0,0 +1,21 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Class: opnfv::opncheck
+#
+# Make sure that /opt/opnfv/pre-deploy.sh has been run by
+# verifying there is an "opnfv:" level in the astute.yaml.
+
+class opnfv::opncheck()
+{
+  unless $::fuel_settings['opnfv'] {
+    fail("Error: You have not run /opt/opnfv/pre-deploy.sh on the Fuel master prior to deploying!")
+  }
+}
diff --git a/fuel/build/f_osnaily/Makefile b/fuel/build/f_osnaily/Makefile
new file mode 100644 (file)
index 0000000..0949737
--- /dev/null
@@ -0,0 +1,28 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+TOP := $(shell pwd)
+
+.PHONY: all
+all:
+
+.PHONY: clean
+clean:
+       @rm -rf tmp
+       @rm -rf release
+
+.PHONY: validate-cache
+validate-cache:
+       @echo "No cache validation schema available for $(shell pwd)"
+       @echo "Continuing ..."
+
+.PHONY: release
+release:
+       @cp -Rvp puppet/modules/* $(PUPPET_DEST)
diff --git a/fuel/build/f_osnaily/puppet/modules/osnailyfacter/examples/site.pp b/fuel/build/f_osnaily/puppet/modules/osnailyfacter/examples/site.pp
new file mode 100644 (file)
index 0000000..05cd9e0
--- /dev/null
@@ -0,0 +1,366 @@
+$fuel_settings = parseyaml($astute_settings_yaml)
+
+$openstack_version = {
+  'keystone'   => 'installed',
+  'glance'     => 'installed',
+  'horizon'    => 'installed',
+  'nova'       => 'installed',
+  'novncproxy' => 'installed',
+  'cinder'     => 'installed',
+}
+
+tag("${::fuel_settings['deployment_id']}::${::fuel_settings['environment']}")
+
+#Stages configuration
+stage {'zero': } ->
+stage {'opncheck': } ->
+stage {'first': } ->
+stage {'openstack-custom-repo': } ->
+stage {'netconfig': } ->
+stage {'corosync_setup': } ->
+stage {'openstack-firewall': } -> Stage['main']
+
+class begin_deployment ()
+{
+  $role = $::fuel_settings['role']
+  notify { "***** Beginning deployment of node ${::hostname} with role $role *****": }
+}
+
+class {'begin_deployment': stage => 'zero' }
+
+stage {'glance-image':
+  require => Stage['main'],
+}
+
+if $::fuel_settings['nodes'] {
+  $nodes_hash = $::fuel_settings['nodes']
+# OPNFV addition to add to hosts file
+  if ($::fuel_settings['opnfv'] and
+    $::fuel_settings['opnfv']['hosts']) {
+    $extras_hash = $::fuel_settings['opnfv']['hosts']
+  } else {
+    $extras_hash = undef
+  }
+
+  $dns_nameservers=$::fuel_settings['dns_nameservers']
+  $node = filter_nodes($nodes_hash,'name',$::hostname)
+  if empty($node) {
+    fail("Node $::hostname is not defined in the hash structure")
+  }
+
+  $default_gateway = $node[0]['default_gateway']
+
+  $base_syslog_hash     = $::fuel_settings['base_syslog']
+  $syslog_hash          = $::fuel_settings['syslog']
+
+  $disable_offload      = $::fuel_settings['disable_offload']
+  if $disable_offload {
+    L23network::L3::Ifconfig<||> {
+      ethtool =>     {
+        'K' => ['gso off',  'gro off'],
+      }
+    }
+  }
+
+  $use_neutron = $::fuel_settings['quantum']
+
+  if (!empty(filter_nodes($::fuel_settings['nodes'], 'role', 'ceph-osd')) or
+    $::fuel_settings['storage']['volumes_ceph'] or
+    $::fuel_settings['storage']['images_ceph'] or
+    $::fuel_settings['storage']['objects_ceph']
+  ) {
+    $use_ceph = true
+  } else {
+    $use_ceph = false
+  }
+
+
+  if $use_neutron {
+    prepare_network_config($::fuel_settings['network_scheme'])
+    #
+    $internal_int     = get_network_role_property('management', 'interface')
+    $internal_address = get_network_role_property('management', 'ipaddr')
+    $internal_netmask = get_network_role_property('management', 'netmask')
+    #
+    $public_int = get_network_role_property('ex', 'interface')
+    if $public_int {
+      $public_address = get_network_role_property('ex', 'ipaddr')
+      $public_netmask = get_network_role_property('ex', 'netmask')
+
+      # TODO(Xarses): remove this after completing merge of
+      # multiple-cluster-networks
+      L23network::L3::Ifconfig<| title == $public_int |> {
+        default_gateway => true
+      }
+    } else {
+      # TODO(Xarses): remove this after completing merge of
+      # multiple-cluster-networks
+      $fw_admin_int = get_network_role_property('fw-admin', 'interface')
+      L23network::L3::Ifconfig<| title == $fw_admin_int |> {
+        default_gateway => true
+      }
+    }
+    #
+    $storage_address = get_network_role_property('storage', 'ipaddr')
+    $storage_netmask = get_network_role_property('storage', 'netmask')
+  } else {
+    $internal_address = $node[0]['internal_address']
+    $internal_netmask = $node[0]['internal_netmask']
+    $public_address = $node[0]['public_address']
+    $public_netmask = $node[0]['public_netmask']
+    $storage_address = $node[0]['storage_address']
+    $storage_netmask = $node[0]['storage_netmask']
+    $public_br = $node[0]['public_br']
+    $internal_br = $node[0]['internal_br']
+    $public_int   = $::fuel_settings['public_interface']
+    $internal_int = $::fuel_settings['management_interface']
+
+    # TODO(Xarses): remove this after completing merge of
+    # multiple-cluster-networks
+    L23network::L3::Ifconfig<| title == $public_int |> {
+      default_gateway => true
+    }
+
+  }
+}
+
+if ($::fuel_settings['neutron_mellanox']) {
+  $mellanox_mode = $::fuel_settings['neutron_mellanox']['plugin']
+} else {
+  $mellanox_mode = 'disabled'
+}
+
+# This parameter specifies the verbosity level of log messages
+# in openstack components config.
+# Debug would have set DEBUG level and ignore verbose settings, if any.
+# Verbose would have set INFO level messages
+# In case of non debug and non verbose - WARNING, default level would have set.
+$verbose = true
+$debug = $::fuel_settings['debug']
+
+### Storage Settings ###
+# Determine if any ceph parts have been asked for.
+# This will ensure that monitors are set up on controllers, even if no
+#  ceph-osd roles during deployment
+
+
+### Syslog ###
+#TODO(bogdando) move logging options to astute.yaml
+# Enable error messages reporting to rsyslog. Rsyslog must be installed in this case.
+$use_syslog = $::fuel_settings['use_syslog'] ? { default=>true }
+# Syslog facilities for main openstack services
+# should vary (reserved usage)
+# local1 is reserved for openstack-dashboard
+$syslog_log_facility_glance     = 'LOG_LOCAL2'
+$syslog_log_facility_cinder     = 'LOG_LOCAL3'
+$syslog_log_facility_neutron    = 'LOG_LOCAL4'
+$syslog_log_facility_nova       = 'LOG_LOCAL6'
+$syslog_log_facility_keystone   = 'LOG_LOCAL7'
+# could be the same
+# local0 is free for use
+$syslog_log_facility_murano     = 'LOG_LOCAL0'
+$syslog_log_facility_heat       = 'LOG_LOCAL0'
+$syslog_log_facility_sahara     = 'LOG_LOCAL0'
+$syslog_log_facility_ceilometer = 'LOG_LOCAL0'
+$syslog_log_facility_ceph       = 'LOG_LOCAL0'
+
+### Monit ###
+# Monit for compute nodes.
+# If enabled, will install monit and configure its watchdogs to track
+# nova-compute/api/network (and openvswitch service, if neutron enabled)
+# at compute nodes.
+# TODO(bogdando) set to true once monit package shipped with Fuel ISO
+$use_monit = false
+
+$nova_rate_limits = {
+  'POST' => 100000,
+  'POST_SERVERS' => 100000,
+  'PUT' => 1000, 'GET' => 100000,
+  'DELETE' => 100000
+}
+$cinder_rate_limits = {
+  'POST' => 100000,
+  'POST_SERVERS' => 100000,
+  'PUT' => 100000, 'GET' => 100000,
+  'DELETE' => 100000
+}
+
+###
+class advanced_node_netconfig {
+    $sdn = generate_network_config()
+    notify {"SDN: ${sdn}": }
+}
+
+case $::operatingsystem {
+  'redhat' : {
+    $queue_provider = 'qpid'
+    $custom_mysql_setup_class = 'pacemaker_mysql'
+  }
+  default: {
+    $queue_provider='rabbitmq'
+    $custom_mysql_setup_class='galera'
+  }
+}
+
+class os_common {
+  # OPNFV check if pre_deploy.sh has been run, otherwise fail
+  class {'opnfv::opncheck': stage => 'opncheck' }
+  if ($::fuel_settings['neutron_mellanox']) {
+    if ($::mellanox_mode != 'disabled') {
+      class { 'mellanox_openstack::ofed_recompile' :
+        stage => 'zero',
+      }
+    }
+    if ($::fuel_settings['storage']['iser']) {
+      class { 'mellanox_openstack::iser_rename':
+        stage => 'zero',
+        storage_parent => $::fuel_settings['neutron_mellanox']['storage_parent'],
+        iser_interface_name => $::fuel_settings['neutron_mellanox']['iser_interface_name'],
+      }
+      Class['mellanox_openstack::ofed_recompile'] -> Class['mellanox_openstack::iser_rename']
+    }
+  }
+  class {"l23network::hosts_file": stage => 'netconfig', nodes => $nodes_hash, extras => $extras_hash }
+  class {'l23network': use_ovs=>$use_neutron, stage=> 'netconfig'}
+  if $use_neutron {
+      class {'advanced_node_netconfig': stage => 'netconfig' }
+  } else {
+      class {'osnailyfacter::network_setup': stage => 'netconfig'}
+  }
+
+  if ($::osfamily == 'RedHat') {
+    package {'irqbalance': ensure => present} -> service {'irqbalance': ensure => running }
+  }
+
+  class { 'openstack::firewall':
+    stage => 'openstack-firewall',
+    nova_vnc_ip_range => $::fuel_settings['management_network_range'],
+  }
+
+  $base_syslog_rserver  = {
+    'remote_type' => 'tcp',
+    'server' => $base_syslog_hash['syslog_server'],
+    'port' => $base_syslog_hash['syslog_port']
+  }
+
+  # setting kernel reserved ports
+  # defaults are 49000,35357,41055,58882
+  class { 'openstack::reserved_ports':
+    stage => 'netconfig',
+  }
+
+  # setting service down time and report interval
+  # to 60 and 180 for Nova respectively to allow kernel
+  # to kill dead connections
+  # (see zendesk #1158 as well)
+  $nova_report_interval = '60'
+  $nova_service_down_time  = '180'
+
+  $syslog_rserver = {
+    'remote_type' => $syslog_hash['syslog_transport'],
+    'server' => $syslog_hash['syslog_server'],
+    'port' => $syslog_hash['syslog_port'],
+  }
+  if $syslog_hash['syslog_server'] != "" and $syslog_hash['syslog_port'] != "" and $syslog_hash['syslog_transport'] != "" {
+    $rservers = [$base_syslog_rserver, $syslog_rserver]
+  } else {
+    $rservers = [$base_syslog_rserver]
+  }
+
+  if $use_syslog {
+    class { "::openstack::logging":
+      stage          => 'first',
+      role           => 'client',
+      show_timezone  => true,
+      # log both locally include auth, and remote
+      log_remote     => true,
+      log_local      => true,
+      log_auth_local => true,
+      # keep four weekly log rotations, force rotate if 300M size have exceeded
+      rotation       => 'weekly',
+      keep           => '4',
+      # should be > 30M
+      limitsize      => '300M',
+      # remote servers to send logs to
+      rservers       => $rservers,
+      # should be true, if client is running at virtual node
+      virtual        => str2bool($::is_virtual),
+      # Rabbit doesn't support syslog directly
+      rabbit_log_level => 'NOTICE',
+      debug            => $debug,
+    }
+  }
+
+  class { 'osnailyfacter::atop':
+    stage => 'first',
+  }
+
+  class { 'osnailyfacter::ssh': }
+
+  #case $role {
+    #    /controller/:          { $hostgroup = 'controller' }
+    #    /swift-proxy/: { $hostgroup = 'swift-proxy' }
+    #    /storage/:{ $hostgroup = 'swift-storage'  }
+    #    /compute/: { $hostgroup = 'compute'  }
+    #    /cinder/: { $hostgroup = 'cinder'  }
+    #    default: { $hostgroup = 'generic' }
+    #}
+
+    #  if $nagios != 'false' {
+    #  class {'nagios':
+    #    proj_name       => $proj_name,
+    #    services        => [
+    #      'host-alive','nova-novncproxy','keystone', 'nova-scheduler',
+    #      'nova-consoleauth', 'nova-cert', 'haproxy', 'nova-api', 'glance-api',
+    #      'glance-registry','horizon', 'rabbitmq', 'mysql',
+    #    ],
+    #    whitelist       => ['127.0.0.1', $nagios_master],
+    #    hostgroup       => $hostgroup ,
+    #  }
+    # }
+
+  # Workaround for fuel bug with firewall
+  firewall {'003 remote rabbitmq ':
+    sport   => [ 4369, 5672, 15672, 41055, 55672, 61613 ],
+    source  => $::fuel_settings['master_ip'],
+    proto   => 'tcp',
+    action  => 'accept',
+    require => Class['openstack::firewall'],
+  }
+
+  firewall {'004 remote puppet ':
+    sport   => [ 8140 ],
+    source  => $master_ip,
+    proto   => 'tcp',
+    action  => 'accept',
+    require => Class['openstack::firewall'],
+  }
+
+  class { 'puppet::pull' :
+    modules_source   => $::fuel_settings['puppet_modules_source'],
+    manifests_source => $::fuel_settings['puppet_manifests_source'],
+  }
+} # OS_COMMON ENDS
+
+
+
+node default {
+  case $::fuel_settings['deployment_mode'] {
+    "singlenode": {
+      include "osnailyfacter::cluster_simple"
+      class {'os_common':}
+      class {'opnfv':}
+      }
+    "multinode": {
+      include "osnailyfacter::cluster_simple"
+      class {'os_common':}
+      class {'opnfv':}
+      }
+    /^(ha|ha_compact)$/: {
+      include "osnailyfacter::cluster_ha"
+      class {'os_common':}
+      class {'opnfv':}
+      }
+    "rpmcache": { include osnailyfacter::rpmcache }
+  }
+}
diff --git a/fuel/build/f_osnaily/puppet/modules/osnailyfacter/examples/site.pp.orig b/fuel/build/f_osnaily/puppet/modules/osnailyfacter/examples/site.pp.orig
new file mode 100644 (file)
index 0000000..9ed557a
--- /dev/null
@@ -0,0 +1,353 @@
+$fuel_settings = parseyaml($astute_settings_yaml)
+
+$openstack_version = {
+  'keystone'   => 'installed',
+  'glance'     => 'installed',
+  'horizon'    => 'installed',
+  'nova'       => 'installed',
+  'novncproxy' => 'installed',
+  'cinder'     => 'installed',
+}
+
+tag("${::fuel_settings['deployment_id']}::${::fuel_settings['environment']}")
+
+#Stages configuration
+stage {'zero': } ->
+stage {'first': } ->
+stage {'openstack-custom-repo': } ->
+stage {'netconfig': } ->
+stage {'corosync_setup': } ->
+stage {'openstack-firewall': } -> Stage['main']
+
+class begin_deployment ()
+{
+  $role = $::fuel_settings['role']
+  notify { "***** Beginning deployment of node ${::hostname} with role $role *****": }
+}
+
+class {'begin_deployment': stage => 'zero' }
+
+stage {'glance-image':
+  require => Stage['main'],
+}
+
+if $::fuel_settings['nodes'] {
+  $nodes_hash = $::fuel_settings['nodes']
+  $dns_nameservers=$::fuel_settings['dns_nameservers']
+  $node = filter_nodes($nodes_hash,'name',$::hostname)
+  if empty($node) {
+    fail("Node $::hostname is not defined in the hash structure")
+  }
+
+  $default_gateway = $node[0]['default_gateway']
+
+  $base_syslog_hash     = $::fuel_settings['base_syslog']
+  $syslog_hash          = $::fuel_settings['syslog']
+
+  $disable_offload      = $::fuel_settings['disable_offload']
+  if $disable_offload {
+    L23network::L3::Ifconfig<||> {
+      ethtool =>     {
+        'K' => ['gso off',  'gro off'],
+      }
+    }
+  }
+
+  $use_neutron = $::fuel_settings['quantum']
+
+  if (!empty(filter_nodes($::fuel_settings['nodes'], 'role', 'ceph-osd')) or
+    $::fuel_settings['storage']['volumes_ceph'] or
+    $::fuel_settings['storage']['images_ceph'] or
+    $::fuel_settings['storage']['objects_ceph']
+  ) {
+    $use_ceph = true
+  } else {
+    $use_ceph = false
+  }
+
+
+  if $use_neutron {
+    prepare_network_config($::fuel_settings['network_scheme'])
+    #
+    $internal_int     = get_network_role_property('management', 'interface')
+    $internal_address = get_network_role_property('management', 'ipaddr')
+    $internal_netmask = get_network_role_property('management', 'netmask')
+    #
+    $public_int = get_network_role_property('ex', 'interface')
+    if $public_int {
+      $public_address = get_network_role_property('ex', 'ipaddr')
+      $public_netmask = get_network_role_property('ex', 'netmask')
+
+      # TODO(Xarses): remove this after completing merge of
+      # multiple-cluster-networks
+      L23network::L3::Ifconfig<| title == $public_int |> {
+        default_gateway => true
+      }
+    } else {
+      # TODO(Xarses): remove this after completing merge of
+      # multiple-cluster-networks
+      $fw_admin_int = get_network_role_property('fw-admin', 'interface')
+      L23network::L3::Ifconfig<| title == $fw_admin_int |> {
+        default_gateway => true
+      }
+    }
+    #
+    $storage_address = get_network_role_property('storage', 'ipaddr')
+    $storage_netmask = get_network_role_property('storage', 'netmask')
+  } else {
+    $internal_address = $node[0]['internal_address']
+    $internal_netmask = $node[0]['internal_netmask']
+    $public_address = $node[0]['public_address']
+    $public_netmask = $node[0]['public_netmask']
+    $storage_address = $node[0]['storage_address']
+    $storage_netmask = $node[0]['storage_netmask']
+    $public_br = $node[0]['public_br']
+    $internal_br = $node[0]['internal_br']
+    $public_int   = $::fuel_settings['public_interface']
+    $internal_int = $::fuel_settings['management_interface']
+
+    # TODO(Xarses): remove this after completing merge of
+    # multiple-cluster-networks
+    L23network::L3::Ifconfig<| title == $public_int |> {
+      default_gateway => true
+    }
+
+  }
+}
+
+if ($::fuel_settings['neutron_mellanox']) {
+  $mellanox_mode = $::fuel_settings['neutron_mellanox']['plugin']
+} else {
+  $mellanox_mode = 'disabled'
+}
+
+# This parameter specifies the verbosity level of log messages
+# in openstack components config.
+# Debug would have set DEBUG level and ignore verbose settings, if any.
+# Verbose would have set INFO level messages
+# In case of non debug and non verbose - WARNING, default level would have set.
+$verbose = true
+$debug = $::fuel_settings['debug']
+
+### Storage Settings ###
+# Determine if any ceph parts have been asked for.
+# This will ensure that monitors are set up on controllers, even if no
+#  ceph-osd roles during deployment
+
+
+### Syslog ###
+#TODO(bogdando) move logging options to astute.yaml
+# Enable error messages reporting to rsyslog. Rsyslog must be installed in this case.
+$use_syslog = $::fuel_settings['use_syslog'] ? { default=>true }
+# Syslog facilities for main openstack services
+# should vary (reserved usage)
+# local1 is reserved for openstack-dashboard
+$syslog_log_facility_glance     = 'LOG_LOCAL2'
+$syslog_log_facility_cinder     = 'LOG_LOCAL3'
+$syslog_log_facility_neutron    = 'LOG_LOCAL4'
+$syslog_log_facility_nova       = 'LOG_LOCAL6'
+$syslog_log_facility_keystone   = 'LOG_LOCAL7'
+# could be the same
+# local0 is free for use
+$syslog_log_facility_murano     = 'LOG_LOCAL0'
+$syslog_log_facility_heat       = 'LOG_LOCAL0'
+$syslog_log_facility_sahara     = 'LOG_LOCAL0'
+$syslog_log_facility_ceilometer = 'LOG_LOCAL0'
+$syslog_log_facility_ceph       = 'LOG_LOCAL0'
+
+### Monit ###
+# Monit for compute nodes.
+# If enabled, will install monit and configure its watchdogs to track
+# nova-compute/api/network (and openvswitch service, if neutron enabled)
+# at compute nodes.
+# TODO(bogdando) set to true once monit package shipped with Fuel ISO
+$use_monit = false
+
+$nova_rate_limits = {
+  'POST' => 100000,
+  'POST_SERVERS' => 100000,
+  'PUT' => 1000, 'GET' => 100000,
+  'DELETE' => 100000
+}
+$cinder_rate_limits = {
+  'POST' => 100000,
+  'POST_SERVERS' => 100000,
+  'PUT' => 100000, 'GET' => 100000,
+  'DELETE' => 100000
+}
+
+###
+class advanced_node_netconfig {
+    $sdn = generate_network_config()
+    notify {"SDN: ${sdn}": }
+}
+
+case $::operatingsystem {
+  'redhat' : {
+    $queue_provider = 'qpid'
+    $custom_mysql_setup_class = 'pacemaker_mysql'
+  }
+  default: {
+    $queue_provider='rabbitmq'
+    $custom_mysql_setup_class='galera'
+  }
+}
+
+class os_common {
+  if ($::fuel_settings['neutron_mellanox']) {
+    if ($::mellanox_mode != 'disabled') {
+      class { 'mellanox_openstack::ofed_recompile' :
+        stage => 'zero',
+      }
+    }
+    if ($::fuel_settings['storage']['iser']) {
+      class { 'mellanox_openstack::iser_rename':
+        stage => 'zero',
+        storage_parent => $::fuel_settings['neutron_mellanox']['storage_parent'],
+        iser_interface_name => $::fuel_settings['neutron_mellanox']['iser_interface_name'],
+      }
+      Class['mellanox_openstack::ofed_recompile'] -> Class['mellanox_openstack::iser_rename']
+    }
+  }
+
+  class {"l23network::hosts_file": stage => 'netconfig', nodes => $nodes_hash }
+  class {'l23network': use_ovs=>$use_neutron, stage=> 'netconfig'}
+  if $use_neutron {
+      class {'advanced_node_netconfig': stage => 'netconfig' }
+  } else {
+      class {'osnailyfacter::network_setup': stage => 'netconfig'}
+  }
+
+  if ($::osfamily == 'RedHat') {
+    package {'irqbalance': ensure => present} -> service {'irqbalance': ensure => running }
+  }
+
+  class { 'openstack::firewall':
+    stage => 'openstack-firewall',
+    nova_vnc_ip_range => $::fuel_settings['management_network_range'],
+  }
+
+  $base_syslog_rserver  = {
+    'remote_type' => 'tcp',
+    'server' => $base_syslog_hash['syslog_server'],
+    'port' => $base_syslog_hash['syslog_port']
+  }
+
+  # setting kernel reserved ports
+  # defaults are 49000,35357,41055,58882
+  class { 'openstack::reserved_ports':
+    stage => 'netconfig',
+  }
+
+  # setting service down time and report interval
+  # to 60 and 180 for Nova respectively to allow kernel
+  # to kill dead connections
+  # (see zendesk #1158 as well)
+  $nova_report_interval = '60'
+  $nova_service_down_time  = '180'
+
+  $syslog_rserver = {
+    'remote_type' => $syslog_hash['syslog_transport'],
+    'server' => $syslog_hash['syslog_server'],
+    'port' => $syslog_hash['syslog_port'],
+  }
+  if $syslog_hash['syslog_server'] != "" and $syslog_hash['syslog_port'] != "" and $syslog_hash['syslog_transport'] != "" {
+    $rservers = [$base_syslog_rserver, $syslog_rserver]
+  } else {
+    $rservers = [$base_syslog_rserver]
+  }
+
+  if $use_syslog {
+    class { "::openstack::logging":
+      stage          => 'first',
+      role           => 'client',
+      show_timezone  => true,
+      # log both locally include auth, and remote
+      log_remote     => true,
+      log_local      => true,
+      log_auth_local => true,
+      # keep four weekly log rotations, force rotate if 300M size have exceeded
+      rotation       => 'weekly',
+      keep           => '4',
+      # should be > 30M
+      limitsize      => '300M',
+      # remote servers to send logs to
+      rservers       => $rservers,
+      # should be true, if client is running at virtual node
+      virtual        => str2bool($::is_virtual),
+      # Rabbit doesn't support syslog directly
+      rabbit_log_level => 'NOTICE',
+      debug            => $debug,
+    }
+  }
+
+  class { 'osnailyfacter::atop':
+    stage => 'first',
+  }
+
+  class { 'osnailyfacter::ssh': }
+
+  #case $role {
+    #    /controller/:          { $hostgroup = 'controller' }
+    #    /swift-proxy/: { $hostgroup = 'swift-proxy' }
+    #    /storage/:{ $hostgroup = 'swift-storage'  }
+    #    /compute/: { $hostgroup = 'compute'  }
+    #    /cinder/: { $hostgroup = 'cinder'  }
+    #    default: { $hostgroup = 'generic' }
+    #}
+
+    #  if $nagios != 'false' {
+    #  class {'nagios':
+    #    proj_name       => $proj_name,
+    #    services        => [
+    #      'host-alive','nova-novncproxy','keystone', 'nova-scheduler',
+    #      'nova-consoleauth', 'nova-cert', 'haproxy', 'nova-api', 'glance-api',
+    #      'glance-registry','horizon', 'rabbitmq', 'mysql',
+    #    ],
+    #    whitelist       => ['127.0.0.1', $nagios_master],
+    #    hostgroup       => $hostgroup ,
+    #  }
+    # }
+
+  # Workaround for fuel bug with firewall
+  firewall {'003 remote rabbitmq ':
+    sport   => [ 4369, 5672, 15672, 41055, 55672, 61613 ],
+    source  => $::fuel_settings['master_ip'],
+    proto   => 'tcp',
+    action  => 'accept',
+    require => Class['openstack::firewall'],
+  }
+
+  firewall {'004 remote puppet ':
+    sport   => [ 8140 ],
+    source  => $master_ip,
+    proto   => 'tcp',
+    action  => 'accept',
+    require => Class['openstack::firewall'],
+  }
+
+  class { 'puppet::pull' :
+    modules_source   => $::fuel_settings['puppet_modules_source'],
+    manifests_source => $::fuel_settings['puppet_manifests_source'],
+  }
+} # OS_COMMON ENDS
+
+
+
+node default {
+  case $::fuel_settings['deployment_mode'] {
+    "singlenode": {
+      include "osnailyfacter::cluster_simple"
+      class {'os_common':}
+      }
+    "multinode": {
+      include "osnailyfacter::cluster_simple"
+      class {'os_common':}
+      }
+    /^(ha|ha_compact)$/: {
+      include "osnailyfacter::cluster_ha"
+      class {'os_common':}
+      }
+    "rpmcache": { include osnailyfacter::rpmcache }
+  }
+}
diff --git a/fuel/build/f_resolvconf/Makefile b/fuel/build/f_resolvconf/Makefile
new file mode 100644 (file)
index 0000000..0949737
--- /dev/null
@@ -0,0 +1,28 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+TOP := $(shell pwd)
+
+.PHONY: all
+all:
+
+.PHONY: clean
+clean:
+       @rm -rf tmp
+       @rm -rf release
+
+.PHONY: validate-cache
+validate-cache:
+       @echo "No cache validation schema available for $(shell pwd)"
+       @echo "Continuing ..."
+
+.PHONY: release
+release:
+       @cp -Rvp puppet/modules/* $(PUPPET_DEST)
diff --git a/fuel/build/f_resolvconf/README b/fuel/build/f_resolvconf/README
new file mode 100644 (file)
index 0000000..5ff570f
--- /dev/null
@@ -0,0 +1,36 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+Addition to generate resolv.conf separately for compute hosts and controller
+hosts through Astute.
+
+The astute.yaml file should contain entries as those below to have them picked
+up during deployment:
+
+opnfv:
+  dns:
+    compute:
+    - 100.100.100.2
+    - 100.100.100.3
+    controller:
+    - 100.100.100.102
+    - 100.100.100.104
+
+The suggested method for adding this information is to prepare for deployment
+with the Fuel GUI or CLI, but before actually deploying:
+
+1. Download the current deployment for all hosts: fuel --env 1 deployment --default
+2. Iterate through the hosts in "deployment_1" and add the host configuration
+   in the above format to their respective yaml files.
+3. Upload the modified deployment information: fuel --env 1 deployment --upload
+
+After deploying, the additions will be included in /etc/astute.yaml of each
+host.
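+
+A rough sketch of steps 1-3 above (the environment id "1" and the node file
+name "compute_4.yaml" are examples only; adjust them to your deployment and
+repeat the append for every node yaml file):
+
+fuel --env 1 deployment --default
+cat >> deployment_1/compute_4.yaml << EOF
+opnfv:
+  dns:
+    compute:
+    - 100.100.100.2
+    - 100.100.100.3
+    controller:
+    - 100.100.100.102
+    - 100.100.100.104
+EOF
+fuel --env 1 deployment --upload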
+
diff --git a/fuel/build/f_resolvconf/puppet/modules/opnfv/manifests/resolver.pp b/fuel/build/f_resolvconf/puppet/modules/opnfv/manifests/resolver.pp
new file mode 100644 (file)
index 0000000..44f36a2
--- /dev/null
@@ -0,0 +1,73 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Class: opnfv::resolver
+#
+# Add resolver content passed through astute.yaml into resolv.conf
+# depending on the role
+#
+# Suitable yaml content:
+# <begin>
+# opnfv:
+#  dns:
+#    compute:
+#    - 100.100.100.2
+#    - 100.100.100.3
+#    controller:
+#    - 100.100.100.102
+#    - 100.100.100.104
+# <end>
+#
+#
+#
+
+class opnfv::resolver()
+{
+  if $::fuel_settings['role'] {
+    if $::fuel_settings['role']  == 'primary-controller' {
+      $role = 'controller'
+    } else {
+      $role = $::fuel_settings['role']
+    }
+
+    if ($::fuel_settings['opnfv']
+        and $::fuel_settings['opnfv']['dns']
+        and $::fuel_settings['opnfv']['dns'][$role]) {
+      $nameservers=$::fuel_settings['opnfv']['dns'][$role]
+
+      file { '/etc/resolv.conf':
+            owner   => root,
+            group   => root,
+            mode    => '0644',
+            content => template('opnfv/resolv.conf.erb'),
+      }
+
+      # /etc/resolv.conf is re-generated at each boot by resolvconf, so we
+      # need to store there as well.
+
+      case $::operatingsystem {
+        'ubuntu': {
+          file { '/etc/resolvconf/resolv.conf.d/head':
+            owner   => root,
+            group   => root,
+            mode    => '0644',
+            content => template('opnfv/resolv.conf.erb'),
+          }
+        }
+        'centos': {
+          exec { 'for file in ifcfg-eth*; do grep -q -F "PEERDNS=" $file || echo "PEERDNS=no" >> $file; done ':
+            provider => 'shell',
+            cwd      => '/etc/sysconfig/network-scripts',
+          }
+        }
+      }
+    }
+  }
+}
diff --git a/fuel/build/f_resolvconf/puppet/modules/opnfv/templates/resolv.conf.erb b/fuel/build/f_resolvconf/puppet/modules/opnfv/templates/resolv.conf.erb
new file mode 100644 (file)
index 0000000..7a29dca
--- /dev/null
@@ -0,0 +1,15 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Dynamic resolv.conf(5) file for glibc resolver(3) generated by resolvconf(8)
+#     DO NOT EDIT THIS FILE BY HAND -- YOUR CHANGES WILL BE OVERWRITTEN
+# Modified by OPNFV.
+<% @nameservers.each do |ns| %>nameserver <%= ns %>
+<% end -%>
diff --git a/fuel/build/f_resolvconf/testing/README b/fuel/build/f_resolvconf/testing/README
new file mode 100644 (file)
index 0000000..6846a8d
--- /dev/null
@@ -0,0 +1,13 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+In order to test the functionality without performing a full deployment,
+run "puppet apply" on fake_init.pp, which will apply only the
+opnfv::resolver class.
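+
+For example (a sketch, assuming the opnfv module from this feature has been
+copied onto the Puppet modulepath and astute facts are available on the node):
+
+  puppet apply --modulepath=/etc/puppet/modules fake_init.pp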
diff --git a/fuel/build/f_resolvconf/testing/fake_init.pp b/fuel/build/f_resolvconf/testing/fake_init.pp
new file mode 100644 (file)
index 0000000..496dcd2
--- /dev/null
@@ -0,0 +1,13 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+$fuel_settings = parseyaml($astute_settings_yaml)
+
+include opnfv::resolver
diff --git a/fuel/build/fuel-agent_1.patch b/fuel/build/fuel-agent_1.patch
deleted file mode 100644 (file)
index b080896..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-*** build/repos/nailgun/fuel_agent/fuel_agent/manager.py.orig  Thu Sep 24 11:08:38 2015
---- build/repos/nailgun/fuel_agent/fuel_agent/manager.py       Thu Sep 24 11:10:25 2015
-***************
-*** 541,546 ****
---- 541,552 ----
-                      fs_options=fs.options,
-                      fs_label=fs.label,
-                      dev=str(fs.device))
-+                 if fs.type == 'ext4':
-+                     LOG.debug('Trying to disable journaling for ext4 '
-+                               'in order to speed up the build')
-+                     utils.execute('tune2fs', '-O', '^has_journal',
-+                                   str(fs.device))
-+ 
-  
-              # mounting all images into chroot tree
-              self.mount_target(chroot, treat_mtab=False, pseudo=False)
-***************
-*** 631,636 ****
---- 637,652 ----
-              self.umount_target(chroot, pseudo=False, try_lazy_umount=False)
-  
-              for image in self.driver.image_scheme.images:
-+                 # find fs with the same loop device object
-+                 # as image.target_device
-+                 fs = self.driver.partition_scheme.fs_by_device(
-+                     image.target_device)
-+ 
-+                 if fs.type == 'ext4':
-+                     LOG.debug('Trying to re-enable journaling for ext4')
-+                     utils.execute('tune2fs', '-O', 'has_journal',
-+                                   str(fs.device))
-+ 
-                  LOG.debug('Deattaching loop device from file: %s',
-                            image.img_tmp_file)
-                  bu.deattach_loop(str(image.target_device))
diff --git a/fuel/build/fuel-main_1.patch b/fuel/build/fuel-main_1.patch
new file mode 100644 (file)
index 0000000..24b25b2
--- /dev/null
@@ -0,0 +1,104 @@
+diff --git a/docker/astute/Dockerfile b/docker/astute/Dockerfile
+index 55f617a..cd05f19 100644
+--- a/docker/astute/Dockerfile
++++ b/docker/astute/Dockerfile
+@@ -5,7 +5,7 @@
+ FROM fuel/centos
+ MAINTAINER Matthew Mosesohn mmosesohn@mirantis.com
+
+-RUN rm -rf /etc/yum.repos.d/*; echo -e "[nailgun]\nname=Nailgun Local Repo\nbaseurl=http://$(route -n | awk '/^0.0.0.0/ { print $2 }'):_PORT_/os/x86_64/\ngpgcheck=0" > /etc/yum.repos.d/nailgun.repo; yum clean all; yum --quiet install -y ruby21-nailgun-mcagents sysstat
++RUN sleep 15; rm -rf /etc/yum.repos.d/*; echo -e "[nailgun]\nname=Nailgun Local Repo\nbaseurl=http://$(route -n | awk '/^0.0.0.0/ { print $2 }'):_PORT_/os/x86_64/\ngpgcheck=0" > /etc/yum.repos.d/nailgun.repo; yum clean all; yum --quiet install -y ruby21-nailgun-mcagents sysstat
+
+ ADD etc /etc
+ ADD start.sh /usr/local/bin/start.sh
+diff --git a/docker/cobbler/Dockerfile b/docker/cobbler/Dockerfile
+index 0c80abd..3a3d966 100644
+--- a/docker/cobbler/Dockerfile
++++ b/docker/cobbler/Dockerfile
+@@ -5,7 +5,7 @@
+ FROM fuel/centos
+ MAINTAINER Matthew Mosesohn mmosesohn@mirantis.com
+
+-RUN rm -rf /etc/yum.repos.d/*;echo -e "[nailgun]\nname=Nailgun Local Repo\nbaseurl=http://$(route -n | awk '/^0.0.0.0/ { print $2 }'):_PORT_/os/x86_64/\ngpgcheck=0" > /etc/yum.repos.d/nailgun.repo; yum clean all; yum --quiet install -y httpd cobbler dnsmasq xinetd tftp-server; ln -s /etc/dnsmasq.conf /etc/cobbler.dnsmasq.conf
++RUN sleep 15; rm -rf /etc/yum.repos.d/*;echo -e "[nailgun]\nname=Nailgun Local Repo\nbaseurl=http://$(route -n | awk '/^0.0.0.0/ { print $2 }'):_PORT_/os/x86_64/\ngpgcheck=0" > /etc/yum.repos.d/nailgun.repo; yum clean all; yum --quiet install -y httpd cobbler dnsmasq xinetd tftp-server; ln -s /etc/dnsmasq.conf /etc/cobbler.dnsmasq.conf
+
+ ADD etc /etc
+ ADD start.sh /usr/local/bin/start.sh
+diff --git a/docker/mcollective/Dockerfile b/docker/mcollective/Dockerfile
+index e70e87d..d6554b7 100644
+--- a/docker/mcollective/Dockerfile
++++ b/docker/mcollective/Dockerfile
+@@ -4,7 +4,7 @@ MAINTAINER Aleksandr Didenko adidenko@mirantis.com
+
+ WORKDIR /root
+
+-RUN rm -rf /etc/yum.repos.d/*; echo -e "[nailgun]\nname=Nailgun Local Repo\nbaseurl=http://$(route -n | awk '/^0.0.0.0/ { print $2 }'):_PORT_/os/x86_64/\ngpgcheck=0" > /etc/yum.repos.d/nailgun.repo; yum clean all; yum --quiet install -y sudo ruby21-mcollective
++RUN sleep 15; rm -rf /etc/yum.repos.d/*; echo -e "[nailgun]\nname=Nailgun Local Repo\nbaseurl=http://$(route -n | awk '/^0.0.0.0/ { print $2 }'):_PORT_/os/x86_64/\ngpgcheck=0" > /etc/yum.repos.d/nailgun.repo; yum clean all; yum --quiet install -y sudo ruby21-mcollective
+
+ ADD etc /etc
+ ADD start.sh /usr/local/bin/start.sh
+diff --git a/docker/ostf/Dockerfile b/docker/ostf/Dockerfile
+index 43f911e..8da9108 100644
+--- a/docker/ostf/Dockerfile
++++ b/docker/ostf/Dockerfile
+@@ -5,7 +5,7 @@
+ FROM fuel/centos
+ MAINTAINER Matthew Mosesohn mmosesohn@mirantis.com
+
+-RUN rm -rf /etc/yum.repos.d/*;echo -e "[nailgun]\nname=Nailgun Local Repo\nbaseurl=http://$(route -n | awk '/^0.0.0.0/ { print $2 }'):_PORT_/os/x86_64/\ngpgcheck=0" > /etc/yum.repos.d/nailgun.repo;yum clean all; yum --quiet install -y python-fuelclient supervisor postgresql-libs
++RUN sleep 15; rm -rf /etc/yum.repos.d/*;echo -e "[nailgun]\nname=Nailgun Local Repo\nbaseurl=http://$(route -n | awk '/^0.0.0.0/ { print $2 }'):_PORT_/os/x86_64/\ngpgcheck=0" > /etc/yum.repos.d/nailgun.repo;yum clean all; yum --quiet install -y python-fuelclient supervisor postgresql-libs
+
+ ADD etc /etc
+ ADD start.sh /usr/local/bin/start.sh
+diff --git a/docker/postgres/Dockerfile b/docker/postgres/Dockerfile
+index b2930db..63cc4c2 100644
+--- a/docker/postgres/Dockerfile
++++ b/docker/postgres/Dockerfile
+@@ -3,7 +3,7 @@ FROM fuel/centos
+
+ MAINTAINER Aleksandr Didenko adidenko@mirantis.com
+
+-RUN rm -rf /etc/yum.repos.d/*; echo -e "[nailgun]\nname=Nailgun Local Repo\nbaseurl=http://$(route -n | awk '/^0.0.0.0/ { print $2 }'):_PORT_/os/x86_64/\ngpgcheck=0" > /etc/yum.repos.d/nailgun.repo;yum clean all;yum --quiet install -y sudo
++RUN sleep 15; rm -rf /etc/yum.repos.d/*; echo -e "[nailgun]\nname=Nailgun Local Repo\nbaseurl=http://$(route -n | awk '/^0.0.0.0/ { print $2 }'):_PORT_/os/x86_64/\ngpgcheck=0" > /etc/yum.repos.d/nailgun.repo;yum clean all;yum --quiet install -y sudo
+
+ ADD etc /etc
+ ADD start.sh /usr/local/bin/start.sh
+diff --git a/docker/rabbitmq/Dockerfile b/docker/rabbitmq/Dockerfile
+index 201648f..4f3b67c 100644
+--- a/docker/rabbitmq/Dockerfile
++++ b/docker/rabbitmq/Dockerfile
+@@ -3,7 +3,7 @@
+ FROM fuel/centos
+ MAINTAINER Aleksandr Didenko adidenko@mirantis.com
+
+-RUN rm -rf /etc/yum.repos.d/*; echo -e "[nailgun]\nname=Nailgun Local Repo\nbaseurl=http://$(route -n | awk '/^0.0.0.0/ { print $2 }'):_PORT_/os/x86_64/\ngpgcheck=0" > /etc/yum.repos.d/nailgun.repo; yum clean all; yum --quiet install -y rabbitmq-server
++RUN sleep 15; rm -rf /etc/yum.repos.d/*; echo -e "[nailgun]\nname=Nailgun Local Repo\nbaseurl=http://$(route -n | awk '/^0.0.0.0/ { print $2 }'):_PORT_/os/x86_64/\ngpgcheck=0" > /etc/yum.repos.d/nailgun.repo; yum clean all; yum --quiet install -y rabbitmq-server
+
+ ADD etc /etc
+ ADD start.sh /usr/local/bin/start.sh
+diff --git a/docker/rsync/Dockerfile b/docker/rsync/Dockerfile
+index ef737bd..b6eefd1 100644
+--- a/docker/rsync/Dockerfile
++++ b/docker/rsync/Dockerfile
+@@ -5,7 +5,7 @@
+ FROM fuel/centos
+ MAINTAINER Matthew Mosesohn mmosesohn@mirantis.com
+
+-RUN rm -rf /etc/yum.repos.d/*; echo -e "[nailgun]\nname=Nailgun Local Repo\nbaseurl=http://$(route -n | awk '/^0.0.0.0/ { print $2 }'):_PORT_/os/x86_64/\ngpgcheck=0" > /etc/yum.repos.d/nailgun.repo;yum clean all;yum --quiet install -y ruby21-puppet xinetd rsync logrotate
++RUN sleep 15; rm -rf /etc/yum.repos.d/*; echo -e "[nailgun]\nname=Nailgun Local Repo\nbaseurl=http://$(route -n | awk '/^0.0.0.0/ { print $2 }'):_PORT_/os/x86_64/\ngpgcheck=0" > /etc/yum.repos.d/nailgun.repo;yum clean all;yum --quiet install -y ruby21-puppet xinetd rsync logrotate
+
+ ADD etc /etc
+ ADD start.sh /usr/local/bin/start.sh
+diff --git a/docker/rsyslog/Dockerfile b/docker/rsyslog/Dockerfile
+index 5efd623..8721b39 100644
+--- a/docker/rsyslog/Dockerfile
++++ b/docker/rsyslog/Dockerfile
+@@ -2,7 +2,7 @@ FROM fuel/centos
+
+ MAINTAINER Aleksandr Didenko adidenko@mirantis.com
+
+-RUN rm -rf /etc/yum.repos.d/*; echo -e "[nailgun]\nname=Nailgun Local Repo\nbaseurl=http://$(route -n | awk '/^0.0.0.0/ { print $2 }'):_PORT_/os/x86_64/\ngpgcheck=0" > /etc/yum.repos.d/nailgun.repo; yum clean all;yum --quiet install -y anacron rsyslog
++RUN sleep 15; rm -rf /etc/yum.repos.d/*; echo -e "[nailgun]\nname=Nailgun Local Repo\nbaseurl=http://$(route -n | awk '/^0.0.0.0/ { print $2 }'):_PORT_/os/x86_64/\ngpgcheck=0" > /etc/yum.repos.d/nailgun.repo; yum clean all;yum --quiet install -y anacron rsyslog
+
+ ADD etc /etc
+ ADD start.sh /usr/local/bin/start.sh
diff --git a/fuel/build/fuel-main_2.patch b/fuel/build/fuel-main_2.patch
new file mode 100644 (file)
index 0000000..72588cb
--- /dev/null
@@ -0,0 +1,18 @@
+*** fuel-main/sandbox.mk.orig  2015-02-13 12:12:55.362989171 +0100
+--- fuel-main/sandbox.mk       2015-02-13 14:50:39.103017653 +0100
+***************
+*** 71,77 ****
+--- 71,83 ----
+  echo "Updating apt package database"
+  sudo chroot $(SANDBOX_UBUNTU) apt-get update
+  echo "Installing additional packages: $(SANDBOX_DEB_PKGS)"
++ test -e $(SANDBOX_UBUNTU)/sbin/start.orig || mv $(SANDBOX_UBUNTU)/sbin/start $(SANDBOX_UBUNTU)/sbin/start.orig
++ echo "#!/bin/sh" > $(SANDBOX_UBUNTU)/sbin/start
++ echo "exit 0" >> $(SANDBOX_UBUNTU)/sbin/start
++ chmod 755 $(SANDBOX_UBUNTU)/sbin/start
+  test -n "$(SANDBOX_DEB_PKGS)" && sudo chroot $(SANDBOX_UBUNTU) apt-get install --yes $(SANDBOX_DEB_PKGS)
++ test -e $(SANDBOX_UBUNTU)/sbin/start.orig && (cp $(SANDBOX_UBUNTU)/sbin/start.orig $(SANDBOX_UBUNTU)/sbin/start; \
++      rm $(SANDBOX_UBUNTU)/sbin/start.orig)
+  echo "SANDBOX_UBUNTU_UP: done"
+  endef
+
diff --git a/fuel/build/fuel-main_3.patch b/fuel/build/fuel-main_3.patch
deleted file mode 100644 (file)
index 8341d72..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-*** prepare-build-env.sh.orig  Tue Sep  8 08:47:46 2015
---- prepare-build-env.sh       Tue Sep  8 08:48:22 2015
-***************
-*** 41,47 ****
-  
-    trusty)
-      GEMPKG="ruby ruby-dev"
-!     sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys D5A05778
-      echo "deb http://mirror.fuel-infra.org/devops/ubuntu/ ./" | sudo tee /etc/apt/sources.list.d/fuel-devops.list
-      sudo apt-get update && sudo apt-get -y install nodejs nodejs-legacy npm
-      ;;
---- 41,47 ----
-  
-    trusty)
-      GEMPKG="ruby ruby-dev"
-!     sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 1D2B45A2
-      echo "deb http://mirror.fuel-infra.org/devops/ubuntu/ ./" | sudo tee /etc/apt/sources.list.d/fuel-devops.list
-      sudo apt-get update && sudo apt-get -y install nodejs nodejs-legacy npm
-      ;;
diff --git a/fuel/build/fuel-main_5.patch b/fuel/build/fuel-main_5.patch
deleted file mode 100644 (file)
index ec75626..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-*** prepare-build-env.sh.orig  Tue Sep  8 10:29:08 2015
---- prepare-build-env.sh       Tue Sep  8 10:30:21 2015
-***************
-*** 43,49 ****
-      GEMPKG="ruby ruby-dev"
-      sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 1D2B45A2
-      echo "deb http://mirror.fuel-infra.org/devops/ubuntu/ ./" | sudo tee /etc/apt/sources.list.d/fuel-devops.list
-!     sudo apt-get update && sudo apt-get -y install nodejs nodejs-legacy npm
-      ;;
-  
-    precise)
---- 43,49 ----
-      GEMPKG="ruby ruby-dev"
-      sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 1D2B45A2
-      echo "deb http://mirror.fuel-infra.org/devops/ubuntu/ ./" | sudo tee /etc/apt/sources.list.d/fuel-devops.list
-!     sudo apt-get update && sudo apt-get -y install nodejs nodejs-legacy npm dosfstools xorriso
-      ;;
-  
-    precise)
index 0d15aec..1101ac9 100644 (file)
@@ -17,9 +17,9 @@ TreeDefault {
 };
 
 BinDirectory "pool/main" {
-  Packages "dists/trusty/main/binary-amd64/Packages";
-  BinOverride "./indices/override.trusty.main";
-  ExtraOverride "./indices/override.trusty.extra.main";
+  Packages "dists/precise/main/binary-amd64/Packages";
+  BinOverride "./indices/override.precise.main";
+  ExtraOverride "./indices/override.precise.extra.main";
 };
 
 Default {
index 02706bd..0252882 100644 (file)
@@ -10,9 +10,9 @@
 
 APT::FTPArchive::Release::Origin "Ubuntu";
 APT::FTPArchive::Release::Label "Ubuntu";
-APT::FTPArchive::Release::Suite "trusty";
-APT::FTPArchive::Release::Version "1.04";
-APT::FTPArchive::Release::Codename "trusty";
+APT::FTPArchive::Release::Suite "precise";
+APT::FTPArchive::Release::Version "12.04";
+APT::FTPArchive::Release::Codename "precise";
 APT::FTPArchive::Release::Architectures "amd64";
 APT::FTPArchive::Release::Components "main";
-APT::FTPArchive::Release::Description "Ubuntu Trusty Tahr 14.04 LTS";
+APT::FTPArchive::Release::Description "Ubuntu Precise 12.04 LTS";
index 3b5b239..2acbcf0 100644 (file)
@@ -17,8 +17,8 @@ TreeDefault {
 };
 
 BinDirectory "pool/debian-installer" {
-  Packages "dists/trusty/main/debian-installer/binary-amd64/Packages";
-  BinOverride "./indices/override.trusty.main.debian-installer";
+  Packages "dists/precise/main/debian-installer/binary-amd64/Packages";
+  BinOverride "./indices/override.precise.main.debian-installer";
 };
 
 Default {
index f0bb849..dbb26d6 100755 (executable)
@@ -122,17 +122,17 @@ prep_make_live() {
     ssh-copy-id root@$FUELHOST
     sshfs root@1${FUELHOST}:/ $TMP_HOSTMOUNT
 
-    if [ -f  $REPO/dists/trusty/main/binary-amd64/Packages.backup ]; then
+    if [ -f  $REPO/dists/precise/main/binary-amd64/Packages.backup ]; then
         echo "Error - found backup file for Packages!"
         exit 1
     fi
 
-    if [ -f  $REPO/dists/trusty/main/binary-amd64/Packages.gz.backup ]; then
+    if [ -f  $REPO/dists/precise/main/binary-amd64/Packages.gz.backup ]; then
         echo "Error - found backup file for Packages.gz!"
         exit 1
     fi
 
-    if [ -f  $REPO/dists/trusty/Release.backup ]; then
+    if [ -f  $REPO/dists/precise/Release.backup ]; then
         echo "Error - found backup file for Release!"
         exit 1
     fi
@@ -142,24 +142,20 @@ prep_make_live() {
         exit 1
     fi
 
-    cp $REPO/dists/trusty/main/binary-amd64/Packages $REPO/dists/trusty/main/binary-amd64/Packages.backup
-    cp $REPO/dists/trusty/main/binary-amd64/Packages.gz $REPO/dists/trusty/main/binary-amd64/Packages.gz.backup
-    cp $REPO/dists/trusty/Release $REPO/dists/trusty/Release.backup
+    cp $REPO/dists/precise/main/binary-amd64/Packages $REPO/dists/precise/main/binary-amd64/Packages.backup
+    cp $REPO/dists/precise/main/binary-amd64/Packages.gz $REPO/dists/precise/main/binary-amd64/Packages.gz.backup
+    cp $REPO/dists/precise/Release $REPO/dists/precise/Release.backup
     cp -Rvp $DEST/etc/puppet $DEST/etc/puppet.backup
 }
 
 post_make_live() {
-    if [ -d $TOP/release/puppet/modules ]; then
-        echo "Installing into Puppet:"
-        cd $TOP/release/puppet/modules
-        if [ `ls -1 | wc -l` -gt 0 ]; then
-            for dir in *
-            do
-                echo "   $dir"
-                cp -Rp $dir $DEST/etc/puppet/modules
-            done
-        fi
-    fi
+    echo "Installing into Puppet:"
+    cd $TOP/release/puppet/modules
+    for dir in *
+    do
+        echo "   $dir"
+        cp -Rp $dir $DEST/etc/puppet/modules
+    done
 }
 
 make_live() {
@@ -214,21 +210,18 @@ iso_copy_puppet() {
     tar xzf $DEST/puppet-slave.tgz
     cd $TOP/release/puppet/modules
 
+    verify_orig_files $TMP_ISOPUPPET/release/puppet $TOP/release/puppet/modules
     # Remove all .orig files before copying as they now have been verfied
+    find $TOP/release/puppet/modules -type f -name '*.orig' -exec rm {} \;
 
-    if [ -d $TOP/release/puppet/modules ]; then
-        if [ `ls -1 | wc -l` -gt 0 ]; then
-            verify_orig_files $TMP_ISOPUPPET/release/puppet $TOP/release/puppet/modules
-            find $TOP/release/puppet/modules -type f -name '*.orig' -exec rm {} \;
-            for dir in $TOP/release/puppet/modules/*
-            do
-                echo "   $dir"
-                cp -Rp $dir $TMP_ISOPUPPET/release/puppet
-            done
-        fi
-    fi
-
+    for dir in $TOP/release/puppet/modules/*
+    do
+        echo "   $dir"
+        cp -Rp $dir $TMP_ISOPUPPET/release/puppet
+    done
     cd $TMP_ISOPUPPET/release/puppet
+
+
     tar czf $DEST/puppet-slave.tgz .
     cd $TOP
     rm -Rf $TMP_ISOPUPPET
@@ -257,7 +250,7 @@ iso_modify_image () {
 make_iso() {
     prep_make_iso
     copy_packages
-    #iso_copy_puppet
+    iso_copy_puppet
     iso_modify_image
     make_iso_image
 }
@@ -270,8 +263,6 @@ copy_packages() {
     do
         echo "   $udeb"
         cp $udeb $REPO/pool/debian-installer
-       echo "Did not expect a package here, not supported"
-       exit 1
     done
 
     cd $TOP/release/packages/ubuntu/pool/main
@@ -279,8 +270,6 @@ copy_packages() {
     do
         echo "   $deb"
         cp $deb $REPO/pool/main
-       echo "Did not expect a package here, not supported"
-       exit 1
     done
 
     echo "Running Fuel package patch file"
@@ -288,8 +277,6 @@ copy_packages() {
 
     for line in `cat $TOP/apply_patches | grep -v "^#" | grep -v "^$"`; do
         echo "Line is $line"
-        echo "Did not expect a line here, not supported"
-        exit 1
         ref=`echo $line | cut -d '>' -f 1`
         origpkg=`echo $line| cut -d '>' -f 2`
         url=`echo $line | cut -d '>' -f 3`
@@ -328,11 +315,10 @@ copy_packages() {
     done
 
     printf "Done running Fuel patch file\n\n"
+
     echo "Running add packages file"
     for line in `cat $TOP/add_opnfv_packages | grep -v "^#" | grep -v "^$"`; do
         echo "Line is $line"
-        echo "Did not expect a line here, not supported"
-        exit 1
         ref=`echo $line | cut -d '>' -f 1`
         origpkg=`echo $line| cut -d '>' -f 2`
         url=`echo $line | cut -d '>' -f 3`
@@ -384,8 +370,6 @@ copy_packages() {
         printf "\n\n" | tee -a  $REPORTFILE
         for line in `cat $TOP/patch-packages/release/patch-replacements`
         do
-            echo "Did not expect a line here, not supported"
-                   exit 1
             frompkg=`echo $line | cut -d ">" -f 1`
             topkg=`echo $line | cut -d ">" -f 2`
             echo "CM: Applying patch to $frompkg" | tee -a $REPORTFILE
@@ -427,19 +411,17 @@ copy_packages() {
     APT_DEB_CONF="$TOP/install/apt-ftparchive-deb.conf"
     APT_UDEB_CONF="$TOP/install/apt-ftparchive-udeb.conf"
 
-    echo Not running echo apt-ftparchive -c "${APT_REL_CONF}" generate "${APT_DEB_CONF}"
-    echo Not running apt-ftparchive -c "${APT_REL_CONF}" generate "${APT_DEB_CONF}"
-    echo Not running apt-ftparchive generate "${APT_UDEB_CONF}"
-    echo Not running apt-ftparchive generate "${APT_UDEB_CONF}"
+    apt-ftparchive -c "${APT_REL_CONF}" generate "${APT_DEB_CONF}"
+    apt-ftparchive generate "${APT_UDEB_CONF}"
 
     # Fuel also needs this index file
-    # cat dists/trusty/main/binary-amd64/Packages | \
-    #    awk '/^Package:/{pkg=$2}
-    /^Version:/{print pkg ": \"" $2 "\""}' > ubuntu-versions.yaml
-    cp ubuntu-versions.yaml $DEST
+    cat dists/precise/main/binary-amd64/Packages | \
+        awk '/^Package:/{pkg=$2}
+    /^Version:/{print pkg ": \"" $2 "\""}' > ubuntu-versions.yaml
+    cp ubuntu-versions.yaml $DEST
 
-    # apt-ftparchive -c "${APT_REL_CONF}" release dists/trusty/ > dists/trusty/Release
-    # gzip -9cf dists/trusty/Release > dists/trusty/Release.gz
+    apt-ftparchive -c "${APT_REL_CONF}" release dists/precise/ > dists/precise/Release
+    gzip -9cf dists/precise/Release > dists/precise/Release.gz
 
     popd > /dev/null
 
@@ -462,8 +444,6 @@ if [ $MODE = "iso" ]; then
     NEWISO=$3
     VOLUMEID="$4 $5"
     REPORTFILE="${NEWISO}.txt"
-    echo "Opening reportfile at $REPORTFILE"
-    touch $REPORTFILE
     if [ ! -f $ORIGISO ]; then
         echo "Can't find original MOS 5.1 iso at $ORIGISO"
         rm $CONF
index a9e74bc..36b8884 100755 (executable)
@@ -31,17 +31,17 @@ DEST=$MOUNT
 REPO=$DEST/var/www/nailgun/ubuntu/fuelweb/x86_64
 
 cd $REPO
-if [ ! -f  $REPO/dists/trusty/main/binary-amd64/Packages.backup ]; then
+if [ ! -f  $REPO/dists/precise/main/binary-amd64/Packages.backup ]; then
     echo "Error - didn't find backup file for Packages!"
     exit 1
 fi
 
-if [ ! -f  $REPO/dists/trusty/main/binary-amd64/Packages.gz.backup ]; then
+if [ ! -f  $REPO/dists/precise/main/binary-amd64/Packages.gz.backup ]; then
     echo "Error - didn't find backup file for Packages.gz!"
     exit 1
 fi
 
-if [ ! -f  $REPO/dists/trusty/Release.backup ]; then
+if [ ! -f  $REPO/dists/precise/Release.backup ]; then
     echo "Error - didn't find backup file for Release!"
     exit 1
 fi
@@ -71,9 +71,9 @@ cd $REPO
 
 echo "Restoring backups of datafiles"
 
-rm -f $REPO/dists/trusty/main/binary-amd64/Packages $REPO/dists/trusty/main/binary-amd64/Packages.gz
-rm -f $REPO/dists/trusty/Release $DEST/etc/puppet/manifests/site.pp
-mv $REPO/dists/trusty/main/binary-amd64/Packages.backup $REPO/dists/trusty/main/binary-amd64/Packages
-mv $REPO/dists/trusty/main/binary-amd64/Packages.gz.backup $REPO/dists/trusty/main/binary-amd64/Packages.gz
-mv $REPO/dists/trusty/Release.backup $REPO/dists/trusty/Release
+rm -f $REPO/dists/precise/main/binary-amd64/Packages $REPO/dists/precise/main/binary-amd64/Packages.gz
+rm -f $REPO/dists/precise/Release $DEST/etc/puppet/manifests/site.pp
+mv $REPO/dists/precise/main/binary-amd64/Packages.backup $REPO/dists/precise/main/binary-amd64/Packages
+mv $REPO/dists/precise/main/binary-amd64/Packages.gz.backup $REPO/dists/precise/main/binary-amd64/Packages.gz
+mv $REPO/dists/precise/Release.backup $REPO/dists/precise/Release
 mv $DEST/etc/puppet/manifests/site.pp.backup $DEST/etc/puppet/manifests/site.pp
diff --git a/fuel/build/opendaylight/Makefile b/fuel/build/opendaylight/Makefile
new file mode 100644 (file)
index 0000000..bd2eeb5
--- /dev/null
@@ -0,0 +1,102 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+############################################################################
+# BEGIN of variables to customize
+#
+SHELL = /bin/bash
+
+
+BUILD_DIR := $(shell pwd)
+GIT_DIR := /tmp
+export CONFIG_SPEC_SCRIPT
+export MAVEN_OPTS = -Xmx1024m -XX:MaxPermSize=512m
+MAINTAINER = "Main Tainer <main.tainer@example.org>"
+ODL_SHORT_NAME = odl
+ODL_VERSION = 0.1-1
+DEPEND = openjdk-8-jdk
+TARGET_BUILD_PATH="/tmp/controller/opendaylight/distribution/opendaylight-karaf/target/"
+MAVEN_SPEC = $(BUILD_DIR)/odl_maven/settings.xml
+
+#
+# END of variables to customize
+#############################################################################
+
+.PHONY: all
+all:   odl
+
+############################################################################
+# BEGIN of Include definitions
+#
+include ../config.mk
+#
+# END Include definitions
+#############################################################################
+
+.PHONY: setup
+setup:
+       rm -f "$(BUILD_BASE)/f_odl"
+       ln -s "$(shell readlink -e $(BUILD_DIR))/f_odl" "$(shell readlink -e $(BUILD_BASE))/f_odl"
+
+.PHONY: validate-cache
+validate-cache:
+       @REMOTE_ID=$(shell git ls-remote $(ODL_MAIN_REPO) $(ODL_MAIN_TAG)^{} | awk '{print $$(NF-1)}'); \
+       if [ -z $$REMOTE_ID ] || [ $$REMOTE_ID = " " ]; \
+       then \
+          REMOTE_ID=$(shell git ls-remote $(ODL_MAIN_REPO) $(ODL_MAIN_TAG) | awk '{print $$(NF-1)}'); \
+       fi; \
+       if [ $$REMOTE_ID != $(shell cat $(VERSION_FILE) | grep odl | awk '{print $$NF}') ]; \
+       then \
+          echo "Cache does not match upstream OpenDaylight, cache must be rebuilt!"; \
+          exit 1; \
+       fi
+
+.PHONY: odl
+odl:
+
+ifeq ($(ODL_MAIN_REPO),)
+       @echo "No config-spec target for ODL, nothing to build"
+else
+
+ifeq ($(shell if [ -e .odl-build.log ];then cat .odl-build.log; fi;),$(ODL_MAIN_TAG))
+       @cd /tmp && git clone $(ODL_MAIN_REPO) && cd /tmp/controller && git checkout $(ODL_MAIN_TAG)
+
+       @echo "ODL is up to date"
+else
+       @if [ ! -d "/tmp/controller" ]; then\
+          cd /tmp && git clone $(ODL_MAIN_REPO);\
+       fi;
+
+       @if [ "$(UNIT_TEST)" = "FALSE" ]; then\
+          echo "Building ODL without unit test";\
+          cd /tmp/controller &&\
+          git checkout $(ODL_MAIN_TAG) &&\
+          mvn -D maven.test.skip=true -gs $(MAVEN_SPEC) clean install;\
+       else\
+          echo "Building ODL with unit test";\
+          cd /tmp/controller &&\
+          git checkout $(ODL_MAIN_TAG) &&\
+          mvn -gs $(MAVEN_SPEC) clean install;\
+       fi;
+
+       @echo "odl" `git -C /tmp/controller show | grep commit | head -1 | cut -d " " -f2` >> $(VERSION_FILE)
+       @./make-odl-deb.sh -N $(ODL_SHORT_NAME)_`cd /tmp/controller; git rev-parse --short HEAD` -n $(ODL_SHORT_NAME) -v "$(ODL_VERSION)" -t "$(ODL_MAIN_TAG)" -m $(MAINTAINER) -d $(DEPEND) -p $(TARGET_BUILD_PATH)
+       @echo $(ODL_MAIN_TAG) > .odl-build.log
+endif
+endif
+
+.PHONY: clean $(SUBCLEAN)
+clean: $(SUBCLEAN)
+       @rm -Rf /tmp/controller
+       @rm -f .odl-build.log
+       @./make-odl-deb.sh -C
+
+.PHONY: release
+release:
diff --git a/fuel/build/opendaylight/README b/fuel/build/opendaylight/README
new file mode 100644 (file)
index 0000000..7aa392e
--- /dev/null
@@ -0,0 +1,52 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+This directory builds the OpenDaylight debian package by cloning the
+opendaylight.org repo, building the odl tag specified in
+"fuel-build/config-spec" and constructing a debian package source tree under
+"f_odl", which is automatically linked into "fuel-build/." for further build processing.
+
+The opendaylight directory has the following structure:
+.
++--------+-------------+-----------+
+|        |             |           |
+|     Makefile  make-odl-deb.sh  README
+|                              (this file)
+|
++----------+----------+
+           |          |
+       odl_maven/   f_odl/
+
+Makefile:
+Invoked by the git root Makefile, it clones the odl repo, checks out the
+tag/branch indicated in "fuel-build/config-spec", builds odl, and calls
+"make-odl-deb.sh" to create a debian package source tree.
+
+make-odl-deb.sh:
+Creates the odl debian package source tree in "f_odl" from the odl build
+results.
+
+odl_maven/:
+Contains the control files needed for the maven OpenDaylight build.
+
+f_odl/:
+Contains build scripts and the generated odl debian package source tree produced
+by the odl build (make-odl-deb.sh), which is later used by the root build system.
+
+NOTE on the controller/ git repo clone:
+The controller git repo clone contains all artifacts from the odl build. It only
+exists in /tmp inside the build docker container and is not visible anywhere on
+the build host.
+
+Note on build caching:
+The latest build results are cached and will not be rebuilt unless fuel-build/
+config-spec is changed with respect to the odl version, or make clean is applied.
+./.odl-build.log and ./.odl-build.history are used to keep adequate bookkeeping
+to track caching and any needed cleanup, as sketched in the example below.
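
As a quick sketch of how this caching behaves (assuming ODL_MAIN_REPO and
ODL_MAIN_TAG are set via config.mk, as the Makefile above expects), a full ODL
rebuild can be forced by clearing the cache bookkeeping:

    cd fuel/build/opendaylight
    make clean   # removes /tmp/controller and .odl-build.log, and cleans the debian artifacts
    make odl     # re-clones ODL_MAIN_REPO, checks out ODL_MAIN_TAG and rebuilds the package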
diff --git a/fuel/build/opendaylight/f_odl/Makefile b/fuel/build/opendaylight/f_odl/Makefile
new file mode 100644 (file)
index 0000000..f7ebd3e
--- /dev/null
@@ -0,0 +1,49 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+include $(BUILD_BASE)/config.mk
+ODL_NAME_SHORT := odl
+PACKAGE := odl_$(shell cd /tmp/controller; git rev-parse --short HEAD)
+VERSION := 0.1-1
+DEB_NAME := $(PACKAGE)_$(VERSION)
+
+.PHONY: all
+all:   release/pool/main/$(DEB_NAME).deb
+
+release/pool/main/$(DEB_NAME).deb:
+ifeq ($(ODL_MAIN_REPO),)
+       @echo "No config-spec target for ODL, nothing to build"
+else
+       @mkdir -p tmp/src
+       @mkdir -p release/pool/main
+       @cp -rp package/$(DEB_NAME) tmp/src
+       @gzip -f9 tmp/src/$(DEB_NAME)/usr/share/doc/$(ODL_NAME_SHORT)/changelog.Debian
+       @fakeroot dpkg-deb --build tmp/src/$(DEB_NAME)
+       @lintian tmp/src/$(DEB_NAME).deb
+       @cp tmp/src/$(DEB_NAME).deb release/pool/main
+endif
+
+.PHONY: clean
+clean:
+       @rm -rf tmp
+       @rm -rf release
+       @rm -f $(DEB_DEST)/$(DEB_NAME).deb
+
+.PHONY: validate-cache
+validate-cache:
+       @echo "No cache validation schema available for $(shell pwd)"
+       @echo "Continuing ..."
+
+.PHONY: release
+release:release/pool/main/$(DEB_NAME).deb
+ifneq ($(ODL_MAIN_REPO),)
+       @cp release/pool/main/$(DEB_NAME).deb $(DEB_DEST)
+       @cp -Rvp puppet/modules/* $(PUPPET_DEST)
+endif
diff --git a/fuel/build/opendaylight/f_odl/README b/fuel/build/opendaylight/f_odl/README
new file mode 100644 (file)
index 0000000..077962d
--- /dev/null
@@ -0,0 +1,49 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+This directory adds the OpenDaylight (odl) package and related puppet
+deployment manifest such that it gets built into the .iso image and deployed
+on the stack controller cluster.
+
+The f_odl directory has the following structure:
+.
++--------+----------+-----------+------------+
+         |          |           |            |
+      puppet/    Makefile     README   odl_<change_id>
+         |                  (this file)  /<version>
+         |                                   |
+         |                            odl deb pkg src
+      modules/
+         |
+         |
+         |
+       opnfv/
+         |
+         |
+         |
+     manifests/
+         |
+         |
+         |
+      odl.pp
+
+Makefile:
+Invoked by the git root Makefile, it builds the odl debian package from the
+debian pkg source directory (inside this directory) and pushes it together
+with the manifests to the fuel build source artifact directory, such that it
+eventually gets built into the new fuel .iso
+
+odl.pp:
+Controls the installation and configuration of odl
+
+odl deb pkg src:
+The debian package source directory tree, including all needed odl artifacts
+and debian package metadata. This debian source package tree is built from
+fuel-build/opendaylight and doesn't exist before it has been built.
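
As a rough sketch of how this ties together (assuming make-odl-deb.sh has already
generated the debian source tree under f_odl/package, and that DEB_DEST and
PUPPET_DEST are defined in config.mk), the root build effectively runs:

    make -C f_odl all        # dpkg-deb builds release/pool/main/odl_<change_id>_0.1-1.deb
    make -C f_odl release    # copies the .deb to $(DEB_DEST) and the puppet modules to $(PUPPET_DEST)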
diff --git a/fuel/build/opendaylight/f_odl/puppet/modules/opnfv/manifests/odl.pp b/fuel/build/opendaylight/f_odl/puppet/modules/opnfv/manifests/odl.pp
new file mode 100644 (file)
index 0000000..6165646
--- /dev/null
@@ -0,0 +1,13 @@
+class opnfv::odl {
+  if $::osfamily == 'Debian' {
+
+
+    case $::fuel_settings['role'] {
+      /controller/: {
+        package { 'odl':
+          ensure => installed,
+        }
+      }
+    }
+  }
+}
diff --git a/fuel/build/opendaylight/f_odl/testing/README b/fuel/build/opendaylight/f_odl/testing/README
new file mode 100644 (file)
index 0000000..2ef4976
--- /dev/null
@@ -0,0 +1,12 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+In order to test the functionality without performing a full deployment, run "puppet apply" on
+fake_init.pp, which will call only the opnfv::odl class.
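
For example (a sketch only; the module path depends on where the opnfv module is
installed on the test node):

    puppet apply --modulepath=/etc/puppet/modules fake_init.pp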
diff --git a/fuel/build/opendaylight/f_odl/testing/fake_init.pp b/fuel/build/opendaylight/f_odl/testing/fake_init.pp
new file mode 100644 (file)
index 0000000..0600d2e
--- /dev/null
@@ -0,0 +1,13 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+$fuel_settings = parseyaml($astute_settings_yaml)
+
+include opnfv::odl
diff --git a/fuel/build/opendaylight/make-odl-deb.sh b/fuel/build/opendaylight/make-odl-deb.sh
new file mode 100755 (executable)
index 0000000..5222087
--- /dev/null
@@ -0,0 +1,314 @@
+#!/bin/bash
+set -e
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+##############################################################################
+# Default variable declarations
+
+COMMAND=
+PACKAGE_NAME=
+PACKAGE_SHORT_NAME=
+PACKAGE_VERSION=
+TARGET_BUILD_PATH=
+DEPENDENCIES=
+MAINTAINER=
+ARCH="amd64"
+BUILD_HISTORY=".odl-build-history"
+
+##############################################################################
+# subroutine: usage
+# Description: Prints out usage of this script
+
+usage ()
+{
+cat <<EOF
+usage: $0 options
+
+$0 creates a ${PACKAGE_NAME} Debian package
+
+OPTIONS:
+  -n Package short name
+  -N Package name
+  -v Version
+  -t Tag
+  -p Target build path, the path where the built tar ball is to be fetched
+  -m Maintainer
+  -d Package dependencies
+  -h Prints this message
+  -C Clean
+
+E.g.: $0 -n my/deb/src/dest/path -N my-package -v 1.0-1 -t myTag -p path/to/the/source -m "Main Tainer <main.tainer@example.org>" -d myJavaDependence
+EOF
+}
+
+##############################################################################
+# subroutine: clean
+# Description: Cleans up all artifacts from earlier builds
+
+clean ()
+{
+if [ -e $BUILD_HISTORY ]; then
+    while read line
+    do
+       rm -rf $line
+    done < $BUILD_HISTORY
+    rm ${BUILD_HISTORY}
+    exit 0
+fi
+}
+
+##############################################################################
+# make-DEBIAN_control
+# Description: constructs the Debian pack control file
+
+make-DEBIAN_control ()
+{
+cat <<EOF
+Package: $PACKAGE_SHORT_NAME
+Version: $PACKAGE_VERSION
+Section: base
+Priority: optional
+Architecture: $ARCH
+Depends: $DEPENDENCIES
+Maintainer: $MAINTAINER
+Description: OpenDaylight daemon
+ This is a daemon for the opendaylight/odl controller service.
+EOF
+}
+
+##############################################################################
+# subroutine: make-DEBIAN_conffiles
+# Description: Constructs the Debian package config files assignment
+
+make-DEBIAN_conffiles ()
+{
+cat <<EOF
+/etc/odl/etc/all.policy
+/etc/odl/etc/config.properties
+/etc/odl/etc/custom.properties
+/etc/odl/etc/distribution.info
+/etc/odl/etc/equinox-debug.properties
+/etc/odl/etc/java.util.logging.properties
+/etc/odl/etc/jmx.acl.cfg
+/etc/odl/etc/jmx.acl.java.lang.Memory.cfg
+/etc/odl/etc/jmx.acl.org.apache.karaf.bundle.cfg
+/etc/odl/etc/jmx.acl.org.apache.karaf.config.cfg
+/etc/odl/etc/jmx.acl.org.apache.karaf.security.jmx.cfg
+/etc/odl/etc/jmx.acl.osgi.compendium.cm.cfg
+/etc/odl/etc/jre.properties
+/etc/odl/etc/keys.properties
+/etc/odl/etc/org.apache.felix.fileinstall-deploy.cfg
+/etc/odl/etc/org.apache.karaf.command.acl.bundle.cfg
+/etc/odl/etc/org.apache.karaf.command.acl.config.cfg
+/etc/odl/etc/org.apache.karaf.command.acl.feature.cfg
+/etc/odl/etc/org.apache.karaf.command.acl.jaas.cfg
+/etc/odl/etc/org.apache.karaf.command.acl.kar.cfg
+/etc/odl/etc/org.apache.karaf.command.acl.shell.cfg
+/etc/odl/etc/org.apache.karaf.command.acl.system.cfg
+/etc/odl/etc/org.apache.karaf.features.cfg
+/etc/odl/etc/org.apache.karaf.features.obr.cfg
+/etc/odl/etc/org.apache.karaf.features.repos.cfg
+/etc/odl/etc/org.apache.karaf.jaas.cfg
+/etc/odl/etc/org.apache.karaf.kar.cfg
+/etc/odl/etc/org.apache.karaf.log.cfg
+/etc/odl/etc/org.apache.karaf.management.cfg
+/etc/odl/etc/org.apache.karaf.shell.cfg
+/etc/odl/etc/org.ops4j.pax.logging.cfg
+/etc/odl/etc/org.ops4j.pax.url.mvn.cfg
+/etc/odl/etc/regions-config.xml
+/etc/odl/etc/shell.init.script
+/etc/odl/etc/startup.properties
+/etc/odl/etc/system.properties
+/etc/odl/etc/users.properties
+/etc/odl/configuration/context.xml
+/etc/odl/configuration/logback.xml
+/etc/odl/configuration/tomcat-logging.properties
+/etc/odl/configuration/tomcat-server.xml
+EOF
+}
+
+##############################################################################
+# subroutine: make-DEBIAN_postinst
+# Description: Constructs the Debian package post installation script
+
+make-DEBIAN_postinst ()
+{
+cat <<EOF
+#!/bin/bash -e
+ln -s /etc/${PACKAGE_SHORT_NAME}/* ${TARGET_INSTALL_PATH}
+echo "OpenDaylight $TAG version $PACKAGE_VERSION has been installed"
+EOF
+}
+
+##############################################################################
+# subroutine: make-DEBIAN_bin
+# Description: Constructs the bin script (normally under /usr/bin)
+
+make-DEBIAN_bin ()
+{
+cat <<EOF
+#!/bin/bash -e
+${TARGET_INSTALL_PATH}bin/karaf $@
+EOF
+}
+
+##############################################################################
+# subroutine: make-DEBIAN_copyright
+# Description: Constructs the copyright text (normally under /usr/share/doc...)
+
+make-DEBIAN_copyright ()
+{
+cat <<EOF
+OpenDaylight - an open source SDN controller
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+EOF
+}
+
+##############################################################################
+# subroutine: make-DEBIAN_changelog
+# Description: Constructs the changelog text (normally under /usr/share/doc...)
+
+make-DEBIAN_changelog ()
+{
+cat <<EOF
+$PACKAGE_SHORT_NAME ($PACKAGE_VERSION) precise-proposed; urgency=low
+
+  * Derived from $PACKAGE_NAME $PACKAGE_VERSION
+
+ -- $MAINTAINER  $(date)
+EOF
+}
+
+##############################################################################
+# MAIN
+
+while getopts "N:n:v:d:Chm:t:p:" OPTION
+do
+    case $OPTION in
+        h)
+            usage
+            exit 0
+            ;;
+
+        N)
+            PACKAGE_NAME=$OPTARG
+            COMMAND+="-N ${PACKAGE_NAME} "
+            ;;
+
+        n)
+            PACKAGE_SHORT_NAME=$OPTARG
+            COMMAND+="-n ${PACKAGE_SHORT_NAME} "
+            ;;
+
+       v)
+            PACKAGE_VERSION=$OPTARG
+            COMMAND+="-v ${PACKAGE_VERSION} "
+            ;;
+
+       p)
+           TARGET_BUILD_PATH=$OPTARG
+           COMMAND+="-p ${TARGET_BUILD_PATH} "
+           ;;
+
+       t)
+            TAG=$OPTARG
+            COMMAND+="-t ${TAG} "
+            ;;
+
+       m)
+           MAINTAINER=$OPTARG
+            COMMAND+="-m ${MAINTAINER} "
+            ;;
+
+       d)
+           DEPENDENCIES=$OPTARG
+            COMMAND+="-d ${DEPENDENCIES} "
+            ;;
+
+        A)
+           ARCH=$OPTARG
+           COMMAND+="-A ${ARCH} "
+            ;;
+
+        C)
+           COMMAND+="-C "
+           clean
+           exit 0
+            ;;
+    esac
+done
+
+# Constructing script variables
+DEB_PACK_BASE_PATH="f_${PACKAGE_SHORT_NAME}/package/${PACKAGE_NAME}_${PACKAGE_VERSION}"
+echo ${DEB_PACK_BASE_PATH} >> "$BUILD_HISTORY"
+TARGET_INSTALL_PATH="/usr/share/java/${PACKAGE_SHORT_NAME}/"
+DEB_PACK_CONTENT_PATH="${DEB_PACK_BASE_PATH}/usr/share/java/${PACKAGE_SHORT_NAME}/"
+DEB_PACK_CONFIG_PATH="${DEB_PACK_BASE_PATH}/etc/${PACKAGE_SHORT_NAME}"
+TARGET_TAR=$(ls ${TARGET_BUILD_PATH}*.tar.gz)
+TARGET_TAR="${TARGET_TAR##*/}"
+TAR_PATH="${TARGET_TAR%.*}"
+TAR_PATH="${TAR_PATH%.*}"
+if [ -e $DEB_PACK_BASE_PATH ]; then
+    rm -R $DEB_PACK_BASE_PATH
+fi
+
+# Create Deb pack content and configuration
+mkdir -p ${DEB_PACK_CONTENT_PATH}
+cp ${TARGET_BUILD_PATH}${TARGET_TAR} ${DEB_PACK_CONTENT_PATH}
+tar -xzf ${DEB_PACK_CONTENT_PATH}${TARGET_TAR} -C ${DEB_PACK_CONTENT_PATH}
+rm ${DEB_PACK_CONTENT_PATH}${TARGET_TAR}
+mv ${DEB_PACK_CONTENT_PATH}${TAR_PATH}/* ${DEB_PACK_CONTENT_PATH}.
+rm -R ${DEB_PACK_CONTENT_PATH}${TAR_PATH}
+
+# Create and populate Deb pack config target
+mkdir -p ${DEB_PACK_CONFIG_PATH}/etc
+mv ${DEB_PACK_CONTENT_PATH}etc/* ${DEB_PACK_CONFIG_PATH}/etc/
+rm -R ${DEB_PACK_CONTENT_PATH}etc
+mkdir -p ${DEB_PACK_CONFIG_PATH}/configuration
+mv ${DEB_PACK_CONTENT_PATH}configuration/* ${DEB_PACK_CONFIG_PATH}/configuration/
+rm -R ${DEB_PACK_CONTENT_PATH}configuration
+
+# Set package permissions
+find ${DEB_PACK_CONTENT_PATH} -type d -print -exec chmod 755 {} \;
+find ${DEB_PACK_CONFIG_PATH}/etc/ -type f -print -exec chmod 644 {} \;
+find ${DEB_PACK_CONFIG_PATH}/etc/ -type d -print -exec chmod 755 {} \;
+
+# Create package usr/bin odl script
+mkdir  "${DEB_PACK_BASE_PATH}/usr/bin"
+chmod 755 "${DEB_PACK_BASE_PATH}/usr/bin"
+make-DEBIAN_bin > "${DEB_PACK_BASE_PATH}/usr/bin/odl"
+chmod 755 "${DEB_PACK_BASE_PATH}/usr/bin/odl"
+
+# Create Deb pack install meta-data
+mkdir "${DEB_PACK_BASE_PATH}/DEBIAN"
+make-DEBIAN_control > "${DEB_PACK_BASE_PATH}/DEBIAN/control"
+make-DEBIAN_conffiles > "${DEB_PACK_BASE_PATH}/DEBIAN/conffiles"
+mkdir -p "${DEB_PACK_BASE_PATH}/usr/share/doc/${PACKAGE_SHORT_NAME}"
+make-DEBIAN_copyright > "${DEB_PACK_BASE_PATH}/usr/share/doc/${PACKAGE_SHORT_NAME}/copyright"
+make-DEBIAN_changelog > "${DEB_PACK_BASE_PATH}/usr/share/doc/${PACKAGE_SHORT_NAME}/changelog.Debian"
+
+# Create Deb pack post install symlinks and usr/bin scripts
+make-DEBIAN_postinst > "${DEB_PACK_BASE_PATH}/DEBIAN/postinst"
+chmod 755  "${DEB_PACK_BASE_PATH}/DEBIAN/postinst"
+mkdir -p "${DEB_PACK_BASE_PATH}/usr/bin"
diff --git a/fuel/build/opendaylight/odl_maven/settings.xml b/fuel/build/opendaylight/odl_maven/settings.xml
new file mode 100644 (file)
index 0000000..35a4442
--- /dev/null
@@ -0,0 +1,46 @@
+<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"
+    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+    xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0 http://maven.apache.org/xsd/settings-1.0.0.xsd">
+
+    <profiles>
+        <profile>
+            <id>opendaylight-release</id>
+            <repositories>
+                <repository>
+                    <releases>
+                        <enabled>true</enabled>
+                        <updatePolicy>never</updatePolicy>
+                    </releases>
+                    <snapshots>
+                        <enabled>false</enabled>
+                    </snapshots>
+                    <id>opendaylight-mirror</id>
+                    <name>opendaylight-mirror</name>
+                    <url>http://nexus.opendaylight.org/content/groups/public/</url>
+                </repository>
+            </repositories>
+        </profile>
+
+        <profile>
+            <id>opendaylight-snapshots</id>
+            <repositories>
+                <repository>
+                    <releases>
+                        <enabled>false</enabled>
+                    </releases>
+                    <snapshots>
+                        <enabled>true</enabled>
+                    </snapshots>
+                    <id>opendaylight-snapshot</id>
+                    <name>opendaylight-snapshot</name>
+                    <url>http://nexus.opendaylight.org/content/repositories/opendaylight.snapshot/</url>
+                </repository>
+            </repositories>
+        </profile>
+    </profiles>
+
+    <activeProfiles>
+        <activeProfile>opendaylight-release</activeProfile>
+        <activeProfile>opendaylight-snapshots</activeProfile>
+    </activeProfiles>
+</settings>
index 339c9e7..bd3a437 100644 (file)
@@ -8,7 +8,7 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 
-SUBDIRS := 
+SUBDIRS := debootstrap novnc neutron-common
 SUBCLEAN = $(addsuffix .clean,$(SUBDIRS))
 
 .PHONY: $(SUBDIRS) $(SUBCLEAN) clean
diff --git a/fuel/build/patch-packages/debootstrap/Makefile b/fuel/build/patch-packages/debootstrap/Makefile
new file mode 100644 (file)
index 0000000..0109312
--- /dev/null
@@ -0,0 +1,28 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+TOP := $(shell pwd)
+
+.PHONY: all
+all:
+
+.PHONY: clean
+clean:
+       @rm -rf udebPackage
+       @rm -rf *.udeb
+       @rm -rf patch-replacements
+       @rm -rf .udebpackage
+
+.PHONY: release
+release:
+       ../tools/udeb_unpack debootstrap-udeb_1.0.4*.udeb $(ORIGISO)
+       patch -s -p0 < debootstrap.patch
+       ../tools/udeb_pack $(REVSTATE)
+       @cp *.udeb $(UDEB_DEST)
diff --git a/fuel/build/patch-packages/debootstrap/debootstrap.patch b/fuel/build/patch-packages/debootstrap/debootstrap.patch
new file mode 100644 (file)
index 0000000..62342c9
--- /dev/null
@@ -0,0 +1,12 @@
+--- udebPackage/usr/share/debootstrap/scripts/gutsy.orig       2014-11-10 18:21:37.000000000 +0000
++++ udebPackage/usr/share/debootstrap/scripts/gutsy    2015-04-15 09:28:44.290437000 +0000
+@@ -112,7 +112,8 @@
+
+       p; progress $baseprog $bases INSTCORE "Installing core packages" #2
+       ln -sf mawk "$TARGET/usr/bin/awk"
+-      x_core_install base-files base-passwd
++      x_core_install base-passwd
++      x_core_install base-files
+       p; progress $baseprog $bases INSTCORE "Installing core packages" #3
+       x_core_install dpkg
+
diff --git a/fuel/build/patch-packages/neutron-common/Makefile b/fuel/build/patch-packages/neutron-common/Makefile
new file mode 100644 (file)
index 0000000..e9d43a4
--- /dev/null
@@ -0,0 +1,19 @@
+TOP := $(shell pwd)
+
+.PHONY: all
+all:
+
+.PHONY: clean
+clean:
+       @rm -rf package
+       @rm -rf *.deb
+       @rm -rf patch-replacements
+       @rm -rf .package
+
+.PHONY: release
+release:
+       ../tools/deb_unpack neutron-common_*.deb $(ORIGISO)
+       patch -s -p0 < quota.patch
+       ../tools/deb_pack $(REVSTATE)
+       @cp *.deb ../release/packages
+       @cat patch-replacements >> ../release/patch-replacements
diff --git a/fuel/build/patch-packages/neutron-common/quota.patch b/fuel/build/patch-packages/neutron-common/quota.patch
new file mode 100644 (file)
index 0000000..6f179f0
--- /dev/null
@@ -0,0 +1,67 @@
+*** package/etc/neutron/neutron.conf.orig      2015-05-25 15:50:09.933131041 +0200
+--- package/etc/neutron/neutron.conf   2015-05-25 15:55:07.859210010 +0200
+***************
+*** 502,518 ****
+  # default_quota = -1
+  
+  # Number of networks allowed per tenant. A negative value means unlimited.
+! # quota_network = 10
+  
+  # Number of subnets allowed per tenant. A negative value means unlimited.
+! # quota_subnet = 10
+  
+  # Number of ports allowed per tenant. A negative value means unlimited.
+  # quota_port = 50
+  
+  # Number of security groups allowed per tenant. A negative value means
+  # unlimited.
+! # quota_security_group = 10
+  
+  # Number of security group rules allowed per tenant. A negative value means
+  # unlimited.
+--- 502,521 ----
+  # default_quota = -1
+  
+  # Number of networks allowed per tenant. A negative value means unlimited.
+! # This quota modified by OPNFV: 10 -> 50
+! quota_network = 50
+  
+  # Number of subnets allowed per tenant. A negative value means unlimited.
+! # This quota modified by OPNFV: 10 -> 50
+! quota_subnet = 50
+  
+  # Number of ports allowed per tenant. A negative value means unlimited.
+  # quota_port = 50
+  
+  # Number of security groups allowed per tenant. A negative value means
+  # unlimited.
+! # This quota modified by OPNFV: 10 -> 50
+! quota_security_group = 50
+  
+  # Number of security group rules allowed per tenant. A negative value means
+  # unlimited.
+***************
+*** 538,547 ****
+  # quota_health_monitor = -1
+  
+  # Number of routers allowed per tenant. A negative value means unlimited.
+! # quota_router = 10
+  
+  # Number of floating IPs allowed per tenant. A negative value means unlimited.
+! # quota_floatingip = 50
+  
+  # Number of firewalls allowed per tenant. A negative value means unlimited.
+  # quota_firewall = 1
+--- 541,552 ----
+  # quota_health_monitor = -1
+  
+  # Number of routers allowed per tenant. A negative value means unlimited.
+! # This quota modified by OPNFV: 10 -> 50
+! quota_router = 50
+  
+  # Number of floating IPs allowed per tenant. A negative value means unlimited.
+! # This quota modified by OPNFV: 50 -> 100
+! quota_floatingip = 100
+  
+  # Number of firewalls allowed per tenant. A negative value means unlimited.
+  # quota_firewall = 1
diff --git a/fuel/build/patch-packages/novnc/Makefile b/fuel/build/patch-packages/novnc/Makefile
new file mode 100644 (file)
index 0000000..16c0196
--- /dev/null
@@ -0,0 +1,22 @@
+# This is a temporary patch which adds missing files
+# inside the novnc ubuntu package.
+# Related bug: https://bugs.launchpad.net/fuel/+bug/1433894
+TOP := $(shell pwd)
+
+.PHONY: all
+all:
+
+.PHONY: clean
+clean:
+       @rm -rf package
+       @rm -rf *.deb
+       @rm -rf patch-replacements
+       @rm -rf .package
+
+.PHONY: release
+release:
+       ../tools/deb_unpack novnc_0.5.1*.deb $(ORIGISO)
+       ./fix-missing.sh
+       ../tools/deb_pack $(REVSTATE)
+       @cp *.deb ../release/packages
+       @cat patch-replacements >> ../release/patch-replacements
diff --git a/fuel/build/patch-packages/novnc/fix-missing.sh b/fuel/build/patch-packages/novnc/fix-missing.sh
new file mode 100755 (executable)
index 0000000..61ef1db
--- /dev/null
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+MISSING_FILES="keyboard.js keysymdef.js keysym.js"
+NOVNC_SOURCE="http://raw.githubusercontent.com/kanaka/noVNC/v0.5.1/include"
+
+for file in $MISSING_FILES
+do
+  wget -P package/usr/share/novnc/include/ "$NOVNC_SOURCE/$file"
+done
index aab823b..3525d4d 100644 (file)
@@ -16,91 +16,8 @@ There are two Fuel@OPNF autonomous scripts fo this, complying to the OPNFV CI pi
 
 For more info on usage:
 ./build.sh -h
-sudo ./deploy.sh -h
-python deploy.py -h
+./deploy.sh -h
 
-usage: python deploy.py [-h] [-nf] [-nh] [-fo] [-co] [-c] [-iso [ISO_FILE]]
-                        [-dea [DEA_FILE]] [-dha [DHA_FILE]] [-s STORAGE_DIR]
-                        [-b PXE_BRIDGE] [-p FUEL_PLUGINS_DIR]
+To be able to deploy on a certain bare metal environment there needs to be a "Deployment
+Environment Adaptor" executable properly added to $PATH, such that deploy.sh can call it by
+$dea [options] as indicated by ./deploy.sh -h.
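
As an illustration (the adaptor name and path here are hypothetical; a real
per-environment executable must be provided and put on $PATH):

    export PATH=$PATH:/opt/dea      # directory containing e.g. a 'libvirt_dea' adaptor
    ./deploy.sh -h                  # shows how the adaptor ($dea) is expected to be invoked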
 
-optional arguments:
-  -h, --help           show this help message and exit
-  -nf                  Do not install Fuel Master (and Node VMs when using
-                       libvirt)
-  -nh                  Don't run health check after deployment
-  -fo                  Install Fuel Master only (and Node VMs when using
-                       libvirt)
-  -co                  Cleanup VMs and Virtual Networks according to what is
-                       defined in DHA
-  -c                   Cleanup after deploy
-  -iso [ISO_FILE]      ISO File [default: OPNFV.iso]
-  -dea [DEA_FILE]      Deployment Environment Adapter: dea.yaml
-  -dha [DHA_FILE]      Deployment Hardware Adapter: dha.yaml
-  -s STORAGE_DIR       Storage Directory [default: images]
-  -b PXE_BRIDGE        Linux Bridge for booting up the Fuel Master VM
-                       [default: pxebr]
-  -p FUEL_PLUGINS_DIR  Fuel Plugins directory
-
-
-
-* EXAMPLES:
-
-- Install Fuel Master and deploy OPNFV Cloud from scratch on Hardware Environment:
-
-    sudo ./deploy.sh -iso ~/ISO/opnfv.iso -dea ~/CONF/hardware/dea.yaml -dha ~/CONF/hardware/dha.yaml -s /mnt/images -b pxebr
-
-
-- Install Fuel Master and deploy OPNFV Cloud from scratch on Virtual Environment:
-
-    sudo ./deploy.sh -iso ~/ISO/opnfv.iso -dea ~/CONF/virtual/dea.yaml -dha ~/CONF/virtual/dha.yaml -s /mnt/images
-
-
-- Deploy OPNFV Cloud on an already active Environment where Fuel Master VM is running so no need to install Fuel again:
-
-    sudo ./deploy.sh -nf -dea ~/CONF/virtual/dea.yaml -dha ~/CONF/virtual/dha.yaml
-
-    => with plugin installation
-    sudo ./deploy.sh -nf -dea ~/CONF/virtual/dea.yaml -dha ~/CONF/virtual/dha.yaml
-
-    => with cleanup after deployment is finished
-    sudo ./deploy.sh -nf -dea ~/CONF/virtual/dea.yaml -dha ~/CONF/virtual/dha.yaml -c
-
-    => no healthcheck after deployment is completed
-    sudo ./deploy.sh -nf -dea ~/CONF/virtual/dea.yaml -dha ~/CONF/virtual/dha.yaml -nh
-
-
-- Install Fuel Master only (and Node VMs when using virtual environment):
-
-    => for virtual environment:
-    sudo ./deploy.sh -iso ~/ISO/opnfv.iso -dea ~/CONF/virtual/dea.yaml -dha ~/CONF/virtual/dha.yaml -s /mnt/images
-
-    => for hardware environment:
-    sudo ./deploy.sh -iso ~/ISO/opnfv.iso -dea ~/CONF/hardware/dea.yaml -dha ~/CONF/hardware/dha.yaml -s /mnt/images -b pxebr
-
-
-- Cleanup a running OPNFV environment:
-
-    sudo ./deploy.sh -co -dha ~/CONF/virtual/dha.yaml
-
-
-* WARNINGS:
-
-=>  If optional argument -s <storage_dir> is not specified, Autodeployment will use
-"<current_working_dir>/images" as default, and it will create it, if it hasn't been created before
-
-=>  If optional argument -b <pxe_bridge> is not specified, Autodeployment will use "pxebr" as default,
-if the bridge does not exist, the application will terminate with an error message
-
-=>  If argument -iso [ISO_FILE] is not specified, Autodeployment will use "<current_working_dir>/OPNFV.iso"
-as default, if the iso file does not exist, the application will terminate with an error message
-
-=>  If argument -dea [DEA_FILE] is not specified, Autodeployment will use "<current_working_dir>/dea.yaml"
-as default, if DEA file does not exist, the application will terminate with an error message
-
-=>  If argument -dha [DHA_FILE] is not specified, Autodeployment will use "<current_working_dir>/dha.yaml"
-as default, if DHA file does not exist, the application will terminate with an error message
-
-=> Optional argument -b PXE_BRIDGE is not required for Autodeployment in virtual environment,
-   even if it is specified it will not be used at all because virtual environment is using a different virtual network setup
-
-=> If optional argument -p FUEL_PLUGINS_DIR is not specified, no external plugins will be installed in Fuel
\ No newline at end of file
index f8e164a..51ccdae 100755 (executable)
@@ -79,32 +79,6 @@ EOF
 # END of usage description
 ############################################################################
 
-############################################################################
-# Begin of string xor function
-#
-function  xor()
-{
-      local res=(`echo "$1" | sed "s/../0x& /g"`)
-      shift 1
-      while [[ "$1" ]]; do
-            local one=(`echo "$1" | sed "s/../0x& /g"`)
-            local count1=${#res[@]}
-            if [ $count1 -lt ${#one[@]} ]
-            then
-                  count1=${#one[@]}
-            fi
-            for (( i = 0; i < $count1; i++ ))
-            do
-                  res[$i]=$((${one[$i]:-0} ^ ${res[$i]:-0}))
-            done
-            shift 1
-      done
-       printf "%02x" "${res[@]}"
-}
-#
-# END of string xor function
-############################################################################
-
 ############################################################################
 # BEGIN of variables to customize
 #
@@ -113,7 +87,7 @@ RESULT_DIR="${BUILD_BASE}/release"
 BUILD_SPEC="${BUILD_BASE}/config.mk"
 CACHE_DIR="cache"
 LOCAL_CACHE_ARCH_NAME="fuel-cache"
-
+REMOTE_CACHE_ARCH_NAME="fuel_cache-$(md5sum ${BUILD_SPEC}| cut -f1 -d " ")"
 REMOTE_ACCESS_METHD=curl
 INCLUDE_DIR=../include
 #
@@ -143,14 +117,6 @@ BUILD_DIR=
 BUILD_LOG=
 BUILD_VERSION=
 MAKE_ARGS=
-FUEL_GIT_SRC="$(make -f ../build/config.mk get-fuel-repo | cut -d " " -f1)"
-FUEL_GIT_BRANCH="$(make -f ../build/config.mk get-fuel-repo | cut -d " " -f2)"
-CACHE_MD5=$(md5sum ../build/cache.mk | cut -f1 -d " ")
-CONFIG_MD5=$(md5sum ../build/config.mk | cut -f1 -d " ")
-FUEL_COMMIT_ID=$(git ls-remote $FUEL_GIT_SRC -t $FUEL_GIT_BRANCH | cut -d $'\t' -f1)
-REMOTE_CACHE_ARCH_HASH_TMP="$(xor $CACHE_MD5 $CONFIG_MD5)"
-REMOTE_CACHE_ARCH_HASH="$(xor $REMOTE_CACHE_ARCH_HASH_TMP $FUEL_COMMIT_ID)"
-REMOTE_CACHE_ARCH_NAME="fuel_cache-$REMOTE_CACHE_ARCH_HASH"
 #
 # END of script assigned variables
 ############################################################################
@@ -169,53 +135,53 @@ source ${INCLUDE_DIR}/build.sh.debug
 while getopts "s:c:v:f:l:r:RtTh" OPTION
 do
     case $OPTION in
-        h)
-            usage
-            rc=0
-            exit $rc
-            ;;
-
-        s)
-            BUILD_SPEC=${OPTARG}
-            ;;
-
-        c)
-            BUILD_CACHE_URI=${OPTARG}
-            ;;
-
-        l)
-            BUILD_LOG=${OPTARG}
-            ;;
-
-        v)
-            BUILD_VERSION=${OPTARG}
-            ;;
-
-        f)
-            BUILD_FLAGS=${OPTARG}
-            ;;
-
-        r)  REMOTE_ACCESS_METHD=${OPTARG}
-            ;;
-
-        R)
-            RECURSIVE=1
-            ;;
-
-        t)
-            INTEGRATION_TEST=1
-            ;;
-
-        T)
-            INTEGRATION_TEST=1
-            FULL_INTEGRATION_TEST=1
-            ;;
-
-        *)
-            echo "${OPTION} is not a valid argument"
-            rc=100
-            exit $rc
-            ;;
+       h)
+           usage
+           rc=0
+           exit $rc
+           ;;
+
+       s)
+           BUILD_SPEC=${OPTARG}
+           ;;
+
+       c)
+           BUILD_CACHE_URI=${OPTARG}
+           ;;
+
+       l)
+           BUILD_LOG=${OPTARG}
+           ;;
+
+       v)
+           BUILD_VERSION=${OPTARG}
+           ;;
+
+       f)
+           BUILD_FLAGS=${OPTARG}
+           ;;
+
+       r)  REMOTE_ACCESS_METHD=${OPTARG}
+           ;;
+
+       R)
+           RECURSIVE=1
+           ;;
+
+       t)
+           INTEGRATION_TEST=1
+           ;;
+
+       T)
+           INTEGRATION_TEST=1
+           FULL_INTEGRATION_TEST=1
+           ;;
+
+       *)
+           echo "${OPTION} is not a valid argument"
+           rc=100
+           exit $rc
+           ;;
     esac
 done
 
@@ -225,44 +191,44 @@ fi
 
 for ((i=0; i<${#BUILD_FLAGS};i++)); do
     case ${BUILD_FLAGS:$i:1} in
-        s)
-            rc=0
-            exit $rc
-            ;;
-
-        f)
-            rc=1
-            exit $rc
-            ;;
-
-        t)
-            UNIT_TEST=1
-            ;;
-
-        i)
-            INTERACTIVE=1
-            ;;
-
-        P)
-            POPULATE_CACHE=1
-            ;;
-
-        d)
-            DETACH=1
-            echo "Detach is not yet supported - exiting ...."
-            rc=100
-            exit $rc
-            ;;
-
-        D)
-            DEBUG=1
-            ;;
-
-        *)
-            echo "${BUILD_FLAGS:$i:1} is not a valid build flag - exiting ...."
-            rc=100
-            exit $rc
-            ;;
+       s)
+           rc=0
+           exit $rc
+           ;;
+
+       f)
+           rc=1
+           exit $rc
+           ;;
+
+       t)
+           UNIT_TEST=1
+           ;;
+
+       i)
+           INTERACTIVE=1
+           ;;
+
+       P)
+           POPULATE_CACHE=1
+           ;;
+
+       d)
+           DETACH=1
+           echo "Detach is not yet supported - exiting ...."
+           rc=100
+           exit $rc
+           ;;
+
+       D)
+           DEBUG=1
+           ;;
+
+       *)
+           echo "${BUILD_FLAGS:$i:1} is not a valid build flag - exiting ...."
+           rc=100
+           exit $rc
+           ;;
     esac
 done
 
@@ -286,13 +252,13 @@ fi
 
 if [ ! -z ${BUILD_LOG} ]; then
     if [[ ${RECURSIVE} -ne 1 ]]; then
-        set +e
-        eval $0 -R $@ > ${BUILD_LOG} 2>&1
-        rc=$?
-        set -e
-        if [ $rc -ne 0]; then
-            exit $rc
-        fi
+       set +e
+       eval $0 -R $@ > ${BUILD_LOG} 2>&1
+       rc=$?
+       set -e
+	if [ $rc -ne 0 ]; then
+           exit $rc
+       fi
     fi
 fi
 
@@ -318,86 +284,47 @@ echo $$ > ${LOCK_FILE}
 
 if [ ! -z ${BUILD_CACHE_URI} ]; then
     if [ ${POPULATE_CACHE} -ne 1 ]; then
-        rm -rf ${CACHE_TMP}/cache
-        mkdir -p ${CACHE_TMP}/cache
-        echo "Downloading cache archive ${BUILD_CACHE_URI}/${REMOTE_CACHE_ARCH_NAME} ..."
-        set +e
-        ${REMOTE_ACCESS_METHD} -o ${CACHE_TMP}/cache/${LOCAL_CACHE_ARCH_NAME}.tgz ${BUILD_CACHE_URI}/${REMOTE_CACHE_ARCH_NAME}.tgz
-        rc=$?
-        set -e
-        if [ $rc -ne 0 ]; then
-            echo "Remote cache does not exist, or is not accessible - a new cache will be built ..."
-            POPULATE_CACHE=1
-        else
-            echo "Unpacking cache archive ..."
-            set +e
-            tar -C ${CACHE_TMP}/cache -xvf ${CACHE_TMP}/cache/${LOCAL_CACHE_ARCH_NAME}.tgz
-            rc=$?
-            set -e
-            if [ $rc -ne 0 ]; then
-                echo "WARNING: The cache seems to be corrupt or has trailing garbage, will try to use brute force"
-                echo "Info about the cache below:"
-                set +e
-                file ${CACHE_TMP}/cache/${LOCAL_CACHE_ARCH_NAME}.tgz
-                tar -C ${CACHE_TMP}/cache -tvf ${CACHE_TMP}/cache/${LOCAL_CACHE_ARCH_NAME}.tgz
-                set -e
-                echo "Current time is: `date`"
-                set +e
-                pushd ${CACHE_TMP}/cache
-                gunzip -dcq ${CACHE_TMP}/cache/${LOCAL_CACHE_ARCH_NAME}.tgz | tar -xvf -
-                rc=$?
-                set -e
-                popd
-                if [ $rc -ne 0 ]; then
-                    echo "ERROR: Not able to resolve the cache corruption"
-                    POPULATE_CACHE=1
-                else
-                    echo "The chache corruption was resolved"
-                    cp ${CACHE_TMP}/cache/cache/.versions ${BUILD_BASE}/.
-                    set +e
-                    make -C ${BUILD_BASE} validate-cache;
-                    rc=$?
-                    set -e
-                    if [ $rc -ne 0 ]; then
-                        echo "Cache invalid - a new cache will be built "
-                        POPULATE_CACHE=1
-                    else
-                        echo "Cache is up to date and will be used"
-                        cp -rf ${CACHE_TMP}/cache/cache/. ${BUILD_BASE}
-                    fi
-                fi
-            else
-                echo "Cache archive is intact"
-                cp ${CACHE_TMP}/cache/cache/.versions ${BUILD_BASE}/.
-                set +e
-                make -C ${BUILD_BASE} validate-cache;
-                rc=$?
-                set -e
-
-                if [ $rc -ne 0 ]; then
-                    echo "Cache invalid - a new cache will be built "
-                    POPULATE_CACHE=1
-                else
-                    echo "Cache is up to date and will be used"
-                    cp -rf ${CACHE_TMP}/cache/cache/. ${BUILD_BASE}
-                fi
-            fi
-            rm -rf ${CACHE_TMP}/cache
-        fi
+       rm -rf ${CACHE_TMP}/cache
+       mkdir -p ${CACHE_TMP}/cache
+	echo "Downloading cache file ${BUILD_CACHE_URI}/${REMOTE_CACHE_ARCH_NAME} ..."
+       set +e
+       ${REMOTE_ACCESS_METHD} -o ${CACHE_TMP}/cache/${LOCAL_CACHE_ARCH_NAME}.tgz ${BUILD_CACHE_URI}/${REMOTE_CACHE_ARCH_NAME}.tgz
+       rc=$?
+       set -e
+       if [ $rc -ne 0 ]; then
+               echo "Remote cache does not exist, or is not accessible - a new cache will be built ..."
+               POPULATE_CACHE=1
+       else
+           echo "Unpacking cache file ..."
+           tar -C ${CACHE_TMP}/cache -xvf ${CACHE_TMP}/cache/${LOCAL_CACHE_ARCH_NAME}.tgz
+           cp ${CACHE_TMP}/cache/cache/.versions ${BUILD_BASE}/.
+           set +e
+                   make -C ${BUILD_BASE} validate-cache;
+           rc=$?
+           set -e
+
+           if [ $rc -ne 0 ]; then
+               echo "Cache invalid - a new cache will be built "
+               POPULATE_CACHE=1
+           else
+               cp -rf ${CACHE_TMP}/cache/cache/. ${BUILD_BASE}
+           fi
+           rm -rf ${CACHE_TMP}/cache
+       fi
     fi
 fi
 
 if [ ${POPULATE_CACHE} -eq 1 ]; then
     if [ ${DEBUG} -eq 0 ]; then
-        set +e
-        cd ${BUILD_BASE} && make clean
-        rc=$?
-        set -e
-        if [ $rc -ne 0 ]; then
-            echo "Build - make clean failed, exiting ..."
-            rc=100
-            exit $rc
-        fi
+       set +e
+       cd ${BUILD_BASE} && make clean
+       rc=$?
+       set -e
+       if [ $rc -ne 0 ]; then
+           echo "Build - make clean failed, exiting ..."
+           rc=100
+           exit $rc
+       fi
     fi
 fi
 
@@ -425,12 +352,12 @@ if [ ${DEBUG} -eq 0 ]; then
     rc=$?
     set -e
     if [ $rc -gt 0 ]; then
-        echo "Build: make all failed, exiting ..."
-        rc=200
-        exit $rc
+       echo "Build: make all failed, exiting ..."
+       rc=200
+       exit $rc
     fi
 else
-    debug_make
+debug_make
 fi
 set +e
 make -C ${BUILD_BASE} prepare-cache
@@ -450,20 +377,11 @@ cp ${RESULT_DIR}/*.iso* ${BUILD_DIR}
 
 if [ $POPULATE_CACHE -eq 1 ]; then
     if [ ! -z ${BUILD_CACHE_URI} ]; then
-        echo "Building cache ..."
-        tar --dereference -C ${BUILD_BASE} -caf ${BUILD_BASE}/${LOCAL_CACHE_ARCH_NAME}.tgz ${CACHE_DIR}
-        set +e
-        tar -C ${CACHE_TMP}/cache -tvf ${BUILD_BASE}/${LOCAL_CACHE_ARCH_NAME}.tgz
-        rc=$?
-        set -e
-        if [ $rc -ne 0 ]; then
-            echo "WARNING the cache archive generated seems to be corrupt, or containing trailing garbage"
-        else
-            echo "The Cache archive build is intact"
-        fi
-        echo "Uploading cache ${BUILD_CACHE_URI}/${REMOTE_CACHE_ARCH_NAME}"
-        ${REMOTE_ACCESS_METHD} -T ${BUILD_BASE}/${LOCAL_CACHE_ARCH_NAME}.tgz ${BUILD_CACHE_URI}/${REMOTE_CACHE_ARCH_NAME}.tgz
-        rm ${BUILD_BASE}/${LOCAL_CACHE_ARCH_NAME}.tgz
+       echo "Building cache ..."
+       tar --dereference -C ${BUILD_BASE} -caf ${BUILD_BASE}/${LOCAL_CACHE_ARCH_NAME}.tgz ${CACHE_DIR}
+       echo "Uploading cache ${BUILD_CACHE_URI}/${REMOTE_CACHE_ARCH_NAME}"
+       ${REMOTE_ACCESS_METHD} -T ${BUILD_BASE}/${LOCAL_CACHE_ARCH_NAME}.tgz ${BUILD_CACHE_URI}/${REMOTE_CACHE_ARCH_NAME}.tgz
+       rm ${BUILD_BASE}/${LOCAL_CACHE_ARCH_NAME}.tgz
     fi
 fi
 echo "Success!!!"
index d5b70d0..df23249 100755 (executable)
@@ -1,8 +1,12 @@
-#!/bin/bash
+#!/bin/bash -x
+set -o xtrace
 set -o errexit
-topdir=$(dirname $(readlink -f $BASH_SOURCE))
-deploydir=$(cd ${topdir}/../deploy; pwd)
-pushd ${deploydir} > /dev/null
-echo -e "python deploy.py $@\n"
-python deploy.py $@
-popd > /dev/null
\ No newline at end of file
+set -o nounset
+set -o pipefail
+
+WORKSPACE=$(readlink -e ..)
+ISO_LOCATION="$(readlink -f $(find $WORKSPACE -iname 'fuel*iso' -type f))"
+INTERFACE="fuel"
+
+cd "${WORKSPACE}/deploy"
+./deploy_fuel.sh "$ISO_LOCATION" $INTERFACE 2>&1 | tee deploy_fuel.log
diff --git a/fuel/deploy/README b/fuel/deploy/README
deleted file mode 100644 (file)
index 167078b..0000000
+++ /dev/null
@@ -1,186 +0,0 @@
-
-======== PREREQUISITES ========
-
-the following dependencies and python modules are required to be installed:
-
-- for Ubuntu:
-
-sudo apt-get install -y libvirt-bin qemu-kvm python-pip fuseiso mkisofs
-sudo apt-get install -y python-dev libz-dev libxml2-dev libxslt-dev
-sudo pip install pyyaml netaddr paramiko lxml scp pycrypto ecdsa
-
-During libvirt install the user is added to the libvirtd group, so you have to
-logout then login back again
-
-
-======== PREPARE and RUN the OPNFV Autodeployment ========
-
-
---- Step.1 Prepare the DEA and DHA configuration files and the OPNFV ISO file
-
-Make sure that you are using the right DEA - Deployment Environment Adapter and
-DHA - Deployment Hardware Adapter configuration files, the ones provided are only templates
-you will have to modify them according to your needs
-
-- If wou wish to deploy OPNFV cloud environment on top of KVM/Libvirt
-  virtualization use as example the following configuration files:
-
-  * SR1 configuration files
-
-  =>   templates/virtual_environment/conf/ha
-                dea.yaml
-                dha.yaml
-
-
-  * ARNO configuration files
-
-  =>   templates/virtual_environment/old_conf/ha
-                dea.yaml
-                dha.yaml
-
-  =>   templates/virtual_environment/old_conf/multinode
-                dea.yaml
-                dha.yaml
-
-
-- If you wish to deploy OPNFV cloud environment on hardware
-  use as example the following configuration files:
-
-  * SR1 configuration files
-
-  =>   templates/hardware_environment/conf/ericsson_montreal_lab/ha
-                dea.yaml
-                dha.yaml
-
-  =>   templates/hardware_environment/conf/linux_foundation_lab/pod1/ha
-                dea.yaml
-                dha.yaml
-
-  =>   templates/hardware_environment/conf/linux_foundation_lab/pod2/ha
-                dea.yaml
-                dha.yaml
-
-
-  * ARNO configuration files
-
-  =>   templates/hardware_environment/old_conf/ericsson_montreal_lab/ha
-                dea.yaml
-                dha.yaml
-
-  =>   templates/hardware_environment/old_conf/ericsson_montreal_lab/multinode
-                dea.yaml
-                dha.yaml
-
-  =>   templates/hardware_environment/old_conf/linux_foundation_lab/ha
-                dea.yaml
-                dha.yaml
-
-  =>   templates/hardware_environment/old_conf/linux_foundation_lab/multinode
-                dea.yaml
-                dha.yaml
-
-
---- Step.2 Run Autodeployment ---
-
-usage: python deploy.py [-h] [-nf] [-nh] [-fo] [-co] [-c] [-iso [ISO_FILE]]
-                        [-dea [DEA_FILE]] [-dha [DHA_FILE]] [-s STORAGE_DIR]
-                        [-b PXE_BRIDGE] [-p FUEL_PLUGINS_DIR]
-
-optional arguments:
-  -h, --help           show this help message and exit
-  -nf                  Do not install Fuel Master (and Node VMs when using libvirt)
-  -nh                  Don't run health check after deployment
-  -fo                  Install Fuel Master only (and Node VMs when using libvirt)
-  -co                  Cleanup VMs and Virtual Networks according to what is
-                       defined in DHA
-  -c                   Cleanup after deploy
-  -iso [ISO_FILE]      ISO File [default: OPNFV.iso]
-  -dea [DEA_FILE]      Deployment Environment Adapter: dea.yaml
-  -dha [DHA_FILE]      Deployment Hardware Adapter: dha.yaml
-  -s STORAGE_DIR       Storage Directory [default: images]
-  -b PXE_BRIDGE        Linux Bridge for booting up the Fuel Master VM
-                       [default: pxebr]
-  -p FUEL_PLUGINS_DIR  Fuel Plugins directory
-
-
-* EXAMPLES:
-
-- Install Fuel Master and deploy OPNFV Cloud from scratch on Hardware Environment:
-
-    sudo python deploy.py -iso ~/ISO/opnfv.iso -dea ~/CONF/hardware/dea.yaml -dha ~/CONF/hardware/dha.yaml -s /mnt/images -b pxebr
-
-
-- Install Fuel Master and deploy OPNFV Cloud from scratch on Virtual Environment:
-
-    sudo python deploy.py -iso ~/ISO/opnfv.iso -dea ~/CONF/virtual/dea.yaml -dha ~/CONF/virtual/dha.yaml -s /mnt/images
-
-
-- Deploy OPNFV Cloud on an already active Environment where Fuel Master VM is running so no need to install Fuel again:
-
-    sudo python deploy.py -nf -dea ~/CONF/virtual/dea.yaml -dha ~/CONF/virtual/dha.yaml
-
-    => with plugin installation
-    sudo python deploy.py -nf -dea ~/CONF/virtual/dea.yaml -dha ~/CONF/virtual/dha.yaml
-
-    => with cleanup after deployment is finished
-    sudo python deploy.py -nf -dea ~/CONF/virtual/dea.yaml -dha ~/CONF/virtual/dha.yaml -c
-
-    => no healthcheck after deployment is completed
-    sudo python deploy.py -nf -dea ~/CONF/virtual/dea.yaml -dha ~/CONF/virtual/dha.yaml -nh
-
-
-- Install Fuel Master only (and Node VMs when using virtual environment):
-
-    => for virtual environment:
-    sudo python deploy.py -iso ~/ISO/opnfv.iso -dea ~/CONF/virtual/dea.yaml -dha ~/CONF/virtual/dha.yaml -s /mnt/images
-
-    => for hardware environment:
-    sudo python deploy.py -iso ~/ISO/opnfv.iso -dea ~/CONF/hardware/dea.yaml -dha ~/CONF/hardware/dha.yaml -s /mnt/images -b pxebr
-
-
-- Cleanup a running OPNFV environment:
-
-    sudo python deploy.py -co -dha ~/CONF/virtual/dha.yaml
-
-
-* WARNINGS:
-
-=>  If optional argument -s <storage_dir> is not specified, Autodeployment will use
-"<current_working_dir>/images" as default, and it will create it, if it hasn't been created before
-
-=>  If optional argument -b <pxe_bridge> is not specified, Autodeployment will use "pxebr" as default,
-if the bridge does not exist, the application will terminate with an error message
-
-=>  If argument -iso [ISO_FILE] is not specified, Autodeployment will use "<current_working_dir>/OPNFV.iso"
-as default, if the iso file does not exist, the application will terminate with an error message
-
-=>  If argument -dea [DEA_FILE] is not specified, Autodeployment will use "<current_working_dir>/dea.yaml"
-as default, if DEA file does not exist, the application will terminate with an error message
-
-=>  If argument -dha [DHA_FILE] is not specified, Autodeployment will use "<current_working_dir>/dha.yaml"
-as default, if DHA file does not exist, the application will terminate with an error message
-
-=> Optional argument -b PXE_BRIDGE is not required for Autodeployment in virtual environment,
-   even if it is specified it will not be used at all because virtual environment is using a different virtual network setup
-
-=> If optional argument -p FUEL_PLUGINS_DIR is not specified, no external plugins will be installed in Fuel
-
-
---- Networking considerations ---
-
-For Virtual Environment:
-
-There are some NAT, IPTABLE conflicts on the edge of libvirt bridging and Fuel Master
-according to http://wiki.libvirt.org/page/Networking
-netfilter on the bridges should be disabled
-
-Add these lines to /etc/sysctl.conf
-
-cat >> /etc/sysctl.conf <<EOF
-net.bridge.bridge-nf-call-ip6tables = 0
-net.bridge.bridge-nf-call-iptables = 0
-net.bridge.bridge-nf-call-arptables = 0
-EOF
-
-and then reload configuration:
-sysctl -p /etc/sysctl.conf
diff --git a/fuel/deploy/README.txt b/fuel/deploy/README.txt
new file mode 100644 (file)
index 0000000..d392f8f
--- /dev/null
@@ -0,0 +1,71 @@
+
+======== How to prepare and run the OPNFV Autodeployment ========
+
+in fuel/build/deploy run these:
+
+
+
+--- Step.1 Install prerequisites
+
+sudo ./install-ubuntu-packages.sh
+
+
+
+
+
+
+--- Step.2-A If you want to deploy OPNFV cloud environment on top of KVM/Libvirt virtualization
+             run the following environment setup script
+
+sudo python setup_environment.py <storage_directory> <path_to_dha_file>
+
+Example:
+         sudo python setup_environment.py /mnt/images dha.yaml
+
+
+
+
+
+
+--- Step.2-B If you want to deploy OPNFV cloud environment on baremetal, run the
+             following environment setup script
+
+sudo python setup_vfuel.py <storage_directory> <path_to_dha_file>
+
+Example:
+         sudo python setup_vfuel.py /mnt/images dha.yaml
+
+
+WARNING!:
+setup_vfuel.py adds the following snippet into /etc/network/interfaces.
+Make sure to replace the interface 'p1p1.20' in setup_vfuel.py with your actual
+outbound interface in order to provide network access to the Fuel master for DNS and NTP.
+
+iface vfuelnet inet static
+       bridge_ports em1
+       address 10.40.0.1
+       netmask 255.255.255.0
+       pre-down iptables -t nat -D POSTROUTING --out-interface p1p1.20 -j MASQUERADE  -m comment --comment "vfuelnet"
+       pre-down iptables -D FORWARD --in-interface vfuelnet --out-interface p1p1.20 -m comment --comment "vfuelnet"
+       post-up iptables -t nat -A POSTROUTING --out-interface p1p1.20 -j MASQUERADE  -m comment --comment "vfuelnet"
+       post-up iptables -A FORWARD --in-interface vfuelnet --out-interface p1p1.20 -m comment --comment "vfuelnet"
+
+
+
+
+
+
+--- Step.3 Start Autodeployment
+Make sure you use the right Deployment Environment Adapter and
+Deployment Hardware Adapter configuration files:
+
+       - for baremetal:  baremetal/dea.yaml   baremetal/dha.yaml
+
+       - for libvirt:    libvirt/dea.yaml   libvirt/dha.yaml
+
+
+sudo python deploy.py [-nf] <isofile> <deafile> <dhafile>
+
+Example:
+         sudo python deploy.py ~/ISO/opnfv.iso baremetal/dea.yaml baremetal/dha.yaml
+
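Note: the WARNING in Step 2-B above asks you to replace the placeholder outbound interface
'p1p1.20' inside setup_vfuel.py. The snippet below is a minimal, hypothetical helper for doing
that substitution; it is not part of this change, and it assumes setup_vfuel.py sits in the
current directory and that the real interface name is passed as the first argument.

    # Hypothetical helper (not shipped with this change): rewrite the placeholder
    # outbound interface in setup_vfuel.py in place before running it.
    import fileinput
    import sys

    OLD_IFACE = 'p1p1.20'      # placeholder referenced in the WARNING above
    NEW_IFACE = sys.argv[1]    # e.g. 'em2' - your actual outbound interface

    for line in fileinput.input('setup_vfuel.py', inplace=True):
        # with inplace=True, whatever is written to stdout replaces the file line
        sys.stdout.write(line.replace(OLD_IFACE, NEW_IFACE))
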
index fb73157..e69de29 100644 (file)
@@ -1,8 +0,0 @@
-###############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# szilard.cserey@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-###############################################################################
diff --git a/fuel/deploy/baremetal/dea.yaml b/fuel/deploy/baremetal/dea.yaml
new file mode 100644 (file)
index 0000000..eb3019c
--- /dev/null
@@ -0,0 +1,982 @@
+title: Deployment Environment Adapter (DEA)
+# DEA API version supported
+version: 1.1
+created: Tue May  5 15:33:07 UTC 2015
+comment: Test environment Ericsson Montreal
+environment_name: opnfv
+environment_mode: multinode
+wanted_release: Juno on Ubuntu 12.04.4
+nodes:
+- id: 1
+  interfaces: interface1
+  transformations: controller1
+  role: controller
+- id: 2
+  interfaces: interface1
+  transformations: compute1
+  role: compute
+fuel:
+  ADMIN_NETWORK:
+    ipaddress: 10.40.0.2
+    netmask: 255.255.255.0
+    dhcp_pool_start: 10.40.0.3
+    dhcp_pool_end: 10.40.0.254
+  DNS_UPSTREAM: 10.118.32.193
+  DNS_DOMAIN: opnfvericsson.ca
+  DNS_SEARCH: opnfvericsson.ca
+  FUEL_ACCESS:
+    user: admin
+    password: admin
+  HOSTNAME: opnfv
+  NTP1: 0.ca.pool.ntp.org
+  NTP2: 1.ca.pool.ntp.org
+  NTP3: 2.ca.pool.ntp.org
+interfaces:
+  interface1:
+    eth0:
+    - fuelweb_admin
+    eth2:
+    - public
+    - management
+    - storage
+    - private
+transformations:
+  controller1:
+    - action: add-br
+      name: br-eth0
+    - action: add-port
+      bridge: br-eth0
+      name: eth0
+    - action: add-br
+      name: br-eth1
+    - action: add-port
+      bridge: br-eth1
+      name: eth1
+    - action: add-br
+      name: br-eth2
+    - action: add-port
+      bridge: br-eth2
+      name: eth2
+    - action: add-br
+      name: br-eth3
+    - action: add-port
+      bridge: br-eth3
+      name: eth3
+    - action: add-br
+      name: br-eth4
+    - action: add-port
+      bridge: br-eth4
+      name: eth4
+    - action: add-br
+      name: br-eth5
+    - action: add-port
+      bridge: br-eth5
+      name: eth5
+    - action: add-br
+      name: br-ex
+    - action: add-br
+      name: br-mgmt
+    - action: add-br
+      name: br-storage
+    - action: add-br
+      name: br-fw-admin
+    - action: add-patch
+      bridges:
+      - br-eth2
+      - br-storage
+      tags:
+      - 220
+      - 0
+      vlan_ids:
+      - 220
+      - 0
+    - action: add-patch
+      bridges:
+      - br-eth2
+      - br-mgmt
+      tags:
+      - 320
+      - 0
+      vlan_ids:
+      - 320
+      - 0
+    - action: add-patch
+      bridges:
+      - br-eth0
+      - br-fw-admin
+      trunks:
+      - 0
+    - action: add-patch
+      bridges:
+      - br-eth2
+      - br-ex
+      tags:
+      - 120
+      - 0
+      vlan_ids:
+      - 120
+      - 0
+    - action: add-br
+      name: br-prv
+    - action: add-patch
+      bridges:
+      - br-eth2
+      - br-prv
+  compute1:
+    - action: add-br
+      name: br-eth0
+    - action: add-port
+      bridge: br-eth0
+      name: eth0
+    - action: add-br
+      name: br-eth1
+    - action: add-port
+      bridge: br-eth1
+      name: eth1
+    - action: add-br
+      name: br-eth2
+    - action: add-port
+      bridge: br-eth2
+      name: eth2
+    - action: add-br
+      name: br-eth3
+    - action: add-port
+      bridge: br-eth3
+      name: eth3
+    - action: add-br
+      name: br-eth4
+    - action: add-port
+      bridge: br-eth4
+      name: eth4
+    - action: add-br
+      name: br-eth5
+    - action: add-port
+      bridge: br-eth5
+      name: eth5
+    - action: add-br
+      name: br-mgmt
+    - action: add-br
+      name: br-storage
+    - action: add-br
+      name: br-fw-admin
+    - action: add-patch
+      bridges:
+      - br-eth2
+      - br-storage
+      tags:
+      - 220
+      - 0
+      vlan_ids:
+      - 220
+      - 0
+    - action: add-patch
+      bridges:
+      - br-eth2
+      - br-mgmt
+      tags:
+      - 320
+      - 0
+      vlan_ids:
+      - 320
+      - 0
+    - action: add-patch
+      bridges:
+      - br-eth0
+      - br-fw-admin
+      trunks:
+      - 0
+    - action: add-br
+      name: br-prv
+    - action: add-patch
+      bridges:
+      - br-eth2
+      - br-prv
+opnfv:
+  compute: {}
+  controller: {}
+network:
+  networking_parameters:
+    base_mac: fa:16:3e:00:00:00
+    dns_nameservers:
+    - 10.118.32.193
+    - 8.8.8.8
+    floating_ranges:
+    - - 172.16.0.130
+      - 172.16.0.254
+    gre_id_range:
+    - 2
+    - 65535
+    internal_cidr: 192.168.111.0/24
+    internal_gateway: 192.168.111.1
+    net_l23_provider: ovs
+    segmentation_type: vlan
+    vlan_range:
+    - 2022
+    - 2023
+  networks:
+  - cidr: 172.16.0.0/24
+    gateway: 172.16.0.1
+    ip_ranges:
+    - - 172.16.0.2
+      - 172.16.0.126
+    meta:
+      assign_vip: true
+      cidr: 172.16.0.0/24
+      configurable: true
+      floating_range_var: floating_ranges
+      ip_range:
+      - 172.16.0.2
+      - 172.16.0.126
+      map_priority: 1
+      name: public
+      notation: ip_ranges
+      render_addr_mask: public
+      render_type: null
+      use_gateway: true
+      vlan_start: null
+    name: public
+    vlan_start: 120
+  - cidr: 192.168.0.0/24
+    gateway: null
+    ip_ranges:
+    - - 192.168.0.2
+      - 192.168.0.254
+    meta:
+      assign_vip: true
+      cidr: 192.168.0.0/24
+      configurable: true
+      map_priority: 2
+      name: management
+      notation: cidr
+      render_addr_mask: internal
+      render_type: cidr
+      use_gateway: false
+      vlan_start: 101
+    name: management
+    vlan_start: 320
+  - cidr: 192.168.1.0/24
+    gateway: null
+    ip_ranges:
+    - - 192.168.1.2
+      - 192.168.1.254
+    meta:
+      assign_vip: false
+      cidr: 192.168.1.0/24
+      configurable: true
+      map_priority: 2
+      name: storage
+      notation: cidr
+      render_addr_mask: storage
+      render_type: cidr
+      use_gateway: false
+      vlan_start: 102
+    name: storage
+    vlan_start: 220
+  - cidr: null
+    gateway: null
+    ip_ranges: []
+    meta:
+      assign_vip: false
+      configurable: false
+      map_priority: 2
+      name: private
+      neutron_vlan_range: true
+      notation: null
+      render_addr_mask: null
+      render_type: null
+      seg_type: vlan
+      use_gateway: false
+      vlan_start: null
+    name: private
+    vlan_start: null
+  - cidr: 10.40.0.0/24
+    gateway: null
+    ip_ranges:
+    - - 10.40.0.3
+      - 10.40.0.254
+    meta:
+      assign_vip: false
+      configurable: false
+      map_priority: 0
+      notation: ip_ranges
+      render_addr_mask: null
+      render_type: null
+      unmovable: true
+      use_gateway: true
+    name: fuelweb_admin
+    vlan_start: null
+settings:
+  editable:
+    access:
+      email:
+        description: Email address for Administrator
+        label: email
+        type: text
+        value: admin@localhost
+        weight: 40
+      metadata:
+        label: Access
+        weight: 10
+      password:
+        description: Password for Administrator
+        label: password
+        type: password
+        value: admin
+        weight: 20
+      tenant:
+        description: Tenant (project) name for Administrator
+        label: tenant
+        regex:
+          error: Invalid tenant name
+          source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$).*
+        type: text
+        value: admin
+        weight: 30
+      user:
+        description: Username for Administrator
+        label: username
+        regex:
+          error: Invalid username
+          source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$).*
+        type: text
+        value: admin
+        weight: 10
+    additional_components:
+      ceilometer:
+        description: If selected, Ceilometer component will be installed
+        label: Install Ceilometer
+        type: checkbox
+        value: false
+        weight: 40
+      heat:
+        description: ''
+        label: ''
+        type: hidden
+        value: true
+        weight: 30
+      metadata:
+        label: Additional Components
+        weight: 20
+      murano:
+        description: If selected, Murano component will be installed
+        label: Install Murano
+        restrictions:
+        - cluster:net_provider != 'neutron'
+        type: checkbox
+        value: false
+        weight: 20
+      sahara:
+        description: If selected, Sahara component will be installed
+        label: Install Sahara
+        type: checkbox
+        value: false
+        weight: 10
+    common:
+      auth_key:
+        description: Public key(s) to include in authorized_keys on deployed nodes
+        label: Public Key
+        type: text
+        value: ''
+        weight: 70
+      auto_assign_floating_ip:
+        description: If selected, OpenStack will automatically assign a floating IP
+          to a new instance
+        label: Auto assign floating IP
+        restrictions:
+        - cluster:net_provider == 'neutron'
+        type: checkbox
+        value: false
+        weight: 40
+      compute_scheduler_driver:
+        label: Scheduler driver
+        type: radio
+        value: nova.scheduler.filter_scheduler.FilterScheduler
+        values:
+        - data: nova.scheduler.filter_scheduler.FilterScheduler
+          description: Currently the most advanced OpenStack scheduler. See the OpenStack
+            documentation for details.
+          label: Filter scheduler
+        - data: nova.scheduler.simple.SimpleScheduler
+          description: This is 'naive' scheduler which tries to find the least loaded
+            host
+          label: Simple scheduler
+        weight: 40
+      debug:
+        description: Debug logging mode provides more information, but requires more
+          disk space.
+        label: OpenStack debug logging
+        type: checkbox
+        value: false
+        weight: 20
+      disable_offload:
+        description: If set, generic segmentation offload (gso) and generic receive
+          offload (gro) on physical nics will be disabled. See ethtool man.
+        label: Disable generic offload on physical nics
+        restrictions:
+        - action: hide
+          condition: cluster:net_provider == 'neutron' and networking_parameters:segmentation_type
+            == 'gre'
+        type: checkbox
+        value: true
+        weight: 80
+      libvirt_type:
+        label: Hypervisor type
+        type: radio
+        value: kvm
+        values:
+        - data: kvm
+          description: Choose this type of hypervisor if you run OpenStack on hardware
+          label: KVM
+          restrictions:
+          - settings:common.libvirt_type.value == 'vcenter'
+        - data: qemu
+          description: Choose this type of hypervisor if you run OpenStack on virtual
+            hosts.
+          label: QEMU
+          restrictions:
+          - settings:common.libvirt_type.value == 'vcenter'
+        - data: vcenter
+          description: Choose this type of hypervisor if you run OpenStack in a vCenter
+            environment.
+          label: vCenter
+          restrictions:
+          - settings:common.libvirt_type.value != 'vcenter' or cluster:net_provider
+            == 'neutron'
+        weight: 30
+      metadata:
+        label: Common
+        weight: 30
+      nova_quota:
+        description: Quotas are used to limit CPU and memory usage for tenants. Enabling
+          quotas will increase load on the Nova database.
+        label: Nova quotas
+        type: checkbox
+        value: false
+        weight: 25
+      resume_guests_state_on_host_boot:
+        description: Whether to resume previous guests state when the host reboots.
+          If enabled, this option causes guests assigned to the host to resume their
+          previous state. If the guest was running a restart will be attempted when
+          nova-compute starts. If the guest was not running previously, a restart
+          will not be attempted.
+        label: Resume guests state on host boot
+        type: checkbox
+        value: true
+        weight: 60
+      use_cow_images:
+        description: For most cases you will want qcow format. If it's disabled, raw
+          image format will be used to run VMs. OpenStack with raw format currently
+          does not support snapshotting.
+        label: Use qcow format for images
+        type: checkbox
+        value: true
+        weight: 50
+    corosync:
+      group:
+        description: ''
+        label: Group
+        type: text
+        value: 226.94.1.1
+        weight: 10
+      metadata:
+        label: Corosync
+        restrictions:
+        - action: hide
+          condition: 'true'
+        weight: 50
+      port:
+        description: ''
+        label: Port
+        type: text
+        value: '12000'
+        weight: 20
+      verified:
+        description: Set True only if multicast is configured correctly on router.
+        label: Need to pass network verification.
+        type: checkbox
+        value: false
+        weight: 10
+    external_dns:
+      dns_list:
+        description: List of upstream DNS servers, separated by comma
+        label: DNS list
+        type: text
+        value: 10.118.32.193, 8.8.8.8
+        weight: 10
+      metadata:
+        label: Upstream DNS
+        weight: 90
+    external_ntp:
+      metadata:
+        label: Upstream NTP
+        weight: 100
+      ntp_list:
+        description: List of upstream NTP servers, separated by comma
+        label: NTP servers list
+        type: text
+        value: 0.pool.ntp.org, 1.pool.ntp.org
+        weight: 10
+    kernel_params:
+      kernel:
+        description: Default kernel parameters
+        label: Initial parameters
+        type: text
+        value: console=ttyS0,9600 console=tty0 rootdelay=90 nomodeset
+        weight: 45
+      metadata:
+        label: Kernel parameters
+        weight: 40
+    neutron_mellanox:
+      metadata:
+        enabled: true
+        label: Mellanox Neutron components
+        toggleable: false
+        weight: 50
+      plugin:
+        label: Mellanox drivers and SR-IOV plugin
+        type: radio
+        value: disabled
+        values:
+        - data: disabled
+          description: If selected, Mellanox drivers, Neutron and Cinder plugin will
+            not be installed.
+          label: Mellanox drivers and plugins disabled
+          restrictions:
+          - settings:storage.iser.value == true
+        - data: drivers_only
+          description: If selected, Mellanox Ethernet drivers will be installed to
+            support networking over Mellanox NIC. Mellanox Neutron plugin will not
+            be installed.
+          label: Install only Mellanox drivers
+          restrictions:
+          - settings:common.libvirt_type.value != 'kvm'
+        - data: ethernet
+          description: If selected, both Mellanox Ethernet drivers and Mellanox network
+            acceleration (Neutron) plugin will be installed.
+          label: Install Mellanox drivers and SR-IOV plugin
+          restrictions:
+          - settings:common.libvirt_type.value != 'kvm' or not (cluster:net_provider
+            == 'neutron' and networking_parameters:segmentation_type == 'vlan')
+        weight: 60
+      vf_num:
+        description: Note that one virtual function will be reserved to the storage
+          network, in case of choosing iSER.
+        label: Number of virtual NICs
+        restrictions:
+        - settings:neutron_mellanox.plugin.value != 'ethernet'
+        type: text
+        value: '16'
+        weight: 70
+    nsx_plugin:
+      connector_type:
+        description: Default network transport type to use
+        label: NSX connector type
+        type: select
+        value: stt
+        values:
+        - data: gre
+          label: GRE
+        - data: ipsec_gre
+          label: GRE over IPSec
+        - data: stt
+          label: STT
+        - data: ipsec_stt
+          label: STT over IPSec
+        - data: bridge
+          label: Bridge
+        weight: 80
+      l3_gw_service_uuid:
+        description: UUID for the default L3 gateway service to use with this cluster
+        label: L3 service UUID
+        regex:
+          error: Invalid L3 gateway service UUID
+          source: '[a-f\d]{8}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{12}'
+        type: text
+        value: ''
+        weight: 50
+      metadata:
+        enabled: false
+        label: VMware NSX
+        restrictions:
+        - action: hide
+          condition: cluster:net_provider != 'neutron' or networking_parameters:net_l23_provider
+            != 'nsx'
+        weight: 20
+      nsx_controllers:
+        description: One or more IPv4[:port] addresses of NSX controller node, separated
+          by comma (e.g. 10.40.30.2,192.168.110.254:443)
+        label: NSX controller endpoint
+        regex:
+          error: Invalid controller endpoints, specify valid IPv4[:port] pair
+          source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?(,(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?)*$
+        type: text
+        value: ''
+        weight: 60
+      nsx_password:
+        description: Password for Administrator
+        label: NSX password
+        regex:
+          error: Empty password
+          source: \S
+        type: password
+        value: ''
+        weight: 30
+      nsx_username:
+        description: NSX administrator's username
+        label: NSX username
+        regex:
+          error: Empty username
+          source: \S
+        type: text
+        value: admin
+        weight: 20
+      packages_url:
+        description: URL to NSX specific packages
+        label: URL to NSX bits
+        regex:
+          error: Invalid URL, specify valid HTTP/HTTPS URL with IPv4 address (e.g.
+            http://10.20.0.2/nsx)
+          source: ^https?://(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?(/.*)?$
+        type: text
+        value: ''
+        weight: 70
+      replication_mode:
+        description: ''
+        label: NSX cluster has Service nodes
+        type: checkbox
+        value: true
+        weight: 90
+      transport_zone_uuid:
+        description: UUID of the pre-existing default NSX Transport zone
+        label: Transport zone UUID
+        regex:
+          error: Invalid transport zone UUID
+          source: '[a-f\d]{8}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{12}'
+        type: text
+        value: ''
+        weight: 40
+    provision:
+      metadata:
+        label: Provision
+        restrictions:
+        - action: hide
+          condition: not ('experimental' in version:feature_groups)
+        weight: 80
+      method:
+        description: Which provision method to use for this cluster.
+        label: Provision method
+        type: radio
+        value: cobbler
+        values:
+        - data: image
+          description: Copying pre-built images on a disk.
+          label: Image
+        - data: cobbler
+          description: Install from scratch using anaconda or debian-installer.
+          label: Classic (use anaconda or debian-installer)
+    public_network_assignment:
+      assign_to_all_nodes:
+        description: When disabled, public network will be assigned to controllers
+          and zabbix-server only
+        label: Assign public network to all nodes
+        type: checkbox
+        value: false
+        weight: 10
+      metadata:
+        label: Public network assignment
+        restrictions:
+        - action: hide
+          condition: cluster:net_provider != 'neutron'
+        weight: 50
+    storage:
+      ephemeral_ceph:
+        description: Configures Nova to store ephemeral volumes in RBD. This works
+          best if Ceph is enabled for volumes and images, too. Enables live migration
+          of all types of Ceph backed VMs (without this option, live migration will
+          only work with VMs launched from Cinder volumes).
+        label: Ceph RBD for ephemeral volumes (Nova)
+        restrictions:
+        - settings:common.libvirt_type.value == 'vcenter'
+        type: checkbox
+        value: false
+        weight: 75
+      images_ceph:
+        description: Configures Glance to use the Ceph RBD backend to store images.
+          If enabled, this option will prevent Swift from installing.
+        label: Ceph RBD for images (Glance)
+        type: checkbox
+        value: false
+        weight: 30
+      images_vcenter:
+        description: Configures Glance to use the vCenter/ESXi backend to store images.
+          If enabled, this option will prevent Swift from installing.
+        label: VMWare vCenter/ESXi datastore for images (Glance)
+        restrictions:
+        - settings:common.libvirt_type.value != 'vcenter'
+        type: checkbox
+        value: false
+        weight: 35
+      iser:
+        description: 'High performance block storage: Cinder volumes over iSER protocol
+          (iSCSI over RDMA). This feature requires SR-IOV capabilities in the NIC,
+          and will use a dedicated virtual function for the storage network.'
+        label: iSER protocol for volumes (Cinder)
+        restrictions:
+        - settings:storage.volumes_lvm.value != true or settings:common.libvirt_type.value
+          != 'kvm'
+        type: checkbox
+        value: false
+        weight: 11
+      metadata:
+        label: Storage
+        weight: 60
+      objects_ceph:
+        description: Configures RadosGW front end for Ceph RBD. This exposes S3 and
+          Swift API Interfaces. If enabled, this option will prevent Swift from installing.
+        label: Ceph RadosGW for objects (Swift API)
+        restrictions:
+        - settings:storage.images_ceph.value == false
+        type: checkbox
+        value: false
+        weight: 80
+      osd_pool_size:
+        description: Configures the default number of object replicas in Ceph. This
+          number must be equal to or lower than the number of deployed 'Storage -
+          Ceph OSD' nodes.
+        label: Ceph object replication factor
+        regex:
+          error: Invalid number
+          source: ^[1-9]\d*$
+        restrictions:
+        - settings:common.libvirt_type.value == 'vcenter'
+        type: text
+        value: '2'
+        weight: 85
+      vc_datacenter:
+        description: Inventory path to a datacenter. If you want to use ESXi host
+          as datastore, it should be "ha-datacenter".
+        label: Datacenter name
+        regex:
+          error: Empty datacenter
+          source: \S
+        restrictions:
+        - action: hide
+          condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+            != 'vcenter'
+        type: text
+        value: ''
+        weight: 65
+      vc_datastore:
+        description: Datastore associated with the datacenter.
+        label: Datastore name
+        regex:
+          error: Empty datastore
+          source: \S
+        restrictions:
+        - action: hide
+          condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+            != 'vcenter'
+        type: text
+        value: ''
+        weight: 60
+      vc_host:
+        description: IP Address of vCenter/ESXi
+        label: vCenter/ESXi IP
+        regex:
+          error: Specify valid IPv4 address
+          source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])$
+        restrictions:
+        - action: hide
+          condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+            != 'vcenter'
+        type: text
+        value: ''
+        weight: 45
+      vc_image_dir:
+        description: The name of the directory where the glance images will be stored
+          in the VMware datastore.
+        label: Datastore Images directory
+        regex:
+          error: Empty images directory
+          source: \S
+        restrictions:
+        - action: hide
+          condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+            != 'vcenter'
+        type: text
+        value: /openstack_glance
+        weight: 70
+      vc_password:
+        description: vCenter/ESXi admin password
+        label: Password
+        regex:
+          error: Empty password
+          source: \S
+        restrictions:
+        - action: hide
+          condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+            != 'vcenter'
+        type: password
+        value: ''
+        weight: 55
+      vc_user:
+        description: vCenter/ESXi admin username
+        label: Username
+        regex:
+          error: Empty username
+          source: \S
+        restrictions:
+        - action: hide
+          condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+            != 'vcenter'
+        type: text
+        value: ''
+        weight: 50
+      volumes_ceph:
+        description: Configures Cinder to store volumes in Ceph RBD images.
+        label: Ceph RBD for volumes (Cinder)
+        restrictions:
+        - settings:storage.volumes_lvm.value == true or settings:common.libvirt_type.value
+          == 'vcenter'
+        type: checkbox
+        value: false
+        weight: 20
+      volumes_lvm:
+        description: Requires at least one Storage - Cinder LVM node.
+        label: Cinder LVM over iSCSI for volumes
+        restrictions:
+        - settings:storage.volumes_ceph.value == true
+        type: checkbox
+        value: false
+        weight: 10
+      volumes_vmdk:
+        description: Configures Cinder to store volumes via VMware vCenter.
+        label: VMware vCenter for volumes (Cinder)
+        restrictions:
+        - settings:common.libvirt_type.value != 'vcenter' or settings:storage.volumes_lvm.value
+          == true
+        type: checkbox
+        value: false
+        weight: 15
+    syslog:
+      metadata:
+        label: Syslog
+        weight: 50
+      syslog_port:
+        description: Remote syslog port
+        label: Port
+        regex:
+          error: Invalid Syslog port
+          source: ^([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$
+        type: text
+        value: '514'
+        weight: 20
+      syslog_server:
+        description: Remote syslog hostname
+        label: Hostname
+        type: text
+        value: ''
+        weight: 10
+      syslog_transport:
+        label: Syslog transport protocol
+        type: radio
+        value: tcp
+        values:
+        - data: udp
+          description: ''
+          label: UDP
+        - data: tcp
+          description: ''
+          label: TCP
+        weight: 30
+    vcenter:
+      cluster:
+        description: vCenter cluster name. If you have multiple clusters, use comma
+          to separate names
+        label: Cluster
+        regex:
+          error: Invalid cluster list
+          source: ^([^,\ ]+([\ ]*[^,\ ])*)(,[^,\ ]+([\ ]*[^,\ ])*)*$
+        type: text
+        value: ''
+        weight: 40
+      datastore_regex:
+        description: The Datastore regexp setting specifies the data stores to use
+          with Compute. For example, "nas.*". If you want to use all available datastores,
+          leave this field blank
+        label: Datastore regexp
+        regex:
+          error: Invalid datastore regexp
+          source: ^(\S.*\S|\S|)$
+        type: text
+        value: ''
+        weight: 50
+      host_ip:
+        description: IP Address of vCenter
+        label: vCenter IP
+        regex:
+          error: Specify valid IPv4 address
+          source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])$
+        type: text
+        value: ''
+        weight: 10
+      metadata:
+        label: vCenter
+        restrictions:
+        - action: hide
+          condition: settings:common.libvirt_type.value != 'vcenter'
+        weight: 20
+      use_vcenter:
+        description: ''
+        label: ''
+        type: hidden
+        value: true
+        weight: 5
+      vc_password:
+        description: vCenter admin password
+        label: Password
+        regex:
+          error: Empty password
+          source: \S
+        type: password
+        value: admin
+        weight: 30
+      vc_user:
+        description: vCenter admin username
+        label: Username
+        regex:
+          error: Empty username
+          source: \S
+        type: text
+        value: admin
+        weight: 20
+      vlan_interface:
+        description: Physical ESXi host ethernet adapter for VLAN networking (e.g.
+          vmnic1). If empty "vmnic0" is used by default
+        label: ESXi VLAN interface
+        restrictions:
+        - action: hide
+          condition: cluster:net_provider != 'nova_network' or networking_parameters:net_manager
+            != 'VlanManager'
+        type: text
+        value: ''
+        weight: 60
+    zabbix:
+      metadata:
+        label: Zabbix Access
+        restrictions:
+        - action: hide
+          condition: not ('experimental' in version:feature_groups)
+        weight: 70
+      password:
+        description: Password for Zabbix Administrator
+        label: password
+        type: password
+        value: zabbix
+        weight: 20
+      username:
+        description: Username for Zabbix Administrator
+        label: username
+        type: text
+        value: admin
+        weight: 10
diff --git a/fuel/deploy/baremetal/dha.yaml b/fuel/deploy/baremetal/dha.yaml
new file mode 100644 (file)
index 0000000..6240f07
--- /dev/null
@@ -0,0 +1,53 @@
+title: Deployment Hardware Adapter (DHA)
+# DHA API version supported
+version: 1.1
+created: Mon May  4 09:03:46 UTC 2015
+comment: Test environment Ericsson Montreal
+
+# Adapter to use for this definition
+adapter: ipmi
+
+# Node list.
+# Mandatory properties are id and role.
+# The MAC address of the PXE boot interface for Fuel is not
+# mandatory to be defined.
+# All other properties are adapter specific.
+
+nodes:
+- id: 1
+  pxeMac: 14:58:D0:54:7A:28
+  ipmiIp: 10.118.32.205
+  ipmiUser: username
+  ipmiPass: password
+- id: 2
+  pxeMac: 14:58:D0:55:E2:E0
+  ipmiIp: 10.118.32.202
+  ipmiUser: username
+  ipmiPass: password
+# Adding the Fuel node as node id 3 which may not be correct - please
+# adjust as needed.
+- id: 3
+  libvirtName: vFuel
+  libvirtTemplate: vFuel
+  isFuel: yes
+  username: root
+  password: r00tme
+
+# Deployment power on strategy
+# all:      Turn on all nodes at once. There will be no correlation
+#           between the DHA and DEA node numbering. MAC addresses
+#           will be used to select the node roles though.
+# sequence: Turn on the nodes in sequence starting with the lowest order
+#           node and wait for the node to be detected by Fuel. Not until
+#           the node has been detected and assigned a role will the next
+#           node be turned on.
+powerOnStrategy: sequence
+
+# If fuelCustomInstall is set to true, Fuel is assumed to be installed by
+# calling the DHA adapter function "dha_fuelCustomInstall()"  with two
+# arguments: node ID and the ISO file name to deploy. The custom install
+# function is then to handle all necessary logic to boot the Fuel master
+# from the ISO and then return.
+# Allowed values: true, false
+fuelCustomInstall: true
+
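Note: the powerOnStrategy comments above describe 'sequence' as powering on one node at a time
and waiting for Fuel to detect it before the next node is started. A rough sketch of that loop
is shown below; power_on() and is_discovered_by_fuel() are hypothetical stand-ins for the real
DHA adapter and Fuel CLI calls, not functions from this repository.

    # Illustrative sketch of the 'sequence' power-on strategy described above.
    import time

    def power_on_in_sequence(node_ids, power_on, is_discovered_by_fuel,
                             wait_loop=60, sleep_time=10):
        for node_id in sorted(node_ids):          # lowest order node first
            power_on(node_id)
            for _ in range(wait_loop):
                if is_discovered_by_fuel(node_id):
                    break                         # next node only after discovery
                time.sleep(sleep_time)
            else:
                raise RuntimeError('node %s was never discovered' % node_id)
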
similarity index 77%
rename from fuel/deploy/templates/hardware_environment/vms/fuel.xml
rename to fuel/deploy/baremetal/vm/vFuel
index e3e3f80..1b4f4eb 100644 (file)
@@ -1,15 +1,15 @@
-<domain type='kvm' id='62'>
-  <name>fuel</name>
+<domain type='kvm'>
+  <name>vFuel</name>
   <memory unit='KiB'>8290304</memory>
   <currentMemory unit='KiB'>8290304</currentMemory>
-  <vcpu placement='static'>4</vcpu>
+  <vcpu placement='static'>2</vcpu>
   <resource>
     <partition>/machine</partition>
   </resource>
   <os>
-    <type arch='x86_64' machine='pc-i440fx-rhel7.0.0'>hvm</type>
-    <boot dev='cdrom'/>
+    <type arch='x86_64' machine='pc-i440fx-utopic'>hvm</type>
     <boot dev='hd'/>
+    <boot dev='cdrom'/>
     <bootmenu enable='no'/>
   </os>
   <features>
     <suspend-to-disk enabled='no'/>
   </pm>
   <devices>
-    <emulator>/usr/libexec/qemu-kvm</emulator>
+    <emulator>/usr/bin/kvm</emulator>
     <disk type='file' device='disk'>
       <driver name='qemu' type='raw'/>
+      <source file='/mnt/images/vFuel.raw'/>
       <target dev='vda' bus='virtio'/>
     </disk>
     <disk type='block' device='cdrom'>
       <driver name='qemu' type='raw'/>
-      <target dev='hdb' bus='ide'/>
+      <target dev='hda' bus='ide'/>
       <readonly/>
     </disk>
     <controller type='usb' index='0' model='ich9-ehci1'>
     <controller type='usb' index='0' model='ich9-uhci3'>
       <master startport='4'/>
     </controller>
-    <controller type='pci' index='0' model='pci-root'>
+    <controller type='pci' index='0' model='pci-root'/>
+    <controller type='virtio-serial' index='0'>
     </controller>
     <controller type='ide' index='0'>
     </controller>
-    <controller type='virtio-serial' index='0'>
-    </controller>
     <interface type='bridge'>
+      <source bridge='vfuelnet'/>
       <model type='virtio'/>
     </interface>
     <serial type='pty'>
-      <source path='/dev/pts/0'/>
       <target port='0'/>
     </serial>
-    <console type='pty' tty='/dev/pts/0'>
-      <source path='/dev/pts/0'/>
+    <console type='pty'>
       <target type='serial' port='0'/>
     </console>
     <input type='mouse' bus='ps2'/>
@@ -84,8 +83,5 @@
     <memballoon model='virtio'>
     </memballoon>
   </devices>
-  <seclabel type='dynamic' model='selinux' relabel='yes'>
-    <label>system_u:system_r:svirt_t:s0:c52,c932</label>
-    <imagelabel>system_u:object_r:svirt_image_t:s0:c52,c932</imagelabel>
-  </seclabel>
-</domain>
\ No newline at end of file
+  <seclabel type='dynamic' model='apparmor' relabel='yes'/>
+</domain>
index 2d68c1b..d0037d7 100644 (file)
@@ -1,13 +1,6 @@
-###############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# szilard.cserey@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-###############################################################################
-
 import common
+import os
+import shutil
 
 from configure_settings import ConfigureSettings
 from configure_network import ConfigureNetwork
@@ -21,9 +14,6 @@ exec_cmd = common.exec_cmd
 parse = common.parse
 err = common.err
 log = common.log
-delete = common.delete
-create_dir_if_not_exists = common.create_dir_if_not_exists
-
 
 class ConfigureEnvironment(object):
 
@@ -31,6 +21,7 @@ class ConfigureEnvironment(object):
         self.env_id = None
         self.dea = dea
         self.yaml_config_dir = yaml_config_dir
+        self.env_name = self.dea.get_property('environment_name')
         self.release_id = release_id
         self.node_id_roles_dict = node_id_roles_dict
         self.required_networks = []
@@ -45,20 +36,21 @@ class ConfigureEnvironment(object):
 
     def configure_environment(self):
         log('Configure environment')
-        delete(self.yaml_config_dir)
-        create_dir_if_not_exists(self.yaml_config_dir)
-        env_name = self.dea.get_env_name()
-        env_mode = self.dea.get_env_mode()
-        env_net_segment_type = self.dea.get_env_net_segment_type()
+        if os.path.exists(self.yaml_config_dir):
+            log('Deleting existing config directory %s' % self.yaml_config_dir)
+            shutil.rmtree(self.yaml_config_dir)
+        log('Creating new config directory %s' % self.yaml_config_dir)
+        os.makedirs(self.yaml_config_dir)
+
+        mode = self.dea.get_property('environment_mode')
         log('Creating environment %s release %s, mode %s, network-mode neutron'
-            ', net-segment-type %s'
-            % (env_name, self.release_id, env_mode, env_net_segment_type))
+            ', net-segment-type vlan' % (self.env_name, self.release_id, mode))
         exec_cmd('fuel env create --name %s --release %s --mode %s '
-                 '--network-mode neutron --net-segment-type %s'
-                 % (env_name, self.release_id, env_mode, env_net_segment_type))
+                 '--network-mode neutron --net-segment-type vlan'
+                 % (self.env_name, self.release_id, mode))
 
-        if not self.env_exists(env_name):
-            err('Failed to create environment %s' % env_name)
+        if not self.env_exists(self.env_name):
+            err('Failed to create environment %s' % self.env_name)
         self.config_settings()
         self.config_network()
         self.config_nodes()
@@ -76,3 +68,6 @@ class ConfigureEnvironment(object):
         nodes = ConfigureNodes(self.yaml_config_dir, self.env_id,
                                self.node_id_roles_dict, self.dea)
         nodes.config_nodes()
+
+
+
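Note: with the dea.yaml values added earlier in this change (environment_name: opnfv,
environment_mode: multinode) and an assumed release id of 2, the rewritten
configure_environment() ends up issuing a command along these lines (the release id is
purely illustrative):

    # Illustration only - release_id 2 is an assumed value.
    env_name, release_id, mode = 'opnfv', 2, 'multinode'
    cmd = ('fuel env create --name %s --release %s --mode %s '
           '--network-mode neutron --net-segment-type vlan'
           % (env_name, release_id, mode))
    print(cmd)
    # fuel env create --name opnfv --release 2 --mode multinode --network-mode neutron --net-segment-type vlan
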
index 0027894..295eb90 100644 (file)
@@ -1,13 +1,3 @@
-###############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# szilard.cserey@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-###############################################################################
-
-
 import common
 import yaml
 import io
@@ -21,8 +11,6 @@ parse = common.parse
 err = common.err
 check_file_exists = common.check_file_exists
 log = common.log
-backup = common.backup
-
 
 class ConfigureNetwork(object):
 
@@ -53,7 +41,6 @@ class ConfigureNetwork(object):
         network_yaml = ('%s/network_%s.yaml'
                         % (self.yaml_config_dir, self.env_id))
         check_file_exists(network_yaml)
-        backup(network_yaml)
 
         network_config = self.dea.get_property('network')
 
@@ -71,4 +58,4 @@ class ConfigureNetwork(object):
             network.update(net_id[network['name']])
 
         with io.open(network_yaml, 'w') as stream:
-            yaml.dump(network_config, stream, default_flow_style=False)
+            yaml.dump(network_config, stream, default_flow_style=False)
\ No newline at end of file
index e76d222..4d1315a 100644 (file)
@@ -1,13 +1,3 @@
-###############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# szilard.cserey@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-###############################################################################
-
-
 import common
 import yaml
 import io
@@ -22,7 +12,6 @@ parse = common.parse
 err = common.err
 check_file_exists = common.check_file_exists
 log = common.log
-backup = common.backup
 
 
 class ConfigureNodes(object):
@@ -37,7 +26,7 @@ class ConfigureNodes(object):
         log('Configure nodes')
         for node_id, roles_blade in self.node_id_roles_dict.iteritems():
             exec_cmd('fuel node set --node-id %s --role %s --env %s'
-                     % (node_id, roles_blade[0], self.env_id))
+                     % (node_id, ','.join(roles_blade[0]), self.env_id))
 
         self.download_deployment_config()
         for node_id, roles_blade in self.node_id_roles_dict.iteritems():
@@ -48,20 +37,22 @@ class ConfigureNodes(object):
         self.upload_deployment_config()
 
     def modify_node_network_schemes(self, node_id, roles_blade):
-        log('Modify network transformations for node %s' % node_id)
+        log('Modify node network transformations in environment %s'
+            % self.env_id)
         type = self.dea.get_node_property(roles_blade[1], 'transformations')
-        transformations = self.dea.get_property(type)
-        deployment_dir = '%s/deployment_%s' % (
-            self.yaml_config_dir, self.env_id)
-        backup(deployment_dir)
-        for node_file in glob.glob(deployment_dir + '/*_%s.yaml' % node_id):
+        transformations = self.dea.get_transformations(type)
+
+        for node_file in glob.glob('%s/deployment_%s/*_%s.yaml'
+                                   % (self.yaml_config_dir, self.env_id,
+                                      node_id)):
             with io.open(node_file) as stream:
-                node = yaml.load(stream)
+                node = yaml.load(stream)
 
-            node['network_scheme'].update(transformations)
+            node['network_scheme']['transformations'] = transformations
 
             with io.open(node_file, 'w') as stream:
-                yaml.dump(node, stream, default_flow_style=False)
+                yaml.dump(node, stream, default_flow_style=False)
+
 
     def download_deployment_config(self):
         log('Download deployment config for environment %s' % self.env_id)
@@ -88,7 +79,6 @@ class ConfigureNodes(object):
         interface_yaml = ('%s/node_%s/interfaces.yaml'
                           % (self.yaml_config_dir, node_id))
         check_file_exists(interface_yaml)
-        backup('%s/node_%s' % (self.yaml_config_dir, node_id))
 
         with io.open(interface_yaml) as stream:
             interfaces = yaml.load(stream)
@@ -96,10 +86,10 @@ class ConfigureNodes(object):
         net_name_id = {}
         for interface in interfaces:
             for network in interface['assigned_networks']:
-                net_name_id[network['name']] = network['id']
+                net_name_id[network['name']] = network['id']
 
         type = self.dea.get_node_property(roles_blade[1], 'interfaces')
-        interface_config = self.dea.get_property(type)
+        interface_config = self.dea.get_interfaces(type)
 
         for interface in interfaces:
             interface['assigned_networks'] = []
@@ -111,4 +101,4 @@ class ConfigureNodes(object):
                     interface['assigned_networks'].append(net)
 
         with io.open(interface_yaml, 'w') as stream:
-            yaml.dump(interfaces, stream, default_flow_style=False)
+            yaml.dump(interfaces, stream, default_flow_style=False)
\ No newline at end of file
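Note: since assign_roles_to_cluster_node_ids() in deploy.py now stores a list of roles per node
(e.g. ['controller', 'mongo']), config_nodes() joins them with a comma before calling the Fuel
CLI. With assumed node and environment ids, the generated command looks like this:

    # Assumed example values: Fuel node id 7, environment id 1.
    roles_blade = (['controller', 'mongo'], 1)   # (role list, blade id) as built in deploy.py
    cmd = ('fuel node set --node-id %s --role %s --env %s'
           % (7, ','.join(roles_blade[0]), 1))
    print(cmd)   # fuel node set --node-id 7 --role controller,mongo --env 1
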
index fa918fd..ac0afdc 100644 (file)
@@ -1,12 +1,3 @@
-###############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# szilard.cserey@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-###############################################################################
-
 import common
 import yaml
 import io
@@ -20,8 +11,6 @@ parse = common.parse
 err = common.err
 check_file_exists = common.check_file_exists
 log = common.log
-backup = common.backup
-
 
 class ConfigureSettings(object):
 
@@ -51,7 +40,6 @@ class ConfigureSettings(object):
         settings_yaml = ('%s/settings_%s.yaml'
                          % (self.yaml_config_dir, self.env_id))
         check_file_exists(settings_yaml)
-        backup(settings_yaml)
 
         settings = self.dea.get_property('settings')
 
index 705dda5..c8714f8 100644 (file)
@@ -1,17 +1,7 @@
-###############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# szilard.cserey@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-###############################################################################
-
-
-import os
+import time
 import yaml
 import io
-import glob
+import sys
 
 import common
 from dea import DeploymentEnvironmentAdapter
@@ -29,79 +19,188 @@ parse = common.parse
 err = common.err
 check_file_exists = common.check_file_exists
 log = common.log
-commafy = common.commafy
-ArgParser = common.ArgParser
-
 
 class Deploy(object):
 
-    def __init__(self, dea_file, blade_node_file, no_health_check):
+    def __init__(self, dea_file, macs_file):
         self.dea = DeploymentEnvironmentAdapter(dea_file)
-        self.blade_node_file = blade_node_file
-        self.no_health_check = no_health_check
+        self.macs_file = macs_file
         self.macs_per_blade = {}
         self.blades = self.dea.get_node_ids()
-        self.blade_node_dict = {}
-        self.node_roles_dict = {}
+        self.node_ids_dict = {}
+        self.node_id_roles_dict = {}
+        self.supported_release = None
         self.env_id = None
-        self.wanted_release = self.dea.get_property('wanted_release')
-
-    def get_blade_node_mapping(self):
-        with io.open(self.blade_node_file, 'r') as stream:
-            self.blade_node_dict = yaml.load(stream)
-
-    def assign_roles_to_cluster_node_ids(self):
-        self.node_roles_dict = {}
-        for blade, node in self.blade_node_dict.iteritems():
-            roles = commafy(self.dea.get_node_role(blade))
-            self.node_roles_dict[node] = (roles, blade)
-
-    def configure_environment(self):
+        self.wanted_release = self.dea.get_wanted_release()
+
+    def cleanup_fuel_environments(self, env_list):
+        WAIT_LOOP = 60
+        SLEEP_TIME = 10
+        for env in env_list:
+            log('Deleting environment %s' % env[E['id']])
+            exec_cmd('fuel env --env %s --delete' % env[E['id']])
+        all_env_erased = False
+        for i in range(WAIT_LOOP):
+            env_list = parse(exec_cmd('fuel env list'))
+            if env_list:
+                time.sleep(SLEEP_TIME)
+            else:
+                all_env_erased = True
+                break
+        if not all_env_erased:
+            err('Could not erase these environments %s'
+                % [(env[E['id']], env[E['status']]) for env in env_list])
+
+    def cleanup_fuel_nodes(self, node_list):
+        for node in node_list:
+            if node[N['status']] == 'discover':
+                log('Deleting node %s' % node[N['id']])
+                exec_cmd('fuel node --node-id %s --delete-from-db'
+                         % node[N['id']])
+                exec_cmd('dockerctl shell cobbler cobbler system remove '
+                         '--name node-%s' % node[N['id']])
+
+    def check_previous_installation(self):
+        log('Check previous installation')
+        env_list = parse(exec_cmd('fuel env list'))
+        if env_list:
+            self.cleanup_fuel_environments(env_list)
+            node_list = parse(exec_cmd('fuel node list'))
+            if node_list:
+                self.cleanup_fuel_nodes(node_list)
+
+    def check_supported_release(self):
+        log('Check supported release: %s' % self.wanted_release)
         release_list = parse(exec_cmd('fuel release -l'))
         for release in release_list:
             if release[R['name']] == self.wanted_release:
+                self.supported_release = release
+                break
+        if not self.supported_release:
+            err('This Fuel does not contain the following release: %s'
+                % self.wanted_release)
+
+    def check_prerequisites(self):
+        log('Check prerequisites')
+        self.check_supported_release()
+        self.check_previous_installation()
+
+    def get_mac_addresses(self):
+        with io.open(self.macs_file, 'r') as stream:
+            self.macs_per_blade = yaml.load(stream)
+
+    def find_mac_in_dict(self, mac):
+        for blade, mac_list in self.macs_per_blade.iteritems():
+            if mac in mac_list:
+                return blade
+
+    def all_blades_discovered(self):
+        for blade, node_id in self.node_ids_dict.iteritems():
+            if not node_id:
+                return False
+        return True
+
+    def not_discovered_blades_summary(self):
+        summary = ''
+        for blade, node_id in self.node_ids_dict.iteritems():
+            if not node_id:
+                summary += '\n[blade %s]' % blade
+        return summary
+
+    def node_discovery(self, node_list, discovered_macs):
+        for node in node_list:
+            if (node[N['status']] == 'discover' and
+                node[N['online']] == 'True' and
+                node[N['mac']] not in discovered_macs):
+                discovered_macs.append(node[N['mac']])
+                blade = self.find_mac_in_dict(node[N['mac']])
+                if blade:
+                    log('Blade %s discovered as Node %s with MAC %s'
+                        % (blade, node[N['id']], node[N['mac']]))
+                    self.node_ids_dict[blade] = node[N['id']]
+
+    def discovery_waiting_loop(self, discovered_macs):
+        WAIT_LOOP = 180
+        SLEEP_TIME = 10
+        all_discovered = False
+        for i in range(WAIT_LOOP):
+            node_list = parse(exec_cmd('fuel node list'))
+            if node_list:
+                self.node_discovery(node_list, discovered_macs)
+            if self.all_blades_discovered():
+                all_discovered = True
                 break
+            else:
+                time.sleep(SLEEP_TIME)
+        return all_discovered
+
+    def wait_for_discovered_blades(self):
+        log('Wait for discovered blades')
+        discovered_macs = []
+        for blade in self.blades:
+            self.node_ids_dict[blade] = None
+        all_discovered = self.discovery_waiting_loop(discovered_macs)
+        if not all_discovered:
+            err('Not all blades have been discovered: %s'
+                % self.not_discovered_blades_summary())
+
+    def assign_roles_to_cluster_node_ids(self):
+        self.node_id_roles_dict = {}
+        for blade, node_id in self.node_ids_dict.iteritems():
+            role_list = []
+            role = self.dea.get_node_role(blade)
+            if role == 'controller':
+                role_list.extend(['controller', 'mongo'])
+            elif role == 'compute':
+                role_list.extend(['compute'])
+            self.node_id_roles_dict[node_id] = (role_list, blade)
+
+    def configure_environment(self):
         config_env = ConfigureEnvironment(self.dea, YAML_CONF_DIR,
-                                          release[R['id']],
-                                          self.node_roles_dict)
+                                          self.supported_release[R['id']],
+                                          self.node_id_roles_dict)
         config_env.configure_environment()
         self.env_id = config_env.env_id
 
     def deploy_cloud(self):
         dep = Deployment(self.dea, YAML_CONF_DIR, self.env_id,
-                         self.node_roles_dict, self.no_health_check)
+                         self.node_id_roles_dict)
         dep.deploy()
 
     def deploy(self):
-
-        self.get_blade_node_mapping()
-
+        self.get_mac_addresses()
+        self.check_prerequisites()
+        self.wait_for_discovered_blades()
         self.assign_roles_to_cluster_node_ids()
-
         self.configure_environment()
-
         self.deploy_cloud()
 
+def usage():
+    print '''
+    Usage:
+    python deploy.py <dea_file> <macs_file>
 
-def parse_arguments():
-    parser = ArgParser(prog='python %s' % __file__)
-    parser.add_argument('-nh', dest='no_health_check', action='store_true',
-                        default=False,
-                        help='Don\'t run health check after deployment')
-    parser.add_argument('dea_file', action='store',
-                        help='Deployment Environment Adapter: dea.yaml')
-    parser.add_argument('blade_node_file', action='store',
-                        help='Blade Node mapping: blade_node.yaml')
-    args = parser.parse_args()
-    check_file_exists(args.dea_file)
-    check_file_exists(args.blade_node_file)
-    return (args.dea_file, args.blade_node_file, args.no_health_check)
+    Example:
+            python deploy.py dea.yaml macs.yaml
+    '''
 
+def parse_arguments():
+    if len(sys.argv) != 3:
+        log('Incorrect number of arguments')
+        usage()
+        sys.exit(1)
+    dea_file = sys.argv[-2]
+    macs_file = sys.argv[-1]
+    check_file_exists(dea_file)
+    check_file_exists(macs_file)
+    return dea_file, macs_file
 
 def main():
-    dea_file, blade_node_file, no_health_check = parse_arguments()
-    deploy = Deploy(dea_file, blade_node_file, no_health_check)
+
+    dea_file, macs_file = parse_arguments()
+
+    deploy = Deploy(dea_file, macs_file)
     deploy.deploy()
 
 if __name__ == '__main__':
-    main()
+    main()
\ No newline at end of file
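Note: deploy.py now takes a macs_file instead of the old blade/node mapping; get_mac_addresses()
loads it with yaml.load and find_mac_in_dict() expects a mapping from blade id to a list of PXE
MAC addresses. A hypothetical example of the loaded structure, reusing the MACs from
baremetal/dha.yaml above:

    # Hypothetical content of macs.yaml after yaml.load(): blade id -> MAC list.
    macs_per_blade = {
        1: ['14:58:D0:54:7A:28'],
        2: ['14:58:D0:55:E2:E0'],
    }
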
index 90f24fd..cf56c36 100644 (file)
@@ -1,13 +1,3 @@
-###############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# szilard.cserey@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-###############################################################################
-
-
 import common
 import os
 import shutil
@@ -29,13 +19,42 @@ log = common.log
 
 class Deployment(object):
 
-    def __init__(self, dea, yaml_config_dir, env_id, node_id_roles_dict,
-                 no_health_check):
+    def __init__(self, dea, yaml_config_dir, env_id, node_id_roles_dict):
         self.dea = dea
         self.yaml_config_dir = yaml_config_dir
         self.env_id = env_id
         self.node_id_roles_dict = node_id_roles_dict
-        self.no_health_check = no_health_check
+
+    def download_deployment_info(self):
+        log('Download deployment info for environment %s' % self.env_id)
+        deployment_dir = '%s/deployment_%s' \
+                         % (self.yaml_config_dir, self.env_id)
+        if os.path.exists(deployment_dir):
+            shutil.rmtree(deployment_dir)
+        exec_cmd('fuel --env %s deployment --default --dir %s'
+                 % (self.env_id, self.yaml_config_dir))
+
+    def upload_deployment_info(self):
+        log('Upload deployment info for environment %s' % self.env_id)
+        exec_cmd('fuel --env %s deployment --upload --dir %s'
+                 % (self.env_id, self.yaml_config_dir))
+
+    def config_opnfv(self):
+        log('Configure OPNFV settings on environment %s' % self.env_id)
+        opnfv_compute = self.dea.get_opnfv('compute')
+        opnfv_controller = self.dea.get_opnfv('controller')
+        self.download_deployment_info()
+        for node_file in glob.glob('%s/deployment_%s/*.yaml'
+                                   % (self.yaml_config_dir, self.env_id)):
+            with io.open(node_file) as stream:
+                node = yaml.load(stream)
+            if node['role'] == 'compute':
+                node.update(opnfv_compute)
+            else:
+                node.update(opnfv_controller)
+            with io.open(node_file, 'w') as stream:
+                yaml.dump(node, stream, default_flow_style=False)
+        self.upload_deployment_info()
 
     def run_deploy(self):
         WAIT_LOOP = 180
@@ -56,8 +75,7 @@ class Deployment(object):
             if env[0][E['status']] == 'operational':
                 ready = True
                 break
-            elif (env[0][E['status']] == 'error'
-                  or env[0][E['status']] == 'stopped'):
+            elif env[0][E['status']] == 'error':
                 break
             else:
                 time.sleep(SLEEP_TIME)
@@ -84,14 +102,12 @@ class Deployment(object):
 
     def health_check(self):
         log('Now running sanity and smoke health checks')
-        r = exec_cmd('fuel health --env %s --check sanity,smoke --force'
-                     % self.env_id)
-        log(r)
-        if 'failure' in r:
-            err('Healthcheck failed!')
+        exec_cmd('fuel health --env %s --check sanity,smoke --force'
+                 % self.env_id)
+        log('Health checks passed !')
 
     def deploy(self):
+        self.config_opnfv()
         self.run_deploy()
         self.verify_node_status()
-        if not self.no_health_check:
-            self.health_check()
+        self.health_check()
\ No newline at end of file
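
The config_opnfv() change above boils down to a download-merge-upload round trip against Fuel's per-node deployment YAML. A minimal sketch of the merge step, assuming a node file produced by 'fuel deployment --default' with a top-level 'role' key; the helper name and file handling here are illustrative only, not the project's API:

    import yaml

    def merge_opnfv_settings(node_file, opnfv_compute, opnfv_controller):
        # Load one node_<id>.yaml written by 'fuel --env <id> deployment --default'
        with open(node_file) as stream:
            node = yaml.safe_load(stream)
        # Merge the role-specific 'opnfv' block from the DEA into the node document
        node.update(opnfv_compute if node['role'] == 'compute' else opnfv_controller)
        # Write it back so 'fuel --env <id> deployment --upload' picks it up
        with open(node_file, 'w') as stream:
            yaml.safe_dump(node, stream, default_flow_style=False)
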
index 2a8c0d1..6dbda67 100644 (file)
@@ -1,20 +1,7 @@
-###############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# szilard.cserey@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-###############################################################################
-
 import subprocess
 import sys
 import os
 import logging
-import argparse
-import shutil
-import stat
-import errno
 
 N = {'id': 0, 'status': 1, 'name': 2, 'cluster': 3, 'ip': 4, 'mac': 5,
      'roles': 6, 'pending_roles': 7, 'online': 8}
@@ -22,7 +9,7 @@ E = {'id': 0, 'status': 1, 'name': 2, 'mode': 3, 'release_id': 4,
      'changes': 5, 'pending_release_id': 6}
 R = {'id': 0, 'name': 1, 'state': 2, 'operating_system': 3, 'version': 4}
 RO = {'name': 0, 'conflicts': 1}
-CWD = os.getcwd()
+
 LOG = logging.getLogger(__name__)
 LOG.setLevel(logging.DEBUG)
 formatter = logging.Formatter('%(message)s')
@@ -32,7 +19,6 @@ LOG.addHandler(out_handler)
 out_handler = logging.FileHandler('autodeploy.log', mode='w')
 out_handler.setFormatter(formatter)
 LOG.addHandler(out_handler)
-os.chmod('autodeploy.log', stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
 
 def exec_cmd(cmd, check=True):
     process = subprocess.Popen(cmd,
@@ -48,7 +34,6 @@ def exec_cmd(cmd, check=True):
             return response
     return response, return_code
 
-
 def run_proc(cmd):
     process = subprocess.Popen(cmd,
                                stdout=subprocess.PIPE,
@@ -56,16 +41,14 @@ def run_proc(cmd):
                                shell=True)
     return process
 
-
 def parse(printout):
     parsed_list = []
     lines = printout.splitlines()
     for l in lines[2:]:
-        parsed = [e.strip() for e in l.split('|')]
-        parsed_list.append(parsed)
+         parsed = [e.strip() for e in l.split('|')]
+         parsed_list.append(parsed)
     return parsed_list
 
-
 def clean(lines):
     parsed_list = []
     parsed = []
@@ -78,76 +61,22 @@ def clean(lines):
         parsed_list.append(parsed)
     return parsed if len(parsed_list) == 1 else parsed_list
 
-
 def err(message):
     LOG.error('%s\n' % message)
     sys.exit(1)
 
-
-def warn(message):
-    LOG.warning('%s\n' % message)
-
-
 def check_file_exists(file_path):
-    if not os.path.dirname(file_path):
-        file_path = '%s/%s' % (CWD, file_path)
     if not os.path.isfile(file_path):
         err('ERROR: File %s not found\n' % file_path)
 
-
 def check_dir_exists(dir_path):
-    if not os.path.dirname(dir_path):
-        dir_path = '%s/%s' % (CWD, dir_path)
     if not os.path.isdir(dir_path):
         err('ERROR: Directory %s not found\n' % dir_path)
 
-
-def create_dir_if_not_exists(dir_path):
-    if not os.path.isdir(dir_path):
-        log('Creating directory %s' % dir_path)
-        os.makedirs(dir_path)
-
-
-def delete(f):
-    if os.path.isfile(f):
-        log('Deleting file %s' % f)
-        os.remove(f)
-    elif os.path.isdir(f):
-        log('Deleting directory %s' % f)
-        shutil.rmtree(f)
-
-
-def commafy(comma_separated_list):
-    l = [c.strip() for c in comma_separated_list.split(',')]
-    return ','.join(l)
-
-
 def check_if_root():
     r = exec_cmd('whoami')
     if r != 'root':
         err('You need be root to run this application')
 
-
 def log(message):
     LOG.debug('%s\n' % message)
-
-
-class ArgParser(argparse.ArgumentParser):
-
-    def error(self, message):
-        sys.stderr.write('ERROR: %s\n' % message)
-        self.print_help()
-        sys.exit(2)
-
-
-def backup(path):
-    src = path
-    dst = path + '_orig'
-    delete(dst)
-    try:
-        shutil.copytree(src, dst)
-    except OSError as e:
-        if e.errno == errno.ENOTDIR:
-            shutil.copy(src, dst)
-        else:
-            raise
index 5f1a415..8066b6a 100644 (file)
@@ -1,20 +1,8 @@
-###############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# szilard.cserey@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-###############################################################################
-
-
 import yaml
 import io
 import netaddr
 
-
 class DeploymentEnvironmentAdapter(object):
-
     def __init__(self, yaml_path):
         self.dea_struct = None
         self.parse_yaml(yaml_path)
@@ -31,15 +19,6 @@ class DeploymentEnvironmentAdapter(object):
         with io.open(yaml_path) as yaml_file:
             self.dea_struct = yaml.load(yaml_file)
 
-    def get_env_name(self):
-        return self.get_property('environment')['name']
-
-    def get_env_mode(self):
-        return self.get_property('environment')['mode']
-
-    def get_env_net_segment_type(self):
-        return self.get_property('environment')['net_segment_type']
-
     def get_fuel_config(self):
         return self.dea_struct['fuel']
 
@@ -88,12 +67,14 @@ class DeploymentEnvironmentAdapter(object):
     def get_network_names(self):
         return self.network_names
 
-    def get_dns_list(self):
-        settings = self.get_property('settings')
-        dns_list = settings['editable']['external_dns']['dns_list']['value']
-        return [d.strip() for d in dns_list.split(',')]
+    def get_interfaces(self, type):
+        return self.dea_struct['interfaces'][type]
+
+    def get_transformations(self, type):
+        return self.dea_struct['transformations'][type]
+
+    def get_opnfv(self, role):
+        return {'opnfv': self.dea_struct['opnfv'][role]}
 
-    def get_ntp_list(self):
-        settings = self.get_property('settings')
-        ntp_list = settings['editable']['external_ntp']['ntp_list']['value']
-        return [n.strip() for n in ntp_list.split(',')]
+    def get_wanted_release(self):
+        return self.dea_struct['wanted_release']
\ No newline at end of file
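
Read together, the accessors above imply that dea.yaml now carries at least the top-level keys sketched below (wanted_release, fuel, interfaces, transformations and a per-role opnfv section). The skeleton is an assumption pieced together from the getters, with placeholder values only:

    import yaml

    # Skeleton of the structure dea.py expects; every value is a placeholder.
    dea_skeleton = {
        'wanted_release': '<name as listed by `fuel release -l`>',  # get_wanted_release()
        'fuel': {},                                                  # get_fuel_config()
        'interfaces': {'<type>': []},                                # get_interfaces(type)
        'transformations': {'<type>': []},                           # get_transformations(type)
        'opnfv': {'compute': {}, 'controller': {}},                  # get_opnfv(role)
    }
    print(yaml.safe_dump(dea_skeleton, default_flow_style=False))
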
index 178ae76..9d1a3d2 100644 (file)
@@ -1,51 +1,33 @@
-###############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# szilard.cserey@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-###############################################################################
-
-
+import sys
 import os
+import shutil
 import io
 import re
-import sys
 import netaddr
-import yaml
 
 from dea import DeploymentEnvironmentAdapter
 from dha import DeploymentHardwareAdapter
 from install_fuel_master import InstallFuelMaster
 from deploy_env import CloudDeploy
-from execution_environment import ExecutionEnvironment
 import common
 
 log = common.log
 exec_cmd = common.exec_cmd
 err = common.err
-warn = common.warn
 check_file_exists = common.check_file_exists
-check_dir_exists = common.check_dir_exists
-create_dir_if_not_exists = common.create_dir_if_not_exists
-delete = common.delete
 check_if_root = common.check_if_root
-ArgParser = common.ArgParser
 
 FUEL_VM = 'fuel'
+TMP_DIR = '%s/fueltmp' % os.getenv('HOME')
 PATCH_DIR = 'fuel_patch'
-WORK_DIR = '~/deploy'
-CWD = os.getcwd()
-
+WORK_DIR = 'deploy'
 
 class cd:
-
     def __init__(self, new_path):
         self.new_path = os.path.expanduser(new_path)
 
     def __enter__(self):
-        self.saved_path = CWD
+        self.saved_path = os.getcwd()
         os.chdir(self.new_path)
 
     def __exit__(self, etype, value, traceback):
@@ -54,27 +36,31 @@ class cd:
 
 class AutoDeploy(object):
 
-    def __init__(self, no_fuel, fuel_only, no_health_check, cleanup_only,
-                 cleanup, storage_dir, pxe_bridge, iso_file, dea_file,
-                 dha_file, fuel_plugins_dir):
-        self.no_fuel = no_fuel
-        self.fuel_only = fuel_only
-        self.no_health_check = no_health_check
-        self.cleanup_only = cleanup_only
-        self.cleanup = cleanup
-        self.storage_dir = storage_dir
-        self.pxe_bridge = pxe_bridge
+    def __init__(self, without_fuel, iso_file, dea_file, dha_file):
+        self.without_fuel = without_fuel
         self.iso_file = iso_file
         self.dea_file = dea_file
         self.dha_file = dha_file
-        self.fuel_plugins_dir = fuel_plugins_dir
-        self.dea = (DeploymentEnvironmentAdapter(dea_file)
-                    if not cleanup_only else None)
+        self.dea = DeploymentEnvironmentAdapter(dea_file)
         self.dha = DeploymentHardwareAdapter(dha_file)
         self.fuel_conf = {}
         self.fuel_node_id = self.dha.get_fuel_node_id()
+        self.fuel_custom = self.dha.use_fuel_custom_install()
         self.fuel_username, self.fuel_password = self.dha.get_fuel_access()
-        self.tmp_dir = None
+
+    def setup_dir(self, dir):
+        self.cleanup_dir(dir)
+        os.makedirs(dir)
+
+    def cleanup_dir(self, dir):
+        if os.path.isdir(dir):
+            shutil.rmtree(dir)
+
+    def power_off_blades(self):
+        node_ids = self.dha.get_all_node_ids()
+        node_ids = list(set(node_ids) - set([self.fuel_node_id]))
+        for node_id in node_ids:
+            self.dha.node_power_off(node_id)
 
     def modify_ip(self, ip_addr, index, val):
         ip_str = str(netaddr.IPAddress(ip_addr))
@@ -91,9 +77,11 @@ class AutoDeploy(object):
         self.fuel_conf['showmenu'] = 'yes'
 
     def install_fuel_master(self):
+        if self.without_fuel:
+            log('Not Installing Fuel Master')
+            return
         log('Install Fuel Master')
-        new_iso = '%s/deploy-%s' \
-                  % (self.tmp_dir, os.path.basename(self.iso_file))
+        new_iso = '%s/deploy-%s' % (TMP_DIR, os.path.basename(self.iso_file))
         self.patch_iso(new_iso)
         self.iso_file = new_iso
         self.install_iso()
@@ -102,36 +90,40 @@ class AutoDeploy(object):
         fuel = InstallFuelMaster(self.dea_file, self.dha_file,
                                  self.fuel_conf['ip'], self.fuel_username,
                                  self.fuel_password, self.fuel_node_id,
-                                 self.iso_file, WORK_DIR,
-                                 self.fuel_plugins_dir)
-        fuel.install()
+                                 self.iso_file, WORK_DIR)
+        if self.fuel_custom:
+            log('Custom Fuel install')
+            fuel.custom_install()
+        else:
+            log('Ordinary Fuel install')
+            fuel.install()
 
     def patch_iso(self, new_iso):
-        tmp_orig_dir = '%s/origiso' % self.tmp_dir
-        tmp_new_dir = '%s/newiso' % self.tmp_dir
+        tmp_orig_dir = '%s/origiso' % TMP_DIR
+        tmp_new_dir = '%s/newiso' % TMP_DIR
         self.copy(tmp_orig_dir, tmp_new_dir)
         self.patch(tmp_new_dir, new_iso)
 
     def copy(self, tmp_orig_dir, tmp_new_dir):
         log('Copying...')
-        os.makedirs(tmp_orig_dir)
-        os.makedirs(tmp_new_dir)
+        self.setup_dir(tmp_orig_dir)
+        self.setup_dir(tmp_new_dir)
         exec_cmd('fuseiso %s %s' % (self.iso_file, tmp_orig_dir))
         with cd(tmp_orig_dir):
             exec_cmd('find . | cpio -pd %s' % tmp_new_dir)
         with cd(tmp_new_dir):
             exec_cmd('fusermount -u %s' % tmp_orig_dir)
-        delete(tmp_orig_dir)
+        shutil.rmtree(tmp_orig_dir)
         exec_cmd('chmod -R 755 %s' % tmp_new_dir)
 
     def patch(self, tmp_new_dir, new_iso):
         log('Patching...')
-        patch_dir = '%s/%s' % (CWD, PATCH_DIR)
+        patch_dir = '%s/%s' % (os.getcwd(), PATCH_DIR)
         ks_path = '%s/ks.cfg.patch' % patch_dir
 
         with cd(tmp_new_dir):
             exec_cmd('cat %s | patch -p0' % ks_path)
-            delete('.rr_moved')
+            shutil.rmtree('.rr_moved')
             isolinux = 'isolinux/isolinux.cfg'
             log('isolinux.cfg before: %s'
                 % exec_cmd('grep netmask %s' % isolinux))
@@ -157,152 +149,51 @@ class AutoDeploy(object):
             f.write(data)
 
     def deploy_env(self):
-        dep = CloudDeploy(self.dea, self.dha, self.fuel_conf['ip'],
-                          self.fuel_username, self.fuel_password,
-                          self.dea_file, WORK_DIR, self.no_health_check)
-        return dep.deploy()
-
-    def setup_execution_environment(self):
-        exec_env = ExecutionEnvironment(self.storage_dir, self.pxe_bridge,
-                                        self.dha_file, self.dea)
-        exec_env.setup_environment()
-
-    def cleanup_execution_environment(self):
-        exec_env = ExecutionEnvironment(self.storage_dir, self.pxe_bridge,
-                                        self.dha_file, self.dea)
-        exec_env.cleanup_environment()
-
-    def create_tmp_dir(self):
-        self.tmp_dir = '%s/fueltmp' % CWD
-        delete(self.tmp_dir)
-        create_dir_if_not_exists(self.tmp_dir)
+        dep = CloudDeploy(self.dha, self.fuel_conf['ip'], self.fuel_username,
+                          self.fuel_password, self.dea_file, WORK_DIR)
+        dep.deploy()
 
     def deploy(self):
-        self.collect_fuel_info()
-        if not self.no_fuel:
-            self.setup_execution_environment()
-            self.create_tmp_dir()
-            self.install_fuel_master()
-        if not self.fuel_only:
-            return self.deploy_env()
-        return True
-
-    def run(self):
         check_if_root()
-        if self.cleanup_only:
-            self.cleanup_execution_environment()
-        else:
-            deploy_success = self.deploy()
-            if self.cleanup:
-                self.cleanup_execution_environment()
-            return deploy_success
-        return True
-
-def check_bridge(pxe_bridge, dha_path):
-    with io.open(dha_path) as yaml_file:
-        dha_struct = yaml.load(yaml_file)
-    if dha_struct['adapter'] != 'libvirt':
-        log('Using Linux Bridge %s for booting up the Fuel Master VM'
-            % pxe_bridge)
-        r = exec_cmd('ip link show %s' % pxe_bridge)
-        if pxe_bridge in r and 'state DOWN' in r:
-            err('Linux Bridge {0} is not Active, bring'
-                ' it UP first: [ip link set dev {0} up]'.format(pxe_bridge))
-
+        self.setup_dir(TMP_DIR)
+        self.collect_fuel_info()
+        self.power_off_blades()
+        self.install_fuel_master()
+        self.cleanup_dir(TMP_DIR)
+        self.deploy_env()
 
-def check_fuel_plugins_dir(dir):
-    msg = None
-    if not dir:
-        msg = 'Fuel Plugins Directory not specified!'
-    elif not os.path.isdir(dir):
-        msg = 'Fuel Plugins Directory does not exist!'
-    elif not os.listdir(dir):
-        msg = 'Fuel Plugins Directory is empty!'
-    if msg:
-        warn('%s No external plugins will be installed!' % msg)
+def usage():
+    print '''
+    Usage:
+    python deploy.py [-nf] <isofile> <deafile> <dhafile>
 
+    Optional arguments:
+      -nf   Do not install Fuel master
+    '''
 
 def parse_arguments():
-    parser = ArgParser(prog='python %s' % __file__)
-    parser.add_argument('-nf', dest='no_fuel', action='store_true',
-                        default=False,
-                        help='Do not install Fuel Master (and Node VMs when '
-                             'using libvirt)')
-    parser.add_argument('-nh', dest='no_health_check', action='store_true',
-                        default=False,
-                        help='Don\'t run health check after deployment')
-    parser.add_argument('-fo', dest='fuel_only', action='store_true',
-                        default=False,
-                        help='Install Fuel Master only (and Node VMs when '
-                             'using libvirt)')
-    parser.add_argument('-co', dest='cleanup_only', action='store_true',
-                        default=False,
-                        help='Cleanup VMs and Virtual Networks according to '
-                             'what is defined in DHA')
-    parser.add_argument('-c', dest='cleanup', action='store_true',
-                        default=False,
-                        help='Cleanup after deploy')
-    if {'-iso', '-dea', '-dha', '-h'}.intersection(sys.argv):
-        parser.add_argument('-iso', dest='iso_file', action='store', nargs='?',
-                            default='%s/OPNFV.iso' % CWD,
-                            help='ISO File [default: OPNFV.iso]')
-        parser.add_argument('-dea', dest='dea_file', action='store', nargs='?',
-                            default='%s/dea.yaml' % CWD,
-                            help='Deployment Environment Adapter: dea.yaml')
-        parser.add_argument('-dha', dest='dha_file', action='store', nargs='?',
-                            default='%s/dha.yaml' % CWD,
-                            help='Deployment Hardware Adapter: dha.yaml')
-    else:
-        parser.add_argument('iso_file', action='store', nargs='?',
-                            default='%s/OPNFV.iso' % CWD,
-                            help='ISO File [default: OPNFV.iso]')
-        parser.add_argument('dea_file', action='store', nargs='?',
-                            default='%s/dea.yaml' % CWD,
-                            help='Deployment Environment Adapter: dea.yaml')
-        parser.add_argument('dha_file', action='store', nargs='?',
-                            default='%s/dha.yaml' % CWD,
-                            help='Deployment Hardware Adapter: dha.yaml')
-    parser.add_argument('-s', dest='storage_dir', action='store',
-                        default='%s/images' % CWD,
-                        help='Storage Directory [default: images]')
-    parser.add_argument('-b', dest='pxe_bridge', action='store',
-                        default='pxebr',
-                        help='Linux Bridge for booting up the Fuel Master VM '
-                             '[default: pxebr]')
-    parser.add_argument('-p', dest='fuel_plugins_dir', action='store',
-                        help='Fuel Plugins directory')
-
-    args = parser.parse_args()
-    log(args)
-
-    check_file_exists(args.dha_file)
-
-    if not args.cleanup_only:
-        check_file_exists(args.dea_file)
-        check_fuel_plugins_dir(args.fuel_plugins_dir)
-
-    if not args.no_fuel and not args.cleanup_only:
-        log('Using OPNFV ISO file: %s' % args.iso_file)
-        check_file_exists(args.iso_file)
-        log('Using image directory: %s' % args.storage_dir)
-        create_dir_if_not_exists(args.storage_dir)
-        check_bridge(args.pxe_bridge, args.dha_file)
-
-    kwargs = {'no_fuel': args.no_fuel, 'fuel_only': args.fuel_only,
-              'no_health_check': args.no_health_check,
-              'cleanup_only': args.cleanup_only, 'cleanup': args.cleanup,
-              'storage_dir': args.storage_dir, 'pxe_bridge': args.pxe_bridge,
-              'iso_file': args.iso_file, 'dea_file': args.dea_file,
-              'dha_file': args.dha_file,
-              'fuel_plugins_dir': args.fuel_plugins_dir}
-    return kwargs
-
+    if (len(sys.argv) < 4 or len(sys.argv) > 5
+        or (len(sys.argv) == 5 and sys.argv[1] != '-nf')):
+        log('Incorrect number of arguments')
+        usage()
+        sys.exit(1)
+    without_fuel = False
+    if len(sys.argv) == 5 and sys.argv[1] == '-nf':
+        without_fuel = True
+    iso_file = sys.argv[-3]
+    dea_file = sys.argv[-2]
+    dha_file = sys.argv[-1]
+    check_file_exists(iso_file)
+    check_file_exists(dea_file)
+    check_file_exists(dha_file)
+    return (without_fuel, iso_file, dea_file, dha_file)
 
 def main():
-    kwargs = parse_arguments()
 
-    d = AutoDeploy(**kwargs)
-    sys.exit(d.run())
+    without_fuel, iso_file, dea_file, dha_file = parse_arguments()
+
+    d = AutoDeploy(without_fuel, iso_file, dea_file, dha_file)
+    d.deploy()
 
 if __name__ == '__main__':
-    main()
+    main()
\ No newline at end of file
index be8bed3..9bc8fbb 100644 (file)
@@ -1,18 +1,7 @@
-###############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# szilard.cserey@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-###############################################################################
-
-
 import os
 import io
 import yaml
 import glob
-import time
 
 from ssh_client import SSHClient
 import common
@@ -21,48 +10,38 @@ exec_cmd = common.exec_cmd
 err = common.err
 check_file_exists = common.check_file_exists
 log = common.log
-parse = common.parse
-commafy = common.commafy
-N = common.N
-E = common.E
-R = common.R
-RO = common.RO
 
 CLOUD_DEPLOY_FILE = 'deploy.py'
-BLADE_RESTART_TIMES = 3
 
 
 class CloudDeploy(object):
 
-    def __init__(self, dea, dha, fuel_ip, fuel_username, fuel_password,
-                 dea_file, work_dir, no_health_check):
-        self.dea = dea
+    def __init__(self, dha, fuel_ip, fuel_username, fuel_password, dea_file,
+                 work_dir):
         self.dha = dha
         self.fuel_ip = fuel_ip
         self.fuel_username = fuel_username
         self.fuel_password = fuel_password
         self.dea_file = dea_file
         self.work_dir = work_dir
-        self.no_health_check = no_health_check
         self.file_dir = os.path.dirname(os.path.realpath(__file__))
         self.ssh = SSHClient(self.fuel_ip, self.fuel_username,
                              self.fuel_password)
-        self.blade_node_file = '%s/blade_node.yaml' % self.file_dir
+        self.macs_file = '%s/macs.yaml' % self.file_dir
         self.node_ids = self.dha.get_node_ids()
-        self.wanted_release = self.dea.get_property('wanted_release')
-        self.blade_node_dict = {}
-        self.macs_per_blade = {}
 
     def upload_cloud_deployment_files(self):
+        dest = '~/%s/' % self.work_dir
+
         with self.ssh as s:
-            s.exec_cmd('rm -rf %s' % self.work_dir, False)
-            s.exec_cmd('mkdir %s' % self.work_dir)
-            s.scp_put(self.dea_file, self.work_dir)
-            s.scp_put(self.blade_node_file, self.work_dir)
-            s.scp_put('%s/common.py' % self.file_dir, self.work_dir)
-            s.scp_put('%s/dea.py' % self.file_dir, self.work_dir)
+            s.exec_cmd('rm -rf %s' % self.work_dir, check=False)
+            s.exec_cmd('mkdir ~/%s' % self.work_dir)
+            s.scp_put(self.dea_file, dest)
+            s.scp_put(self.macs_file, dest)
+            s.scp_put('%s/common.py' % self.file_dir, dest)
+            s.scp_put('%s/dea.py' % self.file_dir, dest)
             for f in glob.glob('%s/cloud/*' % self.file_dir):
-                s.scp_put(f, self.work_dir)
+                s.scp_put(f, dest)
 
     def power_off_nodes(self):
         for node_id in self.node_ids:
@@ -74,173 +53,35 @@ class CloudDeploy(object):
 
     def set_boot_order(self, boot_order_list):
         for node_id in self.node_ids:
-            self.dha.node_set_boot_order(node_id, boot_order_list[:])
+            self.dha.node_set_boot_order(node_id, boot_order_list)
 
     def get_mac_addresses(self):
-        self.macs_per_blade = {}
+        macs_per_node = {}
         for node_id in self.node_ids:
-            self.macs_per_blade[node_id] = self.dha.get_node_pxe_mac(node_id)
+            macs_per_node[node_id] = self.dha.get_node_pxe_mac(node_id)
+        with io.open(self.macs_file, 'w') as stream:
+            yaml.dump(macs_per_node, stream, default_flow_style=False)
 
     def run_cloud_deploy(self, deploy_app):
         log('START CLOUD DEPLOYMENT')
         deploy_app = '%s/%s' % (self.work_dir, deploy_app)
         dea_file = '%s/%s' % (self.work_dir, os.path.basename(self.dea_file))
-        blade_node_file = '%s/%s' % (
-            self.work_dir, os.path.basename(self.blade_node_file))
-        with self.ssh as s:
-            status = s.run(
-                'python %s %s %s %s' % (
-                    deploy_app, ('-nh' if self.no_health_check else ''),
-                    dea_file, blade_node_file))
-        return status
-
-    def check_supported_release(self):
-        log('Check supported release: %s' % self.wanted_release)
-        found = False
-        release_list = parse(self.ssh.exec_cmd('fuel release -l'))
-        for release in release_list:
-            if release[R['name']] == self.wanted_release:
-                found = True
-                break
-        if not found:
-            err('This Fuel does not contain the following release: %s'
-                % self.wanted_release)
-
-    def check_previous_installation(self):
-        log('Check previous installation')
-        env_list = parse(self.ssh.exec_cmd('fuel env list'))
-        if env_list:
-            self.cleanup_fuel_environments(env_list)
-            node_list = parse(self.ssh.exec_cmd('fuel node list'))
-            if node_list:
-                self.cleanup_fuel_nodes(node_list)
-
-    def cleanup_fuel_environments(self, env_list):
-        WAIT_LOOP = 60
-        SLEEP_TIME = 10
-        for env in env_list:
-            log('Deleting environment %s' % env[E['id']])
-            self.ssh.exec_cmd('fuel env --env %s --delete --force'
-                              % env[E['id']])
-        all_env_erased = False
-        for i in range(WAIT_LOOP):
-            env_list = parse(self.ssh.exec_cmd('fuel env list'))
-            if env_list:
-                time.sleep(SLEEP_TIME)
-            else:
-                all_env_erased = True
-                break
-        if not all_env_erased:
-            err('Could not erase these environments %s'
-                % [(env[E['id']], env[E['status']]) for env in env_list])
-
-    def cleanup_fuel_nodes(self, node_list):
-        for node in node_list:
-            if node[N['status']] == 'discover':
-                log('Deleting node %s' % node[N['id']])
-                self.ssh.exec_cmd('fuel node --node-id %s --delete-from-db '
-                                  '--force' % node[N['id']])
-                self.ssh.exec_cmd('cobbler system remove --name node-%s'
-                                  % node[N['id']], False)
-
-    def check_prerequisites(self):
-        log('Check prerequisites')
+        macs_file = '%s/%s' % (self.work_dir, os.path.basename(self.macs_file))
         with self.ssh:
-            self.check_supported_release()
-            self.check_previous_installation()
+            self.ssh.run('python %s %s %s' % (deploy_app, dea_file, macs_file))
 
-    def wait_for_discovered_blades(self):
-        log('Wait for discovered blades')
-        discovered_macs = []
-        restart_times = BLADE_RESTART_TIMES
-
-        for blade in self.node_ids:
-            self.blade_node_dict[blade] = None
+    def deploy(self):
 
-        with self.ssh:
-            all_discovered = self.discovery_waiting_loop(discovered_macs)
-
-        while not all_discovered and restart_times != 0:
-            restart_times -= 1
-            for blade in self.get_not_discovered_blades():
-                self.dha.node_reset(blade)
-            with self.ssh:
-                all_discovered = self.discovery_waiting_loop(discovered_macs)
-
-        if not all_discovered:
-            err('Not all blades have been discovered: %s'
-                % self.not_discovered_blades_summary())
-
-        with io.open(self.blade_node_file, 'w') as stream:
-            yaml.dump(self.blade_node_dict, stream, default_flow_style=False)
-
-    def discovery_waiting_loop(self, discovered_macs):
-        WAIT_LOOP = 360
-        SLEEP_TIME = 10
-        all_discovered = False
-        for i in range(WAIT_LOOP):
-            node_list = parse(self.ssh.exec_cmd('fuel node list'))
-            if node_list:
-                self.node_discovery(node_list, discovered_macs)
-            if self.all_blades_discovered():
-                all_discovered = True
-                break
-            else:
-                time.sleep(SLEEP_TIME)
-        return all_discovered
-
-    def node_discovery(self, node_list, discovered_macs):
-        for node in node_list:
-            if (node[N['status']] == 'discover' and
-                node[N['online']] == 'True' and
-                node[N['mac']] not in discovered_macs):
-                discovered_macs.append(node[N['mac']])
-                blade = self.find_mac_in_dict(node[N['mac']])
-                if blade:
-                    log('Blade %s discovered as Node %s with MAC %s'
-                        % (blade, node[N['id']], node[N['mac']]))
-                    self.blade_node_dict[blade] = node[N['id']]
-
-    def find_mac_in_dict(self, mac):
-        for blade, mac_list in self.macs_per_blade.iteritems():
-            if mac in mac_list:
-                return blade
-
-    def all_blades_discovered(self):
-        for blade, node_id in self.blade_node_dict.iteritems():
-            if not node_id:
-                return False
-        return True
-
-    def not_discovered_blades_summary(self):
-        summary = ''
-        for blade, node_id in self.blade_node_dict.iteritems():
-            if not node_id:
-                summary += '\n[blade %s]' % blade
-        return summary
-
-    def get_not_discovered_blades(self):
-        not_discovered_blades = []
-        for blade, node_id in self.blade_node_dict.iteritems():
-            if not node_id:
-                not_discovered_blades.append(blade)
-        return not_discovered_blades
-
-    def set_boot_order_nodes(self):
         self.power_off_nodes()
-        self.set_boot_order(['pxe', 'disk'])
-        self.power_on_nodes()
-
-    def deploy(self):
 
-        self.set_boot_order_nodes()
+        self.set_boot_order(['pxe', 'disk'])
 
-        self.check_prerequisites()
+        self.power_on_nodes()
 
         self.get_mac_addresses()
 
-        self.wait_for_discovered_blades()
+        check_file_exists(self.macs_file)
 
         self.upload_cloud_deployment_files()
 
-        return self.run_cloud_deploy(CLOUD_DEPLOY_FILE)
+        self.run_cloud_deploy(CLOUD_DEPLOY_FILE)
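
The reworked deploy() no longer waits for blade discovery on the jump host; it only records the PXE MACs and ships them to the Fuel master. The macs.yaml it writes is a plain node-id-to-MAC-list map, roughly as below; the ids and addresses are made up for illustration, the real values come from dha.get_node_pxe_mac():

    import yaml

    macs_per_node = {
        1: ['52:54:00:aa:bb:01'],
        2: ['52:54:00:aa:bb:02'],
    }
    with open('macs.yaml', 'w') as stream:
        yaml.safe_dump(macs_per_node, stream, default_flow_style=False)
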
index 1feee60..bf9a951 100644 (file)
@@ -1,13 +1,3 @@
-###############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# szilard.cserey@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-###############################################################################
-
-
 import yaml
 import io
 
@@ -15,20 +5,15 @@ from dha_adapters.libvirt_adapter import LibvirtAdapter
 from dha_adapters.ipmi_adapter import IpmiAdapter
 from dha_adapters.hp_adapter import HpAdapter
 
-
 class DeploymentHardwareAdapter(object):
-
     def __new__(cls, yaml_path):
         with io.open(yaml_path) as yaml_file:
             dha_struct = yaml.load(yaml_file)
         type = dha_struct['adapter']
 
         if cls is DeploymentHardwareAdapter:
-            if type == 'libvirt':
-                return LibvirtAdapter(yaml_path)
-            if type == 'ipmi':
-                return IpmiAdapter(yaml_path)
-            if type == 'hp':
-                return HpAdapter(yaml_path)
+            if type == 'libvirt': return LibvirtAdapter(yaml_path)
+            if type == 'ipmi': return IpmiAdapter(yaml_path)
+            if type == 'hp': return HpAdapter(yaml_path)
 
         return super(DeploymentHardwareAdapter, cls).__new__(cls)
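
DeploymentHardwareAdapter remains a factory: __new__ inspects the 'adapter' field of the DHA file and hands back a concrete adapter, so callers never name LibvirtAdapter, IpmiAdapter or HpAdapter directly. A usage sketch, where the dha.yaml path stands in for whatever the deployer passed on the command line:

    from dha import DeploymentHardwareAdapter

    dha = DeploymentHardwareAdapter('dha.yaml')  # e.g. a LibvirtAdapter instance
    for node_id in dha.get_all_node_ids():
        dha.node_power_off(node_id)
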
index fb73157..e69de29 100644 (file)
@@ -1,8 +0,0 @@
-###############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# szilard.cserey@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-###############################################################################
index 29e04f1..884e9ce 100644 (file)
@@ -1,18 +1,7 @@
-###############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# szilard.cserey@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-###############################################################################
-
 import yaml
 import io
 
-
 class HardwareAdapter(object):
-
     def __init__(self, yaml_path):
         self.dha_struct = None
         self.parse_yaml(yaml_path)
@@ -45,15 +34,18 @@ class HardwareAdapter(object):
         node_ids.sort()
         return node_ids
 
+    def use_fuel_custom_install(self):
+        return self.dha_struct['fuelCustomInstall']
+
     def get_node_property(self, node_id, property_name):
         for node in self.dha_struct['nodes']:
             if node['id'] == node_id and property_name in node:
                 return node[property_name]
 
+    def node_can_zero_mbr(self, node_id):
+        return self.get_node_property(node_id, 'nodeCanZeroMBR')
+
     def get_fuel_access(self):
         for node in self.dha_struct['nodes']:
             if 'isFuel' in node and node['isFuel']:
                 return node['username'], node['password']
-
-    def get_disks(self):
-        return self.dha_struct['disks']
index 51f55f3..8fc38ad 100644 (file)
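
The new accessors use_fuel_custom_install() and node_can_zero_mbr() pin down a few more DHA keys. A minimal dha.yaml shape consistent with this class is sketched below; all values are placeholders, and real files add adapter-specific fields such as libvirtName or IPMI credentials:

    import yaml

    dha_skeleton = {
        'adapter': 'libvirt',           # dispatched on by DeploymentHardwareAdapter
        'fuelCustomInstall': False,     # use_fuel_custom_install()
        'nodes': [
            {'id': 1, 'isFuel': True, 'nodeCanZeroMBR': True,
             'username': '<fuel user>', 'password': '<fuel password>'},
            {'id': 2},
        ],
    }
    print(yaml.safe_dump(dha_skeleton, default_flow_style=False))
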
@@ -1,13 +1,3 @@
-###############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# szilard.cserey@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-###############################################################################
-
-
 import common
 from ipmi_adapter import IpmiAdapter
 from ssh_client import SSHClient
@@ -20,7 +10,6 @@ DEV = {'pxe': 'bootsource5',
 
 ROOT = '/system1/bootconfig1'
 
-
 class HpAdapter(IpmiAdapter):
 
     def __init__(self, yaml_path):
@@ -30,7 +19,7 @@ class HpAdapter(IpmiAdapter):
         log('Set boot order %s on Node %s' % (boot_order_list, node_id))
         ip, username, password = self.get_access_info(node_id)
         ssh = SSHClient(ip, username, password)
-        with ssh as s:
-            for order, dev in enumerate(boot_order_list):
+        for order, dev in enumerate(boot_order_list):
+            with ssh as s:
                 s.exec_cmd('set %s/%s bootorder=%s'
-                           % (ROOT, DEV[dev], order + 1))
+                           % (ROOT, DEV[dev], order+1))
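
For the HP case it may help to see the concrete iLO command the loop above produces. With boot_order_list = ['pxe', 'disk'] and the ROOT/DEV constants at the top of the file, the first iteration sends the line below over SSH; only the 'pxe' mapping (bootsource5) is visible in this hunk, so the 'disk' entry is left out here:

    ROOT = '/system1/bootconfig1'
    DEV = {'pxe': 'bootsource5'}   # 'disk' maps to another bootsourceN (not shown above)

    for order, dev in enumerate(['pxe']):
        print('set %s/%s bootorder=%s' % (ROOT, DEV[dev], order + 1))
    # -> set /system1/bootconfig1/bootsource5 bootorder=1
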
index 25aa36e..d97fd2d 100644 (file)
@@ -1,21 +1,8 @@
-###############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# szilard.cserey@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-###############################################################################
-
-
 import common
-import time
 from hardware_adapter import HardwareAdapter
 
 log = common.log
 exec_cmd = common.exec_cmd
-err = common.err
-
 
 class IpmiAdapter(HardwareAdapter):
 
@@ -40,72 +27,28 @@ class IpmiAdapter(HardwareAdapter):
         return mac_list
 
     def node_power_on(self, node_id):
-        WAIT_LOOP = 200
-        SLEEP_TIME = 3
         log('Power ON Node %s' % node_id)
         cmd_prefix = self.ipmi_cmd(node_id)
         state = exec_cmd('%s chassis power status' % cmd_prefix)
         if state == 'Chassis Power is off':
             exec_cmd('%s chassis power on' % cmd_prefix)
-            done = False
-            for i in range(WAIT_LOOP):
-                state, _ = exec_cmd('%s chassis power status' % cmd_prefix,
-                                    False)
-                if state == 'Chassis Power is on':
-                    done = True
-                    break
-                else:
-                    time.sleep(SLEEP_TIME)
-            if not done:
-                err('Could Not Power ON Node %s' % node_id)
 
     def node_power_off(self, node_id):
-        WAIT_LOOP = 200
-        SLEEP_TIME = 3
         log('Power OFF Node %s' % node_id)
         cmd_prefix = self.ipmi_cmd(node_id)
         state = exec_cmd('%s chassis power status' % cmd_prefix)
         if state == 'Chassis Power is on':
-            done = False
             exec_cmd('%s chassis power off' % cmd_prefix)
-            for i in range(WAIT_LOOP):
-                state, _ = exec_cmd('%s chassis power status' % cmd_prefix,
-                                    False)
-                if state == 'Chassis Power is off':
-                    done = True
-                    break
-                else:
-                    time.sleep(SLEEP_TIME)
-            if not done:
-                err('Could Not Power OFF Node %s' % node_id)
 
     def node_reset(self, node_id):
-        WAIT_LOOP = 600
-        log('RESET Node %s' % node_id)
+        log('Reset Node %s' % node_id)
         cmd_prefix = self.ipmi_cmd(node_id)
         state = exec_cmd('%s chassis power status' % cmd_prefix)
         if state == 'Chassis Power is on':
-            was_shut_off = False
-            done = False
             exec_cmd('%s chassis power reset' % cmd_prefix)
-            for i in range(WAIT_LOOP):
-                state, _ = exec_cmd('%s chassis power status' % cmd_prefix,
-                                    False)
-                if state == 'Chassis Power is off':
-                    was_shut_off = True
-                elif state == 'Chassis Power is on' and was_shut_off:
-                    done = True
-                    break
-                time.sleep(1)
-            if not done:
-                err('Could Not RESET Node %s' % node_id)
-        else:
-            err('Cannot RESET Node %s because it\'s not Active, state: %s'
-                % (node_id, state))
 
     def node_set_boot_order(self, node_id, boot_order_list):
         log('Set boot order %s on Node %s' % (boot_order_list, node_id))
-        boot_order_list.reverse()
         cmd_prefix = self.ipmi_cmd(node_id)
         for dev in boot_order_list:
             if dev == 'pxe':
index b285c16..dde4946 100644 (file)
@@ -1,13 +1,3 @@
-###############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# szilard.cserey@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-###############################################################################
-
-
 import common
 from lxml import etree
 from hardware_adapter import HardwareAdapter
@@ -20,7 +10,6 @@ DEV = {'pxe': 'network',
        'disk': 'hd',
        'iso': 'cdrom'}
 
-
 class LibvirtAdapter(HardwareAdapter):
 
     def __init__(self, yaml_path):
@@ -99,8 +88,7 @@ class LibvirtAdapter(HardwareAdapter):
     def node_eject_iso(self, node_id):
         vm_name = self.get_node_property(node_id, 'libvirtName')
         device = self.get_name_of_device(vm_name, 'cdrom')
-        exec_cmd('virsh change-media %s --eject %s --config --live'
-                 % (vm_name, device), False)
+        exec_cmd('virsh change-media %s --eject %s' % (vm_name, device), False)
 
     def node_insert_iso(self, node_id, iso_file):
         vm_name = self.get_node_property(node_id, 'libvirtName')
@@ -108,6 +96,12 @@ class LibvirtAdapter(HardwareAdapter):
         exec_cmd('virsh change-media %s --insert %s %s'
                  % (vm_name, device, iso_file))
 
+    def get_disks(self):
+        return self.dha_struct['disks']
+
+    def get_node_role(self, node_id):
+        return self.get_node_property(node_id, 'role')
+
     def get_node_pxe_mac(self, node_id):
         mac_list = []
         vm_name = self.get_node_property(node_id, 'libvirtName')
@@ -131,6 +125,3 @@ class LibvirtAdapter(HardwareAdapter):
                     device = target.get('dev')
                     if device:
                         return device
-
-    def get_virt_net_conf_dir(self):
-        return self.dha_struct['virtNetConfDir']
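
node_eject_iso() now calls virsh change-media without the --config --live flags. For reference, the two underlying virsh invocations look roughly like this, with made-up VM and device names; exec_cmd is the shell helper from common.py:

    import common
    exec_cmd = common.exec_cmd

    vm_name, device, iso_file = 'fuel-master', 'hdc', '/tmp/OPNFV.iso'
    exec_cmd('virsh change-media %s --eject %s' % (vm_name, device), False)
    exec_cmd('virsh change-media %s --insert %s %s' % (vm_name, device, iso_file))
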
diff --git a/fuel/deploy/environments/execution_environment.py b/fuel/deploy/environments/execution_environment.py
deleted file mode 100644 (file)
index 63be5cd..0000000
+++ /dev/null
@@ -1,78 +0,0 @@
-###############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# szilard.cserey@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-###############################################################################
-
-
-from lxml import etree
-
-import common
-from dha_adapters.libvirt_adapter import LibvirtAdapter
-
-exec_cmd = common.exec_cmd
-err = common.err
-log = common.log
-check_dir_exists = common.check_dir_exists
-check_file_exists = common.check_file_exists
-check_if_root = common.check_if_root
-
-
-class ExecutionEnvironment(object):
-
-    def __init__(self, storage_dir, dha_file, root_dir):
-        self.storage_dir = storage_dir
-        self.dha = LibvirtAdapter(dha_file)
-        self.root_dir = root_dir
-        self.parser = etree.XMLParser(remove_blank_text=True)
-        self.fuel_node_id = self.dha.get_fuel_node_id()
-
-    def delete_vm(self, node_id):
-        vm_name = self.dha.get_node_property(node_id, 'libvirtName')
-        r, c = exec_cmd('virsh dumpxml %s' % vm_name, False)
-        if c:
-            return
-        self.undefine_vm_delete_disk(r, vm_name)
-
-    def undefine_vm_delete_disk(self, printout, vm_name):
-        disk_files = []
-        xml_dump = etree.fromstring(printout, self.parser)
-        disks = xml_dump.xpath('/domain/devices/disk')
-        for disk in disks:
-            sources = disk.xpath('source')
-            for source in sources:
-                source_file = source.get('file')
-                if source_file:
-                    disk_files.append(source_file)
-        log('Deleting VM %s with disks %s' % (vm_name, disk_files))
-        exec_cmd('virsh destroy %s' % vm_name, False)
-        exec_cmd('virsh undefine %s' % vm_name, False)
-        for file in disk_files:
-            exec_cmd('rm -f %s' % file)
-
-    def define_vm(self, vm_name, temp_vm_file, disk_path):
-        log('Creating VM %s with disks %s' % (vm_name, disk_path))
-        with open(temp_vm_file) as f:
-            vm_xml = etree.parse(f)
-        names = vm_xml.xpath('/domain/name')
-        for name in names:
-            name.text = vm_name
-        uuids = vm_xml.xpath('/domain/uuid')
-        for uuid in uuids:
-            uuid.getparent().remove(uuid)
-        disks = vm_xml.xpath('/domain/devices/disk')
-        for disk in disks:
-            if (disk.get('type') == 'file' and
-                disk.get('device') == 'disk'):
-                sources = disk.xpath('source')
-                for source in sources:
-                    disk.remove(source)
-                source = etree.Element('source')
-                source.set('file', disk_path)
-                disk.append(source)
-        with open(temp_vm_file, 'w') as f:
-            vm_xml.write(f, pretty_print=True, xml_declaration=True)
-        exec_cmd('virsh define %s' % temp_vm_file)
diff --git a/fuel/deploy/environments/libvirt_environment.py b/fuel/deploy/environments/libvirt_environment.py
deleted file mode 100644 (file)
index 785eeca..0000000
+++ /dev/null
@@ -1,107 +0,0 @@
-###############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# szilard.cserey@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-###############################################################################
-
-
-from lxml import etree
-import glob
-
-import common
-from execution_environment import ExecutionEnvironment
-
-exec_cmd = common.exec_cmd
-err = common.err
-log = common.log
-check_dir_exists = common.check_dir_exists
-check_file_exists = common.check_file_exists
-check_if_root = common.check_if_root
-
-
-class LibvirtEnvironment(ExecutionEnvironment):
-
-    def __init__(self, storage_dir, dha_file, dea, root_dir):
-        super(LibvirtEnvironment, self).__init__(
-            storage_dir, dha_file, root_dir)
-        self.dea = dea
-        self.network_dir = '%s/%s' % (self.root_dir,
-                                      self.dha.get_virt_net_conf_dir())
-        self.node_ids = self.dha.get_all_node_ids()
-        self.net_names = self.collect_net_names()
-
-    def create_storage(self, node_id, disk_path, disk_sizes):
-        if node_id == self.fuel_node_id:
-            disk_size = disk_sizes['fuel']
-        else:
-            roles = self.dea.get_node_role(node_id)
-            role = 'controller' if 'controller' in roles else 'compute'
-            disk_size = disk_sizes[role]
-        exec_cmd('fallocate -l %s %s' % (disk_size, disk_path))
-
-    def create_vms(self):
-        temp_dir = exec_cmd('mktemp -d')
-        disk_sizes = self.dha.get_disks()
-        for node_id in self.node_ids:
-            vm_name = self.dha.get_node_property(node_id, 'libvirtName')
-            vm_template = '%s/%s' % (self.root_dir,
-                                     self.dha.get_node_property(
-                                         node_id, 'libvirtTemplate'))
-            check_file_exists(vm_template)
-            disk_path = '%s/%s.raw' % (self.storage_dir, vm_name)
-            self.create_storage(node_id, disk_path, disk_sizes)
-            temp_vm_file = '%s/%s' % (temp_dir, vm_name)
-            exec_cmd('cp %s %s' % (vm_template, temp_vm_file))
-            self.define_vm(vm_name, temp_vm_file, disk_path)
-        exec_cmd('rm -fr %s' % temp_dir)
-
-    def start_vms(self):
-        for node_id in self.node_ids:
-            self.dha.node_power_on(node_id)
-
-    def create_networks(self):
-        for net_file in glob.glob('%s/*' % self.network_dir):
-            exec_cmd('virsh net-define %s' % net_file)
-        for net in self.net_names:
-            log('Creating network %s' % net)
-            exec_cmd('virsh net-autostart %s' % net)
-            exec_cmd('virsh net-start %s' % net)
-
-    def delete_networks(self):
-        for net in self.net_names:
-            log('Deleting network %s' % net)
-            exec_cmd('virsh net-destroy %s' % net, False)
-            exec_cmd('virsh net-undefine %s' % net, False)
-
-    def get_net_name(self, net_file):
-        with open(net_file) as f:
-            net_xml = etree.parse(f)
-            name_list = net_xml.xpath('/network/name')
-            for name in name_list:
-                net_name = name.text
-        return net_name
-
-    def collect_net_names(self):
-        net_list = []
-        for net_file in glob.glob('%s/*' % self.network_dir):
-            name = self.get_net_name(net_file)
-            net_list.append(name)
-        return net_list
-
-    def delete_vms(self):
-        for node_id in self.node_ids:
-            self.delete_vm(node_id)
-
-    def setup_environment(self):
-        check_dir_exists(self.network_dir)
-        self.cleanup_environment()
-        self.create_networks()
-        self.create_vms()
-        self.start_vms()
-
-    def cleanup_environment(self):
-        self.delete_vms()
-        self.delete_networks()
diff --git a/fuel/deploy/environments/virtual_fuel.py b/fuel/deploy/environments/virtual_fuel.py
deleted file mode 100644 (file)
index cb8be63..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
-###############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# szilard.cserey@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-###############################################################################
-
-
-from lxml import etree
-
-import common
-from execution_environment import ExecutionEnvironment
-
-exec_cmd = common.exec_cmd
-log = common.log
-check_file_exists = common.check_file_exists
-check_if_root = common.check_if_root
-
-
-class VirtualFuel(ExecutionEnvironment):
-
-    def __init__(self, storage_dir, pxe_bridge, dha_file, root_dir):
-        super(VirtualFuel, self).__init__(storage_dir, dha_file, root_dir)
-        self.pxe_bridge = pxe_bridge
-
-    def set_vm_nic(self, temp_vm_file):
-        with open(temp_vm_file) as f:
-            vm_xml = etree.parse(f)
-        interfaces = vm_xml.xpath('/domain/devices/interface')
-        for interface in interfaces:
-            interface.getparent().remove(interface)
-        interface = etree.Element('interface')
-        interface.set('type', 'bridge')
-        source = etree.SubElement(interface, 'source')
-        source.set('bridge', self.pxe_bridge)
-        model = etree.SubElement(interface, 'model')
-        model.set('type', 'virtio')
-        devices = vm_xml.xpath('/domain/devices')
-        if devices:
-            device = devices[0]
-            device.append(interface)
-        with open(temp_vm_file, 'w') as f:
-            vm_xml.write(f, pretty_print=True, xml_declaration=True)
-
-    def create_vm(self):
-        temp_dir = exec_cmd('mktemp -d')
-        vm_name = self.dha.get_node_property(self.fuel_node_id, 'libvirtName')
-        vm_template = '%s/%s' % (self.root_dir,
-                                 self.dha.get_node_property(
-                                     self.fuel_node_id, 'libvirtTemplate'))
-        check_file_exists(vm_template)
-        disk_path = '%s/%s.raw' % (self.storage_dir, vm_name)
-        disk_sizes = self.dha.get_disks()
-        disk_size = disk_sizes['fuel']
-        exec_cmd('fallocate -l %s %s' % (disk_size, disk_path))
-        temp_vm_file = '%s/%s' % (temp_dir, vm_name)
-        exec_cmd('cp %s %s' % (vm_template, temp_vm_file))
-        self.set_vm_nic(temp_vm_file)
-        self.define_vm(vm_name, temp_vm_file, disk_path)
-        exec_cmd('rm -fr %s' % temp_dir)
-
-    def setup_environment(self):
-        check_if_root()
-        self.cleanup_environment()
-        self.create_vm()
-
-    def cleanup_environment(self):
-        self.delete_vm(self.fuel_node_id)
diff --git a/fuel/deploy/execution_environment.py b/fuel/deploy/execution_environment.py
deleted file mode 100644 (file)
index e671463..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
-###############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# szilard.cserey@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-###############################################################################
-
-
-import yaml
-import io
-import os
-
-import common
-from environments.libvirt_environment import LibvirtEnvironment
-from environments.virtual_fuel import VirtualFuel
-
-exec_cmd = common.exec_cmd
-err = common.err
-log = common.log
-check_dir_exists = common.check_dir_exists
-check_file_exists = common.check_file_exists
-check_if_root = common.check_if_root
-ArgParser = common.ArgParser
-
-
-class ExecutionEnvironment(object):
-
-    def __new__(cls, storage_dir, pxe_bridge, dha_path, dea):
-
-        with io.open(dha_path) as yaml_file:
-            dha_struct = yaml.load(yaml_file)
-
-        type = dha_struct['adapter']
-
-        root_dir = os.path.dirname(os.path.realpath(__file__))
-
-        if cls is ExecutionEnvironment:
-            if type == 'libvirt':
-                return LibvirtEnvironment(storage_dir, dha_path, dea, root_dir)
-
-            if type == 'ipmi' or type == 'hp':
-                return VirtualFuel(storage_dir, pxe_bridge, dha_path, root_dir)
-
-        return super(ExecutionEnvironment, cls).__new__(cls)
diff --git a/fuel/deploy/fuel_patch/ks.cfg.patch b/fuel/deploy/fuel_patch/ks.cfg.patch
deleted file mode 100644 (file)
index 1896957..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-*** ks.cfg.orig        Wed Apr 15 21:47:09 2015
---- ks.cfg     Wed Apr 15 21:47:24 2015
-***************
-*** 35,41 ****
-  default_drive=`echo ${drives} ${removable_drives} | awk '{print $1}'`
-  
-  installdrive="undefined"
-! forceformat="no"
-  for I in `cat /proc/cmdline`; do case "$I" in *=*) eval $I;; esac ; done
-  
-  set ${drives} ${removable_drives}
---- 35,41 ----
-  default_drive=`echo ${drives} ${removable_drives} | awk '{print $1}'`
-  
-  installdrive="undefined"
-! forceformat="yes"
-  for I in `cat /proc/cmdline`; do case "$I" in *=*) eval $I;; esac ; done
-  
-  set ${drives} ${removable_drives}
diff --git a/fuel/deploy/install-ubuntu-packages.sh b/fuel/deploy/install-ubuntu-packages.sh
new file mode 100755 (executable)
index 0000000..1ebd7c0
--- /dev/null
@@ -0,0 +1,18 @@
+#!/bin/bash
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Tools for installation on the libvirt server/base host
+#
+apt-get install -y libvirt-bin qemu-kvm tightvncserver virt-manager \
+   sshpass fuseiso genisoimage blackbox xterm python-yaml python-netaddr \
+   python-paramiko python-lxml python-pip
+pip install scp
+restart libvirt-bin
\ No newline at end of file
index 0e3c1c0..bb8e7e1 100644 (file)
@@ -1,37 +1,20 @@
-###############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# szilard.cserey@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-###############################################################################
-
-
 import common
 import time
 import os
-import glob
 from ssh_client import SSHClient
 from dha_adapters.libvirt_adapter import LibvirtAdapter
 
 log = common.log
 err = common.err
 clean = common.clean
-delete = common.delete
 
 TRANSPLANT_FUEL_SETTINGS = 'transplant_fuel_settings.py'
 BOOTSTRAP_ADMIN = '/usr/local/sbin/bootstrap_admin_node'
-FUEL_CLIENT_CONFIG = '/etc/fuel/client/config.yaml'
-PLUGINS_DIR = '~/plugins'
-LOCAL_PLUGIN_FOLDER = '/opt/opnfv'
-
 
 class InstallFuelMaster(object):
 
-    def __init__(self, dea_file, dha_file, fuel_ip, fuel_username,
-                 fuel_password, fuel_node_id, iso_file, work_dir,
-                 fuel_plugins_dir):
+    def __init__(self, dea_file, dha_file, fuel_ip, fuel_username, fuel_password,
+                 fuel_node_id, iso_file, work_dir):
         self.dea_file = dea_file
         self.dha = LibvirtAdapter(dha_file)
         self.fuel_ip = fuel_ip
@@ -39,9 +22,7 @@ class InstallFuelMaster(object):
         self.fuel_password = fuel_password
         self.fuel_node_id = fuel_node_id
         self.iso_file = iso_file
-        self.iso_dir = os.path.dirname(self.iso_file)
         self.work_dir = work_dir
-        self.fuel_plugins_dir = fuel_plugins_dir
         self.file_dir = os.path.dirname(os.path.realpath(__file__))
         self.ssh = SSHClient(self.fuel_ip, self.fuel_username,
                              self.fuel_password)
@@ -51,16 +32,21 @@ class InstallFuelMaster(object):
 
         self.dha.node_power_off(self.fuel_node_id)
 
+        self.zero_mbr_set_boot_order()
+
+        self.proceed_with_installation()
+
+    def custom_install(self):
+        log('Start Custom Fuel Installation')
+
+        self.dha.node_power_off(self.fuel_node_id)
+
         log('Zero the MBR')
         self.dha.node_zero_mbr(self.fuel_node_id)
 
         self.dha.node_set_boot_order(self.fuel_node_id, ['disk', 'iso'])
 
-        try:
-            self.proceed_with_installation()
-        except Exception as e:
-            self.post_install_cleanup()
-            err(e)
+        self.proceed_with_installation()
 
     def proceed_with_installation(self):
         log('Eject ISO')
@@ -82,7 +68,7 @@ class InstallFuelMaster(object):
 
         log('Let the Fuel deployment continue')
         log('Found FUEL menu as PID %s, now killing it' % fuel_menu_pid)
-        self.ssh_exec_cmd('kill %s' % fuel_menu_pid, False)
+        self.ssh_exec_cmd('kill %s' % fuel_menu_pid)
 
         log('Wait until installation complete')
         self.wait_until_installation_completed()
@@ -90,36 +76,22 @@ class InstallFuelMaster(object):
         log('Waiting for one minute for Fuel to stabilize')
         time.sleep(60)
 
-        self.delete_deprecated_fuel_client_config_from_fuel_6_1()
-
-        self.collect_plugin_files()
-
-        self.install_plugins()
-
-        self.post_install_cleanup()
+        log('Eject ISO')
+        self.dha.node_eject_iso(self.fuel_node_id)
 
         log('Fuel Master installed successfully !')
 
-    def collect_plugin_files(self):
-        with self.ssh as s:
-            s.exec_cmd('mkdir %s' % PLUGINS_DIR)
-            if self.fuel_plugins_dir:
-                for f in glob.glob('%s/*.rpm' % self.fuel_plugins_dir):
-                    s.scp_put(f, PLUGINS_DIR)
-            else:
-                s.exec_cmd('cp %s/*.rpm %s' % (LOCAL_PLUGIN_FOLDER,
-                                               PLUGINS_DIR))
-
-    def install_plugins(self):
-        log('Installing Fuel Plugins')
-        with self.ssh as s:
-            r = s.exec_cmd('find %s -type f -name \'*.rpm\'' % PLUGINS_DIR)
-            for f in r.splitlines():
-                log('Found plugin %s, installing ...' % f)
-                r, e = s.exec_cmd('fuel plugins --install %s' % f, False)
-                if e and 'does not update installed package' not in r:
-                    raise Exception('Installation of Fuel Plugin %s '
-                                    'failed: %s' % (f, e))
+    def zero_mbr_set_boot_order(self):
+        if self.dha.node_can_zero_mbr(self.fuel_node_id):
+            log('Fuel Node %s capable of zeroing MBR so doing that...'
+                % self.fuel_node_id)
+            self.dha.node_zero_mbr(self.fuel_node_id)
+            self.dha.node_set_boot_order(self.fuel_node_id, ['disk', 'iso'])
+        elif self.dha.node_can_set_boot_order_live(self.fuel_node_id):
+            log('Node %s can change ISO boot order live' % self.fuel_node_id)
+            self.dha.node_set_boot_order(self.fuel_node_id, ['iso', 'disk'])
+        else:
+            err('No way to install Fuel node')
 
     def wait_for_node_up(self):
         WAIT_LOOP = 60
@@ -131,14 +103,14 @@ class InstallFuelMaster(object):
                 success = True
                 break
             except Exception as e:
-                log('Trying to SSH into Fuel VM %s ... sleeping %s seconds'
-                    % (self.fuel_ip, SLEEP_TIME))
+                log('EXCEPTION [%s] received when SSH-ing into Fuel VM %s ... '
+                    'sleeping %s seconds' % (e, self.fuel_ip, SLEEP_TIME))
                 time.sleep(SLEEP_TIME)
             finally:
                 self.ssh.close()
 
         if not success:
-            raise Exception('Could not SSH into Fuel VM %s' % self.fuel_ip)
+            err('Could not SSH into Fuel VM %s' % self.fuel_ip)
 
     def wait_until_fuel_menu_up(self):
         WAIT_LOOP = 60
@@ -155,35 +127,39 @@ class InstallFuelMaster(object):
                 else:
                     break
         if not fuel_menu_pid:
-            raise Exception('Could not find the Fuel Menu Process ID')
+            err('Could not find the Fuel Menu Process ID')
         return fuel_menu_pid
 
     def get_fuel_menu_pid(self, printout, search):
+        fuel_menu_pid = None
         for line in printout.splitlines():
-            if line.endswith(search):
-                return clean(line)[1]
+            if search in line:
+                fuel_menu_pid = clean(line)[1]
+                break
+        return fuel_menu_pid
 
-    def ssh_exec_cmd(self, cmd, check=True):
+    def ssh_exec_cmd(self, cmd):
         with self.ssh:
-            ret = self.ssh.exec_cmd(cmd, check=check)
+            ret = self.ssh.exec_cmd(cmd)
         return ret
 
     def inject_own_astute_yaml(self):
+        dest = '~/%s/' % self.work_dir
+
         with self.ssh as s:
-            s.exec_cmd('rm -rf %s' % self.work_dir, False)
-            s.exec_cmd('mkdir %s' % self.work_dir)
-            s.scp_put(self.dea_file, self.work_dir)
-            s.scp_put('%s/common.py' % self.file_dir, self.work_dir)
-            s.scp_put('%s/dea.py' % self.file_dir, self.work_dir)
-            s.scp_put('%s/transplant_fuel_settings.py'
-                      % self.file_dir, self.work_dir)
+            s.exec_cmd('rm -rf %s' % self.work_dir, check=False)
+            s.exec_cmd('mkdir ~/%s' % self.work_dir)
+            s.scp_put(self.dea_file, dest)
+            s.scp_put('%s/common.py' % self.file_dir, dest)
+            s.scp_put('%s/dea.py' % self.file_dir, dest)
+            s.scp_put('%s/transplant_fuel_settings.py' % self.file_dir, dest)
             log('Modifying Fuel astute')
-            s.run('python %s/%s %s/%s'
+            s.run('python ~/%s/%s ~/%s/%s'
                   % (self.work_dir, TRANSPLANT_FUEL_SETTINGS,
                      self.work_dir, os.path.basename(self.dea_file)))
 
     def wait_until_installation_completed(self):
-        WAIT_LOOP = 360
+        WAIT_LOOP = 180
         SLEEP_TIME = 10
         CMD = 'ps -ef | grep %s | grep -v grep' % BOOTSTRAP_ADMIN
 
@@ -198,21 +174,4 @@ class InstallFuelMaster(object):
                     time.sleep(SLEEP_TIME)
 
         if not install_completed:
-            raise Exception('Fuel installation did not complete')
-
-    def post_install_cleanup(self):
-        log('Eject ISO file %s' % self.iso_file)
-        self.dha.node_eject_iso(self.fuel_node_id)
-        log('Remove ISO directory %s' % self.iso_dir)
-        delete(self.iso_dir)
-
-    def delete_deprecated_fuel_client_config_from_fuel_6_1(self):
-        with self.ssh as s:
-            response, error = s.exec_cmd('fuel -v', False)
-        if (error and
-            'DEPRECATION WARNING' in error and
-            '6.1.0' in error and
-            FUEL_CLIENT_CONFIG in error):
-            log('Delete deprecated fuel client config %s' % FUEL_CLIENT_CONFIG)
-            with self.ssh as s:
-                s.exec_cmd('rm %s' % FUEL_CLIENT_CONFIG, False)
+            err('Fuel installation did not complete')
@@ -1,38 +1,40 @@
 title: Deployment Environment Adapter (DEA)
 # DEA API version supported
-version:
-created:
-comment: Config for LF POD2 - HA deployment with Ceph and Opendaylight
-environment:
-  name: opnfv
-  mode: ha
-  net_segment_type: gre
-wanted_release: Juno on Ubuntu 14.04.1
+version: 1.1
+created: Sat Apr 25 16:26:22 UTC 2015
+comment: Small libvirt setup
+environment_name: opnfv59-b
+environment_mode: multinode
+wanted_release: Juno on Ubuntu 12.04.4
 nodes:
 - id: 1
-  interfaces: interfaces_1
-  transformations: transformations_1
-  role: ceph-osd,controller
+  interfaces: interface1
+  transformations: controller1
+  role: controller
 - id: 2
-  interfaces: interfaces_1
-  transformations: transformations_1
-  role: ceph-osd,controller
+  interfaces: interface1
+  transformations: controller1
+  role: controller
 - id: 3
-  interfaces: interfaces_1
-  transformations: transformations_1
-  role: ceph-osd,controller
+  interfaces: interface1
+  transformations: controller1
+  role: controller
 - id: 4
-  interfaces: interfaces_1
-  transformations: transformations_2
-  role: ceph-osd,compute
+  interfaces: interface1
+  transformations: compute1
+  role: compute
 - id: 5
-  interfaces: interfaces_1
-  transformations: transformations_2
-  role: ceph-osd,compute
+  interfaces: interface1
+  transformations: compute1
+  role: compute
+- id: 6
+  interfaces: interface1
+  transformations: compute1
+  role: compute
 fuel:
   ADMIN_NETWORK:
     ipaddress: 10.20.0.2
-    netmask: 255.255.0.0
+    netmask: 255.255.255.0
     dhcp_pool_start: 10.20.0.3
     dhcp_pool_end: 10.20.0.254
   DNS_UPSTREAM: 8.8.8.8
@@ -41,104 +43,178 @@ fuel:
   FUEL_ACCESS:
     user: admin
     password: admin
-  HOSTNAME: opnfv
+  HOSTNAME: opnfv59
   NTP1: 0.pool.ntp.org
   NTP2: 1.pool.ntp.org
   NTP3: 2.pool.ntp.org
-interfaces_1:
-  eth0:
-  - fuelweb_admin
-  - management
-  - storage
-  - private
-  eth2:
-  - public
-transformations_1:
-  transformations:
-  - action: add-br
-    name: br-fw-admin
-  - action: add-br
-    name: br-mgmt
-  - action: add-br
-    name: br-storage
-  - action: add-br
-    name: br-ex
-  - action: add-br
-    name: br-floating
-    provider: ovs
-  - action: add-patch
-    bridges:
-    - br-floating
-    - br-ex
-    mtu: 65000
-    provider: ovs
-  - action: add-br
-    name: br-mesh
-  - action: add-port
-    bridge: br-fw-admin
-    name: eth0
-  - action: add-port
-    bridge: br-mgmt
-    name: eth0.300
-  - action: add-port
-    bridge: br-storage
-    name: eth0.301
-  - action: add-port
-    bridge: br-mesh
-    name: eth0.302
-  - action: add-port
-    bridge: br-ex
-    name: eth2
-transformations_2:
-  transformations:
-  - action: add-br
-    name: br-fw-admin
-  - action: add-br
-    name: br-mgmt
-  - action: add-br
-    name: br-storage
-  - action: add-br
-    name: br-mesh
-  - action: add-port
-    bridge: br-fw-admin
-    name: eth0
-  - action: add-port
-    bridge: br-mgmt
-    name: eth0.300
-  - action: add-port
-    bridge: br-storage
-    name: eth0.301
-  - action: add-port
-    bridge: br-mesh
-    name: eth0.302
+interfaces:
+  interface1:
+    eth0:
+    - fuelweb_admin
+    - management
+    eth1:
+    - storage
+    eth2:
+    - private
+    eth3:
+    - public
+transformations:
+  controller1:
+    - action: add-br
+      name: br-eth0
+    - action: add-port
+      bridge: br-eth0
+      name: eth0
+    - action: add-br
+      name: br-eth1
+    - action: add-port
+      bridge: br-eth1
+      name: eth1
+    - action: add-br
+      name: br-eth2
+    - action: add-port
+      bridge: br-eth2
+      name: eth2
+    - action: add-br
+      name: br-eth3
+    - action: add-port
+      bridge: br-eth3
+      name: eth3
+    - action: add-br
+      name: br-ex
+    - action: add-br
+      name: br-mgmt
+    - action: add-br
+      name: br-storage
+    - action: add-br
+      name: br-fw-admin
+    - action: add-patch
+      bridges:
+      - br-eth1
+      - br-storage
+      tags:
+      - 102
+      - 0
+      vlan_ids:
+      - 102
+      - 0
+    - action: add-patch
+      bridges:
+      - br-eth0
+      - br-mgmt
+      tags:
+      - 101
+      - 0
+      vlan_ids:
+      - 101
+      - 0
+    - action: add-patch
+      bridges:
+      - br-eth0
+      - br-fw-admin
+      trunks:
+      - 0
+    - action: add-patch
+      bridges:
+      - br-eth3
+      - br-ex
+      trunks:
+      - 0
+    - action: add-br
+      name: br-prv
+    - action: add-patch
+      bridges:
+      - br-eth2
+      - br-prv
+  compute1:
+    - action: add-br
+      name: br-eth0
+    - action: add-port
+      bridge: br-eth0
+      name: eth0
+    - action: add-br
+      name: br-eth1
+    - action: add-port
+      bridge: br-eth1
+      name: eth1
+    - action: add-br
+      name: br-eth2
+    - action: add-port
+      bridge: br-eth2
+      name: eth2
+    - action: add-br
+      name: br-eth3
+    - action: add-port
+      bridge: br-eth3
+      name: eth3
+    - action: add-br
+      name: br-mgmt
+    - action: add-br
+      name: br-storage
+    - action: add-br
+      name: br-fw-admin
+    - action: add-patch
+      bridges:
+      - br-eth1
+      - br-storage
+      tags:
+      - 102
+      - 0
+      vlan_ids:
+      - 102
+      - 0
+    - action: add-patch
+      bridges:
+      - br-eth0
+      - br-mgmt
+      tags:
+      - 101
+      - 0
+      vlan_ids:
+      - 101
+      - 0
+    - action: add-patch
+      bridges:
+      - br-eth0
+      - br-fw-admin
+      trunks:
+      - 0
+    - action: add-br
+      name: br-prv
+    - action: add-patch
+      bridges:
+      - br-eth2
+      - br-prv
+opnfv:
+  compute: {}
+  controller: {}
 network:
-  management_vip: 192.168.1.2
-  management_vrouter_vip: 192.168.1.3
   networking_parameters:
     base_mac: fa:16:3e:00:00:00
     dns_nameservers:
     - 8.8.4.4
     - 8.8.8.8
     floating_ranges:
-    - - 172.30.10.160
-      - 172.30.10.254
+    - - 172.16.0.130
+      - 172.16.0.254
     gre_id_range:
     - 2
     - 65535
     internal_cidr: 192.168.111.0/24
     internal_gateway: 192.168.111.1
     net_l23_provider: ovs
-    segmentation_type: gre
+    segmentation_type: vlan
     vlan_range:
     - 1000
     - 1030
   networks:
-  - cidr: 172.30.10.0/24
-    gateway: 172.30.10.1
+  - cidr: 172.16.0.0/24
+    gateway: 172.16.0.1
     ip_ranges:
-    - - 172.30.10.64
-      - 172.30.10.159
+    - - 172.16.0.2
+      - 172.16.0.126
     meta:
+      assign_vip: true
       cidr: 172.16.0.0/24
       configurable: true
       floating_range_var: floating_ranges
@@ -151,36 +227,16 @@ network:
       render_addr_mask: public
       render_type: null
       use_gateway: true
-      vips:
-      - haproxy
-      - vrouter
       vlan_start: null
     name: public
     vlan_start: null
-  - cidr: 192.168.2.0/24
-    gateway: null
-    ip_ranges:
-    - - 192.168.2.2
-      - 192.168.2.254
-    meta:
-      cidr: 192.168.2.0/24
-      configurable: true
-      map_priority: 2
-      name: private
-      notation: cidr
-      render_addr_mask: private
-      render_type: cidr
-      seg_type: gre
-      use_gateway: false
-      vlan_start: 103
-    name: private
-    vlan_start: 302
-  - cidr: 192.168.1.0/24
+  - cidr: 192.168.0.0/24
     gateway: null
     ip_ranges:
-    - - 192.168.1.2
-      - 192.168.1.254
+    - - 192.168.0.1
+      - 192.168.0.254
     meta:
+      assign_vip: true
       cidr: 192.168.0.0/24
       configurable: true
       map_priority: 2
@@ -189,18 +245,16 @@ network:
       render_addr_mask: internal
       render_type: cidr
       use_gateway: false
-      vips:
-      - haproxy
-      - vrouter
       vlan_start: 101
     name: management
-    vlan_start: 300
-  - cidr: 192.168.0.0/24
+    vlan_start: 101
+  - cidr: 192.168.1.0/24
     gateway: null
     ip_ranges:
-    - - 192.168.0.2
-      - 192.168.0.254
+    - - 192.168.1.1
+      - 192.168.1.254
     meta:
+      assign_vip: false
       cidr: 192.168.1.0/24
       configurable: true
       map_priority: 2
@@ -211,13 +265,31 @@ network:
       use_gateway: false
       vlan_start: 102
     name: storage
-    vlan_start: 301
-  - cidr: 10.20.0.0/16
-    gateway: 10.20.0.2
+    vlan_start: 102
+  - cidr: null
+    gateway: null
+    ip_ranges: []
+    meta:
+      assign_vip: false
+      configurable: false
+      map_priority: 2
+      name: private
+      neutron_vlan_range: true
+      notation: null
+      render_addr_mask: null
+      render_type: null
+      seg_type: vlan
+      use_gateway: false
+      vlan_start: null
+    name: private
+    vlan_start: null
+  - cidr: 10.20.0.0/24
+    gateway: null
     ip_ranges:
     - - 10.20.0.3
       - 10.20.0.254
     meta:
+      assign_vip: false
       configurable: false
       map_priority: 0
       notation: ip_ranges
@@ -227,17 +299,12 @@ network:
       use_gateway: true
     name: fuelweb_admin
     vlan_start: null
-  public_vip: 172.30.10.64
-  public_vrouter_vip: 172.30.10.65
 settings:
   editable:
     access:
       email:
         description: Email address for Administrator
-        label: Email
-        regex:
-          error: Invalid email
-          source: ^\S+@\S+$
+        label: email
         type: text
         value: admin@localhost
         weight: 40
@@ -246,30 +313,25 @@ settings:
         weight: 10
       password:
         description: Password for Administrator
-        label: Password
-        regex:
-          error: Empty password
-          source: \S
+        label: password
         type: password
         value: admin
         weight: 20
       tenant:
         description: Tenant (project) name for Administrator
-        label: Tenant
+        label: tenant
         regex:
           error: Invalid tenant name
-          source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$)(?!.*
-            +.*$).+
+          source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$).*
         type: text
         value: admin
         weight: 30
       user:
         description: Username for Administrator
-        label: Username
+        label: username
         regex:
           error: Invalid username
-          source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$)(?!.*
-            +.*$).+
+          source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$).*
         type: text
         value: admin
         weight: 10
@@ -289,14 +351,6 @@ settings:
       metadata:
         label: Additional Components
         weight: 20
-      mongo:
-        description: If selected, You can use external Mongo DB as ceilometer backend
-        label: Use external Mongo DB
-        restrictions:
-        - settings:additional_components.ceilometer.value == false
-        type: checkbox
-        value: false
-        weight: 40
       murano:
         description: If selected, Murano component will be installed
         label: Install Murano
@@ -315,7 +369,7 @@ settings:
       auth_key:
         description: Public key(s) to include in authorized_keys on deployed nodes
         label: Public Key
-        type: textarea
+        type: text
         value: ''
         weight: 70
       auto_assign_floating_ip:
@@ -323,11 +377,24 @@ settings:
           to a new instance
         label: Auto assign floating IP
         restrictions:
-        - action: hide
-          condition: cluster:net_provider == 'neutron'
+        - cluster:net_provider == 'neutron'
         type: checkbox
         value: false
         weight: 40
+      compute_scheduler_driver:
+        label: Scheduler driver
+        type: radio
+        value: nova.scheduler.filter_scheduler.FilterScheduler
+        values:
+        - data: nova.scheduler.filter_scheduler.FilterScheduler
+          description: Currently the most advanced OpenStack scheduler. See the OpenStack
+            documentation for details.
+          label: Filter scheduler
+        - data: nova.scheduler.simple.SimpleScheduler
+          description: This is 'naive' scheduler which tries to find the least loaded
+            host
+          label: Simple scheduler
+        weight: 40
       debug:
         description: Debug logging mode provides more information, but requires more
           disk space.
@@ -335,6 +402,17 @@ settings:
         type: checkbox
         value: false
         weight: 20
+      disable_offload:
+        description: If set, generic segmentation offload (gso) and generic receive
+          offload (gro) on physical nics will be disabled. See ethtool man.
+        label: Disable generic offload on physical nics
+        restrictions:
+        - action: hide
+          condition: cluster:net_provider == 'neutron' and networking_parameters:segmentation_type
+            == 'gre'
+        type: checkbox
+        value: true
+        weight: 80
       libvirt_type:
         label: Hypervisor type
         type: radio
@@ -343,10 +421,21 @@ settings:
         - data: kvm
           description: Choose this type of hypervisor if you run OpenStack on hardware
           label: KVM
+          restrictions:
+          - settings:common.libvirt_type.value == 'vcenter'
         - data: qemu
           description: Choose this type of hypervisor if you run OpenStack on virtual
             hosts.
           label: QEMU
+          restrictions:
+          - settings:common.libvirt_type.value == 'vcenter'
+        - data: vcenter
+          description: Choose this type of hypervisor if you run OpenStack in a vCenter
+            environment.
+          label: vCenter
+          restrictions:
+          - settings:common.libvirt_type.value != 'vcenter' or cluster:net_provider
+            == 'neutron'
         weight: 30
       metadata:
         label: Common
@@ -358,19 +447,12 @@ settings:
         type: checkbox
         value: false
         weight: 25
-      puppet_debug:
-        description: Debug puppet logging mode provides more information, but requires
-          more disk space.
-        label: Puppet debug logging
-        type: checkbox
-        value: true
-        weight: 20
       resume_guests_state_on_host_boot:
         description: Whether to resume previous guests state when the host reboots.
           If enabled, this option causes guests assigned to the host to resume their
           previous state. If the guest was running a restart will be attempted when
-          nova-compute starts. If the guest was not running previously, a restart will
-          not be attempted.
+          nova-compute starts. If the guest was not running previously, a restart
+          will not be attempted.
         label: Resume guests state on host boot
         type: checkbox
         value: true
@@ -383,10 +465,6 @@ settings:
         type: checkbox
         value: true
         weight: 50
-      use_vcenter:
-        type: hidden
-        value: false
-        weight: 30
     corosync:
       group:
         description: ''
@@ -416,74 +494,19 @@ settings:
       dns_list:
         description: List of upstream DNS servers, separated by comma
         label: DNS list
-        regex:
-          error: Invalid IP address list
-          source: ^\*$|^(?:\d|1?\d\d|2[0-4]\d|25[0-5])(?:\.(?:\d|1?\d\d|2[0-4]\d|25[0-5])){3}(?:\s*,\s*(?:\d|1?\d\d|2[0-4]\d|25[0-5])(?:\.(?:\d|1?\d\d|2[0-4]\d|25[0-5])){3})*$
         type: text
-        value: 8.8.4.4, 8.8.8.8
+        value: 8.8.8.8, 8.8.4.4
         weight: 10
       metadata:
-        label: Host OS DNS Servers
+        label: Upstream DNS
         weight: 90
-    external_mongo:
-      hosts_ip:
-        description: IP Addresses of MongoDB. Use comma to split IPs
-        label: MongoDB hosts IP
-        regex:
-          error: Invalid hosts ip sequence
-          source: ^(((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?),)*((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$
-        type: text
-        value: ''
-        weight: 30
-      metadata:
-        label: External MongoDB
-        restrictions:
-        - action: hide
-          condition: settings:additional_components.mongo.value == false
-        weight: 20
-      mongo_db_name:
-        description: Mongo database name
-        label: Database name
-        regex:
-          error: Invalid database name
-          source: ^\w+$
-        type: text
-        value: ceilometer
-        weight: 30
-      mongo_password:
-        description: Mongo database password
-        label: Password
-        regex:
-          error: Password contains spaces
-          source: ^\S*$
-        type: password
-        value: ceilometer
-        weight: 30
-      mongo_replset:
-        description: Name for Mongo replication set
-        label: Replset
-        type: text
-        value: ''
-        weight: 30
-      mongo_user:
-        description: Mongo database username
-        label: Username
-        regex:
-          error: Empty username
-          source: ^\w+$
-        type: text
-        value: ceilometer
-        weight: 30
     external_ntp:
       metadata:
-        label: Host OS NTP Servers
+        label: Upstream NTP
         weight: 100
       ntp_list:
         description: List of upstream NTP servers, separated by comma
-        label: NTP server list
-        regex:
-          error: Invalid NTP server list
-          source: ^\s*(?:(?:\w+(?:-+\w+)*\.)+[a-z]+|\d{1,3}(?:\.\d{1,3}){3})\s*(?:,\s*(?:(?:\w+(?:-+\w+)*\.)+[a-z]+|\d{1,3}(\.\d{1,3}){3})\s*)*$
+        label: NTP servers list
         type: text
         value: 0.pool.ntp.org, 1.pool.ntp.org
         weight: 10
@@ -492,32 +515,15 @@ settings:
         description: Default kernel parameters
         label: Initial parameters
         type: text
-        value: console=ttyS0,9600 console=tty0 net.ifnames=0 biosdevname=0 rootdelay=90
-          nomodeset
+        value: console=ttyS0,9600 console=tty0 rootdelay=90 nomodeset
         weight: 45
       metadata:
         label: Kernel parameters
         weight: 40
-    murano_settings:
-      metadata:
-        label: Murano Settings
-        restrictions:
-        - action: hide
-          condition: settings:additional_components.murano.value == false
-        weight: 20
-      murano_repo_url:
-        description: ''
-        label: Murano Repository URL
-        type: text
-        value: http://storage.apps.openstack.org/
-        weight: 10
     neutron_mellanox:
       metadata:
         enabled: true
         label: Mellanox Neutron components
-        restrictions:
-        - action: hide
-          condition: not ('experimental' in version:feature_groups)
         toggleable: false
         weight: 50
       plugin:
@@ -532,8 +538,9 @@ settings:
           restrictions:
           - settings:storage.iser.value == true
         - data: drivers_only
-          description: If selected, Mellanox Ethernet drivers will be installed to support
-            networking over Mellanox NIC. Mellanox Neutron plugin will not be installed.
+          description: If selected, Mellanox Ethernet drivers will be installed to
+            support networking over Mellanox NIC. Mellanox Neutron plugin will not
+            be installed.
           label: Install only Mellanox drivers
           restrictions:
           - settings:common.libvirt_type.value != 'kvm'
@@ -554,77 +561,117 @@ settings:
         type: text
         value: '16'
         weight: 70
-    opendaylight:
+    nsx_plugin:
+      connector_type:
+        description: Default network transport type to use
+        label: NSX connector type
+        type: select
+        value: stt
+        values:
+        - data: gre
+          label: GRE
+        - data: ipsec_gre
+          label: GRE over IPSec
+        - data: stt
+          label: STT
+        - data: ipsec_stt
+          label: STT over IPSec
+        - data: bridge
+          label: Bridge
+        weight: 80
+      l3_gw_service_uuid:
+        description: UUID for the default L3 gateway service to use with this cluster
+        label: L3 service UUID
+        regex:
+          error: Invalid L3 gateway service UUID
+          source: '[a-f\d]{8}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{12}'
+        type: text
+        value: ''
+        weight: 50
       metadata:
-        enabled: true
-        label: OpenDaylight plugin
-        plugin_id: 1
+        enabled: false
+        label: VMware NSX
         restrictions:
-        - cluster:net_provider != 'neutron': Only neutron is supported by OpenDaylight
-        toggleable: true
-        weight: 70
-      rest_api_port:
-        description: Port on which ODL REST API will be available.
-        label: Port number
+        - action: hide
+          condition: cluster:net_provider != 'neutron' or networking_parameters:net_l23_provider
+            != 'nsx'
+        weight: 20
+      nsx_controllers:
+        description: One or more IPv4[:port] addresses of NSX controller node, separated
+          by comma (e.g. 10.30.30.2,192.168.110.254:443)
+        label: NSX controller endpoint
         regex:
-          error: Invalid port number
-          source: ^([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$
+          error: Invalid controller endpoints, specify valid IPv4[:port] pair
+          source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?(,(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?)*$
         type: text
-        value: '8282'
-        weight: 40
-      use_vxlan:
-        description: Configure neutron to use VXLAN tunneling
-        label: Use vxlan
-        restrictions:
-        - action: disable
-          condition: networking_parameters:segmentation_type == 'vlan'
-          message: Neutron with GRE segmentation required
-        type: checkbox
-        value: true
+        value: ''
+        weight: 60
+      nsx_password:
+        description: Password for Administrator
+        label: NSX password
+        regex:
+          error: Empty password
+          source: \S
+        type: password
+        value: ''
+        weight: 30
+      nsx_username:
+        description: NSX administrator's username
+        label: NSX username
+        regex:
+          error: Empty username
+          source: \S
+        type: text
+        value: admin
         weight: 20
-      vni_range_end:
-        description: VXLAN VNI IDs range end
-        label: VNI range end
+      packages_url:
+        description: URL to NSX specific packages
+        label: URL to NSX bits
         regex:
-          error: Invalid ID number
-          source: ^\d+$
-        restrictions:
-        - action: hide
-          condition: networking_parameters:segmentation_type == 'vlan'
+          error: Invalid URL, specify valid HTTP/HTTPS URL with IPv4 address (e.g.
+            http://10.20.0.2/nsx)
+          source: ^https?://(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?(/.*)?$
         type: text
-        value: '10000'
-        weight: 31
-      vni_range_start:
-        description: VXLAN VNI IDs range start
-        label: VNI range start
+        value: ''
+        weight: 70
+      replication_mode:
+        description: ''
+        label: NSX cluster has Service nodes
+        type: checkbox
+        value: true
+        weight: 90
+      transport_zone_uuid:
+        description: UUID of the pre-existing default NSX Transport zone
+        label: Transport zone UUID
         regex:
-          error: Invalid ID number
-          source: ^\d+$
-        restrictions:
-        - action: hide
-          condition: networking_parameters:segmentation_type == 'vlan'
+          error: Invalid transport zone UUID
+          source: '[a-f\d]{8}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{12}'
         type: text
-        value: '10'
-        weight: 30
+        value: ''
+        weight: 40
     provision:
       metadata:
         label: Provision
+        restrictions:
+        - action: hide
+          condition: not ('experimental' in version:feature_groups)
         weight: 80
       method:
         description: Which provision method to use for this cluster.
         label: Provision method
         type: radio
-        value: image
+        value: cobbler
         values:
         - data: image
           description: Copying pre-built images on a disk.
           label: Image
         - data: cobbler
           description: Install from scratch using anaconda or debian-installer.
-          label: (DEPRECATED) Classic (use anaconda or debian-installer)
+          label: Classic (use anaconda or debian-installer)
     public_network_assignment:
       assign_to_all_nodes:
-        description: When disabled, public network will be assigned to controllers only
+        description: When disabled, public network will be assigned to controllers
+          and zabbix-server only
         label: Assign public network to all nodes
         type: checkbox
         value: false
@@ -635,118 +682,42 @@ settings:
         - action: hide
           condition: cluster:net_provider != 'neutron'
         weight: 50
-    repo_setup:
-      metadata:
-        always_editable: true
-        label: Repositories
-        weight: 50
-      repos:
-        description: 'Please note: the first repository will be considered the operating
-          system mirror that will be used during node provisioning.
-
-          To create a local repository mirror on the Fuel master node, please follow
-          the instructions provided by running "fuel-createmirror --help" on the Fuel
-          master node.
-
-          Please make sure your Fuel master node has Internet access to the repository
-          before attempting to create a mirror.
-
-          For more details, please refer to the documentation (https://docs.mirantis.com/openstack/fuel/fuel-6.1/operations.html#external-ubuntu-ops).
-
-          '
-        extra_priority: null
-        type: custom_repo_configuration
-        value:
-        - name: ubuntu
-          priority: null
-          section: main
-          suite: trusty
-          type: deb
-          uri: http://10.20.0.2:8080/ubuntu-part
-        - name: ubuntu-updates
-          priority: null
-          section: main
-          suite: trusty
-          type: deb
-          uri: http://10.20.0.2:8080/ubuntu-part
-        - name: ubuntu-security
-          priority: null
-          section: main
-          suite: trusty
-          type: deb
-          uri: http://10.20.0.2:8080/ubuntu-part
-        - name: mos
-          priority: 1050
-          section: main restricted
-          suite: mos6.1
-          type: deb
-          uri: http://10.20.0.2:8080/2014.2.2-6.1/ubuntu/x86_64
-        - name: mos-updates
-          priority: 1050
-          section: main restricted
-          suite: mos6.1-updates
-          type: deb
-          uri: http://10.20.0.2:8080/mos-ubuntu
-        - name: mos-security
-          priority: 1050
-          section: main restricted
-          suite: mos6.1-security
-          type: deb
-          uri: http://10.20.0.2:8080/mos-ubuntu
-        - name: mos-holdback
-          priority: 1100
-          section: main restricted
-          suite: mos6.1-holdback
-          type: deb
-          uri: http://10.20.0.2:8080/mos-ubuntu
-        - name: Auxiliary
-          priority: 1150
-          section: main restricted
-          suite: auxiliary
-          type: deb
-          uri: http://10.20.0.2:8080/2014.2.2-6.1/ubuntu/auxiliary
     storage:
       ephemeral_ceph:
-        description: Configures Nova to store ephemeral volumes in RBD. This works best
-          if Ceph is enabled for volumes and images, too. Enables live migration of
-          all types of Ceph backed VMs (without this option, live migration will only
-          work with VMs launched from Cinder volumes).
+        description: Configures Nova to store ephemeral volumes in RBD. This works
+          best if Ceph is enabled for volumes and images, too. Enables live migration
+          of all types of Ceph backed VMs (without this option, live migration will
+          only work with VMs launched from Cinder volumes).
         label: Ceph RBD for ephemeral volumes (Nova)
+        restrictions:
+        - settings:common.libvirt_type.value == 'vcenter'
         type: checkbox
-        value: true
+        value: false
         weight: 75
       images_ceph:
         description: Configures Glance to use the Ceph RBD backend to store images.
           If enabled, this option will prevent Swift from installing.
         label: Ceph RBD for images (Glance)
-        restrictions:
-        - settings:storage.images_vcenter.value == true: Only one Glance backend could
-            be selected.
         type: checkbox
-        value: true
+        value: false
         weight: 30
       images_vcenter:
         description: Configures Glance to use the vCenter/ESXi backend to store images.
           If enabled, this option will prevent Swift from installing.
         label: VMWare vCenter/ESXi datastore for images (Glance)
         restrictions:
-        - action: hide
-          condition: settings:common.use_vcenter.value != true
-        - condition: settings:storage.images_ceph.value == true
-          message: Only one Glance backend could be selected.
+        - settings:common.libvirt_type.value != 'vcenter'
         type: checkbox
         value: false
         weight: 35
       iser:
         description: 'High performance block storage: Cinder volumes over iSER protocol
-          (iSCSI over RDMA). This feature requires SR-IOV capabilities in the NIC, and
-          will use a dedicated virtual function for the storage network.'
+          (iSCSI over RDMA). This feature requires SR-IOV capabilities in the NIC,
+          and will use a dedicated virtual function for the storage network.'
         label: iSER protocol for volumes (Cinder)
         restrictions:
         - settings:storage.volumes_lvm.value != true or settings:common.libvirt_type.value
           != 'kvm'
-        - action: hide
-          condition: not ('experimental' in version:feature_groups)
         type: checkbox
         value: false
         weight: 11
@@ -764,31 +735,123 @@ settings:
         weight: 80
       osd_pool_size:
         description: Configures the default number of object replicas in Ceph. This
-          number must be equal to or lower than the number of deployed 'Storage - Ceph
-          OSD' nodes.
+          number must be equal to or lower than the number of deployed 'Storage -
+          Ceph OSD' nodes.
         label: Ceph object replication factor
         regex:
           error: Invalid number
           source: ^[1-9]\d*$
+        restrictions:
+        - settings:common.libvirt_type.value == 'vcenter'
         type: text
         value: '2'
         weight: 85
+      vc_datacenter:
+        description: Inventory path to a datacenter. If you want to use ESXi host
+          as datastore, it should be "ha-datacenter".
+        label: Datacenter name
+        regex:
+          error: Empty datacenter
+          source: \S
+        restrictions:
+        - action: hide
+          condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+            != 'vcenter'
+        type: text
+        value: ''
+        weight: 65
+      vc_datastore:
+        description: Datastore associated with the datacenter.
+        label: Datastore name
+        regex:
+          error: Empty datastore
+          source: \S
+        restrictions:
+        - action: hide
+          condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+            != 'vcenter'
+        type: text
+        value: ''
+        weight: 60
+      vc_host:
+        description: IP Address of vCenter/ESXi
+        label: vCenter/ESXi IP
+        regex:
+          error: Specify valid IPv4 address
+          source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])$
+        restrictions:
+        - action: hide
+          condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+            != 'vcenter'
+        type: text
+        value: ''
+        weight: 45
+      vc_image_dir:
+        description: The name of the directory where the glance images will be stored
+          in the VMware datastore.
+        label: Datastore Images directory
+        regex:
+          error: Empty images directory
+          source: \S
+        restrictions:
+        - action: hide
+          condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+            != 'vcenter'
+        type: text
+        value: /openstack_glance
+        weight: 70
+      vc_password:
+        description: vCenter/ESXi admin password
+        label: Password
+        regex:
+          error: Empty password
+          source: \S
+        restrictions:
+        - action: hide
+          condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+            != 'vcenter'
+        type: password
+        value: ''
+        weight: 55
+      vc_user:
+        description: vCenter/ESXi admin username
+        label: Username
+        regex:
+          error: Empty username
+          source: \S
+        restrictions:
+        - action: hide
+          condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+            != 'vcenter'
+        type: text
+        value: ''
+        weight: 50
       volumes_ceph:
         description: Configures Cinder to store volumes in Ceph RBD images.
         label: Ceph RBD for volumes (Cinder)
         restrictions:
-        - settings:storage.volumes_lvm.value == true
+        - settings:storage.volumes_lvm.value == true or settings:common.libvirt_type.value
+          == 'vcenter'
         type: checkbox
-        value: true
+        value: false
         weight: 20
       volumes_lvm:
-        description: It is recommended to have at least one Storage - Cinder LVM node.
+        description: Requires at least one Storage - Cinder LVM node.
         label: Cinder LVM over iSCSI for volumes
         restrictions:
         - settings:storage.volumes_ceph.value == true
         type: checkbox
-        value: false
+        value: true
         weight: 10
+      volumes_vmdk:
+        description: Configures Cinder to store volumes via VMware vCenter.
+        label: VMware vCenter for volumes (Cinder)
+        restrictions:
+        - settings:common.libvirt_type.value != 'vcenter' or settings:storage.volumes_lvm.value
+          == true
+        type: checkbox
+        value: false
+        weight: 15
     syslog:
       metadata:
         label: Syslog
@@ -820,22 +883,94 @@ settings:
           description: ''
           label: TCP
         weight: 30
-    workloads_collector:
-      enabled:
+    vcenter:
+      cluster:
+        description: vCenter cluster name. If you have multiple clusters, use comma
+          to separate names
+        label: Cluster
+        regex:
+          error: Invalid cluster list
+          source: ^([^,\ ]+([\ ]*[^,\ ])*)(,[^,\ ]+([\ ]*[^,\ ])*)*$
+        type: text
+        value: ''
+        weight: 40
+      datastore_regex:
+        description: The Datastore regexp setting specifies the data stores to use
+          with Compute. For example, "nas.*". If you want to use all available datastores,
+          leave this field blank
+        label: Datastore regexp
+        regex:
+          error: Invalid datastore regexp
+          source: ^(\S.*\S|\S|)$
+        type: text
+        value: ''
+        weight: 50
+      host_ip:
+        description: IP Address of vCenter
+        label: vCenter IP
+        regex:
+          error: Specify valid IPv4 address
+          source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])$
+        type: text
+        value: ''
+        weight: 10
+      metadata:
+        label: vCenter
+        restrictions:
+        - action: hide
+          condition: settings:common.libvirt_type.value != 'vcenter'
+        weight: 20
+      use_vcenter:
+        description: ''
+        label: ''
         type: hidden
         value: true
+        weight: 5
+      vc_password:
+        description: vCenter admin password
+        label: Password
+        regex:
+          error: Empty password
+          source: \S
+        type: password
+        value: admin
+        weight: 30
+      vc_user:
+        description: vCenter admin username
+        label: Username
+        regex:
+          error: Empty username
+          source: \S
+        type: text
+        value: admin
+        weight: 20
+      vlan_interface:
+        description: Physical ESXi host ethernet adapter for VLAN networking (e.g.
+          vmnic1). If empty "vmnic0" is used by default
+        label: ESXi VLAN interface
+        restrictions:
+        - action: hide
+          condition: cluster:net_provider != 'nova_network' or networking_parameters:net_manager
+            != 'VlanManager'
+        type: text
+        value: ''
+        weight: 60
+    zabbix:
       metadata:
-        label: Workloads Collector User
+        label: Zabbix Access
         restrictions:
         - action: hide
-          condition: 'true'
-        weight: 10
+          condition: not ('experimental' in version:feature_groups)
+        weight: 70
       password:
+        description: Password for Zabbix Administrator
+        label: password
         type: password
-        value: pBkLbu1k
-      tenant:
-        type: text
-        value: services
-      user:
+        value: zabbix
+        weight: 20
+      username:
+        description: Username for Zabbix Administrator
+        label: username
         type: text
-        value: fuel_stats_user
+        value: admin
+        weight: 10
diff --git a/fuel/deploy/libvirt/dha.yaml b/fuel/deploy/libvirt/dha.yaml
new file mode 100644 (file)
index 0000000..ce61e53
--- /dev/null
@@ -0,0 +1,80 @@
+title: Deployment Hardware Adapter (DHA)
+# DHA API version supported
+version: 1.1
+created: Sat Apr 25 16:26:22 UTC 2015
+comment: Small libvirt setup
+
+# Adapter to use for this definition
+adapter: libvirt
+
+# Node list.
+# Mandatory fields are id and role.
+# The MAC address of the PXE boot interface is not mandatory
+#   to be set, but the field must be present.
+# All other fields are adapter specific.
+
+nodes:
+- id: 1
+  pxeMac: 52:54:00:aa:dd:84
+  libvirtName: controller1
+  libvirtTemplate: controller
+  role: controller
+- id: 2
+  pxeMac: 52:54:00:aa:dd:84
+  libvirtName: controller2
+  libvirtTemplate: controller
+  role: controller
+- id: 3
+  pxeMac: 52:54:00:aa:dd:84
+  libvirtName: controller3
+  libvirtTemplate: controller
+  role: controller
+- id: 4
+  pxeMac: 52:54:00:41:64:f3
+  libvirtName: compute1
+  libvirtTemplate: compute
+  role: compute
+- id: 5
+  pxeMac: 52:54:00:69:a0:79
+  libvirtName: compute2
+  libvirtTemplate: compute
+  role: compute
+- id: 6
+  pxeMac: 52:54:00:69:a0:79
+  libvirtName: compute3
+  libvirtTemplate: compute
+  role: compute
+- id: 7
+  pxeMac: 52:54:00:f8:b0:75
+  libvirtName: fuel-master
+  libvirtTemplate: fuel-master
+  isFuel: yes
+  nodeCanZeroMBR: yes
+  nodeCanSetBootOrderLive: yes
+  username: root
+  password: r00tme
+
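The rules stated in the node-list comment above (id and role set, the pxeMac key present even when its value is empty) could be checked up front along these lines; check_dha_nodes is a hypothetical helper for illustration only, not part of the deploy scripts:

    def check_dha_nodes(nodes):
        # Illustrative validation of the DHA node list rules described above.
        for node in nodes:
            if 'id' not in node or not ('role' in node or node.get('isFuel')):
                raise ValueError('node is missing a mandatory field: %r' % node)
            if 'pxeMac' not in node:
                # The MAC value may be left empty, but the key itself must exist.
                raise ValueError('node %s: pxeMac key is missing' % node['id'])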
+disks:
+  fuel: 30G
+  controller: 30G
+  compute: 30G
+
+# Deployment power on strategy
+# all:      Turn on all nodes at once. There will be no correlation
+#           between the DHA and DEA node numbering. MAC addresses
+#           will be used to select the node roles though.
+# sequence: Turn on the nodes in sequence starting with the lowest order
+#           node and wait for the node to be detected by Fuel. Not until
+#           the node has been detected and assigned a role will the next
+#           node be turned on.
+powerOnStrategy: all
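The two strategies above differ only in how power-on is interleaved with Fuel's node discovery. A minimal sketch of that dispatch is given below; power_on and wait_for_discovery are stand-in callables used for illustration, not actual DHA adapter calls:

    def power_on_cluster(node_ids, strategy, power_on, wait_for_discovery):
        # Illustrative only: power_on(node_id) and wait_for_discovery(node_id)
        # are caller-supplied callables standing in for the real DHA operations.
        if strategy == 'all':
            # Turn on every node at once; roles are later matched via MAC address.
            for node_id in node_ids:
                power_on(node_id)
        elif strategy == 'sequence':
            # Turn on nodes lowest id first and wait for Fuel to detect each one
            # before powering on the next, so DHA and DEA numbering stay aligned.
            for node_id in sorted(node_ids):
                power_on(node_id)
                wait_for_discovery(node_id)
        else:
            raise ValueError('Unknown powerOnStrategy: %s' % strategy)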
+
+# If fuelCustomInstall is set to true, Fuel is assumed to be installed by
+# calling the DHA adapter function "dha_fuelCustomInstall()"  with two
+# arguments: node ID and the ISO file name to deploy. The custom install
+# function is then to handle all necessary logic to boot the Fuel master
+# from the ISO and then return.
+# Allowed values: true, false
+
+fuelCustomInstall: false
+
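When fuelCustomInstall is true, the deployer is expected to hand the whole boot-from-ISO step to the adapter hook named above instead of running its built-in flow. A rough sketch of such a dispatch, assuming PyYAML is available; builtin_install and custom_install are stand-ins for the two flows and do not reproduce the real dha_fuelCustomInstall() signature:

    import io
    import yaml

    def install_fuel(dha_path, node_id, iso_file, builtin_install, custom_install):
        # Illustrative only: read the DHA file and branch on fuelCustomInstall.
        with io.open(dha_path) as f:
            dha = yaml.safe_load(f)
        if dha.get('fuelCustomInstall', False):
            # The adapter boots the Fuel master from the ISO itself and returns.
            custom_install(node_id, iso_file)
        else:
            builtin_install(node_id, iso_file)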
similarity index 98%
rename from fuel/deploy/templates/virtual_environment/vms/compute.xml
rename to fuel/deploy/libvirt/vms/compute
index fbef4bd..7591509 100644 (file)
@@ -1,5 +1,5 @@
 <domain type='kvm'>
-  <name>compute</name>
+  <name>compute4</name>
   <memory unit='KiB'>8388608</memory>
   <currentMemory unit='KiB'>8388608</currentMemory>
   <vcpu placement='static'>2</vcpu>
@@ -7,7 +7,7 @@
     <type arch='x86_64' machine='pc-1.0'>hvm</type>
     <boot dev='network'/>
     <boot dev='hd'/>
-    <bios rebootTimeout='30000'/>
+    <bootmenu enable='yes'/>
   </os>
   <features>
     <acpi/>
@@ -1,13 +1,12 @@
 <domain type='kvm'>
-  <name>controller</name>
-  <memory unit='KiB'>8388608</memory>
-  <currentMemory unit='KiB'>8388608</currentMemory>
+  <name>controller1</name>
+  <memory unit='KiB'>2097152</memory>
+  <currentMemory unit='KiB'>2097152</currentMemory>
   <vcpu placement='static'>2</vcpu>
   <os>
     <type arch='x86_64' machine='pc-1.0'>hvm</type>
     <boot dev='network'/>
     <boot dev='hd'/>
-    <bios rebootTimeout='30000'/>
   </os>
   <features>
     <acpi/>
similarity index 99%
rename from fuel/deploy/templates/virtual_environment/vms/fuel.xml
rename to fuel/deploy/libvirt/vms/fuel-master
index 1a32860..f4e652b 100644 (file)
@@ -1,5 +1,5 @@
 <domain type='kvm'>
-  <name>fuel</name>
+  <name>fuel-master</name>
   <memory unit='KiB'>2097152</memory>
   <currentMemory unit='KiB'>2097152</currentMemory>
   <vcpu placement='static'>2</vcpu>
diff --git a/fuel/deploy/reap.py b/fuel/deploy/reap.py
deleted file mode 100644 (file)
index c72b33c..0000000
+++ /dev/null
@@ -1,339 +0,0 @@
-###############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# szilard.cserey@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-###############################################################################
-
-
-import common
-import time
-import os
-import yaml
-import glob
-import shutil
-
-N = common.N
-E = common.E
-R = common.R
-ArgParser = common.ArgParser
-exec_cmd = common.exec_cmd
-parse = common.parse
-err = common.err
-log = common.log
-delete = common.delete
-commafy = common.commafy
-
-DEA_1 = '''
-title: Deployment Environment Adapter (DEA)
-# DEA API version supported
-version: 1.1
-created: {date}
-comment: {comment}
-'''
-
-DHA_1 = '''
-title: Deployment Hardware Adapter (DHA)
-# DHA API version supported
-version: 1.1
-created: {date}
-comment: {comment}
-
-# Adapter to use for this definition
-# adapter: [ipmi|libvirt]
-adapter:
-
-# Node list.
-# Mandatory properties are id and role.
-# All other properties are adapter specific.
-# For Non-Fuel nodes controlled by:
-#   - ipmi adapter you need to provide:
-#       pxeMac
-#       ipmiIp
-#       ipmiUser
-#       ipmiPass
-#   - libvirt adapter you need to provide:
-#       libvirtName: <whatever>
-#       libvirtTemplate: [libvirt/vms/controller.xml | libvirt/vms/compute.xml]
-#
-# For the Fuel Node you need to provide:
-#       libvirtName: <whatever>
-#       libvirtTemplate: libvirt/vms/fuel.xml
-#       isFuel: yes
-#       username: root
-#       password: r00tme
-'''
-
-DHA_2 = '''
-# Adding the Fuel node as node id {node_id}
-# which may not be correct - please adjust as needed.
-'''
-
-DISKS = {'fuel': '30G',
-         'controller': '30G',
-         'compute': '30G'}
-
-
-class Reap(object):
-
-    def __init__(self, dea_file, dha_file, comment):
-        self.dea_file = dea_file
-        self.dha_file = dha_file
-        self.comment = comment
-        self.temp_dir = None
-        self.env = None
-        self.env_id = None
-        self.last_node = None
-
-    def get_env(self):
-        env_list = parse(exec_cmd('fuel env'))
-        if len(env_list) > 1:
-            err('Not exactly one environment')
-        self.env = env_list[0]
-        self.env_id = self.env[E['id']]
-
-    def download_config(self, config_type):
-        log('Download %s config for environment %s'
-            % (config_type, self.env_id))
-        exec_cmd('fuel %s --env %s --download --dir %s'
-                 % (config_type, self.env_id, self.temp_dir))
-
-    def write(self, file, text, newline=True):
-        mode = 'a' if os.path.isfile(file) else 'w'
-        with open(file, mode) as f:
-            f.write('%s%s' % (text, ('\n' if newline else '')))
-
-    def write_yaml(self, file, data, newline=True):
-        self.write(file, yaml.dump(data, default_flow_style=False).strip(),
-                   newline)
-
-    def get_node_by_id(self, node_list, node_id):
-        for node in node_list:
-            if node[N['id']] == node_id:
-                return node
-
-    def reap_interface(self, node_id, interfaces):
-        interface, mac = self.get_interface(node_id)
-        if_name = None
-        if interfaces:
-            if_name = self.check_dict_exists(interfaces, interface)
-        if not if_name:
-            if_name = 'interfaces_%s' % str(len(interfaces) + 1)
-            interfaces[if_name] = interface
-        return if_name, mac
-
-    def reap_transformation(self, node_id, roles, transformations):
-        main_role = 'controller' if 'controller' in roles else 'compute'
-        node_file = glob.glob('%s/deployment_%s/*%s_%s.yaml'
-                              % (self.temp_dir, self.env_id,
-                                 main_role, node_id))
-        tr_name = None
-        with open(node_file[0]) as f:
-            node_config = yaml.load(f)
-        transformation = {'transformations':
-                              node_config['network_scheme']['transformations']}
-        if transformations:
-            tr_name = self.check_dict_exists(transformations, transformation)
-        if not tr_name:
-            tr_name = 'transformations_%s' % str(len(transformations) + 1)
-            transformations[tr_name] = transformation
-        return tr_name
-
-    def check_dict_exists(self, main_dict, dict):
-        for key, val in main_dict.iteritems():
-            if cmp(dict, val) == 0:
-                return key
-
-    def reap_nodes_interfaces_transformations(self):
-        node_list = parse(exec_cmd('fuel node'))
-        real_node_ids = [node[N['id']] for node in node_list]
-        real_node_ids.sort()
-        min_node = real_node_ids[0]
-
-        interfaces = {}
-        transformations = {}
-        dea_nodes = []
-        dha_nodes = []
-
-        for real_node_id in real_node_ids:
-            node_id = int(real_node_id) - int(min_node) + 1
-            self.last_node = node_id
-            node = self.get_node_by_id(node_list, real_node_id)
-            roles = commafy(node[N['roles']])
-            if not roles:
-                err('Fuel Node %s has no role' % real_node_id)
-            dea_node = {'id': node_id,
-                        'role': roles}
-            dha_node = {'id': node_id}
-            if_name, mac = self.reap_interface(real_node_id, interfaces)
-            tr_name = self.reap_transformation(real_node_id, roles,
-                                               transformations)
-            dea_node.update(
-                {'interfaces': if_name,
-                 'transformations': tr_name})
-
-            dha_node.update(
-                {'pxeMac': mac if mac else None,
-                 'ipmiIp': None,
-                 'ipmiUser': None,
-                 'ipmiPass': None,
-                 'libvirtName': None,
-                 'libvirtTemplate': None})
-
-            dea_nodes.append(dea_node)
-            dha_nodes.append(dha_node)
-
-        self.write_yaml(self.dha_file, {'nodes': dha_nodes}, False)
-        self.write_yaml(self.dea_file, {'nodes': dea_nodes})
-        self.write_yaml(self.dea_file, interfaces)
-        self.write_yaml(self.dea_file, transformations)
-        self.reap_fuel_node_info()
-        self.write_yaml(self.dha_file, {'disks': DISKS})
-
-    def reap_fuel_node_info(self):
-        dha_nodes = []
-        dha_node = {
-            'id': self.last_node + 1,
-            'libvirtName': None,
-            'libvirtTemplate': None,
-            'isFuel': True,
-            'username': 'root',
-            'password': 'r00tme'}
-
-        dha_nodes.append(dha_node)
-
-        self.write(self.dha_file, DHA_2.format(node_id=dha_node['id']), False)
-        self.write_yaml(self.dha_file, dha_nodes)
-
-    def reap_environment_info(self):
-        network_file = ('%s/network_%s.yaml'
-                        % (self.temp_dir, self.env_id))
-        network = self.read_yaml(network_file)
-        env = {'environment':
-                   {'name': self.env[E['name']],
-                    'mode': self.env[E['mode']],
-                    'net_segment_type':
-                        network['networking_parameters']['segmentation_type']}}
-        self.write_yaml(self.dea_file, env)
-        wanted_release = None
-        rel_list = parse(exec_cmd('fuel release'))
-        for rel in rel_list:
-            if rel[R['id']] == self.env[E['release_id']]:
-                wanted_release = rel[R['name']]
-        self.write_yaml(self.dea_file, {'wanted_release': wanted_release})
-
-    def reap_fuel_settings(self):
-        data = self.read_yaml('/etc/fuel/astute.yaml')
-        fuel = {}
-        del data['ADMIN_NETWORK']['mac']
-        del data['ADMIN_NETWORK']['interface']
-        for key in ['ADMIN_NETWORK', 'HOSTNAME', 'DNS_DOMAIN', 'DNS_SEARCH',
-                    'DNS_UPSTREAM', 'NTP1', 'NTP2', 'NTP3', 'FUEL_ACCESS']:
-            fuel[key] = data[key]
-        for key in fuel['ADMIN_NETWORK'].keys():
-            if key not in ['ipaddress', 'netmask',
-                           'dhcp_pool_start', 'dhcp_pool_end']:
-                del fuel['ADMIN_NETWORK'][key]
-        self.write_yaml(self.dea_file, {'fuel': fuel})
-
-    def reap_network_settings(self):
-        network_file = ('%s/network_%s.yaml'
-                        % (self.temp_dir, self.env_id))
-        data = self.read_yaml(network_file)
-        network = {}
-        network['networking_parameters'] = data['networking_parameters']
-        network['networks'] = data['networks']
-        for net in network['networks']:
-            del net['id']
-            del net['group_id']
-        self.write_yaml(self.dea_file, {'network': network})
-
-    def reap_settings(self):
-        settings_file = '%s/settings_%s.yaml' % (self.temp_dir, self.env_id)
-        settings = self.read_yaml(settings_file)
-        self.write_yaml(self.dea_file, {'settings': settings})
-
-    def get_interface(self, real_node_id):
-        exec_cmd('fuel node --node-id %s --network --download --dir %s'
-                 % (real_node_id, self.temp_dir))
-        interface_file = ('%s/node_%s/interfaces.yaml'
-                          % (self.temp_dir, real_node_id))
-        interfaces = self.read_yaml(interface_file)
-        interface_config = {}
-        pxe_mac = None
-        for interface in interfaces:
-            networks = []
-            for network in interface['assigned_networks']:
-                networks.append(network['name'])
-                if network['name'] == 'fuelweb_admin':
-                    pxe_mac = interface['mac']
-            if networks:
-                interface_config[interface['name']] = networks
-        return interface_config, pxe_mac
-
-    def read_yaml(self, yaml_file):
-        with open(yaml_file) as f:
-            data = yaml.load(f)
-            return data
-
-    def intro(self):
-        delete(self.dea_file)
-        delete(self.dha_file)
-        self.temp_dir = exec_cmd('mktemp -d')
-        date = time.strftime('%c')
-        self.write(self.dea_file,
-                   DEA_1.format(date=date, comment=self.comment), False)
-        self.write(self.dha_file,
-                   DHA_1.format(date=date, comment=self.comment))
-        self.get_env()
-        self.download_config('deployment')
-        self.download_config('settings')
-        self.download_config('network')
-
-    def finale(self):
-        log('DEA file is available at %s' % self.dea_file)
-        log('DHA file is available at %s (this is just a template)'
-            % self.dha_file)
-        shutil.rmtree(self.temp_dir)
-
-    def reap(self):
-        self.intro()
-        self.reap_environment_info()
-        self.reap_nodes_interfaces_transformations()
-        self.reap_fuel_settings()
-        self.reap_network_settings()
-        self.reap_settings()
-        self.finale()
-
-
-def usage():
-    print '''
-    Usage:
-    python reap.py <dea_file> <dha_file> <comment>
-    '''
-
-
-def parse_arguments():
-    parser = ArgParser(prog='python %s' % __file__)
-    parser.add_argument('dea_file', nargs='?', action='store',
-                        default='dea.yaml',
-                        help='Deployment Environment Adapter: dea.yaml')
-    parser.add_argument('dha_file', nargs='?', action='store',
-                        default='dha.yaml',
-                        help='Deployment Hardware Adapter: dha.yaml')
-    parser.add_argument('comment', nargs='?', action='store', help='Comment')
-    args = parser.parse_args()
-    return (args.dea_file, args.dha_file, args.comment)
-
-
-def main():
-    dea_file, dha_file, comment = parse_arguments()
-
-    r = Reap(dea_file, dha_file, comment)
-    r.reap()
-
-
-if __name__ == '__main__':
-    main()
diff --git a/fuel/deploy/setup_environment.py b/fuel/deploy/setup_environment.py
new file mode 100644 (file)
index 0000000..4e0e7ba
--- /dev/null
@@ -0,0 +1,165 @@
+import sys
+from lxml import etree
+import os
+import glob
+import common
+
+from dha import DeploymentHardwareAdapter
+
+exec_cmd = common.exec_cmd
+err = common.err
+log = common.log
+check_dir_exists = common.check_dir_exists
+check_file_exists = common.check_file_exists
+check_if_root = common.check_if_root
+
+
+class LibvirtEnvironment(object):
+
+    def __init__(self, storage_dir, dha_file):
+        self.dha = DeploymentHardwareAdapter(dha_file)
+        self.storage_dir = storage_dir
+        self.parser = etree.XMLParser(remove_blank_text=True)
+        self.file_dir = os.path.dirname(os.path.realpath(__file__))
+        self.network_dir = '%s/libvirt/networks' % self.file_dir
+        self.vm_dir = '%s/libvirt/vms' % self.file_dir
+        self.node_ids = self.dha.get_all_node_ids()
+        self.fuel_node_id = self.dha.get_fuel_node_id()
+        self.net_names = self.collect_net_names()
+
+    def create_storage(self, node_id, disk_path, disk_sizes):
+        if node_id == self.fuel_node_id:
+            disk_size = disk_sizes['fuel']
+        else:
+            role = self.dha.get_node_role(node_id)
+            disk_size = disk_sizes[role]
+        exec_cmd('fallocate -l %s %s' % (disk_size, disk_path))
+
+    def create_vms(self):
+        temp_dir = exec_cmd('mktemp -d')
+        disk_sizes = self.dha.get_disks()
+        for node_id in self.node_ids:
+            vm_name = self.dha.get_node_property(node_id, 'libvirtName')
+            vm_template = self.dha.get_node_property(node_id,
+                                                     'libvirtTemplate')
+            disk_path = '%s/%s.raw' % (self.storage_dir, vm_name)
+            self.create_storage(node_id, disk_path, disk_sizes)
+            self.define_vm(vm_name, vm_template, temp_dir, disk_path)
+        exec_cmd('rm -fr %s' % temp_dir)
+
+    def define_vm(self, vm_name, vm_template, temp_dir, disk_path):
+        log('Creating VM %s with disks %s' % (vm_name, disk_path))
+        temp_vm_file = '%s/%s' % (temp_dir, vm_name)
+        exec_cmd('cp %s/%s %s' % (self.vm_dir, vm_template, temp_vm_file))
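+        # Customize the copied template: set the domain name, drop any
+        # hard-coded UUID and point every disk <source> at the new image file.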
+        with open(temp_vm_file) as f:
+            vm_xml = etree.parse(f)
+            names = vm_xml.xpath('/domain/name')
+            for name in names:
+                name.text = vm_name
+            uuids = vm_xml.xpath('/domain/uuid')
+            for uuid in uuids:
+                uuid.getparent().remove(uuid)
+            disks = vm_xml.xpath('/domain/devices/disk')
+            for disk in disks:
+                sources = disk.xpath('source')
+                for source in sources:
+                    source.set('file', disk_path)
+        with open(temp_vm_file, 'w') as f:
+            vm_xml.write(f, pretty_print=True, xml_declaration=True)
+        exec_cmd('virsh define %s' % temp_vm_file)
+
+    def create_networks(self):
+        for net_file in glob.glob('%s/*' % self.network_dir):
+            exec_cmd('virsh net-define %s' % net_file)
+        for net in self.net_names:
+            log('Creating network %s' % net)
+            exec_cmd('virsh net-autostart %s' % net)
+            exec_cmd('virsh net-start %s' % net)
+
+    def delete_networks(self):
+        for net in self.net_names:
+            log('Deleting network %s' % net)
+            exec_cmd('virsh net-destroy %s' % net, False)
+            exec_cmd('virsh net-undefine %s' % net, False)
+
+    def get_net_name(self, net_file):
+        net_name = None
+        with open(net_file) as f:
+            net_xml = etree.parse(f)
+            name_list = net_xml.xpath('/network/name')
+            for name in name_list:
+                net_name = name.text
+        return net_name
+
+    def collect_net_names(self):
+        net_list = []
+        for net_file in glob.glob('%s/*' % self.network_dir):
+            name = self.get_net_name(net_file)
+            net_list.append(name)
+        return net_list
+
+    def delete_vms(self):
+        for node_id in self.node_ids:
+            vm_name = self.dha.get_node_property(node_id, 'libvirtName')
+            r, c = exec_cmd('virsh dumpxml %s' % vm_name, False)
+            if c > 0:
+                log(r)
+                continue
+            self.undefine_vm_delete_disk(r, vm_name)
+
+    def undefine_vm_delete_disk(self, printout, vm_name):
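+        # Parse the 'virsh dumpxml' output to collect the backing disk files,
+        # then destroy and undefine the domain and remove those files.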
+        disk_files = []
+        xml_dump = etree.fromstring(printout, self.parser)
+        disks = xml_dump.xpath('/domain/devices/disk')
+        for disk in disks:
+            sources = disk.xpath('source')
+            for source in sources:
+                source_file = source.get('file')
+                if source_file:
+                    disk_files.append(source_file)
+        log('Deleting VM %s with disks %s' % (vm_name, disk_files))
+        exec_cmd('virsh destroy %s' % vm_name, False)
+        exec_cmd('virsh undefine %s' % vm_name, False)
+        for file in disk_files:
+            exec_cmd('rm -f %s' % file)
+
+    def setup_environment(self):
+        check_if_root()
+        check_dir_exists(self.network_dir)
+        check_dir_exists(self.vm_dir)
+        self.cleanup_environment()
+        self.create_vms()
+        self.create_networks()
+
+    def cleanup_environment(self):
+        self.delete_vms()
+        self.delete_networks()
+
+
+def usage():
+    print '''
+    Usage:
+    python setup_environment.py <storage_directory> <dha_file>
+
+    Example:
+            python setup_environment.py /mnt/images dha.yaml
+    '''
+
+def parse_arguments():
+    if len(sys.argv) != 3:
+        log('Incorrect number of arguments')
+        usage()
+        sys.exit(1)
+    storage_dir = sys.argv[-2]
+    dha_file = sys.argv[-1]
+    check_dir_exists(storage_dir)
+    check_file_exists(dha_file)
+    return storage_dir, dha_file
+
+def main():
+    storage_dir, dha_file = parse_arguments()
+
+    virt = LibvirtEnvironment(storage_dir, dha_file)
+    virt.setup_environment()
+
+if __name__ == '__main__':
+    main()
\ No newline at end of file
diff --git a/fuel/deploy/setup_vfuel.py b/fuel/deploy/setup_vfuel.py
new file mode 100644 (file)
index 0000000..65ee013
--- /dev/null
@@ -0,0 +1,143 @@
+import sys
+from lxml import etree
+import os
+
+import common
+from dha import DeploymentHardwareAdapter
+
+exec_cmd = common.exec_cmd
+err = common.err
+log = common.log
+check_dir_exists = common.check_dir_exists
+check_file_exists = common.check_file_exists
+check_if_root = common.check_if_root
+
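+# Interface stanza appended to /etc/network/interfaces by setup_networking()
+# when the vfuelnet bridge is not already configured.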
+VFUELNET = '''
+iface vfuelnet inet static
+        bridge_ports em1
+        address 10.40.0.1
+        netmask 255.255.255.0
+        pre-down iptables -t nat -D POSTROUTING --out-interface p1p1.20 -j MASQUERADE  -m comment --comment "vfuelnet"
+        pre-down iptables -D FORWARD --in-interface vfuelnet --out-interface p1p1.20 -m comment --comment "vfuelnet"
+        post-up iptables -t nat -A POSTROUTING --out-interface p1p1.20 -j MASQUERADE  -m comment --comment "vfuelnet"
+        post-up iptables -A FORWARD --in-interface vfuelnet --out-interface p1p1.20 -m comment --comment "vfuelnet"
+'''
+VM_DIR = 'baremetal/vm'
+FUEL_DISK_SIZE = '30G'
+IFACE = 'vfuelnet'
+INTERFACE_CONFIG = '/etc/network/interfaces'
+
+class VFuel(object):
+
+    def __init__(self, storage_dir, dha_file):
+        self.dha = DeploymentHardwareAdapter(dha_file)
+        self.storage_dir = storage_dir
+        self.parser = etree.XMLParser(remove_blank_text=True)
+        self.fuel_node_id = self.dha.get_fuel_node_id()
+        self.file_dir = os.path.dirname(os.path.realpath(__file__))
+        self.vm_dir = '%s/%s' % (self.file_dir, VM_DIR)
+
+    def setup_environment(self):
+        check_if_root()
+        check_dir_exists(self.vm_dir)
+        self.setup_networking()
+        self.delete_vm()
+        self.create_vm()
+
+    def setup_networking(self):
+        with open(INTERFACE_CONFIG) as f:
+            data = f.read()
+        if VFUELNET not in data:
+            log('Appending to file %s:\n %s' % (INTERFACE_CONFIG, VFUELNET))
+            with open(INTERFACE_CONFIG, 'a') as f:
+                f.write('\n%s\n' % VFUELNET)
+            if exec_cmd('ip link show | grep %s' % IFACE):
+                log('Bring DOWN interface %s' % IFACE)
+                exec_cmd('ifdown %s' % IFACE, False)
+            log('Bring UP interface %s' % IFACE)
+            exec_cmd('ifup %s' % IFACE, False)
+
+    def delete_vm(self):
+        vm_name = self.dha.get_node_property(self.fuel_node_id, 'libvirtName')
+        r, c = exec_cmd('virsh dumpxml %s' % vm_name, False)
+        if c > 0:
+            log(r)
+            return
+        self.undefine_vm_delete_disk(r, vm_name)
+
+    def undefine_vm_delete_disk(self, printout, vm_name):
+        disk_files = []
+        xml_dump = etree.fromstring(printout, self.parser)
+        disks = xml_dump.xpath('/domain/devices/disk')
+        for disk in disks:
+            sources = disk.xpath('source')
+            for source in sources:
+                source_file = source.get('file')
+                if source_file:
+                    disk_files.append(source_file)
+        log('Deleting VM %s with disks %s' % (vm_name, disk_files))
+        exec_cmd('virsh destroy %s' % vm_name, False)
+        exec_cmd('virsh undefine %s' % vm_name, False)
+        for file in disk_files:
+            exec_cmd('rm -f %s' % file)
+
+    def create_vm(self):
+        temp_dir = exec_cmd('mktemp -d')
+        vm_name = self.dha.get_node_property(self.fuel_node_id, 'libvirtName')
+        vm_template = self.dha.get_node_property(self.fuel_node_id,
+                                                 'libvirtTemplate')
+        disk_path = '%s/%s.raw' % (self.storage_dir, vm_name)
+        exec_cmd('fallocate -l %s %s' % (FUEL_DISK_SIZE, disk_path))
+        self.define_vm(vm_name, vm_template, temp_dir, disk_path)
+        exec_cmd('rm -fr %s' % temp_dir)
+
+    def define_vm(self, vm_name, vm_template, temp_dir, disk_path):
+        log('Creating VM %s with disks %s' % (vm_name, disk_path))
+        temp_vm_file = '%s/%s' % (temp_dir, vm_name)
+        exec_cmd('cp %s/%s %s' % (self.vm_dir, vm_template, temp_vm_file))
+        with open(temp_vm_file) as f:
+            vm_xml = etree.parse(f)
+            names = vm_xml.xpath('/domain/name')
+            for name in names:
+                name.text = vm_name
+            uuids = vm_xml.xpath('/domain/uuid')
+            for uuid in uuids:
+                uuid.getparent().remove(uuid)
+            disks = vm_xml.xpath('/domain/devices/disk')
+            for disk in disks:
+                sources = disk.xpath('source')
+                for source in sources:
+                    source.set('file', disk_path)
+        with open(temp_vm_file, 'w') as f:
+            vm_xml.write(f, pretty_print=True, xml_declaration=True)
+        exec_cmd('virsh define %s' % temp_vm_file)
+
+
+def usage():
+    print '''
+    Usage:
+    python setup_vfuel.py <storage_directory> <dha_file>
+
+    Example:
+            python setup_vfuel.py /mnt/images dha.yaml
+    '''
+
+def parse_arguments():
+    if len(sys.argv) != 3:
+        log('Incorrect number of arguments')
+        usage()
+        sys.exit(1)
+    storage_dir = sys.argv[-2]
+    dha_file = sys.argv[-1]
+    check_dir_exists(storage_dir)
+    check_file_exists(dha_file)
+    return storage_dir, dha_file
+
+def main():
+    storage_dir, dha_file = parse_arguments()
+
+    vfuel = VFuel(storage_dir, dha_file)
+    vfuel.setup_environment()
+
+if __name__ == '__main__':
+    main()
index 0f6b8c7..9ea227a 100644 (file)
@@ -1,13 +1,3 @@
-###############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# szilard.cserey@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-###############################################################################
-
-
 import paramiko
 import common
 import scp
@@ -16,7 +6,6 @@ TIMEOUT = 600
 log = common.log
 err = common.err
 
-
 class SSHClient(object):
 
     def __init__(self, host, username, password):
@@ -29,8 +18,7 @@ class SSHClient(object):
         self.client = paramiko.SSHClient()
         self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
         self.client.connect(self.host, username=self.username,
-                            password=self.password, look_for_keys=False,
-                            timeout=timeout)
+                            password=self.password, timeout=timeout)
 
     def close(self):
         if self.client is not None:
@@ -44,7 +32,7 @@ class SSHClient(object):
     def __exit__(self, type, value, traceback):
         self.close()
 
-    def exec_cmd(self, command, check=True, sudo=False, timeout=TIMEOUT):
+    def exec_cmd(self, command, sudo=False, timeout=TIMEOUT, check=True):
         if sudo and self.username != 'root':
             command = "sudo -S -p '' %s" % command
         stdin, stdout, stderr = self.client.exec_command(command,
@@ -72,15 +60,16 @@ class SSHClient(object):
             if chan.recv_ready():
                 data = chan.recv(1024)
                 while data:
-                    log(data.strip())
+                    print data
                     data = chan.recv(1024)
 
             if chan.recv_stderr_ready():
                 error_buff = chan.recv_stderr(1024)
                 while error_buff:
-                    log(error_buff.strip())
+                    print error_buff
                     error_buff = chan.recv_stderr(1024)
-        return chan.recv_exit_status()
+        exit_status = chan.recv_exit_status()
+        log('Exit status %s' % exit_status)
 
     def scp_get(self, remote, local='.', dir=False):
         try:
diff --git a/fuel/deploy/templates/hardware_environment/conf/ericsson_montreal_lab/dea.yaml b/fuel/deploy/templates/hardware_environment/conf/ericsson_montreal_lab/dea.yaml
deleted file mode 100644 (file)
index 23b2809..0000000
+++ /dev/null
@@ -1,844 +0,0 @@
-title: Deployment Environment Adapter (DEA)
-# DEA API version supported
-version:
-created:
-comment: Config for Ericsson Montreal Lab - HA deployment with Ceph and Opendaylight
-environment:
-  name: opnfv
-  mode: ha
-  net_segment_type: gre
-wanted_release: Juno on Ubuntu 14.04.1
-nodes:
-- id: 1
-  interfaces: interfaces_1
-  transformations: transformations_1
-  role: ceph-osd,controller
-- id: 2
-  interfaces: interfaces_1
-  transformations: transformations_1
-  role: ceph-osd,controller
-- id: 3
-  interfaces: interfaces_1
-  transformations: transformations_1
-  role: ceph-osd,controller
-- id: 4
-  interfaces: interfaces_1
-  transformations: transformations_2
-  role: ceph-osd,compute
-- id: 5
-  interfaces: interfaces_1
-  transformations: transformations_2
-  role: ceph-osd,compute
-- id: 6
-  interfaces: interfaces_1
-  transformations: transformations_2
-  role: ceph-osd,compute
-fuel:
-  ADMIN_NETWORK:
-    ipaddress: 10.20.0.2
-    netmask: 255.255.0.0
-    dhcp_pool_start: 10.20.0.3
-    dhcp_pool_end: 10.20.0.254
-  DNS_UPSTREAM: 10.118.32.193
-  DNS_DOMAIN: opnfvericsson.ca
-  DNS_SEARCH: opnfvericsson.ca
-  FUEL_ACCESS:
-    user: admin
-    password: admin
-  HOSTNAME: opnfv
-  NTP1: 10.118.34.219
-  NTP2:
-  NTP3:
-interfaces_1:
-  eth0:
-  - fuelweb_admin
-  eth2:
-  - public
-  - management
-  - storage
-  - private
-transformations_1:
-  transformations:
-  - action: add-br
-    name: br-fw-admin
-  - action: add-br
-    name: br-mgmt
-  - action: add-br
-    name: br-storage
-  - action: add-br
-    name: br-ex
-  - action: add-br
-    name: br-floating
-    provider: ovs
-  - action: add-patch
-    bridges:
-    - br-floating
-    - br-ex
-    mtu: 65000
-    provider: ovs
-  - action: add-br
-    name: br-mesh
-  - action: add-port
-    bridge: br-fw-admin
-    name: eth0
-  - action: add-port
-    bridge: br-mgmt
-    name: eth2.320
-  - action: add-port
-    bridge: br-storage
-    name: eth2.220
-  - action: add-port
-    bridge: br-mesh
-    name: eth2.20
-  - action: add-port
-    bridge: br-ex
-    name: eth0
-transformations_2:
-  transformations:
-  - action: add-br
-    name: br-fw-admin
-  - action: add-br
-    name: br-mgmt
-  - action: add-br
-    name: br-storage
-  - action: add-br
-    name: br-mesh
-  - action: add-port
-    bridge: br-fw-admin
-    name: eth0
-  - action: add-port
-    bridge: br-mgmt
-    name: eth2.320
-  - action: add-port
-    bridge: br-storage
-    name: eth2.220
-  - action: add-port
-    bridge: br-mesh
-    name: eth2.20
-network:
-  management_vip: 192.168.0.2
-  management_vrouter_vip: 192.168.0.3
-  networking_parameters:
-    base_mac: fa:16:3e:00:00:00
-    dns_nameservers:
-    - 10.118.32.193
-    floating_ranges:
-    - - 10.118.34.226
-      - 10.118.34.230
-    gre_id_range:
-    - 2
-    - 65535
-    internal_cidr: 192.168.111.0/24
-    internal_gateway: 192.168.111.1
-    net_l23_provider: ovs
-    segmentation_type: gre
-    vlan_range:
-    - 2022
-    - 2023
-  networks:
-  - cidr: 10.118.34.192/24
-    gateway: 10.118.34.193
-    ip_ranges:
-    - - 10.118.34.220
-      - 10.118.34.225
-    meta:
-      cidr: 172.16.0.0/24
-      configurable: true
-      floating_range_var: floating_ranges
-      ip_range:
-      - 172.16.0.2
-      - 172.16.0.126
-      map_priority: 1
-      name: public
-      notation: ip_ranges
-      render_addr_mask: public
-      render_type: null
-      use_gateway: true
-      vips:
-      - haproxy
-      - vrouter
-      vlan_start: null
-    name: public
-    vlan_start: null
-  - cidr: 192.168.2.0/24
-    gateway: null
-    ip_ranges:
-    - - 192.168.2.2
-      - 192.168.2.254
-    meta:
-      assign_vip: 192.168.2.0/24
-      configurable: true
-      map_priority: 2
-      name: private
-      notation: cidr
-      render_addr_mask: private
-      render_type: cidr
-      seg_type: gre
-      use_gateway: false
-      vlan_start: 103
-    name: private
-    vlan_start: 20
-  - cidr: 192.168.0.0/24
-    gateway: null
-    ip_ranges:
-    - - 192.168.0.1
-      - 192.168.0.254
-    meta:
-      cidr: 192.168.0.0/24
-      configurable: true
-      map_priority: 2
-      name: management
-      notation: cidr
-      render_addr_mask: internal
-      render_type: cidr
-      use_gateway: false
-      vips:
-      - haproxy
-      - vrouter
-      vlan_start: 101
-    name: management
-    vlan_start: 320
-  - cidr: 192.168.1.0/24
-    gateway: null
-    ip_ranges:
-    - - 192.168.1.2
-      - 192.168.1.254
-    meta:
-      cidr: 192.168.1.0/24
-      configurable: true
-      map_priority: 2
-      name: storage
-      notation: cidr
-      render_addr_mask: storage
-      render_type: cidr
-      use_gateway: false
-      vlan_start: 102
-    name: storage
-    vlan_start: 220
-  - cidr: 10.20.0.0/16
-    gateway: 10.20.0.2
-    ip_ranges:
-    - - 10.20.0.3
-      - 10.20.0.254
-    meta:
-      configurable: false
-      map_priority: 0
-      notation: ip_ranges
-      render_addr_mask: null
-      render_type: null
-      unmovable: true
-      use_gateway: true
-    name: fuelweb_admin
-    vlan_start: null
-  public_vip: 10.118.34.220
-  public_vrouter_vip: 10.118.34.221
-settings:
-  editable:
-    access:
-      email:
-        description: Email address for Administrator
-        label: Email
-        regex:
-          error: Invalid email
-          source: ^\S+@\S+$
-        type: text
-        value: admin@localhost
-        weight: 40
-      metadata:
-        label: Access
-        weight: 10
-      password:
-        description: Password for Administrator
-        label: Password
-        regex:
-          error: Empty password
-          source: \S
-        type: password
-        value: admin
-        weight: 20
-      tenant:
-        description: Tenant (project) name for Administrator
-        label: Tenant
-        regex:
-          error: Invalid tenant name
-          source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$)(?!.*
-            +.*$).+
-        type: text
-        value: admin
-        weight: 30
-      user:
-        description: Username for Administrator
-        label: Username
-        regex:
-          error: Invalid username
-          source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$)(?!.*
-            +.*$).+
-        type: text
-        value: admin
-        weight: 10
-    additional_components:
-      ceilometer:
-        description: If selected, Ceilometer component will be installed
-        label: Install Ceilometer
-        type: checkbox
-        value: false
-        weight: 40
-      heat:
-        description: ''
-        label: ''
-        type: hidden
-        value: true
-        weight: 30
-      metadata:
-        label: Additional Components
-        weight: 20
-      mongo:
-        description: If selected, You can use external Mongo DB as ceilometer backend
-        label: Use external Mongo DB
-        restrictions:
-        - settings:additional_components.ceilometer.value == false
-        type: checkbox
-        value: false
-        weight: 40
-      murano:
-        description: If selected, Murano component will be installed
-        label: Install Murano
-        restrictions:
-        - cluster:net_provider != 'neutron'
-        type: checkbox
-        value: false
-        weight: 20
-      sahara:
-        description: If selected, Sahara component will be installed
-        label: Install Sahara
-        type: checkbox
-        value: false
-        weight: 10
-    common:
-      auth_key:
-        description: Public key(s) to include in authorized_keys on deployed nodes
-        label: Public Key
-        type: textarea
-        value: ''
-        weight: 70
-      auto_assign_floating_ip:
-        description: If selected, OpenStack will automatically assign a floating IP
-          to a new instance
-        label: Auto assign floating IP
-        restrictions:
-        - action: hide
-          condition: cluster:net_provider == 'neutron'
-        type: checkbox
-        value: false
-        weight: 40
-      debug:
-        description: Debug logging mode provides more information, but requires more
-          disk space.
-        label: OpenStack debug logging
-        type: checkbox
-        value: false
-        weight: 20
-      libvirt_type:
-        label: Hypervisor type
-        type: radio
-        value: kvm
-        values:
-        - data: kvm
-          description: Choose this type of hypervisor if you run OpenStack on hardware
-          label: KVM
-        - data: qemu
-          description: Choose this type of hypervisor if you run OpenStack on virtual
-            hosts.
-          label: QEMU
-        weight: 30
-      metadata:
-        label: Common
-        weight: 30
-      nova_quota:
-        description: Quotas are used to limit CPU and memory usage for tenants. Enabling
-          quotas will increase load on the Nova database.
-        label: Nova quotas
-        type: checkbox
-        value: false
-        weight: 25
-      puppet_debug:
-        description: Debug puppet logging mode provides more information, but requires
-          more disk space.
-        label: Puppet debug logging
-        type: checkbox
-        value: true
-        weight: 20
-      resume_guests_state_on_host_boot:
-        description: Whether to resume previous guests state when the host reboots.
-          If enabled, this option causes guests assigned to the host to resume their
-          previous state. If the guest was running a restart will be attempted when
-          nova-compute starts. If the guest was not running previously, a restart will
-          not be attempted.
-        label: Resume guests state on host boot
-        type: checkbox
-        value: true
-        weight: 60
-      use_cow_images:
-        description: For most cases you will want qcow format. If it's disabled, raw
-          image format will be used to run VMs. OpenStack with raw format currently
-          does not support snapshotting.
-        label: Use qcow format for images
-        type: checkbox
-        value: true
-        weight: 50
-      use_vcenter:
-        type: hidden
-        value: false
-        weight: 30
-    corosync:
-      group:
-        description: ''
-        label: Group
-        type: text
-        value: 226.94.1.1
-        weight: 10
-      metadata:
-        label: Corosync
-        restrictions:
-        - action: hide
-          condition: 'true'
-        weight: 50
-      port:
-        description: ''
-        label: Port
-        type: text
-        value: '12000'
-        weight: 20
-      verified:
-        description: Set True only if multicast is configured correctly on router.
-        label: Need to pass network verification.
-        type: checkbox
-        value: false
-        weight: 10
-    external_dns:
-      dns_list:
-        description: List of upstream DNS servers, separated by comma
-        label: DNS list
-        regex:
-          error: Invalid IP address list
-          source: ^\*$|^(?:\d|1?\d\d|2[0-4]\d|25[0-5])(?:\.(?:\d|1?\d\d|2[0-4]\d|25[0-5])){3}(?:\s*,\s*(?:\d|1?\d\d|2[0-4]\d|25[0-5])(?:\.(?:\d|1?\d\d|2[0-4]\d|25[0-5])){3})*$
-        type: text
-        value: 10.118.32.193
-        weight: 10
-      metadata:
-        label: Host OS DNS Servers
-        weight: 90
-    external_mongo:
-      hosts_ip:
-        description: IP Addresses of MongoDB. Use comma to split IPs
-        label: MongoDB hosts IP
-        regex:
-          error: Invalid hosts ip sequence
-          source: ^(((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?),)*((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$
-        type: text
-        value: ''
-        weight: 30
-      metadata:
-        label: External MongoDB
-        restrictions:
-        - action: hide
-          condition: settings:additional_components.mongo.value == false
-        weight: 20
-      mongo_db_name:
-        description: Mongo database name
-        label: Database name
-        regex:
-          error: Invalid database name
-          source: ^\w+$
-        type: text
-        value: ceilometer
-        weight: 30
-      mongo_password:
-        description: Mongo database password
-        label: Password
-        regex:
-          error: Password contains spaces
-          source: ^\S*$
-        type: password
-        value: ceilometer
-        weight: 30
-      mongo_replset:
-        description: Name for Mongo replication set
-        label: Replset
-        type: text
-        value: ''
-        weight: 30
-      mongo_user:
-        description: Mongo database username
-        label: Username
-        regex:
-          error: Empty username
-          source: ^\w+$
-        type: text
-        value: ceilometer
-        weight: 30
-    external_ntp:
-      metadata:
-        label: Host OS NTP Servers
-        weight: 100
-      ntp_list:
-        description: List of upstream NTP servers, separated by comma
-        label: NTP server list
-        regex:
-          error: Invalid NTP server list
-          source: ^\s*(?:(?:\w+(?:-+\w+)*\.)+[a-z]+|\d{1,3}(?:\.\d{1,3}){3})\s*(?:,\s*(?:(?:\w+(?:-+\w+)*\.)+[a-z]+|\d{1,3}(\.\d{1,3}){3})\s*)*$
-        type: text
-        value:  10.118.34.219
-        weight: 10
-    kernel_params:
-      kernel:
-        description: Default kernel parameters
-        label: Initial parameters
-        type: text
-        value: console=ttyS0,9600 console=tty0 net.ifnames=0 biosdevname=0 rootdelay=90
-          nomodeset
-        weight: 45
-      metadata:
-        label: Kernel parameters
-        weight: 40
-    murano_settings:
-      metadata:
-        label: Murano Settings
-        restrictions:
-        - action: hide
-          condition: settings:additional_components.murano.value == false
-        weight: 20
-      murano_repo_url:
-        description: ''
-        label: Murano Repository URL
-        type: text
-        value: http://storage.apps.openstack.org/
-        weight: 10
-    neutron_mellanox:
-      metadata:
-        enabled: true
-        label: Mellanox Neutron components
-        restrictions:
-        - action: hide
-          condition: not ('experimental' in version:feature_groups)
-        toggleable: false
-        weight: 50
-      plugin:
-        label: Mellanox drivers and SR-IOV plugin
-        type: radio
-        value: disabled
-        values:
-        - data: disabled
-          description: If selected, Mellanox drivers, Neutron and Cinder plugin will
-            not be installed.
-          label: Mellanox drivers and plugins disabled
-          restrictions:
-          - settings:storage.iser.value == true
-        - data: drivers_only
-          description: If selected, Mellanox Ethernet drivers will be installed to support
-            networking over Mellanox NIC. Mellanox Neutron plugin will not be installed.
-          label: Install only Mellanox drivers
-          restrictions:
-          - settings:common.libvirt_type.value != 'kvm'
-        - data: ethernet
-          description: If selected, both Mellanox Ethernet drivers and Mellanox network
-            acceleration (Neutron) plugin will be installed.
-          label: Install Mellanox drivers and SR-IOV plugin
-          restrictions:
-          - settings:common.libvirt_type.value != 'kvm' or not (cluster:net_provider
-            == 'neutron' and networking_parameters:segmentation_type == 'vlan')
-        weight: 60
-      vf_num:
-        description: Note that one virtual function will be reserved to the storage
-          network, in case of choosing iSER.
-        label: Number of virtual NICs
-        restrictions:
-        - settings:neutron_mellanox.plugin.value != 'ethernet'
-        type: text
-        value: '16'
-        weight: 70
-    opendaylight:
-      metadata:
-        enabled: true
-        label: OpenDaylight plugin
-        plugin_id: 1
-        restrictions:
-        - cluster:net_provider != 'neutron': Only neutron is supported by OpenDaylight
-        toggleable: true
-        weight: 70
-      rest_api_port:
-        description: Port on which ODL REST API will be available.
-        label: Port number
-        regex:
-          error: Invalid port number
-          source: ^([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$
-        type: text
-        value: '8282'
-        weight: 40
-      use_vxlan:
-        description: Configure neutron to use VXLAN tunneling
-        label: Use vxlan
-        restrictions:
-        - action: disable
-          condition: networking_parameters:segmentation_type == 'vlan'
-          message: Neutron with GRE segmentation required
-        type: checkbox
-        value: true
-        weight: 20
-      vni_range_end:
-        description: VXLAN VNI IDs range end
-        label: VNI range end
-        regex:
-          error: Invalid ID number
-          source: ^\d+$
-        restrictions:
-        - action: hide
-          condition: networking_parameters:segmentation_type == 'vlan'
-        type: text
-        value: '10000'
-        weight: 31
-      vni_range_start:
-        description: VXLAN VNI IDs range start
-        label: VNI range start
-        regex:
-          error: Invalid ID number
-          source: ^\d+$
-        restrictions:
-        - action: hide
-          condition: networking_parameters:segmentation_type == 'vlan'
-        type: text
-        value: '10'
-        weight: 30
-    provision:
-      metadata:
-        label: Provision
-        weight: 80
-      method:
-        description: Which provision method to use for this cluster.
-        label: Provision method
-        type: radio
-        value: image
-        values:
-        - data: image
-          description: Copying pre-built images on a disk.
-          label: Image
-        - data: cobbler
-          description: Install from scratch using anaconda or debian-installer.
-          label: (DEPRECATED) Classic (use anaconda or debian-installer)
-    public_network_assignment:
-      assign_to_all_nodes:
-        description: When disabled, public network will be assigned to controllers only
-        label: Assign public network to all nodes
-        type: checkbox
-        value: false
-        weight: 10
-      metadata:
-        label: Public network assignment
-        restrictions:
-        - action: hide
-          condition: cluster:net_provider != 'neutron'
-        weight: 50
-    repo_setup:
-      metadata:
-        always_editable: true
-        label: Repositories
-        weight: 50
-      repos:
-        description: 'Please note: the first repository will be considered the operating
-          system mirror that will be used during node provisioning.
-
-          To create a local repository mirror on the Fuel master node, please follow
-          the instructions provided by running "fuel-createmirror --help" on the Fuel
-          master node.
-
-          Please make sure your Fuel master node has Internet access to the repository
-          before attempting to create a mirror.
-
-          For more details, please refer to the documentation (https://docs.mirantis.com/openstack/fuel/fuel-6.1/operations.html#external-ubuntu-ops).
-
-          '
-        extra_priority: null
-        type: custom_repo_configuration
-        value:
-        - name: ubuntu
-          priority: null
-          section: main
-          suite: trusty
-          type: deb
-          uri: http://10.20.0.2:8080/ubuntu-part
-        - name: ubuntu-updates
-          priority: null
-          section: main
-          suite: trusty
-          type: deb
-          uri: http://10.20.0.2:8080/ubuntu-part
-        - name: ubuntu-security
-          priority: null
-          section: main
-          suite: trusty
-          type: deb
-          uri: http://10.20.0.2:8080/ubuntu-part
-        - name: mos
-          priority: 1050
-          section: main restricted
-          suite: mos6.1
-          type: deb
-          uri: http://10.20.0.2:8080/2014.2.2-6.1/ubuntu/x86_64
-        - name: mos-updates
-          priority: 1050
-          section: main restricted
-          suite: mos6.1-updates
-          type: deb
-          uri: http://10.20.0.2:8080/mos-ubuntu
-        - name: mos-security
-          priority: 1050
-          section: main restricted
-          suite: mos6.1-security
-          type: deb
-          uri: http://10.20.0.2:8080/mos-ubuntu
-        - name: mos-holdback
-          priority: 1100
-          section: main restricted
-          suite: mos6.1-holdback
-          type: deb
-          uri: http://10.20.0.2:8080/mos-ubuntu
-        - name: Auxiliary
-          priority: 1150
-          section: main restricted
-          suite: auxiliary
-          type: deb
-          uri: http://10.20.0.2:8080/2014.2.2-6.1/ubuntu/auxiliary
-    storage:
-      ephemeral_ceph:
-        description: Configures Nova to store ephemeral volumes in RBD. This works best
-          if Ceph is enabled for volumes and images, too. Enables live migration of
-          all types of Ceph backed VMs (without this option, live migration will only
-          work with VMs launched from Cinder volumes).
-        label: Ceph RBD for ephemeral volumes (Nova)
-        type: checkbox
-        value: true
-        weight: 75
-      images_ceph:
-        description: Configures Glance to use the Ceph RBD backend to store images.
-          If enabled, this option will prevent Swift from installing.
-        label: Ceph RBD for images (Glance)
-        restrictions:
-        - settings:storage.images_vcenter.value == true: Only one Glance backend could
-            be selected.
-        type: checkbox
-        value: true
-        weight: 30
-      images_vcenter:
-        description: Configures Glance to use the vCenter/ESXi backend to store images.
-          If enabled, this option will prevent Swift from installing.
-        label: VMWare vCenter/ESXi datastore for images (Glance)
-        restrictions:
-        - action: hide
-          condition: settings:common.use_vcenter.value != true
-        - condition: settings:storage.images_ceph.value == true
-          message: Only one Glance backend could be selected.
-        type: checkbox
-        value: false
-        weight: 35
-      iser:
-        description: 'High performance block storage: Cinder volumes over iSER protocol
-          (iSCSI over RDMA). This feature requires SR-IOV capabilities in the NIC, and
-          will use a dedicated virtual function for the storage network.'
-        label: iSER protocol for volumes (Cinder)
-        restrictions:
-        - settings:storage.volumes_lvm.value != true or settings:common.libvirt_type.value
-          != 'kvm'
-        - action: hide
-          condition: not ('experimental' in version:feature_groups)
-        type: checkbox
-        value: false
-        weight: 11
-      metadata:
-        label: Storage
-        weight: 60
-      objects_ceph:
-        description: Configures RadosGW front end for Ceph RBD. This exposes S3 and
-          Swift API Interfaces. If enabled, this option will prevent Swift from installing.
-        label: Ceph RadosGW for objects (Swift API)
-        restrictions:
-        - settings:storage.images_ceph.value == false
-        type: checkbox
-        value: false
-        weight: 80
-      osd_pool_size:
-        description: Configures the default number of object replicas in Ceph. This
-          number must be equal to or lower than the number of deployed 'Storage - Ceph
-          OSD' nodes.
-        label: Ceph object replication factor
-        regex:
-          error: Invalid number
-          source: ^[1-9]\d*$
-        type: text
-        value: '2'
-        weight: 85
-      volumes_ceph:
-        description: Configures Cinder to store volumes in Ceph RBD images.
-        label: Ceph RBD for volumes (Cinder)
-        restrictions:
-        - settings:storage.volumes_lvm.value == true
-        type: checkbox
-        value: true
-        weight: 20
-      volumes_lvm:
-        description: It is recommended to have at least one Storage - Cinder LVM node.
-        label: Cinder LVM over iSCSI for volumes
-        restrictions:
-        - settings:storage.volumes_ceph.value == true
-        type: checkbox
-        value: false
-        weight: 10
-    syslog:
-      metadata:
-        label: Syslog
-        weight: 50
-      syslog_port:
-        description: Remote syslog port
-        label: Port
-        regex:
-          error: Invalid Syslog port
-          source: ^([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$
-        type: text
-        value: '514'
-        weight: 20
-      syslog_server:
-        description: Remote syslog hostname
-        label: Hostname
-        type: text
-        value: ''
-        weight: 10
-      syslog_transport:
-        label: Syslog transport protocol
-        type: radio
-        value: tcp
-        values:
-        - data: udp
-          description: ''
-          label: UDP
-        - data: tcp
-          description: ''
-          label: TCP
-        weight: 30
-    workloads_collector:
-      enabled:
-        type: hidden
-        value: true
-      metadata:
-        label: Workloads Collector User
-        restrictions:
-        - action: hide
-          condition: 'true'
-        weight: 10
-      password:
-        type: password
-        value: pBkLbu1k
-      tenant:
-        type: text
-        value: services
-      user:
-        type: text
-        value: fuel_stats_user
diff --git a/fuel/deploy/templates/hardware_environment/conf/ericsson_montreal_lab/dha.yaml b/fuel/deploy/templates/hardware_environment/conf/ericsson_montreal_lab/dha.yaml
deleted file mode 100644 (file)
index ca446f6..0000000
+++ /dev/null
@@ -1,54 +0,0 @@
-title: Deployment Hardware Adapter (DHA)
-# DHA API version supported
-version:
-created:
-comment: Config for Ericsson Montreal Lab
-
-# Adapter to use for this definition
-adapter: hp
-
-# Node list.
-# Mandatory property is id, all other properties are adapter specific.
-
-nodes:
-- id: 1
-  pxeMac: 14:58:D0:54:7A:D8
-  ipmiIp: 10.118.32.198
-  ipmiUser: <username>
-  ipmiPass: <password>
-- id: 2
-  pxeMac: 14:58:D0:55:E2:E0
-  ipmiIp: 10.118.32.202
-  ipmiUser: <username>
-  ipmiPass: <password>
-- id: 3
-  pxeMac: 9C:B6:54:8A:25:C0
-  ipmiIp: 10.118.32.213
-  ipmiUser: <username>
-  ipmiPass: <password>
-- id: 4
-  pxeMac: 14:58:D0:54:28:80
-  ipmiIp: 10.118.32.201
-  ipmiUser: <username>
-  ipmiPass: <password>
-- id: 5
-  pxeMac: 14:58:D0:54:E7:88
-  ipmiIp: 10.118.32.203
-  ipmiUser: <username>
-  ipmiPass: <password>
-- id: 6
-  pxeMac: 14:58:D0:54:7A:28
-  ipmiIp: 10.118.32.205
-  ipmiUser: <username>
-  ipmiPass: <password>
-# Adding the Fuel node as node id 7 which may not be correct - please
-# adjust as needed.
-- id: 7
-  libvirtName: fuel-opnfv
-  libvirtTemplate: templates/hardware_environment/vms/fuel.xml
-  isFuel: yes
-  username: root
-  password: r00tme
-
-disks:
-  fuel: 50G
\ No newline at end of file
diff --git a/fuel/deploy/templates/hardware_environment/conf/linux_foundation_lab/pod1/dea.yaml b/fuel/deploy/templates/hardware_environment/conf/linux_foundation_lab/pod1/dea.yaml
deleted file mode 100644 (file)
index db29fe9..0000000
+++ /dev/null
@@ -1,841 +0,0 @@
-title: Deployment Environment Adapter (DEA)
-# DEA API version supported
-version:
-created:
-comment: Config for LF POD1 - HA deployment with Ceph and Opendaylight
-environment:
-  name: opnfv
-  mode: ha
-  net_segment_type: gre
-wanted_release: Juno on Ubuntu 14.04.1
-nodes:
-- id: 1
-  interfaces: interfaces_1
-  transformations: transformations_1
-  role: ceph-osd,controller
-- id: 2
-  interfaces: interfaces_1
-  transformations: transformations_1
-  role: ceph-osd,controller
-- id: 3
-  interfaces: interfaces_1
-  transformations: transformations_1
-  role: ceph-osd,controller
-- id: 4
-  interfaces: interfaces_1
-  transformations: transformations_2
-  role: ceph-osd,compute
-- id: 5
-  interfaces: interfaces_1
-  transformations: transformations_2
-  role: ceph-osd,compute
-fuel:
-  ADMIN_NETWORK:
-    ipaddress: 10.20.0.2
-    netmask: 255.255.0.0
-    dhcp_pool_start: 10.20.0.3
-    dhcp_pool_end: 10.20.0.254
-  DNS_UPSTREAM: 8.8.8.8
-  DNS_DOMAIN: domain.tld
-  DNS_SEARCH: domain.tld
-  FUEL_ACCESS:
-    user: admin
-    password: admin
-  HOSTNAME: opnfv
-  NTP1: 0.pool.ntp.org
-  NTP2: 1.pool.ntp.org
-  NTP3: 2.pool.ntp.org
-interfaces_1:
-  eth0:
-  - public
-  eth1:
-  - fuelweb_admin
-  - management
-  - storage
-  - private
-transformations_1:
-  transformations:
-  - action: add-br
-    name: br-fw-admin
-  - action: add-br
-    name: br-mgmt
-  - action: add-br
-    name: br-storage
-  - action: add-br
-    name: br-ex
-  - action: add-br
-    name: br-floating
-    provider: ovs
-  - action: add-patch
-    bridges:
-    - br-floating
-    - br-ex
-    mtu: 65000
-    provider: ovs
-  - action: add-br
-    name: br-mesh
-  - action: add-port
-    bridge: br-fw-admin
-    name: eth0
-  - action: add-port
-    bridge: br-mgmt
-    name: eth1.300
-  - action: add-port
-    bridge: br-storage
-    name: eth1.301
-  - action: add-port
-    bridge: br-mesh
-    name: eth1.302
-  - action: add-port
-    bridge: br-ex
-    name: eth0
-transformations_2:
-  transformations:
-  - action: add-br
-    name: br-fw-admin
-  - action: add-br
-    name: br-mgmt
-  - action: add-br
-    name: br-storage
-  - action: add-br
-    name: br-mesh
-  - action: add-port
-    bridge: br-fw-admin
-    name: eth0
-  - action: add-port
-    bridge: br-mgmt
-    name: eth1.300
-  - action: add-port
-    bridge: br-storage
-    name: eth1.301
-  - action: add-port
-    bridge: br-mesh
-    name: eth1.302
-network:
-  management_vip: 192.168.0.2
-  management_vrouter_vip: 192.168.0.3
-  networking_parameters:
-    base_mac: fa:16:3e:00:00:00
-    dns_nameservers:
-    - 8.8.4.4
-    - 8.8.8.8
-    floating_ranges:
-    - - 172.30.9.160
-      - 172.30.9.254
-    gre_id_range:
-    - 2
-    - 65535
-    internal_cidr: 192.168.111.0/24
-    internal_gateway: 192.168.111.1
-    net_l23_provider: ovs
-    segmentation_type: gre
-    vlan_range:
-    - 1000
-    - 1030
-  networks:
-  - cidr: 172.30.9.0/24
-    gateway: 172.30.9.1
-    ip_ranges:
-    - - 172.30.9.64
-      - 172.30.9.159
-    meta:
-      cidr: 172.16.0.0/24
-      configurable: true
-      floating_range_var: floating_ranges
-      ip_range:
-      - 172.16.0.2
-      - 172.16.0.126
-      map_priority: 1
-      name: public
-      notation: ip_ranges
-      render_addr_mask: public
-      render_type: null
-      use_gateway: true
-      vips:
-      - haproxy
-      - vrouter
-      vlan_start: null
-    name: public
-    vlan_start: null
-  - cidr: 192.168.2.0/24
-    gateway: null
-    ip_ranges:
-    - - 192.168.2.2
-      - 192.168.2.254
-    meta:
-      assign_vip: 192.168.2.0/24
-      configurable: true
-      map_priority: 2
-      name: private
-      notation: cidr
-      render_addr_mask: private
-      render_type: cidr
-      seg_type: gre
-      use_gateway: false
-      vlan_start: 103
-    name: private
-    vlan_start: 302
-  - cidr: 192.168.0.0/24
-    gateway: null
-    ip_ranges:
-    - - 192.168.0.2
-      - 192.168.0.254
-    meta:
-      cidr: 192.168.0.0/24
-      configurable: true
-      map_priority: 2
-      name: management
-      notation: cidr
-      render_addr_mask: internal
-      render_type: cidr
-      use_gateway: false
-      vips:
-      - haproxy
-      - vrouter
-      vlan_start: 101
-    name: management
-    vlan_start: 300
-  - cidr: 192.168.1.0/24
-    gateway: null
-    ip_ranges:
-    - - 192.168.1.2
-      - 192.168.1.254
-    meta:
-      cidr: 192.168.1.0/24
-      configurable: true
-      map_priority: 2
-      name: storage
-      notation: cidr
-      render_addr_mask: storage
-      render_type: cidr
-      use_gateway: false
-      vlan_start: 102
-    name: storage
-    vlan_start: 301
-  - cidr: 10.20.0.0/16
-    gateway: 10.20.0.2
-    ip_ranges:
-    - - 10.20.0.3
-      - 10.20.0.254
-    meta:
-      configurable: false
-      map_priority: 0
-      notation: ip_ranges
-      render_addr_mask: null
-      render_type: null
-      unmovable: true
-      use_gateway: true
-    name: fuelweb_admin
-    vlan_start: null
-  public_vip: 172.30.9.64
-  public_vrouter_vip: 172.30.9.65
-settings:
-  editable:
-    access:
-      email:
-        description: Email address for Administrator
-        label: Email
-        regex:
-          error: Invalid email
-          source: ^\S+@\S+$
-        type: text
-        value: admin@localhost
-        weight: 40
-      metadata:
-        label: Access
-        weight: 10
-      password:
-        description: Password for Administrator
-        label: Password
-        regex:
-          error: Empty password
-          source: \S
-        type: password
-        value: admin
-        weight: 20
-      tenant:
-        description: Tenant (project) name for Administrator
-        label: Tenant
-        regex:
-          error: Invalid tenant name
-          source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$)(?!.*
-            +.*$).+
-        type: text
-        value: admin
-        weight: 30
-      user:
-        description: Username for Administrator
-        label: Username
-        regex:
-          error: Invalid username
-          source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$)(?!.*
-            +.*$).+
-        type: text
-        value: admin
-        weight: 10
-    additional_components:
-      ceilometer:
-        description: If selected, Ceilometer component will be installed
-        label: Install Ceilometer
-        type: checkbox
-        value: false
-        weight: 40
-      heat:
-        description: ''
-        label: ''
-        type: hidden
-        value: true
-        weight: 30
-      metadata:
-        label: Additional Components
-        weight: 20
-      mongo:
-        description: If selected, You can use external Mongo DB as ceilometer backend
-        label: Use external Mongo DB
-        restrictions:
-        - settings:additional_components.ceilometer.value == false
-        type: checkbox
-        value: false
-        weight: 40
-      murano:
-        description: If selected, Murano component will be installed
-        label: Install Murano
-        restrictions:
-        - cluster:net_provider != 'neutron'
-        type: checkbox
-        value: false
-        weight: 20
-      sahara:
-        description: If selected, Sahara component will be installed
-        label: Install Sahara
-        type: checkbox
-        value: false
-        weight: 10
-    common:
-      auth_key:
-        description: Public key(s) to include in authorized_keys on deployed nodes
-        label: Public Key
-        type: textarea
-        value: ''
-        weight: 70
-      auto_assign_floating_ip:
-        description: If selected, OpenStack will automatically assign a floating IP
-          to a new instance
-        label: Auto assign floating IP
-        restrictions:
-        - action: hide
-          condition: cluster:net_provider == 'neutron'
-        type: checkbox
-        value: false
-        weight: 40
-      debug:
-        description: Debug logging mode provides more information, but requires more
-          disk space.
-        label: OpenStack debug logging
-        type: checkbox
-        value: false
-        weight: 20
-      libvirt_type:
-        label: Hypervisor type
-        type: radio
-        value: kvm
-        values:
-        - data: kvm
-          description: Choose this type of hypervisor if you run OpenStack on hardware
-          label: KVM
-        - data: qemu
-          description: Choose this type of hypervisor if you run OpenStack on virtual
-            hosts.
-          label: QEMU
-        weight: 30
-      metadata:
-        label: Common
-        weight: 30
-      nova_quota:
-        description: Quotas are used to limit CPU and memory usage for tenants. Enabling
-          quotas will increase load on the Nova database.
-        label: Nova quotas
-        type: checkbox
-        value: false
-        weight: 25
-      puppet_debug:
-        description: Debug puppet logging mode provides more information, but requires
-          more disk space.
-        label: Puppet debug logging
-        type: checkbox
-        value: true
-        weight: 20
-      resume_guests_state_on_host_boot:
-        description: Whether to resume previous guests state when the host reboots.
-          If enabled, this option causes guests assigned to the host to resume their
-          previous state. If the guest was running a restart will be attempted when
-          nova-compute starts. If the guest was not running previously, a restart will
-          not be attempted.
-        label: Resume guests state on host boot
-        type: checkbox
-        value: true
-        weight: 60
-      use_cow_images:
-        description: For most cases you will want qcow format. If it's disabled, raw
-          image format will be used to run VMs. OpenStack with raw format currently
-          does not support snapshotting.
-        label: Use qcow format for images
-        type: checkbox
-        value: true
-        weight: 50
-      use_vcenter:
-        type: hidden
-        value: false
-        weight: 30
-    corosync:
-      group:
-        description: ''
-        label: Group
-        type: text
-        value: 226.94.1.1
-        weight: 10
-      metadata:
-        label: Corosync
-        restrictions:
-        - action: hide
-          condition: 'true'
-        weight: 50
-      port:
-        description: ''
-        label: Port
-        type: text
-        value: '12000'
-        weight: 20
-      verified:
-        description: Set True only if multicast is configured correctly on router.
-        label: Need to pass network verification.
-        type: checkbox
-        value: false
-        weight: 10
-    external_dns:
-      dns_list:
-        description: List of upstream DNS servers, separated by comma
-        label: DNS list
-        regex:
-          error: Invalid IP address list
-          source: ^\*$|^(?:\d|1?\d\d|2[0-4]\d|25[0-5])(?:\.(?:\d|1?\d\d|2[0-4]\d|25[0-5])){3}(?:\s*,\s*(?:\d|1?\d\d|2[0-4]\d|25[0-5])(?:\.(?:\d|1?\d\d|2[0-4]\d|25[0-5])){3})*$
-        type: text
-        value: 8.8.4.4, 8.8.8.8
-        weight: 10
-      metadata:
-        label: Host OS DNS Servers
-        weight: 90
-    external_mongo:
-      hosts_ip:
-        description: IP Addresses of MongoDB. Use comma to split IPs
-        label: MongoDB hosts IP
-        regex:
-          error: Invalid hosts ip sequence
-          source: ^(((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?),)*((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$
-        type: text
-        value: ''
-        weight: 30
-      metadata:
-        label: External MongoDB
-        restrictions:
-        - action: hide
-          condition: settings:additional_components.mongo.value == false
-        weight: 20
-      mongo_db_name:
-        description: Mongo database name
-        label: Database name
-        regex:
-          error: Invalid database name
-          source: ^\w+$
-        type: text
-        value: ceilometer
-        weight: 30
-      mongo_password:
-        description: Mongo database password
-        label: Password
-        regex:
-          error: Password contains spaces
-          source: ^\S*$
-        type: password
-        value: ceilometer
-        weight: 30
-      mongo_replset:
-        description: Name for Mongo replication set
-        label: Replset
-        type: text
-        value: ''
-        weight: 30
-      mongo_user:
-        description: Mongo database username
-        label: Username
-        regex:
-          error: Empty username
-          source: ^\w+$
-        type: text
-        value: ceilometer
-        weight: 30
-    external_ntp:
-      metadata:
-        label: Host OS NTP Servers
-        weight: 100
-      ntp_list:
-        description: List of upstream NTP servers, separated by comma
-        label: NTP server list
-        regex:
-          error: Invalid NTP server list
-          source: ^\s*(?:(?:\w+(?:-+\w+)*\.)+[a-z]+|\d{1,3}(?:\.\d{1,3}){3})\s*(?:,\s*(?:(?:\w+(?:-+\w+)*\.)+[a-z]+|\d{1,3}(\.\d{1,3}){3})\s*)*$
-        type: text
-        value: 0.pool.ntp.org, 1.pool.ntp.org
-        weight: 10
-    kernel_params:
-      kernel:
-        description: Default kernel parameters
-        label: Initial parameters
-        type: text
-        value: console=ttyS0,9600 console=tty0 net.ifnames=0 biosdevname=0 rootdelay=90
-          nomodeset
-        weight: 45
-      metadata:
-        label: Kernel parameters
-        weight: 40
-    murano_settings:
-      metadata:
-        label: Murano Settings
-        restrictions:
-        - action: hide
-          condition: settings:additional_components.murano.value == false
-        weight: 20
-      murano_repo_url:
-        description: ''
-        label: Murano Repository URL
-        type: text
-        value: http://storage.apps.openstack.org/
-        weight: 10
-    neutron_mellanox:
-      metadata:
-        enabled: true
-        label: Mellanox Neutron components
-        restrictions:
-        - action: hide
-          condition: not ('experimental' in version:feature_groups)
-        toggleable: false
-        weight: 50
-      plugin:
-        label: Mellanox drivers and SR-IOV plugin
-        type: radio
-        value: disabled
-        values:
-        - data: disabled
-          description: If selected, Mellanox drivers, Neutron and Cinder plugin will
-            not be installed.
-          label: Mellanox drivers and plugins disabled
-          restrictions:
-          - settings:storage.iser.value == true
-        - data: drivers_only
-          description: If selected, Mellanox Ethernet drivers will be installed to support
-            networking over Mellanox NIC. Mellanox Neutron plugin will not be installed.
-          label: Install only Mellanox drivers
-          restrictions:
-          - settings:common.libvirt_type.value != 'kvm'
-        - data: ethernet
-          description: If selected, both Mellanox Ethernet drivers and Mellanox network
-            acceleration (Neutron) plugin will be installed.
-          label: Install Mellanox drivers and SR-IOV plugin
-          restrictions:
-          - settings:common.libvirt_type.value != 'kvm' or not (cluster:net_provider
-            == 'neutron' and networking_parameters:segmentation_type == 'vlan')
-        weight: 60
-      vf_num:
-        description: Note that one virtual function will be reserved to the storage
-          network, in case of choosing iSER.
-        label: Number of virtual NICs
-        restrictions:
-        - settings:neutron_mellanox.plugin.value != 'ethernet'
-        type: text
-        value: '16'
-        weight: 70
-    opendaylight:
-      metadata:
-        enabled: true
-        label: OpenDaylight plugin
-        plugin_id: 1
-        restrictions:
-        - cluster:net_provider != 'neutron': Only neutron is supported by OpenDaylight
-        toggleable: true
-        weight: 70
-      rest_api_port:
-        description: Port on which ODL REST API will be available.
-        label: Port number
-        regex:
-          error: Invalid port number
-          source: ^([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$
-        type: text
-        value: '8282'
-        weight: 40
-      use_vxlan:
-        description: Configure neutron to use VXLAN tunneling
-        label: Use vxlan
-        restrictions:
-        - action: disable
-          condition: networking_parameters:segmentation_type == 'vlan'
-          message: Neutron with GRE segmentation required
-        type: checkbox
-        value: true
-        weight: 20
-      vni_range_end:
-        description: VXLAN VNI IDs range end
-        label: VNI range end
-        regex:
-          error: Invalid ID number
-          source: ^\d+$
-        restrictions:
-        - action: hide
-          condition: networking_parameters:segmentation_type == 'vlan'
-        type: text
-        value: '10000'
-        weight: 31
-      vni_range_start:
-        description: VXLAN VNI IDs range start
-        label: VNI range start
-        regex:
-          error: Invalid ID number
-          source: ^\d+$
-        restrictions:
-        - action: hide
-          condition: networking_parameters:segmentation_type == 'vlan'
-        type: text
-        value: '10'
-        weight: 30
-    provision:
-      metadata:
-        label: Provision
-        weight: 80
-      method:
-        description: Which provision method to use for this cluster.
-        label: Provision method
-        type: radio
-        value: image
-        values:
-        - data: image
-          description: Copying pre-built images on a disk.
-          label: Image
-        - data: cobbler
-          description: Install from scratch using anaconda or debian-installer.
-          label: (DEPRECATED) Classic (use anaconda or debian-installer)
-    public_network_assignment:
-      assign_to_all_nodes:
-        description: When disabled, public network will be assigned to controllers only
-        label: Assign public network to all nodes
-        type: checkbox
-        value: false
-        weight: 10
-      metadata:
-        label: Public network assignment
-        restrictions:
-        - action: hide
-          condition: cluster:net_provider != 'neutron'
-        weight: 50
-    repo_setup:
-      metadata:
-        always_editable: true
-        label: Repositories
-        weight: 50
-      repos:
-        description: 'Please note: the first repository will be considered the operating
-          system mirror that will be used during node provisioning.
-
-          To create a local repository mirror on the Fuel master node, please follow
-          the instructions provided by running "fuel-createmirror --help" on the Fuel
-          master node.
-
-          Please make sure your Fuel master node has Internet access to the repository
-          before attempting to create a mirror.
-
-          For more details, please refer to the documentation (https://docs.mirantis.com/openstack/fuel/fuel-6.1/operations.html#external-ubuntu-ops).
-
-          '
-        extra_priority: null
-        type: custom_repo_configuration
-        value:
-        - name: ubuntu
-          priority: null
-          section: main
-          suite: trusty
-          type: deb
-          uri: http://10.20.0.2:8080/ubuntu-part
-        - name: ubuntu-updates
-          priority: null
-          section: main
-          suite: trusty
-          type: deb
-          uri: http://10.20.0.2:8080/ubuntu-part
-        - name: ubuntu-security
-          priority: null
-          section: main
-          suite: trusty
-          type: deb
-          uri: http://10.20.0.2:8080/ubuntu-part
-        - name: mos
-          priority: 1050
-          section: main restricted
-          suite: mos6.1
-          type: deb
-          uri: http://10.20.0.2:8080/2014.2.2-6.1/ubuntu/x86_64
-        - name: mos-updates
-          priority: 1050
-          section: main restricted
-          suite: mos6.1-updates
-          type: deb
-          uri: http://10.20.0.2:8080/mos-ubuntu
-        - name: mos-security
-          priority: 1050
-          section: main restricted
-          suite: mos6.1-security
-          type: deb
-          uri: http://10.20.0.2:8080/mos-ubuntu
-        - name: mos-holdback
-          priority: 1100
-          section: main restricted
-          suite: mos6.1-holdback
-          type: deb
-          uri: http://10.20.0.2:8080/mos-ubuntu
-        - name: Auxiliary
-          priority: 1150
-          section: main restricted
-          suite: auxiliary
-          type: deb
-          uri: http://10.20.0.2:8080/2014.2.2-6.1/ubuntu/auxiliary
-    storage:
-      ephemeral_ceph:
-        description: Configures Nova to store ephemeral volumes in RBD. This works best
-          if Ceph is enabled for volumes and images, too. Enables live migration of
-          all types of Ceph backed VMs (without this option, live migration will only
-          work with VMs launched from Cinder volumes).
-        label: Ceph RBD for ephemeral volumes (Nova)
-        type: checkbox
-        value: true
-        weight: 75
-      images_ceph:
-        description: Configures Glance to use the Ceph RBD backend to store images.
-          If enabled, this option will prevent Swift from installing.
-        label: Ceph RBD for images (Glance)
-        restrictions:
-        - settings:storage.images_vcenter.value == true: Only one Glance backend could
-            be selected.
-        type: checkbox
-        value: true
-        weight: 30
-      images_vcenter:
-        description: Configures Glance to use the vCenter/ESXi backend to store images.
-          If enabled, this option will prevent Swift from installing.
-        label: VMWare vCenter/ESXi datastore for images (Glance)
-        restrictions:
-        - action: hide
-          condition: settings:common.use_vcenter.value != true
-        - condition: settings:storage.images_ceph.value == true
-          message: Only one Glance backend could be selected.
-        type: checkbox
-        value: false
-        weight: 35
-      iser:
-        description: 'High performance block storage: Cinder volumes over iSER protocol
-          (iSCSI over RDMA). This feature requires SR-IOV capabilities in the NIC, and
-          will use a dedicated virtual function for the storage network.'
-        label: iSER protocol for volumes (Cinder)
-        restrictions:
-        - settings:storage.volumes_lvm.value != true or settings:common.libvirt_type.value
-          != 'kvm'
-        - action: hide
-          condition: not ('experimental' in version:feature_groups)
-        type: checkbox
-        value: false
-        weight: 11
-      metadata:
-        label: Storage
-        weight: 60
-      objects_ceph:
-        description: Configures RadosGW front end for Ceph RBD. This exposes S3 and
-          Swift API Interfaces. If enabled, this option will prevent Swift from installing.
-        label: Ceph RadosGW for objects (Swift API)
-        restrictions:
-        - settings:storage.images_ceph.value == false
-        type: checkbox
-        value: false
-        weight: 80
-      osd_pool_size:
-        description: Configures the default number of object replicas in Ceph. This
-          number must be equal to or lower than the number of deployed 'Storage - Ceph
-          OSD' nodes.
-        label: Ceph object replication factor
-        regex:
-          error: Invalid number
-          source: ^[1-9]\d*$
-        type: text
-        value: '2'
-        weight: 85
-      volumes_ceph:
-        description: Configures Cinder to store volumes in Ceph RBD images.
-        label: Ceph RBD for volumes (Cinder)
-        restrictions:
-        - settings:storage.volumes_lvm.value == true
-        type: checkbox
-        value: true
-        weight: 20
-      volumes_lvm:
-        description: It is recommended to have at least one Storage - Cinder LVM node.
-        label: Cinder LVM over iSCSI for volumes
-        restrictions:
-        - settings:storage.volumes_ceph.value == true
-        type: checkbox
-        value: false
-        weight: 10
-    syslog:
-      metadata:
-        label: Syslog
-        weight: 50
-      syslog_port:
-        description: Remote syslog port
-        label: Port
-        regex:
-          error: Invalid Syslog port
-          source: ^([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$
-        type: text
-        value: '514'
-        weight: 20
-      syslog_server:
-        description: Remote syslog hostname
-        label: Hostname
-        type: text
-        value: ''
-        weight: 10
-      syslog_transport:
-        label: Syslog transport protocol
-        type: radio
-        value: tcp
-        values:
-        - data: udp
-          description: ''
-          label: UDP
-        - data: tcp
-          description: ''
-          label: TCP
-        weight: 30
-    workloads_collector:
-      enabled:
-        type: hidden
-        value: true
-      metadata:
-        label: Workloads Collector User
-        restrictions:
-        - action: hide
-          condition: 'true'
-        weight: 10
-      password:
-        type: password
-        value: pBkLbu1k
-      tenant:
-        type: text
-        value: services
-      user:
-        type: text
-        value: fuel_stats_user
diff --git a/fuel/deploy/templates/hardware_environment/conf/linux_foundation_lab/pod1/dha.yaml b/fuel/deploy/templates/hardware_environment/conf/linux_foundation_lab/pod1/dha.yaml
deleted file mode 100644 (file)
index 724d6d8..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-title: Deployment Hardware Adapter (DHA)
-# DHA API version supported
-version:
-created:
-comment: Config for LF POD1
-
-# Adapter to use for this definition
-adapter: ipmi
-
-# Node list.
-# Mandatory property is id, all other properties are adapter specific.
-
-nodes:
-- id: 1
-  pxeMac: 00:25:b5:b0:00:ef
-  ipmiIp: 172.30.8.69
-  ipmiUser: admin
-  ipmiPass: octopus
-- id: 2
-  pxeMac: 00:25:b5:b0:00:cf
-  ipmiIp: 172.30.8.78
-  ipmiUser: admin
-  ipmiPass: octopus
-- id: 3
-  pxeMac: 00:25:b5:b0:00:8f
-  ipmiIp: 172.30.8.68
-  ipmiUser: admin
-  ipmiPass: octopus
-- id: 4
-  pxeMac: 00:25:b5:b0:00:6f
-  ipmiIp: 172.30.8.77
-  ipmiUser: admin
-  ipmiPass: octopus
-- id: 5
-  pxeMac: 00:25:b5:b0:00:4f
-  ipmiIp: 172.30.8.67
-  ipmiUser: admin
-  ipmiPass: octopus
-# Adding the Fuel node as node id 6 which may not be correct - please
-# adjust as needed.
-- id: 6
-  libvirtName: fuel-opnfv
-  libvirtTemplate: templates/hardware_environment/vms/fuel.xml
-  isFuel: yes
-  username: root
-  password: r00tme
-
-disks:
-  fuel: 50G
\ No newline at end of file
diff --git a/fuel/deploy/templates/hardware_environment/conf/linux_foundation_lab/pod2/dha.yaml b/fuel/deploy/templates/hardware_environment/conf/linux_foundation_lab/pod2/dha.yaml
deleted file mode 100644 (file)
index cfc9709..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-title: Deployment Hardware Adapter (DHA)
-# DHA API version supported
-version:
-created:
-comment: Config for LF POD2 and Opendaylight
-
-# Adapter to use for this definition
-adapter: ipmi
-
-# Node list.
-# Mandatory property is id, all other properties are adapter specific.
-
-nodes:
-- id: 1
-  pxeMac: 00:25:b5:a0:00:2a
-  ipmiIp: 172.30.8.75
-  ipmiUser: admin
-  ipmiPass: octopus
-- id: 2
-  pxeMac: 00:25:b5:a0:00:3a
-  ipmiIp: 172.30.8.65
-  ipmiUser: admin
-  ipmiPass: octopus
-- id: 3
-  pxeMac: 00:25:b5:a0:00:4a
-  ipmiIp: 172.30.8.74
-  ipmiUser: admin
-  ipmiPass: octopus
-- id: 4
-  pxeMac: 00:25:b5:a0:00:5a
-  ipmiIp: 172.30.8.73
-  ipmiUser: admin
-  ipmiPass: octopus
-- id: 5
-  pxeMac: 00:25:b5:a0:00:6a
-  ipmiIp: 172.30.8.72
-  ipmiUser: admin
-  ipmiPass: octopus
-# Adding the Fuel node as node id 6 which may not be correct - please
-# adjust as needed.
-- id: 6
-  libvirtName: fuel-opnfv
-  libvirtTemplate: templates/hardware_environment/vms/fuel.xml
-  isFuel: yes
-  username: root
-  password: r00tme
-
-disks:
-  fuel: 50G
\ No newline at end of file
diff --git a/fuel/deploy/templates/hardware_environment/conf/opnfv_box/dea.yaml b/fuel/deploy/templates/hardware_environment/conf/opnfv_box/dea.yaml
deleted file mode 100644 (file)
index 0895e4f..0000000
+++ /dev/null
@@ -1,842 +0,0 @@
-title: Deployment Environment Adapter (DEA)
-# DEA API version supported
-version:
-created:
-comment: Config for OPNFV BOX - HA deployment with Ceph
-environment:
-  name: opnfv_virt
-  mode: ha
-  net_segment_type: gre
-wanted_release: Juno on Ubuntu 14.04.1
-nodes:
-- id: 1
-  interfaces: interfaces_1
-  transformations: transformations_1
-  role: ceph-osd,controller
-- id: 2
-  interfaces: interfaces_1
-  transformations: transformations_1
-  role: ceph-osd,controller
-- id: 3
-  interfaces: interfaces_1
-  transformations: transformations_1
-  role: ceph-osd,controller
-- id: 4
-  interfaces: interfaces_1
-  transformations: transformations_2
-  role: ceph-osd,compute
-- id: 5
-  interfaces: interfaces_1
-  transformations: transformations_2
-  role: ceph-osd,compute
-opnfv:
-  hosts:
-  - name:
-    address:
-    fqdn:
-fuel:
-  ADMIN_NETWORK:
-    ipaddress: 10.20.0.2
-    netmask: 255.255.0.0
-    dhcp_pool_start: 10.20.0.3
-    dhcp_pool_end: 10.20.0.254
-  DNS_UPSTREAM: 8.8.8.8
-  DNS_DOMAIN: domain.tld
-  DNS_SEARCH: domain.tld
-  FUEL_ACCESS:
-    user: admin
-    password: admin
-  HOSTNAME: opnfv
-  NTP1: 0.pool.ntp.org
-  NTP2: 1.pool.ntp.org
-  NTP3: 2.pool.ntp.org
-interfaces_1:
-  eth0:
-  - public
-  eth1:
-  - fuelweb_admin
-  - management
-  - storage
-  - private
-transformations_1:
-  transformations:
-  - action: add-br
-    name: br-fw-admin
-  - action: add-br
-    name: br-mgmt
-  - action: add-br
-    name: br-storage
-  - action: add-br
-    name: br-ex
-  - action: add-br
-    name: br-floating
-    provider: ovs
-  - action: add-patch
-    bridges:
-    - br-floating
-    - br-ex
-    mtu: 65000
-    provider: ovs
-  - action: add-br
-    name: br-mesh
-  - action: add-port
-    bridge: br-fw-admin
-    name: eth0
-  - action: add-port
-    bridge: br-mgmt
-    name: eth1.300
-  - action: add-port
-    bridge: br-storage
-    name: eth1.301
-  - action: add-port
-    bridge: br-mesh
-    name: eth1.302
-  - action: add-port
-    bridge: br-ex
-    name: eth0
-transformations_2:
-  transformations:
-  - action: add-br
-    name: br-fw-admin
-  - action: add-br
-    name: br-mgmt
-  - action: add-br
-    name: br-storage
-  - action: add-br
-    name: br-mesh
-  - action: add-port
-    bridge: br-fw-admin
-    name: eth0
-  - action: add-port
-    bridge: br-mgmt
-    name: eth1.300
-  - action: add-port
-    bridge: br-storage
-    name: eth1.301
-  - action: add-port
-    bridge: br-mesh
-    name: eth1.302
-network:
-  networking_parameters:
-    base_mac: fa:16:3e:00:00:00
-    dns_nameservers:
-    - 8.8.4.4
-    - 8.8.8.8
-    floating_ranges:
-    - - 172.30.10.83
-      - 172.30.10.92
-    gre_id_range:
-    - 2
-    - 65535
-    internal_cidr: 192.168.111.0/24
-    internal_gateway: 192.168.111.1
-    net_l23_provider: ovs
-    segmentation_type: gre
-    vlan_range:
-    - 1000
-    - 1030
-  networks:
-  - cidr: 172.30.10.0/24
-    gateway: 172.30.10.1
-    ip_ranges:
-    - - 172.30.10.73
-      - 172.30.10.82
-    meta:
-      cidr: 172.30.10.0/24
-      configurable: true
-      floating_range_var: floating_ranges
-      ip_range:
-      - 172.30.10.73
-      - 172.30.10.82
-      map_priority: 1
-      name: public
-      notation: ip_ranges
-      render_addr_mask: public
-      render_type: null
-      use_gateway: true
-      vips:
-      - haproxy
-      - vrouter
-      vlan_start: null
-    name: public
-    vlan_start: null
-  - cidr: 192.168.0.0/24
-    gateway: null
-    ip_ranges:
-    - - 192.168.0.1
-      - 192.168.0.254
-    meta:
-      cidr: 192.168.0.0/24
-      configurable: true
-      map_priority: 2
-      name: management
-      notation: cidr
-      render_addr_mask: internal
-      render_type: cidr
-      use_gateway: false
-      vips:
-      - haproxy
-      - vrouter
-      vlan_start: 300
-    name: management
-    vlan_start: 300
-  - cidr: 192.168.1.0/24
-    gateway: null
-    ip_ranges:
-    - - 192.168.1.1
-      - 192.168.1.254
-    meta:
-      cidr: 192.168.1.0/24
-      configurable: true
-      map_priority: 2
-      name: storage
-      notation: cidr
-      render_addr_mask: storage
-      render_type: cidr
-      use_gateway: false
-      vlan_start: 301
-    name: storage
-    vlan_start: 301
-  - cidr: 192.168.2.0/24
-    gateway: null
-    ip_ranges:
-    - - 192.168.2.1
-      - 192.168.2.254
-    meta:
-      assign_vip: 192.168.2.0/24
-      configurable: true
-      map_priority: 2
-      name: private
-      notation: cidr
-      render_addr_mask: private
-      render_type: cidr
-      seg_type: gre
-      use_gateway: false
-      vlan_start: 302
-    name: private
-    vlan_start: 302
-  - cidr: 10.20.0.0/24
-    gateway: 10.20.0.2
-    ip_ranges:
-    - - 10.20.0.3
-      - 10.20.255.254
-    meta:
-      configurable: false
-      map_priority: 0
-      notation: ip_ranges
-      render_addr_mask: null
-      render_type: null
-      unmovable: true
-      use_gateway: true
-    name: fuelweb_admin
-    vlan_start: null
-settings:
-  editable:
-    access:
-      email:
-        description: Email address for Administrator
-        label: Email
-        regex:
-          error: Invalid email
-          source: ^\S+@\S+$
-        type: text
-        value: admin@localhost
-        weight: 40
-      metadata:
-        label: Access
-        weight: 10
-      password:
-        description: Password for Administrator
-        label: Password
-        regex:
-          error: Empty password
-          source: \S
-        type: password
-        value: admin
-        weight: 20
-      tenant:
-        description: Tenant (project) name for Administrator
-        label: Tenant
-        regex:
-          error: Invalid tenant name
-          source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$)(?!.*
-            +.*$).+
-        type: text
-        value: admin
-        weight: 30
-      user:
-        description: Username for Administrator
-        label: Username
-        regex:
-          error: Invalid username
-          source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$)(?!.*
-            +.*$).+
-        type: text
-        value: admin
-        weight: 10
-    additional_components:
-      ceilometer:
-        description: If selected, Ceilometer component will be installed
-        label: Install Ceilometer
-        type: checkbox
-        value: false
-        weight: 40
-      heat:
-        description: ''
-        label: ''
-        type: hidden
-        value: true
-        weight: 30
-      metadata:
-        label: Additional Components
-        weight: 20
-      mongo:
-        description: If selected, you can use external Mongo DB as ceilometer backend
-        label: Use external Mongo DB
-        restrictions:
-        - settings:additional_components.ceilometer.value == false
-        type: checkbox
-        value: false
-        weight: 40
-      murano:
-        description: If selected, Murano component will be installed
-        label: Install Murano
-        restrictions:
-        - cluster:net_provider != 'neutron'
-        type: checkbox
-        value: false
-        weight: 20
-      sahara:
-        description: If selected, Sahara component will be installed
-        label: Install Sahara
-        type: checkbox
-        value: false
-        weight: 10
-    common:
-      auth_key:
-        description: Public key(s) to include in authorized_keys on deployed nodes
-        label: Public Key
-        type: textarea
-        value: ''
-        weight: 70
-      auto_assign_floating_ip:
-        description: If selected, OpenStack will automatically assign a floating IP
-          to a new instance
-        label: Auto assign floating IP
-        restrictions:
-        - action: hide
-          condition: cluster:net_provider == 'neutron'
-        type: checkbox
-        value: false
-        weight: 40
-      debug:
-        description: Debug logging mode provides more information, but requires more
-          disk space.
-        label: OpenStack debug logging
-        type: checkbox
-        value: false
-        weight: 20
-      libvirt_type:
-        label: Hypervisor type
-        type: radio
-        value: kvm
-        values:
-        - data: kvm
-          description: Choose this type of hypervisor if you run OpenStack on hardware
-          label: KVM
-        - data: qemu
-          description: Choose this type of hypervisor if you run OpenStack on virtual
-            hosts.
-          label: QEMU
-        weight: 30
-      metadata:
-        label: Common
-        weight: 30
-      nova_quota:
-        description: Quotas are used to limit CPU and memory usage for tenants. Enabling
-          quotas will increase load on the Nova database.
-        label: Nova quotas
-        type: checkbox
-        value: false
-        weight: 25
-      puppet_debug:
-        description: Debug puppet logging mode provides more information, but requires
-          more disk space.
-        label: Puppet debug logging
-        type: checkbox
-        value: true
-        weight: 20
-      resume_guests_state_on_host_boot:
-        description: Whether to resume previous guests state when the host reboots.
-          If enabled, this option causes guests assigned to the host to resume their
-          previous state. If the guest was running a restart will be attempted when
-          nova-compute starts. If the guest was not running previously, a restart will
-          not be attempted.
-        label: Resume guests state on host boot
-        type: checkbox
-        value: true
-        weight: 60
-      use_cow_images:
-        description: For most cases you will want qcow format. If it's disabled, raw
-          image format will be used to run VMs. OpenStack with raw format currently
-          does not support snapshotting.
-        label: Use qcow format for images
-        type: checkbox
-        value: true
-        weight: 50
-      use_vcenter:
-        type: hidden
-        value: false
-        weight: 30
-    corosync:
-      group:
-        description: ''
-        label: Group
-        type: text
-        value: 226.94.1.1
-        weight: 10
-      metadata:
-        label: Corosync
-        restrictions:
-        - action: hide
-          condition: 'true'
-        weight: 50
-      port:
-        description: ''
-        label: Port
-        type: text
-        value: '12000'
-        weight: 20
-      verified:
-        description: Set True only if multicast is configured correctly on router.
-        label: Need to pass network verification.
-        type: checkbox
-        value: false
-        weight: 10
-    external_dns:
-      dns_list:
-        description: List of upstream DNS servers, separated by comma
-        label: DNS list
-        regex:
-          error: Invalid IP address list
-          source: ^\*$|^(?:\d|1?\d\d|2[0-4]\d|25[0-5])(?:\.(?:\d|1?\d\d|2[0-4]\d|25[0-5])){3}(?:\s*,\s*(?:\d|1?\d\d|2[0-4]\d|25[0-5])(?:\.(?:\d|1?\d\d|2[0-4]\d|25[0-5])){3})*$
-        type: text
-        value: 8.8.4.4, 8.8.8.8
-        weight: 10
-      metadata:
-        label: Host OS DNS Servers
-        weight: 90
-    external_mongo:
-      hosts_ip:
-        description: IP Addresses of MongoDB. Use comma to split IPs
-        label: MongoDB hosts IP
-        regex:
-          error: Invalid hosts ip sequence
-          source: ^(((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?),)*((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$
-        type: text
-        value: ''
-        weight: 30
-      metadata:
-        label: External MongoDB
-        restrictions:
-        - action: hide
-          condition: settings:additional_components.mongo.value == false
-        weight: 20
-      mongo_db_name:
-        description: Mongo database name
-        label: Database name
-        regex:
-          error: Invalid database name
-          source: ^\w+$
-        type: text
-        value: ceilometer
-        weight: 30
-      mongo_password:
-        description: Mongo database password
-        label: Password
-        regex:
-          error: Password contains spaces
-          source: ^\S*$
-        type: password
-        value: ceilometer
-        weight: 30
-      mongo_replset:
-        description: Name for Mongo replication set
-        label: Replset
-        type: text
-        value: ''
-        weight: 30
-      mongo_user:
-        description: Mongo database username
-        label: Username
-        regex:
-          error: Empty username
-          source: ^\w+$
-        type: text
-        value: ceilometer
-        weight: 30
-    external_ntp:
-      metadata:
-        label: Host OS NTP Servers
-        weight: 100
-      ntp_list:
-        description: List of upstream NTP servers, separated by comma
-        label: NTP server list
-        regex:
-          error: Invalid NTP server list
-          source: ^\s*(?:(?:\w+(?:-+\w+)*\.)+[a-z]+|\d{1,3}(?:\.\d{1,3}){3})\s*(?:,\s*(?:(?:\w+(?:-+\w+)*\.)+[a-z]+|\d{1,3}(\.\d{1,3}){3})\s*)*$
-        type: text
-        value: 0.pool.ntp.org, 1.pool.ntp.org
-        weight: 10
-    kernel_params:
-      kernel:
-        description: Default kernel parameters
-        label: Initial parameters
-        type: text
-        value: console=ttyS0,9600 console=tty0 net.ifnames=0 biosdevname=0 rootdelay=90
-          nomodeset
-        weight: 45
-      metadata:
-        label: Kernel parameters
-        weight: 40
-    murano_settings:
-      metadata:
-        label: Murano Settings
-        restrictions:
-        - action: hide
-          condition: settings:additional_components.murano.value == false
-        weight: 20
-      murano_repo_url:
-        description: ''
-        label: Murano Repository URL
-        type: text
-        value: http://storage.apps.openstack.org/
-        weight: 10
-    neutron_mellanox:
-      metadata:
-        enabled: true
-        label: Mellanox Neutron components
-        restrictions:
-        - action: hide
-          condition: not ('experimental' in version:feature_groups)
-        toggleable: false
-        weight: 50
-      plugin:
-        label: Mellanox drivers and SR-IOV plugin
-        type: radio
-        value: disabled
-        values:
-        - data: disabled
-          description: If selected, Mellanox drivers, Neutron and Cinder plugin will
-            not be installed.
-          label: Mellanox drivers and plugins disabled
-          restrictions:
-          - settings:storage.iser.value == true
-        - data: drivers_only
-          description: If selected, Mellanox Ethernet drivers will be installed to support
-            networking over Mellanox NIC. Mellanox Neutron plugin will not be installed.
-          label: Install only Mellanox drivers
-          restrictions:
-          - settings:common.libvirt_type.value != 'kvm'
-        - data: ethernet
-          description: If selected, both Mellanox Ethernet drivers and Mellanox network
-            acceleration (Neutron) plugin will be installed.
-          label: Install Mellanox drivers and SR-IOV plugin
-          restrictions:
-          - settings:common.libvirt_type.value != 'kvm' or not (cluster:net_provider
-            == 'neutron' and networking_parameters:segmentation_type == 'vlan')
-        weight: 60
-      vf_num:
-        description: Note that one virtual function will be reserved to the storage
-          network, in case of choosing iSER.
-        label: Number of virtual NICs
-        restrictions:
-        - settings:neutron_mellanox.plugin.value != 'ethernet'
-        type: text
-        value: '16'
-        weight: 70
-    opendaylight:
-      metadata:
-        enabled: true
-        label: OpenDaylight plugin
-        plugin_id: 1
-        restrictions:
-        - cluster:net_provider != 'neutron': Only neutron is supported by OpenDaylight
-        toggleable: true
-        weight: 70
-      rest_api_port:
-        description: Port on which ODL REST API will be available.
-        label: Port number
-        regex:
-          error: Invalid port number
-          source: ^([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$
-        type: text
-        value: '8282'
-        weight: 40
-      use_vxlan:
-        description: Configure neutron to use VXLAN tunneling
-        label: Use vxlan
-        restrictions:
-        - action: disable
-          condition: networking_parameters:segmentation_type == 'vlan'
-          message: Neutron with GRE segmentation required
-        type: checkbox
-        value: true
-        weight: 20
-      vni_range_end:
-        description: VXLAN VNI IDs range end
-        label: VNI range end
-        regex:
-          error: Invalid ID number
-          source: ^\d+$
-        restrictions:
-        - action: hide
-          condition: networking_parameters:segmentation_type == 'vlan'
-        type: text
-        value: '10000'
-        weight: 31
-      vni_range_start:
-        description: VXLAN VNI IDs range start
-        label: VNI range start
-        regex:
-          error: Invalid ID number
-          source: ^\d+$
-        restrictions:
-        - action: hide
-          condition: networking_parameters:segmentation_type == 'vlan'
-        type: text
-        value: '10'
-        weight: 30
-    provision:
-      metadata:
-        label: Provision
-        weight: 80
-      method:
-        description: Which provision method to use for this cluster.
-        label: Provision method
-        type: radio
-        value: image
-        values:
-        - data: image
-          description: Copying pre-built images on a disk.
-          label: Image
-        - data: cobbler
-          description: Install from scratch using anaconda or debian-installer.
-          label: (DEPRECATED) Classic (use anaconda or debian-installer)
-    public_network_assignment:
-      assign_to_all_nodes:
-        description: When disabled, public network will be assigned to controllers only
-        label: Assign public network to all nodes
-        type: checkbox
-        value: false
-        weight: 10
-      metadata:
-        label: Public network assignment
-        restrictions:
-        - action: hide
-          condition: cluster:net_provider != 'neutron'
-        weight: 50
-    repo_setup:
-      metadata:
-        always_editable: true
-        label: Repositories
-        weight: 50
-      repos:
-        description: 'Please note: the first repository will be considered the operating
-          system mirror that will be used during node provisioning.
-
-          To create a local repository mirror on the Fuel master node, please follow
-          the instructions provided by running "fuel-createmirror --help" on the Fuel
-          master node.
-
-          Please make sure your Fuel master node has Internet access to the repository
-          before attempting to create a mirror.
-
-          For more details, please refer to the documentation (https://docs.mirantis.com/openstack/fuel/fuel-6.1/operations.html#external-ubuntu-ops).
-
-          '
-        extra_priority: null
-        type: custom_repo_configuration
-        value:
-        - name: ubuntu
-          priority: null
-          section: main
-          suite: trusty
-          type: deb
-          uri: http://10.20.0.2:8080/ubuntu-part
-        - name: ubuntu-updates
-          priority: null
-          section: main
-          suite: trusty
-          type: deb
-          uri: http://10.20.0.2:8080/ubuntu-part
-        - name: ubuntu-security
-          priority: null
-          section: main
-          suite: trusty
-          type: deb
-          uri: http://10.20.0.2:8080/ubuntu-part
-        - name: mos
-          priority: 1050
-          section: main restricted
-          suite: mos6.1
-          type: deb
-          uri: http://10.20.0.2:8080/2014.2.2-6.1/ubuntu/x86_64
-        - name: mos-updates
-          priority: 1050
-          section: main restricted
-          suite: mos6.1-updates
-          type: deb
-          uri: http://10.20.0.2:8080/mos-ubuntu
-        - name: mos-security
-          priority: 1050
-          section: main restricted
-          suite: mos6.1-security
-          type: deb
-          uri: http://10.20.0.2:8080/mos-ubuntu
-        - name: mos-holdback
-          priority: 1100
-          section: main restricted
-          suite: mos6.1-holdback
-          type: deb
-          uri: http://10.20.0.2:8080/mos-ubuntu
-        - name: Auxiliary
-          priority: 1150
-          section: main restricted
-          suite: auxiliary
-          type: deb
-          uri: http://10.20.0.2:8080/2014.2.2-6.1/ubuntu/auxiliary
-    storage:
-      ephemeral_ceph:
-        description: Configures Nova to store ephemeral volumes in RBD. This works best
-          if Ceph is enabled for volumes and images, too. Enables live migration of
-          all types of Ceph backed VMs (without this option, live migration will only
-          work with VMs launched from Cinder volumes).
-        label: Ceph RBD for ephemeral volumes (Nova)
-        type: checkbox
-        value: true
-        weight: 75
-      images_ceph:
-        description: Configures Glance to use the Ceph RBD backend to store images.
-          If enabled, this option will prevent Swift from installing.
-        label: Ceph RBD for images (Glance)
-        restrictions:
-        - settings:storage.images_vcenter.value == true: Only one Glance backend could
-            be selected.
-        type: checkbox
-        value: true
-        weight: 30
-      images_vcenter:
-        description: Configures Glance to use the vCenter/ESXi backend to store images.
-          If enabled, this option will prevent Swift from installing.
-        label: VMWare vCenter/ESXi datastore for images (Glance)
-        restrictions:
-        - action: hide
-          condition: settings:common.use_vcenter.value != true
-        - condition: settings:storage.images_ceph.value == true
-          message: Only one Glance backend could be selected.
-        type: checkbox
-        value: false
-        weight: 35
-      iser:
-        description: 'High performance block storage: Cinder volumes over iSER protocol
-          (iSCSI over RDMA). This feature requires SR-IOV capabilities in the NIC, and
-          will use a dedicated virtual function for the storage network.'
-        label: iSER protocol for volumes (Cinder)
-        restrictions:
-        - settings:storage.volumes_lvm.value != true or settings:common.libvirt_type.value
-          != 'kvm'
-        - action: hide
-          condition: not ('experimental' in version:feature_groups)
-        type: checkbox
-        value: false
-        weight: 11
-      metadata:
-        label: Storage
-        weight: 60
-      objects_ceph:
-        description: Configures RadosGW front end for Ceph RBD. This exposes S3 and
-          Swift API Interfaces. If enabled, this option will prevent Swift from installing.
-        label: Ceph RadosGW for objects (Swift API)
-        restrictions:
-        - settings:storage.images_ceph.value == false
-        type: checkbox
-        value: false
-        weight: 80
-      osd_pool_size:
-        description: Configures the default number of object replicas in Ceph. This
-          number must be equal to or lower than the number of deployed 'Storage - Ceph
-          OSD' nodes.
-        label: Ceph object replication factor
-        regex:
-          error: Invalid number
-          source: ^[1-9]\d*$
-        type: text
-        value: '2'
-        weight: 85
-      volumes_ceph:
-        description: Configures Cinder to store volumes in Ceph RBD images.
-        label: Ceph RBD for volumes (Cinder)
-        restrictions:
-        - settings:storage.volumes_lvm.value == true
-        type: checkbox
-        value: true
-        weight: 20
-      volumes_lvm:
-        description: It is recommended to have at least one Storage - Cinder LVM node.
-        label: Cinder LVM over iSCSI for volumes
-        restrictions:
-        - settings:storage.volumes_ceph.value == true
-        type: checkbox
-        value: false
-        weight: 10
-    syslog:
-      metadata:
-        label: Syslog
-        weight: 50
-      syslog_port:
-        description: Remote syslog port
-        label: Port
-        regex:
-          error: Invalid Syslog port
-          source: ^([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$
-        type: text
-        value: '514'
-        weight: 20
-      syslog_server:
-        description: Remote syslog hostname
-        label: Hostname
-        type: text
-        value: ''
-        weight: 10
-      syslog_transport:
-        label: Syslog transport protocol
-        type: radio
-        value: tcp
-        values:
-        - data: udp
-          description: ''
-          label: UDP
-        - data: tcp
-          description: ''
-          label: TCP
-        weight: 30
-    workloads_collector:
-      enabled:
-        type: hidden
-        value: true
-      metadata:
-        label: Workloads Collector User
-        restrictions:
-        - action: hide
-          condition: 'true'
-        weight: 10
-      password:
-        type: password
-        value: pBkLbu1k
-      tenant:
-        type: text
-        value: services
-      user:
-        type: text
-        value: fuel_stats_user
diff --git a/fuel/deploy/templates/hardware_environment/conf/opnfv_box/dha.yaml b/fuel/deploy/templates/hardware_environment/conf/opnfv_box/dha.yaml
deleted file mode 100644 (file)
index c2624f2..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-title: Deployment Hardware Adapter (DHA)
-# DHA API version supported
-version:
-created:
-comment: Config for OPNFV BOX
-
-# Adapter to use for this definition
-adapter: ipmi
-
-# Node list.
-# Mandatory property is id, all other properties are adapter specific.
-
-nodes:
-- id: 1
-  pxeMac: b8:ae:ed:76:4d:a4
-  ipmiIp: <ipmi_ip>
-  ipmiUser: <username>
-  ipmiPass: <password>
-- id: 2
-  pxeMac: b8:ae:ed:76:4d:94
-  ipmiIp: <ipmi_ip>
-  ipmiUser: <username>
-  ipmiPass: <password>
-- id: 3
-  pxeMac: b8:ae:ed:76:4c:eb
-  ipmiIp: <ipmi_ip>
-  ipmiUser: <username>
-  ipmiPass: <password>
-- id: 4
-  pxeMac: b8:ae:ed:76:37:62
-  ipmiIp: <ipmi_ip>
-  ipmiUser: <username>
-  ipmiPass: <password>
-- id: 5
-  pxeMac: b8:ae:ed:76:4d:95
-  ipmiIp: <ipmi_ip>
-  ipmiUser: <username>
-  ipmiPass: <password>
-# Adding the Fuel node as node id 6 which may not be correct - please
-# adjust as needed.
-- id: 6
-  libvirtName: fuel-opnfv
-  libvirtTemplate: templates/hardware_environment/vms/fuel.xml
-  isFuel: yes
-  username: root
-  password: r00tme
-
-disks:
-  fuel: 50G
\ No newline at end of file
diff --git a/fuel/deploy/templates/virtual_environment/conf/dea.yaml b/fuel/deploy/templates/virtual_environment/conf/dea.yaml
deleted file mode 100644 (file)
index bc9a1f9..0000000
+++ /dev/null
@@ -1,838 +0,0 @@
-title: Deployment Environment Adapter (DEA)
-# DEA API version supported
-version:
-created:
-comment: Config for Virtual Environment - HA deployment with Ceph and Opendaylight
-environment:
-  name: opnfv_virt
-  mode: ha
-  net_segment_type: gre
-wanted_release: Juno on Ubuntu 14.04.1
-nodes:
-- id: 1
-  interfaces: interfaces_1
-  transformations: transformations_1
-  role: ceph-osd,controller
-- id: 2
-  interfaces: interfaces_1
-  transformations: transformations_2
-  role: ceph-osd,compute
-- id: 3
-  interfaces: interfaces_1
-  transformations: transformations_2
-  role: ceph-osd,compute
-- id: 4
-  interfaces: interfaces_1
-  transformations: transformations_2
-  role: ceph-osd,compute
-fuel:
-  ADMIN_NETWORK:
-    ipaddress: 10.20.0.2
-    netmask: 255.255.0.0
-    dhcp_pool_start: 10.20.0.3
-    dhcp_pool_end: 10.20.0.254
-  DNS_UPSTREAM: 10.118.32.193
-  DNS_DOMAIN: opnfvericsson.ca
-  DNS_SEARCH: opnfvericsson.ca
-  FUEL_ACCESS:
-    user: admin
-    password: admin
-  HOSTNAME: opnfv_virt
-  NTP1: 10.118.34.219
-  NTP2:
-  NTP3:
-interfaces_1:
-  eth0:
-  - fuelweb_admin
-  - management
-  eth1:
-  - storage
-  eth2:
-  - private
-  eth3:
-  - public
-transformations_1:
-  transformations:
-  - action: add-br
-    name: br-fw-admin
-  - action: add-br
-    name: br-mgmt
-  - action: add-br
-    name: br-storage
-  - action: add-br
-    name: br-ex
-  - action: add-br
-    name: br-floating
-    provider: ovs
-  - action: add-patch
-    bridges:
-    - br-floating
-    - br-ex
-    mtu: 65000
-    provider: ovs
-  - action: add-br
-    name: br-mesh
-  - action: add-port
-    bridge: br-fw-admin
-    name: eth0
-  - action: add-port
-    bridge: br-mgmt
-    name: eth0.101
-  - action: add-port
-    bridge: br-storage
-    name: eth1.102
-  - action: add-port
-    bridge: br-mesh
-    name: eth2.103
-  - action: add-port
-    bridge: br-ex
-    name: eth3
-transformations_2:
-  transformations:
-  - action: add-br
-    name: br-fw-admin
-  - action: add-br
-    name: br-mgmt
-  - action: add-br
-    name: br-storage
-  - action: add-br
-    name: br-mesh
-  - action: add-port
-    bridge: br-fw-admin
-    name: eth0
-  - action: add-port
-    bridge: br-mgmt
-    name: eth0.101
-  - action: add-port
-    bridge: br-storage
-    name: eth1.102
-  - action: add-port
-    bridge: br-mesh
-    name: eth2.103
-network:
-  management_vip: 192.168.0.2
-  management_vrouter_vip: 192.168.0.3
-  networking_parameters:
-    base_mac: fa:16:3e:00:00:00
-    dns_nameservers:
-    - 10.118.32.193
-    floating_ranges:
-    - - 172.16.0.130
-      - 172.16.0.254
-    gre_id_range:
-    - 2
-    - 65535
-    internal_cidr: 192.168.111.0/24
-    internal_gateway: 192.168.111.1
-    net_l23_provider: ovs
-    segmentation_type: gre
-    vlan_range:
-    - 1000
-    - 1030
-  networks:
-  - cidr: 172.16.0.0/24
-    gateway: 172.16.0.1
-    ip_ranges:
-    - - 172.16.0.2
-      - 172.16.0.126
-    meta:
-      cidr: 172.16.0.0/24
-      configurable: true
-      floating_range_var: floating_ranges
-      ip_range:
-      - 172.16.0.2
-      - 172.16.0.126
-      map_priority: 1
-      name: public
-      notation: ip_ranges
-      render_addr_mask: public
-      render_type: null
-      use_gateway: true
-      vips:
-      - haproxy
-      - vrouter
-      vlan_start: null
-    name: public
-    vlan_start: null
-  - cidr: 192.168.0.0/24
-    gateway: null
-    ip_ranges:
-    - - 192.168.0.2
-      - 192.168.0.254
-    meta:
-      cidr: 192.168.0.0/24
-      configurable: true
-      map_priority: 2
-      name: management
-      notation: cidr
-      render_addr_mask: internal
-      render_type: cidr
-      use_gateway: false
-      vips:
-      - haproxy
-      - vrouter
-      vlan_start: 101
-    name: management
-    vlan_start: 101
-  - cidr: 192.168.1.0/24
-    gateway: null
-    ip_ranges:
-    - - 192.168.1.1
-      - 192.168.1.254
-    meta:
-      cidr: 192.168.1.0/24
-      configurable: true
-      map_priority: 2
-      name: storage
-      notation: cidr
-      render_addr_mask: storage
-      render_type: cidr
-      use_gateway: false
-      vlan_start: 102
-    name: storage
-    vlan_start: 102
-  - cidr: 192.168.2.0/24
-    gateway: null
-    ip_ranges:
-    - - 192.168.2.2
-      - 192.168.2.254
-    meta:
-      cidr: 192.168.2.0/24
-      configurable: true
-      map_priority: 2
-      name: private
-      notation: cidr
-      render_addr_mask: private
-      render_type: cidr
-      seg_type: gre
-      use_gateway: false
-      vlan_start: 103
-    name: private
-    vlan_start: 103
-  - cidr: 10.20.0.0/16
-    gateway: 10.20.0.2
-    ip_ranges:
-    - - 10.20.0.3
-      - 10.20.0.254
-    meta:
-      configurable: false
-      map_priority: 0
-      notation: ip_ranges
-      render_addr_mask: null
-      render_type: null
-      unmovable: true
-      use_gateway: true
-    name: fuelweb_admin
-    vlan_start: null
-  public_vip: 172.16.0.2
-  public_vrouter_vip: 172.16.0.3
-settings:
-  editable:
-    access:
-      email:
-        description: Email address for Administrator
-        label: Email
-        regex:
-          error: Invalid email
-          source: ^\S+@\S+$
-        type: text
-        value: admin@localhost
-        weight: 40
-      metadata:
-        label: Access
-        weight: 10
-      password:
-        description: Password for Administrator
-        label: Password
-        regex:
-          error: Empty password
-          source: \S
-        type: password
-        value: admin
-        weight: 20
-      tenant:
-        description: Tenant (project) name for Administrator
-        label: Tenant
-        regex:
-          error: Invalid tenant name
-          source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$)(?!.*
-            +.*$).+
-        type: text
-        value: admin
-        weight: 30
-      user:
-        description: Username for Administrator
-        label: Username
-        regex:
-          error: Invalid username
-          source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$)(?!.*
-            +.*$).+
-        type: text
-        value: admin
-        weight: 10
-    additional_components:
-      ceilometer:
-        description: If selected, Ceilometer component will be installed
-        label: Install Ceilometer
-        type: checkbox
-        value: false
-        weight: 40
-      heat:
-        description: ''
-        label: ''
-        type: hidden
-        value: true
-        weight: 30
-      metadata:
-        label: Additional Components
-        weight: 20
-      mongo:
-        description: If selected, you can use external Mongo DB as ceilometer backend
-        label: Use external Mongo DB
-        restrictions:
-        - settings:additional_components.ceilometer.value == false
-        type: checkbox
-        value: false
-        weight: 40
-      murano:
-        description: If selected, Murano component will be installed
-        label: Install Murano
-        restrictions:
-        - cluster:net_provider != 'neutron'
-        type: checkbox
-        value: false
-        weight: 20
-      sahara:
-        description: If selected, Sahara component will be installed
-        label: Install Sahara
-        type: checkbox
-        value: false
-        weight: 10
-    common:
-      auth_key:
-        description: Public key(s) to include in authorized_keys on deployed nodes
-        label: Public Key
-        type: textarea
-        value: ''
-        weight: 70
-      auto_assign_floating_ip:
-        description: If selected, OpenStack will automatically assign a floating IP
-          to a new instance
-        label: Auto assign floating IP
-        restrictions:
-        - action: hide
-          condition: cluster:net_provider == 'neutron'
-        type: checkbox
-        value: false
-        weight: 40
-      debug:
-        description: Debug logging mode provides more information, but requires more
-          disk space.
-        label: OpenStack debug logging
-        type: checkbox
-        value: false
-        weight: 20
-      libvirt_type:
-        label: Hypervisor type
-        type: radio
-        value: qemu
-        values:
-        - data: kvm
-          description: Choose this type of hypervisor if you run OpenStack on hardware
-          label: KVM
-        - data: qemu
-          description: Choose this type of hypervisor if you run OpenStack on virtual
-            hosts.
-          label: QEMU
-        weight: 30
-      metadata:
-        label: Common
-        weight: 30
-      nova_quota:
-        description: Quotas are used to limit CPU and memory usage for tenants. Enabling
-          quotas will increase load on the Nova database.
-        label: Nova quotas
-        type: checkbox
-        value: false
-        weight: 25
-      puppet_debug:
-        description: Debug puppet logging mode provides more information, but requires
-          more disk space.
-        label: Puppet debug logging
-        type: checkbox
-        value: true
-        weight: 20
-      resume_guests_state_on_host_boot:
-        description: Whether to resume previous guests state when the host reboots.
-          If enabled, this option causes guests assigned to the host to resume their
-          previous state. If the guest was running a restart will be attempted when
-          nova-compute starts. If the guest was not running previously, a restart will
-          not be attempted.
-        label: Resume guests state on host boot
-        type: checkbox
-        value: true
-        weight: 60
-      use_cow_images:
-        description: For most cases you will want qcow format. If it's disabled, raw
-          image format will be used to run VMs. OpenStack with raw format currently
-          does not support snapshotting.
-        label: Use qcow format for images
-        type: checkbox
-        value: true
-        weight: 50
-      use_vcenter:
-        type: hidden
-        value: false
-        weight: 30
-    corosync:
-      group:
-        description: ''
-        label: Group
-        type: text
-        value: 226.94.1.1
-        weight: 10
-      metadata:
-        label: Corosync
-        restrictions:
-        - action: hide
-          condition: 'true'
-        weight: 50
-      port:
-        description: ''
-        label: Port
-        type: text
-        value: '12000'
-        weight: 20
-      verified:
-        description: Set True only if multicast is configured correctly on router.
-        label: Need to pass network verification.
-        type: checkbox
-        value: false
-        weight: 10
-    external_dns:
-      dns_list:
-        description: List of upstream DNS servers, separated by comma
-        label: DNS list
-        regex:
-          error: Invalid IP address list
-          source: ^\*$|^(?:\d|1?\d\d|2[0-4]\d|25[0-5])(?:\.(?:\d|1?\d\d|2[0-4]\d|25[0-5])){3}(?:\s*,\s*(?:\d|1?\d\d|2[0-4]\d|25[0-5])(?:\.(?:\d|1?\d\d|2[0-4]\d|25[0-5])){3})*$
-        type: text
-        value: 10.118.32.193
-        weight: 10
-      metadata:
-        label: Host OS DNS Servers
-        weight: 90
-    external_mongo:
-      hosts_ip:
-        description: IP Addresses of MongoDB. Use comma to split IPs
-        label: MongoDB hosts IP
-        regex:
-          error: Invalid hosts ip sequence
-          source: ^(((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?),)*((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$
-        type: text
-        value: ''
-        weight: 30
-      metadata:
-        label: External MongoDB
-        restrictions:
-        - action: hide
-          condition: settings:additional_components.mongo.value == false
-        weight: 20
-      mongo_db_name:
-        description: Mongo database name
-        label: Database name
-        regex:
-          error: Invalid database name
-          source: ^\w+$
-        type: text
-        value: ceilometer
-        weight: 30
-      mongo_password:
-        description: Mongo database password
-        label: Password
-        regex:
-          error: Password contains spaces
-          source: ^\S*$
-        type: password
-        value: ceilometer
-        weight: 30
-      mongo_replset:
-        description: Name for Mongo replication set
-        label: Replset
-        type: text
-        value: ''
-        weight: 30
-      mongo_user:
-        description: Mongo database username
-        label: Username
-        regex:
-          error: Empty username
-          source: ^\w+$
-        type: text
-        value: ceilometer
-        weight: 30
-    external_ntp:
-      metadata:
-        label: Host OS NTP Servers
-        weight: 100
-      ntp_list:
-        description: List of upstream NTP servers, separated by comma
-        label: NTP server list
-        regex:
-          error: Invalid NTP server list
-          source: ^\s*(?:(?:\w+(?:-+\w+)*\.)+[a-z]+|\d{1,3}(?:\.\d{1,3}){3})\s*(?:,\s*(?:(?:\w+(?:-+\w+)*\.)+[a-z]+|\d{1,3}(\.\d{1,3}){3})\s*)*$
-        type: text
-        value: 10.118.34.219
-        weight: 10
-    kernel_params:
-      kernel:
-        description: Default kernel parameters
-        label: Initial parameters
-        type: text
-        value: console=ttyS0,9600 console=tty0 net.ifnames=0 biosdevname=0 rootdelay=90
-          nomodeset
-        weight: 45
-      metadata:
-        label: Kernel parameters
-        weight: 40
-    murano_settings:
-      metadata:
-        label: Murano Settings
-        restrictions:
-        - action: hide
-          condition: settings:additional_components.murano.value == false
-        weight: 20
-      murano_repo_url:
-        description: ''
-        label: Murano Repository URL
-        type: text
-        value: http://storage.apps.openstack.org/
-        weight: 10
-    neutron_mellanox:
-      metadata:
-        enabled: true
-        label: Mellanox Neutron components
-        restrictions:
-        - action: hide
-          condition: not ('experimental' in version:feature_groups)
-        toggleable: false
-        weight: 50
-      plugin:
-        label: Mellanox drivers and SR-IOV plugin
-        type: radio
-        value: disabled
-        values:
-        - data: disabled
-          description: If selected, Mellanox drivers, Neutron and Cinder plugin will
-            not be installed.
-          label: Mellanox drivers and plugins disabled
-          restrictions:
-          - settings:storage.iser.value == true
-        - data: drivers_only
-          description: If selected, Mellanox Ethernet drivers will be installed to support
-            networking over Mellanox NIC. Mellanox Neutron plugin will not be installed.
-          label: Install only Mellanox drivers
-          restrictions:
-          - settings:common.libvirt_type.value != 'kvm'
-        - data: ethernet
-          description: If selected, both Mellanox Ethernet drivers and Mellanox network
-            acceleration (Neutron) plugin will be installed.
-          label: Install Mellanox drivers and SR-IOV plugin
-          restrictions:
-          - settings:common.libvirt_type.value != 'kvm' or not (cluster:net_provider
-            == 'neutron' and networking_parameters:segmentation_type == 'vlan')
-        weight: 60
-      vf_num:
-        description: Note that one virtual function will be reserved to the storage
-          network, in case of choosing iSER.
-        label: Number of virtual NICs
-        restrictions:
-        - settings:neutron_mellanox.plugin.value != 'ethernet'
-        type: text
-        value: '16'
-        weight: 70
-    opendaylight:
-      metadata:
-        enabled: true
-        label: OpenDaylight plugin
-        plugin_id: 1
-        restrictions:
-        - cluster:net_provider != 'neutron': Only neutron is supported by OpenDaylight
-        toggleable: true
-        weight: 70
-      rest_api_port:
-        description: Port on which ODL REST API will be available.
-        label: Port number
-        regex:
-          error: Invalid port number
-          source: ^([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$
-        type: text
-        value: '8282'
-        weight: 40
-      use_vxlan:
-        description: Configure neutron to use VXLAN tunneling
-        label: Use vxlan
-        restrictions:
-        - action: disable
-          condition: networking_parameters:segmentation_type == 'vlan'
-          message: Neutron with GRE segmentation required
-        type: checkbox
-        value: true
-        weight: 20
-      vni_range_end:
-        description: VXLAN VNI IDs range end
-        label: VNI range end
-        regex:
-          error: Invalid ID number
-          source: ^\d+$
-        restrictions:
-        - action: hide
-          condition: networking_parameters:segmentation_type == 'vlan'
-        type: text
-        value: '10000'
-        weight: 31
-      vni_range_start:
-        description: VXLAN VNI IDs range start
-        label: VNI range start
-        regex:
-          error: Invalid ID number
-          source: ^\d+$
-        restrictions:
-        - action: hide
-          condition: networking_parameters:segmentation_type == 'vlan'
-        type: text
-        value: '10'
-        weight: 30
-    provision:
-      metadata:
-        label: Provision
-        weight: 80
-      method:
-        description: Which provision method to use for this cluster.
-        label: Provision method
-        type: radio
-        value: image
-        values:
-        - data: image
-          description: Copying pre-built images on a disk.
-          label: Image
-        - data: cobbler
-          description: Install from scratch using anaconda or debian-installer.
-          label: (DEPRECATED) Classic (use anaconda or debian-installer)
-    public_network_assignment:
-      assign_to_all_nodes:
-        description: When disabled, public network will be assigned to controllers only
-        label: Assign public network to all nodes
-        type: checkbox
-        value: false
-        weight: 10
-      metadata:
-        label: Public network assignment
-        restrictions:
-        - action: hide
-          condition: cluster:net_provider != 'neutron'
-        weight: 50
-    repo_setup:
-      metadata:
-        always_editable: true
-        label: Repositories
-        weight: 50
-      repos:
-        description: 'Please note: the first repository will be considered the operating
-          system mirror that will be used during node provisioning.
-
-          To create a local repository mirror on the Fuel master node, please follow
-          the instructions provided by running "fuel-createmirror --help" on the Fuel
-          master node.
-
-          Please make sure your Fuel master node has Internet access to the repository
-          before attempting to create a mirror.
-
-          For more details, please refer to the documentation (https://docs.mirantis.com/openstack/fuel/fuel-6.1/operations.html#external-ubuntu-ops).
-
-          '
-        extra_priority: null
-        type: custom_repo_configuration
-        value:
-        - name: ubuntu
-          priority: null
-          section: main
-          suite: trusty
-          type: deb
-          uri: http://10.20.0.2:8080/ubuntu-part
-        - name: ubuntu-updates
-          priority: null
-          section: main
-          suite: trusty
-          type: deb
-          uri: http://10.20.0.2:8080/ubuntu-part
-        - name: ubuntu-security
-          priority: null
-          section: main
-          suite: trusty
-          type: deb
-          uri: http://10.20.0.2:8080/ubuntu-part
-        - name: mos
-          priority: 1050
-          section: main restricted
-          suite: mos6.1
-          type: deb
-          uri: http://10.20.0.2:8080/2014.2.2-6.1/ubuntu/x86_64
-        - name: mos-updates
-          priority: 1050
-          section: main restricted
-          suite: mos6.1-updates
-          type: deb
-          uri: http://10.20.0.2:8080/mos-ubuntu
-        - name: mos-security
-          priority: 1050
-          section: main restricted
-          suite: mos6.1-security
-          type: deb
-          uri: http://10.20.0.2:8080/mos-ubuntu
-        - name: mos-holdback
-          priority: 1100
-          section: main restricted
-          suite: mos6.1-holdback
-          type: deb
-          uri: http://10.20.0.2:8080/mos-ubuntu
-        - name: Auxiliary
-          priority: 1150
-          section: main restricted
-          suite: auxiliary
-          type: deb
-          uri: http://10.20.0.2:8080/2014.2.2-6.1/ubuntu/auxiliary
-    storage:
-      ephemeral_ceph:
-        description: Configures Nova to store ephemeral volumes in RBD. This works best
-          if Ceph is enabled for volumes and images, too. Enables live migration of
-          all types of Ceph backed VMs (without this option, live migration will only
-          work with VMs launched from Cinder volumes).
-        label: Ceph RBD for ephemeral volumes (Nova)
-        type: checkbox
-        value: true
-        weight: 75
-      images_ceph:
-        description: Configures Glance to use the Ceph RBD backend to store images.
-          If enabled, this option will prevent Swift from installing.
-        label: Ceph RBD for images (Glance)
-        restrictions:
-        - settings:storage.images_vcenter.value == true: Only one Glance backend could
-            be selected.
-        type: checkbox
-        value: true
-        weight: 30
-      images_vcenter:
-        description: Configures Glance to use the vCenter/ESXi backend to store images.
-          If enabled, this option will prevent Swift from installing.
-        label: VMWare vCenter/ESXi datastore for images (Glance)
-        restrictions:
-        - action: hide
-          condition: settings:common.use_vcenter.value != true
-        - condition: settings:storage.images_ceph.value == true
-          message: Only one Glance backend could be selected.
-        type: checkbox
-        value: false
-        weight: 35
-      iser:
-        description: 'High performance block storage: Cinder volumes over iSER protocol
-          (iSCSI over RDMA). This feature requires SR-IOV capabilities in the NIC, and
-          will use a dedicated virtual function for the storage network.'
-        label: iSER protocol for volumes (Cinder)
-        restrictions:
-        - settings:storage.volumes_lvm.value != true or settings:common.libvirt_type.value
-          != 'kvm'
-        - action: hide
-          condition: not ('experimental' in version:feature_groups)
-        type: checkbox
-        value: false
-        weight: 11
-      metadata:
-        label: Storage
-        weight: 60
-      objects_ceph:
-        description: Configures RadosGW front end for Ceph RBD. This exposes S3 and
-          Swift API Interfaces. If enabled, this option will prevent Swift from installing.
-        label: Ceph RadosGW for objects (Swift API)
-        restrictions:
-        - settings:storage.images_ceph.value == false
-        type: checkbox
-        value: false
-        weight: 80
-      osd_pool_size:
-        description: Configures the default number of object replicas in Ceph. This
-          number must be equal to or lower than the number of deployed 'Storage - Ceph
-          OSD' nodes.
-        label: Ceph object replication factor
-        regex:
-          error: Invalid number
-          source: ^[1-9]\d*$
-        type: text
-        value: '2'
-        weight: 85
-      volumes_ceph:
-        description: Configures Cinder to store volumes in Ceph RBD images.
-        label: Ceph RBD for volumes (Cinder)
-        restrictions:
-        - settings:storage.volumes_lvm.value == true
-        type: checkbox
-        value: true
-        weight: 20
-      volumes_lvm:
-        description: It is recommended to have at least one Storage - Cinder LVM node.
-        label: Cinder LVM over iSCSI for volumes
-        restrictions:
-        - settings:storage.volumes_ceph.value == true
-        type: checkbox
-        value: false
-        weight: 10
-    syslog:
-      metadata:
-        label: Syslog
-        weight: 50
-      syslog_port:
-        description: Remote syslog port
-        label: Port
-        regex:
-          error: Invalid Syslog port
-          source: ^([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$
-        type: text
-        value: '514'
-        weight: 20
-      syslog_server:
-        description: Remote syslog hostname
-        label: Hostname
-        type: text
-        value: ''
-        weight: 10
-      syslog_transport:
-        label: Syslog transport protocol
-        type: radio
-        value: tcp
-        values:
-        - data: udp
-          description: ''
-          label: UDP
-        - data: tcp
-          description: ''
-          label: TCP
-        weight: 30
-    workloads_collector:
-      enabled:
-        type: hidden
-        value: true
-      metadata:
-        label: Workloads Collector User
-        restrictions:
-        - action: hide
-          condition: 'true'
-        weight: 10
-      password:
-        type: password
-        value: pBkLbu1k
-      tenant:
-        type: text
-        value: services
-      user:
-        type: text
-        value: fuel_stats_user
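
Each editable setting above pairs a regex "source" pattern with an "error" message that the Fuel UI uses to validate input. As a minimal illustration (not part of the patch; it only assumes a Python interpreter with the standard re module), the port pattern used by rest_api_port and syslog_port can be exercised like this:

    # Sketch only: validate candidate values against the port regex shown above.
    import re

    PORT_RE = re.compile(
        r'^([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|'
        r'65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$')

    for candidate in ('8282', '514', '0', '65536'):
        result = 'ok' if PORT_RE.match(candidate) else 'Invalid port number'
        print('%s: %s' % (candidate, result))
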
diff --git a/fuel/deploy/templates/virtual_environment/conf/dha.yaml b/fuel/deploy/templates/virtual_environment/conf/dha.yaml
deleted file mode 100644 (file)
index 6d476b8..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-title: Deployment Hardware Adapter (DHA)
-# DHA API version supported
-version:
-created:
-comment: Config for Virtual Environment
-
-# Adapter to use for this definition
-adapter: libvirt
-
-# Node list.
-# Mandatory property is id, all other properties are adapter specific.
-
-nodes:
-- id: 1
-  libvirtName: controller1
-  libvirtTemplate: templates/virtual_environment/vms/controller.xml
-- id: 2
-  libvirtName: compute1
-  libvirtTemplate: templates/virtual_environment/vms/compute.xml
-- id: 3
-  libvirtName: compute2
-  libvirtTemplate: templates/virtual_environment/vms/compute.xml
-- id: 4
-  libvirtName: compute3
-  libvirtTemplate: templates/virtual_environment/vms/compute.xml
-- id: 5
-  libvirtName: fuel-master
-  libvirtTemplate: templates/virtual_environment/vms/fuel.xml
-  isFuel: yes
-  username: root
-  password: r00tme
-
-virtNetConfDir: templates/virtual_environment/networks
-
-disks:
-  fuel: 50G
-  controller: 50G
-  compute: 50G
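
The node list above is consumed by the libvirt adapter named at the top of the file, and the entry flagged with isFuel identifies the Fuel master VM. A small, illustrative reader (not part of the patch; it assumes PyYAML is installed and takes the DHA file path as its first argument) could look like this:

    # Sketch only: list the nodes of a DHA definition and flag the Fuel master.
    import sys
    import yaml

    with open(sys.argv[1]) as stream:
        dha = yaml.safe_load(stream)

    for node in dha['nodes']:
        # PyYAML (YAML 1.1) reads "isFuel: yes" as boolean True, so a plain
        # truthiness check works for both spellings.
        role = 'Fuel master' if node.get('isFuel') else 'target node'
        print('node %s: %s (%s)' % (node['id'], node['libvirtName'], role))
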
index d2aece8..bb4f9b6 100644 (file)
@@ -1,13 +1,3 @@
-###############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# szilard.cserey@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-###############################################################################
-
-
 import sys
 import common
 import io
@@ -16,16 +6,12 @@ from dea import DeploymentEnvironmentAdapter
 
 check_file_exists = common.check_file_exists
 
-ASTUTE_YAML = '/etc/fuel/astute.yaml'
-
-
 def usage():
     print '''
     Usage:
     python transplant_fuel_settings.py <deafile>
     '''
 
-
 def parse_arguments():
     if len(sys.argv) != 2:
         usage()
@@ -34,7 +20,6 @@ def parse_arguments():
     check_file_exists(dea_file)
     return dea_file
 
-
 def transplant(dea, astute):
     fuel_conf = dea.get_fuel_config()
     for key in fuel_conf.iterkeys():
@@ -45,17 +30,17 @@ def transplant(dea, astute):
             astute[key] = fuel_conf[key]
     return astute
 
-
 def main():
     dea_file = parse_arguments()
-    check_file_exists(ASTUTE_YAML)
+    astute_yaml = '/etc/fuel/astute.yaml'
+    check_file_exists(astute_yaml)
     dea = DeploymentEnvironmentAdapter(dea_file)
-    with io.open(ASTUTE_YAML) as stream:
+    with io.open(astute_yaml) as stream:
         astute = yaml.load(stream)
     transplant(dea, astute)
-    with io.open(ASTUTE_YAML, 'w') as stream:
+    with io.open(astute_yaml, 'w') as stream:
         yaml.dump(astute, stream, default_flow_style=False)
 
 
 if __name__ == '__main__':
-    main()
+    main()
\ No newline at end of file
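
The hunks above inline the astute.yaml path and drop the module-level constant; the merge performed by transplant() assigns DEA-provided fuel settings over the values loaded from astute.yaml. A stand-alone illustration of that kind of merge (sample keys and values are invented for the example, and any special-case handling in the real function is omitted):

    # Sketch only: keys from the DEA fuel configuration replace the matching
    # keys in the astute dictionary; everything else is kept as loaded.
    fuel_conf = {'HOSTNAME': 'opnfv-fuel', 'DNS_DOMAIN': 'example.org'}
    astute = {'HOSTNAME': 'fuel', 'DNS_DOMAIN': 'domain.tld',
              'FUEL_ACCESS': {'user': 'admin'}}

    for key in fuel_conf:
        astute[key] = fuel_conf[key]

    print(astute)
    # e.g. {'HOSTNAME': 'opnfv-fuel', 'DNS_DOMAIN': 'example.org',
    #       'FUEL_ACCESS': {'user': 'admin'}}
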
diff --git a/fuel/docs/.DS_Store b/fuel/docs/.DS_Store
deleted file mode 100644 (file)
index 9a874b5..0000000
Binary files a/fuel/docs/.DS_Store and /dev/null differ
index 790bcf5..b05164f 100644 (file)
@@ -1,6 +1,6 @@
-============================================================================
-OPNFV Build instructions for - Fuel deployment tool - OPNFV Arno SR1 release
-============================================================================
+========================================================================
+OPNFV Build instructions for - Fuel deployment tool - OPNFV Arno release
+========================================================================
 
 .. contents:: Table of Contents
    :backlinks: none
@@ -8,34 +8,31 @@ OPNFV Build instructions for - Fuel deployment tool - OPNFV Arno SR1 release
 Abstract
 ========
 
-This document describes how to build the Fuel deployment tool for the Arno SR1 release of OPNFV, the build system, dependencies and required system resources.
+This document describes how to build the Fuel deployment tool for the Arno release of OPNFV, the build system, dependencies and required system resources.
 
 License
 =======
-Arno SR1 release of OPNFV when using Fuel as a deployment tool DOCs (c) by Jonas Bjurel (Ericsson AB)
+Arno release of OPNFV when using Fuel as a deployment tool DOCs (c) by Jonas Bjurel (Ericsson AB)
 
-Arno SR1 release of OPNFV when using Fuel as a deployment tool DOCs (c) are licensed under a Creative Commons Attribution 4.0 International License. You should have received a copy of the license along with this. If not, see <http://creativecommons.org/licenses/by/4.0/>.
+Arno release of OPNFV when using Fuel as a deployment tool DOCs (c) are licensed under a Creative Commons Attribution 4.0 International License. You should have received a copy of the license along with this. If not, see <http://creativecommons.org/licenses/by/4.0/>.
 
 
 
 Version history
 ===============
 
-+--------------------+--------------------+--------------------+----------------------+
-| **Date**           | **Ver.**           | **Author**         | **Comment**          |
-|                    |                    |                    |                      |
-+--------------------+--------------------+--------------------+----------------------+
-| 2015-06-03         | 1.0.0              | Jonas Bjurel       | Instructions for     |
-|                    |                    | (Ericsson AB)      | the Arno release     |
-+--------------------+--------------------+--------------------+----------------------+
-| 2015-09-24         | 1.1.0              | Jonas Bjurel       | Instructions for     |
-|                    |                    | (Ericsson AB)      | the Arno SR1 release |
-+--------------------+--------------------+--------------------+----------------------+
++--------------------+--------------------+--------------------+--------------------+
+| **Date**           | **Ver.**           | **Author**         | **Comment**        |
+|                    |                    |                    |                    |
++--------------------+--------------------+--------------------+--------------------+
+| 2015-06-03         | 1.0.0              | Jonas Bjurel       | Instructions for   |
+|                    |                    | (Ericsson AB)      | the Arno release   |
++--------------------+--------------------+--------------------+--------------------+
 
 Introduction
 ============
 
-This document describes the build system used to build the Fuel deployment tool for the Arno SR1 release of OPNFV, required dependencies and minimum requirements on the host to be used for the build system.
+This document describes the build system used to build the Fuel deployment tool for the Arno release of OPNFV, required dependencies and minimum requirements on the host to be used for the build system.
 
 The Fuel build system is designed around Docker containers such that dependencies outside of the build system can be kept to a minimum. It also shields the host from any potentially dangerous operations performed by the build system.
 
@@ -103,9 +100,9 @@ Now it is time to clone the code repository:
 
 Now you should have the OPNFV genesis repository with the Fuel directories stored locally on your build host.
 
-Check out the Arno SR1 release:
+Check out the Arno release:
 <cd genesis>
-<git checkout arno.2015.2.0>
+<git checkout arno.2015.1.0>
 
 Building
 ========
@@ -173,7 +170,7 @@ References
 -
 
 :Authors: Jonas Bjurel (Ericsson)
-:Version: 1.1.0
+:Version: 1.0.0
 
 **Documentation tracking**
 
index aedbb53..56699e9 100644 (file)
@@ -9,13 +9,13 @@ OPNFV Installation instructions for the Arno release of OPNFV when using Fuel as
 Abstract
 ========
 
-This document describes how to install the Arno SR1 release of OPNFV when using Fuel as a deployment tool, covering its limitations, dependencies and required system resources.
+This document describes how to install the Arno release of OPNFV when using Fuel as a deployment tool, covering its limitations, dependencies and required system resources.
 
 License
 =======
-Arno SR1 release of OPNFV when using Fuel as a deployment tool Docs (c) by Jonas Bjurel (Ericsson AB)
+Arno release of OPNFV when using Fuel as a deployment tool Docs (c) by Jonas Bjurel (Ericsson AB)
 
-Arno SR1 release of OPNFV when using Fuel as a deployment tool Docs are licensed under a Creative Commons Attribution 4.0 International License. You should have received a copy of the license along with this. If not, see <http://creativecommons.org/licenses/by/4.0/>.
+Arno release of OPNFV when using Fuel as a deployment tool Docs are licensed under a Creative Commons Attribution 4.0 International License. You should have received a copy of the license along with this. If not, see <http://creativecommons.org/licenses/by/4.0/>.
 
 Version history
 ===============
@@ -27,61 +27,52 @@ Version history
 | 2015-06-03         | 1.0.0              | Jonas Bjurel       | Installation       |
 |                    |                    | (Ericsson AB)      | instructions for   |
 |                    |                    |                    | the Arno release   |
-|                    |                    |                    |                    |
-| 2015-09-27         | 1.1.0              | Daniel Smith       | ARNO SR1-RC1       |
-|                    |                    | (Ericsson AB)      | update             |
-|                    |                    |                    |                    |
-|                    |                    |                    |                    |
 +--------------------+--------------------+--------------------+--------------------+
 
 
 Introduction
 ============
 
-This document provides guidelines on how to install and configure the Arno SR1 release of OPNFV when using Fuel as a deployment tool, including required software and hardware configurations.
+This document provides guidelines on how to install and configure the Arno release of OPNFV when using Fuel as a deployment tool, including required software and hardware configurations.
 
-Although the available installation options give a high degree of freedom in how the system is set up (architecture, services, features, etc.), not all such permutations provide an OPNFV compliant reference architecture. This instruction provides a step-by-step guide that results in an OPNFV Arno SR1 compliant deployment.
+Although the available installation options give a high degree of freedom in how the system is set up (architecture, services, features, etc.), not all such permutations provide an OPNFV compliant reference architecture. This instruction provides a step-by-step guide that results in an OPNFV Arno compliant deployment.
 
 The audience of this document is assumed to have good knowledge in networking and Unix/Linux administration.
 
 Preface
 =======
 
-Before starting the installation of the Arno SR1 release of OPNFV when using Fuel as a deployment tool, some planning must be done.
+Before starting the installation of the Arno release of OPNFV when using Fuel as a deployment tool, some planning must be done.
 
 Retrieving the ISO image
 ------------------------
 
-First of all, the Fuel deployment ISO image needs to be retrieved; the .iso image of the Arno SR1 release of OPNFV when using Fuel as a deployment tool can be found at http://artifacts.opnfv.org/arno.2015.2.0/fuel/arno.2015.2.0.fuel.iso
-
+First of all, the Fuel deployment ISO image needs to be retrieved; the .iso image of the Arno release of OPNFV when using Fuel as a deployment tool can be found at http://artifacts.opnfv.org/arno.2015.1.0/fuel/arno.2015.1.0.fuel.iso
 
 Building the ISO image
 ----------------------
 
-
 Alternatively, you may build the .iso from source by cloning the opnfv/genesis git repository.  To retrieve the repository for the Arno release use the following command:
 
-- git clone https://<linux foundation uid>@gerrit.opnfv.org/gerrit/genesis
+<git clone https://<linux foundation uid>@gerrit.opnfv.org/gerrit/genesis>
 
-Check-out the Arno SR1 release tag to set the branch to the baseline required to replicate the Arno SR1 release:
+Check-out the Arno release tag to set the branch to the baseline required to replicate the Arno release:
 
-- cd genesis; git checkout stable/arno2015.2.0
+<cd genesis; git checkout arno.2015.1.0>
 
 Go to the fuel directory and build the .iso:
 
-- cd fuel/build; make all
-
-For more information on how to build, please see "OPNFV Build instructions for - Arno SR1 release of OPNFV when using Fuel as a deployment tool" which you retrieved with the repository at </genesis/fuel/docs/src/build-instructions.rst>
+<cd fuel/build; make all>
 
-Next, familiarize yourself with the Fuel 6.1 version by reading the following documents:
+For more information on how to build, please see "OPNFV Build instructions for - Arno release of OPNFV when using Fuel as a deployment tool" which you retrieved with the repository at </genesis/fuel/docs/src/build-instructions.rst>
 
-- Fuel planning guide <https://docs.mirantis.com/openstack/fuel/fuel-6.1/planning-guide.html>
+Next, familiarize yourself with the Fuel 6.0.1 version by reading the following documents:
 
-- Fuel user guide <http://docs.mirantis.com/openstack/fuel/fuel-6.1/user-guide.html#user-guide>
+- Fuel planning guide <http://docs.mirantis.com/openstack/fuel/fuel-6.0/planning-guide.html#planning-guide>
 
-- Fuel operations guide <http://docs.mirantis.com/openstack/fuel/fuel-6.1/operations.html#operations-guide>
+- Fuel user guide <http://docs.mirantis.com/openstack/fuel/fuel-6.0/user-guide.html#user-guide>
 
-- Fuel Plugin Developers Guide <https://wiki.openstack.org/wiki/Fuel/Plugins>
+- Fuel operations guide <http://docs.mirantis.com/openstack/fuel/fuel-6.0/operations.html#operations-guide>
 
 A number of deployment-specific parameters must be collected; those are:
 
@@ -93,40 +84,27 @@ A number of deployment specific parameters must be collected, those are:
 
 4.     Provider NTP addresses
 
-5.     Network Topology you plan to Deploy (VLAN, GRE(VXLAN), FLAT)
-
-6.     Linux Distro you intend to deploy.
-
-7.     How many nodes and what roles you want to deploy (Controllers, Storage, Computes)
-
-8.     Monitoring Options you want to deploy (Ceilometer, MongoDB).
-
-9.     Other options not covered in the document are available in the links above
-
-
 This information will be needed for the configuration procedures provided in this document.
 
 Hardware requirements
 =====================
 
-The following minimum hardware requirements must be met for the installation of Arno SR1 using Fuel:
+The following minimum hardware requirements must be met for the installation of Arno using Fuel:
 
 +--------------------+------------------------------------------------------+
 | **HW Aspect**      | **Requirement**                                      |
 |                    |                                                      |
 +--------------------+------------------------------------------------------+
-| **# of nodes**     | Minimum 6 (3 for non-redundant deployment):          |
+| **# of servers**   | Minimum 5 (3 for non-redundant deployment):          |
 |                    |                                                      |
 |                    | - 1 Fuel deployment master (may be virtualized)      |
 |                    |                                                      |
 |                    | - 3(1) Controllers                                   |
 |                    |                                                      |
 |                    | - 1 Compute                                          |
-|                    |                                                      |
-|                    | - 1 Ceilometer (VM option)                           |
 +--------------------+------------------------------------------------------+
-| **CPU**            | Minimum 1 socket x86_AMD64 with Virtualization       |
-|                    |   support                                            |
+| **CPU**            | Minimum 1 socket x86_AMD64 Ivy bridge 1.6 GHz        |
+|                    |                                                      |
 +--------------------+------------------------------------------------------+
 | **RAM**            | Minimum 16GB/server (Depending on VNF work load)     |
 |                    |                                                      |
@@ -134,30 +112,14 @@ The following minimum hardware requirements must be met for the installation of
 | **Disk**           | Minimum 256GB 10kRPM spinning disks                  |
 |                    |                                                      |
 +--------------------+------------------------------------------------------+
-| **Networks**       | 4 Tagged VLANs (PUBLIC, MGMT, STORAGE, PRIVATE)      |
+| **NICs**           | - 2(1)x10GE Niantic for Private/Public (Redundant)   |
 |                    |                                                      |
-|                    | 1 Un-Tagged VLAN for PXE Boot - ADMIN Network        |
+|                    | - 2(1)x10GE Niantic for SAN (Redundant)              |
+|                    |                                                      |
+|                    | - 2(1)x1GE for admin (PXE) and control (RabbitMQ...) |
 |                    |                                                      |
-|                    | note: These can be run on single NIC - or spread out |
-|                    |  over other nics as your hardware supports           |
 +--------------------+------------------------------------------------------+
 
-Help with Hardware Requirements
-===============================
-
-
-Calculate hardware requirements:
-
-You can use the Fuel Hardware Calculator <https://www.mirantis.com/openstack-services/bom-calculator/> to calculate the hardware required for your OpenStack environment.
-
-When choosing the hardware on which you will deploy your OpenStack environment, you should think about (a rough arithmetic sketch follows this list):
-
-        - CPU -- Consider the number of virtual machines that you plan to deploy in your cloud environment and the CPU per virtual machine.
-        - Memory -- Depends on the amount of RAM assigned per virtual machine and the controller node.
-        - Storage -- Depends on the local drive space per virtual machine, remote volumes that can be attached to a virtual machine, and object storage.
-        - Networking -- Depends on the chosen network topology, the network bandwidth per virtual machine, and network storage.
-
-
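
As a rough arithmetic sketch of the sizing reasoning above (all per-VM figures are invented assumptions for the example, not OPNFV guidance):

    # Sketch only: estimate how many compute hosts a given VM count needs.
    vms            = 40    # planned number of tenant VMs (assumption)
    ram_per_vm_gb  = 4     # average RAM per VM (assumption)
    vcpus_per_vm   = 2     # average vCPUs per VM (assumption)
    cpu_overcommit = 8     # accepted vCPU:pCPU ratio (assumption)
    host_ram_gb    = 16    # per the minimum requirement table above
    host_pcpus     = 8     # physical cores per compute host (assumption)

    by_ram = -(-vms * ram_per_vm_gb // host_ram_gb)                   # ceiling division
    by_cpu = -(-vms * vcpus_per_vm // (host_pcpus * cpu_overcommit))
    print('compute hosts needed: %d' % max(by_ram, by_cpu))
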
 Top of the rack (TOR) Configuration requirements
 ================================================
 
@@ -165,7 +127,8 @@ The switching infrastructure provides connectivity for the OPNFV infrastructure
 
 The physical TOR switches are **not** automatically configured from the OPNFV reference platform. All the networks involved in the OPNFV infrastructure as well as the provider networks and the private tenant VLANs need to be manually configured.
 
-Manual configuration of the Arno SR1 hardware platform should be carried out according to the Pharos specification TODO-<insert link to Pharos ARNO SR1 Specification>
+
+Manual configuration of the Arno hardware platform should be carried out according to the Pharos specification http://artifacts.opnfv.org/arno.2015.1.0/docs/pharos-spec.arno.2015.1.0.pdf
 
 OPNFV Software installation and deployment
 ==========================================
@@ -174,29 +137,35 @@ This section describes the installation of the OPNFV installation server (Fuel m
 
 Install Fuel master
 -------------------
-1. Mount the Arno SR1 ISO  file as a boot device to the jump host server.
+1. Mount the built arno.2015.1.0.fuel.iso file as a boot device to the jump host server.
 
 2. Reboot the jump host to establish the Fuel server.
 
    - The system now boots from the ISO image.
 
-   - Select 'DVD Fuel Install (Static IP)'
+3. Change the grub boot parameters
 
-   - Press [Enter].
+   - When the grub boot menu shows up - Press Tab to edit the kernel parameters
 
-3. Wait until screen Fuel setup is shown (Note: This can take up to 30 minutes).
+   - Change <showmenu=no> to <showmenu=yes>.
 
-4. Configure DHCP/Static IP information for your FUEL node - For example, ETH0 is 10.20.0.2/24 for FUEL booting and ETH1 is DHCP in your corporate/lab network.
+   - Change <netmask=255.255.255.0> to <netmask=255.255.0.0>.
 
-   - Configure eth1 or other network interfaces here as well (if you have them present on your FUEL server).
+   - Press [Enter].
+
+4. Wait until screen Fuel setup is shown (Note: This can take up to 30 minutes).
 
 5. Select PXE Setup and change the following fields to appropriate values (example below):
 
-   - DHCP Pool Start 10.20.0.3
+   - Static Pool Start 10.20.0.3
+
+   - Static Pool End 10.20.0.254
 
-   - DHCP Pool End 10.20.0.254
+   - DHCP Pool Start 10.20.128.3
 
-   - DHCP Pool Gateway  10.20.0.2 (ip of Fuel node)
+   - DHCP Pool End 10.20.128.254
+
+6. Select DNS & Hostname and change the following fields to appropriate values:
 
    - Hostname <OPNFV Region name>-fuel
 
@@ -206,7 +175,7 @@ Install Fuel master
 
    - Hostname to test DNS <Hostname to test DNS>
 
-6. Select Time Sync and change the following fields to appropriate values:
+7. Select Time Sync and change the following fields to appropriate values:
 
    - NTP Server 1 <Customer NTP server 1>
 
@@ -214,94 +183,67 @@ Install Fuel master
 
    - NTP Server 3 <Customer NTP server 3>
 
-7. Start the installation.
+   **Note: This step is only to pass the network sanity test; the actual NTP parameters will be set by the pre-deploy script.**
+
+8. Start the installation.
 
    - Select Quit Setup and press Save and Quit.
 
    - Installation starts, wait until a screen with logon credentials is shown.
 
-
-Boot the Node Servers
----------------------
-
-After the Fuel Master node has rebooted from the above step and is at the login prompt, you should boot the node servers (your compute/control/storage blades, nested or real) with a PXE booting scheme so that the Fuel
-Master can pick them up for control.
-
-8. Enable PXE booting
-
-    - For every controller and compute server: enable PXE Booting as the first boot device in the BIOS boot order menu and hard disk as the second boot device in the same menu.
-
-9. Reboot all the control and compute blades.
-
-10. Wait for the availability of nodes showing up in the Fuel GUI.
-
-    - Wait until all nodes are displayed in top right corner of the Fuel GUI: <total number of server> TOTAL NODES and <total number of servers> UNALLOCATED NODES.
-
-
-
-Install ODL Plugin on FUEL node
--------------------------------
-
-11. SSH to your FUEL node   (e.g. root@10.20.0.2  pwd: r00tme)
-
-12. Verify the plugin exists at /opt/opnfv/opendaylight-0.6-0.6.1-1.noarch.rpm
-
-13. Install the plugin with the command
-
-    - "fuel plugins --install /opt/opnfv/opendaylight-0.6-0.6.1-1.noarch.rpm"
-
-    - Expected output: "Plugin  opendaylight-0.6-0.6.1-1.noarch.rpm was successfully installed."
-
+   Note: This will take about 15 minutes.
 
 Create an OPNFV Environment
 ---------------------------
 
-14. Connect to the Fuel Web UI with a browser at http://<ip of fuel server>:8000 (login admin/admin)
+9. Connect to Fuel with a browser on port 8000
 
-15. Create and name a new OpenStack environment, to be installed.
+10. Create and name a new OpenStack environment, to be installed.
 
-16. Select <Juno on Ubuntu> or <Juno on CentOS>, whichever you prefer, in the "OpenStack Release" field and press "Next"
+11. Select <Juno on Ubuntu> or <Juno on CentOS>, whichever you prefer, in the "OpenStack Release" field.
 
-17. Select compute virtualization method.
+12. Select deployment mode.
 
-    - Select KVM as hypervisor (or one of your choosing) and press "Next"
+    - Select the Multi-node with HA.
 
-18. Select network mode.
+13. Select compute node mode.
 
-    - Select Neutron with GRE segmentation and press "Next"
+    - Select KVM as hypervisor (select QEMU instead if you are deploying on virtual hosts, e.g. nested KVM/ESXi).
 
-    Note: this is the supported method when using the ODL installation; other options will not work with the plugin and this instruction set.
+14. Select network mode.
 
-19. Select Storage Back-ends.
+    - Select Neutron with VLAN segmentation
 
-    - Select "Yes, use Ceph" if you intend to deploy Ceph Backends and press "Next"
+    **Note: This will later be overridden to VXLAN by OpenDaylight.**
 
+15. Select Storage Back-ends.
 
-20. Select additional services you wish to install.
+    - Select Ceph for Cinder and default for glance.
 
-    - Check option <Install Ceilometer (OpenStack Telemetry)> and press "Next"
-       Note: If you use Ceilometer and you only have 5 nodes, you may have to run in a 3/1/1 (controller/ceilo-mongo/compute) configuration; consider adding more compute nodes.
+16. Select additional services.
 
-21. Create the new environment.
+    - Check option <Install Ceilometer (OpenStack Telemetry)>.
 
-    - Click "Create" Button
+17. Create the new environment.
 
 Configure the OPNFV environment
 -------------------------------
 
-22. Enable PXE booting (if you haven't done this already)
+18. Enable PXE booting
 
     - For every controller and compute server: enable PXE Booting as the first boot device in the BIOS boot order menu and hard disk as the second boot device in the same menu.
 
-23. Wait for the availability of nodes showing up in the Fuel GUI.
+19. Reboot all the control and compute blades.
+
+20. Wait for the availability of nodes showing up in the Fuel GUI.
 
     - Wait until all nodes are displayed in top right corner of the Fuel GUI: <total number of server> TOTAL NODES and <total number of servers> UNALLOCATED NODES.
 
-24. Open the environment you previously created.
+21. Open the environment you previously created.
 
-25. Open the networks tab.
+22. Open the networks tab.
 
-26. Update the Public network configuration.
+23. Update the public network configuration.
 
     Change the following fields to appropriate values:
 
@@ -311,67 +253,57 @@ Configure the OPNFV environment
 
     - CIDR to <CIDR for Public IP Addresses>
 
-    - Check VLAN tagging.
-
-    - Set appropriate VLAN id.
-
     - Gateway to <Gateway for Public IP Addresses>
 
-    - Set floating ip ranges
-
-
-27. Update the Storage Network Configuration
-
-    - Set CIDR to appropriate value  (default 192.168.1.0/24)
+    - Check VLAN tagging.
 
-    - Set vlan to appropriate value  (default 102)
+    - Set appropriate VLAN id.
 
-28. Update the Management network configuration.
+24. Update the management network configuration.
 
-    - Set CIDR to appropriate value (default 192.168.0.0/24)
+    - Set CIDR to 172.16.255.128/25 (or as per your wish).
 
     - Check VLAN tagging.
 
-    - Set appropriate VLAN id. (default 101)
-
-29. Update the Private Network Information
+    - Set appropriate VLAN id.
 
-    - Set CIDR to appropriate value (default 192.168.2.0/24
+25. Update the Neutron L2 configuration.
 
-    - Check and set VLAN tag appropriately (default 103)
+    - Set VLAN ID range.
 
-30. Update the Neutron L3 configuration.
+26. Update the Neutron L3 configuration.
 
     - Set Internal network CIDR to an appropriate value
 
     - Set Internal network gateway to an appropriate value
 
-    - Set Guest OS DNS Server values appropriately
+    - Set Floating IP ranges.
 
-31. Save Settings.
+    - Set DNS Servers
 
-32. Click on the "Nodes" Tab in the FUEL WEB UI.
+27. Save Settings.
 
-33. Assign roles.
+28. Click "verify network" to check the network set-up consistency and connectivity
 
-    - Click on "+Add Nodes" button
+29. Update the storage configuration.
 
-    - Check "Controller" and the "Storage-Ceph OSD"  in the Assign Roles Section
+30. Open the nodes tab.
 
-    - Check the 3 Nodes you want to act as Controllers from the bottom half of the screen
+31. Assign roles.
 
-    - Click <Apply Changes>.
+    - Check <Controller and Telemetry MongoDB>.
 
-    - Click on "+Add Nodes" button
+    - Check the three servers you want to be installed as Controllers in pane <Assign Role>.
 
-    - Check "Compute" in the Assign Roles Section
+    - Click <Apply Changes>.
 
-    - Check the Nodes that you want to act as Computes from the bottom half of the screen
+    - Check <Compute>.
 
-    - Click <Apply Changes>.
+    - Check nodes to be installed as compute nodes in pane Assign Role.
 
+    - Click <Apply Changes>.
 
-34. Configure interfaces.
+32. Configure interfaces.
 
     - Check Select <All> to select all nodes with Control, Telemetry, MongoDB and Compute node roles.
 
@@ -381,84 +313,67 @@ Configure the OPNFV environment
 
     - Assign interfaces (bonded) for mgmt-, admin-, private-, public- and storage networks
 
-    - Note: Set MTU level to at least MTU=2090 (recommended MTU=2140 for SDN over VXLAN Usage) for each network
-
-    - Click Apply
-
-Enable ODL
-----------
+Deploy the OPNFV environment
+----------------------------
+**NOTE: Before the deployment is performed, the OPNFV pre-deploy script must be run**
 
-35. In the FUEL UI of your Environment, click the "Settings" Tab
+35. Run the pre-deploy script.
+    Log on as root to the Fuel node.
+    Print the Fuel environment id (fuel env):
+    #> id | status | name | mode | release_id | changes
+    #> <id> | new | <CEE Region name> | ha_compact | 2 | <site specific information>
 
-    - Enable OpenStack debug logging (in the Common Section) - optional
+36. Run the pre-deployment script (/opt/opnfv/pre-deploy.sh <id>)
+    When prompted, set the DNS servers to go into /etc/resolv.conf.
+    When prompted, set any hosts file additions for controllers and compute nodes. You will be prompted for name, FQDN and IP for each entry; press return at the name prompt when you have completed your input.
+    When prompted, set the NTP upstream configuration for controllers. You will be prompted for an NTP server for each entry; press return at the NTP server prompt when you have completed your input.
 
-    - Check the OpenDaylight Lithium Plugin Section
-
-    - Check to enable VXLAN
-
-    - Modify VNI and Port Range if desired
-
-    - Click "Save Settings" at the bottom to Save.
-
-
-OPTIONAL - Set Local Mirror Repos
----------------------------------
-
-The following steps can be executed if you are in an environment with no connection to the internet.  The Fuel server delivers a local repo that can be used for 
-installation / deployment of OpenStack.
-
-36.  In the Fuel UI of your Environment, click the Settings Tab and scroll to the Repositories Section.
-
-   - Replace the URI value for each of the "Name" entries outlined below (a sketch of the resulting URIs follows this list):
-
-   - "ubuntu" URI="deb http://<ip-of-fuel-server>:8080/ubuntu-part trusty main"
-   - "ubuntu-security" URI="deb http://<ip-of-fuel-server>:8080/ubuntu-part trusty main"
-   - "ubuntu-updates" URI="deb http://<ip-of-fuel-server>:8080/ubuntu-part trusty main"
-   - "mos-updates"  URI="deb http://<ip-of-fuel-server>:8080/mos-ubuntu mos6.1-updates main restricted"
-   - "mos-security" URI="deb http://<ip-of-fuel-server>:8080/mos-ubuntu mos6.1-security main restricted"
-   - "mos-holdback" URI="deb http://<ip-of-fuel-server>:8080/mos-ubuntu mos6.1-holdback main restricted"
-
-   - Click "Save Settings" at the bottom to Save your changes
-
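
The repository lines above all follow the same pattern, differing only in name, path, suite and section; a small rendering sketch (not part of the document's tooling; the Fuel master IP is an example value) is:

    # Sketch only: render the local mirror repository URIs listed above.
    fuel_ip = '10.20.0.2'   # example; substitute the IP of your Fuel master

    repos = [
        ('ubuntu',          'ubuntu-part', 'trusty',          'main'),
        ('ubuntu-security', 'ubuntu-part', 'trusty',          'main'),
        ('ubuntu-updates',  'ubuntu-part', 'trusty',          'main'),
        ('mos-updates',     'mos-ubuntu',  'mos6.1-updates',  'main restricted'),
        ('mos-security',    'mos-ubuntu',  'mos6.1-security', 'main restricted'),
        ('mos-holdback',    'mos-ubuntu',  'mos6.1-holdback', 'main restricted'),
    ]
    for name, path, suite, section in repos:
        print('%-16s deb http://%s:8080/%s %s %s' % (name, fuel_ip, path, suite, section))
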
-Verify Networks
----------------
-
-It is important that Verify Networks be done, as it will ensure that you can not only communicate on the networks you have set up, but can also fetch the packages needed for a successful
-deployment.
-
-37.  From the FUEL UI in your Environment, Select the Networks Tab
-
-   - At the bottom of the page, Select "Verify Networks"
-
-   - Continue to fix your topology (physical switch, etc) until the "Verification Succeeded - Your network is configured correctly" message is shown
-
-Deploy Your Environment
------------------------
-
-38. Deploy the environment.
+37. Deploy the environment.
     In the Fuel GUI, click Deploy Changes.
 
-    - Wait until your Environment is deployed and the Horizon URI to connect is displayed in the FUEL GUI for your Environment
-
 Installation health-check
 =========================
 
-39. Perform system health-check
+38. Perform system health-check
+Now that the OPNFV environment has been created, and before the post installation configuration is started, perform a system health check from the Fuel GUI:
+
+- Select the “Health check” TAB.
+- Select all test cases
+- And click “Run tests”
+
+All test cases should pass.
+
+Post installation and deployment actions
+========================================
 
-    - Click the "Health Check" tab inside your Environment in the FUEL Web UI
+Activate OpenDaylight and VXLAN network segmentation
+----------------------------------------------------
+**Note: With the current release, the OpenDaylight option is experimental!**
+**Note: With ODL enabled, L3 features will no longer be available.**
+The activation of ODL within a deployed Fuel system is a two part process.
 
-    - Check "Select All" and Click "Run Tests"
+The first part involves staging the ODL container, i.e. starting the ODL container itself.
+The second part involves a reconfiguration of the underlying networking components to enable VXLAN tunneling.
+The staging of the ODL container works without manual intervention, except that a valid DNS IP for your system must be edited in.
 
-       Note: The Live-Migration test will fail (bug in ODL currently); if you prefer not to see the error message, you can skip this test by unchecking it in the list
+For the second part - the reconfiguration of the networking - the script <config_net_odl.sh> is provided as a baseline example to show what needs to be configured for your system setup. Since there are many variants of valid networking topologies, this script will not be 100% correct in all deployment cases and some manual script modifications may be required.
 
-    - Allow tests to run and investigate results where appropriate
+39. Enable the ODL controller
+ssh to any of the OpenStack controllers and issue the following command as root user: </opt/opnfv/odl/stage_odl.sh>
+This script will start ODL, load modules and make the Controller ready for use.
+**Note: The script should only be run on a single controller (even if the system is set up in High Availability OpenStack mode).**
 
 40. Verify that the OpenDaylight GUI is accessible
+Point your browser to the following URL: <http://{ODL-CONTROLLER-IP}:8181/dlux/index.html> and login:
+Username: Admin
+Password: Admin
 
-Point your browser to the following URL: <http://{Controller-VIP}:8181/index.html> and login:
+41. Reconfiguring the networking and switch to VXLAN network segmentation
+ssh to all of the nodes and issue the following command </opt/opnfv/odl/config_net_odl.sh> in the order specified below:
+a. All compute nodes
+b. All OpenStack controller nodes except the one running the ODL-controller
+c. The OpenStack controller also running the ODL controller
 
-    - Username: admin
-    - Password: admin
+This script will reconfigure the networking from VLAN segmentation to VXLAN mode; a short automation sketch of the node ordering follows.
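
As an illustrative sketch only (host names are invented placeholders; a real deployment would take them from Fuel), the documented ordering could be driven from the Fuel master like this:

    # Sketch only: run the reconfiguration script in the documented order,
    # finishing with the controller that hosts the ODL controller.
    import subprocess

    computes       = ['node-4', 'node-5', 'node-6']   # a. all compute nodes
    controllers    = ['node-2', 'node-3']             # b. controllers without ODL
    odl_controller = 'node-1'                         # c. the ODL controller host, last

    for host in computes + controllers + [odl_controller]:
        # assumes key-based root ssh access from the Fuel master to the nodes
        subprocess.check_call(['ssh', 'root@' + host,
                               '/opt/opnfv/odl/config_net_odl.sh'])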
 
 References
 ==========
@@ -487,11 +402,11 @@ Fuel
 
 `Fuel documentation <https://wiki.openstack.org/wiki/Fuel>`_
 
-:Authors: Daniel Smith (Ericsson AB)
-:Version: 1.1.0
+:Authors: Jonas Bjurel (Ericsson AB)
+:Version: 1.0.0
 
 **Documentation tracking**
 
 Revision: _sha1_
 
-Build date: _date
+Build date: _date_
index 4357897..220e80b 100644 (file)
@@ -1,6 +1,6 @@
-=========================================================================================
-OPNFV Release Note for the Arno SR1 release of OPNFV when using Fuel as a deployment tool
-=========================================================================================
+=====================================================================================
+OPNFV Release Note for the Arno release of OPNFV when using Fuel as a deployment tool
+=====================================================================================
 
 
 .. contents:: Table of Contents
@@ -10,14 +10,14 @@ OPNFV Release Note for the Arno SR1 release of OPNFV when using Fuel as a deploy
 Abstract
 ========
 
-This document compiles the release notes for the Arno SR1 release of OPNFV when using Fuel as a deployment tool.
+This document compiles the release notes for the Arno release of OPNFV when using Fuel as a deployment tool.
 
 License
 =======
 
-Arno SR1 release with the Fuel deployment tool Docs (c) by Jonas Bjurel (Ericsson AB)
+Arno release with the Fuel deployment tool Docs (c) by Jonas Bjurel (Ericsson AB)
 
-Arno SR1 release with the Fuel deployment tool Docs are licensed under a Creative Commons Attribution 4.0 International License. You should have received a copy of the license along with this. If not, see <http://creativecommons.org/licenses/by/4.0/>.
+Arno release with the Fuel deployment tool Docs are licensed under a Creative Commons Attribution 4.0 International License. You should have received a copy of the license along with this. If not, see <http://creativecommons.org/licenses/by/4.0/>.
 
 Version history
 ===============
@@ -29,9 +29,6 @@ Version history
 | 2015-06-03         | 1.0.0              | Jonas Bjurel       | Arno SR0 release   |
 |                    |                    |                    |                    |
 +--------------------+--------------------+--------------------+--------------------+
-| 2015-09-28         | 1.1.3              | Jonas Bjurel       | Arno SR1 release   |
-|                    |                    |                    |                    |
-+--------------------+--------------------+--------------------+--------------------+
 
 Important notes
 ===============
@@ -43,7 +40,7 @@ Carefully follow the installation-instructions and pay special attention to the
 Summary
 =======
 
-For Arno SR1, the typical use of Fuel as an OpenStack installer is supplemented with OPNFV unique components such as `OpenDaylight <http://www.opendaylight.org/software>`_ version Helium as well as OPNFV-unique configurations.
+For Arno, the typical use of Fuel as an OpenStack installer is supplemented with OPNFV unique components such as `OpenDaylight <http://www.opendaylight.org/software>`_ version Helium as well as OPNFV-unique configurations.
 
 This Arno artefact provides Fuel as the deployment stage tool in the OPNFV CI pipeline including:
 
@@ -63,16 +60,16 @@ Release Data
 | **Project**                          | genesis/bgs                          |
 |                                      |                                      |
 +--------------------------------------+--------------------------------------+
-| **Repo/tag**                         | genesis/arno.2015.2.0                |
+| **Repo/tag**                         | genesis/arno.2015.1.0                |
 |                                      |                                      |
 +--------------------------------------+--------------------------------------+
-| **Release designation**              | Arno Base Service release 1 (SR1)    |
+| **Release designation**              | Arno Base Service release 0 (SR0)    |
 |                                      |                                      |
 +--------------------------------------+--------------------------------------+
-| **Release date**                     | 2015-10-01                           |
+| **Release date**                     | 2015-06-04                           |
 |                                      |                                      |
 +--------------------------------------+--------------------------------------+
-| **Purpose of the delivery**          | OPNFV Arno Base SR1 release          |
+| **Purpose of the delivery**          | OPNFV Arno Base SR0 release          |
 |                                      |                                      |
 +--------------------------------------+--------------------------------------+
 
@@ -81,15 +78,15 @@ Version change
 
 Module version changes
 ~~~~~~~~~~~~~~~~~~~~~~
-This is the second tracked release of genesis/fuel. It is based on following upstream versions:
+This is the first tracked release of genesis/fuel. It is based on the following upstream versions:
 
-- Fuel 6.1.0
+- Fuel 6.0.1
 - OpenStack Juno release
-- OpenDaylight Litium release
+- OpenDaylight Helium-SR3
 
 Document version changes
 ~~~~~~~~~~~~~~~~~~~~~~~~
-This is the second tracked version of the fuel installer for OPNFV. It comes with the following documentation:
+This is the first tracked version of the fuel installer for OPNFV. It comes with the following documentation:
 
 - OPNFV Installation instructions for Arno with Fuel as deployment tool
 - OPNFV Release Notes for Arno use of Fuel as deployment tool
@@ -105,10 +102,10 @@ Feature additions
 | **JIRA REFERENCE**                   | **SLOGAN**                           |
 |                                      |                                      |
 +--------------------------------------+--------------------------------------+
-| JIRA: FUEL-4                         | Baselining Fuel 6.0.1 for OPNFV      |
+| JIRA:-                               | Baselining Fuel 6.0.1 for OPNFV      |
 |                                      |                                      |
 +--------------------------------------+--------------------------------------+
-| JIRA: FUEL-17                        | Integration of OpenDaylight          |
+| JIRA:-                               | Integration of OpenDaylight          |
 |                                      |                                      |
 +--------------------------------------+--------------------------------------+
 
@@ -121,17 +118,8 @@ Bug corrections
 | **JIRA REFERENCE**                   | **SLOGAN**                           |
 |                                      |                                      |
 +--------------------------------------+--------------------------------------+
-| JIRA: BGS-57                         | The OpenDaylight Helium release is   |
-|                                      | not fully functional and the         |
-|                                      | resulting Fuel integration is not    |
-|                                      | able to cope with the deficiancies.  |
-|                                      | It is therefore not recommended to   |
-|                                      | to enable this option.               |
-|                                      | A functional integration of ODL      |
-|                                      | version: Lithium is expected to be   |
-|                                      | available in an upcomming service    |
-|                                      | release.                             |
 |                                      |                                      |
+| -                                    | -                                    |
 +--------------------------------------+--------------------------------------+
 
 Deliverables
@@ -139,13 +127,13 @@ Deliverables
 
 Software deliverables
 ~~~~~~~~~~~~~~~~~~~~~
-Fuel-based installer iso file <arno.2015.2.0.fuel.iso>
+Fuel-based installer iso file <arno.2015.1.0.fuel.iso>
 
 Documentation deliverables
 ~~~~~~~~~~~~~~~~~~~~~~~~~~
-- OPNFV Installation instructions for Arno release with the Fuel deployment tool - ver. 1.1.0
-- OPNFV Build instructions for Arno release with the Fuel deployment tool - ver. 1.1.0
-- OPNFV Release Note for Arno release with the Fuel deployment tool - ver. 1.1.3 (this document)
+- OPNFV Installation instructions for Arno release with the Fuel deployment tool - ver. 1.0.0
+- OPNFV Build instructions for Arno release with the Fuel deployment tool - ver. 1.0.0
+- OPNFV Release Note for Arno release with the Fuel deployment tool - ver. 1.0.0 (this document)
 
 Known Limitations, Issues and Workarounds
 =========================================
@@ -171,32 +159,63 @@ Known issues
 | **JIRA REFERENCE**                   | **SLOGAN**                           |
 |                                      |                                      |
 +--------------------------------------+--------------------------------------+
-| JIRA: FUEL-43                        | VMs not accessible through SSH due   |
-|                                      | to VXLAN 50 Byte overhead and lack   |
-|                                      | of proper MTU value setting on       |
-|                                      | virtual ethernet devices             |
-+--------------------------------------+--------------------------------------+
-| JIRA: FUEL-44                        | Centos 6.5 option has not been       |
-|                                      | enough verified                      |
+| JIRA: BGS-57                         | The OpenDaylight Helium release is   |
+|                                      | not fully functional and the         |
+|                                      | resulting Fuel integration is not    |
+|                                      | able to cope with the deficiencies.  |
+|                                      | It is therefore not recommended to   |
+|                                      | enable this option.                  |
+|                                      | A functional integration of ODL      |
+|                                      | version Lithium is expected to be    |
+|                                      | available in an upcoming service     |
+|                                      | release.                             |
+|                                      |                                      |
 +--------------------------------------+--------------------------------------+
 
-
 Workarounds
 -----------
-See JIRA: `FUEL-43 <https://jira.opnfv.org/browse/FUEL-43>`
+The current workaround for JIRA: BGS-57 is not to enable OpenDaylight networking - see the installation instructions.
 
 
 Test Result
 ===========
-Arno SR1 release with the Fuel deployment tool has undergone QA test runs with the following results:
-https://wiki.opnfv.org/arno_sr1_result_page?rev=1443626728
+
+The Arno release with the Fuel deployment tool has undergone QA test runs with the following results:
+
++--------------------------------------+--------------------------------------+
+| **TEST-SUITE**                       | **Results:**                         |
+|                                      |                                      |
++--------------------------------------+--------------------------------------+
+| Tempest test suite 1:                | 27 out of 105 test cases fail        |
+|                                      | see note (1) and note (2)            |
++--------------------------------------+--------------------------------------+
+| Tempest test suite 2:                | 26 out of 100 test cases fail        |
+|                                      | see note (1) and note (2)            |
++--------------------------------------+--------------------------------------+
+| Tempest test suite 3:                | 14 out of 106 test cases fail        |
+|                                      | see note (1) and note (2)            |
++--------------------------------------+--------------------------------------+
+| Rally test suite 1:                  | 10 out of 18 test cases fail         |
+|                                      | see note (1) and note (3)            |
++--------------------------------------+--------------------------------------+
+| ODL test suite                       | 7 out of 7 test cases fail           |
+|                                      | see note (1) and note (4)            |
++--------------------------------------+--------------------------------------+
+| vPING                                | OK                                   |
+|                                      | see note (1)                         |
++--------------------------------------+--------------------------------------+
+
+- **Note (1):** The tests have been run with the ODL controller active, but without integrated ODL networking (VXLAN segmentation) activated
+- **Note (2):** See https://wiki.opnfv.org/r1_tempest
+- **Note (3):** See https://wiki.opnfv.org/r1_rally_bench
+- **Note (4):** See https://wiki.opnfv.org/r1_odl_suite
 
 References
 ==========
 For more information on the OPNFV Arno release, please see http://wiki.opnfv.org/releases/arno.
 
 :Authors: Jonas Bjurel (Ericsson)
-:Version: 1.1.3
+:Version: 1.0.0
 
 **Documentation tracking**
 
index 25de4b9..9e70427 100644 (file)
@@ -205,7 +205,7 @@ network:
     gateway: 172.30.9.1
     ip_ranges:
     - - 172.30.9.70
-      - 172.30.9.79
+      - 172.30.9.70
     meta:
       assign_vip: true
       cidr: 172.16.0.0/24
index 3abbdce..fd0e7b3 100644 (file)
@@ -205,7 +205,7 @@ network:
     gateway: 172.30.9.1
     ip_ranges:
     - - 172.30.9.70
-      - 172.30.9.79
+      - 172.30.9.70
     meta:
       assign_vip: true
       cidr: 172.16.0.0/24
index ad5d4d1..099c21e 100644 (file)
@@ -9,7 +9,6 @@
     <boot dev='network'/>
     <boot dev='hd'/>
     <bootmenu enable='yes'/>
-    <bios rebootTimeout='30000'/>
   </os>
   <features>
     <acpi/>
index 3905906..76569e0 100644 (file)
@@ -8,7 +8,6 @@
     <type arch='x86_64' machine='pc-1.0'>hvm</type>
     <boot dev='network'/>
     <boot dev='hd'/>
-    <bios rebootTimeout='30000'/>
   </os>
   <features>
     <acpi/>
index ca1bd3b..715d4c4 100644 (file)
@@ -8,7 +8,6 @@
     <type arch='x86_64' machine='pc-1.0'>hvm</type>
     <boot dev='network'/>
     <boot dev='hd'/>
-    <bios rebootTimeout='30000'/>
   </os>
   <features>
     <acpi/>
index d2a7841..9ff8017 100644 (file)
@@ -98,5 +98,6 @@
       <address type='pci' domain='0x0000' bus='0x00' slot='0x08' function='0x0'/>
     </memballoon>
   </devices>
+  <seclabel type='dynamic' model='apparmor' relabel='yes'/>
 </domain>
 
diff --git a/juju/ci/build.sh b/juju/ci/build.sh
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/juju/ci/deploy.sh b/juju/ci/deploy.sh
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/opensteak/ci/build.sh b/opensteak/ci/build.sh
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/opensteak/ci/deploy.sh b/opensteak/ci/deploy.sh
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/puppet.md b/puppet.md
new file mode 100644 (file)
index 0000000..2d05fee
--- /dev/null
+++ b/puppet.md
@@ -0,0 +1,75 @@
+# Puppet installation
+
+---
+## Puppet modules
+
+### genesis
+[https://gerrit.opnfv.org/gerrit/genesis](https://gerrit.opnfv.org/gerrit/genesis)
+
+This module is a small set of puppet code. Its intention is mostly to glue the variables in foreman to the quickstack module. These classes could also be used as a site.pp node manifest for running a puppet apply.
+
+There is a class for each of the control, network and compute nodes. The site.pp/node manifest should apply one of these classes, together with the
+appropriate variables that they check for, to each node. This module will invoke the quickstack classes.
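+
+As a sketch of that use (the hostname, manifest path and chosen role class below are illustrative assumptions, not defined by this repo), a node entry could be appended to site.pp like this:
+
+    # Hypothetical example: classify one compute node with a genesis role class
+    # (the globals the class checks for, e.g. $private_ip, must also be provided)
+    {
+      echo "node 'compute01.example.com' {"
+      echo "  include opnfv::compute"
+      echo "}"
+    } >> /etc/puppet/manifests/site.pp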
+
+There is no HA or Ceph configuration in this module yet, though quickstack does support them, so they can be added.
+
+### Quickstack
+
+[https://github.com/radez/astapor/tree/opendaylight](https://github.com/radez/astapor/tree/opendaylight)
+
+This module is the composition layer that ties together all the puppet modules needed to install OpenStack.
+
+Quickstack is maintained as part of astapor. Pull the astapor module from this fork and link the puppet/modules/quickstack directory to your puppet modules directory so it can be used.
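+
+For example (the clone path, branch name and module path below are assumptions inferred from the fork URL above):
+
+    # Clone the opendaylight branch of the astapor fork and expose quickstack
+    # on the Puppet module path via a symlink
+    git clone -b opendaylight https://github.com/radez/astapor.git /opt/astapor
+    ln -s /opt/astapor/puppet/modules/quickstack /etc/puppet/modules/quickstack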
+
+The fork is necessary because there is a patch on this fork to teach quickstack how to configure neutron with ODL.
+
+Once this patch is merged, the astapor module can be cloned directly from upstream at
+[https://github.com/redhat-openstack/astapor](https://github.com/redhat-openstack/astapor).
+
+### openstack-puppet-modules (OPM)
+*or the stackforge puppet modules*
+
+[https://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-7/](https://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-7/)
+
+Pull the latest copy of the openstack-puppet-modules rpm from RDO. This rpm contains all the puppet modules needed to install OpenStack, tested together. You could also just pull all the stackforge modules. Put these modules on your puppet module path.
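+
+One possible way to do that (the repo file name and contents below are assumptions; only the baseurl comes from the link above):
+
+    # Add the RDO Juno repo and install the pre-tested puppet module bundle
+    {
+      echo "[rdo-juno]"
+      echo "name=RDO Juno (EPEL 7)"
+      echo "baseurl=https://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-7/"
+      echo "enabled=1"
+      echo "gpgcheck=0"
+    } > /etc/yum.repos.d/rdo-juno.repo
+    yum install -y openstack-puppet-modules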
+
+### puppet-opendaylight
+[https://github.com/dfarrell07/puppet-opendaylight](https://github.com/dfarrell07/puppet-opendaylight)
+
+This module is not included in OPM; clone a copy of it onto your puppet modules path.
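+
+For example (the module path is an assumption; the clone directory must be named opendaylight so the class autoloads):
+
+    # Clone puppet-opendaylight directly onto the Puppet module path
+    git clone https://github.com/dfarrell07/puppet-opendaylight.git /etc/puppet/modules/opendaylight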
+
+---
+## Installation Options
+### Puppet apply
+
+TODO: add details to do a puppet apply install
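+
+Until the TODO above is filled in, a rough sketch (paths are assumptions; the opnfv::* classes fail() unless the globals they check for, such as $private_ip, are set, for example at node scope in site.pp):
+
+    # Apply the node manifest against the modules assembled above
+    puppet apply /etc/puppet/manifests/site.pp --modulepath=/etc/puppet/modules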
+
+### Foreman
+[http://theforeman.org/](http://theforeman.org/)
+
+Foreman is a provisioning and puppet master platform in one project.
+
+Get a copy of foreman-installer on a node in an empty broadcast domain. Foreman expects to be the dhcp server on the network.
+
+Run foreman-installer with something like this:
+
+
+    sudo foreman-installer \
+        --enable-foreman-proxy \
+        --foreman-proxy-tftp=true \
+        --foreman-proxy-tftp-servername=10.1.254.1 \
+        --foreman-proxy-dhcp=true \
+        --foreman-proxy-dhcp-interface=em1 \
+        --foreman-proxy-dhcp-gateway=10.1.254.254 \
+        --foreman-proxy-dhcp-range="10.1.254.2 10.1.254.40" \
+        --foreman-proxy-dhcp-nameservers="10.1.254.1" \
+        --foreman-proxy-dns=true \
+        --foreman-proxy-dns-interface=em1 \
+        --foreman-proxy-dns-zone=example.com \
+        --foreman-proxy-dns-reverse=254.1.10.in-addr.arpa \
+        --foreman-proxy-dns-forwarders=10.1.16.30 \
+        --foreman-proxy-foreman-base-url=https://foreman.example.com
+
+* dhcp-nameservers is the DNS server address handed to provisioned hosts; point this at Foreman
+* dns-forwarders is an external DNS server to which requests that Foreman does not handle are forwarded
+* make sure that your foreman-base-url is resolvable (a quick check is sketched below)
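+
+A quick check for that last point (hostname and proxy address taken from the example above):
+
+    # Should return an address when the name is resolved via the Foreman proxy DNS
+    dig +short foreman.example.com @10.1.254.1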
diff --git a/puppet/opnfv/manifests/compute.pp b/puppet/opnfv/manifests/compute.pp
new file mode 100644 (file)
index 0000000..696f671
--- /dev/null
@@ -0,0 +1,78 @@
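+# Role class for an OPNFV compute node.  Wraps quickstack::neutron::compute to
+# configure nova-compute with Ceph RBD backed images, OVS/VXLAN tenant
+# networking using the OpenDaylight ML2 mechanism driver, and Ceilometer agents.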
+class opnfv::compute {
+
+  if $private_ip == '' { fail('private_ip is empty') }
+  if $mysql_ip == '' { fail('mysql_ip is empty') }
+  if $amqp_ip == '' { fail('amqp_ip is empty') }
+
+  if $admin_password == '' { fail('admin_password is empty') }
+
+  if $nova_user_password == '' { fail('nova_user_password is empty') }
+  if $nova_db_password == '' { fail('nova_db_password is empty') }
+
+  if $neutron_user_password == '' { fail('neutron_user_password is empty') }
+  if $neutron_db_password == '' { fail('neutron_db_password is empty') }
+
+  if $ceilometer_user_password == '' { fail('ceilometer_user_password is empty') }
+  if $ceilometer_metering_secret == '' { fail('ceilometer_metering_secret is empty') }
+
+  class { "quickstack::neutron::compute":
+    auth_host                    => $private_ip,
+    glance_host                  => $private_ip,
+    libvirt_images_rbd_pool      => 'volumes',
+    libvirt_images_rbd_ceph_conf => '/etc/ceph/ceph.conf',
+    libvirt_inject_password      => 'false',
+    libvirt_inject_key           => 'false',
+    libvirt_images_type          => 'rbd',
+    nova_host                    => $private_ip,
+    nova_db_password              => $nova_db_password,
+    nova_user_password            => $nova_user_password,
+    private_network              => '',
+    private_iface                => '',
+    private_ip                   => '',
+    rbd_user                     => 'volumes',
+    rbd_secret_uuid              => '',
+    network_device_mtu           => $quickstack::params::network_device_mtu,
+
+    admin_password                => $admin_password,
+    ssl                           => false,
+
+    mysql_host                    => $mysql_ip,
+    mysql_ca                     => $quickstack::params::mysql_ca,
+    amqp_host                     => $amqp_ip,
+    amqp_username                 => 'guest',
+    amqp_password                 => 'guest',
+    #amqp_nssdb_password           => $quickstack::params::amqp_nssdb_password,
+
+    ceilometer                    => 'true',
+    ceilometer_metering_secret    => $ceilometer_metering_secret,
+    ceilometer_user_password      => $ceilometer_user_password,
+
+    cinder_backend_gluster        => $quickstack::params::cinder_backend_gluster,
+
+    agent_type                   => 'ovs',
+    enable_tunneling             => true,
+
+    neutron_db_password          => $neutron_db_password,
+    neutron_user_password        => $neutron_user_password,
+    neutron_host                 => $private_ip,
+
+    #ovs_bridge_mappings          = $quickstack::params::ovs_bridge_mappings,
+    #ovs_bridge_uplinks           = $quickstack::params::ovs_bridge_uplinks,
+    #ovs_vlan_ranges              = $quickstack::params::ovs_vlan_ranges,
+    ovs_tunnel_iface             => 'em1',
+    ovs_tunnel_network           => '',
+    ovs_l2_population            => 'True',
+    ml2_mechanism_drivers         => ['opendaylight'],
+    odl_controller_ip             => '10.1.254.4',
+
+    tenant_network_type          => 'vxlan',
+    tunnel_id_ranges             => '1:1000',
+    #ovs_vxlan_udp_port           = $quickstack::params::ovs_vxlan_udp_port,
+    ovs_tunnel_types             => ['vxlan'],
+
+    verbose                      => $quickstack::params::verbose,
+    security_group_api           => 'neutron',
+
+  }
+
+}
diff --git a/puppet/opnfv/manifests/controller.pp b/puppet/opnfv/manifests/controller.pp
new file mode 100644 (file)
index 0000000..5c9e140
--- /dev/null
@@ -0,0 +1,106 @@
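+# Role class for an OPNFV controller node.  Wraps quickstack::neutron::controller
+# to set up Keystone, Glance, Nova, Neutron (OpenDaylight ML2 driver), Cinder,
+# Heat, Horizon, Ceilometer and Swift from the supplied global parameters.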
+class opnfv::controller {
+
+  if $admin_email == '' { fail('admin_email is empty') }
+  if $admin_password == '' { fail('admin_password is empty') }
+
+  if $public_ip == '' { fail('public_ip is empty') }
+  if $private_ip == '' { fail('private_ip is empty') }
+
+  if $mysql_ip == '' { fail('mysql_ip is empty') }
+  if $mysql_root_password == '' { fail('mysql_root_password is empty') }
+  if $amqp_ip == '' { fail('amqp_ip is empty') }
+
+  if $memcache_ip == '' { fail('memcache_ip is empty') }
+  if $neutron_ip == '' { fail('neutron_ip is empty') }
+
+  if $keystone_admin_token == '' { fail('keystone_admin_token is empty') }
+  if $keystone_db_password == '' { fail('keystone_db_password is empty') }
+
+  if $horizon_secret_key == '' { fail('horizon_secret_key is empty') }
+
+  if $nova_user_password == '' { fail('nova_user_password is empty') }
+  if $nova_db_password == '' { fail('nova_db_password is empty') }
+
+  if $cinder_user_password == '' { fail('cinder_user_password is empty') }
+  if $cinder_db_password == '' { fail('cinder_db_password is empty') }
+
+  if $glance_user_password == '' { fail('glance_user_password is empty') }
+  if $glance_db_password == '' { fail('glance_db_password is empty') }
+
+  if $neutron_user_password == '' { fail('neutron_user_password is empty') }
+  if $neutron_db_password == '' { fail('neutron_db_password is empty') }
+  if $neutron_metadata_shared_secret == '' { fail('neutron_metadata_shared_secret is empty') }
+
+  if $ceilometer_user_password == '' { fail('ceilometer_user_password is empty') }
+  if $ceilometer_metering_secret == '' { fail('ceilometer_metering_secret is empty') }
+
+  if $heat_user_password == '' { fail('heat_user_password is empty') }
+  if $heat_db_password == '' { fail('heat_db_password is empty') }
+  if $heat_auth_encrypt_key == '' { fail('heat_auth_encrypt_key is empty') }
+
+  if $swift_user_password == '' { fail('swift_user_password is empty') }
+  if $swift_shared_secret == '' { fail('swift_shared_secret is empty') }
+  if $swift_admin_password == '' { fail('swift_admin_password is empty') }
+
+  class { "quickstack::neutron::controller":
+    admin_email                   => $admin_email,
+    admin_password                => $admin_password,
+    controller_admin_host         => $private_ip,
+    controller_priv_host          => $private_ip,
+    controller_pub_host           => $public_ip,
+    ssl                           => false,
+    #support_profile               => $quickstack::params::support_profile,
+    #freeipa                       => $quickstack::params::freeipa,
+
+    mysql_host                    => $mysql_ip,
+    mysql_root_password           => $mysql_root_password,
+    #amqp_provider                 => $amqp_provider,
+    amqp_host                     => $amqp_ip,
+    amqp_username                 => 'guest',
+    amqp_password                 => 'guest',
+    #amqp_nssdb_password           => $quickstack::params::amqp_nssdb_password,
+
+    keystone_admin_token          => $keystone_admin_token,
+    keystone_db_password          => $keystone_db_password,
+
+    ceilometer_metering_secret    => $ceilometer_metering_secret,
+    ceilometer_user_password      => $ceilometer_user_password,
+
+    cinder_backend_gluster        => $quickstack::params::cinder_backend_gluster,
+    cinder_backend_gluster_name   => $quickstack::params::cinder_backend_gluster_name,
+    cinder_gluster_shares         => $quickstack::params::cinder_gluster_shares,
+    cinder_user_password          => $cinder_user_password,
+    cinder_db_password            => $cinder_db_password,
+
+    glance_db_password            => $glance_db_password,
+    glance_user_password          => $glance_user_password,
+
+    heat_cfn                      => true,
+    heat_cloudwatch               => true,
+    heat_db_password              => $heat_db_password,
+    heat_user_password            => $heat_user_password,
+    heat_auth_encrypt_key         => $heat_auth_encrypt_key,
+
+    horizon_secret_key            => $horizon_secret_key,
+    horizon_ca                    => $quickstack::params::horizon_ca,
+    horizon_cert                  => $quickstack::params::horizon_cert,
+    horizon_key                   => $quickstack::params::horizon_key,
+
+    #neutron                       => true,
+    neutron_metadata_proxy_secret => $neutron_metadata_shared_secret,
+    neutron_db_password           => $neutron_db_password,
+    neutron_user_password         => $neutron_user_password,
+    ml2_mechanism_drivers         => ['opendaylight'],
+    odl_controller_ip             => '10.1.254.4',
+
+    nova_db_password              => $nova_db_password,
+    nova_user_password            => $nova_user_password,
+
+    swift_shared_secret           => $swift_shared_secret,
+    swift_admin_password          => $swift_admin_password,
+    swift_ringserver_ip           => '192.168.203.1',
+    swift_storage_ips             => ["192.168.203.2","192.168.203.3","192.168.203.4"],
+    swift_storage_device          => 'device1',
+  }
+
+}
diff --git a/puppet/opnfv/manifests/init.pp b/puppet/opnfv/manifests/init.pp
new file mode 100644 (file)
index 0000000..9c00fdd
--- /dev/null
@@ -0,0 +1,10 @@
+class opnfv {
+    # cent rpms don't setup selinux
+    # correctly for ovs to set odl as
+    # its manager. disabling it till
+    # that's fixed.
+    exec {'disable selinux':
+        command => '/usr/sbin/setenforce 0',
+        unless => '/usr/sbin/getenforce | grep Permissive',
+    }
+}
diff --git a/puppet/opnfv/manifests/network.pp b/puppet/opnfv/manifests/network.pp
new file mode 100644 (file)
index 0000000..37cc75e
--- /dev/null
@@ -0,0 +1,50 @@
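+# Role class for an OPNFV network node.  Installs OpenDaylight with the
+# OVSDB/OpenStack features and wraps quickstack::neutron::networker for
+# OVS/VXLAN tenant networking with br-ex as the external bridge.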
+class opnfv::network {
+
+  if $private_ip == '' { fail('private_ip is empty') }
+  if $mysql_ip == '' { fail('mysql_ip is empty') }
+  if $amqp_ip == '' { fail('amqp_ip is empty') }
+  if $opendaylight_ip == '' { fail('opendaylight_ip is empty') }
+
+
+  if $nova_user_password == '' { fail('nova_user_password is empty') }
+  if $nova_db_password == '' { fail('nova_db_password is empty') }
+
+  if $neutron_user_password == '' { fail('neutron_user_password is empty') }
+  if $neutron_db_password == '' { fail('neutron_db_password is empty') }
+  if $neutron_metadata_shared_secret == '' { fail('neutron_metadata_shared_secret is empty') }
+
+  class { "opendaylight":
+        features => ['odl-base-all','odl-aaa-authn','odl-restconf',
+                     'odl-nsf-all','odl-adsal-northbound','odl-mdsal-apidocs',
+                     'odl-ovsdb-openstack','odl-ovsdb-northbound','odl-dlux-core'],
+  }
+
+
+  class { "quickstack::neutron::networker":
+    require                       => Class['opendaylight'],
+    neutron_metadata_proxy_secret => $neutron_metadata_shared_secret,
+    neutron_db_password           => $neutron_db_password,
+    neutron_user_password         => $neutron_user_password,
+    nova_db_password              => $nova_db_password,
+    nova_user_password            => $nova_user_password,
+
+    controller_priv_host          => $private_ip,
+
+    agent_type                    => 'ovs',
+    enable_tunneling              => true,
+    ovs_tunnel_iface              => 'em1',
+    ovs_tunnel_network            => '',
+    ovs_l2_population             => 'True',
+    ovs_tunnel_types              => ['vxlan'],
+    external_network_bridge       => 'br-ex',
+    tenant_network_type           => 'vxlan',
+    tunnel_id_ranges              => '1:1000',
+    ml2_mechanism_drivers         => ['opendaylight'],
+    odl_controller_ip             => $opendaylight_ip,
+
+    mysql_host                    => $mysql_ip,
+    amqp_host                     => $amqp_ip,
+    amqp_username                 => 'guest',
+    amqp_password                 => 'guest',
+  }
+}