Merge "Amended documents with further alignment of structure and some editorials...
authorJonas Bjurel <jonas.bjurel@ericsson.com>
Tue, 2 Jun 2015 22:15:52 +0000 (22:15 +0000)
committerGerrit Code Review <gerrit@172.30.200.206>
Tue, 2 Jun 2015 22:15:52 +0000 (22:15 +0000)
14 files changed:
common/puppet-opnfv/manifests/templates/dockerfile/Dockerfile
foreman/ci/README.md [new file with mode: 0644]
foreman/ci/Vagrantfile [new file with mode: 0644]
foreman/ci/bootstrap.sh [new file with mode: 0755]
foreman/ci/clean.sh
foreman/ci/deploy.sh
foreman/ci/nat_setup.sh [new file with mode: 0755]
foreman/ci/opnfv_ksgen_settings.yml [new file with mode: 0644]
foreman/ci/reload_playbook.yml [new file with mode: 0644]
foreman/ci/vm_nodes_provision.sh [new file with mode: 0755]
fuel/build/f_odl_docker/puppet/modules/opnfv/manifests/odl_docker.pp
fuel/build/f_odl_docker/puppet/modules/opnfv/scripts/change.sh [new file with mode: 0644]
fuel/build/f_odl_docker/puppet/modules/opnfv/scripts/config_net_odl.sh
fuel/build/f_odl_docker/puppet/modules/opnfv/scripts/stage_odl.sh

index 5241a80..80a92d8 100644 (file)
@@ -38,11 +38,11 @@ RUN bash -c 'echo "export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64" >> ~/.bas
 
 #Now lets got and fetch the ODL distribution
 RUN echo "Fetching ODL"
-RUN wget https://nexus.opendaylight.org/content/groups/public/org/opendaylight/integration/distribution-karaf/0.2.2-Helium-SR2/distribution-karaf-0.2.2-Helium-SR2.tar.gz -O /opt/odl_source/distribution-karaf-0.2.2-Helium-SR2.tar.gz
+RUN wget https://nexus.opendaylight.org/content/groups/public/org/opendaylight/integration/distribution-karaf/0.2.3-Helium-SR3/distribution-karaf-0.2.3-Helium-SR3.tar.gz -O /opt/odl_source/distribution-karaf-0.2.3-Helium-SR3.tar.gz
 
 RUN echo "Untarring ODL inplace"
 RUN mkdir -p /opt/odl
-RUN tar zxvf /opt/odl_source/distribution-karaf-0.2.2-Helium-SR2.tar.gz -C /opt/odl
+RUN tar zxvf /opt/odl_source/distribution-karaf-0.2.3-Helium-SR3.tar.gz -C /opt/odl
 
 RUN echo "Installing DLUX and other features into ODL"
 #COPY dockerfile/container_scripts/start_odl_docker.sh /etc/init.d/start_odl_docker.sh
diff --git a/foreman/ci/README.md b/foreman/ci/README.md
new file mode 100644 (file)
index 0000000..9417ee5
--- /dev/null
@@ -0,0 +1,86 @@
+# Foreman/QuickStack Automatic Deployment README
+
+A simple bash script (deploy.sh) will provision out a Foreman/QuickStack VM Server and 4-5 other baremetal or VM nodes in an OpenStack HA + OpenDaylight environment.
+
+##Pre-Requisites
+####Baremetal:
+* At least 5 baremetal servers, with 3 interfaces minimum, all connected to separate VLANs
+* DHCP should not be running in any VLAN. Foreman will act as a DHCP server.
+* On the baremetal server that will be your JumpHost, you need to have the 3 interfaces configured with IP addresses
+* On baremetal JumpHost you will need an RPM based linux (CentOS 7 will do) with the kernel up to date (yum update kernel) + at least 2GB of RAM
+* Nodes will need to be set to PXE boot first in priority, and off the first NIC, connected to the same VLAN as NIC 1 of your JumpHost
+* Nodes need to have BMC/OOB management via IPMI setup
+* Internet access via first (Admin) or third interface (Public)
+* No other hypervisors should be running on JumpHost
+
+####VM Nodes:
+* JumpHost with 3 interfaces, configured with IP, connected to separate VLANS
+* DHCP should not be running in any VLAN.  Foreman will act as a DHCP Server
+* On baremetal JumpHost you will need an RPM based linux (CentOS 7 will do) with the kernel up to date (yum update kernel) + at least 24GB of RAM
+* Internet access via the first (Admin) or third interface (Public)
+* No other hypervisors should be running on JumpHost
+
+##How It Works
+
+###deploy.sh:
+
+* Detects your network configuration (3 or 4 usable interfaces)
+* Modifies a “ksgen.yml” settings file and Vagrantfile with necessary network info
+* Installs Vagrant and dependencies
+* Downloads Centos7 Vagrant basebox, and issues a “vagrant up” to start the VM
+* The Vagrantfile points to bootstrap.sh as the provisioner to takeover rest of the install
+
+###bootstrap.sh:
+
+* Is initiated inside of the VM once it is up
+* Installs Khaleesi, Ansible, and Python dependencies
+* Makes a call to Khaleesi to start a playbook: opnfv.yml + “ksgen.yml” settings file
+
+###Khaleesi (Ansible):
+
+* Runs through the playbook to install Foreman/QuickStack inside of the VM
+* Configures services needed for a JumpHost: DHCP, TFTP, DNS
+* Uses info from “ksgen.yml” file to add your nodes into Foreman and set them to Build mode
+
+####Baremetal Only:
+* Issues an API call to Foreman to rebuild all nodes
+* Ansible then waits to make sure nodes come back via ssh checks
+* Ansible then waits for puppet to run on each node and complete
+
+####VM Only:
+* deploy.sh then brings up 5 more Vagrant VMs
+* Checks into Foreman and tells Foreman nodes are built
+* Configures and starts puppet on each node
+
+##Execution Instructions
+
+* On your JumpHost, as root, clone the repository into /root/ with 'git clone https://github.com/trozet/bgs_vagrant.git'
+
+####Baremetal Only:
+* Edit opnfv_ksgen_settings.yml → “nodes” section:
+
+  * For each node, compute, controller1..3:
+    * mac_address - change to mac_address of that node's Admin NIC (1st NIC)
+    * bmc_ip - change to the IP of the BMC (out-of-band management) interface
+    * bmc_mac - same as above, but MAC address
+    * bmc_user - IPMI username
+    * bmc_pass - IPMI password
+
+  * For each controller node:
+    * private_mac - change to mac_address of node's Private NIC (2nd NIC)
+
+* Execute deploy.sh via: ./deploy.sh -base_config /root/bgs_vagrant/opnfv_ksgen_settings.yml
+
+####VM Only:
+* Execute deploy.sh via: ./deploy.sh -virtual
+* Install directory for each VM will be in /tmp (for example /tmp/compute, /tmp/controller1)
+
+####Both Approaches:
+* Install directory for foreman-server is /tmp/bgs_vagrant/ - This is where vagrant will be launched from automatically
+* To access the VM you can 'cd /tmp/bgs_vagrant' and type 'vagrant ssh'
+* To access Foreman enter the IP address shown in 'cat /tmp/bgs_vagrant/opnfv_ksgen_settings.yml | grep foreman_url'
+* The user/pass by default is admin//octopus
+
+##Redeploying
+Make sure you run ./clean.sh for the baremetal deployment with your opnfv_ksgen_settings.yml file as "-base_config".  This will ensure that your nodes are turned off and that your VM is destroyed ("vagrant destroy" in the /tmp/bgs_vagrant directory).
+For VM redeployment, make sure you "vagrant destroy" in each /tmp/<node> as well if you want to redeploy.  To check and make sure no VMs are still running on your Jumphost you can use "vboxmanage list runningvms".
diff --git a/foreman/ci/Vagrantfile b/foreman/ci/Vagrantfile
new file mode 100644 (file)
index 0000000..100e12d
--- /dev/null
@@ -0,0 +1,93 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+# All Vagrant configuration is done below. The "2" in Vagrant.configure
+# configures the configuration version (we support older styles for
+# backwards compatibility). Please don't change it unless you know what
+# you're doing.
+Vagrant.configure(2) do |config|
+  # The most common configuration options are documented and commented below.
+  # For a complete reference, please see the online documentation at
+  # https://docs.vagrantup.com.
+
+  # Every Vagrant development environment requires a box. You can search for
+  # boxes at https://atlas.hashicorp.com/search.
+  config.vm.box = "chef/centos-7.0"
+
+  # Disable automatic box update checking. If you disable this, then
+  # boxes will only be checked for updates when the user runs
+  # `vagrant box outdated`. This is not recommended.
+  # config.vm.box_check_update = false
+
+  # Create a forwarded port mapping which allows access to a specific port
+  # within the machine from a port on the host machine. In the example below,
+  # accessing "localhost:8080" will access port 80 on the guest machine.
+  # config.vm.network "forwarded_port", guest: 80, host: 8080
+  # Create a private network, which allows host-only access to the machine
+  # using a specific IP.
+  # config.vm.network "private_network", ip: "192.168.33.10"
+
+  # Create a public network, which is generally matched to a bridged network.
+  # Bridged networks make the machine appear as another physical device on
+  # your network.
+  # config.vm.network "public_network"
+  config.vm.network "public_network", ip: "10.4.1.2", bridge: 'eth_replace0'
+  config.vm.network "public_network", ip: "10.4.9.2", bridge: 'eth_replace1'
+  config.vm.network "public_network", ip: "10.2.84.2", bridge: 'eth_replace2'
+  config.vm.network "public_network", ip: "10.3.84.2", bridge: 'eth_replace3'
+
+  # IP address of your LAN's router
+  default_gw = ""
+  nat_flag = false
+
+  # Share an additional folder to the guest VM. The first argument is
+  # the path on the host to the actual folder. The second argument is
+  # the path on the guest to mount the folder. And the optional third
+  # argument is a set of non-required options.
+  # config.vm.synced_folder "../data", "/vagrant_data"
+
+  # Provider-specific configuration so you can fine-tune various
+  # backing providers for Vagrant. These expose provider-specific options.
+  # Example for VirtualBox:
+  #
+   config.vm.provider "virtualbox" do |vb|
+  #   # Display the VirtualBox GUI when booting the machine
+  #   vb.gui = true
+  #
+  #   # Customize the amount of memory on the VM:
+     vb.memory = 2048
+     vb.cpus = 2
+   end
+  #
+  # View the documentation for the provider you are using for more
+  # information on available options.
+
+  # Define a Vagrant Push strategy for pushing to Atlas. Other push strategies
+  # such as FTP and Heroku are also available. See the documentation at
+  # https://docs.vagrantup.com/v2/push/atlas.html for more information.
+  # config.push.define "atlas" do |push|
+  #   push.app = "YOUR_ATLAS_USERNAME/YOUR_APPLICATION_NAME"
+  # end
+
+  # Enable provisioning with a shell script. Additional provisioners such as
+  # Puppet, Chef, Ansible, Salt, and Docker are also available. Please see the
+  # documentation for more information about their specific syntax and use.
+  # config.vm.provision "shell", inline: <<-SHELL
+  #   sudo apt-get update
+  #   sudo apt-get install -y apache2
+  # SHELL
+  
+  config.ssh.username = 'root'
+  config.ssh.password = 'vagrant'
+  config.ssh.insert_key = 'true'
+  config.vm.provision "ansible" do |ansible|
+     ansible.playbook = "reload_playbook.yml"
+  end
+  config.vm.provision :shell, :inline => "mount -t vboxsf vagrant /vagrant"
+  config.vm.provision :shell, :inline => "route add default gw #{default_gw}"
+  if nat_flag
+    config.vm.provision :shell, path: "nat_setup.sh"
+  end
+  config.vm.provision :shell, path: "bootstrap.sh"
+end
diff --git a/foreman/ci/bootstrap.sh b/foreman/ci/bootstrap.sh
new file mode 100755 (executable)
index 0000000..839dfaa
--- /dev/null
@@ -0,0 +1,55 @@
+#!/usr/bin/env bash
+
+#bootstrap script for installing/running Khaleesi in Foreman/QuickStack VM
+#author: Tim Rozet (trozet@redhat.com)
+#
+#Uses Vagrant and VirtualBox
+#VagrantFile uses bootstrap.sh which installs Khaleesi
+#Khaleesi will install and configure Foreman/QuickStack
+#
+#Pre-requisites:
+#Target system should be Centos7
+#Ensure the host's kernel is up to date (yum update)
+
+##VARS
+reset=`tput sgr0`
+blue=`tput setaf 4`
+red=`tput setaf 1`
+green=`tput setaf 2`
+
+##END VARS
+
+
+# Install EPEL repo for access to many other yum repos
+# Major version is pinned to force some consistency for Arno
+yum install -y epel-release-7*
+
+# Install other required packages
+# Major version is pinned to force some consistency for Arno
+if ! yum -y install python-pip-1* python-virtualenv-1* gcc-4* git-1* sshpass-1* ansible-1* python-requests-1*; then
+  printf '%s\n' 'bootstrap.sh: failed to install required packages' >&2
+  exit 1
+fi
+
+cd /opt
+
+echo "Cloning khaleesi to /opt"
+
+if [ ! -d khaleesi ]; then
+  if ! git clone -b opnfv https://github.com/trozet/khaleesi.git; then
+    printf '%s\n' 'bootstrap.sh: Unable to git clone khaleesi' >&2
+    exit 1
+  fi
+fi
+
+cd khaleesi
+
+cp ansible.cfg.example ansible.cfg
+
+echo "Completed Installing Khaleesi"
+
+cd /opt/khaleesi/
+
+ansible localhost -m setup -i local_hosts
+
+./run.sh --no-logs --use /vagrant/opnfv_ksgen_settings.yml playbooks/opnfv.yml
index 3f3c879..f61ac93 100755 (executable)
@@ -51,9 +51,10 @@ esac
 done
 
 
-##install ipmitool
+# Install ipmitool
+# Major version is pinned to force some consistency for Arno
 if ! yum list installed | grep -i ipmitool; then
-  if ! yum -y install ipmitool; then
+  if ! yum -y install ipmitool-1*; then
     echo "${red}Unable to install ipmitool!${reset}"
     exit 1
   fi
index 87256cb..77024c6 100755 (executable)
@@ -186,18 +186,13 @@ done
 ##disable selinux
 /sbin/setenforce 0
 
-##install EPEL
-if ! yum repolist | grep "epel/"; then
-  if ! rpm -Uvh http://dl.fedoraproject.org/pub/epel/7/x86_64/e/epel-release-7-5.noarch.rpm; then
-    printf '%s\n' 'deploy.sh: Unable to configure EPEL repo' >&2
-    exit 1
-  fi
-else
-  printf '%s\n' 'deploy.sh: Skipping EPEL repo as it is already configured.'
-fi
+# Install EPEL repo for access to many other yum repos
+# Major version is pinned to force some consistency for Arno
+yum install -y epel-release-7*
 
-##install dependencies
-if ! yum -y install binutils gcc make patch libgomp glibc-headers glibc-devel kernel-headers kernel-devel dkms psmisc; then
+# Install other required packages
+# Major versions are pinned to force some consistency for Arno
+if ! yum install -y binutils-2* gcc-4* make-3* patch-2* libgomp-4* glibc-headers-2* glibc-devel-2* kernel-headers-3* kernel-devel-3* dkms-2* psmisc-22*; then
   printf '%s\n' 'deploy.sh: Unable to install depdency packages' >&2
   exit 1
 fi
@@ -240,7 +235,7 @@ fi
 
 ##install Ansible
 if ! yum list installed | grep -i ansible; then
-  if ! yum -y install ansible; then
+  if ! yum -y install ansible-1*; then
     printf '%s\n' 'deploy.sh: Unable to install Ansible package' >&2
     exit 1
   fi
diff --git a/foreman/ci/nat_setup.sh b/foreman/ci/nat_setup.sh
new file mode 100755 (executable)
index 0000000..349e416
--- /dev/null
@@ -0,0 +1,44 @@
+#!/usr/bin/env bash
+
+#NAT setup script to set up NAT from the Admin -> Public interface
+#on a Vagrant VM
+#Called by Vagrantfile in conjunction with deploy.sh
+#author: Tim Rozet (trozet@redhat.com)
+#
+#Uses Vagrant and VirtualBox
+#VagrantFile uses nat_setup.sh which sets up NAT
+#
+
+##make sure firewalld is stopped and disabled
+if ! systemctl stop firewalld; then
+  printf '%s\n' 'nat_setup.sh: Unable to stop firewalld' >&2
+  exit 1
+fi
+
+systemctl disable firewalld
+
+# Install iptables
+# Major version is pinned to force some consistency for Arno
+if ! yum -y install iptables-services-1*; then
+  printf '%s\n' 'nat_setup.sh: Unable to install iptables-services' >&2
+  exit 1
+fi
+
+##start and enable iptables service
+if ! systemctl start iptables; then
+  printf '%s\n' 'nat_setup.sh: Unable to start iptables-services' >&2
+  exit 1
+fi
+
+systemctl enable iptables
+
+##enable IP forwarding
+echo 1 > /proc/sys/net/ipv4/ip_forward
+
+##Configure iptables
+/sbin/iptables -t nat -I POSTROUTING -o enp0s10 -j MASQUERADE
+/sbin/iptables -I FORWARD 1 -i enp0s10 -o enp0s8 -m state --state RELATED,ESTABLISHED -j ACCEPT
+/sbin/iptables -I FORWARD 1 -i enp0s8 -o enp0s10 -j ACCEPT
+/sbin/iptables -I INPUT 1 -j ACCEPT
+/sbin/iptables -I OUTPUT 1 -j ACCEPT
+
diff --git a/foreman/ci/opnfv_ksgen_settings.yml b/foreman/ci/opnfv_ksgen_settings.yml
new file mode 100644 (file)
index 0000000..21840dd
--- /dev/null
@@ -0,0 +1,338 @@
+global_params:
+  admin_email: opnfv@opnfv.com
+  ha_flag: "true"
+  odl_flag: "true"
+  private_network:
+  storage_network:
+  controllers_hostnames_array: oscontroller1,oscontroller2,oscontroller3
+  controllers_ip_array:
+  amqp_vip:
+  private_subnet:
+  cinder_admin_vip:
+  cinder_private_vip:
+  cinder_public_vip:
+  db_vip:
+  glance_admin_vip:
+  glance_private_vip:
+  glance_public_vip:
+  heat_admin_vip:
+  heat_private_vip:
+  heat_public_vip:
+  heat_cfn_admin_vip:
+  heat_cfn_private_vip:
+  heat_cfn_public_vip:
+  horizon_admin_vip:
+  horizon_private_vip:
+  horizon_public_vip:
+  keystone_admin_vip:
+  keystone_private_vip:
+  keystone_public_vip:
+  loadbalancer_vip:
+  neutron_admin_vip:
+  neutron_private_vip:
+  neutron_public_vip:
+  nova_admin_vip:
+  nova_private_vip:
+  nova_public_vip:
+  external_network_flag: "true"
+  public_gateway:
+  public_dns:
+  public_network:
+  public_subnet:
+  public_allocation_start:
+  public_allocation_end:
+  deployment_type:
+network_type: multi_network
+default_gw:
+foreman:
+  seed_values:
+    - { name: heat_cfn, oldvalue: true, newvalue: false }
+workaround_puppet_version_lock: false
+opm_branch: master
+installer:
+  name: puppet
+  short_name: pupt
+  network:
+    auto_assign_floating_ip: false
+    variant:
+      short_name: m2vx
+    plugin:
+      name: neutron
+workaround_openstack_packstack_rpm: false
+tempest:
+  repo:
+    Fedora:
+      '19': http://REPLACE_ME/~REPLACE_ME/openstack-tempest-icehouse/fedora-19/
+      '20': http://REPLACE_ME/~REPLACE_ME/openstack-tempest-icehouse/fedora-20/
+    RedHat:
+       '7.0': https://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-7/
+  use_virtual_env: false
+  public_allocation_end: 10.2.84.71
+  skip:
+    files: null
+    tests: null
+  public_allocation_start: 10.2.84.51
+  physnet: physnet1
+  use_custom_repo: false
+  public_subnet_cidr: 10.2.84.0/24
+  public_subnet_gateway: 10.2.84.1
+  additional_default_settings:
+  - section: compute
+    option: flavor_ref
+    value: 1
+  cirros_image_file: cirros-0.3.1-x86_64-disk.img
+  setup_method: tempest/rpm
+  test_name: all
+  rdo:
+     version: juno
+     rpm: http://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm
+  rpm:
+    version: 20141201
+  dir: ~{{ nodes.tempest.remote_user }}/tempest-dir
+tmp:
+  node_prefix: '{{ node.prefix | reject("none") | join("-") }}-'
+  anchors:
+  - https://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm
+  - http://repos.fedorapeople.org/repos/openstack/openstack-juno/
+opm_repo: https://github.com/redhat-openstack/openstack-puppet-modules.git
+workaround_vif_plugging: false
+openstack_packstack_rpm: http://REPLACE_ME/brewroot/packages/openstack-puppet-modules/2013.2/9.el6ost/noarch/openstack-puppet-modules-2013.2-9.el6ost.noarch.rpm
+nodes:
+  compute:
+    name: oscompute11.opnfv.com
+    hostname: oscompute11.opnfv.com
+    short_name: oscompute11
+    type: compute
+    host_type: baremetal
+    hostgroup: Compute
+    mac_address: "10:23:45:67:89:AB"
+    bmc_ip: 10.4.17.2
+    bmc_mac: "10:23:45:67:88:AB"
+    bmc_user: root
+    bmc_pass: root
+    ansible_ssh_pass: "Op3nStack"
+    admin_password: ""
+    groups:
+    - compute
+    - foreman_nodes
+    - puppet
+    - rdo
+    - neutron
+  controller1:
+    name: oscontroller1.opnfv.com
+    hostname: oscontroller1.opnfv.com
+    short_name: oscontroller1
+    type: controller
+    host_type: baremetal
+    hostgroup: Controller_Network_ODL
+    mac_address: "10:23:45:67:89:AC"
+    bmc_ip: 10.4.17.3
+    bmc_mac: "10:23:45:67:88:AC"
+    bmc_user: root
+    bmc_pass: root
+    private_ip: controller1_private
+    private_mac: "10:23:45:67:87:AC"
+    ansible_ssh_pass: "Op3nStack"
+    admin_password: "octopus"
+    groups:
+    - controller
+    - foreman_nodes
+    - puppet
+    - rdo
+    - neutron
+  controller2:
+    name: oscontroller2.opnfv.com
+    hostname: oscontroller2.opnfv.com
+    short_name: oscontroller2
+    type: controller
+    host_type: baremetal
+    hostgroup: Controller_Network
+    mac_address: "10:23:45:67:89:AD"
+    bmc_ip: 10.4.17.4
+    bmc_mac: "10:23:45:67:88:AD"
+    bmc_user: root
+    bmc_pass: root
+    private_ip: controller2_private
+    private_mac: "10:23:45:67:87:AD"
+    ansible_ssh_pass: "Op3nStack"
+    admin_password: "octopus"
+    groups:
+    - controller
+    - foreman_nodes
+    - puppet
+    - rdo
+    - neutron
+  controller3:
+    name: oscontroller3.opnfv.com
+    hostname: oscontroller3.opnfv.com
+    short_name: oscontroller3
+    type: controller
+    host_type: baremetal
+    hostgroup: Controller_Network
+    mac_address: "10:23:45:67:89:AE"
+    bmc_ip: 10.4.17.5
+    bmc_mac: "10:23:45:67:88:AE"
+    bmc_user: root
+    bmc_pass: root
+    private_ip: controller3_private
+    private_mac: "10:23:45:67:87:AE"
+    ansible_ssh_pass: "Op3nStack"
+    admin_password: "octopus"
+    groups:
+    - controller
+    - foreman_nodes
+    - puppet
+    - rdo
+    - neutron
+workaround_mysql_centos7: true
+distro:
+  name: centos
+  centos:
+    '7.0':
+      repos: []
+  short_name: c
+  short_version: 70
+  version: '7.0'
+  rhel:
+    '7.0':
+      kickstart_url: http://REPLACE_ME/released/RHEL-7/7.0/Server/x86_64/os/
+      repos:
+      - section: rhel7-server-rpms
+        name: Packages for RHEL 7 - $basearch
+        baseurl: http://REPLACE_ME/rel-eng/repos/rhel-7.0/x86_64/
+        gpgcheck: 0
+      - section: rhel-7-server-update-rpms
+        name: Update Packages for Enterprise Linux 7 - $basearch
+        baseurl: http://REPLACE_ME/rel-eng/repos/rhel-7.0-z/x86_64/
+        gpgcheck: 0
+      - section: rhel-7-server-optional-rpms
+        name: Optional Packages for Enterprise Linux 7 - $basearch
+        baseurl: http://REPLACE_ME/released/RHEL-7/7.0/Server-optional/x86_64/os/
+        gpgcheck: 0
+      - section: rhel-7-server-extras-rpms
+        name: Optional Packages for Enterprise Linux 7 - $basearch
+        baseurl: http://REPLACE_ME/rel-eng/EXTRAS-7.0-RHEL-7-20140610.0/compose/Server/x86_64/os/
+        gpgcheck: 0
+    '6.5':
+      kickstart_url: http://REPLACE_ME/released/RHEL-6/6.5/Server/x86_64/os/
+      repos:
+      - section: rhel6.5-server-rpms
+        name: Packages for RHEL 6.5 - $basearch
+        baseurl: http://REPLACE_ME.REPLACE_ME/released/RHEL-6/6.5/Server/$basearch/os/Server
+        gpgcheck: 0
+      - section: rhel-6.5-server-update-rpms
+        name: Update Packages for Enterprise Linux 6.5 - $basearch
+        baseurl: http://REPLACE_ME.REPLACE_ME/rel-eng/repos/RHEL-6.5-Z/$basearch/
+        gpgcheck: 0
+      - section: rhel-6.5-server-optional-rpms
+        name: Optional Packages for Enterprise Linux 6.5 - $basearch
+        baseurl: http://REPLACE_ME.REPLACE_ME/released/RHEL-6/6.5/Server/optional/$basearch/os
+        gpgcheck: 0
+      - section: rhel6.5-server-rpms-32bit
+        name: Packages for RHEL 6.5 - i386
+        baseurl: http://REPLACE_ME.REPLACE_ME/released/RHEL-6/6.5/Server/i386/os/Server
+        gpgcheck: 0
+        enabled: 1
+      - section: rhel-6.5-server-update-rpms-32bit
+        name: Update Packages for Enterprise Linux 6.5 - i686
+        baseurl: http://REPLACE_ME.REPLACE_ME/rel-eng/repos/RHEL-6.5-Z/i686/
+        gpgcheck: 0
+        enabled: 1
+      - section: rhel-6.5-server-optional-rpms-32bit
+        name: Optional Packages for Enterprise Linux 6.5 - i386
+        baseurl: http://REPLACE_ME.REPLACE_ME/released/RHEL-6/6.5/Server/optional/i386/os
+        gpgcheck: 0
+        enabled: 1
+    subscription:
+      username: REPLACE_ME
+      password: HWj8TE28Qi0eP2c
+      pool: 8a85f9823e3d5e43013e3ddd4e2a0977
+  config:
+    selinux: permissive
+    ntp_server: 0.pool.ntp.org
+    dns_servers:
+    - 10.4.1.1
+    - 10.4.0.2
+    reboot_delay: 1
+    initial_boot_timeout: 180
+node:
+  prefix:
+  - rdo
+  - pupt
+  - ffqiotcxz1
+  - null
+product:
+  repo_type: production
+  name: rdo
+  short_name: rdo
+  rpm:
+    CentOS: https://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm
+    Fedora: https://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm
+    RedHat: https://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm
+  short_version: ju
+  repo:
+    production:
+      CentOS:
+        7.0.1406: http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-7
+        '6.5': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-6
+        '7.0': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-7
+      Fedora:
+        '20': http://repos.fedorapeople.org/repos/openstack/openstack-juno/fedora-20
+        '21': http://repos.fedorapeople.org/repos/openstack/openstack-juno/fedora-21
+      RedHat:
+        '6.6': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-6
+        '6.5': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-6
+        '7.0': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-7
+  version: juno
+  config:
+    enable_epel: y
+  short_repo: prod
+tester:
+  name: tempest
+distro_reboot_options: '--no-wall '' Reboot is triggered by Ansible'' '
+job:
+  verbosity: 1
+  archive:
+  - '{{ tempest.dir }}/etc/tempest.conf'
+  - '{{ tempest.dir }}/etc/tempest.conf.sample'
+  - '{{ tempest.dir }}/*.log'
+  - '{{ tempest.dir }}/*.xml'
+  - /root/
+  - /var/log/
+  - /etc/nova
+  - /etc/ceilometer
+  - /etc/cinder
+  - /etc/glance
+  - /etc/keystone
+  - /etc/neutron
+  - /etc/ntp
+  - /etc/puppet
+  - /etc/qpid
+  - /etc/qpidd.conf
+  - /root
+  - /etc/yum.repos.d
+  - /etc/yum.repos.d
+topology:
+  name: multinode
+  short_name: mt
+workaround_neutron_ovs_udev_loop: true
+workaround_glance_table_utf8: false
+verbosity:
+  debug: 0
+  info: 1
+  warning: 2
+  warn: 2
+  errors: 3
+provisioner:
+  username: admin
+  network:
+    type: nova
+    name: external
+  skip: skip_provision
+  foreman_url: https://10.2.84.2/api/v2/
+  password: octopus
+  type: foreman
+workaround_nova_compute_fix: false
+workarounds:
+  enabled: true
+
diff --git a/foreman/ci/reload_playbook.yml b/foreman/ci/reload_playbook.yml
new file mode 100644 (file)
index 0000000..9e3d053
--- /dev/null
@@ -0,0 +1,16 @@
+---
+- hosts: all
+  tasks:
+    - name: restart machine
+      shell: sleep 2 && shutdown -r now "Ansible updates triggered"
+      async: 1
+      poll: 0
+      ignore_errors: true
+
+    - name: waiting for server to come back
+      local_action: wait_for host="{{ ansible_ssh_host }}"
+                    port="{{ ansible_ssh_port }}"
+                    state=started
+                    delay=60
+                    timeout=180
+      sudo: false
diff --git a/foreman/ci/vm_nodes_provision.sh b/foreman/ci/vm_nodes_provision.sh
new file mode 100755 (executable)
index 0000000..d0bba64
--- /dev/null
@@ -0,0 +1,91 @@
+#!/usr/bin/env bash
+
+#bootstrap script for VM OPNFV nodes
+#author: Tim Rozet (trozet@redhat.com)
+#
+#Uses Vagrant and VirtualBox
+#VagrantFile uses vm_nodes_provision.sh which configures linux on nodes
+#Depends on Foreman being up to be able to register and apply puppet
+#
+#Pre-requisites:
+#Target system should be Centos7 Vagrant VM
+
+##VARS
+reset=`tput sgr0`
+blue=`tput setaf 4`
+red=`tput setaf 1`
+green=`tput setaf 2`
+
+host_name=REPLACE
+dns_server=REPLACE
+##END VARS
+
+##set hostname
+echo "${blue} Setting Hostname ${reset}"
+hostnamectl set-hostname $host_name
+
+##remove NAT DNS
+echo "${blue} Removing DNS server on first interface ${reset}"
+if ! grep 'PEERDNS=no' /etc/sysconfig/network-scripts/ifcfg-enp0s3; then
+  echo "PEERDNS=no" >> /etc/sysconfig/network-scripts/ifcfg-enp0s3
+  systemctl restart NetworkManager
+fi
+
+if ! ping www.google.com -c 5; then 
+  echo "${red} No internet connection, check your route and DNS setup ${reset}"
+  exit 1
+fi
+
+# Install EPEL repo for access to many other yum repos
+# Major version is pinned to force some consistency for Arno
+yum install -y epel-release-7*
+
+# Update device-mapper-libs, needed for libvirtd on compute nodes
+# Major version is pinned to force some consistency for Arno
+if ! yum -y upgrade device-mapper-libs-1*; then
+   echo "${red} WARN: Unable to upgrade device-mapper-libs...nova-compute may not function ${reset}"
+fi
+
+# Install other required packages
+# Major version is pinned to force some consistency for Arno
+echo "${blue} Installing Puppet ${reset}"
+if ! yum install -y puppet-3*; then
+  printf '%s\n' 'vm_nodes_provision.sh: failed to install required packages' >&2
+  exit 1
+fi
+
+echo "${blue} Configuring puppet ${reset}"
+cat > /etc/puppet/puppet.conf << EOF
+
+[main]
+vardir = /var/lib/puppet
+logdir = /var/log/puppet
+rundir = /var/run/puppet
+ssldir = \$vardir/ssl
+
+[agent]
+pluginsync      = true
+report          = true
+ignoreschedules = true
+daemon          = false
+ca_server       = foreman-server.opnfv.com
+certname        = $host_name
+environment     = production
+server          = foreman-server.opnfv.com
+runinterval     = 600
+
+EOF
+
+# Setup puppet to run on system reboot
+/sbin/chkconfig --level 345 puppet on
+
+/usr/bin/puppet agent --config /etc/puppet/puppet.conf -o --tags no_such_tag --server foreman-server.opnfv.com --no-daemonize
+
+sync
+
+# Inform the build system that we are done.
+echo "Informing Foreman that we are built"
+wget -q -O /dev/null --no-check-certificate http://foreman-server.opnfv.com:80/unattended/built
+
+echo "Starting puppet"
+systemctl start puppet
index bace585..c286127 100644 (file)
@@ -40,11 +40,16 @@ class opnfv::odl_docker
         source => '/etc/puppet/modules/opnfv/scripts/stage_odl.sh',
         mode   => 750,
       }
-      file { '/opt/opnfv/odl/config_net.sh':
+      file { '/opt/opnfv/odl/config_net_odl.sh':
         ensure => present,
         source => '/etc/puppet/modules/opnfv/scripts/config_net_odl.sh',
         mode   => 750,
       }
+      file { '/opt/opnfv/odl/change.sh':
+        ensure => present,
+        source => '/etc/puppet/modules/opnfv/scripts/change.sh',
+        mode   => 750,
+      }
 
 
       # fix failed to find the cgroup root issue
diff --git a/fuel/build/f_odl_docker/puppet/modules/opnfv/scripts/change.sh b/fuel/build/f_odl_docker/puppet/modules/opnfv/scripts/change.sh
new file mode 100644 (file)
index 0000000..f7f3d6e
--- /dev/null
@@ -0,0 +1,219 @@
+#!/bin/bash
+# script to remove bridges and reset networking for ODL
+
+
+#VARS
+MODE=0
+DNS=8.8.8.8
+
+#ENV
+source ~/openrc
+
+# GET IPS for that node
+function get_ips {
+       BR_MGMT=`grep address /etc/network/ifcfg_backup/ifcfg-br-mgmt | awk -F" " '{print $2}'`
+       BR_STORAGE=`grep address /etc/network/ifcfg_backup/ifcfg-br-storage | awk -F" " '{print $2}'`
+       BR_FW_ADMIN=`grep address /etc/network/ifcfg_backup/ifcfg-br-fw-admin | awk -F" " '{print $2}'`
+       BR_EX=`grep address /etc/network/ifcfg_backup/ifcfg-br-ex | awk -F" " '{print $2}'`
+       DEF_NETMASK=255.255.255.0
+       DEF_GW=172.30.9.1
+}
+
+function backup_ifcfg {
+        echo " backing up "
+        mkdir -p /etc/network/ifcfg_backup
+        mv /etc/network/interfaces.d/ifcfg-br-ex /etc/network/ifcfg_backup/.
+        mv /etc/network/interfaces.d/ifcfg-br-fw-admin /etc/network/ifcfg_backup/.
+        mv /etc/network/interfaces.d/ifcfg-br-mgmt /etc/network/ifcfg_backup/.
+        mv /etc/network/interfaces.d/ifcfg-br-storage /etc/network/ifcfg_backup/.
+        mv /etc/network/interfaces.d/ifcfg-br-prv /etc/network/ifcfg_backup/.
+        mv /etc/network/interfaces.d/ifcfg-eth0 /etc/network/ifcfg_backup/.
+        mv /etc/network/interfaces.d/ifcfg-eth1 /etc/network/ifcfg_backup/.
+        rm -rf /etc/network/interfaces.d/ifcfg-eth1.300
+        rm -rf /etc/network/interfaces.d/ifcfg-eth1.301
+        rm -rf /etc/network/interfaces.d/ifcfg-eth1
+        rm -rf /etc/network/interfaces.d/ifcfg-eth0
+
+}
+
+
+function create_ifcfg_br_mgmt {
+        echo "migrating br_mgmt"
+        echo "auto eth1.300" >> /etc/network/interfaces.d/ifcfg-eth1.300
+        echo "iface eth1.300 inet static" >> /etc/network/interfaces.d/ifcfg-eth1.300
+        echo "     address $BR_MGMT" >> /etc/network/interfaces.d/ifcfg-eth1.300
+        echo "     netmask $DEF_NETMASK" >> /etc/network/interfaces.d/ifcfg-eth1.300
+}
+
+function create_ifcfg_br_storage {
+        echo "migration br_storage"
+        echo "auto eth1.301" >> /etc/network/interfaces.d/ifcfg-eth1.301
+        echo "iface eth1.301 inet static" >> /etc/network/interfaces.d/ifcfg-eth1.301
+        echo "     address $BR_STORAGE" >> /etc/network/interfaces.d/ifcfg-eth1.301
+        echo "     netmask $DEF_NETMASK" >> /etc/network/interfaces.d/ifcfg-eth1.301
+}
+
+function create_ifcfg_br_fw_admin {
+        echo " migratinng br_fw_admin"
+        echo "auto eth1" >> /etc/network/interfaces.d/ifcfg-eth1
+        echo "iface eth1 inet static" >> /etc/network/interfaces.d/ifcfg-eth1
+        echo "     address $BR_FW_ADMIN" >> /etc/network/interfaces.d/ifcfg-eth1
+        echo "     netmask $DEF_NETMASK" >> /etc/network/interfaces.d/ifcfg-eth1
+}
+
+function create_ifcfg_eth0 {
+        echo "migratinng br-ex to eth0 - temporarily"
+        echo "auto eth0" >> /etc/network/interfaces.d/ifcfg-eth0
+        echo "iface eth0 inet static" >> /etc/network/interfaces.d/ifcfg-eth0
+        echo "     address $BR_EX" >> /etc/network/interfaces.d/ifcfg-eth0
+        echo "     netmask $DEF_NETMASK" >> /etc/network/interfaces.d/ifcfg-eth0
+        echo "     gateway $DEF_GW" >> /etc/network/interfaces.d/ifcfg-eth0
+}
+
+function set_mode {
+       if [ -d "/var/lib/glance/images" ]
+       then 
+               echo " controller "
+               MODE=0
+       else 
+               echo " compute "
+               MODE=1
+       fi
+}
+
+
+function stop_ovs {
+        echo "Stopping OpenVSwitch"
+        service openvswitch-switch stop
+
+}
+
+function start_ovs {
+        echo "Starting OVS"
+        service openvswitch-switch start
+        ovs-vsctl show
+}
+
+
+function clean_ovs {
+        echo "cleaning OVS DB"
+        stop_ovs
+        rm -rf /var/log/openvswitch/*
+        mkdir -p /opt/opnfv/odl/ovs_back
+        cp -pr /etc/openvswitch/* /opt/opnfv/odl/ovs_back/.
+        rm -rf /etc/openvswitch/conf.db
+        echo "restarting OVS - you should see Nothing there"
+        start_ovs
+}
+
+
+
+function reboot_me {
+        reboot
+}
+
+function allow_challenge {
+       sed -i -e 's/ChallengeResponseAuthentication no/ChallengeResponseAuthentication yes/g' /etc/ssh/sshd_config
+       service ssh restart
+}
+
+function clean_neutron {
+       subnets=( `neutron subnet-list | awk -F" " '{print $2}' | grep -v id | sed '/^$/d'` )
+       networks=( `neutron net-list | awk -F" " '{print $2}' | grep -v id | sed '/^$/d'` )
+       ports=( `neutron port-list | awk -F" " '{print $2}' | grep -v id | sed '/^$/d'` )
+       routers=( `neutron router-list | awk -F" " '{print $2}' | grep -v id | sed '/^$/d'` )
+
+       #display all elements
+       echo "SUBNETS: ${subnets[@]} "
+       echo "NETWORKS: ${networks[@]} "
+       echo "PORTS: ${ports[@]} "
+       echo "ROUTERS: ${routers[@]} "
+       
+       
+       # get port and subnet for each router
+       for i in "${routers[@]}"
+       do
+               routerport=( `neutron router-port-list $i | awk -F" " '{print $2}' | grep -v id |  sed '/^$/d' `)
+               routersnet=( `neutron router-port-list $i | awk -F" " '{print $8}' | grep -v fixed |  sed '/^$/d' | sed 's/,$//' | sed -e 's/^"//'  -e 's/"$//' `)
+       done
+
+       echo "ROUTER PORTS: ${routerport[@]} "
+       echo "ROUTER SUBNET: ${routersnet[@]} "
+       
+       #remove router subnets
+       echo "router-interface-delete"
+       for i in "${routersnet[@]}"
+       do
+               neutron router-interface-delete ${routers[0]} $i
+       done
+
+       #remove subnets
+       echo "subnet-delete"
+       for i in "${subnets[@]}"
+       do
+               neutron subnet-delete $i
+       done
+
+       #remove nets
+       echo "net-delete"
+       for i in "${networks[@]}"
+       do
+               neutron net-delete $i
+       done
+
+       #remove routers
+       echo "router-delete"
+       for i in "${routers[@]}"
+       do
+               neutron router-delete $i
+       done
+
+       #remove ports
+       echo "port-delete"
+       for i in "${ports[@]}"
+       do
+               neutron port-delete $i
+       done
+
+       #remove subnets
+       echo "subnet-delete second pass"
+       for i in "${subnets[@]}"
+       do
+               neutron subnet-delete $i
+       done
+
+}
+
+function set_dns {
+       sed -i -e 's/nameserver 10.20.0.2/nameserver $DNS/g' /etc/resolv.conf
+}
+
+
+#OUTPUT
+
+function check {
+       echo $BR_MGMT
+       echo $BR_STORAGE
+       echo $BR_FW_ADMIN
+       echo $BR_EX
+}
+
+### MAIN
+
+
+set_mode
+backup_ifcfg
+get_ips
+create_ifcfg_br_mgmt
+create_ifcfg_br_storage
+create_ifcfg_br_fw_admin
+if [ $MODE == "0" ]
+then
+        create_ifcfg_eth0
+fi
+allow_challenge
+clean_ovs
+check
+reboot_me
+
+
index d292acd..145da80 100755 (executable)
@@ -6,28 +6,29 @@
 #
 #  Usage - Set / pass CONTROL_HOST to your needs
 #
-CONTROL_HOST=172.30.9.70
+### SET THIS VALUE TO MATCH YOUR SYSTEM
+CONTROL_HOST=192.168.0.2
+BR_EX_IP=172.30.9.70
 
 # ENV
 source ~/openrc
-
 # VARS
 ML2_CONF=/etc/neutron/plugins/ml2/ml2_conf.ini
 MODE=0
 
 
 # FUNCTIONS
-
 # Update ml2_conf.ini
 function update_ml2conf {
         echo "Backing up and modifying ml2_conf.ini"
         cp $ML2_CONF $ML2_CONF.bak
         sed -i -e 's/mechanism_drivers =openvswitch/mechanism_drivers = opendaylight/g' $ML2_CONF
         sed -i -e 's/tenant_network_types = flat,vlan,gre,vxlan/tenant_network_types = vxlan/g' $ML2_CONF
-        cat "[ml2_odl]" >> $ML2_CONF
-        cat "password = admin" >> $ML2_CONF
-        cat "username = admin" >> $ML2_CONF
-        cat "url = http://${CONTROL_HOST}:8080/controller/nb/v2/neutron" >> $ML2_CONF
+        sed -i -e 's/bridge_mappings=physnet2:br-prv/bridge_mappings=physnet1:br-ex/g' $ML2_CONF
+        echo "[ml2_odl]" >> $ML2_CONF
+        echo "password = admin" >> $ML2_CONF
+        echo "username = admin" >> $ML2_CONF
+        echo "url = http://${CONTROL_HOST}:8080/controller/nb/v2/neutron" >> $ML2_CONF
 }
 
 function reset_neutrondb {
@@ -56,6 +57,12 @@ function stop_neutron {
         fi
 }
 
+function disable_agent {
+       echo "Disabling Neutron Plugin Agents from running"
+       service neutron-plugin-openvswitch-agent stop
+       echo 'manual' > /etc/init/neutron-plugin-openvswitch-agent.override
+}
+
 
 
 function verify_ML2_working {
@@ -72,7 +79,7 @@ function verify_ML2_working {
 
 
 function set_mode {
-        if ls -l /var/lib/glance/images
+        if [ -d "/var/lib/glance/images" ]
         then
                 echo "Controller Mode"
                 MODE=0
@@ -88,52 +95,73 @@ function stop_ovs {
 
 }
 
+function start_ovs {
+       echo "Starting OVS"
+       service openvswitch-vswitch start
+       ovs-vsctl show
+}
+
+
 function control_setup {
         echo "Modifying Controller"
         stop_neutron
         stop_ovs
+       disable_agent
         rm -rf /var/log/openvswitch/*
         mkdir -p /opt/opnfv/odl/ovs_back
         mv /etc/openvswitch/conf.db /opt/opnfv/odl/ovs_back/.
         mv /etc/openvswitch/.conf*lock* /opt/opnfv/odl/ovs_back/.
+       rm -rf /etc/openvswitch/conf.db
+       rm -rf /etc/openvswitch/.conf*
         service openvswitch-switch start
-        ovs-vsctl set-manager tcp:172.30.9.70:6640
-        ovs-vsctl add-br br-eth0
         ovs-vsctl add-br br-ex
-        ovs-vsctl add-port br-eth0 eth0
-        ovs-vsctl add-port br-eth0 br-eth0--br-ex
-        ovs-vsctl add-port br-ex br-ex--br-eth0
-        ovs-vsctl set interface br-ex--br-eth0 type=patch
-        ovs-vsctl set interface br-eth0--br-ex type=patch
-        ovs-vsctl set interface br-ex--br-eth0 options:peer=br-eth0--br-ex
-        ovs-vsctl set interface br-eth0--br-ex options:peer=br-ex--br-eth0
+        ovs-vsctl add-port br-ex eth0
+        ovs-vsctl set interface br-ex type=external
         ifconfig br-ex 172.30.9.70/24 up
         service neutron-server restart
 
         echo "setting up networks"
         ip link add link eth1 name br-mgmt type vlan id 300
+       ifconfig br-mgmt `grep address /etc/network/interfaces.d/ifcfg-br-mgmt | awk -F" " '{print $2}'`/24 up arp
         ip link add link eth1 name br-storage type vlan id 301
-        /etc/init.d/networking restart
+       ip link add link eth1 name br-prv type vlan id 1000
+       ifconfig br-storage `grep address /etc/network/interfaces.d/ifcfg-br-storage | awk -F" " '{print $2}'`/24 up arp
+       ifconfig eth1 `grep address /etc/network/interfaces.d/ifcfg-br-fw-admin | awk -F" " '{print $2}'`/24 up arp
 
+       echo "Setting ODL Manager IP"
+        ovs-vsctl set-manager tcp:192.168.0.2:6640
 
-        echo "Reset Neutron DB"
-        #reset_neutrondb
-        echo "Restarting Neutron Components"
-        #restart_neutron
         echo "Verifying ODL ML2 plugin is working"
         verify_ML2_working
 
+       # BAD HACK - Should be parameterized - this is to catch up 
+       route add default gw 172.30.9.1
+
+}
+
+function clean_ovs {
+       echo "cleaning OVS DB"
+       stop_ovs
+       rm -rf /var/log/openvswitch/*
+       mkdir -p /opt/opnfv/odl/ovs_back
+       cp -pr /etc/openvswitch/* /opt/opnfv/odl/ovs_back/.
+       rm -rf /etc/openvswitch/conf.db
+       echo "restarting OVS - you should see Nothing there"
+       start_ovs
 }
 
 function compute_setup {
-        echo "do compute stuff here"
-        echo "stopping neutron openvswitch plugin"
+        echo "Modifying Compute"
+        echo "Disabling neutron openvswitch plugin"
         stop_neutron
+       disable_agent
         ip link add link eth1 name br-mgmt type vlan id 300
-        ifconfig br-mgmt `grep address /etc/network/interfaces.d/ifcfg-br-mgmt | awk -F" " '{print $2}'`/24
+        ifconfig br-mgmt `grep address /etc/network/interfaces.d/ifcfg-br-mgmt | awk -F" " '{print $2}'`/24 up arp
         ip link add link eth1 name br-storage type vlan id 301
-        ifconfig br-storage `grep address /etc/network/interfaces.d/ifcfg-br-storage | awk -F" " '{print $2}'`/24
-        ifconfig eth1 `grep address /etc/network/interfaces.d/ifcfg-br-fw-mgmt | awk -F" " '{print $2}'`/24
+       ip link add link eth1 name br-prv type vlan id 1000
+        ifconfig br-storage `grep address /etc/network/interfaces.d/ifcfg-br-storage | awk -F" " '{print $2}'`/24 up arp
+        ifconfig eth1 `grep address /etc/network/interfaces.d/ifcfg-br-fw-admin | awk -F" " '{print $2}'`/24 up arp
+
         echo "set manager, and route for ODL controller"
         ovs-vsctl set-manager tcp:192.168.0.2:6640
         route add 172.17.0.1 gw 192.168.0.2
index a7613c3..fa14b47 100755 (executable)
@@ -15,6 +15,7 @@ DNS=8.8.8.8
 HOST_IP=`ifconfig br-ex | grep -i "inet addr" | awk -F":" '{print $2}' | awk -F" " '{print $1}'`
 
 
+
 # DEBUG ECHOS
 echo $LOCALPATH
 echo $DOCKERBIN
@@ -23,6 +24,10 @@ echo $DNS
 echo $HOST_IP
 
 
+# Set DNS to something external and default GW - ODL requires a connection to the internet
+sed -i -e 's/nameserver 10.20.0.2/nameserver 8.8.8.8/g' /etc/resolv.conf
+route delete default gw 10.20.0.2
+route add default gw 172.30.9.1
 
 # Start Docker daemon and in background
 echo "Starting Docker"