Merge "Adding installation of ansible and dependencies to host attempting to execute...
author    Tim Rozet <trozet@redhat.com>
Fri, 13 Jan 2017 20:34:22 +0000 (20:34 +0000)
committer Gerrit Code Review <gerrit@opnfv.org>
Fri, 13 Jan 2017 20:34:22 +0000 (20:34 +0000)
28 files changed:
build/Makefile
build/build_ovs_nsh.sh [deleted file]
build/csit-environment.yaml
build/opnfv-environment.yaml
build/overcloud-full.sh
build/rpm_specs/openstack-congress.spec
build/variables.sh
ci/clean.sh
ci/deploy.sh
ci/util.sh
config/deploy/deploy_settings.yaml
config/deploy/os-odl_l2-sdnvpn-ha.yaml [deleted file]
config/inventory/pod_example_settings.yaml
contrib/simple_deploy.sh [new file with mode: 0644]
docs/installationprocedure/baremetal.rst
lib/overcloud-deploy-functions.sh
lib/parse-functions.sh
lib/post-install-functions.sh
lib/python/apex/common/constants.py
lib/python/apex/common/utils.py
lib/python/apex/deploy_settings.py
lib/python/apex/inventory.py
lib/python/apex/network_settings.py
lib/python/apex_python_utils.py
lib/undercloud-functions.sh
lib/utility-functions.sh
tests/test_apex_deploy_settings.py
tests/test_apex_inventory.py

diff --git a/build/Makefile b/build/Makefile
index 30af162..af84ea8 100644 (file)
@@ -162,17 +162,21 @@ $(BUILD_DIR)/noarch/python-tackerclient-2016.2-1.git$(TACKERCLIENT_COMMIT).noarc
 #  CONGRESS   #
 ###############
 
+.PHONY: congress-clean
+congress-clean:
+       @rm -rf $(BUILD_DIR)/openstack-congress-2016.2
+       @rm -f $(BUILD_DIR)/openstack-congress.tar.gz
+
 $(BUILD_DIR)/openstack-congress.tar.gz:
        @echo "Preparing the Congress RPM prerequisites"
-       git clone $(CONGRESS_REPO) -b $(CONGRESS_BRANCH) $(BUILD_DIR)/openstack-congress-2016.1
-       cd $(BUILD_DIR)/openstack-congress-2016.1 && curl -O https://radez.fedorapeople.org/openstack-congress.service
-       tar czf $(BUILD_DIR)/openstack-congress.tar.gz -C $(BUILD_DIR) openstack-congress-2016.1
+       git clone $(CONGRESS_REPO) -b $(CONGRESS_BRANCH) $(BUILD_DIR)/openstack-congress-2016.2
+       cd $(BUILD_DIR)/openstack-congress-2016.2 && curl -O https://radez.fedorapeople.org/openstack-congress.service
+       tar czf $(BUILD_DIR)/openstack-congress.tar.gz -C $(BUILD_DIR) openstack-congress-2016.2
 
 .PHONY: congress-rpm
-congress-rpm: $(BUILD_DIR)/openstack-congress.tar.gz $(BUILD_DIR)/noarch/openstack-congress-2016.1-1.git$(CONGRESS_COMMIT).noarch.rpm
-
+congress-rpm: $(BUILD_DIR)/noarch/openstack-congress-2016.2-1.git$(CONGRESS_COMMIT).noarch.rpm
 
-$(BUILD_DIR)/noarch/openstack-congress-2016.1-1.git$(CONGRESS_COMMIT).noarch.rpm:
+$(BUILD_DIR)/noarch/openstack-congress-2016.2-1.git$(CONGRESS_COMMIT).noarch.rpm: $(BUILD_DIR)/openstack-congress.tar.gz
        @echo "Building the Congress RPM"
        rpmbuild --clean -ba --target noarch rpm_specs/openstack-congress.spec $(RPM_DIR_ARGS) -D 'git .git$(CONGRESS_COMMIT)'
 
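The reordered rules above make the Congress tarball a prerequisite of the RPM file rule instead of a sibling target, so a plain congress-rpm build now fetches, tars and packages the stable/newton sources in one pass, and congress-clean removes both intermediates. A minimal usage sketch, assuming the usual Apex build prerequisites (git, curl, rpmbuild) are already installed:

    cd build
    # Drop any stale Congress sources and tarball left over from a previous branch
    make congress-clean
    # Clone stable/newton Congress, tar it, then build the noarch RPM from the spec
    make congress-rpm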
diff --git a/build/build_ovs_nsh.sh b/build/build_ovs_nsh.sh
deleted file mode 100755 (executable)
index 834df5b..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/usr/bin/env bash
-##############################################################################
-# Copyright (c) 2016 Tim Rozet (Red Hat) and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-set -e
-
-yum -y install  rpm-build autoconf automake libtool systemd-units openssl openssl-devel python python-twisted-core python-zope-interface python-six desktop-file-utils groff graphviz  procps-ng libcap-ng libcap-ng-devel PyQt4 selinux-policy-devel kernel-devel kernel-headers kernel-tools
-./boot.sh
-libtoolize --force
-aclocal
-autoheader
-automake --force-missing --add-missing
-autoconf
-./configure
-yum -y install rpmdevtools
-make rpm-fedora RPMBUILD_OPT="\"-D kversion `rpm -q kernel | rpmdev-sort  | tail -n -1 | sed  's/^kernel-//'`\" --without check"
-make rpm-fedora-kmod RPMBUILD_OPT="\"-D kversion `rpm -q kernel | rpmdev-sort  | tail -n -1 | sed  's/^kernel-//'`\""
diff --git a/build/csit-environment.yaml b/build/csit-environment.yaml
index 0225cb2..4ef5501 100644 (file)
@@ -5,6 +5,7 @@ parameters:
 #  CloudDomain:
 
 parameter_defaults:
+  GlanceBackend: file
   CeilometerStoreEvents: true
   NeutronEnableForceMetadata: true
   NeutronEnableDHCPMetadata: true
diff --git a/build/opnfv-environment.yaml b/build/opnfv-environment.yaml
index ff6fd6a..8ae2048 100644 (file)
@@ -12,6 +12,7 @@ parameter_defaults:
   OvercloudControlFlavor: control
   OvercloudComputeFlavor: compute
   controllerImage: overcloud-full
+
   ExtraConfig:
     tripleo::ringbuilder::build_ring: False
     nova::nova_public_key:
@@ -24,6 +25,7 @@ parameter_defaults:
       nova-os_compute_api:servers:show:host_status:
         key: 'os_compute_api:servers:show:host_status'
         value: 'rule:admin_or_owner'
+    nova::api::default_floating_pool: 'external'
   ControllerServices:
     - OS::TripleO::Services::CACerts
 #    - OS::TripleO::Services::CephClient
diff --git a/build/overcloud-full.sh b/build/overcloud-full.sh
index 75330f3..81e23aa 100755 (executable)
@@ -45,9 +45,9 @@ done
 
 # tar up the congress puppet module
 rm -rf puppet-congress
-git clone -b stable/mitaka https://github.com/radez/puppet-congress
+git clone https://github.com/openstack/puppet-congress
 pushd puppet-congress > /dev/null
-git archive --format=tar.gz --prefix=congress/ origin/stable/mitaka > ${BUILD_DIR}/puppet-congress.tar.gz
+git archive --format=tar.gz --prefix=congress/ HEAD > ${BUILD_DIR}/puppet-congress.tar.gz
 popd > /dev/null
 
 # tar up the fd.io module
@@ -88,7 +88,6 @@ qemu-img resize overcloud-full_build.qcow2 +500MB
 # enable connection tracking for protocal sctp
 # install the congress rpms
 # upload and explode the congress puppet module
-# install doctor driver ## Can be removed in Newton
 # install fd.io yum repo and packages
 # upload puppet fdio
 # git clone vsperf into the overcloud image
@@ -106,7 +105,7 @@ LIBGUESTFS_BACKEND=direct virt-customize \
     --run-command "mkdir /root/dpdk_rpms" \
     --upload ${BUILD_DIR}/fdio.repo:/etc/yum.repos.d/fdio.repo \
     $dpdk_pkg_str \
-    --run-command "yum install --downloadonly --downloaddir=/root/fdio vpp vpp-devel vpp-lib vpp-python-api vpp-plugins" \
+    --run-command "yum install --downloadonly --downloaddir=/root/fdio vpp vpp-devel vpp-lib vpp-api-python vpp-plugins" \
     --upload ${BUILD_DIR}/noarch/$netvpp_pkg:/root/fdio \
     --run-command "yum install -y etcd" \
     --run-command "pip install python-etcd" \
@@ -122,7 +121,6 @@ LIBGUESTFS_BACKEND=direct virt-customize \
     --install "python2-congressclient" \
     --upload ${BUILD_DIR}/puppet-congress.tar.gz:/etc/puppet/modules/ \
     --run-command "cd /etc/puppet/modules/ && tar xzf puppet-congress.tar.gz" \
-    --run-command "cd /usr/lib/python2.7/site-packages/congress/datasources && curl -O $doctor_driver" \
     --run-command "yum install -y /root/fdio/*.rpm" \
     --run-command "rm -f /etc/sysctl.d/80-vpp.conf" \
     --install unzip \
@@ -151,27 +149,5 @@ LIBGUESTFS_BACKEND=direct virt-customize \
     --run-command "cd /usr/lib/python2.7/site-packages/ && git apply /tmp/osc_auth_fix.diff" \
     -a overcloud-full_build.qcow2
 
-rm -rf ovs_nsh_patches
-rm -rf ovs
-git clone https://github.com/yyang13/ovs_nsh_patches.git
-git clone https://github.com/openvswitch/ovs.git
-pushd ovs > /dev/null
-git reset --hard 7d433ae57ebb90cd68e8fa948a096f619ac4e2d8
-cp ../ovs_nsh_patches/*.patch ./
-# Hack for build servers that have no git config
-git config user.email "apex@opnfv.com"
-git config user.name "apex"
-git am *.patch
-popd > /dev/null
-tar czf ovs.tar.gz ovs
-
-# BUILD NSH OVS
-LIBGUESTFS_BACKEND=direct virt-customize \
-    --upload ${BUILD_ROOT}/build_ovs_nsh.sh:/root/ \
-    --upload ${BUILD_DIR}/ovs.tar.gz:/root/ \
-    --run-command "cd /root/ && tar xzf ovs.tar.gz" \
-    --run-command "cd /root/ovs && /root/build_ovs_nsh.sh" \
-    -a overcloud-full_build.qcow2
-
 mv -f overcloud-full_build.qcow2 overcloud-full.qcow2
 popd > /dev/null
diff --git a/build/rpm_specs/openstack-congress.spec b/build/rpm_specs/openstack-congress.spec
index fd8e95e..5f4d7b3 100644 (file)
@@ -1,7 +1,7 @@
 %define debug_package %{nil}
 
 Name:          openstack-congress
-Version:       2016.1
+Version:       2016.2
 Release:       1%{?git}%{?dist}
 Summary:       OpenStack servicevm/device manager
 
@@ -11,19 +11,13 @@ URL:                https://wiki.openstack.org/wiki/Congress/Installation
 Source0:       openstack-congress.tar.gz
 
 BuildArch:     noarch
+
 BuildRequires: python-setuptools python2-oslo-config python2-debtcollector libffi-devel python-devel openssl-devel python2-oslo-config python2-debtcollector python34-devel
-#Requires:     pbr>=0.8 Paste PasteDeploy>=1.5.0 Routes>=1.12.3!=2.0 anyjson>=0.3.3 argparse
-#Requires:     Babel>=1.3 eventlet>=0.16.1!=0.17.0 greenlet>=0.3.2 httplib2>=0.7.5 requests>=2.2.0!=2.4.0
-#Requires:     iso8601>=0.1.9 kombu>=2.5.0 netaddr>=0.7.12 SQLAlchemy<1.1.0>=0.9.7
-#Requires:     WebOb>=1.2.3 python-heatclient>=0.3.0 python-keystoneclient>=1.1.0 alembic>=0.7.2 six>=1.9.0
-#Requires:     stevedore>=1.5.0 http oslo.config>=1.11.0 oslo.messaging!=1.17.0!=1.17.1>=1.16.0 oslo.rootwrap>=2.0.0 python-novaclient>=2.22.0
 
 %description
 OpenStack policy manager
 
 %prep
-#git archive --format=tar.gz --prefix=openstack-congress-%{version}/ HEAD > openstack-congress.tar.gz
-
 %setup -q
 
 
@@ -33,7 +27,9 @@ OpenStack policy manager
 
 
 %install
-/usr/bin/python setup.py install --prefix=%{buildroot} --install-lib=%{buildroot}/usr/lib/python2.7/site-packages
+/usr/bin/python setup.py install --root=%{buildroot}
+
+rm -rf %{buildroot}/usr/lib/python2.7/site-packages/congress_tempest_tests
 
 install -d -m 755 %{buildroot}/var/log/congress/
 install -d -m 755 %{buildroot}/etc/congress/snapshot/
@@ -63,17 +59,17 @@ exit 0
 %systemd_postun_with_restart openstack-congress
 
 %files
-
-%config /etc/congress/congress.conf
-/etc/congress/policy.json
+%{python2_sitelib}/congress-*.egg-info
 /etc/congress/api-paste.ini
-/bin/congress-server
-/bin/congress-db-manage
+/etc/congress/congress.conf
+/etc/congress/policy.json
+/usr/bin/congress-db-manage
+/usr/bin/congress-server
 %{_unitdir}/openstack-congress.service
-/usr/lib/python2.7/site-packages/congress/*
-/usr/lib/python2.7/site-packages/congress-*
-/usr/lib/python2.7/site-packages/congress_tempest_tests/*
-/usr/lib/python2.7/site-packages/antlr3runtime/*
+/usr/lib/python2.7/site-packages/congress
+/usr/lib/python2.7/site-packages/congress_dashboard
+/usr/lib/python2.7/site-packages/antlr3runtime
+
 %dir %attr(0750, congress, root) %{_localstatedir}/log/congress
 
 %changelog
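The spec's %files section now ships the binaries under /usr/bin and whole site-packages directories instead of globs, while the tempest tests are stripped in %install. One hedged way to sanity-check a locally built package against the new layout (the RPM path is illustrative):

    # List the payload of the rebuilt RPM and confirm the new install paths
    rpm -qlp path/to/openstack-congress-2016.2-1.git*.noarch.rpm \
        | grep -E '^/usr/bin/congress|^/etc/congress|antlr3runtime'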
diff --git a/build/variables.sh b/build/variables.sh
index 0aedff9..a40eb23 100644 (file)
@@ -19,7 +19,6 @@ onos_release_file=onos-1.6.0-rc2.tar.gz
 onos_jdk_uri=http://artifacts.opnfv.org/apex/colorado
 onos_ovs_uri=http://artifacts.opnfv.org/apex/colorado
 onos_ovs_pkg=package_ovs_rpm3.tar.gz
-doctor_driver=https://raw.githubusercontent.com/openstack/congress/master/congress/datasources/doctor_driver.py
 if [ -z ${GS_PATHNAME+x} ]; then
     GS_PATHNAME=/colorado
 fi
@@ -43,9 +42,9 @@ tackerclient_commit=$(git ls-remote ${tackerclient_repo} ${tackerclient_branch}
 tackerclient_pkg=python-tackerclient-2016.2-1.git${tackerclient_commit}.noarch.rpm
 
 congress_repo="http://github.com/openstack/congress"
-congress_branch="stable/mitaka"
+congress_branch="stable/newton"
 congress_commit=$(git ls-remote ${congress_repo} ${congress_branch} | awk '{print substr($1,1,7)}')
-congress_pkg=openstack-congress-2016.1-1.git${congress_commit}$(rpm -E %dist).noarch.rpm
+congress_pkg=openstack-congress-2016.2-1.git${congress_commit}$(rpm -E %dist).noarch.rpm
 
 netvpp_repo="https://github.com/openstack/networking-vpp"
 netvpp_branch="master"
diff --git a/ci/clean.sh b/ci/clean.sh
index 1e5e320..262e74b 100755 (executable)
 #author: Dan Radez (dradez@redhat.com)
 #author: Tim Rozet (trozet@redhat.com)
 
+# Backwards compat for old ENV Vars
+# Remove in E Release
+if [ -n "$CONFIG" ]; then
+    echo -e "${red}WARNING: ENV var CONFIG is Deprecated, please unset CONFIG and export BASE in its place${reset}"
+    echo -e "${red}WARNING: CONFIG will be removed in E${reset}"
+    BASE=$CONFIG
+fi
+if [ -n "$RESOURCES" ]; then
+    echo -e "${red}WARNING: ENV var RESOURCES is Deprecated, please unset RESOURCES and export IMAGES in its place${reset}"
+    echo -e "${red}WARNING: RESOURCES will be removed in E${reset}"
+    IMAGES=$RESOURCES
+fi
+
 # Use default if no param passed
-CONFIG=${CONFIG:-'/var/opt/opnfv'}
-RESOURCES=${RESOURCES:-"$CONFIG/images"}
-LIB=${LIB:-"$CONFIG/lib"}
+BASE=${BASE:-'/var/opt/opnfv'}
+IMAGES=${IMAGES:-"$BASE/images"}
+LIB=${LIB:-"$BASE/lib"}
 reset=$(tput sgr0 || echo "")
 blue=$(tput setaf 4 || echo "")
 red=$(tput setaf 1 || echo "")
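ci/clean.sh (and, below, ci/deploy.sh and ci/util.sh) now read BASE and IMAGES, falling back to the deprecated CONFIG and RESOURCES names only with a warning. A short sketch of the new-style invocation:

    # New-style environment for the CI scripts
    export BASE=/var/opt/opnfv
    export IMAGES=$BASE/images
    export LIB=$BASE/lib
    ./clean.sh
    # Exporting the legacy CONFIG/RESOURCES names still works for now, but it
    # prints the deprecation warnings shown above and is slated for removal
    # in the E release.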
diff --git a/ci/deploy.sh b/ci/deploy.sh
index fde6290..b55f47e 100755 (executable)
@@ -37,12 +37,25 @@ declare -A deploy_options_array
 declare -a performance_options
 declare -A NET_MAP
 
+# Backwards compat for old ENV Vars
+# Remove in E Release
+if [ -n "$CONFIG" ]; then
+    echo -e "${red}WARNING: ENV var CONFIG is Deprecated, please unset CONFIG and export BASE in its place${reset}"
+    echo -e "${red}WARNING: CONFIG will be removed in E${reset}"
+    BASE=$CONFIG
+fi
+if [ -n "$RESOURCES" ]; then
+    echo -e "${red}WARNING: ENV var RESOURCES is Deprecated, please unset RESOURCES and export IMAGES in its place${reset}"
+    echo -e "${red}WARNING: RESOURCES will be removed in E${reset}"
+    IMAGES=$RESOURCES
+fi
+
 APEX_TMP_DIR=$(python3 -c "import tempfile; print(tempfile.mkdtemp())")
 SSH_OPTIONS=(-o StrictHostKeyChecking=no -o GlobalKnownHostsFile=/dev/null -o UserKnownHostsFile=/dev/null -o LogLevel=error)
 DEPLOY_OPTIONS=""
-CONFIG=${CONFIG:-'/var/opt/opnfv'}
-RESOURCES=${RESOURCES:-"$CONFIG/images"}
-LIB=${LIB:-"$CONFIG/lib"}
+BASE=${BASE:-'/var/opt/opnfv'}
+IMAGES=${IMAGES:-"$BASE/images"}
+LIB=${LIB:-"$BASE/lib"}
 OPNFV_NETWORK_TYPES="admin tenant external storage api"
 ENV_FILE="opnfv-environment.yaml"
 
diff --git a/ci/util.sh b/ci/util.sh
index 480858d..34821a7 100755 (executable)
@@ -2,9 +2,22 @@
 # Utility script used to interact with a deployment
 # @author Tim Rozet (trozet@redhat.com)
 
-CONFIG=${CONFIG:-'/var/opt/opnfv'}
-RESOURCES=${RESOURCES:-"$CONFIG/images"}
-LIB=${LIB:-"$CONFIG/lib"}
+# Backwards compat for old ENV Vars
+# Remove in E Release
+if [ -n "$CONFIG" ]; then
+    echo -e "${red}WARNING: ENV var CONFIG is Deprecated, please unset CONFIG and export BASE in its place${reset}"
+    echo -e "${red}WARNING: CONFIG will be removed in E${reset}"
+    BASE=$CONFIG
+fi
+if [ -n "$RESOURCES" ]; then
+    echo -e "${red}WARNING: ENV var RESOURCES is Deprecated, please unset RESOURCES and export IMAGES in its place${reset}"
+    echo -e "${red}WARNING: RESOURCES will be removed in E${reset}"
+    IMAGES=$RESOURCES
+fi
+
+BASE=${BASE:-'/var/opt/opnfv'}
+IMAGES=${IMAGES:-"$BASE/images"}
+LIB=${LIB:-"$BASE/lib"}
 VALID_CMDS="undercloud overcloud opendaylight debug-stack mock-detached -h --help"
 
 source $LIB/utility-functions.sh
diff --git a/config/deploy/deploy_settings.yaml b/config/deploy/deploy_settings.yaml
index ee1dc14..ea35ae7 100644 (file)
@@ -14,9 +14,9 @@ deploy_options:
   sdn_controller: opendaylight
 
   # Which version of ODL to use. This is only valid if 'opendaylight' was used
-  # above. If 'Boron' is specified, ODL Boron will be used. If no value is specified,
-  # Lithium will be used.
-  #odl_version: Boron
+  # above. Valid options are 'beryllium', 'boron' and 'carbon'. If no value
+  # is specified, Beryllium will be used.
+  #odl_version: boron
 
   # Whether to configure ODL L3 support. This will disable the Neutron L3 Agent and
   # use ODL instead.
diff --git a/config/deploy/os-odl_l2-sdnvpn-ha.yaml b/config/deploy/os-odl_l2-sdnvpn-ha.yaml
deleted file mode 100644 (file)
index f6904f0..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-global_params:
-  ha_enabled: true
-
-deploy_options:
-  sdn_controller: opendaylight
-  sdn_l3: false
-  tacker: true
-  congress: true
-  sfc: false
-  vpn: true
diff --git a/config/inventory/pod_example_settings.yaml b/config/inventory/pod_example_settings.yaml
index 3e34abe..c08b30c 100644 (file)
@@ -8,6 +8,7 @@ nodes:
     cpus: 2
     memory: 8192
     disk: 40
+    disk_device: sdb
     arch: "x86_64"
     capabilities: "profile:control"
   node2:
@@ -19,6 +20,7 @@ nodes:
     cpus: 2
     memory: 8192
     disk: 40
+    disk_device: sdb
     arch: "x86_64"
     capabilities: "profile:control"
   node3:
@@ -30,6 +32,7 @@ nodes:
     cpus: 2
     memory: 8192
     disk: 40
+    disk_device: sdb
     arch: "x86_64"
     capabilities: "profile:control"
   node4:
@@ -41,6 +44,7 @@ nodes:
     cpus: 2
     memory: 8192
     disk: 40
+    disk_device: sdb
     arch: "x86_64"
     capabilities: "profile:compute"
   node5:
@@ -52,5 +56,6 @@ nodes:
     cpus: 2
     memory: 8192
     disk: 40
+    disk_device: sdb
     arch: "x86_64"
     capabilities: "profile:compute"
diff --git a/contrib/simple_deploy.sh b/contrib/simple_deploy.sh
new file mode 100644 (file)
index 0000000..f4b082a
--- /dev/null
@@ -0,0 +1,19 @@
+#!/bin/bash
+set -e
+apex_home=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
+export CONFIG=$apex_home/build
+export LIB=$apex_home/lib
+export RESOURCES=$apex_home/.build/images/
+export PYTHONPATH=$PYTHONPATH:$apex_home/lib/python
+$apex_home/ci/dev_dep_check.sh
+pushd $apex_home/build
+make clean
+make undercloud
+make overcloud-opendaylight
+pushd $apex_home/ci
+./clean.sh
+./dev_dep_check.sh
+out=/tmp/opnfv-deploy.out
+echo "All further output will be piped to $out"
+(nohup ./deploy.sh -v -n $apex_home/config/network/network_settings.yaml -d $apex_home/config/deploy/os-odl_l3-nofeature-noha.yaml &> $out &)
+popd
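The new contrib helper builds the undercloud and OpenDaylight overcloud images, then launches a virtual os-odl_l3-nofeature-noha deployment in the background. Note that it still exports the legacy CONFIG/RESOURCES names, which the deprecation shims added in ci/deploy.sh translate. A typical run from a checked-out apex tree, usually as root (or sudo -E so the exported paths survive):

    sudo -E bash contrib/simple_deploy.sh
    # Everything after the image build is detached; follow progress in the log
    tail -f /tmp/opnfv-deploy.out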
diff --git a/docs/installationprocedure/baremetal.rst b/docs/installationprocedure/baremetal.rst
index 15c80c3..83cda32 100644 (file)
@@ -202,6 +202,7 @@ IPMI configuration information gathered in section
     - ``cpus``: (Introspected*) CPU cores available
     - ``memory``: (Introspected*) Memory available in Mib
     - ``disk``: (Introspected*) Disk space available in Gb
+    - ``disk_device``: (Opt***) Root disk device to use for installation
     - ``arch``: (Introspected*) System architecture
     - ``capabilities``: (Opt**) Node's role in deployment
         values: profile:control or profile:compute
@@ -213,6 +214,14 @@ IPMI configuration information gathered in section
     ** If capabilities profile is not specified then Apex will select node's roles
     in the OPNFV cluster in a non-deterministic fashion.
 
+    \*** disk_device declares which hard disk to use as the root device for
+    installation.  The format is a comma delimited list of devices, such as
+    "sda,sdb,sdc".  The disk chosen will be the first device in the list which
+    is found by introspection to exist on the system.  Currently, only a single
+    definition is allowed for all nodes.  Therefore if multiple disk_device
+    definitions occur within the inventory, only the last definition on a node
+    will be used for all nodes.
+
 Creating the Settings Files
 ---------------------------
 
diff --git a/lib/overcloud-deploy-functions.sh b/lib/overcloud-deploy-functions.sh
index 169640f..980478c 100755 (executable)
@@ -55,15 +55,15 @@ function overcloud_deploy {
 
 
   # Make sure the correct overcloud image is available
-  if [ ! -f $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 ]; then
-      echo "${red} $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 is required to execute your deployment."
+  if [ ! -f $IMAGES/overcloud-full-${SDN_IMAGE}.qcow2 ]; then
+      echo "${red} $IMAGES/overcloud-full-${SDN_IMAGE}.qcow2 is required to execute your deployment."
       echo "Please install the opnfv-apex package to provide this overcloud image for deployment.${reset}"
       exit 1
   fi
 
   echo "Copying overcloud image to Undercloud"
   ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "rm -f overcloud-full.qcow2"
-  scp ${SSH_OPTIONS[@]} $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 "stack@$UNDERCLOUD":overcloud-full.qcow2
+  scp ${SSH_OPTIONS[@]} $IMAGES/overcloud-full-${SDN_IMAGE}.qcow2 "stack@$UNDERCLOUD":overcloud-full.qcow2
 
   # Install ovs-dpdk inside the overcloud image if it is enabled.
   if [[ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' || "${deploy_options_array['dataplane']}" == 'fdio' ]]; then
@@ -130,9 +130,11 @@ EOI
   # Set ODL version accordingly
   if [[ "${deploy_options_array['sdn_controller']}" == 'opendaylight' && -n "${deploy_options_array['odl_version']}" ]]; then
     case "${deploy_options_array['odl_version']}" in
+      beryllium) odl_version=''
+              ;;
       boron)  odl_version='boron'
               ;;
-      cabron) odl_version='master'
+      carbon) odl_version='master'
               ;;
       *) echo -e "${red}Invalid ODL version ${deploy_options_array['odl_version']}.  Please use 'carbon' or 'boron' values.${reset}"
          exit 1
@@ -287,11 +289,19 @@ openstack overcloud image upload
 
 echo "Configuring undercloud and discovering nodes"
 openstack baremetal import --json instackenv.json
-openstack baremetal configure boot
+
 bash -x set_perf_images.sh ${performance_roles[@]}
-#if [[ -z "$virtual" ]]; then
-#  openstack baremetal introspection bulk start
-#fi
+if [[ -z "$virtual" ]]; then
+  openstack baremetal introspection bulk start
+  if [[ -n "$root_disk_list" ]]; then
+    openstack baremetal configure boot --root-device=${root_disk_list}
+  else
+    openstack baremetal configure boot
+  fi
+else
+  openstack baremetal configure boot
+fi
+
 echo "Configuring flavors"
 for flavor in baremetal control compute; do
   echo -e "${blue}INFO: Updating flavor: \${flavor}${reset}"
diff --git a/lib/parse-functions.sh b/lib/parse-functions.sh
index 84da75c..94eac01 100755 (executable)
@@ -25,7 +25,7 @@ parse_network_settings() {
       done
   fi
 
-  if output=$(python3 -B $LIB/python/apex_python_utils.py parse-net-settings -s $NETSETS -td $APEX_TMP_DIR -e $CONFIG/network-environment.yaml $parse_ext); then
+  if output=$(python3 -B $LIB/python/apex_python_utils.py parse-net-settings -s $NETSETS -td $APEX_TMP_DIR -e $BASE/network-environment.yaml $parse_ext); then
       echo -e "${blue}${output}${reset}"
       eval "$output"
   else
@@ -59,6 +59,7 @@ parse_deploy_settings() {
 ##params: none
 ##usage: parse_inventory_file
 parse_inventory_file() {
+  local output
   if [ "$virtual" == "TRUE" ]; then inv_virt="--virtual"; fi
   if [[ "$ha_enabled" == "True" ]]; then inv_ha="--ha"; fi
   instackenv_output=$(python3 -B $LIB/python/apex_python_utils.py parse-inventory -f $INVENTORY_FILE $inv_virt $inv_ha)
@@ -69,5 +70,12 @@ cat > instackenv.json << EOF
 $instackenv_output
 EOF
 EOI
+  if output=$(python3 -B $LIB/python/apex_python_utils.py parse-inventory -f $INVENTORY_FILE $inv_virt $inv_ha --export-bash); then
+    echo -e "${blue}${output}${reset}"
+    eval "$output"
+  else
+    echo -e "${red}ERROR: Failed to parse inventory bash settings file ${INVENTORY_FILE}${reset}"
+    exit 1
+  fi
 
 }
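The second parse-inventory call asks the Python helper for bash-consumable output instead of instackenv JSON; at this point the only variable it emits is root_disk_list. The exporter can also be run by hand to see what the deploy scripts will eval (paths are illustrative):

    python3 -B lib/python/apex_python_utils.py parse-inventory \
        -f config/inventory/pod_example_settings.yaml --export-bash
    # prints, for the example inventory above:
    # root_disk_list=sdb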
diff --git a/lib/post-install-functions.sh b/lib/post-install-functions.sh
index 102b86f..eab1740 100755 (executable)
@@ -123,7 +123,9 @@ if [ "${deploy_options_array['dataplane']}" == 'fdio' ] || [ "${deploy_options_a
     done
 fi
 
-if [ "${deploy_options_array['congress']}" == 'True' ]; then
+# TODO: Change this back to True once everything is back in
+#       place with tht and puppet-congress for deployment
+if [ "${deploy_options_array['congress']}" == 'NeverTrue' ]; then
     ds_configs="--config username=\$OS_USERNAME
                 --config tenant_name=\$OS_TENANT_NAME
                 --config password=\$OS_PASSWORD
diff --git a/lib/python/apex/common/constants.py b/lib/python/apex/common/constants.py
index 741bb4f..3aa28ea 100644 (file)
@@ -27,3 +27,4 @@ COMPUTE_PRE = "OS::TripleO::ComputeExtraConfigPre"
 CONTROLLER_PRE = "OS::TripleO::ControllerExtraConfigPre"
 PRE_CONFIG_DIR = "/usr/share/openstack-tripleo-heat-templates/puppet/" \
                  "extraconfig/pre_deploy/"
+DEFAULT_ROOT_DEV = 'sda'
diff --git a/lib/python/apex/common/utils.py b/lib/python/apex/common/utils.py
index d623638..8e6896f 100644 (file)
@@ -21,3 +21,11 @@ def parse_yaml(yaml_file):
     with open(yaml_file) as f:
         parsed_dict = yaml.safe_load(f)
         return parsed_dict
+
+
+def write_str(bash_str, path=None):
+    if path:
+        with open(path, 'w') as file:
+            file.write(bash_str)
+    else:
+        print(bash_str)
diff --git a/lib/python/apex/deploy_settings.py b/lib/python/apex/deploy_settings.py
index 3583646..3133d7f 100644 (file)
@@ -11,6 +11,8 @@
 import yaml
 import logging
 
+from .common import utils
+
 REQ_DEPLOY_SETTINGS = ['sdn_controller',
                        'odl_version',
                        'sdn_l3',
@@ -82,6 +84,8 @@ class DeploySettings(dict):
             if req_set not in deploy_options:
                 if req_set == 'dataplane':
                     self['deploy_options'][req_set] = 'ovs'
+                elif req_set == 'ceph':
+                    self['deploy_options'][req_set] = True
                 else:
                     self['deploy_options'][req_set] = False
 
@@ -163,12 +167,7 @@ class DeploySettings(dict):
         if 'performance' in self['deploy_options']:
             bash_str += self._dump_performance()
         bash_str += self._dump_deploy_options_array()
-
-        if path:
-            with open(path, 'w') as file:
-                file.write(bash_str)
-        else:
-            print(bash_str)
+        utils.write_str(bash_str, path)
 
 
 class DeploySettingsException(Exception):
diff --git a/lib/python/apex/inventory.py b/lib/python/apex/inventory.py
index 711eb18..ce16ef4 100644 (file)
 import yaml
 import json
 
+from .common import constants
+from .common import utils
+
 
 class Inventory(dict):
     """
     This class parses an APEX inventory yaml file into an object. It
     generates or detects all missing fields for deployment.
 
-    It then collapses one level of identifcation from the object to
+    It then collapses one level of identification from the object to
     convert it to a structure that can be dumped into a json file formatted
     such that Triple-O can read the resulting json as an instackenv.json file.
     """
     def __init__(self, source, ha=True, virtual=False):
         init_dict = {}
+        self.root_device = constants.DEFAULT_ROOT_DEV
         if isinstance(source, str):
             with open(source, 'r') as inventory_file:
                 yaml_dict = yaml.safe_load(inventory_file)
@@ -40,8 +44,13 @@ class Inventory(dict):
             node['pm_user'] = node['ipmi_user']
             node['mac'] = [node['mac_address']]
 
-            for i in ('ipmi_ip', 'ipmi_pass', 'ipmi_user', 'mac_address'):
-                del i
+            for i in ('ipmi_ip', 'ipmi_pass', 'ipmi_user', 'mac_address',
+                      'disk_device'):
+                if i == 'disk_device' and 'disk_device' in node.keys():
+                    self.root_device = node[i]
+                else:
+                    continue
+                del node[i]
 
             return node
 
@@ -53,7 +62,7 @@ class Inventory(dict):
                                      'nodes for HA baremetal deployment')
         elif len(self['nodes']) < 2:
             raise InventoryException('You must provide at least 2 nodes '
-                                     'for non-HA baremetal deployment${reset}')
+                                     'for non-HA baremetal deployment')
 
         if virtual:
             self['arch'] = 'x86_64'
@@ -67,6 +76,16 @@ class Inventory(dict):
     def dump_instackenv_json(self):
         print(json.dumps(dict(self), sort_keys=True, indent=4))
 
+    def dump_bash(self, path=None):
+        """
+        Prints settings for bash consumption.
+
+        If optional path is provided, bash string will be written to the file
+        instead of stdout.
+        """
+        bash_str = "{}={}\n".format('root_disk_list', str(self.root_device))
+        utils.write_str(bash_str, path)
+
 
 class InventoryException(Exception):
     def __init__(self, value):
diff --git a/lib/python/apex/network_settings.py b/lib/python/apex/network_settings.py
index 64065ca..b04f141 100644 (file)
@@ -12,7 +12,7 @@ import logging
 import ipaddress
 
 from copy import copy
-
+from .common import utils
 from . import ip_utils
 from .common.constants import (
     CONTROLLER,
@@ -338,11 +338,7 @@ class NetworkSettings(dict):
         bash_str += flatten('dns_servers', self['dns_servers'], ' ')
         bash_str += flatten('domain_name', self['dns-domain'], ' ')
         bash_str += flatten('ntp_server', self['ntp_servers'][0], ' ')
-        if path:
-            with open(path, 'w') as file:
-                file.write(bash_str)
-        else:
-            print(bash_str)
+        utils.write_str(bash_str, path)
 
     def get_ip_addr_family(self,):
         """
diff --git a/lib/python/apex_python_utils.py b/lib/python/apex_python_utils.py
index b0ebb27..e21d046 100755 (executable)
@@ -22,7 +22,6 @@ from apex import NetworkEnvironment
 from apex import DeploySettings
 from apex import Inventory
 from apex import ip_utils
-from apex.common.constants import ADMIN_NETWORK
 
 
 def parse_net_settings(args):
@@ -66,7 +65,10 @@ def run_clean(args):
 
 def parse_inventory(args):
     inventory = Inventory(args.file, ha=args.ha, virtual=args.virtual)
-    inventory.dump_instackenv_json()
+    if args.export_bash is True:
+        inventory.dump_bash()
+    else:
+        inventory.dump_instackenv_json()
 
 
 def find_ip(args):
@@ -200,6 +202,11 @@ def get_parser():
                            default=False,
                            action='store_true',
                            help='Indicate if deployment inventory is virtual')
+    inventory.add_argument('--export-bash',
+                           default=False,
+                           dest='export_bash',
+                           action='store_true',
+                           help='Export bash variables from inventory')
     inventory.set_defaults(func=parse_inventory)
 
     clean = subparsers.add_parser('clean',
diff --git a/lib/undercloud-functions.sh b/lib/undercloud-functions.sh
index 6f7addb..080fcbb 100755 (executable)
@@ -19,7 +19,7 @@ function setup_undercloud_vm {
       define_vm undercloud hd 30 "$undercloud_nets" 4 12288
 
       ### this doesn't work for some reason I was getting hangup events so using cp instead
-      #virsh vol-upload --pool default --vol undercloud.qcow2 --file $CONFIG/stack/undercloud.qcow2
+      #virsh vol-upload --pool default --vol undercloud.qcow2 --file $BASE/stack/undercloud.qcow2
       #2015-12-05 12:57:20.569+0000: 8755: info : libvirt version: 1.2.8, package: 16.el7_1.5 (CentOS BuildSystem <http://bugs.centos.org>, 2015-11-03-13:56:46, worker1.bsys.centos.org)
       #2015-12-05 12:57:20.569+0000: 8755: warning : virKeepAliveTimerInternal:143 : No response from client 0x7ff1e231e630 after 6 keepalive messages in 35 seconds
       #2015-12-05 12:57:20.569+0000: 8756: warning : virKeepAliveTimerInternal:143 : No response from client 0x7ff1e231e630 after 6 keepalive messages in 35 seconds
@@ -28,14 +28,14 @@ function setup_undercloud_vm {
       #error: Reconnected to the hypervisor
 
       local undercloud_dst=/var/lib/libvirt/images/undercloud.qcow2
-      cp -f $RESOURCES/undercloud.qcow2 $undercloud_dst
+      cp -f $IMAGES/undercloud.qcow2 $undercloud_dst
 
       # resize Undercloud machine
       echo "Checking if Undercloud needs to be resized..."
       undercloud_size=$(LIBGUESTFS_BACKEND=direct virt-filesystems --long -h --all -a $undercloud_dst |grep device | grep -Eo "[0-9\.]+G" | sed -n 's/\([0-9][0-9]*\).*/\1/p')
       if [ "$undercloud_size" -lt 30 ]; then
         qemu-img resize /var/lib/libvirt/images/undercloud.qcow2 +25G
-        LIBGUESTFS_BACKEND=direct virt-resize --expand /dev/sda1 $RESOURCES/undercloud.qcow2 $undercloud_dst
+        LIBGUESTFS_BACKEND=direct virt-resize --expand /dev/sda1 $IMAGES/undercloud.qcow2 $undercloud_dst
         LIBGUESTFS_BACKEND=direct virt-customize -a $undercloud_dst --run-command 'xfs_growfs -d /dev/sda1 || true'
         new_size=$(LIBGUESTFS_BACKEND=direct virt-filesystems --long -h --all -a $undercloud_dst |grep filesystem | grep -Eo "[0-9\.]+G" | sed -n 's/\([0-9][0-9]*\).*/\1/p')
         if [ "$new_size" -lt 30 ]; then
@@ -136,12 +136,12 @@ function configure_undercloud {
     ovs_dpdk_bridge=''
   fi
 
-  if ! controller_nic_template=$(python3 -B $LIB/python/apex_python_utils.py nic-template -r controller -s $NETSETS -t $CONFIG/nics-template.yaml.jinja2 -e "br-ex"); then
+  if ! controller_nic_template=$(python3 -B $LIB/python/apex_python_utils.py nic-template -r controller -s $NETSETS -t $BASE/nics-template.yaml.jinja2 -e "br-ex"); then
     echo -e "${red}ERROR: Failed to generate controller NIC heat template ${reset}"
     exit 1
   fi
 
-  if ! compute_nic_template=$(python3 -B $LIB/python/apex_python_utils.py nic-template -r compute -s $NETSETS -t $CONFIG/nics-template.yaml.jinja2 -e $ext_net_type -d "$ovs_dpdk_bridge"); then
+  if ! compute_nic_template=$(python3 -B $LIB/python/apex_python_utils.py nic-template -r compute -s $NETSETS -t $BASE/nics-template.yaml.jinja2 -e $ext_net_type -d "$ovs_dpdk_bridge"); then
     echo -e "${red}ERROR: Failed to generate compute NIC heat template ${reset}"
     exit 1
   fi
diff --git a/lib/utility-functions.sh b/lib/utility-functions.sh
index 5c28b46..c12619a 100644 (file)
@@ -80,26 +80,6 @@ function opendaylight_connect {
 ##outputs heat stack deployment failures
 ##params: none
 function debug_stack {
-  local failure_output
-  local phys_id
-  declare -a resource_arr
-  declare -a phys_id_arr
-
   source ~/stackrc
-
-  IFS=$'\n'
-  for resource in $(openstack stack resource list -n 5 overcloud | grep FAILED); do
-    unset IFS
-    resource_arr=(${resource//|/ })
-    phys_id=$(openstack stack resource show ${resource_arr[-1]} ${resource_arr[0]} | grep physical_resource_id 2> /dev/null)
-    if [ -n "$phys_id" ]; then
-      phys_id_arr=(${phys_id//|/ })
-      failure_output+="******************************************************"
-      failure_output+="\n${resource}:\n\n$(openstack stack deployment show ${phys_id_arr[-1]} 2> /dev/null)"
-      failure_output+="\n******************************************************"
-    fi
-    unset phys_id
-  done
-
-  echo -e $failure_output
+  openstack stack failures list overcloud --long
 }
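The hand-rolled loop over failed heat resources is dropped in favor of a single client command, so the debug helper now reduces to the following when run on the undercloud (as the diff shows):

    source ~/stackrc
    openstack stack failures list overcloud --long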
diff --git a/tests/test_apex_deploy_settings.py b/tests/test_apex_deploy_settings.py
index 2af187b..a0af121 100644 (file)
@@ -22,7 +22,8 @@ deploy_files = ('deploy_settings.yaml',
                 'os-nosdn-nofeature-noha.yaml',
                 'os-nosdn-ovs-noha.yaml',
                 'os-ocl-nofeature-ha.yaml',
-                'os-odl_l2-sdnvpn-ha.yaml',
+                'os-odl_l2-bgpvpn-ha.yaml',
+                'os-odl_l2-bgpvpn-noha.yaml',
                 'os-odl_l3-nofeature-ha.yaml',
                 'os-nosdn-nofeature-ha.yaml',
                 'os-nosdn-ovs-ha.yaml',
diff --git a/tests/test_apex_inventory.py b/tests/test_apex_inventory.py
index 08a3415..ec75856 100644 (file)
@@ -7,12 +7,16 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 
+import sys
+
 from apex.inventory import Inventory
 from apex.inventory import InventoryException
 
 from nose.tools import assert_is_instance
 from nose.tools import assert_raises
 from nose.tools import assert_equal
+from nose.tools import assert_regexp_matches
+from io import StringIO
 
 inventory_files = ('intel_pod2_settings.yaml',
                    'nokia_pod1_settings.yaml',
@@ -59,3 +63,19 @@ class TestInventory(object):
         e = InventoryException("test")
         print(e)
         assert_is_instance(e, InventoryException)
+
+    def test_dump_bash_default(self):
+        i = Inventory('../config/inventory/intel_pod2_settings.yaml')
+        out = StringIO()
+        sys.stdout = out
+        i.dump_bash()
+        output = out.getvalue().strip()
+        assert_regexp_matches(output, 'root_disk_list=sda')
+
+    def test_dump_bash_set_root_device(self):
+        i = Inventory('../config/inventory/pod_example_settings.yaml')
+        out = StringIO()
+        sys.stdout = out
+        i.dump_bash()
+        output = out.getvalue().strip()
+        assert_regexp_matches(output, 'root_disk_list=sdb')
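The two new tests capture stdout and assert on the root_disk_list line that Inventory.dump_bash prints, following the existing nose-based layout. They open the sample inventories via relative ../config paths, so they expect to run from the tests directory; a typical invocation, assuming nose is installed:

    cd tests
    nosetests -v test_apex_inventory.py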