Merge "Changes to Installation Procedure docs"
author Tim Rozet <trozet@redhat.com>
Wed, 25 Jan 2017 17:01:52 +0000 (17:01 +0000)
committer Gerrit Code Review <gerrit@opnfv.org>
Wed, 25 Jan 2017 17:01:52 +0000 (17:01 +0000)
62 files changed:
.gitignore
build/Makefile
build/build_ovs_nsh.sh [deleted file]
build/cache.sh
build/csit-environment.yaml [new file with mode: 0644]
build/instackenv.json.example [deleted file]
build/mvn_settings.xml [deleted file]
build/network-environment.yaml
build/nics-template.yaml.jinja2
build/opnfv-environment.yaml
build/opnfv-puppet-tripleo.patch [deleted file]
build/overcloud-full.sh
build/overcloud-onos.sh
build/overcloud-opendaylight.sh
build/rpm_specs/networking-vpp.spec
build/rpm_specs/openstack-congress.spec
build/rpm_specs/openstack-tacker.spec
build/rpm_specs/opnfv-apex-common.spec
build/rpm_specs/opnfv-apex-onos.spec
build/rpm_specs/opnfv-apex-release.spec [new file with mode: 0644]
build/rpm_specs/opnfv-apex.spec
build/rpm_specs/python-tackerclient.spec
build/undercloud.sh
build/variables.sh
ci/build.sh
ci/clean.sh
ci/deploy.sh
ci/dev_dep_check.sh
ci/run_smoke_tests.sh
ci/util.sh
config/deploy/deploy_settings.yaml
config/deploy/os-odl_l2-bgpvpn-noha.yaml [moved from config/deploy/os-odl_l2-sdnvpn-ha.yaml with 77% similarity]
config/deploy/os-odl_l2-fdio-ha.yaml
config/deploy/os-odl_l2-fdio-noha.yaml
config/deploy/os-odl_l3-csit-noha.yaml [new file with mode: 0644]
config/inventory/pod_example_settings.yaml
config/network/network_settings.yaml
config/network/network_settings_v6.yaml
config/network/network_settings_vlans.yaml
config/yum.repos.d/opnfv-apex.repo [new file with mode: 0644]
contrib/simple_deploy.sh [new file with mode: 0644]
docs/installationprocedure/architecture.rst
docs/installationprocedure/baremetal.rst
docs/installationprocedure/requirements.rst
docs/releasenotes/release-notes.rst
lib/common-functions.sh
lib/configure-deps-functions.sh
lib/overcloud-deploy-functions.sh
lib/parse-functions.sh
lib/post-install-functions.sh
lib/python/apex/common/constants.py
lib/python/apex/common/utils.py
lib/python/apex/deploy_settings.py
lib/python/apex/inventory.py
lib/python/apex/network_environment.py
lib/python/apex/network_settings.py
lib/python/apex_python_utils.py
lib/undercloud-functions.sh
lib/utility-functions.sh
tests/smoke_tests/execute_smoke_tests.sh
tests/test_apex_deploy_settings.py
tests/test_apex_inventory.py

index f6ef061..89f2a28 100644 (file)
@@ -5,3 +5,5 @@
 /docs_build/
 /docs_output/
 /releng/
+.build/
+.cache/
index c693e89..af84ea8 100644 (file)
@@ -9,56 +9,98 @@
 
 export USE_MASTER = ""
 export CENTDNLD = http://mirrors.cat.pdx.edu/centos/7.2.1511/isos/x86_64/CentOS-7-x86_64-DVD-1511.iso
-export CENTISO = $(shell pwd)/$(shell basename $(CENTDNLD))
 export RELEASE = "0"
-export ISO = $(shell pwd)/release/OPNFV-CentOS-7-x86_64-${RELEASE}.iso
+
+export RPM_DIST = $(shell rpm -E %dist)
+
+export TACKER_REPO := $(shell awk -F\= '/^tacker_repo/ {print $$2}' variables.sh )
+export TACKER_BRANCH := $(shell awk -F\= '/^tacker_branch/ {print $$2}' variables.sh )
+export TACKER_COMMIT := $(shell git ls-remote $(TACKER_REPO) $(TACKER_BRANCH) | awk '{print substr($$1,1,7)}')
+
+export TACKERCLIENT_REPO := $(shell awk -F\= '/^tackerclient_repo/ {print $$2}' variables.sh )
+export TACKERCLIENT_BRANCH := $(shell awk -F\= '/^tackerclient_branch/ {print $$2}' variables.sh )
+export TACKERCLIENT_COMMIT := $(shell git ls-remote $(TACKERCLIENT_REPO) $(TACKERCLIENT_BRANCH) | awk '{print substr($$1,1,7)}')
+
+export CONGRESS_REPO := $(shell awk -F\= '/^congress_repo/ {print $$2}' variables.sh )
+export CONGRESS_BRANCH := $(shell awk -F\= '/^congress_branch/ {print $$2}' variables.sh )
+export CONGRESS_COMMIT := $(shell git ls-remote $(CONGRESS_REPO) $(CONGRESS_BRANCH) | awk '{print substr($$1,1,7)}')
+
+export NETVPP_VERS := $(shell grep Version $(shell pwd)/rpm_specs/networking-vpp.spec | head -n 1 | awk '{ print $$2 }')
+export NETVPP_REPO := $(shell awk -F\= '/^netvpp_repo/ {print $$2}' variables.sh )
+export NETVPP_BRANCH := $(shell awk -F\= '/^netvpp_branch/ {print $$2}' variables.sh )
+export NETVPP_COMMIT := $(shell git ls-remote $(NETVPP_REPO) $(NETVPP_BRANCH) | awk '{print substr($$1,1,7)}')
+
+export RELRPMVERS = $(shell grep Version $(shell pwd)/rpm_specs/opnfv-apex-release.spec | head -n 1 | awk '{ print $$2 }')
 export RPMVERS = $(shell grep Version $(shell pwd)/rpm_specs/opnfv-apex.spec | head -n 1 | awk '{ print $$2 }')
-export RPMCOM = $(shell pwd)/noarch/opnfv-apex-common-$(RPMVERS)-$(shell echo ${RELEASE} | tr -d '_-').noarch.rpm
-export RPMUDR = $(shell pwd)/noarch/opnfv-apex-undercloud-$(RPMVERS)-$(shell echo ${RELEASE} | tr -d '_-').noarch.rpm
-export RPMODL = $(shell pwd)/noarch/opnfv-apex-$(RPMVERS)-$(shell echo ${RELEASE} | tr -d '_-').noarch.rpm
-export RPMONO = $(shell pwd)/noarch/opnfv-apex-onos-$(RPMVERS)-$(shell echo ${RELEASE} | tr -d '_-').noarch.rpm
 
+export BUILD_ROOT = $(shell pwd)
+export BUILD_DIR = $(shell dirname $$(pwd))/.build
+export CACHE_DIR = $(shell dirname $$(pwd))/.cache
+export RPM_DIR_ARGS = -D '_topdir $(BUILD_DIR)' -D '_builddir $(BUILD_DIR)' -D '_sourcedir $(BUILD_DIR)' -D '_rpmdir $(BUILD_DIR)' -D '_specdir $(BUILD_DIR)' -D '_srcrpmdir $(BUILD_DIR)'
+
+export RPMREL = $(BUILD_DIR)/noarch/opnfv-apex-release-$(RPMVERS)-$(shell echo ${RELEASE} | tr -d '_-').noarch.rpm
+export RPMCOM = $(BUILD_DIR)/noarch/opnfv-apex-common-$(RPMVERS)-$(shell echo ${RELEASE} | tr -d '_-').noarch.rpm
+export RPMUDR = $(BUILD_DIR)/noarch/opnfv-apex-undercloud-$(RPMVERS)-$(shell echo ${RELEASE} | tr -d '_-').noarch.rpm
+export RPMODL = $(BUILD_DIR)/noarch/opnfv-apex-$(RPMVERS)-$(shell echo ${RELEASE} | tr -d '_-').noarch.rpm
+export RPMONO = $(BUILD_DIR)/noarch/opnfv-apex-onos-$(RPMVERS)-$(shell echo ${RELEASE} | tr -d '_-').noarch.rpm
+export ISO = $(BUILD_DIR)/release/OPNFV-CentOS-7-x86_64-${RELEASE}.iso
+export CENTISO = $(BUILD_DIR)/$(shell basename $(CENTDNLD))
 
 .PHONY: all
 all: iso
 
 .PHONY: clean
-clean: images-clean rpms-clean iso-clean tacker-clean tackerclient-clean congress-clean vpp-clean
+clean:
+       rm -rf $(BUILD_DIR)
+
+.PHONY: clean-cache
+clean-cache:
+       rm -rf $(CACHE_DIR)
 
 .PHONY: images
 images: undercloud overcloud-full overcloud-opendaylight overcloud-onos
 
-.PHONY: images-clean
-images-clean: undercloud-clean overcloud-full-clean overcloud-opendaylight-clean overcloud-onos-clean
-       rm -rf images/
-
 .PHONY: rpms
 rpms: common-rpm undercloud-rpm opendaylight-rpm onos-rpm
 
 .PHONY: rpms-check
-rpms-check: common-rpm-check undercloud-rpm-check opendaylight-rpm-check onos-rpm-check
+rpms-check: release-rpm-check common-rpm-check undercloud-rpm-check opendaylight-rpm-check onos-rpm-check
 
 .PHONY: rpms-clean
-rpms-clean: common-rpm-clean undercloud-rpm-clean opendaylight-rpm-clean onos-rpm-clean
-       rm -rf noarch
+rpms-clean:
+       rm -rf $(BUILD_DIR)/noarch
+       rm -rf $(BUILD_DIR)/BUILDROOT
+
+$(BUILD_DIR)/opnfv-apex-release.tar.gz:
+       mkdir -p $(BUILD_DIR)
+       pushd ../ && git archive --format=tar.gz --prefix=opnfv-apex-release-$(RELRPMVERS)/ HEAD > $(BUILD_DIR)/opnfv-apex-release.tar.gz
+
+.PHONY: release-rpm-check
+release-rpm-check: $(BUILD_DIR)/opnfv-apex-release.tar.gz
+       rpmbuild --clean -bi -bl rpm_specs/opnfv-apex-release.spec $(RPM_DIR_ARGS) -D "release $(shell echo $(RELEASE) | tr -d '_-')"
+
+.PHONY: release-rpm
+release-rpm: $(BUILD_DIR)/opnfv-apex-release.tar.gz $(RPMREL)
 
-opnfv-apex-common.tar.gz:
-       pushd ../ && git archive --format=tar.gz --prefix=opnfv-apex-common-$(RPMVERS)/ HEAD > build/opnfv-apex-common.tar.gz
+$(RPMREL):
+       @echo "Building the Apex Release RPM"
+       # build the release RPM
+       rpmbuild --clean -ba rpm_specs/opnfv-apex-release.spec $(RPM_DIR_ARGS) -D "release $(shell echo $(RELEASE) | tr -d '_-')"
+
+$(BUILD_DIR)/opnfv-apex-common.tar.gz:
+       pushd ../ && git archive --format=tar.gz --prefix=opnfv-apex-common-$(RPMVERS)/ HEAD > $(BUILD_DIR)/opnfv-apex-common.tar.gz
 
 .PHONY: common-rpm-check
-common-rpm-check: opnfv-apex-common.tar.gz
-       rpmbuild --clean -bi -bl rpm_specs/opnfv-apex-common.spec -D '_topdir %(echo `pwd`)' -D '_builddir %(echo `pwd`)' -D '_sourcedir %(echo `pwd`)' -D '_rpmdir %(echo `pwd`)' -D '_specdir %(echo `pwd`)' -D '_srcrpmdir %(echo `pwd`)' -D "release $(shell echo $(RELEASE) | tr -d '_-')"
+common-rpm-check: $(BUILD_DIR)/opnfv-apex-common.tar.gz
+       rpmbuild --clean -bi -bl rpm_specs/opnfv-apex-common.spec $(RPM_DIR_ARGS) -D "release $(shell echo $(RELEASE) | tr -d '_-')"
 
 .PHONY: common-rpm
-common-rpm: opnfv-apex-common.tar.gz $(RPMCOM)
+common-rpm: $(BUILD_DIR)/opnfv-apex-common.tar.gz $(RPMCOM)
 
 $(RPMCOM):
        @echo "Building the Apex Common RPM"
        # build the common RPM
-       rpmbuild --clean -ba rpm_specs/opnfv-apex-common.spec -D '_topdir %(echo `pwd`)' -D '_builddir %(echo `pwd`)' -D '_sourcedir %(echo `pwd`)' -D '_rpmdir %(echo `pwd`)' -D '_specdir %(echo `pwd`)' -D '_srcrpmdir %(echo `pwd`)' -D "release $(shell echo $(RELEASE) | tr -d '_-')"
-
-.PHONY: common-rpm-clean
-common-rpm-clean:
+       rpmbuild --clean -ba rpm_specs/opnfv-apex-common.spec $(RPM_DIR_ARGS) -D "release $(shell echo $(RELEASE) | tr -d '_-')"
 
 ##################
 #  PYTHON TESTS  #
@@ -88,178 +130,109 @@ python-pep8-check:
 #  TACKER     #
 ###############
 
-.PHONY: tacker-clean
-
-tacker-clean:
-       rm -rf openstack-tacker-2015.2
-       rm -f openstack-tacker.tar.gz
-
-openstack-tacker.tar.gz:
+$(BUILD_DIR)/openstack-tacker.tar.gz:
        @echo "Preparing the Tacker RPM prerequisites"
-       git clone http://github.com/trozet/tacker -b SFC_colorado openstack-tacker-2015.2
-       tar czf openstack-tacker.tar.gz openstack-tacker-2015.2
+       git clone $(TACKER_REPO) -b $(TACKER_BRANCH) $(BUILD_DIR)/openstack-tacker-2016.2
+       tar czf $(BUILD_DIR)/openstack-tacker.tar.gz -C $(BUILD_DIR) openstack-tacker-2016.2
 
 .PHONY: tacker-rpm
-tacker-rpm: openstack-tacker.tar.gz openstack-tacker-2015.2-1.trozet.noarch.rpm
+tacker-rpm: $(BUILD_DIR)/openstack-tacker.tar.gz $(BUILD_DIR)/noarch/openstack-tacker-2016.2-1.git$(TACKER_COMMIT).noarch.rpm
 
-openstack-tacker-2015.2-1.trozet.noarch.rpm:
+$(BUILD_DIR)/noarch/openstack-tacker-2016.2-1.git$(TACKER_COMMIT).noarch.rpm:
        @echo "Building the Tacker RPM"
-       rpmbuild --clean -bb --target noarch rpm_specs/openstack-tacker.spec -D '_topdir %(echo `pwd`)' -D '_builddir %(echo `pwd`)' -D '_sourcedir %(echo `pwd`)' -D '_rpmdir %(echo `pwd`)' -D '_specdir %(echo `pwd`)' -D '_srcrpmdir %(echo `pwd`)'
+       rpmbuild --clean -ba --target noarch rpm_specs/openstack-tacker.spec $(RPM_DIR_ARGS) -D 'git .git$(TACKER_COMMIT)'
 
 #################
 #  TACKERCLIENT #
 #################
 
-.PHONY: tackerclient-clean
-tackerclient-clean:
-       rm -rf python-tackerclient-2015.2
-       rm -f python-tackerclient.tar.gz
-
-python-tackerclient.tar.gz:
+$(BUILD_DIR)/python-tackerclient.tar.gz:
        @echo "Preparing the TackerClient RPM prerequisites"
-       git clone http://github.com/trozet/python-tackerclient -b SFC_refactor python-tackerclient-2015.2
-       tar czf python-tackerclient.tar.gz python-tackerclient-2015.2
+       git clone $(TACKERCLIENT_REPO) -b $(TACKERCLIENT_BRANCH) $(BUILD_DIR)/python-tackerclient-2016.2
+       tar czf $(BUILD_DIR)/python-tackerclient.tar.gz -C $(BUILD_DIR) python-tackerclient-2016.2
 
 .PHONY: tackerclient-rpm
-tackerclient-rpm: python-tackerclient.tar.gz python-tackerclient-2015.2-1.trozet.noarch.rpm
+tackerclient-rpm: $(BUILD_DIR)/python-tackerclient.tar.gz $(BUILD_DIR)/noarch/python-tackerclient-2016.2-1.git$(TACKERCLIENT_COMMIT).noarch.rpm
 
-python-tackerclient-2015.2-1.trozet.noarch.rpm:
+$(BUILD_DIR)/noarch/python-tackerclient-2016.2-1.git$(TACKERCLIENT_COMMIT).noarch.rpm:
        @echo "Building the TackerClient RPM"
-       rpmbuild --clean -bb --target noarch rpm_specs/python-tackerclient.spec -D '_topdir %(echo `pwd`)' -D '_builddir %(echo `pwd`)' -D '_sourcedir %(echo `pwd`)' -D '_rpmdir %(echo `pwd`)' -D '_specdir %(echo `pwd`)' -D '_srcrpmdir %(echo `pwd`)'
+       rpmbuild --clean -ba --target noarch rpm_specs/python-tackerclient.spec $(RPM_DIR_ARGS) -D 'git .git$(TACKERCLIENT_COMMIT)'
 
 ###############
 #  CONGRESS   #
 ###############
 
 .PHONY: congress-clean
-
 congress-clean:
-       @rm -rf openstack-congress-2016.1
-       @rm -f openstack-congress.tar.gz
+       @rm -rf $(BUILD_DIR)/openstack-congress-2016.2
+       @rm -f $(BUILD_DIR)/openstack-congress.tar.gz
 
-openstack-congress.tar.gz:
+$(BUILD_DIR)/openstack-congress.tar.gz:
        @echo "Preparing the Congress RPM prerequisites"
-       git clone http://github.com/openstack/congress -b stable/mitaka openstack-congress-2016.1
-       cd openstack-congress-2016.1 && curl -O https://radez.fedorapeople.org/openstack-congress.service
-       tar czf openstack-congress.tar.gz openstack-congress-2016.1
+       git clone $(CONGRESS_REPO) -b $(CONGRESS_BRANCH) $(BUILD_DIR)/openstack-congress-2016.2
+       cd $(BUILD_DIR)/openstack-congress-2016.2 && curl -O https://radez.fedorapeople.org/openstack-congress.service
+       tar czf $(BUILD_DIR)/openstack-congress.tar.gz -C $(BUILD_DIR) openstack-congress-2016.2
 
 .PHONY: congress-rpm
-congress-rpm: openstack-congress.tar.gz openstack-congress-2016.1-1.noarch.rpm
+congress-rpm: $(BUILD_DIR)/noarch/openstack-congress-2016.2-1.git$(CONGRESS_COMMIT).noarch.rpm
 
-openstack-congress-2016.1-1.noarch.rpm:
+$(BUILD_DIR)/noarch/openstack-congress-2016.2-1.git$(CONGRESS_COMMIT).noarch.rpm: $(BUILD_DIR)/openstack-congress.tar.gz
        @echo "Building the Congress RPM"
-       rpmbuild --clean -bb --target noarch rpm_specs/openstack-congress.spec -D '_topdir %(echo `pwd`)' -D '_builddir %(echo `pwd`)' -D '_sourcedir %(echo `pwd`)' -D '_rpmdir %(echo `pwd`)' -D '_specdir %(echo `pwd`)' -D '_srcrpmdir %(echo `pwd`)'
+       rpmbuild --clean -ba --target noarch rpm_specs/openstack-congress.spec $(RPM_DIR_ARGS) -D 'git .git$(CONGRESS_COMMIT)'
 
-###############
-#     VPP     #
-###############
+##################
+# NETWORKING-VPP #
+##################
 
-VPP_DIR = vpp
-MAVEN = apache-maven-3.3.9
-MAVEN_FILE = $(MAVEN)-bin.tar.gz
-
-.PHONY: vpp-clean
-vpp-clean:
-       @rm -f vpp-bin.tar.gz
-       @rm -rf vpp-bin vpp
-       @rm -rf honeycomb $(MAVEN) $(MAVEN_FILE) honeycomb-1.0.0-99.noarch.rpm
-       @rm -rf networking-vpp networking-vpp.noarch.rpm
-
-.PHONY: vpp-build
-vpp-build: vpp-bin.tar.gz
-
-vpp-bin.tar.gz: vpp honeycomb $(MAVEN)
-       $(MAKE) -C $(VPP_DIR) UNATTENDED=yes install-dep bootstrap build pkg-rpm
-       $(MAKE) -C $(VPP_DIR)/build-root PLATFORM=vpp TAG=vpp_debug vpp-api-install
-       pushd vpp/vpp-api/python && python setup.py bdist
-       pushd vpp/build-root/build-vpp-native/vpp-api/java/ && ../../../../../$(MAVEN)/bin/mvn install:install-file -Dfile=jvpp-registry-16.12.jar -DgroupId=io.fd.vpp -DartifactId=jvpp-registry -Dversion=16.12-SNAPSHOT -Dpackaging=jar
-       pushd vpp/build-root/build-vpp-native/vpp-api/java/ && ../../../../../$(MAVEN)/bin/mvn install:install-file -Dfile=jvpp-core-16.12.jar -DgroupId=io.fd.vpp -DartifactId=jvpp-core -Dversion=16.12-SNAPSHOT -Dpackaging=jar
-       pushd honeycomb && ../$(MAVEN)/bin/mvn clean install -DskipTests
-       pushd honeycomb/packaging/rpm/ && BUILD_NUMBER=99 ./rpmbuild.sh
-       mkdir vpp-bin
-       mv vpp/build-root/*.rpm vpp-bin/
-       mv honeycomb/packaging/rpm/RPMS/noarch/*.rpm .
-       mv vpp/vpp-api/python/dist/*.tar.gz vpp-bin/
-       tar czf vpp-bin.tar.gz vpp-bin
-
-vpp:
-       git clone https://gerrit.fd.io/r/vpp
-
-honeycomb:
-       git clone https://gerrit.fd.io/r/p/honeycomb.git
-
-$(MAVEN): $(MAVEN_FILE)
-       tar zxvf apache-maven-3.3.9-bin.tar.gz
-       cp mvn_settings.xml $(MAVEN)/conf/settings.xml
-
-$(MAVEN_FILE):
-       wget http://www.webhostingjams.com/mirror/apache/maven/maven-3/3.3.9/binaries/apache-maven-3.3.9-bin.tar.gz
+$(BUILD_DIR)/python-networking-vpp.tar.gz:
+       @echo "Preparing the networking-vpp RPM prerequisites"
+       git clone $(NETVPP_REPO) $(BUILD_DIR)/python-networking-vpp-$(NETVPP_VERS)
+       tar czf $(BUILD_DIR)/python-networking-vpp.tar.gz -C $(BUILD_DIR) python-networking-vpp-$(NETVPP_VERS)
 
 .PHONY: networking-vpp-rpm
-networking-vpp-rpm: networking-vpp.noarch.rpm
-
-networking-vpp.noarch.rpm: networking-vpp
-       pushd networking-vpp && rpmbuild --clean -bb ../rpm_specs/networking-vpp.spec
-       mv networking-vpp/build/rpm/noarch/*.rpm networking-vpp.noarch.rpm
+networking-vpp-rpm: $(BUILD_DIR)/noarch/python-networking-vpp-$(NETVPP_VERS)-1.git$(NETVPP_COMMIT)$(RPM_DIST).noarch.rpm
 
-networking-vpp:
-       git clone https://git.openstack.org/openstack/networking-vpp
+$(BUILD_DIR)/noarch/python-networking-vpp-$(NETVPP_VERS)-1.git$(NETVPP_COMMIT)$(RPM_DIST).noarch.rpm: $(BUILD_DIR)/python-networking-vpp.tar.gz
+       @echo "Building the Networking VPP RPM"
+       rpmbuild --clean -ba --target noarch rpm_specs/networking-vpp.spec $(RPM_DIR_ARGS) -D 'git .git$(NETVPP_COMMIT)'
 
 ###############
 #  UNDERCLOUD #
 ###############
 
-.PHONY: undercloud-clean
-undercloud-clean:
-       rm -f images/undercloud.*
-       rm -rf opnfv-tht.tar.gz
-       rm -rf opnfv-tht/
-
 .PHONY: undercloud
-undercloud: images/undercloud.qcow2
+undercloud: $(BUILD_DIR)/undercloud.qcow2
 
-images/undercloud.qcow2: tackerclient-rpm
+$(BUILD_DIR)/undercloud.qcow2: tackerclient-rpm
        @echo "Building the Apex Undercloud Image"
        @./undercloud.sh
 
-opnfv-apex-undercloud.tar.gz: images/undercloud.qcow2
+$(BUILD_DIR)/opnfv-apex-undercloud.tar.gz: $(BUILD_DIR)/undercloud.qcow2
        @echo "Preparing the Apex Undercloud RPM prerequisites"
-       pushd ../ && git archive --format=tar --prefix=opnfv-apex-undercloud-$(RPMVERS)/ HEAD > build/opnfv-apex-undercloud.tar
-       tar -rf opnfv-apex-undercloud.tar \
-               --xform="s:images/undercloud.qcow2:opnfv-apex-undercloud-$(RPMVERS)/build/undercloud.qcow2:" images/undercloud.qcow2
-       gzip -f opnfv-apex-undercloud.tar
+       pushd ../ && git archive --format=tar --prefix=opnfv-apex-undercloud-$(RPMVERS)/ HEAD > $(BUILD_DIR)/opnfv-apex-undercloud.tar
+       tar -rf $(BUILD_DIR)/opnfv-apex-undercloud.tar \
+               --xform="s:.*undercloud.qcow2:opnfv-apex-undercloud-$(RPMVERS)/build/undercloud.qcow2:" $(BUILD_DIR)/undercloud.qcow2
+       gzip -f $(BUILD_DIR)/opnfv-apex-undercloud.tar
 
 .PHONY: undercloud-rpm-check
-undercloud-rpm-check: opnfv-apex-undercloud.tar.gz
-       rpmbuild --clean -bi -bl rpm_specs/opnfv-apex-undercloud.spec -D '_topdir %(echo `pwd`)' -D '_builddir %(echo `pwd`)' -D '_sourcedir %(echo `pwd`)' -D '_rpmdir %(echo `pwd`)' -D '_specdir %(echo `pwd`)' -D '_srcrpmdir %(echo `pwd`)' -D "release $(shell echo $(RELEASE) | tr -d '_-')"
+undercloud-rpm-check: $(BUILD_DIR)/opnfv-apex-undercloud.tar.gz
+       rpmbuild --clean -bi -bl rpm_specs/opnfv-apex-undercloud.spec $(RPM_DIR_ARGS) -D "release $(shell echo $(RELEASE) | tr -d '_-')"
 
 .PHONY: undercloud-rpm
-undercloud-rpm: opnfv-apex-undercloud.tar.gz $(RPMUDR)
+undercloud-rpm: $(BUILD_DIR)/opnfv-apex-undercloud.tar.gz $(RPMUDR)
 
 $(RPMUDR):
        @echo "Building the Apex Undercloud RPM"
-       rpmbuild --clean -ba rpm_specs/opnfv-apex-undercloud.spec -D '_topdir %(echo `pwd`)' -D '_builddir %(echo `pwd`)' -D '_sourcedir %(echo `pwd`)' -D '_rpmdir %(echo `pwd`)' -D '_specdir %(echo `pwd`)' -D '_srcrpmdir %(echo `pwd`)' -D "release $(shell echo $(RELEASE) | tr -d '_-')"
-
-.PHONY: undercloud-rpm-clean
-undercloud-rpm-clean:
+       rpmbuild --clean -ba rpm_specs/opnfv-apex-undercloud.spec $(RPM_DIR_ARGS) -D "release $(shell echo $(RELEASE) | tr -d '_-')"
 
 ###############
 #  OVERCLOUD  #
 ###############
 
-.PHONY: overcloud-full-clean
-overcloud-full-clean:
-       rm -rf images/overcloud-full.d
-       rm -f images/overcloud-full.*
-       rm -rf opnfv-puppet-tripleo.tar.gz
-       rm -rf opnfv-puppet-tripleo/
-       rm -rf os-net-config.tar.gz
-       rm -rf os-net-config/
-
 .PHONY: overcloud-full
-overcloud-full: images/overcloud-full.qcow2
+overcloud-full: $(BUILD_DIR)/overcloud-full.qcow2
 
-images/overcloud-full.qcow2: congress-rpm networking-vpp-rpm
+$(BUILD_DIR)/overcloud-full.qcow2: congress-rpm tacker-rpm networking-vpp-rpm
        @echo "Building the Apex Base Overcloud Image"
        @./overcloud-full.sh
 
@@ -267,71 +240,53 @@ images/overcloud-full.qcow2: congress-rpm networking-vpp-rpm
 #    ODL      #
 ###############
 
-.PHONY: overcloud-opendaylight-clean
-overcloud-opendaylight-clean:
-       @rm -f images/overcloud-full-opendaylight.qcow2
-
 .PHONY: overcloud-opendaylight
-overcloud-opendaylight: images/overcloud-full-opendaylight.qcow2
+overcloud-opendaylight: $(BUILD_DIR)/overcloud-full-opendaylight.qcow2
 
-images/overcloud-full-opendaylight.qcow2: images/overcloud-full.qcow2
+$(BUILD_DIR)/overcloud-full-opendaylight.qcow2: $(BUILD_DIR)/overcloud-full.qcow2
        @echo "Building the Apex OpenDaylight Overcloud Image"
        @./overcloud-opendaylight.sh
 
-opnfv-apex.tar.gz: images/overcloud-full-opendaylight.qcow2
-       tar -czf opnfv-apex.tar.gz --xform="s:images/overcloud-full-opendaylight.qcow2:opnfv-apex-$(RPMVERS)/build/images/overcloud-full-opendaylight.qcow2:" images/overcloud-full-opendaylight.qcow2
+$(BUILD_DIR)/opnfv-apex.tar.gz: $(BUILD_DIR)/overcloud-full-opendaylight.qcow2
+       tar -czf $(BUILD_DIR)/opnfv-apex.tar.gz --xform="s:.*overcloud-full-opendaylight.qcow2:opnfv-apex-$(RPMVERS)/build/overcloud-full-opendaylight.qcow2:" $(BUILD_DIR)/overcloud-full-opendaylight.qcow2
 
 .PHONY: opendaylight-rpm-check
-opendaylight-rpm-check: opnfv-apex.tar.gz
-       rpmbuild --clean -bi -bl rpm_specs/opnfv-apex.spec -D '_topdir %(echo `pwd`)' -D '_builddir %(echo `pwd`)' -D '_sourcedir %(echo `pwd`)' -D '_rpmdir %(echo `pwd`)' -D '_specdir %(echo `pwd`)' -D '_srcrpmdir %(echo `pwd`)' -D "release $(shell echo $(RELEASE) | tr -d '_-')"
+opendaylight-rpm-check: $(BUILD_DIR)/opnfv-apex.tar.gz
+       rpmbuild --clean -bi -bl rpm_specs/opnfv-apex.spec $(RPM_DIR_ARGS) -D "release $(shell echo $(RELEASE) | tr -d '_-')"
 
 .PHONY: opendaylight-rpm
-opendaylight-rpm: opnfv-apex.tar.gz $(RPMODL)
+opendaylight-rpm: $(BUILD_DIR)/opnfv-apex.tar.gz $(RPMODL)
 
 $(RPMODL):
        @echo "Building the Apex OpenDaylight RPM"
        # build the overcloud RPM
-       rpmbuild --clean -ba rpm_specs/opnfv-apex.spec -D '_topdir %(echo `pwd`)' -D '_builddir %(echo `pwd`)' -D '_sourcedir %(echo `pwd`)' -D '_rpmdir %(echo `pwd`)' -D '_specdir %(echo `pwd`)' -D '_srcrpmdir %(echo `pwd`)' -D "release $(shell echo $(RELEASE) | tr -d '_-')"
-
-.PHONY: opendaylight-rpm-clean
-opendaylight-rpm-clean:
+       rpmbuild --clean -ba rpm_specs/opnfv-apex.spec $(RPM_DIR_ARGS) -D "release $(shell echo $(RELEASE) | tr -d '_-')"
 
 ###############
 #    ONOS     #
 ###############
 
-.PHONY: overcloud-onos-clean
-overcloud-onos-clean:
-       @rm -f images/overcloud-full-onos.qcow2
-       @rm -rf images/puppet-onos
-       @rm -f images/puppet-onos.tar.gz
-
 .PHONY: overcloud-onos
-overcloud-onos: images/overcloud-full-onos.qcow2
+overcloud-onos: $(BUILD_DIR)/overcloud-full-onos.qcow2
 
-images/overcloud-full-onos.qcow2: images/overcloud-full.qcow2
+$(BUILD_DIR)/overcloud-full-onos.qcow2: $(BUILD_DIR)/overcloud-full.qcow2
        @echo "Building the Apex ONOS Overcloud Image"
        @./overcloud-onos.sh
 
-.PHONY: onos-rpm-clean
-onos-rpm-clean:
-       @#rpmbuild --clean rpm_specs/opnfv-apex-onos.spec -D "release $(shell echo $RELEASE | tr -d '_-')"
-       rm -rf opnfv-apex-onos.tar.gz
-
-opnfv-apex-onos.tar.gz: images/overcloud-full-onos.qcow2
-       tar -czf opnfv-apex-onos.tar.gz --xform="s:images/overcloud-full-onos.qcow2:opnfv-apex-onos-$(RPMVERS)/build/images/overcloud-full-onos.qcow2:" images/overcloud-full-onos.qcow2
+$(BUILD_DIR)/opnfv-apex-onos.tar.gz: $(BUILD_DIR)/overcloud-full-onos.qcow2
+       tar -czf $(BUILD_DIR)/opnfv-apex-onos.tar.gz --xform="s:.*overcloud-full-onos.qcow2:opnfv-apex-onos-$(RPMVERS)/build/overcloud-full-onos.qcow2:" $(BUILD_DIR)/overcloud-full-onos.qcow2
 
 .PHONY: onos-rpm-check
-onos-rpm-check: opnfv-apex-onos.tar.gz
-       rpmbuild --clean -bi -bl rpm_specs/opnfv-apex-onos.spec -D '_topdir %(echo `pwd`)' -D '_builddir %(echo `pwd`)' -D '_sourcedir %(echo `pwd`)' -D '_rpmdir %(echo `pwd`)' -D '_specdir %(echo `pwd`)' -D '_srcrpmdir %(echo `pwd`)' -D "release $(shell echo $(RELEASE) | tr -d '_-')"
+onos-rpm-check: $(BUILD_DIR)/opnfv-apex-onos.tar.gz
+       rpmbuild --clean -bi -bl rpm_specs/opnfv-apex-onos.spec $(RPM_DIR_ARGS) -D "release $(shell echo $(RELEASE) | tr -d '_-')"
 
 .PHONY: onos-rpm
-onos-rpm: opnfv-apex-onos.tar.gz $(RPMONO)
+onos-rpm: $(BUILD_DIR)/opnfv-apex-onos.tar.gz $(RPMONO)
 
 $(RPMONO):
        @echo "Building the Apex ONOS RPM"
        # build the overcloud RPM
-       rpmbuild --clean -ba rpm_specs/opnfv-apex-onos.spec -D '_topdir %(echo `pwd`)' -D '_builddir %(echo `pwd`)' -D '_sourcedir %(echo `pwd`)' -D '_rpmdir %(echo `pwd`)' -D '_specdir %(echo `pwd`)' -D '_srcrpmdir %(echo `pwd`)' -D "release $(shell echo $(RELEASE) | tr -d '_-')"
+       rpmbuild --clean -ba rpm_specs/opnfv-apex-onos.spec $(RPM_DIR_ARGS) -D "release $(shell echo $(RELEASE) | tr -d '_-')"
 
 ###############
 #    ISO      #
@@ -340,10 +295,9 @@ $(RPMONO):
 $(CENTISO):
        curl $(CENTDNLD) -z $(CENTISO) -o $(CENTISO) --verbose --silent --location
 
-.PHONY: iso-clean
 iso-clean:
-       @rm -Rf centos
-       @rm -Rf release
+       @rm -Rf $(BUILD_DIR)/centos
+       @rm -Rf $(BUILD_DIR)/release
        @rm -f $(ISO)
 
 .PHONY: mount-centiso umount-centiso
@@ -362,33 +316,26 @@ umount-centiso:
 .PHONY: iso
 iso:   iso-clean images rpms $(CENTISO)
        @echo "Building the Apex ISO"
-       @mkdir centos release
-       cd centos && bsdtar -xf ../$(shell basename $(CENTISO))
+       @mkdir $(BUILD_DIR)/centos $(BUILD_DIR)/release
+       cd $(BUILD_DIR)/centos && bsdtar -xf ../$(shell basename $(CENTISO))
        # modify the installer iso's contents
-       @chmod -R u+w centos
-       @cp -f isolinux.cfg centos/isolinux/isolinux.cfg
-       @ln $(RPMCOM) centos/Packages
-       @ln $(RPMUDR) centos/Packages
-       @ln $(RPMODL) centos/Packages
-       @ln $(RPMONO) centos/Packages
+       @chmod -R u+w $(BUILD_DIR)/centos
+       @cp -f isolinux.cfg $(BUILD_DIR)/centos/isolinux/isolinux.cfg
+       @ln $(RPMCOM) $(BUILD_DIR)/centos/Packages
+       @ln $(RPMUDR) $(BUILD_DIR)/centos/Packages
+       @ln $(RPMODL) $(BUILD_DIR)/centos/Packages
+       @ln $(RPMONO) $(BUILD_DIR)/centos/Packages
        # add packages to the centos packages
-       cd centos/Packages && yumdownloader openvswitch
-       cd centos/Packages && yumdownloader openstack-tripleo
-       cd centos/Packages && yumdownloader jq
-       cd centos/Packages && yumdownloader python34
-       cd centos/Packages && yumdownloader python34-libs
-       cd centos/Packages && yumdownloader python34-yaml
-       cd centos/Packages && yumdownloader python34-setuptools
-       cd centos/Packages && yumdownloader ipxe-roms-qemu
-       cd centos/Packages && curl -O https://radez.fedorapeople.org/python34-markupsafe-0.23-9.el7.centos.x86_64.rpm
-       cd centos/Packages && curl -O https://radez.fedorapeople.org/python3-jinja2-2.8-5.el7.centos.noarch.rpm
-       cd centos/Packages && curl -O http://artifacts.opnfv.org/apex/dependencies/python3-ipmi-0.3.0-1.noarch.rpm
+       cd $(BUILD_DIR)/centos/Packages && yumdownloader openvswitch openstack-tripleo jq python34 python34-libs python34-yaml python34-setuptools ipxe-roms-qemu
+       cd $(BUILD_DIR)/centos/Packages && curl -O https://radez.fedorapeople.org/python34-markupsafe-0.23-9.el7.centos.x86_64.rpm
+       cd $(BUILD_DIR)/centos/Packages && curl -O https://radez.fedorapeople.org/python3-jinja2-2.8-5.el7.centos.noarch.rpm
+       cd $(BUILD_DIR)/centos/Packages && curl -O http://artifacts.opnfv.org/apex/dependencies/python3-ipmi-0.3.0-1.noarch.rpm
        # regenerate yum repo data
        @echo "Generating new yum metadata"
-       createrepo --update -g ../c7-opnfv-x86_64-comps.xml centos
+       createrepo --update -g $(BUILD_ROOT)/c7-opnfv-x86_64-comps.xml $(BUILD_DIR)/centos
        # build the iso
        @echo "Building OPNFV iso"
-       mkisofs -b isolinux/isolinux.bin -no-emul-boot -boot-load-size 4 -boot-info-table -V "OPNFV CentOS 7 x86_64" -R -J -v -T -o $(ISO) centos
+       mkisofs -b isolinux/isolinux.bin -no-emul-boot -boot-load-size 4 -boot-info-table -V "OPNFV CentOS 7 x86_64" -R -J -v -T -o $(ISO) $(BUILD_DIR)/centos
        isohybrid $(ISO)
        @printf "\n\nISO is built at $(ISO)\n\n"
 
@@ -397,37 +344,27 @@ iso:      iso-clean images rpms $(CENTISO)
 ####################
 
 .PHONY: python3-jinja2
-python3-jinja2: python3-markupsafe python-jinja2-2.8-5.fc24.src.rpm
-       curl -O http://ftp.linux.ncsu.edu/pub/fedora/linux//development/24/Everything/source/tree/Packages/p/python-jinja2-2.8-5.fc24.src.rpm
-       rpm2cpio python-jinja2-2.8-5.fc24.src.rpm | cpio -idmv
-       sed -i 's/python3-devel/python34-devel/' python-jinja2.spec
-       sed -i 's/python3-setuptools/python34-setuptools/' python-jinja2.spec
-       sed -i 's/python3-pytest/python34-pytest/' python-jinja2.spec
-       sed -i 's/python3-markupsafe/python34-markupsafe/' python-jinja2.spec
-       rpmbuild -ba python-jinja2.spec -D '_topdir %(echo `pwd`)' -D '_builddir %(echo `pwd`)' -D '_sourcedir %(echo `pwd`)' -D '_rpmdir %(echo `pwd`)' -D '_specdir %(echo `pwd`)' -D '_srcrpmdir %(echo `pwd`)' -D "with_python3 1"
-
-.PHONY: python3-jinja2-clean
-python3-jinja2-clean:
-       rm -f python-jinja2-2.8-5.fc24.src.rpm
-       rm -f python-jinja2.spec
-       rm -f Jinja2-2.8.tar.gz
+python3-jinja2: python3-markupsafe
+       cd $(BUILD_DIR) \
+       && curl -O -L artifacts.opnfv.org/apex/dependencies/python-jinja2-2.8-5.fc24.src.rpm \
+       && rpm2cpio python-jinja2-2.8-5.fc24.src.rpm | cpio -idmv \
+       && sed -i 's/python3-devel/python34-devel/' python-jinja2.spec \
+       && sed -i 's/python3-setuptools/python34-setuptools/' python-jinja2.spec \
+       && sed -i 's/python3-pytest/python34-pytest/' python-jinja2.spec \
+       && sed -i 's/python3-markupsafe/python34-markupsafe/' python-jinja2.spec \
+       && rpmbuild -ba python-jinja2.spec $(RPM_DIR_ARGS) -D "with_python3 1"
 
 ########################
 #  python3-markupsafe  #
 ########################
 
 .PHONY: python3-markupsafe
-python3-markupsafe: python-markupsafe-0.23-9.fc24.src.rpm
-       curl -O http://ftp.linux.ncsu.edu/pub/fedora/linux//development/24/Everything/source/tree/Packages/p/python-markupsafe-0.23-9.fc24.src.rpm
-       rpm2cpio python-markupsafe-0.23-9.fc24.src.rpm | cpio -idmv
-       sed -i 's/python3-devel/python34-devel/' python-markupsafe.spec
-       sed -i 's/python3-setuptools/python34-setuptools/' python-markupsafe.spec
-       sed -i 's/python3-pytest/python34-pytest/' python-markupsafe.spec
-       sed -i 's/python3-markupsafe/python34-markupsafe/' python-markupsafe.spec
-       rpmbuild -ba python-markupsafe.spec -D '_topdir %(echo `pwd`)' -D '_builddir %(echo `pwd`)' -D '_sourcedir %(echo `pwd`)' -D '_rpmdir %(echo `pwd`)' -D '_specdir %(echo `pwd`)' -D '_srcrpmdir %(echo `pwd`)' -D "with_python3 1"
-
-.PHONY: python3-markupsafe-clean
-python3-markupsafe-clean:
-       rm -f python-markupsafe-0.23-9.fc24.src.rpm
-       rm -f python-markupsafe.spec
-       rm -f MarkupSafe-0.23.tar.gz
+python3-markupsafe:
+       cd $(BUILD_DIR) \
+       && curl -O -L artifacts.opnfv.org/apex/dependencies/python-markupsafe-0.23-9.fc24.src.rpm \
+       && rpm2cpio python-markupsafe-0.23-9.fc24.src.rpm | cpio -idmv \
+       && sed -i 's/python3-devel/python34-devel/' python-markupsafe.spec \
+       && sed -i 's/python3-setuptools/python34-setuptools/' python-markupsafe.spec \
+       && sed -i 's/python3-pytest/python34-pytest/' python-markupsafe.spec \
+       && sed -i 's/python3-markupsafe/python34-markupsafe/' python-markupsafe.spec \
+       && rpmbuild -ba python-markupsafe.spec $(RPM_DIR_ARGS) -D "with_python3 1"
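
The reworked Makefile drops the hard-coded clone URLs and instead reads each repo/branch pair out of variables.sh, then pins the package to the current branch head so the RPM release tag carries the short commit hash. A minimal shell sketch of that pattern (variable names follow the Makefile; the echoed filename is illustrative only):

    # read the repo/branch pair recorded in variables.sh
    tacker_repo=$(awk -F= '/^tacker_repo/ {print $2}' variables.sh)
    tacker_branch=$(awk -F= '/^tacker_branch/ {print $2}' variables.sh)
    # resolve the remote branch head and keep the first 7 characters;
    # this becomes the ".git<hash>" suffix in the RPM release
    tacker_commit=$(git ls-remote "$tacker_repo" "$tacker_branch" | awk '{print substr($1,1,7)}')
    echo "openstack-tacker-2016.2-1.git${tacker_commit}.noarch.rpm"
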
diff --git a/build/build_ovs_nsh.sh b/build/build_ovs_nsh.sh
deleted file mode 100755 (executable)
index 834df5b..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/usr/bin/env bash
-##############################################################################
-# Copyright (c) 2016 Tim Rozet (Red Hat) and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-set -e
-
-yum -y install  rpm-build autoconf automake libtool systemd-units openssl openssl-devel python python-twisted-core python-zope-interface python-six desktop-file-utils groff graphviz  procps-ng libcap-ng libcap-ng-devel PyQt4 selinux-policy-devel kernel-devel kernel-headers kernel-tools
-./boot.sh
-libtoolize --force
-aclocal
-autoheader
-automake --force-missing --add-missing
-autoconf
-./configure
-yum -y install rpmdevtools
-make rpm-fedora RPMBUILD_OPT="\"-D kversion `rpm -q kernel | rpmdev-sort  | tail -n -1 | sed  's/^kernel-//'`\" --without check"
-make rpm-fedora-kmod RPMBUILD_OPT="\"-D kversion `rpm -q kernel | rpmdev-sort  | tail -n -1 | sed  's/^kernel-//'`\""
index 5c7e5e7..4c530b0 100644 (file)
@@ -7,20 +7,16 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
-
-CACHE_DIR="$(pwd)/cache"
+source ./variables.sh
 
 # Make sure the cache dir exists
 function cache_dir {
+    if [ -f $CACHE_DIR ]; then rm -rf $CACHE_DIR; fi
     if [ ! -d $CACHE_DIR/ ]; then mkdir $CACHE_DIR/; fi
-    if [ ! -f $CACHE_DIR/.cache ]; then touch $CACHE_DIR/.cache; fi
+    if [ ! -f $CACHE_DIR/$CACHE_HISTORY ]; then touch $CACHE_DIR/$CACHE_HISTORY; fi
     echo "Cache Dir: $CACHE_DIR"
 }
 
-function cache_git_tar {
-    echo "cache_git_tar git ls-remote"
-}
-
 # $1 = download url
 # $2 = filename to write to
 function curl_file {
@@ -33,8 +29,8 @@ function curl_file {
     until curl -C- -L -o $CACHE_DIR/$2 $1  || (( count++ >= 20 )); do
         echo -n '' #do nothing, we just want to loop
     done
-    sed -i "/$2/d" $CACHE_DIR/.cache
-    echo "$(md5sum $CACHE_DIR/$2) $2" >> $CACHE_DIR/.cache
+    sed -i "/$2/d" $CACHE_DIR/$CACHE_HISTORY
+    echo "$(md5sum $CACHE_DIR/$2) $2" >> $CACHE_DIR/$CACHE_HISTORY
 }
 
 # $1 =  download url
@@ -50,22 +46,27 @@ function populate_cache {
 
     # check if the cache file exists
     # and if it has an md5 compare that
-    echo "Checking cache file exists: ${filename}"
+    echo "Checking if cache file exists: ${filename}"
     if [ ! -f $CACHE_DIR/${filename} ]; then
         echo "Cache file: ${CACHE_DIR}/${filename} missing...will download..."
         curl_file $1 $filename
     else
         echo "Cache file exists...comparing MD5 checksum"
-        if [ -z $remote_md5 ]; then
+        if [ -z "$remote_md5" ]; then
             remote_md5="$(curl -sf -L ${1}.md5 | awk {'print $1'})"
         fi
         if [ -z "$remote_md5" ]; then
             echo "Got empty MD5 from remote for $filename, skipping MD5 check"
             curl_file $1 $filename
         else
-            my_md5=$(grep ${filename} $CACHE_DIR/.cache | awk {'print $1'})
+            my_md5=$(grep ${filename} ${CACHE_DIR}/${CACHE_HISTORY} | awk {'print $1'})
+            if [ -z "$my_md5" ]; then
+                echo "${filename} missing in ${CACHE_HISTORY} file. Calculating md5..."
+                my_md5=$(md5sum ${CACHE_DIR}/${filename} | awk {'print $1'})
+            fi
             if [ "$remote_md5" != "$my_md5" ]; then
-                echo "MD5 mismatch, cache file MD5 is ${my_md5}"
+                echo "MD5 mismatch, local cache file MD5 is ${my_md5}"
+                echo "              remote cache file MD5 is ${remote_md5}"
                 echo "Downloading $filename"
                 curl_file $1 $filename
             else
@@ -74,8 +75,3 @@ function populate_cache {
         fi
     fi
 }
-
-# $1 = filename to get from cache
-function get_cached_file {
-  cp -f $CACHE_DIR/$1 .
-}
diff --git a/build/csit-environment.yaml b/build/csit-environment.yaml
new file mode 100644 (file)
index 0000000..4ef5501
--- /dev/null
@@ -0,0 +1,80 @@
+#Environment file used to list common parameters required for all deployment
+#types
+
+parameters:
+#  CloudDomain:
+
+parameter_defaults:
+  GlanceBackend: file
+  CeilometerStoreEvents: true
+  NeutronEnableForceMetadata: true
+  NeutronEnableDHCPMetadata: true
+  NeutronEnableIsolatedMetadata: true
+  OvercloudControlFlavor: control
+  OvercloudComputeFlavor: compute
+  controllerImage: overcloud-full
+  ExtraConfig:
+    tripleo::ringbuilder::build_ring: False
+    nova::nova_public_key:
+      type: 'ssh-rsa'
+      replace_public_key:
+    nova::nova_private_key:
+      type: 'ssh-rsa'
+      replace_private_key:
+    nova::policy::policies:
+      nova-os_compute_api:servers:show:host_status:
+        key: 'os_compute_api:servers:show:host_status'
+        value: 'rule:admin_or_owner'
+  ControllerServices:
+    - OS::TripleO::Services::CACerts
+    - OS::TripleO::Services::CinderApi
+    - OS::TripleO::Services::CinderBackup
+    - OS::TripleO::Services::CinderScheduler
+    - OS::TripleO::Services::CinderVolume
+    - OS::TripleO::Services::Core
+    - OS::TripleO::Services::Kernel
+    - OS::TripleO::Services::Keystone
+    - OS::TripleO::Services::GlanceApi
+    - OS::TripleO::Services::GlanceRegistry
+    - OS::TripleO::Services::MySQL
+    - OS::TripleO::Services::NeutronDhcpAgent
+    - OS::TripleO::Services::NeutronMetadataAgent
+    - OS::TripleO::Services::NeutronApi
+    - OS::TripleO::Services::NeutronCorePlugin
+    - OS::TripleO::Services::RabbitMQ
+    - OS::TripleO::Services::HAproxy
+    - OS::TripleO::Services::Keepalived
+    - OS::TripleO::Services::Memcached
+    - OS::TripleO::Services::Pacemaker
+    - OS::TripleO::Services::Redis
+    - OS::TripleO::Services::NovaConductor
+    - OS::TripleO::Services::MongoDb
+    - OS::TripleO::Services::NovaApi
+    - OS::TripleO::Services::NovaMetadata
+    - OS::TripleO::Services::NovaScheduler
+    - OS::TripleO::Services::NovaConsoleauth
+    - OS::TripleO::Services::NovaVncProxy
+    - OS::TripleO::Services::Ntp
+    - OS::TripleO::Services::Snmp
+    - OS::TripleO::Services::Timezone
+    - OS::TripleO::Services::Horizon
+    - OS::TripleO::Services::TripleoPackages
+    - OS::TripleO::Services::TripleoFirewall
+    - OS::TripleO::Services::OpenDaylightApi
+    - OS::TripleO::Services::OpenDaylightOvs
+    - OS::TripleO::Services::VipHosts
+  ComputeServices:
+    - OS::TripleO::Services::CACerts
+    - OS::TripleO::Services::Timezone
+    - OS::TripleO::Services::Ntp
+    - OS::TripleO::Services::Snmp
+    - OS::TripleO::Services::NovaCompute
+    - OS::TripleO::Services::NovaLibvirt
+    - OS::TripleO::Services::Kernel
+    - OS::TripleO::Services::ComputeNeutronCorePlugin
+    - OS::TripleO::Services::ComputeNeutronMetadataAgent
+    - OS::TripleO::Services::TripleoPackages
+    - OS::TripleO::Services::TripleoFirewall
+    - OS::TripleO::Services::NeutronSriovAgent
+    - OS::TripleO::Services::OpenDaylightOvs
+    - OS::TripleO::Services::VipHosts
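
csit-environment.yaml above is a new, slimmed-down Heat environment that enables only the services a CSIT deployment needs. Assuming the standard TripleO workflow the rest of Apex uses, an environment file like this is passed to the overcloud deploy command with -e; a hypothetical invocation for illustration only (the real command line is assembled by lib/overcloud-deploy-functions.sh):

    openstack overcloud deploy --templates \
        -e network-environment.yaml \
        -e csit-environment.yaml
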
diff --git a/build/instackenv.json.example b/build/instackenv.json.example
deleted file mode 100644 (file)
index 370ff20..0000000
+++ /dev/null
@@ -1,74 +0,0 @@
-{
-  "nodes": [
-    {
-      "cpu": "24",
-      "memory": "64220",
-      "disk": "40",
-      "arch": "x86_64",
-      "mac": [
-        "b8:ca:3a:63:8b:58"
-      ],
-      "pm_type": "pxe_ipmitool",
-      "pm_addr": "10.1.8.70",
-      "pm_user": "root",
-      "pm_password": "password",
-      "capabilities": "profile:control"
-    },
-    {
-      "cpu": "24",
-      "memory": "64220",
-      "disk": "40",
-      "arch": "x86_64",
-      "mac": [
-        "b8:ca:3a:63:87:88"
-      ],
-      "pm_type": "pxe_ipmitool",
-      "pm_addr": "10.1.8.73",
-      "pm_user": "root",
-      "pm_password": "password",
-      "capabilities": "profile:control"
-    },
-    {
-      "cpu": "24",
-      "memory": "64220",
-      "disk": "40",
-      "arch": "x86_64",
-      "mac": [
-        "b8:ca:3a:63:87:10"
-      ],
-      "pm_type": "pxe_ipmitool",
-      "pm_addr": "10.1.8.74",
-      "pm_user": "root",
-      "pm_password": "password",
-      "capabilities": "profile:control"
-    },
-    {
-      "cpu": "24",
-      "memory": "64220",
-      "disk": "40",
-      "arch": "x86_64",
-      "mac": [
-        "B8:CA:3A:63:87:48"
-      ],
-      "pm_type": "pxe_ipmitool",
-      "pm_addr": "10.1.8.76",
-      "pm_user": "root",
-      "pm_password": "password",
-      "capabilities": "profile:compute"
-    },
-    {
-      "cpu": "24",
-      "memory": "64220",
-      "disk": "40",
-      "arch": "x86_64",
-      "mac": [
-        "B8:CA:3A:63:8B:18"
-      ],
-      "pm_type": "pxe_ipmitool",
-      "pm_addr": "10.1.8.78",
-      "pm_user": "root",
-      "pm_password": "password",
-      "capabilities": "profile:compute"
-    }
-  ]
-}
diff --git a/build/mvn_settings.xml b/build/mvn_settings.xml
deleted file mode 100644 (file)
index c774461..0000000
+++ /dev/null
@@ -1,106 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- vi: set et smarttab sw=2 tabstop=2: -->
-<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"
-  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-  xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0 http://maven.apache.org/xsd/settings-1.0.0.xsd">
-  <profiles>
-    <profile>
-      <id>fd.io-release</id>
-      <repositories>
-        <repository>
-          <id>fd.io-mirror</id>
-          <name>fd.io-mirror</name>
-          <url>https://nexus.fd.io/content/groups/public/</url>
-          <releases>
-            <enabled>true</enabled>
-            <updatePolicy>never</updatePolicy>
-          </releases>
-          <snapshots>
-            <enabled>false</enabled>
-          </snapshots>
-        </repository>
-      </repositories>
-      <pluginRepositories>
-        <pluginRepository>
-          <id>fd.io-mirror</id>
-          <name>fd.io-mirror</name>
-          <url>https://nexus.fd.io/content/repositories/public/</url>
-          <releases>
-            <enabled>true</enabled>
-            <updatePolicy>never</updatePolicy>
-          </releases>
-          <snapshots>
-            <enabled>false</enabled>
-          </snapshots>
-        </pluginRepository>
-      </pluginRepositories>
-    </profile>
-    <profile>
-      <id>fd.io-snapshots</id>
-      <repositories>
-        <repository>
-          <id>fd.io-snapshot</id>
-          <name>fd.io-snapshot</name>
-          <url>https://nexus.fd.io/content/repositories/fd.io.snapshot/</url>
-          <releases>
-            <enabled>false</enabled>
-          </releases>
-          <snapshots>
-            <enabled>true</enabled>
-          </snapshots>
-        </repository>
-      </repositories>
-      <pluginRepositories>
-        <pluginRepository>
-          <id>fd.io-snapshot</id>
-          <name>fd.io-snapshot</name>
-          <url>https://nexus.fd.io/content/repositories/fd.io.snapshot/</url>
-          <releases>
-            <enabled>false</enabled>
-          </releases>
-          <snapshots>
-            <enabled>true</enabled>
-          </snapshots>
-        </pluginRepository>
-      </pluginRepositories>
-    </profile>
-    <profile>
-      <id>opendaylight-snapshots</id>
-      <repositories>
-        <repository>
-          <id>opendaylight-snapshot</id>
-          <name>opendaylight-snapshot</name>
-          <url>https://nexus.opendaylight.org/content/repositories/opendaylight.snapshot/</url>
-          <releases>
-            <enabled>false</enabled>
-          </releases>
-          <snapshots>
-            <enabled>true</enabled>
-          </snapshots>
-        </repository>
-      </repositories>
-      <pluginRepositories>
-        <pluginRepository>
-          <id>opendaylight-shapshot</id>
-          <name>opendaylight-snapshot</name>
-          <url>https://nexus.opendaylight.org/content/repositories/opendaylight.snapshot/</url>
-          <releases>
-            <enabled>false</enabled>
-          </releases>
-          <snapshots>
-            <enabled>true</enabled>
-          </snapshots>
-        </pluginRepository>
-      </pluginRepositories>
-    </profile>
-  </profiles>
-  <activeProfiles>
-    <activeProfile>fd.io-release</activeProfile>
-    <activeProfile>fd.io-snapshots</activeProfile>
-    <activeProfile>opendaylight-snapshots</activeProfile>
-  </activeProfiles>
-</settings>
-
index bd65dd9..8367371 100644 (file)
@@ -46,24 +46,7 @@ resource_registry:
 
 parameter_defaults:
   NeutronExternalNetworkBridge: 'br-ex'
-  ControlPlaneSubnetCidr: "24"
-  ControlPlaneDefaultRoute: 192.0.2.1
-  ExternalNetCidr: 192.168.37.0/24
-  ExternalAllocationPools: [{'start': '192.168.37.50', 'end': '192.168.37.99'}]
-  ExternalInterfaceDefaultRoute: 192.168.37.1
-  EC2MetadataIp: 192.0.2.1
-  DnsServers: ["8.8.8.8","8.8.4.4"]
-  TenantNetCidr: 172.17.0.0/24
-  TenantAllocationPools: [{'start': '172.17.0.10', 'end': '172.17.0.200'}]
-  StorageNetCidr: 172.18.0.0/24
-  StorageAllocationPools: [{'start': '172.18.0.10', 'end': '172.18.0.200'}]
 
-  # Customize the VLAN IDs to match the local environment
-  InternalApiNetworkVlanID: 10 # usually collapsed onto Admin/Provisioning
-  StorageNetworkVlanID: 20
-  # StorageMgmtNetworkVlanID: 30 # NOT IMPLEMENTED
-  TenantNetworkVlanID: 40
-  ExternalNetworkVlanID: 50
   ServiceNetMap:
     NeutronTenantNetwork: tenant
     CeilometerApiNetwork: internal_api
index ee83011..c7d0a1b 100644 (file)
@@ -85,13 +85,13 @@ resources:
         os_net_config:
           network_config:
             -
-            {%- if nets['tenant']['nic_mapping'][role]['vlan'] is number or nets['storage']['nic_mapping'][role]['vlan'] is number or nets['api']['nic_mapping'][role]['vlan'] is number or  nets['external'][0]['nic_mapping'][role]['vlan'] is number %}
+            {%- if not nets['external'][0]['enabled'] or nets['tenant']['nic_mapping'][role]['vlan'] is number or nets['storage']['nic_mapping'][role]['vlan'] is number or nets['api']['nic_mapping'][role]['vlan'] is number or  nets['external'][0]['nic_mapping'][role]['vlan'] is number %}
               type: ovs_bridge
               name: {get_input: bridge_name}
               members:
                 -
                   type: interface
-                  name: {{ nets[role]['admin']['members'][0] }}
+                  name: {{ nets['admin']['nic_mapping'][role]['members'][0] }}
                   # force the MAC address of the bridge to this interface
                   primary: true
                 {%- if nets['external'][0]['enabled'] and nets['external'][0]['nic_mapping'][role]['vlan'] is number %}
@@ -147,7 +147,7 @@ resources:
                 -
                   ip_netmask: 169.254.169.254/32
                   next_hop: {get_param: EC2MetadataIp}
-                {%- if external_net_af == 6 or role == 'compute' %}
+                {%- if external_net_af == 6 or role == 'compute' or not nets['external'][0]['enabled'] %}
                 -
                   default: true
                   next_hop: {get_param: ControlPlaneDefaultRoute}
index f45ad69..8ae2048 100644 (file)
@@ -12,6 +12,7 @@ parameter_defaults:
   OvercloudControlFlavor: control
   OvercloudComputeFlavor: compute
   controllerImage: overcloud-full
+
   ExtraConfig:
     tripleo::ringbuilder::build_ring: False
     nova::nova_public_key:
@@ -24,6 +25,7 @@ parameter_defaults:
       nova-os_compute_api:servers:show:host_status:
         key: 'os_compute_api:servers:show:host_status'
         value: 'rule:admin_or_owner'
+    nova::api::default_floating_pool: 'external'
   ControllerServices:
     - OS::TripleO::Services::CACerts
 #    - OS::TripleO::Services::CephClient
@@ -79,12 +81,12 @@ parameter_defaults:
     - OS::TripleO::Services::GnocchiApi
     - OS::TripleO::Services::GnocchiMetricd
     - OS::TripleO::Services::GnocchiStatsd
-    - OS::Tripleo::Services::ManilaApi
-    - OS::Tripleo::Services::ManilaScheduler
-    - OS::Tripleo::Services::ManilaBackendGeneric
-    - OS::Tripleo::Services::ManilaBackendNetapp
-    - OS::Tripleo::Services::ManilaBackendCephFs
-    - OS::Tripleo::Services::ManilaShare
+    - OS::TripleO::Services::ManilaApi
+    - OS::TripleO::Services::ManilaScheduler
+    - OS::TripleO::Services::ManilaBackendGeneric
+    - OS::TripleO::Services::ManilaBackendNetapp
+    - OS::TripleO::Services::ManilaBackendCephFs
+    - OS::TripleO::Services::ManilaShare
     - OS::TripleO::Services::AodhApi
     - OS::TripleO::Services::AodhEvaluator
     - OS::TripleO::Services::AodhNotifier
diff --git a/build/opnfv-puppet-tripleo.patch b/build/opnfv-puppet-tripleo.patch
deleted file mode 100644 (file)
index 99d3c74..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-From 9f012bc3e4f23fa756f5435ee69e5d51dd6fc874 Mon Sep 17 00:00:00 2001
-From: Tim Rozet <tdrozet@gmail.com>
-Date: Thu, 3 Mar 2016 14:36:11 -0500
-Subject: [PATCH] Adds ODL to load balancer
-
----
- manifests/loadbalancer.pp | 27 +++++++++++++++++++++++++++
- 1 file changed, 27 insertions(+)
-
-diff --git a/manifests/loadbalancer.pp b/manifests/loadbalancer.pp
-index 2fcfac6..6e13566 100644
---- a/manifests/loadbalancer.pp
-+++ b/manifests/loadbalancer.pp
-@@ -247,6 +247,10 @@
- #  (optional) Enable or not Redis binding
- #  Defaults to false
- #
-+# [*opendaylight*]
-+#  (optional) Enable or not OpenDaylight binding
-+#  Defaults to false
-+#
- class tripleo::loadbalancer (
-   $controller_virtual_ip,
-   $control_virtual_interface,
-@@ -299,6 +303,7 @@ class tripleo::loadbalancer (
-   $mysql_clustercheck        = false,
-   $rabbitmq                  = false,
-   $redis                     = false,
-+  $opendaylight              = false,
- ) {
-   if !$controller_host and !$controller_hosts {
-@@ -1044,4 +1049,26 @@ class tripleo::loadbalancer (
-     }
-   }
-+  $opendaylight_api_vip = hiera('opendaylight_api_vip', $controller_virtual_ip)
-+  $opendaylight_bind_opts = {
-+    "${opendaylight_api_vip}:8081" => [],
-+    "${public_virtual_ip}:8081" => [],
-+  }
-+
-+  if $opendaylight {
-+    haproxy::listen { 'opendaylight':
-+      bind             => $opendaylight_bind_opts,
-+      options          => {
-+        'balance'   => 'source',
-+      },
-+      collect_exported => false,
-+    }
-+    haproxy::balancermember { 'opendaylight':
-+      listening_service => 'opendaylight',
-+      ports             => '8081',
-+      ipaddresses       => hiera('opendaylight_api_node_ips', $controller_hosts_real),
-+      server_names      => $controller_hosts_names_real,
-+      options           => ['check', 'inter 2000', 'rise 2', 'fall 5'],
-+    }
-+  }
- }
--- 
-2.5.0
-
index fa4444a..1264430 100755 (executable)
@@ -7,71 +7,54 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
-set -e
+set -xe
 source ./cache.sh
 source ./variables.sh
 source ./functions.sh
 
 populate_cache "$rdo_images_uri/overcloud-full.tar"
 
-if [ ! -d images/ ]; then mkdir images; fi
-tar -xf cache/overcloud-full.tar -C images/
-mv -f images/overcloud-full.qcow2 images/overcloud-full_build.qcow2
+if [ ! -d ${BUILD_DIR} ]; then mkdir ${BUILD_DIR}; fi
+tar -xf ${CACHE_DIR}/overcloud-full.tar -C ${BUILD_DIR}/
+mv -f ${BUILD_DIR}/overcloud-full.qcow2 ${BUILD_DIR}/overcloud-full_build.qcow2
 
 ##########################################################
 #####  Prep initial overcloud image with common deps #####
 ##########################################################
 
+pushd ${BUILD_DIR} > /dev/null
+
 # prep opnfv-puppet-tripleo for undercloud
 clone_fork opnfv-puppet-tripleo
 pushd opnfv-puppet-tripleo > /dev/null
-git archive --format=tar.gz --prefix=tripleo/ HEAD > ../opnfv-puppet-tripleo.tar.gz
+git archive --format=tar.gz --prefix=tripleo/ HEAD > ${BUILD_DIR}/opnfv-puppet-tripleo.tar.gz
 popd > /dev/null
 
 # download customized os-net-config
 rm -fr os-net-config
 git clone https://github.com/trozet/os-net-config.git -b stable/colorado
-pushd os-net-config > /dev/null
-pushd os_net_config > /dev/null
-git archive --format=tar.gz --prefix=os_net_config/ HEAD > ../../os-net-config.tar.gz
-popd > /dev/null
+pushd os-net-config/os_net_config > /dev/null
+git archive --format=tar.gz --prefix=os_net_config/ HEAD > ${BUILD_DIR}/os-net-config.tar.gz
 popd > /dev/null
 
-pushd images > /dev/null
-
 dpdk_pkg_str=''
 for package in ${dpdk_rpms[@]}; do
   wget "$dpdk_uri_base/$package"
-  dpdk_pkg_str+=" --upload $package:/root/dpdk_rpms"
-done
-
-fdio_pkg_str=''
-for package in ${fdio_pkgs[@]}; do
-  wget "$fdio_uri_base/$package"
-  fdio_pkg_str+=" --upload $package:/root/fdio"
+  dpdk_pkg_str+=" --upload ${BUILD_DIR}/${package}:/root/dpdk_rpms"
 done
 
 # tar up the congress puppet module
 rm -rf puppet-congress
-git clone -b stable/mitaka https://github.com/radez/puppet-congress
+git clone https://github.com/openstack/puppet-congress
 pushd puppet-congress > /dev/null
-git archive --format=tar.gz --prefix=congress/ origin/stable/mitaka > ../puppet-congress.tar.gz
+git archive --format=tar.gz --prefix=congress/ HEAD > ${BUILD_DIR}/puppet-congress.tar.gz
 popd > /dev/null
 
-# create fd.io yum repo file
-#cat > /tmp/fdio-master.repo << EOF
-#[fdio-master]
-#name=fd.io master branch latest merge
-#baseurl=https://nexus.fd.io/content/repositories/fd.io.master.centos7/
-#enabled=1
-#gpgcheck=0
-#EOF
-
 # tar up the fd.io module
 rm -rf puppet-fdio
-git clone https://github.com/radez/puppet-fdio
+git clone https://git.fd.io/puppet-fdio
 pushd puppet-fdio > /dev/null
-git archive --format=tar.gz --prefix=fdio/ HEAD > ../puppet-fdio.tar.gz
+git archive --format=tar.gz --prefix=fdio/ HEAD > ${BUILD_DIR}/puppet-fdio.tar.gz
 popd > /dev/null
 
 # tar up vsperf
@@ -81,25 +64,38 @@ tar czf vsperf.tar.gz vsperf
 
 # tar up the tacker puppet module
 rm -rf puppet-tacker
-# TODO move this back to radez puppet-tacker after PR is accepted
-git clone -b fix_db_sync https://github.com/trozet/puppet-tacker
+git clone https://github.com/openstack/puppet-tacker
 pushd puppet-tacker > /dev/null
-git archive --format=tar.gz --prefix=tacker/ HEAD > ../puppet-tacker.tar.gz
+git archive --format=tar.gz --prefix=tacker/ HEAD > ${BUILD_DIR}/puppet-tacker.tar.gz
 popd > /dev/null
 
+# Master FD.IO Repo
+cat > ${BUILD_DIR}/fdio.repo << EOF
+[fdio-master]
+name=fd.io master branch latest merge
+baseurl=https://nexus.fd.io/content/repositories/fd.io.master.centos7/
+enabled=1
+gpgcheck=0
+EOF
+
+# Increase disk size by 500MB to accommodate more packages
+qemu-img resize overcloud-full_build.qcow2 +500MB
+
+# expand file system to max disk size
 # installing forked opnfv-puppet-tripleo
 # enable connection tracking for protocal sctp
 # upload dpdk rpms but do not install
 # enable connection tracking for protocal sctp
 # install the congress rpms
 # upload and explode the congress puppet module
-# install doctor driver ## Can be removed in Newton
 # install fd.io yum repo and packages
 # upload puppet fdio
 # git clone vsperf into the overcloud image
 # upload the tacker puppet module and untar it
+# install tacker
 LIBGUESTFS_BACKEND=direct virt-customize \
-    --upload ../opnfv-puppet-tripleo.tar.gz:/etc/puppet/modules \
+    --run-command "xfs_growfs /dev/sda" \
+    --upload ${BUILD_DIR}/opnfv-puppet-tripleo.tar.gz:/etc/puppet/modules \
     --run-command "yum update -y python-ipaddress rabbitmq-server erlang*" \
     --run-command "if ! rpm -qa | grep python-redis; then yum install -y python-redis; fi" \
     --run-command "sed -i 's/^#UseDNS.*$/UseDNS no/' /etc/ssh/sshd_config" \
@@ -107,11 +103,10 @@ LIBGUESTFS_BACKEND=direct virt-customize \
     --run-command "cd /etc/puppet/modules && rm -rf tripleo && tar xzf opnfv-puppet-tripleo.tar.gz" \
     --run-command "echo 'nf_conntrack_proto_sctp' > /etc/modules-load.d/nf_conntrack_proto_sctp.conf" \
     --run-command "mkdir /root/dpdk_rpms" \
-    --run-command "mkdir /root/fdio" \
+    --upload ${BUILD_DIR}/fdio.repo:/etc/yum.repos.d/fdio.repo \
     $dpdk_pkg_str \
-    $fdio_pkg_str \
-    --upload ../networking-vpp.noarch.rpm:/root/fdio \
-    --run-command "pip install distro flask_restful" \
+    --run-command "yum install --downloadonly --downloaddir=/root/fdio vpp vpp-devel vpp-lib vpp-api-python vpp-plugins" \
+    --upload ${BUILD_DIR}/noarch/$netvpp_pkg:/root/fdio \
     --run-command "yum install -y etcd" \
     --run-command "pip install python-etcd" \
     --run-command "puppet module install cristifalcas/etcd" \
@@ -119,56 +114,37 @@ LIBGUESTFS_BACKEND=direct virt-customize \
     --install "centos-release-qemu-ev" \
     --run-command "yum install -y qemu-kvm-ev-2.3.0-31.el7_2.21.1.x86_64" \
     --run-command "yum remove -y qemu-system-x86" \
-    --upload ../os-net-config.tar.gz:/usr/lib/python2.7/site-packages \
+    --upload ${BUILD_DIR}/os-net-config.tar.gz:/usr/lib/python2.7/site-packages \
     --run-command "cd /usr/lib/python2.7/site-packages/ && rm -rf os_net_config && tar xzf os-net-config.tar.gz" \
-    --upload ../noarch/$openstack_congress_pkg:/root/ \
-    --install /root/$openstack_congress_pkg \
+    --upload ${BUILD_DIR}/noarch/$congress_pkg:/root/ \
+    --install /root/$congress_pkg \
     --install "python2-congressclient" \
-    --upload puppet-congress.tar.gz:/etc/puppet/modules/ \
+    --upload ${BUILD_DIR}/puppet-congress.tar.gz:/etc/puppet/modules/ \
     --run-command "cd /etc/puppet/modules/ && tar xzf puppet-congress.tar.gz" \
-    --run-command "cd /usr/lib/python2.7/site-packages/congress/datasources && curl -O $doctor_driver" \
-    --run-command "sed -i \"s/'--detailed-exitcodes',/'--detailed-exitcodes','-l','syslog','-l','console',/g\" /var/lib/heat-config/hooks/puppet" \
     --run-command "yum install -y /root/fdio/*.rpm" \
     --run-command "rm -f /etc/sysctl.d/80-vpp.conf" \
     --install unzip \
-    --upload puppet-fdio.tar.gz:/etc/puppet/modules \
+    --upload ${BUILD_DIR}/puppet-fdio.tar.gz:/etc/puppet/modules \
     --run-command "cd /etc/puppet/modules && tar xzf puppet-fdio.tar.gz" \
-    --upload vsperf.tar.gz:/var/opt \
+    --upload ${BUILD_DIR}/vsperf.tar.gz:/var/opt \
     --run-command "cd /var/opt && tar xzf vsperf.tar.gz" \
-    --upload puppet-tacker.tar.gz:/etc/puppet/modules/ \
+    --upload ${BUILD_DIR}/puppet-tacker.tar.gz:/etc/puppet/modules/ \
     --run-command "cd /etc/puppet/modules/ && tar xzf puppet-tacker.tar.gz" \
+    --upload ${BUILD_DIR}/noarch/$tacker_pkg:/root/ \
+    --install /root/$tacker_pkg \
+    --upload ${BUILD_DIR}/noarch/$tackerclient_pkg:/root/ \
+    --install /root/$tackerclient_pkg \
     --run-command "pip install python-senlinclient" \
-    --upload ../neutron/agent/interface/interface.py:/usr/lib/python2.7/site-packages/neutron/agent/linux/ \
+    --upload ${BUILD_ROOT}/neutron/agent/interface/interface.py:/usr/lib/python2.7/site-packages/neutron/agent/linux/ \
     --run-command "mkdir /root/fdio_neutron_l3" \
-    --upload ../neutron/agent/l3/namespaces.py:/root/fdio_neutron_l3/ \
-    --upload ../neutron/agent/l3/router_info.py:/root/fdio_neutron_l3/ \
-    --upload ../puppet-neutron/manifests/agents/ml2/networking-vpp.pp:/etc/puppet/modules/neutron/manifests/agents/ml2/ \
-    --upload ../puppet-neutron/manifests/plugins/ml2/networking-vpp.pp:/etc/puppet/modules/neutron/manifests/plugins/ml2/ \
-    --upload ../puppet-neutron/lib/puppet/type/neutron_agent_vpp.rb:/etc/puppet/modules/neutron/lib/puppet/type/ \
+    --upload ${BUILD_ROOT}/neutron/agent/l3/namespaces.py:/root/fdio_neutron_l3/ \
+    --upload ${BUILD_ROOT}/neutron/agent/l3/router_info.py:/root/fdio_neutron_l3/ \
+    --upload ${BUILD_ROOT}/puppet-neutron/manifests/agents/ml2/networking-vpp.pp:/etc/puppet/modules/neutron/manifests/agents/ml2/ \
+    --upload ${BUILD_ROOT}/puppet-neutron/manifests/plugins/ml2/networking-vpp.pp:/etc/puppet/modules/neutron/manifests/plugins/ml2/ \
+    --upload ${BUILD_ROOT}/puppet-neutron/lib/puppet/type/neutron_agent_vpp.rb:/etc/puppet/modules/neutron/lib/puppet/type/ \
     --mkdir /etc/puppet/modules/neutron/lib/puppet/provider/neutron_agent_vpp \
-    --upload ../puppet-neutron/lib/puppet/provider/neutron_agent_vpp/ini_setting.rb:/etc/puppet/modules/neutron/lib/puppet/provider/neutron_agent_vpp/ \
-    -a overcloud-full_build.qcow2
-
-rm -rf ovs_nsh_patches
-rm -rf ovs
-git clone https://github.com/yyang13/ovs_nsh_patches.git
-git clone https://github.com/openvswitch/ovs.git
-pushd ovs > /dev/null
-git reset --hard 7d433ae57ebb90cd68e8fa948a096f619ac4e2d8
-cp ../ovs_nsh_patches/*.patch ./
-# Hack for build servers that have no git config
-git config user.email "apex@opnfv.com"
-git config user.name "apex"
-git am *.patch
-popd > /dev/null
-tar czf ovs.tar.gz ovs
-
-# BUILD NSH OVS
-LIBGUESTFS_BACKEND=direct virt-customize \
-    --upload ../build_ovs_nsh.sh:/root/ \
-    --upload ovs.tar.gz:/root/ \
-    --run-command "cd /root/ && tar xzf ovs.tar.gz" \
-    --run-command "cd /root/ovs && /root/build_ovs_nsh.sh" \
+    --upload ${BUILD_ROOT}/puppet-neutron/lib/puppet/provider/neutron_agent_vpp/ini_setting.rb:/etc/puppet/modules/neutron/lib/puppet/provider/neutron_agent_vpp/ \
+    --run-command "sed -i -E 's/timeout=[0-9]+/timeout=60/g' /usr/share/openstack-puppet/modules/rabbitmq/lib/puppet/provider/rabbitmqctl.rb" \
     -a overcloud-full_build.qcow2
 
 mv -f overcloud-full_build.qcow2 overcloud-full.qcow2
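
Every overcloud image in this change is produced with the same copy, customize,
rename flow: the pristine qcow2 is copied to a *_build.qcow2 working image, a
single virt-customize pass layers all uploads and commands onto it, and the
working image is renamed only if that pass succeeds. A minimal sketch of the
flow, with a hypothetical example-module.tar.gz standing in for the many real
artifacts staged in ${BUILD_DIR} above:

    # Minimal sketch; example-module.tar.gz is a placeholder, not a real artifact.
    cp -f overcloud-full.qcow2 overcloud-full-example_build.qcow2
    LIBGUESTFS_BACKEND=direct virt-customize \
        --upload ${BUILD_DIR}/example-module.tar.gz:/etc/puppet/modules \
        --run-command "cd /etc/puppet/modules && tar xzf example-module.tar.gz" \
        -a overcloud-full-example_build.qcow2
    mv -f overcloud-full-example_build.qcow2 overcloud-full-example.qcow2
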
index 921ad50..c00cee7 100755 (executable)
@@ -11,7 +11,7 @@ set -e
 source ./cache.sh
 source ./variables.sh
 
-pushd images > /dev/null
+pushd ${BUILD_DIR} > /dev/null
 cp -f overcloud-full.qcow2 overcloud-full-onos_build.qcow2
 
 #######################################
@@ -23,7 +23,7 @@ rm -rf puppet-onos
 populate_cache "$onos_release_uri/$onos_release_file" "$(curl https://downloads.onosproject.org/nightly/ | grep $onos_release_file | grep -o -e '[0-9a-f]\{32\}')"
 populate_cache "$onos_jdk_uri/jdk-8u51-linux-x64.tar.gz"
 
-LIBGUESTFS_BACKEND=direct virt-customize --upload $CACHE_DIR/$onos_release_file:/opt/ \
+LIBGUESTFS_BACKEND=direct virt-customize --upload ${CACHE_DIR}/${onos_release_file}:/opt/ \
                                          --run-command "mkdir /opt/onos && cd /opt/ && tar -xzf $onos_release_file -C /opt/onos --strip-components=1" \
                                          -a overcloud-full-onos_build.qcow2
 
@@ -31,10 +31,10 @@ LIBGUESTFS_BACKEND=direct virt-customize --upload $CACHE_DIR/$onos_release_file:
 git clone https://github.com/bobzhouHW/puppet-onos.git
 tar --xform="s:puppet-onos/:onos/:" -czf puppet-onos.tar.gz puppet-onos
 
-LIBGUESTFS_BACKEND=direct virt-customize --upload $CACHE_DIR/jdk-8u51-linux-x64.tar.gz:/opt/ \
-                                         --upload puppet-onos/files/install_jdk8.tar:/opt/ \
+LIBGUESTFS_BACKEND=direct virt-customize --upload ${CACHE_DIR}/jdk-8u51-linux-x64.tar.gz:/opt/ \
+                                         --upload ${BUILD_DIR}/puppet-onos/files/install_jdk8.tar:/opt/ \
                                          --run-command "cd /opt/ && tar -xf install_jdk8.tar && sh /opt/install_jdk8/install_jdk8.sh" \
-                                         --upload puppet-onos.tar.gz:/etc/puppet/modules/ \
+                                         --upload ${BUILD_DIR}/puppet-onos.tar.gz:/etc/puppet/modules/ \
                                          --run-command "cd /etc/puppet/modules/ && tar xzf puppet-onos.tar.gz" \
                                          -a overcloud-full-onos_build.qcow2
 
index af65910..0b00211 100755 (executable)
@@ -10,7 +10,7 @@
 set -e
 source ./variables.sh
 
-pushd images > /dev/null
+pushd ${BUILD_DIR} > /dev/null
 
 cp -f overcloud-full.qcow2 overcloud-full-opendaylight_build.qcow2
 
@@ -22,11 +22,11 @@ cp -f overcloud-full.qcow2 overcloud-full-opendaylight_build.qcow2
 rm -rf fds
 git clone https://gerrit.opnfv.org/gerrit/fds
 pushd fds > /dev/null
-tar -czvf ../networking-odl.tar.gz networking-odl
+tar -czvf ${BUILD_DIR}/networking-odl.tar.gz networking-odl
 popd > /dev/null
 
 # Beryllium Repo
-cat > /tmp/opendaylight.repo << EOF
+cat > ${BUILD_DIR}/opendaylight.repo << EOF
 [opendaylight-4-release]
 name=CentOS CBS OpenDaylight Beryllium repository
 baseurl=http://cbs.centos.org/repos/nfv7-opendaylight-4-release/\$basearch/os/
@@ -35,7 +35,7 @@ gpgcheck=0
 EOF
 
 # Boron Repo
-cat > /tmp/opendaylight_boron.repo << EOF
+cat > ${BUILD_DIR}/opendaylight_boron.repo << EOF
 [opendaylight-5-release]
 name=CentOS CBS OpenDaylight Boron repository
 baseurl=http://cbs.centos.org/repos/nfv7-opendaylight-5-testing/\$basearch/os/
@@ -43,46 +43,53 @@ enabled=1
 gpgcheck=0
 EOF
 
-# SDNVPN - Copy tunnel setup script
-wget https://raw.githubusercontent.com/openstack/fuel-plugin-opendaylight/brahmaputra-sr2/deployment_scripts/puppet/modules/opendaylight/templates/setup_TEPs.py
+# Master Repo
+cat > ${BUILD_DIR}/opendaylight_master.repo << EOF
+[opendaylight-6-release]
+name=CentOS CBS OpenDaylight Carbon repository
+baseurl=http://cbs.centos.org/repos/nfv7-opendaylight-6-testing/\$basearch/os/
+enabled=1
+gpgcheck=0
+EOF
+
+#BGPVPN Repo
+cat > ${BUILD_DIR}/bgpvpn.repo << EOF
+[bgpvpn]
+name=bgpvpn
+baseurl=https://trunk.rdoproject.org/centos7/consistent/
+includepkgs=python2-networking-bgpvpn
+enabled=1
+gpgcheck=0
+priority=1
+EOF
 
-# Honeycomb RPM
-wget $fdio_uri_base/$honeycomb_pkg
+# OpenDaylight Puppet Module
+rm -rf puppet-opendaylight
+git clone -b master https://github.com/dfarrell07/puppet-opendaylight
+pushd puppet-opendaylight > /dev/null
+git archive --format=tar.gz --prefix=opendaylight/ HEAD > ${BUILD_DIR}/puppet-opendaylight.tar.gz
+popd > /dev/null
 
 # install ODL packages
 # install Jolokia for ODL HA
 # Patch in OPNFV custom puppet-tripleO
 # install Honeycomb
 LIBGUESTFS_BACKEND=direct virt-customize \
-    --upload networking-odl.tar.gz:/root/ \
-    --upload /tmp/opendaylight_boron.repo:/etc/yum.repos.d/opendaylight.repo \
+    --upload ${BUILD_DIR}/networking-odl.tar.gz:/root/ \
+    --upload ${BUILD_DIR}/opendaylight_boron.repo:/etc/yum.repos.d/opendaylight.repo \
     --run-command "yum install --downloadonly --downloaddir=/root/boron/ opendaylight" \
-    --upload /tmp/opendaylight.repo:/etc/yum.repos.d/opendaylight.repo \
+    --upload ${BUILD_DIR}/opendaylight_master.repo:/etc/yum.repos.d/opendaylight.repo \
+    --run-command "yum install --downloadonly --downloaddir=/root/master/ opendaylight" \
+    --upload ${BUILD_DIR}/opendaylight.repo:/etc/yum.repos.d/opendaylight.repo \
     --install opendaylight,python-networking-odl \
-    --install https://github.com/michaeltchapman/networking_rpm/raw/master/openstack-neutron-bgpvpn-2015.2-1.el7.centos.noarch.rpm \
+    --upload ${BUILD_DIR}/bgpvpn.repo:/etc/yum.repos.d/bgpvpn.repo \
+    --install python-networking-bgpvpn \
     --run-command "wget https://github.com/rhuss/jolokia/releases/download/v1.3.3/jolokia-1.3.3-bin.tar.gz -O /tmp/jolokia-1.3.3-bin.tar.gz" \
     --run-command "tar -xvf /tmp/jolokia-1.3.3-bin.tar.gz -C /opt/opendaylight/system/org" \
-    --upload $honeycomb_pkg:/root/ \
-    --run-command "yum -y install /root/$honeycomb_pkg" \
-    --upload ./setup_TEPs.py:/tmp \
+    --install honeycomb \
+    --upload ${BUILD_DIR}/puppet-opendaylight.tar.gz:/etc/puppet/modules/ \
+    --run-command "cd /etc/puppet/modules/ && tar xzf puppet-opendaylight.tar.gz" \
     -a overcloud-full-opendaylight_build.qcow2
 
-    # Move these two lines above the -a overcloud-full-opendaylight_build.qcow2 when the patch has been rebased
-    #--upload ../opnfv-puppet-tripleo.patch:/tmp \
-    #--run-command "cd /etc/puppet/modules/tripleo && patch -Np1 < /tmp/opnfv-puppet-tripleo.patch" \
-
-## WORK AROUND
-## when OpenDaylight lands in upstream RDO manager this can be removed
-
-# upload the opendaylight puppet module
-rm -rf puppet-opendaylight
-git clone -b opnfv_integration https://github.com/dfarrell07/puppet-opendaylight
-pushd puppet-opendaylight > /dev/null
-git archive --format=tar.gz --prefix=opendaylight/ HEAD > ../puppet-opendaylight.tar.gz
-popd > /dev/null
-LIBGUESTFS_BACKEND=direct virt-customize --upload puppet-opendaylight.tar.gz:/etc/puppet/modules/ \
-                                         --run-command "cd /etc/puppet/modules/ && tar xzf puppet-opendaylight.tar.gz" \
-                                         -a overcloud-full-opendaylight_build.qcow2
-
 mv overcloud-full-opendaylight_build.qcow2 overcloud-full-opendaylight.qcow2
 popd > /dev/null
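
The finished image carries Beryllium installed from the release repo plus Boron
and Carbon RPMs pre-downloaded (not installed) into /root/boron and /root/master.
Those staged packages can be checked without booting the image, using the same
libguestfs toolchain the build already requires; an illustrative check:

    # Illustrative only: list the ODL versions staged inside the built image.
    LIBGUESTFS_BACKEND=direct virt-ls -a overcloud-full-opendaylight.qcow2 /root/boron
    LIBGUESTFS_BACKEND=direct virt-ls -a overcloud-full-opendaylight.qcow2 /root/master
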
index 2fd4e04..97668c8 100644 (file)
@@ -1,27 +1,27 @@
-%define name networking-vpp
-%define version %(python setup.py --version)
 %define release 1
-%define _topdir %(pwd)/build/rpm
-%define _builddir %(pwd)
-%define _rpmdir %(pwd)/build/rpm
-
-Summary: OpenStack Networking for VPP
-Name: %{name}
-Version: %{version}
-Release: %{release}
-License: Apache 2.0
-Group: Development/Libraries
+
+Summary:   OpenStack Networking for VPP
+Name:      python-networking-vpp
+Version:   0.0.1
+Release:   %{release}%{?git}%{?dist}
+
+License:   Apache 2.0
+Group:     Applications/Internet
+Source0:   python-networking-vpp.tar.gz
+Url:       https://github.com/openstack/networking-vpp/
+
 BuildArch: noarch
-Requires: vpp
-Vendor: OpenStack <openstack-dev@lists.openstack.org>
-Packager: Feng Pan <fpan@redhat.com>
-Url: http://www.openstack.org/
+AutoReq:   no
+Requires:  vpp
+Vendor:    OpenStack <openstack-dev@lists.openstack.org>
+Packager:  Feng Pan <fpan@redhat.com>
 
 %description
 ML2 Mechanism driver and small control plane for OpenVPP forwarder
 
 %prep
-cat << EOF > %{_builddir}/networking-vpp-agent.service
+%setup -q
+cat << EOF > %{_builddir}/neutron-vpp-agent.service
 [Unit]
 Description=Networking VPP ML2 Agent
 
@@ -38,13 +38,13 @@ WantedBy=multi-user.target
 EOF
 
 %install
-/usr/bin/python setup.py install -O1 --root=$RPM_BUILD_ROOT --record=INSTALLED_FILES
+python setup.py install -O1 --root=%{buildroot} --record=INSTALLED_FILES
 mkdir -p %{buildroot}/usr/lib/systemd/system
-install %{_builddir}/networking-vpp-agent.service %{buildroot}/usr/lib/systemd/system
+install %{_builddir}/neutron-vpp-agent.service %{buildroot}/usr/lib/systemd/system
 
 %clean
-rm -rf $RPM_BUILD_ROOT
+rm -rf %{buildroot}
 
 %files -f INSTALLED_FILES
 %defattr(-,root,root)
-%attr(644,root,root) /usr/lib/systemd/system/networking-vpp-agent.service
+%attr(644,root,root) /usr/lib/systemd/system/neutron-vpp-agent.service
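
With Release now expanding to %{release}%{?git}%{?dist}, the short upstream
commit computed in build/variables.sh can be stamped into the package NVR at
build time. A hedged sketch of the expected rpmbuild invocation; the Makefile
wiring is not part of this hunk, so the macro name and source directory here
are assumptions:

    # Assumed invocation: stamp the remote HEAD's short hash into the Release tag,
    # yielding e.g. python-networking-vpp-0.0.1-1.git<hash>.el7.noarch.rpm.
    commit=$(git ls-remote https://github.com/openstack/networking-vpp master \
             | awk '{print substr($1,1,7)}')
    rpmbuild -ba build/rpm_specs/networking-vpp.spec \
        --define "git .git${commit}" \
        --define "_sourcedir $(pwd)/.build"
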
index 4a109fb..37023be 100644 (file)
@@ -1,8 +1,8 @@
 %define debug_package %{nil}
 
 Name:          openstack-congress
-Version:       2016.1
-Release:       1%{?dist}
+Version:       2016.2
+Release:       1%{?git}%{?dist}
 Summary:       OpenStack servicevm/device manager
 
 Group:         Applications/Internet
@@ -11,20 +11,15 @@ URL:                https://wiki.openstack.org/wiki/Congress/Installation
 Source0:       openstack-congress.tar.gz
 
 BuildArch:     noarch
-BuildRequires: python-setuptools python2-oslo-config python2-debtcollector libffi-devel python-devel openssl-devel
-#Requires:     pbr>=0.8 Paste PasteDeploy>=1.5.0 Routes>=1.12.3!=2.0 anyjson>=0.3.3 argparse
-#Requires:     Babel>=1.3 eventlet>=0.16.1!=0.17.0 greenlet>=0.3.2 httplib2>=0.7.5 requests>=2.2.0!=2.4.0
-#Requires:     iso8601>=0.1.9 kombu>=2.5.0 netaddr>=0.7.12 SQLAlchemy<1.1.0>=0.9.7
-#Requires:     WebOb>=1.2.3 python-heatclient>=0.3.0 python-keystoneclient>=1.1.0 alembic>=0.7.2 six>=1.9.0
-#Requires:     stevedore>=1.5.0 http oslo.config>=1.11.0 oslo.messaging!=1.17.0!=1.17.1>=1.16.0 oslo.rootwrap>=2.0.0 python-novaclient>=2.22.0
+
+BuildRequires: python-setuptools python2-oslo-config python2-debtcollector libffi-devel python-devel openssl-devel python2-oslo-config python2-debtcollector python34-devel
 
 %description
 OpenStack policy manager
 
 %prep
-#git archive --format=tar.gz --prefix=openstack-congress-%{version}/ HEAD > openstack-congress.tar.gz
-
 %setup -q
+rm requirements.txt
 
 
 %build
@@ -33,7 +28,9 @@ OpenStack policy manager
 
 
 %install
-/usr/bin/python setup.py install --prefix=%{buildroot} --install-lib=%{buildroot}/usr/lib/python2.7/site-packages
+/usr/bin/python setup.py install --root=%{buildroot}
+
+rm -rf %{buildroot}/usr/lib/python2.7/site-packages/congress_tempest_tests
 
 install -d -m 755 %{buildroot}/var/log/congress/
 install -d -m 755 %{buildroot}/etc/congress/snapshot/
@@ -63,17 +60,17 @@ exit 0
 %systemd_postun_with_restart openstack-congress
 
 %files
-
-%config /etc/congress/congress.conf
-/etc/congress/policy.json
+%{python2_sitelib}/congress-*.egg-info
 /etc/congress/api-paste.ini
-/bin/congress-server
-/bin/congress-db-manage
+/etc/congress/congress.conf
+/etc/congress/policy.json
+/usr/bin/congress-db-manage
+/usr/bin/congress-server
 %{_unitdir}/openstack-congress.service
-/usr/lib/python2.7/site-packages/congress/*
-/usr/lib/python2.7/site-packages/congress-*
-/usr/lib/python2.7/site-packages/congress_tempest_tests/*
-/usr/lib/python2.7/site-packages/antlr3runtime/*
+/usr/lib/python2.7/site-packages/congress
+/usr/lib/python2.7/site-packages/congress_dashboard
+/usr/lib/python2.7/site-packages/antlr3runtime
+
 %dir %attr(0750, congress, root) %{_localstatedir}/log/congress
 
 %changelog
index b4c9ada..7ed5cbd 100644 (file)
@@ -1,8 +1,8 @@
 %define debug_package %{nil}
 
 Name:   openstack-tacker
-Version:  2015.2
-Release:  1.trozet
+Version:  2016.2
+Release:  1%{?git}
 Summary:  OpenStack servicevm/device manager
 
 Group:    Applications/Internet
@@ -12,11 +12,6 @@ Source0:  openstack-tacker.tar.gz
 
 BuildArch:  noarch
 BuildRequires:  python-setuptools
-#Requires:  pbr>=0.8 Paste PasteDeploy>=1.5.0 Routes>=1.12.3!=2.0 anyjson>=0.3.3 argparse
-#Requires:  Babel>=1.3 eventlet>=0.16.1!=0.17.0 greenlet>=0.3.2 httplib2>=0.7.5 requests>=2.2.0!=2.4.0
-#Requires:  iso8601>=0.1.9 kombu>=2.5.0 netaddr>=0.7.12 SQLAlchemy<1.1.0>=0.9.7
-#Requires:  WebOb>=1.2.3 python-heatclient>=0.3.0 python-keystoneclient>=1.1.0 alembic>=0.7.2 six>=1.9.0
-#Requires:  stevedore>=1.5.0 http oslo.config>=1.11.0 oslo.messaging!=1.17.0!=1.17.1>=1.16.0 oslo.rootwrap>=2.0.0 python-novaclient>=2.22.0 
 
 %description
 OpenStack servicevm/device manager
@@ -31,11 +26,14 @@ rm requirements.txt
 
 
 %install
-/usr/bin/python setup.py install --prefix=%{buildroot} --install-lib=%{buildroot}/usr/lib/python2.7/site-packages
+/usr/bin/python setup.py install --root=%{buildroot}
 #remove tests
 rm -rf %{buildroot}/usr/lib/python2.7/site-packages/tacker/tests
-
-install -p -D -m 644 apex/systemd/openstack-tacker.service %{buildroot}%{_unitdir}/openstack-tacker.service
+# Move config files from /usr/etc/ to /etc
+mv %{buildroot}/usr/etc %{buildroot}
+#install -p -D -m 644 apex/systemd/openstack-tacker.service %{buildroot}%{_unitdir}/openstack-tacker.service
+# Remove egg-info
+rm -rf %{buildroot}/usr/lib/python2.7/site-packages/*egg-info
 
 install -d -m 755 %{buildroot}%{_localstatedir}/cache/tacker
 install -d -m 755 %{buildroot}%{_sharedstatedir}/tacker
@@ -58,17 +56,24 @@ exit 0
 %systemd_postun_with_restart openstack-tacker
 
 %files
-/bin/tacker-server
-/bin/tacker-db-manage
-/bin/tacker-rootwrap
-/etc/init.d/tacker-server
-%{_unitdir}/openstack-tacker.service
-/etc/rootwrap.d/servicevm.filters
-%config(noreplace) %attr(-, root, tacker) %{_sysconfdir}/tacker/*
+/usr/bin/tacker-server
+/usr/bin/tacker-db-manage
+/usr/bin/tacker-rootwrap
+#%{_unitdir}/openstack-tacker.service
 /usr/lib/python2.7/site-packages/tacker/*
-/usr/lib/python2.7/site-packages/tacker-*
+#%config(noreplace) %attr(-, root, tacker) %{_sysconfdir}/tacker/tacker.conf
+%{_sysconfdir}/init.d/tacker-server
+%{_sysconfdir}/rootwrap.d/tacker.filters
+%{_sysconfdir}/tacker/api-paste.ini
+%{_sysconfdir}/tacker/policy.json
+%{_sysconfdir}/tacker/rootwrap.conf
 %dir %attr(0750, tacker, root) %{_localstatedir}/cache/tacker
 %dir %attr(0750, tacker, root) %{_sharedstatedir}/tacker
 %dir %attr(0750, tacker, root) %{_localstatedir}/log/tacker
 
 %changelog
+* Wed Nov 30 2016 Dan Radez <dradez@redhat.com> - 2016.2-1
+- Version update for Newton
+
+* Mon Jul 25 2016 Tim Rozet <trozet@redhat.com> - 2015.2-1
+- Initial Commit
index 33e1486..09d54e0 100644 (file)
@@ -46,6 +46,7 @@ install config/deploy/os-nosdn-ovs-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex
 install config/deploy/os-odl_l2-nofeature-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-odl_l2-nofeature-ha.yaml
 install config/deploy/os-odl_l2-sfc-noha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-odl_l2-sfc-noha.yaml
 install config/deploy/os-odl_l2-bgpvpn-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-odl_l2-bgpvpn-ha.yaml
+install config/deploy/os-odl_l2-bgpvpn-noha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-odl_l2-bgpvpn-noha.yaml
 install config/deploy/os-odl_l2-fdio-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-odl_l2-fdio-ha.yaml
 install config/deploy/os-odl_l2-fdio-noha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-odl_l2-fdio-noha.yaml
 install config/deploy/os-odl_l3-nofeature-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-odl_l3-nofeature-ha.yaml
@@ -70,6 +71,7 @@ mkdir -p %{buildroot}%{python3_sitelib}/apex/
 install lib/python/apex/__init__.py %{buildroot}%{python3_sitelib}/apex/
 install lib/python/apex/deploy_settings.py %{buildroot}%{python3_sitelib}/apex/
 install lib/python/apex/ip_utils.py %{buildroot}%{python3_sitelib}/apex/
+install lib/python/apex/inventory.py %{buildroot}%{python3_sitelib}/apex/
 install lib/python/apex/network_environment.py %{buildroot}%{python3_sitelib}/apex/
 install lib/python/apex/network_settings.py %{buildroot}%{python3_sitelib}/apex/
 install lib/python/apex/clean.py %{buildroot}%{python3_sitelib}/apex/
@@ -118,6 +120,7 @@ install config/inventory/pod_example_settings.yaml %{buildroot}%{_docdir}/opnfv/
 %{_sysconfdir}/opnfv-apex/os-odl_l2-nofeature-ha.yaml
 %{_sysconfdir}/opnfv-apex/os-odl_l2-sfc-noha.yaml
 %{_sysconfdir}/opnfv-apex/os-odl_l2-bgpvpn-ha.yaml
+%{_sysconfdir}/opnfv-apex/os-odl_l2-bgpvpn-noha.yaml
 %{_sysconfdir}/opnfv-apex/os-odl_l2-fdio-noha.yaml
 %{_sysconfdir}/opnfv-apex/os-odl_l2-fdio-ha.yaml
 %{_sysconfdir}/opnfv-apex/os-odl_l3-nofeature-ha.yaml
index ef1c66d..7bdd777 100644 (file)
@@ -21,13 +21,15 @@ https://wiki.opnfv.org/apex
 
 %install
 mkdir -p %{buildroot}%{_var}/opt/opnfv/images/
-install build/images/overcloud-full-onos.qcow2 %{buildroot}%{_var}/opt/opnfv/images/
+install build/overcloud-full-onos.qcow2 %{buildroot}%{_var}/opt/opnfv/images/
 
 %files
 %defattr(644, root, root, -)
 %{_var}/opt/opnfv/images/overcloud-full-onos.qcow2
 
 %changelog
+* Wed Dec 7 2016 Tim Rozet <trozet@redhat.com> - 4.0-2
+- Make install path consistent
 * Wed Nov 2 2016 Dan Radez <dradez@redhat.com> - 4.0-1
 - Version update for Danube
 * Mon Apr 04 2016 Dan Radez <dradez@redhat.com> - 3.0-0
diff --git a/build/rpm_specs/opnfv-apex-release.spec b/build/rpm_specs/opnfv-apex-release.spec
new file mode 100644 (file)
index 0000000..7dd43d9
--- /dev/null
@@ -0,0 +1,30 @@
+Name:          opnfv-apex-release
+Version:       danube
+Release:       %{release}
+Summary:       RPM Release file
+
+Group:         System Environment
+License:       Apache 2.0
+URL:           https://gerrit.opnfv.org/gerrit/apex.git
+Source0:       opnfv-apex-release.tar.gz
+
+BuildArch:     noarch
+Requires:      rdo-release = newton epel-release libvirt-python
+
+%description
+RPM Release file that provides a yum repo file to install OPNFV Apex
+
+%prep
+%setup -q
+
+%install
+mkdir -p %{buildroot}%{_sysconfdir}/yum.repos.d/
+install config/yum.repos.d/opnfv-apex.repo %{buildroot}%{_sysconfdir}/yum.repos.d/
+
+%files
+%defattr(644, root, root, -)
+%{_sysconfdir}/yum.repos.d/opnfv-apex.repo
+
+%changelog
+* Wed Nov 23 2016 Dan Radez <dradez@redhat.com> - 3.0-1
+- Initial Packaging
index 1844d8c..a74733c 100644 (file)
@@ -21,13 +21,15 @@ https://wiki.opnfv.org/apex
 
 %install
 mkdir -p %{buildroot}%{_var}/opt/opnfv/images/
-install build/images/overcloud-full-opendaylight.qcow2 %{buildroot}%{_var}/opt/opnfv/images/
+install build/overcloud-full-opendaylight.qcow2 %{buildroot}%{_var}/opt/opnfv/images/
 
 %files
 %defattr(644, root, root, -)
 %{_var}/opt/opnfv/images/overcloud-full-opendaylight.qcow2
 
 %changelog
+* Wed Dec 7 2016 Tim Rozet <trozet@redhat.com> - 4.0-2
+- Make install path consistent
 * Wed Nov 2 2016 Dan Radez <dradez@redhat.com> - 4.0-1
 - Version update for Danube
 * Mon Apr 04 2016 Dan Radez <dradez@redhat.com> - 3.0-0
index 72cbf62..5758f7c 100644 (file)
@@ -1,8 +1,8 @@
 %define debug_package %{nil}
 
 Name:          python-tackerclient
-Version:       2015.2
-Release:       1.trozet
+Version:       2016.2
+Release:       1%{?git}
 Summary:       CLI and Client Library for OpenStack Networking
 
 Group:         Applications/Internet
@@ -37,4 +37,8 @@ rm requirements.txt
 /usr/lib/python2.7/site-packages/python_tackerclient-*
 
 %changelog
+* Wed Nov 30 2016 Dan Radez <dradez@redhat.com> - 2016.2-1
+- Version update for Newton
 
+* Mon Jul 25 2016 Tim Rozet <trozet@redhat.com> - 2015.2-1
+- Initial Commit
index eaa3b50..adf08ea 100755 (executable)
@@ -7,22 +7,27 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
-set -e
+set -xe
 source ./cache.sh
 source ./variables.sh
 source ./functions.sh
 
 populate_cache "$rdo_images_uri/undercloud.qcow2"
-if [ ! -d images ]; then mkdir images/; fi
-cp -f cache/undercloud.qcow2 images/undercloud_build.qcow2
+if [ ! -d "$BUILD_DIR" ]; then mkdir ${BUILD_DIR}; fi
+cp -f ${CACHE_DIR}/undercloud.qcow2 ${BUILD_DIR}/undercloud_build.qcow2
+
+pushd ${BUILD_DIR} > /dev/null
 
 # prep opnfv-tht for undercloud
 clone_fork opnfv-tht
 pushd opnfv-tht > /dev/null
-git archive --format=tar.gz --prefix=openstack-tripleo-heat-templates/ HEAD > ../opnfv-tht.tar.gz
+git archive --format=tar.gz --prefix=openstack-tripleo-heat-templates/ HEAD > ${BUILD_DIR}/opnfv-tht.tar.gz
 popd > /dev/null
 
-pushd images > /dev/null
+# Add custom IPA to allow kernel params
+curl -fO https://raw.githubusercontent.com/trozet/ironic-python-agent/opnfv_kernel/ironic_python_agent/extensions/image.py
+python3 -c 'import py_compile; py_compile.compile("image.py", cfile="image.pyc")'
+
 # installing forked opnfv-tht
 # enabling ceph OSDs to live on the controller
 # OpenWSMan package update supports the AMT Ironic driver for the TealBox
@@ -31,18 +36,23 @@ pushd images > /dev/null
 # add congress password to python-tripleoclient
 # add tacker password to python-tripleoclient
 # upload tacker repo and install the client package
+# Add performance image scripts
+# hack for disabling undercloud package update
 LIBGUESTFS_BACKEND=direct virt-customize \
-    --upload ../opnfv-tht.tar.gz:/usr/share \
+    --run-command "sed -i 's/^#UseDNS.*$/UseDNS no/' /etc/ssh/sshd_config" \
+    --run-command "sed -i 's/^GSSAPIAuthentication.*$/GSSAPIAuthentication no/' /etc/ssh/sshd_config" \
+    --upload ${BUILD_DIR}/opnfv-tht.tar.gz:/usr/share \
     --install "openstack-utils" \
     --install "ceph-common" \
     --run-command "cd /usr/share && rm -rf openstack-tripleo-heat-templates && tar xzf opnfv-tht.tar.gz" \
     --run-command "sed -i '/ControllerEnableCephStorage/c\\  ControllerEnableCephStorage: true' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml" \
     --run-command "sed -i '/ComputeEnableCephStorage/c\\  ComputeEnableCephStorage: true' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml" \
-    --run-command "curl http://download.opensuse.org/repositories/Openwsman/CentOS_CentOS-7/Openwsman.repo > /etc/yum.repos.d/wsman.repo" \
+    --run-command "curl -f http://download.opensuse.org/repositories/Openwsman/CentOS_CentOS-7/Openwsman.repo > /etc/yum.repos.d/wsman.repo" \
     --run-command "yum update -y openwsman*" \
     --run-command "cp /usr/share/instack-undercloud/undercloud.conf.sample /home/stack/undercloud.conf && chown stack:stack /home/stack/undercloud.conf" \
-    --upload ../opnfv-environment.yaml:/home/stack/ \
-    --upload ../virtual-environment.yaml:/home/stack/ \
+    --upload ${BUILD_ROOT}/opnfv-environment.yaml:/home/stack/ \
+    --upload ${BUILD_ROOT}/csit-environment.yaml:/home/stack/ \
+    --upload ${BUILD_ROOT}/virtual-environment.yaml:/home/stack/ \
     --install "python2-congressclient" \
     --run-command "sed -i '/SERVICE_LIST/a\\    \x27congress\x27: {\x27password_field\x27: \x27OVERCLOUD_CONGRESS_PASSWORD\x27},' /usr/lib/python2.7/site-packages/tripleoclient/constants.py" \
     --run-command "sed -i '/PASSWORD_NAMES =/a\\    \"OVERCLOUD_CONGRESS_PASSWORD\",' /usr/lib/python2.7/site-packages/tripleoclient/utils.py" \
@@ -52,24 +62,18 @@ LIBGUESTFS_BACKEND=direct virt-customize \
     --run-command "sed -i '/PASSWORD_NAMES =/a\\    \"OVERCLOUD_TACKER_PASSWORD\",' /usr/lib/python2.7/site-packages/tripleoclient/utils.py" \
     --run-command "sed -i '/AodhPassword/a\\        parameters\[\x27TackerPassword\x27\] = passwords\[\x27OVERCLOUD_TACKER_PASSWORD\x27\]' /usr/lib/python2.7/site-packages/tripleoclient/v1/overcloud_deploy.py" \
     --run-command "sed -i '/^SERVICES/a\    \x27tacker\x27: {\x27description\x27: \x27Tacker Service\x27, \x27type\x27: \x27servicevm\x27, \x27path\x27: \x27/\x27, \x27port\x27: 8888 },' /usr/lib/python2.7/site-packages/os_cloud_config/keystone.py" \
-    --upload ../noarch/python-tackerclient-2015.2-1.trozet.noarch.rpm:/root/ \
-    --install /root/python-tackerclient-2015.2-1.trozet.noarch.rpm \
+    --upload ${BUILD_DIR}/noarch/$tackerclient_pkg:/root/ \
+    --install /root/$tackerclient_pkg \
     --install "python2-aodhclient" \
     --install "openstack-heat-engine" \
     --install "openstack-heat-api-cfn" \
     --install "openstack-heat-api" \
+    --upload ${BUILD_ROOT}/build_perf_image.sh:/home/stack \
+    --upload ${BUILD_ROOT}/set_perf_images.sh:/home/stack \
+    --upload ${BUILD_DIR}/image.py:/root \
+    --upload ${BUILD_DIR}/image.pyc:/root \
+    --run-command "sed -i '/pkg_upgrade_cmd =/c\\    \$pkg_upgrade_cmd =echo' /usr/share/instack-undercloud/puppet-stack-config/puppet-stack-config.pp" \
     -a undercloud_build.qcow2
 
-# Add custom IPA to allow kernel params
-wget https://raw.githubusercontent.com/trozet/ironic-python-agent/opnfv_kernel/ironic_python_agent/extensions/image.py
-python3 -c 'import py_compile; py_compile.compile("image.py", cfile="image.pyc")'
-
-# Add performance image scripts
-LIBGUESTFS_BACKEND=direct virt-customize --upload ../build_perf_image.sh:/home/stack \
-                                         --upload ../set_perf_images.sh:/home/stack \
-                                         --upload image.py:/root \
-                                         --upload image.pyc:/root \
-                                         -a undercloud_build.qcow2
-
 mv -f undercloud_build.qcow2 undercloud.qcow2
 popd > /dev/null
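
Several of the --run-command lines above rely on sed's append command ('a\') to
splice the Congress and Tacker entries into the tripleoclient sources inside the
image. The idiom is easy to exercise outside the image first; a self-contained
demo with a throwaway file:

    # Stand-alone demo of the sed 'a\' (append after match) idiom used above.
    printf 'SERVICE_LIST = {\n}\n' > /tmp/constants_demo.py
    sed -i "/SERVICE_LIST/a\\    'congress': {'password_field': 'OVERCLOUD_CONGRESS_PASSWORD'}," /tmp/constants_demo.py
    cat /tmp/constants_demo.py
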
index c4ac16c..a40eb23 100644 (file)
@@ -8,14 +8,17 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 
+BUILD_ROOT=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+BUILD_DIR="$(dirname ${BUILD_ROOT})/.build"
+CACHE_DIR="$(dirname ${BUILD_ROOT})/.cache"
+CACHE_HISTORY=".cache_history"
+
 rdo_images_uri=http://buildlogs.centos.org/centos/7/cloud/x86_64/tripleo_images/newton/delorean
 onos_release_uri=https://downloads.onosproject.org/nightly/
 onos_release_file=onos-1.6.0-rc2.tar.gz
 onos_jdk_uri=http://artifacts.opnfv.org/apex/colorado
 onos_ovs_uri=http://artifacts.opnfv.org/apex/colorado
 onos_ovs_pkg=package_ovs_rpm3.tar.gz
-openstack_congress_pkg="openstack-congress-2016.1-1$(rpm -E '%dist').noarch.rpm"
-doctor_driver=https://raw.githubusercontent.com/openstack/congress/master/congress/datasources/doctor_driver.py
 if [ -z ${GS_PATHNAME+x} ]; then
     GS_PATHNAME=/colorado
 fi
@@ -28,11 +31,22 @@ dpdk_rpms=(
 'ovs4opnfv-e8acab14-openvswitch-2.5.90-0.12032.gitc61e93d6.1.el7.centos.x86_64.rpm'
 )
 
-fdio_uri_base=http://artifacts.opnfv.org/apex/colorado
-fdio_pkgs=(
-'vpp-16.09-release.x86_64.rpm'
-'vpp-devel-16.09-release.x86_64.rpm'
-'vpp-lib-16.09-release.x86_64.rpm'
-'vpp-python-api-16.09-release.x86_64.rpm'
-)
-honeycomb_pkg='honeycomb-1.16.9-FINAL.noarch.rpm'
+tacker_repo="http://github.com/openstack/tacker"
+tacker_branch="stable/newton"
+tacker_commit=$(git ls-remote ${tacker_repo} ${tacker_branch} | awk '{print substr($1,1,7)}')
+tacker_pkg=openstack-tacker-2016.2-1.git${tacker_commit}.noarch.rpm
+
+tackerclient_repo="http://github.com/openstack/python-tackerclient"
+tackerclient_branch="stable/newton"
+tackerclient_commit=$(git ls-remote ${tackerclient_repo} ${tackerclient_branch} | awk '{print substr($1,1,7)}')
+tackerclient_pkg=python-tackerclient-2016.2-1.git${tackerclient_commit}.noarch.rpm
+
+congress_repo="http://github.com/openstack/congress"
+congress_branch="stable/newton"
+congress_commit=$(git ls-remote ${congress_repo} ${congress_branch} | awk '{print substr($1,1,7)}')
+congress_pkg=openstack-congress-2016.2-1.git${congress_commit}$(rpm -E %dist).noarch.rpm
+
+netvpp_repo="https://github.com/openstack/networking-vpp"
+netvpp_branch="master"
+netvpp_commit=$(git ls-remote ${netvpp_repo} ${netvpp_branch} | awk '{print substr($1,1,7)}')
+netvpp_pkg=python-networking-vpp-0.0.1-1.git${netvpp_commit}$(rpm -E %dist).noarch.rpm
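
Each of these package names embeds the first seven characters of the upstream
branch head, resolved with git ls-remote so no clone is needed. The same
one-liner doubles as a pre-build check that the pinned branches are reachable:

    # Illustrative pre-build check: print the short hash each pinned branch resolves to.
    for repo_branch in \
        "http://github.com/openstack/tacker stable/newton" \
        "http://github.com/openstack/congress stable/newton" \
        "https://github.com/openstack/networking-vpp master"; do
        git ls-remote ${repo_branch} | awk '{print substr($1,1,7)}'
    done
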
index f1333ce..3531a65 100755 (executable)
@@ -8,17 +8,17 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 
-set -e
+set -xe
 
 display_usage ()
 {
 cat << EOF
 $0 Builds the Apex OPNFV Deployment Toolchain
 
-usage: $0 [ -c cache_dir ] -r release_name [ --iso | --rpms ]
+usage: $0 [ -c cache_dest_dir ] -r release_name [ --iso | --rpms ]
 
 OPTIONS:
-  -c cache destination - directory of cached files, defaults to ./cache
+  -c cache destination - destination to save tarball of cache
   -r release name/version of the build result
   --iso build the iso (implies RPMs too)
   --rpms build the rpms
@@ -30,12 +30,13 @@ build -c file:///tmp/cache -r dev123
 EOF
 }
 
-BUILD_BASE=$(readlink -e ../build/)
+APEX_ROOT=$(dirname $(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd))
 CACHE_DEST=""
-CACHE_DIR="cache"
+CACHE_DIR="${APEX_ROOT}/.cache"
 CACHE_NAME="apex-cache"
 MAKE_TARGETS="images"
 REQUIRED_PKGS="rpm-build python-docutils"
+RELEASE_RPM=""
 
 parse_cmdline() {
   while [ "${1:0:1}" = "-" ]
@@ -45,7 +46,7 @@ parse_cmdline() {
                 display_usage
                 exit 0
             ;;
-        -c|--cache-dir)
+        -c|--cache-dest)
                 CACHE_DEST=${2}
                 shift 2
             ;;
@@ -63,11 +64,21 @@ parse_cmdline() {
                 echo "Building opnfv-apex RPMs"
                 shift 1
             ;;
+        --release-rpm )
+                RELEASE_RPM=" release-rpm"
+                echo "Building opnfv-apex RPMs"
+                shift 1
+            ;;
         --debug )
                 debug="TRUE"
                 echo "Enable debug output"
                 shift 1
             ;;
+        --build-cache )
+                MAKE_TARGETS=""
+                echo "Building Cache"
+                shift 1
+            ;;
         *)
                 display_usage
                 exit 1
@@ -78,11 +89,26 @@ parse_cmdline() {
 }
 
 run_make() {
-  make $MAKE_ARGS -C ${BUILD_BASE} $1
+  make $MAKE_ARGS -C ${BUILD_DIRECTORY} $1
 }
 
 parse_cmdline "$@"
 
+if [ -z "$BUILD_DIRECTORY" ]; then
+  if [ -d "${APEX_ROOT}/build" ]; then
+    BUILD_DIRECTORY="${APEX_ROOT}/build"
+  else
+    echo "Cannot find build directory, please provide BUILD_DIRECTORY environment variable...exiting"
+    exit 1
+  fi
+elif [ ! -d "$BUILD_DIRECTORY" ]; then
+  echo "Provided build directory is invalid: ${BUILD_DIRECTORY} ...exiting"
+  exit 1
+fi
+
+# Add release rpm to make targets if defined
+MAKE_TARGETS+=$RELEASE_RPM
+
 # Install build dependencies
 for pkg in $REQUIRED_PKGS; do
   if ! rpm -q $pkg > /dev/null; then
@@ -95,21 +121,29 @@ done
 
 if [ -n "$RELEASE" ]; then MAKE_ARGS+="RELEASE=$RELEASE "; fi
 
-# Get the Old Cache
-if [ -n "$CACHE_DEST" ]; then
+# Get the Old Cache and build new cache history file
+if [[ -n "$CACHE_DEST" && -n "$MAKE_TARGETS" ]]; then
     echo "Retrieving Cache"
     if [ -f $CACHE_DEST/${CACHE_NAME}.tgz ]; then
         echo "Cache found at ${CACHE_DEST}/${CACHE_NAME}.tgz"
-        rm -rf $BUILD_BASE/$CACHE_DIR
-        echo "Unpacking Cache to $BUILD_BASE"
-        tar -xvzf $CACHE_DEST/${CACHE_NAME}.tgz -C ${BUILD_BASE}
+        rm -rf $CACHE_DIR
+        mkdir $CACHE_DIR
+        echo "Unpacking Cache to ${CACHE_DIR}"
+        tar -xvzf ${CACHE_DEST}/${CACHE_NAME}.tgz -C ${CACHE_DIR}
         echo "Cache contents after unpack:"
-        ls -l $BUILD_BASE/$CACHE_DIR
-    elif [ ! -d $BUILD_BASE/$CACHE_DIR ]; then
-        mkdir $BUILD_BASE/$CACHE_DIR
+        ls -al ${CACHE_DIR}
+    else
+        echo "No Cache Found"
     fi
 fi
 
+# Ensure the build cache dir exists
+if [ ! -d "$CACHE_DIR" ]; then
+    rm -rf ${CACHE_DIR}
+    echo "Creating Build Cache Directory"
+    mkdir ${CACHE_DIR}
+fi
+
 # Conditionally execute RPM build checks if the specs change and target is not rpm or iso
 if [[ "$MAKE_TARGETS" == "images" ]]; then
     commit_file_list=$(git show --pretty="format:" --name-status)
@@ -123,6 +157,9 @@ if [[ "$MAKE_TARGETS" == "images" ]]; then
         if [[ $commit_file_list == *build/rpm_specs/opnfv-apex-undercloud.spec* ]]; then
             MAKE_TARGETS+=" undercloud-rpm-check"
         fi
+        if [[ $commit_file_list == *build/rpm_specs/opnfv-apex-release.spec* ]]; then
+            MAKE_TARGETS+=" release-rpm-check"
+        fi
         if [[ $commit_file_list == *build/rpm_specs/opnfv-apex-common.spec* ]]; then
             MAKE_TARGETS+=" common-rpm-check"
         fi
@@ -154,8 +191,11 @@ echo "Build Complete"
 # Build new Cache
 if [ -n "$CACHE_DEST" ]; then
     echo "Building Cache"
-    if [ ! -d $CACHE_DEST ]; then mkdir -p $CACHE_DEST; fi
-    tar --atime-preserve --dereference -C $BUILD_BASE -caf $BUILD_BASE/${CACHE_NAME}.tgz $CACHE_DIR -C ${CACHE_DEST}/
+    ls -lah ${CACHE_DIR}
+    # ensure the destination exists
+    mkdir -p ${CACHE_DEST}
+    # roll the cache tarball
+    tar --atime-preserve --dereference -caf ${CACHE_DEST}/${CACHE_NAME}.tgz -C ${CACHE_DIR} .
     if [ -f "${CACHE_DEST}/${CACHE_NAME}.tgz" ]; then
       echo "Cache Build Complete"
     else
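
The cache tarball is now rolled from inside the hidden .cache directory and
unpacked straight into it, so the archive carries no leading cache/ path
component the way the old BUILD_BASE layout did. A quick round-trip of the new
layout, using throwaway paths:

    # Round-trip of the new cache layout: archive the contents of the cache dir,
    # then unpack them flat into a fresh directory.
    mkdir -p /tmp/cache-src /tmp/cache-dst
    touch /tmp/cache-src/undercloud.qcow2
    tar --atime-preserve --dereference -caf /tmp/apex-cache.tgz -C /tmp/cache-src .
    tar -xvzf /tmp/apex-cache.tgz -C /tmp/cache-dst
    ls /tmp/cache-dst    # undercloud.qcow2, with no leading directory component
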
index 1e5e320..262e74b 100755 (executable)
 #author: Dan Radez (dradez@redhat.com)
 #author: Tim Rozet (trozet@redhat.com)
 
+# Backwards compat for old ENV Vars
+# Remove in E Release
+if [ -n "$CONFIG" ]; then
+    echo -e "${red}WARNING: ENV var CONFIG is Deprecated, please unset CONFIG and export BASE in its place${reset}"
+    echo -e "${red}WARNING: CONFIG will be removed in E${reset}"
+    BASE=$CONFIG
+fi
+if [ -n "$RESOURCES" ]; then
+    echo -e "${red}WARNING: ENV var RESOURCES is Deprecated, please unset RESOURCES and export IMAGES in its place${reset}"
+    echo -e "${red}WARNING: RESOURCES will be removed in E${reset}"
+    IMAGES=$RESOURCES
+fi
+
 # Use default if no param passed
-CONFIG=${CONFIG:-'/var/opt/opnfv'}
-RESOURCES=${RESOURCES:-"$CONFIG/images"}
-LIB=${LIB:-"$CONFIG/lib"}
+BASE=${BASE:-'/var/opt/opnfv'}
+IMAGES=${IMAGES:-"$BASE/images"}
+LIB=${LIB:-"$BASE/lib"}
 reset=$(tput sgr0 || echo "")
 blue=$(tput setaf 4 || echo "")
 red=$(tput setaf 1 || echo "")
index 482e134..b55f47e 100755 (executable)
@@ -24,6 +24,7 @@ green=$(tput setaf 2 || echo "")
 
 interactive="FALSE"
 ping_site="8.8.8.8"
+dnslookup_site="www.google.com"
 post_config="TRUE"
 debug="FALSE"
 
@@ -36,13 +37,27 @@ declare -A deploy_options_array
 declare -a performance_options
 declare -A NET_MAP
 
+# Backwards compat for old ENV Vars
+# Remove in E Release
+if [ -n "$CONFIG" ]; then
+    echo -e "${red}WARNING: ENV var CONFIG is Deprecated, please unset CONFIG and export BASE in its place${reset}"
+    echo -e "${red}WARNING: CONFIG will be removed in E${reset}"
+    BASE=$CONFIG
+fi
+if [ -n "$RESOURCES" ]; then
+    echo -e "${red}WARNING: ENV var RESOURCES is Deprecated, please unset RESOURCES and export IMAGES in its place${reset}"
+    echo -e "${red}WARNING: RESOURCES will be removed in E${reset}"
+    IMAGES=$RESOURCES
+fi
+
 APEX_TMP_DIR=$(python3 -c "import tempfile; print(tempfile.mkdtemp())")
 SSH_OPTIONS=(-o StrictHostKeyChecking=no -o GlobalKnownHostsFile=/dev/null -o UserKnownHostsFile=/dev/null -o LogLevel=error)
 DEPLOY_OPTIONS=""
-CONFIG=${CONFIG:-'/var/opt/opnfv'}
-RESOURCES=${RESOURCES:-"$CONFIG/images"}
-LIB=${LIB:-"$CONFIG/lib"}
+BASE=${BASE:-'/var/opt/opnfv'}
+IMAGES=${IMAGES:-"$BASE/images"}
+LIB=${LIB:-"$BASE/lib"}
 OPNFV_NETWORK_TYPES="admin tenant external storage api"
+ENV_FILE="opnfv-environment.yaml"
 
 VM_CPUS=4
 VM_RAM=8
@@ -82,6 +97,7 @@ display_usage() {
   echo -e "   --inventory | -i : Full path to inventory yaml file. Required only for baremetal"
   echo -e "   --net-settings | -n : Full path to network settings file. Optional."
   echo -e "   --ping-site | -p : site to use to verify IP connectivity. Optional. Defaults to 8.8.8.8"
+  echo -e "   --dnslookup-site : site to use to verify DNS resolution. Optional. Defaults to www.google.com"
   echo -e "   --virtual | -v : Virtualize overcloud nodes instead of using baremetal."
   echo -e "   --no-post-config : disable Post Install configuration."
   echo -e "   --debug : enable debug output."
@@ -118,11 +134,21 @@ parse_cmdline() {
                 echo "Network Settings Configuration file: $2"
                 shift 2
             ;;
+        -e|--environment-file)
+                ENV_FILE=$2
+                echo "Base OOO Environment file: $2"
+                shift 2
+            ;;
         -p|--ping-site)
                 ping_site=$2
                 echo "Using $2 as the ping site"
                 shift 2
             ;;
+        --dnslookup-site)
+                dnslookup_site=$2
+                echo "Using $2 as the dnslookup site"
+                shift 2
+            ;;
         -v|--virtual)
                 virtual="TRUE"
                 echo "Executing a Virtual Deployment"
index a15eb8b..2a6266c 100755 (executable)
@@ -32,7 +32,7 @@ if ! sudo yum update -y ipxe-roms-qemu; then
 fi
 
 # check for other packages
-for i in epel-release python34-PyYAML openvswitch openstack-tripleo libguestfs libguestfs-tools-c libvirt-python; do
+for i in epel-release python34-PyYAML openvswitch openstack-tripleo libguestfs libguestfs-tools-c libvirt-python python2-oslo-config python2-debtcollector python34-devel libxslt-devel libxml2-devel; do
 # Make sure deploy deps are installed
     if ! rpm -q $i > /dev/null; then
         if ! sudo yum install -y $i; then
@@ -64,11 +64,18 @@ virt_pkgs=(
 'perl-Sys-Guestfs-1.32.7-3.el7.x86_64.rpm'
 'python-libguestfs-1.32.7-3.el7.x86_64.rpm'
 )
-
+dir=/tmp/packages.$RANDOM
+mkdir -p $dir
+pushd $dir
+all_packages=""
 for pkg in ${virt_pkgs[@]}; do
-    if ! rpm -q ${pkg%-*-*}; then
-        if ! sudo yum -y install $virt_uri_base/$pkg; then
-            echo "ERROR: Failed to update $pkg"
-        fi
+    if ! wget $virt_uri_base/$pkg; then
+        echo "ERROR: Failed to download $pkg"
     fi
+    all_packages="$all_packages $pkg"
 done
+if [[ $all_packages != "" ]]; then
+    sudo yum install -y $all_packages
+fi
+rm -rf $dir
+popd
index 8985ed8..7cbd390 100755 (executable)
@@ -4,5 +4,12 @@ source ../lib/utility-functions.sh
 
 export ANSIBLE_HOST_KEY_CHECKING=False
 
+./dev_dep_check.sh
+
+yum install python-devel -y
+yum install openssl-devel -y
+easy_install pip
+pip install ansible
+
 echo 'See ~stack/smoke-tests.out on the undercloud for result log'
 ansible-playbook -i "$(get_undercloud_ip)," ../tests/smoke_tests/smoke_tests.yml
index 480858d..34821a7 100755 (executable)
@@ -2,9 +2,22 @@
 # Utility script used to interact with a deployment
 # @author Tim Rozet (trozet@redhat.com)
 
-CONFIG=${CONFIG:-'/var/opt/opnfv'}
-RESOURCES=${RESOURCES:-"$CONFIG/images"}
-LIB=${LIB:-"$CONFIG/lib"}
+# Backwards compat for old ENV Vars
+# Remove in E Release
+if [ -n "$CONFIG" ]; then
+    echo -e "${red}WARNING: ENV var CONFIG is Deprecated, please unset CONFIG and export BASE in its place${reset}"
+    echo -e "${red}WARNING: CONFIG will be removed in E${reset}"
+    BASE=$CONFIG
+fi
+if [ -n "$RESOURCES" ]; then
+    echo -e "${red}WARNING: ENV var RESOURCES is Deprecated, please unset RESOURCES and export IMAGES in its place${reset}"
+    echo -e "${red}WARNING: RESOURCES will be removed in E${reset}"
+    IMAGES=$RESOURCES
+fi
+
+BASE=${BASE:-'/var/opt/opnfv'}
+IMAGES=${IMAGES:-"$BASE/images"}
+LIB=${LIB:-"$BASE/lib"}
 VALID_CMDS="undercloud overcloud opendaylight debug-stack mock-detached -h --help"
 
 source $LIB/utility-functions.sh
index e7821f1..ea35ae7 100644 (file)
@@ -14,9 +14,9 @@ deploy_options:
   sdn_controller: opendaylight
 
   # Which version of ODL to use. This is only valid if 'opendaylight' was used
-  # above. If 'Boron' is specified, ODL Boron will be used. If no value is specified,
-  # Lithium will be used.
-  #odl_version: Boron
+  # above. Valid options are 'beryllium', 'boron' and 'carbon'. If no value
+  # is specified, Beryllium will be used.
+  #odl_version: boron
 
   # Whether to configure ODL L3 support. This will disable the Neutron L3 Agent and
   # use ODL instead.
@@ -48,6 +48,13 @@ deploy_options:
   # Whether to run vsperf after the install has completed
   #vsperf: false
 
+  # Specify a device for ceph to use for the OSDs. By default a virtual disk
+  # is created for the OSDs. This setting allows you to specify a different
+  # target for the OSDs. The setting must be valid on all overcloud nodes.
+  # The controllers and the compute nodes all have OSDs setup on them and
+  # therefore this device name must be valid on all overcloud nodes.
+  #ceph_device: /dev/sdb
+
   # Set performance options on specific roles. The valid roles are 'Compute', 'Controller'
   # and 'Storage', and the valid sections are 'kernel' and 'nova'
   #performance:
similarity index 77%
rename from config/deploy/os-odl_l2-sdnvpn-ha.yaml
rename to config/deploy/os-odl_l2-bgpvpn-noha.yaml
index f6904f0..011c2b3 100644 (file)
@@ -1,10 +1,10 @@
 global_params:
-  ha_enabled: true
+  ha_enabled: false
 
 deploy_options:
   sdn_controller: opendaylight
   sdn_l3: false
-  tacker: true
+  tacker: false
   congress: true
   sfc: false
   vpn: true
index 8283657..d4a86b0 100644 (file)
@@ -4,7 +4,7 @@ global_params:
 deploy_options:
   sdn_controller: opendaylight
   sdn_l3: false
-  odl_version: boron
+  odl_version: carbon
   tacker: true
   congress: true
   sfc: false
index 53d6181..b9e0cea 100644 (file)
@@ -4,7 +4,7 @@ global_params:
 deploy_options:
   sdn_controller: opendaylight
   sdn_l3: false
-  odl_version: boron
+  odl_version: carbon
   tacker: true
   congress: true
   sfc: false
diff --git a/config/deploy/os-odl_l3-csit-noha.yaml b/config/deploy/os-odl_l3-csit-noha.yaml
new file mode 100644 (file)
index 0000000..ad61e26
--- /dev/null
@@ -0,0 +1,12 @@
+global_params:
+  ha_enabled: false
+
+deploy_options:
+  sdn_controller: opendaylight
+  odl_version: boron
+  sdn_l3: true
+  tacker: false
+  congress: false
+  sfc: false
+  vpn: false
+  ceph: false
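
The new CSIT scenario file is consumed like any other deploy settings file; an
illustrative virtual deployment from the ci/ directory of a git checkout,
mirroring the contrib/simple_deploy.sh helper added later in this change (paths
shown relative for brevity):

    # Illustrative only: virtual deployment of the new CSIT scenario.
    ./deploy.sh -v --debug \
        -n ../config/network/network_settings.yaml \
        -d ../config/deploy/os-odl_l3-csit-noha.yaml
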
index 3e34abe..c08b30c 100644 (file)
@@ -8,6 +8,7 @@ nodes:
     cpus: 2
     memory: 8192
     disk: 40
+    disk_device: sdb
     arch: "x86_64"
     capabilities: "profile:control"
   node2:
@@ -19,6 +20,7 @@ nodes:
     cpus: 2
     memory: 8192
     disk: 40
+    disk_device: sdb
     arch: "x86_64"
     capabilities: "profile:control"
   node3:
@@ -30,6 +32,7 @@ nodes:
     cpus: 2
     memory: 8192
     disk: 40
+    disk_device: sdb
     arch: "x86_64"
     capabilities: "profile:control"
   node4:
@@ -41,6 +44,7 @@ nodes:
     cpus: 2
     memory: 8192
     disk: 40
+    disk_device: sdb
     arch: "x86_64"
     capabilities: "profile:compute"
   node5:
@@ -52,5 +56,6 @@ nodes:
     cpus: 2
     memory: 8192
     disk: 40
+    disk_device: sdb
     arch: "x86_64"
     capabilities: "profile:compute"
index 6cf9ae8..1cba64b 100644 (file)
@@ -66,7 +66,7 @@ networks:                            # Network configurations
         - em1                        # Member Interface to bridge to for installer VM (use multiple values for bond)
       vlan: native                   # VLAN tag to use for this network on Installer VM, native means none
       ip: 192.0.2.1                  # IP to assign to Installer VM on this network
-    usable_ip_range:
+    overcloud_ip_range:
       - 192.0.2.11
       - 192.0.2.99                   # Usable ip range for the overcloud node IPs (including VIPs) and last IP will be
                                      # used for host bridge (i.e. br-admin). If empty entire range is usable.
@@ -121,7 +121,7 @@ networks:                            # Network configurations
       floating_ip_range:
         - 192.168.37.200
         - 192.168.37.220             # Range to allocate to floating IPs for the public network with Neutron
-      usable_ip_range:
+      overcloud_ip_range:
         - 192.168.37.10
         - 192.168.37.199             # Usable ip range for the overcloud node IPs (including VIPs) and last IP will be used for host
                                      # bridge (i.e. br-public). If empty entire range is usable.  Cannot overlap with dhcp_range or introspection_range.
@@ -154,7 +154,7 @@ networks:                            # Network configurations
       floating_ip_range:
         - 192.168.38.200
         - 192.168.38.220             # Range to allocate to floating IPs for the public network with Neutron
-      usable_ip_range:
+      overcloud_ip_range:
         - 192.168.38.10
         - 192.168.38.199             # Usable IP range for overcloud nodes (including VIPs), usually this is a shared subnet.
                                      # Cannot overlap with dhcp_range or introspection_range.
@@ -214,4 +214,6 @@ apex:
     admin:
       introspection_range:
         - 192.0.2.100
-        - 192.0.2.120                # Range used for introspection phase (examining nodes).  This cannot overlap with dhcp_range or usable_ip_range.
+        - 192.0.2.120                # Range used for introspection phase (examining nodes).  This cannot overlap with dhcp_range or overcloud_ip_range.
+                                     # If the external network 'public' is disabled, then this range will be re-used to configure the floating ip range
+                                     # for the overcloud default external network
index b12614f..54c4113 100644 (file)
@@ -66,7 +66,7 @@ networks:                            # Network configurations
         - em1                        # Member Interface to bridge to for installer VM (use multiple values for bond)
       vlan: native                   # VLAN tag to use for this network on Installer VM, native means none
       ip: 192.0.2.1                  # IP to assign to Installer VM on this network
-    usable_ip_range:
+    overcloud_ip_range:
       - 192.0.2.11
       - 192.0.2.99                   # Usable ip range for the overcloud node IPs (including VIPs) and last IP will be
                                      # used for host bridge (i.e. br-admin).
@@ -76,7 +76,7 @@ networks:                            # Network configurations
     dhcp_range:
       - 192.0.2.2
       - 192.0.2.10                   # DHCP range for the admin network, if empty it will be automatically provisioned.
-                                     # Cannot overlap with usable_ip_range or introspection_range.
+                                     # Cannot overlap with overcloud_ip_range or introspection_range.
     nic_mapping:                     # Mapping of network configuration for Overcloud Nodes
       compute:                       # Mapping for compute profile (nodes that will be used as Compute nodes)
         phys_type: interface         # Physical interface type (interface or bond)
@@ -177,4 +177,6 @@ apex:
     admin:
       introspection_range:
         - 192.0.2.100
-        - 192.0.2.120                # Range used for introspection phase (examining nodes).  This cannot overlap with dhcp_range or usable_ip_range.
+        - 192.0.2.120                # Range used for introspection phase (examining nodes).  This cannot overlap with dhcp_range or overcloud_ip_range.
+                                     # If the external network 'public' is disabled, then this range will be re-used to configure the floating ip range
+                                     # for the overcloud default external network
index c514364..d892289 100644 (file)
@@ -66,7 +66,7 @@ networks:                            # Network configurations
         - em1                        # Member Interface to bridge to for installer VM (use multiple values for bond)
       vlan: native                   # VLAN tag to use for this network on Installer VM, native means none
       ip: 192.0.2.1                  # IP to assign to Installer VM on this network
-    usable_ip_range:
+    overcloud_ip_range:
       - 192.0.2.11
       - 192.0.2.99                   # Usable ip range for the overcloud node IPs (including VIPs) and last IP will be
                                      # used for host bridge (i.e. br-admin). If empty entire range is usable.
@@ -76,7 +76,7 @@ networks:                            # Network configurations
     dhcp_range:
       - 192.0.2.2
       - 192.0.2.10                   # DHCP range for the admin network, if empty it will be automatically provisioned.
-                                     # Cannot overlap with usable_ip_range or introspection_range.
+                                     # Cannot overlap with overcloud_ip_range or introspection_range.
     nic_mapping:                     # Mapping of network configuration for Overcloud Nodes
       compute:                       # Mapping for compute profile (nodes that will be used as Compute nodes)
         phys_type: interface         # Physical interface type (interface or bond)
@@ -122,7 +122,7 @@ networks:                            # Network configurations
       floating_ip_range:
         - 192.168.37.200
         - 192.168.37.220             # Range to allocate to floating IPs for the public network with Neutron
-      usable_ip_range:
+      overcloud_ip_range:
         - 192.168.37.10
         - 192.168.37.199             # Usable IP range on the public network, usually this is a shared subnet
       nic_mapping:                   # Mapping of network configuration for Overcloud Nodes
@@ -154,7 +154,7 @@ networks:                            # Network configurations
       floating_ip_range:
         - 192.168.38.200
         - 192.168.38.220             # Range to allocate to floating IPs for the public network with Neutron
-      usable_ip_range:
+      overcloud_ip_range:
         - 192.168.38.10
         - 192.168.38.199             # Usable ip range for the overcloud node IPs (including VIPs) and last IP will be used for host
                                      # bridge (i.e. br-public). If empty entire range is usable.
@@ -214,4 +214,6 @@ apex:
     admin:
       introspection_range:
         - 192.0.2.100
-        - 192.0.2.120                # Range used for introspection phase (examining nodes).  This cannot overlap with dhcp_range or usable_ip_range.
+        - 192.0.2.120                # Range used for introspection phase (examining nodes).  This cannot overlap with dhcp_range or overcloud_ip_range.
+                                     # If the external network 'public' is disabled, then this range will be re-used to configure the floating ip range
+                                     # for the overcloud default external network
diff --git a/config/yum.repos.d/opnfv-apex.repo b/config/yum.repos.d/opnfv-apex.repo
new file mode 100644 (file)
index 0000000..aff0942
--- /dev/null
@@ -0,0 +1,6 @@
+[opnfv-apex]
+name=OPNFV Apex Danube
+baseurl=http://artifacts.opnfv.org/apex/danube/yumrepo/
+failovermethod=priority
+enabled=1
+gpgcheck=0
diff --git a/contrib/simple_deploy.sh b/contrib/simple_deploy.sh
new file mode 100644 (file)
index 0000000..6ad727d
--- /dev/null
@@ -0,0 +1,19 @@
+#!/bin/bash
+set -e
+apex_home=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/../
+export CONFIG=$apex_home/build
+export LIB=$apex_home/lib
+export RESOURCES=$apex_home/.build/images/
+export PYTHONPATH=$PYTHONPATH:$apex_home/lib/python
+$apex_home/ci/dev_dep_check.sh || true
+$apex_home/ci/clean.sh
+pushd $apex_home/build
+make clean
+make undercloud
+make overcloud-opendaylight
+popd
+pushd $apex_home/ci
+echo "All further output will be piped to $PWD/nohup.out"
+(nohup ./deploy.sh -v -n $apex_home/config/network/network_settings.yaml -d $apex_home/config/deploy/os-odl_l3-nofeature-noha.yaml &)
+tail -f nohup.out
+popd
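
Note that this helper still exports the legacy CONFIG and RESOURCES names, which
the deprecation shims added to ci/clean.sh and ci/deploy.sh above will warn
about; the forward-looking equivalents would be:

    # Assumed forward-compatible exports matching the CONFIG->BASE and
    # RESOURCES->IMAGES mapping introduced above.
    export BASE=$apex_home/build
    export IMAGES=$apex_home/.build/images/
    export LIB=$apex_home/lib
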
index c2b38d0..3353678 100644 (file)
@@ -44,6 +44,7 @@ will run the following services:
 - OpenDaylight
 - HA Proxy
 - Pacemaker & VIPs
+- Ceph Monitors and OSDs
 
 Stateless OpenStack services
   All running stateless OpenStack services are load balanced by HA Proxy.
@@ -77,6 +78,12 @@ Pacemaker & VIPs
   start up order and Virtual IPs associated with specific services are running
   on the proper host.
 
+Ceph Monitors & OSDs
+  The Ceph monitors run on each of the control nodes. Each control node also
+  has a Ceph OSD running on it. By default the OSDs use an autogenerated
+  virtual disk as their target device. A non-autogenerated device can be
+  specified in the deploy file.
+
 VM Migration is configured and VMs can be evacuated as needed or as invoked
 by tools such as heat as part of a monitored stack deployment in the overcloud.
 
index 878a49d..83cda32 100644 (file)
@@ -94,9 +94,10 @@ Install Bare Metal Jumphost
     support is completed.
 
 1b. If your Jump host already has CentOS 7 with libvirt running on it then
-    install the install the RDO Release RPM:
+    install the RDO Newton Release RPM and epel-release:
 
-    ``sudo yum install -y https://www.rdoproject.org/repos/rdo-release.rpm``
+    ``sudo yum install https://repos.fedorapeople.org/repos/openstack/openstack-newton/rdo-release-newton-4.noarch.rpm``
+    ``sudo yum install epel-release``
 
     The RDO Project release repository is needed to install OpenVSwitch, which
     is a dependency of opnfv-apex. If you do not have external connectivity to
@@ -113,11 +114,26 @@ Install Bare Metal Jumphost
     the USB device as the boot media on your Jumphost
 
 2b. If your Jump host already has CentOS 7 with libvirt running on it then
-    install the opnfv-apex RPMs from the OPNFV artifacts site
-    <http://artifacts.opnfv.org/apex.html>. The following RPMS are available
-    for installation:
+    install the opnfv-apex RPMs using the OPNFV artifacts yum repo. This yum
+    repo is created at release time and will not exist before release day.
+
+    ``sudo yum install http://artifacts.opnfv.org/apex/danube/opnfv-apex-release-danube.noarch.rpm``
+
+    Once you have installed the repo definitions for Apex, RDO and EPEL,
+    install Apex with yum:
+
+    ``sudo yum install opnfv-apex``
+
+    If ONOS will be used, install the ONOS rpm instead of the opnfv-apex rpm.
 
-    - opnfv-apex                  - OpenDaylight L2 / L3 and ONOS support *
+    ``sudo yum install opnfv-apex-onos``
+
+2c. If you choose not to use the Apex yum repo or you choose to use
+    pre-released RPMs, you can download and install the required RPMs from the
+    artifacts site <http://artifacts.opnfv.org/apex.html>. The following RPMs
+    are available for installation:
+
+    - opnfv-apex                  - OpenDaylight L2 / L3 and ODL SFC support *
     - opnfv-apex-onos             - ONOS support *
     - opnfv-apex-undercloud       - (reqed) Undercloud Image
     - opnfv-apex-common           - (reqed) Supporting config files and scripts
@@ -136,20 +152,18 @@ Install Bare Metal Jumphost
     no longer carry them and they will not need special handling for
     installation.
 
-    Python 3.4 is also required and it needs to be installed if you are using
-    the Centos 7 base image:
 
+    The EPEL and RDO yum repos are still required:
     ``sudo yum install epel-release``
-    ``sudo yum install python34``
+    ``sudo yum install https://repos.fedorapeople.org/repos/openstack/openstack-newton/rdo-release-newton-4.noarch.rpm``
 
-    To install these RPMs download them to the local disk on your CentOS 7
-    install and pass the file names directly to yum:
+    Once the Apex RPMs are downloaded, install them by passing the file names
+    directly to yum:
     ``sudo yum install python34-markupsafe-<version>.rpm
     python3-jinja2-<version>.rpm python3-ipmi-<version>.rpm``
     ``sudo yum install opnfv-apex-<version>.rpm
     opnfv-apex-undercloud-<version>.rpm opnfv-apex-common-<version>.rpm``
 
-
 3.  After the operating system and the opnfv-apex RPMs are installed, login to
     your Jumphost as root.
 
@@ -188,6 +202,7 @@ IPMI configuration information gathered in section
     - ``cpus``: (Introspected*) CPU cores available
     - ``memory``: (Introspected*) Memory available in Mib
     - ``disk``: (Introspected*) Disk space available in Gb
+    - ``disk_device``: (Opt***) Root disk device to use for installation
     - ``arch``: (Introspected*) System architecture
     - ``capabilities``: (Opt**) Node's role in deployment
         values: profile:control or profile:compute
@@ -199,6 +214,14 @@ IPMI configuration information gathered in section
     ** If capabilities profile is not specified then Apex will select node's roles
     in the OPNFV cluster in a non-deterministic fashion.
 
+    \*** disk_device declares which hard disk to use as the root device for
+    installation.  The format is a comma-delimited list of devices, such as
+    "sda,sdb,sdc".  The disk chosen will be the first device in the list which
+    is found by introspection to exist on the system.  Currently, only a single
+    definition is allowed for all nodes.  Therefore, if multiple disk_device
+    definitions occur within the inventory, only the last definition
+    encountered will be applied to all nodes.
+
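To illustrate the ``disk_device`` field, a hedged single-node inventory
fragment (all values are examples only; the field names follow the list
above):

    nodes:
      node1:
        mac_address: "00:1e:67:4f:aa:bb"
        ipmi_ip: 192.0.2.21
        ipmi_user: admin
        ipmi_pass: password
        cpus: 2
        memory: 8192
        disk: 40
        disk_device: sda,sdb,sdc
        arch: x86_64
        capabilities: profile:control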
 Creating the Settings Files
 ---------------------------
 
index 1b3fe87..507b671 100644 (file)
@@ -33,7 +33,7 @@ Network requirements include:
 
        -  Private Tenant-Networking Network*
 
-       -  External Network
+       -  External Network*
 
        -  Storage Network*
 
index 927c07c..3e6e0aa 100644 (file)
@@ -222,6 +222,7 @@ Deliverables
 Software Deliverables
 ~~~~~~~~~~~~~~~~~~~~~
 - Apex .iso file
+- Apex release .rpm (opnfv-apex-release)
 - Apex overcloud .rpm (opnfv-apex) - For nosdn and OpenDaylight Scenarios
 - Apex overcloud onos .rpm (opnfv-apex-onos) - ONOS Scenarios
 - Apex undercloud .rpm (opnfv-apex-undercloud)
@@ -347,6 +348,33 @@ Scenario os-odl_l2-fdio-noha known issues
 * `APEX-217 <https://jira.opnfv.org/browse/APEX-217>`_:
    qemu not configured with correct group:user
 
+Scenario os-nosdn-fdio-noha known issues
+----------------------------------------
+
+Note that a set of manual configuration steps needs to be performed
+after an automated deployment for the scenario to be fully functional.
+Please refer to `FDS-159 <https://jira.opnfv.org/browse/FDS-159>`_ and
+`FDS-160 <https://jira.opnfv.org/browse/FDS-160>`_ for details.
+
+* `FDS-155 <https://jira.opnfv.org/browse/FDS-155>`_:
+   os-nosdn-fdio-noha scenario: tempest_smoke_serial causes
+   mariadb/mysqld process to hang
+* `FDS-156 <https://jira.opnfv.org/browse/FDS-156>`_:
+   os-nosdn-fdio-noha scenario: Race conditions for
+   network-vif-plugged notification
+* `FDS-157 <https://jira.opnfv.org/browse/FDS-157>`_:
+   os-nosdn-fdio-noha scenario: Intermittently VMs
+   would get assigned 2 IPs instead of 1
+* `FDS-158 <https://jira.opnfv.org/browse/FDS-158>`_:
+   os-nosdn-fdio-noha scenario: VM start/launch fails with
+   "no more IP addresses" in neutron logs
+* `FDS-159 <https://jira.opnfv.org/browse/FDS-159>`_:
+   os-nosdn-fdio-noha scenario: Security groups not yet supported
+* `FDS-160 <https://jira.opnfv.org/browse/FDS-160>`_:
+   os-nosdn-fdio-noha scenario: Vlan fix on controller
+* `FDS-161 <https://jira.opnfv.org/browse/FDS-161>`_:
+   os-nosdn-fdio-noha scenario: VPP fails with certain UCS B-series blades
+
 .. _HAIssues:
 
 General HA scenario known issues
index 2d11345..709dbf9 100644 (file)
@@ -281,7 +281,7 @@ contains_prefix() {
 #params: none
 function verify_internet {
   if ping -c 2 $ping_site > /dev/null; then
-    if ping -c 2 www.google.com > /dev/null; then
+    if ping -c 2 $dnslookup_site > /dev/null; then
       echo "${blue}Internet connectivity detected${reset}"
       return 0
     else
index 1d238f8..b979996 100755 (executable)
@@ -75,7 +75,7 @@ EOF
       if [[ "$network" != "admin" && "$network" != "external" ]]; then
         continue
       fi
-      this_interface=$(eval echo \${${network}_bridged_interface})
+      this_interface=$(eval echo \${${network}_installer_vm_members})
       # check if this is a bridged interface for this network
       if [[ ! -z "$this_interface" || "$this_interface" != "none" ]]; then
         if ! attach_interface_to_ovs ${NET_MAP[$network]} ${this_interface} ${network}; then
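The ``${network}_installer_vm_members`` variable read above is flattened out
of the network settings file; a hedged fragment of the expected shape for the
admin network (interface name and address are illustrative):

    networks:
      admin:
        installer_vm:
          members:
            - em1
          ip: 192.0.2.1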
index 4b592b4..980478c 100755 (executable)
@@ -55,15 +55,15 @@ function overcloud_deploy {
 
 
   # Make sure the correct overcloud image is available
-  if [ ! -f $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 ]; then
-      echo "${red} $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 is required to execute your deployment."
+  if [ ! -f $IMAGES/overcloud-full-${SDN_IMAGE}.qcow2 ]; then
+      echo "${red} $IMAGES/overcloud-full-${SDN_IMAGE}.qcow2 is required to execute your deployment."
       echo "Please install the opnfv-apex package to provide this overcloud image for deployment.${reset}"
       exit 1
   fi
 
   echo "Copying overcloud image to Undercloud"
   ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "rm -f overcloud-full.qcow2"
-  scp ${SSH_OPTIONS[@]} $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 "stack@$UNDERCLOUD":overcloud-full.qcow2
+  scp ${SSH_OPTIONS[@]} $IMAGES/overcloud-full-${SDN_IMAGE}.qcow2 "stack@$UNDERCLOUD":overcloud-full.qcow2
 
   # Install ovs-dpdk inside the overcloud image if it is enabled.
   if [[ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' || "${deploy_options_array['dataplane']}" == 'fdio' ]]; then
@@ -128,10 +128,22 @@ EOI
   fi
 
   # Set ODL version accordingly
-  if [[ "${deploy_options_array['sdn_controller']}" == 'opendaylight' && "${deploy_options_array['odl_version']}" == 'boron' ]]; then
+  if [[ "${deploy_options_array['sdn_controller']}" == 'opendaylight' && -n "${deploy_options_array['odl_version']}" ]]; then
+    case "${deploy_options_array['odl_version']}" in
+      beryllium) odl_version=''
+              ;;
+      boron)  odl_version='boron'
+              ;;
+      carbon) odl_version='master'
+              ;;
+      *) echo -e "${red}Invalid ODL version ${deploy_options_array['odl_version']}.  Please use 'carbon' or 'boron' values.${reset}"
+         exit 1
+         ;;
+    esac
+
     ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
       LIBGUESTFS_BACKEND=direct virt-customize --run-command "yum -y remove opendaylight" \
-                                               --run-command "yum -y install /root/boron/*" \
+                                               --run-command "yum -y install /root/${odl_version}/*" \
                                                -a overcloud-full.qcow2
 EOI
   fi
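For reference, a hedged deploy-settings fragment that would reach the case
statement above (only the two keys shown are taken from this change; the
values are illustrative):

    deploy_options:
      sdn_controller: opendaylight
      odl_version: boron    # one of beryllium, boron or carbon per the case statement above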
@@ -204,8 +216,10 @@ EOI
     DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/numa.yaml"
   fi
 
-  # make sure ceph is installed
-  DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml"
+  # check if ceph should be enabled
+  if [ "${deploy_options_array['ceph']}" == 'True' ]; then
+    DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml"
+  fi
 
   #DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/network-isolation.yaml"
   DEPLOY_OPTIONS+=" -e network-environment.yaml"
@@ -247,22 +261,22 @@ EOI
      DEPLOY_OPTIONS+=" -e virtual-environment.yaml"
   fi
 
-  DEPLOY_OPTIONS+=" -e opnfv-environment.yaml"
+  DEPLOY_OPTIONS+=" -e ${ENV_FILE}"
 
   echo -e "${blue}INFO: Deploy options set:\n${DEPLOY_OPTIONS}${reset}"
 
   ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
 if [ "${deploy_options_array['tacker']}" == 'False' ]; then
-    sed -i '/EnableTacker:/c\  EnableTacker: false' opnfv-environment.yaml
+    sed -i '/EnableTacker:/c\  EnableTacker: false' ${ENV_FILE}
 fi
 
 # Create a key for use by nova for live migration
 echo "Creating nova SSH key for nova resize support"
 ssh-keygen -f nova_id_rsa -b 1024 -P ""
 public_key=\'\$(cat nova_id_rsa.pub | cut -d ' ' -f 2)\'
-sed -i "s#replace_public_key:#key: \$public_key#g" opnfv-environment.yaml
-python -c 'open("opnfv-environment-new.yaml", "w").write((open("opnfv-environment.yaml").read().replace("replace_private_key:", "key: \"" + "".join(open("nova_id_rsa").readlines()).replace("\\n","\\\n") + "\"")))'
-mv -f opnfv-environment-new.yaml opnfv-environment.yaml
+sed -i "s#replace_public_key:#key: \$public_key#g" ${ENV_FILE}
+python -c 'open("opnfv-environment-new.yaml", "w").write((open("${ENV_FILE}").read().replace("replace_private_key:", "key: \"" + "".join(open("nova_id_rsa").readlines()).replace("\\n","\\\n") + "\"")))'
+mv -f opnfv-environment-new.yaml ${ENV_FILE}
 
 source stackrc
 set -o errexit
@@ -275,11 +289,19 @@ openstack overcloud image upload
 
 echo "Configuring undercloud and discovering nodes"
 openstack baremetal import --json instackenv.json
-openstack baremetal configure boot
+
 bash -x set_perf_images.sh ${performance_roles[@]}
-#if [[ -z "$virtual" ]]; then
-#  openstack baremetal introspection bulk start
-#fi
+if [[ -z "$virtual" ]]; then
+  openstack baremetal introspection bulk start
+  if [[ -n "$root_disk_list" ]]; then
+    openstack baremetal configure boot --root-device=${root_disk_list}
+  else
+    openstack baremetal configure boot
+  fi
+else
+  openstack baremetal configure boot
+fi
+
 echo "Configuring flavors"
 for flavor in baremetal control compute; do
   echo -e "${blue}INFO: Updating flavor: \${flavor}${reset}"
@@ -300,7 +322,7 @@ for dns_server in ${dns_servers}; do
   dns_server_ext="\${dns_server_ext} --dns-nameserver \${dns_server}"
 done
 neutron subnet-update \$(neutron subnet-list | grep -Ev "id|tenant|external|storage" | grep -v \\\\-\\\\- | awk {'print \$2'}) \${dns_server_ext}
-sed -i '/CloudDomain:/c\  CloudDomain: '${domain_name} opnfv-environment.yaml
+sed -i '/CloudDomain:/c\  CloudDomain: '${domain_name} ${ENV_FILE}
 echo "Executing overcloud deployment, this should run for an extended period without output."
 sleep 60 #wait for Hypervisor stats to check-in to nova
 # save deploy command so it can be used for debugging
index 84da75c..94eac01 100755 (executable)
@@ -25,7 +25,7 @@ parse_network_settings() {
       done
   fi
 
-  if output=$(python3 -B $LIB/python/apex_python_utils.py parse-net-settings -s $NETSETS -td $APEX_TMP_DIR -e $CONFIG/network-environment.yaml $parse_ext); then
+  if output=$(python3 -B $LIB/python/apex_python_utils.py parse-net-settings -s $NETSETS -td $APEX_TMP_DIR -e $BASE/network-environment.yaml $parse_ext); then
       echo -e "${blue}${output}${reset}"
       eval "$output"
   else
@@ -59,6 +59,7 @@ parse_deploy_settings() {
 ##params: none
 ##usage: parse_inventory_file
 parse_inventory_file() {
+  local output
   if [ "$virtual" == "TRUE" ]; then inv_virt="--virtual"; fi
   if [[ "$ha_enabled" == "True" ]]; then inv_ha="--ha"; fi
   instackenv_output=$(python3 -B $LIB/python/apex_python_utils.py parse-inventory -f $INVENTORY_FILE $inv_virt $inv_ha)
@@ -69,5 +70,12 @@ cat > instackenv.json << EOF
 $instackenv_output
 EOF
 EOI
+  if output=$(python3 -B $LIB/python/apex_python_utils.py parse-inventory -f $INVENTORY_FILE $inv_virt $inv_ha --export-bash); then
+    echo -e "${blue}${output}${reset}"
+    eval "$output"
+  else
+    echo -e "${red}ERROR: Failed to parse inventory bash settings file ${INVENTORY_FILE}${reset}"
+    exit 1
+  fi
 
 }
index 604eb70..51287c4 100755 (executable)
 function configure_post_install {
   local opnfv_attach_networks ovs_ip ip_range net_cidr tmp_ip af external_network_ipv6
   external_network_ipv6=False
-  opnfv_attach_networks="admin external"
+  opnfv_attach_networks="admin"
+  if [[ $enabled_network_list =~ "external" ]]; then
+    opnfv_attach_networks+=' external'
+  fi
 
   echo -e "${blue}INFO: Post Install Configuration Running...${reset}"
 
@@ -39,7 +42,7 @@ EOI
     else
       echo -e "${blue}INFO: OVS Bridge ${NET_MAP[$network]} missing IP, will configure${reset}"
       # use last IP of allocation pool
-      eval "ip_range=\${${network}_usable_ip_range}"
+      eval "ip_range=\${${network}_overcloud_ip_range}"
       ovs_ip=${ip_range##*,}
       eval "net_cidr=\${${network}_cidr}"
       if [[ $ovs_ip =~ ^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
@@ -90,25 +93,28 @@ echo "Configuring Neutron external network"
 if [[ -n "$external_nic_mapping_compute_vlan" && "$external_nic_mapping_compute_vlan" != 'native' ]]; then
   neutron net-create external  --router:external=True --tenant-id \$(openstack project show service | grep id | awk '{ print \$4 }') --provider:network_type vlan --provider:segmentation_id ${external_nic_mapping_compute_vlan} --provider:physical_network datacentre
 else
-  neutron net-create external --router:external=True --tenant-id \$(openstack project show service | grep id | awk '{ print \$4 }')
+  neutron net-create external --router:external=True --tenant-id \$(openstack project show service | grep id | awk '{ print \$4 }') --provider:network_type flat --provider:physical_network datacentre
 fi
 if [ "$external_network_ipv6" == "True" ]; then
   neutron subnet-create --name external-net --tenant-id \$(openstack project show service | grep id | awk '{ print \$4 }') external --ip_version 6 --ipv6_ra_mode slaac --ipv6_address_mode slaac --gateway ${external_gateway} --allocation-pool start=${external_floating_ip_range%%,*},end=${external_floating_ip_range##*,} ${external_cidr}
-else
+elif [[ "$enabled_network_list" =~ "external" ]]; then
   neutron subnet-create --name external-net --tenant-id \$(openstack project show service | grep id | awk '{ print \$4 }') --disable-dhcp external --gateway ${external_gateway} --allocation-pool start=${external_floating_ip_range%%,*},end=${external_floating_ip_range##*,} ${external_cidr}
+else
+  # we re-use the introspection range for floating ips with single admin network
+  neutron subnet-create --name external-net --tenant-id \$(openstack project show service | grep id | awk '{ print \$4 }') --disable-dhcp external --gateway ${admin_gateway} --allocation-pool start=${admin_introspection_range%%,*},end=${admin_introspection_range##*,} ${admin_cidr}
 fi
 
 echo "Removing sahara endpoint and service"
 sahara_service_id=\$(openstack service list | grep sahara | cut -d ' ' -f 2)
 sahara_endpoint_id=\$(openstack endpoint list | grep sahara | cut -d ' ' -f 2)
-openstack endpoint delete \$sahara_endpoint_id
-openstack service delete \$sahara_service_id
+[[ -n "\$sahara_endpoint_id" ]] && openstack endpoint delete \$sahara_endpoint_id
+[[ -n "\$sahara_service_id" ]] && openstack service delete \$sahara_service_id
 
 echo "Removing swift endpoint and service"
 swift_service_id=\$(openstack service list | grep swift | cut -d ' ' -f 2)
 swift_endpoint_id=\$(openstack endpoint list | grep swift | cut -d ' ' -f 2)
-openstack endpoint delete \$swift_endpoint_id
-openstack service delete \$swift_service_id
+[[ -n "\$swift_endpoint_id" ]] && openstack endpoint delete \$swift_endpoint_id
+[[ -n "\$swift_service_id" ]] && openstack service delete \$swift_service_id
 
 if [ "${deploy_options_array['dataplane']}" == 'fdio' ] || [ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' ]; then
     for flavor in \$(openstack flavor list -c Name -f value); do
@@ -117,7 +123,9 @@ if [ "${deploy_options_array['dataplane']}" == 'fdio' ] || [ "${deploy_options_a
     done
 fi
 
-if [ "${deploy_options_array['congress']}" == 'True' ]; then
+# TODO: Change this back to True once everything is back in
+#       place with tht and puppet-congress for deployment
+if [ "${deploy_options_array['congress']}" == 'NeverTrue' ]; then
     ds_configs="--config username=\$OS_USERNAME
                 --config tenant_name=\$OS_TENANT_NAME
                 --config password=\$OS_PASSWORD
@@ -145,8 +153,13 @@ EOI
   # for virtual, we NAT external network through Undercloud
   # same goes for baremetal if only jumphost has external connectivity
   if [ "$virtual" == "TRUE" ] || ! test_overcloud_connectivity && [ "$external_network_ipv6" != "True" ]; then
-    if ! configure_undercloud_nat ${external_cidr}; then
-      echo -e "${red}ERROR: Unable to NAT undercloud with external net: ${external_cidr}${reset}"
+    if [[ "$enabled_network_list" =~ "external" ]]; then
+      nat_cidr=${external_cidr}
+    else
+      nat_cidr=${admin_cidr}
+    fi
+    if ! configure_undercloud_nat ${nat_cidr}; then
+      echo -e "${red}ERROR: Unable to NAT undercloud with external net: ${nat_cidr}${reset}"
       exit 1
     else
       echo -e "${blue}INFO: Undercloud VM has been setup to NAT Overcloud external network${reset}"
@@ -208,7 +221,7 @@ done
 # Print out the undercloud IP and dashboard URL
 source stackrc
 echo "Undercloud IP: $UNDERCLOUD, please connect by doing 'opnfv-util undercloud'"
-echo "Overcloud dashboard available at http://\$(openstack stack output show overcloud PublicVip | sed 's/"//g')/dashboard"
+echo "Overcloud dashboard available at http://\$(openstack stack output show overcloud PublicVip -f json | jq -r .output_value)/dashboard"
 EOI
 
 if [[ "$ha_enabled" == 'True' ]]; then
index 741bb4f..3aa28ea 100644 (file)
@@ -27,3 +27,4 @@ COMPUTE_PRE = "OS::TripleO::ComputeExtraConfigPre"
 CONTROLLER_PRE = "OS::TripleO::ControllerExtraConfigPre"
 PRE_CONFIG_DIR = "/usr/share/openstack-tripleo-heat-templates/puppet/" \
                  "extraconfig/pre_deploy/"
+DEFAULT_ROOT_DEV = 'sda'
index d623638..8e6896f 100644 (file)
@@ -21,3 +21,11 @@ def parse_yaml(yaml_file):
     with open(yaml_file) as f:
         parsed_dict = yaml.safe_load(f)
         return parsed_dict
+
+
+def write_str(bash_str, path=None):
+    if path:
+        with open(path, 'w') as file:
+            file.write(bash_str)
+    else:
+        print(bash_str)
index 5490c6e..3133d7f 100644 (file)
@@ -11,6 +11,8 @@
 import yaml
 import logging
 
+from .common import utils
+
 REQ_DEPLOY_SETTINGS = ['sdn_controller',
                        'odl_version',
                        'sdn_l3',
@@ -19,9 +21,10 @@ REQ_DEPLOY_SETTINGS = ['sdn_controller',
                        'dataplane',
                        'sfc',
                        'vpn',
-                       'vpp']
+                       'vpp',
+                       'ceph']
 
-OPT_DEPLOY_SETTINGS = ['performance', 'vsperf']
+OPT_DEPLOY_SETTINGS = ['performance', 'vsperf', 'ceph_device']
 
 VALID_ROLES = ['Controller', 'Compute', 'ObjectStorage']
 VALID_PERF_OPTS = ['kernel', 'nova', 'vpp']
@@ -38,7 +41,7 @@ class DeploySettings(dict):
     """
     def __init__(self, filename):
         init_dict = {}
-        if type(filename) is str:
+        if isinstance(filename, str):
             with open(filename, 'r') as deploy_settings_file:
                 init_dict = yaml.safe_load(deploy_settings_file)
         else:
@@ -81,6 +84,8 @@ class DeploySettings(dict):
             if req_set not in deploy_options:
                 if req_set == 'dataplane':
                     self['deploy_options'][req_set] = 'ovs'
+                elif req_set == 'ceph':
+                    self['deploy_options'][req_set] = True
                 else:
                     self['deploy_options'][req_set] = False
 
@@ -162,12 +167,7 @@ class DeploySettings(dict):
         if 'performance' in self['deploy_options']:
             bash_str += self._dump_performance()
         bash_str += self._dump_deploy_options_array()
-
-        if path:
-            with open(path, 'w') as file:
-                file.write(bash_str)
-        else:
-            print(bash_str)
+        utils.write_str(bash_str, path)
 
 
 class DeploySettingsException(Exception):
index aa21968..ce16ef4 100644 (file)
 import yaml
 import json
 
+from .common import constants
+from .common import utils
+
 
 class Inventory(dict):
     """
     This class parses an APEX inventory yaml file into an object. It
     generates or detects all missing fields for deployment.
 
-    It then collapses one level of identifcation from the object to
+    It then collapses one level of identification from the object to
     convert it to a structure that can be dumped into a json file formatted
     such that Triple-O can read the resulting json as an instackenv.json file.
     """
     def __init__(self, source, ha=True, virtual=False):
         init_dict = {}
-        if type(source) is str:
-            with open(source, 'r') as network_settings_file:
-                yaml_dict = yaml.safe_load(network_settings_file)
+        self.root_device = constants.DEFAULT_ROOT_DEV
+        if isinstance(source, str):
+            with open(source, 'r') as inventory_file:
+                yaml_dict = yaml.safe_load(inventory_file)
             # collapse node identifiers from the structure
             init_dict['nodes'] = list(map(lambda n: n[1],
                                           yaml_dict['nodes'].items()))
@@ -40,8 +44,13 @@ class Inventory(dict):
             node['pm_user'] = node['ipmi_user']
             node['mac'] = [node['mac_address']]
 
-            for i in ('ipmi_ip', 'ipmi_pass', 'ipmi_user', 'mac_address'):
-                del i
+            for i in ('ipmi_ip', 'ipmi_pass', 'ipmi_user', 'mac_address',
+                      'disk_device'):
+                if i == 'disk_device' and 'disk_device' in node.keys():
+                    self.root_device = node[i]
+                else:
+                    continue
+                del node[i]
 
             return node
 
@@ -53,7 +62,7 @@ class Inventory(dict):
                                      'nodes for HA baremetal deployment')
         elif len(self['nodes']) < 2:
             raise InventoryException('You must provide at least 2 nodes '
-                                     'for non-HA baremetal deployment${reset}')
+                                     'for non-HA baremetal deployment')
 
         if virtual:
             self['arch'] = 'x86_64'
@@ -67,6 +76,16 @@ class Inventory(dict):
     def dump_instackenv_json(self):
         print(json.dumps(dict(self), sort_keys=True, indent=4))
 
+    def dump_bash(self, path=None):
+        """
+        Prints settings for bash consumption.
+
+        If optional path is provided, bash string will be written to the file
+        instead of stdout.
+        """
+        bash_str = "{}={}\n".format('root_disk_list', str(self.root_device))
+        utils.write_str(bash_str, path)
+
 
 class InventoryException(Exception):
     def __init__(self, value):
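The new ``dump_bash`` helper above exports only the detected root device. As a
hedged sketch, an inventory node that carries ``disk_device`` (value
illustrative) yields the corresponding bash assignment:

    nodes:
      node1:
        disk_device: sdb    # remaining node fields omitted
    # Inventory(...).dump_bash() would then print: root_disk_list=sdb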
index ae5c602..4fc6f58 100644 (file)
@@ -21,6 +21,7 @@ from .common.constants import (
     COMPUTE_PRE,
     PRE_CONFIG_DIR
 )
+from .network_settings import NetworkSettings
 
 HEAT_NONE = 'OS::Heat::None'
 PORTS = '/ports'
@@ -63,15 +64,13 @@ class NetworkEnvironment(dict):
         Create Network Environment according to Network Settings
         """
         init_dict = {}
-        if type(filename) is str:
+        if isinstance(filename, str):
             with open(filename, 'r') as net_env_fh:
                 init_dict = yaml.safe_load(net_env_fh)
 
         super().__init__(init_dict)
-        try:
-            enabled_nets = net_settings.enabled_network_list
-        except:
-            raise NetworkEnvException('Invalid Network Setting object')
+        if not isinstance(net_settings, NetworkSettings):
+            raise NetworkEnvException('Invalid Network Settings object')
 
         self._set_tht_dir()
 
@@ -86,19 +85,21 @@ class NetworkEnvironment(dict):
             nets[ADMIN_NETWORK]['installer_vm']['ip']
         self[param_def]['DnsServers'] = net_settings['dns_servers']
 
-        if EXTERNAL_NETWORK in enabled_nets:
-            external_cidr = nets[EXTERNAL_NETWORK][0]['cidr']
+        if EXTERNAL_NETWORK in net_settings.enabled_network_list:
+            external_cidr = net_settings.get_network(EXTERNAL_NETWORK)['cidr']
             self[param_def]['ExternalNetCidr'] = str(external_cidr)
-            if type(nets[EXTERNAL_NETWORK][0]['installer_vm']['vlan']) is int:
+            external_vlan = self._get_vlan(net_settings.get_network(
+                                           EXTERNAL_NETWORK))
+            if isinstance(external_vlan, int):
                 self[param_def]['NeutronExternalNetworkBridge'] = '""'
-                self[param_def]['ExternalNetworkVlanID'] = \
-                    nets[EXTERNAL_NETWORK][0]['installer_vm']['vlan']
-            external_range = nets[EXTERNAL_NETWORK][0]['usable_ip_range']
+                self[param_def]['ExternalNetworkVlanID'] = external_vlan
+            external_range = net_settings.get_network(EXTERNAL_NETWORK)[
+                'overcloud_ip_range']
             self[param_def]['ExternalAllocationPools'] = \
                 [{'start': str(external_range[0]),
                   'end': str(external_range[1])}]
             self[param_def]['ExternalInterfaceDefaultRoute'] = \
-                nets[EXTERNAL_NETWORK][0]['gateway']
+                net_settings.get_network(EXTERNAL_NETWORK)['gateway']
 
             if external_cidr.version == 6:
                 postfix = '/external_v6.yaml'
@@ -110,8 +111,8 @@ class NetworkEnvironment(dict):
         # apply resource registry update for EXTERNAL_RESOURCES
         self._config_resource_reg(EXTERNAL_RESOURCES, postfix)
 
-        if TENANT_NETWORK in enabled_nets:
-            tenant_range = nets[TENANT_NETWORK]['usable_ip_range']
+        if TENANT_NETWORK in net_settings.enabled_network_list:
+            tenant_range = nets[TENANT_NETWORK]['overcloud_ip_range']
             self[param_def]['TenantAllocationPools'] = \
                 [{'start': str(tenant_range[0]),
                   'end': str(tenant_range[1])}]
@@ -123,7 +124,7 @@ class NetworkEnvironment(dict):
                 postfix = '/tenant.yaml'
 
             tenant_vlan = self._get_vlan(nets[TENANT_NETWORK])
-            if type(tenant_vlan) is int:
+            if isinstance(tenant_vlan, int):
                 self[param_def]['TenantNetworkVlanID'] = tenant_vlan
         else:
             postfix = '/noop.yaml'
@@ -131,8 +132,8 @@ class NetworkEnvironment(dict):
         # apply resource registry update for TENANT_RESOURCES
         self._config_resource_reg(TENANT_RESOURCES, postfix)
 
-        if STORAGE_NETWORK in enabled_nets:
-            storage_range = nets[STORAGE_NETWORK]['usable_ip_range']
+        if STORAGE_NETWORK in net_settings.enabled_network_list:
+            storage_range = nets[STORAGE_NETWORK]['overcloud_ip_range']
             self[param_def]['StorageAllocationPools'] = \
                 [{'start': str(storage_range[0]),
                   'end': str(storage_range[1])}]
@@ -143,7 +144,7 @@ class NetworkEnvironment(dict):
             else:
                 postfix = '/storage.yaml'
             storage_vlan = self._get_vlan(nets[STORAGE_NETWORK])
-            if type(storage_vlan) is int:
+            if isinstance(storage_vlan, int):
                 self[param_def]['StorageNetworkVlanID'] = storage_vlan
         else:
             postfix = '/noop.yaml'
@@ -151,8 +152,8 @@ class NetworkEnvironment(dict):
         # apply resource registry update for STORAGE_RESOURCES
         self._config_resource_reg(STORAGE_RESOURCES, postfix)
 
-        if API_NETWORK in enabled_nets:
-            api_range = nets[API_NETWORK]['usable_ip_range']
+        if API_NETWORK in net_settings.enabled_network_list:
+            api_range = nets[API_NETWORK]['overcloud_ip_range']
             self[param_def]['InternalApiAllocationPools'] = \
                 [{'start': str(api_range[0]),
                   'end': str(api_range[1])}]
@@ -163,7 +164,7 @@ class NetworkEnvironment(dict):
             else:
                 postfix = '/internal_api.yaml'
             api_vlan = self._get_vlan(nets[API_NETWORK])
-            if type(api_vlan) is int:
+            if isinstance(api_vlan, int):
                 self[param_def]['InternalApiNetworkVlanID'] = api_vlan
         else:
             postfix = '/noop.yaml'
@@ -184,9 +185,9 @@ class NetworkEnvironment(dict):
                 self[param_def][flag] = True
 
     def _get_vlan(self, network):
-        if type(network['nic_mapping'][CONTROLLER]['vlan']) is int:
+        if isinstance(network['nic_mapping'][CONTROLLER]['vlan'], int):
             return network['nic_mapping'][CONTROLLER]['vlan']
-        elif type(network['nic_mapping'][COMPUTE]['vlan']) is int:
+        elif isinstance(network['nic_mapping'][COMPUTE]['vlan'], int):
             return network['nic_mapping'][COMPUTE]['vlan']
         else:
             return 'native'
index c9f7d45..b04f141 100644 (file)
@@ -12,9 +12,8 @@ import logging
 import ipaddress
 
 from copy import copy
-
-from . import ip_utils
 from .common import utils
+from . import ip_utils
 from .common.constants import (
     CONTROLLER,
     COMPUTE,
@@ -42,7 +41,7 @@ class NetworkSettings(dict):
     """
     def __init__(self, filename):
         init_dict = {}
-        if type(filename) is str:
+        if isinstance(filename, str):
             with open(filename, 'r') as network_settings_file:
                 init_dict = yaml.safe_load(network_settings_file)
         else:
@@ -55,7 +54,7 @@ class NetworkSettings(dict):
             def merge(pri, sec):
                 for key, val in sec.items():
                     if key in pri:
-                        if type(val) is dict:
+                        if isinstance(val, dict):
                             merge(pri[key], val)
                         # else
                         # do not overwrite what's already there
@@ -71,7 +70,14 @@ class NetworkSettings(dict):
 
     def get_network(self, network):
         if network == EXTERNAL_NETWORK and self['networks'][network]:
-            return self['networks'][network][0]
+            for net in self['networks'][network]:
+                if 'public' in net:
+                    return net
+
+            raise NetworkSettingsException("The external network, "
+                                           "'public', should be defined "
+                                           "when external networks are "
+                                           "enabled")
         else:
             return self['networks'][network]
 
@@ -92,14 +98,11 @@ class NetworkSettings(dict):
                 if _network.get('enabled', True):
                     logging.info("{} enabled".format(network))
                     self._config_required_settings(network)
-                    if network == EXTERNAL_NETWORK:
-                        nicmap = _network['nic_mapping']
-                    else:
-                        nicmap = _network['nic_mapping']
+                    nicmap = _network['nic_mapping']
                     iface = nicmap[CONTROLLER]['members'][0]
                     self._config_ip_range(network=network,
                                           interface=iface,
-                                          ip_range='usable_ip_range',
+                                          ip_range='overcloud_ip_range',
                                           start_offset=21, end_offset=21)
                     self.enabled_network_list.append(network)
                     self._validate_overcloud_nic_order(network)
@@ -137,7 +140,7 @@ class NetworkSettings(dict):
 
             if interfaces:
                 interface = interfaces[0]
-                if type(_role.get('vlan', 'native')) is not int and \
+                if not isinstance(_role.get('vlan', 'native'), int) and \
                    any(y == interface for x, y in self.nics[role].items()):
                     raise NetworkSettingsException(
                         "Duplicate {} already specified for "
@@ -183,7 +186,6 @@ class NetworkSettings(dict):
             ip = ipaddress.ip_address(_network['installer_vm']['ip'])
             nic_if = ip_utils.get_interface(ucloud_if_list[0], ip.version)
             if nic_if:
-                ucloud_if_list = [nic_if]
                 logging.info("{}_bridged_interface: {}".
                              format(network, nic_if))
             else:
@@ -312,16 +314,16 @@ class NetworkSettings(dict):
             flatten lists to delim separated strings
             flatten dics to underscored key names and string values
             """
-            if type(obj) is list:
+            if isinstance(obj, list):
                 return "{}=\'{}\'\n".format(name,
                                             delim.join(map(lambda x: str(x),
                                                            obj)))
-            elif type(obj) is dict:
+            elif isinstance(obj, dict):
                 flat_str = ''
                 for k in obj:
                     flat_str += flatten("{}_{}".format(name, k), obj[k])
                 return flat_str
-            elif type(obj) is str:
+            elif isinstance(obj, str):
                 return "{}='{}'\n".format(name, obj)
             else:
                 return "{}={}\n".format(name, str(obj))
@@ -336,11 +338,7 @@ class NetworkSettings(dict):
         bash_str += flatten('dns_servers', self['dns_servers'], ' ')
         bash_str += flatten('domain_name', self['dns-domain'], ' ')
         bash_str += flatten('ntp_server', self['ntp_servers'][0], ' ')
-        if path:
-            with open(path, 'w') as file:
-                file.write(bash_str)
-        else:
-            print(bash_str)
+        utils.write_str(bash_str, path)
 
     def get_ip_addr_family(self,):
         """
index b0ebb27..e21d046 100755 (executable)
@@ -22,7 +22,6 @@ from apex import NetworkEnvironment
 from apex import DeploySettings
 from apex import Inventory
 from apex import ip_utils
-from apex.common.constants import ADMIN_NETWORK
 
 
 def parse_net_settings(args):
@@ -66,7 +65,10 @@ def run_clean(args):
 
 def parse_inventory(args):
     inventory = Inventory(args.file, ha=args.ha, virtual=args.virtual)
-    inventory.dump_instackenv_json()
+    if args.export_bash is True:
+        inventory.dump_bash()
+    else:
+        inventory.dump_instackenv_json()
 
 
 def find_ip(args):
@@ -200,6 +202,11 @@ def get_parser():
                            default=False,
                            action='store_true',
                            help='Indicate if deployment inventory is virtual')
+    inventory.add_argument('--export-bash',
+                           default=False,
+                           dest='export_bash',
+                           action='store_true',
+                           help='Export bash variables from inventory')
     inventory.set_defaults(func=parse_inventory)
 
     clean = subparsers.add_parser('clean',
index 6ba9a54..080fcbb 100755 (executable)
@@ -19,7 +19,7 @@ function setup_undercloud_vm {
       define_vm undercloud hd 30 "$undercloud_nets" 4 12288
 
       ### this doesn't work for some reason I was getting hangup events so using cp instead
-      #virsh vol-upload --pool default --vol undercloud.qcow2 --file $CONFIG/stack/undercloud.qcow2
+      #virsh vol-upload --pool default --vol undercloud.qcow2 --file $BASE/stack/undercloud.qcow2
       #2015-12-05 12:57:20.569+0000: 8755: info : libvirt version: 1.2.8, package: 16.el7_1.5 (CentOS BuildSystem <http://bugs.centos.org>, 2015-11-03-13:56:46, worker1.bsys.centos.org)
       #2015-12-05 12:57:20.569+0000: 8755: warning : virKeepAliveTimerInternal:143 : No response from client 0x7ff1e231e630 after 6 keepalive messages in 35 seconds
       #2015-12-05 12:57:20.569+0000: 8756: warning : virKeepAliveTimerInternal:143 : No response from client 0x7ff1e231e630 after 6 keepalive messages in 35 seconds
@@ -28,14 +28,14 @@ function setup_undercloud_vm {
       #error: Reconnected to the hypervisor
 
       local undercloud_dst=/var/lib/libvirt/images/undercloud.qcow2
-      cp -f $RESOURCES/undercloud.qcow2 $undercloud_dst
+      cp -f $IMAGES/undercloud.qcow2 $undercloud_dst
 
       # resize Undercloud machine
       echo "Checking if Undercloud needs to be resized..."
       undercloud_size=$(LIBGUESTFS_BACKEND=direct virt-filesystems --long -h --all -a $undercloud_dst |grep device | grep -Eo "[0-9\.]+G" | sed -n 's/\([0-9][0-9]*\).*/\1/p')
       if [ "$undercloud_size" -lt 30 ]; then
         qemu-img resize /var/lib/libvirt/images/undercloud.qcow2 +25G
-        LIBGUESTFS_BACKEND=direct virt-resize --expand /dev/sda1 $RESOURCES/undercloud.qcow2 $undercloud_dst
+        LIBGUESTFS_BACKEND=direct virt-resize --expand /dev/sda1 $IMAGES/undercloud.qcow2 $undercloud_dst
         LIBGUESTFS_BACKEND=direct virt-customize -a $undercloud_dst --run-command 'xfs_growfs -d /dev/sda1 || true'
         new_size=$(LIBGUESTFS_BACKEND=direct virt-filesystems --long -h --all -a $undercloud_dst |grep filesystem | grep -Eo "[0-9\.]+G" | sed -n 's/\([0-9][0-9]*\).*/\1/p')
         if [ "$new_size" -lt 30 ]; then
@@ -71,12 +71,12 @@ function setup_undercloud_vm {
   CNT=10
   echo -n "${blue}Waiting for Undercloud's dhcp address${reset}"
   undercloud_mac=$(virsh domiflist undercloud | grep default | awk '{ print $5 }')
-  while ! $(arp -e | grep ${undercloud_mac} > /dev/null) && [ $CNT -gt 0 ]; do
+  while ! $(arp -en | grep ${undercloud_mac} > /dev/null) && [ $CNT -gt 0 ]; do
       echo -n "."
       sleep 10
       CNT=$((CNT-1))
   done
-  UNDERCLOUD=$(arp -e | grep ${undercloud_mac} | awk {'print $1'})
+  UNDERCLOUD=$(arp -en | grep ${undercloud_mac} | awk {'print $1'})
 
   if [ -z "$UNDERCLOUD" ]; then
     echo "\n\nCan't get IP for Undercloud. Can Not Continue."
@@ -136,12 +136,12 @@ function configure_undercloud {
     ovs_dpdk_bridge=''
   fi
 
-  if ! controller_nic_template=$(python3 -B $LIB/python/apex_python_utils.py nic-template -r controller -s $NETSETS -t $CONFIG/nics-template.yaml.jinja2 -e "br-ex"); then
+  if ! controller_nic_template=$(python3 -B $LIB/python/apex_python_utils.py nic-template -r controller -s $NETSETS -t $BASE/nics-template.yaml.jinja2 -e "br-ex"); then
     echo -e "${red}ERROR: Failed to generate controller NIC heat template ${reset}"
     exit 1
   fi
 
-  if ! compute_nic_template=$(python3 -B $LIB/python/apex_python_utils.py nic-template -r compute -s $NETSETS -t $CONFIG/nics-template.yaml.jinja2 -e $ext_net_type -d "$ovs_dpdk_bridge"); then
+  if ! compute_nic_template=$(python3 -B $LIB/python/apex_python_utils.py nic-template -r compute -s $NETSETS -t $BASE/nics-template.yaml.jinja2 -e $ext_net_type -d "$ovs_dpdk_bridge"); then
     echo -e "${red}ERROR: Failed to generate compute NIC heat template ${reset}"
     exit 1
   fi
@@ -207,6 +207,10 @@ openstack-config --set undercloud.conf DEFAULT undercloud_hostname "undercloud.$
 sudo openstack-config --set /etc/ironic/ironic.conf disk_utils iscsi_verify_attempts 30
 sudo openstack-config --set /etc/ironic/ironic.conf disk_partitioner check_device_max_retries 40
 
+if [[ -n "${deploy_options_array['ceph_device']}" ]]; then
+    sed -i '/ExtraConfig/a\\    ceph::profile::params::osds: {\\x27${deploy_options_array['ceph_device']}\\x27: {}}' ${ENV_FILE}
+fi
+
 sudo sed -i '/CephClusterFSID:/c\\  CephClusterFSID: \\x27$(cat /proc/sys/kernel/random/uuid)\\x27' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
 sudo sed -i '/CephMonKey:/c\\  CephMonKey: \\x27'"\$(ceph-authtool --gen-print-key)"'\\x27' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
 sudo sed -i '/CephAdminKey:/c\\  CephAdminKey: \\x27'"\$(ceph-authtool --gen-print-key)"'\\x27' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
@@ -249,6 +253,7 @@ sudo systemctl restart openstack-heat-api
 EOI
 
 # configure external network
+if [[ "$enabled_network_list" =~ "external" ]]; then
   ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" << EOI
 if [[ "$external_installer_vm_vlan" != "native" ]]; then
   cat <<EOF > /etc/sysconfig/network-scripts/ifcfg-vlan${external_installer_vm_vlan}
@@ -270,6 +275,7 @@ else
   fi
 fi
 EOI
+fi
 
 # WORKAROUND: must restart the above services to fix sync problem with nova compute manager
 # TODO: revisit and file a bug if necessary. This should eventually be removed
index bf4128a..c12619a 100644 (file)
@@ -24,7 +24,7 @@ function undercloud_connect {
 ##outputs the Undercloud's IP address
 ##params: none
 function get_undercloud_ip {
-  echo $(arp -a | grep $(virsh domiflist undercloud | grep default |\
+  echo $(arp -an | grep $(virsh domiflist undercloud | grep default |\
     awk '{print $5}') | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+")
 }
 
@@ -80,26 +80,6 @@ function opendaylight_connect {
 ##outputs heat stack deployment failures
 ##params: none
 function debug_stack {
-  local failure_output
-  local phys_id
-  declare -a resource_arr
-  declare -a phys_id_arr
-
   source ~/stackrc
-
-  IFS=$'\n'
-  for resource in $(openstack stack resource list -n 5 overcloud | grep FAILED); do
-    unset IFS
-    resource_arr=(${resource//|/ })
-    phys_id=$(openstack stack resource show ${resource_arr[-1]} ${resource_arr[0]} | grep physical_resource_id 2> /dev/null)
-    if [ -n "$phys_id" ]; then
-      phys_id_arr=(${phys_id//|/ })
-      failure_output+="******************************************************"
-      failure_output+="\n${resource}:\n\n$(openstack stack deployment show ${phys_id_arr[-1]} 2> /dev/null)"
-      failure_output+="\n******************************************************"
-    fi
-    unset phys_id
-  done
-
-  echo -e $failure_output
+  openstack stack failures list overcloud --long
 }
index 5ebbf3a..a9a7b74 100755 (executable)
@@ -1,3 +1,3 @@
 #!/usr/bin/env bash
 
-python ~/snaps/snaps/unit_test_suite.py -e ~stack/overcloudrc -n external -l INFO &> ~stack/snoke-tests.out
\ No newline at end of file
+python ~/snaps/snaps/unit_test_suite.py -e ~stack/overcloudrc -n external -k -l INFO &> ~stack/smoke-tests.out
\ No newline at end of file
index 2af187b..a0af121 100644 (file)
@@ -22,7 +22,8 @@ deploy_files = ('deploy_settings.yaml',
                 'os-nosdn-nofeature-noha.yaml',
                 'os-nosdn-ovs-noha.yaml',
                 'os-ocl-nofeature-ha.yaml',
-                'os-odl_l2-sdnvpn-ha.yaml',
+                'os-odl_l2-bgpvpn-ha.yaml',
+                'os-odl_l2-bgpvpn-noha.yaml',
                 'os-odl_l3-nofeature-ha.yaml',
                 'os-nosdn-nofeature-ha.yaml',
                 'os-nosdn-ovs-ha.yaml',
index 08a3415..ec75856 100644 (file)
@@ -7,12 +7,16 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 
+import sys
+
 from apex.inventory import Inventory
 from apex.inventory import InventoryException
 
 from nose.tools import assert_is_instance
 from nose.tools import assert_raises
 from nose.tools import assert_equal
+from nose.tools import assert_regexp_matches
+from io import StringIO
 
 inventory_files = ('intel_pod2_settings.yaml',
                    'nokia_pod1_settings.yaml',
@@ -59,3 +63,19 @@ class TestInventory(object):
         e = InventoryException("test")
         print(e)
         assert_is_instance(e, InventoryException)
+
+    def test_dump_bash_default(self):
+        i = Inventory('../config/inventory/intel_pod2_settings.yaml')
+        out = StringIO()
+        sys.stdout = out
+        i.dump_bash()
+        output = out.getvalue().strip()
+        assert_regexp_matches(output, 'root_disk_list=sda')
+
+    def test_dump_bash_set_root_device(self):
+        i = Inventory('../config/inventory/pod_example_settings.yaml')
+        out = StringIO()
+        sys.stdout = out
+        i.dump_bash()
+        output = out.getvalue().strip()
+        assert_regexp_matches(output, 'root_disk_list=sdb')