Merge "Updating Apex ENV Var Names"
author: Tim Rozet <trozet@redhat.com>
Mon, 3 Apr 2017 15:43:53 +0000 (15:43 +0000)
committer: Gerrit Code Review <gerrit@opnfv.org>
Mon, 3 Apr 2017 15:43:53 +0000 (15:43 +0000)
63 files changed:
.gitignore
docs/jenkins-job-builder/opnfv-jjb-usage.rst
jjb/apex/apex-build.sh
jjb/apex/apex-deploy.sh
jjb/cperf/cperf-ci-jobs.yml
jjb/daisy4nfv/daisy-daily-jobs.yml
jjb/daisy4nfv/daisy-project-jobs.yml
jjb/daisy4nfv/daisy4nfv-merge-jobs.yml
jjb/daisy4nfv/daisy4nfv-verify-jobs.yml
jjb/dovetail/dovetail-ci-jobs.yml
jjb/dovetail/dovetail-weekly-jobs.yml
jjb/releng/opnfv-docker.sh
jjb/test-requirements.txt [new file with mode: 0644]
jjb/xci/bifrost-periodic-jobs.yml [new file with mode: 0644]
jjb/xci/bifrost-provision.sh [new file with mode: 0755]
jjb/xci/xci-daily-jobs.yml
jjb/xci/xci-deploy.sh
jjb/yardstick/yardstick-daily.sh
modules/requirements.txt [new file with mode: 0644]
modules/test-requirements.txt [new file with mode: 0644]
prototypes/bifrost/playbooks/roles/bifrost-ironic-install/templates/ironic-inspector.conf.j2 [deleted file]
prototypes/bifrost/playbooks/roles/bifrost-ironic-install/templates/ironic.conf.j2 [deleted file]
prototypes/xci/README.rst [new file with mode: 0644]
prototypes/xci/config/env-vars
prototypes/xci/file/aio/configure-opnfvhost.yml [new file with mode: 0644]
prototypes/xci/file/aio/flavor-vars.yml
prototypes/xci/file/aio/inventory
prototypes/xci/file/aio/openstack_user_config.yml [deleted file]
prototypes/xci/file/ha/configure-targethosts.yml [new file with mode: 0644]
prototypes/xci/file/ha/flavor-vars.yml
prototypes/xci/file/ha/inventory
prototypes/xci/file/ha/openstack_user_config.yml
prototypes/xci/file/ha/user_variables.yml [new file with mode: 0644]
prototypes/xci/file/mini/configure-targethosts.yml [new file with mode: 0644]
prototypes/xci/file/mini/flavor-vars.yml
prototypes/xci/file/mini/inventory
prototypes/xci/file/mini/openstack_user_config.yml
prototypes/xci/file/mini/user_variables.yml [new file with mode: 0644]
prototypes/xci/file/noha/configure-targethosts.yml [new file with mode: 0644]
prototypes/xci/file/noha/flavor-vars.yml
prototypes/xci/file/noha/inventory
prototypes/xci/file/noha/openstack_user_config.yml
prototypes/xci/file/noha/user_variables.yml [new file with mode: 0644]
prototypes/xci/file/user_variables.yml [deleted file]
prototypes/xci/playbooks/configure-localhost.yml
prototypes/xci/playbooks/configure-opnfvhost.yml
prototypes/xci/playbooks/provision-vm-nodes.yml [new file with mode: 0644]
prototypes/xci/playbooks/roles/configure-network/tasks/main.yml [new file with mode: 0644]
prototypes/xci/playbooks/roles/configure-nfs/tasks/main.yml [new file with mode: 0644]
prototypes/xci/playbooks/roles/remove-folders/tasks/main.yml
prototypes/xci/template/compute.interface.j2
prototypes/xci/template/controller.interface.j2
prototypes/xci/template/opnfv.interface.j2
prototypes/xci/var/Debian.yml
prototypes/xci/var/opnfv.yml
prototypes/xci/xci-deploy.sh
setup.py [new file with mode: 0644]
tox.ini [new file with mode: 0644]
utils/test/reporting/img/danube.jpg
utils/test/testapi/opnfv_testapi/common/raises.py [new file with mode: 0644]
utils/test/testapi/opnfv_testapi/resources/handlers.py
utils/test/testapi/opnfv_testapi/resources/result_handlers.py
utils/test/testapi/opnfv_testapi/resources/scenario_handlers.py

index 431e521..eeabaeb 100644 (file)
@@ -35,3 +35,4 @@ testapi_venv/
 .cache
 .tox
 *.retry
+job_output/
index 52dbdeb..f34833f 100644 (file)
@@ -21,6 +21,14 @@ Make changes::
     To ssh://agardner@gerrit.opnfv.org:29418/releng.git
      * [new branch]      HEAD -> refs/publish/master
 
+Test with tox::
+
+    tox -v -ejjb
+
+Submit the change to gerrit::
+
+    git review -v
+
Follow the link to gerrit https://gerrit.opnfv.org/gerrit/51. In a few moments
the verify job will have completed and you will see Verified +1 jenkins-ci in
the gerrit UI.
index 220d024..b6b2f21 100755 (executable)
@@ -28,10 +28,10 @@ cd $WORKSPACE/ci
 ./build.sh $BUILD_ARGS
 RPM_VERSION=$(grep Version: $WORKSPACE/build/rpm_specs/opnfv-apex.spec | awk '{ print $2 }')-$(echo $OPNFV_ARTIFACT_VERSION | tr -d '_-')
 # list the contents of BUILD_OUTPUT directory
-echo "Build Directory is ${BUILD_DIRECTORY}"
+echo "Build Directory is ${BUILD_DIRECTORY}/../.build"
 echo "Build Directory Contents:"
 echo "-------------------------"
-ls -al $BUILD_DIRECTORY
+ls -al ${BUILD_DIRECTORY}/../.build
 
 # list the contents of CACHE directory
 echo "Cache Directory is ${CACHE_DIRECTORY}"
@@ -47,10 +47,10 @@ if ! echo $BUILD_TAG | grep "apex-verify" 1> /dev/null; then
     echo "OPNFV_GIT_URL=$(git config --get remote.origin.url)"
     echo "OPNFV_GIT_SHA1=$(git rev-parse HEAD)"
     echo "OPNFV_ARTIFACT_URL=$GS_URL/opnfv-$OPNFV_ARTIFACT_VERSION.iso"
-    echo "OPNFV_ARTIFACT_SHA512SUM=$(sha512sum $BUILD_DIRECTORY/release/OPNFV-CentOS-7-x86_64-$OPNFV_ARTIFACT_VERSION.iso | cut -d' ' -f1)"
+    echo "OPNFV_ARTIFACT_SHA512SUM=$(sha512sum $BUILD_DIRECTORY/../.build/release/OPNFV-CentOS-7-x86_64-$OPNFV_ARTIFACT_VERSION.iso | cut -d' ' -f1)"
     echo "OPNFV_SRPM_URL=$GS_URL/opnfv-apex-$RPM_VERSION.src.rpm"
     echo "OPNFV_RPM_URL=$GS_URL/opnfv-apex-$RPM_VERSION.noarch.rpm"
-    echo "OPNFV_RPM_SHA512SUM=$(sha512sum $BUILD_DIRECTORY/noarch/opnfv-apex-$RPM_VERSION.noarch.rpm | cut -d' ' -f1)"
+    echo "OPNFV_RPM_SHA512SUM=$(sha512sum $BUILD_DIRECTORY/../.build/noarch/opnfv-apex-$RPM_VERSION.noarch.rpm | cut -d' ' -f1)"
     echo "OPNFV_BUILD_URL=$BUILD_URL"
   ) > $WORKSPACE/opnfv.properties
 fi
index 9c7bcb2..a86776b 100755 (executable)
@@ -123,7 +123,7 @@ if [[ "$BUILD_DIRECTORY" == *verify* || "$BUILD_DIRECTORY" == *promote* ]]; then
 # use RPMs
 else
     # find version of RPM
-    VERSION_EXTENSION=$(echo $(basename $RPM_LIST) | grep -Eo '[0-9]+\.[0-9]+-[0-9]{8}')
+    VERSION_EXTENSION=$(echo $(basename $RPM_LIST) | grep -Eo '[0-9]+\.[0-9]+-([0-9]{8}|[a-z]+-[0-9]\.[0-9]+)')
     # build RPM List which already includes base Apex RPM
     for pkg in ${APEX_PKGS}; do
         RPM_LIST+=" ${RPM_INSTALL_PATH}/opnfv-apex-${pkg}-${VERSION_EXTENSION}.noarch.rpm"
index 2742f08..f6e0685 100644 (file)
             undercloud_mac=$(sudo virsh domiflist undercloud | grep default | \
                               grep -Eo "[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+")
            INSTALLER_IP=$(/usr/sbin/arp -e | grep ${undercloud_mac} | awk '{print $1}')
-            sudo scp $INSTALLER_IP:/home/stack/stackrc /tmp/stackrc
-            source /tmp/stackrc
+
+            sudo scp -o StrictHostKeyChecking=no root@$INSTALLER_IP:/home/stack/overcloudrc /tmp/overcloudrc
+            sudo chmod 755 /tmp/overcloudrc
+            source /tmp/overcloudrc
 
             # robot suites need the ssh key to log in to controller nodes, so throwing it
             # in tmp, and mounting /tmp as $HOME as far as robot is concerned
-            sudo mkdir -p /tmp/.ssh
-            sudo scp $INSTALLER_IP:/home/stack/.ssh/id_rsa /tmp/.ssh/
-            sudo chmod -R 0600 /tmp/.ssh
+            sudo rm -rf /tmp/.ssh
+            sudo mkdir /tmp/.ssh
+            sudo chmod 0700 /tmp/.ssh
+            sudo scp -o StrictHostKeyChecking=no root@$INSTALLER_IP:/home/stack/.ssh/id_rsa /tmp/.ssh/
+            sudo chown -R jenkins-ci:jenkins-ci /tmp/.ssh
+            # done with sudo. jenkins-ci is the user from this point
+            chmod 0600 /tmp/.ssh/id_rsa
 
             # cbench requires the openflow drop test feature to be installed.
             sshpass -p karaf ssh -o StrictHostKeyChecking=no \
 
             docker pull opnfv/cperf:$DOCKER_TAG
 
-            robot_cmd="pybot -e exclude -L TRACE \
+            robot_cmd="pybot -e exclude -L TRACE -d /tmp \
                         -v ODL_SYSTEM_1_IP:${SDN_CONTROLLER_IP} \
                         -v ODL_SYSTEM_IP:${SDN_CONTROLLER_IP} \
                         -v BUNDLEFOLDER:/opt/opendaylight \
                         -v of_port:6653"
             robot_suite="/home/opnfv/repos/odl_test/csit/suites/openflowplugin/Performance/010_Cbench.robot"
 
-            docker run -v /tmp:/tmp opnfv/cperf:$DOCKER_TAG ${robot_cmd} ${robot_suite}
+            docker run -ti -v /tmp:/tmp opnfv/cperf:$DOCKER_TAG ${robot_cmd} ${robot_suite}
 
 - builder:
     name: cperf-cleanup
index c5d8e7e..aac76ba 100644 (file)
@@ -71,7 +71,6 @@
             use-build-blocker: true
             blocking-jobs:
                 - 'daisy-daily-.*'
-                - 'daisy4nfv-(merge|verify)-.*'
             block-level: 'NODE'
 
     wrappers:
index 52769ca..e631ee9 100644 (file)
             enabled: true
             max-total: 4
             option: 'project'
+        - build-blocker:
+            use-build-blocker: true
+            blocking-jobs:
+                - '{installer}-daily-.*'
+            block-level: 'NODE'
 
     scm:
         - git-scm
             enabled: true
             max-total: 6
             option: 'project'
-        - build-blocker:
-            use-build-blocker: true
-            blocking-jobs:
-                - '{installer}-daily-.*'
-                - 'daisy4nfv-(merge|verify)-.*'
-            block-level: 'NODE'
 
     scm:
         - git-scm
index 95e72e5..9e7b867 100644 (file)
@@ -29,7 +29,7 @@
         - 'build':
             slave-label: 'opnfv-build-centos'
         - 'deploy-virtual':
-            slave-label: 'opnfv-build-centos'
+            slave-label: 'daisy-virtual'
 #####################################
 # jobs
 #####################################
         - build-blocker:
             use-build-blocker: true
             blocking-jobs:
-                - '{alias}-(merge|verify)-.*'
-                - '{project}-daily-.*'
+                - '{alias}-merge-(master|danube)'
             block-level: 'NODE'
 
     scm:
index 9f44d99..a0ec2eb 100644 (file)
@@ -1,10 +1,7 @@
 - project:
     name: 'daisy4nfv-verify-jobs'
-
     project: 'daisy'
-
     installer: 'daisy'
-
 ##########################################################
# use alias to keep the existing job names unchanged
 ##########################################################
@@ -26,7 +23,9 @@
 # patch verification phases
 #####################################
     phase:
-        - 'build':
+        - unit:
+            slave-label: 'opnfv-build'
+        - build:
             slave-label: 'opnfv-build-centos'
 #####################################
 # jobs
 #####################################
 - job-template:
     name: '{alias}-verify-{stream}'
-
     project-type: multijob
-
     disabled: false
-
     concurrent: true
-
     properties:
         - logrotate-default
         - throttle:
             enabled: true
             max-total: 4
             option: 'project'
-
     scm:
         - git-scm
-
     wrappers:
         - ssh-agent-wrapper
         - timeout:
             timeout: 360
             fail: true
-
     triggers:
         - gerrit:
             server-name: 'gerrit.opnfv.org'
     builders:
         - description-setter:
             description: "Built on $NODE_NAME"
+        - multijob:
+            name: unit
+            condition: SUCCESSFUL
+            projects:
+                - name: '{alias}-verify-{name}-{stream}'
+                  current-parameters: true
+                  node-parameters: false
+                  kill-phase-on: FAILURE
+                  abort-all-job: true
         - multijob:
             name: build
             condition: SUCCESSFUL
 
 - job-template:
     name: '{alias}-verify-{phase}-{stream}'
-
     disabled: '{obj:disabled}'
-
     concurrent: true
-
     properties:
         - logrotate-default
         - throttle:
             enabled: true
             max-total: 6
             option: 'project'
-        - build-blocker:
-            use-build-blocker: true
-            blocking-jobs:
-                - '{alias}-(merge|verify)-.*'
-                - '{installer}-daily-.*'
-            block-level: 'NODE'
-
     scm:
         - git-scm
-
     wrappers:
         - ssh-agent-wrapper
         - timeout:
             timeout: 360
             fail: true
-
     parameters:
         - project-parameter:
             project: '{project}'
         - '{slave-label}-defaults'
         - '{alias}-verify-defaults':
             gs-pathname: '{gs-pathname}'
-
     builders:
         - description-setter:
             description: "Built on $NODE_NAME"
         - shell:
             !include-raw: ./daisy4nfv-workspace-cleanup.sh
 
+- builder:
+    name: daisy-verify-unit-macro
+    builders:
+        - shell: |
+            #!/bin/bash
+            set -o errexit
+            set -o pipefail
+            set -o xtrace
+            tox -e py27
+
 #####################################
 # parameter macros
 #####################################
index b65e6d5..5651fc3 100644 (file)
         - timeout:
             timeout: 180
             abort: true
+        - fix-workspace-permissions
 
     triggers:
         - '{auto-trigger-name}'
         - 'dovetail-cleanup'
         - 'dovetail-run'
 
-    wrappers:
-        - fix-workspace-permissions
-
     publishers:
         - archive:
             artifacts: 'results/**/*'
index 7b3ede9..eaa11b5 100644 (file)
@@ -78,6 +78,7 @@
         - timeout:
             timeout: '{job-timeout}'
             abort: true
+        - fix-workspace-permissions
 
     parameters:
         - project-parameter:
         - 'dovetail-cleanup'
         - 'dovetail-run'
 
-    wrappers:
-        - fix-workspace-permissions
-
     publishers:
         - archive:
             artifacts: 'results/**/*'
index 9bd711b..5d73a9d 100644 (file)
@@ -17,14 +17,16 @@ echo "Starting opnfv-docker for $DOCKER_REPO_NAME ..."
 echo "--------------------------------------------------------"
 echo
 
-
-if [[ -n $(ps -ef|grep 'docker build'|grep -v grep) ]]; then
-    echo "There is already another build process in progress:"
-    echo $(ps -ef|grep 'docker build'|grep -v grep)
-    # Abort this job since it will collide and might mess up the current one.
-    echo "Aborting..."
-    exit 1
-fi
+count=30 # docker build jobs might take up to ~30 min
+while [[ -n `ps -ef|grep 'docker build'|grep -v grep` ]]; do
+    echo "Build in progress. Waiting..."
+    sleep 60
+    count=$(( $count - 1 ))
+    if [ $count -eq 0 ]; then
+        echo "Timeout. Aborting..."
+        exit 1
+    fi
+done
 
 # Remove previous running containers if exist
 if [[ -n "$(docker ps -a | grep $DOCKER_REPO_NAME)" ]]; then
diff --git a/jjb/test-requirements.txt b/jjb/test-requirements.txt
new file mode 100644 (file)
index 0000000..6b700dc
--- /dev/null
@@ -0,0 +1 @@
+jenkins-job-builder
diff --git a/jjb/xci/bifrost-periodic-jobs.yml b/jjb/xci/bifrost-periodic-jobs.yml
new file mode 100644 (file)
index 0000000..31aa8da
--- /dev/null
@@ -0,0 +1,146 @@
+- project:
+    project: 'releng'
+
+    name: 'bifrost-periodic'
+#--------------------------------
+# Branch Anchors
+#--------------------------------
+# the versions stated here default to branches, which are later used
+# for checking out the code, pulling in the head of the branch.
+    master: &master
+        stream: master
+        openstack-bifrost-version: '{stream}'
+        opnfv-releng-version: 'master'
+        gs-pathname: ''
+    ocata: &ocata
+        stream: ocata
+        openstack-bifrost-version: 'stable/{stream}'
+        opnfv-releng-version: 'master'
+        gs-pathname: '/{stream}'
+#--------------------------------
+#        XCI PODs
+#--------------------------------
+    pod:
+        - virtual:
+            <<: *master
+        - virtual:
+            <<: *ocata
+#--------------------------------
+# XCI PODs
+#--------------------------------
+#--------------------------------
+# Supported Distros
+#--------------------------------
+    distro:
+        - 'xenial':
+            disabled: false
+            slave-label: xci-xenial-virtual
+            dib-os-release: 'xenial'
+            dib-os-element: 'ubuntu-minimal'
+            dib-os-packages: 'vlan,vim,less,bridge-utils,sudo,language-pack-en,iputils-ping,rsyslog,curl,python,debootstrap,ifenslave,ifenslave-2.6,lsof,lvm2,tcpdump,nfs-kernel-server,chrony,iptables'
+            extra-dib-elements: 'openssh-server'
+        - 'centos7':
+            disabled: true
+            slave-label: xci-centos7-virtual
+            dib-os-release: '7'
+            dib-os-element: 'centos7'
+            dib-os-packages: 'vim,less,bridge-utils,iputils,rsyslog,curl'
+            extra-dib-elements: 'openssh-server'
+        - 'suse':
+            disabled: true
+            slave-label: xci-suse-virtual
+            dib-os-release: '42.2'
+            dib-os-element: 'opensuse-minimal'
+            dib-os-packages: 'vim,less,bridge-utils,iputils,rsyslog,curl'
+            extra-dib-elements: 'openssh-server'
+
+#--------------------------------
+# jobs
+#--------------------------------
+    jobs:
+        - 'bifrost-provision-{pod}-{distro}-periodic-{stream}'
+
+#--------------------------------
+# job templates
+#--------------------------------
+- job-template:
+    name: 'bifrost-provision-{pod}-{distro}-periodic-{stream}'
+
+    disabled: '{obj:disabled}'
+
+    concurrent: false
+
+    properties:
+        - build-blocker:
+            use-build-blocker: true
+            blocking-jobs:
+                - '^xci-os.*'
+                - '^xci-deploy.*'
+                - '^xci-functest.*'
+                - '^bifrost-periodic-.*'
+                - '^osa-periodic-.*'
+            block-level: 'NODE'
+        - logrotate-default
+
+    parameters:
+        - project-parameter:
+            project: '{project}'
+            branch: '{opnfv-releng-version}'
+        - string:
+            name: XCI_FLAVOR
+            default: 'ha'
+        - string:
+            name: OPENSTACK_BIFROST_VERSION
+            default: '{openstack-bifrost-version}'
+        - string:
+            name: OPNFV_RELENG_VERSION
+            default: '{opnfv-releng-version}'
+        - string:
+            name: DISTRO
+            default: '{distro}'
+        - string:
+            name: DIB_OS_RELEASE
+            default: '{dib-os-release}'
+        - string:
+            name: DIB_OS_ELEMENT
+            default: '{dib-os-element}'
+        - string:
+            name: DIB_OS_PACKAGES
+            default: '{dib-os-packages}'
+        - string:
+            name: EXTRA_DIB_ELEMENTS
+            default: '{extra-dib-elements}'
+        - string:
+            name: CLEAN_DIB_IMAGES
+            default: 'true'
+        - label:
+            name: SLAVE_LABEL
+            default: '{slave-label}'
+        - string:
+            name: ANSIBLE_VERBOSITY
+            default: ''
+
+    wrappers:
+        - fix-workspace-permissions
+
+    scm:
+        - git-scm
+
+    # trigger is disabled until we know which jobs we will have
+    # and adjust stuff accordingly
+    triggers:
+        - timed: '#@midnight'
+
+    builders:
+        - description-setter:
+            description: "Built on $NODE_NAME - Scenario: $DEPLOY_SCENARIO"
+        - 'bifrost-provision-builder'
+
+#---------------------------
+# builder macros
+#---------------------------
+- builder:
+    name: bifrost-provision-builder
+    builders:
+        - shell:
+            !include-raw: ./bifrost-provision.sh
diff --git a/jjb/xci/bifrost-provision.sh b/jjb/xci/bifrost-provision.sh
new file mode 100755 (executable)
index 0000000..d8e17c4
--- /dev/null
@@ -0,0 +1,107 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2016 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+set -o errexit
+set -o nounset
+set -o pipefail
+
+trap cleanup_and_upload EXIT
+
+function fix_ownership() {
+    if [ -z "${JOB_URL+x}" ]; then
+        echo "Not running as part of Jenkins. Handle the logs manually."
+    else
+        # Make sure cache exists
+        [[ ! -d ${HOME}/.cache ]] && mkdir ${HOME}/.cache
+
+        sudo chown -R jenkins:jenkins $WORKSPACE
+        sudo chown -R jenkins:jenkins ${HOME}/.cache
+    fi
+}
+
+function cleanup_and_upload() {
+    original_exit=$?
+    fix_ownership
+    exit $original_exit
+}
+
+# check distro to see if we support it
+if [[ ! "$DISTRO" =~ (xenial|centos7|suse) ]]; then
+    echo "Distro $DISTRO is not supported!"
+    exit 1
+fi
+
+# remove previously cloned repos
+sudo /bin/rm -rf /opt/bifrost /opt/openstack-ansible /opt/releng /opt/functest
+
+# Fix up permissions
+fix_ownership
+
+# ensure the versions to checkout are set
+export OPENSTACK_BIFROST_VERSION=${OPENSTACK_BIFROST_VERSION:-master}
+export OPNFV_RELENG_VERSION=${OPNFV_RELENG_VERSION:-master}
+
+# log some info
+echo -e "\n"
+echo "***********************************************************************"
+echo "*                                                                     *"
+echo "*                      Provision OpenStack Nodes                      *"
+echo "*                                                                     *"
+echo "                       bifrost version: $OPENSTACK_BIFROST_VERSION"
+echo "                       releng version: $OPNFV_RELENG_VERSION"
+echo "*                                                                     *"
+echo "***********************************************************************"
+echo -e "\n"
+
+# clone the repos and checkout the versions
+sudo git clone --quiet https://git.openstack.org/openstack/bifrost /opt/bifrost
+cd /opt/bifrost && sudo git checkout --quiet $OPENSTACK_BIFROST_VERSION
+echo "xci: using bifrost commit"
+git show --oneline -s --pretty=format:'%h - %s (%cr) <%an>'
+
+sudo git clone --quiet https://gerrit.opnfv.org/gerrit/releng /opt/releng
+cd /opt/releng && sudo git checkout --quiet $OPNFV_RELENG_VERSION
+echo "xci: using releng commit"
+git show --oneline -s --pretty=format:'%h - %s (%cr) <%an>'
+
+# combine opnfv and upstream scripts/playbooks
+sudo /bin/cp -rf /opt/releng/prototypes/bifrost/* /opt/bifrost/
+
+# cleanup remnants of previous deployment
+cd /opt/bifrost
+sudo -E ./scripts/destroy-env.sh
+
+# provision 6 VMs: xcimaster, controller00, controller01, controller02, compute00, and compute01
+cd /opt/bifrost
+sudo -E ./scripts/bifrost-provision.sh
+
+# list the provisioned VMs
+cd /opt/bifrost
+source env-vars
+ironic node-list
+virsh list
+
+
+echo "OpenStack nodes are provisioned!"
+# here we have to do something in order to capture the working sha1
+# hardcoding stuff for the time being
+
+cd /opt/bifrost
+BIFROST_GIT_SHA1=$(git rev-parse HEAD)
+
+# log some info
+echo -e "\n"
+echo "***********************************************************************"
+echo "*                       BIFROST SHA1 TO PIN                           *"
+echo "*                                                                     *"
+echo "    $BIFROST_GIT_SHA1"
+echo "*                                                                     *"
+echo "***********************************************************************"
+
+echo -e "\n"
index dbe3b65..f9fbc1d 100644 (file)
@@ -1,34 +1,37 @@
+#--------------------------------
+# These jobs run on a daily basis and deploy OpenStack
+# using the pinned versions of opnfv/releng, openstack/bifrost
+# and openstack/openstack-ansible. Due to this, there is no
+# version/branch is set/passed to jobs and instead the versions
+# are checked out based on what is configured.
+#--------------------------------
 - project:
-    name: 'bifrost-osa-daily'
+    project: 'releng'
+
+    name: 'xci-daily'
 #--------------------------------
-# BRANCH ANCHORS
+# Branch Anchors
 #--------------------------------
-# the versions stated here default to branches which then later
-# on used for checking out the branches, pulling in head of the branch.
-# but they also allow us to state sha1 so instead of checking out the
-# branches, we can check out sha1 if we want to use locked/specific
-# sha1 or manually enter sha1.
     master: &master
         stream: master
-        openstack-osa-version: '{stream}'
-        openstack-bifrost-version: '{stream}'
-        opnfv-releng-version: 'master'
+        opnfv-releng-version: master
         gs-pathname: ''
     ocata: &ocata
         stream: ocata
-        openstack-osa-version: 'stable/{stream}'
-        openstack-bifrost-version: 'stable/{stream}'
-        opnfv-releng-version: 'master'
+        opnfv-releng-version: master
         gs-pathname: '/{stream}'
 #--------------------------------
-#       scenarios
+# Scenarios
 #--------------------------------
     scenario:
-        # HA scenarios
         - 'os-nosdn-nofeature-ha':
             auto-trigger-name: 'daily-trigger-disabled'
+            xci-flavor: 'ha'
+        - 'os-nosdn-nofeature-noha':
+            auto-trigger-name: 'daily-trigger-disabled'
+            xci-flavor: 'noha'
 #--------------------------------
-#        XCI PODs
+# XCI PODs
 #--------------------------------
     pod:
         - virtual:
@@ -36,7 +39,7 @@
         - virtual:
             <<: *ocata
 #--------------------------------
-#        Supported Distros
+# Supported Distros
 #--------------------------------
     distro:
         - 'xenial':
@@ -44,7 +47,7 @@
             slave-label: xci-xenial-virtual
             dib-os-release: 'xenial'
             dib-os-element: 'ubuntu-minimal'
-            dib-os-packages: 'vlan,vim,less,bridge-utils,sudo,language-pack-en,iputils-ping,rsyslog,curl,python,debootstrap,ifenslave,ifenslave-2.6,lsof,lvm2,tcpdump,nfs-kernel-server,chrony'
+            dib-os-packages: 'vlan,vim,less,bridge-utils,sudo,language-pack-en,iputils-ping,rsyslog,curl,python,debootstrap,ifenslave,ifenslave-2.6,lsof,lvm2,tcpdump,nfs-kernel-server,chrony,iptables'
             extra-dib-elements: 'openssh-server'
         - 'centos7':
             disabled: true
@@ -65,7 +68,6 @@
 #        Phases
 #--------------------------------
     phase:
-        - 'provision'
         - 'deploy'
         - 'functest'
 #--------------------------------
         - 'xci-{scenario}-{pod}-{distro}-daily-{stream}'
         - 'xci-{phase}-{pod}-{distro}-daily-{stream}'
 
-#--------------------------------
-# VM defaults
-#--------------------------------
-- defaults:
-    name: daily_vm_defaults
-    test-vm-num-nodes: '6'
-    test-vm-node-names: 'xcimaster controller00 controller01 controller02 compute00 compute01'
-    vm-domain-type: 'kvm'
-    vm-cpu: '8'
-    vm-disk: '100'
-    vm-memory-size: '16384'
-    vm-disk-cache: 'unsafe'
-
 #--------------------------------
 # job templates
 #--------------------------------
             use-build-blocker: true
             blocking-jobs:
                 - '^xci-os.*'
+                - '^xci-deploy.*'
+                - '^xci-functest.*'
+                - '^bifrost-periodic-.*'
+                - '^osa-periodic-.*'
             block-level: 'NODE'
         - logrotate-default
 
     parameters:
-        - string:
-            name: OPENSTACK_OSA_VERSION
-            default: '{openstack-osa-version}'
-        - string:
-            name: OPENSTACK_BIFROST_VERSION
-            default: '{openstack-osa-version}'
-        - string:
-            name: OPNFV_RELENG_VERSION
-            default: '{opnfv-releng-version}'
-        - string:
-            name: USE_PROMOTED_VERSIONS
-            default: 'true'
         - string:
             name: DEPLOY_SCENARIO
             default: '{scenario}'
+        - string:
+            name: XCI_FLAVOR
+            default: '{xci-flavor}'
         - label:
             name: SLAVE_LABEL
             default: '{slave-label}'
         - '{auto-trigger-name}'
 
     wrappers:
-        - xci-fix-perms-workspace
+        - fix-workspace-permissions
 
     builders:
         - description-setter:
             description: "Built on $NODE_NAME"
-        - trigger-builds:
-            - project: 'xci-provision-{pod}-{distro}-daily-{stream}'
-              current-parameters: false
-              predefined-parameters: |
-                OPENSTACK_OSA_VERSION=$OPENSTACK_OSA_VERSION
-                OPENSTACK_BIFROST_VERSION=$OPENSTACK_BIFROST_VERSION
-                OPNFV_RELENG_VERSION=$OPNFV_RELENG_VERSION
-                USE_PROMOTED_VERSIONS=$USE_PROMOTED_VERSIONS
-                DEPLOY_SCENARIO=$DEPLOY_SCENARIO
-              same-node: true
-              block: true
         - trigger-builds:
             - project: 'xci-deploy-{pod}-{distro}-daily-{stream}'
               current-parameters: false
               predefined-parameters: |
-                OPENSTACK_OSA_VERSION=$OPENSTACK_OSA_VERSION
-                OPENSTACK_BIFROST_VERSION=$OPENSTACK_BIFROST_VERSION
-                OPNFV_RELENG_VERSION=$OPNFV_RELENG_VERSION
-                USE_PROMOTED_VERSIONS=$USE_PROMOTED_VERSIONS
                 DEPLOY_SCENARIO=$DEPLOY_SCENARIO
+                XCI_FLAVOR=$XCI_FLAVOR
               same-node: true
               block: true
         - trigger-builds:
             - project: 'xci-functest-{pod}-{distro}-daily-{stream}'
               current-parameters: false
               predefined-parameters: |
-                OPENSTACK_OSA_VERSION=$OPENSTACK_OSA_VERSION
-                OPENSTACK_BIFROST_VERSION=$OPENSTACK_BIFROST_VERSION
-                OPNFV_RELENG_VERSION=$OPNFV_RELENG_VERSION
-                USE_PROMOTED_VERSIONS=$USE_PROMOTED_VERSIONS
                 DEPLOY_SCENARIO=$DEPLOY_SCENARIO
+                XCI_FLAVOR=$XCI_FLAVOR
               same-node: true
               block: true
               block-thresholds:
 
     disabled: '{obj:disabled}'
 
-    defaults: daily_vm_defaults
-
     concurrent: false
 
     properties:
         - build-blocker:
             use-build-blocker: true
             blocking-jobs:
-                - '^xci-provision.*'
                 - '^xci-deploy.*'
                 - '^xci-functest.*'
+                - '^bifrost-periodic-.*'
+                - '^osa-periodic-.*'
             block-level: 'NODE'
         - logrotate-default
 
+    wrappers:
+        - fix-workspace-permissions
+
+    scm:
+        - git-scm
+
     parameters:
-        - string:
-            name: OPENSTACK_OSA_VERSION
-            default: '{openstack-osa-version}'
-        - string:
-            name: OPENSTACK_BIFROST_VERSION
-            default: '{openstack-osa-version}'
-        - string:
-            name: OPNFV_RELENG_VERSION
-            default: '{opnfv-releng-version}'
-        - string:
-            name: USE_PROMOTED_VERSIONS
-            default: 'true'
+        - project-parameter:
+            project: '{project}'
+            branch: '{opnfv-releng-version}'
         - string:
             name: DEPLOY_SCENARIO
             default: 'os-nosdn-nofeature-ha'
+        - string:
+            name: XCI_FLAVOR
+            default: 'ha'
         - string:
             name: DISTRO
             default: '{distro}'
         - string:
             name: DIB_OS_ELEMENT
             default: '{dib-os-element}'
-        - string:
-            name: EXTRA_DIB_ELEMENTS
-            default: '{extra-dib-elements}'
         - string:
             name: DIB_OS_PACKAGES
             default: '{dib-os-packages}'
         - string:
-            name: TEST_VM_NUM_NODES
-            default: '{test-vm-num-nodes}'
-        - string:
-            name: TEST_VM_NODE_NAMES
-            default: '{test-vm-node-names}'
-        - string:
-            name: VM_DOMAIN_TYPE
-            default: '{vm-domain-type}'
-        - string:
-            name: VM_CPU
-            default: '{vm-cpu}'
-        - string:
-            name: VM_DISK
-            default: '{vm-disk}'
-        - string:
-            name: VM_MEMORY_SIZE
-            default: '{vm-memory-size}'
-        - string:
-            name: VM_DISK_CACHE
-            default: '{vm-disk-cache}'
+            name: EXTRA_DIB_ELEMENTS
+            default: '{extra-dib-elements}'
         - string:
             name: CLEAN_DIB_IMAGES
             default: 'true'
             name: ANSIBLE_VERBOSITY
             default: ''
 
-    wrappers:
-        - xci-fix-perms-workspace
-
     builders:
         - description-setter:
             description: "Built on $NODE_NAME - Scenario: $DEPLOY_SCENARIO"
         - 'xci-{phase}-builder'
 
-#---------------------------
-# wrapper macros
-#---------------------------
-- wrapper:
-    name: xci-fix-perms-workspace
-    wrappers:
-        - pre-scm-buildstep:
-          - shell: |
-                #!/bin/bash
-                sudo chown -R $USER $WORKSPACE || exit 1
-
 #---------------------------
 # builder macros
 #---------------------------
-- builder:
-    name: xci-provision-builder
-    builders:
-        - shell:
-            !include-raw: ./xci-provision.sh
 - builder:
     name: xci-deploy-builder
     builders:
         - shell:
             !include-raw: ./xci-deploy.sh
+
 - builder:
     name: xci-functest-builder
     builders:
index 87f9ec8..cf5fe32 100755 (executable)
@@ -11,83 +11,21 @@ set -o errexit
 set -o nounset
 set -o pipefail
 
-trap cleanup_and_upload EXIT
-
-function fix_ownership() {
-    if [ -z "${JOB_URL+x}" ]; then
-        echo "Not running as part of Jenkins. Handle the logs manually."
-    else
-        # Make sure cache exists
-        [[ ! -d ${HOME}/.cache ]] && mkdir ${HOME}/.cache
-
-        sudo chown -R jenkins:jenkins $WORKSPACE
-        sudo chown -R jenkins:jenkins ${HOME}/.cache
-    fi
-}
-
-function cleanup_and_upload() {
-    original_exit=$?
-    fix_ownership
-    exit $original_exit
-}
-
-# check distro to see if we support it
-if [[ ! "$DISTRO" =~ (xenial|centos7|suse) ]]; then
-    echo "Distro $DISTRO is not supported!"
-    exit 1
+cd $WORKSPACE/prototypes/xci
+
+# for daily jobs, we want to use working versions
+# for periodic jobs, we will use whatever is set in the job, probably master
+if [[ "$JOB_NAME" =~ "daily" ]]; then
+    # source pinned-versions to get the releng version
+    source ./config/pinned-versions
+
+    # checkout the version
+    git checkout -q $OPNFV_RELENG_VERSION
+    echo "Info: Using $OPNFV_RELENG_VERSION"
+elif [[ "$JOB_NAME" =~ "periodic" ]]; then
+    echo "Info: Using $OPNFV_RELENG_VERSION"
 fi
 
-# remove previously cloned repos
-sudo /bin/rm -rf /opt/openstack-ansible /opt/stack /opt/releng /opt/functest
-
-# Fix up permissions
-fix_ownership
-
-# openstack-ansible enables strict host key checking by default
-export ANSIBLE_HOST_KEY_CHECKING=False
-
-# ensure the versions to checkout are set
-export OPENSTACK_OSA_VERSION=${OPENSTACK_OSA_VERSION:-master}
-export OPNFV_RELENG_VERSION=${OPNFV_RELENG_VERSION:-master}
-
-# log some info
-echo -e "\n"
-echo "***********************************************************************"
-echo "*                                                                     *"
-echo "*                         Deploy OpenStack                            *"
-echo "*                                                                     *"
-echo "                 openstack-ansible version: $OPENSTACK_OSA_VERSION"
-echo "                       releng version: $OPNFV_RELENG_VERSION"
-echo "*                                                                     *"
-echo "***********************************************************************"
-echo -e "\n"
-# clone releng repo
-sudo git clone --quiet https://gerrit.opnfv.org/gerrit/releng /opt/releng
-cd /opt/releng && sudo git checkout --quiet $OPNFV_RELENG_VERSION
-echo "xci: using openstack-ansible commit"
-git show --oneline -s --pretty=format:'%h - %s (%cr) <%an>'
-
-# display the nodes
-echo "xci: OpenStack nodes"
-cd /opt/bifrost
-source env-vars
-ironic node-list
-
-# this script will be reused for promoting openstack-ansible versions and using
-# promoted openstack-ansible versions as part of xci daily.
-USE_PROMOTED_VERSIONS=${USE_PROMOTED_VERSIONS:-false}
-if [ $USE_PROMOTED_VERSIONS = "true" ]; then
-    echo "TBD: Will use the promoted versions of openstack/opnfv projects"
-fi
-
-cd /opt/releng/prototypes/openstack-ansible/scripts
-sudo -E ./osa-deploy.sh
-
-# log some info
-echo -e "\n"
-echo "***********************************************************************"
-echo "*                                                                     *"
-echo "*                OpenStack deployment is completed!                   *"
-echo "*                                                                     *"
-echo "***********************************************************************"
-echo -e "\n"
+# proceed with the deployment
+cd $WORKSPACE/prototypes/xci
+sudo -E ./xci-deploy.sh
index f769e9c..973f83a 100755 (executable)
@@ -18,7 +18,7 @@ if [[ ${INSTALLER_TYPE} == 'apex' ]]; then
 elif [[ ${INSTALLER_TYPE} == 'joid' ]]; then
     # If production lab then creds may be retrieved dynamically
     # creds are on the jumphost, always in the same folder
-    labconfig="-v $LAB_CONFIG/admin-openrc:/home/opnfv/openrc"
+    labconfig="-v $LAB_CONFIG/admin-openrc:/etc/yardstick/openstack.creds"
     # If dev lab, credentials may not be the default ones, just provide a path to put them into docker
     # replace the default one by the customized one provided by jenkins config
 fi
diff --git a/modules/requirements.txt b/modules/requirements.txt
new file mode 100644 (file)
index 0000000..1eaf8d0
--- /dev/null
@@ -0,0 +1,3 @@
+paramiko>=2.0.1
+mock==1.3.0
+requests==2.9.1
diff --git a/modules/test-requirements.txt b/modules/test-requirements.txt
new file mode 100644 (file)
index 0000000..99d7f13
--- /dev/null
@@ -0,0 +1,6 @@
+# The order of packages is significant, because pip processes them in the order
+# of appearance. Changing the order has an impact on the overall integration
+# process, which may cause wedges in the gate later.
+
+nose
+coverage
diff --git a/prototypes/bifrost/playbooks/roles/bifrost-ironic-install/templates/ironic-inspector.conf.j2 b/prototypes/bifrost/playbooks/roles/bifrost-ironic-install/templates/ironic-inspector.conf.j2
deleted file mode 100644 (file)
index dc4e3ff..0000000
+++ /dev/null
@@ -1,66 +0,0 @@
-{#
-# Note(TheJulia): This file is based upon the file format provided by the git
-# committed example located at:
-# http://git.openstack.org/cgit/openstack/ironic-inspector/tree/example.conf
-#}
-[DEFAULT]
-{% if enable_keystone is defined and enable_keystone | bool == true %}
-auth_strategy = keystone
-{% else %}
-auth_strategy = {{ inspector_auth | default('noauth') }}
-{% endif %}
-debug = {{ inspector_debug | bool }}
-
-[database]
-connection=mysql+pymysql://inspector:{{ ironic_db_password }}@localhost/inspector?charset=utf8
-min_pool_size = 1
-max_pool_size = 5
-
-[firewall]
-manage_firewall = {{ inspector_manage_firewall | bool | default('false') }}
-
-[ironic]
-{% if enable_keystone is defined and enable_keystone | bool == true %}
-os_region = {{ keystone.bootstrap.region_name | default('RegionOne') }}
-project_name = baremetal
-username = {{ ironic_inspector.keystone.default_username }}
-password = {{ ironic_inspector.keystone.default_password }}
-auth_url = {{ ironic_inspector.service_catalog.auth_url }}
-auth_type = password
-auth_strategy = keystone
-user_domain_id = default
-project_domain_id = default
-
-{% else %}
-auth_strategy = {{ ironic_auth_strategy | default('noauth') }}
-{% endif %}
-
-{% if enable_keystone is defined and enable_keystone | bool == true %}
-[keystone_authtoken]
-auth_plugin = password
-auth_url = {{ ironic_inspector.service_catalog.auth_url }}
-username = {{ ironic_inspector.service_catalog.username }}
-password = {{ ironic_inspector.service_catalog.password }}
-user_domain_id = default
-project_name = service
-project_domain_id = default
-
-{% endif %}
-{#
-# Note(TheJulia) preserving ironic_url in the configuration
-# in case future changes allow breaking of the deployment across
-# multiple nodes.
-#ironic_url = http://localhost:6385/
-#}
-
-[processing]
-add_ports = {{ inspector_port_addition | default('pxe') }}
-keep_ports = {{ inspector_keep_ports | default('present') }}
-ramdisk_logs_dir = {{ inspector_data_dir }}/log
-always_store_ramdisk_logs = {{ inspector_store_ramdisk_logs | default('true') | bool }}
-{% if inspector.discovery.enabled == true %}
-node_not_found_hook = enroll
-
-[discovery]
-enroll_node_driver = {{ inspector.discovery.default_node_driver }}
-{% endif %}
diff --git a/prototypes/bifrost/playbooks/roles/bifrost-ironic-install/templates/ironic.conf.j2 b/prototypes/bifrost/playbooks/roles/bifrost-ironic-install/templates/ironic.conf.j2
deleted file mode 100644 (file)
index d8896fa..0000000
+++ /dev/null
@@ -1,92 +0,0 @@
-# {{ ansible_managed }}
-# For additional details on configuring ironic, you may wish to reference
-# the sample configuration file which can be located at
-# http://git.openstack.org/cgit/openstack/ironic/tree/etc/ironic/ironic.conf.sample
-
-
-[DEFAULT]
-# NOTE(TheJulia): Until Bifrost supports neutron or some other network
-# configuration besides a flat network where bifrost orchustrates the
-# control instead of ironic, noop is the only available network driver.
-enabled_network_interfaces = noop
-{% if testing | bool == true %}
-enabled_drivers = agent_ipmitool,pxe_ipmitool
-debug = true
-{% else %}
-enabled_drivers = {{ enabled_drivers }}
-debug = false
-{% endif %}
-
-rabbit_userid = ironic
-rabbit_password = {{ ironic_db_password }}
-
-{% if enable_keystone is defined and enable_keystone | bool == true %}
-auth_strategy = keystone
-{% else %}
-auth_strategy = noauth
-{% endif %}
-
-[pxe]
-pxe_append_params = systemd.journald.forward_to_console=yes {{ extra_kernel_options | default('') }}
-pxe_config_template = $pybasedir/drivers/modules/ipxe_config.template
-tftp_server = {{ hostvars[inventory_hostname]['ansible_' + ans_network_interface]['ipv4']['address'] }}
-tftp_root = /tftpboot
-pxe_bootfile_name = undionly.kpxe
-ipxe_enabled = true
-ipxe_boot_script = /etc/ironic/boot.ipxe
-
-[deploy]
-http_url = http://{{ hostvars[inventory_hostname]['ansible_' + ans_network_interface]['ipv4']['address'] }}:{{ file_url_port }}/
-http_root = {{ http_boot_folder }}
-
-[conductor]
-api_url = http://{{ hostvars[inventory_hostname]['ansible_' + ans_network_interface]['ipv4']['address'] }}:6385/
-clean_nodes = {{ cleaning | lower }}
-automated_clean = {{ cleaning | lower }}
-
-[database]
-connection = mysql+pymysql://ironic:{{ ironic_db_password }}@localhost/ironic?charset=utf8
-min_pool_size = 1
-max_pool_size = 5
-
-[dhcp]
-dhcp_provider = none
-
-{% if testing | bool == true %}
-[ssh]
-libvirt_uri = qemu:///system
-{% endif %}
-
-{% if enable_cors | bool == true %}
-[cors]
-allowed_origin = {{ cors_allowed_origin | default('allowed_origin=http://localhost:8000') }}
-allow_credentials = {{ enable_cors_credential_support | default('true') }}
-{% endif %}
-
-[ilo]
-use_web_server_for_images = true
-
-{% if enable_inspector | bool == true %}
-[inspector]
-enabled = true
-{% endif %}
-
-{% if enable_keystone is defined and enable_keystone | bool == true %}
-[keystone]
-region_name = {{ keystone.bootstrap.region_name | default('RegionOne')}}
-[keystone_authtoken]
-auth_plugin = password
-auth_url = {{ ironic.service_catalog.auth_url }}
-username = {{ ironic.service_catalog.username }}
-password = {{ ironic.service_catalog.password }}
-user_domain_id = default
-project_name = {{ ironic.service_catalog.project_name }}
-project_domain_id = default
-
-[service_catalog]
-auth_url = {{ ironic.service_catalog.auth_url }}
-auth_type = password
-tenant_name = {{ ironic.service_catalog.project_name }}
-username = {{ ironic.service_catalog.username }}
-password = {{ ironic.service_catalog.password }}
-{% endif %}
diff --git a/prototypes/xci/README.rst b/prototypes/xci/README.rst
new file mode 100644 (file)
index 0000000..8318cdb
--- /dev/null
@@ -0,0 +1,217 @@
+###########################
+OPNFV XCI Developer Sandbox
+###########################
+
+The XCI Developer Sandbox is created by the OPNFV community for the OPNFV
+community in order to
+
+- provide means for OPNFV developers to work with the OpenStack master branch,
+  significantly cutting the time it takes to develop new features, and to test
+  them on OPNFV Infrastructure
+- enable OPNFV developers to identify bugs earlier, issue fixes faster, and
+  get feedback on a daily basis
+- establish mechanisms to run additional testing on OPNFV Infrastructure to
+  provide feedback to OpenStack community
+- make the solutions we put in place available to the other LF Networking
+  projects OPNFV works closely with
+
+More information about OPNFV XCI and the sandbox can be seen on
+`OPNFV Wiki <https://wiki.opnfv.org/pages/viewpage.action?pageId=8687635>`_.
+
+===================================
+Components of XCI Developer Sandbox
+===================================
+
+The sandbox uses OpenStack projects for VM node creation, provisioning
+and OpenStack installation.
+
+- **openstack/bifrost:** Bifrost (pronounced bye-frost) is a set of Ansible
+  playbooks that automates the task of deploying a base image onto a set
+  of known hardware using ironic. It provides modular utility for one-off
+  operating system deployment with as few operational requirements as
+  reasonably possible. Bifrost supports different operating systems such as
+  Ubuntu, CentOS, and openSUSE.
+  More information about this project can be seen on
+  `Bifrost documentation <https://docs.openstack.org/developer/bifrost/>`_.
+
+- **openstack/openstack-ansible:** OpenStack-Ansible is an official OpenStack
+  project which aims to deploy production environments from source in a way
+  that makes it scalable while also being simple to operate, upgrade, and grow.
+  More information about this project can be seen on
+  `OpenStack Ansible documentation <https://docs.openstack.org/developer/openstack-ansible/>`_.
+
+- **opnfv/releng:** OPNFV Releng Project provides additional scripts, Ansible
+  playbooks and configuration options in order for developers to have an easy
+  way of using openstack/bifrost and openstack/openstack-ansible by just
+  setting a couple of environment variables and executing a single script.
+  More information about this project can be seen on
+  `OPNFV Releng documentation <https://wiki.opnfv.org/display/releng>`_.
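+
+As a rough sketch of how these pieces fit together (condensed from the CI
+provisioning script added in this change; the paths and script names below
+are the ones the CI job uses, not a supported interface)::
+
+    # clone the upstream and OPNFV repositories
+    sudo git clone https://git.openstack.org/openstack/bifrost /opt/bifrost
+    sudo git clone https://gerrit.opnfv.org/gerrit/releng /opt/releng
+
+    # overlay the OPNFV playbooks and scripts onto upstream bifrost
+    sudo /bin/cp -rf /opt/releng/prototypes/bifrost/* /opt/bifrost/
+
+    # clean up remnants of a previous deployment and provision the VM nodes
+    cd /opt/bifrost
+    sudo -E ./scripts/destroy-env.sh
+    sudo -E ./scripts/bifrost-provision.sh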
+
+==========
+Basic Flow
+==========
+
+Here are the steps that take place upon the execution of the sandbox script
+``xci-deploy.sh``:
+
+1. Sources environment variables in order to set things up properly.
+2. Installs ansible on the host where the sandbox script is executed.
+3. Creates and provisions VM nodes based on the flavor chosen by the user.
+4. Configures the host where the sandbox script is executed.
+5. Configures the deployment host from which the OpenStack installation
+   will be driven.
+6. Configures the target hosts where OpenStack will be installed.
+7. Configures the target hosts as controller and compute nodes.
+8. Starts the OpenStack installation.
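+
+Step 1 corresponds to sourcing ``releng/prototypes/xci/config/env-vars``. An
+excerpt from that file, as updated in this change (defaults shown for
+illustration; see the file itself for the full list)::
+
+    export OPNFV_HOST_IP=192.168.122.2
+    export ANSIBLE_HOST_KEY_CHECKING=False
+    export DISTRO=${DISTRO:-ubuntu}
+    export DIB_OS_RELEASE=${DIB_OS_RELEASE:-xenial}
+    export DIB_OS_ELEMENT=${DIB_OS_ELEMENT:-ubuntu-minimal}
+    export EXTRA_DIB_ELEMENTS=${EXTRA_DIB_ELEMENTS:-"openssh-server"}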
+
+=====================
+Sandbox Prerequisites
+=====================
+
+In order to use this sandbox, the host must have certain packages installed.
+
+- libvirt
+- python
+- pip
+- git
+- <fix the list with all the dependencies>
+- passwordless sudo
+
+The host must also have enough CPU/RAM/Disk in order to host the number of VM
+nodes that will be created based on the chosen flavor. See the details from
+`this link <https://wiki.opnfv.org/display/INF/XCI+Developer+Sandbox#XCIDeveloperSandbox-Prerequisites>`_.
+
+===========================
+Flavors Provided by Sandbox
+===========================
+
+OPNFV XCI Sandbox provides different flavors, ranging from all in one (aio),
+which puts much lower requirements on the host machine, to full-blown HA.
+
+* aio: Single node which acts as the deployment host, controller and compute.
+* mini: One deployment host, 1 controller node and 1 compute node.
+* noha: One deployment host, 1 controller node and 2 compute nodes.
+* ha: One deployment host, 3 controller nodes and 2 compute nodes.
+
+See the details of the flavors from
+`this link <https://wiki.opnfv.org/display/INF/XCI+Developer+Sandbox#XCIDeveloperSandbox-AvailableFlavors>`_.
+
+==========
+How to Use
+==========
+
+Basic Usage
+-----------
+
+clone the OPNFV Releng repository::
+
+    git clone https://gerrit.opnfv.org/gerrit/releng.git
+
+change into the directory where the sandbox script is located::
+
+    cd releng/prototypes/xci
+
+execute the sandbox script::
+
+    sudo -E ./xci-deploy.sh
+
+Issuing the above command will start the aio sandbox deployment; the sandbox
+should be ready within 1.5 to 2 hours, depending on the host machine.
+
+Advanced Usage
+--------------
+
+The flavor to deploy and the versions of the upstream components to use can
+be configured by developers by setting certain environment variables. The
+example below deploys the noha flavor using the latest of the openstack-ansible
+master branch and stores logs in a different location than the default.
+
+clone the OPNFV Releng repository::
+
+    git clone https://gerrit.opnfv.org/gerrit/releng.git
+
+change into the directory where the sandbox script is located::
+
+    cd releng/prototypes/xci
+
+set the sandbox flavor::
+
+    export XCI_FLAVOR=noha
+
+set the version to use for openstack-ansible::
+
+    export OPENSTACK_OSA_VERSION=master
+
+set where the logs should be stored::
+
+    export LOG_PATH=/home/jenkins/xcilogs
+
+execute the sandbox script::
+
+    sudo -E ./xci-deploy.sh
+
+.. warning::
+
+    Please ensure you always execute the sandbox script using **sudo -E**
+    in order to make the environment variables you set available to the
+    sandbox script, or you will end up with the default settings.
+
+===============
+User Variables
+===============
+
+All user variables can be set from the command line by exporting them before
+executing the script. The current user variables can be seen from
+``releng/prototypes/xci/config/user-vars``.
+
+The variables can also be set directly within the file before executing
+the sandbox script.
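+
+For example, to run a single deployment with a different flavor and log
+location (the variable names are the ones used by this sandbox; the values
+are only illustrative)::
+
+    export XCI_FLAVOR=mini
+    export LOG_PATH=/home/jenkins/xcilogs
+    sudo -E ./xci-deploy.sh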
+
+===============
+Pinned Versions
+===============
+
+As explained above, users can pick and choose which versions to use. If
+you want to be on the safe side, you can use the pinned versions the sandbox
+provides. They can be seen from ``releng/prototypes/xci/config/pinned-versions``.
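+
+In CI, the daily jobs consume these pins before deploying; a condensed sketch
+of that logic, taken from the job script updated in this change::
+
+    # source pinned-versions to get the releng version
+    source ./config/pinned-versions
+
+    # check out the pinned version
+    git checkout -q $OPNFV_RELENG_VERSION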
+
+How Pinned Versions are Determined
+----------------------------------
+
+OPNFV runs periodic jobs against upstream projects openstack/bifrost and
+openstack/openstack-ansible using the latest on the master and stable/ocata
+branches, continuously chasing the HEAD of the corresponding branches.
+
+Once a working version is identified, the versions of the upstream components
+are then bumped in the releng repo.
+
+===========================================
+Limitations, Known Issues, and Improvements
+===========================================
+
+The list can be seen using `this link <https://jira.opnfv.org/issues/?filter=11616>`_.
+
+=========
+Changelog
+=========
+
+Changelog can be seen using `this link <https://jira.opnfv.org/issues/?filter=11625>`_.
+
+=======
+Testing
+=======
+
+The sandbox is continuously tested by OPNFV CI to ensure changes do not impact
+users. In fact, OPNFV CI itself uses the sandbox scripts to run daily platform
+verification jobs.
+
+=======
+Support
+=======
+
+OPNFV XCI issues are tracked in the OPNFV JIRA Releng project. If you encounter
+an issue or identify a bug, please submit an issue to JIRA using
+`this link <https://jira.opnfv.org/projects/RELENG>`_.
+
+If you have questions or comments, you can ask them on the ``#opnfv-pharos``
+IRC channel on Freenode.
index 1bb553b..052be2a 100755 (executable)
@@ -10,8 +10,12 @@ export CLEAN_DIB_IMAGES=false
 export OPNFV_HOST_IP=192.168.122.2
 export XCI_FLAVOR_ANSIBLE_FILE_PATH=$OPNFV_RELENG_PATH/prototypes/xci/file/$XCI_FLAVOR
 export JOB_NAME=${JOB_NAME:-false}
+# TODO: this currently matches the bifrost ansible version
+# there is perhaps a better way to do this
+export XCI_ANSIBLE_PIP_VERSION=2.1.5.0
 export ANSIBLE_HOST_KEY_CHECKING=False
 export DISTRO=${DISTRO:-ubuntu}
 export DIB_OS_RELEASE=${DIB_OS_RELEASE:-xenial}
 export DIB_OS_ELEMENT=${DIB_OS_ELEMENT:-ubuntu-minimal}
 export DIB_OS_PACKAGES=${DIB_OS_PACKAGES:-"vlan,vim,less,bridge-utils,sudo,language-pack-en,iputils-ping,rsyslog,curl,python,debootstrap,ifenslave,ifenslave-2.6,lsof,lvm2,tcpdump,nfs-kernel-server,chrony,iptables"}
+export EXTRA_DIB_ELEMENTS=${EXTRA_DIB_ELEMENTS:-"openssh-server"}
diff --git a/prototypes/xci/file/aio/configure-opnfvhost.yml b/prototypes/xci/file/aio/configure-opnfvhost.yml
new file mode 100644 (file)
index 0000000..5c66d40
--- /dev/null
@@ -0,0 +1,22 @@
+---
+- hosts: opnfv
+  remote_user: root
+  vars_files:
+    - ../var/opnfv.yml
+  roles:
+    - role: remove-folders
+    - { role: clone-repository, project: "openstack/openstack-ansible", repo: "{{ OPENSTACK_OSA_GIT_URL }}", dest: "{{ OPENSTACK_OSA_PATH }}", version: "{{ OPENSTACK_OSA_VERSION }}" }
+  tasks:
+    - name: bootstrap ansible on opnfv host
+      command: "/bin/bash ./scripts/bootstrap-ansible.sh"
+      args:
+        chdir: "{{OPENSTACK_OSA_PATH}}"
+    - name: bootstrap opnfv host as aio
+      command: "/bin/bash ./scripts/bootstrap-aio.sh"
+      args:
+        chdir: "{{OPENSTACK_OSA_PATH}}"
+    - name: install OpenStack on opnfv host - this command doesn't log anything to console
+      command: "/bin/bash ./scripts/run-playbooks.sh"
+      args:
+        chdir: "{{OPENSTACK_OSA_PATH}}"
index e69de29..6ac1e0f 100644 (file)
@@ -0,0 +1,3 @@
+---
+# this file is added intentionally in order to simplify putting files in place
+# in the future, it might contain vars specific to this flavor
index e69de29..9a3dd9e 100644 (file)
@@ -0,0 +1,2 @@
+[opnfv]
+opnfv ansible_ssh_host=192.168.122.2
diff --git a/prototypes/xci/file/aio/openstack_user_config.yml b/prototypes/xci/file/aio/openstack_user_config.yml
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/prototypes/xci/file/ha/configure-targethosts.yml b/prototypes/xci/file/ha/configure-targethosts.yml
new file mode 100644 (file)
index 0000000..6dc147f
--- /dev/null
@@ -0,0 +1,36 @@
+---
+- hosts: all
+  remote_user: root
+  tasks:
+    - name: add public key to host
+      copy:
+        src: ../file/authorized_keys
+        dest: /root/.ssh/authorized_keys
+    - name: configure modules
+      copy:
+        src: ../file/modules
+        dest: /etc/modules
+
+- hosts: controller
+  remote_user: root
+  vars_files:
+    - ../var/{{ ansible_os_family }}.yml
+    - ../var/flavor-vars.yml
+  roles:
+    # TODO: this only works for ubuntu/xenial and needs to be adjusted for other distros
+    - { role: configure-network, when: ansible_distribution_release == "xenial", src: "../template/controller.interface.j2", dest: "/etc/network/interfaces" }
+
+- hosts: compute
+  remote_user: root
+  vars_files:
+    - ../var/{{ ansible_os_family }}.yml
+    - ../var/flavor-vars.yml
+  roles:
+    # TODO: this only works for ubuntu/xenial and needs to be adjusted for other distros
+    - { role: configure-network, when: ansible_distribution_release == "xenial", src: "../template/compute.interface.j2", dest: "/etc/network/interfaces" }
+
+- hosts: compute01
+  remote_user: root
+  # TODO: this role is for configuring NFS on xenial and needs adjustment for other distros
+  roles:
+    - role: configure-nfs
index e69de29..3cd1d62 100644 (file)
@@ -0,0 +1,37 @@
+---
+host_info: {
+    'opnfv': {
+        'MGMT_IP': '172.29.236.10',
+        'VLAN_IP': '192.168.122.2',
+        'STORAGE_IP': '172.29.244.10'
+    },
+    'controller00': {
+        'MGMT_IP': '172.29.236.11',
+        'VLAN_IP': '192.168.122.3',
+        'STORAGE_IP': '172.29.244.11'
+    },
+    'controller01': {
+        'MGMT_IP': '172.29.236.12',
+        'VLAN_IP': '192.168.122.4',
+        'STORAGE_IP': '172.29.244.12'
+    },
+    'controller02': {
+        'MGMT_IP': '172.29.236.13',
+        'VLAN_IP': '192.168.122.5',
+        'STORAGE_IP': '172.29.244.13'
+    },
+    'compute00': {
+        'MGMT_IP': '172.29.236.14',
+        'VLAN_IP': '192.168.122.6',
+        'STORAGE_IP': '172.29.244.14',
+        'VLAN_IP_SECOND': '173.29.241.1',
+        'VXLAN_IP': '172.29.240.14'
+    },
+    'compute01': {
+        'MGMT_IP': '172.29.236.15',
+        'VLAN_IP': '192.168.122.7',
+        'STORAGE_IP': '172.29.244.15',
+        'VLAN_IP_SECOND': '173.29.241.2',
+        'VXLAN_IP': '172.29.240.15'
+    }
+}
index e69de29..94b1d07 100644 (file)
@@ -0,0 +1,11 @@
+[opnfv]
+opnfv ansible_ssh_host=192.168.122.2
+
+[controller]
+controller00 ansible_ssh_host=192.168.122.3
+controller01 ansible_ssh_host=192.168.122.4
+controller02 ansible_ssh_host=192.168.122.5
+
+[compute]
+compute00 ansible_ssh_host=192.168.122.6
+compute01 ansible_ssh_host=192.168.122.7
index e69de29..43e88c0 100644 (file)
@@ -0,0 +1,278 @@
+---
+cidr_networks:
+  container: 172.29.236.0/22
+  tunnel: 172.29.240.0/22
+  storage: 172.29.244.0/22
+
+used_ips:
+  - "172.29.236.1,172.29.236.50"
+  - "172.29.240.1,172.29.240.50"
+  - "172.29.244.1,172.29.244.50"
+  - "172.29.248.1,172.29.248.50"
+
+global_overrides:
+  internal_lb_vip_address: 172.29.236.222
+  external_lb_vip_address: 192.168.122.220
+  tunnel_bridge: "br-vxlan"
+  management_bridge: "br-mgmt"
+  provider_networks:
+    - network:
+        container_bridge: "br-mgmt"
+        container_type: "veth"
+        container_interface: "eth1"
+        ip_from_q: "container"
+        type: "raw"
+        group_binds:
+          - all_containers
+          - hosts
+        is_container_address: true
+        is_ssh_address: true
+    - network:
+        container_bridge: "br-vxlan"
+        container_type: "veth"
+        container_interface: "eth10"
+        ip_from_q: "tunnel"
+        type: "vxlan"
+        range: "1:1000"
+        net_name: "vxlan"
+        group_binds:
+          - neutron_linuxbridge_agent
+    - network:
+        container_bridge: "br-vlan"
+        container_type: "veth"
+        container_interface: "eth12"
+        host_bind_override: "eth12"
+        type: "flat"
+        net_name: "flat"
+        group_binds:
+          - neutron_linuxbridge_agent
+    - network:
+        container_bridge: "br-vlan"
+        container_type: "veth"
+        container_interface: "eth11"
+        type: "vlan"
+        range: "1:1"
+        net_name: "vlan"
+        group_binds:
+          - neutron_linuxbridge_agent
+    - network:
+        container_bridge: "br-storage"
+        container_type: "veth"
+        container_interface: "eth2"
+        ip_from_q: "storage"
+        type: "raw"
+        group_binds:
+          - glance_api
+          - cinder_api
+          - cinder_volume
+          - nova_compute
+
+# ##
+# ## Infrastructure
+# ##
+
+# galera, memcache, rabbitmq, utility
+shared-infra_hosts:
+  controller00:
+    ip: 172.29.236.11
+  controller01:
+    ip: 172.29.236.12
+  controller02:
+    ip: 172.29.236.13
+
+# repository (apt cache, python packages, etc)
+repo-infra_hosts:
+  controller00:
+    ip: 172.29.236.11
+  controller01:
+    ip: 172.29.236.12
+  controller02:
+    ip: 172.29.236.13
+
+# load balancer
+# Ideally the load balancer should not use the Infrastructure hosts.
+# Dedicated hardware is best for improved performance and security.
+haproxy_hosts:
+  controller00:
+    ip: 172.29.236.11
+  controller01:
+    ip: 172.29.236.12
+  controller02:
+    ip: 172.29.236.13
+
+# rsyslog server
+# log_hosts:
+# log1:
+#  ip: 172.29.236.14
+
+# ##
+# ## OpenStack
+# ##
+
+# keystone
+identity_hosts:
+  controller00:
+    ip: 172.29.236.11
+  controller01:
+    ip: 172.29.236.12
+  controller02:
+    ip: 172.29.236.13
+
+# cinder api services
+storage-infra_hosts:
+  controller00:
+    ip: 172.29.236.11
+  controller01:
+    ip: 172.29.236.12
+  controller02:
+    ip: 172.29.236.13
+
+# glance
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+image_hosts:
+  controller00:
+    ip: 172.29.236.11
+    container_vars:
+      limit_container_types: glance
+      glance_nfs_client:
+        - server: "172.29.244.15"
+          remote_path: "/images"
+          local_path: "/var/lib/glance/images"
+          type: "nfs"
+          options: "_netdev,auto"
+  controller01:
+    ip: 172.29.236.12
+    container_vars:
+      limit_container_types: glance
+      glance_nfs_client:
+        - server: "172.29.244.15"
+          remote_path: "/images"
+          local_path: "/var/lib/glance/images"
+          type: "nfs"
+          options: "_netdev,auto"
+  controller02:
+    ip: 172.29.236.13
+    container_vars:
+      limit_container_types: glance
+      glance_nfs_client:
+        - server: "172.29.244.15"
+          remote_path: "/images"
+          local_path: "/var/lib/glance/images"
+          type: "nfs"
+          options: "_netdev,auto"
+
+# nova api, conductor, etc services
+compute-infra_hosts:
+  controller00:
+    ip: 172.29.236.11
+  controller01:
+    ip: 172.29.236.12
+  controller02:
+    ip: 172.29.236.13
+
+# heat
+orchestration_hosts:
+  controller00:
+    ip: 172.29.236.11
+  controller01:
+    ip: 172.29.236.12
+  controller02:
+    ip: 172.29.236.13
+
+# horizon
+dashboard_hosts:
+  controller00:
+    ip: 172.29.236.11
+  controller01:
+    ip: 172.29.236.12
+  controller02:
+    ip: 172.29.236.13
+
+# neutron server, agents (L3, etc)
+network_hosts:
+  controller00:
+    ip: 172.29.236.11
+  controller01:
+    ip: 172.29.236.12
+  controller02:
+    ip: 172.29.236.13
+
+# ceilometer (telemetry API)
+metering-infra_hosts:
+  controller00:
+    ip: 172.29.236.11
+  controller01:
+    ip: 172.29.236.12
+  controller02:
+    ip: 172.29.236.13
+
+# aodh (telemetry alarm service)
+metering-alarm_hosts:
+  controller00:
+    ip: 172.29.236.11
+  controller01:
+    ip: 172.29.236.12
+  controller02:
+    ip: 172.29.236.13
+
+# gnocchi (telemetry metrics storage)
+metrics_hosts:
+  controller00:
+    ip: 172.29.236.11
+  controller01:
+    ip: 172.29.236.12
+  controller02:
+    ip: 172.29.236.13
+
+# nova hypervisors
+compute_hosts:
+  compute00:
+    ip: 172.29.236.14
+  compute01:
+    ip: 172.29.236.15
+
+# ceilometer compute agent (telemetry)
+metering-compute_hosts:
+  compute00:
+    ip: 172.29.236.14
+  compute01:
+    ip: 172.29.236.15
+# cinder volume hosts (NFS-backed)
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+storage_hosts:
+  controller00:
+    ip: 172.29.236.11
+    container_vars:
+      cinder_backends:
+        limit_container_types: cinder_volume
+        lvm:
+          volume_group: cinder-volumes
+          volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
+          volume_backend_name: LVM_iSCSI
+          iscsi_ip_address: "172.29.244.11"
+  controller01:
+    ip: 172.29.236.12
+    container_vars:
+      cinder_backends:
+        limit_container_types: cinder_volume
+        lvm:
+          volume_group: cinder-volumes
+          volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
+          volume_backend_name: LVM_iSCSI
+          iscsi_ip_address: "172.29.244.12"
+  controller02:
+    ip: 172.29.236.13
+    container_vars:
+      cinder_backends:
+        limit_container_types: cinder_volume
+        lvm:
+          volume_group: cinder-volumes
+          volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
+          volume_backend_name: LVM_iSCSI
+          iscsi_ip_address: "172.29.244.13"
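
The internal and external LB VIPs in global_overrides above must line up with
the keepalived VIP CIDRs in the flavor's user_variables.yml that follows
(172.29.236.222 and 192.168.122.220 for the HA flavor). A quick consistency
check from prototypes/xci:

    grep -E 'lb_vip_address|keepalived.*vip_cidr' \
        file/ha/openstack_user_config.yml file/ha/user_variables.yml
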
diff --git a/prototypes/xci/file/ha/user_variables.yml b/prototypes/xci/file/ha/user_variables.yml
new file mode 100644 (file)
index 0000000..65cbcc1
--- /dev/null
@@ -0,0 +1,27 @@
+---
+# Copyright 2014, Rackspace US, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# ##
+# ## This file contains commonly used overrides for convenience. Please inspect
+# ## the defaults for each role to find additional override options.
+# ##
+
+# # Debug and Verbose options.
+debug: false
+
+haproxy_keepalived_external_vip_cidr: "192.168.122.220/32"
+haproxy_keepalived_internal_vip_cidr: "172.29.236.222/32"
+haproxy_keepalived_external_interface: br-vlan
+haproxy_keepalived_internal_interface: br-mgmt
diff --git a/prototypes/xci/file/mini/configure-targethosts.yml b/prototypes/xci/file/mini/configure-targethosts.yml
new file mode 100644 (file)
index 0000000..395f44a
--- /dev/null
@@ -0,0 +1,32 @@
+---
+- hosts: all
+  remote_user: root
+  tasks:
+    - name: add public key to host
+      copy:
+        src: ../file/authorized_keys
+        dest: /root/.ssh/authorized_keys
+    - name: configure modules
+      copy:
+        src: ../file/modules
+        dest: /etc/modules
+
+- hosts: controller
+  remote_user: root
+  vars_files:
+    - ../var/{{ ansible_os_family }}.yml
+    - ../var/flavor-vars.yml
+  roles:
+    # TODO: this only works for ubuntu/xenial and needs to be adjusted for other distros
+    - { role: configure-network, when: ansible_distribution_release == "xenial", src: "../template/controller.interface.j2", dest: "/etc/network/interfaces" }
+
+- hosts: compute
+  remote_user: root
+  vars_files:
+    - ../var/{{ ansible_os_family }}.yml
+    - ../var/flavor-vars.yml
+  roles:
+    # TODO: this only works for ubuntu/xenial and needs to be adjusted for other distros
+    - { role: configure-network, when: ansible_distribution_release == "xenial", src: "../template/compute.interface.j2", dest: "/etc/network/interfaces" }
+    # TODO: this role is for configuring NFS on xenial and needs adjustment for other distros
+    - role: configure-nfs
index e69de29..01fba71 100644 (file)
@@ -0,0 +1,20 @@
+---
+host_info: {
+    'opnfv': {
+        'MGMT_IP': '172.29.236.10',
+        'VLAN_IP': '192.168.122.2',
+        'STORAGE_IP': '172.29.244.10'
+    },
+    'controller00': {
+        'MGMT_IP': '172.29.236.11',
+        'VLAN_IP': '192.168.122.3',
+        'STORAGE_IP': '172.29.244.11'
+    },
+    'compute00': {
+        'MGMT_IP': '172.29.236.12',
+        'VLAN_IP': '192.168.122.4',
+        'VLAN_IP_SECOND': '173.29.241.1',
+        'VXLAN_IP': '172.29.240.12',
+        'STORAGE_IP': '172.29.244.12'
+    },
+}
index e69de29..eb73e5e 100644 (file)
@@ -0,0 +1,8 @@
+[opnfv]
+opnfv ansible_ssh_host=192.168.122.2
+
+[controller]
+controller00 ansible_ssh_host=192.168.122.3
+
+[compute]
+compute00 ansible_ssh_host=192.168.122.4
index e69de29..c41f432 100644 (file)
@@ -0,0 +1,186 @@
+---
+cidr_networks:
+  container: 172.29.236.0/22
+  tunnel: 172.29.240.0/22
+  storage: 172.29.244.0/22
+
+used_ips:
+  - "172.29.236.1,172.29.236.50"
+  - "172.29.240.1,172.29.240.50"
+  - "172.29.244.1,172.29.244.50"
+  - "172.29.248.1,172.29.248.50"
+
+global_overrides:
+  internal_lb_vip_address: 172.29.236.11
+  external_lb_vip_address: 192.168.122.3
+  tunnel_bridge: "br-vxlan"
+  management_bridge: "br-mgmt"
+  provider_networks:
+    - network:
+        container_bridge: "br-mgmt"
+        container_type: "veth"
+        container_interface: "eth1"
+        ip_from_q: "container"
+        type: "raw"
+        group_binds:
+          - all_containers
+          - hosts
+        is_container_address: true
+        is_ssh_address: true
+    - network:
+        container_bridge: "br-vxlan"
+        container_type: "veth"
+        container_interface: "eth10"
+        ip_from_q: "tunnel"
+        type: "vxlan"
+        range: "1:1000"
+        net_name: "vxlan"
+        group_binds:
+          - neutron_linuxbridge_agent
+    - network:
+        container_bridge: "br-vlan"
+        container_type: "veth"
+        container_interface: "eth12"
+        host_bind_override: "eth12"
+        type: "flat"
+        net_name: "flat"
+        group_binds:
+          - neutron_linuxbridge_agent
+    - network:
+        container_bridge: "br-vlan"
+        container_type: "veth"
+        container_interface: "eth11"
+        type: "vlan"
+        range: "1:1"
+        net_name: "vlan"
+        group_binds:
+          - neutron_linuxbridge_agent
+    - network:
+        container_bridge: "br-storage"
+        container_type: "veth"
+        container_interface: "eth2"
+        ip_from_q: "storage"
+        type: "raw"
+        group_binds:
+          - glance_api
+          - cinder_api
+          - cinder_volume
+          - nova_compute
+
+# ##
+# ## Infrastructure
+# ##
+
+# galera, memcache, rabbitmq, utility
+shared-infra_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# repository (apt cache, python packages, etc)
+repo-infra_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# load balancer
+# Ideally the load balancer should not use the Infrastructure hosts.
+# Dedicated hardware is best for improved performance and security.
+haproxy_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# rsyslog server
+# log_hosts:
+# log1:
+#  ip: 172.29.236.14
+
+# ##
+# ## OpenStack
+# ##
+
+# keystone
+identity_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# cinder api services
+storage-infra_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# glance
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+image_hosts:
+  controller00:
+    ip: 172.29.236.11
+    container_vars:
+      limit_container_types: glance
+      glance_nfs_client:
+        - server: "172.29.244.12"
+          remote_path: "/images"
+          local_path: "/var/lib/glance/images"
+          type: "nfs"
+          options: "_netdev,auto"
+
+# nova api, conductor, etc services
+compute-infra_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# heat
+orchestration_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# horizon
+dashboard_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# neutron server, agents (L3, etc)
+network_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# ceilometer (telemetry API)
+metering-infra_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# aodh (telemetry alarm service)
+metering-alarm_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# gnocchi (telemetry metrics storage)
+metrics_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# nova hypervisors
+compute_hosts:
+  compute00:
+    ip: 172.29.236.12
+
+# ceilometer compute agent (telemetry)
+metering-compute_hosts:
+  compute00:
+    ip: 172.29.236.12
+# cinder volume hosts (NFS-backed)
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+storage_hosts:
+  controller00:
+    ip: 172.29.236.11
+    container_vars:
+      cinder_backends:
+        limit_container_types: cinder_volume
+        lvm:
+          volume_group: cinder-volumes
+          volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
+          volume_backend_name: LVM_iSCSI
+          iscsi_ip_address: "172.29.244.11"
diff --git a/prototypes/xci/file/mini/user_variables.yml b/prototypes/xci/file/mini/user_variables.yml
new file mode 100644 (file)
index 0000000..e4a63a2
--- /dev/null
@@ -0,0 +1,27 @@
+---
+# Copyright 2014, Rackspace US, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# ##
+# ## This file contains commonly used overrides for convenience. Please inspect
+# ## the defaults for each role to find additional override options.
+# ##
+
+# # Debug and Verbose options.
+debug: false
+
+haproxy_keepalived_external_vip_cidr: "192.168.122.3/32"
+haproxy_keepalived_internal_vip_cidr: "172.29.236.11/32"
+haproxy_keepalived_external_interface: br-vlan
+haproxy_keepalived_internal_interface: br-mgmt
diff --git a/prototypes/xci/file/noha/configure-targethosts.yml b/prototypes/xci/file/noha/configure-targethosts.yml
new file mode 100644 (file)
index 0000000..6dc147f
--- /dev/null
@@ -0,0 +1,36 @@
+---
+- hosts: all
+  remote_user: root
+  tasks:
+    - name: add public key to host
+      copy:
+        src: ../file/authorized_keys
+        dest: /root/.ssh/authorized_keys
+    - name: configure modules
+      copy:
+        src: ../file/modules
+        dest: /etc/modules
+
+- hosts: controller
+  remote_user: root
+  vars_files:
+    - ../var/{{ ansible_os_family }}.yml
+    - ../var/flavor-vars.yml
+  roles:
+    # TODO: this only works for ubuntu/xenial and needs to be adjusted for other distros
+    - { role: configure-network, when: ansible_distribution_release == "xenial", src: "../template/controller.interface.j2", dest: "/etc/network/interfaces" }
+
+- hosts: compute
+  remote_user: root
+  vars_files:
+    - ../var/{{ ansible_os_family }}.yml
+    - ../var/flavor-vars.yml
+  roles:
+    # TODO: this only works for ubuntu/xenial and needs to be adjusted for other distros
+    - { role: configure-network, when: ansible_distribution_release == "xenial", src: "../template/compute.interface.j2", dest: "/etc/network/interfaces" }
+
+- hosts: compute01
+  remote_user: root
+  # TODO: this role is for configuring NFS on xenial and needs adjustment for other distros
+  roles:
+    - role: configure-nfs
index e69de29..7f52d34 100644 (file)
@@ -0,0 +1,27 @@
+---
+host_info: {
+    'opnfv': {
+        'MGMT_IP': '172.29.236.10',
+        'VLAN_IP': '192.168.122.2',
+        'STORAGE_IP': '172.29.244.10'
+    },
+    'controller00': {
+        'MGMT_IP': '172.29.236.11',
+        'VLAN_IP': '192.168.122.3',
+        'STORAGE_IP': '172.29.244.11'
+    },
+    'compute00': {
+        'MGMT_IP': '172.29.236.12',
+        'VLAN_IP': '192.168.122.4',
+        'VLAN_IP_SECOND': '173.29.241.1',
+        'VXLAN_IP': '172.29.240.12',
+        'STORAGE_IP': '172.29.244.12'
+    },
+    'compute01': {
+        'MGMT_IP': '172.29.236.13',
+        'VLAN_IP': '192.168.122.5',
+        'VLAN_IP_SECOND': '173.29.241.2',
+        'VXLAN_IP': '172.29.240.13',
+        'STORAGE_IP': '172.29.244.13'
+    }
+}
index e69de29..b4f9f6d 100644 (file)
@@ -0,0 +1,9 @@
+[opnfv]
+opnfv ansible_ssh_host=192.168.122.2
+
+[controller]
+controller00 ansible_ssh_host=192.168.122.3
+
+[compute]
+compute00 ansible_ssh_host=192.168.122.4
+compute01 ansible_ssh_host=192.168.122.5
index e69de29..9997415 100644 (file)
@@ -0,0 +1,190 @@
+---
+cidr_networks:
+  container: 172.29.236.0/22
+  tunnel: 172.29.240.0/22
+  storage: 172.29.244.0/22
+
+used_ips:
+  - "172.29.236.1,172.29.236.50"
+  - "172.29.240.1,172.29.240.50"
+  - "172.29.244.1,172.29.244.50"
+  - "172.29.248.1,172.29.248.50"
+
+global_overrides:
+  internal_lb_vip_address: 172.29.236.11
+  external_lb_vip_address: 192.168.122.3
+  tunnel_bridge: "br-vxlan"
+  management_bridge: "br-mgmt"
+  provider_networks:
+    - network:
+        container_bridge: "br-mgmt"
+        container_type: "veth"
+        container_interface: "eth1"
+        ip_from_q: "container"
+        type: "raw"
+        group_binds:
+          - all_containers
+          - hosts
+        is_container_address: true
+        is_ssh_address: true
+    - network:
+        container_bridge: "br-vxlan"
+        container_type: "veth"
+        container_interface: "eth10"
+        ip_from_q: "tunnel"
+        type: "vxlan"
+        range: "1:1000"
+        net_name: "vxlan"
+        group_binds:
+          - neutron_linuxbridge_agent
+    - network:
+        container_bridge: "br-vlan"
+        container_type: "veth"
+        container_interface: "eth12"
+        host_bind_override: "eth12"
+        type: "flat"
+        net_name: "flat"
+        group_binds:
+          - neutron_linuxbridge_agent
+    - network:
+        container_bridge: "br-vlan"
+        container_type: "veth"
+        container_interface: "eth11"
+        type: "vlan"
+        range: "1:1"
+        net_name: "vlan"
+        group_binds:
+          - neutron_linuxbridge_agent
+    - network:
+        container_bridge: "br-storage"
+        container_type: "veth"
+        container_interface: "eth2"
+        ip_from_q: "storage"
+        type: "raw"
+        group_binds:
+          - glance_api
+          - cinder_api
+          - cinder_volume
+          - nova_compute
+
+# ##
+# ## Infrastructure
+# ##
+
+# galera, memcache, rabbitmq, utility
+shared-infra_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# repository (apt cache, python packages, etc)
+repo-infra_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# load balancer
+# Ideally the load balancer should not use the Infrastructure hosts.
+# Dedicated hardware is best for improved performance and security.
+haproxy_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# rsyslog server
+# log_hosts:
+# log1:
+#  ip: 172.29.236.14
+
+# ##
+# ## OpenStack
+# ##
+
+# keystone
+identity_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# cinder api services
+storage-infra_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# glance
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+image_hosts:
+  controller00:
+    ip: 172.29.236.11
+    container_vars:
+      limit_container_types: glance
+      glance_nfs_client:
+        - server: "172.29.244.13"
+          remote_path: "/images"
+          local_path: "/var/lib/glance/images"
+          type: "nfs"
+          options: "_netdev,auto"
+
+# nova api, conductor, etc services
+compute-infra_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# heat
+orchestration_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# horizon
+dashboard_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# neutron server, agents (L3, etc)
+network_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# ceilometer (telemetry API)
+metering-infra_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# aodh (telemetry alarm service)
+metering-alarm_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# gnocchi (telemetry metrics storage)
+metrics_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# nova hypervisors
+compute_hosts:
+  compute00:
+    ip: 172.29.236.12
+  compute01:
+    ip: 172.29.236.13
+
+# ceilometer compute agent (telemetry)
+metering-compute_hosts:
+  compute00:
+    ip: 172.29.236.12
+  compute01:
+    ip: 172.29.236.13
+# cinder volume hosts (NFS-backed)
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+storage_hosts:
+  controller00:
+    ip: 172.29.236.11
+    container_vars:
+      cinder_backends:
+        limit_container_types: cinder_volume
+        lvm:
+          volume_group: cinder-volumes
+          volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
+          volume_backend_name: LVM_iSCSI
+          iscsi_ip_address: "172.29.244.11"
diff --git a/prototypes/xci/file/noha/user_variables.yml b/prototypes/xci/file/noha/user_variables.yml
new file mode 100644 (file)
index 0000000..e4a63a2
--- /dev/null
@@ -0,0 +1,27 @@
+---
+# Copyright 2014, Rackspace US, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# ##
+# ## This file contains commonly used overrides for convenience. Please inspect
+# ## the defaults for each role to find additional override options.
+# ##
+
+# # Debug and Verbose options.
+debug: false
+
+haproxy_keepalived_external_vip_cidr: "192.168.122.3/32"
+haproxy_keepalived_internal_vip_cidr: "172.29.236.11/32"
+haproxy_keepalived_external_interface: br-vlan
+haproxy_keepalived_internal_interface: br-mgmt
diff --git a/prototypes/xci/file/user_variables.yml b/prototypes/xci/file/user_variables.yml
deleted file mode 100644 (file)
index e69de29..0000000
index 6a298e0..2a55964 100644 (file)
         src: "{{XCI_FLAVOR_ANSIBLE_FILE_PATH}}/configure-opnfvhost.yml"
         dest: "{{OPNFV_RELENG_PATH}}/prototypes/xci/playbooks"
       when: XCI_FLAVOR == "aio"
+    - name: copy flavor inventory
+      copy:
+        src: "{{XCI_FLAVOR_ANSIBLE_FILE_PATH}}/inventory"
+        dest: "{{OPNFV_RELENG_PATH}}/prototypes/xci/playbooks"
     - name: copy flavor vars
       copy:
         src: "{{XCI_FLAVOR_ANSIBLE_FILE_PATH}}/flavor-vars.yml"
index 868052d..6689c8d 100644 (file)
@@ -17,6 +17,8 @@
     - role: remove-folders
     - { role: clone-repository, project: "opnfv/releng", repo: "{{ OPNFV_RELENG_GIT_URL }}", dest: "{{ OPNFV_RELENG_PATH }}", version: "{{ OPNFV_RELENG_VERSION }}" }
     - { role: clone-repository, project: "openstack/openstack-ansible", repo: "{{ OPENSTACK_OSA_GIT_URL }}", dest: "{{ OPENSTACK_OSA_PATH }}", version: "{{ OPENSTACK_OSA_VERSION }}" }
+    # TODO: this only works for ubuntu/xenial and needs to be adjusted for other distros
+    - { role: configure-network, when: ansible_distribution_release == "xenial", src: "../template/opnfv.interface.j2", dest: "/etc/network/interfaces" }
   tasks:
     - name: generate SSH keys
       shell: ssh-keygen -b 2048 -t rsa -f /root/.ssh/id_rsa -q -N ""
       shell: "/bin/cp -rf {{OPNFV_RELENG_PATH}}/prototypes/xci/file/setup-openstack.yml {{OPENSTACK_OSA_PATH}}/playbooks"
     - name: copy OPNFV role requirements
       shell: "/bin/cp -rf {{OPNFV_RELENG_PATH}}/prototypes/xci/file/ansible-role-requirements.yml {{OPENSTACK_OSA_PATH}}"
-    # TODO: this only works for ubuntu/xenial and need to be adjusted for other distros
-    # TODO: convert this into a role
-    - name: configure network for ubuntu xenial
-      template:
-        src: ../template/opnfv.interface.j2
-        dest: /etc/network/interfaces
-      notify:
-        - restart ubuntu xenial network service
-      when: ansible_distribution_release == "xenial"
-  handlers:
-    - name: restart ubuntu xenial network service
-      shell: "/sbin/ifconfig ens3 0 &&/sbin/ifdown -a && /sbin/ifup -a"
 - hosts: localhost
   remote_user: root
   tasks:
diff --git a/prototypes/xci/playbooks/provision-vm-nodes.yml b/prototypes/xci/playbooks/provision-vm-nodes.yml
new file mode 100644 (file)
index 0000000..9a32d0b
--- /dev/null
@@ -0,0 +1,32 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2017 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- hosts: localhost
+  remote_user: root
+  vars_files:
+    - ../var/{{ ansible_os_family }}.yml
+    - ../var/opnfv.yml
+  roles:
+    # using these roles here ensures that we can reuse this playbook in different contexts
+    - role: remove-folders
+    - { role: clone-repository, project: "opnfv/releng", repo: "{{ OPNFV_RELENG_GIT_URL }}", dest: "{{ OPNFV_RELENG_PATH }}", version: "{{ OPNFV_RELENG_VERSION }}" }
+    - { role: clone-repository, project: "opnfv/bifrost", repo: "{{ OPENSTACK_BIFROST_GIT_URL }}", dest: "{{ OPENSTACK_BIFROST_PATH }}", version: "{{ OPENSTACK_BIFROST_VERSION }}" }
+  tasks:
+    - name: combine opnfv/releng and openstack/bifrost scripts/playbooks
+      copy:
+        src: "{{ OPNFV_RELENG_PATH }}/prototypes/bifrost/"
+        dest: "{{ OPENSTACK_BIFROST_PATH }}"
+    - name: destroy VM nodes created by previous deployment
+      command: "/bin/bash ./scripts/destroy-env.sh"
+      args:
+        chdir: "{{ OPENSTACK_BIFROST_PATH }}"
+    - name: create and provision VM nodes for the flavor {{ XCI_FLAVOR }}
+      command: "/bin/bash ./scripts/bifrost-provision.sh"
+      args:
+        chdir: "{{ OPENSTACK_BIFROST_PATH }}"
diff --git a/prototypes/xci/playbooks/roles/configure-network/tasks/main.yml b/prototypes/xci/playbooks/roles/configure-network/tasks/main.yml
new file mode 100644 (file)
index 0000000..8bc8482
--- /dev/null
@@ -0,0 +1,16 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2017 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+# TODO: this role needs to be adjusted for different distros
+- name: configure network for {{ ansible_os_family }} on interface {{ interface }}
+  template:
+    src: "{{ src }}"
+    dest: "{{ dest }}"
+- name: restart ubuntu xenial network service
+  shell: "/sbin/ifconfig {{ interface }} 0 &&/sbin/ifdown -a && /sbin/ifup -a"
diff --git a/prototypes/xci/playbooks/roles/configure-nfs/tasks/main.yml b/prototypes/xci/playbooks/roles/configure-nfs/tasks/main.yml
new file mode 100644 (file)
index 0000000..b188f4d
--- /dev/null
@@ -0,0 +1,36 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2017 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+# TODO: this is for xenial and needs to be adjusted for different distros
+- block:
+    - name: make NFS dir
+      file:
+        dest: /images
+        mode: "0777"
+        state: directory
+    - name: configure NFS service
+      lineinfile:
+        dest: /etc/services
+        state: present
+        create: yes
+        line: "{{ item }}"
+      with_items:
+        - "nfs        2049/tcp"
+        - "nfs        2049/udp"
+    - name: configure NFS exports on ubuntu xenial
+      copy:
+        src: ../file/exports
+        dest: /etc/exports
+      when: ansible_distribution_release == "xenial"
+    # TODO: the service name might be different on other distros and needs to be adjusted
+    - name: restart ubuntu xenial NFS service
+      service:
+        name: nfs-kernel-server
+        state: restarted
+  when: ansible_distribution_release == "xenial"
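
Once this role has run on the NFS host (compute01 in the HA flavor, compute00
otherwise), the export can be verified from another node with the standard
client tooling, assuming the NFS utilities are installed; the server IP below
is the one the HA glance_nfs_client settings point at:

    showmount -e 172.29.244.15
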
index fb321df..ac8c0f7 100644 (file)
@@ -14,6 +14,7 @@
     recurse: no
   with_items:
     - "{{ OPNFV_RELENG_PATH }}"
+    - "{{ OPENSTACK_BIFROST_PATH }}"
     - "{{ OPENSTACK_OSA_PATH }}"
     - "{{ OPENSTACK_OSA_ETC_PATH }}"
     - "{{ LOG_PATH }} "
index 1719f6a..0c5147c 100644 (file)
@@ -7,23 +7,23 @@ iface lo inet loopback
 
 
 # Physical interface
-auto ens3
-iface ens3 inet manual
+auto {{ interface }}
+iface {{ interface }} inet manual
 
 # Container/Host management VLAN interface
-auto ens3.10
-iface ens3.10 inet manual
-    vlan-raw-device ens3
+auto {{ interface }}.10
+iface {{ interface }}.10 inet manual
+    vlan-raw-device {{ interface }}
 
 # OpenStack Networking VXLAN (tunnel/overlay) VLAN interface
-auto ens3.30
-iface ens3.30 inet manual
-    vlan-raw-device ens3
+auto {{ interface }}.30
+iface {{ interface }}.30 inet manual
+    vlan-raw-device {{ interface }}
 
 # Storage network VLAN interface (optional)
-auto ens3.20
-iface ens3.20 inet manual
-    vlan-raw-device ens3
+auto {{ interface }}.20
+iface {{ interface }}.20 inet manual
+    vlan-raw-device {{ interface }}
 
 # Container/Host management bridge
 auto br-mgmt
@@ -31,7 +31,7 @@ iface br-mgmt inet static
     bridge_stp off
     bridge_waitport 0
     bridge_fd 0
-    bridge_ports ens3.10
+    bridge_ports {{ interface }}.10
     address {{host_info[inventory_hostname].MGMT_IP}}
     netmask 255.255.252.0
 
@@ -41,7 +41,7 @@ iface br-vxlan inet static
     bridge_stp off
     bridge_waitport 0
     bridge_fd 0
-    bridge_ports ens3.30
+    bridge_ports {{ interface }}.30
     address {{host_info[inventory_hostname].VXLAN_IP}}
     netmask 255.255.252.0
 
@@ -51,7 +51,7 @@ iface br-vlan inet static
     bridge_stp off
     bridge_waitport 0
     bridge_fd 0
-    bridge_ports ens3
+    bridge_ports {{ interface }}
     address {{host_info[inventory_hostname].VLAN_IP}}
     netmask 255.255.255.0
     gateway 192.168.122.1
@@ -81,6 +81,6 @@ iface br-storage inet static
     bridge_stp off
     bridge_waitport 0
     bridge_fd 0
-    bridge_ports ens3.20
+    bridge_ports {{ interface }}.20
     address {{host_info[inventory_hostname].STORAGE_IP}}
     netmask 255.255.252.0
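
The hard-coded ens3 references in all three interface templates are replaced
with the {{ interface }} variable, which the distro var file pins (see the
var/Debian.yml hunk below, where interface: "ens3" matches the libvirt
"default" network). The same templates therefore work unchanged if the
primary NIC is named differently.
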
index 74aeea9..fbaa8b8 100644 (file)
@@ -6,23 +6,23 @@ auto lo
 iface lo inet loopback
 
 # Physical interface
-auto ens3
-iface ens3 inet manual
+auto {{ interface }}
+iface {{ interface }} inet manual
 
 # Container/Host management VLAN interface
-auto ens3.10
-iface ens3.10 inet manual
-    vlan-raw-device ens3
+auto {{ interface }}.10
+iface {{ interface }}.10 inet manual
+    vlan-raw-device {{ interface }}
 
 # OpenStack Networking VXLAN (tunnel/overlay) VLAN interface
-auto ens3.30
-iface ens3.30 inet manual
-    vlan-raw-device ens3
+auto {{ interface }}.30
+iface {{ interface }}.30 inet manual
+    vlan-raw-device {{ interface }}
 
 # Storage network VLAN interface (optional)
-auto ens3.20
-iface ens3.20 inet manual
-    vlan-raw-device ens3
+auto {{ interface }}.20
+iface {{ interface }}.20 inet manual
+    vlan-raw-device {{ interface }}
 
 # Container/Host management bridge
 auto br-mgmt
@@ -30,7 +30,7 @@ iface br-mgmt inet static
     bridge_stp off
     bridge_waitport 0
     bridge_fd 0
-    bridge_ports ens3.10
+    bridge_ports {{ interface }}.10
     address {{host_info[inventory_hostname].MGMT_IP}}
     netmask 255.255.252.0
 
@@ -46,7 +46,7 @@ iface br-vxlan inet manual
     bridge_stp off
     bridge_waitport 0
     bridge_fd 0
-    bridge_ports ens3.30
+    bridge_ports {{ interface }}.30
 
 # OpenStack Networking VLAN bridge
 auto br-vlan
@@ -54,7 +54,7 @@ iface br-vlan inet static
     bridge_stp off
     bridge_waitport 0
     bridge_fd 0
-    bridge_ports ens3
+    bridge_ports {{ interface }}
     address {{host_info[inventory_hostname].VLAN_IP}}
     netmask 255.255.255.0
     gateway 192.168.122.1
@@ -66,6 +66,6 @@ iface br-storage inet static
     bridge_stp off
     bridge_waitport 0
     bridge_fd 0
-    bridge_ports ens3.20
+    bridge_ports {{ interface }}.20
     address {{host_info[inventory_hostname].STORAGE_IP}}
     netmask 255.255.252.0
index 74aeea9..fbaa8b8 100644 (file)
@@ -6,23 +6,23 @@ auto lo
 iface lo inet loopback
 
 # Physical interface
-auto ens3
-iface ens3 inet manual
+auto {{ interface }}
+iface {{ interface }} inet manual
 
 # Container/Host management VLAN interface
-auto ens3.10
-iface ens3.10 inet manual
-    vlan-raw-device ens3
+auto {{ interface }}.10
+iface {{ interface }}.10 inet manual
+    vlan-raw-device {{ interface }}
 
 # OpenStack Networking VXLAN (tunnel/overlay) VLAN interface
-auto ens3.30
-iface ens3.30 inet manual
-    vlan-raw-device ens3
+auto {{ interface }}.30
+iface {{ interface }}.30 inet manual
+    vlan-raw-device {{ interface }}
 
 # Storage network VLAN interface (optional)
-auto ens3.20
-iface ens3.20 inet manual
-    vlan-raw-device ens3
+auto {{ interface }}.20
+iface {{ interface }}.20 inet manual
+    vlan-raw-device {{ interface }}
 
 # Container/Host management bridge
 auto br-mgmt
@@ -30,7 +30,7 @@ iface br-mgmt inet static
     bridge_stp off
     bridge_waitport 0
     bridge_fd 0
-    bridge_ports ens3.10
+    bridge_ports {{ interface }}.10
     address {{host_info[inventory_hostname].MGMT_IP}}
     netmask 255.255.252.0
 
@@ -46,7 +46,7 @@ iface br-vxlan inet manual
     bridge_stp off
     bridge_waitport 0
     bridge_fd 0
-    bridge_ports ens3.30
+    bridge_ports {{ interface }}.30
 
 # OpenStack Networking VLAN bridge
 auto br-vlan
@@ -54,7 +54,7 @@ iface br-vlan inet static
     bridge_stp off
     bridge_waitport 0
     bridge_fd 0
-    bridge_ports ens3
+    bridge_ports {{ interface }}
     address {{host_info[inventory_hostname].VLAN_IP}}
     netmask 255.255.255.0
     gateway 192.168.122.1
@@ -66,6 +66,6 @@ iface br-storage inet static
     bridge_stp off
     bridge_waitport 0
     bridge_fd 0
-    bridge_ports ens3.20
+    bridge_ports {{ interface }}.20
     address {{host_info[inventory_hostname].STORAGE_IP}}
     netmask 255.255.252.0
index 6d03e0f..d13d080 100644 (file)
@@ -7,4 +7,5 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
-# this is placeholder and left blank intentionally to complete later on
+# this is the interface through which the VM nodes are connected to the libvirt network "default"
+interface: "ens3"
index 174d9b3..dd3761b 100644 (file)
 OPNFV_RELENG_GIT_URL: "{{ lookup('env','OPNFV_RELENG_GIT_URL') }}"
 OPNFV_RELENG_PATH: "{{ lookup('env','OPNFV_RELENG_PATH') }}"
 OPNFV_RELENG_VERSION: "{{ lookup('env','OPNFV_RELENG_VERSION') }}"
+OPENSTACK_BIFROST_GIT_URL: "{{ lookup('env','OPENSTACK_BIFROST_GIT_URL') }}"
+OPENSTACK_BIFROST_PATH: "{{ lookup('env','OPENSTACK_BIFROST_PATH') }}"
+OPENSTACK_BIFROST_VERSION: "{{ lookup('env','OPENSTACK_BIFROST_VERSION') }}"
 OPENSTACK_OSA_GIT_URL: "{{ lookup('env','OPENSTACK_OSA_GIT_URL') }}"
 OPENSTACK_OSA_PATH: "{{ lookup('env','OPENSTACK_OSA_PATH') }}"
 OPENSTACK_OSA_VERSION: "{{ lookup('env','OPENSTACK_OSA_VERSION') }}"
 OPENSTACK_OSA_ETC_PATH: "{{ lookup('env','OPENSTACK_OSA_ETC_PATH') }}"
+XCI_ANSIBLE_PIP_VERSION: "{{ lookup('env','XCI_ANSIBLE_PIP_VERSION') }}"
 XCI_FLAVOR: "{{ lookup('env','XCI_FLAVOR') }}"
 XCI_FLAVOR_ANSIBLE_FILE_PATH: "{{ lookup('env','XCI_FLAVOR_ANSIBLE_FILE_PATH') }}"
 LOG_PATH: "{{ lookup('env','LOG_PATH') }}"
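
All of these values are resolved from the environment, so individual
components can be overridden per run without editing any files; the defaults
come from config/user-vars and config/pinned-versions, as described in
xci-deploy.sh below. For example (values illustrative):

    export XCI_FLAVOR=noha
    export OPENSTACK_OSA_VERSION=master
    sudo -E ./xci-deploy.sh
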
index 277206d..7eb3ecd 100755 (executable)
@@ -2,30 +2,42 @@
 set -o errexit
 set -o nounset
 set -o pipefail
-set -o xtrace
 
+#-------------------------------------------------------------------------------
 # This script must run as root
+#-------------------------------------------------------------------------------
 if [[ $(whoami) != "root" ]]; then
     echo "Error: This script must be run as root!"
     exit 1
 fi
 
+#-------------------------------------------------------------------------------
+# Set environment variables
+#-------------------------------------------------------------------------------
+# The order of sourcing the variable files is significant so please do not
+# change it or things might stop working.
+# - user-vars: variables that can be configured or overridden by the user.
+# - pinned-versions: versions to check out. These can be overridden if you want to
+#   use different/more recent versions of the tools but you might end up using
+#   something that is not verified by OPNFV XCI.
+# - flavor-vars: settings for VM nodes for the chosen flavor.
+# - env-vars: variables for XCI itself; you should not need to change or
+#   override any of them.
+#-------------------------------------------------------------------------------
 # find where are we
 XCI_PATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
-
 # source user vars
 source $XCI_PATH/config/user-vars
-
 # source pinned versions
 source $XCI_PATH/config/pinned-versions
-
 # source flavor configuration
 source "$XCI_PATH/flavors/${XCI_FLAVOR}-vars"
-
 # source xci configuration
 source $XCI_PATH/config/env-vars
 
-# log info to console
+#-------------------------------------------------------------------------------
+# Log info to console
+#-------------------------------------------------------------------------------
 echo "Info: Starting XCI Deployment"
 echo "Info: Deployment parameters"
 echo "-------------------------------------------------------------------------"
@@ -36,41 +48,153 @@ echo "openstack/openstack-ansible version: $OPENSTACK_OSA_VERSION"
 echo "-------------------------------------------------------------------------"
 
 #-------------------------------------------------------------------------------
-# Cleanup the leftovers from the previous deployment
-#-------------------------------------------------------------------------------
-echo "Info: Cleaning up the previous deployment"
-$XCI_PATH/../bifrost/scripts/destroy-env.sh > /dev/null 2>&1
-/bin/rm -rf /opt/releng /opt/bifrost /opt/openstack-ansible
-
+# Install ansible on localhost
 #-------------------------------------------------------------------------------
-# Clone the repositories and checkout the versions
-#-------------------------------------------------------------------------------
-echo "Info: Cloning repositories and checking out versions"
-git clone --quiet $OPNFV_RELENG_GIT_URL $OPNFV_RELENG_PATH && \
-    cd $OPNFV_RELENG_PATH
-echo "Info: Cloned opnfv/releng. HEAD currently points at"
-echo "      $(git show --oneline -s --pretty=format:'%h - %s (%cr) <%an>')"
-git clone --quiet $OPENSTACK_BIFROST_GIT_URL $OPENSTACK_BIFROST_PATH && \
-    cd $OPENSTACK_BIFROST_PATH
-echo "Info: Cloned openstack/bifrost. HEAD currently points at"
-echo "      $(git show --oneline -s --pretty=format:'%h - %s (%cr) <%an>')"
+pip install ansible==$XCI_ANSIBLE_PIP_VERSION
 
-#-------------------------------------------------------------------------------
-# Combine opnfv and upstream scripts/playbooks
-#-------------------------------------------------------------------------------
-echo "Info: Combining opnfv/releng and opestack/bifrost scripts/playbooks"
-/bin/cp -rf $OPNFV_RELENG_PATH/prototypes/bifrost/* $OPENSTACK_BIFROST_PATH/
+# TODO: the xci playbook runs below can be combined into a single playbook later on.
 
 #-------------------------------------------------------------------------------
 # Start provisioning VM nodes
 #-------------------------------------------------------------------------------
+# This playbook
+# - removes directories that were created by the previous xci run
+# - clones opnfv/releng and openstack/bifrost repositories
+# - combines opnfv/releng and openstack/bifrost scripts/playbooks
+# - destroys VMs and removes the ironic DB, leases, and logs
+# - creates and provisions VMs for the chosen flavor
+#-------------------------------------------------------------------------------
 echo "Info: Starting provisining VM nodes using openstack/bifrost"
-echo "      This might take between 10 to 20 minutes depending on the flavor and the host"
 echo "-------------------------------------------------------------------------"
-cd $OPENSTACK_BIFROST_PATH
-STARTTIME=$(date +%s)
-./scripts/bifrost-provision.sh
-ENDTIME=$(date +%s)
+cd $XCI_PATH/playbooks
+ansible-playbook $ANSIBLE_VERBOSITY -i inventory provision-vm-nodes.yml
 echo "-----------------------------------------------------------------------"
 echo "Info: VM nodes are provisioned!"
-echo "Info: It took $(($ENDTIME - $STARTTIME)) seconds to provising the VM nodes"
+
+#-------------------------------------------------------------------------------
+# Configure localhost
+#-------------------------------------------------------------------------------
+# This playbook
+# - removes directories that were created by the previous xci run
+# - clones opnfv/releng repository
+# - creates log directory
+# - copies flavor files such as playbook, inventory, and var file
+#-------------------------------------------------------------------------------
+echo "Info: Configuring localhost for openstack-ansible"
+echo "-----------------------------------------------------------------------"
+cd $XCI_PATH/playbooks
+ansible-playbook $ANSIBLE_VERBOSITY -i inventory configure-localhost.yml
+echo "-----------------------------------------------------------------------"
+echo "Info: Configured localhost host for openstack-ansible"
+
+#-------------------------------------------------------------------------------
+# Configure openstack-ansible deployment host, opnfv
+#-------------------------------------------------------------------------------
+# This playbook
+# - removes directories that were created by the previous xci run
+# - clones opnfv/releng and openstack/openstack-ansible repositories
+# - configures network
+# - generates/prepares ssh keys
+# - bootstraps ansible
+# - copies flavor files to be used by openstack-ansible
+#-------------------------------------------------------------------------------
+echo "Info: Configuring opnfv deployment host for openstack-ansible"
+echo "-----------------------------------------------------------------------"
+cd $OPNFV_RELENG_PATH/prototypes/xci/playbooks
+ansible-playbook $ANSIBLE_VERBOSITY -i inventory configure-opnfvhost.yml
+echo "-----------------------------------------------------------------------"
+echo "Info: Configured opnfv deployment host for openstack-ansible"
+
+#-------------------------------------------------------------------------------
+# Skip the rest if the flavor is aio since the target host for aio is opnfv
+#-------------------------------------------------------------------------------
+if [[ $XCI_FLAVOR == "aio" ]]; then
+    echo "xci: aio has been installed"
+    exit 0
+fi
+
+#-------------------------------------------------------------------------------
+# Configure target hosts for openstack-ansible
+#-------------------------------------------------------------------------------
+# This playbook
+# - adds public keys to target hosts
+# - configures network
+# - configures nfs
+#-------------------------------------------------------------------------------
+echo "Info: Configuring target hosts for openstack-ansible"
+echo "-----------------------------------------------------------------------"
+cd $OPNFV_RELENG_PATH/prototypes/xci/playbooks
+ansible-playbook $ANSIBLE_VERBOSITY -i inventory configure-targethosts.yml
+echo "-----------------------------------------------------------------------"
+echo "Info: Configured target hosts"
+
+#-------------------------------------------------------------------------------
+# Set up target hosts for openstack-ansible
+#-------------------------------------------------------------------------------
+# This is an openstack-ansible playbook. Check the upstream documentation for details.
+#-------------------------------------------------------------------------------
+echo "Info: Setting up target hosts for openstack-ansible"
+echo "-----------------------------------------------------------------------"
+sudo -E /bin/sh -c "ssh root@$OPNFV_HOST_IP openstack-ansible \
+     $OPENSTACK_OSA_PATH/playbooks/setup-hosts.yml" | \
+     tee $LOG_PATH/setup-hosts.log
+echo "-----------------------------------------------------------------------"
+# check the log to see if we have any error
+if grep -q 'failed=1\|unreachable=1' $LOG_PATH/setup-hosts.log; then
+    echo "Error: OpenStack node setup failed!"
+    exit 1
+fi
+echo "Info: Set up target hosts for openstack-ansible successfuly"
+
+#-------------------------------------------------------------------------------
+# Set up infrastructure
+#-------------------------------------------------------------------------------
+# This is an openstack-ansible playbook. Check the upstream documentation for details.
+#-------------------------------------------------------------------------------
+echo "Info: Setting up infrastructure"
+echo "-----------------------------------------------------------------------"
+echo "xci: running ansible playbook setup-infrastructure.yml"
+sudo -E /bin/sh -c "ssh root@$OPNFV_HOST_IP openstack-ansible \
+     $OPENSTACK_OSA_PATH/playbooks/setup-infrastructure.yml" | \
+     tee $LOG_PATH/setup-infrastructure.log
+echo "-----------------------------------------------------------------------"
+# check the log to see if we have any error
+if grep -q 'failed=1\|unreachable=1' $LOG_PATH/setup-infrastructure.log; then
+    echo "Error: OpenStack node setup failed!"
+    exit 1
+fi
+
+#-------------------------------------------------------------------------------
+# Verify database cluster
+#-------------------------------------------------------------------------------
+echo "Info: Verifying database cluster"
+echo "-----------------------------------------------------------------------"
+sudo -E /bin/sh -c "ssh root@$OPNFV_HOST_IP ansible -i $OPENSTACK_OSA_PATH/playbooks/inventory/ \
+           galera_container -m shell \
+           -a "mysql -h localhost -e 'show status like \"%wsrep_cluster_%\";'"" \
+           | tee $LOG_PATH/galera.log
+echo "-----------------------------------------------------------------------"
+# check the log to see if we have any error
+if grep -q 'FAILED' $LOG_PATH/galera.log; then
+    echo "Error: Database cluster verification failed!"
+    exit 1
+fi
+echo "Info: Database cluster verification successful!"
+
+#-------------------------------------------------------------------------------
+# Install OpenStack
+#-------------------------------------------------------------------------------
+# This is an openstack-ansible playbook. Check the upstream documentation for details.
+#-------------------------------------------------------------------------------
+echo "Info: Installing OpenStack on target hosts"
+echo "-----------------------------------------------------------------------"
+sudo -E /bin/sh -c "ssh root@$OPNFV_HOST_IP openstack-ansible \
+     $OPENSTACK_OSA_PATH/playbooks/setup-openstack.yml" | \
+     tee $LOG_PATH/opnfv-setup-openstack.log
+echo "-----------------------------------------------------------------------"
+# check the log to see if we have any error
+if grep -q 'failed=1\|unreachable=1' $LOG_PATH/opnfv-setup-openstack.log; then
+   echo "Error: OpenStack installation failed!"
+   exit 1
+fi
+echo "Info: OpenStack installation is successfully completed!"
diff --git a/setup.py b/setup.py
new file mode 100644 (file)
index 0000000..2d9246e
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,9 @@
+#!/usr/bin/env python
+
+from setuptools import setup
+
+setup(
+    name="opnfv",
+    version="master",
+    url="https://www.opnfv.org",
+)
diff --git a/tox.ini b/tox.ini
new file mode 100644 (file)
index 0000000..e9f5fbb
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,34 @@
+# Tox (http://tox.testrun.org/) is a tool for running tests
+# in multiple virtualenvs. This configuration file will run the
+# test suite on all supported python versions. To use it, "pip install tox"
+# and then run "tox" from this directory.
+
+[tox]
+envlist = py27
+skipsdist = True
+
+[testenv]
+usedevelop = True
+setenv=
+  HOME = {envtmpdir}
+  PYTHONPATH = {toxinidir}
+
+[testenv:jjb]
+deps =
+       -rjjb/test-requirements.txt
+commands=
+       jenkins-jobs test -o job_output -r jjb/
+
+[testenv:modules]
+deps=
+       -rmodules/requirements.txt
+       -rmodules/test-requirements.txt
+commands =
+       nosetests -w modules \
+       --with-xunit \
+       --xunit-file=modules/nosetests.xml \
+       --cover-package=opnfv \
+       --with-coverage \
+       --cover-xml \
+       --cover-html \
+       tests/unit
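
With this configuration in place, the same checks Jenkins runs can be
executed locally before pushing:

    pip install tox
    tox -e jjb       # renders all Jenkins job definitions into job_output/
    tox -e modules   # runs the module unit tests with coverage
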
index a577835..2d8e27b 100644 (file)
Binary files a/utils/test/reporting/img/danube.jpg and b/utils/test/reporting/img/danube.jpg differ
diff --git a/utils/test/testapi/opnfv_testapi/common/raises.py b/utils/test/testapi/opnfv_testapi/common/raises.py
new file mode 100644 (file)
index 0000000..ed3a84e
--- /dev/null
@@ -0,0 +1,31 @@
+import httplib
+
+from tornado import web
+
+
+class Raiser(object):
+    code = httplib.OK
+
+    def __init__(self, reason):
+        raise web.HTTPError(self.code, reason)
+
+
+class BadRequest(Raiser):
+    code = httplib.BAD_REQUEST
+
+
+class Forbidden(Raiser):
+    code = httplib.FORBIDDEN
+
+
+class NotFound(Raiser):
+    code = httplib.NOT_FOUND
+
+
+class Unauthorized(Raiser):
+    code = httplib.UNAUTHORIZED
+
+
+class CodeTBD(object):
+    def __init__(self, code, reason):
+        raise web.HTTPError(code, reason)
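
Because each Raiser subclass raises web.HTTPError from its __init__, call
sites read as plain calls, e.g. raises.BadRequest("no body"), with no
explicit raise keyword; the handler conversions below rely on this.
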
index bf8a92b..c2b1a64 100644 (file)
 
 from datetime import datetime
 import functools
-import httplib
 import json
 
 from tornado import gen
 from tornado import web
 
 import models
+from opnfv_testapi.common import raises
 from opnfv_testapi.tornado_swagger import swagger
 
 DEFAULT_REPRESENTATION = "application/json"
@@ -56,9 +56,7 @@ class GenericApiHandler(web.RequestHandler):
                     try:
                         self.json_args = json.loads(self.request.body)
                     except (ValueError, KeyError, TypeError) as error:
-                        raise web.HTTPError(httplib.BAD_REQUEST,
-                                            "Bad Json format [{}]".
-                                            format(error))
+                        raises.BadRequest("Bad Json format [{}]".format(error))
 
     def finish_request(self, json_object=None):
         if json_object:
@@ -83,13 +81,11 @@ class GenericApiHandler(web.RequestHandler):
                 try:
                     token = self.request.headers['X-Auth-Token']
                 except KeyError:
-                    raise web.HTTPError(httplib.UNAUTHORIZED,
-                                        "No Authentication Header.")
+                    raises.Unauthorized("No Authentication Header.")
                 query = {'access_token': token}
                 check = yield self._eval_db_find_one(query, 'tokens')
                 if not check:
-                    raise web.HTTPError(httplib.FORBIDDEN,
-                                        "Invalid Token.")
+                    raises.Forbidden("Invalid Token.")
             ret = yield gen.coroutine(method)(self, *args, **kwargs)
             raise gen.Return(ret)
         return wrapper
@@ -101,14 +97,13 @@ class GenericApiHandler(web.RequestHandler):
         :param db_checks: [(table, exist, query, error)]
         """
         if self.json_args is None:
-            raise web.HTTPError(httplib.BAD_REQUEST, "no body")
+            raises.BadRequest('no body')
 
         data = self.table_cls.from_dict(self.json_args)
         for miss in miss_checks:
             miss_data = data.__getattribute__(miss)
             if miss_data is None or miss_data == '':
-                raise web.HTTPError(httplib.BAD_REQUEST,
-                                    '{} missing'.format(miss))
+                raises.BadRequest('{} missing'.format(miss))
 
         for k, v in kwargs.iteritems():
             data.__setattr__(k, v)
@@ -117,7 +112,7 @@ class GenericApiHandler(web.RequestHandler):
             check = yield self._eval_db_find_one(query(data), table)
             if (exist and not check) or (not exist and check):
                 code, message = error(data)
-                raise web.HTTPError(code, message)
+                raises.CodeTBD(code, message)
 
         if self.table != 'results':
             data.creation_date = datetime.now()
@@ -153,18 +148,16 @@ class GenericApiHandler(web.RequestHandler):
     def _get_one(self, query):
         data = yield self._eval_db_find_one(query)
         if data is None:
-            raise web.HTTPError(httplib.NOT_FOUND,
-                                "[{}] not exist in table [{}]"
-                                .format(query, self.table))
+            raises.NotFound("[{}] not exist in table [{}]"
+                            .format(query, self.table))
         self.finish_request(self.format_data(data))
 
     @authenticate
     def _delete(self, query):
         data = yield self._eval_db_find_one(query)
         if data is None:
-            raise web.HTTPError(httplib.NOT_FOUND,
-                                "[{}] not exit in table [{}]"
-                                .format(query, self.table))
+            raises.NotFound("[{}] not exit in table [{}]"
+                            .format(query, self.table))
 
         yield self._eval_db(self.table, 'remove', query)
         self.finish_request()
@@ -172,14 +165,13 @@ class GenericApiHandler(web.RequestHandler):
     @authenticate
     def _update(self, query, db_keys):
         if self.json_args is None:
-            raise web.HTTPError(httplib.BAD_REQUEST, "No payload")
+            raises.BadRequest("No payload")
 
         # check old data exist
         from_data = yield self._eval_db_find_one(query)
         if from_data is None:
-            raise web.HTTPError(httplib.NOT_FOUND,
-                                "{} could not be found in table [{}]"
-                                .format(query, self.table))
+            raises.NotFound("{} could not be found in table [{}]"
+                            .format(query, self.table))
 
         data = self.table_cls.from_dict(from_data)
         # check new data exist
@@ -187,9 +179,8 @@ class GenericApiHandler(web.RequestHandler):
         if not equal:
             to_data = yield self._eval_db_find_one(new_query)
             if to_data is not None:
-                raise web.HTTPError(httplib.FORBIDDEN,
-                                    "{} already exists in table [{}]"
-                                    .format(new_query, self.table))
+                raises.Forbidden("{} already exists in table [{}]"
+                                 .format(new_query, self.table))
 
         # we merge the whole document """
         edit_request = self._update_requests(data)
@@ -206,7 +197,7 @@ class GenericApiHandler(web.RequestHandler):
             request = self._update_request(request, k, v,
                                            data.__getattribute__(k))
         if not request:
-            raise web.HTTPError(httplib.FORBIDDEN, "Nothing to update")
+            raises.Forbidden("Nothing to update")
 
         edit_request = data.format()
         edit_request.update(request)
index 44b9f8c..3e78057 100644 (file)
@@ -11,8 +11,8 @@ from datetime import timedelta
 import httplib
 
 from bson import objectid
-from tornado import web
 
+from opnfv_testapi.common import raises
 from opnfv_testapi.resources import handlers
 from opnfv_testapi.resources import result_models
 from opnfv_testapi.tornado_swagger import swagger
@@ -30,8 +30,7 @@ class GenericResultHandler(handlers.GenericApiHandler):
         try:
             value = int(value)
         except:
-            raise web.HTTPError(httplib.BAD_REQUEST,
-                                '{} must be int'.format(key))
+            raises.BadRequest('{} must be int'.format(key))
         return value
 
     def set_query(self):
index a2856db..9d0233c 100644 (file)
@@ -1,8 +1,7 @@
 import functools
 import httplib
 
-from tornado import web
-
+from opnfv_testapi.common import raises
 from opnfv_testapi.resources import handlers
 import opnfv_testapi.resources.scenario_models as models
 from opnfv_testapi.tornado_swagger import swagger
@@ -185,8 +184,7 @@ class ScenarioGURHandler(GenericScenarioHandler):
     def _update_requests_rename(self, data):
         data.name = self._term.get('name')
         if not data.name:
-            raise web.HTTPError(httplib.BAD_REQUEST,
-                                "new scenario name is not provided")
+            raises.BadRequest("new scenario name is not provided")
 
     def _update_requests_add_installer(self, data):
         data.installers.append(models.ScenarioInstaller.from_dict(self._term))