venv/
ENV/
node_modules/
+.coverage
+=1.3.1
+cover/
+coverage.xml
+nosetests.xml
+testapi_venv/
+.cache
+.tox
* jose.lausuch@ericsson.com
* koffirodrigue@gmail.com
* r-mibu@cq.jp.nec.com
+* tbramwell@linuxfoundation.org
Or add the group releng-contributors
.. _releng-jobs.yaml:
https://gerrit.opnfv.org/gerrit/gitweb?p=releng.git;a=blob;f=jjb/releng-jobs.yaml;
.. _skip vote:
- https://wiki.jenkins-ci.org/display/JENKINS/Gerrit+Trigger#GerritTrigger-SkipVote
\ No newline at end of file
+ https://wiki.jenkins-ci.org/display/JENKINS/Gerrit+Trigger#GerritTrigger-SkipVote
set -o nounset
set -o pipefail
-APEX_PKGS="common undercloud onos"
+APEX_PKGS="common undercloud" # removed onos for danube
IPV6_FLAG=False
# log info to console
sudo yum -y install wget
fi
-if [[ "$BUILD_DIRECTORY" == *verify* || "$BUILD_DIRECTORY" == *csit* ]]; then
+if [[ "$BUILD_DIRECTORY" == *verify* || "$BUILD_DIRECTORY" == *promote* ]]; then
# Build is from a verify or promote job, use local build artifacts (not RPMs)
cd $WORKSPACE/../${BUILD_DIRECTORY}
WORKSPACE=$(pwd)
fi
fi
-# use local build for verify and csit promote
-if [[ "$BUILD_DIRECTORY" == *verify* || "$BUILD_DIRECTORY" == *csit* ]]; then
+# use local build for verify and promote
+if [[ "$BUILD_DIRECTORY" == *verify* || "$BUILD_DIRECTORY" == *promote* ]]; then
if [ ! -e "${WORKSPACE}/build/lib" ]; then
ln -s ${WORKSPACE}/lib ${WORKSPACE}/build/lib
fi
else
clean_opts=''
fi
- if [[ "$BUILD_DIRECTORY" == *verify* || "$BUILD_DIRECTORY" == *csit* ]]; then
+ if [[ "$BUILD_DIRECTORY" == *verify* || "$BUILD_DIRECTORY" == *promote* ]]; then
sudo CONFIG=${CONFIG} LIB=${LIB} ./clean.sh ${clean_opts}
else
sudo CONFIG=${CONFIG} LIB=${LIB} opnfv-clean ${clean_opts}
if [[ "$JOB_NAME" == *virtual* ]]; then
# settings for virtual deployment
- if [ "$IPV6_FLAG" == "True" ]; then
- NETWORK_FILE="${NETWORK_SETTINGS_DIR}/network_settings_v6.yaml"
- elif echo ${DEPLOY_SCENARIO} | grep fdio; then
- NETWORK_FILE="${NETWORK_SETTINGS_DIR}/network_settings_vpp.yaml"
- else
- NETWORK_FILE="${NETWORK_SETTINGS_DIR}/network_settings.yaml"
- fi
DEPLOY_CMD="${DEPLOY_CMD} -v"
+ if [[ "${DEPLOY_SCENARIO}" =~ fdio|ovs ]]; then
+ DEPLOY_CMD="${DEPLOY_CMD} --virtual-default-ram 14 --virtual-compute-ram 8"
+ fi
if [[ "$JOB_NAME" == *csit* ]]; then
- DEPLOY_CMD="${DEPLOY_CMD} -e csit-environment.yaml --virtual-computes 2"
+ DEPLOY_CMD="${DEPLOY_CMD} -e csit-environment.yaml"
+ fi
+ if [[ "$JOB_NAME" == *promote* ]]; then
+ DEPLOY_CMD="${DEPLOY_CMD} --virtual-computes 2"
fi
else
# settings for bare metal deployment
- if [ "$IPV6_FLAG" == "True" ]; then
- NETWORK_FILE="/root/network/network_settings_v6.yaml"
- elif [[ "$JOB_NAME" == *master* ]]; then
- NETWORK_FILE="/root/network/network_settings-master.yaml"
- else
- NETWORK_FILE="/root/network/network_settings.yaml"
- fi
+ NETWORK_SETTINGS_DIR="/root/network"
INVENTORY_FILE="/root/inventory/pod_settings.yaml"
if ! sudo test -e "$INVENTORY_FILE"; then
DEPLOY_CMD="${DEPLOY_CMD} -i ${INVENTORY_FILE}"
fi
+if [ "$IPV6_FLAG" == "True" ]; then
+ NETWORK_FILE="${NETWORK_SETTINGS_DIR}/network_settings_v6.yaml"
+elif echo ${DEPLOY_SCENARIO} | grep fdio; then
+ NETWORK_FILE="${NETWORK_SETTINGS_DIR}/network_settings_vpp.yaml"
+else
+ NETWORK_FILE="${NETWORK_SETTINGS_DIR}/network_settings.yaml"
+fi
+
# Check that network settings file exists
if ! sudo test -e "$NETWORK_FILE"; then
echo "ERROR: Required settings file missing: Network Settings file ${NETWORK_FILE}"
set -o pipefail
SSH_OPTIONS=(-o StrictHostKeyChecking=no -o GlobalKnownHostsFile=/dev/null -o UserKnownHostsFile=/dev/null -o LogLevel=error)
+SNAP_TYPE=$(echo ${JOB_NAME} | sed -n 's/^apex-\(.\+\)-promote.*$/\1/p')
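+# e.g. for JOB_NAME=apex-csit-promote-daily-master this yields SNAP_TYPE=csit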
echo "Creating Apex snapshot..."
echo "-------------------------"
# tar up artifacts
DATE=`date +%Y-%m-%d`
-tar czf ../apex-csit-snap-${DATE}.tar.gz .
+tar czf ../apex-${SNAP_TYPE}-snap-${DATE}.tar.gz .
popd > /dev/null
sudo rm -rf ${tmp_dir}
-echo "Snapshot saved as apex-csit-snap-${DATE}.tar.gz"
+echo "Snapshot saved as apex-${SNAP_TYPE}-snap-${DATE}.tar.gz"
# update opnfv properties file
-curl -O -L http://$GS_URL/snapshot.properties
-sed -i '/^OPNFV_SNAP_URL=/{h;s#=.*#='${GS_URL}'/apex-csit-snap-'${DATE}'.tar.gz#};${x;/^$/{s##OPNFV_SNAP_URL='${GS_URL}'/apex-csit-snap-'${DATE}'.tar.gz#;H};x}' snapshot.properties
-snap_sha=$(sha512sum apex-csit-snap-${DATE}.tar.gz | cut -d' ' -f1)
-sed -i '/^OPNFV_SNAP_SHA512SUM=/{h;s/=.*/='${snap_sha}'/};${x;/^$/{s//OPNFV_SNAP_SHA512SUM='${snap_sha}'/;H};x}' snapshot.properties
-echo "OPNFV_SNAP_URL=$GS_URL/apex-csit-snap-${DATE}.tar.gz"
-echo "OPNFV_SNAP_SHA512SUM=$(sha512sum apex-csit-snap-${DATE}.tar.gz | cut -d' ' -f1)"
-echo "Updated properties file: "
-cat snapshot.properties
+if [ "$SNAP_TYPE" == 'csit' ]; then
+ curl -O -L http://$GS_URL/snapshot.properties
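+ # update OPNFV_SNAP_URL in place if the key already exists in snapshot.properties,
+ # otherwise append it; OPNFV_SNAP_SHA512SUM below is handled the same way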
+ sed -i '/^OPNFV_SNAP_URL=/{h;s#=.*#='${GS_URL}'/apex-csit-snap-'${DATE}'.tar.gz#};${x;/^$/{s##OPNFV_SNAP_URL='${GS_URL}'/apex-csit-snap-'${DATE}'.tar.gz#;H};x}' snapshot.properties
+ snap_sha=$(sha512sum apex-csit-snap-${DATE}.tar.gz | cut -d' ' -f1)
+ sed -i '/^OPNFV_SNAP_SHA512SUM=/{h;s/=.*/='${snap_sha}'/};${x;/^$/{s//OPNFV_SNAP_SHA512SUM='${snap_sha}'/;H};x}' snapshot.properties
+ echo "OPNFV_SNAP_URL=$GS_URL/apex-csit-snap-${DATE}.tar.gz"
+ echo "OPNFV_SNAP_SHA512SUM=$(sha512sum apex-csit-snap-${DATE}.tar.gz | cut -d' ' -f1)"
+ echo "Updated properties file: "
+ cat snapshot.properties
+fi
RPM_INSTALL_PATH=$BUILD_DIRECTORY/noarch
RPM_LIST=$RPM_INSTALL_PATH/$(basename $OPNFV_RPM_URL)
VERSION_EXTENSION=$(echo $(basename $OPNFV_RPM_URL) | sed 's/opnfv-apex-//')
-for pkg in common undercloud onos; do
+for pkg in common undercloud; do # removed onos for danube
RPM_LIST+=" ${RPM_INSTALL_PATH}/opnfv-apex-${pkg}-${VERSION_EXTENSION}"
done
SRPM_INSTALL_PATH=$BUILD_DIRECTORY
SRPM_LIST=$SRPM_INSTALL_PATH/$(basename $OPNFV_SRPM_URL)
VERSION_EXTENSION=$(echo $(basename $OPNFV_SRPM_URL) | sed 's/opnfv-apex-//')
-for pkg in common undercloud onos; do
+for pkg in common undercloud; do # removed onos for danube
SRPM_LIST+=" ${SRPM_INSTALL_PATH}/opnfv-apex-${pkg}-${VERSION_EXTENSION}"
done
}
uploadsnap () {
# Uploads the snapshot artifact and, for csit snapshots, the updated properties file
echo "Uploading snapshot artifacts"
- gsutil cp $WORKSPACE/apex-csit-snap-`date +%Y-%m-%d`.tar.gz gs://$GS_URL/ > gsutil.iso.log
- gsutil cp $WORKSPACE/snapshot.properties gs://$GS_URL/snapshot.properties > gsutil.latest.log
+ SNAP_TYPE=$(echo ${JOB_NAME} | sed -n 's/^apex-\(.\+\)-promote.*$/\1/p')
+ gsutil cp $WORKSPACE/apex-${SNAP_TYPE}-snap-`date +%Y-%m-%d`.tar.gz gs://$GS_URL/ > gsutil.iso.log
+ if [ "$SNAP_TYPE" == 'csit' ]; then
+ gsutil cp $WORKSPACE/snapshot.properties gs://$GS_URL/snapshot.properties > gsutil.latest.log
+ fi
echo "Upload complete for Snapshot"
}
-if echo $WORKSPACE | grep csit > /dev/null; then
+if echo $WORKSPACE | grep promote > /dev/null; then
uploadsnap
elif gpg2 --list-keys | grep "opnfv-helpdesk@rt.linuxfoundation.org"; then
echo "Signing Key avaliable"
- 'apex-deploy-virtual-{scenario}-{stream}'
- 'apex-deploy-baremetal-{scenario}-{stream}'
- 'apex-daily-{stream}'
- - 'apex-daily-colorado'
- - 'apex-build-colorado'
- - 'apex-deploy-baremetal-os-odl_l2-fdio-ha-colorado'
- 'apex-csit-promote-daily-{stream}'
+ - 'apex-fdio-promote-daily-{stream}'
# stream: branch with - in place of / (eg. stable-arno)
# branch: branch (eg. stable/arno)
slave: 'lf-pod1'
verify-slave: 'apex-verify-master'
daily-slave: 'apex-daily-master'
+ - danube:
+ branch: 'stable/danube'
+ gs-pathname: '/danube'
+ slave: 'lf-pod1'
+ verify-slave: 'apex-verify-danube'
+ daily-slave: 'apex-daily-danube'
project: 'apex'
- 'os-nosdn-ovs-noha'
- 'os-nosdn-fdio-noha'
- 'os-nosdn-fdio-ha'
+ - 'os-nosdn-kvm-ha'
+ - 'os-nosdn-kvm-noha'
+ - 'os-odl_l2-fdio-noha'
- 'os-odl_l2-fdio-ha'
- 'os-odl_l2-netvirt_gbp_fdio-noha'
- 'os-odl_l2-sfc-noha'
- 'os-odl_l3-fdio_dvr-noha'
- 'os-odl_l3-fdio_dvr-ha'
- 'os-odl_l3-csit-noha'
+ - 'os-odl_l3-nofeature-noha'
- 'os-onos-nofeature-ha'
- 'gate'
builders:
- 'apex-unit-test'
- 'apex-build'
- - trigger-builds:
- - project: 'apex-deploy-virtual-os-nosdn-nofeature-ha-{stream}'
- predefined-parameters: |
- BUILD_DIRECTORY=apex-verify-{stream}
- OPNFV_CLEAN=yes
- git-revision: false
- block: true
- same-node: true
- - trigger-builds:
- - project: 'functest-apex-{verify-slave}-suite-{stream}'
- predefined-parameters: |
- DEPLOY_SCENARIO=os-nosdn-nofeature-ha
- FUNCTEST_SUITE_NAME=healthcheck
- block: true
- same-node: true
- trigger-builds:
- project: 'apex-deploy-virtual-os-odl_l3-nofeature-ha-{stream}'
predefined-parameters: |
blocking-jobs:
- 'apex-daily.*'
- 'apex-verify.*'
- - 'apex-csit.*'
+ - 'apex-.*-promote.*'
builders:
- trigger-builds:
builders:
- trigger-builds:
- - project: 'apex-deploy-baremetal-os-odl_l3-nofeature-ha-{stream}'
+ - project: 'apex-deploy-baremetal-os-odl_l3-nofeature-noha-{stream}'
predefined-parameters:
OPNFV_CLEAN=yes
git-revision: false
- trigger-builds:
- project: 'cperf-apex-intel-pod2-daily-{stream}'
predefined-parameters:
- DEPLOY_SCENARIO=os-odl_l3-nofeature-ha
+ DEPLOY_SCENARIO=os-odl_l3-nofeature-noha
block: true
same-node: true
- 'apex-deploy.*'
- 'apex-build.*'
- 'apex-runner.*'
- - 'apex-csit.*'
+ - 'apex-.*-promote.*'
triggers:
- 'apex-{stream}'
build-step-failure-threshold: 'never'
failure-threshold: 'never'
unstable-threshold: 'FAILURE'
+ # 1. dovetail only runs against master for now, not synced with A/B/C branches
+ # 2. here the stream means the SUT stream; the dovetail stream is defined in its own job
+ # 3. only the debug testsuite is run here (includes basic test cases,
+ #    i.e. one tempest smoke ipv6 and two vping cases from functest)
+ # 4. not used for release criteria or compliance,
+ #    only to debug dovetail tool bugs with apex
+ - trigger-builds:
+ - project: 'dovetail-apex-{slave}-debug-{stream}'
+ current-parameters: false
+ predefined-parameters:
+ DEPLOY_SCENARIO=os-nosdn-nofeature-ha
+ block: true
+ same-node: true
+ block-thresholds:
+ build-step-failure-threshold: 'never'
+ failure-threshold: 'never'
+ unstable-threshold: 'FAILURE'
- trigger-builds:
- project: 'apex-deploy-baremetal-os-odl_l3-nofeature-ha-{stream}'
predefined-parameters: |
build-step-failure-threshold: 'never'
failure-threshold: 'never'
unstable-threshold: 'FAILURE'
-# Colorado Build
-- job-template:
- name: 'apex-build-colorado'
-
- # Job template for builds
- #
- # Required Variables:
- # stream: branch with - in place of / (eg. stable)
- # branch: branch (eg. stable)
- node: 'apex-daily-colorado'
-
- disabled: false
-
- concurrent: true
-
- parameters:
- - project-parameter:
- project: '{project}'
- branch: 'stable/colorado'
- - apex-parameter:
- gs-pathname: '/colorado'
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: "Used for overriding the GIT URL coming from parameters macro."
-
- scm:
- - git-scm
-
- properties:
- - logrotate-default
- - build-blocker:
- use-build-blocker: true
- block-level: 'NODE'
- blocking-jobs:
- - 'apex-deploy.*'
- - throttle:
- max-per-node: 1
- max-total: 10
- option: 'project'
-
- builders:
- - 'apex-build'
- - 'apex-upload-artifact'
-
-
-# Colorado FDIO Deploy
-- job-template:
- name: 'apex-deploy-baremetal-os-odl_l2-fdio-ha-colorado'
-
- # Job template for baremetal deployment
- #
- # Required Variables:
- # stream: branch with - in place of / (eg. stable)
- # branch: branch (eg. stable)
- node: 'lf-pod1'
-
- disabled: false
-
- scm:
- - git-scm
-
- parameters:
- - project-parameter:
- project: '{project}'
- branch: 'stable/colorado'
- - apex-parameter:
- gs-pathname: '/colorado'
- - string:
- name: DEPLOY_SCENARIO
- default: 'os-odl_l2-fdio-ha'
- description: "Scenario to deploy with."
-
- properties:
- - logrotate-default
- - build-blocker:
- use-build-blocker: true
- block-level: 'NODE'
- blocking-jobs:
- - 'apex-verify.*'
- - 'apex-deploy.*'
- - 'apex-build.*'
-
-
- builders:
- - 'apex-deploy'
- - 'apex-workspace-cleanup'
+ - trigger-builds:
+ - project: 'apex-deploy-baremetal-os-odl_l2-fdio-noha-{stream}'
+ predefined-parameters: |
+ BUILD_DIRECTORY=apex-build-{stream}/.build
+ OPNFV_CLEAN=yes
+ git-revision: true
+ same-node: true
+ block-thresholds:
+ build-step-failure-threshold: 'never'
+ block: true
+ - trigger-builds:
+ - project: 'functest-apex-{daily-slave}-daily-{stream}'
+ predefined-parameters:
+ DEPLOY_SCENARIO=os-odl_l2-fdio-noha
+ block: true
+ same-node: true
+ block-thresholds:
+ build-step-failure-threshold: 'never'
+ failure-threshold: 'never'
+ unstable-threshold: 'FAILURE'
+ - trigger-builds:
+ - project: 'yardstick-apex-{slave}-daily-{stream}'
+ predefined-parameters:
+ DEPLOY_SCENARIO=os-odl_l2-fdio-noha
+ block: true
+ same-node: true
+ block-thresholds:
+ build-step-failure-threshold: 'never'
+ failure-threshold: 'never'
+ unstable-threshold: 'FAILURE'
+ - trigger-builds:
+ - project: 'apex-deploy-baremetal-os-odl_l2-fdio-ha-{stream}'
+ predefined-parameters: |
+ BUILD_DIRECTORY=apex-build-{stream}/.build
+ OPNFV_CLEAN=yes
+ git-revision: true
+ same-node: true
+ block-thresholds:
+ build-step-failure-threshold: 'never'
+ block: true
+ - trigger-builds:
+ - project: 'functest-apex-{daily-slave}-daily-{stream}'
+ predefined-parameters:
+ DEPLOY_SCENARIO=os-odl_l2-fdio-ha
+ block: true
+ same-node: true
+ block-thresholds:
+ build-step-failure-threshold: 'never'
+ failure-threshold: 'never'
+ unstable-threshold: 'FAILURE'
+ - trigger-builds:
+ - project: 'yardstick-apex-{slave}-daily-{stream}'
+ predefined-parameters:
+ DEPLOY_SCENARIO=os-odl_l2-fdio-ha
+ block: true
+ same-node: true
+ block-thresholds:
+ build-step-failure-threshold: 'never'
+ failure-threshold: 'never'
+ unstable-threshold: 'FAILURE'
+ - trigger-builds:
+ - project: 'apex-deploy-baremetal-os-nosdn-kvm-ha-{stream}'
+ predefined-parameters: |
+ BUILD_DIRECTORY=apex-build-{stream}/.build
+ OPNFV_CLEAN=yes
+ git-revision: true
+ same-node: true
+ block-thresholds:
+ build-step-failure-threshold: 'never'
+ block: true
+ - trigger-builds:
+ - project: 'functest-apex-{daily-slave}-daily-{stream}'
+ predefined-parameters:
+ DEPLOY_SCENARIO=os-nosdn-kvm-ha
+ block: true
+ same-node: true
+ block-thresholds:
+ build-step-failure-threshold: 'never'
+ failure-threshold: 'never'
+ unstable-threshold: 'FAILURE'
+ - trigger-builds:
+ - project: 'yardstick-apex-{slave}-daily-{stream}'
+ predefined-parameters:
+ DEPLOY_SCENARIO=os-nosdn-kvm-ha
+ block: true
+ same-node: true
+ block-thresholds:
+ build-step-failure-threshold: 'never'
+ failure-threshold: 'never'
+ unstable-threshold: 'FAILURE'
+ - trigger-builds:
+ - project: 'apex-deploy-baremetal-os-odl_l3-fdio-noha-{stream}'
+ predefined-parameters: |
+ BUILD_DIRECTORY=apex-build-{stream}/.build
+ OPNFV_CLEAN=yes
+ git-revision: true
+ same-node: true
+ block-thresholds:
+ build-step-failure-threshold: 'never'
+ block: true
+ - trigger-builds:
+ - project: 'functest-apex-{daily-slave}-daily-{stream}'
+ predefined-parameters:
+ DEPLOY_SCENARIO=os-odl_l3-fdio-noha
+ block: true
+ same-node: true
+ block-thresholds:
+ build-step-failure-threshold: 'never'
+ failure-threshold: 'never'
+ unstable-threshold: 'FAILURE'
+ - trigger-builds:
+ - project: 'yardstick-apex-{slave}-daily-{stream}'
+ predefined-parameters:
+ DEPLOY_SCENARIO=os-odl_l3-fdio-noha
+ block: true
+ same-node: true
+ block-thresholds:
+ build-step-failure-threshold: 'never'
+ failure-threshold: 'never'
+ unstable-threshold: 'FAILURE'
-# Colorado FDIO Daily
+# CSIT promote
- job-template:
- name: 'apex-daily-colorado'
+ name: 'apex-csit-promote-daily-{stream}'
- # Job template for daily build
+ # Job template for promoting CSIT Snapshots
#
# Required Variables:
# stream: branch with - in place of / (eg. stable)
# branch: branch (eg. stable)
- node: 'apex-daily-colorado'
+ node: '{daily-slave}'
disabled: false
parameters:
- project-parameter:
project: '{project}'
- branch: 'stable/colorado'
+ branch: '{branch}'
- apex-parameter:
- gs-pathname: '/colorado'
+ gs-pathname: '{gs-pathname}'
properties:
- - logrotate-default
- build-blocker:
use-build-blocker: true
block-level: 'NODE'
- 'apex-deploy.*'
- 'apex-build.*'
- 'apex-runner.*'
+ - 'apex-daily.*'
triggers:
- - 'apex-colorado'
+ - timed: '0 12 * * 0'
builders:
+ - 'apex-build'
- trigger-builds:
- - project: 'apex-build-colorado'
- git-revision: true
- current-parameters: true
- same-node: true
- block: true
- - trigger-builds:
- - project: 'apex-deploy-baremetal-os-odl_l2-fdio-ha-colorado'
+ - project: 'apex-deploy-virtual-os-odl_l3-csit-noha-{stream}'
predefined-parameters: |
- BUILD_DIRECTORY=apex-build-colorado/.build
+ BUILD_DIRECTORY=apex-csit-promote-daily-{stream}
OPNFV_CLEAN=yes
- git-revision: true
- same-node: true
- block-thresholds:
- build-step-failure-threshold: 'never'
+ git-revision: false
block: true
+ same-node: true
- trigger-builds:
- - project: 'functest-apex-apex-daily-colorado-daily-colorado'
- predefined-parameters:
- DEPLOY_SCENARIO=os-odl_l2-fdio-ha
+ - project: 'functest-apex-{daily-slave}-suite-{stream}'
+ predefined-parameters: |
+ DEPLOY_SCENARIO=os-odl_l3-nofeature-noha
+ FUNCTEST_SUITE_NAME=tempest_smoke_serial
block: true
same-node: true
- block-thresholds:
- build-step-failure-threshold: 'never'
- failure-threshold: 'never'
- unstable-threshold: 'FAILURE'
+ - shell:
+ !include-raw-escape: ./apex-snapshot-create.sh
+ - shell:
+ !include-raw-escape: ./apex-upload-artifact.sh
-# CSIT promote
+# FDIO promote
- job-template:
- name: 'apex-csit-promote-daily-{stream}'
+ name: 'apex-fdio-promote-daily-{stream}'
# Job template for promoting CSIT Snapshots
#
- 'apex-runner.*'
- 'apex-daily.*'
- triggers:
- - timed: '0 12 * * 0'
-
builders:
- 'apex-build'
- trigger-builds:
- - project: 'apex-deploy-virtual-os-odl_l3-csit-noha-{stream}'
+ - project: 'apex-deploy-virtual-os-odl_l2-fdio-noha-{stream}'
predefined-parameters: |
- BUILD_DIRECTORY=apex-csit-promote-daily-{stream}
+ BUILD_DIRECTORY=apex-fdio-promote-daily-{stream}
OPNFV_CLEAN=yes
git-revision: false
block: true
same-node: true
- - trigger-builds:
- - project: 'functest-apex-{daily-slave}-suite-{stream}'
- predefined-parameters: |
- DEPLOY_SCENARIO=os-odl_l3-nofeature-noha
- FUNCTEST_SUITE_NAME=tempest_smoke_serial
- block: true
- same-node: true
- shell:
!include-raw-escape: ./apex-snapshot-create.sh
- shell:
- trigger:
name: 'apex-master'
triggers:
- - timed: '0 3 * * *'
+ - timed: '0 3 * * 7'
- trigger:
- name: 'apex-colorado'
+ name: 'apex-danube'
triggers:
- timed: '0 12 * * *'
- trigger:
stream: danube
branch: 'stable/{stream}'
gs-pathname: '/{stream}'
- disabled: true
+ disabled: false
#--------------------------------
# POD, INSTALLER, AND BRANCH MAPPING
#--------------------------------
slave-label: arm-pod3
installer: fuel
<<: *danube
+ - arm-pod3-2:
+ slave-label: arm-pod3-2
+ installer: fuel
+ <<: *danube
#--------------------------------
# master
#--------------------------------
slave-label: arm-pod3
installer: fuel
<<: *master
+ - arm-pod3-2:
+ slave-label: arm-pod3-2
+ installer: fuel
+ <<: *master
#--------------------------------
# scenarios
#--------------------------------
- trigger:
name: 'fuel-os-odl_l2-nofeature-ha-armband-baremetal-master-trigger'
triggers:
- - timed: '0 8 * * 1,3,5,7'
+ - timed: '0 0 * * 1'
- trigger:
name: 'fuel-os-nosdn-nofeature-ha-armband-baremetal-master-trigger'
triggers:
- - timed: '0 16 * * 2,7'
+ - timed: '0 0 * * 2'
- trigger:
name: 'fuel-os-odl_l3-nofeature-ha-armband-baremetal-master-trigger'
triggers:
- - timed: '0 16 * * 1,4,6'
+ - timed: '0 0 * * 3'
- trigger:
name: 'fuel-os-odl_l2-bgpvpn-ha-armband-baremetal-master-trigger'
triggers:
- - timed: '0 8 * * 2,4,6'
+ - timed: '0 0 * * 4'
- trigger:
name: 'fuel-os-odl_l2-nofeature-noha-armband-baremetal-master-trigger'
triggers:
- - timed: '0 16 * * 3,5'
+ - timed: '0 0 * * 5'
- trigger:
name: 'fuel-os-odl_l2-sfc-ha-armband-baremetal-master-trigger'
triggers:
- trigger:
name: 'fuel-os-odl_l2-nofeature-ha-armband-baremetal-danube-trigger'
triggers:
- - timed: '0 0 * * 1'
+ - timed: '0 8 * * 1,4'
- trigger:
name: 'fuel-os-nosdn-nofeature-ha-armband-baremetal-danube-trigger'
triggers:
- - timed: '0 0 * * 2'
+ - timed: '0 16 * * 1,4'
- trigger:
name: 'fuel-os-odl_l2-bgpvpn-ha-armband-baremetal-danube-trigger'
triggers:
- - timed: '0 0 * * 4'
+ - timed: '0 8 * * 2,5'
- trigger:
name: 'fuel-os-odl_l3-nofeature-ha-armband-baremetal-danube-trigger'
triggers:
- - timed: '0 0 * * 3'
+ - timed: '0 16 * * 2,5'
- trigger:
name: 'fuel-os-odl_l2-nofeature-noha-armband-baremetal-danube-trigger'
triggers:
- - timed: '0 0 * * 5'
+ - timed: '0 8 * * 3,6'
- trigger:
name: 'fuel-os-odl_l2-sfc-ha-armband-baremetal-danube-trigger'
triggers:
- - timed: ''
+ - timed: '0 16 * * 3,6'
- trigger:
name: 'fuel-os-odl_l2-sfc-noha-armband-baremetal-danube-trigger'
triggers:
- - timed: ''
+ - timed: '0 8,16 * * 7'
#---------------------------------------------------------------
# Enea Armband CI Virtual Triggers running against master branch
#---------------------------------------------------------------
name: 'fuel-os-odl_l2-sfc-noha-arm-pod3-danube-trigger'
triggers:
- timed: ''
+#--------------------------------------------------------------------------
+# Enea Armband POD 3 Triggers running against master branch (aarch64 slave)
+#--------------------------------------------------------------------------
+- trigger:
+ name: 'fuel-os-odl_l2-nofeature-ha-arm-pod3-2-master-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-nosdn-nofeature-ha-arm-pod3-2-master-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l3-nofeature-ha-arm-pod3-2-master-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l2-bgpvpn-ha-arm-pod3-2-master-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l2-nofeature-noha-arm-pod3-2-master-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l2-sfc-ha-arm-pod3-2-master-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l2-sfc-noha-arm-pod3-2-master-trigger'
+ triggers:
+ - timed: ''
+#--------------------------------------------------------------------------
+# Enea Armband POD 3 Triggers running against danube branch (aarch64 slave)
+#--------------------------------------------------------------------------
+- trigger:
+ name: 'fuel-os-odl_l2-nofeature-ha-arm-pod3-2-danube-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-nosdn-nofeature-ha-arm-pod3-2-danube-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l3-nofeature-ha-arm-pod3-2-danube-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l2-bgpvpn-ha-arm-pod3-2-danube-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l2-nofeature-noha-arm-pod3-2-danube-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l2-sfc-ha-arm-pod3-2-danube-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l2-sfc-noha-arm-pod3-2-danube-trigger'
+ triggers:
+ - timed: ''
# set deployment parameters
export TMPDIR=${WORKSPACE}/tmpdir
+
+# arm-pod3-2 is an aarch64 jenkins slave for the same POD as the
+# x86 jenkins slave arm-pod3; therefore we use the same pod name
+# to deploy the pod from both jenkins slaves
+if [[ "${NODE_NAME}" == "arm-pod3-2" ]]; then
+ NODE_NAME="arm-pod3"
+fi
+
LAB_NAME=${NODE_NAME/-*}
POD_NAME=${NODE_NAME/*-}
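+# e.g. for NODE_NAME=arm-pod3 the expansions above give LAB_NAME=arm and POD_NAME=pod3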
- danube:
branch: 'stable/{stream}'
gs-pathname: '/{stream}'
- disabled: true
+ disabled: false
- job-template:
name: 'armband-{installer}-build-daily-{stream}'
pattern: 'ci/**'
- compare-type: ANT
pattern: 'patches/**'
+ disable-strict-forbidden-file-verification: 'true'
forbidden-file-paths:
- compare-type: ANT
pattern: 'docs/**'
branches:
- branch-compare-type: 'ANT'
branch-pattern: '**/{branch}'
+ disable-strict-forbidden-file-verification: 'true'
forbidden-file-paths:
- compare-type: ANT
pattern: 'docs/**|.gitignore'
- danube:
branch: 'stable/{stream}'
gs-pathname: '/{stream}'
- disabled: true
+ disabled: false
- job-template:
name: 'barometer-verify-{stream}'
branches:
- branch-compare-type: 'ANT'
branch-pattern: '**/{branch}'
+ disable-strict-forbidden-file-verification: 'true'
forbidden-file-paths:
- compare-type: ANT
pattern: 'docs/**|.gitignore'
branches:
- branch-compare-type: 'ANT'
branch-pattern: '**/{branch}'
+ disable-strict-forbidden-file-verification: 'true'
forbidden-file-paths:
- compare-type: ANT
pattern: 'docs/**'
suite:
- 'rubbos'
- 'vstf'
- - 'posca'
+ - 'posca_stress_traffic'
+ - 'posca_stress_ping'
jobs:
- 'bottlenecks-{installer}-{suite}-{pod}-daily-{stream}'
- builder:
name: bottlenecks-env-cleanup
builders:
- - shell: |
- #!/bin/bash
- set -e
- [[ $GERRIT_REFSPEC_DEBUG == true ]] && redirect="/dev/stdout" || redirect="/dev/null"
-
- echo "Bottlenecks: docker containers/images cleaning up"
- if [[ ! -z $(docker ps -a | grep opnfv/bottlenecks) ]]; then
- echo "removing existing opnfv/bottlenecks containers"
- docker ps -a | grep opnfv/bottlenecks | awk '{print $1}' | xargs docker rm -f >$redirect
- fi
-
- if [[ ! -z $(docker images | grep opnfv/bottlenecks) ]]; then
- echo "Bottlenecks: docker images to remove:"
- docker images | head -1 && docker images | grep opnfv/bottlenecks
- image_tags=($(docker images | grep opnfv/bottlenecks | awk '{print $2}'))
- for tag in "${image_tags[@]}"; do
- echo "Removing docker image opnfv/bottlenecks:$tag..."
- docker rmi opnfv/bottlenecks:$tag >$redirect
- done
- fi
+ - shell:
+ !include-raw: ./bottlenecks-cleanup.sh
- builder:
name: bottlenecks-run-suite
builders:
- - shell: |
- #!/bin/bash
- set -e
- [[ $GERRIT_REFSPEC_DEBUG == true ]] && redirect="/dev/stdout" || redirect="/dev/null"
-
- echo "Bottlenecks: to pull image opnfv/bottlenecks:${DOCKER_TAG}"
- docker pull opnfv/bottlenecks:$DOCKER_TAG >${redirect}
-
- echo "Bottlenecks: docker start running"
- opts="--privileged=true -id"
- envs="-e INSTALLER_TYPE=${INSTALLER_TYPE} -e INSTALLER_IP=${INSTALLER_IP} \
- -e NODE_NAME=${NODE_NAME} -e EXTERNAL_NET=${EXTERNAL_NETWORK} \
- -e BOTTLENECKS_BRANCH=${BOTTLENECKS_BRANCH} -e GERRIT_REFSPEC_DEBUG=${GERRIT_REFSPEC_DEBUG} \
- -e BOTTLENECKS_DB_TARGET=${BOTTLENECKS_DB_TARGET} -e PACKAGE_URL=${PACKAGE_URL}"
- cmd="sudo docker run ${opts} ${envs} opnfv/bottlenecks:${DOCKER_TAG} /bin/bash"
- echo "Bottlenecks: docker cmd running ${cmd}"
- ${cmd} >${redirect}
-
- echo "Bottlenecks: obtain docker id"
- container_id=$(docker ps | grep "opnfv/bottlenecks:${DOCKER_TAG}" | awk '{print $1}' | head -1)
- if [ -z ${container_id} ]; then
- echo "Cannot find opnfv/bottlenecks container ID ${container_id}. Please check if it exists."
- docker ps -a
- exit 1
- fi
-
- echo "Bottlenecks: to prepare openstack environment"
- prepare_env="${REPO_DIR}/ci/prepare_env.sh"
- echo "Bottlenecks: docker cmd running: ${prepare_env}"
- sudo docker exec ${container_id} ${prepare_env}
-
- echo "Bottlenecks: to run testsuite ${SUITE_NAME}"
- run_testsuite="${REPO_DIR}/run_tests.sh -s ${SUITE_NAME}"
- echo "Bottlenecks: docker cmd running: ${run_testsuite}"
- sudo docker exec ${container_id} ${run_testsuite}
+ - shell:
+ !include-raw: ./bottlenecks-run-suite.sh
####################
# parameter macros
--- /dev/null
+#!/bin/bash
+set -e
+[[ $GERRIT_REFSPEC_DEBUG == true ]] && redirect="/dev/stdout" || redirect="/dev/null"
+
+BOTTLENECKS_IMAGE=opnfv/bottlenecks
+echo "Bottlenecks: docker containers/images cleaning up"
+
+dangling_images=($(docker images -f "dangling=true" | grep $BOTTLENECKS_IMAGE | awk '{print $3}'))
+if [[ -n $dangling_images ]]; then
+ echo "Removing $BOTTLENECKS_IMAGE:<none> dangling images and their containers"
+ docker images | head -1 && docker images | grep $dangling_images
+ for image_id in "${dangling_images[@]}"; do
+ echo "Bottlenecks: Removing dangling image $image_id"
+ docker rmi -f $image_id >${redirect}
+ done
+fi
+
+for image_id in "${dangling_images[@]}"; do
+ if [[ -n $(docker ps -a | grep $image_id) ]]; then
+ echo "Bottlenecks: Removing containers associated with dangling image: $image_id"
+ docker ps -a | head -1 && docker ps -a | grep $image_id
+ docker ps -a | grep $image_id | awk '{print $1}'| xargs docker rm -f >${redirect}
+ fi
+done
+
+if [[ -n $(docker ps -a | grep $BOTTLENECKS_IMAGE) ]]; then
+ echo "Removing existing $BOTTLENECKS_IMAGE containers"
+ docker ps -a | grep $BOTTLENECKS_IMAGE | awk '{print $1}' | xargs docker rm -f >$redirect
+fi
+
+if [[ -n $(docker images | grep $BOTTLENECKS_IMAGE) ]]; then
+ echo "Bottlenecks: docker images to remove:"
+ docker images | head -1 && docker images | grep $BOTTLENECKS_IMAGE
+ image_tags=($(docker images | grep $BOTTLENECKS_IMAGE | awk '{print $2}'))
+ for tag in "${image_tags[@]}"; do
+ echo "Removing docker image $BOTTLENECKS_IMAGE:$tag..."
+ docker rmi $BOTTLENECKS_IMAGE:$tag >$redirect
+ done
+fi
+
+echo "Yardstick: docker containers/images cleaning up"
+YARDSTICK_IMAGE=opnfv/yardstick
+
+dangling_images=($(docker images -f "dangling=true" | grep $YARDSTICK_IMAGE | awk '{print $3}'))
+if [[ -n $dangling_images ]]; then
+ echo "Removing $YARDSTICK_IMAGE:<none> dangling images and their containers"
+ docker images | head -1 && docker images | grep $dangling_images
+ for image_id in "${dangling_images[@]}"; do
+ echo "Yardstick: Removing dangling image $image_id"
+ docker rmi -f $image_id >${redirect}
+ done
+fi
+
+for image_id in "${dangling_images[@]}"; do
+ if [[ -n $(docker ps -a | grep $image_id) ]]; then
+ echo "Yardstick: Removing containers associated with dangling image: $image_id"
+ docker ps -a | head -1 && docker ps -a | grep $image_id
+ docker ps -a | grep $image_id | awk '{print $1}'| xargs docker rm -f >${redirect}
+ fi
+done
+
+if [[ -n $(docker ps -a | grep $YARDSTICK_IMAGE) ]]; then
+ echo "Removing existing $YARDSTICK_IMAGE containers"
+ docker ps -a | grep $YARDSTICK_IMAGE | awk '{print $1}' | xargs docker rm -f >$redirect
+fi
+
+if [[ -n $(docker images | grep $YARDSTICK_IMAGE) ]]; then
+ echo "Yardstick: docker images to remove:"
+ docker images | head -1 && docker images | grep $YARDSTICK_IMAGE
+ image_tags=($(docker images | grep $YARDSTICK_IMAGE | awk '{print $2}'))
+ for tag in "${image_tags[@]}"; do
+ echo "Removing docker image $YARDSTICK_IMAGE:$tag..."
+ docker rmi $YARDSTICK_IMAGE:$tag >$redirect
+ done
+fi
+
+echo "InfluxDB: docker containers/images cleaning up"
+INFLUXDB_IMAGE=tutum/influxdb
+
+dangling_images=($(docker images -f "dangling=true" | grep $INFLUXDB_IMAGE | awk '{print $3}'))
+if [[ -n $dangling_images ]]; then
+ echo "Removing $INFLUXDB_IMAGE:<none> dangling images and their containers"
+ docker images | head -1 && docker images | grep $dangling_images
+ for image_id in "${dangling_images[@]}"; do
+ echo "InfluxDB: Removing dangling image $image_id"
+ docker rmi -f $image_id >${redirect}
+ done
+fi
+
+for image_id in "${dangling_images[@]}"; do
+ if [[ -n $(docker ps -a | grep $image_id) ]]; then
+ echo "InfluxDB: Removing containers associated with dangling image: $image_id"
+ docker ps -a | head -1 && docker ps -a | grep $image_id
+ docker ps -a | grep $image_id | awk '{print $1}'| xargs docker rm -f >${redirect}
+ fi
+done
+
+if [[ -n $(docker ps -a | grep $INFLUXDB_IMAGE) ]]; then
+ echo "Removing existing $INFLUXDB_IMAGE containers"
+ docker ps -a | grep $INFLUXDB_IMAGE | awk '{print $1}' | xargs docker rm -f >$redirect
+fi
+
+if [[ -n $(docker images | grep $INFLUXDB_IMAGE) ]]; then
+ echo "InfluxDB: docker images to remove:"
+ docker images | head -1 && docker images | grep $INFLUXDB_IMAGE
+ image_tags=($(docker images | grep $INFLUXDB_IMAGE | awk '{print $2}'))
+ for tag in "${image_tags[@]}"; do
+ echo "Removing docker image $INFLUXDB_IMAGE:$tag..."
+ docker rmi $INFLUXDB_IMAGE:$tag >$redirect
+ done
+fi
\ No newline at end of file
suite:
- 'rubbos'
- 'vstf'
- - 'posca'
+ - 'posca_stress_traffic'
+ - 'posca_stress_ping'
################################
# job templates
--- /dev/null
+#!/bin/bash
+#set -e
+[[ $GERRIT_REFSPEC_DEBUG == true ]] && redirect="/dev/stdout" || redirect="/dev/null"
+BOTTLENECKS_IMAGE=opnfv/bottlenecks
+
+if [[ $SUITE_NAME == rubbos || $SUITE_NAME == vstf ]]; then
+ echo "Bottlenecks: to pull image $BOTTLENECKS_IMAGE:${DOCKER_TAG}"
+ docker pull $BOTTLENECKS_IMAGE:$DOCKER_TAG >${redirect}
+
+ echo "Bottlenecks: docker start running"
+ opts="--privileged=true -id"
+ envs="-e INSTALLER_TYPE=${INSTALLER_TYPE} -e INSTALLER_IP=${INSTALLER_IP} \
+ -e NODE_NAME=${NODE_NAME} -e EXTERNAL_NET=${EXTERNAL_NETWORK} \
+ -e BOTTLENECKS_BRANCH=${BOTTLENECKS_BRANCH} -e GERRIT_REFSPEC_DEBUG=${GERRIT_REFSPEC_DEBUG} \
+ -e BOTTLENECKS_DB_TARGET=${BOTTLENECKS_DB_TARGET} -e PACKAGE_URL=${PACKAGE_URL}"
+ cmd="sudo docker run ${opts} ${envs} $BOTTLENECKS_IMAGE:${DOCKER_TAG} /bin/bash"
+ echo "Bottlenecks: docker cmd running ${cmd}"
+ ${cmd} >${redirect}
+
+ echo "Bottlenecks: obtain docker id"
+ container_id=$(docker ps | grep "$BOTTLENECKS_IMAGE:${DOCKER_TAG}" | awk '{print $1}' | head -1)
+ if [ -z "${container_id}" ]; then
+ echo "Cannot find $BOTTLENECKS_IMAGE container ID ${container_id}. Please check if it exists."
+ docker ps -a
+ exit 1
+ fi
+
+ echo "Bottlenecks: to prepare openstack environment"
+ prepare_env="${REPO_DIR}/ci/prepare_env.sh"
+ echo "Bottlenecks: docker cmd running: ${prepare_env}"
+ sudo docker exec ${container_id} ${prepare_env}
+
+ echo "Bottlenecks: to run testsuite ${SUITE_NAME}"
+ run_testsuite="${REPO_DIR}/run_tests.sh -s ${SUITE_NAME}"
+ echo "Bottlenecks: docker cmd running: ${run_testsuite}"
+ sudo docker exec ${container_id} ${run_testsuite}
+else
+ echo "Bottlenecks: installing POSCA docker-compose"
+ if [ -e /usr/local/bin/docker-compose ]; then
+ rm -rf /usr/local/bin/docker-compose
+ fi
+ curl -L https://github.com/docker/compose/releases/download/1.11.0/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose
+ chmod +x /usr/local/bin/docker-compose
+
+ echo "Bottlenecks: composing up dockers"
+ cd $WORKSPACE
+ docker-compose -f $WORKSPACE/docker/bottleneck-compose/docker-compose.yml up -d
+
+ echo "Bottlenecks: running traffic stress/factor testing in posca testsuite "
+ POSCA_SCRIPT=/home/opnfv/bottlenecks/testsuites/posca
+ if [[ $SUITE_NAME == posca_stress_traffic ]]; then
+ TEST_CASE=posca_factor_system_bandwidth
+ echo "Bottlenecks: pulling tutum/influxdb for yardstick"
+ docker pull tutum/influxdb:0.13
+ sleep 5
+ docker exec bottleneckcompose_bottlenecks_1 python ${POSCA_SCRIPT}/run_posca.py testcase $TEST_CASE
+ elif [[ $SUITE_NAME == posca_stress_ping ]]; then
+ TEST_CASE=posca_stress_ping
+ sleep 5
+ docker exec bottleneckcompose_bottlenecks_1 python ${POSCA_SCRIPT}/run_posca.py testcase $TEST_CASE
+ fi
+
+ echo "Bottlenecks: cleaning up docker-compose images and dockers"
+ docker-compose -f $WORKSPACE/docker/bottleneck-compose/docker-compose.yml down --rmi all
+fi
\ No newline at end of file
stream: danube
branch: 'stable/{stream}'
gs-pathname: '/{stream}'
- disabled: true
+ disabled: false
#--------------------------------
# POD, INSTALLER, AND BRANCH MAPPING
#--------------------------------
<<: *master
- baremetal:
slave-label: compass-baremetal
- os-version: 'trusty'
+ os-version: 'xenial'
<<: *danube
- virtual:
slave-label: compass-virtual
- os-version: 'trusty'
+ os-version: 'xenial'
<<: *danube
#--------------------------------
# master
- 'os-nosdn-kvm-ha':
disabled: false
auto-trigger-name: 'compass-{scenario}-{pod}-{stream}-trigger'
+ - 'os-nosdn-openo-ha':
+ disabled: false
+ auto-trigger-name: 'compass-{scenario}-{pod}-{stream}-trigger'
+
jobs:
- 'compass-{scenario}-{pod}-daily-{stream}'
name: 'compass-os-nosdn-nofeature-ha-baremetal-centos-master-trigger'
triggers:
- timed: '0 19 * * *'
+- trigger:
+ name: 'compass-os-nosdn-openo-ha-baremetal-centos-master-trigger'
+ triggers:
+ - timed: ''
- trigger:
name: 'compass-os-odl_l2-nofeature-ha-baremetal-centos-master-trigger'
triggers:
name: 'compass-os-nosdn-nofeature-ha-baremetal-master-trigger'
triggers:
- timed: '0 2 * * *'
+- trigger:
+ name: 'compass-os-nosdn-openo-ha-baremetal-master-trigger'
+ triggers:
+ - timed: '0 3 * * *'
- trigger:
name: 'compass-os-odl_l2-nofeature-ha-baremetal-master-trigger'
triggers:
- trigger:
name: 'compass-os-nosdn-nofeature-ha-baremetal-danube-trigger'
triggers:
- - timed: ''
+ - timed: '0 9 * * *'
+- trigger:
+ name: 'compass-os-nosdn-openo-ha-baremetal-danube-trigger'
+ triggers:
+ - timed: '0 13 * * *'
- trigger:
name: 'compass-os-odl_l2-nofeature-ha-baremetal-danube-trigger'
triggers:
- - timed: ''
+ - timed: '0 17 * * *'
- trigger:
name: 'compass-os-odl_l3-nofeature-ha-baremetal-danube-trigger'
triggers:
- - timed: ''
+ - timed: '0 21 * * *'
- trigger:
name: 'compass-os-onos-nofeature-ha-baremetal-danube-trigger'
triggers:
- - timed: ''
+ - timed: '0 1 * * *'
- trigger:
name: 'compass-os-ocl-nofeature-ha-baremetal-danube-trigger'
triggers:
- - timed: ''
+ - timed: '0 5 * * *'
- trigger:
name: 'compass-os-onos-sfc-ha-baremetal-danube-trigger'
triggers:
name: 'compass-os-nosdn-nofeature-ha-virtual-master-trigger'
triggers:
- timed: '0 21 * * *'
+- trigger:
+ name: 'compass-os-nosdn-openo-ha-virtual-master-trigger'
+ triggers:
+ - timed: '0 22 * * *'
- trigger:
name: 'compass-os-odl_l2-nofeature-ha-virtual-master-trigger'
triggers:
name: 'compass-os-nosdn-nofeature-ha-virtual-danube-trigger'
triggers:
- timed: '0 21 * * *'
+- trigger:
+ name: 'compass-os-nosdn-openo-ha-virtual-danube-trigger'
+ triggers:
+ - timed: '0 22 * * *'
- trigger:
name: 'compass-os-odl_l2-nofeature-ha-virtual-danube-trigger'
triggers:
export NETWORK_CONF_FILE=network_ocl.yml
elif [[ "${DEPLOY_SCENARIO}" =~ "-onos" ]]; then
export NETWORK_CONF_FILE=network_onos.yml
+elif [[ "${DEPLOY_SCENARIO}" =~ "-openo" ]]; then
+ export NETWORK_CONF_FILE=network_openo.yml
else
export NETWORK_CONF_FILE=network.yml
fi
- danube:
branch: 'stable/{stream}'
gs-pathname: '/{stream}'
- disabled: true
+ disabled: false
jobs:
- 'compass-build-iso-{stream}'
#####################################
jobs:
- 'compass-verify-{distro}-{stream}'
+ - 'compass-verify-k8-{distro}-{stream}'
- 'compass-verify-{phase}-{distro}-{stream}'
#####################################
# job templates
file-paths:
- compare-type: ANT
pattern: '**/*'
+ disable-strict-forbidden-file-verification: 'true'
forbidden-file-paths:
- compare-type: ANT
pattern: 'docs/**'
node-parameters: true
kill-phase-on: FAILURE
abort-all-job: true
+ - name: 'opnfv-yamllint-verify-{stream}'
+ current-parameters: true
+ node-parameters: true
+ kill-phase-on: FAILURE
+ abort-all-job: true
- multijob:
name: deploy-virtual
condition: SUCCESSFUL
kill-phase-on: NEVER
abort-all-job: true
+- job-template:
+ name: 'compass-verify-k8-{distro}-{stream}'
+
+ project-type: multijob
+
+ disabled: '{obj:disabled}'
+
+ concurrent: true
+
+ properties:
+ - logrotate-default
+ - throttle:
+ enabled: true
+ max-total: 4
+ max-per-node: 1
+ option: 'project'
+ - build-blocker:
+ use-build-blocker: true
+ blocking-jobs:
+ - 'compass-verify-[^-]*-[^-]*'
+ - 'compass-os-.*?-virtual-daily-.*?'
+ block-level: 'NODE'
+
+ scm:
+ - git-scm-gerrit
+
+ wrappers:
+ - ssh-agent-wrapper
+ - timeout:
+ timeout: 120
+ fail: true
+
+ triggers:
+ - gerrit:
+ server-name: 'gerrit.opnfv.org'
+ trigger-on:
+ - comment-added-contains-event:
+ comment-contains-value: 'check k8'
+ - comment-added-contains-event:
+ comment-contains-value: 'verify k8'
+ - comment-added-contains-event:
+ comment-contains-value: 'check kubernetes'
+ - comment-added-contains-event:
+ comment-contains-value: 'verify kubernetes'
+ projects:
+ - project-compare-type: 'ANT'
+ project-pattern: '{project}'
+ branches:
+ - branch-compare-type: 'ANT'
+ branch-pattern: '**/{branch}'
+ file-paths:
+ - compare-type: ANT
+ pattern: '**/*'
+ forbidden-file-paths:
+ - compare-type: ANT
+ pattern: 'docs/**'
+ readable-message: true
+
+ parameters:
+ - project-parameter:
+ project: '{project}'
+ branch: '{branch}'
+ - 'compass-virtual-defaults'
+ - '{installer}-defaults'
+ - 'compass-verify-defaults':
+ installer: '{installer}'
+ gs-pathname: '{gs-pathname}'
+ - string:
+ name: DEPLOY_SCENARIO
+ default: 'k8-nosdn-nofeature-ha'
+
+ builders:
+ - description-setter:
+ description: "Built on $NODE_NAME"
+ - multijob:
+ name: basic
+ condition: SUCCESSFUL
+ projects:
+ - name: 'opnfv-lint-verify-{stream}'
+ current-parameters: true
+ node-parameters: true
+ kill-phase-on: FAILURE
+ abort-all-job: true
+ - name: 'opnfv-yamllint-verify-{stream}'
+ current-parameters: true
+ node-parameters: true
+ kill-phase-on: FAILURE
+ abort-all-job: true
+ - multijob:
+ name: deploy-virtual
+ condition: SUCCESSFUL
+ projects:
+ - name: 'compass-verify-deploy-virtual-{distro}-{stream}'
+ current-parameters: true
+ predefined-parameters: |
+ COMPASS_OS_VERSION={os-version}
+ node-parameters: true
+ kill-phase-on: FAILURE
+ abort-all-job: true
+
- job-template:
name: 'compass-verify-{phase}-{distro}-{stream}'
branches:
- branch-compare-type: 'ANT'
branch-pattern: '**/{branch}'
+ disable-strict-forbidden-file-verification: 'true'
forbidden-file-paths:
- compare-type: ANT
pattern: 'docs/**|.gitignore'
branches:
- branch-compare-type: 'ANT'
branch-pattern: '**/{branch}'
+ disable-strict-forbidden-file-verification: 'true'
forbidden-file-paths:
- compare-type: ANT
pattern: 'docs/**|.gitignore'
set -o nounset
set -o pipefail
- cd $WORKSPACE/ci
- shellcheck -f tty tests/*.sh
+ # shellcheck -f tty tests/*.sh
builders:
- shell: |
#!/bin/bash
- set +e
- # TODO: need to figure out the logic to get ${CONTROLLER_IP} used below
+ set -o errexit
+ set -o nounset
+ set -o pipefail
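+ # find the undercloud VM's IP: look up the MAC of its NIC on the libvirt
+ # 'default' network and match it against the ARP table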
+ undercloud_mac=$(sudo virsh domiflist undercloud | grep default | \
+ grep -Eo "[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+")
+ INSTALLER_IP=$(/usr/sbin/arp -e | grep ${undercloud_mac} | awk '{print $1}')
+ sudo scp $INSTALLER_IP:/home/stack/stackrc /tmp/stackrc
+ source /tmp/stackrc
+
+ # robot suites need the ssh key to log in to controller nodes, so throwing it
+ # in tmp, and mounting /tmp as $HOME as far as robot is concerned
+ sudo mkdir -p /tmp/.ssh
+ sudo scp $INSTALLER_IP:/home/stack/.ssh/id_rsa /tmp/.ssh/
+ sudo chmod -R 0600 /tmp/.ssh
+
+ # cbench requires the openflow drop test feature to be installed.
+ sshpass -p karaf ssh -o StrictHostKeyChecking=no \
+ -o UserKnownHostsFile=/dev/null \
+ -o LogLevel=error \
+ -p 8101 karaf@$SDN_CONTROLLER_IP \
+ feature:install odl-openflowplugin-flow-services-ui odl-openflowplugin-drop-test
+
docker pull opnfv/cperf:$DOCKER_TAG
- robot_cmd="pybot -e exclude -v ODL_SYSTEM_IP:${CONTROLLER_IP} -v switch_count:100 -v loops:10 \
- -v TOOLS_SYSTEM_IP:localhost -v duration_in_seconds:60"
+
+ robot_cmd="pybot -e exclude -L TRACE \
+ -v ODL_SYSTEM_1_IP:${SDN_CONTROLLER_IP} \
+ -v ODL_SYSTEM_IP:${SDN_CONTROLLER_IP} \
+ -v BUNDLEFOLDER:/opt/opendaylight \
+ -v RESTCONFPORT:8081 \
+ -v USER_HOME:/tmp \
+ -v USER:heat-admin \
+ -v ODL_SYSTEM_USER:heat-admin \
+ -v TOOLS_SYSTEM_IP:localhost \
+ -v of_port:6653"
robot_suite="/home/opnfv/repos/odl_test/csit/suites/openflowplugin/Performance/010_Cbench.robot"
- docker run opnfv/cperf:$DOCKER_TAG ${robot_cmd} ${robot_suite}
+
+ docker run -v /tmp:/tmp opnfv/cperf:$DOCKER_TAG ${robot_cmd} ${robot_suite}
- builder:
name: cperf-cleanup
--- /dev/null
+# jenkins job templates for Daisy
+# TODO
+# [ ] enable baremetal jobs after baremetal deployment is finished
+# [ ] enable jobs in danube
+# [ ] add more scenarios
+# [ ] integration with yardstick
+
+- project:
+
+ name: 'daisy'
+ project: '{name}'
+ installer: '{name}'
+
+#--------------------------------
+# BRANCH ANCHORS
+#--------------------------------
+ master: &master
+ stream: master
+ branch: '{stream}'
+ disabled: false
+ gs-pathname: ''
+#--------------------------------
+# POD, INSTALLER, AND BRANCH MAPPING
+#--------------------------------
+# CI PODs
+#--------------------------------
+ pod:
+ - baremetal:
+ slave-label: daisy-baremetal
+ <<: *master
+ - virtual:
+ slave-label: daisy-virtual
+ <<: *master
+#--------------------------------
+# None-CI PODs
+#--------------------------------
+
+#--------------------------------
+# scenarios
+#--------------------------------
+ scenario:
+ # HA scenarios
+ - 'os-nosdn-nofeature-ha':
+ auto-trigger-name: 'daisy-{scenario}-{pod}-daily-{stream}-trigger'
+ # NOHA scenarios
+ - 'os-nosdn-nofeature-noha':
+ auto-trigger-name: 'daisy-{scenario}-{pod}-daily-{stream}-trigger'
+
+ jobs:
+ - '{project}-{scenario}-{pod}-daily-{stream}'
+ - '{project}-deploy-{pod}-daily-{stream}'
+
+########################
+# job templates
+########################
+- job-template:
+ name: '{project}-{scenario}-{pod}-daily-{stream}'
+
+ disabled: '{obj:disabled}'
+
+ concurrent: false
+
+ properties:
+ - logrotate-default
+ - throttle:
+ enabled: true
+ max-total: 4
+ max-per-node: 1
+ option: 'project'
+ - build-blocker:
+ use-build-blocker: true
+ blocking-jobs:
+ - 'daisy.*-deploy-({pod})?-daily-.*'
+ block-level: 'NODE'
+
+ wrappers:
+ - build-name:
+ name: '$BUILD_NUMBER - Scenario: $DEPLOY_SCENARIO'
+
+ triggers:
+ - '{auto-trigger-name}'
+
+ parameters:
+ - project-parameter:
+ project: '{project}'
+ branch: '{branch}'
+ - '{installer}-defaults'
+ - '{slave-label}-defaults':
+ installer: '{installer}'
+ - string:
+ name: DEPLOY_SCENARIO
+ default: '{scenario}'
+ - 'daisy-project-parameter':
+ gs-pathname: '{gs-pathname}'
+
+ builders:
+ - description-setter:
+ description: "POD: $NODE_NAME"
+ - trigger-builds:
+ - project: 'daisy-deploy-{pod}-daily-{stream}'
+ current-parameters: false
+ predefined-parameters:
+ DEPLOY_SCENARIO={scenario}
+ same-node: true
+ block: true
+ - trigger-builds:
+ - project: 'functest-daisy-{pod}-daily-{stream}'
+ current-parameters: false
+ predefined-parameters:
+ DEPLOY_SCENARIO={scenario}
+ same-node: true
+ block: true
+ block-thresholds:
+ build-step-failure-threshold: 'never'
+ failure-threshold: 'never'
+ unstable-threshold: 'FAILURE'
+
+- job-template:
+ name: '{project}-deploy-{pod}-daily-{stream}'
+
+ disabled: '{obj:disabled}'
+
+ concurrent: true
+
+ properties:
+ - logrotate-default
+ - throttle:
+ enabled: true
+ max-total: 4
+ max-per-node: 1
+ option: 'project'
+ - build-blocker:
+ use-build-blocker: true
+ blocking-jobs:
+ - 'daisy.*-deploy-({pod})?-daily-.*'
+ block-level: 'NODE'
+
+ parameters:
+ - project-parameter:
+ project: '{project}'
+ branch: '{branch}'
+ - '{installer}-defaults'
+ - '{slave-label}-defaults':
+ installer: '{installer}'
+ - string:
+ name: DEPLOY_SCENARIO
+ default: 'os-nosdn-nofeature-ha'
+ - 'daisy-project-parameter':
+ gs-pathname: '{gs-pathname}'
+ - string:
+ name: DEPLOY_TIMEOUT
+ default: '150'
+ description: 'Deployment timeout in minutes'
+
+ scm:
+ - git-scm
+
+ wrappers:
+ - build-name:
+ name: '$BUILD_NUMBER - Scenario: $DEPLOY_SCENARIO'
+
+ builders:
+ - description-setter:
+ description: "POD: $NODE_NAME"
+ - shell:
+ !include-raw-escape: ./daisy4nfv-download-artifact.sh
+ - shell:
+ !include-raw-escape: ./daisy-deploy.sh
+
+
+########################
+# trigger macros
+########################
+#-----------------------------------------------
+# Triggers for job running on daisy-baremetal against master branch
+#-----------------------------------------------
+# HA Scenarios
+- trigger:
+ name: 'daisy-os-nosdn-nofeature-ha-baremetal-daily-master-trigger'
+ triggers:
+ - timed: ''
+# NOHA Scenarios
+- trigger:
+ name: 'daisy-os-nosdn-nofeature-noha-baremetal-daily-master-trigger'
+ triggers:
+ - timed: ''
+#-----------------------------------------------
+# Triggers for job running on daisy-virtual against master branch
+#-----------------------------------------------
+- trigger:
+ name: 'daisy-os-nosdn-nofeature-ha-virtual-daily-master-trigger'
+ triggers:
+ - timed: ''
+# NOHA Scenarios
+- trigger:
+ name: 'daisy-os-nosdn-nofeature-noha-virtual-daily-master-trigger'
+ triggers:
+ - timed: 'H 8,22 * * *'
+
--- /dev/null
+#!/bin/bash
+set -o nounset
+set -o pipefail
+
+echo "--------------------------------------------------------"
+echo "This is $INSTALLER_TYPE deploy job!"
+echo "--------------------------------------------------------"
+
+DEPLOY_SCENARIO=${DEPLOY_SCENARIO:-"os-nosdn-nofeature-ha"}
+BRIDGE=${BRIDGE:-pxebr}
+LAB_NAME=${NODE_NAME/-*}
+POD_NAME=${NODE_NAME/*-}
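+# e.g. for NODE_NAME=zte-virtual1 the expansions above give LAB_NAME=zte and POD_NAME=virtual1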
+deploy_ret=0
+
+if [[ ! "$NODE_NAME" =~ "-virtual" ]] && [[ ! "$LAB_NAME" =~ (zte) ]]; then
+ echo "Unsupported lab $LAB_NAME for now, Cannot continue!"
+ exit $deploy_ret
+fi
+
+# clone the securedlab repo
+cd $WORKSPACE
+BASE_DIR=$(cd ./;pwd)
+
+echo "Cloning securedlab repo $BRANCH"
+git clone ssh://jenkins-zte@gerrit.opnfv.org:29418/securedlab --quiet \
+ --branch $BRANCH
+
+# daisy's ci/deploy/deploy.sh uses the $BASE_DIR/labs dir
+cp -r securedlab/labs .
+
+DEPLOY_COMMAND="sudo ./ci/deploy/deploy.sh -b $BASE_DIR \
+ -l $LAB_NAME -p $POD_NAME -B $BRIDGE"
+
+# log info to console
+echo """
+Deployment parameters
+--------------------------------------------------------
+Scenario: $DEPLOY_SCENARIO
+LAB: $LAB_NAME
+POD: $POD_NAME
+BRIDGE: $BRIDGE
+BASE_DIR: $BASE_DIR
+
+Starting the deployment using $INSTALLER_TYPE. This could take some time...
+--------------------------------------------------------
+Issuing command
+$DEPLOY_COMMAND
+"""
+
+# start the deployment
+$DEPLOY_COMMAND
+
+if [ $? -ne 0 ]; then
+ echo
+ echo "Depolyment failed!"
+ deploy_ret=1
+else
+ echo
+ echo "--------------------------------------------------------"
+ echo "Deployment done!"
+fi
+
+exit $deploy_ret
- danube:
branch: 'stable/{stream}'
gs-pathname: '/{stream}'
- disabled: true
+ disabled: false
phase:
- 'build':
- shell:
!include-raw: ./daisy4nfv-download-artifact.sh
- shell:
- !include-raw: ./daisy4nfv-deploy.sh
+ !include-raw: ./daisy-deploy.sh
- builder:
name: 'daisy-test-daily-macro'
echo "OPNFV_GIT_URL=$(git config --get remote.origin.url)"
echo "OPNFV_GIT_SHA1=$(git rev-parse HEAD)"
echo "OPNFV_ARTIFACT_URL=$GS_URL/opnfv-$OPNFV_ARTIFACT_VERSION.bin"
+ echo "OPNFV_ARTIFACT_SHA512SUM=$(sha512sum $OUTPUT_DIR/opnfv-$OPNFV_ARTIFACT_VERSION.bin | cut -d' ' -f1)"
echo "OPNFV_BUILD_URL=$BUILD_URL"
) > $WORKSPACE/opnfv.properties
+++ /dev/null
-#!/bin/bash
-
-echo "Daisy deployment WIP"
set -o pipefail
# use the proxy URL instead of the normal URL, since googleusercontent.com may be blocked randomly
-[[ "$NODE_NAME" =~ (zte) ]] && GS_URL=$GS_BASE_PROXY
+[[ "$NODE_NAME" =~ (zte) ]] && GS_URL=${GS_BASE_PROXY%%/*}/$GS_URL
if [[ "$JOB_NAME" =~ "merge" ]]; then
echo "Downloading http://$GS_URL/opnfv-gerrit-$GERRIT_CHANGE_NUMBER.properties"
[[ "$NODE_NAME" =~ (zte) ]] && OPNFV_ARTIFACT_URL=${GS_BASE_PROXY%%/*}/$OPNFV_ARTIFACT_URL
+if [[ ! "$JOB_NAME" =~ (verify|merge) ]]; then
+ # check if we already have the image to avoid redownload
+ BINSTORE="/bin_mount/opnfv_ci/${BRANCH##*/}"
+ if [[ -f "$BINSTORE/$OPNFV_ARTIFACT" && ! -z $OPNFV_ARTIFACT_SHA512SUM ]]; then
+ echo "BIN exists locally. Starting to check the sha512sum."
+ if [[ $OPNFV_ARTIFACT_SHA512SUM = $(sha512sum -b $BINSTORE/$OPNFV_ARTIFACT | cut -d' ' -f1) ]]; then
+ echo "Sha512sum is verified. Skipping the download and using the file from BIN store."
+ ln -s $BINSTORE/$OPNFV_ARTIFACT $WORKSPACE/opnfv.bin
+ echo "--------------------------------------------------------"
+ echo
+ ls -al $WORKSPACE/opnfv.bin
+ echo
+ echo "--------------------------------------------------------"
+ echo "Done!"
+ exit 0
+ fi
+ fi
+fi
+
# log info to console
echo "Downloading the $INSTALLER_TYPE artifact using URL http://$OPNFV_ARTIFACT_URL"
echo "This could take some time..."
echo
# download the file
-curl -s -o $WORKSPACE/opnfv.bin http://$OPNFV_ARTIFACT_URL > gsutil.bin.log 2>&1
+curl -L -s -o $WORKSPACE/opnfv.bin http://$OPNFV_ARTIFACT_URL > gsutil.bin.log 2>&1
# list the file
ls -al $WORKSPACE/opnfv.bin
- danube:
branch: 'stable/{stream}'
gs-pathname: '/{stream}'
- disabled: true
+ disabled: false
#####################################
# patch merge phases
#####################################
pattern: 'code/**'
- compare-type: ANT
pattern: 'deploy/**'
+ disable-strict-forbidden-file-verification: 'true'
forbidden-file-paths:
- compare-type: ANT
pattern: 'docs/**'
- shell:
!include-raw: ./daisy4nfv-download-artifact.sh
- shell:
- !include-raw: ./daisy4nfv-virtual-deploy.sh
+ !include-raw: ./daisy-deploy.sh
- shell:
!include-raw: ./daisy4nfv-workspace-cleanup.sh
- danube:
branch: 'stable/{stream}'
gs-pathname: '/{stream}'
- disabled: true
+ disabled: false
#####################################
# patch verification phases
#####################################
pattern: 'code/**'
- compare-type: ANT
pattern: 'deploy/**'
+ disable-strict-forbidden-file-verification: 'true'
forbidden-file-paths:
- compare-type: ANT
pattern: 'docs/**'
+++ /dev/null
-#!/bin/bash
-
-echo "--------------------------------------------------------"
-echo "This is diasy4nfv virtual deploy job!"
-echo "--------------------------------------------------------"
-
-cd $WORKSPACE
-
-if [[ "$NODE_NAME" =~ "-virtual" ]]; then
- export NETWORK_CONF=./deploy/config/vm_environment/$NODE_NAME/network.yml
- export DHA_CONF=./deploy/config/vm_environment/$NODE_NAME/deploy.yml
-else
- # TODO: For the time being, we need to pass this script to let contributors merge their work.
- echo "No support for non-virtual node"
- exit 0
-fi
-
-sudo ./ci/deploy/deploy.sh -d ${DHA_CONF} -n ${NETWORK_CONF} -p ${NODE_NAME:-"zte-virtual1"}
-
-if [ $? -ne 0 ]; then
- echo "depolyment failed!"
- deploy_ret=1
-fi
-
-echo
-echo "--------------------------------------------------------"
-echo "Done!"
-
-exit $deploy_ret
- fuel:
slave-label: 'ool-virtual2'
pod: 'ool-virtual2'
- - joid:
- slave-label: 'ool-virtual3'
- pod: 'ool-virtual3'
+ #- joid:
+ # slave-label: 'ool-virtual3'
+ # pod: 'ool-virtual3'
inspector:
- 'sample'
builders:
- 'clean-workspace-log'
+ - shell: |
+ # NOTE: Create a symbolic link so that we can archive files outside
+ # of $WORKSPACE.
+ # NOTE: We are printing all logs under 'tests/' during the test run,
+ # so this symbolic link should not be in 'tests/'. Otherwise,
+ # we'll have the same log twice in the jenkins console log.
+ ln -sfn $HOME/opnfv/functest/results/{stream} functest_results
- 'functest-suite-builder'
- shell: |
functest_log="$HOME/opnfv/functest/results/{stream}/{project}.log"
- to_be_archived="$WORKSPACE/tests/functest-{project}.log"
- cp $functest_log $to_be_archived
# NOTE: checking the test result, as the previous job could return
# 0 regardless of the result of the doctor test scenario.
grep -e ' OK$' $functest_log || exit 1
publishers:
- archive:
artifacts: 'tests/*.log'
+ - archive:
+ artifacts: 'functest_results/{project}.log'
#####################################
branches:
- branch-compare-type: 'ANT'
branch-pattern: '**/{branch}'
+ disable-strict-forbidden-file-verification: 'true'
forbidden-file-paths:
- compare-type: ANT
pattern: 'docs/**|.gitignore'
dovetail-branch: '{stream}'
gs-pathname: ''
docker-tag: 'latest'
- colorado: &colorado
- stream: colorado
+ danube: &danube
+ stream: danube
branch: 'stable/{stream}'
dovetail-branch: master
gs-pathname: '/{stream}'
slave-label: fuel-baremetal
SUT: fuel
auto-trigger-name: 'daily-trigger-disabled'
- <<: *colorado
+ <<: *danube
- virtual:
slave-label: fuel-virtual
SUT: fuel
auto-trigger-name: 'daily-trigger-disabled'
- <<: *colorado
+ <<: *danube
#compass CI PODs
- baremetal:
slave-label: compass-baremetal
slave-label: compass-baremetal
SUT: compass
auto-trigger-name: 'daily-trigger-disabled'
- <<: *colorado
+ <<: *danube
- virtual:
slave-label: compass-virtual
SUT: compass
auto-trigger-name: 'daily-trigger-disabled'
- <<: *colorado
-#apex CI PODs
- - apex-verify-master:
- slave-label: '{pod}'
- SUT: apex
- auto-trigger-name: 'daily-trigger-disabled'
- <<: *master
- - apex-daily-master:
+ <<: *danube
+#--------------------------------
+# Installers not using labels
+# CI PODs
+# This section should only contain the installers
+# that have not been switched using labels for slaves
+#--------------------------------
+#apex PODs
+ - lf-pod1:
slave-label: '{pod}'
SUT: apex
auto-trigger-name: 'daily-trigger-disabled'
<<: *master
- - apex-verify-colorado:
- slave-label: '{pod}'
- SUT: apex
- auto-trigger-name: 'daily-trigger-disabled'
- <<: *colorado
- - apex-daily-colorado:
+ - lf-pod1:
slave-label: '{pod}'
SUT: apex
auto-trigger-name: 'daily-trigger-disabled'
- <<: *colorado
+ <<: *danube
#armband CI PODs
- armband-baremetal:
slave-label: armband-baremetal
slave-label: armband-baremetal
SUT: fuel
auto-trigger-name: 'daily-trigger-disabled'
- <<: *colorado
+ <<: *danube
- armband-virtual:
slave-label: armband-virtual
SUT: fuel
auto-trigger-name: 'daily-trigger-disabled'
- <<: *colorado
+ <<: *danube
#--------------------------------
# None-CI PODs
#--------------------------------
branches:
- branch-compare-type: 'ANT'
branch-pattern: '**/{branch}'
+ disable-strict-forbidden-file-verification: 'true'
forbidden-file-paths:
- compare-type: ANT
pattern: 'docs/**|.gitignore'
branches:
- branch-compare-type: 'ANT'
branch-pattern: '**/{branch}'
+ disable-strict-forbidden-file-verification: 'true'
forbidden-file-paths:
- compare-type: ANT
pattern: 'docs/**|.gitignore'
branches:
- branch-compare-type: 'ANT'
branch-pattern: '**/{branch}'
+ disable-strict-forbidden-file-verification: 'true'
forbidden-file-paths:
- compare-type: ANT
pattern: 'docs/**|.gitignore'
+++ /dev/null
-#!/bin/bash
-set -o nounset
-
-echo "-----------------------------------------------------------------------"
-echo $GERRIT_CHANGE_COMMIT_MESSAGE
-echo "-----------------------------------------------------------------------"
-
-# proposal for specifying the scenario name in commit message
-# currently only 1 scenario name is supported but depending on
-# the need, it can be expanded, supporting multiple scenarios
-# using comma separated list or something
-SCENARIO_NAME_PATTERN="(?<=@scenario:).*?(?=@)"
-SCENARIO_NAME=(echo $GERRIT_CHANGE_COMMIT_MESSAGE | grep -oP "$SCENARIO_NAME_PATTERN")
-if [[ $? -ne 0 ]]; then
- echo "The patch verification will be done only with build!"
-else
- echo "Will run full verification; build, deploy, and smoke test using scenario $SCENARIO_NAME"
-fi
+++ /dev/null
-#!/bin/bash
-
-if [[ "$JOB_NAME" =~ (verify|merge|daily|weekly) ]]; then
- JOB_TYPE=${BASH_REMATCH[0]}
-else
- echo "Unable to determine job type!"
- exit 1
-fi
-
-echo "Not activated!"
danube: &danube
stream: danube
branch: 'stable/{stream}'
- disabled: true
+ disabled: false
gs-pathname: '/{stream}'
#--------------------------------
# POD, INSTALLER, AND BRANCH MAPPING
auto-trigger-name: 'fuel-{scenario}-{pod}-daily-{stream}-trigger'
- 'os-nosdn-kvm_ovs_dpdk-noha':
auto-trigger-name: 'fuel-{scenario}-{pod}-daily-{stream}-trigger'
+ - 'os-nosdn-kvm_ovs_dpdk_bar-noha':
+ auto-trigger-name: 'fuel-{scenario}-{pod}-daily-{stream}-trigger'
jobs:
- 'fuel-{scenario}-{pod}-daily-{stream}'
use-build-blocker: true
blocking-jobs:
- 'fuel-os-.*?-{pod}-daily-.*'
+ - 'fuel-os-.*?-{pod}-weekly-.*'
block-level: 'NODE'
wrappers:
builders:
- description-setter:
- description: "POD: $NODE_NAME"
+ description: "Built on $NODE_NAME"
- trigger-builds:
- project: 'fuel-deploy-{pod}-daily-{stream}'
current-parameters: false
blocking-jobs:
- 'fuel-deploy-{pod}-daily-.*'
- 'fuel-deploy-generic-daily-.*'
+ - 'fuel-deploy-{pod}-weekly-.*'
+ - 'fuel-deploy-generic-weekly-.*'
block-level: 'NODE'
parameters:
builders:
- description-setter:
- description: "POD: $NODE_NAME"
+ description: "Built on $NODE_NAME"
- shell:
!include-raw-escape: ./fuel-download-artifact.sh
- shell:
- trigger:
name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-baremetal-daily-master-trigger'
triggers:
- - timed: '30 16 * * *'
+ - timed: ''
+- trigger:
+ name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-baremetal-daily-master-trigger'
+ triggers:
+ - timed: ''
#-----------------------------------------------
# Triggers for job running on fuel-baremetal against danube branch
#-----------------------------------------------
name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-baremetal-daily-danube-trigger'
triggers:
- timed: ''
+- trigger:
+ name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-baremetal-daily-danube-trigger'
+ triggers:
+ - timed: ''
#-----------------------------------------------
# Triggers for job running on fuel-virtual against master branch
#-----------------------------------------------
- trigger:
name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-virtual-daily-master-trigger'
triggers:
- - timed: ''
+ - timed: '30 16 * * *'
+- trigger:
+ name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-virtual-daily-master-trigger'
+ triggers:
+ - timed: '30 20 * * *'
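# NOTE: the timed fields above are standard Jenkins cron expressions
# (minute hour day-of-month month day-of-week). As an illustration, assuming
# the Jenkins master keeps UTC time:
#   '30 16 * * *'  -> runs every day at 16:30
#   '30 20 * * *'  -> runs every day at 20:30
#   ''             -> trigger is defined but has no schedule (effectively disabled)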
#-----------------------------------------------
# Triggers for job running on fuel-virtual against danube branch
#-----------------------------------------------
name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-virtual-daily-danube-trigger'
triggers:
- timed: ''
+- trigger:
+ name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-virtual-daily-danube-trigger'
+ triggers:
+ - timed: ''
#-----------------------------------------------
# ZTE POD1 Triggers running against master branch
#-----------------------------------------------
name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-zte-pod1-daily-master-trigger'
triggers:
- timed: ''
+- trigger:
+ name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-zte-pod1-daily-master-trigger'
+ triggers:
+ - timed: ''
#-----------------------------------------------
# ZTE POD2 Triggers running against master branch
name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-zte-pod2-daily-master-trigger'
triggers:
- timed: ''
+- trigger:
+ name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-zte-pod2-daily-master-trigger'
+ triggers:
+ - timed: ''
#-----------------------------------------------
# ZTE POD3 Triggers running against master branch
#-----------------------------------------------
name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-zte-pod3-daily-master-trigger'
triggers:
- timed: ''
+- trigger:
+ name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-zte-pod3-daily-master-trigger'
+ triggers:
+ - timed: ''
#-----------------------------------------------
# ZTE POD1 Triggers running against danube branch
#-----------------------------------------------
name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-zte-pod1-daily-danube-trigger'
triggers:
- timed: ''
+- trigger:
+ name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-zte-pod1-daily-danube-trigger'
+ triggers:
+ - timed: ''
#-----------------------------------------------
# ZTE POD2 Triggers running against danube branch
name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-zte-pod2-daily-danube-trigger'
triggers:
- timed: ''
+- trigger:
+ name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-zte-pod2-daily-danube-trigger'
+ triggers:
+ - timed: ''
#-----------------------------------------------
# ZTE POD3 Triggers running against danube branch
#-----------------------------------------------
name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-zte-pod3-daily-danube-trigger'
triggers:
- timed: ''
+- trigger:
+ name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-zte-pod3-daily-danube-trigger'
+ triggers:
+ - timed: ''
+++ /dev/null
-#!/bin/bash
-
-if [[ "$JOB_NAME" =~ (verify|merge|daily|weekly) ]]; then
- JOB_TYPE=${BASH_REMATCH[0]}
-else
- echo "Unable to determine job type!"
- exit 1
-fi
-
-echo "Not activated!"
# upload logs for baremetal deployments
# work on virtual deployments is still ongoing, so we skip those for the time being
-if [[ "$JOB_NAME" =~ "baremetal-daily" ]]; then
+if [[ "$JOB_NAME" =~ (baremetal-daily|baremetal-weekly) ]]; then
echo "Uploading deployment logs"
gsutil cp $WORKSPACE/$FUEL_LOG_FILENAME gs://$GS_URL/logs/$FUEL_LOG_FILENAME > /dev/null 2>&1
echo "Logs are available as http://$GS_URL/logs/$FUEL_LOG_FILENAME"
- danube:
branch: 'stable/{stream}'
gs-pathname: '/{stream}'
- disabled: true
+ disabled: false
jobs:
- 'fuel-build-daily-{stream}'
pattern: 'build/**'
- compare-type: ANT
pattern: 'deploy/**'
+ disable-strict-forbidden-file-verification: 'true'
forbidden-file-paths:
- compare-type: ANT
pattern: 'docs/**'
pattern: 'build/**'
- compare-type: ANT
pattern: 'deploy/**'
+ disable-strict-forbidden-file-verification: 'true'
forbidden-file-paths:
- compare-type: ANT
pattern: 'docs/**'
+++ /dev/null
-#!/bin/bash
-
-if [[ "$JOB_NAME" =~ (verify|merge|daily|weekly) ]]; then
- JOB_TYPE=${BASH_REMATCH[0]}
-else
- echo "Unable to determine job type!"
- exit 1
-fi
-
-echo "Not activated!"
+++ /dev/null
-- project:
- # TODO: rename the project name
- # TODO: get rid of appended -exp from the remainder of the file
- name: 'fuel-verify-jobs-experimental'
-
- project: 'fuel'
-
- installer: 'fuel'
-#------------------------------------
-# branch definitions
-#------------------------------------
- # TODO: enable master once things settle
- stream-exp:
- - experimental:
- branch: 'stable/{stream-exp}'
- gs-pathname: '/{stream-exp}'
- disabled: false
-#------------------------------------
-# patch verification phases
-#------------------------------------
- phase:
- - 'basic':
- # this phase does basic commit message check, unit test and so on
- slave-label: 'opnfv-build'
- - 'build':
- # this phase builds artifacts if valid for given installer
- slave-label: 'opnfv-build-ubuntu'
- - 'deploy-virtual':
- # this phase does virtual deployment using the artifacts produced in previous phase
- slave-label: 'fuel-virtual'
- - 'smoke-test':
- # this phase runs functest smoke test
- slave-label: 'fuel-virtual'
-#------------------------------------
-# jobs
-#------------------------------------
- jobs:
- - 'fuel-verify-{stream-exp}'
- - 'fuel-verify-{phase}-{stream-exp}'
-#------------------------------------
-# job templates
-#------------------------------------
-- job-template:
- name: 'fuel-verify-{stream-exp}'
-
- project-type: multijob
-
- disabled: '{obj:disabled}'
-
- # TODO: this is valid for experimental only
- # enable concurrency for master once things settle
- concurrent: false
-
- properties:
- - logrotate-default
- - throttle:
- enabled: true
- max-total: 4
- option: 'project'
-
- scm:
- - git-scm-gerrit
-
- wrappers:
- - ssh-agent-wrapper
- - timeout:
- timeout: 360
- fail: true
-
- triggers:
- - gerrit:
- server-name: 'gerrit.opnfv.org'
- trigger-on:
- - patchset-created-event:
- exclude-drafts: 'false'
- exclude-trivial-rebase: 'false'
- exclude-no-code-change: 'false'
- - draft-published-event
- - comment-added-contains-event:
- comment-contains-value: 'recheck'
- - comment-added-contains-event:
- comment-contains-value: 'reverify'
- projects:
- - project-compare-type: 'ANT'
- project-pattern: '{project}'
- branches:
- - branch-compare-type: 'ANT'
- branch-pattern: '**/{branch}'
- file-paths:
- - compare-type: ANT
- pattern: 'ci/**'
- - compare-type: ANT
- pattern: 'build/**'
- - compare-type: ANT
- pattern: 'deploy/**'
- forbidden-file-paths:
- - compare-type: ANT
- pattern: 'docs/**'
- readable-message: true
-
- parameters:
- - project-parameter:
- project: '{project}'
- branch: '{branch}'
- - 'opnfv-build-defaults'
- - 'fuel-verify-defaults-exp':
- gs-pathname: '{gs-pathname}'
-
- builders:
- - description-setter:
- description: "Built on $NODE_NAME"
- - multijob:
- name: basic
- condition: SUCCESSFUL
- projects:
- - name: 'fuel-verify-basic-{stream-exp}'
- current-parameters: false
- predefined-parameters: |
- BRANCH=$BRANCH
- GERRIT_REFSPEC=$GERRIT_REFSPEC
- GERRIT_CHANGE_NUMBER=$GERRIT_CHANGE_NUMBER
- GERRIT_CHANGE_COMMIT_MESSAGE=$GERRIT_CHANGE_COMMIT_MESSAGE
- node-parameters: false
- kill-phase-on: FAILURE
- abort-all-job: true
- - multijob:
- name: build
- condition: SUCCESSFUL
- projects:
- - name: 'fuel-verify-build-{stream-exp}'
- current-parameters: false
- predefined-parameters: |
- BRANCH=$BRANCH
- GERRIT_REFSPEC=$GERRIT_REFSPEC
- GERRIT_CHANGE_NUMBER=$GERRIT_CHANGE_NUMBER
- GERRIT_CHANGE_COMMIT_MESSAGE=$GERRIT_CHANGE_COMMIT_MESSAGE
- node-parameters: false
- kill-phase-on: FAILURE
- abort-all-job: true
- - multijob:
- name: deploy-virtual
- condition: SUCCESSFUL
- projects:
- - name: 'fuel-verify-deploy-virtual-{stream-exp}'
- current-parameters: false
- predefined-parameters: |
- BRANCH=$BRANCH
- GERRIT_REFSPEC=$GERRIT_REFSPEC
- GERRIT_CHANGE_NUMBER=$GERRIT_CHANGE_NUMBER
- GERRIT_CHANGE_COMMIT_MESSAGE=$GERRIT_CHANGE_COMMIT_MESSAGE
- node-parameters: false
- kill-phase-on: FAILURE
- abort-all-job: true
- - multijob:
- name: smoke-test
- condition: SUCCESSFUL
- projects:
- - name: 'fuel-verify-smoke-test-{stream-exp}'
- current-parameters: false
- predefined-parameters: |
- BRANCH=$BRANCH
- GERRIT_REFSPEC=$GERRIT_REFSPEC
- GERRIT_CHANGE_NUMBER=$GERRIT_CHANGE_NUMBER
- GERRIT_CHANGE_COMMIT_MESSAGE=$GERRIT_CHANGE_COMMIT_MESSAGE
- node-parameters: false
- kill-phase-on: FAILURE
- abort-all-job: true
-
-- job-template:
- name: 'fuel-verify-{phase}-{stream-exp}'
-
- disabled: '{obj:disabled}'
-
- concurrent: true
-
- properties:
- - logrotate-default
- - throttle:
- enabled: true
- max-total: 6
- option: 'project'
- - build-blocker:
- use-build-blocker: true
- blocking-jobs:
- - 'fuel-verify-deploy-.*'
- - 'fuel-verify-test-.*'
- block-level: 'NODE'
-
- scm:
- - git-scm-gerrit
-
- wrappers:
- - ssh-agent-wrapper
- - timeout:
- timeout: 360
- fail: true
- parameters:
- - project-parameter:
- project: '{project}'
- branch: '{branch}'
- - '{slave-label}-defaults'
- - '{installer}-defaults'
- - 'fuel-verify-defaults-exp':
- gs-pathname: '{gs-pathname}'
-
- builders:
- - description-setter:
- description: "Built on $NODE_NAME"
- - '{project}-verify-{phase}-macro-exp'
-#------------------------------------
-# builder macros
-#------------------------------------
-- builder:
- name: 'fuel-verify-basic-macro-exp'
- builders:
- - shell:
- !include-raw: ./fuel-basic-exp.sh
-
-- builder:
- name: 'fuel-verify-build-macro-exp'
- builders:
- - shell:
- !include-raw: ./fuel-build-exp.sh
- - shell:
- !include-raw: ./fuel-workspace-cleanup.sh
-
-- builder:
- name: 'fuel-verify-deploy-virtual-macro-exp'
- builders:
- - shell:
- !include-raw: ./fuel-deploy-exp.sh
-
-- builder:
- name: 'fuel-verify-smoke-test-macro-exp'
- builders:
- - shell:
- !include-raw: ./fuel-smoke-test-exp.sh
-#------------------------------------
-# parameter macros
-#------------------------------------
-- parameter:
- name: 'fuel-verify-defaults-exp'
- parameters:
- - string:
- name: BUILD_DIRECTORY
- default: $WORKSPACE/build_output
- description: "Directory where the build artifact will be located upon the completion of the build."
- - string:
- name: CACHE_DIRECTORY
- default: $HOME/opnfv/cache/$INSTALLER_TYPE
- description: "Directory where the cache to be used during the build is located."
- - string:
- name: GS_URL
- default: artifacts.opnfv.org/$PROJECT{gs-pathname}
- description: "URL to Google Storage."
- danube:
branch: 'stable/{stream}'
gs-pathname: '/{stream}'
- disabled: true
+ disabled: false
#####################################
# patch verification phases
#####################################
pattern: 'build/**'
- compare-type: ANT
pattern: 'deploy/**'
+ disable-strict-forbidden-file-verification: 'true'
forbidden-file-paths:
- compare-type: ANT
pattern: 'docs/**'
--- /dev/null
+# jenkins job templates for Fuel
+- project:
+
+ name: fuel-weekly
+
+ project: fuel
+
+ installer: fuel
+
+#--------------------------------
+# BRANCH ANCHORS
+#--------------------------------
+ master: &master
+ stream: master
+ branch: '{stream}'
+ disabled: false
+ gs-pathname: ''
+ danube: &danube
+ stream: danube
+ branch: 'stable/{stream}'
+ disabled: false
+ gs-pathname: '/{stream}'
+#--------------------------------
+# POD, INSTALLER, AND BRANCH MAPPING
+#--------------------------------
+# CI PODs
+#--------------------------------
+ pod:
+ - baremetal:
+ slave-label: fuel-baremetal
+ <<: *master
+ - virtual:
+ slave-label: fuel-virtual
+ <<: *master
+ - baremetal:
+ slave-label: fuel-baremetal
+ <<: *danube
+ - virtual:
+ slave-label: fuel-virtual
+ <<: *danube
+#--------------------------------
+# scenarios
+#--------------------------------
+ scenario:
+ # HA scenarios
+ - 'os-nosdn-nofeature-ha':
+ auto-trigger-name: 'weekly-trigger-disabled'
+
+ jobs:
+ - 'fuel-{scenario}-{pod}-weekly-{stream}'
+ - 'fuel-deploy-{pod}-weekly-{stream}'
+
+########################
+# job templates
+########################
+- job-template:
+ name: 'fuel-{scenario}-{pod}-weekly-{stream}'
+
+ disabled: '{obj:disabled}'
+
+ concurrent: false
+
+ properties:
+ - logrotate-default
+ - throttle:
+ enabled: true
+ max-total: 4
+ max-per-node: 1
+ option: 'project'
+ - build-blocker:
+ use-build-blocker: true
+ blocking-jobs:
+ - 'fuel-os-.*?-{pod}-daily-.*'
+ - 'fuel-os-.*?-{pod}-weekly-.*'
+ block-level: 'NODE'
+
+ wrappers:
+ - build-name:
+ name: '$BUILD_NUMBER - Scenario: $DEPLOY_SCENARIO'
+
+ triggers:
+ - '{auto-trigger-name}'
+
+ parameters:
+ - project-parameter:
+ project: '{project}'
+ branch: '{branch}'
+ - '{installer}-defaults'
+ - '{slave-label}-defaults':
+ installer: '{installer}'
+ - string:
+ name: DEPLOY_SCENARIO
+ default: '{scenario}'
+ - fuel-weekly-parameter:
+ gs-pathname: '{gs-pathname}'
+
+ builders:
+ - description-setter:
+ description: "Built on $NODE_NAME"
+ - trigger-builds:
+ - project: 'fuel-deploy-{pod}-weekly-{stream}'
+ current-parameters: false
+ predefined-parameters:
+ DEPLOY_SCENARIO={scenario}
+ same-node: true
+ block: true
+ - trigger-builds:
+ - project: 'functest-fuel-{pod}-weekly-{stream}'
+ current-parameters: false
+ predefined-parameters:
+ DEPLOY_SCENARIO={scenario}
+ same-node: true
+ block: true
+ block-thresholds:
+ build-step-failure-threshold: 'never'
+ failure-threshold: 'never'
+ unstable-threshold: 'FAILURE'
+
+ publishers:
+ - email:
+ recipients: peter.barabas@ericsson.com fzhadaev@mirantis.com
+
+- job-template:
+ name: 'fuel-deploy-{pod}-weekly-{stream}'
+
+ disabled: '{obj:disabled}'
+
+ concurrent: true
+
+ properties:
+ - logrotate-default
+ - throttle:
+ enabled: true
+ max-total: 4
+ max-per-node: 1
+ option: 'project'
+ - build-blocker:
+ use-build-blocker: true
+ blocking-jobs:
+ - 'fuel-deploy-{pod}-daily-.*'
+ - 'fuel-deploy-generic-daily-.*'
+ - 'fuel-deploy-{pod}-weekly-.*'
+ - 'fuel-deploy-generic-weekly-.*'
+ block-level: 'NODE'
+
+ parameters:
+ - project-parameter:
+ project: '{project}'
+ branch: '{branch}'
+ - '{installer}-defaults'
+ - '{slave-label}-defaults':
+ installer: '{installer}'
+ - string:
+ name: DEPLOY_SCENARIO
+ default: 'os-odl_l2-nofeature-ha'
+ - fuel-weekly-parameter:
+ gs-pathname: '{gs-pathname}'
+ - string:
+ name: DEPLOY_TIMEOUT
+ default: '150'
+ description: 'Deployment timeout in minutes'
+
+ scm:
+ - git-scm
+
+ wrappers:
+ - build-name:
+ name: '$BUILD_NUMBER - Scenario: $DEPLOY_SCENARIO'
+
+ builders:
+ - description-setter:
+ description: "Built on $NODE_NAME"
+ - shell:
+ !include-raw-escape: ./fuel-download-artifact.sh
+ - shell:
+ !include-raw-escape: ./fuel-deploy.sh
+
+ publishers:
+ - email:
+ recipients: peter.barabas@ericsson.com fzhadaev@mirantis.com
+
+########################
+# parameter macros
+########################
+- parameter:
+ name: fuel-weekly-parameter
+ parameters:
+ - string:
+ name: BUILD_DIRECTORY
+ default: $WORKSPACE/build_output
+ description: "Directory where the build artifact will be located upon the completion of the build."
+ - string:
+ name: CACHE_DIRECTORY
+ default: $HOME/opnfv/cache/$INSTALLER_TYPE
+ description: "Directory where the cache to be used during the build is located."
+ - string:
+ name: GS_URL
+ default: artifacts.opnfv.org/$PROJECT{gs-pathname}
+ description: "URL to Google Storage."
+########################
+# trigger macros
+########################
+#-----------------------------------------------
+# Triggers for job running on fuel-baremetal against master branch
+#-----------------------------------------------
+# HA Scenarios
+- trigger:
+ name: 'fuel-os-nosdn-nofeature-ha-baremetal-weekly-master-trigger'
+ triggers:
+ - timed: ''
# job configuration for functest
###################################
- project:
- name: functest
+ name: functest-daily
- project: '{name}'
+ project: functest
#--------------------------------
# BRANCH ANCHORS
slave-label: '{pod}'
installer: apex
<<: *master
-# - apex-verify-danube:
-# slave-label: '{pod}'
-# installer: apex
-# <<: *danube
-# - apex-daily-danube:
-# slave-label: '{pod}'
-# installer: apex
-# <<: *danube
+ - apex-verify-danube:
+ slave-label: '{pod}'
+ installer: apex
+ <<: *danube
+ - apex-daily-danube:
+ slave-label: '{pod}'
+ installer: apex
+ <<: *danube
# armband CI PODs
- armband-baremetal:
slave-label: armband-baremetal
slave-label: armband-virtual
installer: fuel
<<: *danube
+# daisy CI PODs
+ - baremetal:
+ slave-label: daisy-baremetal
+ installer: daisy
+ <<: *master
+ - virtual:
+ slave-label: daisy-virtual
+ installer: daisy
+ <<: *master
# netvirt 3rd party ci
- virtual:
slave-label: odl-netvirt-virtual
slave-label: '{pod}'
installer: fuel
<<: *master
+ - arm-pod3-2:
+ slave-label: '{pod}'
+ installer: fuel
+ <<: *master
- zte-pod1:
slave-label: '{pod}'
installer: fuel
slave-label: '{pod}'
installer: fuel
<<: *danube
+ - arm-pod3-2:
+ slave-label: '{pod}'
+ installer: fuel
+ <<: *danube
# PODs for verify jobs triggered by each patch upload
- ool-virtual1:
slave-label: '{pod}'
job-timeout: 60
- 'daily':
job-timeout: 180
- - 'weekly':
- job-timeout: 400
jobs:
- 'functest-{installer}-{pod}-{testsuite}-{stream}'
builders:
- description-setter:
- description: "POD: $NODE_NAME"
+ description: "Built on $NODE_NAME"
- 'functest-{testsuite}-builder'
########################
name: FUNCTEST_SUITE_NAME
default: 'daily'
description: "Daily suite name to run"
-- parameter:
- name: functest-weekly-parameter
- parameters:
- - string:
- name: FUNCTEST_SUITE_NAME
- default: 'weekly'
- description: "Weekly suite name to run"
- parameter:
name: functest-suite-parameter
parameters:
- 'tempest_smoke_serial'
- 'rally_sanity'
- 'odl'
+ - 'odl_netvirt'
- 'onos'
- 'promise'
- 'doctor'
- 'functest-store-results'
- 'functest-exit'
-- builder:
- name: functest-weekly-builder
- builders:
- - 'functest-cleanup'
- - 'set-functest-env'
- - 'functest-weekly'
- - 'functest-store-results'
- - 'functest-exit'
-
- builder:
name: functest-suite-builder
builders:
- 'functest-cleanup'
- 'set-functest-env'
- 'functest-suite'
- - 'functest-store-results'
- - 'functest-exit'
- builder:
name: functest-daily
- shell:
!include-raw: ./functest-loop.sh
-- builder:
- name: functest-weekly
- builders:
- - shell:
- !include-raw: ./functest-loop.sh
- builder:
name: functest-suite
branches:
- branch-compare-type: 'ANT'
branch-pattern: '**/{branch}'
+ disable-strict-forbidden-file-verification: 'true'
forbidden-file-paths:
- compare-type: ANT
pattern: 'docs/**|.gitignore'
#!/bin/bash
-set -e
-echo "Functest: run $FUNCTEST_SUITE_NAME on branch $BRANCH"
-if [[ "$BRANCH" =~ 'brahmaputra' ]]; then
- cmd="${FUNCTEST_REPO_DIR}/docker/run_tests.sh --test $FUNCTEST_SUITE_NAME"
-elif [[ "$BRANCH" =~ 'colorado' ]]; then
- cmd="python ${FUNCTEST_REPO_DIR}/ci/run_tests.py -t $FUNCTEST_SUITE_NAME"
-else
- cmd="functest testcase run $FUNCTEST_SUITE_NAME"
-fi
container_id=$(docker ps -a | grep opnfv/functest | awk '{print $1}' | head -1)
-docker exec $container_id $cmd
+if [ -z "$container_id" ]; then
+ echo "Functest container not found"
+ exit 1
+fi
+
+global_ret_val=0
-ret_value=$?
-ret_val_file="${HOME}/opnfv/functest/results/${BRANCH##*/}/return_value"
-echo ${ret_value}>${ret_val_file}
+tests=($(echo $FUNCTEST_SUITE_NAME | tr "," "\n"))
+for test in "${tests[@]}"; do
+ cmd="python /home/opnfv/repos/functest/functest/ci/run_tests.py -t $test"
+ docker exec $container_id $cmd
+ let global_ret_val+=$?
+done
-exit 0
+exit $global_ret_val
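# As a usage sketch (the suite names are only illustrative), setting
# FUNCTEST_SUITE_NAME to a comma-separated list such as "healthcheck,vping_ssh"
# makes the loop above execute, inside the functest container:
#   python /home/opnfv/repos/functest/functest/ci/run_tests.py -t healthcheck
#   python /home/opnfv/repos/functest/functest/ci/run_tests.py -t vping_ssh
# and the builder exits non-zero if any individual run failed.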
--- /dev/null
+###################################
+# job configuration for functest
+###################################
+- project:
+ name: functest-weekly
+
+ project: functest
+
+#--------------------------------
+# BRANCH ANCHORS
+#--------------------------------
+ master: &master
+ stream: master
+ branch: '{stream}'
+ gs-pathname: ''
+ docker-tag: 'latest'
+ disabled: false
+ danube: &danube
+ stream: danube
+ branch: 'stable/{stream}'
+ gs-pathname: '/{stream}'
+ docker-tag: 'stable'
+ disabled: true
+#--------------------------------
+# POD, INSTALLER, AND BRANCH MAPPING
+#--------------------------------
+# Installers using labels
+# CI PODs
+# This section should only contain the installers
+# that have been switched to using slave labels
+#--------------------------------
+ pod:
+# fuel CI PODs
+ - baremetal:
+ slave-label: fuel-baremetal
+ installer: fuel
+ <<: *master
+ - virtual:
+ slave-label: fuel-virtual
+ installer: fuel
+ <<: *master
+ - baremetal:
+ slave-label: fuel-baremetal
+ installer: fuel
+ <<: *danube
+ - virtual:
+ slave-label: fuel-virtual
+ installer: fuel
+ <<: *danube
+#--------------------------------
+ jobs:
+ - 'functest-{installer}-{pod}-weekly-{stream}'
+
+################################
+# job template
+################################
+- job-template:
+ name: 'functest-{installer}-{pod}-weekly-{stream}'
+
+ disabled: '{obj:disabled}'
+
+ concurrent: true
+
+ properties:
+ - logrotate-default
+ - throttle:
+ enabled: true
+ max-per-node: 1
+ option: 'project'
+
+ wrappers:
+ - build-name:
+ name: '$BUILD_NUMBER Suite: $FUNCTEST_SUITE_NAME Scenario: $DEPLOY_SCENARIO'
+ - timeout:
+ timeout: '400'
+ abort: true
+
+ parameters:
+ - project-parameter:
+ project: '{project}'
+ branch: '{branch}'
+ - '{installer}-defaults'
+ - '{slave-label}-defaults'
+ - string:
+ name: FUNCTEST_SUITE_NAME
+ default: 'weekly'
+ description: "Weekly suite name to run"
+ - string:
+ name: DEPLOY_SCENARIO
+ default: 'os-odl_l2-nofeature-ha'
+ - string:
+ name: DOCKER_TAG
+ default: '{docker-tag}'
+ description: 'Tag to pull docker image'
+ - string:
+ name: CLEAN_DOCKER_IMAGES
+ default: 'false'
+ description: 'Remove downloaded docker images (opnfv/functest*:*)'
+ - functest-parameter:
+ gs-pathname: '{gs-pathname}'
+
+ scm:
+ - git-scm
+
+ builders:
+ - description-setter:
+ description: "Built on $NODE_NAME"
+ - 'functest-weekly-builder'
+########################
+# builder macros
+########################
+- builder:
+ name: functest-weekly-builder
+ builders:
+ - shell:
+ !include-raw: ./functest-cleanup.sh
+ - shell:
+ !include-raw: ./set-functest-env.sh
+ - shell:
+ !include-raw: ./functest-loop.sh
+ - shell:
+ !include-raw: ../../utils/push-test-logs.sh
+ - shell:
+ !include-raw: ./functest-exit.sh
echo "Credentials file detected: ${RC_FILE_PATH}"
# volume if credentials file path is given to Functest
rc_file_vol="-v ${RC_FILE_PATH}:/home/opnfv/functest/conf/openstack.creds"
+ RC_FLAG=1
fi
if [[ ${INSTALLER_TYPE} == 'apex' ]]; then
ssh_options="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
- if sudo virsh list | grep instack; then
- instack_mac=$(sudo virsh domiflist instack | grep default | \
- grep -Eo "[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+")
- elif sudo virsh list | grep undercloud; then
- instack_mac=$(sudo virsh domiflist undercloud | grep default | \
+ if sudo virsh list | grep undercloud; then
+ echo "Installer VM detected"
+ undercloud_mac=$(sudo virsh domiflist undercloud | grep default | \
grep -Eo "[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+")
+ INSTALLER_IP=$(/usr/sbin/arp -e | grep ${undercloud_mac} | awk {'print $1'})
+ sshkey_vol="-v /root/.ssh/id_rsa:/root/.ssh/id_rsa"
+ sudo scp $ssh_options root@${INSTALLER_IP}:/home/stack/stackrc ${HOME}/stackrc
+ stackrc_vol="-v ${HOME}/stackrc:/home/opnfv/functest/conf/stackrc"
+
+ if sudo iptables -C FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable 2> ${redirect}; then
+ sudo iptables -D FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable
+ fi
+ if sudo iptables -C FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable 2> ${redirect}; then
+ sudo iptables -D FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable
+ fi
+ elif [[ "$RC_FLAG" == 1 ]]; then
+ echo "No available installer VM, but credentials provided...continuing"
else
- echo "No available installer VM exists...exiting"
+ echo "No available installer VM exists and no credentials provided...exiting"
exit 1
fi
- INSTALLER_IP=$(/usr/sbin/arp -e | grep ${instack_mac} | awk {'print $1'})
- sshkey_vol="-v /root/.ssh/id_rsa:/root/.ssh/id_rsa"
- sudo scp $ssh_options root@${INSTALLER_IP}:/home/stack/stackrc ${HOME}/stackrc
- stackrc_vol="-v ${HOME}/stackrc:/home/opnfv/functest/conf/stackrc"
- if sudo iptables -C FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable 2> ${redirect}; then
- sudo iptables -D FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable
- fi
- if sudo iptables -C FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable 2> ${redirect}; then
- sudo iptables -D FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable
- fi
fi
choosing-strategy: 'gerrit'
refspec: '$GERRIT_REFSPEC'
<<: *git-scm-defaults
-
+- scm:
+ name: git-scm-with-submodules
+ scm:
+ - git:
+ credentials-id: '$SSH_CREDENTIAL_ID'
+ url: '$GIT_BASE'
+ refspec: ''
+ branches:
+ - 'refs/heads/{branch}'
+ skip-tag: true
+ wipe-workspace: true
+ submodule:
+ recursive: true
+ timeout: 20
+ shallow-clone: true
- trigger:
name: 'daily-trigger-disabled'
triggers:
triggers:
- timed: ''
-# NOTE: unused macro, but we may use this for some jobs.
- trigger:
name: gerrit-trigger-patchset-created
triggers:
- draft-published-event
- comment-added-contains-event:
comment-contains-value: 'recheck'
+ - comment-added-contains-event:
+ comment-contains-value: 'reverify'
projects:
- project-compare-type: 'ANT'
project-pattern: '{project}'
branches:
- branch-compare-type: 'ANT'
branch-pattern: '**/{branch}'
+ file-paths:
+ - compare-type: 'ANT'
+ pattern: '{files}'
+ skip-vote:
+ successful: false
+ failed: false
+ unstable: false
+ notbuilt: false
- trigger:
name: gerrit-trigger-change-merged
branches:
- branch-compare-type: 'ANT'
branch-pattern: '**/{branch}'
+ file-paths:
+ - compare-type: 'ANT'
+ pattern: '{files}'
- trigger:
name: 'experimental'
name: clean-workspace-log
builders:
- shell: |
- find $WORKSPACE -type f -print -name '*.log' | xargs rm -f
+ find $WORKSPACE -type f -name '*.log' | xargs rm -f
- publisher:
name: archive-artifacts
allow-empty: true
fingerprint: true
latest-only: true
+
+- publisher:
+ name: publish-coverage
+ publishers:
+ - cobertura:
+ report-file: "coverage.xml"
+ only-stable: "true"
+ health-auto-update: "false"
+ stability-auto-update: "false"
+ zoom-coverage-chart: "true"
+ targets:
+ - files:
+ healthy: 10
+ unhealthy: 20
+ failing: 30
+ - method:
+ healthy: 50
+ unhealthy: 40
+ failing: 30
+
default-slaves:
- lf-pod1
- parameter:
- name: 'apex-daily-colorado-defaults'
+ name: 'apex-daily-danube-defaults'
parameters:
- label:
name: SLAVE_LABEL
- default: 'apex-daily-colorado'
+ default: 'apex-daily-danube'
- string:
name: GIT_BASE
default: https://gerrit.opnfv.org/gerrit/$PROJECT
- intel-virtual4
- intel-virtual5
- parameter:
- name: 'apex-verify-colorado-defaults'
+ name: 'apex-verify-danube-defaults'
parameters:
- label:
name: SLAVE_LABEL
- default: 'apex-verify-colorado'
+ default: 'apex-verify-danube'
- string:
name: GIT_BASE
default: https://gerrit.opnfv.org/gerrit/$PROJECT
name: GIT_BASE
default: https://gerrit.opnfv.org/gerrit/$PROJECT
description: 'Git URL to use on this Jenkins Slave'
+- parameter:
+ name: 'cengn-pod1-defaults'
+ parameters:
+ - node:
+ name: SLAVE_NAME
+ description: 'Slave name on Jenkins'
+ allowed-slaves:
+ - cengn-pod1
+ default-slaves:
+ - cengn-pod1
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
- parameter:
name: 'intel-pod1-defaults'
parameters:
name: LAB_CONFIG_URL
default: ssh://jenkins-enea@gerrit.opnfv.org:29418/securedlab
description: 'Base URI to the configuration directory'
+- parameter:
+ name: 'arm-pod3-2-defaults'
+ parameters:
+ - node:
+ name: SLAVE_NAME
+ description: 'Slave name on Jenkins'
+ allowed-slaves:
+ - arm-pod3-2
+ default-slaves:
+ - arm-pod3-2
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+ - string:
+ name: LAB_CONFIG_URL
+ default: ssh://jenkins-enea@gerrit.opnfv.org:29418/securedlab
+ description: 'Base URI to the configuration directory'
- parameter:
name: 'intel-virtual6-defaults'
parameters:
--- /dev/null
+- project:
+ name: 'openstack-bifrost-cleanup'
+#--------------------------------
+# branches
+#--------------------------------
+ stream:
+ - master:
+ branch: '{stream}'
+
+#--------------------------------
+# projects
+#--------------------------------
+ project:
+ - 'openstack':
+ project-repo: 'https://git.openstack.org/openstack/bifrost'
+ clone-location: '/opt/bifrost'
+ - 'opnfv':
+ project-repo: 'https://gerrit.opnfv.org/gerrit/releng'
+ clone-location: '/opt/releng'
+
+#--------------------------------
+# jobs
+#--------------------------------
+ jobs:
+ - '{project}-bifrost-cleanup-{stream}'
+
+- job-template:
+ name: '{project}-bifrost-cleanup-{stream}'
+
+ concurrent: false
+
+ node: bifrost-verify-virtual
+
+ # Make sure no verify job is running on any of the slaves since that would
+ # produce build logs after we wipe the destination directory.
+ properties:
+ - build-blocker:
+ blocking-jobs:
+ - '{project}-bifrost-verify-*'
+
+ parameters:
+ - string:
+ name: PROJECT
+ default: '{project}'
+
+ builders:
+ - shell: |
+ #!/bin/bash
+
+ set -eu
+
+ # DO NOT change this unless you know what you are doing.
+ BIFROST_GS_URL="gs://artifacts.opnfv.org/cross-community-ci/openstack/bifrost/$GERRIT_NAME/$GERRIT_CHANGE_NUMBER/"
+
+      # This should never happen... even 'recheck' reuses the last job's
+      # Gerrit information. Better to exit with an error so we can investigate.
+ [[ ! -n $GERRIT_NAME ]] || [[ ! -n $GERRIT_CHANGE_NUMBER ]] && exit 1
+
+ echo "Removing build artifacts for $GERRIT_NAME/$GERRIT_CHANGE_NUMBER"
+
+ if ! [[ "$BIFROST_GS_URL" =~ "/cross-community-ci/openstack/bifrost/" ]]; then
+ echo "Oops! BIFROST_GS_URL=$BIFROST_GS_URL does not seem like a valid"
+ echo "bifrost location on the Google storage server. Please double-check"
+ echo "that it's set properly or fix this line if necessary."
+ echo "gsutil will not be executed until this is fixed!"
+ exit 1
+ fi
+      # No force (-f). We always verify upstream jobs, so if there are no logs
+      # something else went wrong and we need to stop immediately and investigate.
+ gsutil -m rm -r $BIFROST_GS_URL
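      # As an illustration (values are hypothetical): GERRIT_NAME='review.openstack.org'
      # and GERRIT_CHANGE_NUMBER='123456' would make the command above remove
      # gs://artifacts.opnfv.org/cross-community-ci/openstack/bifrost/review.openstack.org/123456/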
+
+ triggers:
+ - '{project}-gerrit-trigger-cleanup':
+ branch: '{branch}'
+
+ publishers:
+ - email:
+ recipients: fatih.degirmenci@ericsson.com yroblamo@redhat.com mchandras@suse.de jack.morgan@intel.com zhang.jun3g@zte.com.cn
+#--------------------------------
+# trigger macros
+#--------------------------------
+- trigger:
+ name: 'openstack-gerrit-trigger-cleanup'
+ triggers:
+ - gerrit:
+ server-name: 'review.openstack.org'
+ escape-quotes: true
+ trigger-on:
+ # We only run this when the change is merged or
+ # abandoned since we don't need the logs anymore
+ - patchset-uploaded-event: 'false'
+ - change-merged-event: 'true'
+ - change-abandoned-event: 'true'
+ - change-restored-event: 'false'
+ - draft-published-event: 'false'
+ # This is an OPNFV maintenance job. We don't want to provide
+ # feedback on Gerrit
+ silent: true
+ silent-start: true
+ projects:
+ - project-compare-type: 'PLAIN'
+ project-pattern: 'openstack/bifrost'
+ branches:
+ - branch-compare-type: 'ANT'
+ branch-pattern: '**/{branch}'
+ disable-strict-forbidden-file-verification: 'true'
+ forbidden-file-paths:
+ - compare-type: ANT
+ pattern: 'doc/**'
+ - compare-type: ANT
+ pattern: 'releasenotes/**'
+ disable-strict-forbidden-file-verification: 'true'
+ readable-message: true
+- trigger:
+ name: 'opnfv-gerrit-trigger-cleanup'
+ triggers:
+ - gerrit:
+ server-name: 'gerrit.opnfv.org'
+ trigger-on:
+ # We only run this when the change is merged or
+ # abandoned since we don't need the logs anymore
+ - patchset-uploaded-event: 'false'
+ - change-merged-event: 'true'
+ - change-abandoned-event: 'true'
+ - change-restored-event: 'false'
+ - draft-published-event: 'false'
+ # This is an OPNFV maintenance job. We don't want to provide
+ # feedback on Gerrit
+ silent: true
+ silent-start: true
+ projects:
+ - project-compare-type: 'ANT'
+ project-pattern: 'releng'
+ branches:
+ - branch-compare-type: 'ANT'
+ branch-pattern: '**/{branch}'
+ file-paths:
+ - compare-type: ANT
+ pattern: 'prototypes/bifrost/**'
+ readable-message: true
publishers:
- email:
- recipients: fatih.degirmenci@ericsson.com yroblamo@redhat.com mchandras@suse.de jack.morgan@intel.com zhang.jun3g@zte.com.cn
+ recipients: fatih.degirmenci@ericsson.com yroblamo@redhat.com mchandras@suse.de jack.morgan@intel.com julienjut@gmail.com
#--------------------------------
# trigger macros
#--------------------------------
branches:
- branch-compare-type: 'ANT'
branch-pattern: '**/{branch}'
+ disable-strict-forbidden-file-verification: 'true'
forbidden-file-paths:
- compare-type: ANT
pattern: 'doc/**'
- compare-type: ANT
pattern: 'releasenotes/**'
+ disable-strict-forbidden-file-verification: 'true'
readable-message: true
- trigger:
name: 'opnfv-gerrit-trigger'
branches:
- branch-compare-type: 'ANT'
branch-pattern: '**/{branch}'
+ disable-strict-forbidden-file-verification: 'true'
forbidden-file-paths:
- compare-type: ANT
pattern: 'docs/**|.gitignore'
danube: &danube
stream: danube
branch: 'stable/{stream}'
- disabled: true
+ disabled: false
gs-pathname: '/{stream}'
#--------------------------------
# POD, INSTALLER, AND BRANCH MAPPING
- orange-pod1:
slave-label: orange-pod1
<<: *master
+ - cengn-pod1:
+ slave-label: cengn-pod1
+ <<: *master
#--------------------------------
# scenarios
#--------------------------------
build-step-failure-threshold: 'never'
failure-threshold: 'never'
unstable-threshold: 'FAILURE'
+        # 1. Dovetail only tracks master for now; it is not synced with the A/B/C branches.
+        # 2. Here the stream refers to the SUT stream; the dovetail stream is defined in its own job.
+        # 3. Only the debug testsuite is run here (it includes basic test cases,
+        #    i.e. one tempest smoke ipv6 case and two vping cases from functest).
+        # 4. Not used for release criteria or compliance; it is only meant to debug
+        #    dovetail tool bugs with joid.
+ - trigger-builds:
+ - project: 'dovetail-joid-{pod}-debug-{stream}'
+ current-parameters: false
+ predefined-parameters:
+ DEPLOY_SCENARIO={scenario}
+ block: true
+ same-node: true
+ block-thresholds:
+ build-step-failure-threshold: 'never'
+ failure-threshold: 'never'
+ unstable-threshold: 'FAILURE'
- job-template:
name: 'joid-deploy-{pod}-daily-{stream}'
name: 'joid-os-nosdn-nofeature-ha-orange-pod1-master-trigger'
triggers:
- timed: ''
+- trigger:
+ name: 'joid-os-nosdn-nofeature-ha-cengn-pod1-master-trigger'
+ triggers:
+ - timed: ''
# os-nosdn-nofeature-ha trigger - branch: danube
- trigger:
name: 'joid-os-nosdn-nofeature-ha-baremetal-danube-trigger'
name: 'joid-os-nosdn-nofeature-ha-orange-pod1-danube-trigger'
triggers:
- timed: ''
+- trigger:
+ name: 'joid-os-nosdn-nofeature-ha-cengn-pod1-danube-trigger'
+ triggers:
+ - timed: ''
# os-odl_l2-nofeature-ha trigger - branch: master
- trigger:
name: 'joid-os-odl_l2-nofeature-ha-baremetal-master-trigger'
name: 'joid-os-odl_l2-nofeature-ha-orange-pod1-master-trigger'
triggers:
- timed: ''
+- trigger:
+ name: 'joid-os-odl_l2-nofeature-ha-cengn-pod1-master-trigger'
+ triggers:
+ - timed: ''
# os-odl_l2-nofeature-ha trigger - branch: danube
- trigger:
name: 'joid-os-odl_l2-nofeature-ha-baremetal-danube-trigger'
name: 'joid-os-odl_l2-nofeature-ha-orange-pod1-danube-trigger'
triggers:
- timed: ''
+- trigger:
+ name: 'joid-os-odl_l2-nofeature-ha-cengn-pod1-danube-trigger'
+ triggers:
+ - timed: ''
# os-onos-nofeature-ha trigger - branch: master
- trigger:
name: 'joid-os-onos-nofeature-ha-baremetal-master-trigger'
name: 'joid-os-onos-nofeature-ha-orange-pod1-master-trigger'
triggers:
- timed: ''
+- trigger:
+ name: 'joid-os-onos-nofeature-ha-cengn-pod1-master-trigger'
+ triggers:
+ - timed: ''
# os-onos-nofeature-ha trigger - branch: danube
- trigger:
name: 'joid-os-onos-nofeature-ha-baremetal-danube-trigger'
name: 'joid-os-onos-nofeature-ha-orange-pod1-danube-trigger'
triggers:
- timed: ''
+- trigger:
+ name: 'joid-os-onos-nofeature-ha-cengn-pod1-danube-trigger'
+ triggers:
+ - timed: ''
# os-onos-sfc-ha trigger - branch: master
- trigger:
name: 'joid-os-onos-sfc-ha-baremetal-master-trigger'
name: 'joid-os-onos-sfc-ha-orange-pod1-master-trigger'
triggers:
- timed: ''
+- trigger:
+ name: 'joid-os-onos-sfc-ha-cengn-pod1-master-trigger'
+ triggers:
+ - timed: ''
# os-onos-sfc-ha trigger - branch: danube
- trigger:
name: 'joid-os-onos-sfc-ha-baremetal-danube-trigger'
name: 'joid-os-onos-sfc-ha-orange-pod1-danube-trigger'
triggers:
- timed: ''
+- trigger:
+ name: 'joid-os-onos-sfc-ha-cengn-pod1-danube-trigger'
+ triggers:
+ - timed: ''
# os-nosdn-lxd-noha trigger - branch: master
- trigger:
name: 'joid-os-nosdn-lxd-noha-baremetal-master-trigger'
name: 'joid-os-nosdn-lxd-noha-orange-pod1-master-trigger'
triggers:
- timed: ''
+- trigger:
+ name: 'joid-os-nosdn-lxd-noha-cengn-pod1-master-trigger'
+ triggers:
+ - timed: ''
# os-nosdn-lxd-noha trigger - branch: danube
- trigger:
name: 'joid-os-nosdn-lxd-noha-baremetal-danube-trigger'
name: 'joid-os-nosdn-lxd-noha-orange-pod1-danube-trigger'
triggers:
- timed: ''
+- trigger:
+ name: 'joid-os-nosdn-lxd-noha-cengn-pod1-danube-trigger'
+ triggers:
+ - timed: ''
# os-nosdn-lxd-ha trigger - branch: master
- trigger:
name: 'joid-os-nosdn-lxd-ha-baremetal-master-trigger'
name: 'joid-os-nosdn-lxd-ha-orange-pod1-master-trigger'
triggers:
- timed: ''
+- trigger:
+ name: 'joid-os-nosdn-lxd-ha-cengn-pod1-master-trigger'
+ triggers:
+ - timed: ''
# os-nosdn-lxd-ha trigger - branch: danube
- trigger:
name: 'joid-os-nosdn-lxd-ha-baremetal-danube-trigger'
name: 'joid-os-nosdn-lxd-ha-orange-pod1-danube-trigger'
triggers:
- timed: ''
+- trigger:
+ name: 'joid-os-nosdn-lxd-ha-cengn-pod1-danube-trigger'
+ triggers:
+ - timed: ''
# os-nosdn-nofeature-noha trigger - branch: master
- trigger:
name: 'joid-os-nosdn-nofeature-noha-baremetal-master-trigger'
name: 'joid-os-nosdn-nofeature-noha-orange-pod1-master-trigger'
triggers:
- timed: ''
+- trigger:
+ name: 'joid-os-nosdn-nofeature-noha-cengn-pod1-master-trigger'
+ triggers:
+ - timed: ''
# os-nosdn-nofeature-noha trigger - branch: danube
- trigger:
name: 'joid-os-nosdn-nofeature-noha-baremetal-danube-trigger'
name: 'joid-os-nosdn-nofeature-noha-orange-pod1-danube-trigger'
triggers:
- timed: ''
+- trigger:
+ name: 'joid-os-nosdn-nofeature-noha-cengn-pod1-danube-trigger'
+ triggers:
+ - timed: ''
# k8-nosdn-nofeature-noha trigger - branch: master
- trigger:
name: 'joid-k8-nosdn-nofeature-noha-baremetal-master-trigger'
name: 'joid-k8-nosdn-nofeature-noha-orange-pod1-master-trigger'
triggers:
- timed: ''
+- trigger:
+ name: 'joid-k8-nosdn-nofeature-noha-cengn-pod1-master-trigger'
+ triggers:
+ - timed: ''
# k8-nosdn-nofeature-noha trigger - branch: danube
- trigger:
name: 'joid-k8-nosdn-nofeature-noha-baremetal-danube-trigger'
name: 'joid-k8-nosdn-nofeature-noha-orange-pod1-danube-trigger'
triggers:
- timed: ''
+- trigger:
+ name: 'joid-k8-nosdn-nofeature-noha-cengn-pod1-danube-trigger'
+ triggers:
+ - timed: ''
# k8-nosdn-lb-noha trigger - branch: master
- trigger:
name: 'joid-k8-nosdn-lb-noha-baremetal-master-trigger'
name: 'joid-k8-nosdn-lb-noha-orange-pod1-master-trigger'
triggers:
- timed: ''
+- trigger:
+ name: 'joid-k8-nosdn-lb-noha-cengn-pod1-master-trigger'
+ triggers:
+ - timed: ''
# k8-nosdn-lb-noha trigger - branch: danube
- trigger:
name: 'joid-k8-nosdn-lb-noha-baremetal-danube-trigger'
name: 'joid-k8-nosdn-lb-noha-orange-pod1-danube-trigger'
triggers:
- timed: ''
+- trigger:
+ name: 'joid-k8-nosdn-lb-noha-cengn-pod1-danube-trigger'
+ triggers:
+ - timed: ''
branches:
- branch-compare-type: 'ANT'
branch-pattern: '**/{branch}'
+ disable-strict-forbidden-file-verification: 'true'
forbidden-file-paths:
- compare-type: ANT
pattern: 'docs/**|.gitignore'
--- /dev/null
+###################################################
+# All the jobs except verify have been removed!
+# They will only be enabled on request by projects!
+###################################################
+- project:
+ name: models
+
+ project: '{name}'
+
+ jobs:
+ - 'models-verify-{stream}'
+
+ stream:
+ - master:
+ branch: '{stream}'
+ gs-pathname: ''
+ disabled: false
+ - danube:
+ branch: 'stable/{stream}'
+ gs-pathname: '/{stream}'
+ disabled: false
+
+- job-template:
+ name: 'models-verify-{stream}'
+
+ disabled: '{obj:disabled}'
+
+ parameters:
+ - project-parameter:
+ project: '{project}'
+ branch: '{branch}'
+ - 'opnfv-build-ubuntu-defaults'
+
+ scm:
+ - git-scm-gerrit
+
+ triggers:
+ - gerrit:
+ server-name: 'gerrit.opnfv.org'
+ trigger-on:
+ - patchset-created-event:
+ exclude-drafts: 'false'
+ exclude-trivial-rebase: 'false'
+ exclude-no-code-change: 'false'
+ - draft-published-event
+ - comment-added-contains-event:
+ comment-contains-value: 'recheck'
+ - comment-added-contains-event:
+ comment-contains-value: 'reverify'
+ projects:
+ - project-compare-type: 'ANT'
+ project-pattern: '{project}'
+ branches:
+ - branch-compare-type: 'ANT'
+ branch-pattern: '**/{branch}'
+ disable-strict-forbidden-file-verification: 'true'
+ forbidden-file-paths:
+ - compare-type: ANT
+ pattern: 'docs/**|.gitignore'
+
+ builders:
+ - shell: |
+ #!/bin/bash
+ set -o errexit
+ set -o nounset
+ set -o pipefail
+
+ # shellcheck -f tty tests/*.sh
branches:
- branch-compare-type: 'ANT'
branch-pattern: '**/{branch}'
+ disable-strict-forbidden-file-verification: 'true'
forbidden-file-paths:
- compare-type: ANT
pattern: 'docs/**|.gitignore'
echo "Unable to extract the url to Fuel ISO properties from ${FUEL_DEPLOY_URL}"
exit 1
fi
+
+# use a known/working version of fuel
+FUEL_PROPERTIES_FILE="opnfv-2017-03-06_16-00-15.properties"
curl -L -s -o $WORKSPACE/latest.properties http://artifacts.opnfv.org/fuel/$FUEL_PROPERTIES_FILE
# source the file so we get OPNFV vars
- name: 'functest-fuel-virtual-suite-{stream}'
current-parameters: false
predefined-parameters: |
- DEPLOY_SCENARIO='os-nosdn-multisite-noha'
- FUNCTEST_SUITE_NAME='multisite'
+ DEPLOY_SCENARIO=os-nosdn-multisite-noha
+ FUNCTEST_SUITE_NAME=multisite
OS_REGION=RegionOne
REGIONONE_IP=100.64.209.10
REGIONTWO_IP=100.64.209.11
- danube:
branch: 'stable/{stream}'
gs-pathname: '/{stream}'
- disabled: true
+ disabled: false
timed: ''
- job-template:
branches:
- branch-compare-type: 'ANT'
branch-pattern: '**/{branch}'
+ disable-strict-forbidden-file-verification: 'true'
forbidden-file-paths:
- compare-type: ANT
pattern: 'docs/**|.gitignore'
branches:
- branch-compare-type: 'ANT'
branch-pattern: '**/{branch}'
+ disable-strict-forbidden-file-verification: 'true'
forbidden-file-paths:
- compare-type: ANT
pattern: 'docs/**'
branches:
- branch-compare-type: 'ANT'
branch-pattern: '**/{branch}'
+ disable-strict-forbidden-file-verification: 'true'
forbidden-file-paths:
- compare-type: ANT
pattern: 'docs/**|.gitignore'
- danube:
branch: 'stable/{stream}'
gs-pathname: '/{stream}'
- disabled: true
+ disabled: false
project: 'onosfw'
branches:
- branch-compare-type: 'ANT'
branch-pattern: '**/{branch}'
+ disable-strict-forbidden-file-verification: 'true'
forbidden-file-paths:
- compare-type: ANT
pattern: 'docs/**|.gitignore'
branches:
- branch-compare-type: 'ANT'
branch-pattern: '**/{branch}'
+ disable-strict-forbidden-file-verification: 'true'
forbidden-file-paths:
- compare-type: ANT
pattern: 'docs/**|.gitignore'
- name: 'compass-deploy-virtual-daily-{stream}'
current-parameters: false
predefined-parameters: |
- DEPLOY_SCENARIO=os-nosdn-openo-noha
+ DEPLOY_SCENARIO=os-nosdn-openo-ha
COMPASS_OS_VERSION=xenial
node-parameters: true
kill-phase-on: FAILURE
file-paths:
- compare-type: ANT
pattern: '**/*'
+ disable-strict-forbidden-file-verification: 'true'
forbidden-file-paths:
- compare-type: ANT
pattern: 'docs/**'
--- /dev/null
+#!/bin/bash
+if [ "$GERRIT_BRANCH" == "master" ]; then
+ RTD_BUILD_VERSION=latest
+else
+ RTD_BUILD_VERSION=${{GERRIT_BRANCH/\//-}}
+fi
+curl -X POST --data "version_slug=$RTD_BUILD_VERSION" https://readthedocs.org/build/opnfvdocsdemo
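# For example (illustrative values): GERRIT_BRANCH=master posts version_slug=latest,
# while GERRIT_BRANCH=stable/danube posts version_slug=stable-danube, since
# ${GERRIT_BRANCH/\//-} replaces the first '/' with '-'. The braces appear doubled
# in the script above because it is included into a JJB job template, where literal
# braces have to be escaped.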
--- /dev/null
+- project:
+ name: docs-rtd
+ jobs:
+ - 'docs-merge-rtd-{stream}'
+ - 'docs-verify-rtd-{stream}'
+
+ stream:
+ - master:
+ branch: 'master'
+ - danube:
+ branch: 'stable/{stream}'
+
+ project: 'opnfvdocs'
+ rtdproject: 'opnfv'
+ # TODO: Archive Artifacts
+
+- job-template:
+ name: 'docs-merge-rtd-{stream}'
+
+ project-type: freestyle
+
+ parameters:
+ - label:
+ name: SLAVE_LABEL
+ default: 'lf-build1'
+ description: 'Slave label on Jenkins'
+ - project-parameter:
+ project: '{project}'
+ branch: '{branch}'
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/releng
+ description: 'Git URL to use on this Jenkins Slave'
+ scm:
+ - git-scm
+
+ triggers:
+ - gerrit-trigger-change-merged:
+ project: '**'
+ branch: '{branch}'
+ files: 'docs/**/*.*'
+
+ builders:
+ - shell: !include-raw: docs-post-rtd.sh
+
+- job-template:
+ name: 'docs-verify-rtd-{stream}'
+
+ project-type: freestyle
+
+ parameters:
+ - label:
+ name: SLAVE_LABEL
+ default: 'lf-build2'
+ description: 'Slave label on Jenkins'
+ - project-parameter:
+ project: '{project}'
+ branch: '{branch}'
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/opnfvdocs
+ description: 'Git URL to use on this Jenkins Slave'
+ scm:
+ - git-scm-with-submodules:
+ branch: '{branch}'
+
+ triggers:
+ - gerrit-trigger-patchset-created:
+ server: 'gerrit.opnfv.org'
+ project: '**'
+ branch: '{branch}'
+ files: 'docs/**/*.*'
+ - timed: 'H H * * *'
+
+ builders:
+ - shell: |
+ if [ "$GERRIT_PROJECT" != "opnfvdocs" ]; then
+ cd docs/submodules/$GERRIT_PROJECT
+ git fetch origin $GERRIT_REFSPEC && git checkout FETCH_HEAD
+ else
+ git fetch origin $GERRIT_REFSPEC && git checkout FETCH_HEAD
+ fi
+ - shell: |
+ sudo pip install virtualenv
+ virtualenv $WORKSPACE/venv
+ . $WORKSPACE/venv/bin/activate
+ pip install --upgrade pip
+ pip freeze
+ pip install tox
+ tox -edocs
- danube:
branch: 'stable/{stream}'
gs-pathname: '/{stream}'
- disabled: true
+ disabled: false
########################
# job templates
branches:
- branch-compare-type: 'ANT'
branch-pattern: '**/{branch}'
+ skip-vote:
+ successful: true
+ failed: true
+ unstable: true
+ notbuilt: true
builders:
- check-bash-syntax
- danube:
branch: 'stable/{stream}'
gs-pathname: '/{stream}'
- disabled: true
+ disabled: false
- job-template:
name: 'ovsnfv-verify-{stream}'
branches:
- branch-compare-type: 'ANT'
branch-pattern: '**/{branch}'
+ disable-strict-forbidden-file-verification: 'true'
forbidden-file-paths:
- compare-type: ANT
- pattern: 'docs/**|.gitignore'
+ pattern: 'docs/**'
+ - compare-type: ANT
+ pattern: 'governance/**'
+ - compare-type: ANT
+ pattern: '*.txt|.gitignore|.gitreview|INFO|LICENSE'
builders:
- shell: |
branches:
- branch-compare-type: 'ANT'
branch-pattern: '**/{branch}'
+ disable-strict-forbidden-file-verification: 'true'
forbidden-file-paths:
- compare-type: ANT
pattern: 'docs/**|.gitignore'
branches:
- branch-compare-type: 'ANT'
branch-pattern: '**/{branch}'
+ disable-strict-forbidden-file-verification: 'true'
forbidden-file-paths:
- compare-type: ANT
pattern: 'docs/**|.gitignore'
branches:
- branch-compare-type: 'ANT'
branch-pattern: '**/{branch}'
+ disable-strict-forbidden-file-verification: 'true'
forbidden-file-paths:
- compare-type: ANT
pattern: 'docs/**|.gitignore'
##############################################################################
set -e
-envs="INSTALLER_TYPE=${INSTALLER_TYPE} -e INSTALLER_IP=${INSTALLER_IP} -e NODE_NAME=${NODE_NAME}"
-suite="TEST_CASE=all"
+envs="INSTALLER_TYPE=${INSTALLER_TYPE} -e INSTALLER_IP=${INSTALLER_IP}
+-e NODE_NAME=${NODE_NAME} -e CI_DEBUG=${CI_DEBUG}"
dir_imgstore="${HOME}/imgstore"
-img_volume="${dir_imgstore}:/home/opnfv/imgstore"
echo "Qtip: Pulling docker image: opnfv/qtip:${DOCKER_TAG}"
docker pull opnfv/qtip:$DOCKER_TAG
-cmd=" docker run -id -e $envs -e $suite -v ${img_volume} opnfv/qtip:${DOCKER_TAG} /bin/bash"
+cmd=" docker run -id -e $envs opnfv/qtip:${DOCKER_TAG} /bin/bash"
echo "Qtip: Running docker command: ${cmd}"
${cmd}
else
echo "The container ID is: ${container_id}"
QTIP_REPO=/home/opnfv/repos/qtip
-# TODO(yujunz): execute benchmark plan for compute-qpi
+# TODO(zhihui_wu): use qtip cli to execute benchmark test in the future
+ docker exec -t ${container_id} bash -c "cd ${QTIP_REPO}/qtip/runner/ &&
+ python runner.py -d /home/opnfv/qtip/results/ -b all"
+
fi
echo "Qtip done!"
branch: '{stream}'
gs-pathname: ''
docker-tag: latest
+ danube: &danube
+ stream: danube
+ branch: 'stable/{stream}'
+ gs-pathname: '/{stream}'
+ docker-tag: 'stable'
#--------------------------------
# JOB VARIABLES
- zte-pod3:
installer: fuel
<<: *master
+ - zte-pod3:
+ installer: fuel
+ <<: *danube
task:
- daily:
auto-builder-name: qtip-validate-deploy
<<: *master
- '{installer}-defaults'
- '{pod}-defaults'
+ - string:
+ name: CI_DEBUG
+ default: 'false'
+ description: "Show debug output information"
scm:
- git-scm
triggers:
- gerrit-trigger-change-merged:
project: '{project}'
branch: '{branch}'
+ files: '**'
branch: '{stream}'
gs-pathname: ''
disabled: false
+ - danube:
+ branch: 'stable/{stream}'
+ gs-pathname: '/{stream}'
+ disabled: false
################################
## job templates
branches:
- branch-compare-type: 'ANT'
branch-pattern: '**/{branch}'
+ disable-strict-forbidden-file-verification: 'true'
forbidden-file-paths:
- compare-type: ANT
pattern: 'docs/**|.gitignore'
builders:
- qtip-unit-tests-and-docs-build
+ publishers:
+ - publish-coverage
################################
## job builders
danube: &danube
stream: danube
branch: 'stable/{stream}'
- disabled: true
+ disabled: false
functest-arm-receivers: &functest-arm-receivers
receivers: >
cristina.pauna@enea.com
if [[ -n "$(docker images | grep $DOCKER_REPO_NAME)" ]]; then
echo "Docker images to remove:"
docker images | head -1 && docker images | grep $DOCKER_REPO_NAME
- image_tags=($(docker images | grep $DOCKER_REPO_NAME | awk '{print $2}'))
- for tag in "${image_tags[@]}"; do
- if [[ -n "$(docker images|grep $DOCKER_REPO_NAME|grep $tag)" ]]; then
- echo "Removing docker image $DOCKER_REPO_NAME:$tag..."
- docker rmi -f $DOCKER_REPO_NAME:$tag
+ image_ids=($(docker images | grep $DOCKER_REPO_NAME | awk '{print $3}'))
+ for id in "${image_ids[@]}"; do
+ if [[ -n "$(docker images|grep $DOCKER_REPO_NAME|grep $id)" ]]; then
+ echo "Removing docker image $DOCKER_REPO_NAME:$id..."
+ docker rmi -f $id
fi
done
fi
cd $WORKSPACE/docker
-if [ ! -f ${DOCKERFILE} ]; then
- echo "ERROR: Dockerfile not found."
- exit 1
+HOST_ARCH=$(uname -m)
+if [ ! -f "${DOCKERFILE}" ]; then
+    # If this is expected to be a Dockerfile for an arch other than x86
+    # and it does not exist, but a patch for that arch is available,
+    # apply the patch to create the Dockerfile.${HOST_ARCH} file
+ if [[ "${DOCKERFILE}" == *"${HOST_ARCH}" && \
+ -f "Dockerfile.${HOST_ARCH}.patch" ]]; then
+ patch -o Dockerfile."${HOST_ARCH}" Dockerfile \
+ Dockerfile."${HOST_ARCH}".patch
+ else
+ echo "ERROR: No Dockerfile or ${HOST_ARCH} patch found."
+ exit 1
+ fi
fi
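# For instance (hypothetical case): on an aarch64 build slave where
# DOCKERFILE=Dockerfile.aarch64 does not exist but Dockerfile.aarch64.patch is
# present, the block above runs
#   patch -o Dockerfile.aarch64 Dockerfile Dockerfile.aarch64.patch
# to generate the architecture-specific Dockerfile before the image build.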
# Get tag version
if [[ "$BRANCH" == "master" ]]; then
DOCKER_TAG="latest"
else
- if [[ "$RELEASE_VERSION" != "" ]]; then
+ if [[ -n "${RELEASE_VERSION-}" ]]; then
release=${BRANCH##*/}
DOCKER_TAG=${release}.${RELEASE_VERSION}
# e.g. colorado.1.0, colorado.2.0, colorado.3.0
echo "Building docker image: $DOCKER_REPO_NAME:$DOCKER_TAG"
echo "--------------------------------------------------------"
echo
-cmd="docker build --no-cache -t $DOCKER_REPO_NAME:$DOCKER_TAG --build-arg BRANCH=$BRANCH
- -f $DOCKERFILE ."
+if [[ $DOCKER_REPO_NAME == *"dovetail"* ]]; then
+ cmd="docker build --no-cache -t $DOCKER_REPO_NAME:$DOCKER_TAG -f $DOCKERFILE ."
+else
+ cmd="docker build --no-cache -t $DOCKER_REPO_NAME:$DOCKER_TAG --build-arg BRANCH=$BRANCH
+ -f $DOCKERFILE ."
+fi
echo ${cmd}
${cmd}
danube: &danube
stream: danube
branch: 'stable/{stream}'
- disabled: true
+ disabled: false
functest-receivers: &functest-receivers
receivers: >
jose.lausuch@ericsson.com morgan.richomme@orange.com
comment-contains-value: 'reverify'
projects:
- project-compare-type: 'REG_EXP'
- project-pattern: 'compass4nfv'
+ project-pattern: ''
branches:
- branch-compare-type: 'ANT'
branch-pattern: '**/{branch}'
else
echo "Uploading mongodump to artifact $artifact_dir"
/usr/local/bin/gsutil cp -r "$workspace"/"$file_name" gs://artifacts.opnfv.org/"$artifact_dir"/
- echo "MongoDump can be found at http://artifacts.opnfv.org/$artifact_dir"
+ echo "MongoDump can be found at http://artifacts.opnfv.org/$artifact_dir.html"
fi
--- /dev/null
+###################################################
+# All the jobs except verify have been removed!
+# They will only be enabled on request by projects!
+###################################################
+- project:
+ name: snaps
+
+ project: '{name}'
+
+ jobs:
+ - 'snaps-verify-{stream}'
+
+ stream:
+ - master:
+ branch: '{stream}'
+ gs-pathname: ''
+ disabled: false
+ - danube:
+ branch: 'stable/{stream}'
+ gs-pathname: '/{stream}'
+ disabled: false
+
+- job-template:
+ name: 'snaps-verify-{stream}'
+
+ disabled: '{obj:disabled}'
+
+ parameters:
+ - project-parameter:
+ project: '{project}'
+ branch: '{branch}'
+ - 'opnfv-build-ubuntu-defaults'
+
+ scm:
+ - git-scm-gerrit
+
+ triggers:
+ - gerrit:
+ server-name: 'gerrit.opnfv.org'
+ trigger-on:
+ - patchset-created-event:
+ exclude-drafts: 'false'
+ exclude-trivial-rebase: 'false'
+ exclude-no-code-change: 'false'
+ - draft-published-event
+ - comment-added-contains-event:
+ comment-contains-value: 'recheck'
+ - comment-added-contains-event:
+ comment-contains-value: 'reverify'
+ projects:
+ - project-compare-type: 'ANT'
+ project-pattern: '{project}'
+ branches:
+ - branch-compare-type: 'ANT'
+ branch-pattern: '**/{branch}'
+ disable-strict-forbidden-file-verification: 'true'
+ forbidden-file-paths:
+ - compare-type: ANT
+ pattern: 'docs/**|.gitignore'
+
+ builders:
+ - shell: |
+ echo "Nothing to verify!"
- danube:
branch: 'stable/{stream}'
gs-pathname: '/{stream}'
- disabled: true
+ disabled: false
- job-template:
name: 'storperf-verify-{stream}'
--- /dev/null
+###################################################
+# All the jobs except verify have been removed!
+# They will only be enabled on request by projects!
+###################################################
+- project:
+ name: ves
+
+ project: '{name}'
+
+ jobs:
+ - 'ves-verify-{stream}'
+
+ stream:
+ - master:
+ branch: '{stream}'
+ gs-pathname: ''
+ disabled: false
+ - danube:
+ branch: 'stable/{stream}'
+ gs-pathname: '/{stream}'
+ disabled: false
+
+- job-template:
+ name: 'ves-verify-{stream}'
+
+ disabled: '{obj:disabled}'
+
+ parameters:
+ - project-parameter:
+ project: '{project}'
+ branch: '{branch}'
+ - 'opnfv-build-ubuntu-defaults'
+
+ scm:
+ - git-scm-gerrit
+
+ triggers:
+ - gerrit:
+ server-name: 'gerrit.opnfv.org'
+ trigger-on:
+ - patchset-created-event:
+ exclude-drafts: 'false'
+ exclude-trivial-rebase: 'false'
+ exclude-no-code-change: 'false'
+ - draft-published-event
+ - comment-added-contains-event:
+ comment-contains-value: 'recheck'
+ - comment-added-contains-event:
+ comment-contains-value: 'reverify'
+ projects:
+ - project-compare-type: 'ANT'
+ project-pattern: '{project}'
+ branches:
+ - branch-compare-type: 'ANT'
+ branch-pattern: '**/{branch}'
+ disable-strict-forbidden-file-verification: 'true'
+ forbidden-file-paths:
+ - compare-type: ANT
+ pattern: 'docs/**|.gitignore'
+
+ builders:
+ - shell: |
+ #!/bin/bash
+ set -o errexit
+ set -o nounset
+ set -o pipefail
+
+ # shellcheck -f tty tests/*.sh
+ # shellcheck -f tty utils/*.sh
- danube:
branch: 'stable/{stream}'
gs-pathname: '/{stream}'
- disabled: true
- slave-label: 'intel-pod12'
+ disabled: false
+ slave-label: 'opnfv-build-ubuntu'
- job-template:
branches:
- branch-compare-type: 'ANT'
branch-pattern: '**/{branch}'
+ disable-strict-forbidden-file-verification: 'true'
forbidden-file-paths:
- compare-type: ANT
pattern: 'docs/**'
branches:
- branch-compare-type: 'ANT'
branch-pattern: '**/{branch}'
+ disable-strict-forbidden-file-verification: 'true'
forbidden-file-paths:
- compare-type: ANT
pattern: 'docs/**'
publishers:
- email:
- recipients: jean.gaoliang@huawei.com matthew.lijun@huawei.com
+ recipients: jean.gaoliang@huawei.com limingjiang@huawei.com
########################
# builder macros
return None
for line in lines:
- if 'controller' in line:
- roles = "controller"
- elif 'compute' in line:
- roles = "compute"
- else:
+ roles = []
+ if any(x in line for x in ['-----', 'Networks']):
continue
- if 'Daylight' in line:
- roles += ", OpenDaylight"
+ if 'controller' in line:
+ roles.append(manager.Role.CONTROLLER)
+ if 'compute' in line:
+ roles.append(manager.Role.COMPUTE)
+ if 'opendaylight' in line.lower():
+ roles.append(manager.Role.ODL)
+
fields = line.split('|')
id = re.sub('[!| ]', '', fields[1]).encode()
name = re.sub('[!| ]', '', fields[2]).encode()
- status_node = re.sub('[!| ]', '', fields[3]).encode()
+ status_node = re.sub('[!| ]', '', fields[3]).encode().lower()
ip = re.sub('[!| ctlplane=]', '', fields[4]).encode()
- if status_node.lower() == 'active':
- status = manager.Node.STATUS_OK
+ ssh_client = None
+ if 'active' in status_node:
+ status = manager.NodeStatus.STATUS_OK
ssh_client = ssh_utils.get_ssh_client(hostname=ip,
username='heat-admin',
pkey_file=self.pkey_file)
+ elif 'error' in status_node:
+ status = manager.NodeStatus.STATUS_ERROR
+ elif 'off' in status_node:
+ status = manager.NodeStatus.STATUS_OFFLINE
else:
- status = manager.Node.STATUS_INACTIVE
- ssh_client = None
+ status = manager.NodeStatus.STATUS_INACTIVE
node = manager.Node(id, ip, name, status, roles, ssh_client)
nodes.append(node)
"grep Description|sed 's/^.*\: //'")
cmd_ver = ("sudo yum info opendaylight 2>/dev/null|"
"grep Version|sed 's/^.*\: //'")
+ description = None
for node in self.nodes:
- if 'controller' in node.get_attribute('roles'):
+ if node.is_controller():
description = node.run_cmd(cmd_descr)
version = node.run_cmd(cmd_ver)
break
installer_user=installer_user,
installer_pwd=installer_pwd)
else:
- raise Exception("Installer adapter is not implemented.")
+ raise Exception("Installer adapter is not implemented for "
+ "the given installer.")
if options and options['cluster'] and len(self.nodes) > 0:
n = []
for node in self.nodes:
- if node.info['cluster'] == options['cluster']:
+ if str(node.info['cluster']) == str(options['cluster']):
n.append(node)
return n
index_ip = i
elif "mac" in fields[i]:
index_mac = i
- elif "roles " in fields[i]:
+ elif "roles " in fields[i] and "pending_roles" not in fields[i]:
index_roles = i
elif "online" in fields[i]:
index_online = i
fields = lines[i].rsplit(' | ')
id = fields[index_id].strip().encode()
ip = fields[index_ip].strip().encode()
- status_node = fields[index_status].strip().encode()
+ status_node = fields[index_status].strip().encode().lower()
name = fields[index_name].strip().encode()
- roles = fields[index_roles].strip().encode()
+ roles_all = fields[index_roles].strip().encode().lower()
+
+ roles = [x for x in [manager.Role.CONTROLLER,
+ manager.Role.COMPUTE,
+ manager.Role.ODL] if x in roles_all]
dict = {"cluster": fields[index_cluster].strip().encode(),
"mac": fields[index_mac].strip().encode(),
"status_node": status_node,
"online": fields[index_online].strip().encode()}
+ ssh_client = None
if status_node == 'ready':
- status = manager.Node.STATUS_OK
+ status = manager.NodeStatus.STATUS_OK
proxy = {'ip': self.installer_ip,
'username': self.installer_user,
'password': self.installer_pwd}
ssh_client = ssh_utils.get_ssh_client(hostname=ip,
username='root',
proxy=proxy)
+ elif 'error' in status_node:
+ status = manager.NodeStatus.STATUS_ERROR
+ elif 'off' in status_node:
+ status = manager.NodeStatus.STATUS_OFFLINE
+ elif 'discover' in status_node:
+ status = manager.NodeStatus.STATUS_UNUSED
else:
- status = manager.Node.STATUS_INACTIVE
- ssh_client = None
+ status = manager.NodeStatus.STATUS_INACTIVE
node = manager.Node(
id, ip, name, status, roles, ssh_client, dict)
cmd = 'source openrc;nova-manage version 2>/dev/null'
version = None
for node in self.nodes:
- if 'controller' in node.get_attribute('roles'):
+ if node.is_controller() and node.is_active():
version = node.run_cmd(cmd)
break
return version
def get_sdn_version(self):
- cmd = "apt-cache show opendaylight|grep Version|sed 's/^.*\: //'"
+ cmd = "apt-cache policy opendaylight|grep Installed"
version = None
for node in self.nodes:
- if 'controller' in node.get_attribute('roles'):
+ if manager.Role.ODL in node.roles and node.is_active():
odl_version = node.run_cmd(cmd)
if odl_version:
- version = 'OpenDaylight ' + odl_version
- break
+ version = 'OpenDaylight ' + odl_version.split(' ')[-1]
+ break
return version
def get_deployment_status(self):
- cmd = 'fuel env|grep operational'
+ cmd = "fuel env|tail -1|awk '{print $3}'"
result = self.installer_node.run_cmd(cmd)
if result is None or len(result) == 0:
- return 'failed'
+ return 'unknown'
+ elif 'operational' in result:
+ return 'active'
+ elif 'deploy' in result:
+ return 'deploying'
else:
return 'active'
status,
openstack_version,
sdn_controller,
- nodes=[]):
+ nodes=None):
self.deployment_info = {
'installer': installer,
sdn_controller=self.deployment_info['sdn_controller'])
for node in self.deployment_info['nodes']:
- s += '\t\t{node_object}\n'.format(node_object=node)
+ s += '{node_object}\n'.format(node_object=node)
return s
-class Node(object):
+class Role():
+ INSTALLER = 'installer'
+ CONTROLLER = 'controller'
+ COMPUTE = 'compute'
+ ODL = 'opendaylight'
+ ONOS = 'onos'
+
+class NodeStatus():
STATUS_OK = 'active'
STATUS_INACTIVE = 'inactive'
STATUS_OFFLINE = 'offline'
- STATUS_FAILED = 'failed'
+ STATUS_ERROR = 'error'
+ STATUS_UNUSED = 'unused'
+
+
+class Node(object):
def __init__(self,
id,
ip,
name,
status,
- roles,
- ssh_client,
- info={}):
+ roles=None,
+ ssh_client=None,
+ info=None):
self.id = id
self.ip = ip
self.name = name
self.roles = roles
self.info = info
+ self.cpu_info = 'unknown'
+ self.memory = 'unknown'
+ self.ovs = 'unknown'
+
+ if ssh_client and Role.INSTALLER not in self.roles:
+ sys_info = self.get_system_info()
+ self.cpu_info = sys_info['cpu_info']
+ self.memory = sys_info['memory']
+ self.ovs = self.get_ovs_info()
+
def get_file(self, src, dest):
'''
SCP file from a node
'''
- if self.status is not Node.STATUS_OK:
+ if self.status is not NodeStatus.STATUS_OK:
logger.info("The node %s is not active" % self.ip)
return 1
logger.info("Fetching %s from %s" % (src, self.ip))
'''
SCP file to a node
'''
- if self.status is not Node.STATUS_OK:
+ if self.status is not NodeStatus.STATUS_OK:
logger.info("The node %s is not active" % self.ip)
return 1
logger.info("Copying %s to %s" % (src, self.ip))
'''
Run command remotely on a node
'''
- if self.status is not Node.STATUS_OK:
- logger.info("The node %s is not active" % self.ip)
- return 1
+ if self.status is not NodeStatus.STATUS_OK:
+ logger.error(
+ "Error running command %s. The node %s is not active"
+ % (cmd, self.ip))
+ return None
_, stdout, stderr = (self.ssh_client.exec_command(cmd))
error = stderr.readlines()
if len(error) > 0:
logger.error("error %s" % ''.join(error))
- return error
+ return None
output = ''.join(stdout.readlines()).rstrip()
return output
'name': self.name,
'status': self.status,
'roles': self.roles,
+ 'cpu_info': self.cpu_info,
+ 'memory': self.memory,
+ 'ovs': self.ovs,
'info': self.info
}
- def get_attribute(self, attribute):
+ def is_active(self):
'''
- Returns an attribute given the name
+ Returns if the node is active
'''
- return self.get_dict()[attribute]
+ if self.status == NodeStatus.STATUS_OK:
+ return True
+ return False
def is_controller(self):
'''
Returns if the node is a controller
'''
- if 'controller' in self.get_attribute('roles'):
- return True
- return False
+ return Role.CONTROLLER in self.roles
def is_compute(self):
'''
Returns if the node is a compute
'''
- if 'compute' in self.get_attribute('roles'):
- return True
- return False
+ return Role.COMPUTE in self.roles
+
+ def is_odl(self):
+ '''
+ Returns if the node is an opendaylight
+ '''
+ return Role.ODL in self.roles
+
+ def get_ovs_info(self):
+ '''
+ Returns the ovs version installed
+ '''
+ if self.is_active():
+ cmd = "ovs-vsctl --version|head -1| sed 's/^.*) //'"
+ return self.run_cmd(cmd)
+ return None
+
+ def get_system_info(self):
+ '''
+        Returns the memory and cpu information of the node
+ '''
+ cmd = 'grep MemTotal /proc/meminfo'
+ memory = self.run_cmd(cmd).partition('MemTotal:')[-1].strip().encode()
+
+ cpu_info = {}
+ cmd = 'lscpu'
+ result = self.run_cmd(cmd)
+ for line in result.splitlines():
+ if line.startswith('CPU(s)'):
+ cpu_info['num_cpus'] = line.split(' ')[-1].encode()
+ elif line.startswith('Thread(s) per core'):
+ cpu_info['threads/core'] = line.split(' ')[-1].encode()
+ elif line.startswith('Core(s) per socket'):
+ cpu_info['cores/socket'] = line.split(' ')[-1].encode()
+ elif line.startswith('Model name'):
+ cpu_info['model'] = line.partition(
+ 'Model name:')[-1].strip().encode()
+ elif line.startswith('Architecture'):
+ cpu_info['arch'] = line.split(' ')[-1].encode()
+
+ return {'memory': memory, 'cpu_info': cpu_info}
def __str__(self):
- return str(self.get_dict())
+ return '''
+ name: {name}
+ id: {id}
+ ip: {ip}
+ status: {status}
+ roles: {roles}
+ cpu: {cpu_info}
+ memory: {memory}
+ ovs: {ovs}
+ info: {info}'''.format(name=self.name,
+ id=self.id,
+ ip=self.ip,
+ status=self.status,
+ roles=self.roles,
+ cpu_info=self.cpu_info,
+ memory=self.memory,
+ ovs=self.ovs,
+ info=self.info)
class DeploymentHandler(object):
self.installer_node = Node(id='',
ip=installer_ip,
name=installer,
- status='active',
+ status=NodeStatus.STATUS_OK,
ssh_client=self.installer_connection,
- roles='installer node')
+ roles=Role.INSTALLER)
else:
raise Exception(
'Cannot establish connection to the installer node!')
'''
return self.installer_node
+ def get_arch(self):
+ '''
+ Returns the architecture of the first compute node found
+ '''
+ arch = None
+ for node in self.nodes:
+ if node.is_compute():
+ arch = node.cpu_info.get('arch', None)
+ if arch:
+ break
+ return arch
+
def get_deployment_info(self):
'''
Returns an object of type Deployment
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-import opnfv.utils.OPNFVLogger as OPNFVLogger
+import opnfv.utils.opnfv_logger as OPNFVLogger
import os
import time
import shutil
if timestamp is None:
timestamp = time.strftime("%Y%m%d-%H%M%S")
- for controller_client in controller_clients:
- self.ofctl_dump_flows(controller_client,
- timestamp=timestamp)
- self.vsctl_show(controller_client,
- timestamp=timestamp)
-
- for compute_client in compute_clients:
- self.ofctl_dump_flows(compute_client,
- timestamp=timestamp)
- self.vsctl_show(compute_client,
- timestamp=timestamp)
+ clients = controller_clients + compute_clients
+ for client in clients:
+ self.ofctl_dump_flows(client, timestamp=timestamp)
+ self.vsctl_show(client, timestamp=timestamp)
if related_error is not None:
dumpdir = os.path.join(self.ovs_dir, timestamp)
+ self.__mkdir_p(dumpdir)
with open(os.path.join(dumpdir, 'error'), 'w') as f:
f.write(related_error)
dib_os_element: "{{ lookup('env','DIB_OS_ELEMENT') }}"
dib_os_release: "{{ lookup('env', 'DIB_OS_RELEASE') }}"
extra_dib_elements: "{{ lookup('env', 'EXTRA_DIB_ELEMENTS') | default('') }}"
- dib_elements: "vm serial-console simple-init devuser infra-cloud-bridge puppet growroot {{ extra_dib_elements }}"
+ dib_elements: "vm enable-serial-console simple-init devuser infra-cloud-bridge puppet growroot {{ extra_dib_elements }}"
dib_packages: "{{ lookup('env', 'DIB_OS_PACKAGES') }}"
when: create_image_via_dib | bool == true and transform_boot_image | bool == false
environment:
USE_DHCP="false"
USE_VENV="false"
BUILD_IMAGE=true
+BAREMETAL_DATA_FILE=${BAREMETAL_DATA_FILE:-'/tmp/baremetal.json'}
PROVISION_WAIT_TIMEOUT=${PROVISION_WAIT_TIMEOUT:-3600}
# Set defaults for ansible command-line options to drive the different
WRITE_INTERFACES_FILE=true
# Set BIFROST_INVENTORY_SOURCE
-export BIFROST_INVENTORY_SOURCE=/tmp/baremetal.csv
+export BIFROST_INVENTORY_SOURCE=/tmp/baremetal.json
# DIB custom elements path
-export ELEMENTS_PATH=/usr/share/diskimage-builder/elements:/opt/puppet-infracloud/files/elements
+export ELEMENTS_PATH=/opt/puppet-infracloud/files/elements
# settings for console access
export DIB_DEV_USER_PWDLESS_SUDO=yes
ANSIBLE=$(which ansible-playbook)
set -x -o nounset
+logs_on_exit() {
+ $SCRIPT_HOME/collect-test-info.sh
+}
+trap logs_on_exit EXIT
+
# Change working directory
cd $BIFROST_HOME/playbooks
-e test_vm_num_nodes=${TEST_VM_NUM_NODES} \
-e test_vm_memory_size=${VM_MEMORY_SIZE} \
-e enable_venv=${ENABLE_VENV} \
- -e test_vm_domain_type=${VM_DOMAIN_TYPE}
+ -e test_vm_domain_type=${VM_DOMAIN_TYPE} \
+ -e baremetal_json_file=${BAREMETAL_DATA_FILE}
# Execute the installation and VM startup test.
${ANSIBLE} -vvvv \
echo "****************************"
fi
-$SCRIPT_HOME/collect-test-info.sh
-
exit $EXITCODE
--- /dev/null
+===============================
+How to deploy OpenStack-Ansible
+===============================
+The script and playbooks defined in this repo will deploy an OpenStack
+cloud based on OpenStack-Ansible.
+It needs to be combined with Bifrost: use Bifrost to provision the six VMs first.
+To learn how to use Bifrost, see the document at
+/opt/releng/prototypes/bifrost/README.md.
+
+Minimal requirements:
+1. At least 150 GB of free space on the partition where
+   "/var/lib/libvirt/images/" lives.
+2. Each VM needs at least 8 vCPUs, 12 GB RAM and a 60 GB HDD.
+
+After provisioning the six VMs, follow these steps:
+
+1. Run the script to deploy OpenStack::
+ cd /opt/releng/prototypes/openstack-ansible/scripts/
+ sudo ./osa_deploy.sh
+The deployment takes a long time. Logs for each stage are written to
+/opt/openstack-ansible/log/ on the host where the script is run. When the
+deployment succeeds, you will see the message "OpenStack successfully
+deployed!".
+
+2. To verify that OpenStack is operational:
+  2.1 ssh into the controller::
+      ssh 192.168.122.3
+  2.2 Enter the utility lxc container::
+ lxcname=$(lxc-ls | grep utility)
+ lxc-attach -n $lxcname
+ 2.3 Verify the OpenStack API::
+ source /root/openrc
+ openstack user list
+
+This will show output similar to the following::
++----------------------------------+--------------------+
+| ID | Name |
++----------------------------------+--------------------+
+| 056f8fe41336435991fd80872731cada | aodh |
+| 308f6436e68f40b49d3b8e7ce5c5be1e | glance |
+| 351b71b43a66412d83f9b3cd75485875 | nova |
+| 511129e053394aea825cce13b9f28504 | ceilometer |
+| 5596f71319d44c8991fdc65f3927b62e | gnocchi |
+| 586f49e3398a4c47a2f6fe50135d4941 | stack_domain_admin |
+| 601b329e6b1d427f9a1e05ed28753497 | heat |
+| 67fe383b94964a4781345fbcc30ae434 | cinder |
+| 729bb08351264d729506dad84ed3ccf0 | admin |
+| 9f2beb2b270940048fe6844f0b16281e | neutron |
+| fa68f86dd1de4ddbbb7415b4d9a54121 | keystone |
++----------------------------------+--------------------+
--- /dev/null
+---
+# This file contains an example to show how to set
+# the cinder-volume service to run in a container.
+#
+# Important note:
+# When using LVM or any iSCSI-based cinder backends, such as NetApp with
+# iSCSI protocol, the cinder-volume service *must* run on metal.
+# Reference: https://bugs.launchpad.net/ubuntu/+source/lxc/+bug/1226855
+
+container_skel:
+ cinder_volumes_container:
+ properties:
+ is_metal: false
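+
+# Illustrative note (not part of the original example): setting is_metal: true
+# here instead would pin the cinder-volume service back onto the host, which is
+# what the LVM/iSCSI warning above requires.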
--- /dev/null
+# /etc/exports: the access control list for filesystems which may be exported
+# to NFS clients. See exports(5).
+#
+# Example for NFSv2 and NFSv3:
+# /srv/homes hostname1(rw,sync,no_subtree_check) hostname2(ro,sync,no_subtree_check)
+#
+# Example for NFSv4:
+# /srv/nfs4 gss/krb5i(rw,sync,fsid=0,crossmnt,no_subtree_check)
+# /srv/nfs4/homes gss/krb5i(rw,sync,no_subtree_check)
+#
+/images *(rw,sync,no_subtree_check,no_root_squash)
+
--- /dev/null
+# /etc/modules: kernel modules to load at boot time.
+#
+# This file contains the names of kernel modules that should be loaded
+# at boot time, one per line. Lines beginning with "#" are ignored.
+# Parameters can be specified after the module name.
+
+bonding
+8021q
--- /dev/null
+---
+cidr_networks:
+ container: 172.29.236.0/22
+ tunnel: 172.29.240.0/22
+ storage: 172.29.244.0/22
+
+used_ips:
+ - "172.29.236.1,172.29.236.50"
+ - "172.29.240.1,172.29.240.50"
+ - "172.29.244.1,172.29.244.50"
+ - "172.29.248.1,172.29.248.50"
+
+global_overrides:
+ internal_lb_vip_address: 172.29.236.222
+ external_lb_vip_address: 192.168.122.220
+ tunnel_bridge: "br-vxlan"
+ management_bridge: "br-mgmt"
+ provider_networks:
+ - network:
+ container_bridge: "br-mgmt"
+ container_type: "veth"
+ container_interface: "eth1"
+ ip_from_q: "container"
+ type: "raw"
+ group_binds:
+ - all_containers
+ - hosts
+ is_container_address: true
+ is_ssh_address: true
+ - network:
+ container_bridge: "br-vxlan"
+ container_type: "veth"
+ container_interface: "eth10"
+ ip_from_q: "tunnel"
+ type: "vxlan"
+ range: "1:1000"
+ net_name: "vxlan"
+ group_binds:
+ - neutron_linuxbridge_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth12"
+ host_bind_override: "eth12"
+ type: "flat"
+ net_name: "flat"
+ group_binds:
+ - neutron_linuxbridge_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth11"
+ type: "vlan"
+ range: "1:1"
+ net_name: "vlan"
+ group_binds:
+ - neutron_linuxbridge_agent
+ - network:
+ container_bridge: "br-storage"
+ container_type: "veth"
+ container_interface: "eth2"
+ ip_from_q: "storage"
+ type: "raw"
+ group_binds:
+ - glance_api
+ - cinder_api
+ - cinder_volume
+ - nova_compute
+
+###
+### Infrastructure
+###
+
+# galera, memcache, rabbitmq, utility
+shared-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# repository (apt cache, python packages, etc)
+repo-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# load balancer
+# Ideally the load balancer should not use the Infrastructure hosts.
+# Dedicated hardware is best for improved performance and security.
+haproxy_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# rsyslog server
+#log_hosts:
+ # log1:
+ # ip: 172.29.236.14
+
+###
+### OpenStack
+###
+
+# keystone
+identity_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# cinder api services
+storage-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# glance
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+image_hosts:
+ controller00:
+ ip: 172.29.236.11
+ container_vars:
+ limit_container_types: glance
+ glance_nfs_client:
+ - server: "172.29.244.15"
+ remote_path: "/images"
+ local_path: "/var/lib/glance/images"
+ type: "nfs"
+ options: "_netdev,auto"
+ controller01:
+ ip: 172.29.236.12
+ container_vars:
+ limit_container_types: glance
+ glance_nfs_client:
+ - server: "172.29.244.15"
+ remote_path: "/images"
+ local_path: "/var/lib/glance/images"
+ type: "nfs"
+ options: "_netdev,auto"
+ controller02:
+ ip: 172.29.236.13
+ container_vars:
+ limit_container_types: glance
+ glance_nfs_client:
+ - server: "172.29.244.15"
+ remote_path: "/images"
+ local_path: "/var/lib/glance/images"
+ type: "nfs"
+ options: "_netdev,auto"
+
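+# For illustration only (not applied here): the same NFS mount could instead be
+# set once for all glance containers from user_variables.yml, assuming the
+# glance_nfs_client variable is honoured as a global override, e.g.:
+#
+# glance_nfs_client:
+#   - server: "172.29.244.15"
+#     remote_path: "/images"
+#     local_path: "/var/lib/glance/images"
+#     type: "nfs"
+#     options: "_netdev,auto"
+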
+# nova api, conductor, etc services
+compute-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# heat
+orchestration_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# horizon
+dashboard_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# neutron server, agents (L3, etc)
+network_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# ceilometer (telemetry API)
+metering-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# aodh (telemetry alarm service)
+metering-alarm_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# gnocchi (telemetry metrics storage)
+metrics_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# nova hypervisors
+compute_hosts:
+ compute00:
+ ip: 172.29.236.14
+ compute01:
+ ip: 172.29.236.15
+
+# ceilometer compute agent (telemetry)
+metering-compute_hosts:
+ compute00:
+ ip: 172.29.236.14
+ compute01:
+ ip: 172.29.236.15
+
+# cinder volume hosts (LVM-backed)
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+storage_hosts:
+ controller00:
+ ip: 172.29.236.11
+ container_vars:
+ cinder_backends:
+ limit_container_types: cinder_volume
+ lvm:
+ volume_group: cinder-volumes
+ volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
+ volume_backend_name: LVM_iSCSI
+ iscsi_ip_address: "172.29.244.11"
+ controller01:
+ ip: 172.29.236.12
+ container_vars:
+ cinder_backends:
+ limit_container_types: cinder_volume
+ lvm:
+ volume_group: cinder-volumes
+ volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
+ volume_backend_name: LVM_iSCSI
+ iscsi_ip_address: "172.29.244.12"
+ controller02:
+ ip: 172.29.236.13
+ container_vars:
+ cinder_backends:
+ limit_container_types: cinder_volume
+ lvm:
+ volume_group: cinder-volumes
+ volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
+ volume_backend_name: LVM_iSCSI
+ iscsi_ip_address: "172.29.244.13"
--- /dev/null
+---
+# Copyright 2014, Rackspace US, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+###
+### This file contains commonly used overrides for convenience. Please inspect
+### the defaults for each role to find additional override options.
+###
+
+## Debug and Verbose options.
+debug: false
+
+haproxy_keepalived_external_vip_cidr: "192.168.122.220/32"
+haproxy_keepalived_internal_vip_cidr: "172.29.236.222/32"
+haproxy_keepalived_external_interface: br-vlan
+haproxy_keepalived_internal_interface: br-mgmt
--- /dev/null
+[jumphost]
+jumphost ansible_ssh_host=192.168.122.2
+
+[controller]
+controller00 ansible_ssh_host=192.168.122.3
+controller01 ansible_ssh_host=192.168.122.4
+controller02 ansible_ssh_host=192.168.122.5
+
+[compute]
+compute00 ansible_ssh_host=192.168.122.6
+compute01 ansible_ssh_host=192.168.122.7
--- /dev/null
+---
+- hosts: jumphost
+ remote_user: root
+ vars_files:
+ - ../var/ubuntu.yml
+ tasks:
+ - name: generate SSH keys
+ shell: ssh-keygen -b 2048 -t rsa -f /root/.ssh/id_rsa -q -N ""
+ args:
+ creates: /root/.ssh/id_rsa
+ - name: fetch public key
+ fetch: src="/root/.ssh/id_rsa.pub" dest="/"
+    - name: remove existing OSA directories
+      shell: "rm -rf {{OSA_PATH}} {{OSA_ETC_PATH}}"
+    - name: clone openstack-ansible
+      shell: "git clone {{OSA_URL}} {{OSA_PATH}} -b {{OSA_BRANCH}}"
+ - name: copy /opt/openstack-ansible/etc/openstack_deploy to /etc/openstack_deploy
+ shell: "/bin/cp -rf {{OSA_PATH}}/etc/openstack_deploy {{OSA_ETC_PATH}}"
+ - name: bootstrap
+ command: "/bin/bash ./scripts/bootstrap-ansible.sh"
+ args:
+ chdir: "{{OSA_PATH}}"
+ - name: generate password token
+ command: "python pw-token-gen.py --file /etc/openstack_deploy/user_secrets.yml"
+ args:
+ chdir: /opt/openstack-ansible/scripts/
+ - name: copy openstack_user_config.yml to /etc/openstack_deploy
+ copy:
+ src: ../file/openstack_user_config.yml
+ dest: "{{OSA_ETC_PATH}}/openstack_user_config.yml"
+ - name: copy cinder.yml to /etc/openstack_deploy/env.d
+ copy:
+ src: ../file/cinder.yml
+ dest: "{{OSA_ETC_PATH}}/env.d/cinder.yml"
+ - name: copy user_variables.yml to /etc/openstack_deploy/
+ copy:
+ src: ../file/user_variables.yml
+ dest: "{{OSA_ETC_PATH}}/user_variables.yml"
+ - name: configure network
+ template:
+ src: ../template/bifrost/controller.interface.j2
+ dest: /etc/network/interfaces
+ notify:
+ - restart network service
+ handlers:
+ - name: restart network service
+ shell: "/sbin/ifconfig ens3 0 &&/sbin/ifdown -a && /sbin/ifup -a"
+
+- hosts: localhost
+ remote_user: root
+ tasks:
+ - name: Generate authorized_keys
+ shell: "/bin/cat /jumphost/root/.ssh/id_rsa.pub >> ../file/authorized_keys"
--- /dev/null
+---
+- hosts: all
+ remote_user: root
+ vars_files:
+ - ../var/ubuntu.yml
+ tasks:
+ - name: add public key to host
+ copy:
+ src: ../file/authorized_keys
+ dest: /root/.ssh/authorized_keys
+ - name: configure modules
+ copy:
+ src: ../file/modules
+ dest: /etc/modules
+
+- hosts: controller
+ remote_user: root
+ vars_files:
+ - ../var/ubuntu.yml
+ tasks:
+ - name: configure network
+ template:
+ src: ../template/bifrost/controller.interface.j2
+ dest: /etc/network/interfaces
+ notify:
+ - restart network service
+ handlers:
+ - name: restart network service
+ shell: "/sbin/ifconfig ens3 0 &&/sbin/ifdown -a && /sbin/ifup -a"
+
+- hosts: compute
+ remote_user: root
+ vars_files:
+ - ../var/ubuntu.yml
+ tasks:
+ - name: configure network
+ template:
+ src: ../template/bifrost/compute.interface.j2
+ dest: /etc/network/interfaces
+ notify:
+ - restart network service
+ handlers:
+ - name: restart network service
+ shell: "/sbin/ifconfig ens3 0 &&/sbin/ifdown -a && /sbin/ifup -a"
+
+- hosts: compute01
+ remote_user: root
+ tasks:
+ - name: make nfs dir
+ file: "dest=/images mode=777 state=directory"
+    - name: add nfs ports to /etc/services
+ shell: "echo 'nfs 2049/tcp' >> /etc/services && echo 'nfs 2049/udp' >> /etc/services"
+ - name: configure NFS
+ copy:
+ src: ../file/exports
+ dest: /etc/exports
+ notify:
+ - restart nfs service
+ handlers:
+ - name: restart nfs service
+ service: name=nfs-kernel-server state=restarted
--- /dev/null
+#!/bin/bash
+
+export OSA_PATH=/opt/openstack-ansible
+export LOG_PATH=$OSA_PATH/log
+export PLAYBOOK_PATH=$OSA_PATH/playbooks
+export OSA_BRANCH=${OSA_BRANCH:-"master"}
+
+JUMPHOST_IP="192.168.122.2"
+
+sudo /bin/rm -rf $LOG_PATH
+sudo /bin/mkdir -p $LOG_PATH
+sudo /bin/cp /root/.ssh/id_rsa.pub ../file/authorized_keys
+sudo echo -e '\n'>>../file/authorized_keys
+
+cd ../playbooks/
+# this will prepare the jump host:
+# clone OpenStack-Ansible, bootstrap it and configure the network
+sudo ansible-playbook -i inventory jumphost_configuration.yml -vvv
+
+# this will prepare the target hosts:
+# configure the network and NFS
+sudo ansible-playbook -i inventory targethost_configuration.yml
+
+# deploy OpenStack using OpenStack-Ansible
+
+echo "set UP Host !"
+sudo /bin/sh -c "ssh root@$JUMPHOST_IP openstack-ansible \
+ $PLAYBOOK_PATH/setup-hosts.yml" | \
+ tee $LOG_PATH/setup-host.log
+
+#check the result of openstack-ansible setup-hosts.yml
+#if failed, exit with exit code 1
+grep "failed=1" $LOG_PATH/setup-host.log>/dev/null \
+ || grep "unreachable=1" $LOG_PATH/setup-host.log>/dev/null
+if [ $? -eq 0 ]; then
+ echo "failed setup host!"
+ exit 1
+else
+ echo "setup host successfully!"
+fi
+
+echo "Set UP Infrastructure !"
+sudo /bin/sh -c "ssh root@$JUMPHOST_IP openstack-ansible \
+ $PLAYBOOK_PATH/setup-infrastructure.yml" | \
+ tee $LOG_PATH/setup-infrastructure.log
+
+grep "failed=1" $LOG_PATH/setup-infrastructure.log>/dev/null \
+ || grep "unreachable=1" $LOG_PATH/setup-infrastructure.log>/dev/null
+if [ $? -eq 0 ]; then
+ echo "failed setup infrastructure!"
+ exit 1
+else
+ echo "setup infrastructure successfully!"
+fi
+
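+# Sanity-check the Galera cluster through the jump host. With the three
+# controllers defined in the inventory, a healthy cluster is expected to
+# report wsrep_cluster_size of 3 and wsrep_cluster_status of Primary.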
+sudo /bin/sh -c "ssh root@$JUMPHOST_IP ansible -i $PLAYBOOK_PATH/inventory/ \
+ galera_container -m shell \
+ -a "mysql -h localhost -e 'show status like \"%wsrep_cluster_%\";'"" \
+ | tee $LOG_PATH/galera.log
+
+grep "FAILED" $LOG_PATH/galera.log>/dev/null
+if [ $? -eq 0 ]; then
+ echo "failed verify the database cluster!"
+ exit 1
+else
+ echo "verify the database cluster successfully!"
+fi
+
+echo "Set UP OpenStack !"
+sudo /bin/sh -c "ssh root@$JUMPHOST_IP openstack-ansible \
+ $PLAYBOOK_PATH/setup-openstack.yml" | \
+ tee $LOG_PATH/setup-openstack.log
+
+grep "failed=1" $LOG_PATH/setup-openstack.log>/dev/null \
+ || grep "unreachable=1" $LOG_PATH/setup-openstack.log>/dev/null
+if [ $? -eq 0 ]; then
+ echo "failed setup openstack!"
+ exit 1
+else
+ echo "OpenStack successfully deployed!"
+ exit 0
+fi
--- /dev/null
+# This file describes the network interfaces available on your system
+# and how to activate them. For more information, see interfaces(5).
+
+# The loopback network interface
+auto lo
+iface lo inet loopback
+
+
+# Physical interface
+auto ens3
+iface ens3 inet manual
+
+# Container/Host management VLAN interface
+auto ens3.10
+iface ens3.10 inet manual
+ vlan-raw-device ens3
+
+# OpenStack Networking VXLAN (tunnel/overlay) VLAN interface
+auto ens3.30
+iface ens3.30 inet manual
+ vlan-raw-device ens3
+
+# Storage network VLAN interface (optional)
+auto ens3.20
+iface ens3.20 inet manual
+ vlan-raw-device ens3
+
+# Container/Host management bridge
+auto br-mgmt
+iface br-mgmt inet static
+ bridge_stp off
+ bridge_waitport 0
+ bridge_fd 0
+ bridge_ports ens3.10
+ address {{host_info[inventory_hostname].MGMT_IP}}
+ netmask 255.255.252.0
+
+# compute1 VXLAN (tunnel/overlay) bridge config
+auto br-vxlan
+iface br-vxlan inet static
+ bridge_stp off
+ bridge_waitport 0
+ bridge_fd 0
+ bridge_ports ens3.30
+ address {{host_info[inventory_hostname].VXLAN_IP}}
+ netmask 255.255.252.0
+
+# OpenStack Networking VLAN bridge
+auto br-vlan
+iface br-vlan inet static
+ bridge_stp off
+ bridge_waitport 0
+ bridge_fd 0
+ bridge_ports ens3
+ address {{host_info[inventory_hostname].VLAN_IP}}
+ netmask 255.255.255.0
+ gateway 192.168.122.1
+ offload-sg off
+ # Create veth pair, don't bomb if already exists
+ pre-up ip link add br-vlan-veth type veth peer name eth12 || true
+ # Set both ends UP
+ pre-up ip link set br-vlan-veth up
+ pre-up ip link set eth12 up
+ # Delete veth pair on DOWN
+ post-down ip link del br-vlan-veth || true
+ bridge_ports br-vlan-veth
+
+# Add an additional address to br-vlan
+iface br-vlan inet static
+ # Flat network default gateway
+ # -- This needs to exist somewhere for network reachability
+ # -- from the router namespace for floating IP paths.
+ # -- Putting this here is primarily for tempest to work.
+ address {{host_info[inventory_hostname].VLAN_IP_SECOND}}
+ netmask 255.255.252.0
+ dns-nameserver 8.8.8.8 8.8.4.4
+
+# compute1 Storage bridge
+auto br-storage
+iface br-storage inet static
+ bridge_stp off
+ bridge_waitport 0
+ bridge_fd 0
+ bridge_ports ens3.20
+ address {{host_info[inventory_hostname].STORAGE_IP}}
+ netmask 255.255.252.0
--- /dev/null
+# This file describes the network interfaces available on your system
+# and how to activate them. For more information, see interfaces(5).
+
+# The loopback network interface
+auto lo
+iface lo inet loopback
+
+# Physical interface
+auto ens3
+iface ens3 inet manual
+
+# Container/Host management VLAN interface
+auto ens3.10
+iface ens3.10 inet manual
+ vlan-raw-device ens3
+
+# OpenStack Networking VXLAN (tunnel/overlay) VLAN interface
+auto ens3.30
+iface ens3.30 inet manual
+ vlan-raw-device ens3
+
+# Storage network VLAN interface (optional)
+auto ens3.20
+iface ens3.20 inet manual
+ vlan-raw-device ens3
+
+# Container/Host management bridge
+auto br-mgmt
+iface br-mgmt inet static
+ bridge_stp off
+ bridge_waitport 0
+ bridge_fd 0
+ bridge_ports ens3.10
+ address {{host_info[inventory_hostname].MGMT_IP}}
+ netmask 255.255.252.0
+
+# OpenStack Networking VXLAN (tunnel/overlay) bridge
+#
+# Only the COMPUTE and NETWORK nodes must have an IP address
+# on this bridge. When used by infrastructure nodes, the
+# IP addresses are assigned to containers which use this
+# bridge.
+#
+auto br-vxlan
+iface br-vxlan inet manual
+ bridge_stp off
+ bridge_waitport 0
+ bridge_fd 0
+ bridge_ports ens3.30
+
+# OpenStack Networking VLAN bridge
+auto br-vlan
+iface br-vlan inet static
+ bridge_stp off
+ bridge_waitport 0
+ bridge_fd 0
+ bridge_ports ens3
+ address {{host_info[inventory_hostname].VLAN_IP}}
+ netmask 255.255.255.0
+ gateway 192.168.122.1
+ dns-nameserver 8.8.8.8 8.8.4.4
+
+# compute1 Storage bridge
+auto br-storage
+iface br-storage inet static
+ bridge_stp off
+ bridge_waitport 0
+ bridge_fd 0
+ bridge_ports ens3.20
+ address {{host_info[inventory_hostname].STORAGE_IP}}
+ netmask 255.255.252.0
--- /dev/null
+---
+OSA_URL: https://git.openstack.org/openstack/openstack-ansible
+OSA_PATH: /opt/openstack-ansible
+OSA_ETC_PATH: /etc/openstack_deploy
+JUMPHOST_IP: 192.168.122.2
+host_info: {
+  'jumphost': {'MGMT_IP': '172.29.236.10', 'VLAN_IP': '192.168.122.2', 'STORAGE_IP': '172.29.244.10'},
+  'controller00': {'MGMT_IP': '172.29.236.11', 'VLAN_IP': '192.168.122.3', 'STORAGE_IP': '172.29.244.11'},
+  'controller01': {'MGMT_IP': '172.29.236.12', 'VLAN_IP': '192.168.122.4', 'STORAGE_IP': '172.29.244.12'},
+  'controller02': {'MGMT_IP': '172.29.236.13', 'VLAN_IP': '192.168.122.5', 'STORAGE_IP': '172.29.240.13'},
+  'compute00': {'MGMT_IP': '172.29.236.14', 'VLAN_IP': '192.168.122.6', 'VLAN_IP_SECOND': '173.29.241.1', 'VXLAN_IP': '172.29.240.14', 'STORAGE_IP': '172.29.244.14'},
+  'compute01': {'MGMT_IP': '172.29.236.15', 'VLAN_IP': '192.168.122.7', 'VLAN_IP_SECOND': '173.29.241.2', 'VXLAN_IP': '172.29.240.15', 'STORAGE_IP': '172.29.244.15'}}
multiple => true,
}
- # disable selinux in case of RHEL
- if ($::osfamily == 'RedHat') {
- class { 'selinux':
- mode => 'disabled',
- }
- }
-
# update hosts
create_resources('host', hiera_hash('hosts'))
}
'source keystonerc_admin;keystone endpoint-list'" \
| grep $admin_ip | sed 's/ /\n/g' | grep ^http | head -1) &> /dev/null
+elif [ "$installer_type" == "daisy" ]; then
+ verify_connectivity $installer_ip
+ cluster=$(sshpass -p r00tme ssh 2>/dev/null $ssh_options root@${installer_ip} \
+ "source ~/daisyrc_admin; daisy cluster-list"|grep active|head -1|awk -F "|" '{print $3}') &> /dev/null
+ if [ -z $cluster ]; then
+ echo "No active cluster detected in daisy"
+ exit 1
+ fi
+
+ sshpass -p r00tme scp 2>/dev/null $ssh_options root@${installer_ip}:/etc/kolla/admin-openrc.sh $dest_path &> /dev/null
+
else
error "Installer $installer is not supported by this script"
fi
+---
# Vnic configuration for foreman deploy
network:
+---
# Vnic configuration for fuel deploy
network:
'intel-pod5' 'intel-pod6' 'intel-pod7' 'intel-pod8' \
'ericsson-pod1' 'ericsson-pod2' \
'ericsson-virtual1' 'ericsson-virtual2' 'ericsson-virtual3' \
-'ericsson-virtual4' 'ericsson-virtual5' \
+'ericsson-virtual4' 'ericsson-virtual5' 'ericsson-virtual12' \
'arm-pod1' 'arm-pod3' \
'huawei-pod1' 'huawei-pod2' 'huawei-pod3' 'huawei-pod4' 'huawei-pod5' \
'huawei-pod6' 'huawei-pod7' 'huawei-pod12' \
+---
functest:
-
name: tempest_smoke_serial
+---
qtip:
-
name: compute_test_suite
fields:
- field: details.index
-
- name:storage_test_suite
+ name: storage_test_suite
format: qpi
test_family: storage
visualizations:
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+import requests
+
from tornado.web import RequestHandler
from tornado.escape import json_encode
+from tornado.escape import json_decode
-class FiltersHandler(RequestHandler):
- def get(self):
- return self.write(json_encode({'status': 'SUCCESS'}))
+class BaseHandler(RequestHandler):
+ def _set_header(self):
+ self.set_header('Access-Control-Allow-Origin', '*')
+ self.set_header('Access-Control-Allow-Headers',
+ 'Content-Type, Content-Length, Authorization, \
+ Accept, X-Requested-With , PRIVATE-TOKEN')
+ self.set_header('Access-Control-Allow-Methods',
+ 'PUT, POST, GET, DELETE, OPTIONS')
-class ScenariosHandler(RequestHandler):
+class FiltersHandler(BaseHandler):
def get(self):
- return self.write(json_encode({'status': 'SUCCESS'}))
+ self._set_header()
+
+ filters = {
+ 'filters': {
+ 'status': ['success', 'warning', 'danger'],
+ 'projects': ['functest', 'yardstick'],
+ 'installers': ['apex', 'compass', 'fuel', 'joid'],
+ 'version': ['colorado', 'master'],
+ 'loops': ['daily', 'weekly', 'monthly'],
+ 'time': ['10 days', '30 days']
+ }
+ }
+ return self.write(json_encode(filters))
+
+
+class ScenariosHandler(BaseHandler):
+ def post(self):
+ self._set_header()
+
+ body = json_decode(self.request.body)
+ args = self._get_args(body)
+
+ scenarios = self._get_result_data(self._get_scenarios(), args)
+
+ return self.write(json_encode(dict(scenarios=scenarios)))
+
+ def _get_result_data(self, data, args):
+ data = self._filter_status(data, args)
+ return {s: self._get_scenario_result(s, data[s], args) for s in data}
+
+ def _filter_status(self, data, args):
+ return {k: v for k, v in data.items() if v['status'] in args['status']}
+
+ def _get_scenario_result(self, scenario, data, args):
+ result = {
+ 'status': data.get('status'),
+ 'installers': self._get_installers_result(data['installers'], args)
+ }
+ return result
+
+ def _get_installers_result(self, data, args):
+ func = self._get_installer_result
+ return {k: func(k, data.get(k, {}), args) for k in args['installers']}
+
+ def _get_installer_result(self, installer, data, args):
+ projects = data.get(args['version'], [])
+ return [self._get_project_data(projects, p) for p in args['projects']]
+
+ def _get_project_data(self, projects, project):
+ atom = {
+ 'project': project,
+ 'score': None,
+ 'status': None
+ }
+ for p in projects:
+ if p['project'] == project:
+ return p
+ return atom
+
+ def _get_scenarios(self):
+ url = 'http://testresults.opnfv.org/test/api/v1/scenarios'
+ resp = requests.get(url).json()
+ data = self._change_to_utf8(resp).get('scenarios', {})
+ return {a.get('name'): self._get_scenario(a.get('installers', [])
+ ) for a in data}
+
+ def _get_scenario(self, data):
+ installers = {a.get('installer'): self._get_installer(a.get('versions',
+ [])
+ ) for a in data}
+ scenario = {
+ 'status': self._get_status(),
+ 'installers': installers
+ }
+ return scenario
+
+ def _get_status(self):
+ return 'success'
+
+ def _get_installer(self, data):
+ return {a.get('version'): self._get_version(a) for a in data}
+
+ def _get_version(self, data):
+ try:
+ scores = data.get('score', {}).get('projects')[0]
+ trusts = data.get('trust_indicator', {}).get('projects')[0]
+ except (TypeError, IndexError):
+ return []
+ else:
+ scores = {key: [dict(date=a.get('date')[:10],
+ score=a.get('score')
+ ) for a in scores[key]] for key in scores}
+ trusts = {key: [dict(date=a.get('date')[:10],
+ status=a.get('status')
+ ) for a in trusts[key]] for key in trusts}
+ atom = self._get_atom(scores, trusts)
+ return [dict(project=k,
+ score=sorted(atom[k], reverse=True)[0].get('score'),
+ status=sorted(atom[k], reverse=True)[0].get('status')
+ ) for k in atom if atom[k]]
+
+ def _get_atom(self, scores, trusts):
+ s = {k: {a['date']: a['score'] for a in scores[k]} for k in scores}
+ t = {k: {a['date']: a['status'] for a in trusts[k]} for k in trusts}
+ return {k: [dict(score=s[k][a], status=t[k][a], data=a
+ ) for a in s[k] if a in t[k]] for k in s}
+
+ def _change_to_utf8(self, obj):
+ if isinstance(obj, dict):
+ return {str(k): self._change_to_utf8(v) for k, v in obj.items()}
+ elif isinstance(obj, list):
+ return [self._change_to_utf8(ele) for ele in obj]
+ else:
+ try:
+ new = eval(obj)
+ if isinstance(new, int):
+ return obj
+ return self._change_to_utf8(new)
+ except (NameError, TypeError, SyntaxError):
+ return str(obj)
+
+ def _get_args(self, body):
+ status = self._get_status_args(body)
+ projects = self._get_projects_args(body)
+ installers = self._get_installers_args(body)
+
+ args = {
+ 'status': status,
+ 'projects': projects,
+ 'installers': installers,
+ 'version': body.get('version', 'master').lower(),
+ 'loops': body.get('loops', 'daily').lower(),
+ 'time': body.get('times', '10 days')[:2].lower()
+ }
+ return args
+
+ def _get_status_args(self, body):
+ status_all = ['success', 'warning', 'danger']
+ status = [a.lower() for a in body.get('status', ['all'])]
+ return status_all if 'all' in status else status
+
+ def _get_projects_args(self, body):
+ project_all = ['functest', 'yardstick']
+ projects = [a.lower() for a in body.get('projects', ['all'])]
+ return project_all if 'all' in projects else projects
+
+ def _get_installers_args(self, body):
+ installer_all = ['apex', 'compass', 'fuel', 'joid']
+ installers = [a.lower() for a in body.get('installers', ['all'])]
+ return installer_all if 'all' in installers else installers
--- /dev/null
+apt-get install -y python-pip
+pip install tornado
+pip install requests
export CONFIG_REPORTING_YAML=./reporting.yaml
declare -a versions=(colorado master)
-declare -a projects=(functest yardstick)
+declare -a projects=(functest storperf yardstick)
project=$1
reporting_type=$2
# $1 | $2
# functest | status, vims, tempest
# yardstick |
+# storperf |
if [ -z "$1" ]; then
echo "********************************"
echo "********************************"
python ./yardstick/reporting-status.py
echo "Yardstick reporting status...OK"
+
+ echo "********************************"
+ echo " Storperf reporting "
+ echo "********************************"
+ python ./storperf/reporting-status.py
+ echo "Storperf reporting status...OK"
+
else
if [ -z "$2" ]; then
reporting_type="status"
# supervisor config
cp /home/opnfv/utils/test/reporting/docker/supervisor.conf /etc/supervisor/conf.d/
-# build pages
-cd pages
ln -s /usr/bin/nodejs /usr/bin/node
-npm install
-npm install -g grunt bower
-bower install --allow-root
-grunt build
user = root
command = service nginx restart
autorestart = true
+
+[program:reporting_angular]
+user = root
+directory = /home/opnfv/utils/test/reporting/pages
+command = bash angular.sh
+autorestart = true
otherTestCases = []
reportingDate = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
-# init just tempest to get the list of scenarios
-# as all the scenarios run Tempest
-tempest = tc.TestCase("tempest_smoke_serial", "functest", -1)
+# init just connection_check to get the list of scenarios
+# as all the scenarios run connection_check
+healthcheck = tc.TestCase("connection_check", "functest", -1)
# Retrieve the Functest configuration to detect which tests are relevant
# according to the installer, scenario
# Retrieve test cases of Tier 1 (smoke)
config_tiers = functest_yaml_config.get("tiers")
-# we consider Tier 1 (smoke),2 (features)
+# we consider Tier 0 (healthcheck), Tier 1 (smoke) and Tier 2 (features)
# to validate scenarios
-# Tier > 4 are not used to validate scenarios but we display the results anyway
+# Tiers > 2 are not used to validate scenarios, but we display the results anyway
# tricky thing for the API as some tests are Functest tests
# other tests are declared directly in the feature projects
for tier in config_tiers:
- if tier['order'] > 0 and tier['order'] < 2:
+ if tier['order'] >= 0 and tier['order'] < 2:
for case in tier['testcases']:
if case['name'] not in blacklist:
testValid.append(tc.TestCase(case['name'],
# For all the installers
for installer in installers:
# get scenarios
- scenario_results = rp_utils.getScenarios(tempest, installer, version)
+ scenario_results = rp_utils.getScenarios(healthcheck,
+ installer,
+ version)
scenario_stats = rp_utils.getScenarioStats(scenario_results)
items = {}
scenario_result_criteria = {}
- scenario_file_name = ("./display/" + version +
- "/functest/scenario_history.txt")
+ scenario_directory = "./display/" + version + "/functest/"
+ scenario_file_name = scenario_directory + "scenario_history.txt"
+
+ # check that the directory exists, if not create it
+ # (first run on new version)
+ if not os.path.exists(scenario_directory):
+ os.makedirs(scenario_directory)
+
# initiate scenario file if it does not exist
if not os.path.isfile(scenario_file_name):
with open(scenario_file_name, "a") as my_file:
if len(s_result) > 0:
build_tag = s_result[len(s_result)-1]['build_tag']
logger.debug("Build tag: %s" % build_tag)
- s_url = s_url = rp_utils.getJenkinsUrl(build_tag)
+ s_url = rp_utils.getJenkinsUrl(build_tag)
+ if s_url is None:
+            s_url = "http://testresults.opnfv.org/reporting"
logger.info("last jenkins url: %s" % s_url)
testCases2BeDisplayed = []
# Check if test case is runnable / installer, scenario
'ocl': 'OCL',
'tempest_smoke_serial': 'Tempest (smoke)',
'tempest_full_parallel': 'Tempest (full)',
+ 'tempest_defcore': 'Tempest (Defcore)',
'rally_sanity': 'Rally (smoke)',
'bgpvpn': 'bgpvpn',
'rally_full': 'Rally (full)',
'parser': 'Parser',
'connection_check': 'Health (connection)',
'api_check': 'Health (api)',
- 'snaps_smoke': 'SNAPS'}
+ 'snaps_smoke': 'SNAPS',
+ 'snaps_health_check': 'Health (dhcp)',
+ 'netready': 'Netready',
+ 'barometer': 'Barometer'}
try:
self.displayName = display_name_matrix[self.name]
except:
'ocl': 'ocl',
'tempest_smoke_serial': 'tempest_smoke_serial',
'tempest_full_parallel': 'tempest_full_parallel',
+ 'tempest_defcore': 'tempest_defcore',
'rally_sanity': 'rally_sanity',
'bgpvpn': 'bgpvpn',
'rally_full': 'rally_full',
'parser': 'parser-basics',
'connection_check': 'connection_check',
'api_check': 'api_check',
- 'snaps_smoke': 'snaps_smoke'
- }
+ 'snaps_smoke': 'snaps_smoke',
+ 'snaps_health_check': 'snaps_health_check',
+ 'netready': 'gluon_vping',
+ 'barometer': 'barometercollectd'}
try:
return test_match_matrix[self.name]
except:
-<!DOCTYPE HTML>\r
-<!--\r
- Phantom by HTML5 UP\r
- html5up.net | @ajlkn\r
- Free for personal and commercial use under the CCA 3.0 license (html5up.net/license)\r
--->\r
-<html>\r
- <head>\r
- <title>Phantom by HTML5 UP</title>\r
- <meta charset="utf-8" />\r
- <meta name="viewport" content="width=device-width, initial-scale=1" />\r
- <!--[if lte IE 8]><script src="3rd_party/js/ie/html5shiv.js"></script><![endif]-->\r
- <link rel="stylesheet" href="3rd_party/css/main.css" />\r
- <!--[if lte IE 9]><link rel="stylesheet" href="3rd_party/css/ie9.css" /><![endif]-->\r
- <!--[if lte IE 8]><link rel="stylesheet" href="3rd_party/css/ie8.css" /><![endif]-->\r
- </head>\r
- <body>\r
- <!-- Wrapper -->\r
- <div id="wrapper">\r
-\r
- <!-- Header -->\r
- <header id="header">\r
- <div class="inner">\r
-\r
- <!-- Logo -->\r
- <a href="index.html" class="logo">\r
- <span class="symbol"><img src="img/logo.svg" alt="" /></span><span class="title">Phantom</span>\r
- </a>\r
-\r
- <!-- Nav -->\r
- <!-- <nav>\r
- <ul>\r
- <li><a href="#menu">Menu</a></li>\r
- </ul>\r
- </nav>\r
- --->\r
- </div>\r
- </header>\r
-\r
- <!-- Menu -->\r
- <!--- <nav id="menu">\r
- <h2>Menu</h2>\r
- <ul>\r
- <li><a href="index.html">Home</a></li>\r
- <li><a href="colorado.html">Colorado</a></li>\r
- <li><a href="danube.html">Danube</a></li>\r
- </ul>\r
- </nav>\r
- --->\r
- <!-- Main -->\r
- <div id="main">\r
- <div class="inner">\r
- <header>\r
- <h1>Danube reporting (Master)</h1>\r
- </header>\r
- <section class="tiles">\r
- <article class="style3">\r
- <span class="image">\r
- <img src="img/functest.jpg" alt="" />\r
- </span>\r
- <a href="functest-master.html">\r
- <h2>Functest</h2>\r
- <div class="content">\r
- <p>Functional testing</p>\r
- </div>\r
- </a>\r
- </article>\r
- <article class="style2">\r
- <span class="image">\r
- <img src="img/yardstick.jpg" alt="" />\r
- </span>\r
- <a href="master/yardstick/status-apex.html">\r
- <h2>Yardstick</h2>\r
- <div class="content">\r
- <p>Qualification and performance testing</p>\r
- </div>\r
- </a>\r
- </article>\r
- </section>\r
- </div>\r
- </div>\r
-\r
- <!-- Footer -->\r
- <footer id="footer">\r
- <div class="inner">\r
- <section>\r
- <h2>OPNFV Testing Working group</h2>\r
- </section>\r
- <section>\r
- <h2>Follow</h2>\r
- <ul class="icons">\r
- <li><a href="https://twitter.com/opnfv" class="icon style2 fa-twitter"><span class="label">Twitter</span></a></li>\r
- <li><a href="http://git.opnfv.org" class="icon style2 fa-github"><span class="label">GitHub</span></a></li>\r
- <li><a href="mailto:test-wg@list.opnfv.org" class="icon style2 fa-envelope-o"><span class="label">Email</span></a></li>\r
- </ul>\r
- </section>\r
- <ul class="copyright">\r
- <li>© Untitled. All rights reserved</li><li>Design: <a href="http://html5up.net">HTML5 UP</a></li>\r
- </ul>\r
- </div>\r
- </footer>\r
-\r
- </div>\r
-\r
- <!-- Scripts -->\r
- <script src="3rd_party/js/jquery.min.js"></script>\r
- <script src="3rd_party/js/skel.min.js"></script>\r
- <script src="3rd_party/js/util.js"></script>\r
- <!--[if lte IE 8]><script src="3rd_party/js/ie/respond.min.js"></script><![endif]-->\r
- <script src="3rd_party/js/main.js"></script>\r
-\r
- </body>\r
-</html>\r
+<!DOCTYPE HTML>
+<!--
+ Phantom by HTML5 UP
+ html5up.net | @ajlkn
+ Free for personal and commercial use under the CCA 3.0 license (html5up.net/license)
+-->
+<html>
+ <head>
+ <title>Phantom by HTML5 UP</title>
+ <meta charset="utf-8" />
+ <meta name="viewport" content="width=device-width, initial-scale=1" />
+ <!--[if lte IE 8]><script src="3rd_party/js/ie/html5shiv.js"></script><![endif]-->
+ <link rel="stylesheet" href="3rd_party/css/main.css" />
+ <!--[if lte IE 9]><link rel="stylesheet" href="3rd_party/css/ie9.css" /><![endif]-->
+ <!--[if lte IE 8]><link rel="stylesheet" href="3rd_party/css/ie8.css" /><![endif]-->
+ </head>
+ <body>
+ <!-- Wrapper -->
+ <div id="wrapper">
+
+ <!-- Header -->
+ <header id="header">
+ <div class="inner">
+
+ <!-- Logo -->
+ <a href="index.html" class="logo">
+ <span class="symbol"><img src="img/logo.svg" alt="" /></span><span class="title">Phantom</span>
+ </a>
+
+ <!-- Nav -->
+ <!-- <nav>
+ <ul>
+ <li><a href="#menu">Menu</a></li>
+ </ul>
+ </nav>
+ --->
+ </div>
+ </header>
+
+ <!-- Menu -->
+ <!--- <nav id="menu">
+ <h2>Menu</h2>
+ <ul>
+ <li><a href="index.html">Home</a></li>
+ <li><a href="colorado.html">Colorado</a></li>
+ <li><a href="danube.html">Danube</a></li>
+ </ul>
+ </nav>
+ --->
+ <!-- Main -->
+ <div id="main">
+ <div class="inner">
+ <header>
+ <h1>Danube reporting</h1>
+ </header>
+ <section class="tiles">
+ <article class="style3">
+ <span class="image">
+ <img src="img/functest.jpg" alt="" />
+ </span>
+ <a href="functest-danube.html">
+ <h2>Functest</h2>
+ <div class="content">
+ <p>Functional testing</p>
+ </div>
+ </a>
+ </article>
+ <article class="style2">
+ <span class="image">
+ <img src="img/yardstick.jpg" alt="" />
+ </span>
+ <a href="danube/yardstick/status-apex.html">
+ <h2>Yardstick</h2>
+ <div class="content">
+ <p>Qualification and performance testing</p>
+ </div>
+ </a>
+ </article>
+ <article class="style4">
+ <span class="image">
+ <img src="img/storperf.jpg" alt="" />
+ </span>
+ <a href="danube/storperf/status-apex.html">
+ <h2>Storperf</h2>
+ <div class="content">
+ <p>Storage testing</p>
+ </div>
+ </a>
+ </article>
+ </section>
+ </div>
+ </div>
+
+ <!-- Footer -->
+ <footer id="footer">
+ <div class="inner">
+ <section>
+ <h2>OPNFV Testing Working group</h2>
+ </section>
+ <section>
+ <h2>Follow</h2>
+ <ul class="icons">
+ <li><a href="https://twitter.com/opnfv" class="icon style2 fa-twitter"><span class="label">Twitter</span></a></li>
+ <li><a href="http://git.opnfv.org" class="icon style2 fa-github"><span class="label">GitHub</span></a></li>
+ <li><a href="mailto:test-wg@list.opnfv.org" class="icon style2 fa-envelope-o"><span class="label">Email</span></a></li>
+ </ul>
+ </section>
+ <ul class="copyright">
+ <li>© Untitled. All rights reserved</li><li>Design: <a href="http://html5up.net">HTML5 UP</a></li>
+ </ul>
+ </div>
+ </footer>
+
+ </div>
+
+ <!-- Scripts -->
+ <script src="3rd_party/js/jquery.min.js"></script>
+ <script src="3rd_party/js/skel.min.js"></script>
+ <script src="3rd_party/js/util.js"></script>
+ <!--[if lte IE 8]><script src="3rd_party/js/ie/respond.min.js"></script><![endif]-->
+ <script src="3rd_party/js/main.js"></script>
+
+ </body>
+</html>
--- /dev/null
+<!DOCTYPE HTML>
+<!--
+ Phantom by HTML5 UP
+ html5up.net | @ajlkn
+ Free for personal and commercial use under the CCA 3.0 license (html5up.net/license)
+-->
+<html>
+ <head>
+ <title>Phantom by HTML5 UP</title>
+ <meta charset="utf-8" />
+ <meta name="viewport" content="width=device-width, initial-scale=1" />
+ <!--[if lte IE 8]><script src="3rd_party/js/ie/html5shiv.js"></script><![endif]-->
+ <link rel="stylesheet" href="3rd_party/css/main.css" />
+ <!--[if lte IE 9]><link rel="stylesheet" href="3rd_party/css/ie9.css" /><![endif]-->
+ <!--[if lte IE 8]><link rel="stylesheet" href="3rd_party/css/ie8.css" /><![endif]-->
+ </head>
+ <body>
+ <!-- Wrapper -->
+ <div id="wrapper">
+
+ <!-- Header -->
+ <header id="header">
+ <div class="inner">
+
+ <!-- Logo -->
+ <a href="index.html" class="logo">
+ <span class="symbol"><img src="img/logo.svg" alt="" /></span><span class="title">Phantom</span>
+ </a>
+
+ <!-- Nav -->
+ <!-- <nav>
+ <ul>
+ <li><a href="#menu">Menu</a></li>
+ </ul>
+ </nav>
+ --->
+ </div>
+ </header>
+
+ <!-- Menu -->
+ <!--- <nav id="menu">
+ <h2>Menu</h2>
+ <ul>
+ <li><a href="index.html">Home</a></li>
+ <li><a href="colorado.html">Colorado</a></li>
+ <li><a href="danube.html">Danube</a></li>
+ </ul>
+ </nav>
+ --->
+ <!-- Main -->
+ <div id="main">
+ <div class="inner">
+ <header>
+ <h1>Functest reporting</h1>
+ </header>
+ <section class="tiles">
+ <article class="style5">
+ <span class="image">
+ <img src="img/pic05.jpg" alt="" />
+ </span>
+ <a href="danube/functest/status-apex.html">
+ <h2>Status</h2>
+ <div class="content">
+ <p>Scenario status</p>
+ </div>
+ </a>
+ </article>
+ <article class="style2">
+ <span class="image">
+ <img src="img/pic02.jpg" alt="" />
+ </span>
+ <a href="danube/functest/vims-apex.html">
+ <h2>vIMS</h2>
+ <div class="content">
+ <p>Virtual IMS</p>
+ </div>
+ </a>
+ </article>
+ <article class="style3">
+ <span class="image">
+ <img src="img/pic03.jpg" alt="" />
+ </span>
+ <a href="danube/functest/tempest-apex.html">
+ <h2>Tempest</h2>
+ <div class="content">
+ <p>Tempest OpenStack suite</p>
+ </div>
+ </a>
+ </article>
+ </section>
+ </div>
+ </div>
+
+ <!-- Footer -->
+ <footer id="footer">
+ <div class="inner">
+ <section>
+ <h2>OPNFV Testing Working group</h2>
+ </section>
+ <section>
+ <h2>Follow</h2>
+ <ul class="icons">
+ <li><a href="https://twitter.com/opnfv" class="icon style2 fa-twitter"><span class="label">Twitter</span></a></li>
+ <li><a href="http://git.opnfv.org" class="icon style2 fa-github"><span class="label">GitHub</span></a></li>
+ <li><a href="mailto:test-wg@list.opnfv.org" class="icon style2 fa-envelope-o"><span class="label">Email</span></a></li>
+ </ul>
+ </section>
+ <ul class="copyright">
+ <li>© Untitled. All rights reserved</li><li>Design: <a href="http://html5up.net">HTML5 UP</a></li>
+ </ul>
+ </div>
+ </footer>
+
+ </div>
+
+ <!-- Scripts -->
+ <script src="3rd_party/js/jquery.min.js"></script>
+ <script src="3rd_party/js/skel.min.js"></script>
+ <script src="3rd_party/js/util.js"></script>
+ <!--[if lte IE 8]><script src="3rd_party/js/ie/respond.min.js"></script><![endif]-->
+ <script src="3rd_party/js/main.js"></script>
+
+ </body>
+</html>
-<!DOCTYPE HTML>\r
-<!--\r
- Phantom by HTML5 UP\r
- html5up.net | @ajlkn\r
- Free for personal and commercial use under the CCA 3.0 license (html5up.net/license)\r
--->\r
-<html>\r
- <head>\r
- <title>Phantom by HTML5 UP</title>\r
- <meta charset="utf-8" />\r
- <meta name="viewport" content="width=device-width, initial-scale=1" />\r
- <!--[if lte IE 8]><script src="3rd_party/js/ie/html5shiv.js"></script><![endif]-->\r
- <link rel="stylesheet" href="3rd_party/css/main.css" />\r
- <!--[if lte IE 9]><link rel="stylesheet" href="3rd_party/css/ie9.css" /><![endif]-->\r
- <!--[if lte IE 8]><link rel="stylesheet" href="3rd_party/css/ie8.css" /><![endif]-->\r
- </head>\r
- <body>\r
- <!-- Wrapper -->\r
- <div id="wrapper">\r
-\r
- <!-- Header -->\r
- <header id="header">\r
- <div class="inner">\r
-\r
- <!-- Logo -->\r
- <a href="index.html" class="logo">\r
- <span class="symbol"><img src="img/logo.svg" alt="" /></span><span class="title">Phantom</span>\r
- </a>\r
-\r
- <!-- Nav -->\r
- <!-- <nav>\r
- <ul>\r
- <li><a href="#menu">Menu</a></li>\r
- </ul>\r
- </nav>\r
- --->\r
- </div>\r
- </header>\r
-\r
- <!-- Menu -->\r
- <!--- <nav id="menu">\r
- <h2>Menu</h2>\r
- <ul>\r
- <li><a href="index.html">Home</a></li>\r
- <li><a href="colorado.html">Colorado</a></li>\r
- <li><a href="danube.html">Danube</a></li>\r
- </ul>\r
- </nav>\r
- --->\r
- <!-- Main -->\r
- <div id="main">\r
- <div class="inner">\r
- <header>\r
- <h1>Functest reporting</h1>\r
- </header>\r
- <section class="tiles">\r
- <article class="style5">\r
- <span class="image">\r
- <img src="img/pic05.jpg" alt="" />\r
- </span>\r
- <a href="master/status-apex.html">\r
- <h2>Status</h2>\r
- <div class="content">\r
- <p>Scenario status</p>\r
- </div>\r
- </a>\r
- </article>\r
- <article class="style2">\r
- <span class="image">\r
- <img src="img/pic02.jpg" alt="" />\r
- </span>\r
- <a href="master/vims-apex.html">\r
- <h2>vIMS</h2>\r
- <div class="content">\r
- <p>Virtual IMS</p>\r
- </div>\r
- </a>\r
- </article>\r
- <article class="style3">\r
- <span class="image">\r
- <img src="img/pic03.jpg" alt="" />\r
- </span>\r
- <a href="master/tempest-apex.html">\r
- <h2>Tempest</h2>\r
- <div class="content">\r
- <p>Tempest OpenStack suite</p>\r
- </div>\r
- </a>\r
- </article>\r
- </section>\r
- </div>\r
- </div>\r
-\r
- <!-- Footer -->\r
- <footer id="footer">\r
- <div class="inner">\r
- <section>\r
- <h2>OPNFV Testing Working group</h2>\r
- </section>\r
- <section>\r
- <h2>Follow</h2>\r
- <ul class="icons">\r
- <li><a href="https://twitter.com/opnfv" class="icon style2 fa-twitter"><span class="label">Twitter</span></a></li>\r
- <li><a href="http://git.opnfv.org" class="icon style2 fa-github"><span class="label">GitHub</span></a></li>\r
- <li><a href="mailto:test-wg@list.opnfv.org" class="icon style2 fa-envelope-o"><span class="label">Email</span></a></li>\r
- </ul>\r
- </section>\r
- <ul class="copyright">\r
- <li>© Untitled. All rights reserved</li><li>Design: <a href="http://html5up.net">HTML5 UP</a></li>\r
- </ul>\r
- </div>\r
- </footer>\r
-\r
- </div>\r
-\r
- <!-- Scripts -->\r
- <script src="3rd_party/js/jquery.min.js"></script>\r
- <script src="3rd_party/js/skel.min.js"></script>\r
- <script src="3rd_party/js/util.js"></script>\r
- <!--[if lte IE 8]><script src="3rd_party/js/ie/respond.min.js"></script><![endif]-->\r
- <script src="3rd_party/js/main.js"></script>\r
-\r
- </body>\r
-</html>\r
+<!DOCTYPE HTML>
+<!--
+ Phantom by HTML5 UP
+ html5up.net | @ajlkn
+ Free for personal and commercial use under the CCA 3.0 license (html5up.net/license)
+-->
+<html>
+ <head>
+ <title>Phantom by HTML5 UP</title>
+ <meta charset="utf-8" />
+ <meta name="viewport" content="width=device-width, initial-scale=1" />
+ <!--[if lte IE 8]><script src="3rd_party/js/ie/html5shiv.js"></script><![endif]-->
+ <link rel="stylesheet" href="3rd_party/css/main.css" />
+ <!--[if lte IE 9]><link rel="stylesheet" href="3rd_party/css/ie9.css" /><![endif]-->
+ <!--[if lte IE 8]><link rel="stylesheet" href="3rd_party/css/ie8.css" /><![endif]-->
+ </head>
+ <body>
+ <!-- Wrapper -->
+ <div id="wrapper">
+
+ <!-- Header -->
+ <header id="header">
+ <div class="inner">
+
+ <!-- Logo -->
+ <a href="index.html" class="logo">
+ <span class="symbol"><img src="img/logo.svg" alt="" /></span><span class="title">Phantom</span>
+ </a>
+
+ <!-- Nav -->
+ <!-- <nav>
+ <ul>
+ <li><a href="#menu">Menu</a></li>
+ </ul>
+ </nav>
+ --->
+ </div>
+ </header>
+
+ <!-- Menu -->
+ <!--- <nav id="menu">
+ <h2>Menu</h2>
+ <ul>
+ <li><a href="index.html">Home</a></li>
+ <li><a href="colorado.html">Colorado</a></li>
+ <li><a href="danube.html">Danube</a></li>
+ </ul>
+ </nav>
+ --->
+ <!-- Main -->
+ <div id="main">
+ <div class="inner">
+ <header>
+ <h1>Functest reporting</h1>
+ </header>
+ <section class="tiles">
+ <article class="style5">
+ <span class="image">
+ <img src="img/pic05.jpg" alt="" />
+ </span>
+ <a href="master/functest/status-apex.html">
+ <h2>Status</h2>
+ <div class="content">
+ <p>Scenario status</p>
+ </div>
+ </a>
+ </article>
+ <article class="style2">
+ <span class="image">
+ <img src="img/pic02.jpg" alt="" />
+ </span>
+ <a href="master/functest/vims-apex.html">
+ <h2>vIMS</h2>
+ <div class="content">
+ <p>Virtual IMS</p>
+ </div>
+ </a>
+ </article>
+ <article class="style3">
+ <span class="image">
+ <img src="img/pic03.jpg" alt="" />
+ </span>
+ <a href="master/functest/tempest-apex.html">
+ <h2>Tempest</h2>
+ <div class="content">
+ <p>Tempest OpenStack suite</p>
+ </div>
+ </a>
+ </article>
+ </section>
+ </div>
+ </div>
+
+ <!-- Footer -->
+ <footer id="footer">
+ <div class="inner">
+ <section>
+ <h2>OPNFV Testing Working group</h2>
+ </section>
+ <section>
+ <h2>Follow</h2>
+ <ul class="icons">
+ <li><a href="https://twitter.com/opnfv" class="icon style2 fa-twitter"><span class="label">Twitter</span></a></li>
+ <li><a href="http://git.opnfv.org" class="icon style2 fa-github"><span class="label">GitHub</span></a></li>
+ <li><a href="mailto:test-wg@list.opnfv.org" class="icon style2 fa-envelope-o"><span class="label">Email</span></a></li>
+ </ul>
+ </section>
+ <ul class="copyright">
+ <li>© Untitled. All rights reserved</li><li>Design: <a href="http://html5up.net">HTML5 UP</a></li>
+ </ul>
+ </div>
+ </footer>
+
+ </div>
+
+ <!-- Scripts -->
+ <script src="3rd_party/js/jquery.min.js"></script>
+ <script src="3rd_party/js/skel.min.js"></script>
+ <script src="3rd_party/js/util.js"></script>
+ <!--[if lte IE 8]><script src="3rd_party/js/ie/respond.min.js"></script><![endif]-->
+ <script src="3rd_party/js/main.js"></script>
+
+ </body>
+</html>
-<!DOCTYPE HTML>\r
-<!--\r
- Phantom by HTML5 UP\r
- html5up.net | @ajlkn\r
- Free for personal and commercial use under the CCA 3.0 license (html5up.net/license)\r
--->\r
-<html>\r
- <head>\r
- <title>OPNFV reporting</title>\r
- <meta charset="utf-8" />\r
- <meta name="viewport" content="width=device-width, initial-scale=1" />\r
- <!--[if lte IE 8]><script src="3rd_party/js/ie/html5shiv.js"></script><![endif]-->\r
- <link rel="stylesheet" href="3rd_party/css/main.css" />\r
- <!--[if lte IE 9]><link rel="stylesheet" href="3rd_party/css/ie9.css" /><![endif]-->\r
- <!--[if lte IE 8]><link rel="stylesheet" href="3rd_party/css/ie8.css" /><![endif]-->\r
- </head>\r
- <body>\r
- <!-- Wrapper -->\r
- <div id="wrapper">\r
-\r
- <!-- Header -->\r
- <header id="header">\r
- <div class="inner">\r
-\r
- <!-- Logo -->\r
- <a href="index.html" class="logo">\r
- <span class="symbol"><img src="img/logo.svg" alt="" /></span><span class="title">Phantom</span>\r
- </a>\r
-\r
- <!-- Nav -->\r
- <!-- <nav>\r
- <ul>\r
- <li><a href="#menu">Menu</a></li>\r
- </ul>\r
- </nav>\r
- --->\r
- </div>\r
- </header>\r
-\r
- <!-- Menu -->\r
- <!--- <nav id="menu">\r
- <h2>Menu</h2>\r
- <ul>\r
- <li><a href="index.html">Home</a></li>\r
- <li><a href="colorado.html">Colorado</a></li>\r
- <li><a href="danube.html">Danube</a></li>\r
- </ul>\r
- </nav>\r
- --->\r
- <!-- Main -->\r
- <div id="main">\r
- <div class="inner">\r
- <header>\r
- <h1>OPNFV Testing group reporting</h1>\r
- </header>\r
- <section class="tiles">\r
- <article class="style3">\r
- <span class="image">\r
- <img src="img/colorado.jpg" alt="" />\r
- </span>\r
- <a href="colorado.html">\r
- <h2>Colorado</h2>\r
- <div class="content">\r
- <p>Colorado 1.0 released on the 22nd of September</p>\r
- </div>\r
- </a>\r
- </article>\r
- <article class="style2">\r
- <span class="image">\r
- <img src="img/danube.jpg" alt="" />\r
- </span>\r
- <a href="danube.html">\r
- <h2>Danube</h2>\r
- <div class="content">\r
- <p>Master</p>\r
- </div>\r
- </a>\r
- </article>\r
- </section>\r
- </div>\r
- </div>\r
-\r
- <!-- Footer -->\r
- <footer id="footer">\r
- <div class="inner">\r
- <section>\r
- <h2>OPNFV Testing Working group</h2>\r
- </section>\r
- <section>\r
- <h2>Follow</h2>\r
- <ul class="icons">\r
- <li><a href="https://twitter.com/opnfv" class="icon style2 fa-twitter"><span class="label">Twitter</span></a></li>\r
- <li><a href="http://git.opnfv.org" class="icon style2 fa-github"><span class="label">GitHub</span></a></li>\r
- <li><a href="mailto:test-wg@list.opnfv.org" class="icon style2 fa-envelope-o"><span class="label">Email</span></a></li>\r
- </ul>\r
- </section>\r
- <ul class="copyright">\r
- <li>© Untitled. All rights reserved</li><li>Design: <a href="http://html5up.net">HTML5 UP</a></li>\r
- </ul>\r
- </div>\r
- </footer>\r
-\r
- </div>\r
-\r
- <!-- Scripts -->\r
- <script src="3rd_party/js/jquery.min.js"></script>\r
- <script src="3rd_party/js/skel.min.js"></script>\r
- <script src="3rd_party/js/util.js"></script>\r
- <!--[if lte IE 8]><script src="3rd_party/js/ie/respond.min.js"></script><![endif]-->\r
- <script src="3rd_party/js/main.js"></script>\r
-\r
- </body>\r
-</html>\r
+<!DOCTYPE HTML>
+<!--
+ Phantom by HTML5 UP
+ html5up.net | @ajlkn
+ Free for personal and commercial use under the CCA 3.0 license (html5up.net/license)
+-->
+<html>
+ <head>
+ <title>OPNFV reporting</title>
+ <meta charset="utf-8" />
+ <meta name="viewport" content="width=device-width, initial-scale=1" />
+ <!--[if lte IE 8]><script src="3rd_party/js/ie/html5shiv.js"></script><![endif]-->
+ <link rel="stylesheet" href="3rd_party/css/main.css" />
+ <!--[if lte IE 9]><link rel="stylesheet" href="3rd_party/css/ie9.css" /><![endif]-->
+ <!--[if lte IE 8]><link rel="stylesheet" href="3rd_party/css/ie8.css" /><![endif]-->
+ </head>
+ <body>
+ <!-- Wrapper -->
+ <div id="wrapper">
+
+ <!-- Header -->
+ <header id="header">
+ <div class="inner">
+
+ <!-- Logo -->
+ <a href="index.html" class="logo">
+ <span class="symbol"><img src="img/logo.svg" alt="" /></span><span class="title">Phantom</span>
+ </a>
+
+ <!-- Nav -->
+ <!-- <nav>
+ <ul>
+ <li><a href="#menu">Menu</a></li>
+ </ul>
+ </nav>
+ --->
+ </div>
+ </header>
+
+ <!-- Menu -->
+ <!--- <nav id="menu">
+ <h2>Menu</h2>
+ <ul>
+ <li><a href="index.html">Home</a></li>
+ <li><a href="colorado.html">Colorado</a></li>
+ <li><a href="danube.html">Danube</a></li>
+ </ul>
+ </nav>
+ --->
+ <!-- Main -->
+ <div id="main">
+ <div class="inner">
+ <header>
+ <h1>OPNFV Testing group reporting</h1>
+ </header>
+ <section class="tiles">
+ <article class="style3">
+ <span class="image">
+ <img src="img/colorado.jpg" alt="" />
+ </span>
+ <a href="colorado.html">
+ <h2>Colorado</h2>
+ <div class="content">
+ <p>Colorado 1.0 released on the 22nd of September</p>
+ </div>
+ </a>
+ </article>
+ <article class="style2">
+ <span class="image">
+ <img src="img/danube.jpg" alt="" />
+ </span>
+ <a href="danube.html">
+ <h2>Danube</h2>
+ <div class="content">
+ <p>Danube 1.0 planned on the 22nd of March</p>
+ </div>
+ </a>
+ </article>
+ <article class="style6">
+ <span class="image">
+ <img src="img/euphrates.jpg" alt="" />
+ </span>
+ <a href="master.html">
+ <h2>Euphrates</h2>
+ <div class="content">
+ <p>Master</p>
+ </div>
+ </a>
+ </article>
+ </section>
+ </div>
+ </div>
+
+ <!-- Footer -->
+ <footer id="footer">
+ <div class="inner">
+ <section>
+ <h2>OPNFV Testing Working group</h2>
+ </section>
+ <section>
+ <h2>Follow</h2>
+ <ul class="icons">
+ <li><a href="https://twitter.com/opnfv" class="icon style2 fa-twitter"><span class="label">Twitter</span></a></li>
+ <li><a href="http://git.opnfv.org" class="icon style2 fa-github"><span class="label">GitHub</span></a></li>
+ <li><a href="mailto:test-wg@list.opnfv.org" class="icon style2 fa-envelope-o"><span class="label">Email</span></a></li>
+ </ul>
+ </section>
+ <ul class="copyright">
+ <li>© Untitled. All rights reserved</li><li>Design: <a href="http://html5up.net">HTML5 UP</a></li>
+ </ul>
+ </div>
+ </footer>
+
+ </div>
+
+ <!-- Scripts -->
+ <script src="3rd_party/js/jquery.min.js"></script>
+ <script src="3rd_party/js/skel.min.js"></script>
+ <script src="3rd_party/js/util.js"></script>
+ <!--[if lte IE 8]><script src="3rd_party/js/ie/respond.min.js"></script><![endif]-->
+ <script src="3rd_party/js/main.js"></script>
+
+ </body>
+</html>
--- /dev/null
+<!DOCTYPE HTML>
+<!--
+ Phantom by HTML5 UP
+ html5up.net | @ajlkn
+ Free for personal and commercial use under the CCA 3.0 license (html5up.net/license)
+-->
+<html>
+ <head>
+ <title>Phantom by HTML5 UP</title>
+ <meta charset="utf-8" />
+ <meta name="viewport" content="width=device-width, initial-scale=1" />
+ <!--[if lte IE 8]><script src="3rd_party/js/ie/html5shiv.js"></script><![endif]-->
+ <link rel="stylesheet" href="3rd_party/css/main.css" />
+ <!--[if lte IE 9]><link rel="stylesheet" href="3rd_party/css/ie9.css" /><![endif]-->
+ <!--[if lte IE 8]><link rel="stylesheet" href="3rd_party/css/ie8.css" /><![endif]-->
+ </head>
+ <body>
+ <!-- Wrapper -->
+ <div id="wrapper">
+
+ <!-- Header -->
+ <header id="header">
+ <div class="inner">
+
+ <!-- Logo -->
+ <a href="index.html" class="logo">
+ <span class="symbol"><img src="img/logo.svg" alt="" /></span><span class="title">Phantom</span>
+ </a>
+
+ <!-- Nav -->
+ <!-- <nav>
+ <ul>
+ <li><a href="#menu">Menu</a></li>
+ </ul>
+ </nav>
+ --->
+ </div>
+ </header>
+
+ <!-- Menu -->
+ <!--- <nav id="menu">
+ <h2>Menu</h2>
+ <ul>
+ <li><a href="index.html">Home</a></li>
+ <li><a href="colorado.html">Colorado</a></li>
+ <li><a href="danube.html">Danube</a></li>
+ </ul>
+ </nav>
+ --->
+ <!-- Main -->
+ <div id="main">
+ <div class="inner">
+ <header>
+ <h1>Master reporting</h1>
+ </header>
+ <section class="tiles">
+ <article class="style3">
+ <span class="image">
+ <img src="img/functest.jpg" alt="" />
+ </span>
+ <a href="functest-master.html">
+ <h2>Functest</h2>
+ <div class="content">
+ <p>Functional testing</p>
+ </div>
+ </a>
+ </article>
+ <article class="style2">
+ <span class="image">
+ <img src="img/yardstick.jpg" alt="" />
+ </span>
+ <a href="master/yardstick/status-apex.html">
+ <h2>Yardstick</h2>
+ <div class="content">
+ <p>Qualification and performance testing</p>
+ </div>
+ </a>
+ </article>
+ <article class="style4">
+ <span class="image">
+ <img src="img/storperf.jpg" alt="" />
+ </span>
+ <a href="master/storperf/status-apex.html">
+ <h2>Storperf</h2>
+ <div class="content">
+ <p>Storage testing</p>
+ </div>
+ </a>
+ </article>
+ </section>
+ </div>
+ </div>
+
+ <!-- Footer -->
+ <footer id="footer">
+ <div class="inner">
+ <section>
+ <h2>OPNFV Testing Working group</h2>
+ </section>
+ <section>
+ <h2>Follow</h2>
+ <ul class="icons">
+ <li><a href="https://twitter.com/opnfv" class="icon style2 fa-twitter"><span class="label">Twitter</span></a></li>
+ <li><a href="http://git.opnfv.org" class="icon style2 fa-github"><span class="label">GitHub</span></a></li>
+ <li><a href="mailto:test-wg@list.opnfv.org" class="icon style2 fa-envelope-o"><span class="label">Email</span></a></li>
+ </ul>
+ </section>
+ <ul class="copyright">
+ <li>© Untitled. All rights reserved</li><li>Design: <a href="http://html5up.net">HTML5 UP</a></li>
+ </ul>
+ </div>
+ </footer>
+
+ </div>
+
+ <!-- Scripts -->
+ <script src="3rd_party/js/jquery.min.js"></script>
+ <script src="3rd_party/js/skel.min.js"></script>
+ <script src="3rd_party/js/util.js"></script>
+ <!--[if lte IE 8]><script src="3rd_party/js/ie/respond.min.js"></script><![endif]-->
+ <script src="3rd_party/js/main.js"></script>
+
+ </body>
+</html>
--- /dev/null
+: ${SERVER_URL:='testresults.opnfv.org/reporting/api'}
+
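+# Generate the runtime config read by the Angular app; BASE_URL points at the reporting API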
+echo "var BASE_URL = 'http://${SERVER_URL}/landing-page'" > app/scripts/app.config.js
+
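+# Install the node toolchain and front-end dependencies, then build the static assets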
+apt-get install -y nodejs
+apt-get install -y npm
+npm install
+npm install -g grunt bower
+bower install --allow-root
+grunt build
<!doctype html>
<html ng-app="opnfvApp">
- <head>
+
+<head>
<meta charset="utf-8">
<title>OPNFV-DASHBOARD EXAMPLE</title>
<meta name="description" content="">
<link rel="stylesheet" href="bower_components/chosen/chosen.css" />
<link rel="stylesheet" href="bower_components/selectize/dist/css/selectize.css" />
<link rel="stylesheet" href="bower_components/components-font-awesome/css/font-awesome.css" />
+ <link rel="stylesheet" href="bower_components/angular-tooltips/dist/angular-tooltips.min.css" />
+ <link rel="stylesheet" href="bower_components/animate.css/animate.css" />
+ <link rel="stylesheet" href="bower_components/ng-dialog/css/ngDialog.css" />
+ <link rel="stylesheet" href="bower_components/ng-dialog/css/ngDialog-theme-default.css" />
+ <link rel="stylesheet" href="bower_components/inspiniacss/style.css" />
<!-- endbower -->
<!-- endbuild -->
<!-- build:css(.tmp) styles/style.css -->
- <!--<link rel="stylesheet" href="styles/main.css">-->
- <link rel="stylesheet" href="styles/animate.css">
- <link rel="stylesheet" href="styles/style.css">
+
<link rel="stylesheet" href="styles/custome.css">
+
<!-- endbuild -->
- </head>
- <body class="{{$state.current.data.specialClass}}" id="page-top">
+</head>
+
+<body class="{{$state.current.data.specialClass}}" id="page-top">
<!--[if lte IE 8]>
<p class="browsehappy">You are using an <strong>outdated</strong> browser. Please <a href="http://browsehappy.com/">upgrade your browser</a> to improve your experience.</p>
<![endif]-->
<!-- Add your site or application content here -->
<div ui-view></div>
<!-- Google Analytics: change UA-XXXXX-X to be your site's ID -->
- <!--<script>
+ <!--<script>
!function(A,n,g,u,l,a,r){A.GoogleAnalyticsObject=l,A[l]=A[l]||function(){
(A[l].q=A[l].q||[]).push(arguments)},A[l].l=+new Date,a=n.createElement(g),
r=n.getElementsByTagName(g)[0],a.src=u,r.parentNode.insertBefore(a,r)
<script src="bower_components/microplugin/src/microplugin.js"></script>
<script src="bower_components/selectize/dist/js/selectize.js"></script>
<script src="bower_components/angular-selectize2/dist/angular-selectize.js"></script>
+ <script src="bower_components/angular-tooltips/dist/angular-tooltips.min.js"></script>
+ <script src="bower_components/jQuery-rwdImageMaps/jquery.rwdImageMaps.min.js"></script>
+ <script src="bower_components/ng-dialog/js/ngDialog.js"></script>
<!-- endbower -->
<!-- endbuild -->
- <!-- build:js({.tmp,app}) scripts/scripts.js -->
- <script src="scripts/app.js"></script>
- <!--<script src="scripts/controllers/main.js"></script>-->
- <script src="scripts/config.router.js"></script>
- <script src="scripts/controllers/table.controller.js"></script>
- <script src="scripts/config.js"></script>
- <script src="scripts/directives/mydirective.js"></script>
- <script src="scripts/factory/table.factory.js"></script>
- <!-- endbuild -->
+ <!-- build:js({.tmp,app}) scripts/scripts.js -->
+ <script src="scripts/app.js"></script>
+ <!--<script src="scripts/controllers/main.js"></script>-->
+ <script src="scripts/config.router.js"></script>
+ <script src="scripts/controllers/table.controller.js"></script>
+ <script src="scripts/config.js"></script>
+ <script src="scripts/factory/table.factory.js"></script>
+ <script src="scripts/controllers/case.controller.js"></script>
+ <script src="scripts/controllers/auth.controller.js"></script>
+ <script src="scripts/controllers/admin.controller.js"></script>
+ <script src="scripts/controllers/main.controller.js"></script>
+ <script src="scripts/app.config.js"></script>
+ <script src="scripts/controllers/testvisual.controller.js"></script>
+
+ <!-- endbuild -->
</body>
-</html>
+
+</html>
\ No newline at end of file
* Main module of the application.
*/
angular
- .module('opnfvApp', [
- 'ngAnimate',
- 'ui.router',
- 'oc.lazyLoad',
- 'ui.bootstrap',
- 'ngResource',
- 'selectize'
+ .module('opnfvApp', [
+ 'ngAnimate',
+ 'ui.router',
+ 'oc.lazyLoad',
+ 'ui.bootstrap',
+ 'ngResource',
+ 'selectize',
+ '720kb.tooltips'
- ]);
+ ]);
\ No newline at end of file
* Main config file of the application.
*/
angular
- .module('opnfvApp').config(function () {
+ .module('opnfvApp').config(['$httpProvider', '$qProvider', function($httpProvider, $qProvider) {
- }
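+        // Configure $http for cross-origin calls to the reporting API; dropping X-Requested-With keeps simple CORS requests un-preflighted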
+ $httpProvider.defaults.useXDomain = true;
+ delete $httpProvider.defaults.headers.common['X-Requested-With'];
- )
+ $qProvider.errorOnUnhandledRejections(false);
+
+ }
+
+ ]);
\ No newline at end of file
$stateProvider
.state('landingpage', {
url: "/landingpage",
- //controller: 'MainCtrl',
+ controller: 'MainController',
templateUrl: "views/main.html",
            data: { pageTitle: 'Home', specialClass: 'landing-page' },
resolve: {
--- /dev/null
+'use strict';
+
+/**
+ * @ngdoc function
+ * @name opnfvdashBoardAngularApp.controller:MainController
+ * @description
+ * # MainController
+ * Controller of the opnfvdashBoardAngularApp
+ */
+angular.module('opnfvApp')
+ .controller('MainController', ['$scope', '$state', '$stateParams', function($scope, $state, $stateParams) {
+
+ init();
+
+ function init() {
+ $scope.goTest = goTest;
+ $scope.goLogin = goLogin;
+
+ }
+
+ function goTest() {
+ $state.go("select.selectTestCase");
+ }
+
+ function goLogin() {
+ $state.go("login");
+ }
+
+
+
+
+ }]);
\ No newline at end of file
* Controller of the opnfvdashBoardAngularApp
*/
angular.module('opnfvApp')
- .controller('TableController', ['$scope', '$state', '$stateParams', 'TableFactory', function ($scope, $state, $stateParams, TableFactory) {
+ .controller('TableController', ['$scope', '$state', '$stateParams', '$http', 'TableFactory', function($scope, $state, $stateParams, $http, TableFactory) {
$scope.filterlist = [];
$scope.selection = [];
- $scope.statusList = ["Success", "Warning", "Danger"];
- $scope.projectList = ["Deployment", "Functest", "Yardstick"];
- $scope.installerList = ["apex", "compass", "fuel", "joid"];
- $scope.versionlist = ["Colorado", "Master"];
- $scope.loopci = ["Daily", "Weekly", "Monthly"];
- $scope.time = ["10 days", "1 Month"];
+ $scope.statusList = [];
+ $scope.projectList = [];
+ $scope.installerList = [];
+ $scope.versionlist = [];
+ $scope.loopci = [];
+ $scope.time = [];
$scope.tableDataAll = {};
$scope.tableInfoAll = {};
+ $scope.scenario = {};
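+        // selectize configs: picking a version/loop/time records the choice and re-queries the scenario data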
+ $scope.VersionConfig = {
+ create: true,
+ valueField: 'title',
+ labelField: 'title',
+ delimiter: '|',
+ maxItems: 1,
+ placeholder: 'Version',
+ onChange: function(value) {
+ checkElementArrayValue($scope.selection, $scope.VersionOption);
+ $scope.selection.push(value);
+ // console.log($scope.selection);
+ getScenarioData();
+ }
+ }
- $scope.scenario =
- {
- "scenarios": {
- "os-nosdn-kvm-noha": {
- "status": "Success",
- "installers": {
- "apex": [
- {
- "project": "Deployment",
- "score": "13/14",
- "status": "SUCCESS",
-
-
- },
- {
- "project": "Functest",
- "score": "null",
- "status": "SUCCESS"
- },
- {
- "project": "Yardstick",
- "score": "13/14",
- "status": "SUCCESS"
- }
- ],
- "compass": [
- {
- "project": "Deployment",
- "score": "13/14",
- "status": "SUCCESS"
- },
- {
- "project": "Functest",
- "score": "13/14",
- "status": "SUCCESS"
- },
- {
- "project": "Yardstick",
- "score": "13/14",
- "status": "SUCCESS"
- }
- ],
- "fuel": [
- {
- "project": "Deployment",
- "score": "13/14",
- "status": "SUCCESS"
- },
- {
- "project": "Functest",
- "score": "13/14",
- "status": "SUCCESS"
- },
- {
- "project": "Yardstick",
- "score": "13/14",
- "status": "SUCCESS"
- }
- ],
- "joid": [
- {
- "project": "Deployment",
- "score": "13/14",
- "status": "SUCCESS"
- },
- {
- "project": "Functest",
- "score": "13/14",
- "status": "SUCCESS"
- },
- {
- "project": "Yardstick",
- "score": "13/14",
- "status": "SUCCESS"
- }
- ]
- }
- },
- "os-nosdn-ovs-ha": {
- "status": "Danger",
- "installers": {
- "apex": [
- {
- "project": "Deployment",
- "score": "13/14",
- "status": "SUCCESS",
-
-
- },
- {
- "project": "Functest",
- "score": "13/14",
- "status": "SUCCESS"
- },
- {
- "project": "Yardstick",
- "score": "13/14",
- "status": "SUCCESS"
- }
- ],
- "compass": [
- {
- "project": "Deployment",
- "score": "13/14",
- "status": "SUCCESS"
- },
- {
- "project": "Functest",
- "score": "13/14",
- "status": "SUCCESS"
- },
- {
- "project": "Yardstick",
- "score": "13/14",
- "status": "SUCCESS"
- }
- ],
- "fuel": [
- {
- "project": "Deployment",
- "score": "13/14",
- "status": "SUCCESS"
- },
- {
- "project": "Functest",
- "score": "13/14",
- "status": "SUCCESS"
- },
- {
- "project": "Yardstick",
- "score": "13/14",
- "status": "SUCCESS"
- }
- ],
- "joid": [
- {
- "project": "Deployment",
- "score": "13/14",
- "status": "SUCCESS"
- },
- {
- "project": "Functest",
- "score": "13/14",
- "status": "SUCCESS"
- },
- {
- "project": "Yardstick",
- "score": "13/14",
- "status": "SUCCESS"
- }
- ]
- }
- },
- "os-nosdn-ovs-noha": {
- "status": "Warning",
- "installers": {
- "apex": [
- {
- "project": "Deployment",
- "score": "13/14",
- "status": "SUCCESS",
-
-
- },
- {
- "project": "Functest",
- "score": "13/14",
- "status": "SUCCESS"
- },
- {
- "project": "Yardstick",
- "score": "13/14",
- "status": "SUCCESS"
- }
- ],
- "compass": [
- {
- "project": "Deployment",
- "score": "13/14",
- "status": "SUCCESS"
- },
- {
- "project": "Functest",
- "score": "13/14",
- "status": "SUCCESS"
- },
- {
- "project": "Yardstick",
- "score": "13/14",
- "status": "SUCCESS"
- }
- ],
- "fuel": [
- {
- "project": "Deployment",
- "score": "13/14",
- "status": "SUCCESS"
- },
- {
- "project": "Functest",
- "score": "13/14",
- "status": "SUCCESS"
- },
- {
- "project": "Yardstick",
- "score": "13/14",
- "status": "SUCCESS"
- }
- ],
- "joid": [
- {
- "project": "Deployment",
- "score": "13/14",
- "status": "SUCCESS"
- },
- {
- "project": "Functest",
- "score": "13/14",
- "status": "SUCCESS"
- },
- {
- "project": "Yardstick",
- "score": "13/14",
- "status": "SUCCESS"
- }
- ]
- }
- }
+ $scope.LoopConfig = {
+ create: true,
+ valueField: 'title',
+ labelField: 'title',
+ delimiter: '|',
+ maxItems: 1,
+ placeholder: 'Loop',
+ onChange: function(value) {
+ checkElementArrayValue($scope.selection, $scope.LoopOption);
+ $scope.selection.push(value);
+ // console.log($scope.selection);
+ getScenarioData();
+
+ }
+ }
+
+ $scope.TimeConfig = {
+ create: true,
+ valueField: 'title',
+ labelField: 'title',
+ delimiter: '|',
+ maxItems: 1,
+ placeholder: 'Time',
+ onChange: function(value) {
+ checkElementArrayValue($scope.selection, $scope.TimeOption);
+ $scope.selection.push(value);
+ // console.log($scope.selection)
+ getScenarioData();
+
+
+ }
+ }
+
+
+ init();
+
+ function init() {
+ $scope.toggleSelection = toggleSelection;
+ getScenarioData();
+ // radioSetting();
+ getFilters();
+ }
+
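+        // Fetch the available filter values (status, projects, installers, version, loop, time) from the API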
+ function getFilters() {
+ TableFactory.getFilter().get({
+
+
+ }).$promise.then(function(response) {
+ if (response != null) {
+ $scope.statusList = response.filters.status;
+ $scope.projectList = response.filters.projects;
+ $scope.installerList = response.filters.installers;
+ $scope.versionlist = response.filters.version;
+ $scope.loopci = response.filters.loops;
+ $scope.time = response.filters.time;
+
+ $scope.statusListString = $scope.statusList.toString();
+ $scope.projectListString = $scope.projectList.toString();
+ $scope.installerListString = $scope.installerList.toString();
+ $scope.VersionSelected = $scope.versionlist[1];
+ $scope.LoopCiSelected = $scope.loopci[0];
+ $scope.TimeSelected = $scope.time[0];
+ radioSetting($scope.versionlist, $scope.loopci, $scope.time);
+
+ } else {
+                alert("Network error");
}
+ })
+ }
+
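+        // POST the current version/loop/time selection to the scenarios endpoint and rebuild the table data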
+ function getScenarioData() {
+
+            var url = BASE_URL + '/scenarios';
+ var data = {
+ 'status': ['success', 'danger', 'warning'],
+ 'projects': ['functest', 'yardstick'],
+ 'installers': ['apex', 'compass', 'fuel', 'joid'],
+ 'version': $scope.VersionSelected,
+ 'loops': $scope.LoopCiSelected,
+ 'time': $scope.TimeSelected
};
+ var config = {
+ headers: {
+ 'Content-Type': 'application/x-www-form-urlencoded;charset=utf-8;'
+ }
+ }
+            $http.post(url, data, config).then(function(response) {
+ if (response.status == 200) {
+ $scope.scenario = response.data;
+ constructJson();
+ }
+ })
+ }
- // var headData = Object.keys($scope.scenario.scenarios.os_nosdn_kvm_noha.installers);
- // $scope.headData = headData;
- //construct json
+        // Build the per-scenario table data (installers, projects, score/status cells) from the API response
function constructJson() {
var colspan;
for (var item in $scope.scenario.scenarios) {
-
-
-
- var headData = Object.keys($scope.scenario.scenarios[item].installers);
+ var headData = Object.keys($scope.scenario.scenarios[item].installers).sort();
var scenarioStatus = $scope.scenario.scenarios[item].status;
-
+ var scenarioStatusDisplay;
+ if (scenarioStatus == "success") {
+ scenarioStatusDisplay = "navy";
+ } else if (scenarioStatus == "danger") {
+ scenarioStatusDisplay = "danger";
+ } else if (scenarioStatus == "warning") {
+ scenarioStatusDisplay = "warning";
+ }
InstallerData = headData;
var projectData = [];
var datadisplay = [];
var projects = [];
-
for (var j = 0; j < headData.length; j++) {
projectData.push($scope.scenario.scenarios[item].installers[headData[j]]);
for (var k = 0; k < projectData[j].length; k++) {
projects.push(projectData[j][k].project);
var temArray = [];
- temArray.push(projectData[j][k].score);
- temArray.push(projectData[j][k].project);
- temArray.push(headData[j]);
+ if (projectData[j][k].score == null) {
+ temArray.push("null");
+ temArray.push(projectData[j][k].project);
+ temArray.push(headData[j]);
+ } else {
+ temArray.push(projectData[j][k].score);
+ temArray.push(projectData[j][k].project);
+ temArray.push(headData[j]);
+ }
+
+
+ if (projectData[j][k].status == "platinium") {
+ temArray.push("primary");
+ temArray.push("P");
+ } else if (projectData[j][k].status == "gold") {
+ temArray.push("danger");
+ temArray.push("G");
+ } else if (projectData[j][k].status == "silver") {
+ temArray.push("warning");
+ temArray.push("S");
+ } else if (projectData[j][k].status == null) {
+ temArray.push("null");
+ }
+
datadisplay.push(temArray);
}
colspan = projects.length / headData.length;
var tabledata = {
- scenarioName: item, Installer: InstallerData, projectData: projectData, projects: projects,
- datadisplay: datadisplay, colspan: colspan, status: scenarioStatus
+ scenarioName: item,
+ Installer: InstallerData,
+ projectData: projectData,
+ projects: projects,
+ datadisplay: datadisplay,
+ colspan: colspan,
+ status: scenarioStatus,
+ statusDisplay: scenarioStatusDisplay
};
JSON.stringify(tabledata);
$scope.tableDataAll.scenario.push(tabledata);
+ // console.log(tabledata);
+
}
var tempHeadData = [];
-
-
for (var i = 0; i < InstallerData.length; i++) {
for (var j = 0; j < colspan; j++) {
tempHeadData.push(InstallerData[i]);
}
}
- console.log(tempHeadData);
+ //console.log(tempHeadData);
var projectsInfoAll = [];
projectsInfoAll.push(tempA);
}
- console.log(projectsInfoAll);
+ //console.log(projectsInfoAll);
$scope.tableDataAll["colspan"] = colspan;
$scope.tableDataAll["Installer"] = InstallerData;
$scope.tableDataAll["Projects"] = projectsInfoAll;
- console.log($scope.tableDataAll);
+ // console.log($scope.tableDataAll);
+ $scope.colspan = $scope.tableDataAll.colspan;
}
return size;
}
- init();
- function init() {
- $scope.toggleSelection = toggleSelection;
-
- constructJson();
-
- }
-
- // $scope.test=false;
+ $scope.colspan = $scope.tableDataAll.colspan;
+ // console.log($scope.colspan);
- var statusListString = $scope.statusList.toString();
- var projectListString = $scope.projectList.toString();
- var installerListString = $scope.installerList.toString();
-
- $scope.colspan=$scope.tableDataAll.colspan;
- //filter function
- function filterData() {
-
-
- $scope.selectInstallers = [];
- $scope.selectProjects = [];
- $scope.selectStatus = [];
- for (var i = 0; i < $scope.selection.length; i++) {
- if (statusListString.indexOf($scope.selection[i]) > -1) {
- $scope.selectStatus.push($scope.selection[i]);
- }
- if (projectListString.indexOf($scope.selection[i]) > -1) {
- $scope.selectProjects.push($scope.selection[i]);
- }
- if (installerListString.indexOf($scope.selection[i]) > -1) {
- $scope.selectInstallers.push($scope.selection[i]);
- }
- }
-
- $scope.colspan=$scope.selectProjects.length;
- //when some selection is empty, we set it full
- if($scope.selectInstallers.length==0){
- $scope.selectInstallers=$scope.installerList;
-
- }
- if($scope.selectProjects.length==0){
- $scope.selectProjects=$scope.projectList;
- $scope.colspan=$scope.tableDataAll.colspan;
- }
- if($scope.selectStatus.length==0){
- $scope.selectStatus=$scope.statusList
- }
- }
-
-
- //find all same element index
+ //find all same element index
function getSameElementIndex(array, element) {
var indices = [];
var idx = array.indexOf(element);
}
-
- $scope.VersionOption = [
- { title: 'Colorado' },
- { title: 'Master' }
- ];
- $scope.VersionConfig = {
- create: true,
- valueField: 'title',
- labelField: 'title',
- delimiter: '|',
- maxItems: 1,
- placeholder: 'Version',
- onChange: function (value) {
- checkElementArrayValue($scope.selection, $scope.VersionOption);
- $scope.selection.push(value);
- // console.log($scope.selection);
-
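+        // Wrap the plain version/loop/time values in {title: ...} objects so selectize can render them as options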
+ function radioSetting(array1, array2, array3) {
+ var tempVersion = [];
+ var tempLoop = [];
+ var tempTime = [];
+ for (var i = 0; i < array1.length; i++) {
+ var temp = {
+ title: array1[i]
+ };
+ tempVersion.push(temp);
}
-
- }
-
- $scope.LoopOption = [
- { title: 'Daily' },
- { title: 'Weekly' },
- { title: 'Monthly' }
- ];
- $scope.LoopConfig = {
- create: true,
- valueField: 'title',
- labelField: 'title',
- delimiter: '|',
- maxItems: 1,
- placeholder: 'Loop',
- onChange: function (value) {
- checkElementArrayValue($scope.selection, $scope.LoopOption);
- $scope.selection.push(value);
- // console.log($scope.selection);
-
+ for (var i = 0; i < array2.length; i++) {
+ var temp = {
+ title: array2[i]
+ };
+ tempLoop.push(temp);
}
- }
-
- $scope.TimeOption = [
- { title: '10 days' },
- { title: '1 month' }
- ];
- $scope.TimeConfig = {
- create: true,
- valueField: 'title',
- labelField: 'title',
- delimiter: '|',
- maxItems: 1,
- placeholder: 'Time',
- onChange: function (value) {
- checkElementArrayValue($scope.selection, $scope.TimeOption);
- $scope.selection.push(value);
- // console.log($scope.selection)
-
+ for (var i = 0; i < array3.length; i++) {
+ var temp = {
+ title: array3[i]
+ };
+ tempTime.push(temp);
}
+ $scope.VersionOption = tempVersion;
+ $scope.LoopOption = tempLoop;
+ $scope.TimeOption = tempTime;
}
//remove element in the array
if (idx > -1) {
$scope.selection.splice(idx, 1);
- }
- else {
+ filterData($scope.selection)
+ } else {
$scope.selection.push(status);
+ filterData($scope.selection)
}
- console.log($scope.selection);
- filterData();
+ // console.log($scope.selection);
}
- }]);
+ //filter function
+ function filterData(selection) {
+
+ $scope.selectInstallers = [];
+ $scope.selectProjects = [];
+ $scope.selectStatus = [];
+ for (var i = 0; i < selection.length; i++) {
+ if ($scope.statusListString.indexOf(selection[i]) > -1) {
+ $scope.selectStatus.push(selection[i]);
+ }
+ if ($scope.projectListString.indexOf(selection[i]) > -1) {
+ $scope.selectProjects.push(selection[i]);
+ }
+ if ($scope.installerListString.indexOf(selection[i]) > -1) {
+ $scope.selectInstallers.push(selection[i]);
+ }
+ }
+
+ $scope.colspan = $scope.selectProjects.length;
+ //when some selection is empty, we set it full
+ if ($scope.selectInstallers.length == 0) {
+ $scope.selectInstallers = $scope.installerList;
+
+ }
+ if ($scope.selectProjects.length == 0) {
+ $scope.selectProjects = $scope.projectList;
+ $scope.colspan = $scope.tableDataAll.colspan;
+ }
+ if ($scope.selectStatus.length == 0) {
+ $scope.selectStatus = $scope.statusList
+ }
+
+ // console.log($scope.selectStatus);
+ // console.log($scope.selectProjects);
+
+ }
+
+
+ }]);
\ No newline at end of file
+++ /dev/null
-
-{"scenarios": {
- "os-nosdn-kvm-noha": {
- "status": "Success",
- "installers": {
- "apex": [
- {
- "project": "Deployment",
- "score": "13/14",
- "status": "SUCCESS"
- },
- {
- "project": "Functest",
- "score": "13/14",
- "status": "SUCCESS"
- },
- {
- "project": "Yardstick",
- "socre": "13/14",
- "status": "SUCCESS"
- }
- ],
- "compass": [
- {
- "project": "Deployment",
- "score": "13/14",
- "status": "SUCCESS"
- },
- {
- "project": "Functest",
- "score": "13/14",
- "status": "SUCCESS"
- },
- {
- "project": "Yardstick",
- "socre": "13/14",
- "status": "SUCCESS"
- }
- ],
- "fuel": [
- {
- "project": "Deployment",
- "score": "13/14",
- "status": "SUCCESS"
- },
- {
- "project": "Functest",
- "score": "13/14",
- "status": "SUCCESS"
- },
- {
- "project": "Yardstick",
- "socre": "13/14",
- "status": "SUCCESS"
- }
- ],
- "joid": [
- {
- "project": "Deployment",
- "score": "13/14",
- "status": "SUCCESS"
- },
- {
- "project": "Functest",
- "score": "13/14",
- "status": "SUCCESS"
- },
- {
- "project": "Yardstick",
- "socre": "13/14",
- "status": "SUCCESS"
- }
- ]
- }
- }
-}}
* get data factory
*/
angular.module('opnfvApp')
- .factory('TableFactory', function ($resource, $rootScope) {
- // var baseUrl = base_Url;
+ .factory('TableFactory', function($resource, $rootScope) {
return {
- getFilter: function () {
- return $resource(baseUrl + '/', {}, {
- 'post': {
- method: 'POST',
+ getFilter: function() {
+ return $resource(BASE_URL + '/filters', {}, {
+ 'get': {
+ method: 'GET',
}
});
+ },
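+            // POST resource used to fetch scenario results for the selected filters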
+ getScenario: function() {
+ return $resource(BASE_URL + '/scenarios', {}, {
+ 'post': {
+ method: 'POST',
+ }
+ })
}
};
- });
+ });
\ No newline at end of file
.container-tablesize {
- margin: auto 5%;
+ margin: auto 5%;
}
-.btn-outline {
+.btn-outline {
border-color: white;
}
}
.myhr {
- border:0.5px dashed #e7eaec;
- border-top:1px;margin-bottom: 3px;
-}
\ No newline at end of file
+ border: 0.5px dashed #e7eaec;
+ border-top: 1px;
+ margin-bottom: 3px;
+}
+
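+/* Cells with a "null" score are greyed out (text color matches the background) */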
+td.null {
+ background-color: #e7eaec;
+ color: #e7eaec;
+}
+
+input[type="radio"][disabled],
+input[type="checkbox"][disabled],
+input[type="radio"].disabled,
+input[type="checkbox"].disabled,
+fieldset[disabled] input[type="radio"],
+fieldset[disabled] input[type="checkbox"] {
+ cursor: not-allowed;
+ display: none;
+}
+
+body,
+html {
+ margin: 0;
+ padding: 0;
+ min-height: 100%;
+}
+
+
+/*img[usemap] {
+ border: none;
+ height: auto;
+ max-width: 100%;
+ width: auto;
+}*/
+
+.popup {
+ position: absolute;
+ display: none;
+ /*background-color: #dd8;*/
+ border-radius: 5px 5px 5px 5px;
+ background-color: #f3f3f4;
+ opacity: 0.9;
+}
+
+
+/*
+body {
+ height: 1200px;
+}
+
+html {
+ min-height: 100%;
+}*/
+
+
+/*html,
+body {
+ height: 100%;
+}
+
+#page-wrapper {
+ position: inherit;
+ margin: 0 0 0 220px;
+ min-height: 773px;
+}*/
\ No newline at end of file
<glyph unicode="🔑" d="M250 1200h600q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-150v-500l-255 -178q-19 -9 -32 -1t-13 29v650h-150q-21 0 -35.5 14.5t-14.5 35.5v400q0 21 14.5 35.5t35.5 14.5zM400 1100v-100h300v100h-300z" />
<glyph unicode="🚪" d="M250 1200h750q39 0 69.5 -40.5t30.5 -84.5v-933l-700 -117v950l600 125h-700v-1000h-100v1025q0 23 15.5 49t34.5 26zM500 525v-100l100 20v100z" />
</font>
-</defs></svg>
+</defs></svg>
\ No newline at end of file
<div class=" col-md-12" data-toggle="buttons" aria-pressed="false">
<label> Status </label>
- <label class="btn btn-outline btn-success btn-sm" style="height:25px; margin-right: 5px;" ng-repeat="status in statusList"
- value={{status}} ng-checked="selection.indexOf(status)>-1" ng-click="toggleSelection(status)">
+ <label class="btn btn-outline btn-success btn-sm" style="height:25px; margin-right: 5px;" ng-repeat="status in statusList" value={{status}} ng-checked="selection.indexOf(status)>-1" ng-click="toggleSelection(status)">
<input type="checkbox" disabled="disabled" > {{status}}
+
</label>
</div>
- <hr class="myhr">
+ <hr class="myhr">
<div class=" col-md-12" data-toggle="buttons">
- <label> Projects </label>
- <label class="btn btn-outline btn-success btn-sm " style="height:25px;margin-right: 5px;" ng-repeat="project in projectList"
- value={{project}} ng-checked="selection.indexOf(project)>-1" ng-click="toggleSelection(project)">
+ <label> Projects </label>
+ <label class="btn btn-outline btn-success btn-sm " style="height:25px;margin-right: 5px;" ng-repeat="project in projectList" value={{project}} ng-checked="selection.indexOf(project)>-1" ng-click="toggleSelection(project)">
<input type="checkbox" disabled="disabled"> {{project}}
</label>
</div>
- <hr class="myhr">
+ <hr class="myhr">
<div class=" col-md-12" data-toggle="buttons">
<label> Installers </label>
- <label class="btn btn-outline btn-success btn-sm" style="height:25px;margin-right: 5px;" ng-repeat="installer in installerList"
- value={{installer}} ng-checked="selection.indexOf(installer)" ng-click="toggleSelection(installer)">
+ <label class="btn btn-outline btn-success btn-sm" style="height:25px;margin-right: 5px;" ng-repeat="installer in installerList" value={{installer}} ng-checked="selection.indexOf(installer)>-1" ng-click="toggleSelection(installer)">
<input type="checkbox" disabled="disabled"> {{installer}}
</label>
- </div>
+ </div>
- <hr style="border:0.5px dashed #e7eaec;border-top:1px;margin-bottom:10px;">
+ <hr style="border:0.5px dashed #e7eaec;border-top:1px;margin-bottom:10px;">
- <div class=" col-md-1" style="margin-top:5px;margin-right: 5px;">
- <selectize options="VersionOption" ng-model="VersionSelected" config="VersionConfig"></selectize>
+ <div class=" col-md-1" style="margin-top:5px;margin-right: 5px;">
+ <selectize options="VersionOption" ng-model="VersionSelected" config="VersionConfig"></selectize>
- </div>
+ </div>
- <div class=" col-md-1" style="margin-top:5px;margin-left: 5px;margin-right: 5px;">
- <selectize options="LoopOption" ng-model="LoopCiSelected" config="LoopConfig"></selectize>
+ <div class=" col-md-1" style="margin-top:5px;margin-right: 5px;">
+ <selectize options="LoopOption" ng-model="LoopCiSelected" config="LoopConfig"></selectize>
- </div>
+ </div>
- <div class=" col-md-1" style="margin-top:5px;margin-left: 5px;margin-right: 5px;">
- <selectize options="TimeOption" ng-model="TimeSelected" config="TimeConfig"></selectize>
+ <div class=" col-md-1" style="margin-top:5px;margin-right: 5px;">
+ <selectize options="TimeOption" ng-model="TimeSelected" config="TimeConfig"></selectize>
+ </div>
</div>
+ <div class="table-responsive">
+
+ <table class="table table-bordered" id="table" ng-model="tableDataAll">
+ <thead class="thead">
+ <tr>
+ <th>Scenario </th>
+ <th colspan={{colspan}} ng-show="selectInstallers.indexOf(key)!=-1" value={{key}} ng-repeat="key in tableDataAll.Installer"><a href="notfound.html">{{key}}</a> </th>
+ </tr>
+ <tr>
-</div>
+ <td></td>
+ <td ng-show="selectProjects.indexOf(project[0])!=-1 && selectInstallers.indexOf(project[1])!=-1" ng-repeat="project in tableDataAll.Projects track by $index" data={{project[1]}} value={{project[0]}}>{{project[0]}}</td>
-<table class="table table-bordered" id="table" ng-model="tableDataAll">
- <thead class="thead">
- <tr >
- <th>Scenario </th>
- <th colspan={{colspan}} ng-show="selectInstallers.indexOf(key)!=-1" value={{key}} ng-repeat="key in tableDataAll.Installer"><a href="notfound.html">{{key}}</a> </th>
- </tr>
+ </tr>
+ </thead>
+ <tbody class="tbody">
+ <tr ng-repeat="scenario in tableDataAll.scenario" ng-show="selectStatus.indexOf(scenario.status)!=-1">
- <tr>
+ <td nowrap="nowrap" data={{scenario.status}}><span class="fa fa-circle text-{{scenario.statusDisplay}}"></span> <a href="notfound.html">{{scenario.scenarioName}}</a> </td>
- <td align="justify"></td>
- <td align="justify" ng-show="selectProjects.indexOf(project[0])!=-1 && selectInstallers.indexOf(project[1])!=-1" ng-repeat="project in tableDataAll.Projects track by $index" data={{project[1]}} value={{project[0]}}>{{project[0]}}</td>
- </tr>
- </thead>
- <tbody class="tbody">
- <tr ng-repeat="scenario in tableDataAll.scenario" ng-show="selectStatus.indexOf(scenario.status)!=-1" >
+ <!--<td style="background-color:#e7eaec" align="justify" ng-if="data[0]=='Not Support'" ng-repeat="data in scenario.datadisplay track by $index" data={{data[1]}} value={{data[2]}}></td>-->
- <td align="justify" data={{scenario.status}}><span class="fa fa-circle text-warning"><a href="notfound.html">{{scenario.scenarioName}}</a></span> </td>
- <td align="justify" ng-show="selectInstallers.indexOf(data[2])!=-1 && selectProjects.indexOf(data[1])!=-1" ng-repeat="data in scenario.datadisplay track by $index" data={{data[1]}} value={{data[2]}} ><span class="label label-danger">D<a href="notfound.html"></a></span> {{data[0]}}</td>
- </tr>
- </tbody>
-</table>
+ <td nowrap="nowrap" ng-show="selectInstallers.indexOf(data[2])!=-1 && selectProjects.indexOf(data[1])!=-1" ng-repeat="data in scenario.datadisplay track by $index" data={{data[1]}} value={{data[2]}} class={{data[0]}}>
+ <span class="label label-{{data[3]}}">{{data[4]}}</a></span> {{data[0]}}</td>
- <div class="pull-right">
- <span class="label label-danger">D</span>danger<span style="padding-left:20px"></span>
- <span class="label label-primary">S</span><span>success</span><span style="padding-left:20px"></span>
- <span class="label label-warning">W</span><span>warning</span>
+ </tr>
+ </tbody>
+ </table>
+ </div>
+
+ <div class="pull-right" style="margin-top: 5px">
+ <span class="label label-danger">G</span>gold<span style="padding-left:20px"></span>
+ <span class="label label-primary">P</span><span>platinium</span><span style="padding-left:20px"></span>
+ <span class="label label-warning">S</span><span>silver</span>
</div>
+ </div>
</div>
</div>
-</div>
-</section>
+</section>
\ No newline at end of file
-
<div class="navbar-wrapper">
- <nav class="navbar navbar-default navbar-fixed-top" role="navigation">
- <div class="container">
- <div class="navbar-header page-scroll">
- <button type="button" class="navbar-toggle collapsed" data-toggle="collapse" data-target="#navbar" aria-expanded="false"
- aria-controls="navbar">
+ <nav class="navbar navbar-default navbar-fixed-top" role="navigation">
+ <div class="container">
+ <div class="navbar-header page-scroll">
+ <button type="button" class="navbar-toggle collapsed" data-toggle="collapse" data-target="#navbar" aria-expanded="false" aria-controls="navbar">
<span class="sr-only">Toggle navigation</span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
</button>
- <a class="navbar-brand" href="index.html">OPNFV-DASHBOARD</a>
- </div>
- <div id="navbar" class="navbar-collapse collapse">
- <ul class="nav navbar-nav navbar-right">
- <li><a href="#page-top">Home</a></li>
- <li><a href="#DashBoard">DashBoard</a></li>
- <!--<li><a href="#team">Team</a></li>
+ <a class="navbar-brand" href="index.html">OPNFV-DASHBOARD</a>
+ </div>
+ <div class="navbar-collapse collapse">
+ <ul class="nav navbar-nav navbar-right">
+ <li><a href="#page-top">Home</a></li>
+ <li><a href="#DashBoard">DashBoard</a></li>
+ <li><a ui-sref="select.selectTestCase">TestCase</a></li>
+ <li><a ui-sref="login">Login</a></li>
+ <!--<li><a href="#team">Team</a></li>
<li><a href="#testimonials">Testimonials</a></li>
<li><a href="#pricing">Pricing</a></li>
<li><a href="#contact">Contact</a></li>-->
- </ul>
- </div>
- </div>
- </nav>
+ </ul>
+ </div>
+ </div>
+ </nav>
</div>
<div id="inSlider" class="carousel carousel-fade" data-ride="carousel">
- <ol class="carousel-indicators">
- <!--<li data-target="#inSlider" data-slide-to="0" class="active"></li>
+ <ol class="carousel-indicators">
+ <!--<li data-target="#inSlider" data-slide-to="0" class="active"></li>
<li data-target="#inSlider" data-slide-to="1"></li>-->
- </ol>
- <div class="carousel-inner" role="listbox">
- <div class="item active">
- <div class="container">
- <div class="carousel-caption">
- <h1>OPNFV<br/> facilitates the development and evolution<br/> of NFV components across<br/> various open source ecosystems<br/>
- </h1>
- <!--<p>Lorem Ipsum is simply dummy text of the printing.</p>
+ </ol>
+ <div class="carousel-inner" role="listbox">
+ <div class="item active">
+ <div class="container">
+ <div class="carousel-caption">
+ <h1>OPNFV<br/> facilitates the development and evolution<br/> of NFV components across<br/> various open source ecosystems<br/>
+ </h1>
+ <!--<p>Lorem Ipsum is simply dummy text of the printing.</p>
<p>-->
- <a class="btn btn-lg btn-primary" href="#" role="button">READ MORE</a>
- <!--<a class="caption-link" href="#" role="button">Inspinia Theme</a>-->
- <!--</p>-->
+ <a class="btn btn-lg btn-primary" href="#" role="button">READ MORE</a>
+ <!--<a class="caption-link" href="#" role="button">Inspinia Theme</a>-->
+ <!--</p>-->
+ </div>
+
+ </div>
+ <!-- Set background for slide in css -->
+ <div class="header-back one" style="background: url('images/header_one.jpg') 50% 0 no-repeat;"></div>
+
</div>
- <!--<div class="carousel-image wow zoomIn">
- <img src="img/landing/laptop.png" alt="laptop"/>
- </div>-->
- </div>
- <!-- Set background for slide in css -->
- <div class="header-back one" style="background: url('images/header_one.jpg') 50% 0 no-repeat;"></div>
</div>
- <!--<div class="item">
- <div class="container">
- <div class="carousel-caption blank">
- <h1>We create meaningful <br/> interfaces that inspire.</h1>
- <p>Cras justo odio, dapibus ac facilisis in, egestas eget quam.</p>
- <p><a class="btn btn-lg btn-primary" href="#" role="button">Learn more</a></p>
- </div>
- </div>-->
- <!-- Set background for slide in css -->
- <!--<div class="header-back two"></div>
- </div>-->
- </div>
- <!--<a class="left carousel-control" href="#inSlider" role="button" data-slide="prev">
- <span class="glyphicon glyphicon-chevron-left" aria-hidden="true"></span>
- <span class="sr-only">Previous</span>
- </a>
- <a class="right carousel-control" href="#inSlider" role="button" data-slide="next">
- <span class="glyphicon glyphicon-chevron-right" aria-hidden="true"></span>
- <span class="sr-only">Next</span>
- </a>-->
+
</div>
- <section id="DashBoard" class="container services">
- <div class="row">
+<section id="DashBoard" class="container services">
+ <div class="row">
- <h1>
- OPNFV’s goals are to:
- </h1>
- <div class="col-sm-3">
+ <h1>
+ OPNFV’s goals are to:
+ </h1>
+ <div class="col-sm-3">
- <p>Develop an integrated and tested open source platform that can be used to build NFV functionality--accelerating
- the introduction of new products and services</p>
- <p><a class="navy-link" href="#" role="button">Details »</a></p>
- </div>
- <div class="col-sm-3">
+ <p>Develop an integrated and tested open source platform that can be used to build NFV functionality--accelerating the introduction of new products and services</p>
+ <p><a class="navy-link" href="#" role="button">Details »</a></p>
+ </div>
+ <div class="col-sm-3">
- <p>Include participation of leading end users to validate that OPNFV meets the needs of user community</p>
- <p><a class="navy-link" href="#" role="button">Details »</a></p>
- </div>
- <div class="col-sm-3">
+            <p>Include participation of leading end users to validate that OPNFV meets the needs of the user community</p>
+ <p><a class="navy-link" href="#" role="button">Details »</a></p>
+ </div>
+ <div class="col-sm-3">
- <p>Contribute to and participate in relevant open source projects that will be leveraged in the OPNFV platform;
- ensuring consistency, performance and interoperability among open source components</p>
- <p><a class="navy-link" href="#" role="button">Details »</a></p>
- </div>
- <div class="col-sm-3">
+ <p>Contribute to and participate in relevant open source projects that will be leveraged in the OPNFV platform; ensuring consistency, performance and interoperability among open source components</p>
+ <p><a class="navy-link" href="#" role="button">Details »</a></p>
+ </div>
+ <div class="col-sm-3">
- <p>Establish an ecosystem for NFV solutions based on open standards and software to meet the needs of end users</p>
- <p><a class="navy-link" href="#" role="button">Details »</a></p>
- </div>
- <div class="col-sm-3">
+ <p>Establish an ecosystem for NFV solutions based on open standards and software to meet the needs of end users</p>
+ <p><a class="navy-link" href="#" role="button">Details »</a></p>
+ </div>
+ <div class="col-sm-3">
- <p>Promote OPNFV as the preferred platform and community for open source NFV</p>
- <p><a class="navy-link" href="#" role="button">Details »</a></p>
- </div>
+ <p>Promote OPNFV as the preferred platform and community for open source NFV</p>
+ <p><a class="navy-link" href="#" role="button">Details »</a></p>
</div>
- </section>
+ </div>
+</section>
<div ui-view></div>
<section id="contact" class="gray-section contact" style="background-image: url(images/word_map.png)">
- <div class="container">
- <div class="row m-b-lg">
- <div class="col-lg-12 text-center">
- <div class="navy-line"></div>
- <h1>Contact Us</h1>
- </div>
- </div>
- <div class="row m-b-lg">
- <div class="col-lg-3 col-lg-offset-3">
- <address>
+ <div class="container">
+ <div class="row m-b-lg">
+ <div class="col-lg-12 text-center">
+ <div class="navy-line"></div>
+ <h1>Contact Us</h1>
+ </div>
+ </div>
+ <div class="row m-b-lg">
+ <div class="col-lg-3 col-lg-offset-3">
+ <address>
<strong><span class="navy">Press, Analyst, or Speaking Inquiries</span></strong><br/> pr@opnfv.org
<br/>
</address>
- <address>
+ <address>
<strong><span class="navy">OPNFV Events</span></strong><br/> events@opnfv.org
<br/>
</address>
- <address>
+ <address>
<strong><span class="navy">IT Support</span></strong><br/>opnfv-helpdesk@rt.linuxfoundation.org
<br/>
</address>
- </div>
+ </div>
- <div class="col-lg-4">
- <address>
+ <div class="col-lg-4">
+ <address>
<strong><span class="navy">To submit and track bugs related to OPNFV</span></strong><br/>Please visit https://jira.opnfv.org
<br/>
</address>
- <address>
+ <address>
<strong><span class="navy">Newsletter</span></strong><br/>Sign up for the OPNFV newsletter
<br/>
</address>
- <address>
+ <address>
<strong><span class="navy">Membership</span></strong><br/>Please visit the Join as a Member page
<br/>
</address>
- </div>
- </div>
- <div class="row">
- <div class="col-lg-12 text-center">
- <img src="images/logo.png" />
+ </div>
+ </div>
+ <div class="row">
+ <div class="col-lg-12 text-center">
+ <img src="images/logo.png" />
- </div>
- </div>
- <div class="row">
- <div class="col-lg-8 col-lg-offset-2 text-center m-t-lg m-b-lg">
- <p><strong>© 2016 Open Platform for NFV Project, Inc</strong><br/> A Linux Foundation Collaborative Project. All
- Rights Reserved. Open Platform for NFV and OPNFV are trademarks of the Open Platform for NFV Project, Inc. Linux
- Foundation is a registered trademark of The Linux Foundation. Linux is a registered trademark of Linus Torvalds.
- Please see our terms of use, trademark policy, and privacy policy.
- </p>
- </div>
+ </div>
+ </div>
+ <div class="row">
+ <div class="col-lg-8 col-lg-offset-2 text-center m-t-lg m-b-lg">
+ <p><strong>© 2016 Open Platform for NFV Project, Inc</strong><br/> A Linux Foundation Collaborative Project. All Rights Reserved. Open Platform for NFV and OPNFV are trademarks of the Open Platform for NFV Project, Inc. Linux Foundation
+ is a registered trademark of The Linux Foundation. Linux is a registered trademark of Linus Torvalds. Please see our terms of use, trademark policy, and privacy policy.
+ </p>
+ </div>
+ </div>
</div>
- </div>
</section>
<script>
+ $(document).ready(function() {
- $(document).ready(function () {
+ // $('body').scrollspy({
+ // target: '.navbar-fixed-top',
+ // offset: 80
+ // });
- $('body').scrollspy({
- target: '.navbar-fixed-top',
- offset: 80
+ //Page scrolling feature
+ $('a.page-scroll').bind('click', function(event) {
+ var link = $(this);
+ $('html, body').stop().animate({
+ scrollTop: $(link.attr('href')).offset().top - 50
+ }, 500);
+ event.preventDefault();
+ $("#navbar").collapse('hide');
});
- // Page scrolling feature
$('a.page-scroll').bind('click', function(event) {
var link = $(this);
$('html, body').stop().animate({
$("#navbar").collapse('hide');
});
-
- console.log( $("selectVersion").val());
-
});
- // $(".select2_demo_1").select2();
- // $(".select2_demo_2").select2();
- // $(".select2_demo_3").select2({
- // placeholder: "Version",
- // allowClear: true
- // });
- // $(".select2_demo_4").select2({
- // placeholder: "Period",
- // allowClear: true
- // });
-
-
var config = {
- '.chosen-select' : {},
- '.chosen-select-deselect' : {allow_single_deselect:true},
- '.chosen-select-no-single' : {disable_search_threshold:10},
- '.chosen-select-no-results': {no_results_text:'Oops, nothing found!'},
- '.chosen-select-width' : {width:"95%"}
- }
- for (var selector in config) {
- $(selector).chosen(config[selector]);
- }
+ '.chosen-select': {},
+ '.chosen-select-deselect': {
+ allow_single_deselect: true
+ },
+ '.chosen-select-no-single': {
+ disable_search_threshold: 10
+ },
+ '.chosen-select-no-results': {
+ no_results_text: 'Oops, nothing found!'
+ },
+ '.chosen-select-width': {
+ width: "95%"
+ }
+ }
+ for (var selector in config) {
+ $(selector).chosen(config[selector]);
+ }
var cbpAnimatedHeader = (function() {
var docElem = document.documentElement,
- header = document.querySelector( '.navbar-default' ),
- didScroll = false,
- changeHeaderOn = 200;
+ header = document.querySelector('.navbar-default'),
+ didScroll = false,
+ changeHeaderOn = 200;
+
function init() {
- window.addEventListener( 'scroll', function( event ) {
- if( !didScroll ) {
+ window.addEventListener('scroll', function(event) {
+ if (!didScroll) {
didScroll = true;
- setTimeout( scrollPage, 250 );
+ setTimeout(scrollPage, 250);
}
- }, false );
+ }, false);
}
+
function scrollPage() {
var sy = scrollY();
- if ( sy >= changeHeaderOn ) {
+ if (sy >= changeHeaderOn) {
$(header).addClass('navbar-scroll')
- }
- else {
+ } else {
$(header).removeClass('navbar-scroll')
}
didScroll = false;
}
+
function scrollY() {
return window.pageYOffset || docElem.scrollTop;
}
init();
})();
-
-
-</script>
+</script>
\ No newline at end of file
{
- "name": "opnfv",
- "version": "0.0.0",
- "dependencies": {
- "angular": "^1.4.0",
- "bootstrap": "^3.2.0",
- "angular-animate": "^1.4.0",
- "jquery-slimscroll": "slimscroll#^1.3.8",
- "metisMenu": "~2.0.2",
- "chosen": "^1.6.2",
- "oclazyload": "^1.0.9",
- "angular-bootstrap": "~1.1.2",
- "angular-ui-router": "~0.2.15",
- "angular-resource": "^1.6.0",
- "angular-selectize2": "^3.0.1",
- "components-font-awesome": "^4.7.0"
- },
- "devDependencies": {
- "angular-mocks": "^1.4.0"
- },
- "appPath": "app",
- "moduleName": "opnfvApp",
- "overrides": {
- "bootstrap": {
- "main": [
- "less/bootstrap.less",
- "dist/css/bootstrap.css",
- "dist/js/bootstrap.js"
- ]
+ "name": "opnfv",
+ "version": "0.0.0",
+ "dependencies": {
+ "angular": "^1.4.0",
+ "bootstrap": "^3.2.0",
+ "angular-animate": "^1.4.0",
+ "jquery-slimscroll": "slimscroll#^1.3.8",
+ "metisMenu": "~2.0.2",
+ "chosen": "^1.6.2",
+ "oclazyload": "^1.0.9",
+ "angular-bootstrap": "~1.1.2",
+ "angular-ui-router": "~0.2.15",
+ "angular-resource": "^1.6.0",
+ "angular-selectize2": "^3.0.1",
+ "components-font-awesome": "^4.7.0",
+ "angular-tooltips": "^1.1.8",
+ "jQuery-rwdImageMaps": "*",
+ "animate.css": "^3.5.2",
+ "ng-dialog": "^1.0.0",
+ "inspiniacss": "^0.0.1"
+ },
+ "devDependencies": {
+ "angular-mocks": "^1.4.0"
+ },
+ "appPath": "app",
+ "moduleName": "opnfvApp",
+ "overrides": {
+ "bootstrap": {
+ "main": [
+ "less/bootstrap.less",
+ "dist/css/bootstrap.css",
+ "dist/js/bootstrap.js"
+ ]
+ },
+ "jQuery-rwdImageMaps": {
+ "main": [
+ "jquery.rwdImageMaps.min.js"
+ ]
+ },
+ "inspiniacss": {
+ "main": [
+ "style.css"
+ ]
+ }
}
- }
-}
+}
\ No newline at end of file
'bower_components/microplugin/src/microplugin.js',
'bower_components/selectize/dist/js/selectize.js',
'bower_components/angular-selectize2/dist/angular-selectize.js',
+ 'bower_components/angular-tooltips/dist/angular-tooltips.min.js',
'bower_components/angular-mocks/angular-mocks.js',
// endbower
'app/scripts/**/*.js',
+---
general:
installers:
- apex
versions:
- master
+ - danube
log:
log_file: reporting.log
directories:
# Relative to the path where the repo is cloned:
- dir_reporting: utils/tests/reporting/
- dir_log: utils/tests/reporting/log/
- dir_conf: utils/tests/reporting/conf/
- dir_utils: utils/tests/reporting/utils/
- dir_templates: utils/tests/reporting/templates/
- dir_display: utils/tests/reporting/display/
+ dir_reporting: utils/tests/reporting/
+ dir_log: utils/tests/reporting/log/
+ dir_conf: utils/tests/reporting/conf/
+ dir_utils: utils/tests/reporting/utils/
+ dir_templates: utils/tests/reporting/templates/
+ dir_display: utils/tests/reporting/display/
url: testresults.opnfv.org/reporting/
- ovno
- security_scan
- rally_sanity
+ - healthcheck
+ - odl_netvirt
+ - aaa
+ - cloudify_ims
+ - orchestra_ims
+ - juju_epc
+ - orchestra
+ - promise
max_scenario_criteria: 50
test_conf: https://git.opnfv.org/cgit/functest/plain/functest/ci/testcases.yaml
log_level: ERROR
jenkins_url: https://build.opnfv.org/ci/view/functest/job/
- exclude_noha: False
- exclude_virtual: True
+ exclude_noha: "False"
+ exclude_virtual: "False"
yardstick:
test_conf: https://git.opnfv.org/cgit/yardstick/plain/tests/ci/report_config.yaml
log_level: ERROR
+storperf:
+ test_list:
+ - snia_steady_state
+ log_level: ERROR
+
qtip:
bottleneck:
+
+vsperf:
--- /dev/null
+#!/usr/bin/python
+#
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+import datetime
+import jinja2
+import os
+
+# manage conf
+import utils.reporting_utils as rp_utils
+
+import utils.scenarioResult as sr
+
+installers = rp_utils.get_config('general.installers')
+versions = rp_utils.get_config('general.versions')
+PERIOD = rp_utils.get_config('general.period')
+
+# Logger
+logger = rp_utils.getLogger("Storperf-Status")
+reportingDate = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
+
+logger.info("*******************************************")
+logger.info("* Generating reporting scenario status *")
+logger.info("* Data retention = %s days *" % PERIOD)
+logger.info("* *")
+logger.info("*******************************************")
+
+# retrieve the list of storperf tests
+storperf_tests = rp_utils.get_config('storperf.test_list')
+logger.info("Storperf tests: %s" % storperf_tests)
+
+# For all the versions
+for version in versions:
+ # For all the installers
+ for installer in installers:
+ # get scenarios results data
+ # for the moment we consider only 1 case snia_steady_state
+ scenario_results = rp_utils.getScenarios("snia_steady_state",
+ installer,
+ version)
+ # logger.info("scenario_results: %s" % scenario_results)
+
+ scenario_stats = rp_utils.getScenarioStats(scenario_results)
+ logger.info("scenario_stats: %s" % scenario_stats)
+ items = {}
+ scenario_result_criteria = {}
+
+        # For each scenario, get the list of results
+ for s, s_result in scenario_results.items():
+ logger.info("---------------------------------")
+ logger.info("installer %s, version %s, scenario %s", installer,
+ version, s)
+ ten_criteria = len(s_result)
+
+ ten_score = 0
+ for v in s_result:
+ if "PASS" in v['criteria']:
+ ten_score += 1
+
+ logger.info("ten_score: %s / %s" % (ten_score, ten_criteria))
+
+ four_score = 0
+ try:
+ LASTEST_TESTS = rp_utils.get_config(
+ 'general.nb_iteration_tests_success_criteria')
+ s_result.sort(key=lambda x: x['start_date'])
+ four_result = s_result[-LASTEST_TESTS:]
+ logger.debug("four_result: {}".format(four_result))
+ logger.debug("LASTEST_TESTS: {}".format(LASTEST_TESTS))
+ # logger.debug("four_result: {}".format(four_result))
+ four_criteria = len(four_result)
+ for v in four_result:
+ if "PASS" in v['criteria']:
+ four_score += 1
+ logger.info("4 Score: %s / %s " % (four_score,
+ four_criteria))
+ except:
+ logger.error("Impossible to retrieve the four_score")
+
+ try:
+ s_status = (four_score * 100) / four_criteria
+ except:
+ s_status = 0
+ logger.info("Score percent = %s" % str(s_status))
+ s_four_score = str(four_score) + '/' + str(four_criteria)
+ s_ten_score = str(ten_score) + '/' + str(ten_criteria)
+ s_score_percent = str(s_status)
+
+ logger.debug(" s_status: {}".format(s_status))
+ if s_status == 100:
+ logger.info(">>>>> scenario OK, save the information")
+ else:
+ logger.info(">>>> scenario not OK, last 4 iterations = %s, \
+ last 10 days = %s" % (s_four_score, s_ten_score))
+
+ s_url = ""
+ if len(s_result) > 0:
+ build_tag = s_result[len(s_result)-1]['build_tag']
+ logger.debug("Build tag: %s" % build_tag)
+                s_url = rp_utils.getJenkinsUrl(build_tag)
+ logger.info("last jenkins url: %s" % s_url)
+
+ # Save daily results in a file
+ path_validation_file = ("./display/" + version +
+ "/storperf/scenario_history.txt")
+
+ if not os.path.exists(path_validation_file):
+ with open(path_validation_file, 'w') as f:
+ info = 'date,scenario,installer,details,score\n'
+ f.write(info)
+
+ with open(path_validation_file, "a") as f:
+ info = (reportingDate + "," + s + "," + installer +
+ "," + s_ten_score + "," +
+ str(s_score_percent) + "\n")
+ f.write(info)
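+                # an appended line looks like (hypothetical values):
+                #   2017-02-15 10:00,snia_steady_state,apex,8/10,80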
+
+ scenario_result_criteria[s] = sr.ScenarioResult(s_status,
+ s_four_score,
+ s_ten_score,
+ s_score_percent,
+ s_url)
+
+ logger.info("--------------------------")
+
+ templateLoader = jinja2.FileSystemLoader(".")
+ templateEnv = jinja2.Environment(loader=templateLoader,
+ autoescape=True)
+
+ TEMPLATE_FILE = "./storperf/template/index-status-tmpl.html"
+ template = templateEnv.get_template(TEMPLATE_FILE)
+
+ outputText = template.render(scenario_results=scenario_result_criteria,
+ installer=installer,
+ period=PERIOD,
+ version=version,
+ date=reportingDate)
+
+ with open("./display/" + version +
+ "/storperf/status-" + installer + ".html", "wb") as fh:
+ fh.write(outputText)
--- /dev/null
+ <html>
+ <head>
+ <meta charset="utf-8">
+ <!-- Bootstrap core CSS -->
+ <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
+ <link href="../../css/default.css" rel="stylesheet">
+ <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
+ <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
+ <script type="text/javascript" src="http://d3js.org/d3.v2.min.js"></script>
+ <script type="text/javascript" src="../../js/gauge.js"></script>
+ <script type="text/javascript" src="../../js/trend.js"></script>
+ <script>
+ function onDocumentReady() {
+ // Gauge management
+ {% for scenario in scenario_results.keys() -%}
+ var gaugeScenario{{loop.index}} = gauge('#gaugeScenario{{loop.index}}');
+ {%- endfor %}
+ // assign success rate to the gauge
+ function updateReadings() {
+ {% for scenario in scenario_results.keys() -%}
+ gaugeScenario{{loop.index}}.update({{scenario_results[scenario].getScorePercent()}});
+ {%- endfor %}
+ }
+ updateReadings();
+ }
+
+ // trend line management
+ d3.csv("./scenario_history.txt", function(data) {
+ // ***************************************
+ // Create the trend line
+ {% for scenario in scenario_results.keys() -%}
+ // for scenario {{scenario}}
+ // Filter results
+ var trend{{loop.index}} = data.filter(function(row) {
+ return row["scenario"]=="{{scenario}}" && row["installer"]=="{{installer}}";
+ })
+ // Parse the date
+ trend{{loop.index}}.forEach(function(d) {
+ d.date = parseDate(d.date);
+ d.score = +d.score
+ });
+ // Draw the trend line
+ var mytrend = trend("#trend_svg{{loop.index}}",trend{{loop.index}})
+ // ****************************************
+ {%- endfor %}
+ });
+ if ( !window.isLoaded ) {
+ window.addEventListener("load", function() {
+ onDocumentReady();
+ }, false);
+ } else {
+ onDocumentReady();
+ }
+ </script>
+ <script type="text/javascript">
+ $(document).ready(function (){
+ $(".btn-more").click(function() {
+ $(this).hide();
+ $(this).parent().find(".panel-default").show();
+ });
+ })
+ </script>
+ </head>
+ <body>
+ <div class="container">
+ <div class="masthead">
+ <h3 class="text-muted">Storperf status page ({{version}}, {{date}})</h3>
+ <nav>
+ <ul class="nav nav-justified">
+ <li class="active"><a href="http://testresults.opnfv.org/reporting/index.html">Home</a></li>
+ <li><a href="status-apex.html">Apex</a></li>
+ <li><a href="status-compass.html">Compass</a></li>
+ <li><a href="status-daisy.html">Daisy</a></li>
+ <li><a href="status-fuel.html">Fuel</a></li>
+ <li><a href="status-joid.html">Joid</a></li>
+ </ul>
+ </nav>
+ </div>
+<div class="row">
+ <div class="col-md-1"></div>
+ <div class="col-md-10">
+ <div class="page-header">
+ <h2>{{installer}}</h2>
+ </div>
+
+ <div class="scenario-overview">
+ <div class="panel-heading"><h4><b>List of last scenarios ({{version}}) run over the last {{period}} days </b></h4></div>
+ <table class="table">
+ <tr>
+ <th width="40%">Scenario</th>
+ <th width="20%">Status</th>
+ <th width="20%">Trend</th>
+ <th width="10%">Last 4 Iterations</th>
+ <th width="10%">Last 10 Days</th>
+ </tr>
+ {% for scenario,result in scenario_results.iteritems() -%}
+ <tr class="tr-ok">
+ <td><a href="{{scenario_results[scenario].getLastUrl()}}">{{scenario}}</a></td>
+ <td><div id="gaugeScenario{{loop.index}}"></div></td>
+ <td><div id="trend_svg{{loop.index}}"></div></td>
+ <td>{{scenario_results[scenario].getFourDaysScore()}}</td>
+ <td>{{scenario_results[scenario].getTenDaysScore()}}</td>
+ </tr>
+ {%- endfor %}
+ </table>
+ </div>
+
+
+ </div>
+ <div class="col-md-1"></div>
+</div>
def getScenarios(case, installer, version):
- case = case.getName()
+    try:
+        case = case.getName()
+    except AttributeError:
+        # case is not a test case object; fall back to a plain string name
+        if not isinstance(case, str):
+            raise ValueError("Case cannot be evaluated")
+
period = get_config('general.period')
url_base = get_config('testapi.url')
url_base = get_config('functest.jenkins_url')
try:
build_id = [int(s) for s in build_tag.split("-") if s.isdigit()]
- url_id = build_tag[8:-(len(build_id) + 3)] + "/" + str(build_id[0])
+ url_id = (build_tag[8:-(len(str(build_id[0])) + 1)] +
+ "/" + str(build_id[0]))
jenkins_url = url_base + url_id + "/console"
except:
print('Impossible to get jenkins url:')
+ if "jenkins-" not in build_tag:
+ jenkins_url = None
+
return jenkins_url
class ScenarioResult(object):
def __init__(self, status, four_days_score='', ten_days_score='',
- score_percent=0.0):
+ score_percent=0.0, last_url=''):
self.status = status
self.four_days_score = four_days_score
self.ten_days_score = ten_days_score
self.score_percent = score_percent
+ self.last_url = last_url
def getStatus(self):
return self.status
def getScorePercent(self):
return self.score_percent
+
+ def getLastUrl(self):
+ return self.last_url
import jinja2
import os
-import scenarioResult as sr
+import utils.scenarioResult as sr
from scenarios import config as cf
# manage conf
--- /dev/null
+# .coveragerc to control coverage.py
+
+[run]
+branch = True
+source =
+ opnfv_testapi
+
+[report]
+# Regexes for lines to exclude from consideration
+exclude_lines =
+ # Have to re-enable the standard pragma
+ pragma: no cover
+
+ # Don't complain about missing debug-only code:
+ def __repr__
+ if self\.debug
+
+ # Don't complain if tests don't hit defensive assertion code:
+ raise AssertionError
+ raise NotImplementedError
+
+ # Don't complain if non-runnable code isn't run:
+ if 0:
+ if __name__ == .__main__.:
+
+ignore_errors = True
+
# $ docker build -t opnfv/testapi:tag .
#
# Execution:
-# $ docker run -dti -p 8000:8000 \
-# -e "swagger_url=http://10.63.243.17:8000" \
+# $ docker run -dti -p 8001:8000 \
+# -e "swagger_url=http://10.63.243.17:8001" \
# -e "mongodb_url=mongodb://10.63.243.17:27017/" \
-# -e "api_port=8000"
# opnfv/testapi:tag
#
-# NOTE: providing swagger_url, api_port, mongodb_url is optional.
+# NOTE: providing swagger_url and mongodb_url is optional.
# If not provided, it will use the default one
# configured in config.ini
#
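# For instance (hypothetical host, reusing the values above), running with
# only mongodb_url set keeps the swagger base_url from config.ini:
#   $ docker run -dti -p 8001:8000 \
#     -e "mongodb_url=mongodb://10.63.243.17:27017/" \
#     opnfv/testapi:tag
#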
if [ "$swagger_url" != "" ]; then
sudo crudini --set --existing $FILE swagger base_url $swagger_url
fi
-
-if [ "$api_port" != "" ];then
- sudo crudini --set --existing $FILE api port $api_port
-fi
-
port = 8000
# With debug_on set to true, error traces will be shown in HTTP responses
debug = True
+authenticate = False
[swagger]
base_url = http://localhost:8000
"""
import argparse
+import sys
-import tornado.ioloop
import motor
+import tornado.ioloop
-from opnfv_testapi.common.config import APIConfig
-from opnfv_testapi.tornado_swagger import swagger
+from opnfv_testapi.common import config
from opnfv_testapi.router import url_mappings
+from opnfv_testapi.tornado_swagger import swagger
+
+CONF = None
+
-# optionally get config file from command line
-parser = argparse.ArgumentParser()
-parser.add_argument("-c", "--config-file", dest='config_file',
- help="Config file location")
-args = parser.parse_args()
-CONF = APIConfig().parse(args.config_file)
+def parse_config(argv=[]):
+ global CONF
+ parser = argparse.ArgumentParser()
+ parser.add_argument("-c", "--config-file", dest='config_file',
+ help="Config file location")
+ args = parser.parse_args(argv)
+ CONF = config.APIConfig().parse(args.config_file)
-# connecting to MongoDB server, and choosing database
-client = motor.MotorClient(CONF.mongo_url)
-db = client[CONF.mongo_dbname]
-swagger.docs(base_url=CONF.swagger_base_url)
+def get_db():
+ return motor.MotorClient(CONF.mongo_url)[CONF.mongo_dbname]
def make_app():
+ swagger.docs(base_url=CONF.swagger_base_url)
return swagger.Application(
url_mappings.mappings,
- db=db,
+ db=get_db(),
debug=CONF.api_debug_on,
+ auth=CONF.api_authenticate_on
)
def main():
+ parse_config(sys.argv[1:])
application = make_app()
application.listen(CONF.api_port)
tornado.ioloop.IOLoop.current().start()
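
# A sketch of a typical invocation (assuming main() is wired up as the entry
# point): python -m opnfv_testapi.cmd.server -c /etc/opnfv_testapi/config.ini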
# http://www.apache.org/licenses/LICENSE-2.0
# feng.xiaowei@zte.com.cn remove prepare_put_request 5-30-2016
##############################################################################
-
-
-from ConfigParser import SafeConfigParser, NoOptionError
+import ConfigParser
+import os
class ParseError(Exception):
return 'error parsing config file : %s' % self.msg
-class APIConfig:
+class APIConfig(object):
"""
The purpose of this class is to load values correctly from the config file.
Each key is declared as an attribute in __init__() and linked in parse()
self.mongo_dbname = None
self.api_port = None
self.api_debug_on = None
+ self.api_authenticate_on = None
self._parser = None
self.swagger_base_url = None
def _get_parameter(self, section, param):
try:
return self._parser.get(section, param)
- except NoOptionError:
- raise ParseError("[%s.%s] parameter not found" % (section, param))
+ except ConfigParser.NoOptionError:
+ raise ParseError("No parameter: [%s.%s]" % (section, param))
def _get_int_parameter(self, section, param):
try:
return int(self._get_parameter(section, param))
except ValueError:
- raise ParseError("[%s.%s] not an int" % (section, param))
+ raise ParseError("Not int: [%s.%s]" % (section, param))
def _get_bool_parameter(self, section, param):
result = self._get_parameter(section, param)
return False
raise ParseError(
- "[%s.%s : %s] not a boolean" % (section, param, result))
+ "Not boolean: [%s.%s : %s]" % (section, param, result))
@staticmethod
def parse(config_location=None):
if config_location is None:
config_location = obj._default_config_location
- obj._parser = SafeConfigParser()
- obj._parser.read(config_location)
- if not obj._parser:
+ if not os.path.exists(config_location):
raise ParseError("%s not found" % config_location)
+ obj._parser = ConfigParser.SafeConfigParser()
+ obj._parser.read(config_location)
+
# Linking attributes to keys from file with their sections
obj.mongo_url = obj._get_parameter("mongo", "url")
obj.mongo_dbname = obj._get_parameter("mongo", "dbname")
obj.api_port = obj._get_int_parameter("api", "port")
obj.api_debug_on = obj._get_bool_parameter("api", "debug")
+ obj.api_authenticate_on = obj._get_bool_parameter("api",
+ "authenticate")
+
obj.swagger_base_url = obj._get_parameter("swagger", "base_url")
return obj
-
- def __str__(self):
- return "mongo_url = %s \n" \
- "mongo_dbname = %s \n" \
- "api_port = %s \n" \
- "api_debug_on = %s \n" \
- "swagger_base_url = %s \n" % (self.mongo_url,
- self.mongo_dbname,
- self.api_port,
- self.api_debug_on,
- self.swagger_base_url)
DEFAULT_REPRESENTATION = "application/json"
HTTP_BAD_REQUEST = 400
+HTTP_UNAUTHORIZED = 401
HTTP_FORBIDDEN = 403
HTTP_NOT_FOUND = 404
HTTP_OK = 200
# feng.xiaowei@zte.com.cn remove DashboardHandler 5-30-2016
##############################################################################
-import json
from datetime import datetime
+import functools
+import json
from tornado import gen
-from tornado.web import RequestHandler, asynchronous, HTTPError
+from tornado import web
-from models import CreateResponse
-from opnfv_testapi.common.constants import DEFAULT_REPRESENTATION, \
- HTTP_BAD_REQUEST, HTTP_NOT_FOUND, HTTP_FORBIDDEN
+import models
+from opnfv_testapi.common import constants
from opnfv_testapi.tornado_swagger import swagger
-class GenericApiHandler(RequestHandler):
+class GenericApiHandler(web.RequestHandler):
def __init__(self, application, request, **kwargs):
super(GenericApiHandler, self).__init__(application, request, **kwargs)
self.db = self.settings["db"]
self.db_testcases = 'testcases'
self.db_results = 'results'
self.db_scenarios = 'scenarios'
+ self.auth = self.settings["auth"]
def prepare(self):
if self.request.method != "GET" and self.request.method != "DELETE":
if self.request.headers.get("Content-Type") is not None:
if self.request.headers["Content-Type"].startswith(
- DEFAULT_REPRESENTATION):
+ constants.DEFAULT_REPRESENTATION):
try:
self.json_args = json.loads(self.request.body)
except (ValueError, KeyError, TypeError) as error:
- raise HTTPError(HTTP_BAD_REQUEST,
- "Bad Json format [{}]".
- format(error))
+ raise web.HTTPError(constants.HTTP_BAD_REQUEST,
+ "Bad Json format [{}]".
+ format(error))
def finish_request(self, json_object=None):
if json_object:
self.write(json.dumps(json_object))
- self.set_header("Content-Type", DEFAULT_REPRESENTATION)
+ self.set_header("Content-Type", constants.DEFAULT_REPRESENTATION)
self.finish()
def _create_response(self, resource):
href = self.request.full_url() + '/' + str(resource)
- return CreateResponse(href=href).format()
+ return models.CreateResponse(href=href).format()
def format_data(self, data):
cls_data = self.table_cls.from_dict(data)
return cls_data.format_http()
- @asynchronous
+ def authenticate(method):
+ @web.asynchronous
+ @gen.coroutine
+ @functools.wraps(method)
+ def wrapper(self, *args, **kwargs):
+ if self.auth:
+ try:
+ token = self.request.headers['X-Auth-Token']
+ except KeyError:
+ raise web.HTTPError(constants.HTTP_UNAUTHORIZED,
+ "No Authentication Header.")
+ query = {'access_token': token}
+ check = yield self._eval_db_find_one(query, 'tokens')
+ if not check:
+ raise web.HTTPError(constants.HTTP_FORBIDDEN,
+ "Invalid Token.")
+ ret = yield gen.coroutine(method)(self, *args, **kwargs)
+ raise gen.Return(ret)
+ return wrapper
+
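+    # When [api] authenticate is True, the decorated write handlers below
+    # (_create, _delete, _update) expect an X-Auth-Token header whose value
+    # matches an access_token document in the "tokens" collection; a
+    # hypothetical client would send "X-Auth-Token: <token>" with each
+    # POST/PUT/DELETE request.
+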
+ @web.asynchronous
@gen.coroutine
+ @authenticate
def _create(self, miss_checks, db_checks, **kwargs):
"""
:param miss_checks: [miss1, miss2]
:param db_checks: [(table, exist, query, error)]
"""
if self.json_args is None:
- raise HTTPError(HTTP_BAD_REQUEST, "no body")
+ raise web.HTTPError(constants.HTTP_BAD_REQUEST, "no body")
data = self.table_cls.from_dict(self.json_args)
for miss in miss_checks:
miss_data = data.__getattribute__(miss)
if miss_data is None or miss_data == '':
- raise HTTPError(HTTP_BAD_REQUEST,
- '{} missing'.format(miss))
+ raise web.HTTPError(constants.HTTP_BAD_REQUEST,
+ '{} missing'.format(miss))
for k, v in kwargs.iteritems():
data.__setattr__(k, v)
check = yield self._eval_db_find_one(query(data), table)
if (exist and not check) or (not exist and check):
code, message = error(data)
- raise HTTPError(code, message)
+ raise web.HTTPError(code, message)
if self.table != 'results':
data.creation_date = datetime.now()
resource = _id
self.finish_request(self._create_response(resource))
- @asynchronous
+ @web.asynchronous
@gen.coroutine
def _list(self, query=None, res_op=None, *args, **kwargs):
if query is None:
res = res_op(data, *args)
self.finish_request(res)
- @asynchronous
+ @web.asynchronous
@gen.coroutine
def _get_one(self, query):
data = yield self._eval_db_find_one(query)
if data is None:
- raise HTTPError(HTTP_NOT_FOUND,
- "[{}] not exist in table [{}]"
- .format(query, self.table))
+ raise web.HTTPError(constants.HTTP_NOT_FOUND,
+ "[{}] not exist in table [{}]"
+ .format(query, self.table))
self.finish_request(self.format_data(data))
- @asynchronous
+ @web.asynchronous
@gen.coroutine
+ @authenticate
def _delete(self, query):
data = yield self._eval_db_find_one(query)
if data is None:
- raise HTTPError(HTTP_NOT_FOUND,
- "[{}] not exit in table [{}]"
- .format(query, self.table))
+            raise web.HTTPError(constants.HTTP_NOT_FOUND,
+                                "[{}] not exist in table [{}]"
+                                .format(query, self.table))
yield self._eval_db(self.table, 'remove', query)
self.finish_request()
- @asynchronous
+ @web.asynchronous
@gen.coroutine
+ @authenticate
def _update(self, query, db_keys):
if self.json_args is None:
- raise HTTPError(HTTP_BAD_REQUEST, "No payload")
+ raise web.HTTPError(constants.HTTP_BAD_REQUEST, "No payload")
# check old data exist
from_data = yield self._eval_db_find_one(query)
if from_data is None:
- raise HTTPError(HTTP_NOT_FOUND,
- "{} could not be found in table [{}]"
- .format(query, self.table))
+ raise web.HTTPError(constants.HTTP_NOT_FOUND,
+ "{} could not be found in table [{}]"
+ .format(query, self.table))
data = self.table_cls.from_dict(from_data)
# check new data exist
if not equal:
to_data = yield self._eval_db_find_one(new_query)
if to_data is not None:
- raise HTTPError(HTTP_FORBIDDEN,
- "{} already exists in table [{}]"
- .format(new_query, self.table))
+ raise web.HTTPError(constants.HTTP_FORBIDDEN,
+ "{} already exists in table [{}]"
+ .format(new_query, self.table))
# we merge the whole document """
edit_request = self._update_requests(data)
request = self._update_request(request, k, v,
data.__getattribute__(k))
if not request:
- raise HTTPError(HTTP_FORBIDDEN, "Nothing to update")
+ raise web.HTTPError(constants.HTTP_FORBIDDEN, "Nothing to update")
edit_request = data.format()
edit_request.update(request)
-##############################################################################\r
-# Copyright (c) 2015 Orange\r
-# guyrodrigue.koffi@orange.com / koffirodrigue@gmail.com\r
-# All rights reserved. This program and the accompanying materials\r
-# are made available under the terms of the Apache License, Version 2.0\r
-# which accompanies this distribution, and is available at\r
-# http://www.apache.org/licenses/LICENSE-2.0\r
-# feng.xiaowei@zte.com.cn mv Pod to pod_models.py 5-18-2016\r
-# feng.xiaowei@zte.com.cn add MetaCreateResponse/MetaGetResponse 5-18-2016\r
-# feng.xiaowei@zte.com.cn mv TestProject to project_models.py 5-19-2016\r
-# feng.xiaowei@zte.com.cn delete meta class 5-19-2016\r
-# feng.xiaowei@zte.com.cn add CreateResponse 5-19-2016\r
-# feng.xiaowei@zte.com.cn mv TestCase to testcase_models.py 5-20-2016\r
-# feng.xiaowei@zte.com.cn mv TestResut to result_models.py 5-23-2016\r
-# feng.xiaowei@zte.com.cn add ModelBase 12-20-2016\r
-##############################################################################\r
-import copy\r
-\r
-from opnfv_testapi.tornado_swagger import swagger\r
-\r
-\r
-class ModelBase(object):\r
-\r
- def _format(self, excludes):\r
- new_obj = copy.deepcopy(self)\r
- dicts = new_obj.__dict__\r
- for k in dicts.keys():\r
- if k in excludes:\r
- del dicts[k]\r
- elif dicts[k]:\r
- if hasattr(dicts[k], 'format'):\r
- dicts[k] = dicts[k].format()\r
- elif isinstance(dicts[k], list):\r
- hs = list()\r
- [hs.append(h.format() if hasattr(h, 'format') else str(h))\r
- for h in dicts[k]]\r
- dicts[k] = hs\r
- elif not isinstance(dicts[k], (str, int, float, dict)):\r
- dicts[k] = str(dicts[k])\r
- return dicts\r
-\r
- def format(self):\r
- return self._format(['_id'])\r
-\r
- def format_http(self):\r
- return self._format([])\r
-\r
- @staticmethod\r
- def attr_parser():\r
- return {}\r
-\r
- @classmethod\r
- def from_dict(cls, a_dict):\r
- if a_dict is None:\r
- return None\r
-\r
- attr_parser = cls.attr_parser()\r
- t = cls()\r
- for k, v in a_dict.iteritems():\r
- value = v\r
- if isinstance(v, dict) and k in attr_parser:\r
- value = attr_parser[k].from_dict(v)\r
- elif isinstance(v, list) and k in attr_parser:\r
- value = []\r
- for item in v:\r
- value.append(attr_parser[k].from_dict(item))\r
-\r
- t.__setattr__(k, value)\r
-\r
- return t\r
-\r
-\r
-@swagger.model()\r
-class CreateResponse(ModelBase):\r
- def __init__(self, href=''):\r
- self.href = href\r
-\r
-\r
-@swagger.model()\r
-class Versions(ModelBase):\r
- """\r
- @property versions:\r
- @ptype versions: C{list} of L{Version}\r
- """\r
-\r
- def __init__(self):\r
- self.versions = list()\r
-\r
- @staticmethod\r
- def attr_parser():\r
- return {'versions': Version}\r
-\r
-\r
-@swagger.model()\r
-class Version(ModelBase):\r
- def __init__(self, version=None, description=None):\r
- self.version = version\r
- self.description = description\r
+##############################################################################
+# Copyright (c) 2015 Orange
+# guyrodrigue.koffi@orange.com / koffirodrigue@gmail.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+# feng.xiaowei@zte.com.cn mv Pod to pod_models.py 5-18-2016
+# feng.xiaowei@zte.com.cn add MetaCreateResponse/MetaGetResponse 5-18-2016
+# feng.xiaowei@zte.com.cn mv TestProject to project_models.py 5-19-2016
+# feng.xiaowei@zte.com.cn delete meta class 5-19-2016
+# feng.xiaowei@zte.com.cn add CreateResponse 5-19-2016
+# feng.xiaowei@zte.com.cn mv TestCase to testcase_models.py 5-20-2016
+# feng.xiaowei@zte.com.cn mv TestResut to result_models.py 5-23-2016
+# feng.xiaowei@zte.com.cn add ModelBase 12-20-2016
+##############################################################################
+import copy
+import ast
+
+
+from opnfv_testapi.tornado_swagger import swagger
+
+
+class ModelBase(object):
+
+ def format(self):
+ return self._format(['_id'])
+
+ def format_http(self):
+ return self._format([])
+
+ @classmethod
+ def from_dict(cls, a_dict):
+ if a_dict is None:
+ return None
+
+ attr_parser = cls.attr_parser()
+ t = cls()
+ for k, v in a_dict.iteritems():
+ value = v
+ if isinstance(v, dict) and k in attr_parser:
+ value = attr_parser[k].from_dict(v)
+ elif isinstance(v, list) and k in attr_parser:
+ value = []
+ for item in v:
+ value.append(attr_parser[k].from_dict(item))
+
+ t.__setattr__(k, value)
+
+ return t
+
+ @staticmethod
+ def attr_parser():
+ return {}
+
+ def _format(self, excludes):
+ new_obj = copy.deepcopy(self)
+ dicts = new_obj.__dict__
+ for k in dicts.keys():
+ if k in excludes:
+ del dicts[k]
+ elif dicts[k]:
+ dicts[k] = self._obj_format(dicts[k])
+ return dicts
+
+ def _obj_format(self, obj):
+ if self._has_format(obj):
+ obj = obj.format()
+ elif isinstance(obj, unicode):
+ try:
+ obj = self._obj_format(ast.literal_eval(obj))
+ except:
+ try:
+ obj = str(obj)
+                except:
+                    # keep the original unicode object if str() fails
+                    pass
+ elif isinstance(obj, list):
+ hs = list()
+ for h in obj:
+ hs.append(self._obj_format(h))
+ obj = hs
+ elif not isinstance(obj, (str, int, float, dict)):
+ obj = str(obj)
+ return obj
+
+ @staticmethod
+ def _has_format(obj):
+ return not isinstance(obj, (str, unicode)) and hasattr(obj, 'format')
+
+
+@swagger.model()
+class CreateResponse(ModelBase):
+ def __init__(self, href=''):
+ self.href = href
+
+
+@swagger.model()
+class Versions(ModelBase):
+ """
+ @property versions:
+ @ptype versions: C{list} of L{Version}
+ """
+
+ def __init__(self):
+ self.versions = list()
+
+ @staticmethod
+ def attr_parser():
+ return {'versions': Version}
+
+
+@swagger.model()
+class Version(ModelBase):
+ def __init__(self, version=None, description=None):
+ self.version = version
+ self.description = description
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+import handlers
+from opnfv_testapi.common import constants
from opnfv_testapi.tornado_swagger import swagger
-from handlers import GenericApiHandler
-from pod_models import Pod
-from opnfv_testapi.common.constants import HTTP_FORBIDDEN
+import pod_models
-class GenericPodHandler(GenericApiHandler):
+class GenericPodHandler(handlers.GenericApiHandler):
def __init__(self, application, request, **kwargs):
super(GenericPodHandler, self).__init__(application, request, **kwargs)
self.table = 'pods'
- self.table_cls = Pod
+ self.table_cls = pod_models.Pod
class PodCLHandler(GenericPodHandler):
def error(data):
message = '{} already exists as a pod'.format(data.name)
- return HTTP_FORBIDDEN, message
+ return constants.HTTP_FORBIDDEN, message
miss_checks = ['name']
db_checks = [(self.table, False, query, error)]
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+import handlers
+from opnfv_testapi.common import constants
from opnfv_testapi.tornado_swagger import swagger
-from handlers import GenericApiHandler
-from opnfv_testapi.common.constants import HTTP_FORBIDDEN
-from project_models import Project
+import project_models
-class GenericProjectHandler(GenericApiHandler):
+class GenericProjectHandler(handlers.GenericApiHandler):
def __init__(self, application, request, **kwargs):
super(GenericProjectHandler, self).__init__(application,
request,
**kwargs)
self.table = 'projects'
- self.table_cls = Project
+ self.table_cls = project_models.Project
class ProjectCLHandler(GenericProjectHandler):
def error(data):
message = '{} already exists as a project'.format(data.name)
- return HTTP_FORBIDDEN, message
+ return constants.HTTP_FORBIDDEN, message
miss_checks = ['name']
db_checks = [(self.table, False, query, error)]
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-from datetime import datetime, timedelta
+from datetime import datetime
+from datetime import timedelta
-from bson.objectid import ObjectId
-from tornado.web import HTTPError
+from bson import objectid
+from tornado import web
-from opnfv_testapi.common.constants import HTTP_BAD_REQUEST, HTTP_NOT_FOUND
-from opnfv_testapi.resources.handlers import GenericApiHandler
-from opnfv_testapi.resources.result_models import TestResult
+from opnfv_testapi.common import constants
+from opnfv_testapi.resources import handlers
+from opnfv_testapi.resources import result_models
from opnfv_testapi.tornado_swagger import swagger
-class GenericResultHandler(GenericApiHandler):
+class GenericResultHandler(handlers.GenericApiHandler):
def __init__(self, application, request, **kwargs):
super(GenericResultHandler, self).__init__(application,
request,
**kwargs)
self.table = self.db_results
- self.table_cls = TestResult
+ self.table_cls = result_models.TestResult
def get_int(self, key, value):
try:
value = int(value)
except:
- raise HTTPError(HTTP_BAD_REQUEST, '{} must be int'.format(key))
+ raise web.HTTPError(constants.HTTP_BAD_REQUEST,
+ '{} must be int'.format(key))
return value
def set_query(self):
def pod_error(data):
message = 'Could not find pod [{}]'.format(data.pod_name)
- return HTTP_NOT_FOUND, message
+ return constants.HTTP_NOT_FOUND, message
def project_query(data):
return {'name': data.project_name}
def project_error(data):
message = 'Could not find project [{}]'.format(data.project_name)
- return HTTP_NOT_FOUND, message
+ return constants.HTTP_NOT_FOUND, message
def testcase_query(data):
return {'project_name': data.project_name, 'name': data.case_name}
def testcase_error(data):
message = 'Could not find testcase [{}] in project [{}]'\
.format(data.case_name, data.project_name)
- return HTTP_NOT_FOUND, message
+ return constants.HTTP_NOT_FOUND, message
miss_checks = ['pod_name', 'project_name', 'case_name']
db_checks = [('pods', True, pod_query, pod_error),
@raise 404: test result not exist
"""
query = dict()
- query["_id"] = ObjectId(result_id)
+ query["_id"] = objectid.ObjectId(result_id)
self._get_one(query)
@swagger.operation(nickname="updateTestResultById")
@raise 404: result not exist
@raise 403: nothing to update
"""
- query = {'_id': ObjectId(result_id)}
+ query = {'_id': objectid.ObjectId(result_id)}
db_keys = []
self._update(query, db_keys)
-from opnfv_testapi.common.constants import HTTP_FORBIDDEN
-from opnfv_testapi.resources.handlers import GenericApiHandler
-from opnfv_testapi.resources.scenario_models import Scenario
+from opnfv_testapi.common import constants
+from opnfv_testapi.resources import handlers
import opnfv_testapi.resources.scenario_models as models
from opnfv_testapi.tornado_swagger import swagger
-class GenericScenarioHandler(GenericApiHandler):
+class GenericScenarioHandler(handlers.GenericApiHandler):
def __init__(self, application, request, **kwargs):
super(GenericScenarioHandler, self).__init__(application,
request,
**kwargs)
self.table = self.db_scenarios
- self.table_cls = Scenario
+ self.table_cls = models.Scenario
class ScenariosCLHandler(GenericScenarioHandler):
def error(data):
message = '{} already exists as a scenario'.format(data.name)
- return HTTP_FORBIDDEN, message
+ return constants.HTTP_FORBIDDEN, message
miss_checks = ['name']
db_checks = [(self.table, False, query, error)]
db_keys = ['name']
self._update(query, db_keys)
+ @swagger.operation(nickname="deleteScenarioByName")
+ def delete(self, name):
+ """
+ @description: delete a scenario by name
+ @return 200: delete success
+ @raise 404: scenario not exist:
+ """
+
+ query = {'name': name}
+ self._delete(query)
+
def _update_query(self, keys, data):
query = dict()
equal = True
return {'scores': ScenarioScore,
'trust_indicators': ScenarioTI}
+ def __eq__(self, other):
+        return (self.project == other.project and
+                self._customs_eq(other) and
+                self._scores_eq(other) and
+                self._ti_eq(other))
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def _customs_eq(self, other):
+ return set(self.customs) == set(other.customs)
+
+ def _scores_eq(self, other):
+ return set(self.scores) == set(other.scores)
+
+ def _ti_eq(self, other):
+ return set(self.trust_indicators) == set(other.trust_indicators)
+
@swagger.model()
class ScenarioVersion(models.ModelBase):
def attr_parser():
return {'projects': ScenarioProject}
+ def __eq__(self, other):
+        return (self.version == other.version and self._projects_eq(other))
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def _projects_eq(self, other):
+ for s_project in self.projects:
+ for o_project in other.projects:
+ if s_project.project == o_project.project:
+ if s_project != o_project:
+ return False
+
+ return True
+
@swagger.model()
class ScenarioInstaller(models.ModelBase):
def attr_parser():
return {'versions': ScenarioVersion}
+ def __eq__(self, other):
+        return (self.installer == other.installer and self._versions_eq(other))
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def _versions_eq(self, other):
+ for s_version in self.versions:
+ for o_version in other.versions:
+ if s_version.version == o_version.version:
+ if s_version != o_version:
+ return False
+
+ return True
+
@swagger.model()
class ScenarioCreateRequest(models.ModelBase):
def attr_parser():
return {'installers': ScenarioInstaller}
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __eq__(self, other):
+        return (self.name == other.name and self._installers_eq(other))
+
+ def _installers_eq(self, other):
+ for s_install in self.installers:
+ for o_install in other.installers:
+ if s_install.installer == o_install.installer:
+ if s_install != o_install:
+ return False
+
+ return True
+
@swagger.model()
class Scenarios(models.ModelBase):
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-from opnfv_testapi.common.constants import HTTP_FORBIDDEN
-from opnfv_testapi.resources.handlers import GenericApiHandler
-from opnfv_testapi.resources.testcase_models import Testcase
+from opnfv_testapi.common import constants
+from opnfv_testapi.resources import handlers
+from opnfv_testapi.resources import testcase_models
from opnfv_testapi.tornado_swagger import swagger
-class GenericTestcaseHandler(GenericApiHandler):
+class GenericTestcaseHandler(handlers.GenericApiHandler):
def __init__(self, application, request, **kwargs):
super(GenericTestcaseHandler, self).__init__(application,
request,
**kwargs)
self.table = self.db_testcases
- self.table_cls = Testcase
+ self.table_cls = testcase_models.Testcase
class TestcaseCLHandler(GenericTestcaseHandler):
def p_error(data):
message = 'Could not find project [{}]'.format(data.project_name)
- return HTTP_FORBIDDEN, message
+ return constants.HTTP_FORBIDDEN, message
def tc_error(data):
message = '{} already exists as a testcase in project {}'\
.format(data.name, data.project_name)
- return HTTP_FORBIDDEN, message
+ return constants.HTTP_FORBIDDEN, message
miss_checks = ['name']
db_checks = [(self.db_projects, True, p_query, p_error),
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-from opnfv_testapi.resources.handlers import VersionHandler
-from opnfv_testapi.resources.testcase_handlers import TestcaseCLHandler, \
- TestcaseGURHandler
-from opnfv_testapi.resources.pod_handlers import PodCLHandler, PodGURHandler
-from opnfv_testapi.resources.project_handlers import ProjectCLHandler, \
- ProjectGURHandler
-from opnfv_testapi.resources.result_handlers import ResultsCLHandler, \
- ResultsGURHandler
-from opnfv_testapi.resources.scenario_handlers import ScenariosCLHandler
-from opnfv_testapi.resources.scenario_handlers import ScenarioGURHandler
+from opnfv_testapi.resources import handlers
+from opnfv_testapi.resources import pod_handlers
+from opnfv_testapi.resources import project_handlers
+from opnfv_testapi.resources import result_handlers
+from opnfv_testapi.resources import scenario_handlers
+from opnfv_testapi.resources import testcase_handlers
mappings = [
# GET /versions => GET API version
- (r"/versions", VersionHandler),
+ (r"/versions", handlers.VersionHandler),
# few examples:
# GET /api/v1/pods => Get all pods
# GET /api/v1/pods/1 => Get details on POD 1
- (r"/api/v1/pods", PodCLHandler),
- (r"/api/v1/pods/([^/]+)", PodGURHandler),
+ (r"/api/v1/pods", pod_handlers.PodCLHandler),
+ (r"/api/v1/pods/([^/]+)", pod_handlers.PodGURHandler),
# few examples:
# GET /projects
# GET /projects/yardstick
- (r"/api/v1/projects", ProjectCLHandler),
- (r"/api/v1/projects/([^/]+)", ProjectGURHandler),
+ (r"/api/v1/projects", project_handlers.ProjectCLHandler),
+ (r"/api/v1/projects/([^/]+)", project_handlers.ProjectGURHandler),
# few examples
# GET /projects/qtip/cases => Get cases for qtip
- (r"/api/v1/projects/([^/]+)/cases", TestcaseCLHandler),
- (r"/api/v1/projects/([^/]+)/cases/([^/]+)", TestcaseGURHandler),
+ (r"/api/v1/projects/([^/]+)/cases", testcase_handlers.TestcaseCLHandler),
+ (r"/api/v1/projects/([^/]+)/cases/([^/]+)",
+ testcase_handlers.TestcaseGURHandler),
# new path to avoid a long depth
# GET /results?project=functest&case=keystone.catalog&pod=1
# POST /results =>
# Push results with mandatory request payload parameters
# (project, case, and pod)
- (r"/api/v1/results", ResultsCLHandler),
- (r"/api/v1/results/([^/]+)", ResultsGURHandler),
+ (r"/api/v1/results", result_handlers.ResultsCLHandler),
+ (r"/api/v1/results/([^/]+)", result_handlers.ResultsGURHandler),
# scenarios
- (r"/api/v1/scenarios", ScenariosCLHandler),
- (r"/api/v1/scenarios/([^/]+)", ScenarioGURHandler),
+ (r"/api/v1/scenarios", scenario_handlers.ScenariosCLHandler),
+ (r"/api/v1/scenarios/([^/]+)", scenario_handlers.ScenarioGURHandler),
]
--- /dev/null
+# to add a new parameter in the config file,
+# the CONF object in config.ini must be updated
+[mongo]
+# URL of the mongo DB
+# Mongo auth url => mongodb://user1:pwd1@host1/?authSource=db1
+url = mongodb://127.0.0.1:27017/
+
+[api]
+# Listening port
+port = 8000
+# With debug_on set to true, error traces will be shown in HTTP responses
+debug = True
+authenticate = False
+
+[swagger]
+base_url = http://localhost:8000
--- /dev/null
+# to add a new parameter in the config file,
+# the CONF object in config.ini must be updated
+[mongo]
+# URL of the mongo DB
+# Mongo auth url => mongodb://user1:pwd1@host1/?authSource=db1
+url = mongodb://127.0.0.1:27017/
+dbname = test_results_collection
+
+[api]
+# Listening port
+port = 8000
+# With debug_on set to true, error traces will be shown in HTTP responses
+debug = True
+authenticate = False
+
+[swagger]
+base_url = http://localhost:8000
--- /dev/null
+# to add a new parameter in the config file,
+# the CONF object in config.ini must be updated
+[api]
+# Listening port
+port = 8000
+# With debug_on set to true, error traces will be shown in HTTP responses
+debug = True
+authenticate = False
+
+[swagger]
+base_url = http://localhost:8000
--- /dev/null
+# to add a new parameter in the config file,
+# the CONF object in config.ini must be updated
+[mongo]
+# URL of the mongo DB
+# Mongo auth url => mongodb://user1:pwd1@host1/?authSource=db1
+url = mongodb://127.0.0.1:27017/
+dbname = test_results_collection
+
+[api]
+# Listening port
+port = 8000
+# With debug_on set to true, error traces will be shown in HTTP responses
+debug = True
+authenticate = notboolean
+
+[swagger]
+base_url = http://localhost:8000
--- /dev/null
+# to add a new parameter in the config file,
+# the CONF object in config.ini must be updated
+[mongo]
+# URL of the mongo DB
+# Mongo auth url => mongodb://user1:pwd1@host1/?authSource=db1
+url = mongodb://127.0.0.1:27017/
+dbname = test_results_collection
+
+[api]
+# Listening port
+port = notint
+# With debug_on set to true, error traces will be shown in HTTP responses
+debug = True
+authenticate = False
+
+[swagger]
+base_url = http://localhost:8000
--- /dev/null
+import ConfigParser
+import os
+
+import pytest
+
+from opnfv_testapi.common import config
+
+
+@pytest.fixture()
+def config_dir():
+ return os.path.dirname(__file__)
+
+
+@pytest.mark.parametrize('exception, config_file, excepted', [
+ (config.ParseError, None, '/etc/opnfv_testapi/config.ini not found'),
+ (ConfigParser.NoSectionError, 'nosection.ini', 'No section:'),
+ (config.ParseError, 'noparam.ini', 'No parameter:'),
+ (config.ParseError, 'notint.ini', 'Not int:'),
+ (config.ParseError, 'notboolean.ini', 'Not boolean:')])
+def test_config_exceptions(config_dir, exception, config_file, excepted):
+ file = '{}/{}'.format(config_dir, config_file) if config_file else None
+ with pytest.raises(exception) as error:
+ config.APIConfig().parse(file)
+ assert excepted in str(error.value)
+
+
+def test_config_success():
+ config_dir = os.path.join(os.path.dirname(__file__),
+ '../../../../etc/config.ini')
+ conf = config.APIConfig().parse(config_dir)
+ assert conf.mongo_url == 'mongodb://127.0.0.1:27017/'
+ assert conf.mongo_dbname == 'test_results_collection'
+ assert conf.api_port == 8000
+ assert conf.api_debug_on is True
+ assert conf.api_authenticate_on is False
+ assert conf.swagger_base_url == 'http://localhost:8000'
testcases = MemDb('testcases')
results = MemDb('results')
scenarios = MemDb('scenarios')
+tokens = MemDb('tokens')
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
import json
+from os import path
-from tornado.web import Application
-from tornado.testing import AsyncHTTPTestCase
+import mock
+from tornado import testing
-from opnfv_testapi.router import url_mappings
-from opnfv_testapi.resources.models import CreateResponse
import fake_pymongo
+from opnfv_testapi.cmd import server
+from opnfv_testapi.resources import models
-class TestBase(AsyncHTTPTestCase):
+class TestBase(testing.AsyncHTTPTestCase):
headers = {'Content-Type': 'application/json; charset=UTF-8'}
def setUp(self):
+ self._patch_server()
self.basePath = ''
- self.create_res = CreateResponse
+ self.create_res = models.CreateResponse
self.get_res = None
self.list_res = None
self.update_res = None
self.addCleanup(self._clear)
super(TestBase, self).setUp()
+ def tearDown(self):
+ self.db_patcher.stop()
+
+ def _patch_server(self):
+ server.parse_config([
+ '--config-file',
+ path.join(path.dirname(__file__), 'common/normal.ini')
+ ])
+ self.db_patcher = mock.patch('opnfv_testapi.cmd.server.get_db',
+ self._fake_pymongo)
+ self.db_patcher.start()
+
+ @staticmethod
+ def _fake_pymongo():
+ return fake_pymongo
+
def get_app(self):
- return Application(
- url_mappings.mappings,
- db=fake_pymongo,
- debug=True,
- )
+ return server.make_app()
def create_d(self, *args):
return self.create(self.req_d, *args)
import unittest
from tornado import gen
-from tornado.testing import AsyncHTTPTestCase, gen_test
-from tornado.web import Application
+from tornado import testing
+from tornado import web
import fake_pymongo
-class MyTest(AsyncHTTPTestCase):
+class MyTest(testing.AsyncHTTPTestCase):
def setUp(self):
super(MyTest, self).setUp()
self.db = fake_pymongo
self.io_loop.run_sync(self.fixture_setup)
def get_app(self):
- return Application()
+ return web.Application()
@gen.coroutine
def fixture_setup(self):
yield self.db.pods.insert({'_id': '1', 'name': 'test1'})
yield self.db.pods.insert({'name': 'test2'})
- @gen_test
+ @testing.gen_test
def test_find_one(self):
user = yield self.db.pods.find_one({'name': 'test1'})
self.assertEqual(user, self.test1)
self.db.pods.remove()
- @gen_test
+ @testing.gen_test
def test_find(self):
cursor = self.db.pods.find()
names = []
names.append(ob.get('name'))
self.assertItemsEqual(names, ['test1', 'test2'])
- @gen_test
+ @testing.gen_test
def test_update(self):
yield self.db.pods.update({'_id': '1'}, {'name': 'new_test1'})
user = yield self.db.pods.find_one({'_id': '1'})
None,
check_keys=False)
- @gen_test
+ @testing.gen_test
def test_remove(self):
yield self.db.pods.remove({'_id': '1'})
user = yield self.db.pods.find_one({'_id': '1'})
def _insert_assert(self, docs, error=None, **kwargs):
self._db_assert('insert', error, docs, **kwargs)
- @gen_test
+ @testing.gen_test
def _db_assert(self, method, error, *args, **kwargs):
name_error = None
try:
##############################################################################
import unittest
-from test_base import TestBase
-from opnfv_testapi.resources.pod_models import PodCreateRequest, Pod, Pods
-from opnfv_testapi.common.constants import HTTP_OK, HTTP_BAD_REQUEST, \
- HTTP_FORBIDDEN, HTTP_NOT_FOUND
+from opnfv_testapi.common import constants
+from opnfv_testapi.resources import pod_models
+import test_base as base
-class TestPodBase(TestBase):
+class TestPodBase(base.TestBase):
def setUp(self):
super(TestPodBase, self).setUp()
- self.req_d = PodCreateRequest('zte-1', 'virtual',
- 'zte pod 1', 'ci-pod')
- self.req_e = PodCreateRequest('zte-2', 'metal', 'zte pod 2')
- self.get_res = Pod
- self.list_res = Pods
+ self.req_d = pod_models.PodCreateRequest('zte-1', 'virtual',
+ 'zte pod 1', 'ci-pod')
+ self.req_e = pod_models.PodCreateRequest('zte-2', 'metal', 'zte pod 2')
+ self.get_res = pod_models.Pod
+ self.list_res = pod_models.Pods
self.basePath = '/api/v1/pods'
def assert_get_body(self, pod, req=None):
class TestPodCreate(TestPodBase):
def test_withoutBody(self):
(code, body) = self.create()
- self.assertEqual(code, HTTP_BAD_REQUEST)
+ self.assertEqual(code, constants.HTTP_BAD_REQUEST)
def test_emptyName(self):
- req_empty = PodCreateRequest('')
+ req_empty = pod_models.PodCreateRequest('')
(code, body) = self.create(req_empty)
- self.assertEqual(code, HTTP_BAD_REQUEST)
+ self.assertEqual(code, constants.HTTP_BAD_REQUEST)
self.assertIn('name missing', body)
def test_noneName(self):
- req_none = PodCreateRequest(None)
+ req_none = pod_models.PodCreateRequest(None)
(code, body) = self.create(req_none)
- self.assertEqual(code, HTTP_BAD_REQUEST)
+ self.assertEqual(code, constants.HTTP_BAD_REQUEST)
self.assertIn('name missing', body)
def test_success(self):
code, body = self.create_d()
- self.assertEqual(code, HTTP_OK)
+ self.assertEqual(code, constants.HTTP_OK)
self.assert_create_body(body)
def test_alreadyExist(self):
self.create_d()
code, body = self.create_d()
- self.assertEqual(code, HTTP_FORBIDDEN)
+ self.assertEqual(code, constants.HTTP_FORBIDDEN)
self.assertIn('already exists', body)
class TestPodGet(TestPodBase):
def test_notExist(self):
code, body = self.get('notExist')
- self.assertEqual(code, HTTP_NOT_FOUND)
+ self.assertEqual(code, constants.HTTP_NOT_FOUND)
def test_getOne(self):
self.create_d()
##############################################################################
import unittest
-from test_base import TestBase
-from opnfv_testapi.resources.project_models import ProjectCreateRequest, \
- Project, Projects, ProjectUpdateRequest
-from opnfv_testapi.common.constants import HTTP_OK, HTTP_BAD_REQUEST, \
- HTTP_FORBIDDEN, HTTP_NOT_FOUND
+from opnfv_testapi.common import constants
+from opnfv_testapi.resources import project_models
+import test_base as base
-class TestProjectBase(TestBase):
+class TestProjectBase(base.TestBase):
def setUp(self):
super(TestProjectBase, self).setUp()
- self.req_d = ProjectCreateRequest('vping', 'vping-ssh test')
- self.req_e = ProjectCreateRequest('doctor', 'doctor test')
- self.get_res = Project
- self.list_res = Projects
- self.update_res = Project
+ self.req_d = project_models.ProjectCreateRequest('vping',
+ 'vping-ssh test')
+ self.req_e = project_models.ProjectCreateRequest('doctor',
+ 'doctor test')
+ self.get_res = project_models.Project
+ self.list_res = project_models.Projects
+ self.update_res = project_models.Project
self.basePath = '/api/v1/projects'
def assert_body(self, project, req=None):
class TestProjectCreate(TestProjectBase):
def test_withoutBody(self):
(code, body) = self.create()
- self.assertEqual(code, HTTP_BAD_REQUEST)
+ self.assertEqual(code, constants.HTTP_BAD_REQUEST)
def test_emptyName(self):
- req_empty = ProjectCreateRequest('')
+ req_empty = project_models.ProjectCreateRequest('')
(code, body) = self.create(req_empty)
- self.assertEqual(code, HTTP_BAD_REQUEST)
+ self.assertEqual(code, constants.HTTP_BAD_REQUEST)
self.assertIn('name missing', body)
def test_noneName(self):
- req_none = ProjectCreateRequest(None)
+ req_none = project_models.ProjectCreateRequest(None)
(code, body) = self.create(req_none)
- self.assertEqual(code, HTTP_BAD_REQUEST)
+ self.assertEqual(code, constants.HTTP_BAD_REQUEST)
self.assertIn('name missing', body)
def test_success(self):
(code, body) = self.create_d()
- self.assertEqual(code, HTTP_OK)
+ self.assertEqual(code, constants.HTTP_OK)
self.assert_create_body(body)
def test_alreadyExist(self):
self.create_d()
(code, body) = self.create_d()
- self.assertEqual(code, HTTP_FORBIDDEN)
+ self.assertEqual(code, constants.HTTP_FORBIDDEN)
self.assertIn('already exists', body)
class TestProjectGet(TestProjectBase):
def test_notExist(self):
code, body = self.get('notExist')
- self.assertEqual(code, HTTP_NOT_FOUND)
+ self.assertEqual(code, constants.HTTP_NOT_FOUND)
def test_getOne(self):
self.create_d()
code, body = self.get(self.req_d.name)
- self.assertEqual(code, HTTP_OK)
+ self.assertEqual(code, constants.HTTP_OK)
self.assert_body(body)
def test_list(self):
class TestProjectUpdate(TestProjectBase):
def test_withoutBody(self):
code, _ = self.update(None, 'noBody')
- self.assertEqual(code, HTTP_BAD_REQUEST)
+ self.assertEqual(code, constants.HTTP_BAD_REQUEST)
def test_notFound(self):
code, _ = self.update(self.req_e, 'notFound')
- self.assertEqual(code, HTTP_NOT_FOUND)
+ self.assertEqual(code, constants.HTTP_NOT_FOUND)
def test_newNameExist(self):
self.create_d()
self.create_e()
code, body = self.update(self.req_e, self.req_d.name)
- self.assertEqual(code, HTTP_FORBIDDEN)
+ self.assertEqual(code, constants.HTTP_FORBIDDEN)
self.assertIn("already exists", body)
def test_noUpdate(self):
self.create_d()
code, body = self.update(self.req_d, self.req_d.name)
- self.assertEqual(code, HTTP_FORBIDDEN)
+ self.assertEqual(code, constants.HTTP_FORBIDDEN)
self.assertIn("Nothing to update", body)
def test_success(self):
code, body = self.get(self.req_d.name)
_id = body._id
- req = ProjectUpdateRequest('newName', 'new description')
+ req = project_models.ProjectUpdateRequest('newName', 'new description')
code, body = self.update(req, self.req_d.name)
- self.assertEqual(code, HTTP_OK)
+ self.assertEqual(code, constants.HTTP_OK)
self.assertEqual(_id, body._id)
self.assert_body(body, req)
class TestProjectDelete(TestProjectBase):
def test_notFound(self):
code, body = self.delete('notFound')
- self.assertEqual(code, HTTP_NOT_FOUND)
+ self.assertEqual(code, constants.HTTP_NOT_FOUND)
def test_success(self):
self.create_d()
code, body = self.delete(self.req_d.name)
- self.assertEqual(code, HTTP_OK)
+ self.assertEqual(code, constants.HTTP_OK)
self.assertEqual(body, '')
code, body = self.get(self.req_d.name)
- self.assertEqual(code, HTTP_NOT_FOUND)
+ self.assertEqual(code, constants.HTTP_NOT_FOUND)
if __name__ == '__main__':
unittest.main()
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
import copy
-import unittest
from datetime import datetime, timedelta
+import unittest
-from opnfv_testapi.common.constants import HTTP_OK, HTTP_BAD_REQUEST, \
- HTTP_NOT_FOUND
-from opnfv_testapi.resources.pod_models import PodCreateRequest
-from opnfv_testapi.resources.project_models import ProjectCreateRequest
-from opnfv_testapi.resources.result_models import ResultCreateRequest, \
- TestResult, TestResults, ResultUpdateRequest, TI, TIHistory
-from opnfv_testapi.resources.testcase_models import TestcaseCreateRequest
-from test_base import TestBase
+from opnfv_testapi.common import constants
+from opnfv_testapi.resources import pod_models
+from opnfv_testapi.resources import project_models
+from opnfv_testapi.resources import result_models
+from opnfv_testapi.resources import testcase_models
+import test_base as base
class Details(object):
return t
-class TestResultBase(TestBase):
+class TestResultBase(base.TestBase):
def setUp(self):
self.pod = 'zte-pod1'
self.project = 'functest'
self.build_tag = 'v3.0'
self.scenario = 'odl-l2'
self.criteria = 'passed'
- self.trust_indicator = TI(0.7)
+ self.trust_indicator = result_models.TI(0.7)
self.start_date = "2016-05-23 07:16:09.477097"
self.stop_date = "2016-05-23 07:16:19.477097"
self.update_date = "2016-05-24 07:16:19.477097"
self.update_step = -0.05
super(TestResultBase, self).setUp()
self.details = Details(timestart='0', duration='9s', status='OK')
- self.req_d = ResultCreateRequest(pod_name=self.pod,
- project_name=self.project,
- case_name=self.case,
- installer=self.installer,
- version=self.version,
- start_date=self.start_date,
- stop_date=self.stop_date,
- details=self.details.format(),
- build_tag=self.build_tag,
- scenario=self.scenario,
- criteria=self.criteria,
- trust_indicator=self.trust_indicator)
- self.get_res = TestResult
- self.list_res = TestResults
- self.update_res = TestResult
+ self.req_d = result_models.ResultCreateRequest(
+ pod_name=self.pod,
+ project_name=self.project,
+ case_name=self.case,
+ installer=self.installer,
+ version=self.version,
+ start_date=self.start_date,
+ stop_date=self.stop_date,
+ details=self.details.format(),
+ build_tag=self.build_tag,
+ scenario=self.scenario,
+ criteria=self.criteria,
+ trust_indicator=self.trust_indicator)
+ self.get_res = result_models.TestResult
+ self.list_res = result_models.TestResults
+ self.update_res = result_models.TestResult
self.basePath = '/api/v1/results'
- self.req_pod = PodCreateRequest(self.pod, 'metal', 'zte pod 1')
- self.req_project = ProjectCreateRequest(self.project, 'vping test')
- self.req_testcase = TestcaseCreateRequest(self.case,
- '/cases/vping',
- 'vping-ssh test')
+ self.req_pod = pod_models.PodCreateRequest(
+ self.pod,
+ 'metal',
+ 'zte pod 1')
+ self.req_project = project_models.ProjectCreateRequest(
+ self.project,
+ 'vping test')
+ self.req_testcase = testcase_models.TestcaseCreateRequest(
+ self.case,
+ '/cases/vping',
+ 'vping-ssh test')
self.create_help('/api/v1/pods', self.req_pod)
self.create_help('/api/v1/projects', self.req_project)
self.create_help('/api/v1/projects/%s/cases',
self.project)
def assert_res(self, code, result, req=None):
- self.assertEqual(code, HTTP_OK)
+ self.assertEqual(code, constants.HTTP_OK)
if req is None:
req = self.req_d
self.assertEqual(result.pod_name, req.pod_name)
class TestResultCreate(TestResultBase):
def test_nobody(self):
(code, body) = self.create(None)
- self.assertEqual(code, HTTP_BAD_REQUEST)
+ self.assertEqual(code, constants.HTTP_BAD_REQUEST)
self.assertIn('no body', body)
def test_podNotProvided(self):
req = self.req_d
req.pod_name = None
(code, body) = self.create(req)
- self.assertEqual(code, HTTP_BAD_REQUEST)
+ self.assertEqual(code, constants.HTTP_BAD_REQUEST)
self.assertIn('pod_name missing', body)
def test_projectNotProvided(self):
req = self.req_d
req.project_name = None
(code, body) = self.create(req)
- self.assertEqual(code, HTTP_BAD_REQUEST)
+ self.assertEqual(code, constants.HTTP_BAD_REQUEST)
self.assertIn('project_name missing', body)
def test_testcaseNotProvided(self):
req = self.req_d
req.case_name = None
(code, body) = self.create(req)
- self.assertEqual(code, HTTP_BAD_REQUEST)
+ self.assertEqual(code, constants.HTTP_BAD_REQUEST)
self.assertIn('case_name missing', body)
def test_noPod(self):
req = self.req_d
req.pod_name = 'notExistPod'
(code, body) = self.create(req)
- self.assertEqual(code, HTTP_NOT_FOUND)
+ self.assertEqual(code, constants.HTTP_NOT_FOUND)
self.assertIn('Could not find pod', body)
def test_noProject(self):
req = self.req_d
req.project_name = 'notExistProject'
(code, body) = self.create(req)
- self.assertEqual(code, HTTP_NOT_FOUND)
+ self.assertEqual(code, constants.HTTP_NOT_FOUND)
self.assertIn('Could not find project', body)
def test_noTestcase(self):
req = self.req_d
req.case_name = 'notExistTestcase'
(code, body) = self.create(req)
- self.assertEqual(code, HTTP_NOT_FOUND)
+ self.assertEqual(code, constants.HTTP_NOT_FOUND)
self.assertIn('Could not find testcase', body)
def test_success(self):
(code, body) = self.create_d()
- self.assertEqual(code, HTTP_OK)
+ self.assertEqual(code, constants.HTTP_OK)
self.assert_href(body)
def test_key_with_doc(self):
req = copy.deepcopy(self.req_d)
req.details = {'1.name': 'dot_name'}
(code, body) = self.create(req)
- self.assertEqual(code, HTTP_OK)
+ self.assertEqual(code, constants.HTTP_OK)
self.assert_href(body)
def test_no_ti(self):
- req = ResultCreateRequest(pod_name=self.pod,
- project_name=self.project,
- case_name=self.case,
- installer=self.installer,
- version=self.version,
- start_date=self.start_date,
- stop_date=self.stop_date,
- details=self.details.format(),
- build_tag=self.build_tag,
- scenario=self.scenario,
- criteria=self.criteria)
+ req = result_models.ResultCreateRequest(pod_name=self.pod,
+ project_name=self.project,
+ case_name=self.case,
+ installer=self.installer,
+ version=self.version,
+ start_date=self.start_date,
+ stop_date=self.stop_date,
+ details=self.details.format(),
+ build_tag=self.build_tag,
+ scenario=self.scenario,
+ criteria=self.criteria)
(code, res) = self.create(req)
_id = res.href.split('/')[-1]
- self.assertEqual(code, HTTP_OK)
+ self.assertEqual(code, constants.HTTP_OK)
code, body = self.get(_id)
self.assert_res(code, body, req)
def test_queryPeriodNotInt(self):
code, body = self.query(self._set_query('period=a'))
- self.assertEqual(code, HTTP_BAD_REQUEST)
+ self.assertEqual(code, constants.HTTP_BAD_REQUEST)
self.assertIn('period must be int', body)
def test_queryPeriodFail(self):
def test_queryLastNotInt(self):
code, body = self.query(self._set_query('last=a'))
- self.assertEqual(code, HTTP_BAD_REQUEST)
+ self.assertEqual(code, constants.HTTP_BAD_REQUEST)
self.assertIn('last must be int', body)
def test_queryLast(self):
req = self._create_changed_date(**kwargs)
code, body = self.query(query)
if not found:
- self.assertEqual(code, HTTP_OK)
+ self.assertEqual(code, constants.HTTP_OK)
self.assertEqual(0, len(body.results))
else:
self.assertEqual(1, len(body.results))
new_ti = copy.deepcopy(self.trust_indicator)
new_ti.current += self.update_step
- new_ti.histories.append(TIHistory(self.update_date, self.update_step))
+ new_ti.histories.append(
+ result_models.TIHistory(self.update_date, self.update_step))
new_data = copy.deepcopy(self.req_d)
new_data.trust_indicator = new_ti
- update = ResultUpdateRequest(trust_indicator=new_ti)
+ update = result_models.ResultUpdateRequest(trust_indicator=new_ti)
code, body = self.update(update, _id)
self.assertEqual(_id, body._id)
self.assert_res(code, body, new_data)
from copy import deepcopy
+from datetime import datetime
import json
import os
-from datetime import datetime
-from opnfv_testapi.common.constants import HTTP_BAD_REQUEST
-from opnfv_testapi.common.constants import HTTP_FORBIDDEN
-from opnfv_testapi.common.constants import HTTP_OK
+from opnfv_testapi.common import constants
import opnfv_testapi.resources.scenario_models as models
-from test_testcase import TestBase
+import test_base as base
-class TestScenarioBase(TestBase):
+class TestScenarioBase(base.TestBase):
def setUp(self):
super(TestScenarioBase, self).setUp()
self.get_res = models.Scenario
return res.href.split('/')[-1]
def assert_res(self, code, scenario, req=None):
- self.assertEqual(code, HTTP_OK)
+ self.assertEqual(code, constants.HTTP_OK)
if req is None:
req = self.req_d
- scenario_dict = scenario.format_http()
- self.assertIsNotNone(scenario_dict['_id'])
- self.assertIsNotNone(scenario_dict['creation_date'])
- self.assertDictContainsSubset(req, scenario_dict)
+ self.assertIsNotNone(scenario._id)
+ self.assertIsNotNone(scenario.creation_date)
+
+        self.assertEqual(scenario, models.Scenario.from_dict(req))
@staticmethod
def _set_query(*args):
class TestScenarioCreate(TestScenarioBase):
def test_withoutBody(self):
(code, body) = self.create()
- self.assertEqual(code, HTTP_BAD_REQUEST)
+ self.assertEqual(code, constants.HTTP_BAD_REQUEST)
def test_emptyName(self):
req_empty = models.ScenarioCreateRequest('')
(code, body) = self.create(req_empty)
- self.assertEqual(code, HTTP_BAD_REQUEST)
+ self.assertEqual(code, constants.HTTP_BAD_REQUEST)
self.assertIn('name missing', body)
def test_noneName(self):
req_none = models.ScenarioCreateRequest(None)
(code, body) = self.create(req_none)
- self.assertEqual(code, HTTP_BAD_REQUEST)
+ self.assertEqual(code, constants.HTTP_BAD_REQUEST)
self.assertIn('name missing', body)
def test_success(self):
(code, body) = self.create_d()
- self.assertEqual(code, HTTP_OK)
+ self.assertEqual(code, constants.HTTP_OK)
self.assert_create_body(body)
def test_alreadyExist(self):
self.create_d()
(code, body) = self.create_d()
- self.assertEqual(code, HTTP_FORBIDDEN)
+ self.assertEqual(code, constants.HTTP_FORBIDDEN)
self.assertIn('already exists', body)
def _query_and_assert(self, query, found=True, reqs=None):
code, body = self.query(query)
if not found:
- self.assertEqual(code, HTTP_OK)
+ self.assertEqual(code, constants.HTTP_OK)
self.assertEqual(0, len(body.scenarios))
else:
self.assertEqual(len(reqs), len(body.scenarios))
def _update_and_assert(self, update_req, new_scenario, name=None):
code, _ = self.update(update_req, self.scenario)
- self.assertEqual(code, HTTP_OK)
+ self.assertEqual(code, constants.HTTP_OK)
self._get_and_assert(self._none_default(name, self.scenario),
new_scenario)
@staticmethod
def _none_default(check, default):
return check if check else default
+
+
+class TestScenarioDelete(TestScenarioBase):
+ def test_notFound(self):
+ code, body = self.delete('notFound')
+ self.assertEqual(code, constants.HTTP_NOT_FOUND)
+
+ def test_success(self):
+ scenario = self.create_return_name(self.req_d)
+ code, _ = self.delete(scenario)
+ self.assertEqual(code, constants.HTTP_OK)
+ code, _ = self.get(scenario)
+ self.assertEqual(code, constants.HTTP_NOT_FOUND)
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-import unittest
import copy
+import unittest
-from test_base import TestBase
-from opnfv_testapi.resources.testcase_models import TestcaseCreateRequest, \
- Testcase, Testcases, TestcaseUpdateRequest
-from opnfv_testapi.resources.project_models import ProjectCreateRequest
-from opnfv_testapi.common.constants import HTTP_OK, HTTP_BAD_REQUEST, \
- HTTP_FORBIDDEN, HTTP_NOT_FOUND
+from opnfv_testapi.common import constants
+from opnfv_testapi.resources import project_models
+from opnfv_testapi.resources import testcase_models
+import test_base as base
-class TestCaseBase(TestBase):
+class TestCaseBase(base.TestBase):
def setUp(self):
super(TestCaseBase, self).setUp()
- self.req_d = TestcaseCreateRequest('vping_1',
- '/cases/vping_1',
- 'vping-ssh test')
- self.req_e = TestcaseCreateRequest('doctor_1',
- '/cases/doctor_1',
- 'create doctor')
- self.update_d = TestcaseUpdateRequest('vping_1',
- 'vping-ssh test',
- 'functest')
- self.update_e = TestcaseUpdateRequest('doctor_1',
- 'create doctor',
- 'functest')
- self.get_res = Testcase
- self.list_res = Testcases
- self.update_res = Testcase
+ self.req_d = testcase_models.TestcaseCreateRequest('vping_1',
+ '/cases/vping_1',
+ 'vping-ssh test')
+ self.req_e = testcase_models.TestcaseCreateRequest('doctor_1',
+ '/cases/doctor_1',
+ 'create doctor')
+ self.update_d = testcase_models.TestcaseUpdateRequest('vping_1',
+ 'vping-ssh test',
+ 'functest')
+ self.update_e = testcase_models.TestcaseUpdateRequest('doctor_1',
+ 'create doctor',
+ 'functest')
+ self.get_res = testcase_models.Testcase
+ self.list_res = testcase_models.Testcases
+ self.update_res = testcase_models.Testcase
self.basePath = '/api/v1/projects/%s/cases'
self.create_project()
self.assertIsNotNone(new.creation_date)
def create_project(self):
- req_p = ProjectCreateRequest('functest', 'vping-ssh test')
+ req_p = project_models.ProjectCreateRequest('functest',
+ 'vping-ssh test')
self.create_help('/api/v1/projects', req_p)
self.project = req_p.name
class TestCaseCreate(TestCaseBase):
def test_noBody(self):
(code, body) = self.create(None, 'vping')
- self.assertEqual(code, HTTP_BAD_REQUEST)
+ self.assertEqual(code, constants.HTTP_BAD_REQUEST)
def test_noProject(self):
code, body = self.create(self.req_d, 'noProject')
- self.assertEqual(code, HTTP_FORBIDDEN)
+ self.assertEqual(code, constants.HTTP_FORBIDDEN)
self.assertIn('Could not find project', body)
def test_emptyName(self):
- req_empty = TestcaseCreateRequest('')
+ req_empty = testcase_models.TestcaseCreateRequest('')
(code, body) = self.create(req_empty, self.project)
- self.assertEqual(code, HTTP_BAD_REQUEST)
+ self.assertEqual(code, constants.HTTP_BAD_REQUEST)
self.assertIn('name missing', body)
def test_noneName(self):
- req_none = TestcaseCreateRequest(None)
+ req_none = testcase_models.TestcaseCreateRequest(None)
(code, body) = self.create(req_none, self.project)
- self.assertEqual(code, HTTP_BAD_REQUEST)
+ self.assertEqual(code, constants.HTTP_BAD_REQUEST)
self.assertIn('name missing', body)
def test_success(self):
code, body = self.create_d()
- self.assertEqual(code, HTTP_OK)
+ self.assertEqual(code, constants.HTTP_OK)
self.assert_create_body(body, None, self.project)
def test_alreadyExist(self):
self.create_d()
code, body = self.create_d()
- self.assertEqual(code, HTTP_FORBIDDEN)
+ self.assertEqual(code, constants.HTTP_FORBIDDEN)
self.assertIn('already exists', body)
class TestCaseGet(TestCaseBase):
def test_notExist(self):
code, body = self.get('notExist')
- self.assertEqual(code, HTTP_NOT_FOUND)
+ self.assertEqual(code, constants.HTTP_NOT_FOUND)
def test_getOne(self):
self.create_d()
code, body = self.get(self.req_d.name)
- self.assertEqual(code, HTTP_OK)
+ self.assertEqual(code, constants.HTTP_OK)
self.assert_body(body)
def test_list(self):
class TestCaseUpdate(TestCaseBase):
def test_noBody(self):
code, _ = self.update(case='noBody')
- self.assertEqual(code, HTTP_BAD_REQUEST)
+ self.assertEqual(code, constants.HTTP_BAD_REQUEST)
def test_notFound(self):
code, _ = self.update(self.update_e, 'notFound')
- self.assertEqual(code, HTTP_NOT_FOUND)
+ self.assertEqual(code, constants.HTTP_NOT_FOUND)
def test_newNameExist(self):
self.create_d()
self.create_e()
code, body = self.update(self.update_e, self.req_d.name)
- self.assertEqual(code, HTTP_FORBIDDEN)
+ self.assertEqual(code, constants.HTTP_FORBIDDEN)
self.assertIn("already exists", body)
def test_noUpdate(self):
self.create_d()
code, body = self.update(self.update_d, self.req_d.name)
- self.assertEqual(code, HTTP_FORBIDDEN)
+ self.assertEqual(code, constants.HTTP_FORBIDDEN)
self.assertIn("Nothing to update", body)
def test_success(self):
_id = body._id
code, body = self.update(self.update_e, self.req_d.name)
- self.assertEqual(code, HTTP_OK)
+ self.assertEqual(code, constants.HTTP_OK)
self.assertEqual(_id, body._id)
self.assert_update_body(self.req_d, body, self.update_e)
update = copy.deepcopy(self.update_d)
update.description = {'2. change': 'dollar change'}
code, body = self.update(update, self.req_d.name)
- self.assertEqual(code, HTTP_OK)
+ self.assertEqual(code, constants.HTTP_OK)
class TestCaseDelete(TestCaseBase):
def test_notFound(self):
code, body = self.delete('notFound')
- self.assertEqual(code, HTTP_NOT_FOUND)
+ self.assertEqual(code, constants.HTTP_NOT_FOUND)
def test_success(self):
self.create_d()
code, body = self.delete(self.req_d.name)
- self.assertEqual(code, HTTP_OK)
+ self.assertEqual(code, constants.HTTP_OK)
self.assertEqual(body, '')
code, body = self.get(self.req_d.name)
- self.assertEqual(code, HTTP_NOT_FOUND)
+ self.assertEqual(code, constants.HTTP_NOT_FOUND)
if __name__ == '__main__':
--- /dev/null
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+import unittest
+
+from tornado import web
+
+import fake_pymongo
+from opnfv_testapi.common import constants
+from opnfv_testapi.resources import project_models
+from opnfv_testapi.router import url_mappings
+import test_base as base
+
+
+class TestToken(base.TestBase):
+ def get_app(self):
+ return web.Application(
+ url_mappings.mappings,
+ db=fake_pymongo,
+ debug=True,
+ auth=True
+ )
+
+
+class TestTokenCreateProject(TestToken):
+ def setUp(self):
+ super(TestTokenCreateProject, self).setUp()
+ self.req_d = project_models.ProjectCreateRequest('vping')
+ fake_pymongo.tokens.insert({"access_token": "12345"})
+ self.basePath = '/api/v1/projects'
+
+ def test_projectCreateTokenInvalid(self):
+ self.headers['X-Auth-Token'] = '1234'
+ code, body = self.create_d()
+ self.assertEqual(code, constants.HTTP_FORBIDDEN)
+ self.assertIn('Invalid Token.', body)
+
+ def test_projectCreateTokenUnauthorized(self):
+ self.headers.pop('X-Auth-Token')
+ code, body = self.create_d()
+ self.assertEqual(code, constants.HTTP_UNAUTHORIZED)
+ self.assertIn('No Authentication Header.', body)
+
+ def test_projectCreateTokenSuccess(self):
+ self.headers['X-Auth-Token'] = '12345'
+ code, body = self.create_d()
+ self.assertEqual(code, constants.HTTP_OK)
+
+
+class TestTokenDeleteProject(TestToken):
+ def setUp(self):
+ super(TestTokenDeleteProject, self).setUp()
+ self.req_d = project_models.ProjectCreateRequest('vping')
+ fake_pymongo.tokens.insert({"access_token": "12345"})
+ self.basePath = '/api/v1/projects'
+
+    def test_projectDeleteTokenInvalid(self):
+ self.headers['X-Auth-Token'] = '12345'
+ self.create_d()
+ self.headers['X-Auth-Token'] = '1234'
+ code, body = self.delete(self.req_d.name)
+ self.assertEqual(code, constants.HTTP_FORBIDDEN)
+ self.assertIn('Invalid Token.', body)
+
+ def test_projectDeleteTokenUnauthorized(self):
+ self.headers['X-Auth-Token'] = '12345'
+ self.create_d()
+ self.headers.pop('X-Auth-Token')
+ code, body = self.delete(self.req_d.name)
+ self.assertEqual(code, constants.HTTP_UNAUTHORIZED)
+ self.assertIn('No Authentication Header.', body)
+
+ def test_projectDeleteTokenSuccess(self):
+ self.headers['X-Auth-Token'] = '12345'
+ self.create_d()
+ code, body = self.delete(self.req_d.name)
+ self.assertEqual(code, constants.HTTP_OK)
+
+
+class TestTokenUpdateProject(TestToken):
+ def setUp(self):
+ super(TestTokenUpdateProject, self).setUp()
+ self.req_d = project_models.ProjectCreateRequest('vping')
+ fake_pymongo.tokens.insert({"access_token": "12345"})
+ self.basePath = '/api/v1/projects'
+
+    def test_projectUpdateTokenInvalid(self):
+ self.headers['X-Auth-Token'] = '12345'
+ self.create_d()
+ code, body = self.get(self.req_d.name)
+ self.headers['X-Auth-Token'] = '1234'
+ req = project_models.ProjectUpdateRequest('newName', 'new description')
+ code, body = self.update(req, self.req_d.name)
+ self.assertEqual(code, constants.HTTP_FORBIDDEN)
+ self.assertIn('Invalid Token.', body)
+
+ def test_projectUpdateTokenUnauthorized(self):
+ self.headers['X-Auth-Token'] = '12345'
+ self.create_d()
+ code, body = self.get(self.req_d.name)
+ self.headers.pop('X-Auth-Token')
+ req = project_models.ProjectUpdateRequest('newName', 'new description')
+ code, body = self.update(req, self.req_d.name)
+ self.assertEqual(code, constants.HTTP_UNAUTHORIZED)
+ self.assertIn('No Authentication Header.', body)
+
+ def test_projectUpdateTokenSuccess(self):
+ self.headers['X-Auth-Token'] = '12345'
+ self.create_d()
+ code, body = self.get(self.req_d.name)
+ req = project_models.ProjectUpdateRequest('newName', 'new description')
+ code, body = self.update(req, self.req_d.name)
+ self.assertEqual(code, constants.HTTP_OK)
+
+if __name__ == '__main__':
+ unittest.main()
##############################################################################
import unittest
-from test_base import TestBase
-from opnfv_testapi.resources.models import Versions
+from opnfv_testapi.resources import models
+import test_base as base
-class TestVersionBase(TestBase):
+class TestVersionBase(base.TestBase):
def setUp(self):
super(TestVersionBase, self).setUp()
- self.list_res = Versions
+ self.list_res = models.Versions
self.basePath = '/versions'
pip install -r $SCRIPTDIR/requirements.txt
pip install coverage
pip install nose>=1.3.1
+pip install pytest
+pip install mock
find . -type f -name "*.pyc" -delete
--- /dev/null
+# The order of packages is significant, because pip processes them in the order
+# of appearance. Changing the order has an impact on the overall integration
+# process, which may cause wedges in the gate later.
+
+tox
+mock
+pytest
+pytest-cov
+coverage
+pykwalify
+pip_check_reqs
--- /dev/null
+# Tox (http://tox.testrun.org/) is a tool for running tests
+# in multiple virtualenvs. This configuration file will run the
+# test suite on all supported python versions. To use it, "pip install tox"
+# and then run "tox" from this directory.
+
+[tox]
+envlist = py27,pep8
+skipsdist = True
+sitepackages = True
+
+[testenv]
+usedevelop = True
+install_command = pip install -U {opts} {packages}
+deps =
+ -rrequirements.txt
+ -rtest-requirements.txt
+commands=
+ py.test \
+ --basetemp={envtmpdir} \
+ --cov \
+ {posargs}
+setenv=
+ HOME = {envtmpdir}
+ PYTHONPATH = {toxinidir}
+
+[testenv:pep8]
+deps = flake8
+commands = flake8 {toxinidir}
+
+[flake8]
+# H803 skipped on purpose per list discussion.
+# E123, E125 skipped as they are invalid PEP-8.
+# E501 (line too long) is skipped as well, per the ignore list below.
+
+show-source = True
+ignore = E123,E125,H803,E501
+builtins = _
+exclude = build,dist,doc,legacy,.eggs,.git,.tox,.venv,testapi_venv,venv
+
+[pytest]
+testpaths = opnfv_testapi/tests
+python_functions = test_*
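
The tox configuration above is what drives the TestAPI unit tests locally and in CI. A minimal usage sketch, assuming tox is installed and invoked from the directory holding this tox.ini; the environment names come from the envlist above, and anything after "--" is forwarded to py.test through {posargs} (the test-module path below is only illustrative):

    # run the default matrix: py27 unit tests plus the pep8 (flake8) check
    tox
    # run only the style check
    tox -e pep8
    # forward arguments to py.test via {posargs}, e.g. a single test module
    tox -e py27 -- opnfv_testapi/tests/unit/test_token.py
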
---
- hosts: "{{ host }}"
remote_user: "{{ user }}"
- become: yes
+ become: "yes"
become_method: sudo
vars:
user: "root"
---
- hosts: "{{ host }}"
remote_user: "{{ user }}"
- become: yes
+ become: "yes"
become_method: sudo
vars:
user: "root"
- name: remove temporary update directory
file:
path: "{{ update_path }}"
- state: absent
\ No newline at end of file
+ state: absent
if (Schema[tableName][key].type === 'text' && Schema[tableName][key].hasOwnProperty('fieldtype')) {
column = table[Schema[tableName][key].type](key, Schema[tableName][key].fieldtype);
}
+ else if (Schema[tableName][key].type === 'enum' && Schema[tableName][key].hasOwnProperty('values') && Schema[tableName][key].nullable === true) {
+ console.log(Schema[tableName][key].values);
+ column = table[Schema[tableName][key].type](key, Schema[tableName][key].values).nullable();
+ }
+ else if (Schema[tableName][key].type === 'enum' && Schema[tableName][key].hasOwnProperty('values')) {
+ console.log(Schema[tableName][key].values);
+ column = table[Schema[tableName][key].type](key, Schema[tableName][key].values).notNullable();
+ }
else if (Schema[tableName][key].type === 'string' && Schema[tableName][key].hasOwnProperty('maxlength')) {
column = table[Schema[tableName][key].type](key, Schema[tableName][key].maxlength);
}
lines_of_code: {type: 'integer', nullable: true, unsigned: true},
versions: {type: 'integer', nullable: true, unsigned: true},
no_of_developers: {type: 'integer', nullable: true, unsigned: true},
+ no_of_stars: {type: 'integer', nullable: true, unsigned: true},
+ license: {type: 'enum', nullable: false, values: ['MIT', 'GPL', 'GPL_V2', 'BSD', 'APACHE']},
+ opnfv_indicator: {type: 'enum', nullable: false, values: ['gold', 'silver', 'platinum']},
+ complexity: {type: 'enum', nullable: true, values: ['low', 'medium', 'high']},
+ activity: {type: 'enum', nullable: true, values: ['low', 'medium', 'high']},
+ last_updated: {type: 'dateTime', nullable: true},
},
tag: {
tag_id: {type: 'increments', nullable: false, primary: true},
- name: {type: 'string', maxlength: 150, nullable: false}
+ tag_name: {type: 'string', maxlength: 150, nullable: false}
},
vnf_tags: {
vnf_tag_id: {type: 'increments', nullable: false, primary: true},