From: Morgan Richomme
Date: Fri, 17 Mar 2017 08:13:10 +0000 (+0000)
Subject: Merge "Add vnf, tag create & Associate Plugins"
X-Git-Tag: danube.1.0~133
X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?a=commitdiff_plain;h=5a681846dd3c0ef0dd290384bbbcfe678683d923;hp=0ddfff32a990dda5a487229641aa189161f3d25a;p=releng.git

Merge "Add vnf, tag create & Associate Plugins"
---

diff --git a/.gitignore b/.gitignore
index 91ccabc4b..0aa7b8c09 100644
--- a/.gitignore
+++ b/.gitignore
@@ -27,3 +27,11 @@ wheels/
 venv/
 ENV/
 node_modules/
+.coverage
+=1.3.1
+cover/
+coverage.xml
+nosetests.xml
+testapi_venv/
+.cache
+.tox
diff --git a/docs/jenkins-job-builder/opnfv-jjb-usage.rst b/docs/jenkins-job-builder/opnfv-jjb-usage.rst
index fc968f841..52dbdebe5 100644
--- a/docs/jenkins-job-builder/opnfv-jjb-usage.rst
+++ b/docs/jenkins-job-builder/opnfv-jjb-usage.rst
@@ -70,6 +70,7 @@ reviewed and submitted.
 * jose.lausuch@ericsson.com
 * koffirodrigue@gmail.com
 * r-mibu@cq.jp.nec.com
+* tbramwell@linuxfoundation.org

 Or Add the group releng-contributors

@@ -81,4 +82,4 @@ in `releng-jobs.yaml`_.
 .. _releng-jobs.yaml:
    https://gerrit.opnfv.org/gerrit/gitweb?p=releng.git;a=blob;f=jjb/releng-jobs.yaml;
 .. _skip vote:
-   https://wiki.jenkins-ci.org/display/JENKINS/Gerrit+Trigger#GerritTrigger-SkipVote
\ No newline at end of file
+   https://wiki.jenkins-ci.org/display/JENKINS/Gerrit+Trigger#GerritTrigger-SkipVote
-e "${WORKSPACE}/build/lib" ]; then ln -s ${WORKSPACE}/lib ${WORKSPACE}/build/lib fi @@ -159,7 +159,7 @@ if [ "$OPNFV_CLEAN" == 'yes' ]; then else clean_opts='' fi - if [[ "$BUILD_DIRECTORY" == *verify* || "$BUILD_DIRECTORY" == *csit* ]]; then + if [[ "$BUILD_DIRECTORY" == *verify* || "$BUILD_DIRECTORY" == *promote* ]]; then sudo CONFIG=${CONFIG} LIB=${LIB} ./clean.sh ${clean_opts} else sudo CONFIG=${CONFIG} LIB=${LIB} opnfv-clean ${clean_opts} @@ -181,26 +181,19 @@ fi if [[ "$JOB_NAME" == *virtual* ]]; then # settings for virtual deployment - if [ "$IPV6_FLAG" == "True" ]; then - NETWORK_FILE="${NETWORK_SETTINGS_DIR}/network_settings_v6.yaml" - elif echo ${DEPLOY_SCENARIO} | grep fdio; then - NETWORK_FILE="${NETWORK_SETTINGS_DIR}/network_settings_vpp.yaml" - else - NETWORK_FILE="${NETWORK_SETTINGS_DIR}/network_settings.yaml" - fi DEPLOY_CMD="${DEPLOY_CMD} -v" + if [[ "${DEPLOY_SCENARIO}" =~ fdio|ovs ]]; then + DEPLOY_CMD="${DEPLOY_CMD} --virtual-default-ram 14 --virtual-compute-ram 8" + fi if [[ "$JOB_NAME" == *csit* ]]; then - DEPLOY_CMD="${DEPLOY_CMD} -e csit-environment.yaml --virtual-computes 2" + DEPLOY_CMD="${DEPLOY_CMD} -e csit-environment.yaml" + fi + if [[ "$JOB_NAME" == *promote* ]]; then + DEPLOY_CMD="${DEPLOY_CMD} --virtual-computes 2" fi else # settings for bare metal deployment - if [ "$IPV6_FLAG" == "True" ]; then - NETWORK_FILE="/root/network/network_settings_v6.yaml" - elif [[ "$JOB_NAME" == *master* ]]; then - NETWORK_FILE="/root/network/network_settings-master.yaml" - else - NETWORK_FILE="/root/network/network_settings.yaml" - fi + NETWORK_SETTINGS_DIR="/root/network" INVENTORY_FILE="/root/inventory/pod_settings.yaml" if ! sudo test -e "$INVENTORY_FILE"; then @@ -211,6 +204,14 @@ else DEPLOY_CMD="${DEPLOY_CMD} -i ${INVENTORY_FILE}" fi +if [ "$IPV6_FLAG" == "True" ]; then + NETWORK_FILE="${NETWORK_SETTINGS_DIR}/network_settings_v6.yaml" +elif echo ${DEPLOY_SCENARIO} | grep fdio; then + NETWORK_FILE="${NETWORK_SETTINGS_DIR}/network_settings_vpp.yaml" +else + NETWORK_FILE="${NETWORK_SETTINGS_DIR}/network_settings.yaml" +fi + # Check that network settings file exists if ! sudo test -e "$NETWORK_FILE"; then echo "ERROR: Required settings file missing: Network Settings file ${NETWORK_FILE}" diff --git a/jjb/apex/apex-snapshot-create.sh b/jjb/apex/apex-snapshot-create.sh index f146dd810..b2a39449e 100644 --- a/jjb/apex/apex-snapshot-create.sh +++ b/jjb/apex/apex-snapshot-create.sh @@ -13,6 +13,7 @@ set -o nounset set -o pipefail SSH_OPTIONS=(-o StrictHostKeyChecking=no -o GlobalKnownHostsFile=/dev/null -o UserKnownHostsFile=/dev/null -o LogLevel=error) +SNAP_TYPE=$(echo ${JOB_NAME} | sed -n 's/^apex-\(.\+\)-promote.*$/\1/p') echo "Creating Apex snapshot..." echo "-------------------------" @@ -81,17 +82,19 @@ sudo chown jenkins-ci:jenkins-ci * # tar up artifacts DATE=`date +%Y-%m-%d` -tar czf ../apex-csit-snap-${DATE}.tar.gz . +tar czf ../apex-${SNAP_TYPE}-snap-${DATE}.tar.gz . 
diff --git a/jjb/apex/apex-snapshot-create.sh b/jjb/apex/apex-snapshot-create.sh
index f146dd810..b2a39449e 100644
--- a/jjb/apex/apex-snapshot-create.sh
+++ b/jjb/apex/apex-snapshot-create.sh
@@ -13,6 +13,7 @@ set -o nounset
 set -o pipefail

 SSH_OPTIONS=(-o StrictHostKeyChecking=no -o GlobalKnownHostsFile=/dev/null -o UserKnownHostsFile=/dev/null -o LogLevel=error)
+SNAP_TYPE=$(echo ${JOB_NAME} | sed -n 's/^apex-\(.\+\)-promote.*$/\1/p')

 echo "Creating Apex snapshot..."
 echo "-------------------------"
@@ -81,17 +82,19 @@ sudo chown jenkins-ci:jenkins-ci *

 # tar up artifacts
 DATE=`date +%Y-%m-%d`
-tar czf ../apex-csit-snap-${DATE}.tar.gz .
+tar czf ../apex-${SNAP_TYPE}-snap-${DATE}.tar.gz .
 popd > /dev/null
 sudo rm -rf ${tmp_dir}
-echo "Snapshot saved as apex-csit-snap-${DATE}.tar.gz"
+echo "Snapshot saved as apex-${SNAP_TYPE}-snap-${DATE}.tar.gz"

 # update opnfv properties file
-curl -O -L http://$GS_URL/snapshot.properties
-sed -i '/^OPNFV_SNAP_URL=/{h;s#=.*#='${GS_URL}'/apex-csit-snap-'${DATE}'.tar.gz#};${x;/^$/{s##OPNFV_SNAP_URL='${GS_URL}'/apex-csit-snap-'${DATE}'.tar.gz#;H};x}' snapshot.properties
-snap_sha=$(sha512sum apex-csit-snap-${DATE}.tar.gz | cut -d' ' -f1)
-sed -i '/^OPNFV_SNAP_SHA512SUM=/{h;s/=.*/='${snap_sha}'/};${x;/^$/{s//OPNFV_SNAP_SHA512SUM='${snap_sha}'/;H};x}' snapshot.properties
-echo "OPNFV_SNAP_URL=$GS_URL/apex-csit-snap-${DATE}.tar.gz"
-echo "OPNFV_SNAP_SHA512SUM=$(sha512sum apex-csit-snap-${DATE}.tar.gz | cut -d' ' -f1)"
-echo "Updated properties file: "
-cat snapshot.properties
+if [ "$SNAP_TYPE" == 'csit' ]; then
+    curl -O -L http://$GS_URL/snapshot.properties
+    sed -i '/^OPNFV_SNAP_URL=/{h;s#=.*#='${GS_URL}'/apex-csit-snap-'${DATE}'.tar.gz#};${x;/^$/{s##OPNFV_SNAP_URL='${GS_URL}'/apex-csit-snap-'${DATE}'.tar.gz#;H};x}' snapshot.properties
+    snap_sha=$(sha512sum apex-csit-snap-${DATE}.tar.gz | cut -d' ' -f1)
+    sed -i '/^OPNFV_SNAP_SHA512SUM=/{h;s/=.*/='${snap_sha}'/};${x;/^$/{s//OPNFV_SNAP_SHA512SUM='${snap_sha}'/;H};x}' snapshot.properties
+    echo "OPNFV_SNAP_URL=$GS_URL/apex-csit-snap-${DATE}.tar.gz"
+    echo "OPNFV_SNAP_SHA512SUM=$(sha512sum apex-csit-snap-${DATE}.tar.gz | cut -d' ' -f1)"
+    echo "Updated properties file: "
+    cat snapshot.properties
+fi
diff --git a/jjb/apex/apex-upload-artifact.sh b/jjb/apex/apex-upload-artifact.sh
index ef8ad5329..c2de7d70d 100755
--- a/jjb/apex/apex-upload-artifact.sh
+++ b/jjb/apex/apex-upload-artifact.sh
@@ -51,13 +51,13 @@ echo "ISO Upload Complete!"
 RPM_INSTALL_PATH=$BUILD_DIRECTORY/noarch
 RPM_LIST=$RPM_INSTALL_PATH/$(basename $OPNFV_RPM_URL)
 VERSION_EXTENSION=$(echo $(basename $OPNFV_RPM_URL) | sed 's/opnfv-apex-//')
-for pkg in common undercloud onos; do
+for pkg in common undercloud; do # removed onos for danube
     RPM_LIST+=" ${RPM_INSTALL_PATH}/opnfv-apex-${pkg}-${VERSION_EXTENSION}"
 done
 SRPM_INSTALL_PATH=$BUILD_DIRECTORY
 SRPM_LIST=$SRPM_INSTALL_PATH/$(basename $OPNFV_SRPM_URL)
 VERSION_EXTENSION=$(echo $(basename $OPNFV_SRPM_URL) | sed 's/opnfv-apex-//')
-for pkg in common undercloud onos; do
+for pkg in common undercloud; do # removed onos for danube
     SRPM_LIST+=" ${SRPM_INSTALL_PATH}/opnfv-apex-${pkg}-${VERSION_EXTENSION}"
 done
 }
@@ -76,12 +76,15 @@ gsutil cp $WORKSPACE/opnfv.properties gs://$GS_URL/latest.properties > gsutil.latest.log
 uploadsnap () {
   # Uploads snapshot artifact and updated properties file
   echo "Uploading snapshot artifacts"
-  gsutil cp $WORKSPACE/apex-csit-snap-`date +%Y-%m-%d`.tar.gz gs://$GS_URL/ > gsutil.iso.log
-  gsutil cp $WORKSPACE/snapshot.properties gs://$GS_URL/snapshot.properties > gsutil.latest.log
+  SNAP_TYPE=$(echo ${JOB_NAME} | sed -n 's/^apex-\(.\+\)-promote.*$/\1/p')
+  gsutil cp $WORKSPACE/apex-${SNAP_TYPE}-snap-`date +%Y-%m-%d`.tar.gz gs://$GS_URL/ > gsutil.iso.log
+  if [ "$SNAP_TYPE" == 'csit' ]; then
+    gsutil cp $WORKSPACE/snapshot.properties gs://$GS_URL/snapshot.properties > gsutil.latest.log
+  fi
   echo "Upload complete for Snapshot"
 }

-if echo $WORKSPACE | grep csit > /dev/null; then
+if echo $WORKSPACE | grep promote > /dev/null; then
   uploadsnap
 elif gpg2 --list-keys | grep "opnfv-helpdesk@rt.linuxfoundation.org"; then
   echo "Signing Key avaliable"
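The two dense `sed -i` one-liners kept inside the new `csit` guard implement "replace the value if the key exists, otherwise append the key" for a properties file. The same idiom in a more readable form (a sketch; `set_prop` and the sample values are illustrative, not part of the patch):

```bash
#!/bin/bash
# Sketch of the update-or-append idiom the sed programs above perform.
set_prop() {
    local key=$1 value=$2 file=$3
    if grep -q "^${key}=" "$file"; then
        sed -i "s|^${key}=.*|${key}=${value}|" "$file"   # key present: replace value
    else
        echo "${key}=${value}" >> "$file"                # key absent: append line
    fi
}

touch snapshot.properties
set_prop OPNFV_SNAP_URL "example.org/apex-csit-snap-2017-03-17.tar.gz" snapshot.properties
set_prop OPNFV_SNAP_SHA512SUM "0123abcd" snapshot.properties
cat snapshot.properties
```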
diff --git a/jjb/apex/apex.yml b/jjb/apex/apex.yml
index ff9fbec14..60340e9ca 100644
--- a/jjb/apex/apex.yml
+++ b/jjb/apex/apex.yml
@@ -10,10 +10,8 @@
         - 'apex-deploy-virtual-{scenario}-{stream}'
         - 'apex-deploy-baremetal-{scenario}-{stream}'
         - 'apex-daily-{stream}'
-        - 'apex-daily-colorado'
-        - 'apex-build-colorado'
-        - 'apex-deploy-baremetal-os-odl_l2-fdio-ha-colorado'
         - 'apex-csit-promote-daily-{stream}'
+        - 'apex-fdio-promote-daily-{stream}'

 # stream:    branch with - in place of / (eg. stable-arno)
 # branch:    branch (eg. stable/arno)
@@ -24,6 +22,12 @@
             slave: 'lf-pod1'
             verify-slave: 'apex-verify-master'
             daily-slave: 'apex-daily-master'
+        - danube:
+            branch: 'stable/danube'
+            gs-pathname: '/danube'
+            slave: 'lf-pod1'
+            verify-slave: 'apex-verify-danube'
+            daily-slave: 'apex-daily-danube'

     project: 'apex'

@@ -34,6 +38,9 @@
         - 'os-nosdn-ovs-noha'
         - 'os-nosdn-fdio-noha'
         - 'os-nosdn-fdio-ha'
+        - 'os-nosdn-kvm-ha'
+        - 'os-nosdn-kvm-noha'
+        - 'os-odl_l2-fdio-noha'
         - 'os-odl_l2-fdio-ha'
         - 'os-odl_l2-netvirt_gbp_fdio-noha'
         - 'os-odl_l2-sfc-noha'
@@ -45,6 +52,7 @@
         - 'os-odl_l3-fdio_dvr-noha'
         - 'os-odl_l3-fdio_dvr-ha'
         - 'os-odl_l3-csit-noha'
+        - 'os-odl_l3-nofeature-noha'
         - 'os-onos-nofeature-ha'
         - 'gate'
@@ -176,21 +184,6 @@
     builders:
         - 'apex-unit-test'
         - 'apex-build'
-        - trigger-builds:
-          - project: 'apex-deploy-virtual-os-nosdn-nofeature-ha-{stream}'
-            predefined-parameters: |
-              BUILD_DIRECTORY=apex-verify-{stream}
-              OPNFV_CLEAN=yes
-            git-revision: false
-            block: true
-            same-node: true
-        - trigger-builds:
-          - project: 'functest-apex-{verify-slave}-suite-{stream}'
-            predefined-parameters: |
-              DEPLOY_SCENARIO=os-nosdn-nofeature-ha
-              FUNCTEST_SUITE_NAME=healthcheck
-            block: true
-            same-node: true
         - trigger-builds:
           - project: 'apex-deploy-virtual-os-odl_l3-nofeature-ha-{stream}'
             predefined-parameters: |
@@ -318,7 +311,7 @@
                 blocking-jobs:
                     - 'apex-daily.*'
                     - 'apex-verify.*'
-                    - 'apex-csit.*'
+                    - 'apex-.*-promote.*'

     builders:
         - trigger-builds:
@@ -383,7 +376,7 @@
     builders:
         - trigger-builds:
-          - project: 'apex-deploy-baremetal-os-odl_l3-nofeature-ha-{stream}'
+          - project: 'apex-deploy-baremetal-os-odl_l3-nofeature-noha-{stream}'
            predefined-parameters:
              OPNFV_CLEAN=yes
            git-revision: false
@@ -392,7 +385,7 @@
         - trigger-builds:
           - project: 'cperf-apex-intel-pod2-daily-{stream}'
             predefined-parameters:
-              DEPLOY_SCENARIO=os-odl_l3-nofeature-ha
+              DEPLOY_SCENARIO=os-odl_l3-nofeature-noha
             block: true
             same-node: true
@@ -570,7 +563,7 @@
                     - 'apex-deploy.*'
                     - 'apex-build.*'
                     - 'apex-runner.*'
-                    - 'apex-csit.*'
+                    - 'apex-.*-promote.*'

     triggers:
         - 'apex-{stream}'
@@ -612,6 +605,23 @@
               build-step-failure-threshold: 'never'
               failure-threshold: 'never'
               unstable-threshold: 'FAILURE'
+        # 1. dovetail only runs against master for now, not synced with A/B/C branches
+        # 2. here the stream means the SUT stream; the dovetail stream is defined in its own job
+        # 3. only the debug testsuite runs here (includes basic test cases,
+        #    i.e. one tempest smoke ipv6, two vping from functest)
+        # 4. not used for release criteria or compliance,
+        #    only to debug dovetail tool bugs with apex
+        - trigger-builds:
+          - project: 'dovetail-apex-{slave}-debug-{stream}'
+            current-parameters: false
+            predefined-parameters:
+              DEPLOY_SCENARIO=os-nosdn-nofeature-ha
+            block: true
+            same-node: true
+            block-thresholds:
+              build-step-failure-threshold: 'never'
+              failure-threshold: 'never'
+              unstable-threshold: 'FAILURE'
         - trigger-builds:
           - project: 'apex-deploy-baremetal-os-odl_l3-nofeature-ha-{stream}'
             predefined-parameters: |
@@ -702,104 +712,137 @@
               build-step-failure-threshold: 'never'
               failure-threshold: 'never'
               unstable-threshold: 'FAILURE'
-# Colorado Build
-- job-template:
-    name: 'apex-build-colorado'
-
-    # Job template for builds
-    #
-    # Required Variables:
-    #     stream:    branch with - in place of / (eg. stable)
-    #     branch:    branch (eg. stable)
-    node: 'apex-daily-colorado'
-
-    disabled: false
-
-    concurrent: true
-
-    parameters:
-        - project-parameter:
-            project: '{project}'
-            branch: 'stable/colorado'
-        - apex-parameter:
-            gs-pathname: '/colorado'
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: "Used for overriding the GIT URL coming from parameters macro."
-
-    scm:
-        - git-scm
-
-    properties:
-        - logrotate-default
-        - build-blocker:
-            use-build-blocker: true
-            block-level: 'NODE'
-            blocking-jobs:
-                - 'apex-deploy.*'
-        - throttle:
-            max-per-node: 1
-            max-total: 10
-            option: 'project'
-
-    builders:
-        - 'apex-build'
-        - 'apex-upload-artifact'
-
-
-# Colorado FDIO Deploy
-- job-template:
-    name: 'apex-deploy-baremetal-os-odl_l2-fdio-ha-colorado'
-
-    # Job template for baremetal deployment
-    #
-    # Required Variables:
-    #     stream:    branch with - in place of / (eg. stable)
-    #     branch:    branch (eg. stable)
-    node: 'lf-pod1'
-
-    disabled: false
-
-    scm:
-        - git-scm
-
-    parameters:
-        - project-parameter:
-            project: '{project}'
-            branch: 'stable/colorado'
-        - apex-parameter:
-            gs-pathname: '/colorado'
-        - string:
-            name: DEPLOY_SCENARIO
-            default: 'os-odl_l2-fdio-ha'
-            description: "Scenario to deploy with."
-
-    properties:
-        - logrotate-default
-        - build-blocker:
-            use-build-blocker: true
-            block-level: 'NODE'
-            blocking-jobs:
-                - 'apex-verify.*'
-                - 'apex-deploy.*'
-                - 'apex-build.*'
-
-
-    builders:
-        - 'apex-deploy'
-        - 'apex-workspace-cleanup'
+        - trigger-builds:
+          - project: 'apex-deploy-baremetal-os-odl_l2-fdio-noha-{stream}'
+            predefined-parameters: |
+              BUILD_DIRECTORY=apex-build-{stream}/.build
+              OPNFV_CLEAN=yes
+            git-revision: true
+            same-node: true
+            block-thresholds:
+              build-step-failure-threshold: 'never'
+            block: true
+        - trigger-builds:
+          - project: 'functest-apex-{daily-slave}-daily-{stream}'
+            predefined-parameters:
+              DEPLOY_SCENARIO=os-odl_l2-fdio-noha
+            block: true
+            same-node: true
+            block-thresholds:
+              build-step-failure-threshold: 'never'
+              failure-threshold: 'never'
+              unstable-threshold: 'FAILURE'
+        - trigger-builds:
+          - project: 'yardstick-apex-{slave}-daily-{stream}'
+            predefined-parameters:
+              DEPLOY_SCENARIO=os-odl_l2-fdio-noha
+            block: true
+            same-node: true
+            block-thresholds:
+              build-step-failure-threshold: 'never'
+              failure-threshold: 'never'
+              unstable-threshold: 'FAILURE'
+        - trigger-builds:
+          - project: 'apex-deploy-baremetal-os-odl_l2-fdio-ha-{stream}'
+            predefined-parameters: |
+              BUILD_DIRECTORY=apex-build-{stream}/.build
+              OPNFV_CLEAN=yes
+            git-revision: true
+            same-node: true
+            block-thresholds:
+              build-step-failure-threshold: 'never'
+            block: true
+        - trigger-builds:
+          - project: 'functest-apex-{daily-slave}-daily-{stream}'
+            predefined-parameters:
+              DEPLOY_SCENARIO=os-odl_l2-fdio-ha
+            block: true
+            same-node: true
+            block-thresholds:
+              build-step-failure-threshold: 'never'
+              failure-threshold: 'never'
+              unstable-threshold: 'FAILURE'
+        - trigger-builds:
+          - project: 'yardstick-apex-{slave}-daily-{stream}'
+            predefined-parameters:
+              DEPLOY_SCENARIO=os-odl_l2-fdio-ha
+            block: true
+            same-node: true
+            block-thresholds:
+              build-step-failure-threshold: 'never'
+              failure-threshold: 'never'
+              unstable-threshold: 'FAILURE'
+        - trigger-builds:
+          - project: 'apex-deploy-baremetal-os-nosdn-kvm-ha-{stream}'
+            predefined-parameters: |
+              BUILD_DIRECTORY=apex-build-{stream}/.build
+              OPNFV_CLEAN=yes
+            git-revision: true
+            same-node: true
+            block-thresholds:
+              build-step-failure-threshold: 'never'
+            block: true
+        - trigger-builds:
+          - project: 'functest-apex-{daily-slave}-daily-{stream}'
+            predefined-parameters:
+              DEPLOY_SCENARIO=os-nosdn-kvm-ha
+            block: true
+            same-node: true
+            block-thresholds:
+              build-step-failure-threshold: 'never'
+              failure-threshold: 'never'
+              unstable-threshold: 'FAILURE'
+        - trigger-builds:
+          - project: 'yardstick-apex-{slave}-daily-{stream}'
+            predefined-parameters:
+              DEPLOY_SCENARIO=os-nosdn-kvm-ha
+            block: true
+            same-node: true
+            block-thresholds:
+              build-step-failure-threshold: 'never'
+              failure-threshold: 'never'
+              unstable-threshold: 'FAILURE'
+        - trigger-builds:
+          - project: 'apex-deploy-baremetal-os-odl_l3-fdio-noha-{stream}'
+            predefined-parameters: |
+              BUILD_DIRECTORY=apex-build-{stream}/.build
+              OPNFV_CLEAN=yes
+            git-revision: true
+            same-node: true
+            block-thresholds:
+              build-step-failure-threshold: 'never'
+            block: true
+        - trigger-builds:
+          - project: 'functest-apex-{daily-slave}-daily-{stream}'
+            predefined-parameters:
+              DEPLOY_SCENARIO=os-odl_l3-fdio-noha
+            block: true
+            same-node: true
+            block-thresholds:
+              build-step-failure-threshold: 'never'
+              failure-threshold: 'never'
+              unstable-threshold: 'FAILURE'
+        - trigger-builds:
+          - project: 'yardstick-apex-{slave}-daily-{stream}'
+            predefined-parameters:
+              DEPLOY_SCENARIO=os-odl_l3-fdio-noha
+            block: true
+            same-node: true
+            block-thresholds:
+              build-step-failure-threshold: 'never'
+              failure-threshold: 'never'
+              unstable-threshold: 'FAILURE'

-# Colorado FDIO Daily
+# CSIT promote
 - job-template:
-    name: 'apex-daily-colorado'
+    name: 'apex-csit-promote-daily-{stream}'

-    # Job template for daily build
+    # Job template for promoting CSIT Snapshots
     #
     # Required Variables:
     #     stream:    branch with - in place of / (eg. stable)
     #     branch:    branch (eg. stable)
-    node: 'apex-daily-colorado'
+    node: '{daily-slave}'

     disabled: false

@@ -809,12 +852,11 @@
     parameters:
         - project-parameter:
             project: '{project}'
-            branch: 'stable/colorado'
+            branch: '{branch}'
         - apex-parameter:
-            gs-pathname: '/colorado'
+            gs-pathname: '{gs-pathname}'

     properties:
-        - logrotate-default
         - build-blocker:
             use-build-blocker: true
             block-level: 'NODE'
@@ -823,41 +865,36 @@
                 - 'apex-deploy.*'
                 - 'apex-build.*'
                 - 'apex-runner.*'
+                - 'apex-daily.*'

     triggers:
-        - 'apex-colorado'
+        - timed: '0 12 * * 0'

     builders:
+        - 'apex-build'
         - trigger-builds:
-          - project: 'apex-build-colorado'
-            git-revision: true
-            current-parameters: true
-            same-node: true
-            block: true
-        - trigger-builds:
-          - project: 'apex-deploy-baremetal-os-odl_l2-fdio-ha-colorado'
+          - project: 'apex-deploy-virtual-os-odl_l3-csit-noha-{stream}'
             predefined-parameters: |
-              BUILD_DIRECTORY=apex-build-colorado/.build
+              BUILD_DIRECTORY=apex-csit-promote-daily-{stream}
               OPNFV_CLEAN=yes
-            git-revision: true
-            same-node: true
-            block-thresholds:
-              build-step-failure-threshold: 'never'
+            git-revision: false
             block: true
+            same-node: true
         - trigger-builds:
-          - project: 'functest-apex-apex-daily-colorado-daily-colorado'
-            predefined-parameters:
-              DEPLOY_SCENARIO=os-odl_l2-fdio-ha
+          - project: 'functest-apex-{daily-slave}-suite-{stream}'
+            predefined-parameters: |
+              DEPLOY_SCENARIO=os-odl_l3-nofeature-noha
+              FUNCTEST_SUITE_NAME=tempest_smoke_serial
             block: true
             same-node: true
-            block-thresholds:
-              build-step-failure-threshold: 'never'
-              failure-threshold: 'never'
-              unstable-threshold: 'FAILURE'
+        - shell:
+            !include-raw-escape: ./apex-snapshot-create.sh
+        - shell:
+            !include-raw-escape: ./apex-upload-artifact.sh

-# CSIT promote
+# FDIO promote
 - job-template:
-    name: 'apex-csit-promote-daily-{stream}'
+    name: 'apex-fdio-promote-daily-{stream}'

     # Job template for promoting CSIT Snapshots
     #
@@ -889,26 +926,16 @@
                 - 'apex-runner.*'
                 - 'apex-daily.*'

-    triggers:
-        - timed: '0 12 * * 0'
-
     builders:
         - 'apex-build'
         - trigger-builds:
-          - project: 'apex-deploy-virtual-os-odl_l3-csit-noha-{stream}'
+          - project: 'apex-deploy-virtual-os-odl_l2-fdio-noha-{stream}'
             predefined-parameters: |
-              BUILD_DIRECTORY=apex-csit-promote-daily-{stream}
+              BUILD_DIRECTORY=apex-fdio-promote-daily-{stream}
               OPNFV_CLEAN=yes
             git-revision: false
             block: true
             same-node: true
-        - trigger-builds:
-          - project: 'functest-apex-{daily-slave}-suite-{stream}'
-            predefined-parameters: |
-              DEPLOY_SCENARIO=os-odl_l3-nofeature-noha
-              FUNCTEST_SUITE_NAME=tempest_smoke_serial
-            block: true
-            same-node: true
         - shell:
             !include-raw-escape: ./apex-snapshot-create.sh
         - shell:
@@ -1020,9 +1047,9 @@
 - trigger:
     name: 'apex-master'
     triggers:
-        - timed: '0 3 * * *'
+        - timed: '0 3 * * 7'
 - trigger:
-    name: 'apex-colorado'
+    name: 'apex-danube'
     triggers:
         - timed: '0 12 * * *'
 - trigger:
diff --git a/jjb/armband/armband-ci-jobs.yml b/jjb/armband/armband-ci-jobs.yml
index 4cb58d916..ddcbbd038 100644
--- a/jjb/armband/armband-ci-jobs.yml
+++ b/jjb/armband/armband-ci-jobs.yml
@@ -15,7 +15,7 @@
         stream: danube
         branch: 'stable/{stream}'
         gs-pathname: '/{stream}'
-        disabled: true
+        disabled: false
 #--------------------------------
 # POD, INSTALLER, AND BRANCH MAPPING
 #--------------------------------
@@ -56,6 +56,10 @@
             slave-label: arm-pod3
             installer: fuel
             <<: *danube
+        - arm-pod3-2:
+            slave-label: arm-pod3-2
+            installer: fuel
+            <<: *danube
 #--------------------------------
 # master
 #--------------------------------
@@ -67,6 +71,10 @@
             slave-label: arm-pod3
             installer: fuel
             <<: *master
+        - arm-pod3-2:
+            slave-label: arm-pod3-2
+            installer: fuel
+            <<: *master
 #--------------------------------
 # scenarios
 #--------------------------------
@@ -262,23 +270,23 @@
 - trigger:
     name: 'fuel-os-odl_l2-nofeature-ha-armband-baremetal-master-trigger'
     triggers:
-        - timed: '0 8 * * 1,3,5,7'
+        - timed: '0 0 * * 1'
 - trigger:
     name: 'fuel-os-nosdn-nofeature-ha-armband-baremetal-master-trigger'
     triggers:
-        - timed: '0 16 * * 2,7'
+        - timed: '0 0 * * 2'
 - trigger:
     name: 'fuel-os-odl_l3-nofeature-ha-armband-baremetal-master-trigger'
     triggers:
-        - timed: '0 16 * * 1,4,6'
+        - timed: '0 0 * * 3'
 - trigger:
     name: 'fuel-os-odl_l2-bgpvpn-ha-armband-baremetal-master-trigger'
     triggers:
-        - timed: '0 8 * * 2,4,6'
+        - timed: '0 0 * * 4'
 - trigger:
     name: 'fuel-os-odl_l2-nofeature-noha-armband-baremetal-master-trigger'
     triggers:
-        - timed: '0 16 * * 3,5'
+        - timed: '0 0 * * 5'
 - trigger:
     name: 'fuel-os-odl_l2-sfc-ha-armband-baremetal-master-trigger'
     triggers:
@@ -294,31 +302,31 @@
 - trigger:
     name: 'fuel-os-odl_l2-nofeature-ha-armband-baremetal-danube-trigger'
     triggers:
-        - timed: '0 0 * * 1'
+        - timed: '0 8 * * 1,4'
 - trigger:
     name: 'fuel-os-nosdn-nofeature-ha-armband-baremetal-danube-trigger'
     triggers:
-        - timed: '0 0 * * 2'
+        - timed: '0 16 * * 1,4'
 - trigger:
     name: 'fuel-os-odl_l2-bgpvpn-ha-armband-baremetal-danube-trigger'
     triggers:
-        - timed: '0 0 * * 4'
+        - timed: '0 8 * * 2,5'
 - trigger:
     name: 'fuel-os-odl_l3-nofeature-ha-armband-baremetal-danube-trigger'
     triggers:
-        - timed: '0 0 * * 3'
+        - timed: '0 16 * * 2,5'
 - trigger:
     name: 'fuel-os-odl_l2-nofeature-noha-armband-baremetal-danube-trigger'
     triggers:
-        - timed: '0 0 * * 5'
+        - timed: '0 8 * * 3,6'
 - trigger:
     name: 'fuel-os-odl_l2-sfc-ha-armband-baremetal-danube-trigger'
     triggers:
-        - timed: ''
+        - timed: '0 16 * * 3,6'
 - trigger:
     name: 'fuel-os-odl_l2-sfc-noha-armband-baremetal-danube-trigger'
     triggers:
-        - timed: ''
+        - timed: '0 8,16 * * 7'
 #---------------------------------------------------------------
 # Enea Armband CI Virtual Triggers running against master branch
 #---------------------------------------------------------------
@@ -505,3 +513,65 @@
     name: 'fuel-os-odl_l2-sfc-noha-arm-pod3-danube-trigger'
     triggers:
         - timed: ''
+#--------------------------------------------------------------------------
+# Enea Armband POD 3 Triggers running against master branch (aarch64 slave)
+#--------------------------------------------------------------------------
+- trigger:
+    name: 'fuel-os-odl_l2-nofeature-ha-arm-pod3-2-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-nofeature-ha-arm-pod3-2-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l3-nofeature-ha-arm-pod3-2-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-bgpvpn-ha-arm-pod3-2-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-nofeature-noha-arm-pod3-2-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-sfc-ha-arm-pod3-2-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-sfc-noha-arm-pod3-2-master-trigger'
+    triggers:
+        - timed: ''
+#--------------------------------------------------------------------------
+# Enea Armband POD 3 Triggers running against danube branch (aarch64 slave)
+#--------------------------------------------------------------------------
+- trigger:
+    name: 'fuel-os-odl_l2-nofeature-ha-arm-pod3-2-danube-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-nofeature-ha-arm-pod3-2-danube-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l3-nofeature-ha-arm-pod3-2-danube-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-bgpvpn-ha-arm-pod3-2-danube-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-nofeature-noha-arm-pod3-2-danube-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-sfc-ha-arm-pod3-2-danube-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-sfc-noha-arm-pod3-2-danube-trigger'
+    triggers:
+        - timed: ''
diff --git a/jjb/armband/armband-deploy.sh b/jjb/armband/armband-deploy.sh
index adabfcaeb..2e5aa3924 100755
--- a/jjb/armband/armband-deploy.sh
+++ b/jjb/armband/armband-deploy.sh
@@ -32,6 +32,14 @@ fi

 # set deployment parameters
 export TMPDIR=${WORKSPACE}/tmpdir
+
+# arm-pod3-2 is an aarch64 jenkins slave for the same POD as the
+# x86 jenkins slave arm-pod3; therefore we use the same pod name
+# to deploy the pod from both jenkins slaves
+if [[ "${NODE_NAME}" == "arm-pod3-2" ]]; then
+    NODE_NAME="arm-pod3"
+fi
+
 LAB_NAME=${NODE_NAME/-*}
 POD_NAME=${NODE_NAME/*-}
diff --git a/jjb/armband/armband-project-jobs.yml b/jjb/armband/armband-project-jobs.yml
index fd37c5af6..f6840a008 100644
--- a/jjb/armband/armband-project-jobs.yml
+++ b/jjb/armband/armband-project-jobs.yml
@@ -20,7 +20,7 @@
         - danube:
             branch: 'stable/{stream}'
             gs-pathname: '/{stream}'
-            disabled: true
+            disabled: false

 - job-template:
     name: 'armband-{installer}-build-daily-{stream}'
diff --git a/jjb/armband/armband-verify-jobs.yml b/jjb/armband/armband-verify-jobs.yml
index 3486718e4..567456d9b 100644
--- a/jjb/armband/armband-verify-jobs.yml
+++ b/jjb/armband/armband-verify-jobs.yml
@@ -86,6 +86,7 @@
                       pattern: 'ci/**'
                     - compare-type: ANT
                       pattern: 'patches/**'
+                disable-strict-forbidden-file-verification: 'true'
                 forbidden-file-paths:
                     - compare-type: ANT
                       pattern: 'docs/**'
diff --git a/jjb/availability/availability.yml b/jjb/availability/availability.yml
index 9cb7f8899..302bbc996 100644
--- a/jjb/availability/availability.yml
+++ b/jjb/availability/availability.yml
@@ -53,6 +53,7 @@
                 branches:
                     - branch-compare-type: 'ANT'
                       branch-pattern: '**/{branch}'
+                disable-strict-forbidden-file-verification: 'true'
                 forbidden-file-paths:
                     - compare-type: ANT
                       pattern: 'docs/**|.gitignore'
diff --git a/jjb/barometer/barometer.yml b/jjb/barometer/barometer.yml
index 6a17e1706..9ec30e809 100644
--- a/jjb/barometer/barometer.yml
+++ b/jjb/barometer/barometer.yml
@@ -20,7 +20,7 @@
         - danube:
             branch: 'stable/{stream}'
             gs-pathname: '/{stream}'
-            disabled: true
+            disabled: false

 - job-template:
     name: 'barometer-verify-{stream}'
@@ -55,6 +55,7 @@
                 branches:
                     - branch-compare-type: 'ANT'
                       branch-pattern: '**/{branch}'
+                disable-strict-forbidden-file-verification: 'true'
                 forbidden-file-paths:
                     - compare-type: ANT
                       pattern: 'docs/**|.gitignore'
@@ -105,6 +106,7 @@
                 branches:
                     - branch-compare-type: 'ANT'
                       branch-pattern: '**/{branch}'
+                disable-strict-forbidden-file-verification: 'true'
                 forbidden-file-paths:
                     - compare-type: ANT
                       pattern: 'docs/**'
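The `LAB_NAME=${NODE_NAME/-*}` / `POD_NAME=${NODE_NAME/*-}` pair used in armband-deploy.sh (and, below, in daisy-deploy.sh) relies on bash pattern substitution, which deletes the longest glob match. A quick standalone sketch with an illustrative node name:

```bash
#!/bin/bash
# Sketch of the pattern-substitution idiom from armband-deploy.sh.
# ${var/pattern} removes the first (longest) match of a glob pattern.
NODE_NAME="arm-pod3"            # illustrative value
LAB_NAME=${NODE_NAME/-*}        # strip from the first '-' to the end -> "arm"
POD_NAME=${NODE_NAME/*-}        # strip everything up to the last '-' -> "pod3"
echo "LAB_NAME=$LAB_NAME POD_NAME=$POD_NAME"
```

This is also why the patch remaps `arm-pod3-2` to `arm-pod3` before the split: with two dashes in the name, the expansions would otherwise yield a pod name of `2`.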
diff --git a/jjb/bottlenecks/bottlenecks-ci-jobs.yml b/jjb/bottlenecks/bottlenecks-ci-jobs.yml
index a9ccd6977..2779e316b 100644
--- a/jjb/bottlenecks/bottlenecks-ci-jobs.yml
+++ b/jjb/bottlenecks/bottlenecks-ci-jobs.yml
@@ -72,7 +72,8 @@
     suite:
         - 'rubbos'
         - 'vstf'
-        - 'posca'
+        - 'posca_stress_traffic'
+        - 'posca_stress_ping'

     jobs:
         - 'bottlenecks-{installer}-{suite}-{pod}-daily-{stream}'
@@ -137,65 +138,14 @@
 - builder:
     name: bottlenecks-env-cleanup
     builders:
-        - shell: |
-            #!/bin/bash
-            set -e
-            [[ $GERRIT_REFSPEC_DEBUG == true ]] && redirect="/dev/stdout" || redirect="/dev/null"
-
-            echo "Bottlenecks: docker containers/images cleaning up"
-            if [[ ! -z $(docker ps -a | grep opnfv/bottlenecks) ]]; then
-                echo "removing existing opnfv/bottlenecks containers"
-                docker ps -a | grep opnfv/bottlenecks | awk '{print $1}' | xargs docker rm -f >$redirect
-            fi
-
-            if [[ ! -z $(docker images | grep opnfv/bottlenecks) ]]; then
-                echo "Bottlenecks: docker images to remove:"
-                docker images | head -1 && docker images | grep opnfv/bottlenecks
-                image_tags=($(docker images | grep opnfv/bottlenecks | awk '{print $2}'))
-                for tag in "${image_tags[@]}"; do
-                    echo "Removing docker image opnfv/bottlenecks:$tag..."
-                    docker rmi opnfv/bottlenecks:$tag >$redirect
-                done
-            fi
+        - shell:
+            !include-raw: ./bottlenecks-cleanup.sh

 - builder:
     name: bottlenecks-run-suite
     builders:
-        - shell: |
-            #!/bin/bash
-            set -e
-            [[ $GERRIT_REFSPEC_DEBUG == true ]] && redirect="/dev/stdout" || redirect="/dev/null"
-
-            echo "Bottlenecks: to pull image opnfv/bottlenecks:${DOCKER_TAG}"
-            docker pull opnfv/bottlenecks:$DOCKER_TAG >${redirect}
-
-            echo "Bottlenecks: docker start running"
-            opts="--privileged=true -id"
-            envs="-e INSTALLER_TYPE=${INSTALLER_TYPE} -e INSTALLER_IP=${INSTALLER_IP} \
-                  -e NODE_NAME=${NODE_NAME} -e EXTERNAL_NET=${EXTERNAL_NETWORK} \
-                  -e BOTTLENECKS_BRANCH=${BOTTLENECKS_BRANCH} -e GERRIT_REFSPEC_DEBUG=${GERRIT_REFSPEC_DEBUG} \
-                  -e BOTTLENECKS_DB_TARGET=${BOTTLENECKS_DB_TARGET} -e PACKAGE_URL=${PACKAGE_URL}"
-            cmd="sudo docker run ${opts} ${envs} opnfv/bottlenecks:${DOCKER_TAG} /bin/bash"
-            echo "Bottlenecks: docker cmd running ${cmd}"
-            ${cmd} >${redirect}
-
-            echo "Bottlenecks: obtain docker id"
-            container_id=$(docker ps | grep "opnfv/bottlenecks:${DOCKER_TAG}" | awk '{print $1}' | head -1)
-            if [ -z ${container_id} ]; then
-                echo "Cannot find opnfv/bottlenecks container ID ${container_id}. Please check if it exists."
-                docker ps -a
-                exit 1
-            fi
-
-            echo "Bottlenecks: to prepare openstack environment"
-            prepare_env="${REPO_DIR}/ci/prepare_env.sh"
-            echo "Bottlenecks: docker cmd running: ${prepare_env}"
-            sudo docker exec ${container_id} ${prepare_env}
-
-            echo "Bottlenecks: to run testsuite ${SUITE_NAME}"
-            run_testsuite="${REPO_DIR}/run_tests.sh -s ${SUITE_NAME}"
-            echo "Bottlenecks: docker cmd running: ${run_testsuite}"
-            sudo docker exec ${container_id} ${run_testsuite}
+        - shell:
+            !include-raw: ./bottlenecks-run-suite.sh

 ####################
 # parameter macros
diff --git a/jjb/bottlenecks/bottlenecks-cleanup.sh b/jjb/bottlenecks/bottlenecks-cleanup.sh
new file mode 100644
index 000000000..0ba042318
--- /dev/null
+++ b/jjb/bottlenecks/bottlenecks-cleanup.sh
@@ -0,0 +1,111 @@
+#!/bin/bash
+set -e
+[[ $GERRIT_REFSPEC_DEBUG == true ]] && redirect="/dev/stdout" || redirect="/dev/null"
+
+BOTTLENECKS_IMAGE=opnfv/bottlenecks
+echo "Bottlenecks: docker containers/images cleaning up"
+
+dangling_images=($(docker images -f "dangling=true" | grep $BOTTLENECKS_IMAGE | awk '{print $3}'))
+if [[ -n $dangling_images ]]; then
+    echo "Removing $BOTTLENECKS_IMAGE: dangling images and their containers"
+    docker images | head -1 && docker images | grep $dangling_images
+    for image_id in "${dangling_images[@]}"; do
+        echo "Bottlenecks: Removing dangling image $image_id"
+        docker rmi -f $image_id >${redirect}
+    done
+fi
+
+for image_id in "${dangling_images[@]}"; do
+    if [[ -n $(docker ps -a | grep $image_id) ]]; then
+        echo "Bottlenecks: Removing containers associated with dangling image: $image_id"
+        docker ps -a | head -1 && docker ps -a | grep $image_id
+        docker ps -a | grep $image_id | awk '{print $1}' | xargs docker rm -f >${redirect}
+    fi
+done
+
+if [[ -n $(docker ps -a | grep $BOTTLENECKS_IMAGE) ]]; then
+    echo "Removing existing $BOTTLENECKS_IMAGE containers"
+    docker ps -a | grep $BOTTLENECKS_IMAGE | awk '{print $1}' | xargs docker rm -f >$redirect
+fi
+
+if [[ -n $(docker images | grep $BOTTLENECKS_IMAGE) ]]; then
+    echo "Bottlenecks: docker images to remove:"
+    docker images | head -1 && docker images | grep $BOTTLENECKS_IMAGE
+    image_tags=($(docker images | grep $BOTTLENECKS_IMAGE | awk '{print $2}'))
+    for tag in "${image_tags[@]}"; do
+        echo "Removing docker image $BOTTLENECKS_IMAGE:$tag..."
+        docker rmi $BOTTLENECKS_IMAGE:$tag >$redirect
+    done
+fi
+
+echo "Yardstick: docker containers/images cleaning up"
+YARDSTICK_IMAGE=opnfv/yardstick
+
+dangling_images=($(docker images -f "dangling=true" | grep $YARDSTICK_IMAGE | awk '{print $3}'))
+if [[ -n $dangling_images ]]; then
+    echo "Removing $YARDSTICK_IMAGE: dangling images and their containers"
+    docker images | head -1 && docker images | grep $dangling_images
+    for image_id in "${dangling_images[@]}"; do
+        echo "Yardstick: Removing dangling image $image_id"
+        docker rmi -f $image_id >${redirect}
+    done
+fi
+
+for image_id in "${dangling_images[@]}"; do
+    if [[ -n $(docker ps -a | grep $image_id) ]]; then
+        echo "Yardstick: Removing containers associated with dangling image: $image_id"
+        docker ps -a | head -1 && docker ps -a | grep $image_id
+        docker ps -a | grep $image_id | awk '{print $1}' | xargs docker rm -f >${redirect}
+    fi
+done
+
+if [[ -n $(docker ps -a | grep $YARDSTICK_IMAGE) ]]; then
+    echo "Removing existing $YARDSTICK_IMAGE containers"
+    docker ps -a | grep $YARDSTICK_IMAGE | awk '{print $1}' | xargs docker rm -f >$redirect
+fi
+
+if [[ -n $(docker images | grep $YARDSTICK_IMAGE) ]]; then
+    echo "Yardstick: docker images to remove:"
+    docker images | head -1 && docker images | grep $YARDSTICK_IMAGE
+    image_tags=($(docker images | grep $YARDSTICK_IMAGE | awk '{print $2}'))
+    for tag in "${image_tags[@]}"; do
+        echo "Removing docker image $YARDSTICK_IMAGE:$tag..."
+        docker rmi $YARDSTICK_IMAGE:$tag >$redirect
+    done
+fi
+
+echo "InfluxDB: docker containers/images cleaning up"
+INFLUXDB_IMAGE=tutum/influxdb
+
+dangling_images=($(docker images -f "dangling=true" | grep $INFLUXDB_IMAGE | awk '{print $3}'))
+if [[ -n $dangling_images ]]; then
+    echo "Removing $INFLUXDB_IMAGE: dangling images and their containers"
+    docker images | head -1 && docker images | grep $dangling_images
+    for image_id in "${dangling_images[@]}"; do
+        echo "InfluxDB: Removing dangling image $image_id"
+        docker rmi -f $image_id >${redirect}
+    done
+fi
+
+for image_id in "${dangling_images[@]}"; do
+    if [[ -n $(docker ps -a | grep $image_id) ]]; then
+        echo "InfluxDB: Removing containers associated with dangling image: $image_id"
+        docker ps -a | head -1 && docker ps -a | grep $image_id
+        docker ps -a | grep $image_id | awk '{print $1}' | xargs docker rm -f >${redirect}
+    fi
+done
+
+if [[ -n $(docker ps -a | grep $INFLUXDB_IMAGE) ]]; then
+    echo "Removing existing $INFLUXDB_IMAGE containers"
+    docker ps -a | grep $INFLUXDB_IMAGE | awk '{print $1}' | xargs docker rm -f >$redirect
+fi
+
+if [[ -n $(docker images | grep $INFLUXDB_IMAGE) ]]; then
+    echo "InfluxDB: docker images to remove:"
+    docker images | head -1 && docker images | grep $INFLUXDB_IMAGE
+    image_tags=($(docker images | grep $INFLUXDB_IMAGE | awk '{print $2}'))
+    for tag in "${image_tags[@]}"; do
+        echo "Removing docker image $INFLUXDB_IMAGE:$tag..."
+        docker rmi $INFLUXDB_IMAGE:$tag >$redirect
+    done
+fi
\ No newline at end of file
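The new cleanup script runs the identical dangling-image/container/tag sweep three times, once per image. A condensed sketch of that repeated block as one reusable function, mirroring the original's grep-based matching (the function name and loop are illustrative, not part of the patch):

```bash
#!/bin/bash
# Hypothetical refactor of the repeated cleanup stanza into one function.
cleanup_image() {
    local image=$1
    # remove dangling layers matched for this image, plus containers using them
    local dangling=($(docker images -f "dangling=true" | grep "$image" | awk '{print $3}'))
    for id in "${dangling[@]}"; do
        docker ps -a | grep "$id" | awk '{print $1}' | xargs -r docker rm -f
        docker rmi -f "$id"
    done
    # remove remaining containers, then every tag of the image
    docker ps -a | grep "$image" | awk '{print $1}' | xargs -r docker rm -f
    for tag in $(docker images | grep "$image" | awk '{print $2}'); do
        docker rmi "$image:$tag"
    done
}

for img in opnfv/bottlenecks opnfv/yardstick tutum/influxdb; do
    cleanup_image "$img"
done
```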
diff --git a/jjb/bottlenecks/bottlenecks-project-jobs.yml b/jjb/bottlenecks/bottlenecks-project-jobs.yml
index 12ea31b13..a0abb9331 100644
--- a/jjb/bottlenecks/bottlenecks-project-jobs.yml
+++ b/jjb/bottlenecks/bottlenecks-project-jobs.yml
@@ -29,7 +29,8 @@
     suite:
         - 'rubbos'
         - 'vstf'
-        - 'posca'
+        - 'posca_stress_traffic'
+        - 'posca_stress_ping'

 ################################
 # job templates
diff --git a/jjb/bottlenecks/bottlenecks-run-suite.sh b/jjb/bottlenecks/bottlenecks-run-suite.sh
new file mode 100644
index 000000000..f69463fc2
--- /dev/null
+++ b/jjb/bottlenecks/bottlenecks-run-suite.sh
@@ -0,0 +1,65 @@
+#!/bin/bash
+#set -e
+[[ $GERRIT_REFSPEC_DEBUG == true ]] && redirect="/dev/stdout" || redirect="/dev/null"
+BOTTLENECKS_IMAGE=opnfv/bottlenecks
+
+if [[ $SUITE_NAME == rubbos || $SUITE_NAME == vstf ]]; then
+    echo "Bottlenecks: to pull image $BOTTLENECKS_IMAGE:${DOCKER_TAG}"
+    docker pull $BOTTLENECKS_IMAGE:$DOCKER_TAG >${redirect}
+
+    echo "Bottlenecks: docker start running"
+    opts="--privileged=true -id"
+    envs="-e INSTALLER_TYPE=${INSTALLER_TYPE} -e INSTALLER_IP=${INSTALLER_IP} \
+          -e NODE_NAME=${NODE_NAME} -e EXTERNAL_NET=${EXTERNAL_NETWORK} \
+          -e BOTTLENECKS_BRANCH=${BOTTLENECKS_BRANCH} -e GERRIT_REFSPEC_DEBUG=${GERRIT_REFSPEC_DEBUG} \
+          -e BOTTLENECKS_DB_TARGET=${BOTTLENECKS_DB_TARGET} -e PACKAGE_URL=${PACKAGE_URL}"
+    cmd="sudo docker run ${opts} ${envs} $BOTTLENECKS_IMAGE:${DOCKER_TAG} /bin/bash"
+    echo "Bottlenecks: docker cmd running ${cmd}"
+    ${cmd} >${redirect}
+
+    echo "Bottlenecks: obtain docker id"
+    container_id=$(docker ps | grep "$BOTTLENECKS_IMAGE:${DOCKER_TAG}" | awk '{print $1}' | head -1)
+    if [ -z ${container_id} ]; then
+        echo "Cannot find $BOTTLENECKS_IMAGE container ID ${container_id}. Please check if it exists."
+        docker ps -a
+        exit 1
+    fi
+
+    echo "Bottlenecks: to prepare openstack environment"
+    prepare_env="${REPO_DIR}/ci/prepare_env.sh"
+    echo "Bottlenecks: docker cmd running: ${prepare_env}"
+    sudo docker exec ${container_id} ${prepare_env}
+
+    echo "Bottlenecks: to run testsuite ${SUITE_NAME}"
+    run_testsuite="${REPO_DIR}/run_tests.sh -s ${SUITE_NAME}"
+    echo "Bottlenecks: docker cmd running: ${run_testsuite}"
+    sudo docker exec ${container_id} ${run_testsuite}
+else
+    echo "Bottlenecks: installing POSCA docker-compose"
+    if [ -f /usr/local/bin/docker-compose ]; then
+        rm -f /usr/local/bin/docker-compose
+    fi
+    curl -L https://github.com/docker/compose/releases/download/1.11.0/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose
+    chmod +x /usr/local/bin/docker-compose
+
+    echo "Bottlenecks: composing up dockers"
+    cd $WORKSPACE
+    docker-compose -f $WORKSPACE/docker/bottleneck-compose/docker-compose.yml up -d
+
+    echo "Bottlenecks: running traffic stress/factor testing in posca testsuite"
+    POSCA_SCRIPT=/home/opnfv/bottlenecks/testsuites/posca
+    if [[ $SUITE_NAME == posca_stress_traffic ]]; then
+        TEST_CASE=posca_factor_system_bandwidth
+        echo "Bottlenecks: pulling tutum/influxdb for yardstick"
+        docker pull tutum/influxdb:0.13
+        sleep 5
+        docker exec bottleneckcompose_bottlenecks_1 python ${POSCA_SCRIPT}/run_posca.py testcase $TEST_CASE
+    elif [[ $SUITE_NAME == posca_stress_ping ]]; then
+        TEST_CASE=posca_stress_ping
+        sleep 5
+        docker exec bottleneckcompose_bottlenecks_1 python ${POSCA_SCRIPT}/run_posca.py testcase $TEST_CASE
+    fi
+
+    echo "Bottlenecks: cleaning up docker-compose images and dockers"
+    docker-compose -f $WORKSPACE/docker/bottleneck-compose/docker-compose.yml down --rmi all
+fi
\ No newline at end of file
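The suite names map to POSCA test cases through an if/elif ladder in the new script; a `case` dispatch expresses the same mapping more compactly. A sketch only, using the same names as the script:

```bash
#!/bin/bash
# Equivalent dispatch for the SUITE_NAME -> TEST_CASE mapping above.
SUITE_NAME=${SUITE_NAME:-posca_stress_ping}
case $SUITE_NAME in
    posca_stress_traffic) TEST_CASE=posca_factor_system_bandwidth ;;
    posca_stress_ping)    TEST_CASE=posca_stress_ping ;;
    *) echo "unknown suite: $SUITE_NAME" >&2; exit 1 ;;
esac
echo "would run: run_posca.py testcase $TEST_CASE"
```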
diff --git a/jjb/compass4nfv/compass-ci-jobs.yml b/jjb/compass4nfv/compass-ci-jobs.yml
index 7258e89f4..237f8944d 100644
--- a/jjb/compass4nfv/compass-ci-jobs.yml
+++ b/jjb/compass4nfv/compass-ci-jobs.yml
@@ -15,7 +15,7 @@
         stream: danube
         branch: 'stable/{stream}'
         gs-pathname: '/{stream}'
-        disabled: true
+        disabled: false
 #--------------------------------
 # POD, INSTALLER, AND BRANCH MAPPING
 #--------------------------------
@@ -32,11 +32,11 @@
             <<: *master
         - baremetal:
             slave-label: compass-baremetal
-            os-version: 'trusty'
+            os-version: 'xenial'
             <<: *danube
         - virtual:
             slave-label: compass-virtual
-            os-version: 'trusty'
+            os-version: 'xenial'
             <<: *danube
 #--------------------------------
 # master
@@ -71,6 +71,10 @@
         - 'os-nosdn-kvm-ha':
             disabled: false
             auto-trigger-name: 'compass-{scenario}-{pod}-{stream}-trigger'
+        - 'os-nosdn-openo-ha':
+            disabled: false
+            auto-trigger-name: 'compass-{scenario}-{pod}-{stream}-trigger'
+

     jobs:
         - 'compass-{scenario}-{pod}-daily-{stream}'
@@ -246,6 +250,10 @@
     name: 'compass-os-nosdn-nofeature-ha-baremetal-centos-master-trigger'
     triggers:
         - timed: '0 19 * * *'
+- trigger:
+    name: 'compass-os-nosdn-openo-ha-baremetal-centos-master-trigger'
+    triggers:
+        - timed: ''
 - trigger:
     name: 'compass-os-odl_l2-nofeature-ha-baremetal-centos-master-trigger'
     triggers:
@@ -279,6 +287,10 @@
     name: 'compass-os-nosdn-nofeature-ha-baremetal-master-trigger'
     triggers:
         - timed: '0 2 * * *'
+- trigger:
+    name: 'compass-os-nosdn-openo-ha-baremetal-master-trigger'
+    triggers:
+        - timed: '0 3 * * *'
 - trigger:
     name: 'compass-os-odl_l2-nofeature-ha-baremetal-master-trigger'
     triggers:
@@ -311,23 +323,27 @@
 - trigger:
     name: 'compass-os-nosdn-nofeature-ha-baremetal-danube-trigger'
     triggers:
-        - timed: ''
+        - timed: '0 9 * * *'
+- trigger:
+    name: 'compass-os-nosdn-openo-ha-baremetal-danube-trigger'
+    triggers:
+        - timed: '0 13 * * *'
 - trigger:
     name: 'compass-os-odl_l2-nofeature-ha-baremetal-danube-trigger'
     triggers:
-        - timed: ''
+        - timed: '0 17 * * *'
 - trigger:
     name: 'compass-os-odl_l3-nofeature-ha-baremetal-danube-trigger'
     triggers:
-        - timed: ''
+        - timed: '0 21 * * *'
 - trigger:
     name: 'compass-os-onos-nofeature-ha-baremetal-danube-trigger'
     triggers:
-        - timed: ''
+        - timed: '0 1 * * *'
 - trigger:
     name: 'compass-os-ocl-nofeature-ha-baremetal-danube-trigger'
     triggers:
-        - timed: ''
+        - timed: '0 5 * * *'
 - trigger:
     name: 'compass-os-onos-sfc-ha-baremetal-danube-trigger'
     triggers:
@@ -345,6 +361,10 @@
     name: 'compass-os-nosdn-nofeature-ha-virtual-master-trigger'
     triggers:
         - timed: '0 21 * * *'
+- trigger:
+    name: 'compass-os-nosdn-openo-ha-virtual-master-trigger'
+    triggers:
+        - timed: '0 22 * * *'
 - trigger:
     name: 'compass-os-odl_l2-nofeature-ha-virtual-master-trigger'
     triggers:
@@ -378,6 +398,10 @@
     name: 'compass-os-nosdn-nofeature-ha-virtual-danube-trigger'
     triggers:
         - timed: '0 21 * * *'
+- trigger:
+    name: 'compass-os-nosdn-openo-ha-virtual-danube-trigger'
+    triggers:
+        - timed: '0 22 * * *'
 - trigger:
     name: 'compass-os-odl_l2-nofeature-ha-virtual-danube-trigger'
     triggers:
diff --git a/jjb/compass4nfv/compass-deploy.sh b/jjb/compass4nfv/compass-deploy.sh
index 6696e4b3d..534e17e62 100644
--- a/jjb/compass4nfv/compass-deploy.sh
+++ b/jjb/compass4nfv/compass-deploy.sh
@@ -34,6 +34,8 @@ if [[ "${DEPLOY_SCENARIO}" =~ "-ocl" ]]; then
     export NETWORK_CONF_FILE=network_ocl.yml
 elif [[ "${DEPLOY_SCENARIO}" =~ "-onos" ]]; then
     export NETWORK_CONF_FILE=network_onos.yml
+elif [[ "${DEPLOY_SCENARIO}" =~ "-openo" ]]; then
+    export NETWORK_CONF_FILE=network_openo.yml
 else
     export NETWORK_CONF_FILE=network.yml
 fi
diff --git a/jjb/compass4nfv/compass-project-jobs.yml b/jjb/compass4nfv/compass-project-jobs.yml
index ed0fee6c0..9b13e693a 100644
--- a/jjb/compass4nfv/compass-project-jobs.yml
+++ b/jjb/compass4nfv/compass-project-jobs.yml
@@ -16,7 +16,7 @@
         - danube:
             branch: 'stable/{stream}'
             gs-pathname: '/{stream}'
-            disabled: true
+            disabled: false

     jobs:
         - 'compass-build-iso-{stream}'
diff --git a/jjb/compass4nfv/compass-verify-jobs.yml b/jjb/compass4nfv/compass-verify-jobs.yml
index d58138088..e625c686a 100644
--- a/jjb/compass4nfv/compass-verify-jobs.yml
+++ b/jjb/compass4nfv/compass-verify-jobs.yml
@@ -37,6 +37,7 @@
 #####################################
     jobs:
         - 'compass-verify-{distro}-{stream}'
+        - 'compass-verify-k8-{distro}-{stream}'
         - 'compass-verify-{phase}-{distro}-{stream}'
 #####################################
 # job templates
 #####################################
@@ -95,6 +96,7 @@
                 file-paths:
                     - compare-type: ANT
                       pattern: '**/*'
+                disable-strict-forbidden-file-verification: 'true'
                 forbidden-file-paths:
                     - compare-type: ANT
                       pattern: 'docs/**'
@@ -125,6 +127,11 @@
                 node-parameters: true
                 kill-phase-on: FAILURE
                 abort-all-job: true
+              - name: 'opnfv-yamllint-verify-{stream}'
+                current-parameters: true
+                node-parameters: true
+                kill-phase-on: FAILURE
+                abort-all-job: true
         - multijob:
             name: deploy-virtual
             condition: SUCCESSFUL
@@ -155,6 +162,106 @@
                 kill-phase-on: NEVER
                 abort-all-job: true

+- job-template:
+    name: 'compass-verify-k8-{distro}-{stream}'
+
+    project-type: multijob
+
+    disabled: '{obj:disabled}'
+
+    concurrent: true
+
+    properties:
+        - logrotate-default
+        - throttle:
+            enabled: true
+            max-total: 4
+            max-per-node: 1
+            option: 'project'
+        - build-blocker:
+            use-build-blocker: true
+            blocking-jobs:
+                - 'compass-verify-[^-]*-[^-]*'
+                - 'compass-os-.*?-virtual-daily-.*?'
+            block-level: 'NODE'
+
+    scm:
+        - git-scm-gerrit
+
+    wrappers:
+        - ssh-agent-wrapper
+        - timeout:
+            timeout: 120
+            fail: true
+
+    triggers:
+        - gerrit:
+            server-name: 'gerrit.opnfv.org'
+            trigger-on:
+                - comment-added-contains-event:
+                    comment-contains-value: 'check k8'
+                - comment-added-contains-event:
+                    comment-contains-value: 'verify k8'
+                - comment-added-contains-event:
+                    comment-contains-value: 'check kubernetes'
+                - comment-added-contains-event:
+                    comment-contains-value: 'verify kubernetes'
+            projects:
+                - project-compare-type: 'ANT'
+                  project-pattern: '{project}'
+                  branches:
+                      - branch-compare-type: 'ANT'
+                        branch-pattern: '**/{branch}'
+                  file-paths:
+                      - compare-type: ANT
+                        pattern: '**/*'
+                  forbidden-file-paths:
+                      - compare-type: ANT
+                        pattern: 'docs/**'
+            readable-message: true
+
+    parameters:
+        - project-parameter:
+            project: '{project}'
+            branch: '{branch}'
+        - 'compass-virtual-defaults'
+        - '{installer}-defaults'
+        - 'compass-verify-defaults':
+            installer: '{installer}'
+            gs-pathname: '{gs-pathname}'
+        - string:
+            name: DEPLOY_SCENARIO
+            default: 'k8-nosdn-nofeature-ha'
+
+    builders:
+        - description-setter:
+            description: "Built on $NODE_NAME"
+        - multijob:
+            name: basic
+            condition: SUCCESSFUL
+            projects:
+              - name: 'opnfv-lint-verify-{stream}'
+                current-parameters: true
+                node-parameters: true
+                kill-phase-on: FAILURE
+                abort-all-job: true
+              - name: 'opnfv-yamllint-verify-{stream}'
+                current-parameters: true
+                node-parameters: true
+                kill-phase-on: FAILURE
+                abort-all-job: true
+        - multijob:
+            name: deploy-virtual
+            condition: SUCCESSFUL
+            projects:
+              - name: 'compass-verify-deploy-virtual-{distro}-{stream}'
+                current-parameters: true
+                predefined-parameters: |
+                    COMPASS_OS_VERSION={os-version}
+                node-parameters: true
+                kill-phase-on: FAILURE
+                abort-all-job: true
+
 - job-template:
     name: 'compass-verify-{phase}-{distro}-{stream}'
diff --git a/jjb/conductor/conductor.yml b/jjb/conductor/conductor.yml
index 1d47624e1..d2ce649fc 100644
--- a/jjb/conductor/conductor.yml
+++ b/jjb/conductor/conductor.yml
@@ -53,6 +53,7 @@
                 branches:
                     - branch-compare-type: 'ANT'
                       branch-pattern: '**/{branch}'
+                disable-strict-forbidden-file-verification: 'true'
                 forbidden-file-paths:
                     - compare-type: ANT
                       pattern: 'docs/**|.gitignore'
diff --git a/jjb/copper/copper.yml b/jjb/copper/copper.yml
index ea1af473c..d06afe4c0 100644
--- a/jjb/copper/copper.yml
+++ b/jjb/copper/copper.yml
@@ -53,6 +53,7 @@
                 branches:
                     - branch-compare-type: 'ANT'
                       branch-pattern: '**/{branch}'
+                disable-strict-forbidden-file-verification: 'true'
                 forbidden-file-paths:
                     - compare-type: ANT
                       pattern: 'docs/**|.gitignore'
@@ -64,5 +65,4 @@
             set -o nounset
             set -o pipefail

-            cd $WORKSPACE/ci
-            shellcheck -f tty tests/*.sh
+            # shellcheck -f tty tests/*.sh
diff --git a/jjb/cperf/cperf-ci-jobs.yml b/jjb/cperf/cperf-ci-jobs.yml
index 125937e80..d1914f6f1 100644
--- a/jjb/cperf/cperf-ci-jobs.yml
+++ b/jjb/cperf/cperf-ci-jobs.yml
@@ -113,13 +113,43 @@
     builders:
         - shell: |
             #!/bin/bash
-            set +e
-            # TODO: need to figure out the logic to get ${CONTROLLER_IP} used below
+            set -o errexit
+            set -o nounset
+            set -o pipefail
+            undercloud_mac=$(sudo virsh domiflist undercloud | grep default | \
+                             grep -Eo "[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+")
+            INSTALLER_IP=$(/usr/sbin/arp -e | grep ${undercloud_mac} | awk {'print $1'})
+            sudo scp $INSTALLER_IP:/home/stack/stackrc /tmp/stackrc
+            source /tmp/stackrc
+
+            # robot suites need the ssh key to log in to controller nodes, so throwing it
+            # in tmp, and mounting /tmp as $HOME as far as robot is concerned
+            sudo mkdir -p /tmp/.ssh
+            sudo scp $INSTALLER_IP:/home/stack/.ssh/id_rsa /tmp/.ssh/
+            sudo chmod -R 0600 /tmp/.ssh
+
+            # cbench requires the openflow drop test feature to be installed.
+            sshpass -p karaf ssh -o StrictHostKeyChecking=no \
+                                 -o UserKnownHostsFile=/dev/null \
+                                 -o LogLevel=error \
+                                 -p 8101 karaf@$SDN_CONTROLLER_IP \
+                                 feature:install odl-openflowplugin-flow-services-ui odl-openflowplugin-drop-test
+
             docker pull opnfv/cperf:$DOCKER_TAG
-            robot_cmd="pybot -e exclude -v ODL_SYSTEM_IP:${CONTROLLER_IP} -v switch_count:100 -v loops:10 \
-                       -v TOOLS_SYSTEM_IP:localhost -v duration_in_seconds:60"
+
+            robot_cmd="pybot -e exclude -L TRACE \
+                        -v ODL_SYSTEM_1_IP:${SDN_CONTROLLER_IP} \
+                        -v ODL_SYSTEM_IP:${SDN_CONTROLLER_IP} \
+                        -v BUNDLEFOLDER:/opt/opendaylight \
+                        -v RESTCONFPORT:8081 \
+                        -v USER_HOME:/tmp \
+                        -v USER:heat-admin \
+                        -v ODL_SYSTEM_USER:heat-admin \
+                        -v TOOLS_SYSTEM_IP:localhost \
+                        -v of_port:6653"
             robot_suite="/home/opnfv/repos/odl_test/csit/suites/openflowplugin/Performance/010_Cbench.robot"
-            docker run opnfv/cperf:$DOCKER_TAG ${robot_cmd} ${robot_suite}
+
+            docker run -v /tmp:/tmp opnfv/cperf:$DOCKER_TAG ${robot_cmd} ${robot_suite}

 - builder:
     name: cperf-cleanup
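The new cperf builder resolves the undercloud VM's IP by pairing libvirt's view of the guest's MAC address with the hypervisor's ARP cache. The same two-step lookup in isolation (a sketch; assumes a libvirt domain named `undercloud` attached to the `default` network, as the builder does):

```bash
#!/bin/bash
# Sketch: resolve a libvirt guest's IP from the host, as in the cperf builder.
# 1) ask libvirt for the MAC of the guest NIC on the 'default' network
mac=$(sudo virsh domiflist undercloud | grep default | \
      grep -Eo "([0-9a-f]{2}:){5}[0-9a-f]{2}")
# 2) look up which IP the host's ARP cache maps to that MAC
ip=$(/usr/sbin/arp -e | grep "$mac" | awk '{print $1}')
echo "undercloud is at ${ip:-<not found>}"
```

Note the lookup only succeeds after the host has actually exchanged traffic with the guest, since it reads the ARP cache rather than querying the guest.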
diff --git a/jjb/daisy4nfv/daisy-daily-jobs.yml b/jjb/daisy4nfv/daisy-daily-jobs.yml
new file mode 100644
index 000000000..ffae70f8f
--- /dev/null
+++ b/jjb/daisy4nfv/daisy-daily-jobs.yml
@@ -0,0 +1,199 @@
+# jenkins job templates for Daisy
+# TODO
+# [ ] enable baremetal jobs after baremetal deployment finishes
+# [ ] enable jobs in danube
+# [ ] add more scenarios
+# [ ] integration with yardstick
+
+- project:
+
+    name: 'daisy'
+    project: '{name}'
+    installer: '{name}'
+
+#--------------------------------
+# BRANCH ANCHORS
+#--------------------------------
+    master: &master
+        stream: master
+        branch: '{stream}'
+        disabled: false
+        gs-pathname: ''
+#--------------------------------
+# POD, INSTALLER, AND BRANCH MAPPING
+#--------------------------------
+# CI PODs
+#--------------------------------
+    pod:
+        - baremetal:
+            slave-label: daisy-baremetal
+            <<: *master
+        - virtual:
+            slave-label: daisy-virtual
+            <<: *master
+#--------------------------------
+# None-CI PODs
+#--------------------------------
+
+#--------------------------------
+# scenarios
+#--------------------------------
+    scenario:
+        # HA scenarios
+        - 'os-nosdn-nofeature-ha':
+            auto-trigger-name: 'daisy-{scenario}-{pod}-daily-{stream}-trigger'
+        # NOHA scenarios
+        - 'os-nosdn-nofeature-noha':
+            auto-trigger-name: 'daisy-{scenario}-{pod}-daily-{stream}-trigger'
+
+    jobs:
+        - '{project}-{scenario}-{pod}-daily-{stream}'
+        - '{project}-deploy-{pod}-daily-{stream}'
+
+########################
+# job templates
+########################
+- job-template:
+    name: '{project}-{scenario}-{pod}-daily-{stream}'
+
+    disabled: '{obj:disabled}'
+
+    concurrent: false
+
+    properties:
+        - logrotate-default
+        - throttle:
+            enabled: true
+            max-total: 4
+            max-per-node: 1
+            option: 'project'
+        - build-blocker:
+            use-build-blocker: true
+            blocking-jobs:
+                - 'daisy.*-deploy-({pod})?-daily-.*'
+            block-level: 'NODE'
+
+    wrappers:
+        - build-name:
+            name: '$BUILD_NUMBER - Scenario: $DEPLOY_SCENARIO'
+
+    triggers:
+        - '{auto-trigger-name}'
+
+    parameters:
+        - project-parameter:
+            project: '{project}'
+            branch: '{branch}'
+        - '{installer}-defaults'
+        - '{slave-label}-defaults':
+            installer: '{installer}'
+        - string:
+            name: DEPLOY_SCENARIO
+            default: '{scenario}'
+        - 'daisy-project-parameter':
+            gs-pathname: '{gs-pathname}'
+
+    builders:
+        - description-setter:
+            description: "POD: $NODE_NAME"
+        - trigger-builds:
+            - project: 'daisy-deploy-{pod}-daily-{stream}'
+              current-parameters: false
+              predefined-parameters:
+                DEPLOY_SCENARIO={scenario}
+              same-node: true
+              block: true
+        - trigger-builds:
+            - project: 'functest-daisy-{pod}-daily-{stream}'
+              current-parameters: false
+              predefined-parameters:
+                DEPLOY_SCENARIO={scenario}
+              same-node: true
+              block: true
+              block-thresholds:
+                build-step-failure-threshold: 'never'
+                failure-threshold: 'never'
+                unstable-threshold: 'FAILURE'
+
+- job-template:
+    name: '{project}-deploy-{pod}-daily-{stream}'
+
+    disabled: '{obj:disabled}'
+
+    concurrent: true
+
+    properties:
+        - logrotate-default
+        - throttle:
+            enabled: true
+            max-total: 4
+            max-per-node: 1
+            option: 'project'
+        - build-blocker:
+            use-build-blocker: true
+            blocking-jobs:
+                - 'daisy.*-deploy-({pod})?-daily-.*'
+            block-level: 'NODE'
+
+    parameters:
+        - project-parameter:
+            project: '{project}'
+            branch: '{branch}'
+        - '{installer}-defaults'
+        - '{slave-label}-defaults':
+            installer: '{installer}'
+        - string:
+            name: DEPLOY_SCENARIO
+            default: 'os-nosdn-nofeature-ha'
+        - 'daisy-project-parameter':
+            gs-pathname: '{gs-pathname}'
+        - string:
+            name: DEPLOY_TIMEOUT
+            default: '150'
+            description: 'Deployment timeout in minutes'
+
+    scm:
+        - git-scm
+
+    wrappers:
+        - build-name:
+            name: '$BUILD_NUMBER - Scenario: $DEPLOY_SCENARIO'
+
+    builders:
+        - description-setter:
+            description: "POD: $NODE_NAME"
+        - shell:
+            !include-raw-escape: ./daisy4nfv-download-artifact.sh
+        - shell:
+            !include-raw-escape: ./daisy-deploy.sh
+
+
+########################
+# trigger macros
+########################
+#-----------------------------------------------
+# Triggers for job running on daisy-baremetal against master branch
+#-----------------------------------------------
+# HA Scenarios
+- trigger:
+    name: 'daisy-os-nosdn-nofeature-ha-baremetal-daily-master-trigger'
+    triggers:
+        - timed: ''
+# NOHA Scenarios
+- trigger:
+    name: 'daisy-os-nosdn-nofeature-noha-baremetal-daily-master-trigger'
+    triggers:
+        - timed: ''
+#-----------------------------------------------
+# Triggers for job running on daisy-virtual against master branch
+#-----------------------------------------------
+- trigger:
+    name: 'daisy-os-nosdn-nofeature-ha-virtual-daily-master-trigger'
+    triggers:
+        - timed: ''
+# NOHA Scenarios
+- trigger:
+    name: 'daisy-os-nosdn-nofeature-noha-virtual-daily-master-trigger'
+    triggers:
+        - timed: 'H 8,22 * * *'
+
diff --git a/jjb/daisy4nfv/daisy-deploy.sh b/jjb/daisy4nfv/daisy-deploy.sh
new file mode 100755
index 000000000..b512e3f60
--- /dev/null
+++ b/jjb/daisy4nfv/daisy-deploy.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+set -o nounset
+set -o pipefail
+
+echo "--------------------------------------------------------"
+echo "This is $INSTALLER_TYPE deploy job!"
+echo "--------------------------------------------------------"
+
+DEPLOY_SCENARIO=${DEPLOY_SCENARIO:-"os-nosdn-nofeature-ha"}
+BRIDGE=${BRIDGE:-pxebr}
+LAB_NAME=${NODE_NAME/-*}
+POD_NAME=${NODE_NAME/*-}
+deploy_ret=0
+
+if [[ ! "$NODE_NAME" =~ "-virtual" ]] && [[ ! "$LAB_NAME" =~ (zte) ]]; then
+    echo "Unsupported lab $LAB_NAME for now, cannot continue!"
+    exit $deploy_ret
+fi
+
+# clone the securedlab repo
+cd $WORKSPACE
+BASE_DIR=$(cd ./;pwd)
+
+echo "Cloning securedlab repo $BRANCH"
+git clone ssh://jenkins-zte@gerrit.opnfv.org:29418/securedlab --quiet \
+    --branch $BRANCH
+
+# daisy ci/deploy/deploy.sh uses the $BASE_DIR/labs dir
+cp -r securedlab/labs .
+
+DEPLOY_COMMAND="sudo ./ci/deploy/deploy.sh -b $BASE_DIR \
+                -l $LAB_NAME -p $POD_NAME -B $BRIDGE"
+
+# log info to console
+echo """
+Deployment parameters
+--------------------------------------------------------
+Scenario: $DEPLOY_SCENARIO
+LAB: $LAB_NAME
+POD: $POD_NAME
+BRIDGE: $BRIDGE
+BASE_DIR: $BASE_DIR
+
+Starting the deployment using $INSTALLER_TYPE. This could take some time...
+--------------------------------------------------------
+Issuing command
+$DEPLOY_COMMAND
+"""
+
+# start the deployment
+$DEPLOY_COMMAND
+
+if [ $? -ne 0 ]; then
+    echo
+    echo "Deployment failed!"
+    deploy_ret=1
+else
+    echo
+    echo "--------------------------------------------------------"
+    echo "Deployment done!"
+fi
+
+exit $deploy_ret
diff --git a/jjb/daisy4nfv/daisy-project-jobs.yml b/jjb/daisy4nfv/daisy-project-jobs.yml
index 156740980..9a57e1753 100644
--- a/jjb/daisy4nfv/daisy-project-jobs.yml
+++ b/jjb/daisy4nfv/daisy-project-jobs.yml
@@ -23,7 +23,7 @@
         - danube:
             branch: 'stable/{stream}'
             gs-pathname: '/{stream}'
-            disabled: true
+            disabled: false

     phase:
         - 'build':
@@ -196,7 +196,7 @@
         - shell:
             !include-raw: ./daisy4nfv-download-artifact.sh
         - shell:
-            !include-raw: ./daisy4nfv-deploy.sh
+            !include-raw: ./daisy-deploy.sh

 - builder:
     name: 'daisy-test-daily-macro'
diff --git a/jjb/daisy4nfv/daisy4nfv-build.sh b/jjb/daisy4nfv/daisy4nfv-build.sh
index eb29fed72..375d80733 100755
--- a/jjb/daisy4nfv/daisy4nfv-build.sh
+++ b/jjb/daisy4nfv/daisy4nfv-build.sh
@@ -26,6 +26,7 @@ cd $WORKSPACE
   echo "OPNFV_GIT_URL=$(git config --get remote.origin.url)"
   echo "OPNFV_GIT_SHA1=$(git rev-parse HEAD)"
   echo "OPNFV_ARTIFACT_URL=$GS_URL/opnfv-$OPNFV_ARTIFACT_VERSION.bin"
+  echo "OPNFV_ARTIFACT_SHA512SUM=$(sha512sum $OUTPUT_DIR/opnfv-$OPNFV_ARTIFACT_VERSION.bin | cut -d' ' -f1)"
  echo "OPNFV_BUILD_URL=$BUILD_URL"
 ) > $WORKSPACE/opnfv.properties
diff --git a/jjb/daisy4nfv/daisy4nfv-deploy.sh b/jjb/daisy4nfv/daisy4nfv-deploy.sh
deleted file mode 100755
index cc2c10388..000000000
--- a/jjb/daisy4nfv/daisy4nfv-deploy.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/bash
-
-echo "Daisy deployment WIP"
+ ln -s $BINSTORE/$OPNFV_ARTIFACT $WORKSPACE/opnfv.bin + echo "--------------------------------------------------------" + echo + ls -al $WORKSPACE/opnfv.bin + echo + echo "--------------------------------------------------------" + echo "Done!" + exit 0 + fi + fi +fi + # log info to console echo "Downloading the $INSTALLER_TYPE artifact using URL http://$OPNFV_ARTIFACT_URL" echo "This could take some time..." @@ -43,7 +62,7 @@ echo "--------------------------------------------------------" echo # download the file -curl -s -o $WORKSPACE/opnfv.bin http://$OPNFV_ARTIFACT_URL > gsutil.bin.log 2>&1 +curl -L -s -o $WORKSPACE/opnfv.bin http://$OPNFV_ARTIFACT_URL > gsutil.bin.log 2>&1 # list the file ls -al $WORKSPACE/opnfv.bin diff --git a/jjb/daisy4nfv/daisy4nfv-merge-jobs.yml b/jjb/daisy4nfv/daisy4nfv-merge-jobs.yml index a6659b2bf..11531f4a4 100644 --- a/jjb/daisy4nfv/daisy4nfv-merge-jobs.yml +++ b/jjb/daisy4nfv/daisy4nfv-merge-jobs.yml @@ -21,7 +21,7 @@ - danube: branch: 'stable/{stream}' gs-pathname: '/{stream}' - disabled: true + disabled: false ##################################### # patch merge phases ##################################### @@ -84,6 +84,7 @@ pattern: 'code/**' - compare-type: ANT pattern: 'deploy/**' + disable-strict-forbidden-file-verification: 'true' forbidden-file-paths: - compare-type: ANT pattern: 'docs/**' @@ -193,7 +194,7 @@ - shell: !include-raw: ./daisy4nfv-download-artifact.sh - shell: - !include-raw: ./daisy4nfv-virtual-deploy.sh + !include-raw: ./daisy-deploy.sh - shell: !include-raw: ./daisy4nfv-workspace-cleanup.sh diff --git a/jjb/daisy4nfv/daisy4nfv-verify-jobs.yml b/jjb/daisy4nfv/daisy4nfv-verify-jobs.yml index ee82c14b2..ee78ab59f 100644 --- a/jjb/daisy4nfv/daisy4nfv-verify-jobs.yml +++ b/jjb/daisy4nfv/daisy4nfv-verify-jobs.yml @@ -21,7 +21,7 @@ - danube: branch: 'stable/{stream}' gs-pathname: '/{stream}' - disabled: true + disabled: false ##################################### # patch verification phases ##################################### @@ -88,6 +88,7 @@ pattern: 'code/**' - compare-type: ANT pattern: 'deploy/**' + disable-strict-forbidden-file-verification: 'true' forbidden-file-paths: - compare-type: ANT pattern: 'docs/**' diff --git a/jjb/daisy4nfv/daisy4nfv-virtual-deploy.sh b/jjb/daisy4nfv/daisy4nfv-virtual-deploy.sh deleted file mode 100755 index ef4a07b8d..000000000 --- a/jjb/daisy4nfv/daisy4nfv-virtual-deploy.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash - -echo "--------------------------------------------------------" -echo "This is diasy4nfv virtual deploy job!" -echo "--------------------------------------------------------" - -cd $WORKSPACE - -if [[ "$NODE_NAME" =~ "-virtual" ]]; then - export NETWORK_CONF=./deploy/config/vm_environment/$NODE_NAME/network.yml - export DHA_CONF=./deploy/config/vm_environment/$NODE_NAME/deploy.yml -else - # TODO: For the time being, we need to pass this script to let contributors merge their work. - echo "No support for non-virtual node" - exit 0 -fi - -sudo ./ci/deploy/deploy.sh -d ${DHA_CONF} -n ${NETWORK_CONF} -p ${NODE_NAME:-"zte-virtual1"} - -if [ $? -ne 0 ]; then - echo "depolyment failed!" - deploy_ret=1 -fi - -echo -echo "--------------------------------------------------------" -echo "Done!" 
- -exit $deploy_ret diff --git a/jjb/doctor/doctor.yml b/jjb/doctor/doctor.yml index 2333fca14..c677ef96e 100644 --- a/jjb/doctor/doctor.yml +++ b/jjb/doctor/doctor.yml @@ -22,9 +22,9 @@ - fuel: slave-label: 'ool-virtual2' pod: 'ool-virtual2' - - joid: - slave-label: 'ool-virtual3' - pod: 'ool-virtual3' + #- joid: + # slave-label: 'ool-virtual3' + # pod: 'ool-virtual3' inspector: - 'sample' @@ -145,11 +145,16 @@ builders: - 'clean-workspace-log' + - shell: | + # NOTE: Create symbolic link, so that we can archive file outside + # of $WORKSPACE . + # NOTE: We are printing all logs under 'tests/' during test run, + # so this symbolic link should not be in 'tests/'. Otherwise, + # we'll have the same log twice in jenkins console log. + ln -sfn $HOME/opnfv/functest/results/{stream} functest_results - 'functest-suite-builder' - shell: | functest_log="$HOME/opnfv/functest/results/{stream}/{project}.log" - to_be_archived="$WORKSPACE/tests/functest-{project}.log" - cp $functest_log $to_be_archived # NOTE: checking the test result, as the previous job could return # 0 regardless the result of doctor test scenario. grep -e ' OK$' $functest_log || exit 1 @@ -157,6 +162,8 @@ publishers: - archive: artifacts: 'tests/*.log' + - archive: + artifacts: 'functest_results/{project}.log' ##################################### diff --git a/jjb/domino/domino.yml b/jjb/domino/domino.yml index 5fd9db3f1..8c9be120b 100644 --- a/jjb/domino/domino.yml +++ b/jjb/domino/domino.yml @@ -49,6 +49,7 @@ branches: - branch-compare-type: 'ANT' branch-pattern: '**/{branch}' + disable-strict-forbidden-file-verification: 'true' forbidden-file-paths: - compare-type: ANT pattern: 'docs/**|.gitignore' diff --git a/jjb/dovetail/dovetail-ci-jobs.yml b/jjb/dovetail/dovetail-ci-jobs.yml index e2a334d40..0bd32a4ab 100644 --- a/jjb/dovetail/dovetail-ci-jobs.yml +++ b/jjb/dovetail/dovetail-ci-jobs.yml @@ -20,8 +20,8 @@ dovetail-branch: '{stream}' gs-pathname: '' docker-tag: 'latest' - colorado: &colorado - stream: colorado + danube: &danube + stream: danube branch: 'stable/{stream}' dovetail-branch: master gs-pathname: '/{stream}' @@ -54,12 +54,12 @@ slave-label: fuel-baremetal SUT: fuel auto-trigger-name: 'daily-trigger-disabled' - <<: *colorado + <<: *danube - virtual: slave-label: fuel-virtual SUT: fuel auto-trigger-name: 'daily-trigger-disabled' - <<: *colorado + <<: *danube #compass CI PODs - baremetal: slave-label: compass-baremetal @@ -75,33 +75,29 @@ slave-label: compass-baremetal SUT: compass auto-trigger-name: 'daily-trigger-disabled' - <<: *colorado + <<: *danube - virtual: slave-label: compass-virtual SUT: compass auto-trigger-name: 'daily-trigger-disabled' - <<: *colorado -#apex CI PODs - - apex-verify-master: - slave-label: '{pod}' - SUT: apex - auto-trigger-name: 'daily-trigger-disabled' - <<: *master - - apex-daily-master: + <<: *danube +#-------------------------------- +# Installers not using labels +# CI PODs +# This section should only contain the installers +# that have not been switched using labels for slaves +#-------------------------------- +#apex PODs + - lf-pod1: slave-label: '{pod}' SUT: apex auto-trigger-name: 'daily-trigger-disabled' <<: *master - - apex-verify-colorado: - slave-label: '{pod}' - SUT: apex - auto-trigger-name: 'daily-trigger-disabled' - <<: *colorado - - apex-daily-colorado: + - lf-pod1: slave-label: '{pod}' SUT: apex auto-trigger-name: 'daily-trigger-disabled' - <<: *colorado + <<: *danube #armband CI PODs - armband-baremetal: slave-label: armband-baremetal @@ -117,12 +113,12 @@ slave-label: 
armband-baremetal SUT: fuel auto-trigger-name: 'daily-trigger-disabled' - <<: *colorado + <<: *danube - armband-virtual: slave-label: armband-virtual SUT: fuel auto-trigger-name: 'daily-trigger-disabled' - <<: *colorado + <<: *danube #-------------------------------- # None-CI PODs #-------------------------------- diff --git a/jjb/dpacc/dpacc.yml b/jjb/dpacc/dpacc.yml index bc61d7447..63eb044ad 100644 --- a/jjb/dpacc/dpacc.yml +++ b/jjb/dpacc/dpacc.yml @@ -53,6 +53,7 @@ branches: - branch-compare-type: 'ANT' branch-pattern: '**/{branch}' + disable-strict-forbidden-file-verification: 'true' forbidden-file-paths: - compare-type: ANT pattern: 'docs/**|.gitignore' diff --git a/jjb/escalator/escalator.yml b/jjb/escalator/escalator.yml index 2265dafce..041a41f91 100644 --- a/jjb/escalator/escalator.yml +++ b/jjb/escalator/escalator.yml @@ -73,6 +73,7 @@ branches: - branch-compare-type: 'ANT' branch-pattern: '**/{branch}' + disable-strict-forbidden-file-verification: 'true' forbidden-file-paths: - compare-type: ANT pattern: 'docs/**|.gitignore' @@ -185,6 +186,7 @@ branches: - branch-compare-type: 'ANT' branch-pattern: '**/{branch}' + disable-strict-forbidden-file-verification: 'true' forbidden-file-paths: - compare-type: ANT pattern: 'docs/**|.gitignore' diff --git a/jjb/fuel/fuel-basic-exp.sh b/jjb/fuel/fuel-basic-exp.sh deleted file mode 100755 index a70a0c765..000000000 --- a/jjb/fuel/fuel-basic-exp.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash -set -o nounset - -echo "-----------------------------------------------------------------------" -echo $GERRIT_CHANGE_COMMIT_MESSAGE -echo "-----------------------------------------------------------------------" - -# proposal for specifying the scenario name in commit message -# currently only 1 scenario name is supported but depending on -# the need, it can be expanded, supporting multiple scenarios -# using comma separated list or something -SCENARIO_NAME_PATTERN="(?<=@scenario:).*?(?=@)" -SCENARIO_NAME=(echo $GERRIT_CHANGE_COMMIT_MESSAGE | grep -oP "$SCENARIO_NAME_PATTERN") -if [[ $? -ne 0 ]]; then - echo "The patch verification will be done only with build!" -else - echo "Will run full verification; build, deploy, and smoke test using scenario $SCENARIO_NAME" -fi diff --git a/jjb/fuel/fuel-build-exp.sh b/jjb/fuel/fuel-build-exp.sh deleted file mode 100755 index f7f613dc0..000000000 --- a/jjb/fuel/fuel-build-exp.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash - -if [[ "$JOB_NAME" =~ (verify|merge|daily|weekly) ]]; then - JOB_TYPE=${BASH_REMATCH[0]} -else - echo "Unable to determine job type!" - exit 1 -fi - -echo "Not activated!" 
diff --git a/jjb/fuel/fuel-daily-jobs.yml b/jjb/fuel/fuel-daily-jobs.yml index f78c4a317..36f3ce414 100644 --- a/jjb/fuel/fuel-daily-jobs.yml +++ b/jjb/fuel/fuel-daily-jobs.yml @@ -18,7 +18,7 @@ danube: &danube stream: danube branch: 'stable/{stream}' - disabled: true + disabled: false gs-pathname: '/{stream}' #-------------------------------- # POD, INSTALLER, AND BRANCH MAPPING @@ -106,6 +106,8 @@ auto-trigger-name: 'fuel-{scenario}-{pod}-daily-{stream}-trigger' - 'os-nosdn-kvm_ovs_dpdk-noha': auto-trigger-name: 'fuel-{scenario}-{pod}-daily-{stream}-trigger' + - 'os-nosdn-kvm_ovs_dpdk_bar-noha': + auto-trigger-name: 'fuel-{scenario}-{pod}-daily-{stream}-trigger' jobs: - 'fuel-{scenario}-{pod}-daily-{stream}' @@ -132,6 +134,7 @@ use-build-blocker: true blocking-jobs: - 'fuel-os-.*?-{pod}-daily-.*' + - 'fuel-os-.*?-{pod}-weekly-.*' block-level: 'NODE' wrappers: @@ -156,7 +159,7 @@ builders: - description-setter: - description: "POD: $NODE_NAME" + description: "Built on $NODE_NAME" - trigger-builds: - project: 'fuel-deploy-{pod}-daily-{stream}' current-parameters: false @@ -210,6 +213,8 @@ blocking-jobs: - 'fuel-deploy-{pod}-daily-.*' - 'fuel-deploy-generic-daily-.*' + - 'fuel-deploy-{pod}-weekly-.*' + - 'fuel-deploy-generic-weekly-.*' block-level: 'NODE' parameters: @@ -238,7 +243,7 @@ builders: - description-setter: - description: "POD: $NODE_NAME" + description: "Built on $NODE_NAME" - shell: !include-raw-escape: ./fuel-download-artifact.sh - shell: @@ -357,7 +362,11 @@ - trigger: name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-baremetal-daily-master-trigger' triggers: - - timed: '30 16 * * *' + - timed: '' +- trigger: + name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-baremetal-daily-master-trigger' + triggers: + - timed: '' #----------------------------------------------- # Triggers for job running on fuel-baremetal against danube branch #----------------------------------------------- @@ -447,6 +456,10 @@ name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-baremetal-daily-danube-trigger' triggers: - timed: '' +- trigger: + name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-baremetal-daily-danube-trigger' + triggers: + - timed: '' #----------------------------------------------- # Triggers for job running on fuel-virtual against master branch #----------------------------------------------- @@ -534,7 +547,11 @@ - trigger: name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-virtual-daily-master-trigger' triggers: - - timed: '' + - timed: '30 16 * * *' +- trigger: + name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-virtual-daily-master-trigger' + triggers: + - timed: '30 20 * * *' #----------------------------------------------- # Triggers for job running on fuel-virtual against danube branch #----------------------------------------------- @@ -623,6 +640,10 @@ name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-virtual-daily-danube-trigger' triggers: - timed: '' +- trigger: + name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-virtual-daily-danube-trigger' + triggers: + - timed: '' #----------------------------------------------- # ZTE POD1 Triggers running against master branch #----------------------------------------------- @@ -711,6 +732,10 @@ name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-zte-pod1-daily-master-trigger' triggers: - timed: '' +- trigger: + name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-zte-pod1-daily-master-trigger' + triggers: + - timed: '' #----------------------------------------------- # ZTE POD2 Triggers running against master branch @@ -800,6 +825,10 @@ name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-zte-pod2-daily-master-trigger' triggers: - timed: '' +- 
trigger: + name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-zte-pod2-daily-master-trigger' + triggers: + - timed: '' #----------------------------------------------- # ZTE POD3 Triggers running against master branch #----------------------------------------------- @@ -888,6 +917,10 @@ name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-zte-pod3-daily-master-trigger' triggers: - timed: '' +- trigger: + name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-zte-pod3-daily-master-trigger' + triggers: + - timed: '' #----------------------------------------------- # ZTE POD1 Triggers running against danube branch #----------------------------------------------- @@ -976,6 +1009,10 @@ name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-zte-pod1-daily-danube-trigger' triggers: - timed: '' +- trigger: + name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-zte-pod1-daily-danube-trigger' + triggers: + - timed: '' #----------------------------------------------- # ZTE POD2 Triggers running against danube branch @@ -1065,6 +1102,10 @@ name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-zte-pod2-daily-danube-trigger' triggers: - timed: '' +- trigger: + name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-zte-pod2-daily-danube-trigger' + triggers: + - timed: '' #----------------------------------------------- # ZTE POD3 Triggers running against danube branch #----------------------------------------------- @@ -1153,3 +1194,7 @@ name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-zte-pod3-daily-danube-trigger' triggers: - timed: '' +- trigger: + name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-zte-pod3-daily-danube-trigger' + triggers: + - timed: '' diff --git a/jjb/fuel/fuel-deploy-exp.sh b/jjb/fuel/fuel-deploy-exp.sh deleted file mode 100755 index f7f613dc0..000000000 --- a/jjb/fuel/fuel-deploy-exp.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash - -if [[ "$JOB_NAME" =~ (verify|merge|daily|weekly) ]]; then - JOB_TYPE=${BASH_REMATCH[0]} -else - echo "Unable to determine job type!" - exit 1 -fi - -echo "Not activated!" diff --git a/jjb/fuel/fuel-deploy.sh b/jjb/fuel/fuel-deploy.sh index 4efccd611..f5bbd1818 100755 --- a/jjb/fuel/fuel-deploy.sh +++ b/jjb/fuel/fuel-deploy.sh @@ -95,7 +95,7 @@ echo "Deployment is done!" 
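+# NOTE (illustration): the extended match below uses bash ERE alternation, so
+# a hypothetical job name like fuel-deploy-baremetal-weekly-master satisfies
+#   [[ "$JOB_NAME" =~ (baremetal-daily|baremetal-weekly) ]]
+# and uploads its logs, while virtual deployments still skip the upload.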
# upload logs for baremetal deployments # work with virtual deployments is still going on so we skip that for the timebeing -if [[ "$JOB_NAME" =~ "baremetal-daily" ]]; then +if [[ "$JOB_NAME" =~ (baremetal-daily|baremetal-weekly) ]]; then echo "Uploading deployment logs" gsutil cp $WORKSPACE/$FUEL_LOG_FILENAME gs://$GS_URL/logs/$FUEL_LOG_FILENAME > /dev/null 2>&1 echo "Logs are available as http://$GS_URL/logs/$FUEL_LOG_FILENAME" diff --git a/jjb/fuel/fuel-project-jobs.yml b/jjb/fuel/fuel-project-jobs.yml index 32ad8907e..1f0ddd363 100644 --- a/jjb/fuel/fuel-project-jobs.yml +++ b/jjb/fuel/fuel-project-jobs.yml @@ -16,7 +16,7 @@ - danube: branch: 'stable/{stream}' gs-pathname: '/{stream}' - disabled: true + disabled: false jobs: - 'fuel-build-daily-{stream}' @@ -125,6 +125,7 @@ pattern: 'build/**' - compare-type: ANT pattern: 'deploy/**' + disable-strict-forbidden-file-verification: 'true' forbidden-file-paths: - compare-type: ANT pattern: 'docs/**' @@ -193,6 +194,7 @@ pattern: 'build/**' - compare-type: ANT pattern: 'deploy/**' + disable-strict-forbidden-file-verification: 'true' forbidden-file-paths: - compare-type: ANT pattern: 'docs/**' diff --git a/jjb/fuel/fuel-smoke-test-exp.sh b/jjb/fuel/fuel-smoke-test-exp.sh deleted file mode 100755 index f7f613dc0..000000000 --- a/jjb/fuel/fuel-smoke-test-exp.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash - -if [[ "$JOB_NAME" =~ (verify|merge|daily|weekly) ]]; then - JOB_TYPE=${BASH_REMATCH[0]} -else - echo "Unable to determine job type!" - exit 1 -fi - -echo "Not activated!" diff --git a/jjb/fuel/fuel-verify-jobs-experimental.yml b/jjb/fuel/fuel-verify-jobs-experimental.yml deleted file mode 100644 index ae6458021..000000000 --- a/jjb/fuel/fuel-verify-jobs-experimental.yml +++ /dev/null @@ -1,255 +0,0 @@ -- project: - # TODO: rename the project name - # TODO: get rid of appended -exp from the remainder of the file - name: 'fuel-verify-jobs-experimental' - - project: 'fuel' - - installer: 'fuel' -#------------------------------------ -# branch definitions -#------------------------------------ - # TODO: enable master once things settle - stream-exp: - - experimental: - branch: 'stable/{stream-exp}' - gs-pathname: '/{stream-exp}' - disabled: false -#------------------------------------ -# patch verification phases -#------------------------------------ - phase: - - 'basic': - # this phase does basic commit message check, unit test and so on - slave-label: 'opnfv-build' - - 'build': - # this phase builds artifacts if valid for given installer - slave-label: 'opnfv-build-ubuntu' - - 'deploy-virtual': - # this phase does virtual deployment using the artifacts produced in previous phase - slave-label: 'fuel-virtual' - - 'smoke-test': - # this phase runs functest smoke test - slave-label: 'fuel-virtual' -#------------------------------------ -# jobs -#------------------------------------ - jobs: - - 'fuel-verify-{stream-exp}' - - 'fuel-verify-{phase}-{stream-exp}' -#------------------------------------ -# job templates -#------------------------------------ -- job-template: - name: 'fuel-verify-{stream-exp}' - - project-type: multijob - - disabled: '{obj:disabled}' - - # TODO: this is valid for experimental only - # enable concurrency for master once things settle - concurrent: false - - properties: - - logrotate-default - - throttle: - enabled: true - max-total: 4 - option: 'project' - - scm: - - git-scm-gerrit - - wrappers: - - ssh-agent-wrapper - - timeout: - timeout: 360 - fail: true - - triggers: - - gerrit: - server-name: 'gerrit.opnfv.org' - 
trigger-on: - - patchset-created-event: - exclude-drafts: 'false' - exclude-trivial-rebase: 'false' - exclude-no-code-change: 'false' - - draft-published-event - - comment-added-contains-event: - comment-contains-value: 'recheck' - - comment-added-contains-event: - comment-contains-value: 'reverify' - projects: - - project-compare-type: 'ANT' - project-pattern: '{project}' - branches: - - branch-compare-type: 'ANT' - branch-pattern: '**/{branch}' - file-paths: - - compare-type: ANT - pattern: 'ci/**' - - compare-type: ANT - pattern: 'build/**' - - compare-type: ANT - pattern: 'deploy/**' - forbidden-file-paths: - - compare-type: ANT - pattern: 'docs/**' - readable-message: true - - parameters: - - project-parameter: - project: '{project}' - branch: '{branch}' - - 'opnfv-build-defaults' - - 'fuel-verify-defaults-exp': - gs-pathname: '{gs-pathname}' - - builders: - - description-setter: - description: "Built on $NODE_NAME" - - multijob: - name: basic - condition: SUCCESSFUL - projects: - - name: 'fuel-verify-basic-{stream-exp}' - current-parameters: false - predefined-parameters: | - BRANCH=$BRANCH - GERRIT_REFSPEC=$GERRIT_REFSPEC - GERRIT_CHANGE_NUMBER=$GERRIT_CHANGE_NUMBER - GERRIT_CHANGE_COMMIT_MESSAGE=$GERRIT_CHANGE_COMMIT_MESSAGE - node-parameters: false - kill-phase-on: FAILURE - abort-all-job: true - - multijob: - name: build - condition: SUCCESSFUL - projects: - - name: 'fuel-verify-build-{stream-exp}' - current-parameters: false - predefined-parameters: | - BRANCH=$BRANCH - GERRIT_REFSPEC=$GERRIT_REFSPEC - GERRIT_CHANGE_NUMBER=$GERRIT_CHANGE_NUMBER - GERRIT_CHANGE_COMMIT_MESSAGE=$GERRIT_CHANGE_COMMIT_MESSAGE - node-parameters: false - kill-phase-on: FAILURE - abort-all-job: true - - multijob: - name: deploy-virtual - condition: SUCCESSFUL - projects: - - name: 'fuel-verify-deploy-virtual-{stream-exp}' - current-parameters: false - predefined-parameters: | - BRANCH=$BRANCH - GERRIT_REFSPEC=$GERRIT_REFSPEC - GERRIT_CHANGE_NUMBER=$GERRIT_CHANGE_NUMBER - GERRIT_CHANGE_COMMIT_MESSAGE=$GERRIT_CHANGE_COMMIT_MESSAGE - node-parameters: false - kill-phase-on: FAILURE - abort-all-job: true - - multijob: - name: smoke-test - condition: SUCCESSFUL - projects: - - name: 'fuel-verify-smoke-test-{stream-exp}' - current-parameters: false - predefined-parameters: | - BRANCH=$BRANCH - GERRIT_REFSPEC=$GERRIT_REFSPEC - GERRIT_CHANGE_NUMBER=$GERRIT_CHANGE_NUMBER - GERRIT_CHANGE_COMMIT_MESSAGE=$GERRIT_CHANGE_COMMIT_MESSAGE - node-parameters: false - kill-phase-on: FAILURE - abort-all-job: true - -- job-template: - name: 'fuel-verify-{phase}-{stream-exp}' - - disabled: '{obj:disabled}' - - concurrent: true - - properties: - - logrotate-default - - throttle: - enabled: true - max-total: 6 - option: 'project' - - build-blocker: - use-build-blocker: true - blocking-jobs: - - 'fuel-verify-deploy-.*' - - 'fuel-verify-test-.*' - block-level: 'NODE' - - scm: - - git-scm-gerrit - - wrappers: - - ssh-agent-wrapper - - timeout: - timeout: 360 - fail: true - parameters: - - project-parameter: - project: '{project}' - branch: '{branch}' - - '{slave-label}-defaults' - - '{installer}-defaults' - - 'fuel-verify-defaults-exp': - gs-pathname: '{gs-pathname}' - - builders: - - description-setter: - description: "Built on $NODE_NAME" - - '{project}-verify-{phase}-macro-exp' -#------------------------------------ -# builder macros -#------------------------------------ -- builder: - name: 'fuel-verify-basic-macro-exp' - builders: - - shell: - !include-raw: ./fuel-basic-exp.sh - -- builder: - name: 
'fuel-verify-build-macro-exp' - builders: - - shell: - !include-raw: ./fuel-build-exp.sh - - shell: - !include-raw: ./fuel-workspace-cleanup.sh - -- builder: - name: 'fuel-verify-deploy-virtual-macro-exp' - builders: - - shell: - !include-raw: ./fuel-deploy-exp.sh - -- builder: - name: 'fuel-verify-smoke-test-macro-exp' - builders: - - shell: - !include-raw: ./fuel-smoke-test-exp.sh -#------------------------------------ -# parameter macros -#------------------------------------ -- parameter: - name: 'fuel-verify-defaults-exp' - parameters: - - string: - name: BUILD_DIRECTORY - default: $WORKSPACE/build_output - description: "Directory where the build artifact will be located upon the completion of the build." - - string: - name: CACHE_DIRECTORY - default: $HOME/opnfv/cache/$INSTALLER_TYPE - description: "Directory where the cache to be used during the build is located." - - string: - name: GS_URL - default: artifacts.opnfv.org/$PROJECT{gs-pathname} - description: "URL to Google Storage." diff --git a/jjb/fuel/fuel-verify-jobs.yml b/jjb/fuel/fuel-verify-jobs.yml index 7f9eff04d..549f7dafa 100644 --- a/jjb/fuel/fuel-verify-jobs.yml +++ b/jjb/fuel/fuel-verify-jobs.yml @@ -15,7 +15,7 @@ - danube: branch: 'stable/{stream}' gs-pathname: '/{stream}' - disabled: true + disabled: false ##################################### # patch verification phases ##################################### @@ -88,6 +88,7 @@ pattern: 'build/**' - compare-type: ANT pattern: 'deploy/**' + disable-strict-forbidden-file-verification: 'true' forbidden-file-paths: - compare-type: ANT pattern: 'docs/**' diff --git a/jjb/fuel/fuel-weekly-jobs.yml b/jjb/fuel/fuel-weekly-jobs.yml new file mode 100644 index 000000000..bd42ed85c --- /dev/null +++ b/jjb/fuel/fuel-weekly-jobs.yml @@ -0,0 +1,210 @@ +# jenkins job templates for Fuel +- project: + + name: fuel-weekly + + project: fuel + + installer: fuel + +#-------------------------------- +# BRANCH ANCHORS +#-------------------------------- + master: &master + stream: master + branch: '{stream}' + disabled: false + gs-pathname: '' + danube: &danube + stream: danube + branch: 'stable/{stream}' + disabled: false + gs-pathname: '/{stream}' +#-------------------------------- +# POD, INSTALLER, AND BRANCH MAPPING +#-------------------------------- +# CI PODs +#-------------------------------- + pod: + - baremetal: + slave-label: fuel-baremetal + <<: *master + - virtual: + slave-label: fuel-virtual + <<: *master + - baremetal: + slave-label: fuel-baremetal + <<: *danube + - virtual: + slave-label: fuel-virtual + <<: *danube +#-------------------------------- +# scenarios +#-------------------------------- + scenario: + # HA scenarios + - 'os-nosdn-nofeature-ha': + auto-trigger-name: 'weekly-trigger-disabled' + + jobs: + - 'fuel-{scenario}-{pod}-weekly-{stream}' + - 'fuel-deploy-{pod}-weekly-{stream}' + +######################## +# job templates +######################## +- job-template: + name: 'fuel-{scenario}-{pod}-weekly-{stream}' + + disabled: '{obj:disabled}' + + concurrent: false + + properties: + - logrotate-default + - throttle: + enabled: true + max-total: 4 + max-per-node: 1 + option: 'project' + - build-blocker: + use-build-blocker: true + blocking-jobs: + - 'fuel-os-.*?-{pod}-daily-.*' + - 'fuel-os-.*?-{pod}-weekly-.*' + block-level: 'NODE' + + wrappers: + - build-name: + name: '$BUILD_NUMBER - Scenario: $DEPLOY_SCENARIO' + + triggers: + - '{auto-trigger-name}' + + parameters: + - project-parameter: + project: '{project}' + branch: '{branch}' + - '{installer}-defaults' + - 
'{slave-label}-defaults': + installer: '{installer}' + - string: + name: DEPLOY_SCENARIO + default: '{scenario}' + - fuel-weekly-parameter: + gs-pathname: '{gs-pathname}' + + builders: + - description-setter: + description: "Built on $NODE_NAME" + - trigger-builds: + - project: 'fuel-deploy-{pod}-weekly-{stream}' + current-parameters: false + predefined-parameters: + DEPLOY_SCENARIO={scenario} + same-node: true + block: true + - trigger-builds: + - project: 'functest-fuel-{pod}-weekly-{stream}' + current-parameters: false + predefined-parameters: + DEPLOY_SCENARIO={scenario} + same-node: true + block: true + block-thresholds: + build-step-failure-threshold: 'never' + failure-threshold: 'never' + unstable-threshold: 'FAILURE' + + publishers: + - email: + recipients: peter.barabas@ericsson.com fzhadaev@mirantis.com + +- job-template: + name: 'fuel-deploy-{pod}-weekly-{stream}' + + disabled: '{obj:disabled}' + + concurrent: true + + properties: + - logrotate-default + - throttle: + enabled: true + max-total: 4 + max-per-node: 1 + option: 'project' + - build-blocker: + use-build-blocker: true + blocking-jobs: + - 'fuel-deploy-{pod}-daily-.*' + - 'fuel-deploy-generic-daily-.*' + - 'fuel-deploy-{pod}-weekly-.*' + - 'fuel-deploy-generic-weekly-.*' + block-level: 'NODE' + + parameters: + - project-parameter: + project: '{project}' + branch: '{branch}' + - '{installer}-defaults' + - '{slave-label}-defaults': + installer: '{installer}' + - string: + name: DEPLOY_SCENARIO + default: 'os-odl_l2-nofeature-ha' + - fuel-weekly-parameter: + gs-pathname: '{gs-pathname}' + - string: + name: DEPLOY_TIMEOUT + default: '150' + description: 'Deployment timeout in minutes' + + scm: + - git-scm + + wrappers: + - build-name: + name: '$BUILD_NUMBER - Scenario: $DEPLOY_SCENARIO' + + builders: + - description-setter: + description: "Built on $NODE_NAME" + - shell: + !include-raw-escape: ./fuel-download-artifact.sh + - shell: + !include-raw-escape: ./fuel-deploy.sh + + publishers: + - email: + recipients: peter.barabas@ericsson.com fzhadaev@mirantis.com + +######################## +# parameter macros +######################## +- parameter: + name: fuel-weekly-parameter + parameters: + - string: + name: BUILD_DIRECTORY + default: $WORKSPACE/build_output + description: "Directory where the build artifact will be located upon the completion of the build." + - string: + name: CACHE_DIRECTORY + default: $HOME/opnfv/cache/$INSTALLER_TYPE + description: "Directory where the cache to be used during the build is located." + - string: + name: GS_URL + default: artifacts.opnfv.org/$PROJECT{gs-pathname} + description: "URL to Google Storage." 
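+# Illustration (example values, not live configuration): with the danube
+# stream's gs-pathname '/danube', the GS_URL default above expands at
+# job-generation time to artifacts.opnfv.org/$PROJECT/danube, while the
+# master stream's empty gs-pathname leaves artifacts.opnfv.org/$PROJECT.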
+######################## +# trigger macros +######################## +#----------------------------------------------- +# Triggers for job running on fuel-baremetal against master branch +#----------------------------------------------- +# HA Scenarios +- trigger: + name: 'fuel-os-nosdn-nofeature-ha-baremetal-weekly-master-trigger' + triggers: + - timed: '' diff --git a/jjb/functest/functest-ci-jobs.yml b/jjb/functest/functest-daily-jobs.yml similarity index 90% rename from jjb/functest/functest-ci-jobs.yml rename to jjb/functest/functest-daily-jobs.yml index 49901bea2..a3268d3e5 100644 --- a/jjb/functest/functest-ci-jobs.yml +++ b/jjb/functest/functest-daily-jobs.yml @@ -2,9 +2,9 @@ # job configuration for functest ################################### - project: - name: functest + name: functest-daily - project: '{name}' + project: functest #-------------------------------- # BRANCH ANCHORS @@ -88,14 +88,14 @@ slave-label: '{pod}' installer: apex <<: *master -# - apex-verify-danube: -# slave-label: '{pod}' -# installer: apex -# <<: *danube -# - apex-daily-danube: -# slave-label: '{pod}' -# installer: apex -# <<: *danube + - apex-verify-danube: + slave-label: '{pod}' + installer: apex + <<: *danube + - apex-daily-danube: + slave-label: '{pod}' + installer: apex + <<: *danube # armband CI PODs - armband-baremetal: slave-label: armband-baremetal @@ -113,6 +113,15 @@ slave-label: armband-virtual installer: fuel <<: *danube +# daisy CI PODs + - baremetal: + slave-label: daisy-baremetal + installer: daisy + <<: *master + - virtual: + slave-label: daisy-virtual + installer: daisy + <<: *master # netvirt 3rd party ci - virtual: slave-label: odl-netvirt-virtual @@ -149,6 +158,10 @@ slave-label: '{pod}' installer: fuel <<: *master + - arm-pod3-2: + slave-label: '{pod}' + installer: fuel + <<: *master - zte-pod1: slave-label: '{pod}' installer: fuel @@ -177,6 +190,10 @@ slave-label: '{pod}' installer: fuel <<: *danube + - arm-pod3-2: + slave-label: '{pod}' + installer: fuel + <<: *danube # PODs for verify jobs triggered by each patch upload - ool-virtual1: slave-label: '{pod}' @@ -189,8 +206,6 @@ job-timeout: 60 - 'daily': job-timeout: 180 - - 'weekly': - job-timeout: 400 jobs: - 'functest-{installer}-{pod}-{testsuite}-{stream}' @@ -243,7 +258,7 @@ builders: - description-setter: - description: "POD: $NODE_NAME" + description: "Built on $NODE_NAME" - 'functest-{testsuite}-builder' ######################## @@ -256,13 +271,6 @@ name: FUNCTEST_SUITE_NAME default: 'daily' description: "Daily suite name to run" -- parameter: - name: functest-weekly-parameter - parameters: - - string: - name: FUNCTEST_SUITE_NAME - default: 'weekly' - description: "Weekly suite name to run" - parameter: name: functest-suite-parameter parameters: @@ -275,6 +283,7 @@ - 'tempest_smoke_serial' - 'rally_sanity' - 'odl' + - 'odl_netvirt' - 'onos' - 'promise' - 'doctor' @@ -332,23 +341,12 @@ - 'functest-store-results' - 'functest-exit' -- builder: - name: functest-weekly-builder - builders: - - 'functest-cleanup' - - 'set-functest-env' - - 'functest-weekly' - - 'functest-store-results' - - 'functest-exit' - - builder: name: functest-suite-builder builders: - 'functest-cleanup' - 'set-functest-env' - 'functest-suite' - - 'functest-store-results' - - 'functest-exit' - builder: name: functest-daily @@ -356,11 +354,6 @@ - shell: !include-raw: ./functest-loop.sh -- builder: - name: functest-weekly - builders: - - shell: - !include-raw: ./functest-loop.sh - builder: name: functest-suite diff --git 
a/jjb/functest/functest-project-jobs.yml b/jjb/functest/functest-project-jobs.yml
index 42c19a777..14ad73a91 100644
--- a/jjb/functest/functest-project-jobs.yml
+++ b/jjb/functest/functest-project-jobs.yml
@@ -53,6 +53,7 @@
                 branches:
                   - branch-compare-type: 'ANT'
                     branch-pattern: '**/{branch}'
+                disable-strict-forbidden-file-verification: 'true'
                 forbidden-file-paths:
                   - compare-type: ANT
                     pattern: 'docs/**|.gitignore'
diff --git a/jjb/functest/functest-suite.sh b/jjb/functest/functest-suite.sh
index f28d3d037..228cc3da4 100755
--- a/jjb/functest/functest-suite.sh
+++ b/jjb/functest/functest-suite.sh
@@ -1,19 +1,18 @@
 #!/bin/bash
-set -e
 
-echo "Functest: run $FUNCTEST_SUITE_NAME on branch $BRANCH"
-if [[ "$BRANCH" =~ 'brahmaputra' ]]; then
-    cmd="${FUNCTEST_REPO_DIR}/docker/run_tests.sh --test $FUNCTEST_SUITE_NAME"
-elif [[ "$BRANCH" =~ 'colorado' ]]; then
-    cmd="python ${FUNCTEST_REPO_DIR}/ci/run_tests.py -t $FUNCTEST_SUITE_NAME"
-else
-    cmd="functest testcase run $FUNCTEST_SUITE_NAME"
-fi
 container_id=$(docker ps -a | grep opnfv/functest | awk '{print $1}' | head -1)
-docker exec $container_id $cmd
+if [ -z "$container_id" ]; then
+    echo "Functest container not found"
+    exit 1
+fi
+
+global_ret_val=0
 
-ret_value=$?
-ret_val_file="${HOME}/opnfv/functest/results/${BRANCH##*/}/return_value"
-echo ${ret_value}>${ret_val_file}
+tests=($(echo $FUNCTEST_SUITE_NAME | tr "," "\n"))
+for test in ${tests[@]}; do
+    cmd="python /home/opnfv/repos/functest/functest/ci/run_tests.py -t $test"
+    docker exec $container_id $cmd
+    let global_ret_val+=$?
+done
 
-exit 0
+exit $global_ret_val
diff --git a/jjb/functest/functest-weekly-jobs.yml b/jjb/functest/functest-weekly-jobs.yml
new file mode 100644
index 000000000..f44f7b8aa
--- /dev/null
+++ b/jjb/functest/functest-weekly-jobs.yml
@@ -0,0 +1,124 @@
+###################################
+# job configuration for functest
+###################################
+- project:
+    name: functest-weekly
+
+    project: functest
+
+#--------------------------------
+# BRANCH ANCHORS
+#--------------------------------
+    master: &master
+        stream: master
+        branch: '{stream}'
+        gs-pathname: ''
+        docker-tag: 'latest'
+        disabled: false
+    danube: &danube
+        stream: danube
+        branch: 'stable/{stream}'
+        gs-pathname: '/{stream}'
+        docker-tag: 'stable'
+        disabled: true
+#--------------------------------
+# POD, INSTALLER, AND BRANCH MAPPING
+#--------------------------------
+# Installers using labels
+# CI PODs
+# This section should only contain the installers
+# that have been switched using labels for slaves
+#--------------------------------
+    pod:
+# fuel CI PODs
+        - baremetal:
+            slave-label: fuel-baremetal
+            installer: fuel
+            <<: *master
+        - virtual:
+            slave-label: fuel-virtual
+            installer: fuel
+            <<: *master
+        - baremetal:
+            slave-label: fuel-baremetal
+            installer: fuel
+            <<: *danube
+        - virtual:
+            slave-label: fuel-virtual
+            installer: fuel
+            <<: *danube
+#--------------------------------
+    jobs:
+        - 'functest-{installer}-{pod}-weekly-{stream}'
+
+################################
+# job template
+################################
+- job-template:
+    name: 'functest-{installer}-{pod}-weekly-{stream}'
+
+    disabled: '{obj:disabled}'
+
+    concurrent: true
+
+    properties:
+        - logrotate-default
+        - throttle:
+            enabled: true
+            max-per-node: 1
+            option: 'project'
+
+    wrappers:
+        - build-name:
+            name: '$BUILD_NUMBER Suite: $FUNCTEST_SUITE_NAME Scenario: $DEPLOY_SCENARIO'
+        - timeout:
+            timeout: '400'
+            abort: true
+
+    parameters:
+        - project-parameter:
+            project: '{project}'
+            branch: '{branch}'
+        
- '{installer}-defaults' + - '{slave-label}-defaults' + - string: + name: FUNCTEST_SUITE_NAME + default: 'weekly' + description: "Weekly suite name to run" + - string: + name: DEPLOY_SCENARIO + default: 'os-odl_l2-nofeature-ha' + - string: + name: DOCKER_TAG + default: '{docker-tag}' + description: 'Tag to pull docker image' + - string: + name: CLEAN_DOCKER_IMAGES + default: 'false' + description: 'Remove downloaded docker images (opnfv/functest*:*)' + - functest-parameter: + gs-pathname: '{gs-pathname}' + + scm: + - git-scm + + builders: + - description-setter: + description: "Built on $NODE_NAME" + - 'functest-weekly-builder' +######################## +# builder macros +######################## +- builder: + name: functest-weekly-builder + builders: + - shell: + !include-raw: ./functest-cleanup.sh + - shell: + !include-raw: ./set-functest-env.sh + - shell: + !include-raw: ./functest-loop.sh + - shell: + !include-raw: ../../utils/push-test-logs.sh + - shell: + !include-raw: ./functest-exit.sh diff --git a/jjb/functest/set-functest-env.sh b/jjb/functest/set-functest-env.sh index abec480dc..05e3d5792 100755 --- a/jjb/functest/set-functest-env.sh +++ b/jjb/functest/set-functest-env.sh @@ -17,32 +17,34 @@ if [[ ${RC_FILE_PATH} != '' ]] && [[ -f ${RC_FILE_PATH} ]] ; then echo "Credentials file detected: ${RC_FILE_PATH}" # volume if credentials file path is given to Functest rc_file_vol="-v ${RC_FILE_PATH}:/home/opnfv/functest/conf/openstack.creds" + RC_FLAG=1 fi if [[ ${INSTALLER_TYPE} == 'apex' ]]; then ssh_options="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no" - if sudo virsh list | grep instack; then - instack_mac=$(sudo virsh domiflist instack | grep default | \ - grep -Eo "[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+") - elif sudo virsh list | grep undercloud; then - instack_mac=$(sudo virsh domiflist undercloud | grep default | \ + if sudo virsh list | grep undercloud; then + echo "Installer VM detected" + undercloud_mac=$(sudo virsh domiflist undercloud | grep default | \ grep -Eo "[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+") + INSTALLER_IP=$(/usr/sbin/arp -e | grep ${undercloud_mac} | awk {'print $1'}) + sshkey_vol="-v /root/.ssh/id_rsa:/root/.ssh/id_rsa" + sudo scp $ssh_options root@${INSTALLER_IP}:/home/stack/stackrc ${HOME}/stackrc + stackrc_vol="-v ${HOME}/stackrc:/home/opnfv/functest/conf/stackrc" + + if sudo iptables -C FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable 2> ${redirect}; then + sudo iptables -D FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable + fi + if sudo iptables -C FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable 2> ${redirect}; then + sudo iptables -D FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable + fi + elif [[ "$RC_FLAG" == 1 ]]; then + echo "No available installer VM, but credentials provided...continuing" else - echo "No available installer VM exists...exiting" + echo "No available installer VM exists and no credentials provided...exiting" exit 1 fi - INSTALLER_IP=$(/usr/sbin/arp -e | grep ${instack_mac} | awk {'print $1'}) - sshkey_vol="-v /root/.ssh/id_rsa:/root/.ssh/id_rsa" - sudo scp $ssh_options root@${INSTALLER_IP}:/home/stack/stackrc ${HOME}/stackrc - stackrc_vol="-v ${HOME}/stackrc:/home/opnfv/functest/conf/stackrc" - if sudo iptables -C FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable 2> ${redirect}; then - sudo iptables -D FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable - fi - if sudo iptables -C FORWARD -i 
virbr0 -j REJECT --reject-with icmp-port-unreachable 2> ${redirect}; then - sudo iptables -D FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable - fi fi diff --git a/jjb/global/releng-macros.yml b/jjb/global/releng-macros.yml index 9b09e315f..63613f88d 100644 --- a/jjb/global/releng-macros.yml +++ b/jjb/global/releng-macros.yml @@ -61,7 +61,21 @@ choosing-strategy: 'gerrit' refspec: '$GERRIT_REFSPEC' <<: *git-scm-defaults - +- scm: + name: git-scm-with-submodules + scm: + - git: + credentials-id: '$SSH_CREDENTIAL_ID' + url: '$GIT_BASE' + refspec: '' + branches: + - 'refs/heads/{branch}' + skip-tag: true + wipe-workspace: true + submodule: + recursive: true + timeout: 20 + shallow-clone: true - trigger: name: 'daily-trigger-disabled' triggers: @@ -72,7 +86,6 @@ triggers: - timed: '' -# NOTE: unused macro, but we may use this for some jobs. - trigger: name: gerrit-trigger-patchset-created triggers: @@ -86,12 +99,22 @@ - draft-published-event - comment-added-contains-event: comment-contains-value: 'recheck' + - comment-added-contains-event: + comment-contains-value: 'reverify' projects: - project-compare-type: 'ANT' project-pattern: '{project}' branches: - branch-compare-type: 'ANT' branch-pattern: '**/{branch}' + file-paths: + - compare-type: 'ANT' + pattern: '{files}' + skip-vote: + successful: false + failed: false + unstable: false + notbuilt: false - trigger: name: gerrit-trigger-change-merged @@ -108,6 +131,9 @@ branches: - branch-compare-type: 'ANT' branch-pattern: '**/{branch}' + file-paths: + - compare-type: 'ANT' + pattern: '{files}' - trigger: name: 'experimental' @@ -426,7 +452,7 @@ name: clean-workspace-log builders: - shell: | - find $WORKSPACE -type f -print -name '*.log' | xargs rm -f + find $WORKSPACE -type f -name '*.log' | xargs rm -f - publisher: name: archive-artifacts @@ -436,3 +462,23 @@ allow-empty: true fingerprint: true latest-only: true + +- publisher: + name: publish-coverage + publishers: + - cobertura: + report-file: "coverage.xml" + only-stable: "true" + health-auto-update: "false" + stability-auto-update: "false" + zoom-coverage-chart: "true" + targets: + - files: + healthy: 10 + unhealthy: 20 + failing: 30 + - method: + healthy: 50 + unhealthy: 40 + failing: 30 + diff --git a/jjb/global/slave-params.yml b/jjb/global/slave-params.yml index 429828e8e..1905a098a 100644 --- a/jjb/global/slave-params.yml +++ b/jjb/global/slave-params.yml @@ -25,11 +25,11 @@ default-slaves: - lf-pod1 - parameter: - name: 'apex-daily-colorado-defaults' + name: 'apex-daily-danube-defaults' parameters: - label: name: SLAVE_LABEL - default: 'apex-daily-colorado' + default: 'apex-daily-danube' - string: name: GIT_BASE default: https://gerrit.opnfv.org/gerrit/$PROJECT @@ -71,11 +71,11 @@ - intel-virtual4 - intel-virtual5 - parameter: - name: 'apex-verify-colorado-defaults' + name: 'apex-verify-danube-defaults' parameters: - label: name: SLAVE_LABEL - default: 'apex-verify-colorado' + default: 'apex-verify-danube' - string: name: GIT_BASE default: https://gerrit.opnfv.org/gerrit/$PROJECT @@ -381,6 +381,20 @@ name: GIT_BASE default: https://gerrit.opnfv.org/gerrit/$PROJECT description: 'Git URL to use on this Jenkins Slave' +- parameter: + name: 'cengn-pod1-defaults' + parameters: + - node: + name: SLAVE_NAME + description: 'Slave name on Jenkins' + allowed-slaves: + - cengn-pod1 + default-slaves: + - cengn-pod1 + - string: + name: GIT_BASE + default: https://gerrit.opnfv.org/gerrit/$PROJECT + description: 'Git URL to use on this Jenkins Slave' - parameter: name: 
'intel-pod1-defaults'
    parameters:
@@ -732,6 +746,24 @@
         name: LAB_CONFIG_URL
         default: ssh://jenkins-enea@gerrit.opnfv.org:29418/securedlab
         description: 'Base URI to the configuration directory'
+- parameter:
+    name: 'arm-pod3-2-defaults'
+    parameters:
+        - node:
+            name: SLAVE_NAME
+            description: 'Slave name on Jenkins'
+            allowed-slaves:
+                - arm-pod3-2
+            default-slaves:
+                - arm-pod3-2
+        - string:
+            name: GIT_BASE
+            default: https://gerrit.opnfv.org/gerrit/$PROJECT
+            description: 'Git URL to use on this Jenkins Slave'
+        - string:
+            name: LAB_CONFIG_URL
+            default: ssh://jenkins-enea@gerrit.opnfv.org:29418/securedlab
+            description: 'Base URI to the configuration directory'
 - parameter:
     name: 'intel-virtual6-defaults'
     parameters:
diff --git a/jjb/infra/bifrost-cleanup-job.yml b/jjb/infra/bifrost-cleanup-job.yml
new file mode 100644
index 000000000..571e275da
--- /dev/null
+++ b/jjb/infra/bifrost-cleanup-job.yml
@@ -0,0 +1,140 @@
+- project:
+    name: 'openstack-bifrost-cleanup'
+#--------------------------------
+# branches
+#--------------------------------
+    stream:
+        - master:
+            branch: '{stream}'
+
+#--------------------------------
+# projects
+#--------------------------------
+    project:
+        - 'openstack':
+            project-repo: 'https://git.openstack.org/openstack/bifrost'
+            clone-location: '/opt/bifrost'
+        - 'opnfv':
+            project-repo: 'https://gerrit.opnfv.org/gerrit/releng'
+            clone-location: '/opt/releng'
+
+#--------------------------------
+# jobs
+#--------------------------------
+    jobs:
+        - '{project}-bifrost-cleanup-{stream}'
+
+- job-template:
+    name: '{project}-bifrost-cleanup-{stream}'
+
+    concurrent: false
+
+    node: bifrost-verify-virtual
+
+    # Make sure no verify job is running on any of the slaves since that would
+    # produce build logs after we wipe the destination directory.
+    properties:
+        - build-blocker:
+            blocking-jobs:
+                - '{project}-bifrost-verify-*'
+
+    parameters:
+        - string:
+            name: PROJECT
+            default: '{project}'
+
+    builders:
+        - shell: |
+            #!/bin/bash
+
+            set -eu
+
+            # DO NOT change this unless you know what you are doing.
+            BIFROST_GS_URL="gs://artifacts.opnfv.org/cross-community-ci/openstack/bifrost/$GERRIT_NAME/$GERRIT_CHANGE_NUMBER/"
+
+            # This should never happen... even 'recheck' uses the last job's
+            # Gerrit information. Better to exit with an error so we can investigate.
+            [[ -z $GERRIT_NAME ]] || [[ -z $GERRIT_CHANGE_NUMBER ]] && exit 1
+
+            echo "Removing build artifacts for $GERRIT_NAME/$GERRIT_CHANGE_NUMBER"
+
+            if ! [[ "$BIFROST_GS_URL" =~ "/cross-community-ci/openstack/bifrost/" ]]; then
+                echo "Oops! BIFROST_GS_URL=$BIFROST_GS_URL does not seem like a valid"
+                echo "bifrost location on the Google storage server. Please double-check"
+                echo "that it's set properly or fix this line if necessary."
+                echo "gsutil will not be executed until this is fixed!"
+                exit 1
+            fi
+            # No force (-f). 
We always verify upstream jobs so if there are no logs + # something else went wrong and we need to break immediately and investigate + gsutil -m rm -r $BIFROST_GS_URL + + triggers: + - '{project}-gerrit-trigger-cleanup': + branch: '{branch}' + + publishers: + - email: + recipients: fatih.degirmenci@ericsson.com yroblamo@redhat.com mchandras@suse.de jack.morgan@intel.com zhang.jun3g@zte.com.cn +#-------------------------------- +# trigger macros +#-------------------------------- +- trigger: + name: 'openstack-gerrit-trigger-cleanup' + triggers: + - gerrit: + server-name: 'review.openstack.org' + escape-quotes: true + trigger-on: + # We only run this when the change is merged or + # abandoned since we don't need the logs anymore + - patchset-uploaded-event: 'false' + - change-merged-event: 'true' + - change-abandoned-event: 'true' + - change-restored-event: 'false' + - draft-published-event: 'false' + # This is an OPNFV maintenance job. We don't want to provide + # feedback on Gerrit + silent: true + silent-start: true + projects: + - project-compare-type: 'PLAIN' + project-pattern: 'openstack/bifrost' + branches: + - branch-compare-type: 'ANT' + branch-pattern: '**/{branch}' + disable-strict-forbidden-file-verification: 'true' + forbidden-file-paths: + - compare-type: ANT + pattern: 'doc/**' + - compare-type: ANT + pattern: 'releasenotes/**' + disable-strict-forbidden-file-verification: 'true' + readable-message: true +- trigger: + name: 'opnfv-gerrit-trigger-cleanup' + triggers: + - gerrit: + server-name: 'gerrit.opnfv.org' + trigger-on: + # We only run this when the change is merged or + # abandoned since we don't need the logs anymore + - patchset-uploaded-event: 'false' + - change-merged-event: 'true' + - change-abandoned-event: 'true' + - change-restored-event: 'false' + - draft-published-event: 'false' + # This is an OPNFV maintenance job. 
We don't want to provide + # feedback on Gerrit + silent: true + silent-start: true + projects: + - project-compare-type: 'ANT' + project-pattern: 'releng' + branches: + - branch-compare-type: 'ANT' + branch-pattern: '**/{branch}' + file-paths: + - compare-type: ANT + pattern: 'prototypes/bifrost/**' + readable-message: true diff --git a/jjb/infra/bifrost-verify-jobs.yml b/jjb/infra/bifrost-verify-jobs.yml index c99023edf..33032bc7b 100644 --- a/jjb/infra/bifrost-verify-jobs.yml +++ b/jjb/infra/bifrost-verify-jobs.yml @@ -147,7 +147,7 @@ publishers: - email: - recipients: fatih.degirmenci@ericsson.com yroblamo@redhat.com mchandras@suse.de jack.morgan@intel.com zhang.jun3g@zte.com.cn + recipients: fatih.degirmenci@ericsson.com yroblamo@redhat.com mchandras@suse.de jack.morgan@intel.com julienjut@gmail.com #-------------------------------- # trigger macros #-------------------------------- @@ -172,11 +172,13 @@ branches: - branch-compare-type: 'ANT' branch-pattern: '**/{branch}' + disable-strict-forbidden-file-verification: 'true' forbidden-file-paths: - compare-type: ANT pattern: 'doc/**' - compare-type: ANT pattern: 'releasenotes/**' + disable-strict-forbidden-file-verification: 'true' readable-message: true - trigger: name: 'opnfv-gerrit-trigger' diff --git a/jjb/ipv6/ipv6.yml b/jjb/ipv6/ipv6.yml index a6745cd99..b0db7640a 100644 --- a/jjb/ipv6/ipv6.yml +++ b/jjb/ipv6/ipv6.yml @@ -53,6 +53,7 @@ branches: - branch-compare-type: 'ANT' branch-pattern: '**/{branch}' + disable-strict-forbidden-file-verification: 'true' forbidden-file-paths: - compare-type: ANT pattern: 'docs/**|.gitignore' diff --git a/jjb/joid/joid-daily-jobs.yml b/jjb/joid/joid-daily-jobs.yml index b28dd6025..97e290c8a 100644 --- a/jjb/joid/joid-daily-jobs.yml +++ b/jjb/joid/joid-daily-jobs.yml @@ -20,7 +20,7 @@ danube: &danube stream: danube branch: 'stable/{stream}' - disabled: true + disabled: false gs-pathname: '/{stream}' #-------------------------------- # POD, INSTALLER, AND BRANCH MAPPING @@ -46,6 +46,9 @@ - orange-pod1: slave-label: orange-pod1 <<: *master + - cengn-pod1: + slave-label: cengn-pod1 + <<: *master #-------------------------------- # scenarios #-------------------------------- @@ -154,6 +157,23 @@ build-step-failure-threshold: 'never' failure-threshold: 'never' unstable-threshold: 'FAILURE' + # 1.dovetail only master by now, not sync with A/B/C branches + # 2.here the stream means the SUT stream, dovetail stream is defined in its own job + # 3.only debug testsuite here(includes basic testcase, + # i.e. 
one tempest smoke ipv6, two vping from functest) + # 4.not used for release criteria or compliance, + # only to debug the dovetail tool bugs with joid + - trigger-builds: + - project: 'dovetail-joid-{pod}-debug-{stream}' + current-parameters: false + predefined-parameters: + DEPLOY_SCENARIO={scenario} + block: true + same-node: true + block-thresholds: + build-step-failure-threshold: 'never' + failure-threshold: 'never' + unstable-threshold: 'FAILURE' - job-template: name: 'joid-deploy-{pod}-daily-{stream}' @@ -232,6 +252,10 @@ name: 'joid-os-nosdn-nofeature-ha-orange-pod1-master-trigger' triggers: - timed: '' +- trigger: + name: 'joid-os-nosdn-nofeature-ha-cengn-pod1-master-trigger' + triggers: + - timed: '' # os-nosdn-nofeature-ha trigger - branch: danube - trigger: name: 'joid-os-nosdn-nofeature-ha-baremetal-danube-trigger' @@ -245,6 +269,10 @@ name: 'joid-os-nosdn-nofeature-ha-orange-pod1-danube-trigger' triggers: - timed: '' +- trigger: + name: 'joid-os-nosdn-nofeature-ha-cengn-pod1-danube-trigger' + triggers: + - timed: '' # os-odl_l2-nofeature-ha trigger - branch: master - trigger: name: 'joid-os-odl_l2-nofeature-ha-baremetal-master-trigger' @@ -258,6 +286,10 @@ name: 'joid-os-odl_l2-nofeature-ha-orange-pod1-master-trigger' triggers: - timed: '' +- trigger: + name: 'joid-os-odl_l2-nofeature-ha-cengn-pod1-master-trigger' + triggers: + - timed: '' # os-odl_l2-nofeature-ha trigger - branch: danube - trigger: name: 'joid-os-odl_l2-nofeature-ha-baremetal-danube-trigger' @@ -271,6 +303,10 @@ name: 'joid-os-odl_l2-nofeature-ha-orange-pod1-danube-trigger' triggers: - timed: '' +- trigger: + name: 'joid-os-odl_l2-nofeature-ha-cengn-pod1-danube-trigger' + triggers: + - timed: '' # os-onos-nofeature-ha trigger - branch: master - trigger: name: 'joid-os-onos-nofeature-ha-baremetal-master-trigger' @@ -284,6 +320,10 @@ name: 'joid-os-onos-nofeature-ha-orange-pod1-master-trigger' triggers: - timed: '' +- trigger: + name: 'joid-os-onos-nofeature-ha-cengn-pod1-master-trigger' + triggers: + - timed: '' # os-onos-nofeature-ha trigger - branch: danube - trigger: name: 'joid-os-onos-nofeature-ha-baremetal-danube-trigger' @@ -297,6 +337,10 @@ name: 'joid-os-onos-nofeature-ha-orange-pod1-danube-trigger' triggers: - timed: '' +- trigger: + name: 'joid-os-onos-nofeature-ha-cengn-pod1-danube-trigger' + triggers: + - timed: '' # os-onos-sfc-ha trigger - branch: master - trigger: name: 'joid-os-onos-sfc-ha-baremetal-master-trigger' @@ -310,6 +354,10 @@ name: 'joid-os-onos-sfc-ha-orange-pod1-master-trigger' triggers: - timed: '' +- trigger: + name: 'joid-os-onos-sfc-ha-cengn-pod1-master-trigger' + triggers: + - timed: '' # os-onos-sfc-ha trigger - branch: danube - trigger: name: 'joid-os-onos-sfc-ha-baremetal-danube-trigger' @@ -323,6 +371,10 @@ name: 'joid-os-onos-sfc-ha-orange-pod1-danube-trigger' triggers: - timed: '' +- trigger: + name: 'joid-os-onos-sfc-ha-cengn-pod1-danube-trigger' + triggers: + - timed: '' # os-nosdn-lxd-noha trigger - branch: master - trigger: name: 'joid-os-nosdn-lxd-noha-baremetal-master-trigger' @@ -336,6 +388,10 @@ name: 'joid-os-nosdn-lxd-noha-orange-pod1-master-trigger' triggers: - timed: '' +- trigger: + name: 'joid-os-nosdn-lxd-noha-cengn-pod1-master-trigger' + triggers: + - timed: '' # os-nosdn-lxd-noha trigger - branch: danube - trigger: name: 'joid-os-nosdn-lxd-noha-baremetal-danube-trigger' @@ -349,6 +405,10 @@ name: 'joid-os-nosdn-lxd-noha-orange-pod1-danube-trigger' triggers: - timed: '' +- trigger: + name: 'joid-os-nosdn-lxd-noha-cengn-pod1-danube-trigger' + triggers: + - 
timed: '' # os-nosdn-lxd-ha trigger - branch: master - trigger: name: 'joid-os-nosdn-lxd-ha-baremetal-master-trigger' @@ -362,6 +422,10 @@ name: 'joid-os-nosdn-lxd-ha-orange-pod1-master-trigger' triggers: - timed: '' +- trigger: + name: 'joid-os-nosdn-lxd-ha-cengn-pod1-master-trigger' + triggers: + - timed: '' # os-nosdn-lxd-ha trigger - branch: danube - trigger: name: 'joid-os-nosdn-lxd-ha-baremetal-danube-trigger' @@ -375,6 +439,10 @@ name: 'joid-os-nosdn-lxd-ha-orange-pod1-danube-trigger' triggers: - timed: '' +- trigger: + name: 'joid-os-nosdn-lxd-ha-cengn-pod1-danube-trigger' + triggers: + - timed: '' # os-nosdn-nofeature-noha trigger - branch: master - trigger: name: 'joid-os-nosdn-nofeature-noha-baremetal-master-trigger' @@ -388,6 +456,10 @@ name: 'joid-os-nosdn-nofeature-noha-orange-pod1-master-trigger' triggers: - timed: '' +- trigger: + name: 'joid-os-nosdn-nofeature-noha-cengn-pod1-master-trigger' + triggers: + - timed: '' # os-nosdn-nofeature-noha trigger - branch: danube - trigger: name: 'joid-os-nosdn-nofeature-noha-baremetal-danube-trigger' @@ -401,6 +473,10 @@ name: 'joid-os-nosdn-nofeature-noha-orange-pod1-danube-trigger' triggers: - timed: '' +- trigger: + name: 'joid-os-nosdn-nofeature-noha-cengn-pod1-danube-trigger' + triggers: + - timed: '' # k8-nosdn-nofeature-noha trigger - branch: master - trigger: name: 'joid-k8-nosdn-nofeature-noha-baremetal-master-trigger' @@ -414,6 +490,10 @@ name: 'joid-k8-nosdn-nofeature-noha-orange-pod1-master-trigger' triggers: - timed: '' +- trigger: + name: 'joid-k8-nosdn-nofeature-noha-cengn-pod1-master-trigger' + triggers: + - timed: '' # k8-nosdn-nofeature-noha trigger - branch: danube - trigger: name: 'joid-k8-nosdn-nofeature-noha-baremetal-danube-trigger' @@ -427,6 +507,10 @@ name: 'joid-k8-nosdn-nofeature-noha-orange-pod1-danube-trigger' triggers: - timed: '' +- trigger: + name: 'joid-k8-nosdn-nofeature-noha-cengn-pod1-danube-trigger' + triggers: + - timed: '' # k8-nosdn-lb-noha trigger - branch: master - trigger: name: 'joid-k8-nosdn-lb-noha-baremetal-master-trigger' @@ -440,6 +524,10 @@ name: 'joid-k8-nosdn-lb-noha-orange-pod1-master-trigger' triggers: - timed: '' +- trigger: + name: 'joid-k8-nosdn-lb-noha-cengn-pod1-master-trigger' + triggers: + - timed: '' # k8-nosdn-lb-noha trigger - branch: danube - trigger: name: 'joid-k8-nosdn-lb-noha-baremetal-danube-trigger' @@ -453,3 +541,7 @@ name: 'joid-k8-nosdn-lb-noha-orange-pod1-danube-trigger' triggers: - timed: '' +- trigger: + name: 'joid-k8-nosdn-lb-noha-cengn-pod1-danube-trigger' + triggers: + - timed: '' diff --git a/jjb/joid/joid-verify-jobs.yml b/jjb/joid/joid-verify-jobs.yml index 7b8ce7701..03fab553e 100644 --- a/jjb/joid/joid-verify-jobs.yml +++ b/jjb/joid/joid-verify-jobs.yml @@ -86,6 +86,7 @@ branches: - branch-compare-type: 'ANT' branch-pattern: '**/{branch}' + disable-strict-forbidden-file-verification: 'true' forbidden-file-paths: - compare-type: ANT pattern: 'docs/**|.gitignore' diff --git a/jjb/models/models.yml b/jjb/models/models.yml new file mode 100644 index 000000000..683103678 --- /dev/null +++ b/jjb/models/models.yml @@ -0,0 +1,68 @@ +################################################### +# All the jobs except verify have been removed! +# They will only be enabled on request by projects! 
+################################################### +- project: + name: models + + project: '{name}' + + jobs: + - 'models-verify-{stream}' + + stream: + - master: + branch: '{stream}' + gs-pathname: '' + disabled: false + - danube: + branch: 'stable/{stream}' + gs-pathname: '/{stream}' + disabled: false + +- job-template: + name: 'models-verify-{stream}' + + disabled: '{obj:disabled}' + + parameters: + - project-parameter: + project: '{project}' + branch: '{branch}' + - 'opnfv-build-ubuntu-defaults' + + scm: + - git-scm-gerrit + + triggers: + - gerrit: + server-name: 'gerrit.opnfv.org' + trigger-on: + - patchset-created-event: + exclude-drafts: 'false' + exclude-trivial-rebase: 'false' + exclude-no-code-change: 'false' + - draft-published-event + - comment-added-contains-event: + comment-contains-value: 'recheck' + - comment-added-contains-event: + comment-contains-value: 'reverify' + projects: + - project-compare-type: 'ANT' + project-pattern: '{project}' + branches: + - branch-compare-type: 'ANT' + branch-pattern: '**/{branch}' + disable-strict-forbidden-file-verification: 'true' + forbidden-file-paths: + - compare-type: ANT + pattern: 'docs/**|.gitignore' + + builders: + - shell: | + #!/bin/bash + set -o errexit + set -o nounset + set -o pipefail + + # shellcheck -f tty tests/*.sh diff --git a/jjb/moon/moon.yml b/jjb/moon/moon.yml index a318bc54d..fb28feb53 100644 --- a/jjb/moon/moon.yml +++ b/jjb/moon/moon.yml @@ -42,6 +42,7 @@ branches: - branch-compare-type: 'ANT' branch-pattern: '**/{branch}' + disable-strict-forbidden-file-verification: 'true' forbidden-file-paths: - compare-type: ANT pattern: 'docs/**|.gitignore' diff --git a/jjb/multisite/fuel-deploy-for-multisite.sh b/jjb/multisite/fuel-deploy-for-multisite.sh index d8b40517c..06617610c 100755 --- a/jjb/multisite/fuel-deploy-for-multisite.sh +++ b/jjb/multisite/fuel-deploy-for-multisite.sh @@ -27,6 +27,9 @@ if [[ -z "FUEL_PROPERTIES_FILE" ]]; then echo "Unable to extract the url to Fuel ISO properties from ${FUEL_DEPLOY_URL}" exit 1 fi + +# use known/working version of fuel +FUEL_PROPERTIES_FILE="opnfv-2017-03-06_16-00-15.properties" curl -L -s -o $WORKSPACE/latest.properties http://artifacts.opnfv.org/fuel/$FUEL_PROPERTIES_FILE # source the file so we get OPNFV vars diff --git a/jjb/multisite/multisite-daily-jobs.yml b/jjb/multisite/multisite-daily-jobs.yml index 6b022fd75..23c95f627 100644 --- a/jjb/multisite/multisite-daily-jobs.yml +++ b/jjb/multisite/multisite-daily-jobs.yml @@ -138,8 +138,8 @@ - name: 'functest-fuel-virtual-suite-{stream}' current-parameters: false predefined-parameters: | - DEPLOY_SCENARIO='os-nosdn-multisite-noha' - FUNCTEST_SUITE_NAME='multisite' + DEPLOY_SCENARIO=os-nosdn-multisite-noha + FUNCTEST_SUITE_NAME=multisite OS_REGION=RegionOne REGIONONE_IP=100.64.209.10 REGIONTWO_IP=100.64.209.11 diff --git a/jjb/multisite/multisite-verify-jobs.yml b/jjb/multisite/multisite-verify-jobs.yml index 5ecfafb55..9431e0bac 100644 --- a/jjb/multisite/multisite-verify-jobs.yml +++ b/jjb/multisite/multisite-verify-jobs.yml @@ -19,7 +19,7 @@ - danube: branch: 'stable/{stream}' gs-pathname: '/{stream}' - disabled: true + disabled: false timed: '' - job-template: @@ -57,6 +57,7 @@ branches: - branch-compare-type: 'ANT' branch-pattern: '**/{branch}' + disable-strict-forbidden-file-verification: 'true' forbidden-file-paths: - compare-type: ANT pattern: 'docs/**|.gitignore' diff --git a/jjb/netready/netready.yml b/jjb/netready/netready.yml index 382434ae6..9a4d8858c 100644 --- a/jjb/netready/netready.yml +++ 
b/jjb/netready/netready.yml @@ -44,6 +44,7 @@ branches: - branch-compare-type: 'ANT' branch-pattern: '**/{branch}' + disable-strict-forbidden-file-verification: 'true' forbidden-file-paths: - compare-type: ANT pattern: 'docs/**' diff --git a/jjb/octopus/octopus.yml b/jjb/octopus/octopus.yml index cb66112fe..c06fa89e8 100644 --- a/jjb/octopus/octopus.yml +++ b/jjb/octopus/octopus.yml @@ -52,6 +52,7 @@ branches: - branch-compare-type: 'ANT' branch-pattern: '**/{branch}' + disable-strict-forbidden-file-verification: 'true' forbidden-file-paths: - compare-type: ANT pattern: 'docs/**|.gitignore' diff --git a/jjb/onosfw/onosfw.yml b/jjb/onosfw/onosfw.yml index 13c96718c..9d6b037e1 100644 --- a/jjb/onosfw/onosfw.yml +++ b/jjb/onosfw/onosfw.yml @@ -16,7 +16,7 @@ - danube: branch: 'stable/{stream}' gs-pathname: '/{stream}' - disabled: true + disabled: false project: 'onosfw' @@ -56,6 +56,7 @@ branches: - branch-compare-type: 'ANT' branch-pattern: '**/{branch}' + disable-strict-forbidden-file-verification: 'true' forbidden-file-paths: - compare-type: ANT pattern: 'docs/**|.gitignore' diff --git a/jjb/openretriever/openretriever-project.yml b/jjb/openretriever/openretriever-project.yml index 3d53f9b2e..3bcfab6d3 100644 --- a/jjb/openretriever/openretriever-project.yml +++ b/jjb/openretriever/openretriever-project.yml @@ -53,6 +53,7 @@ branches: - branch-compare-type: 'ANT' branch-pattern: '**/{branch}' + disable-strict-forbidden-file-verification: 'true' forbidden-file-paths: - compare-type: ANT pattern: 'docs/**|.gitignore' diff --git a/jjb/opera/opera-daily-jobs.yml b/jjb/opera/opera-daily-jobs.yml index d49caf1a6..5d2cc03f3 100644 --- a/jjb/opera/opera-daily-jobs.yml +++ b/jjb/opera/opera-daily-jobs.yml @@ -83,7 +83,7 @@ - name: 'compass-deploy-virtual-daily-{stream}' current-parameters: false predefined-parameters: | - DEPLOY_SCENARIO=os-nosdn-openo-noha + DEPLOY_SCENARIO=os-nosdn-openo-ha COMPASS_OS_VERSION=xenial node-parameters: true kill-phase-on: FAILURE diff --git a/jjb/opera/opera-verify-jobs.yml b/jjb/opera/opera-verify-jobs.yml index b7b5cb3c9..4da41d8d9 100644 --- a/jjb/opera/opera-verify-jobs.yml +++ b/jjb/opera/opera-verify-jobs.yml @@ -76,6 +76,7 @@ file-paths: - compare-type: ANT pattern: '**/*' + disable-strict-forbidden-file-verification: 'true' forbidden-file-paths: - compare-type: ANT pattern: 'docs/**' diff --git a/jjb/opnfvdocs/docs-post-rtd.sh b/jjb/opnfvdocs/docs-post-rtd.sh new file mode 100644 index 000000000..e3dc9b5f0 --- /dev/null +++ b/jjb/opnfvdocs/docs-post-rtd.sh @@ -0,0 +1,7 @@ +#!/bin/bash +if [ $GERRIT_BRANCH == "master" ]; then + RTD_BUILD_VERSION=latest +else + RTD_BUILD_VERSION=${{GERRIT_BRANCH/\//-}} +fi +curl -X POST --data "version_slug=$RTD_BUILD_VERSION" https://readthedocs.org/build/opnfvdocsdemo diff --git a/jjb/opnfvdocs/docs-rtd.yaml b/jjb/opnfvdocs/docs-rtd.yaml new file mode 100644 index 000000000..bf6d0012b --- /dev/null +++ b/jjb/opnfvdocs/docs-rtd.yaml @@ -0,0 +1,90 @@ +- project: + name: docs-rtd + jobs: + - 'docs-merge-rtd-{stream}' + - 'docs-verify-rtd-{stream}' + + stream: + - master: + branch: 'master' + - danube: + branch: 'stable/{stream}' + + project: 'opnfvdocs' + rtdproject: 'opnfv' + # TODO: Archive Artifacts + +- job-template: + name: 'docs-merge-rtd-{stream}' + + project-type: freestyle + + parameters: + - label: + name: SLAVE_LABEL + default: 'lf-build1' + description: 'Slave label on Jenkins' + - project-parameter: + project: '{project}' + branch: '{branch}' + - string: + name: GIT_BASE + default: 
https://gerrit.opnfv.org/gerrit/releng + description: 'Git URL to use on this Jenkins Slave' + scm: + - git-scm + + triggers: + - gerrit-trigger-change-merged: + project: '**' + branch: '{branch}' + files: 'docs/**/*.*' + + builders: + - shell: !include-raw: docs-post-rtd.sh + +- job-template: + name: 'docs-verify-rtd-{stream}' + + project-type: freestyle + + parameters: + - label: + name: SLAVE_LABEL + default: 'lf-build2' + description: 'Slave label on Jenkins' + - project-parameter: + project: '{project}' + branch: '{branch}' + - string: + name: GIT_BASE + default: https://gerrit.opnfv.org/gerrit/opnfvdocs + description: 'Git URL to use on this Jenkins Slave' + scm: + - git-scm-with-submodules: + branch: '{branch}' + + triggers: + - gerrit-trigger-patchset-created: + server: 'gerrit.opnfv.org' + project: '**' + branch: '{branch}' + files: 'docs/**/*.*' + - timed: 'H H * * *' + + builders: + - shell: | + if [ "$GERRIT_PROJECT" != "opnfvdocs" ]; then + cd docs/submodules/$GERRIT_PROJECT + git fetch origin $GERRIT_REFSPEC && git checkout FETCH_HEAD + else + git fetch origin $GERRIT_REFSPEC && git checkout FETCH_HEAD + fi + - shell: | + sudo pip install virtualenv + virtualenv $WORKSPACE/venv + . $WORKSPACE/venv/bin/activate + pip install --upgrade pip + pip freeze + pip install tox + tox -edocs diff --git a/jjb/opnfvdocs/opnfvdocs.yml b/jjb/opnfvdocs/opnfvdocs.yml index 12950338d..0553cd436 100644 --- a/jjb/opnfvdocs/opnfvdocs.yml +++ b/jjb/opnfvdocs/opnfvdocs.yml @@ -20,7 +20,7 @@ - danube: branch: 'stable/{stream}' gs-pathname: '/{stream}' - disabled: true + disabled: false ######################## # job templates @@ -62,6 +62,11 @@ branches: - branch-compare-type: 'ANT' branch-pattern: '**/{branch}' + skip-vote: + successful: true + failed: true + unstable: true + notbuilt: true builders: - check-bash-syntax diff --git a/jjb/ovsnfv/ovsnfv.yml b/jjb/ovsnfv/ovsnfv.yml index 937a367fb..0e8c713fd 100644 --- a/jjb/ovsnfv/ovsnfv.yml +++ b/jjb/ovsnfv/ovsnfv.yml @@ -16,7 +16,7 @@ - danube: branch: 'stable/{stream}' gs-pathname: '/{stream}' - disabled: true + disabled: false - job-template: name: 'ovsnfv-verify-{stream}' diff --git a/jjb/parser/parser.yml b/jjb/parser/parser.yml index 69fcefc20..35e97c3b3 100644 --- a/jjb/parser/parser.yml +++ b/jjb/parser/parser.yml @@ -53,9 +53,14 @@ branches: - branch-compare-type: 'ANT' branch-pattern: '**/{branch}' + disable-strict-forbidden-file-verification: 'true' forbidden-file-paths: - compare-type: ANT - pattern: 'docs/**|.gitignore' + pattern: 'docs/**' + - compare-type: ANT + pattern: 'governance/**' + - compare-type: ANT + pattern: '*.txt|.gitignore|.gitreview|INFO|LICENSE' builders: - shell: | diff --git a/jjb/pharos/pharos.yml b/jjb/pharos/pharos.yml index 6dae9f33c..12ae5cabe 100644 --- a/jjb/pharos/pharos.yml +++ b/jjb/pharos/pharos.yml @@ -53,6 +53,7 @@ branches: - branch-compare-type: 'ANT' branch-pattern: '**/{branch}' + disable-strict-forbidden-file-verification: 'true' forbidden-file-paths: - compare-type: ANT pattern: 'docs/**|.gitignore' diff --git a/jjb/prediction/prediction.yml b/jjb/prediction/prediction.yml index b380d8c86..a153a9bb0 100644 --- a/jjb/prediction/prediction.yml +++ b/jjb/prediction/prediction.yml @@ -53,6 +53,7 @@ branches: - branch-compare-type: 'ANT' branch-pattern: '**/{branch}' + disable-strict-forbidden-file-verification: 'true' forbidden-file-paths: - compare-type: ANT pattern: 'docs/**|.gitignore' diff --git a/jjb/promise/promise.yml b/jjb/promise/promise.yml index a5aa302c7..eeace5f78 100644 --- 
a/jjb/promise/promise.yml +++ b/jjb/promise/promise.yml @@ -53,6 +53,7 @@ branches: - branch-compare-type: 'ANT' branch-pattern: '**/{branch}' + disable-strict-forbidden-file-verification: 'true' forbidden-file-paths: - compare-type: ANT pattern: 'docs/**|.gitignore' diff --git a/jjb/qtip/helpers/validate-deploy.sh b/jjb/qtip/helpers/validate-deploy.sh index 16455371f..90b54a1c9 100644 --- a/jjb/qtip/helpers/validate-deploy.sh +++ b/jjb/qtip/helpers/validate-deploy.sh @@ -8,15 +8,14 @@ ############################################################################## set -e -envs="INSTALLER_TYPE=${INSTALLER_TYPE} -e INSTALLER_IP=${INSTALLER_IP} -e NODE_NAME=${NODE_NAME}" -suite="TEST_CASE=all" +envs="INSTALLER_TYPE=${INSTALLER_TYPE} -e INSTALLER_IP=${INSTALLER_IP} +-e NODE_NAME=${NODE_NAME} -e CI_DEBUG=${CI_DEBUG}" dir_imgstore="${HOME}/imgstore" -img_volume="${dir_imgstore}:/home/opnfv/imgstore" echo "Qtip: Pulling docker image: opnfv/qtip:${DOCKER_TAG}" docker pull opnfv/qtip:$DOCKER_TAG -cmd=" docker run -id -e $envs -e $suite -v ${img_volume} opnfv/qtip:${DOCKER_TAG} /bin/bash" +cmd=" docker run -id -e $envs opnfv/qtip:${DOCKER_TAG} /bin/bash" echo "Qtip: Running docker command: ${cmd}" ${cmd} @@ -27,7 +26,10 @@ if [ $(docker ps | grep 'opnfv/qtip' | wc -l) == 0 ]; then else echo "The container ID is: ${container_id}" QTIP_REPO=/home/opnfv/repos/qtip -# TODO(yujunz): execute benchmark plan for compute-qpi +# TODO(zhihui_wu): use qtip cli to execute benchmark test in the future + docker exec -t ${container_id} bash -c "cd ${QTIP_REPO}/qtip/runner/ && + python runner.py -d /home/opnfv/qtip/results/ -b all" + fi echo "Qtip done!" diff --git a/jjb/qtip/qtip-validate-jobs.yml b/jjb/qtip/qtip-validate-jobs.yml index 98f7ab90a..4cd8490fd 100644 --- a/jjb/qtip/qtip-validate-jobs.yml +++ b/jjb/qtip/qtip-validate-jobs.yml @@ -13,6 +13,11 @@ branch: '{stream}' gs-pathname: '' docker-tag: latest + danube: &danube + stream: danube + branch: 'stable/{stream}' + gs-pathname: '/{stream}' + docker-tag: 'stable' #-------------------------------- # JOB VARIABLES @@ -24,6 +29,9 @@ - zte-pod3: installer: fuel <<: *master + - zte-pod3: + installer: fuel + <<: *danube task: - daily: auto-builder-name: qtip-validate-deploy @@ -53,6 +61,10 @@ <<: *master - '{installer}-defaults' - '{pod}-defaults' + - string: + name: CI_DEBUG + default: 'false' + description: "Show debug output information" scm: - git-scm triggers: @@ -139,3 +151,4 @@ - gerrit-trigger-change-merged: project: '{project}' branch: '{branch}' + files: '**' diff --git a/jjb/qtip/qtip-verify-jobs.yml b/jjb/qtip/qtip-verify-jobs.yml index d1fc34d11..dd444c7a5 100644 --- a/jjb/qtip/qtip-verify-jobs.yml +++ b/jjb/qtip/qtip-verify-jobs.yml @@ -12,6 +12,10 @@ branch: '{stream}' gs-pathname: '' disabled: false + - danube: + branch: 'stable/{stream}' + gs-pathname: '/{stream}' + disabled: false ################################ ## job templates @@ -49,12 +53,15 @@ branches: - branch-compare-type: 'ANT' branch-pattern: '**/{branch}' + disable-strict-forbidden-file-verification: 'true' forbidden-file-paths: - compare-type: ANT pattern: 'docs/**|.gitignore' builders: - qtip-unit-tests-and-docs-build + publishers: + - publish-coverage ################################ ## job builders diff --git a/jjb/releng/opnfv-docker-arm.yml b/jjb/releng/opnfv-docker-arm.yml index 09c9f335e..ba540ed76 100644 --- a/jjb/releng/opnfv-docker-arm.yml +++ b/jjb/releng/opnfv-docker-arm.yml @@ -13,7 +13,7 @@ danube: &danube stream: danube branch: 'stable/{stream}' - disabled: true + 
disabled: false functest-arm-receivers: &functest-arm-receivers receivers: > cristina.pauna@enea.com diff --git a/jjb/releng/opnfv-docker.sh b/jjb/releng/opnfv-docker.sh index c906e1fcd..9bd711bc6 100644 --- a/jjb/releng/opnfv-docker.sh +++ b/jjb/releng/opnfv-docker.sh @@ -43,19 +43,29 @@ fi if [[ -n "$(docker images | grep $DOCKER_REPO_NAME)" ]]; then echo "Docker images to remove:" docker images | head -1 && docker images | grep $DOCKER_REPO_NAME - image_tags=($(docker images | grep $DOCKER_REPO_NAME | awk '{print $2}')) - for tag in "${image_tags[@]}"; do - if [[ -n "$(docker images|grep $DOCKER_REPO_NAME|grep $tag)" ]]; then - echo "Removing docker image $DOCKER_REPO_NAME:$tag..." - docker rmi -f $DOCKER_REPO_NAME:$tag + image_ids=($(docker images | grep $DOCKER_REPO_NAME | awk '{print $3}')) + for id in "${image_ids[@]}"; do + if [[ -n "$(docker images|grep $DOCKER_REPO_NAME|grep $id)" ]]; then + echo "Removing docker image $DOCKER_REPO_NAME:$id..." + docker rmi -f $id fi done fi cd $WORKSPACE/docker -if [ ! -f ${DOCKERFILE} ]; then - echo "ERROR: Dockerfile not found." - exit 1 +HOST_ARCH=$(uname -m) +if [ ! -f "${DOCKERFILE}" ]; then + # If this is expected to be a Dockerfile for other arch than x86 + # and it does not exist, but there is a patch for the said arch, + # then apply the patch and create the Dockerfile.${HOST_ARCH} file + if [[ "${DOCKERFILE}" == *"${HOST_ARCH}" && \ + -f "Dockerfile.${HOST_ARCH}.patch" ]]; then + patch -o Dockerfile."${HOST_ARCH}" Dockerfile \ + Dockerfile."${HOST_ARCH}".patch + else + echo "ERROR: No Dockerfile or ${HOST_ARCH} patch found." + exit 1 + fi fi # Get tag version @@ -64,7 +74,7 @@ echo "Current branch: $BRANCH" if [[ "$BRANCH" == "master" ]]; then DOCKER_TAG="latest" else - if [[ "$RELEASE_VERSION" != "" ]]; then + if [[ -n "${RELEASE_VERSION-}" ]]; then release=${BRANCH##*/} DOCKER_TAG=${release}.${RELEASE_VERSION} # e.g. colorado.1.0, colorado.2.0, colorado.3.0 @@ -77,8 +87,12 @@ fi echo "Building docker image: $DOCKER_REPO_NAME:$DOCKER_TAG" echo "--------------------------------------------------------" echo -cmd="docker build --no-cache -t $DOCKER_REPO_NAME:$DOCKER_TAG --build-arg BRANCH=$BRANCH - -f $DOCKERFILE ." +if [[ $DOCKER_REPO_NAME == *"dovetail"* ]]; then + cmd="docker build --no-cache -t $DOCKER_REPO_NAME:$DOCKER_TAG -f $DOCKERFILE ." +else + cmd="docker build --no-cache -t $DOCKER_REPO_NAME:$DOCKER_TAG --build-arg BRANCH=$BRANCH + -f $DOCKERFILE ." +fi echo ${cmd} ${cmd} diff --git a/jjb/releng/opnfv-docker.yml b/jjb/releng/opnfv-docker.yml index 90a91f802..f69992156 100644 --- a/jjb/releng/opnfv-docker.yml +++ b/jjb/releng/opnfv-docker.yml @@ -13,7 +13,7 @@ danube: &danube stream: danube branch: 'stable/{stream}' - disabled: true + disabled: false functest-receivers: &functest-receivers receivers: > jose.lausuch@ericsson.com morgan.richomme@orange.com diff --git a/jjb/releng/opnfv-lint.yml b/jjb/releng/opnfv-lint.yml index 37cdef28f..166aea8f9 100644 --- a/jjb/releng/opnfv-lint.yml +++ b/jjb/releng/opnfv-lint.yml @@ -102,7 +102,7 @@ comment-contains-value: 'reverify' projects: - project-compare-type: 'REG_EXP' - project-pattern: 'compass4nfv' + project-pattern: '' branches: - branch-compare-type: 'ANT' branch-pattern: '**/{branch}' diff --git a/jjb/releng/testapi-backup-mongodb.sh b/jjb/releng/testapi-backup-mongodb.sh index 8dba17beb..795e479d9 100644 --- a/jjb/releng/testapi-backup-mongodb.sh +++ b/jjb/releng/testapi-backup-mongodb.sh @@ -27,5 +27,5 @@ if [ $? 
!= 0 ]; then else echo "Uploading mongodump to artifact $artifact_dir" /usr/local/bin/gsutil cp -r "$workspace"/"$file_name" gs://artifacts.opnfv.org/"$artifact_dir"/ - echo "MongoDump can be found at http://artifacts.opnfv.org/$artifact_dir" + echo "MongoDump can be found at http://artifacts.opnfv.org/$artifact_dir.html" fi diff --git a/jjb/snaps/snaps.yml b/jjb/snaps/snaps.yml new file mode 100644 index 000000000..50b7c3070 --- /dev/null +++ b/jjb/snaps/snaps.yml @@ -0,0 +1,63 @@ +################################################### +# All the jobs except verify have been removed! +# They will only be enabled on request by projects! +################################################### +- project: + name: snaps + + project: '{name}' + + jobs: + - 'snaps-verify-{stream}' + + stream: + - master: + branch: '{stream}' + gs-pathname: '' + disabled: false + - danube: + branch: 'stable/{stream}' + gs-pathname: '/{stream}' + disabled: false + +- job-template: + name: 'snaps-verify-{stream}' + + disabled: '{obj:disabled}' + + parameters: + - project-parameter: + project: '{project}' + branch: '{branch}' + - 'opnfv-build-ubuntu-defaults' + + scm: + - git-scm-gerrit + + triggers: + - gerrit: + server-name: 'gerrit.opnfv.org' + trigger-on: + - patchset-created-event: + exclude-drafts: 'false' + exclude-trivial-rebase: 'false' + exclude-no-code-change: 'false' + - draft-published-event + - comment-added-contains-event: + comment-contains-value: 'recheck' + - comment-added-contains-event: + comment-contains-value: 'reverify' + projects: + - project-compare-type: 'ANT' + project-pattern: '{project}' + branches: + - branch-compare-type: 'ANT' + branch-pattern: '**/{branch}' + disable-strict-forbidden-file-verification: 'true' + forbidden-file-paths: + - compare-type: ANT + pattern: 'docs/**|.gitignore' + + builders: + - shell: | + echo "Nothing to verify!" diff --git a/jjb/storperf/storperf.yml b/jjb/storperf/storperf.yml index a04a9f4b4..709a1ebab 100644 --- a/jjb/storperf/storperf.yml +++ b/jjb/storperf/storperf.yml @@ -16,7 +16,7 @@ - danube: branch: 'stable/{stream}' gs-pathname: '/{stream}' - disabled: true + disabled: false - job-template: name: 'storperf-verify-{stream}' diff --git a/jjb/ves/ves.yml b/jjb/ves/ves.yml new file mode 100644 index 000000000..e6243f32c --- /dev/null +++ b/jjb/ves/ves.yml @@ -0,0 +1,69 @@ +################################################### +# All the jobs except verify have been removed! +# They will only be enabled on request by projects! 
+################################################### +- project: + name: ves + + project: '{name}' + + jobs: + - 'ves-verify-{stream}' + + stream: + - master: + branch: '{stream}' + gs-pathname: '' + disabled: false + - danube: + branch: 'stable/{stream}' + gs-pathname: '/{stream}' + disabled: false + +- job-template: + name: 'ves-verify-{stream}' + + disabled: '{obj:disabled}' + + parameters: + - project-parameter: + project: '{project}' + branch: '{branch}' + - 'opnfv-build-ubuntu-defaults' + + scm: + - git-scm-gerrit + + triggers: + - gerrit: + server-name: 'gerrit.opnfv.org' + trigger-on: + - patchset-created-event: + exclude-drafts: 'false' + exclude-trivial-rebase: 'false' + exclude-no-code-change: 'false' + - draft-published-event + - comment-added-contains-event: + comment-contains-value: 'recheck' + - comment-added-contains-event: + comment-contains-value: 'reverify' + projects: + - project-compare-type: 'ANT' + project-pattern: '{project}' + branches: + - branch-compare-type: 'ANT' + branch-pattern: '**/{branch}' + disable-strict-forbidden-file-verification: 'true' + forbidden-file-paths: + - compare-type: ANT + pattern: 'docs/**|.gitignore' + + builders: + - shell: | + #!/bin/bash + set -o errexit + set -o nounset + set -o pipefail + + # shellcheck -f tty tests/*.sh + # shellcheck -f tty utils/*.sh diff --git a/jjb/vswitchperf/vswitchperf.yml b/jjb/vswitchperf/vswitchperf.yml index ef0e90a76..c5c81c898 100644 --- a/jjb/vswitchperf/vswitchperf.yml +++ b/jjb/vswitchperf/vswitchperf.yml @@ -18,8 +18,8 @@ - danube: branch: 'stable/{stream}' gs-pathname: '/{stream}' - disabled: true - slave-label: 'intel-pod12' + disabled: false + slave-label: 'opnfv-build-ubuntu' - job-template: @@ -97,6 +97,7 @@ branches: - branch-compare-type: 'ANT' branch-pattern: '**/{branch}' + disable-strict-forbidden-file-verification: 'true' forbidden-file-paths: - compare-type: ANT pattern: 'docs/**' @@ -152,6 +153,7 @@ branches: - branch-compare-type: 'ANT' branch-pattern: '**/{branch}' + disable-strict-forbidden-file-verification: 'true' forbidden-file-paths: - compare-type: ANT pattern: 'docs/**' diff --git a/jjb/yardstick/yardstick-ci-jobs.yml b/jjb/yardstick/yardstick-ci-jobs.yml index 604eaed25..1f2f3122c 100644 --- a/jjb/yardstick/yardstick-ci-jobs.yml +++ b/jjb/yardstick/yardstick-ci-jobs.yml @@ -272,7 +272,7 @@ publishers: - email: - recipients: jean.gaoliang@huawei.com matthew.lijun@huawei.com + recipients: jean.gaoliang@huawei.com limingjiang@huawei.com ######################## # builder macros diff --git a/modules/opnfv/deployment/apex/adapter.py b/modules/opnfv/deployment/apex/adapter.py index cb827d886..225e17438 100644 --- a/modules/opnfv/deployment/apex/adapter.py +++ b/modules/opnfv/deployment/apex/adapter.py @@ -35,28 +35,34 @@ class ApexAdapter(manager.DeploymentHandler): return None for line in lines: - if 'controller' in line: - roles = "controller" - elif 'compute' in line: - roles = "compute" - else: + roles = [] + if any(x in line for x in ['-----', 'Networks']): continue - if 'Daylight' in line: - roles += ", OpenDaylight" + if 'controller' in line: + roles.append(manager.Role.CONTROLLER) + if 'compute' in line: + roles.append(manager.Role.COMPUTE) + if 'opendaylight' in line.lower(): + roles.append(manager.Role.ODL) + fields = line.split('|') id = re.sub('[!| ]', '', fields[1]).encode() name = re.sub('[!| ]', '', fields[2]).encode() - status_node = re.sub('[!| ]', '', fields[3]).encode() + status_node = re.sub('[!| ]', '', fields[3]).encode().lower() ip = re.sub('[!| ctlplane=]', 
'', fields[4]).encode() - if status_node.lower() == 'active': - status = manager.Node.STATUS_OK + ssh_client = None + if 'active' in status_node: + status = manager.NodeStatus.STATUS_OK ssh_client = ssh_utils.get_ssh_client(hostname=ip, username='heat-admin', pkey_file=self.pkey_file) + elif 'error' in status_node: + status = manager.NodeStatus.STATUS_ERROR + elif 'off' in status_node: + status = manager.NodeStatus.STATUS_OFFLINE else: - status = manager.Node.STATUS_INACTIVE - ssh_client = None + status = manager.NodeStatus.STATUS_INACTIVE node = manager.Node(id, ip, name, status, roles, ssh_client) nodes.append(node) @@ -73,8 +79,9 @@ class ApexAdapter(manager.DeploymentHandler): "grep Description|sed 's/^.*\: //'") cmd_ver = ("sudo yum info opendaylight 2>/dev/null|" "grep Version|sed 's/^.*\: //'") + description = None for node in self.nodes: - if 'controller' in node.get_attribute('roles'): + if node.is_controller(): description = node.run_cmd(cmd_descr) version = node.run_cmd(cmd_ver) break diff --git a/modules/opnfv/deployment/factory.py b/modules/opnfv/deployment/factory.py index e48a751ad..1ccee4e80 100644 --- a/modules/opnfv/deployment/factory.py +++ b/modules/opnfv/deployment/factory.py @@ -41,4 +41,5 @@ class Factory(object): installer_user=installer_user, installer_pwd=installer_pwd) else: - raise Exception("Installer adapter is not implemented.") + raise Exception("Installer adapter is not implemented for " + "the given installer.") diff --git a/modules/opnfv/deployment/fuel/adapter.py b/modules/opnfv/deployment/fuel/adapter.py index 3e6ef50a0..a217767ba 100644 --- a/modules/opnfv/deployment/fuel/adapter.py +++ b/modules/opnfv/deployment/fuel/adapter.py @@ -66,7 +66,7 @@ class FuelAdapter(manager.DeploymentHandler): if options and options['cluster'] and len(self.nodes) > 0: n = [] for node in self.nodes: - if node.info['cluster'] == options['cluster']: + if str(node.info['cluster']) == str(options['cluster']): n.append(node) return n @@ -114,7 +114,7 @@ class FuelAdapter(manager.DeploymentHandler): index_ip = i elif "mac" in fields[i]: index_mac = i - elif "roles " in fields[i]: + elif "roles " in fields[i] and "pending_roles" not in fields[i]: index_roles = i elif "online" in fields[i]: index_online = i @@ -124,26 +124,36 @@ class FuelAdapter(manager.DeploymentHandler): fields = lines[i].rsplit(' | ') id = fields[index_id].strip().encode() ip = fields[index_ip].strip().encode() - status_node = fields[index_status].strip().encode() + status_node = fields[index_status].strip().encode().lower() name = fields[index_name].strip().encode() - roles = fields[index_roles].strip().encode() + roles_all = fields[index_roles].strip().encode().lower() + + roles = [x for x in [manager.Role.CONTROLLER, + manager.Role.COMPUTE, + manager.Role.ODL] if x in roles_all] dict = {"cluster": fields[index_cluster].strip().encode(), "mac": fields[index_mac].strip().encode(), "status_node": status_node, "online": fields[index_online].strip().encode()} + ssh_client = None if status_node == 'ready': - status = manager.Node.STATUS_OK + status = manager.NodeStatus.STATUS_OK proxy = {'ip': self.installer_ip, 'username': self.installer_user, 'password': self.installer_pwd} ssh_client = ssh_utils.get_ssh_client(hostname=ip, username='root', proxy=proxy) + elif 'error' in status_node: + status = manager.NodeStatus.STATUS_ERROR + elif 'off' in status_node: + status = manager.NodeStatus.STATUS_OFFLINE + elif 'discover' in status_node: + status = manager.NodeStatus.STATUS_UNUSED else: - status = 
manager.Node.STATUS_INACTIVE - ssh_client = None + status = manager.NodeStatus.STATUS_INACTIVE node = manager.Node( id, ip, name, status, roles, ssh_client, dict) @@ -160,26 +170,30 @@ class FuelAdapter(manager.DeploymentHandler): cmd = 'source openrc;nova-manage version 2>/dev/null' version = None for node in self.nodes: - if 'controller' in node.get_attribute('roles'): + if node.is_controller() and node.is_active(): version = node.run_cmd(cmd) break return version def get_sdn_version(self): - cmd = "apt-cache show opendaylight|grep Version|sed 's/^.*\: //'" + cmd = "apt-cache policy opendaylight|grep Installed" version = None for node in self.nodes: - if 'controller' in node.get_attribute('roles'): + if manager.Role.ODL in node.roles and node.is_active(): odl_version = node.run_cmd(cmd) if odl_version: - version = 'OpenDaylight ' + odl_version - break + version = 'OpenDaylight ' + odl_version.split(' ')[-1] + break return version def get_deployment_status(self): - cmd = 'fuel env|grep operational' + cmd = "fuel env|tail -1|awk '{print $3}'" result = self.installer_node.run_cmd(cmd) if result is None or len(result) == 0: - return 'failed' + return 'unknown' + elif 'operational' in result: + return 'active' + elif 'deploy' in result: + return 'deploying' else: return 'active' diff --git a/modules/opnfv/deployment/manager.py b/modules/opnfv/deployment/manager.py index 8c9599b6e..df735f157 100644 --- a/modules/opnfv/deployment/manager.py +++ b/modules/opnfv/deployment/manager.py @@ -27,7 +27,7 @@ class Deployment(object): status, openstack_version, sdn_controller, - nodes=[]): + nodes=None): self.deployment_info = { 'installer': installer, @@ -89,26 +89,37 @@ class Deployment(object): sdn_controller=self.deployment_info['sdn_controller']) for node in self.deployment_info['nodes']: - s += '\t\t{node_object}\n'.format(node_object=node) + s += '{node_object}\n'.format(node_object=node) return s -class Node(object): +class Role(): + INSTALLER = 'installer' + CONTROLLER = 'controller' + COMPUTE = 'compute' + ODL = 'opendaylight' + ONOS = 'onos' + +class NodeStatus(): STATUS_OK = 'active' STATUS_INACTIVE = 'inactive' STATUS_OFFLINE = 'offline' - STATUS_FAILED = 'failed' + STATUS_ERROR = 'error' + STATUS_UNUSED = 'unused' + + +class Node(object): def __init__(self, id, ip, name, status, - roles, - ssh_client, - info={}): + roles=None, + ssh_client=None, + info=None): self.id = id self.ip = ip self.name = name @@ -117,11 +128,21 @@ class Node(object): self.roles = roles self.info = info + self.cpu_info = 'unknown' + self.memory = 'unknown' + self.ovs = 'unknown' + + if ssh_client and Role.INSTALLER not in self.roles: + sys_info = self.get_system_info() + self.cpu_info = sys_info['cpu_info'] + self.memory = sys_info['memory'] + self.ovs = self.get_ovs_info() + def get_file(self, src, dest): ''' SCP file from a node ''' - if self.status is not Node.STATUS_OK: + if self.status is not NodeStatus.STATUS_OK: logger.info("The node %s is not active" % self.ip) return 1 logger.info("Fetching %s from %s" % (src, self.ip)) @@ -137,7 +158,7 @@ class Node(object): ''' SCP file to a node ''' - if self.status is not Node.STATUS_OK: + if self.status is not NodeStatus.STATUS_OK: logger.info("The node %s is not active" % self.ip) return 1 logger.info("Copying %s to %s" % (src, self.ip)) @@ -153,14 +174,16 @@ class Node(object): ''' Run command remotely on a node ''' - if self.status is not Node.STATUS_OK: - logger.info("The node %s is not active" % self.ip) - return 1 + if self.status is not NodeStatus.STATUS_OK: + 
logger.error(
+                "Error running command %s. The node %s is not active"
+                % (cmd, self.ip))
+            return None
         _, stdout, stderr = (self.ssh_client.exec_command(cmd))
         error = stderr.readlines()
         if len(error) > 0:
             logger.error("error %s" % ''.join(error))
-            return error
+            return None
         output = ''.join(stdout.readlines()).rstrip()
         return output
@@ -174,33 +197,91 @@
             'name': self.name,
             'status': self.status,
             'roles': self.roles,
+            'cpu_info': self.cpu_info,
+            'memory': self.memory,
+            'ovs': self.ovs,
             'info': self.info
         }
 
-    def get_attribute(self, attribute):
+    def is_active(self):
         '''
-        Returns an attribute given the name
+        Returns if the node is active
         '''
-        return self.get_dict()[attribute]
+        if self.status == NodeStatus.STATUS_OK:
+            return True
+        return False
 
     def is_controller(self):
         '''
         Returns if the node is a controller
         '''
-        if 'controller' in self.get_attribute('roles'):
-            return True
-        return False
+        return Role.CONTROLLER in self.roles
 
     def is_compute(self):
         '''
         Returns if the node is a compute
         '''
-        if 'compute' in self.get_attribute('roles'):
-            return True
-        return False
+        return Role.COMPUTE in self.roles
+
+    def is_odl(self):
+        '''
+        Returns if the node is an OpenDaylight node
+        '''
+        return Role.ODL in self.roles
+
+    def get_ovs_info(self):
+        '''
+        Returns the ovs version installed
+        '''
+        if self.is_active():
+            cmd = "ovs-vsctl --version|head -1| sed 's/^.*) //'"
+            return self.run_cmd(cmd)
+        return None
+
+    def get_system_info(self):
+        '''
+        Returns the memory and cpu information of the node
+        '''
+        cmd = 'grep MemTotal /proc/meminfo'
+        memory = self.run_cmd(cmd).partition('MemTotal:')[-1].strip().encode()
+
+        cpu_info = {}
+        cmd = 'lscpu'
+        result = self.run_cmd(cmd)
+        for line in result.splitlines():
+            if line.startswith('CPU(s)'):
+                cpu_info['num_cpus'] = line.split(' ')[-1].encode()
+            elif line.startswith('Thread(s) per core'):
+                cpu_info['threads/core'] = line.split(' ')[-1].encode()
+            elif line.startswith('Core(s) per socket'):
+                cpu_info['cores/socket'] = line.split(' ')[-1].encode()
+            elif line.startswith('Model name'):
+                cpu_info['model'] = line.partition(
+                    'Model name:')[-1].strip().encode()
+            elif line.startswith('Architecture'):
+                cpu_info['arch'] = line.split(' ')[-1].encode()
+
+        return {'memory': memory, 'cpu_info': cpu_info}
 
     def __str__(self):
-        return str(self.get_dict())
+        return '''
+        name:    {name}
+        id:      {id}
+        ip:      {ip}
+        status:  {status}
+        roles:   {roles}
+        cpu:     {cpu_info}
+        memory:  {memory}
+        ovs:     {ovs}
+        info:    {info}'''.format(name=self.name,
+                                  id=self.id,
+                                  ip=self.ip,
+                                  status=self.status,
+                                  roles=self.roles,
+                                  cpu_info=self.cpu_info,
+                                  memory=self.memory,
+                                  ovs=self.ovs,
+                                  info=self.info)
 
 class DeploymentHandler(object):
@@ -236,9 +317,9 @@
             self.installer_node = Node(id='',
                                        ip=installer_ip,
                                        name=installer,
-                                       status='active',
+                                       status=NodeStatus.STATUS_OK,
                                        ssh_client=self.installer_connection,
-                                       roles='installer node')
+                                       roles=Role.INSTALLER)
         else:
             raise Exception(
                 'Cannot establish connection to the installer node!')
@@ -279,6 +360,18 @@
         '''
         return self.installer_node
 
+    def get_arch(self):
+        '''
+        Returns the architecture of the first compute node found
+        '''
+        arch = None
+        for node in self.nodes:
+            if node.is_compute():
+                arch = node.cpu_info.get('arch', None)
+                if arch:
+                    break
+        return arch
+
     def get_deployment_info(self):
         '''
         Returns an object of type Deployment
diff --git a/modules/opnfv/utils/ovs_logger.py b/modules/opnfv/utils/ovs_logger.py
index 75b4cec80..7777a9a16 100644
--- 
a/modules/opnfv/utils/ovs_logger.py +++ b/modules/opnfv/utils/ovs_logger.py @@ -7,7 +7,7 @@ # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## -import opnfv.utils.OPNFVLogger as OPNFVLogger +import opnfv.utils.opnfv_logger as OPNFVLogger import os import time import shutil @@ -101,19 +101,13 @@ class OVSLogger(object): if timestamp is None: timestamp = time.strftime("%Y%m%d-%H%M%S") - for controller_client in controller_clients: - self.ofctl_dump_flows(controller_client, - timestamp=timestamp) - self.vsctl_show(controller_client, - timestamp=timestamp) - - for compute_client in compute_clients: - self.ofctl_dump_flows(compute_client, - timestamp=timestamp) - self.vsctl_show(compute_client, - timestamp=timestamp) + clients = controller_clients + compute_clients + for client in clients: + self.ofctl_dump_flows(client, timestamp=timestamp) + self.vsctl_show(client, timestamp=timestamp) if related_error is not None: dumpdir = os.path.join(self.ovs_dir, timestamp) + self.__mkdir_p(dumpdir) with open(os.path.join(dumpdir, 'error'), 'w') as f: f.write(related_error) diff --git a/prototypes/bifrost/playbooks/test-bifrost-infracloud.yaml b/prototypes/bifrost/playbooks/test-bifrost-infracloud.yaml index 541a1f7d4..07d5e245b 100644 --- a/prototypes/bifrost/playbooks/test-bifrost-infracloud.yaml +++ b/prototypes/bifrost/playbooks/test-bifrost-infracloud.yaml @@ -54,7 +54,7 @@ dib_os_element: "{{ lookup('env','DIB_OS_ELEMENT') }}" dib_os_release: "{{ lookup('env', 'DIB_OS_RELEASE') }}" extra_dib_elements: "{{ lookup('env', 'EXTRA_DIB_ELEMENTS') | default('') }}" - dib_elements: "vm serial-console simple-init devuser infra-cloud-bridge puppet growroot {{ extra_dib_elements }}" + dib_elements: "vm enable-serial-console simple-init devuser infra-cloud-bridge puppet growroot {{ extra_dib_elements }}" dib_packages: "{{ lookup('env', 'DIB_OS_PACKAGES') }}" when: create_image_via_dib | bool == true and transform_boot_image | bool == false environment: diff --git a/prototypes/bifrost/scripts/test-bifrost-deployment.sh b/prototypes/bifrost/scripts/test-bifrost-deployment.sh index 914a906f4..b7165ffd1 100755 --- a/prototypes/bifrost/scripts/test-bifrost-deployment.sh +++ b/prototypes/bifrost/scripts/test-bifrost-deployment.sh @@ -18,6 +18,7 @@ ENABLE_VENV="false" USE_DHCP="false" USE_VENV="false" BUILD_IMAGE=true +BAREMETAL_DATA_FILE=${BAREMETAL_DATA_FILE:-'/tmp/baremetal.json'} PROVISION_WAIT_TIMEOUT=${PROVISION_WAIT_TIMEOUT:-3600} # Set defaults for ansible command-line options to drive the different @@ -50,10 +51,10 @@ INVENTORY_DHCP_STATIC_IP=false WRITE_INTERFACES_FILE=true # Set BIFROST_INVENTORY_SOURCE -export BIFROST_INVENTORY_SOURCE=/tmp/baremetal.csv +export BIFROST_INVENTORY_SOURCE=/tmp/baremetal.json # DIB custom elements path -export ELEMENTS_PATH=/usr/share/diskimage-builder/elements:/opt/puppet-infracloud/files/elements +export ELEMENTS_PATH=/opt/puppet-infracloud/files/elements # settings for console access export DIB_DEV_USER_PWDLESS_SUDO=yes @@ -79,6 +80,11 @@ source ${ANSIBLE_INSTALL_ROOT}/ansible/hacking/env-setup ANSIBLE=$(which ansible-playbook) set -x -o nounset +logs_on_exit() { + $SCRIPT_HOME/collect-test-info.sh +} +trap logs_on_exit EXIT + # Change working directory cd $BIFROST_HOME/playbooks @@ -102,7 +108,8 @@ ${ANSIBLE} -vvvv \ -e test_vm_num_nodes=${TEST_VM_NUM_NODES} \ -e test_vm_memory_size=${VM_MEMORY_SIZE} \ -e enable_venv=${ENABLE_VENV} \ - -e test_vm_domain_type=${VM_DOMAIN_TYPE} + -e 
test_vm_domain_type=${VM_DOMAIN_TYPE} \
+  -e baremetal_json_file=${BAREMETAL_DATA_FILE}
 
 # Execute the installation and VM startup test.
 ${ANSIBLE} -vvvv \
@@ -129,6 +136,4 @@ if [ $EXITCODE != 0 ]; then
     echo "****************************"
 fi
 
-$SCRIPT_HOME/collect-test-info.sh
-
 exit $EXITCODE
diff --git a/prototypes/openstack-ansible/README.md b/prototypes/openstack-ansible/README.md
new file mode 100644
index 000000000..34c1d0d03
--- /dev/null
+++ b/prototypes/openstack-ansible/README.md
@@ -0,0 +1,48 @@
+===============================
+How to deploy OpenStack-Ansible
+===============================
+The script and playbooks defined in this repo will deploy an OpenStack
+cloud based on OpenStack-Ansible.
+It needs to be combined with Bifrost: you need to use Bifrost to provision
+the six VMs. To learn how to use Bifrost, read the document at
+[/opt/releng/prototypes/bifrost/README.md].
+
+Minimal requirements:
+1. You will need at least 150 GB of free space on the partition where
+   "/var/lib/libvirt/images/" lives.
+2. Each VM needs at least 8 vCPUs, 12 GB RAM and a 60 GB HDD.
+
+After provisioning the six VMs, follow these steps:
+
+1. Run the script to deploy OpenStack:
+   cd /opt/releng/prototypes/openstack-ansible/scripts/
+   sudo ./osa_deploy.sh
+The deployment takes a long time. When it is successful, you will see the
+message "OpenStack deployed successfully".
+
+2. To verify the OpenStack operation:
+   2.1 ssh into the controller::
+       ssh 192.168.122.3
+   2.2 Enter the utility LXC container::
+       lxcname=$(lxc-ls | grep utility)
+       lxc-attach -n $lxcname
+   2.3 Verify the OpenStack API::
+       source /root/openrc
+       openstack user list
+
+This will show the following output::
++----------------------------------+--------------------+
+| ID                               | Name               |
++----------------------------------+--------------------+
+| 056f8fe41336435991fd80872731cada | aodh               |
+| 308f6436e68f40b49d3b8e7ce5c5be1e | glance             |
+| 351b71b43a66412d83f9b3cd75485875 | nova               |
+| 511129e053394aea825cce13b9f28504 | ceilometer         |
+| 5596f71319d44c8991fdc65f3927b62e | gnocchi            |
+| 586f49e3398a4c47a2f6fe50135d4941 | stack_domain_admin |
+| 601b329e6b1d427f9a1e05ed28753497 | heat               |
+| 67fe383b94964a4781345fbcc30ae434 | cinder             |
+| 729bb08351264d729506dad84ed3ccf0 | admin              |
+| 9f2beb2b270940048fe6844f0b16281e | neutron            |
+| fa68f86dd1de4ddbbb7415b4d9a54121 | keystone           |
++----------------------------------+--------------------+
diff --git a/prototypes/openstack-ansible/file/cinder.yml b/prototypes/openstack-ansible/file/cinder.yml
new file mode 100644
index 000000000..e40b39256
--- /dev/null
+++ b/prototypes/openstack-ansible/file/cinder.yml
@@ -0,0 +1,13 @@
+---
+# This file contains an example to show how to set
+# the cinder-volume service to run in a container.
+#
+# Important note:
+# When using LVM or any iSCSI-based cinder backends, such as NetApp with
+# iSCSI protocol, the cinder-volume service *must* run on metal.
+# Reference: https://bugs.launchpad.net/ubuntu/+source/lxc/+bug/1226855
+
+container_skel:
+  cinder_volumes_container:
+    properties:
+      is_metal: false
diff --git a/prototypes/openstack-ansible/file/exports b/prototypes/openstack-ansible/file/exports
new file mode 100644
index 000000000..315f79d2f
--- /dev/null
+++ b/prototypes/openstack-ansible/file/exports
@@ -0,0 +1,12 @@
+# /etc/exports: the access control list for filesystems which may be exported
+# to NFS clients. See exports(5).
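# For illustration only (this mount command is not part of the prototype):
# the /images export added at the bottom of this file is what the
# glance_nfs_client entries in file/openstack_user_config.yml consume; a
# hand-run equivalent on a controller would look roughly like:
#   mount -t nfs 172.29.244.15:/images /var/lib/glance/images
# where 172.29.244.15 is compute01's storage-network address (see
# var/ubuntu.yml).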
+# +# Example for NFSv2 and NFSv3: +# /srv/homes hostname1(rw,sync,no_subtree_check) hostname2(ro,sync,no_subtree_check) +# +# Example for NFSv4: +# /srv/nfs4 gss/krb5i(rw,sync,fsid=0,crossmnt,no_subtree_check) +# /srv/nfs4/homes gss/krb5i(rw,sync,no_subtree_check) +# +/images *(rw,sync,no_subtree_check,no_root_squash) + diff --git a/prototypes/openstack-ansible/file/modules b/prototypes/openstack-ansible/file/modules new file mode 100644 index 000000000..60a517f18 --- /dev/null +++ b/prototypes/openstack-ansible/file/modules @@ -0,0 +1,8 @@ +# /etc/modules: kernel modules to load at boot time. +# +# This file contains the names of kernel modules that should be loaded +# at boot time, one per line. Lines beginning with "#" are ignored. +# Parameters can be specified after the module name. + +bonding +8021q diff --git a/prototypes/openstack-ansible/file/openstack_user_config.yml b/prototypes/openstack-ansible/file/openstack_user_config.yml new file mode 100644 index 000000000..2811e62ce --- /dev/null +++ b/prototypes/openstack-ansible/file/openstack_user_config.yml @@ -0,0 +1,278 @@ +--- +cidr_networks: + container: 172.29.236.0/22 + tunnel: 172.29.240.0/22 + storage: 172.29.244.0/22 + +used_ips: + - "172.29.236.1,172.29.236.50" + - "172.29.240.1,172.29.240.50" + - "172.29.244.1,172.29.244.50" + - "172.29.248.1,172.29.248.50" + +global_overrides: + internal_lb_vip_address: 172.29.236.222 + external_lb_vip_address: 192.168.122.220 + tunnel_bridge: "br-vxlan" + management_bridge: "br-mgmt" + provider_networks: + - network: + container_bridge: "br-mgmt" + container_type: "veth" + container_interface: "eth1" + ip_from_q: "container" + type: "raw" + group_binds: + - all_containers + - hosts + is_container_address: true + is_ssh_address: true + - network: + container_bridge: "br-vxlan" + container_type: "veth" + container_interface: "eth10" + ip_from_q: "tunnel" + type: "vxlan" + range: "1:1000" + net_name: "vxlan" + group_binds: + - neutron_linuxbridge_agent + - network: + container_bridge: "br-vlan" + container_type: "veth" + container_interface: "eth12" + host_bind_override: "eth12" + type: "flat" + net_name: "flat" + group_binds: + - neutron_linuxbridge_agent + - network: + container_bridge: "br-vlan" + container_type: "veth" + container_interface: "eth11" + type: "vlan" + range: "1:1" + net_name: "vlan" + group_binds: + - neutron_linuxbridge_agent + - network: + container_bridge: "br-storage" + container_type: "veth" + container_interface: "eth2" + ip_from_q: "storage" + type: "raw" + group_binds: + - glance_api + - cinder_api + - cinder_volume + - nova_compute + +### +### Infrastructure +### + +# galera, memcache, rabbitmq, utility +shared-infra_hosts: + controller00: + ip: 172.29.236.11 + controller01: + ip: 172.29.236.12 + controller02: + ip: 172.29.236.13 + +# repository (apt cache, python packages, etc) +repo-infra_hosts: + controller00: + ip: 172.29.236.11 + controller01: + ip: 172.29.236.12 + controller02: + ip: 172.29.236.13 + +# load balancer +# Ideally the load balancer should not use the Infrastructure hosts. +# Dedicated hardware is best for improved performance and security. 
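# For illustration, a dedicated load balancer pair might be declared roughly
# as below (lb00/lb01 and their addresses are hypothetical, not part of this
# prototype):
#   haproxy_hosts:
#     lb00:
#       ip: 172.29.236.21
#     lb01:
#       ip: 172.29.236.22
# In this prototype the three controllers double as load balancers: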
+haproxy_hosts: + controller00: + ip: 172.29.236.11 + controller01: + ip: 172.29.236.12 + controller02: + ip: 172.29.236.13 + +# rsyslog server +#log_hosts: + # log1: + # ip: 172.29.236.14 + +### +### OpenStack +### + +# keystone +identity_hosts: + controller00: + ip: 172.29.236.11 + controller01: + ip: 172.29.236.12 + controller02: + ip: 172.29.236.13 + +# cinder api services +storage-infra_hosts: + controller00: + ip: 172.29.236.11 + controller01: + ip: 172.29.236.12 + controller02: + ip: 172.29.236.13 + +# glance +# The settings here are repeated for each infra host. +# They could instead be applied as global settings in +# user_variables, but are left here to illustrate that +# each container could have different storage targets. +image_hosts: + controller00: + ip: 172.29.236.11 + container_vars: + limit_container_types: glance + glance_nfs_client: + - server: "172.29.244.15" + remote_path: "/images" + local_path: "/var/lib/glance/images" + type: "nfs" + options: "_netdev,auto" + controller01: + ip: 172.29.236.12 + container_vars: + limit_container_types: glance + glance_nfs_client: + - server: "172.29.244.15" + remote_path: "/images" + local_path: "/var/lib/glance/images" + type: "nfs" + options: "_netdev,auto" + controller02: + ip: 172.29.236.13 + container_vars: + limit_container_types: glance + glance_nfs_client: + - server: "172.29.244.15" + remote_path: "/images" + local_path: "/var/lib/glance/images" + type: "nfs" + options: "_netdev,auto" + +# nova api, conductor, etc services +compute-infra_hosts: + controller00: + ip: 172.29.236.11 + controller01: + ip: 172.29.236.12 + controller02: + ip: 172.29.236.13 + +# heat +orchestration_hosts: + controller00: + ip: 172.29.236.11 + controller01: + ip: 172.29.236.12 + controller02: + ip: 172.29.236.13 + +# horizon +dashboard_hosts: + controller00: + ip: 172.29.236.11 + controller01: + ip: 172.29.236.12 + controller02: + ip: 172.29.236.13 + +# neutron server, agents (L3, etc) +network_hosts: + controller00: + ip: 172.29.236.11 + controller01: + ip: 172.29.236.12 + controller02: + ip: 172.29.236.13 + +# ceilometer (telemetry API) +metering-infra_hosts: + controller00: + ip: 172.29.236.11 + controller01: + ip: 172.29.236.12 + controller02: + ip: 172.29.236.13 + +# aodh (telemetry alarm service) +metering-alarm_hosts: + controller00: + ip: 172.29.236.11 + controller01: + ip: 172.29.236.12 + controller02: + ip: 172.29.236.13 + +# gnocchi (telemetry metrics storage) +metrics_hosts: + controller00: + ip: 172.29.236.11 + controller01: + ip: 172.29.236.12 + controller02: + ip: 172.29.236.13 + +# nova hypervisors +compute_hosts: + compute00: + ip: 172.29.236.14 + compute01: + ip: 172.29.236.15 + +# ceilometer compute agent (telemetry) +metering-compute_hosts: + compute00: + ip: 172.29.236.14 + compute01: + ip: 172.29.236.15 +# cinder volume hosts (NFS-backed) +# The settings here are repeated for each infra host. +# They could instead be applied as global settings in +# user_variables, but are left here to illustrate that +# each container could have different storage targets. 
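# As the comment above notes, the per-host cinder_backends settings below
# could instead be applied once globally in user_variables.yml, roughly as
# in this sketch (a single iscsi_ip_address would then be shared by all
# backends):
#   cinder_backends:
#     lvm:
#       volume_group: cinder-volumes
#       volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
#       volume_backend_name: LVM_iSCSI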
+storage_hosts: + controller00: + ip: 172.29.236.11 + container_vars: + cinder_backends: + limit_container_types: cinder_volume + lvm: + volume_group: cinder-volumes + volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver + volume_backend_name: LVM_iSCSI + iscsi_ip_address: "172.29.244.11" + controller01: + ip: 172.29.236.12 + container_vars: + cinder_backends: + limit_container_types: cinder_volume + lvm: + volume_group: cinder-volumes + volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver + volume_backend_name: LVM_iSCSI + iscsi_ip_address: "172.29.244.12" + controller02: + ip: 172.29.236.13 + container_vars: + cinder_backends: + limit_container_types: cinder_volume + lvm: + volume_group: cinder-volumes + volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver + volume_backend_name: LVM_iSCSI + iscsi_ip_address: "172.29.244.13" diff --git a/prototypes/openstack-ansible/file/user_variables.yml b/prototypes/openstack-ansible/file/user_variables.yml new file mode 100644 index 000000000..3e14bc57e --- /dev/null +++ b/prototypes/openstack-ansible/file/user_variables.yml @@ -0,0 +1,27 @@ +--- +# Copyright 2014, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +### +### This file contains commonly used overrides for convenience. Please inspect +### the defaults for each role to find additional override options. +### + +## Debug and Verbose options. 
+debug: false + +haproxy_keepalived_external_vip_cidr: "192.168.122.220/32" +haproxy_keepalived_internal_vip_cidr: "172.29.236.222/32" +haproxy_keepalived_external_interface: br-vlan +haproxy_keepalived_internal_interface: br-mgmt diff --git a/prototypes/openstack-ansible/playbooks/inventory b/prototypes/openstack-ansible/playbooks/inventory new file mode 100644 index 000000000..f53da5305 --- /dev/null +++ b/prototypes/openstack-ansible/playbooks/inventory @@ -0,0 +1,11 @@ +[jumphost] +jumphost ansible_ssh_host=192.168.122.2 + +[controller] +controller00 ansible_ssh_host=192.168.122.3 +controller01 ansible_ssh_host=192.168.122.4 +controller02 ansible_ssh_host=192.168.122.5 + +[compute] +compute00 ansible_ssh_host=192.168.122.6 +compute01 ansible_ssh_host=192.168.122.7 diff --git a/prototypes/openstack-ansible/playbooks/jumphost_configuration.yml b/prototypes/openstack-ansible/playbooks/jumphost_configuration.yml new file mode 100644 index 000000000..c51d83073 --- /dev/null +++ b/prototypes/openstack-ansible/playbooks/jumphost_configuration.yml @@ -0,0 +1,53 @@ +--- +- hosts: jumphost + remote_user: root + vars_files: + - ../var/ubuntu.yml + tasks: + - name: generate SSH keys + shell: ssh-keygen -b 2048 -t rsa -f /root/.ssh/id_rsa -q -N "" + args: + creates: /root/.ssh/id_rsa + - name: fetch public key + fetch: src="/root/.ssh/id_rsa.pub" dest="/" + - name: remove the directory + shell: "rm -rf {{OSA_PATH}} {{OSA_ETC_PATH}}" + - name: git openstack ansible + shell: "git clone {{OSA_URL}} {{OSA_PATH}} -b {{OSA_BRANCH}}" + - name: copy /opt/openstack-ansible/etc/openstack_deploy to /etc/openstack_deploy + shell: "/bin/cp -rf {{OSA_PATH}}/etc/openstack_deploy {{OSA_ETC_PATH}}" + - name: bootstrap + command: "/bin/bash ./scripts/bootstrap-ansible.sh" + args: + chdir: "{{OSA_PATH}}" + - name: generate password token + command: "python pw-token-gen.py --file /etc/openstack_deploy/user_secrets.yml" + args: + chdir: /opt/openstack-ansible/scripts/ + - name: copy openstack_user_config.yml to /etc/openstack_deploy + copy: + src: ../file/openstack_user_config.yml + dest: "{{OSA_ETC_PATH}}/openstack_user_config.yml" + - name: copy cinder.yml to /etc/openstack_deploy/env.d + copy: + src: ../file/cinder.yml + dest: "{{OSA_ETC_PATH}}/env.d/cinder.yml" + - name: copy user_variables.yml to /etc/openstack_deploy/ + copy: + src: ../file/user_variables.yml + dest: "{{OSA_ETC_PATH}}/user_variables.yml" + - name: configure network + template: + src: ../template/bifrost/controller.interface.j2 + dest: /etc/network/interfaces + notify: + - restart network service + handlers: + - name: restart network service + shell: "/sbin/ifconfig ens3 0 &&/sbin/ifdown -a && /sbin/ifup -a" + +- hosts: localhost + remote_user: root + tasks: + - name: Generate authorized_keys + shell: "/bin/cat /jumphost/root/.ssh/id_rsa.pub >> ../file/authorized_keys" diff --git a/prototypes/openstack-ansible/playbooks/targethost_configuration.yml b/prototypes/openstack-ansible/playbooks/targethost_configuration.yml new file mode 100644 index 000000000..ffe788f0e --- /dev/null +++ b/prototypes/openstack-ansible/playbooks/targethost_configuration.yml @@ -0,0 +1,61 @@ +--- +- hosts: all + remote_user: root + vars_files: + - ../var/ubuntu.yml + tasks: + - name: add public key to host + copy: + src: ../file/authorized_keys + dest: /root/.ssh/authorized_keys + - name: configure modules + copy: + src: ../file/modules + dest: /etc/modules + +- hosts: controller + remote_user: root + vars_files: + - ../var/ubuntu.yml + tasks: + - name: configure network 
+      template:
+        src: ../template/bifrost/controller.interface.j2
+        dest: /etc/network/interfaces
+      notify:
+        - restart network service
+  handlers:
+    - name: restart network service
+      shell: "/sbin/ifconfig ens3 0 &&/sbin/ifdown -a && /sbin/ifup -a"
+
+- hosts: compute
+  remote_user: root
+  vars_files:
+    - ../var/ubuntu.yml
+  tasks:
+    - name: configure network
+      template:
+        src: ../template/bifrost/compute.interface.j2
+        dest: /etc/network/interfaces
+      notify:
+        - restart network service
+  handlers:
+    - name: restart network service
+      shell: "/sbin/ifconfig ens3 0 &&/sbin/ifdown -a && /sbin/ifup -a"
+
+- hosts: compute01
+  remote_user: root
+  tasks:
+    - name: make nfs dir
+      file: "dest=/images mode=777 state=directory"
+    - name: configure service
+      shell: "echo 'nfs 2049/tcp' >> /etc/services && echo 'nfs 2049/udp' >> /etc/services"
+    - name: configure NFS
+      copy:
+        src: ../file/exports
+        dest: /etc/exports
+      notify:
+        - restart nfs service
+  handlers:
+    - name: restart nfs service
+      service: name=nfs-kernel-server state=restarted
diff --git a/prototypes/openstack-ansible/scripts/osa_deploy.sh b/prototypes/openstack-ansible/scripts/osa_deploy.sh
new file mode 100755
index 000000000..95f593194
--- /dev/null
+++ b/prototypes/openstack-ansible/scripts/osa_deploy.sh
@@ -0,0 +1,82 @@
+#!/bin/bash
+
+export OSA_PATH=/opt/openstack-ansible
+export LOG_PATH=$OSA_PATH/log
+export PLAYBOOK_PATH=$OSA_PATH/playbooks
+export OSA_BRANCH=${OSA_BRANCH:-"master"}
+
+JUMPHOST_IP="192.168.122.2"
+
+sudo /bin/rm -rf $LOG_PATH
+sudo /bin/mkdir -p $LOG_PATH
+sudo /bin/cp /root/.ssh/id_rsa.pub ../file/authorized_keys
+sudo echo -e '\n'>>../file/authorized_keys
+
+cd ../playbooks/
+# this will prepare the jump host:
+# git clone OpenStack-Ansible, bootstrap it and configure the network
+sudo ansible-playbook -i inventory jumphost_configuration.yml -vvv
+
+# this will prepare the target hosts,
+# e.g. configure the network and NFS
+sudo ansible-playbook -i inventory targethost_configuration.yml
+
+# use OpenStack-Ansible to deploy OpenStack
+
+echo "Set UP Host !"
+sudo /bin/sh -c "ssh root@$JUMPHOST_IP openstack-ansible \
+    $PLAYBOOK_PATH/setup-hosts.yml" | \
+    tee $LOG_PATH/setup-host.log
+
+# check the result of openstack-ansible setup-hosts.yml;
+# if it failed, exit with exit code 1
+grep "failed=1" $LOG_PATH/setup-host.log>/dev/null \
+  || grep "unreachable=1" $LOG_PATH/setup-host.log>/dev/null
+if [ $? -eq 0 ]; then
+    echo "failed setup host!"
+    exit 1
+else
+    echo "setup host successfully!"
+fi
+
+echo "Set UP Infrastructure !"
+sudo /bin/sh -c "ssh root@$JUMPHOST_IP openstack-ansible \
+    $PLAYBOOK_PATH/setup-infrastructure.yml" | \
+    tee $LOG_PATH/setup-infrastructure.log
+
+grep "failed=1" $LOG_PATH/setup-infrastructure.log>/dev/null \
+  || grep "unreachable=1" $LOG_PATH/setup-infrastructure.log>/dev/null
+if [ $? -eq 0 ]; then
+    echo "failed setup infrastructure!"
+    exit 1
+else
+    echo "setup infrastructure successfully!"
+fi
+
+sudo /bin/sh -c "ssh root@$JUMPHOST_IP ansible -i $PLAYBOOK_PATH/inventory/ \
+    galera_container -m shell \
+    -a "mysql -h localhost -e 'show status like \"%wsrep_cluster_%\";'"" \
+    | tee $LOG_PATH/galera.log
+
+grep "FAILED" $LOG_PATH/galera.log>/dev/null
+if [ $? -eq 0 ]; then
+    echo "failed to verify the database cluster!"
+    exit 1
+else
+    echo "database cluster verified successfully!"
+fi
+
+echo "Set UP OpenStack !"
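# The grep-based pass/fail check above is repeated once per playbook; a
# small helper along these lines could replace the copies (a sketch only --
# check_log is not defined in this script):
#   check_log() {
#       if grep -qE "failed=1|unreachable=1" "$1"; then
#           echo "failed $2!"
#           exit 1
#       fi
#       echo "$2 finished successfully!"
#   }
#   check_log "$LOG_PATH/setup-openstack.log" "setup openstack"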
+sudo /bin/sh -c "ssh root@$JUMPHOST_IP openstack-ansible \ + $PLAYBOOK_PATH/setup-openstack.yml" | \ + tee $LOG_PATH/setup-openstack.log + +grep "failed=1" $LOG_PATH/setup-openstack.log>/dev/null \ + || grep "unreachable=1" $LOG_PATH/setup-openstack.log>/dev/null +if [ $? -eq 0 ]; then + echo "failed setup openstack!" + exit 1 +else + echo "OpenStack successfully deployed!" + exit 0 +fi diff --git a/prototypes/openstack-ansible/template/bifrost/compute.interface.j2 b/prototypes/openstack-ansible/template/bifrost/compute.interface.j2 new file mode 100644 index 000000000..1719f6a08 --- /dev/null +++ b/prototypes/openstack-ansible/template/bifrost/compute.interface.j2 @@ -0,0 +1,86 @@ +# This file describes the network interfaces available on your system +# and how to activate them. For more information, see interfaces(5). + +# The loopback network interface +auto lo +iface lo inet loopback + + +# Physical interface +auto ens3 +iface ens3 inet manual + +# Container/Host management VLAN interface +auto ens3.10 +iface ens3.10 inet manual + vlan-raw-device ens3 + +# OpenStack Networking VXLAN (tunnel/overlay) VLAN interface +auto ens3.30 +iface ens3.30 inet manual + vlan-raw-device ens3 + +# Storage network VLAN interface (optional) +auto ens3.20 +iface ens3.20 inet manual + vlan-raw-device ens3 + +# Container/Host management bridge +auto br-mgmt +iface br-mgmt inet static + bridge_stp off + bridge_waitport 0 + bridge_fd 0 + bridge_ports ens3.10 + address {{host_info[inventory_hostname].MGMT_IP}} + netmask 255.255.252.0 + +# compute1 VXLAN (tunnel/overlay) bridge config +auto br-vxlan +iface br-vxlan inet static + bridge_stp off + bridge_waitport 0 + bridge_fd 0 + bridge_ports ens3.30 + address {{host_info[inventory_hostname].VXLAN_IP}} + netmask 255.255.252.0 + +# OpenStack Networking VLAN bridge +auto br-vlan +iface br-vlan inet static + bridge_stp off + bridge_waitport 0 + bridge_fd 0 + bridge_ports ens3 + address {{host_info[inventory_hostname].VLAN_IP}} + netmask 255.255.255.0 + gateway 192.168.122.1 + offload-sg off + # Create veth pair, don't bomb if already exists + pre-up ip link add br-vlan-veth type veth peer name eth12 || true + # Set both ends UP + pre-up ip link set br-vlan-veth up + pre-up ip link set eth12 up + # Delete veth pair on DOWN + post-down ip link del br-vlan-veth || true + bridge_ports br-vlan-veth + +# Add an additional address to br-vlan +iface br-vlan inet static + # Flat network default gateway + # -- This needs to exist somewhere for network reachability + # -- from the router namespace for floating IP paths. + # -- Putting this here is primarily for tempest to work. + address {{host_info[inventory_hostname].VLAN_IP_SECOND}} + netmask 255.255.252.0 + dns-nameserver 8.8.8.8 8.8.4.4 + +# compute1 Storage bridge +auto br-storage +iface br-storage inet static + bridge_stp off + bridge_waitport 0 + bridge_fd 0 + bridge_ports ens3.20 + address {{host_info[inventory_hostname].STORAGE_IP}} + netmask 255.255.252.0 diff --git a/prototypes/openstack-ansible/template/bifrost/controller.interface.j2 b/prototypes/openstack-ansible/template/bifrost/controller.interface.j2 new file mode 100644 index 000000000..74aeea99d --- /dev/null +++ b/prototypes/openstack-ansible/template/bifrost/controller.interface.j2 @@ -0,0 +1,71 @@ +# This file describes the network interfaces available on your system +# and how to activate them. For more information, see interfaces(5). 
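# Once the interfaces below are up, the resulting bridge layout can be
# sanity-checked on the node with standard tools, e.g. (commands for
# illustration; the prototype itself does not run them):
#   brctl show
#   ip -d link show br-vxlan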
+ +# The loopback network interface +auto lo +iface lo inet loopback + +# Physical interface +auto ens3 +iface ens3 inet manual + +# Container/Host management VLAN interface +auto ens3.10 +iface ens3.10 inet manual + vlan-raw-device ens3 + +# OpenStack Networking VXLAN (tunnel/overlay) VLAN interface +auto ens3.30 +iface ens3.30 inet manual + vlan-raw-device ens3 + +# Storage network VLAN interface (optional) +auto ens3.20 +iface ens3.20 inet manual + vlan-raw-device ens3 + +# Container/Host management bridge +auto br-mgmt +iface br-mgmt inet static + bridge_stp off + bridge_waitport 0 + bridge_fd 0 + bridge_ports ens3.10 + address {{host_info[inventory_hostname].MGMT_IP}} + netmask 255.255.252.0 + +# OpenStack Networking VXLAN (tunnel/overlay) bridge +# +# Only the COMPUTE and NETWORK nodes must have an IP address +# on this bridge. When used by infrastructure nodes, the +# IP addresses are assigned to containers which use this +# bridge. +# +auto br-vxlan +iface br-vxlan inet manual + bridge_stp off + bridge_waitport 0 + bridge_fd 0 + bridge_ports ens3.30 + +# OpenStack Networking VLAN bridge +auto br-vlan +iface br-vlan inet static + bridge_stp off + bridge_waitport 0 + bridge_fd 0 + bridge_ports ens3 + address {{host_info[inventory_hostname].VLAN_IP}} + netmask 255.255.255.0 + gateway 192.168.122.1 + dns-nameserver 8.8.8.8 8.8.4.4 + +# compute1 Storage bridge +auto br-storage +iface br-storage inet static + bridge_stp off + bridge_waitport 0 + bridge_fd 0 + bridge_ports ens3.20 + address {{host_info[inventory_hostname].STORAGE_IP}} + netmask 255.255.252.0 diff --git a/prototypes/openstack-ansible/var/ubuntu.yml b/prototypes/openstack-ansible/var/ubuntu.yml new file mode 100644 index 000000000..71f54ecb5 --- /dev/null +++ b/prototypes/openstack-ansible/var/ubuntu.yml @@ -0,0 +1,6 @@ +--- +OSA_URL: https://git.openstack.org/openstack/openstack-ansible +OSA_PATH: /opt/openstack-ansible +OSA_ETC_PATH: /etc/openstack_deploy +JUMPHOST_IP: 192.168.122.2 +host_info: {'jumphost':{'MGMT_IP': '172.29.236.10','VLAN_IP': '192.168.122.2', 'STORAGE_IP': '172.29.244.10'},'controller00':{'MGMT_IP': '172.29.236.11','VLAN_IP': '192.168.122.3', 'STORAGE_IP': '172.29.244.11'},'controller01':{'MGMT_IP': '172.29.236.12','VLAN_IP': '192.168.122.4', 'STORAGE_IP': '172.29.244.12'},'controller02':{'MGMT_IP': '172.29.236.13','VLAN_IP': '192.168.122.5', 'STORAGE_IP': '172.29.240.13'},'compute00':{'MGMT_IP': '172.29.236.14','VLAN_IP': '192.168.122.6','VLAN_IP_SECOND': '173.29.241.1','VXLAN_IP': '172.29.240.14', 'STORAGE_IP': '172.29.244.14'},'compute01':{'MGMT_IP': '172.29.236.15','VLAN_IP': '192.168.122.7','VLAN_IP_SECOND': '173.29.241.2','VXLAN_IP': '172.29.240.15', 'STORAGE_IP': '172.29.244.15'}} diff --git a/prototypes/puppet-infracloud/modules/opnfv/manifests/server.pp b/prototypes/puppet-infracloud/modules/opnfv/manifests/server.pp index fc9bf7163..d167973c4 100644 --- a/prototypes/puppet-infracloud/modules/opnfv/manifests/server.pp +++ b/prototypes/puppet-infracloud/modules/opnfv/manifests/server.pp @@ -239,13 +239,6 @@ class opnfv::server ( multiple => true, } - # disable selinux in case of RHEL - if ($::osfamily == 'RedHat') { - class { 'selinux': - mode => 'disabled', - } - } - # update hosts create_resources('host', hiera_hash('hosts')) } diff --git a/utils/fetch_os_creds.sh b/utils/fetch_os_creds.sh index f00e022f9..c99afaca8 100755 --- a/utils/fetch_os_creds.sh +++ b/utils/fetch_os_creds.sh @@ -201,6 +201,17 @@ elif [ "$installer_type" == "foreman" ]; then 'source keystonerc_admin;keystone 
endpoint-list'" \ | grep $admin_ip | sed 's/ /\n/g' | grep ^http | head -1) &> /dev/null +elif [ "$installer_type" == "daisy" ]; then + verify_connectivity $installer_ip + cluster=$(sshpass -p r00tme ssh 2>/dev/null $ssh_options root@${installer_ip} \ + "source ~/daisyrc_admin; daisy cluster-list"|grep active|head -1|awk -F "|" '{print $3}') &> /dev/null + if [ -z $cluster ]; then + echo "No active cluster detected in daisy" + exit 1 + fi + + sshpass -p r00tme scp 2>/dev/null $ssh_options root@${installer_ip}:/etc/kolla/admin-openrc.sh $dest_path &> /dev/null + else error "Installer $installer is not supported by this script" fi diff --git a/utils/lab-reconfiguration/foreman.yaml b/utils/lab-reconfiguration/foreman.yaml index 48ab9ab6e..0a37be2e1 100644 --- a/utils/lab-reconfiguration/foreman.yaml +++ b/utils/lab-reconfiguration/foreman.yaml @@ -1,3 +1,4 @@ +--- # Vnic configuration for foreman deploy network: diff --git a/utils/lab-reconfiguration/fuel.yaml b/utils/lab-reconfiguration/fuel.yaml index eb9028b2c..15e64c452 100644 --- a/utils/lab-reconfiguration/fuel.yaml +++ b/utils/lab-reconfiguration/fuel.yaml @@ -1,3 +1,4 @@ +--- # Vnic configuration for fuel deploy network: diff --git a/utils/push-test-logs.sh b/utils/push-test-logs.sh index 5e428d07b..9099657c8 100644 --- a/utils/push-test-logs.sh +++ b/utils/push-test-logs.sh @@ -25,7 +25,7 @@ node_list=(\ 'intel-pod5' 'intel-pod6' 'intel-pod7' 'intel-pod8' \ 'ericsson-pod1' 'ericsson-pod2' \ 'ericsson-virtual1' 'ericsson-virtual2' 'ericsson-virtual3' \ -'ericsson-virtual4' 'ericsson-virtual5' \ +'ericsson-virtual4' 'ericsson-virtual5' 'ericsson-virtual12' \ 'arm-pod1' 'arm-pod3' \ 'huawei-pod1' 'huawei-pod2' 'huawei-pod3' 'huawei-pod4' 'huawei-pod5' \ 'huawei-pod6' 'huawei-pod7' 'huawei-pod12' \ diff --git a/utils/test/dashboard/dashboard/functest/testcases.yaml b/utils/test/dashboard/dashboard/functest/testcases.yaml index 9c33d2e6b..85cb8b292 100644 --- a/utils/test/dashboard/dashboard/functest/testcases.yaml +++ b/utils/test/dashboard/dashboard/functest/testcases.yaml @@ -1,3 +1,4 @@ +--- functest: - name: tempest_smoke_serial diff --git a/utils/test/dashboard/dashboard/qtip/testcases.yaml b/utils/test/dashboard/dashboard/qtip/testcases.yaml index cd337cd73..dfa9cc2db 100644 --- a/utils/test/dashboard/dashboard/qtip/testcases.yaml +++ b/utils/test/dashboard/dashboard/qtip/testcases.yaml @@ -1,3 +1,4 @@ +--- qtip: - name: compute_test_suite @@ -18,7 +19,7 @@ qtip: fields: - field: details.index - - name:storage_test_suite + name: storage_test_suite format: qpi test_family: storage visualizations: diff --git a/utils/test/reporting/api/handlers/landing.py b/utils/test/reporting/api/handlers/landing.py index 137c05007..ae1fd2037 100644 --- a/utils/test/reporting/api/handlers/landing.py +++ b/utils/test/reporting/api/handlers/landing.py @@ -6,15 +6,172 @@ # which accompanies this distribution, and is available at # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## +import requests + from tornado.web import RequestHandler from tornado.escape import json_encode +from tornado.escape import json_decode -class FiltersHandler(RequestHandler): - def get(self): - return self.write(json_encode({'status': 'SUCCESS'})) +class BaseHandler(RequestHandler): + def _set_header(self): + self.set_header('Access-Control-Allow-Origin', '*') + self.set_header('Access-Control-Allow-Headers', + 'Content-Type, Content-Length, Authorization, \ + Accept, X-Requested-With , PRIVATE-TOKEN') + 
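+        # CORS bookkeeping: the wildcard origin, the Allow-Headers list
+        # above and the Allow-Methods list below are what the browser's
+        # preflight OPTIONS request checks, letting the reporting pages
+        # (served from another origin) call this API via XMLHttpRequest.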
self.set_header('Access-Control-Allow-Methods', + 'PUT, POST, GET, DELETE, OPTIONS') -class ScenariosHandler(RequestHandler): +class FiltersHandler(BaseHandler): def get(self): - return self.write(json_encode({'status': 'SUCCESS'})) + self._set_header() + + filters = { + 'filters': { + 'status': ['success', 'warning', 'danger'], + 'projects': ['functest', 'yardstick'], + 'installers': ['apex', 'compass', 'fuel', 'joid'], + 'version': ['colorado', 'master'], + 'loops': ['daily', 'weekly', 'monthly'], + 'time': ['10 days', '30 days'] + } + } + return self.write(json_encode(filters)) + + +class ScenariosHandler(BaseHandler): + def post(self): + self._set_header() + + body = json_decode(self.request.body) + args = self._get_args(body) + + scenarios = self._get_result_data(self._get_scenarios(), args) + + return self.write(json_encode(dict(scenarios=scenarios))) + + def _get_result_data(self, data, args): + data = self._filter_status(data, args) + return {s: self._get_scenario_result(s, data[s], args) for s in data} + + def _filter_status(self, data, args): + return {k: v for k, v in data.items() if v['status'] in args['status']} + + def _get_scenario_result(self, scenario, data, args): + result = { + 'status': data.get('status'), + 'installers': self._get_installers_result(data['installers'], args) + } + return result + + def _get_installers_result(self, data, args): + func = self._get_installer_result + return {k: func(k, data.get(k, {}), args) for k in args['installers']} + + def _get_installer_result(self, installer, data, args): + projects = data.get(args['version'], []) + return [self._get_project_data(projects, p) for p in args['projects']] + + def _get_project_data(self, projects, project): + atom = { + 'project': project, + 'score': None, + 'status': None + } + for p in projects: + if p['project'] == project: + return p + return atom + + def _get_scenarios(self): + url = 'http://testresults.opnfv.org/test/api/v1/scenarios' + resp = requests.get(url).json() + data = self._change_to_utf8(resp).get('scenarios', {}) + return {a.get('name'): self._get_scenario(a.get('installers', []) + ) for a in data} + + def _get_scenario(self, data): + installers = {a.get('installer'): self._get_installer(a.get('versions', + []) + ) for a in data} + scenario = { + 'status': self._get_status(), + 'installers': installers + } + return scenario + + def _get_status(self): + return 'success' + + def _get_installer(self, data): + return {a.get('version'): self._get_version(a) for a in data} + + def _get_version(self, data): + try: + scores = data.get('score', {}).get('projects')[0] + trusts = data.get('trust_indicator', {}).get('projects')[0] + except (TypeError, IndexError): + return [] + else: + scores = {key: [dict(date=a.get('date')[:10], + score=a.get('score') + ) for a in scores[key]] for key in scores} + trusts = {key: [dict(date=a.get('date')[:10], + status=a.get('status') + ) for a in trusts[key]] for key in trusts} + atom = self._get_atom(scores, trusts) + return [dict(project=k, + score=sorted(atom[k], reverse=True)[0].get('score'), + status=sorted(atom[k], reverse=True)[0].get('status') + ) for k in atom if atom[k]] + + def _get_atom(self, scores, trusts): + s = {k: {a['date']: a['score'] for a in scores[k]} for k in scores} + t = {k: {a['date']: a['status'] for a in trusts[k]} for k in trusts} + return {k: [dict(score=s[k][a], status=t[k][a], data=a + ) for a in s[k] if a in t[k]] for k in s} + + def _change_to_utf8(self, obj): + if isinstance(obj, dict): + return {str(k): self._change_to_utf8(v) 
for k, v in obj.items()} + elif isinstance(obj, list): + return [self._change_to_utf8(ele) for ele in obj] + else: + try: + new = eval(obj) + if isinstance(new, int): + return obj + return self._change_to_utf8(new) + except (NameError, TypeError, SyntaxError): + return str(obj) + + def _get_args(self, body): + status = self._get_status_args(body) + projects = self._get_projects_args(body) + installers = self._get_installers_args(body) + + args = { + 'status': status, + 'projects': projects, + 'installers': installers, + 'version': body.get('version', 'master').lower(), + 'loops': body.get('loops', 'daily').lower(), + 'time': body.get('times', '10 days')[:2].lower() + } + return args + + def _get_status_args(self, body): + status_all = ['success', 'warning', 'danger'] + status = [a.lower() for a in body.get('status', ['all'])] + return status_all if 'all' in status else status + + def _get_projects_args(self, body): + project_all = ['functest', 'yardstick'] + projects = [a.lower() for a in body.get('projects', ['all'])] + return project_all if 'all' in projects else projects + + def _get_installers_args(self, body): + installer_all = ['apex', 'compass', 'fuel', 'joid'] + installers = [a.lower() for a in body.get('installers', ['all'])] + return installer_all if 'all' in installers else installers diff --git a/utils/test/reporting/api/install.sh b/utils/test/reporting/api/install.sh new file mode 100755 index 000000000..55d6b77ec --- /dev/null +++ b/utils/test/reporting/api/install.sh @@ -0,0 +1,3 @@ +apt-get install -y python-pip +pip install tornado +pip install requests diff --git a/utils/test/reporting/docker/reporting.sh b/utils/test/reporting/docker/reporting.sh index 1bef1b811..1de13ae32 100755 --- a/utils/test/reporting/docker/reporting.sh +++ b/utils/test/reporting/docker/reporting.sh @@ -4,7 +4,7 @@ export PYTHONPATH="${PYTHONPATH}:." 
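+# storperf reporting runs alongside functest and yardstick: see the
+# storperf entry in the projects array and the dedicated
+# "python ./storperf/reporting-status.py" step further down; its status
+# pages land under display/<version>/storperf/.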
export CONFIG_REPORTING_YAML=./reporting.yaml declare -a versions=(colorado master) -declare -a projects=(functest yardstick) +declare -a projects=(functest storperf yardstick) project=$1 reporting_type=$2 @@ -30,6 +30,7 @@ cp -Rf js display # $1 | $2 # functest | status, vims, tempest # yardstick | +# storperf | if [ -z "$1" ]; then echo "********************************" @@ -52,6 +53,13 @@ if [ -z "$1" ]; then echo "********************************" python ./yardstick/reporting-status.py echo "Yardstick reporting status...OK" + + echo "********************************" + echo " Storperf reporting " + echo "********************************" + python ./storperf/reporting-status.py + echo "Storperf reporting status...OK" + else if [ -z "$2" ]; then reporting_type="status" @@ -71,10 +79,4 @@ echo "daemon off;" >> /etc/nginx/nginx.conf # supervisor config cp /home/opnfv/utils/test/reporting/docker/supervisor.conf /etc/supervisor/conf.d/ -# build pages -cd pages ln -s /usr/bin/nodejs /usr/bin/node -npm install -npm install -g grunt bower -bower install --allow-root -grunt build diff --git a/utils/test/reporting/docker/supervisor.conf b/utils/test/reporting/docker/supervisor.conf index 0c2207793..1e0eed9c8 100644 --- a/utils/test/reporting/docker/supervisor.conf +++ b/utils/test/reporting/docker/supervisor.conf @@ -14,3 +14,9 @@ autorestart = true user = root command = service nginx restart autorestart = true + +[program:reporting_angular] +user = root +directory = /home/opnfv/utils/test/reporting/pages +command = bash angular.sh +autorestart = true diff --git a/utils/test/reporting/functest/reporting-status.py b/utils/test/reporting/functest/reporting-status.py index 158ee597b..af1d1d8a5 100755 --- a/utils/test/reporting/functest/reporting-status.py +++ b/utils/test/reporting/functest/reporting-status.py @@ -28,9 +28,9 @@ testValid = [] otherTestCases = [] reportingDate = datetime.datetime.now().strftime("%Y-%m-%d %H:%M") -# init just tempest to get the list of scenarios -# as all the scenarios run Tempest -tempest = tc.TestCase("tempest_smoke_serial", "functest", -1) +# init just connection_check to get the list of scenarios +# as all the scenarios run connection_check +healthcheck = tc.TestCase("connection_check", "functest", -1) # Retrieve the Functest configuration to detect which tests are relevant # according to the installer, scenario @@ -61,13 +61,13 @@ logger.info("*******************************************") # Retrieve test cases of Tier 1 (smoke) config_tiers = functest_yaml_config.get("tiers") -# we consider Tier 1 (smoke),2 (features) +# we consider Tier 0 (Healthcheck), Tier 1 (smoke),2 (features) # to validate scenarios -# Tier > 4 are not used to validate scenarios but we display the results anyway +# Tier > 2 are not used to validate scenarios but we display the results anyway # tricky thing for the API as some tests are Functest tests # other tests are declared directly in the feature projects for tier in config_tiers: - if tier['order'] > 0 and tier['order'] < 2: + if tier['order'] >= 0 and tier['order'] < 2: for case in tier['testcases']: if case['name'] not in blacklist: testValid.append(tc.TestCase(case['name'], @@ -92,12 +92,20 @@ for version in versions: # For all the installers for installer in installers: # get scenarios - scenario_results = rp_utils.getScenarios(tempest, installer, version) + scenario_results = rp_utils.getScenarios(healthcheck, + installer, + version) scenario_stats = rp_utils.getScenarioStats(scenario_results) items = {} scenario_result_criteria = 
{} - scenario_file_name = ("./display/" + version + - "/functest/scenario_history.txt") + scenario_directory = "./display/" + version + "/functest/" + scenario_file_name = scenario_directory + "scenario_history.txt" + + # check that the directory exists, if not create it + # (first run on new version) + if not os.path.exists(scenario_directory): + os.makedirs(scenario_directory) + # initiate scenario file if it does not exist if not os.path.isfile(scenario_file_name): with open(scenario_file_name, "a") as my_file: @@ -120,7 +128,9 @@ for version in versions: if len(s_result) > 0: build_tag = s_result[len(s_result)-1]['build_tag'] logger.debug("Build tag: %s" % build_tag) - s_url = s_url = rp_utils.getJenkinsUrl(build_tag) + s_url = rp_utils.getJenkinsUrl(build_tag) + if s_url is None: + s_url = "http://testresultS.opnfv.org/reporting" logger.info("last jenkins url: %s" % s_url) testCases2BeDisplayed = [] # Check if test case is runnable / installer, scenario diff --git a/utils/test/reporting/functest/testCase.py b/utils/test/reporting/functest/testCase.py index df0874e0b..f77136e11 100644 --- a/utils/test/reporting/functest/testCase.py +++ b/utils/test/reporting/functest/testCase.py @@ -27,6 +27,7 @@ class TestCase(object): 'ocl': 'OCL', 'tempest_smoke_serial': 'Tempest (smoke)', 'tempest_full_parallel': 'Tempest (full)', + 'tempest_defcore': 'Tempest (Defcore)', 'rally_sanity': 'Rally (smoke)', 'bgpvpn': 'bgpvpn', 'rally_full': 'Rally (full)', @@ -43,7 +44,10 @@ class TestCase(object): 'parser': 'Parser', 'connection_check': 'Health (connection)', 'api_check': 'Health (api)', - 'snaps_smoke': 'SNAPS'} + 'snaps_smoke': 'SNAPS', + 'snaps_health_check': 'Health (dhcp)', + 'netready': 'Netready', + 'barometer': 'Barometer'} try: self.displayName = display_name_matrix[self.name] except: @@ -122,6 +126,7 @@ class TestCase(object): 'ocl': 'ocl', 'tempest_smoke_serial': 'tempest_smoke_serial', 'tempest_full_parallel': 'tempest_full_parallel', + 'tempest_defcore': 'tempest_defcore', 'rally_sanity': 'rally_sanity', 'bgpvpn': 'bgpvpn', 'rally_full': 'rally_full', @@ -138,8 +143,10 @@ class TestCase(object): 'parser': 'parser-basics', 'connection_check': 'connection_check', 'api_check': 'api_check', - 'snaps_smoke': 'snaps_smoke' - } + 'snaps_smoke': 'snaps_smoke', + 'snaps_health_check': 'snaps_health_check', + 'netready': 'gluon_vping', + 'barometer': 'barometercollectd'} try: return test_match_matrix[self.name] except: diff --git a/utils/test/reporting/html/danube.html b/utils/test/reporting/html/danube.html index 58d6bc0fe..d63e19d90 100644 --- a/utils/test/reporting/html/danube.html +++ b/utils/test/reporting/html/danube.html @@ -1,113 +1,124 @@ - - - - - Phantom by HTML5 UP - - - - - - - - - -
- - - - - - - - - - -
-
-
-

OPNFV Testing Working group

-
-
-

Follow

- -
- -
-
- -
- - - - - - - - - - + + + + + Phantom by HTML5 UP + + + + + + + + + +
+ + + + + + + + + + +
+
+
+

OPNFV Testing Working group

+
+
+

Follow

+ +
+ +
+
+ +
+ + + + + + + + + + diff --git a/utils/test/reporting/html/functest-danube.html b/utils/test/reporting/html/functest-danube.html new file mode 100644 index 000000000..ac99cb057 --- /dev/null +++ b/utils/test/reporting/html/functest-danube.html @@ -0,0 +1,124 @@ + + + + + Phantom by HTML5 UP + + + + + + + + + +
+ + + + + + + + + + +
+
+
+

OPNFV Testing Working group

+
+
+

Follow

+ +
+ +
+
+ +
+ + + + + + + + + + diff --git a/utils/test/reporting/html/functest-master.html b/utils/test/reporting/html/functest-master.html index 03217a6bd..4b1f76347 100644 --- a/utils/test/reporting/html/functest-master.html +++ b/utils/test/reporting/html/functest-master.html @@ -1,124 +1,124 @@ - - - - - Phantom by HTML5 UP - - - - - - - - - -
- - - - - - - - - - -
-
-
-

OPNFV Testing Working group

-
-
-

Follow

- -
- -
-
- -
- - - - - - - - - - + + + + + Phantom by HTML5 UP + + + + + + + + + +
+ + + + + + + + + + +
+
+
+

OPNFV Testing Working group

+
+
+

Follow

+ +
+ +
+
+ +
+ + + + + + + + + + diff --git a/utils/test/reporting/html/index.html b/utils/test/reporting/html/index.html index b2b8b46f8..c6627ffe5 100644 --- a/utils/test/reporting/html/index.html +++ b/utils/test/reporting/html/index.html @@ -1,113 +1,124 @@ - - - - - OPNFV reporting - - - - - - - - - -
- - - - - - - -
-
-
-

OPNFV Testing group reporting

-
-
- - -
-
-
- - -
-
-
-

OPNFV Testing Working group

-
-
-

Follow

- -
- -
-
- -
- - - - - - - - - - + + + + + OPNFV reporting + + + + + + + + + +
+ + + + + + + + + + +
+
+
+

OPNFV Testing Working group

+
+
+

Follow

+ +
+ +
+
+ +
+ + + + + + + + + + diff --git a/utils/test/reporting/html/master.html b/utils/test/reporting/html/master.html new file mode 100644 index 000000000..438bf2023 --- /dev/null +++ b/utils/test/reporting/html/master.html @@ -0,0 +1,124 @@ + + + + + Phantom by HTML5 UP + + + + + + + + + +
+ + + + + + + + + + +
+
+
+

OPNFV Testing Working group

+
+
+

Follow

+ +
+ +
+
+ +
+ + + + + + + + + + diff --git a/utils/test/reporting/img/euphrates.jpg b/utils/test/reporting/img/euphrates.jpg new file mode 100644 index 000000000..3625b50cb Binary files /dev/null and b/utils/test/reporting/img/euphrates.jpg differ diff --git a/utils/test/reporting/img/storperf.jpg b/utils/test/reporting/img/storperf.jpg new file mode 100644 index 000000000..37492e69e Binary files /dev/null and b/utils/test/reporting/img/storperf.jpg differ diff --git a/utils/test/reporting/pages/angular.sh b/utils/test/reporting/pages/angular.sh new file mode 100755 index 000000000..a7f167516 --- /dev/null +++ b/utils/test/reporting/pages/angular.sh @@ -0,0 +1,10 @@ +: ${SERVER_URL:='http://testresults.opnfv.org/reporting/api'} + +echo "var BASE_URL = 'http://${SERVER_URL}/landing-page'" > app/scripts/app.config.js + +apt-get install -y nodejs +apt-get install -y npm +npm install +npm install -g grunt bower +bower install --allow-root +grunt build diff --git a/utils/test/reporting/pages/app/images/green.png b/utils/test/reporting/pages/app/images/green.png new file mode 100644 index 000000000..57fc59927 Binary files /dev/null and b/utils/test/reporting/pages/app/images/green.png differ diff --git a/utils/test/reporting/pages/app/images/green@2x.png b/utils/test/reporting/pages/app/images/green@2x.png new file mode 100644 index 000000000..3bda5beb6 Binary files /dev/null and b/utils/test/reporting/pages/app/images/green@2x.png differ diff --git a/utils/test/reporting/pages/app/index.html b/utils/test/reporting/pages/app/index.html index 7673a4cc2..1159c2176 100644 --- a/utils/test/reporting/pages/app/index.html +++ b/utils/test/reporting/pages/app/index.html @@ -1,6 +1,7 @@ - + + OPNFV-DASHBOARD EXAMPLE @@ -13,17 +14,22 @@ + + + + + - - - + + - - + + + @@ -31,7 +37,7 @@
- - - - - - - - - - + + + + + + + + + + + + + + + - + + \ No newline at end of file diff --git a/utils/test/reporting/pages/app/scripts/app.config.js b/utils/test/reporting/pages/app/scripts/app.config.js new file mode 100644 index 000000000..e69de29bb diff --git a/utils/test/reporting/pages/app/scripts/app.js b/utils/test/reporting/pages/app/scripts/app.js index 6e99ce3d7..d06019c55 100644 --- a/utils/test/reporting/pages/app/scripts/app.js +++ b/utils/test/reporting/pages/app/scripts/app.js @@ -9,12 +9,13 @@ * Main module of the application. */ angular - .module('opnfvApp', [ - 'ngAnimate', - 'ui.router', - 'oc.lazyLoad', - 'ui.bootstrap', - 'ngResource', - 'selectize' + .module('opnfvApp', [ + 'ngAnimate', + 'ui.router', + 'oc.lazyLoad', + 'ui.bootstrap', + 'ngResource', + 'selectize', + '720kb.tooltips' - ]); + ]); \ No newline at end of file diff --git a/utils/test/reporting/pages/app/scripts/config.js b/utils/test/reporting/pages/app/scripts/config.js index 838460a38..1010169d3 100644 --- a/utils/test/reporting/pages/app/scripts/config.js +++ b/utils/test/reporting/pages/app/scripts/config.js @@ -7,8 +7,13 @@ * Main config file of the application. */ angular - .module('opnfvApp').config(function () { + .module('opnfvApp').config(['$httpProvider', '$qProvider', function($httpProvider, $qProvider) { - } + $httpProvider.defaults.useXDomain = true; + delete $httpProvider.defaults.headers.common['X-Requested-With']; - ) + $qProvider.errorOnUnhandledRejections(false); + + } + + ]); \ No newline at end of file diff --git a/utils/test/reporting/pages/app/scripts/config.router.js b/utils/test/reporting/pages/app/scripts/config.router.js index 641ea6a74..d38ad7507 100644 --- a/utils/test/reporting/pages/app/scripts/config.router.js +++ b/utils/test/reporting/pages/app/scripts/config.router.js @@ -23,7 +23,7 @@ angular.module('opnfvApp') $stateProvider .state('landingpage', { url: "/landingpage", - //controller: 'MainCtrl', + controller: 'MainController', templateUrl: "views/main.html", data: { pageTitle: '首页', specialClass: 'landing-page' }, resolve: { diff --git a/utils/test/reporting/pages/app/scripts/controllers/main.controller.js b/utils/test/reporting/pages/app/scripts/controllers/main.controller.js new file mode 100644 index 000000000..2054dc2dd --- /dev/null +++ b/utils/test/reporting/pages/app/scripts/controllers/main.controller.js @@ -0,0 +1,32 @@ +'use strict'; + +/** + * @ngdoc function + * @name opnfvdashBoardAngularApp.controller:MainPageController + * @description + * # TableController + * Controller of the opnfvdashBoardAngularApp + */ +angular.module('opnfvApp') + .controller('MainController', ['$scope', '$state', '$stateParams', function($scope, $state, $stateParams) { + + init(); + + function init() { + $scope.goTest = goTest; + $scope.goLogin = goLogin; + + } + + function goTest() { + $state.go("select.selectTestCase"); + } + + function goLogin() { + $state.go("login"); + } + + + + + }]); \ No newline at end of file diff --git a/utils/test/reporting/pages/app/scripts/controllers/table.controller.js b/utils/test/reporting/pages/app/scripts/controllers/table.controller.js index 8ca1e474c..0f3a17a03 100644 --- a/utils/test/reporting/pages/app/scripts/controllers/table.controller.js +++ b/utils/test/reporting/pages/app/scripts/controllers/table.controller.js @@ -8,255 +8,131 @@ * Controller of the opnfvdashBoardAngularApp */ angular.module('opnfvApp') - .controller('TableController', ['$scope', '$state', '$stateParams', 'TableFactory', function ($scope, $state, $stateParams, 
TableFactory) { + .controller('TableController', ['$scope', '$state', '$stateParams', '$http', 'TableFactory', function($scope, $state, $stateParams, $http, TableFactory) { $scope.filterlist = []; $scope.selection = []; - $scope.statusList = ["Success", "Warning", "Danger"]; - $scope.projectList = ["Deployment", "Functest", "Yardstick"]; - $scope.installerList = ["apex", "compass", "fuel", "joid"]; - $scope.versionlist = ["Colorado", "Master"]; - $scope.loopci = ["Daily", "Weekly", "Monthly"]; - $scope.time = ["10 days", "1 Month"]; + $scope.statusList = []; + $scope.projectList = []; + $scope.installerList = []; + $scope.versionlist = []; + $scope.loopci = []; + $scope.time = []; $scope.tableDataAll = {}; $scope.tableInfoAll = {}; + $scope.scenario = {}; + $scope.VersionConfig = { + create: true, + valueField: 'title', + labelField: 'title', + delimiter: '|', + maxItems: 1, + placeholder: 'Version', + onChange: function(value) { + checkElementArrayValue($scope.selection, $scope.VersionOption); + $scope.selection.push(value); + // console.log($scope.selection); + getScenarioData(); + } + } - $scope.scenario = - { - "scenarios": { - "os-nosdn-kvm-noha": { - "status": "Success", - "installers": { - "apex": [ - { - "project": "Deployment", - "score": "13/14", - "status": "SUCCESS", - - - }, - { - "project": "Functest", - "score": "null", - "status": "SUCCESS" - }, - { - "project": "Yardstick", - "score": "13/14", - "status": "SUCCESS" - } - ], - "compass": [ - { - "project": "Deployment", - "score": "13/14", - "status": "SUCCESS" - }, - { - "project": "Functest", - "score": "13/14", - "status": "SUCCESS" - }, - { - "project": "Yardstick", - "score": "13/14", - "status": "SUCCESS" - } - ], - "fuel": [ - { - "project": "Deployment", - "score": "13/14", - "status": "SUCCESS" - }, - { - "project": "Functest", - "score": "13/14", - "status": "SUCCESS" - }, - { - "project": "Yardstick", - "score": "13/14", - "status": "SUCCESS" - } - ], - "joid": [ - { - "project": "Deployment", - "score": "13/14", - "status": "SUCCESS" - }, - { - "project": "Functest", - "score": "13/14", - "status": "SUCCESS" - }, - { - "project": "Yardstick", - "score": "13/14", - "status": "SUCCESS" - } - ] - } - }, - "os-nosdn-ovs-ha": { - "status": "Danger", - "installers": { - "apex": [ - { - "project": "Deployment", - "score": "13/14", - "status": "SUCCESS", - - - }, - { - "project": "Functest", - "score": "13/14", - "status": "SUCCESS" - }, - { - "project": "Yardstick", - "score": "13/14", - "status": "SUCCESS" - } - ], - "compass": [ - { - "project": "Deployment", - "score": "13/14", - "status": "SUCCESS" - }, - { - "project": "Functest", - "score": "13/14", - "status": "SUCCESS" - }, - { - "project": "Yardstick", - "score": "13/14", - "status": "SUCCESS" - } - ], - "fuel": [ - { - "project": "Deployment", - "score": "13/14", - "status": "SUCCESS" - }, - { - "project": "Functest", - "score": "13/14", - "status": "SUCCESS" - }, - { - "project": "Yardstick", - "score": "13/14", - "status": "SUCCESS" - } - ], - "joid": [ - { - "project": "Deployment", - "score": "13/14", - "status": "SUCCESS" - }, - { - "project": "Functest", - "score": "13/14", - "status": "SUCCESS" - }, - { - "project": "Yardstick", - "score": "13/14", - "status": "SUCCESS" - } - ] - } - }, - "os-nosdn-ovs-noha": { - "status": "Warning", - "installers": { - "apex": [ - { - "project": "Deployment", - "score": "13/14", - "status": "SUCCESS", - - - }, - { - "project": "Functest", - "score": "13/14", - "status": "SUCCESS" - }, - { - "project": "Yardstick", - 
"score": "13/14", - "status": "SUCCESS" - } - ], - "compass": [ - { - "project": "Deployment", - "score": "13/14", - "status": "SUCCESS" - }, - { - "project": "Functest", - "score": "13/14", - "status": "SUCCESS" - }, - { - "project": "Yardstick", - "score": "13/14", - "status": "SUCCESS" - } - ], - "fuel": [ - { - "project": "Deployment", - "score": "13/14", - "status": "SUCCESS" - }, - { - "project": "Functest", - "score": "13/14", - "status": "SUCCESS" - }, - { - "project": "Yardstick", - "score": "13/14", - "status": "SUCCESS" - } - ], - "joid": [ - { - "project": "Deployment", - "score": "13/14", - "status": "SUCCESS" - }, - { - "project": "Functest", - "score": "13/14", - "status": "SUCCESS" - }, - { - "project": "Yardstick", - "score": "13/14", - "status": "SUCCESS" - } - ] - } - } + $scope.LoopConfig = { + create: true, + valueField: 'title', + labelField: 'title', + delimiter: '|', + maxItems: 1, + placeholder: 'Loop', + onChange: function(value) { + checkElementArrayValue($scope.selection, $scope.LoopOption); + $scope.selection.push(value); + // console.log($scope.selection); + getScenarioData(); + + } + } + + $scope.TimeConfig = { + create: true, + valueField: 'title', + labelField: 'title', + delimiter: '|', + maxItems: 1, + placeholder: 'Time', + onChange: function(value) { + checkElementArrayValue($scope.selection, $scope.TimeOption); + $scope.selection.push(value); + // console.log($scope.selection) + getScenarioData(); + + + } + } + + + init(); + + function init() { + $scope.toggleSelection = toggleSelection; + getScenarioData(); + // radioSetting(); + getFilters(); + } + + function getFilters() { + TableFactory.getFilter().get({ + + + }).$promise.then(function(response) { + if (response != null) { + $scope.statusList = response.filters.status; + $scope.projectList = response.filters.projects; + $scope.installerList = response.filters.installers; + $scope.versionlist = response.filters.version; + $scope.loopci = response.filters.loops; + $scope.time = response.filters.time; + + $scope.statusListString = $scope.statusList.toString(); + $scope.projectListString = $scope.projectList.toString(); + $scope.installerListString = $scope.installerList.toString(); + $scope.VersionSelected = $scope.versionlist[1]; + $scope.LoopCiSelected = $scope.loopci[0]; + $scope.TimeSelected = $scope.time[0]; + radioSetting($scope.versionlist, $scope.loopci, $scope.time); + + } else { + alert("网络错误"); } + }) + } + + function getScenarioData() { + + var utl = BASE_URL + '/scenarios'; + var data = { + 'status': ['success', 'danger', 'warning'], + 'projects': ['functest', 'yardstick'], + 'installers': ['apex', 'compass', 'fuel', 'joid'], + 'version': $scope.VersionSelected, + 'loops': $scope.LoopCiSelected, + 'time': $scope.TimeSelected }; + var config = { + headers: { + 'Content-Type': 'application/x-www-form-urlencoded;charset=utf-8;' + } + } + $http.post(utl, data, config).then(function(response) { + if (response.status == 200) { + $scope.scenario = response.data; + constructJson(); + } + }) + } - // var headData = Object.keys($scope.scenario.scenarios.os_nosdn_kvm_noha.installers); - // $scope.headData = headData; - //construct json + //construct json function constructJson() { var colspan; @@ -267,19 +143,22 @@ angular.module('opnfvApp') for (var item in $scope.scenario.scenarios) { - - - - var headData = Object.keys($scope.scenario.scenarios[item].installers); + var headData = Object.keys($scope.scenario.scenarios[item].installers).sort(); var scenarioStatus = 
$scope.scenario.scenarios[item].status; - + var scenarioStatusDisplay; + if (scenarioStatus == "success") { + scenarioStatusDisplay = "navy"; + } else if (scenarioStatus == "danger") { + scenarioStatusDisplay = "danger"; + } else if (scenarioStatus == "warning") { + scenarioStatusDisplay = "warning"; + } InstallerData = headData; var projectData = []; var datadisplay = []; var projects = []; - for (var j = 0; j < headData.length; j++) { projectData.push($scope.scenario.scenarios[item].installers[headData[j]]); @@ -289,9 +168,30 @@ angular.module('opnfvApp') for (var k = 0; k < projectData[j].length; k++) { projects.push(projectData[j][k].project); var temArray = []; - temArray.push(projectData[j][k].score); - temArray.push(projectData[j][k].project); - temArray.push(headData[j]); + if (projectData[j][k].score == null) { + temArray.push("null"); + temArray.push(projectData[j][k].project); + temArray.push(headData[j]); + } else { + temArray.push(projectData[j][k].score); + temArray.push(projectData[j][k].project); + temArray.push(headData[j]); + } + + + if (projectData[j][k].status == "platinium") { + temArray.push("primary"); + temArray.push("P"); + } else if (projectData[j][k].status == "gold") { + temArray.push("danger"); + temArray.push("G"); + } else if (projectData[j][k].status == "silver") { + temArray.push("warning"); + temArray.push("S"); + } else if (projectData[j][k].status == null) { + temArray.push("null"); + } + datadisplay.push(temArray); } @@ -301,13 +201,21 @@ angular.module('opnfvApp') colspan = projects.length / headData.length; var tabledata = { - scenarioName: item, Installer: InstallerData, projectData: projectData, projects: projects, - datadisplay: datadisplay, colspan: colspan, status: scenarioStatus + scenarioName: item, + Installer: InstallerData, + projectData: projectData, + projects: projects, + datadisplay: datadisplay, + colspan: colspan, + status: scenarioStatus, + statusDisplay: scenarioStatusDisplay }; JSON.stringify(tabledata); $scope.tableDataAll.scenario.push(tabledata); + // console.log(tabledata); + } @@ -315,15 +223,13 @@ angular.module('opnfvApp') var tempHeadData = []; - - for (var i = 0; i < InstallerData.length; i++) { for (var j = 0; j < colspan; j++) { tempHeadData.push(InstallerData[i]); } } - console.log(tempHeadData); + //console.log(tempHeadData); var projectsInfoAll = []; @@ -334,13 +240,14 @@ angular.module('opnfvApp') projectsInfoAll.push(tempA); } - console.log(projectsInfoAll); + //console.log(projectsInfoAll); $scope.tableDataAll["colspan"] = colspan; $scope.tableDataAll["Installer"] = InstallerData; $scope.tableDataAll["Projects"] = projectsInfoAll; - console.log($scope.tableDataAll); + // console.log($scope.tableDataAll); + $scope.colspan = $scope.tableDataAll.colspan; } @@ -353,58 +260,11 @@ angular.module('opnfvApp') return size; } - init(); - function init() { - $scope.toggleSelection = toggleSelection; - - constructJson(); - - } - - // $scope.test=false; + $scope.colspan = $scope.tableDataAll.colspan; + // console.log($scope.colspan); - var statusListString = $scope.statusList.toString(); - var projectListString = $scope.projectList.toString(); - var installerListString = $scope.installerList.toString(); - - $scope.colspan=$scope.tableDataAll.colspan; - //filter function - function filterData() { - - - $scope.selectInstallers = []; - $scope.selectProjects = []; - $scope.selectStatus = []; - for (var i = 0; i < $scope.selection.length; i++) { - if (statusListString.indexOf($scope.selection[i]) > -1) { - 
$scope.selectStatus.push($scope.selection[i]); - } - if (projectListString.indexOf($scope.selection[i]) > -1) { - $scope.selectProjects.push($scope.selection[i]); - } - if (installerListString.indexOf($scope.selection[i]) > -1) { - $scope.selectInstallers.push($scope.selection[i]); - } - } - - $scope.colspan=$scope.selectProjects.length; - //when some selection is empty, we set it full - if($scope.selectInstallers.length==0){ - $scope.selectInstallers=$scope.installerList; - - } - if($scope.selectProjects.length==0){ - $scope.selectProjects=$scope.projectList; - $scope.colspan=$scope.tableDataAll.colspan; - } - if($scope.selectStatus.length==0){ - $scope.selectStatus=$scope.statusList - } - } - - - //find all same element index + //find all same element index function getSameElementIndex(array, element) { var indices = []; var idx = array.indexOf(element); @@ -424,64 +284,31 @@ angular.module('opnfvApp') } - - $scope.VersionOption = [ - { title: 'Colorado' }, - { title: 'Master' } - ]; - $scope.VersionConfig = { - create: true, - valueField: 'title', - labelField: 'title', - delimiter: '|', - maxItems: 1, - placeholder: 'Version', - onChange: function (value) { - checkElementArrayValue($scope.selection, $scope.VersionOption); - $scope.selection.push(value); - // console.log($scope.selection); - + function radioSetting(array1, array2, array3) { + var tempVersion = []; + var tempLoop = []; + var tempTime = []; + for (var i = 0; i < array1.length; i++) { + var temp = { + title: array1[i] + }; + tempVersion.push(temp); } - - } - - $scope.LoopOption = [ - { title: 'Daily' }, - { title: 'Weekly' }, - { title: 'Monthly' } - ]; - $scope.LoopConfig = { - create: true, - valueField: 'title', - labelField: 'title', - delimiter: '|', - maxItems: 1, - placeholder: 'Loop', - onChange: function (value) { - checkElementArrayValue($scope.selection, $scope.LoopOption); - $scope.selection.push(value); - // console.log($scope.selection); - + for (var i = 0; i < array2.length; i++) { + var temp = { + title: array2[i] + }; + tempLoop.push(temp); } - } - - $scope.TimeOption = [ - { title: '10 days' }, - { title: '1 month' } - ]; - $scope.TimeConfig = { - create: true, - valueField: 'title', - labelField: 'title', - delimiter: '|', - maxItems: 1, - placeholder: 'Time', - onChange: function (value) { - checkElementArrayValue($scope.selection, $scope.TimeOption); - $scope.selection.push(value); - // console.log($scope.selection) - + for (var i = 0; i < array3.length; i++) { + var temp = { + title: array3[i] + }; + tempTime.push(temp); } + $scope.VersionOption = tempVersion; + $scope.LoopOption = tempLoop; + $scope.TimeOption = tempTime; } //remove element in the array @@ -508,13 +335,51 @@ angular.module('opnfvApp') if (idx > -1) { $scope.selection.splice(idx, 1); - } - else { + filterData($scope.selection) + } else { $scope.selection.push(status); + filterData($scope.selection) } - console.log($scope.selection); - filterData(); + // console.log($scope.selection); } - }]); + //filter function + function filterData(selection) { + + $scope.selectInstallers = []; + $scope.selectProjects = []; + $scope.selectStatus = []; + for (var i = 0; i < selection.length; i++) { + if ($scope.statusListString.indexOf(selection[i]) > -1) { + $scope.selectStatus.push(selection[i]); + } + if ($scope.projectListString.indexOf(selection[i]) > -1) { + $scope.selectProjects.push(selection[i]); + } + if ($scope.installerListString.indexOf(selection[i]) > -1) { + $scope.selectInstallers.push(selection[i]); + } + } + + $scope.colspan = 
$scope.selectProjects.length; + //when some selection is empty, we set it full + if ($scope.selectInstallers.length == 0) { + $scope.selectInstallers = $scope.installerList; + + } + if ($scope.selectProjects.length == 0) { + $scope.selectProjects = $scope.projectList; + $scope.colspan = $scope.tableDataAll.colspan; + } + if ($scope.selectStatus.length == 0) { + $scope.selectStatus = $scope.statusList + } + + // console.log($scope.selectStatus); + // console.log($scope.selectProjects); + + } + + + }]); \ No newline at end of file diff --git a/utils/test/reporting/pages/app/scripts/data.json b/utils/test/reporting/pages/app/scripts/data.json deleted file mode 100644 index a15fdf37e..000000000 --- a/utils/test/reporting/pages/app/scripts/data.json +++ /dev/null @@ -1,76 +0,0 @@ - -{"scenarios": { - "os-nosdn-kvm-noha": { - "status": "Success", - "installers": { - "apex": [ - { - "project": "Deployment", - "score": "13/14", - "status": "SUCCESS" - }, - { - "project": "Functest", - "score": "13/14", - "status": "SUCCESS" - }, - { - "project": "Yardstick", - "socre": "13/14", - "status": "SUCCESS" - } - ], - "compass": [ - { - "project": "Deployment", - "score": "13/14", - "status": "SUCCESS" - }, - { - "project": "Functest", - "score": "13/14", - "status": "SUCCESS" - }, - { - "project": "Yardstick", - "socre": "13/14", - "status": "SUCCESS" - } - ], - "fuel": [ - { - "project": "Deployment", - "score": "13/14", - "status": "SUCCESS" - }, - { - "project": "Functest", - "score": "13/14", - "status": "SUCCESS" - }, - { - "project": "Yardstick", - "socre": "13/14", - "status": "SUCCESS" - } - ], - "joid": [ - { - "project": "Deployment", - "score": "13/14", - "status": "SUCCESS" - }, - { - "project": "Functest", - "score": "13/14", - "status": "SUCCESS" - }, - { - "project": "Yardstick", - "socre": "13/14", - "status": "SUCCESS" - } - ] - } - } -}} diff --git a/utils/test/reporting/pages/app/scripts/factory/table.factory.js b/utils/test/reporting/pages/app/scripts/factory/table.factory.js index 22443221e..a2e2aeff0 100644 --- a/utils/test/reporting/pages/app/scripts/factory/table.factory.js +++ b/utils/test/reporting/pages/app/scripts/factory/table.factory.js @@ -4,17 +4,23 @@ * get data factory */ angular.module('opnfvApp') - .factory('TableFactory', function ($resource, $rootScope) { - // var baseUrl = base_Url; + .factory('TableFactory', function($resource, $rootScope) { return { - getFilter: function () { - return $resource(baseUrl + '/', {}, { - 'post': { - method: 'POST', + getFilter: function() { + return $resource(BASE_URL + '/filters', {}, { + 'get': { + method: 'GET', } }); + }, + getScenario: function() { + return $resource(BASE_URL + '/scenarios', {}, { + 'post': { + method: 'POST', + } + }) } }; - }); + }); \ No newline at end of file diff --git a/utils/test/reporting/pages/app/styles/custome.css b/utils/test/reporting/pages/app/styles/custome.css index dadc68a03..b498cf04d 100644 --- a/utils/test/reporting/pages/app/styles/custome.css +++ b/utils/test/reporting/pages/app/styles/custome.css @@ -1,8 +1,8 @@ .container-tablesize { - margin: auto 5%; + margin: auto 5%; } -.btn-outline { +.btn-outline { border-color: white; } @@ -29,6 +29,68 @@ } .myhr { - border:0.5px dashed #e7eaec; - border-top:1px;margin-bottom: 3px; -} \ No newline at end of file + border: 0.5px dashed #e7eaec; + border-top: 1px; + margin-bottom: 3px; +} + +td.null { + background-color: #e7eaec; + color: #e7eaec; +} + +input[type="radio"][disabled], +input[type="checkbox"][disabled], +input[type="radio"].disabled, 
+input[type="checkbox"].disabled, +fieldset[disabled] input[type="radio"], +fieldset[disabled] input[type="checkbox"] { + cursor: not-allowed; + display: none; +} + +body, +html { + margin: 0; + padding: 0; + min-height: 100%; +} + + +/*img[usemap] { + border: none; + height: auto; + max-width: 100%; + width: auto; +}*/ + +.popup { + position: absolute; + display: none; + /*background-color: #dd8;*/ + border-radius: 5px 5px 5px 5px; + background-color: #f3f3f4; + opacity: 0.9; +} + + +/* +body { + height: 1200px; +} + +html { + min-height: 100%; +}*/ + + +/*html, +body { + height: 100%; +} + +#page-wrapper { + position: inherit; + margin: 0 0 0 220px; + min-height: 773px; +}*/ \ No newline at end of file diff --git a/utils/test/reporting/pages/app/styles/fonts/glyphicons-halflings-regular.svg b/utils/test/reporting/pages/app/styles/fonts/glyphicons-halflings-regular.svg index 187805af6..94fb5490a 100644 --- a/utils/test/reporting/pages/app/styles/fonts/glyphicons-halflings-regular.svg +++ b/utils/test/reporting/pages/app/styles/fonts/glyphicons-halflings-regular.svg @@ -285,4 +285,4 @@ - + \ No newline at end of file diff --git a/utils/test/reporting/pages/app/views/commons/table.html b/utils/test/reporting/pages/app/views/commons/table.html index ed9300edd..f504bd76b 100644 --- a/utils/test/reporting/pages/app/views/commons/table.html +++ b/utils/test/reporting/pages/app/views/commons/table.html @@ -29,81 +29,85 @@
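+<!-- Legend for the status cells rendered below (values produced by
+     table.controller.js above): a project status of "platinium" is shown
+     as class "primary" with letter P, "gold" as "danger"/G, "silver" as
+     "warning"/S; whole-scenario rows map success/danger/warning to
+     navy/danger/warning. -->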
     -
-
+
-   -
-
+
-
+ -
+
-
- +
+ -
+
-
- +
+ -
+
-
- +
+ +
+
+ + + + + + + + - + + -
Scenario {{key}}
{{project[0]}}
- - - - - + + + + - + - - - - - - + - - - - -
Scenario {{key}}
{{scenario.scenarioName}} {{project[0]}}
{{scenario.scenarioName}} D {{data[0]}}
+ + {{data[4]}} {{data[0]}} -
- D danger  - S success  - W warning
+ +
+ G gold  + P platinium  + S silver
+
- - + \ No newline at end of file diff --git a/utils/test/reporting/pages/app/views/main.html b/utils/test/reporting/pages/app/views/main.html index 1e3fe9e5a..cca893713 100644 --- a/utils/test/reporting/pages/app/views/main.html +++ b/utils/test/reporting/pages/app/views/main.html @@ -1,186 +1,171 @@ - + + - - - - - + -
-
+
+
-

- OPNFV’s goals are to: -

-
+

+ OPNFV’s goals are to: +

+
-

Develop an integrated and tested open source platform that can be used to build NFV functionality--accelerating - the introduction of new products and services

-

Details »

-
-
+

Develop an integrated and tested open source platform that can be used to build NFV functionality--accelerating the introduction of new products and services

+

Details »

+
+
-

Include participation of leading end users to validate that OPNFV meets the needs of user community

-

Details »

-
-
+

Include participation of leading end users to validate that OPNFV meets the needs of user community

+

Details »

+
+
-

Contribute to and participate in relevant open source projects that will be leveraged in the OPNFV platform; - ensuring consistency, performance and interoperability among open source components

-

Details »

-
-
+

Contribute to and participate in relevant open source projects that will be leveraged in the OPNFV platform; ensuring consistency, performance and interoperability among open source components

+

Details »

+
+
-

Establish an ecosystem for NFV solutions based on open standards and software to meet the needs of end users

-

Details »

-
-
+

Establish an ecosystem for NFV solutions based on open standards and software to meet the needs of end users

+

Details »

+
+
-

Promote OPNFV as the preferred platform and community for open source NFV

-

Details »

-
+

Promote OPNFV as the preferred platform and community for open source NFV

+

Details »

-
+
+
-
-
-
- -

Contact Us

-
-
-
-
-
+
+
+
+ +

Contact Us

+
+
+
+
+
Press, Analyst, or Speaking Inquiries
pr@opnfv.org
-
+
OPNFV Events
events@opnfv.org
-
+
IT Support
opnfv-helpdesk@rt.linuxfoundation.org
-
+
-
-
+
+
To submit and track bugs related to OPNFV
Please visit https://jira.opnfv.org
-
+
Newsletter
Sign up for the OPNFV newsletter
-
+
Membership
Please visit the Join as a Member page
-
-
-
-
- +
+
+
+
+ -
-
-
-
-

© 2016 Open Platform for NFV Project, Inc
A Linux Foundation Collaborative Project. All - Rights Reserved. Open Platform for NFV and OPNFV are trademarks of the Open Platform for NFV Project, Inc. Linux - Foundation is a registered trademark of The Linux Foundation. Linux is a registered trademark of Linus Torvalds. - Please see our terms of use, trademark policy, and privacy policy. -

-
+
+
+
+
+

© 2016 Open Platform for NFV Project, Inc
A Linux Foundation Collaborative Project. All Rights Reserved. Open Platform for NFV and OPNFV are trademarks of the Open Platform for NFV Project, Inc. Linux Foundation + is a registered trademark of The Linux Foundation. Linux is a registered trademark of Linus Torvalds. Please see our terms of use, trademark policy, and privacy policy. +

+
+
-
+ \ No newline at end of file diff --git a/utils/test/reporting/pages/bower.json b/utils/test/reporting/pages/bower.json index dd0996d18..bfc4df3d9 100644 --- a/utils/test/reporting/pages/bower.json +++ b/utils/test/reporting/pages/bower.json @@ -1,32 +1,47 @@ { - "name": "opnfv", - "version": "0.0.0", - "dependencies": { - "angular": "^1.4.0", - "bootstrap": "^3.2.0", - "angular-animate": "^1.4.0", - "jquery-slimscroll": "slimscroll#^1.3.8", - "metisMenu": "~2.0.2", - "chosen": "^1.6.2", - "oclazyload": "^1.0.9", - "angular-bootstrap": "~1.1.2", - "angular-ui-router": "~0.2.15", - "angular-resource": "^1.6.0", - "angular-selectize2": "^3.0.1", - "components-font-awesome": "^4.7.0" - }, - "devDependencies": { - "angular-mocks": "^1.4.0" - }, - "appPath": "app", - "moduleName": "opnfvApp", - "overrides": { - "bootstrap": { - "main": [ - "less/bootstrap.less", - "dist/css/bootstrap.css", - "dist/js/bootstrap.js" - ] + "name": "opnfv", + "version": "0.0.0", + "dependencies": { + "angular": "^1.4.0", + "bootstrap": "^3.2.0", + "angular-animate": "^1.4.0", + "jquery-slimscroll": "slimscroll#^1.3.8", + "metisMenu": "~2.0.2", + "chosen": "^1.6.2", + "oclazyload": "^1.0.9", + "angular-bootstrap": "~1.1.2", + "angular-ui-router": "~0.2.15", + "angular-resource": "^1.6.0", + "angular-selectize2": "^3.0.1", + "components-font-awesome": "^4.7.0", + "angular-tooltips": "^1.1.8", + "jQuery-rwdImageMaps": "*", + "animate.css": "^3.5.2", + "ng-dialog": "^1.0.0", + "inspiniacss": "^0.0.1" + }, + "devDependencies": { + "angular-mocks": "^1.4.0" + }, + "appPath": "app", + "moduleName": "opnfvApp", + "overrides": { + "bootstrap": { + "main": [ + "less/bootstrap.less", + "dist/css/bootstrap.css", + "dist/js/bootstrap.js" + ] + }, + "jQuery-rwdImageMaps": { + "main": [ + "jquery.rwdImageMaps.min.js" + ] + }, + "inspiniacss": { + "main": [ + "style.css" + ] + } } - } -} +} \ No newline at end of file diff --git a/utils/test/reporting/pages/test/karma.conf.js b/utils/test/reporting/pages/test/karma.conf.js index 2b0f41cd3..5c2e79b9f 100644 --- a/utils/test/reporting/pages/test/karma.conf.js +++ b/utils/test/reporting/pages/test/karma.conf.js @@ -36,6 +36,7 @@ module.exports = function(config) { 'bower_components/microplugin/src/microplugin.js', 'bower_components/selectize/dist/js/selectize.js', 'bower_components/angular-selectize2/dist/angular-selectize.js', + 'bower_components/angular-tooltips/dist/angular-tooltips.min.js', 'bower_components/angular-mocks/angular-mocks.js', // endbower 'app/scripts/**/*.js', diff --git a/utils/test/reporting/reporting.yaml b/utils/test/reporting/reporting.yaml index 9db0890b2..8c5ce1383 100644 --- a/utils/test/reporting/reporting.yaml +++ b/utils/test/reporting/reporting.yaml @@ -1,3 +1,4 @@ +--- general: installers: - apex @@ -8,6 +9,7 @@ general: versions: - master + - danube log: log_file: reporting.log @@ -19,12 +21,12 @@ general: directories: # Relative to the path where the repo is cloned: - dir_reporting: utils/tests/reporting/ - dir_log: utils/tests/reporting/log/ - dir_conf: utils/tests/reporting/conf/ - dir_utils: utils/tests/reporting/utils/ - dir_templates: utils/tests/reporting/templates/ - dir_display: utils/tests/reporting/display/ + dir_reporting: utils/tests/reporting/ + dir_log: utils/tests/reporting/log/ + dir_conf: utils/tests/reporting/conf/ + dir_utils: utils/tests/reporting/utils/ + dir_templates: utils/tests/reporting/templates/ + dir_display: utils/tests/reporting/display/ url: testresults.opnfv.org/reporting/ @@ -36,17 +38,32 @@ functest: - ovno - 
security_scan - rally_sanity + - healthcheck + - odl_netvirt + - aaa + - cloudify_ims + - orchestra_ims + - juju_epc + - orchestra + - promise max_scenario_criteria: 50 test_conf: https://git.opnfv.org/cgit/functest/plain/functest/ci/testcases.yaml log_level: ERROR jenkins_url: https://build.opnfv.org/ci/view/functest/job/ - exclude_noha: False - exclude_virtual: True + exclude_noha: "False" + exclude_virtual: "False" yardstick: test_conf: https://git.opnfv.org/cgit/yardstick/plain/tests/ci/report_config.yaml log_level: ERROR +storperf: + test_list: + - snia_steady_state + log_level: ERROR + qtip: bottleneck: + +vsperf: diff --git a/utils/test/reporting/storperf/reporting-status.py b/utils/test/reporting/storperf/reporting-status.py new file mode 100644 index 000000000..888e339f8 --- /dev/null +++ b/utils/test/reporting/storperf/reporting-status.py @@ -0,0 +1,145 @@ +#!/usr/bin/python +# +# This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +import datetime +import jinja2 +import os + +# manage conf +import utils.reporting_utils as rp_utils + +import utils.scenarioResult as sr + +installers = rp_utils.get_config('general.installers') +versions = rp_utils.get_config('general.versions') +PERIOD = rp_utils.get_config('general.period') + +# Logger +logger = rp_utils.getLogger("Storperf-Status") +reportingDate = datetime.datetime.now().strftime("%Y-%m-%d %H:%M") + +logger.info("*******************************************") +logger.info("* Generating reporting scenario status *") +logger.info("* Data retention = %s days *" % PERIOD) +logger.info("* *") +logger.info("*******************************************") + +# retrieve the list of storperf tests +storperf_tests = rp_utils.get_config('storperf.test_list') +logger.info("Storperf tests: %s" % storperf_tests) + +# For all the versions +for version in versions: + # For all the installers + for installer in installers: + # get scenarios results data + # for the moment we consider only 1 case snia_steady_state + scenario_results = rp_utils.getScenarios("snia_steady_state", + installer, + version) + # logger.info("scenario_results: %s" % scenario_results) + + scenario_stats = rp_utils.getScenarioStats(scenario_results) + logger.info("scenario_stats: %s" % scenario_stats) + items = {} + scenario_result_criteria = {} + + # From each scenarios get results list + for s, s_result in scenario_results.items(): + logger.info("---------------------------------") + logger.info("installer %s, version %s, scenario %s", installer, + version, s) + ten_criteria = len(s_result) + + ten_score = 0 + for v in s_result: + if "PASS" in v['criteria']: + ten_score += 1 + + logger.info("ten_score: %s / %s" % (ten_score, ten_criteria)) + + four_score = 0 + try: + LASTEST_TESTS = rp_utils.get_config( + 'general.nb_iteration_tests_success_criteria') + s_result.sort(key=lambda x: x['start_date']) + four_result = s_result[-LASTEST_TESTS:] + logger.debug("four_result: {}".format(four_result)) + logger.debug("LASTEST_TESTS: {}".format(LASTEST_TESTS)) + # logger.debug("four_result: {}".format(four_result)) + four_criteria = len(four_result) + for v in four_result: + if "PASS" in v['criteria']: + four_score += 1 + logger.info("4 Score: %s / %s " % (four_score, + four_criteria)) + except: + logger.error("Impossible to retrieve the four_score") + + try: + s_status = (four_score * 100) / four_criteria + 
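+            # Python 2 integer division: s_status is a whole-number pass
+            # percentage (0-100) over the last iterations; the bare except
+            # below chiefly guards the ZeroDivisionError raised when no
+            # results were retrieved (four_criteria == 0).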
+            except:
+                s_status = 0
+            logger.info("Score percent = %s" % str(s_status))
+            s_four_score = str(four_score) + '/' + str(four_criteria)
+            s_ten_score = str(ten_score) + '/' + str(ten_criteria)
+            s_score_percent = str(s_status)
+
+            logger.debug(" s_status: {}".format(s_status))
+            if s_status == 100:
+                logger.info(">>>>> scenario OK, save the information")
+            else:
+                logger.info(">>>> scenario not OK, last 4 iterations = %s, \
+                last 10 days = %s" % (s_four_score, s_ten_score))
+
+            s_url = ""
+            if len(s_result) > 0:
+                build_tag = s_result[len(s_result)-1]['build_tag']
+                logger.debug("Build tag: %s" % build_tag)
+                s_url = rp_utils.getJenkinsUrl(build_tag)
+                logger.info("last jenkins url: %s" % s_url)
+
+            # Save daily results in a file
+            path_validation_file = ("./display/" + version +
+                                    "/storperf/scenario_history.txt")
+
+            if not os.path.exists(path_validation_file):
+                with open(path_validation_file, 'w') as f:
+                    info = 'date,scenario,installer,details,score\n'
+                    f.write(info)
+
+            with open(path_validation_file, "a") as f:
+                info = (reportingDate + "," + s + "," + installer +
+                        "," + s_ten_score + "," +
+                        str(s_score_percent) + "\n")
+                f.write(info)
+
+            scenario_result_criteria[s] = sr.ScenarioResult(s_status,
+                                                            s_four_score,
+                                                            s_ten_score,
+                                                            s_score_percent,
+                                                            s_url)
+
+            logger.info("--------------------------")
+
+        templateLoader = jinja2.FileSystemLoader(".")
+        templateEnv = jinja2.Environment(loader=templateLoader,
+                                         autoescape=True)
+
+        TEMPLATE_FILE = "./storperf/template/index-status-tmpl.html"
+        template = templateEnv.get_template(TEMPLATE_FILE)
+
+        outputText = template.render(scenario_results=scenario_result_criteria,
+                                     installer=installer,
+                                     period=PERIOD,
+                                     version=version,
+                                     date=reportingDate)
+
+        with open("./display/" + version +
+                  "/storperf/status-" + installer + ".html", "wb") as fh:
+            fh.write(outputText)
diff --git a/utils/test/reporting/storperf/template/index-status-tmpl.html b/utils/test/reporting/storperf/template/index-status-tmpl.html
new file mode 100644
index 000000000..e0fcc6828
--- /dev/null
+++ b/utils/test/reporting/storperf/template/index-status-tmpl.html
@@ -0,0 +1,111 @@
+ + + + + + + + + + + + + +
+
+

Storperf status page ({{version}}, {{date}})

+ +
+
+
+
+ + +
+

List of last scenarios ({{version}}) run over the last {{period}} days

+ + + + + + + + + {% for scenario,result in scenario_results.iteritems() -%} + + + + + + + + {%- endfor %} +
+ <th>Scenario</th> <th>Status</th> <th>Trend</th> <th>Last 4 Iterations</th> <th>Last 10 Days</th>
{{scenario}}
+ <td>{{scenario_results[scenario].getFourDaysScore()}}</td> <td>{{scenario_results[scenario].getTenDaysScore()}}</td>
+
+ + +
+
+
diff --git a/utils/test/reporting/utils/reporting_utils.py b/utils/test/reporting/utils/reporting_utils.py index fc5d188af..aab7a3f4f 100644 --- a/utils/test/reporting/utils/reporting_utils.py +++ b/utils/test/reporting/utils/reporting_utils.py @@ -101,7 +101,15 @@ def getApiResults(case, installer, scenario, version): def getScenarios(case, installer, version): - case = case.getName() + try: + case = case.getName() + except: + # if case is not an object test case, try the string + if type(case) == str: + case = case + else: + raise ValueError("Case cannot be evaluated") + period = get_config('general.period') url_base = get_config('testapi.url') @@ -269,11 +277,15 @@ def getJenkinsUrl(build_tag): url_base = get_config('functest.jenkins_url') try: build_id = [int(s) for s in build_tag.split("-") if s.isdigit()] - url_id = build_tag[8:-(len(build_id) + 3)] + "/" + str(build_id[0]) + url_id = (build_tag[8:-(len(str(build_id[0])) + 1)] + + "/" + str(build_id[0])) jenkins_url = url_base + url_id + "/console" except: print('Impossible to get jenkins url:') + if "jenkins-" not in build_tag: + jenkins_url = None + return jenkins_url diff --git a/utils/test/reporting/yardstick/scenarioResult.py b/utils/test/reporting/utils/scenarioResult.py similarity index 84% rename from utils/test/reporting/yardstick/scenarioResult.py rename to utils/test/reporting/utils/scenarioResult.py index 1f7eb2b24..6029d7f42 100644 --- a/utils/test/reporting/yardstick/scenarioResult.py +++ b/utils/test/reporting/utils/scenarioResult.py @@ -10,11 +10,12 @@ class ScenarioResult(object): def __init__(self, status, four_days_score='', ten_days_score='', - score_percent=0.0): + score_percent=0.0, last_url=''): self.status = status self.four_days_score = four_days_score self.ten_days_score = ten_days_score self.score_percent = score_percent + self.last_url = last_url def getStatus(self): return self.status @@ -27,3 +28,6 @@ class ScenarioResult(object): def getScorePercent(self): return self.score_percent + + def getLastUrl(self): + return self.last_url diff --git a/utils/test/reporting/yardstick/reporting-status.py b/utils/test/reporting/yardstick/reporting-status.py index a0f0b0184..12f42ca31 100644 --- a/utils/test/reporting/yardstick/reporting-status.py +++ b/utils/test/reporting/yardstick/reporting-status.py @@ -10,7 +10,7 @@ import datetime import jinja2 import os -import scenarioResult as sr +import utils.scenarioResult as sr from scenarios import config as cf # manage conf diff --git a/utils/test/testapi/.coveragerc b/utils/test/testapi/.coveragerc new file mode 100644 index 000000000..23fb97fba --- /dev/null +++ b/utils/test/testapi/.coveragerc @@ -0,0 +1,27 @@ +# .coveragerc to control coverage.py + +[run] +branch = True +source = + opnfv_testapi + +[report] +# Regexes for lines to exclude from consideration +exclude_lines = + # Have to re-enable the standard pragma + pragma: no cover + + # Don't complain about missing debug-only code: + def __repr__ + if self\.debug + + # Don't complain if tests don't hit defensive assertion code: + raise AssertionError + raise NotImplementedError + + # Don't complain if non-runnable code isn't run: + if 0: + if __name__ == .__main__.: + +ignore_errors = True + diff --git a/utils/test/testapi/docker/Dockerfile b/utils/test/testapi/docker/Dockerfile index 86513e05b..e031e194c 100644 --- a/utils/test/testapi/docker/Dockerfile +++ b/utils/test/testapi/docker/Dockerfile @@ -8,13 +8,12 @@ # $ docker build -t opnfv/testapi:tag . 
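# Note: the container now always listens on 8000 internally (the api_port
# override was removed); to publish the API on another host port, change
# the host side of -p in the run example below, e.g. -p 8001:8000.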
# # Execution: -# $ docker run -dti -p 8000:8000 \ -# -e "swagger_url=http://10.63.243.17:8000" \ +# $ docker run -dti -p 8001:8000 \ +# -e "swagger_url=http://10.63.243.17:8001" \ # -e "mongodb_url=mongodb://10.63.243.17:27017/" \ -# -e "api_port=8000" # opnfv/testapi:tag # -# NOTE: providing swagger_url, api_port, mongodb_url is optional. +# NOTE: providing swagger_url, mongodb_url is optional. # If not provided, it will use the default one # configured in config.ini # diff --git a/utils/test/testapi/docker/prepare-env.sh b/utils/test/testapi/docker/prepare-env.sh index 99433cc8c..9f07efbd1 100755 --- a/utils/test/testapi/docker/prepare-env.sh +++ b/utils/test/testapi/docker/prepare-env.sh @@ -9,8 +9,3 @@ fi if [ "$swagger_url" != "" ]; then sudo crudini --set --existing $FILE swagger base_url $swagger_url fi - -if [ "$api_port" != "" ];then - sudo crudini --set --existing $FILE api port $api_port -fi - diff --git a/utils/test/testapi/etc/config.ini b/utils/test/testapi/etc/config.ini index 0edb73a3f..77cc6c6ee 100644 --- a/utils/test/testapi/etc/config.ini +++ b/utils/test/testapi/etc/config.ini @@ -11,6 +11,7 @@ dbname = test_results_collection port = 8000 # With debug_on set to true, error traces will be shown in HTTP responses debug = True +authenticate = False [swagger] base_url = http://localhost:8000 diff --git a/utils/test/testapi/opnfv_testapi/cmd/server.py b/utils/test/testapi/opnfv_testapi/cmd/server.py index c3d734607..fa2b72250 100644 --- a/utils/test/testapi/opnfv_testapi/cmd/server.py +++ b/utils/test/testapi/opnfv_testapi/cmd/server.py @@ -30,37 +30,43 @@ TODOs : """ import argparse +import sys -import tornado.ioloop import motor +import tornado.ioloop -from opnfv_testapi.common.config import APIConfig -from opnfv_testapi.tornado_swagger import swagger +from opnfv_testapi.common import config from opnfv_testapi.router import url_mappings +from opnfv_testapi.tornado_swagger import swagger + +CONF = None + -# optionally get config file from command line -parser = argparse.ArgumentParser() -parser.add_argument("-c", "--config-file", dest='config_file', - help="Config file location") -args = parser.parse_args() -CONF = APIConfig().parse(args.config_file) +def parse_config(argv=[]): + global CONF + parser = argparse.ArgumentParser() + parser.add_argument("-c", "--config-file", dest='config_file', + help="Config file location") + args = parser.parse_args(argv) + CONF = config.APIConfig().parse(args.config_file) -# connecting to MongoDB server, and choosing database -client = motor.MotorClient(CONF.mongo_url) -db = client[CONF.mongo_dbname] -swagger.docs(base_url=CONF.swagger_base_url) +def get_db(): + return motor.MotorClient(CONF.mongo_url)[CONF.mongo_dbname] def make_app(): + swagger.docs(base_url=CONF.swagger_base_url) return swagger.Application( url_mappings.mappings, - db=db, + db=get_db(), debug=CONF.api_debug_on, + auth=CONF.api_authenticate_on ) def main(): + parse_config(sys.argv[1:]) application = make_app() application.listen(CONF.api_port) tornado.ioloop.IOLoop.current().start() diff --git a/utils/test/testapi/opnfv_testapi/common/config.py b/utils/test/testapi/opnfv_testapi/common/config.py index ecab88ae3..105d4fabf 100644 --- a/utils/test/testapi/opnfv_testapi/common/config.py +++ b/utils/test/testapi/opnfv_testapi/common/config.py @@ -7,9 +7,8 @@ # http://www.apache.org/licenses/LICENSE-2.0 # feng.xiaowei@zte.com.cn remove prepare_put_request 5-30-2016 ############################################################################## - - -from ConfigParser import 
SafeConfigParser, NoOptionError +import ConfigParser +import os class ParseError(Exception): @@ -24,7 +23,7 @@ class ParseError(Exception): return 'error parsing config file : %s' % self.msg -class APIConfig: +class APIConfig(object): """ The purpose of this class is to load values correctly from the config file. Each key is declared as an attribute in __init__() and linked in parse() @@ -36,20 +35,21 @@ class APIConfig: self.mongo_dbname = None self.api_port = None self.api_debug_on = None + self.api_authenticate_on = None self._parser = None self.swagger_base_url = None def _get_parameter(self, section, param): try: return self._parser.get(section, param) - except NoOptionError: - raise ParseError("[%s.%s] parameter not found" % (section, param)) + except ConfigParser.NoOptionError: + raise ParseError("No parameter: [%s.%s]" % (section, param)) def _get_int_parameter(self, section, param): try: return int(self._get_parameter(section, param)) except ValueError: - raise ParseError("[%s.%s] not an int" % (section, param)) + raise ParseError("Not int: [%s.%s]" % (section, param)) def _get_bool_parameter(self, section, param): result = self._get_parameter(section, param) @@ -59,7 +59,7 @@ class APIConfig: return False raise ParseError( - "[%s.%s : %s] not a boolean" % (section, param, result)) + "Not boolean: [%s.%s : %s]" % (section, param, result)) @staticmethod def parse(config_location=None): @@ -68,28 +68,21 @@ class APIConfig: if config_location is None: config_location = obj._default_config_location - obj._parser = SafeConfigParser() - obj._parser.read(config_location) - if not obj._parser: + if not os.path.exists(config_location): raise ParseError("%s not found" % config_location) + obj._parser = ConfigParser.SafeConfigParser() + obj._parser.read(config_location) + # Linking attributes to keys from file with their sections obj.mongo_url = obj._get_parameter("mongo", "url") obj.mongo_dbname = obj._get_parameter("mongo", "dbname") obj.api_port = obj._get_int_parameter("api", "port") obj.api_debug_on = obj._get_bool_parameter("api", "debug") + obj.api_authenticate_on = obj._get_bool_parameter("api", + "authenticate") + obj.swagger_base_url = obj._get_parameter("swagger", "base_url") return obj - - def __str__(self): - return "mongo_url = %s \n" \ - "mongo_dbname = %s \n" \ - "api_port = %s \n" \ - "api_debug_on = %s \n" \ - "swagger_base_url = %s \n" % (self.mongo_url, - self.mongo_dbname, - self.api_port, - self.api_debug_on, - self.swagger_base_url) diff --git a/utils/test/testapi/opnfv_testapi/common/constants.py b/utils/test/testapi/opnfv_testapi/common/constants.py index 4d39a142d..71bd95216 100644 --- a/utils/test/testapi/opnfv_testapi/common/constants.py +++ b/utils/test/testapi/opnfv_testapi/common/constants.py @@ -10,6 +10,7 @@ DEFAULT_REPRESENTATION = "application/json" HTTP_BAD_REQUEST = 400 +HTTP_UNAUTHORIZED = 401 HTTP_FORBIDDEN = 403 HTTP_NOT_FOUND = 404 HTTP_OK = 200 diff --git a/utils/test/testapi/opnfv_testapi/resources/handlers.py b/utils/test/testapi/opnfv_testapi/resources/handlers.py index a2628e249..8255b526a 100644 --- a/utils/test/testapi/opnfv_testapi/resources/handlers.py +++ b/utils/test/testapi/opnfv_testapi/resources/handlers.py @@ -20,19 +20,19 @@ # feng.xiaowei@zte.com.cn remove DashboardHandler 5-30-2016 ############################################################################## -import json from datetime import datetime +import functools +import json from tornado import gen -from tornado.web import RequestHandler, asynchronous, HTTPError +from tornado 
import web -from models import CreateResponse -from opnfv_testapi.common.constants import DEFAULT_REPRESENTATION, \ - HTTP_BAD_REQUEST, HTTP_NOT_FOUND, HTTP_FORBIDDEN +import models +from opnfv_testapi.common import constants from opnfv_testapi.tornado_swagger import swagger -class GenericApiHandler(RequestHandler): +class GenericApiHandler(web.RequestHandler): def __init__(self, application, request, **kwargs): super(GenericApiHandler, self).__init__(application, request, **kwargs) self.db = self.settings["db"] @@ -44,49 +44,71 @@ class GenericApiHandler(RequestHandler): self.db_testcases = 'testcases' self.db_results = 'results' self.db_scenarios = 'scenarios' + self.auth = self.settings["auth"] def prepare(self): if self.request.method != "GET" and self.request.method != "DELETE": if self.request.headers.get("Content-Type") is not None: if self.request.headers["Content-Type"].startswith( - DEFAULT_REPRESENTATION): + constants.DEFAULT_REPRESENTATION): try: self.json_args = json.loads(self.request.body) except (ValueError, KeyError, TypeError) as error: - raise HTTPError(HTTP_BAD_REQUEST, - "Bad Json format [{}]". - format(error)) + raise web.HTTPError(constants.HTTP_BAD_REQUEST, + "Bad Json format [{}]". + format(error)) def finish_request(self, json_object=None): if json_object: self.write(json.dumps(json_object)) - self.set_header("Content-Type", DEFAULT_REPRESENTATION) + self.set_header("Content-Type", constants.DEFAULT_REPRESENTATION) self.finish() def _create_response(self, resource): href = self.request.full_url() + '/' + str(resource) - return CreateResponse(href=href).format() + return models.CreateResponse(href=href).format() def format_data(self, data): cls_data = self.table_cls.from_dict(data) return cls_data.format_http() - @asynchronous + def authenticate(method): + @web.asynchronous + @gen.coroutine + @functools.wraps(method) + def wrapper(self, *args, **kwargs): + if self.auth: + try: + token = self.request.headers['X-Auth-Token'] + except KeyError: + raise web.HTTPError(constants.HTTP_UNAUTHORIZED, + "No Authentication Header.") + query = {'access_token': token} + check = yield self._eval_db_find_one(query, 'tokens') + if not check: + raise web.HTTPError(constants.HTTP_FORBIDDEN, + "Invalid Token.") + ret = yield gen.coroutine(method)(self, *args, **kwargs) + raise gen.Return(ret) + return wrapper + + @web.asynchronous @gen.coroutine + @authenticate def _create(self, miss_checks, db_checks, **kwargs): """ :param miss_checks: [miss1, miss2] :param db_checks: [(table, exist, query, error)] """ if self.json_args is None: - raise HTTPError(HTTP_BAD_REQUEST, "no body") + raise web.HTTPError(constants.HTTP_BAD_REQUEST, "no body") data = self.table_cls.from_dict(self.json_args) for miss in miss_checks: miss_data = data.__getattribute__(miss) if miss_data is None or miss_data == '': - raise HTTPError(HTTP_BAD_REQUEST, - '{} missing'.format(miss)) + raise web.HTTPError(constants.HTTP_BAD_REQUEST, + '{} missing'.format(miss)) for k, v in kwargs.iteritems(): data.__setattr__(k, v) @@ -95,7 +117,7 @@ class GenericApiHandler(RequestHandler): check = yield self._eval_db_find_one(query(data), table) if (exist and not check) or (not exist and check): code, message = error(data) - raise HTTPError(code, message) + raise web.HTTPError(code, message) if self.table != 'results': data.creation_date = datetime.now() @@ -107,7 +129,7 @@ class GenericApiHandler(RequestHandler): resource = _id self.finish_request(self._create_response(resource)) - @asynchronous + @web.asynchronous @gen.coroutine 
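The authenticate decorator above is what the new authenticate config switch controls: when enabled, every _create, _delete and _update first looks up the request's X-Auth-Token header in the tokens collection. A client-side sketch, assuming a server running with authentication on and a pre-inserted token document {'access_token': '12345'}; host, port, token and payload are illustrative:

import requests

r = requests.post('http://localhost:8000/api/v1/pods',
                  headers={'Content-Type': 'application/json',
                           'X-Auth-Token': '12345'},
                  data='{"name": "zte-1", "mode": "virtual"}')
print(r.status_code)
# without the X-Auth-Token header -> 401 "No Authentication Header."
# with a token not in 'tokens'    -> 403 "Invalid Token."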
def _list(self, query=None, res_op=None, *args, **kwargs): if query is None: @@ -126,40 +148,42 @@ class GenericApiHandler(RequestHandler): res = res_op(data, *args) self.finish_request(res) - @asynchronous + @web.asynchronous @gen.coroutine def _get_one(self, query): data = yield self._eval_db_find_one(query) if data is None: - raise HTTPError(HTTP_NOT_FOUND, - "[{}] not exist in table [{}]" - .format(query, self.table)) + raise web.HTTPError(constants.HTTP_NOT_FOUND, + "[{}] not exist in table [{}]" + .format(query, self.table)) self.finish_request(self.format_data(data)) - @asynchronous + @web.asynchronous @gen.coroutine + @authenticate def _delete(self, query): data = yield self._eval_db_find_one(query) if data is None: - raise HTTPError(HTTP_NOT_FOUND, - "[{}] not exit in table [{}]" - .format(query, self.table)) + raise web.HTTPError(constants.HTTP_NOT_FOUND, + "[{}] not exit in table [{}]" + .format(query, self.table)) yield self._eval_db(self.table, 'remove', query) self.finish_request() - @asynchronous + @web.asynchronous @gen.coroutine + @authenticate def _update(self, query, db_keys): if self.json_args is None: - raise HTTPError(HTTP_BAD_REQUEST, "No payload") + raise web.HTTPError(constants.HTTP_BAD_REQUEST, "No payload") # check old data exist from_data = yield self._eval_db_find_one(query) if from_data is None: - raise HTTPError(HTTP_NOT_FOUND, - "{} could not be found in table [{}]" - .format(query, self.table)) + raise web.HTTPError(constants.HTTP_NOT_FOUND, + "{} could not be found in table [{}]" + .format(query, self.table)) data = self.table_cls.from_dict(from_data) # check new data exist @@ -167,9 +191,9 @@ class GenericApiHandler(RequestHandler): if not equal: to_data = yield self._eval_db_find_one(new_query) if to_data is not None: - raise HTTPError(HTTP_FORBIDDEN, - "{} already exists in table [{}]" - .format(new_query, self.table)) + raise web.HTTPError(constants.HTTP_FORBIDDEN, + "{} already exists in table [{}]" + .format(new_query, self.table)) # we merge the whole document """ edit_request = self._update_requests(data) @@ -186,7 +210,7 @@ class GenericApiHandler(RequestHandler): request = self._update_request(request, k, v, data.__getattribute__(k)) if not request: - raise HTTPError(HTTP_FORBIDDEN, "Nothing to update") + raise web.HTTPError(constants.HTTP_FORBIDDEN, "Nothing to update") edit_request = data.format() edit_request.update(request) diff --git a/utils/test/testapi/opnfv_testapi/resources/models.py b/utils/test/testapi/opnfv_testapi/resources/models.py index f518c97a0..0ea482fd2 100644 --- a/utils/test/testapi/opnfv_testapi/resources/models.py +++ b/utils/test/testapi/opnfv_testapi/resources/models.py @@ -1,98 +1,116 @@ -############################################################################## -# Copyright (c) 2015 Orange -# guyrodrigue.koffi@orange.com / koffirodrigue@gmail.com -# All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -# feng.xiaowei@zte.com.cn mv Pod to pod_models.py 5-18-2016 -# feng.xiaowei@zte.com.cn add MetaCreateResponse/MetaGetResponse 5-18-2016 -# feng.xiaowei@zte.com.cn mv TestProject to project_models.py 5-19-2016 -# feng.xiaowei@zte.com.cn delete meta class 5-19-2016 -# feng.xiaowei@zte.com.cn add CreateResponse 5-19-2016 -# feng.xiaowei@zte.com.cn mv TestCase to testcase_models.py 5-20-2016 -# feng.xiaowei@zte.com.cn mv TestResut to result_models.py 5-23-2016 -# feng.xiaowei@zte.com.cn add ModelBase 12-20-2016 -############################################################################## -import copy - -from opnfv_testapi.tornado_swagger import swagger - - -class ModelBase(object): - - def _format(self, excludes): - new_obj = copy.deepcopy(self) - dicts = new_obj.__dict__ - for k in dicts.keys(): - if k in excludes: - del dicts[k] - elif dicts[k]: - if hasattr(dicts[k], 'format'): - dicts[k] = dicts[k].format() - elif isinstance(dicts[k], list): - hs = list() - [hs.append(h.format() if hasattr(h, 'format') else str(h)) - for h in dicts[k]] - dicts[k] = hs - elif not isinstance(dicts[k], (str, int, float, dict)): - dicts[k] = str(dicts[k]) - return dicts - - def format(self): - return self._format(['_id']) - - def format_http(self): - return self._format([]) - - @staticmethod - def attr_parser(): - return {} - - @classmethod - def from_dict(cls, a_dict): - if a_dict is None: - return None - - attr_parser = cls.attr_parser() - t = cls() - for k, v in a_dict.iteritems(): - value = v - if isinstance(v, dict) and k in attr_parser: - value = attr_parser[k].from_dict(v) - elif isinstance(v, list) and k in attr_parser: - value = [] - for item in v: - value.append(attr_parser[k].from_dict(item)) - - t.__setattr__(k, value) - - return t - - -@swagger.model() -class CreateResponse(ModelBase): - def __init__(self, href=''): - self.href = href - - -@swagger.model() -class Versions(ModelBase): - """ - @property versions: - @ptype versions: C{list} of L{Version} - """ - - def __init__(self): - self.versions = list() - - @staticmethod - def attr_parser(): - return {'versions': Version} - - -@swagger.model() -class Version(ModelBase): - def __init__(self, version=None, description=None): - self.version = version - self.description = description +############################################################################## +# Copyright (c) 2015 Orange +# guyrodrigue.koffi@orange.com / koffirodrigue@gmail.com +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +# feng.xiaowei@zte.com.cn mv Pod to pod_models.py 5-18-2016 +# feng.xiaowei@zte.com.cn add MetaCreateResponse/MetaGetResponse 5-18-2016 +# feng.xiaowei@zte.com.cn mv TestProject to project_models.py 5-19-2016 +# feng.xiaowei@zte.com.cn delete meta class 5-19-2016 +# feng.xiaowei@zte.com.cn add CreateResponse 5-19-2016 +# feng.xiaowei@zte.com.cn mv TestCase to testcase_models.py 5-20-2016 +# feng.xiaowei@zte.com.cn mv TestResut to result_models.py 5-23-2016 +# feng.xiaowei@zte.com.cn add ModelBase 12-20-2016 +############################################################################## +import copy +import ast + + +from opnfv_testapi.tornado_swagger import swagger + + +class ModelBase(object): + + def format(self): + return self._format(['_id']) + + def format_http(self): + return self._format([]) + + @classmethod + def from_dict(cls, a_dict): + if a_dict is None: + return None + + attr_parser = cls.attr_parser() + t = cls() + for k, v in a_dict.iteritems(): + value = v + if isinstance(v, dict) and k in attr_parser: + value = attr_parser[k].from_dict(v) + elif isinstance(v, list) and k in attr_parser: + value = [] + for item in v: + value.append(attr_parser[k].from_dict(item)) + + t.__setattr__(k, value) + + return t + + @staticmethod + def attr_parser(): + return {} + + def _format(self, excludes): + new_obj = copy.deepcopy(self) + dicts = new_obj.__dict__ + for k in dicts.keys(): + if k in excludes: + del dicts[k] + elif dicts[k]: + dicts[k] = self._obj_format(dicts[k]) + return dicts + + def _obj_format(self, obj): + if self._has_format(obj): + obj = obj.format() + elif isinstance(obj, unicode): + try: + obj = self._obj_format(ast.literal_eval(obj)) + except: + try: + obj = str(obj) + except: + obj = obj + elif isinstance(obj, list): + hs = list() + for h in obj: + hs.append(self._obj_format(h)) + obj = hs + elif not isinstance(obj, (str, int, float, dict)): + obj = str(obj) + return obj + + @staticmethod + def _has_format(obj): + return not isinstance(obj, (str, unicode)) and hasattr(obj, 'format') + + +@swagger.model() +class CreateResponse(ModelBase): + def __init__(self, href=''): + self.href = href + + +@swagger.model() +class Versions(ModelBase): + """ + @property versions: + @ptype versions: C{list} of L{Version} + """ + + def __init__(self): + self.versions = list() + + @staticmethod + def attr_parser(): + return {'versions': Version} + + +@swagger.model() +class Version(ModelBase): + def __init__(self, version=None, description=None): + self.version = version + self.description = description diff --git a/utils/test/testapi/opnfv_testapi/resources/pod_handlers.py b/utils/test/testapi/opnfv_testapi/resources/pod_handlers.py index e1bd9d359..65c27f60a 100644 --- a/utils/test/testapi/opnfv_testapi/resources/pod_handlers.py +++ b/utils/test/testapi/opnfv_testapi/resources/pod_handlers.py @@ -6,17 +6,17 @@ # which accompanies this distribution, and is available at # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## +import handlers +from opnfv_testapi.common import constants from opnfv_testapi.tornado_swagger import swagger -from handlers import GenericApiHandler -from pod_models import Pod -from opnfv_testapi.common.constants import HTTP_FORBIDDEN +import pod_models -class 
GenericPodHandler(GenericApiHandler): +class GenericPodHandler(handlers.GenericApiHandler): def __init__(self, application, request, **kwargs): super(GenericPodHandler, self).__init__(application, request, **kwargs) self.table = 'pods' - self.table_cls = Pod + self.table_cls = pod_models.Pod class PodCLHandler(GenericPodHandler): @@ -46,7 +46,7 @@ class PodCLHandler(GenericPodHandler): def error(data): message = '{} already exists as a pod'.format(data.name) - return HTTP_FORBIDDEN, message + return constants.HTTP_FORBIDDEN, message miss_checks = ['name'] db_checks = [(self.table, False, query, error)] diff --git a/utils/test/testapi/opnfv_testapi/resources/project_handlers.py b/utils/test/testapi/opnfv_testapi/resources/project_handlers.py index 94c65b722..f3521961d 100644 --- a/utils/test/testapi/opnfv_testapi/resources/project_handlers.py +++ b/utils/test/testapi/opnfv_testapi/resources/project_handlers.py @@ -6,19 +6,19 @@ # which accompanies this distribution, and is available at # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## +import handlers +from opnfv_testapi.common import constants from opnfv_testapi.tornado_swagger import swagger -from handlers import GenericApiHandler -from opnfv_testapi.common.constants import HTTP_FORBIDDEN -from project_models import Project +import project_models -class GenericProjectHandler(GenericApiHandler): +class GenericProjectHandler(handlers.GenericApiHandler): def __init__(self, application, request, **kwargs): super(GenericProjectHandler, self).__init__(application, request, **kwargs) self.table = 'projects' - self.table_cls = Project + self.table_cls = project_models.Project class ProjectCLHandler(GenericProjectHandler): @@ -48,7 +48,7 @@ class ProjectCLHandler(GenericProjectHandler): def error(data): message = '{} already exists as a project'.format(data.name) - return HTTP_FORBIDDEN, message + return constants.HTTP_FORBIDDEN, message miss_checks = ['name'] db_checks = [(self.table, False, query, error)] diff --git a/utils/test/testapi/opnfv_testapi/resources/result_handlers.py b/utils/test/testapi/opnfv_testapi/resources/result_handlers.py index 2a1ed56ee..d41ba4820 100644 --- a/utils/test/testapi/opnfv_testapi/resources/result_handlers.py +++ b/utils/test/testapi/opnfv_testapi/resources/result_handlers.py @@ -6,30 +6,32 @@ # which accompanies this distribution, and is available at # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## -from datetime import datetime, timedelta +from datetime import datetime +from datetime import timedelta -from bson.objectid import ObjectId -from tornado.web import HTTPError +from bson import objectid +from tornado import web -from opnfv_testapi.common.constants import HTTP_BAD_REQUEST, HTTP_NOT_FOUND -from opnfv_testapi.resources.handlers import GenericApiHandler -from opnfv_testapi.resources.result_models import TestResult +from opnfv_testapi.common import constants +from opnfv_testapi.resources import handlers +from opnfv_testapi.resources import result_models from opnfv_testapi.tornado_swagger import swagger -class GenericResultHandler(GenericApiHandler): +class GenericResultHandler(handlers.GenericApiHandler): def __init__(self, application, request, **kwargs): super(GenericResultHandler, self).__init__(application, request, **kwargs) self.table = self.db_results - self.table_cls = TestResult + self.table_cls = result_models.TestResult def get_int(self, key, 
value): try: value = int(value) except: - raise HTTPError(HTTP_BAD_REQUEST, '{} must be int'.format(key)) + raise web.HTTPError(constants.HTTP_BAD_REQUEST, + '{} must be int'.format(key)) return value def set_query(self): @@ -144,14 +146,14 @@ class ResultsCLHandler(GenericResultHandler): def pod_error(data): message = 'Could not find pod [{}]'.format(data.pod_name) - return HTTP_NOT_FOUND, message + return constants.HTTP_NOT_FOUND, message def project_query(data): return {'name': data.project_name} def project_error(data): message = 'Could not find project [{}]'.format(data.project_name) - return HTTP_NOT_FOUND, message + return constants.HTTP_NOT_FOUND, message def testcase_query(data): return {'project_name': data.project_name, 'name': data.case_name} @@ -159,7 +161,7 @@ class ResultsCLHandler(GenericResultHandler): def testcase_error(data): message = 'Could not find testcase [{}] in project [{}]'\ .format(data.case_name, data.project_name) - return HTTP_NOT_FOUND, message + return constants.HTTP_NOT_FOUND, message miss_checks = ['pod_name', 'project_name', 'case_name'] db_checks = [('pods', True, pod_query, pod_error), @@ -178,7 +180,7 @@ class ResultsGURHandler(GenericResultHandler): @raise 404: test result not exist """ query = dict() - query["_id"] = ObjectId(result_id) + query["_id"] = objectid.ObjectId(result_id) self._get_one(query) @swagger.operation(nickname="updateTestResultById") @@ -193,6 +195,6 @@ class ResultsGURHandler(GenericResultHandler): @raise 404: result not exist @raise 403: nothing to update """ - query = {'_id': ObjectId(result_id)} + query = {'_id': objectid.ObjectId(result_id)} db_keys = [] self._update(query, db_keys) diff --git a/utils/test/testapi/opnfv_testapi/resources/scenario_handlers.py b/utils/test/testapi/opnfv_testapi/resources/scenario_handlers.py index a8c1a94fe..083bf59fc 100644 --- a/utils/test/testapi/opnfv_testapi/resources/scenario_handlers.py +++ b/utils/test/testapi/opnfv_testapi/resources/scenario_handlers.py @@ -1,17 +1,16 @@ -from opnfv_testapi.common.constants import HTTP_FORBIDDEN -from opnfv_testapi.resources.handlers import GenericApiHandler -from opnfv_testapi.resources.scenario_models import Scenario +from opnfv_testapi.common import constants +from opnfv_testapi.resources import handlers import opnfv_testapi.resources.scenario_models as models from opnfv_testapi.tornado_swagger import swagger -class GenericScenarioHandler(GenericApiHandler): +class GenericScenarioHandler(handlers.GenericApiHandler): def __init__(self, application, request, **kwargs): super(GenericScenarioHandler, self).__init__(application, request, **kwargs) self.table = self.db_scenarios - self.table_cls = Scenario + self.table_cls = models.Scenario class ScenariosCLHandler(GenericScenarioHandler): @@ -81,7 +80,7 @@ class ScenariosCLHandler(GenericScenarioHandler): def error(data): message = '{} already exists as a scenario'.format(data.name) - return HTTP_FORBIDDEN, message + return constants.HTTP_FORBIDDEN, message miss_checks = ['name'] db_checks = [(self.table, False, query, error)] @@ -116,6 +115,17 @@ class ScenarioGURHandler(GenericScenarioHandler): db_keys = ['name'] self._update(query, db_keys) + @swagger.operation(nickname="deleteScenarioByName") + def delete(self, name): + """ + @description: delete a scenario by name + @return 200: delete success + @raise 404: scenario not exist: + """ + + query = {'name': name} + self._delete(query) + def _update_query(self, keys, data): query = dict() equal = True diff --git 
a/utils/test/testapi/opnfv_testapi/resources/scenario_models.py b/utils/test/testapi/opnfv_testapi/resources/scenario_models.py index 73bcbe99e..b84accf4d 100644 --- a/utils/test/testapi/opnfv_testapi/resources/scenario_models.py +++ b/utils/test/testapi/opnfv_testapi/resources/scenario_models.py @@ -49,6 +49,24 @@ class ScenarioProject(models.ModelBase): return {'scores': ScenarioScore, 'trust_indicators': ScenarioTI} + def __eq__(self, other): + return [self.project == other.project and + self._customs_eq(other) and + self._scores_eq(other) and + self._ti_eq(other)] + + def __ne__(self, other): + return not self.__eq__(other) + + def _customs_eq(self, other): + return set(self.customs) == set(other.customs) + + def _scores_eq(self, other): + return set(self.scores) == set(other.scores) + + def _ti_eq(self, other): + return set(self.trust_indicators) == set(other.trust_indicators) + @swagger.model() class ScenarioVersion(models.ModelBase): @@ -64,6 +82,21 @@ class ScenarioVersion(models.ModelBase): def attr_parser(): return {'projects': ScenarioProject} + def __eq__(self, other): + return [self.version == other.version and self._projects_eq(other)] + + def __ne__(self, other): + return not self.__eq__(other) + + def _projects_eq(self, other): + for s_project in self.projects: + for o_project in other.projects: + if s_project.project == o_project.project: + if s_project != o_project: + return False + + return True + @swagger.model() class ScenarioInstaller(models.ModelBase): @@ -79,6 +112,21 @@ class ScenarioInstaller(models.ModelBase): def attr_parser(): return {'versions': ScenarioVersion} + def __eq__(self, other): + return [self.installer == other.installer and self._versions_eq(other)] + + def __ne__(self, other): + return not self.__eq__(other) + + def _versions_eq(self, other): + for s_version in self.versions: + for o_version in other.versions: + if s_version.version == o_version.version: + if s_version != o_version: + return False + + return True + @swagger.model() class ScenarioCreateRequest(models.ModelBase): @@ -126,6 +174,21 @@ class Scenario(models.ModelBase): def attr_parser(): return {'installers': ScenarioInstaller} + def __ne__(self, other): + return not self.__eq__(other) + + def __eq__(self, other): + return [self.name == other.name and self._installers_eq(other)] + + def _installers_eq(self, other): + for s_install in self.installers: + for o_install in other.installers: + if s_install.installer == o_install.installer: + if s_install != o_install: + return False + + return True + @swagger.model() class Scenarios(models.ModelBase): diff --git a/utils/test/testapi/opnfv_testapi/resources/testcase_handlers.py b/utils/test/testapi/opnfv_testapi/resources/testcase_handlers.py index 100a4fd91..3debd6918 100644 --- a/utils/test/testapi/opnfv_testapi/resources/testcase_handlers.py +++ b/utils/test/testapi/opnfv_testapi/resources/testcase_handlers.py @@ -6,19 +6,19 @@ # which accompanies this distribution, and is available at # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## -from opnfv_testapi.common.constants import HTTP_FORBIDDEN -from opnfv_testapi.resources.handlers import GenericApiHandler -from opnfv_testapi.resources.testcase_models import Testcase +from opnfv_testapi.common import constants +from opnfv_testapi.resources import handlers +from opnfv_testapi.resources import testcase_models from opnfv_testapi.tornado_swagger import swagger -class GenericTestcaseHandler(GenericApiHandler): +class 
GenericTestcaseHandler(handlers.GenericApiHandler): def __init__(self, application, request, **kwargs): super(GenericTestcaseHandler, self).__init__(application, request, **kwargs) self.table = self.db_testcases - self.table_cls = Testcase + self.table_cls = testcase_models.Testcase class TestcaseCLHandler(GenericTestcaseHandler): @@ -58,12 +58,12 @@ class TestcaseCLHandler(GenericTestcaseHandler): def p_error(data): message = 'Could not find project [{}]'.format(data.project_name) - return HTTP_FORBIDDEN, message + return constants.HTTP_FORBIDDEN, message def tc_error(data): message = '{} already exists as a testcase in project {}'\ .format(data.name, data.project_name) - return HTTP_FORBIDDEN, message + return constants.HTTP_FORBIDDEN, message miss_checks = ['name'] db_checks = [(self.db_projects, True, p_query, p_error), diff --git a/utils/test/testapi/opnfv_testapi/router/url_mappings.py b/utils/test/testapi/opnfv_testapi/router/url_mappings.py index 0ae3c31c3..39cf006af 100644 --- a/utils/test/testapi/opnfv_testapi/router/url_mappings.py +++ b/utils/test/testapi/opnfv_testapi/router/url_mappings.py @@ -6,37 +6,34 @@ # which accompanies this distribution, and is available at # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## -from opnfv_testapi.resources.handlers import VersionHandler -from opnfv_testapi.resources.testcase_handlers import TestcaseCLHandler, \ - TestcaseGURHandler -from opnfv_testapi.resources.pod_handlers import PodCLHandler, PodGURHandler -from opnfv_testapi.resources.project_handlers import ProjectCLHandler, \ - ProjectGURHandler -from opnfv_testapi.resources.result_handlers import ResultsCLHandler, \ - ResultsGURHandler -from opnfv_testapi.resources.scenario_handlers import ScenariosCLHandler -from opnfv_testapi.resources.scenario_handlers import ScenarioGURHandler +from opnfv_testapi.resources import handlers +from opnfv_testapi.resources import pod_handlers +from opnfv_testapi.resources import project_handlers +from opnfv_testapi.resources import result_handlers +from opnfv_testapi.resources import scenario_handlers +from opnfv_testapi.resources import testcase_handlers mappings = [ # GET /versions => GET API version - (r"/versions", VersionHandler), + (r"/versions", handlers.VersionHandler), # few examples: # GET /api/v1/pods => Get all pods # GET /api/v1/pods/1 => Get details on POD 1 - (r"/api/v1/pods", PodCLHandler), - (r"/api/v1/pods/([^/]+)", PodGURHandler), + (r"/api/v1/pods", pod_handlers.PodCLHandler), + (r"/api/v1/pods/([^/]+)", pod_handlers.PodGURHandler), # few examples: # GET /projects # GET /projects/yardstick - (r"/api/v1/projects", ProjectCLHandler), - (r"/api/v1/projects/([^/]+)", ProjectGURHandler), + (r"/api/v1/projects", project_handlers.ProjectCLHandler), + (r"/api/v1/projects/([^/]+)", project_handlers.ProjectGURHandler), # few examples # GET /projects/qtip/cases => Get cases for qtip - (r"/api/v1/projects/([^/]+)/cases", TestcaseCLHandler), - (r"/api/v1/projects/([^/]+)/cases/([^/]+)", TestcaseGURHandler), + (r"/api/v1/projects/([^/]+)/cases", testcase_handlers.TestcaseCLHandler), + (r"/api/v1/projects/([^/]+)/cases/([^/]+)", + testcase_handlers.TestcaseGURHandler), # new path to avoid a long depth # GET /results?project=functest&case=keystone.catalog&pod=1 @@ -44,10 +41,10 @@ mappings = [ # POST /results => # Push results with mandatory request payload parameters # (project, case, and pod) - (r"/api/v1/results", ResultsCLHandler), - (r"/api/v1/results/([^/]+)", 
ResultsGURHandler), + (r"/api/v1/results", result_handlers.ResultsCLHandler), + (r"/api/v1/results/([^/]+)", result_handlers.ResultsGURHandler), # scenarios - (r"/api/v1/scenarios", ScenariosCLHandler), - (r"/api/v1/scenarios/([^/]+)", ScenarioGURHandler), + (r"/api/v1/scenarios", scenario_handlers.ScenariosCLHandler), + (r"/api/v1/scenarios/([^/]+)", scenario_handlers.ScenarioGURHandler), ] diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/common/__init__.py b/utils/test/testapi/opnfv_testapi/tests/unit/common/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/common/noparam.ini b/utils/test/testapi/opnfv_testapi/tests/unit/common/noparam.ini new file mode 100644 index 000000000..fda2a09e9 --- /dev/null +++ b/utils/test/testapi/opnfv_testapi/tests/unit/common/noparam.ini @@ -0,0 +1,16 @@ +# to add a new parameter in the config file, +# the CONF object in config.ini must be updated +[mongo] +# URL of the mongo DB +# Mongo auth url => mongodb://user1:pwd1@host1/?authSource=db1 +url = mongodb://127.0.0.1:27017/ + +[api] +# Listening port +port = 8000 +# With debug_on set to true, error traces will be shown in HTTP responses +debug = True +authenticate = False + +[swagger] +base_url = http://localhost:8000 diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/common/normal.ini b/utils/test/testapi/opnfv_testapi/tests/unit/common/normal.ini new file mode 100644 index 000000000..77cc6c6ee --- /dev/null +++ b/utils/test/testapi/opnfv_testapi/tests/unit/common/normal.ini @@ -0,0 +1,17 @@ +# to add a new parameter in the config file, +# the CONF object in config.ini must be updated +[mongo] +# URL of the mongo DB +# Mongo auth url => mongodb://user1:pwd1@host1/?authSource=db1 +url = mongodb://127.0.0.1:27017/ +dbname = test_results_collection + +[api] +# Listening port +port = 8000 +# With debug_on set to true, error traces will be shown in HTTP responses +debug = True +authenticate = False + +[swagger] +base_url = http://localhost:8000 diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/common/nosection.ini b/utils/test/testapi/opnfv_testapi/tests/unit/common/nosection.ini new file mode 100644 index 000000000..9988fc0a4 --- /dev/null +++ b/utils/test/testapi/opnfv_testapi/tests/unit/common/nosection.ini @@ -0,0 +1,11 @@ +# to add a new parameter in the config file, +# the CONF object in config.ini must be updated +[api] +# Listening port +port = 8000 +# With debug_on set to true, error traces will be shown in HTTP responses +debug = True +authenticate = False + +[swagger] +base_url = http://localhost:8000 diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/common/notboolean.ini b/utils/test/testapi/opnfv_testapi/tests/unit/common/notboolean.ini new file mode 100644 index 000000000..b3f327670 --- /dev/null +++ b/utils/test/testapi/opnfv_testapi/tests/unit/common/notboolean.ini @@ -0,0 +1,17 @@ +# to add a new parameter in the config file, +# the CONF object in config.ini must be updated +[mongo] +# URL of the mongo DB +# Mongo auth url => mongodb://user1:pwd1@host1/?authSource=db1 +url = mongodb://127.0.0.1:27017/ +dbname = test_results_collection + +[api] +# Listening port +port = 8000 +# With debug_on set to true, error traces will be shown in HTTP responses +debug = True +authenticate = notboolean + +[swagger] +base_url = http://localhost:8000 diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/common/notint.ini b/utils/test/testapi/opnfv_testapi/tests/unit/common/notint.ini new file mode 100644 index 
000000000..d1b752a34 --- /dev/null +++ b/utils/test/testapi/opnfv_testapi/tests/unit/common/notint.ini @@ -0,0 +1,17 @@ +# to add a new parameter in the config file, +# the CONF object in config.ini must be updated +[mongo] +# URL of the mongo DB +# Mongo auth url => mongodb://user1:pwd1@host1/?authSource=db1 +url = mongodb://127.0.0.1:27017/ +dbname = test_results_collection + +[api] +# Listening port +port = notint +# With debug_on set to true, error traces will be shown in HTTP responses +debug = True +authenticate = False + +[swagger] +base_url = http://localhost:8000 diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/common/test_config.py b/utils/test/testapi/opnfv_testapi/tests/unit/common/test_config.py new file mode 100644 index 000000000..aaff6bb91 --- /dev/null +++ b/utils/test/testapi/opnfv_testapi/tests/unit/common/test_config.py @@ -0,0 +1,36 @@ +import ConfigParser +import os + +import pytest + +from opnfv_testapi.common import config + + +@pytest.fixture() +def config_dir(): + return os.path.dirname(__file__) + + +@pytest.mark.parametrize('exception, config_file, excepted', [ + (config.ParseError, None, '/etc/opnfv_testapi/config.ini not found'), + (ConfigParser.NoSectionError, 'nosection.ini', 'No section:'), + (config.ParseError, 'noparam.ini', 'No parameter:'), + (config.ParseError, 'notint.ini', 'Not int:'), + (config.ParseError, 'notboolean.ini', 'Not boolean:')]) +def pytest_config_exceptions(config_dir, exception, config_file, excepted): + file = '{}/{}'.format(config_dir, config_file) if config_file else None + with pytest.raises(exception) as error: + config.APIConfig().parse(file) + assert excepted in str(error.value) + + +def test_config_success(): + config_dir = os.path.join(os.path.dirname(__file__), + '../../../../etc/config.ini') + conf = config.APIConfig().parse(config_dir) + assert conf.mongo_url == 'mongodb://127.0.0.1:27017/' + assert conf.mongo_dbname == 'test_results_collection' + assert conf.api_port == 8000 + assert conf.api_debug_on is True + assert conf.api_authenticate_on is False + assert conf.swagger_base_url == 'http://localhost:8000' diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/fake_pymongo.py b/utils/test/testapi/opnfv_testapi/tests/unit/fake_pymongo.py index 3c4fd01a3..ef74a0857 100644 --- a/utils/test/testapi/opnfv_testapi/tests/unit/fake_pymongo.py +++ b/utils/test/testapi/opnfv_testapi/tests/unit/fake_pymongo.py @@ -242,3 +242,4 @@ projects = MemDb('projects') testcases = MemDb('testcases') results = MemDb('results') scenarios = MemDb('scenarios') +tokens = MemDb('tokens') diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/test_base.py b/utils/test/testapi/opnfv_testapi/tests/unit/test_base.py index fc780e44c..b955f4a5a 100644 --- a/utils/test/testapi/opnfv_testapi/tests/unit/test_base.py +++ b/utils/test/testapi/opnfv_testapi/tests/unit/test_base.py @@ -7,21 +7,23 @@ # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## import json +from os import path -from tornado.web import Application -from tornado.testing import AsyncHTTPTestCase +import mock +from tornado import testing -from opnfv_testapi.router import url_mappings -from opnfv_testapi.resources.models import CreateResponse import fake_pymongo +from opnfv_testapi.cmd import server +from opnfv_testapi.resources import models -class TestBase(AsyncHTTPTestCase): +class TestBase(testing.AsyncHTTPTestCase): headers = {'Content-Type': 'application/json; charset=UTF-8'} def setUp(self): + 
self._patch_server() self.basePath = '' - self.create_res = CreateResponse + self.create_res = models.CreateResponse self.get_res = None self.list_res = None self.update_res = None @@ -30,12 +32,24 @@ class TestBase(AsyncHTTPTestCase): self.addCleanup(self._clear) super(TestBase, self).setUp() + def tearDown(self): + self.db_patcher.stop() + + def _patch_server(self): + server.parse_config([ + '--config-file', + path.join(path.dirname(__file__), 'common/normal.ini') + ]) + self.db_patcher = mock.patch('opnfv_testapi.cmd.server.get_db', + self._fake_pymongo) + self.db_patcher.start() + + @staticmethod + def _fake_pymongo(): + return fake_pymongo + def get_app(self): - return Application( - url_mappings.mappings, - db=fake_pymongo, - debug=True, - ) + return server.make_app() def create_d(self, *args): return self.create(self.req_d, *args) diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/test_fake_pymongo.py b/utils/test/testapi/opnfv_testapi/tests/unit/test_fake_pymongo.py index 5f50ba867..7c43fca62 100644 --- a/utils/test/testapi/opnfv_testapi/tests/unit/test_fake_pymongo.py +++ b/utils/test/testapi/opnfv_testapi/tests/unit/test_fake_pymongo.py @@ -9,13 +9,13 @@ import unittest from tornado import gen -from tornado.testing import AsyncHTTPTestCase, gen_test -from tornado.web import Application +from tornado import testing +from tornado import web import fake_pymongo -class MyTest(AsyncHTTPTestCase): +class MyTest(testing.AsyncHTTPTestCase): def setUp(self): super(MyTest, self).setUp() self.db = fake_pymongo @@ -23,7 +23,7 @@ class MyTest(AsyncHTTPTestCase): self.io_loop.run_sync(self.fixture_setup) def get_app(self): - return Application() + return web.Application() @gen.coroutine def fixture_setup(self): @@ -32,13 +32,13 @@ class MyTest(AsyncHTTPTestCase): yield self.db.pods.insert({'_id': '1', 'name': 'test1'}) yield self.db.pods.insert({'name': 'test2'}) - @gen_test + @testing.gen_test def test_find_one(self): user = yield self.db.pods.find_one({'name': 'test1'}) self.assertEqual(user, self.test1) self.db.pods.remove() - @gen_test + @testing.gen_test def test_find(self): cursor = self.db.pods.find() names = [] @@ -47,7 +47,7 @@ class MyTest(AsyncHTTPTestCase): names.append(ob.get('name')) self.assertItemsEqual(names, ['test1', 'test2']) - @gen_test + @testing.gen_test def test_update(self): yield self.db.pods.update({'_id': '1'}, {'name': 'new_test1'}) user = yield self.db.pods.find_one({'_id': '1'}) @@ -71,7 +71,7 @@ class MyTest(AsyncHTTPTestCase): None, check_keys=False) - @gen_test + @testing.gen_test def test_remove(self): yield self.db.pods.remove({'_id': '1'}) user = yield self.db.pods.find_one({'_id': '1'}) @@ -104,7 +104,7 @@ class MyTest(AsyncHTTPTestCase): def _insert_assert(self, docs, error=None, **kwargs): self._db_assert('insert', error, docs, **kwargs) - @gen_test + @testing.gen_test def _db_assert(self, method, error, *args, **kwargs): name_error = None try: diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/test_pod.py b/utils/test/testapi/opnfv_testapi/tests/unit/test_pod.py index a1184d554..922bd46e2 100644 --- a/utils/test/testapi/opnfv_testapi/tests/unit/test_pod.py +++ b/utils/test/testapi/opnfv_testapi/tests/unit/test_pod.py @@ -8,20 +8,19 @@ ############################################################################## import unittest -from test_base import TestBase -from opnfv_testapi.resources.pod_models import PodCreateRequest, Pod, Pods -from opnfv_testapi.common.constants import HTTP_OK, HTTP_BAD_REQUEST, \ - HTTP_FORBIDDEN, HTTP_NOT_FOUND 
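The parametrized cases in test_config.py above pin the new, shorter ParseError messages; one of them reproduced by hand (the path is illustrative, relative to the testapi tree):

from opnfv_testapi.common import config

try:
    config.APIConfig().parse(
        'opnfv_testapi/tests/unit/common/notint.ini')
except config.ParseError as error:
    print(error)   # error parsing config file : Not int: [api.port]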
+from opnfv_testapi.common import constants +from opnfv_testapi.resources import pod_models +import test_base as base -class TestPodBase(TestBase): +class TestPodBase(base.TestBase): def setUp(self): super(TestPodBase, self).setUp() - self.req_d = PodCreateRequest('zte-1', 'virtual', - 'zte pod 1', 'ci-pod') - self.req_e = PodCreateRequest('zte-2', 'metal', 'zte pod 2') - self.get_res = Pod - self.list_res = Pods + self.req_d = pod_models.PodCreateRequest('zte-1', 'virtual', + 'zte pod 1', 'ci-pod') + self.req_e = pod_models.PodCreateRequest('zte-2', 'metal', 'zte pod 2') + self.get_res = pod_models.Pod + self.list_res = pod_models.Pods self.basePath = '/api/v1/pods' def assert_get_body(self, pod, req=None): @@ -38,36 +37,36 @@ class TestPodBase(TestBase): class TestPodCreate(TestPodBase): def test_withoutBody(self): (code, body) = self.create() - self.assertEqual(code, HTTP_BAD_REQUEST) + self.assertEqual(code, constants.HTTP_BAD_REQUEST) def test_emptyName(self): - req_empty = PodCreateRequest('') + req_empty = pod_models.PodCreateRequest('') (code, body) = self.create(req_empty) - self.assertEqual(code, HTTP_BAD_REQUEST) + self.assertEqual(code, constants.HTTP_BAD_REQUEST) self.assertIn('name missing', body) def test_noneName(self): - req_none = PodCreateRequest(None) + req_none = pod_models.PodCreateRequest(None) (code, body) = self.create(req_none) - self.assertEqual(code, HTTP_BAD_REQUEST) + self.assertEqual(code, constants.HTTP_BAD_REQUEST) self.assertIn('name missing', body) def test_success(self): code, body = self.create_d() - self.assertEqual(code, HTTP_OK) + self.assertEqual(code, constants.HTTP_OK) self.assert_create_body(body) def test_alreadyExist(self): self.create_d() code, body = self.create_d() - self.assertEqual(code, HTTP_FORBIDDEN) + self.assertEqual(code, constants.HTTP_FORBIDDEN) self.assertIn('already exists', body) class TestPodGet(TestPodBase): def test_notExist(self): code, body = self.get('notExist') - self.assertEqual(code, HTTP_NOT_FOUND) + self.assertEqual(code, constants.HTTP_NOT_FOUND) def test_getOne(self): self.create_d() diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/test_project.py b/utils/test/testapi/opnfv_testapi/tests/unit/test_project.py index 327ddf7b2..afd4a6601 100644 --- a/utils/test/testapi/opnfv_testapi/tests/unit/test_project.py +++ b/utils/test/testapi/opnfv_testapi/tests/unit/test_project.py @@ -8,21 +8,21 @@ ############################################################################## import unittest -from test_base import TestBase -from opnfv_testapi.resources.project_models import ProjectCreateRequest, \ - Project, Projects, ProjectUpdateRequest -from opnfv_testapi.common.constants import HTTP_OK, HTTP_BAD_REQUEST, \ - HTTP_FORBIDDEN, HTTP_NOT_FOUND +from opnfv_testapi.common import constants +from opnfv_testapi.resources import project_models +import test_base as base -class TestProjectBase(TestBase): +class TestProjectBase(base.TestBase): def setUp(self): super(TestProjectBase, self).setUp() - self.req_d = ProjectCreateRequest('vping', 'vping-ssh test') - self.req_e = ProjectCreateRequest('doctor', 'doctor test') - self.get_res = Project - self.list_res = Projects - self.update_res = Project + self.req_d = project_models.ProjectCreateRequest('vping', + 'vping-ssh test') + self.req_e = project_models.ProjectCreateRequest('doctor', + 'doctor test') + self.get_res = project_models.Project + self.list_res = project_models.Projects + self.update_res = project_models.Project self.basePath = '/api/v1/projects' def 
assert_body(self, project, req=None): @@ -37,41 +37,41 @@ class TestProjectBase(TestBase): class TestProjectCreate(TestProjectBase): def test_withoutBody(self): (code, body) = self.create() - self.assertEqual(code, HTTP_BAD_REQUEST) + self.assertEqual(code, constants.HTTP_BAD_REQUEST) def test_emptyName(self): - req_empty = ProjectCreateRequest('') + req_empty = project_models.ProjectCreateRequest('') (code, body) = self.create(req_empty) - self.assertEqual(code, HTTP_BAD_REQUEST) + self.assertEqual(code, constants.HTTP_BAD_REQUEST) self.assertIn('name missing', body) def test_noneName(self): - req_none = ProjectCreateRequest(None) + req_none = project_models.ProjectCreateRequest(None) (code, body) = self.create(req_none) - self.assertEqual(code, HTTP_BAD_REQUEST) + self.assertEqual(code, constants.HTTP_BAD_REQUEST) self.assertIn('name missing', body) def test_success(self): (code, body) = self.create_d() - self.assertEqual(code, HTTP_OK) + self.assertEqual(code, constants.HTTP_OK) self.assert_create_body(body) def test_alreadyExist(self): self.create_d() (code, body) = self.create_d() - self.assertEqual(code, HTTP_FORBIDDEN) + self.assertEqual(code, constants.HTTP_FORBIDDEN) self.assertIn('already exists', body) class TestProjectGet(TestProjectBase): def test_notExist(self): code, body = self.get('notExist') - self.assertEqual(code, HTTP_NOT_FOUND) + self.assertEqual(code, constants.HTTP_NOT_FOUND) def test_getOne(self): self.create_d() code, body = self.get(self.req_d.name) - self.assertEqual(code, HTTP_OK) + self.assertEqual(code, constants.HTTP_OK) self.assert_body(body) def test_list(self): @@ -88,23 +88,23 @@ class TestProjectGet(TestProjectBase): class TestProjectUpdate(TestProjectBase): def test_withoutBody(self): code, _ = self.update(None, 'noBody') - self.assertEqual(code, HTTP_BAD_REQUEST) + self.assertEqual(code, constants.HTTP_BAD_REQUEST) def test_notFound(self): code, _ = self.update(self.req_e, 'notFound') - self.assertEqual(code, HTTP_NOT_FOUND) + self.assertEqual(code, constants.HTTP_NOT_FOUND) def test_newNameExist(self): self.create_d() self.create_e() code, body = self.update(self.req_e, self.req_d.name) - self.assertEqual(code, HTTP_FORBIDDEN) + self.assertEqual(code, constants.HTTP_FORBIDDEN) self.assertIn("already exists", body) def test_noUpdate(self): self.create_d() code, body = self.update(self.req_d, self.req_d.name) - self.assertEqual(code, HTTP_FORBIDDEN) + self.assertEqual(code, constants.HTTP_FORBIDDEN) self.assertIn("Nothing to update", body) def test_success(self): @@ -112,9 +112,9 @@ class TestProjectUpdate(TestProjectBase): code, body = self.get(self.req_d.name) _id = body._id - req = ProjectUpdateRequest('newName', 'new description') + req = project_models.ProjectUpdateRequest('newName', 'new description') code, body = self.update(req, self.req_d.name) - self.assertEqual(code, HTTP_OK) + self.assertEqual(code, constants.HTTP_OK) self.assertEqual(_id, body._id) self.assert_body(body, req) @@ -126,16 +126,16 @@ class TestProjectUpdate(TestProjectBase): class TestProjectDelete(TestProjectBase): def test_notFound(self): code, body = self.delete('notFound') - self.assertEqual(code, HTTP_NOT_FOUND) + self.assertEqual(code, constants.HTTP_NOT_FOUND) def test_success(self): self.create_d() code, body = self.delete(self.req_d.name) - self.assertEqual(code, HTTP_OK) + self.assertEqual(code, constants.HTTP_OK) self.assertEqual(body, '') code, body = self.get(self.req_d.name) - self.assertEqual(code, HTTP_NOT_FOUND) + self.assertEqual(code, 
constants.HTTP_NOT_FOUND) if __name__ == '__main__': unittest.main() diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/test_result.py b/utils/test/testapi/opnfv_testapi/tests/unit/test_result.py index 10575a9f5..2c7268eb6 100644 --- a/utils/test/testapi/opnfv_testapi/tests/unit/test_result.py +++ b/utils/test/testapi/opnfv_testapi/tests/unit/test_result.py @@ -7,17 +7,15 @@ # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## import copy -import unittest from datetime import datetime, timedelta +import unittest -from opnfv_testapi.common.constants import HTTP_OK, HTTP_BAD_REQUEST, \ - HTTP_NOT_FOUND -from opnfv_testapi.resources.pod_models import PodCreateRequest -from opnfv_testapi.resources.project_models import ProjectCreateRequest -from opnfv_testapi.resources.result_models import ResultCreateRequest, \ - TestResult, TestResults, ResultUpdateRequest, TI, TIHistory -from opnfv_testapi.resources.testcase_models import TestcaseCreateRequest -from test_base import TestBase +from opnfv_testapi.common import constants +from opnfv_testapi.resources import pod_models +from opnfv_testapi.resources import project_models +from opnfv_testapi.resources import result_models +from opnfv_testapi.resources import testcase_models +import test_base as base class Details(object): @@ -49,7 +47,7 @@ class Details(object): return t -class TestResultBase(TestBase): +class TestResultBase(base.TestBase): def setUp(self): self.pod = 'zte-pod1' self.project = 'functest' @@ -59,34 +57,41 @@ class TestResultBase(TestBase): self.build_tag = 'v3.0' self.scenario = 'odl-l2' self.criteria = 'passed' - self.trust_indicator = TI(0.7) + self.trust_indicator = result_models.TI(0.7) self.start_date = "2016-05-23 07:16:09.477097" self.stop_date = "2016-05-23 07:16:19.477097" self.update_date = "2016-05-24 07:16:19.477097" self.update_step = -0.05 super(TestResultBase, self).setUp() self.details = Details(timestart='0', duration='9s', status='OK') - self.req_d = ResultCreateRequest(pod_name=self.pod, - project_name=self.project, - case_name=self.case, - installer=self.installer, - version=self.version, - start_date=self.start_date, - stop_date=self.stop_date, - details=self.details.format(), - build_tag=self.build_tag, - scenario=self.scenario, - criteria=self.criteria, - trust_indicator=self.trust_indicator) - self.get_res = TestResult - self.list_res = TestResults - self.update_res = TestResult + self.req_d = result_models.ResultCreateRequest( + pod_name=self.pod, + project_name=self.project, + case_name=self.case, + installer=self.installer, + version=self.version, + start_date=self.start_date, + stop_date=self.stop_date, + details=self.details.format(), + build_tag=self.build_tag, + scenario=self.scenario, + criteria=self.criteria, + trust_indicator=self.trust_indicator) + self.get_res = result_models.TestResult + self.list_res = result_models.TestResults + self.update_res = result_models.TestResult self.basePath = '/api/v1/results' - self.req_pod = PodCreateRequest(self.pod, 'metal', 'zte pod 1') - self.req_project = ProjectCreateRequest(self.project, 'vping test') - self.req_testcase = TestcaseCreateRequest(self.case, - '/cases/vping', - 'vping-ssh test') + self.req_pod = pod_models.PodCreateRequest( + self.pod, + 'metal', + 'zte pod 1') + self.req_project = project_models.ProjectCreateRequest( + self.project, + 'vping test') + self.req_testcase = testcase_models.TestcaseCreateRequest( + self.case, + '/cases/vping', + 'vping-ssh test') 
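As a companion to the rewritten assertions, the 400 responses they expect come from get_int in the result handlers above; an illustrative client call (host and query values made up):

import requests

r = requests.get('http://localhost:8000/api/v1/results'
                 '?project=functest&case=vping_ssh&last=a')
print(r.status_code)                  # 400
print('last must be int' in r.text)   # True, same message the unit test checks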
self.create_help('/api/v1/pods', self.req_pod) self.create_help('/api/v1/projects', self.req_project) self.create_help('/api/v1/projects/%s/cases', @@ -94,7 +99,7 @@ class TestResultBase(TestBase): self.project) def assert_res(self, code, result, req=None): - self.assertEqual(code, HTTP_OK) + self.assertEqual(code, constants.HTTP_OK) if req is None: req = self.req_d self.assertEqual(result.pod_name, req.pod_name) @@ -129,78 +134,78 @@ class TestResultBase(TestBase): class TestResultCreate(TestResultBase): def test_nobody(self): (code, body) = self.create(None) - self.assertEqual(code, HTTP_BAD_REQUEST) + self.assertEqual(code, constants.HTTP_BAD_REQUEST) self.assertIn('no body', body) def test_podNotProvided(self): req = self.req_d req.pod_name = None (code, body) = self.create(req) - self.assertEqual(code, HTTP_BAD_REQUEST) + self.assertEqual(code, constants.HTTP_BAD_REQUEST) self.assertIn('pod_name missing', body) def test_projectNotProvided(self): req = self.req_d req.project_name = None (code, body) = self.create(req) - self.assertEqual(code, HTTP_BAD_REQUEST) + self.assertEqual(code, constants.HTTP_BAD_REQUEST) self.assertIn('project_name missing', body) def test_testcaseNotProvided(self): req = self.req_d req.case_name = None (code, body) = self.create(req) - self.assertEqual(code, HTTP_BAD_REQUEST) + self.assertEqual(code, constants.HTTP_BAD_REQUEST) self.assertIn('case_name missing', body) def test_noPod(self): req = self.req_d req.pod_name = 'notExistPod' (code, body) = self.create(req) - self.assertEqual(code, HTTP_NOT_FOUND) + self.assertEqual(code, constants.HTTP_NOT_FOUND) self.assertIn('Could not find pod', body) def test_noProject(self): req = self.req_d req.project_name = 'notExistProject' (code, body) = self.create(req) - self.assertEqual(code, HTTP_NOT_FOUND) + self.assertEqual(code, constants.HTTP_NOT_FOUND) self.assertIn('Could not find project', body) def test_noTestcase(self): req = self.req_d req.case_name = 'notExistTestcase' (code, body) = self.create(req) - self.assertEqual(code, HTTP_NOT_FOUND) + self.assertEqual(code, constants.HTTP_NOT_FOUND) self.assertIn('Could not find testcase', body) def test_success(self): (code, body) = self.create_d() - self.assertEqual(code, HTTP_OK) + self.assertEqual(code, constants.HTTP_OK) self.assert_href(body) def test_key_with_doc(self): req = copy.deepcopy(self.req_d) req.details = {'1.name': 'dot_name'} (code, body) = self.create(req) - self.assertEqual(code, HTTP_OK) + self.assertEqual(code, constants.HTTP_OK) self.assert_href(body) def test_no_ti(self): - req = ResultCreateRequest(pod_name=self.pod, - project_name=self.project, - case_name=self.case, - installer=self.installer, - version=self.version, - start_date=self.start_date, - stop_date=self.stop_date, - details=self.details.format(), - build_tag=self.build_tag, - scenario=self.scenario, - criteria=self.criteria) + req = result_models.ResultCreateRequest(pod_name=self.pod, + project_name=self.project, + case_name=self.case, + installer=self.installer, + version=self.version, + start_date=self.start_date, + stop_date=self.stop_date, + details=self.details.format(), + build_tag=self.build_tag, + scenario=self.scenario, + criteria=self.criteria) (code, res) = self.create(req) _id = res.href.split('/')[-1] - self.assertEqual(code, HTTP_OK) + self.assertEqual(code, constants.HTTP_OK) code, body = self.get(_id) self.assert_res(code, body, req) @@ -240,7 +245,7 @@ class TestResultGet(TestResultBase): def test_queryPeriodNotInt(self): code, body = 
self.query(self._set_query('period=a')) - self.assertEqual(code, HTTP_BAD_REQUEST) + self.assertEqual(code, constants.HTTP_BAD_REQUEST) self.assertIn('period must be int', body) def test_queryPeriodFail(self): @@ -253,7 +258,7 @@ class TestResultGet(TestResultBase): def test_queryLastNotInt(self): code, body = self.query(self._set_query('last=a')) - self.assertEqual(code, HTTP_BAD_REQUEST) + self.assertEqual(code, constants.HTTP_BAD_REQUEST) self.assertIn('last must be int', body) def test_queryLast(self): @@ -292,7 +297,7 @@ class TestResultGet(TestResultBase): req = self._create_changed_date(**kwargs) code, body = self.query(query) if not found: - self.assertEqual(code, HTTP_OK) + self.assertEqual(code, constants.HTTP_OK) self.assertEqual(0, len(body.results)) else: self.assertEqual(1, len(body.results)) @@ -326,10 +331,11 @@ class TestResultUpdate(TestResultBase): new_ti = copy.deepcopy(self.trust_indicator) new_ti.current += self.update_step - new_ti.histories.append(TIHistory(self.update_date, self.update_step)) + new_ti.histories.append( + result_models.TIHistory(self.update_date, self.update_step)) new_data = copy.deepcopy(self.req_d) new_data.trust_indicator = new_ti - update = ResultUpdateRequest(trust_indicator=new_ti) + update = result_models.ResultUpdateRequest(trust_indicator=new_ti) code, body = self.update(update, _id) self.assertEqual(_id, body._id) self.assert_res(code, body, new_data) diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/test_scenario.py b/utils/test/testapi/opnfv_testapi/tests/unit/test_scenario.py index c15dc32ea..7a6e94a93 100644 --- a/utils/test/testapi/opnfv_testapi/tests/unit/test_scenario.py +++ b/utils/test/testapi/opnfv_testapi/tests/unit/test_scenario.py @@ -1,16 +1,14 @@ from copy import deepcopy +from datetime import datetime import json import os -from datetime import datetime -from opnfv_testapi.common.constants import HTTP_BAD_REQUEST -from opnfv_testapi.common.constants import HTTP_FORBIDDEN -from opnfv_testapi.common.constants import HTTP_OK +from opnfv_testapi.common import constants import opnfv_testapi.resources.scenario_models as models -from test_testcase import TestBase +import test_base as base -class TestScenarioBase(TestBase): +class TestScenarioBase(base.TestBase): def setUp(self): super(TestScenarioBase, self).setUp() self.get_res = models.Scenario @@ -38,13 +36,13 @@ class TestScenarioBase(TestBase): return res.href.split('/')[-1] def assert_res(self, code, scenario, req=None): - self.assertEqual(code, HTTP_OK) + self.assertEqual(code, constants.HTTP_OK) if req is None: req = self.req_d - scenario_dict = scenario.format_http() - self.assertIsNotNone(scenario_dict['_id']) - self.assertIsNotNone(scenario_dict['creation_date']) - self.assertDictContainsSubset(req, scenario_dict) + self.assertIsNotNone(scenario._id) + self.assertIsNotNone(scenario.creation_date) + + self.assertEqual(scenario, models.Scenario.from_dict(req)) @staticmethod def _set_query(*args): @@ -61,29 +59,29 @@ class TestScenarioBase(TestBase): class TestScenarioCreate(TestScenarioBase): def test_withoutBody(self): (code, body) = self.create() - self.assertEqual(code, HTTP_BAD_REQUEST) + self.assertEqual(code, constants.HTTP_BAD_REQUEST) def test_emptyName(self): req_empty = models.ScenarioCreateRequest('') (code, body) = self.create(req_empty) - self.assertEqual(code, HTTP_BAD_REQUEST) + self.assertEqual(code, constants.HTTP_BAD_REQUEST) self.assertIn('name missing', body) def test_noneName(self): req_none = models.ScenarioCreateRequest(None) (code, body) = 
self.create(req_none) - self.assertEqual(code, HTTP_BAD_REQUEST) + self.assertEqual(code, constants.HTTP_BAD_REQUEST) self.assertIn('name missing', body) def test_success(self): (code, body) = self.create_d() - self.assertEqual(code, HTTP_OK) + self.assertEqual(code, constants.HTTP_OK) self.assert_create_body(body) def test_alreadyExist(self): self.create_d() (code, body) = self.create_d() - self.assertEqual(code, HTTP_FORBIDDEN) + self.assertEqual(code, constants.HTTP_FORBIDDEN) self.assertIn('already exists', body) @@ -126,7 +124,7 @@ class TestScenarioGet(TestScenarioBase): def _query_and_assert(self, query, found=True, reqs=None): code, body = self.query(query) if not found: - self.assertEqual(code, HTTP_OK) + self.assertEqual(code, constants.HTTP_OK) self.assertEqual(0, len(body.scenarios)) else: self.assertEqual(len(reqs), len(body.scenarios)) @@ -296,10 +294,23 @@ class TestScenarioUpdate(TestScenarioBase): def _update_and_assert(self, update_req, new_scenario, name=None): code, _ = self.update(update_req, self.scenario) - self.assertEqual(code, HTTP_OK) + self.assertEqual(code, constants.HTTP_OK) self._get_and_assert(self._none_default(name, self.scenario), new_scenario) @staticmethod def _none_default(check, default): return check if check else default + + +class TestScenarioDelete(TestScenarioBase): + def test_notFound(self): + code, body = self.delete('notFound') + self.assertEqual(code, constants.HTTP_NOT_FOUND) + + def test_success(self): + scenario = self.create_return_name(self.req_d) + code, _ = self.delete(scenario) + self.assertEqual(code, constants.HTTP_OK) + code, _ = self.get(scenario) + self.assertEqual(code, constants.HTTP_NOT_FOUND) diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/test_testcase.py b/utils/test/testapi/opnfv_testapi/tests/unit/test_testcase.py index cb767844a..c0494db5d 100644 --- a/utils/test/testapi/opnfv_testapi/tests/unit/test_testcase.py +++ b/utils/test/testapi/opnfv_testapi/tests/unit/test_testcase.py @@ -6,35 +6,33 @@ # which accompanies this distribution, and is available at # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## -import unittest import copy +import unittest -from test_base import TestBase -from opnfv_testapi.resources.testcase_models import TestcaseCreateRequest, \ - Testcase, Testcases, TestcaseUpdateRequest -from opnfv_testapi.resources.project_models import ProjectCreateRequest -from opnfv_testapi.common.constants import HTTP_OK, HTTP_BAD_REQUEST, \ - HTTP_FORBIDDEN, HTTP_NOT_FOUND +from opnfv_testapi.common import constants +from opnfv_testapi.resources import project_models +from opnfv_testapi.resources import testcase_models +import test_base as base -class TestCaseBase(TestBase): +class TestCaseBase(base.TestBase): def setUp(self): super(TestCaseBase, self).setUp() - self.req_d = TestcaseCreateRequest('vping_1', - '/cases/vping_1', - 'vping-ssh test') - self.req_e = TestcaseCreateRequest('doctor_1', - '/cases/doctor_1', - 'create doctor') - self.update_d = TestcaseUpdateRequest('vping_1', - 'vping-ssh test', - 'functest') - self.update_e = TestcaseUpdateRequest('doctor_1', - 'create doctor', - 'functest') - self.get_res = Testcase - self.list_res = Testcases - self.update_res = Testcase + self.req_d = testcase_models.TestcaseCreateRequest('vping_1', + '/cases/vping_1', + 'vping-ssh test') + self.req_e = testcase_models.TestcaseCreateRequest('doctor_1', + '/cases/doctor_1', + 'create doctor') + self.update_d = 
testcase_models.TestcaseUpdateRequest('vping_1', + 'vping-ssh test', + 'functest') + self.update_e = testcase_models.TestcaseUpdateRequest('doctor_1', + 'create doctor', + 'functest') + self.get_res = testcase_models.Testcase + self.list_res = testcase_models.Testcases + self.update_res = testcase_models.Testcase self.basePath = '/api/v1/projects/%s/cases' self.create_project() @@ -57,7 +55,8 @@ class TestCaseBase(TestBase): self.assertIsNotNone(new.creation_date) def create_project(self): - req_p = ProjectCreateRequest('functest', 'vping-ssh test') + req_p = project_models.ProjectCreateRequest('functest', + 'vping-ssh test') self.create_help('/api/v1/projects', req_p) self.project = req_p.name @@ -80,46 +79,46 @@ class TestCaseBase(TestBase): class TestCaseCreate(TestCaseBase): def test_noBody(self): (code, body) = self.create(None, 'vping') - self.assertEqual(code, HTTP_BAD_REQUEST) + self.assertEqual(code, constants.HTTP_BAD_REQUEST) def test_noProject(self): code, body = self.create(self.req_d, 'noProject') - self.assertEqual(code, HTTP_FORBIDDEN) + self.assertEqual(code, constants.HTTP_FORBIDDEN) self.assertIn('Could not find project', body) def test_emptyName(self): - req_empty = TestcaseCreateRequest('') + req_empty = testcase_models.TestcaseCreateRequest('') (code, body) = self.create(req_empty, self.project) - self.assertEqual(code, HTTP_BAD_REQUEST) + self.assertEqual(code, constants.HTTP_BAD_REQUEST) self.assertIn('name missing', body) def test_noneName(self): - req_none = TestcaseCreateRequest(None) + req_none = testcase_models.TestcaseCreateRequest(None) (code, body) = self.create(req_none, self.project) - self.assertEqual(code, HTTP_BAD_REQUEST) + self.assertEqual(code, constants.HTTP_BAD_REQUEST) self.assertIn('name missing', body) def test_success(self): code, body = self.create_d() - self.assertEqual(code, HTTP_OK) + self.assertEqual(code, constants.HTTP_OK) self.assert_create_body(body, None, self.project) def test_alreadyExist(self): self.create_d() code, body = self.create_d() - self.assertEqual(code, HTTP_FORBIDDEN) + self.assertEqual(code, constants.HTTP_FORBIDDEN) self.assertIn('already exists', body) class TestCaseGet(TestCaseBase): def test_notExist(self): code, body = self.get('notExist') - self.assertEqual(code, HTTP_NOT_FOUND) + self.assertEqual(code, constants.HTTP_NOT_FOUND) def test_getOne(self): self.create_d() code, body = self.get(self.req_d.name) - self.assertEqual(code, HTTP_OK) + self.assertEqual(code, constants.HTTP_OK) self.assert_body(body) def test_list(self): @@ -136,23 +135,23 @@ class TestCaseGet(TestCaseBase): class TestCaseUpdate(TestCaseBase): def test_noBody(self): code, _ = self.update(case='noBody') - self.assertEqual(code, HTTP_BAD_REQUEST) + self.assertEqual(code, constants.HTTP_BAD_REQUEST) def test_notFound(self): code, _ = self.update(self.update_e, 'notFound') - self.assertEqual(code, HTTP_NOT_FOUND) + self.assertEqual(code, constants.HTTP_NOT_FOUND) def test_newNameExist(self): self.create_d() self.create_e() code, body = self.update(self.update_e, self.req_d.name) - self.assertEqual(code, HTTP_FORBIDDEN) + self.assertEqual(code, constants.HTTP_FORBIDDEN) self.assertIn("already exists", body) def test_noUpdate(self): self.create_d() code, body = self.update(self.update_d, self.req_d.name) - self.assertEqual(code, HTTP_FORBIDDEN) + self.assertEqual(code, constants.HTTP_FORBIDDEN) self.assertIn("Nothing to update", body) def test_success(self): @@ -161,7 +160,7 @@ class TestCaseUpdate(TestCaseBase): _id = body._id code, body = 
self.update(self.update_e, self.req_d.name) - self.assertEqual(code, HTTP_OK) + self.assertEqual(code, constants.HTTP_OK) self.assertEqual(_id, body._id) self.assert_update_body(self.req_d, body, self.update_e) @@ -174,22 +173,22 @@ class TestCaseUpdate(TestCaseBase): update = copy.deepcopy(self.update_d) update.description = {'2. change': 'dollar change'} code, body = self.update(update, self.req_d.name) - self.assertEqual(code, HTTP_OK) + self.assertEqual(code, constants.HTTP_OK) class TestCaseDelete(TestCaseBase): def test_notFound(self): code, body = self.delete('notFound') - self.assertEqual(code, HTTP_NOT_FOUND) + self.assertEqual(code, constants.HTTP_NOT_FOUND) def test_success(self): self.create_d() code, body = self.delete(self.req_d.name) - self.assertEqual(code, HTTP_OK) + self.assertEqual(code, constants.HTTP_OK) self.assertEqual(body, '') code, body = self.get(self.req_d.name) - self.assertEqual(code, HTTP_NOT_FOUND) + self.assertEqual(code, constants.HTTP_NOT_FOUND) if __name__ == '__main__': diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/test_token.py b/utils/test/testapi/opnfv_testapi/tests/unit/test_token.py new file mode 100644 index 000000000..19b9e3e07 --- /dev/null +++ b/utils/test/testapi/opnfv_testapi/tests/unit/test_token.py @@ -0,0 +1,118 @@ +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 + +import unittest + +from tornado import web + +import fake_pymongo +from opnfv_testapi.common import constants +from opnfv_testapi.resources import project_models +from opnfv_testapi.router import url_mappings +import test_base as base + + +class TestToken(base.TestBase): + def get_app(self): + return web.Application( + url_mappings.mappings, + db=fake_pymongo, + debug=True, + auth=True + ) + + +class TestTokenCreateProject(TestToken): + def setUp(self): + super(TestTokenCreateProject, self).setUp() + self.req_d = project_models.ProjectCreateRequest('vping') + fake_pymongo.tokens.insert({"access_token": "12345"}) + self.basePath = '/api/v1/projects' + + def test_projectCreateTokenInvalid(self): + self.headers['X-Auth-Token'] = '1234' + code, body = self.create_d() + self.assertEqual(code, constants.HTTP_FORBIDDEN) + self.assertIn('Invalid Token.', body) + + def test_projectCreateTokenUnauthorized(self): + self.headers.pop('X-Auth-Token') + code, body = self.create_d() + self.assertEqual(code, constants.HTTP_UNAUTHORIZED) + self.assertIn('No Authentication Header.', body) + + def test_projectCreateTokenSuccess(self): + self.headers['X-Auth-Token'] = '12345' + code, body = self.create_d() + self.assertEqual(code, constants.HTTP_OK) + + +class TestTokenDeleteProject(TestToken): + def setUp(self): + super(TestTokenDeleteProject, self).setUp() + self.req_d = project_models.ProjectCreateRequest('vping') + fake_pymongo.tokens.insert({"access_token": "12345"}) + self.basePath = '/api/v1/projects' + + def test_projectDeleteTokenInvalid(self): + self.headers['X-Auth-Token'] = '12345' + self.create_d() + self.headers['X-Auth-Token'] = '1234' + code, body = self.delete(self.req_d.name) + self.assertEqual(code, constants.HTTP_FORBIDDEN) + self.assertIn('Invalid Token.', body) + + def test_projectDeleteTokenUnauthorized(self): + self.headers['X-Auth-Token'] = '12345' + self.create_d() + self.headers.pop('X-Auth-Token') + code, body = self.delete(self.req_d.name) + self.assertEqual(code, 
constants.HTTP_UNAUTHORIZED) + self.assertIn('No Authentication Header.', body) + + def test_projectDeleteTokenSuccess(self): + self.headers['X-Auth-Token'] = '12345' + self.create_d() + code, body = self.delete(self.req_d.name) + self.assertEqual(code, constants.HTTP_OK) + + +class TestTokenUpdateProject(TestToken): + def setUp(self): + super(TestTokenUpdateProject, self).setUp() + self.req_d = project_models.ProjectCreateRequest('vping') + fake_pymongo.tokens.insert({"access_token": "12345"}) + self.basePath = '/api/v1/projects' + + def test_projectUpdateTokenInvalid(self): + self.headers['X-Auth-Token'] = '12345' + self.create_d() + code, body = self.get(self.req_d.name) + self.headers['X-Auth-Token'] = '1234' + req = project_models.ProjectUpdateRequest('newName', 'new description') + code, body = self.update(req, self.req_d.name) + self.assertEqual(code, constants.HTTP_FORBIDDEN) + self.assertIn('Invalid Token.', body) + + def test_projectUpdateTokenUnauthorized(self): + self.headers['X-Auth-Token'] = '12345' + self.create_d() + code, body = self.get(self.req_d.name) + self.headers.pop('X-Auth-Token') + req = project_models.ProjectUpdateRequest('newName', 'new description') + code, body = self.update(req, self.req_d.name) + self.assertEqual(code, constants.HTTP_UNAUTHORIZED) + self.assertIn('No Authentication Header.', body) + + def test_projectUpdateTokenSuccess(self): + self.headers['X-Auth-Token'] = '12345' + self.create_d() + code, body = self.get(self.req_d.name) + req = project_models.ProjectUpdateRequest('newName', 'new description') + code, body = self.update(req, self.req_d.name) + self.assertEqual(code, constants.HTTP_OK) + +if __name__ == '__main__': + unittest.main() diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/test_version.py b/utils/test/testapi/opnfv_testapi/tests/unit/test_version.py index b6fbf45dc..c8f3f5062 100644 --- a/utils/test/testapi/opnfv_testapi/tests/unit/test_version.py +++ b/utils/test/testapi/opnfv_testapi/tests/unit/test_version.py @@ -8,14 +8,14 @@ ############################################################################## import unittest -from test_base import TestBase -from opnfv_testapi.resources.models import Versions +from opnfv_testapi.resources import models +import test_base as base -class TestVersionBase(TestBase): +class TestVersionBase(base.TestBase): def setUp(self): super(TestVersionBase, self).setUp() - self.list_res = Versions + self.list_res = models.Versions self.basePath = '/versions' diff --git a/utils/test/testapi/run_test.sh b/utils/test/testapi/run_test.sh index 51db09f65..4efc7af3b 100755 --- a/utils/test/testapi/run_test.sh +++ b/utils/test/testapi/run_test.sh @@ -15,6 +15,8 @@ source $SCRIPTDIR/testapi_venv/bin/activate pip install -r $SCRIPTDIR/requirements.txt pip install coverage pip install nose>=1.3.1 +pip install pytest +pip install mock find . -type f -name "*.pyc" -delete diff --git a/utils/test/testapi/test-requirements.txt b/utils/test/testapi/test-requirements.txt new file mode 100644 index 000000000..4633ad637 --- /dev/null +++ b/utils/test/testapi/test-requirements.txt @@ -0,0 +1,11 @@ +# The order of packages is significant, because pip processes them in the order +# of appearance. Changing the order has an impact on the overall integration +# process, which may cause wedges in the gate later. 
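+#
+# These are test-only dependencies; tox installs them together with the
+# runtime pins from requirements.txt (see deps in tox.ini below).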
+ +tox +mock +pytest +pytest-cov +coverage +pykwalify +pip_check_reqs diff --git a/utils/test/testapi/tox.ini b/utils/test/testapi/tox.ini new file mode 100644 index 000000000..81c9dfab1 --- /dev/null +++ b/utils/test/testapi/tox.ini @@ -0,0 +1,41 @@ +# Tox (http://tox.testrun.org/) is a tool for running tests +# in multiple virtualenvs. This configuration file will run the +# test suite on all supported python versions. To use it, "pip install tox" +# and then run "tox" from this directory. + +[tox] +envlist = py27,pep8 +skipsdist = True +sitepackages = True + +[testenv] +usedevelop = True +install_command = pip install -U {opts} {packages} +deps = + -rrequirements.txt + -rtest-requirements.txt +commands= + py.test \ + --basetemp={envtmpdir} \ + --cov \ + {posargs} +setenv= + HOME = {envtmpdir} + PYTHONPATH = {toxinidir} + +[testenv:pep8] +deps = flake8 +commands = flake8 {toxinidir} + +[flake8] +# H803 skipped on purpose per list discussion. +# E123, E125 skipped as they are invalid PEP-8. + +show-source = True +ignore = E123,E125,H803,E501 +builtins = _ +exclude = build,dist,doc,legacy,.eggs,.git,.tox,.venv,testapi_venv,venv + +[pytest] +testpaths = opnfv_testapi/tests +python_functions = test_* diff --git a/utils/test/testapi/update/test.yml b/utils/test/testapi/update/test.yml index a8868720d..943105c5f 100644 --- a/utils/test/testapi/update/test.yml +++ b/utils/test/testapi/update/test.yml @@ -1,7 +1,7 @@ --- - hosts: "{{ host }}" remote_user: "{{ user }}" - become: yes + become: "yes" become_method: sudo vars: user: "root" diff --git a/utils/test/testapi/update/update.yml b/utils/test/testapi/update/update.yml index e6663d905..18b75b6bf 100644 --- a/utils/test/testapi/update/update.yml +++ b/utils/test/testapi/update/update.yml @@ -1,7 +1,7 @@ --- - hosts: "{{ host }}" remote_user: "{{ user }}" - become: yes + become: "yes" become_method: sudo vars: user: "root" @@ -47,4 +47,4 @@ - name: remove temporary update directory file: path: "{{ update_path }}" - state: absent \ No newline at end of file + state: absent diff --git a/utils/test/vnfcatalogue/helpers/migrate.js b/utils/test/vnfcatalogue/helpers/migrate.js index a26f6ef88..d884d9c68 100644 --- a/utils/test/vnfcatalogue/helpers/migrate.js +++ b/utils/test/vnfcatalogue/helpers/migrate.js @@ -28,6 +28,14 @@ function createTable(tableName) { if (Schema[tableName][key].type === 'text' && Schema[tableName][key].hasOwnProperty('fieldtype')) { column = table[Schema[tableName][key].type](key, Schema[tableName][key].fieldtype); } + else if (Schema[tableName][key].type === 'enum' && Schema[tableName][key].hasOwnProperty('values') && Schema[tableName][key].nullable === true) { + console.log(Schema[tableName][key].values); + column = table[Schema[tableName][key].type](key, Schema[tableName][key].values).nullable(); + } + else if (Schema[tableName][key].type === 'enum' && Schema[tableName][key].hasOwnProperty('values')) { + console.log(Schema[tableName][key].values); + column = table[Schema[tableName][key].type](key, Schema[tableName][key].values).notNullable(); + } else if (Schema[tableName][key].type === 'string' && Schema[tableName][key].hasOwnProperty('maxlength')) { column = table[Schema[tableName][key].type](key, Schema[tableName][key].maxlength); } diff --git a/utils/test/vnfcatalogue/helpers/schema.js b/utils/test/vnfcatalogue/helpers/schema.js index 2aaf99ae2..4a7559ad0 100644 --- a/utils/test/vnfcatalogue/helpers/schema.js +++ b/utils/test/vnfcatalogue/helpers/schema.js @@ -31,10 +31,16 @@ var Schema = { lines_of_code: {type: 
'integer', nullable: true, unsigned: true}, versions: {type: 'integer', nullable: true, unsigned: true}, no_of_developers: {type: 'integer', nullable: true, unsigned: true}, + no_of_stars: {type: 'integer', nullable: true, unsigned: true}, + license: {type: 'enum', nullable: false, values: ['MIT', 'GPL', 'GPL_V2', 'BSD', 'APACHE']}, + opnfv_indicator: {type: 'enum', nullable: false, values: ['gold', 'silver', 'platinum']}, + complexity: {type: 'enum', nullable: true, values: ['low', 'medium', 'high']}, + activity: {type: 'enum', nullable: true, values: ['low', 'medium', 'high']}, + last_updated: {type: 'dateTime', nullable: true}, }, tag: { tag_id: {type: 'increments', nullable: false, primary: true}, - name: {type: 'string', maxlength: 150, nullable: false} + tag_name: {type: 'string', maxlength: 150, nullable: false} }, vnf_tags: { vnf_tag_id: {type: 'increments', nullable: false, primary: true},
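+    // surrogate primary key for the vnf/tag association table; migrate.js
+    // maps the 'increments' type to an auto-incrementing integer column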