Merge "Armband: Disable CI triggers for stable branch"
author Aric Gardner <agardner@linuxfoundation.org>
Tue, 1 Aug 2017 15:53:09 +0000 (15:53 +0000)
committer Gerrit Code Review <gerrit@opnfv.org>
Tue, 1 Aug 2017 15:53:09 +0000 (15:53 +0000)
50 files changed:
jjb/apex/apex.yml
jjb/apex/apex.yml.j2
jjb/apex/scenarios.yaml.hidden
jjb/apex/update-build-result.groovy [new file with mode: 0644]
jjb/armband/armband-deploy.sh
jjb/bottlenecks/bottlenecks-ci-jobs.yml
jjb/bottlenecks/bottlenecks-cleanup.sh
jjb/bottlenecks/bottlenecks-run-suite.sh
jjb/compass4nfv/compass-ci-jobs.yml
jjb/compass4nfv/compass-deploy.sh
jjb/daisy4nfv/daisy-daily-jobs.yml
jjb/daisy4nfv/daisy-project-jobs.yml
jjb/doctor/doctor.yml
jjb/dovetail/dovetail-ci-jobs.yml
jjb/dovetail/dovetail-run.sh
jjb/fuel/fuel-daily-jobs.yml
jjb/functest/functest-alpine.sh
jjb/functest/functest-daily-jobs.yml
jjb/global/installer-params.yml
jjb/global/slave-params.yml
jjb/joid/joid-daily-jobs.yml
jjb/orchestra/orchestra-daily-jobs.yml [new file with mode: 0644]
jjb/orchestra/orchestra-project-jobs.yml [new file with mode: 0644]
jjb/ovn4nfv/ovn4nfv-daily-jobs.yml [new file with mode: 0644]
jjb/ovn4nfv/ovn4nfv-project-jobs.yml [new file with mode: 0644]
jjb/qtip/qtip-verify-jobs.yml
jjb/releng/opnfv-docker-arm.yml
jjb/releng/opnfv-docker.sh
jjb/releng/opnfv-docker.yml
jjb/yardstick/yardstick-daily.sh
utils/create_pod_file.py
utils/test/testapi/3rd_party/static/testapi-ui/app.js
utils/test/testapi/opnfv_testapi/cmd/server.py
utils/test/testapi/opnfv_testapi/common/check.py
utils/test/testapi/opnfv_testapi/common/config.py
utils/test/testapi/opnfv_testapi/db/__init__.py [new file with mode: 0644]
utils/test/testapi/opnfv_testapi/db/api.py [new file with mode: 0644]
utils/test/testapi/opnfv_testapi/resources/handlers.py
utils/test/testapi/opnfv_testapi/resources/result_handlers.py
utils/test/testapi/opnfv_testapi/router/url_mappings.py
utils/test/testapi/opnfv_testapi/tests/unit/common/test_config.py
utils/test/testapi/opnfv_testapi/tests/unit/conftest.py [new file with mode: 0644]
utils/test/testapi/opnfv_testapi/tests/unit/fake_pymongo.py
utils/test/testapi/opnfv_testapi/tests/unit/resources/test_base.py
utils/test/testapi/opnfv_testapi/tests/unit/resources/test_result.py
utils/test/testapi/opnfv_testapi/tests/unit/resources/test_token.py
utils/test/testapi/opnfv_testapi/ui/auth/sign.py
utils/test/testapi/opnfv_testapi/ui/auth/user.py
utils/test/testapi/opnfv_testapi/ui/root.py
utils/test/testapi/run_test.sh [deleted file]

index 2b69d59..02c2ba2 100644 (file)
               <<: *master
         - 'os-odl-bgpvpn-ha':
               <<: *master
+        - 'os-ovn-nofeature-noha':
+              <<: *master
+        - 'os-nosdn-fdio-noha':
+              <<: *master
+        - 'os-nosdn-fdio-ha':
+              <<: *master
+        - 'os-odl-fdio-noha':
+              <<: *master
+        - 'os-odl-fdio-ha':
+              <<: *master
 
     platform:
          - 'baremetal'
                 - 'apex-deploy.*'
                 - 'functest.*'
                 - 'yardstick.*'
+                - 'dovetail.*'
         - throttle:
             max-per-node: 1
             max-total: 10
                   kill-phase-on: NEVER
                   abort-all-job: false
                   git-revision: false
+        - multijob:
+            name: Dovetail
+            condition: ALWAYS
+            projects:
+                - name: 'dovetail-apex-baremetal-proposed_tests-{scenario_stream}'
+                  node-parameters: true
+                  current-parameters: false
+                  predefined-parameters:
+                    DEPLOY_SCENARIO=$DEPLOY_SCENARIO
+                  kill-phase-on: NEVER
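+                  # enable-condition is a Groovy expression: Dovetail runs only when DEPLOY_SCENARIO matches one of the listed *-ha scenarios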
+                  enable-condition: "def m = '$DEPLOY_SCENARIO' ==~ /os-(nosdn-nofeature|nosdn-kvm|odl_l3-fdio)-ha/"
+                  abort-all-job: false
+                  git-revision: false
         - conditional-step:
             condition-kind: current-status
             condition-worst: SUCCESS
             on-evaluation-failure: mark-unstable
             steps:
                 - shell: 'echo "Tests Passed"'
+# Build status is always SUCCESS because the conditional-step plugin
+# prefetches the build status before the multijob phases execute
+#        - conditional-step:
+#            condition-kind: current-status
+#            condition-worst: SUCCESS
+#            condition-best: SUCCESS
+#            on-evaluation-failure: mark-unstable
+#            steps:
+#                - shell: 'echo "Tests Passed"'
+
+    publishers:
+        - groovy-postbuild:
+            script:
+                !include-raw-escape: ./update-build-result.groovy
 
 
 # danube Daily
                   abort-all-job: true
                   git-revision: false
 
+                - name: 'apex-os-ovn-nofeature-noha-baremetal-master'
+                  node-parameters: false
+                  current-parameters: false
+                  predefined-parameters: |
+                    OPNFV_CLEAN=yes
+                  kill-phase-on: NEVER
+                  abort-all-job: true
+                  git-revision: false
+
+                - name: 'apex-os-nosdn-fdio-noha-baremetal-master'
+                  node-parameters: false
+                  current-parameters: false
+                  predefined-parameters: |
+                    OPNFV_CLEAN=yes
+                  kill-phase-on: NEVER
+                  abort-all-job: true
+                  git-revision: false
+
+                - name: 'apex-os-nosdn-fdio-ha-baremetal-master'
+                  node-parameters: false
+                  current-parameters: false
+                  predefined-parameters: |
+                    OPNFV_CLEAN=yes
+                  kill-phase-on: NEVER
+                  abort-all-job: true
+                  git-revision: false
+
+                - name: 'apex-os-odl-fdio-noha-baremetal-master'
+                  node-parameters: false
+                  current-parameters: false
+                  predefined-parameters: |
+                    OPNFV_CLEAN=yes
+                  kill-phase-on: NEVER
+                  abort-all-job: true
+                  git-revision: false
+
+                - name: 'apex-os-odl-fdio-ha-baremetal-master'
+                  node-parameters: false
+                  current-parameters: false
+                  predefined-parameters: |
+                    OPNFV_CLEAN=yes
+                  kill-phase-on: NEVER
+                  abort-all-job: true
+                  git-revision: false
+
 
 
 # snapshot create
index 06cc2ca..a3cc9c6 100644 (file)
                 - 'apex-deploy.*'
                 - 'functest.*'
                 - 'yardstick.*'
+                - 'dovetail.*'
         - throttle:
             max-per-node: 1
             max-total: 10
                   kill-phase-on: NEVER
                   abort-all-job: false
                   git-revision: false
+        - multijob:
+            name: Dovetail
+            condition: ALWAYS
+            projects:
+                - name: 'dovetail-apex-baremetal-proposed_tests-{scenario_stream}'
+                  node-parameters: true
+                  current-parameters: false
+                  predefined-parameters:
+                    DEPLOY_SCENARIO=$DEPLOY_SCENARIO
+                  kill-phase-on: NEVER
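+                  # enable-condition is a Groovy expression: Dovetail runs only when DEPLOY_SCENARIO matches one of the listed *-ha scenarios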
+                  enable-condition: "def m = '$DEPLOY_SCENARIO' ==~ /os-(nosdn-nofeature|nosdn-kvm|odl_l3-fdio)-ha/"
+                  abort-all-job: false
+                  git-revision: false
         - conditional-step:
             condition-kind: current-status
             condition-worst: SUCCESS
             on-evaluation-failure: mark-unstable
             steps:
                 - shell: 'echo "Tests Passed"'
+# Build status is always SUCCESS because the conditional-step plugin
+# prefetches the build status before the multijob phases execute
+#        - conditional-step:
+#            condition-kind: current-status
+#            condition-worst: SUCCESS
+#            condition-best: SUCCESS
+#            on-evaluation-failure: mark-unstable
+#            steps:
+#                - shell: 'echo "Tests Passed"'
+
+    publishers:
+        - groovy-postbuild:
+            script:
+                !include-raw-escape: ./update-build-result.groovy
 
 {% for stream in scenarios %}
 # {{ stream }} Daily
index 748cd21..ee0176d 100644 (file)
@@ -4,6 +4,11 @@ master:
   - 'os-odl-nofeature-ha'
   - 'os-odl-nofeature-noha'
   - 'os-odl-bgpvpn-ha'
+  - 'os-ovn-nofeature-noha'
+  - 'os-nosdn-fdio-noha'
+  - 'os-nosdn-fdio-ha'
+  - 'os-odl-fdio-noha'
+  - 'os-odl-fdio-ha'
 danube:
   - 'os-nosdn-nofeature-noha'
   - 'os-nosdn-nofeature-ha'
diff --git a/jjb/apex/update-build-result.groovy b/jjb/apex/update-build-result.groovy
new file mode 100644 (file)
index 0000000..a569e51
--- /dev/null
@@ -0,0 +1,6 @@
+import hudson.model.*
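+// groovy-postbuild hook: if the build finished as FAILURE, downgrade it
+// to UNSTABLE (build.@result writes the field directly, bypassing the setter)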
+if (manager.build.@result == hudson.model.Result.FAILURE) {
+    manager.build.@result = hudson.model.Result.UNSTABLE
+}
index 9964ed5..358b55b 100755 (executable)
@@ -70,10 +70,6 @@ if [[ $LAB_CONFIG_URL =~ ^(git|ssh):// ]]; then
     fi
 fi
 
-if [[ "$NODE_NAME" =~ "virtual" ]]; then
-    POD_NAME="virtual_kvm"
-fi
-
 # releng wants us to use nothing else but opnfv.iso for now. We comply.
 ISO_FILE=$WORKSPACE/opnfv.iso
 
index c56ca19..455fa72 100644 (file)
@@ -70,8 +70,6 @@
        #     <<: *master
 #--------------------------------------------
     suite:
-        - 'rubbos'
-        - 'vstf'
         - 'posca_stress_traffic'
         - 'posca_stress_ping'
 
 
     publishers:
         - email:
-            recipients: hongbo.tianhongbo@huawei.com matthew.lijun@huawei.com liangqi1@huawei.com sunshine.wang@huawei.com
+            recipients: gabriel.yuyang@huawei.com, liyin11@huawei.com
 
 ########################
 # builder macros
index 04e620c..d0e2088 100644 (file)
@@ -10,6 +10,7 @@
 
 #clean up correlated dockers and their images
 bash $WORKSPACE/docker/docker_cleanup.sh -d bottlenecks --debug
+bash $WORKSPACE/docker/docker_cleanup.sh -d Bottlenecks --debug
 bash $WORKSPACE/docker/docker_cleanup.sh -d yardstick --debug
 bash $WORKSPACE/docker/docker_cleanup.sh -d kibana --debug
 bash $WORKSPACE/docker/docker_cleanup.sh -d elasticsearch --debug
index e6f8d1b..b81f4ca 100644 (file)
@@ -1,66 +1,70 @@
 #!/bin/bash
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
 #set -e
 [[ $GERRIT_REFSPEC_DEBUG == true ]] && redirect="/dev/stdout" || redirect="/dev/null"
 BOTTLENECKS_IMAGE=opnfv/bottlenecks
 REPORT="True"
 
-if [[ $SUITE_NAME == rubbos || $SUITE_NAME == vstf ]]; then
-    echo "Bottlenecks: to pull image $BOTTLENECKS_IMAGE:${DOCKER_TAG}"
-    docker pull $BOTTLENECKS_IMAGE:$DOCKER_TAG >${redirect}
+RELENG_REPO=${WORKSPACE}/releng
+[ -d ${RELENG_REPO} ] && rm -rf ${RELENG_REPO}
+git clone https://gerrit.opnfv.org/gerrit/releng ${RELENG_REPO} >${redirect}
 
-    echo "Bottlenecks: docker start running"
-    opts="--privileged=true -id"
-    envs="-e INSTALLER_TYPE=${INSTALLER_TYPE} -e INSTALLER_IP=${INSTALLER_IP} \
-          -e NODE_NAME=${NODE_NAME} -e EXTERNAL_NET=${EXTERNAL_NETWORK} \
-          -e BOTTLENECKS_BRANCH=${BOTTLENECKS_BRANCH} -e GERRIT_REFSPEC_DEBUG=${GERRIT_REFSPEC_DEBUG} \
-          -e BOTTLENECKS_DB_TARGET=${BOTTLENECKS_DB_TARGET} -e PACKAGE_URL=${PACKAGE_URL}"
-    cmd="sudo docker run ${opts} ${envs} $BOTTLENECKS_IMAGE:${DOCKER_TAG} /bin/bash"
-    echo "Bottlenecks: docker cmd running ${cmd}"
-    ${cmd} >${redirect}
+OPENRC=/tmp/admin_rc.sh
+OS_CACERT=/tmp/os_cacert
+
+if [[ $SUITE_NAME == *posca* ]]; then
+    POSCA_SCRIPT=/home/opnfv/bottlenecks/testsuites/posca
+
+    echo "BOTTLENECKS INFO: fetching os credentials from $INSTALLER_TYPE"
+    if [[ $INSTALLER_TYPE == 'compass' ]]; then
+        if [[ ${BRANCH} == 'master' ]]; then
+            ${RELENG_REPO}/utils/fetch_os_creds.sh -d ${OPENRC} -i ${INSTALLER_TYPE} -a ${INSTALLER_IP} -o ${OS_CACERT} >${redirect}
+            if [[ -f ${OS_CACERT} ]]; then
+                echo "BOTTLENECKS INFO: successfully fetching os_cacert for openstack: ${OS_CACERT}"
+            else
+                echo "BOTTLENECKS ERROR: couldn't find os_cacert file: ${OS_CACERT}, please check if the it's been properly provided."
+                exit 1
+            fi
+        else
+            ${RELENG_REPO}/utils/fetch_os_creds.sh -d ${OPENRC} -i ${INSTALLER_TYPE} -a ${INSTALLER_IP}  >${redirect}
+        fi
+    fi
 
-    echo "Bottlenecks: obtain docker id"
-    container_id=$(docker ps | grep "$BOTTLENECKS_IMAGE:${DOCKER_TAG}" | awk '{print $1}' | head -1)
-    if [ -z ${container_id} ]; then
-        echo "Cannot find $BOTTLENECKS_IMAGE container ID ${container_id}. Please check if it exists."
-        docker ps -a
+    if [[ -f ${OPENRC} ]]; then
+        echo "BOTTLENECKS INFO: openstack credentials path is ${OPENRC}"
+        if [[ $INSTALLER_TYPE == 'compass' && ${BRANCH} == 'master' ]]; then
+            echo "BOTTLENECKS INFO: writing ${OS_CACERT} to ${OPENRC}"
+            echo "export OS_CACERT=${OS_CACERT}" >> ${OPENRC}
+        fi
+        cat ${OPENRC}
+    else
+        echo "BOTTLENECKS ERROR: couldn't find openstack rc file: ${OPENRC}, please check if the it's been properly provided."
         exit 1
     fi
 
-    echo "Bottlenecks: to prepare openstack environment"
-    prepare_env="${REPO_DIR}/ci/prepare_env.sh"
-    echo "Bottlenecks: docker cmd running: ${prepare_env}"
-    sudo docker exec ${container_id} ${prepare_env}
+    echo "INFO: pulling Bottlenecks docker ${DOCKER_TAG}"
+    docker pull opnfv/bottlenecks:${DOCKER_TAG} >$redirect
 
-    echo "Bottlenecks: to run testsuite ${SUITE_NAME}"
-    run_testsuite="${REPO_DIR}/run_tests.sh -s ${SUITE_NAME}"
-    echo "Bottlenecks: docker cmd running: ${run_testsuite}"
-    sudo docker exec ${container_id} ${run_testsuite}
-else
-    echo "Bottlenecks: installing POSCA docker-compose"
-    if [ -d usr/local/bin/docker-compose ]; then
-        rm -rf usr/local/bin/docker-compose
-    fi
-    curl -L https://github.com/docker/compose/releases/download/1.11.0/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose
-    chmod +x /usr/local/bin/docker-compose
+    opts="--privileged=true -id"
+    docker_volume="-v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp"
 
-    echo "Bottlenecks: composing up dockers"
-    cd $WORKSPACE
-    docker-compose -f $WORKSPACE/docker/bottleneck-compose/docker-compose.yml up -d
+    cmd="docker run ${opts} --name bottlenecks-load-master ${docker_volume} opnfv/bottlenecks:${DOCKER_TAG} /bin/bash"
+    echo "BOTTLENECKS INFO: running docker run commond: ${cmd}"
+    ${cmd} >$redirect
+    sleep 5
 
-    echo "Bottlenecks: running traffic stress/factor testing in posca testsuite "
-    POSCA_SCRIPT=/home/opnfv/bottlenecks/testsuites/posca
     if [[ $SUITE_NAME == posca_stress_traffic ]]; then
         TEST_CASE=posca_factor_system_bandwidth
-        echo "Bottlenecks: pulling tutum/influxdb for yardstick"
-        docker pull tutum/influxdb:0.13
-        sleep 5
-        docker exec bottleneckcompose_bottlenecks_1 python ${POSCA_SCRIPT}/run_posca.py testcase $TEST_CASE $REPORT
+        docker exec bottlenecks-load-master python ${POSCA_SCRIPT}/../run_testsuite.py testcase $TEST_CASE $REPORT
     elif [[ $SUITE_NAME == posca_stress_ping ]]; then
         TEST_CASE=posca_factor_ping
-        sleep 5
-        docker exec bottleneckcompose_bottlenecks_1 python ${POSCA_SCRIPT}/run_posca.py testcase $TEST_CASE $REPORT
+        docker exec bottlenecks-load-master python ${POSCA_SCRIPT}/../run_testsuite.py testcase $TEST_CASE $REPORT
     fi
-
-    echo "Bottlenecks: cleaning up docker-compose images and dockers"
-    docker-compose -f $WORKSPACE/docker/bottleneck-compose/docker-compose.yml down --rmi all
-fi
\ No newline at end of file
+fi
index 467e168..c98fd36 100644 (file)
         stream: master
         branch: '{stream}'
         gs-pathname: ''
+        ppa-pathname: '/{stream}'
         disabled: false
         openstack-version: ocata
     danube: &danube
         stream: danube
         branch: 'stable/{stream}'
         gs-pathname: '/{stream}'
+        ppa-pathname: '/{stream}'
         disabled: false
         openstack-version: newton
 #--------------------------------
         - 'os-nosdn-openo-ha':
             disabled: false
             auto-trigger-name: 'compass-{scenario}-{pod}-{stream}-trigger'
+        - 'os-odl-sfc-ha':
+            disabled: false
+            auto-trigger-name: 'compass-{scenario}-{pod}-{stream}-trigger'
+        - 'os-nosdn-dpdk-ha':
+            disabled: false
+            auto-trigger-name: 'compass-{scenario}-{pod}-{stream}-trigger'
+        - 'k8-nosdn-nofeature-ha':
+            disabled: false
+            auto-trigger-name: 'compass-{scenario}-{pod}-{stream}-trigger'
 
 
     jobs:
         - compass-ci-parameter:
             installer: '{installer}'
             gs-pathname: '{gs-pathname}'
+            ppa-pathname: '{ppa-pathname}'
         - string:
             name: DEPLOY_SCENARIO
             default: '{scenario}'
         - compass-ci-parameter:
             installer: '{installer}'
             gs-pathname: '{gs-pathname}'
+            ppa-pathname: '{ppa-pathname}'
         - '{slave-label}-defaults'
         - '{installer}-defaults'
 
     builders:
         - description-setter:
             description: "POD: $NODE_NAME"
-        - shell:
-            !include-raw-escape: ./compass-download-artifact.sh
-        - shell:
-            !include-raw-escape: ./compass-deploy.sh
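+        # on master Compass is built from source before deploying; on stable/danube a pre-built artifact is downloaded and deployed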
+        - conditional-step:
+            condition-kind: regex-match
+            regex: master
+            label: '{stream}'
+            steps:
+                - shell:
+                    !include-raw-escape: ./compass-build.sh
+                - shell:
+                    !include-raw-escape: ./compass-deploy.sh
+        - conditional-step:
+            condition-kind: regex-match
+            regex: danube
+            label: '{stream}'
+            steps:
+                - shell:
+                    !include-raw-escape: ./compass-download-artifact.sh
+                - shell:
+                    !include-raw-escape: ./compass-deploy.sh
+
 
 ########################
 # parameter macros
             name: GS_URL
             default: '$GS_BASE{gs-pathname}'
             description: "URL to Google Storage."
+        - string:
+            name: CACHE_DIRECTORY
+            default: "$HOME/opnfv/cache/$PROJECT{gs-pathname}"
+            description: "Directory where the cache to be used during the build is located."
+        - string:
+            name: PPA_REPO
+            default: "http://artifacts.opnfv.org/compass4nfv/package{ppa-pathname}"
+        - string:
+            name: PPA_CACHE
+            default: "$WORKSPACE/work/repo/"
 
 ########################
 # trigger macros
     name: 'compass-os-nosdn-kvm-ha-baremetal-centos-master-trigger'
     triggers:
         - timed: ''
+- trigger:
+    name: 'compass-os-nosdn-dpdk-ha-baremetal-centos-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'compass-os-odl-sfc-ha-baremetal-centos-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'compass-k8-nosdn-nofeature-ha-baremetal-centos-master-trigger'
+    triggers:
+        - timed: ''
 
 - trigger:
     name: 'compass-os-nosdn-nofeature-ha-baremetal-master-trigger'
 - trigger:
     name: 'compass-os-odl_l2-moon-ha-baremetal-master-trigger'
     triggers:
-        - timed: ''
+        - timed: '0 12 * * *'
 - trigger:
     name: 'compass-os-nosdn-kvm-ha-baremetal-master-trigger'
     triggers:
         - timed: '0 14 * * *'
+- trigger:
+    name: 'compass-os-nosdn-dpdk-ha-baremetal-master-trigger'
+    triggers:
+        - timed: '0 16 * * *'
+- trigger:
+    name: 'compass-k8-nosdn-nofeature-ha-baremetal-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'compass-os-odl-sfc-ha-baremetal-master-trigger'
+    triggers:
+        - timed: '0 4 * * *'
+
 
 - trigger:
     name: 'compass-os-nosdn-nofeature-ha-baremetal-danube-trigger'
     name: 'compass-os-nosdn-kvm-ha-baremetal-danube-trigger'
     triggers:
         - timed: ''
+- trigger:
+    name: 'compass-os-nosdn-dpdk-ha-baremetal-danube-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'compass-k8-nosdn-nofeature-ha-baremetal-danube-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'compass-os-odl-sfc-ha-baremetal-danube-trigger'
+    triggers:
+        - timed: ''
+
 
 - trigger:
     name: 'compass-os-nosdn-nofeature-ha-virtual-master-trigger'
 - trigger:
     name: 'compass-os-odl_l2-moon-ha-virtual-master-trigger'
     triggers:
-        - timed: ''
+        - timed: '0 22 * * *'
 - trigger:
     name: 'compass-os-nosdn-kvm-ha-virtual-master-trigger'
     triggers:
         - timed: '0 23 * * *'
+- trigger:
+    name: 'compass-os-nosdn-dpdk-ha-virtual-master-trigger'
+    triggers:
+        - timed: '0 17 * * *'
+- trigger:
+    name: 'compass-k8-nosdn-nofeature-ha-virtual-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'compass-os-odl-sfc-ha-virtual-master-trigger'
+    triggers:
+        - timed: '0 16 * * *'
 
 - trigger:
     name: 'compass-os-nosdn-nofeature-ha-virtual-danube-trigger'
     name: 'compass-os-nosdn-kvm-ha-virtual-danube-trigger'
     triggers:
         - timed: ''
+- trigger:
+    name: 'compass-os-nosdn-dpdk-ha-virtual-danube-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'compass-os-odl-sfc-ha-virtual-danube-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'compass-k8-nosdn-nofeature-ha-virtual-danube-trigger'
+    triggers:
+        - timed: ''
index 2668ccd..7a5af5f 100644 (file)
@@ -6,24 +6,23 @@ echo "Starting the deployment on baremetal environment using $INSTALLER_TYPE. Th
 echo "--------------------------------------------------------"
 echo
 
-# source the properties file so we get OPNFV vars
-source $BUILD_DIRECTORY/latest.properties
-
-# echo the info about artifact that is used during the deployment
-echo "Using ${OPNFV_ARTIFACT_URL/*\/} for deployment"
-
-if [[ ! "$JOB_NAME" =~ (verify|merge) ]]; then
-    # for none-merge deployments
-    # checkout the commit that was used for building the downloaded artifact
-    # to make sure the ISO and deployment mechanism uses same versions
-    echo "Checking out $OPNFV_GIT_SHA1"
-    git checkout $OPNFV_GIT_SHA1 --quiet
-fi
-
 echo 1 > /proc/sys/vm/drop_caches
 
 export CONFDIR=$WORKSPACE/deploy/conf
 if [[ "$BRANCH" = 'stable/danube' ]]; then
+    # source the properties file so we get OPNFV vars
+    source $BUILD_DIRECTORY/latest.properties
+    # echo the info about artifact that is used during the deployment
+    echo "Using ${OPNFV_ARTIFACT_URL/*\/} for deployment"
+
+    if [[ ! "$JOB_NAME" =~ (verify|merge) ]]; then
+        # for non-merge deployments
+        # checkout the commit that was used for building the downloaded artifact
+        # to make sure the ISO and deployment mechanism use the same versions
+        echo "Checking out $OPNFV_GIT_SHA1"
+        git checkout $OPNFV_GIT_SHA1 --quiet
+    fi
+
     export ISO_URL=file://$BUILD_DIRECTORY/compass.iso
 else
     export ISO_URL=file://$BUILD_DIRECTORY/compass.tar.gz
@@ -40,6 +39,8 @@ elif [[ "${DEPLOY_SCENARIO}" =~ "-onos" ]]; then
     export NETWORK_CONF_FILE=network_onos.yml
 elif [[ "${DEPLOY_SCENARIO}" =~ "-openo" ]]; then
     export NETWORK_CONF_FILE=network_openo.yml
+elif [[ "${DEPLOY_SCENARIO}" =~ "-dpdk" ]]; then
+    export NETWORK_CONF_FILE=network_dpdk.yml
 else
     export NETWORK_CONF_FILE=network.yml
 fi
index 592e54d..6524d20 100644 (file)
         - 'os-nosdn-nofeature-noha':
             auto-trigger-name: 'daisy-{scenario}-{pod}-daily-{stream}-trigger'
         # ODL_L3 scenarios
-        - 'os-odl_l3-nofeature-noha':
+        - 'os-odl_l3-nofeature-ha':
             auto-trigger-name: 'daisy-{scenario}-{pod}-daily-{stream}-trigger'
         # ODL_L2 scenarios
-        - 'os-odl_l2-nofeature-noha':
+        - 'os-odl_l2-nofeature-ha':
             auto-trigger-name: 'daisy-{scenario}-{pod}-daily-{stream}-trigger'
 
     jobs:
             installer: '{installer}'
         - string:
             name: DEPLOY_SCENARIO
-            default: 'os-nosdn-nofeature-noha'
+            default: 'os-nosdn-nofeature-ha'
         - 'daisy-project-parameter':
             gs-pathname: '{gs-pathname}'
         - string:
 - trigger:
     name: 'daisy-os-nosdn-nofeature-ha-baremetal-daily-master-trigger'
     triggers:
-        - timed: ''
+        - timed: '0 12 * * *'
 # NOHA Scenarios
 - trigger:
     name: 'daisy-os-nosdn-nofeature-noha-baremetal-daily-master-trigger'
     triggers:
-        - timed: 'H 12 * * *'
+        - timed: ''
 # ODL_L3 Scenarios
 - trigger:
-    name: 'daisy-os-odl_l3-nofeature-noha-baremetal-daily-master-trigger'
+    name: 'daisy-os-odl_l3-nofeature-ha-baremetal-daily-master-trigger'
     triggers:
-        - timed: 'H 16 * * *'
+        - timed: '0 16 * * *'
 # ODL_L2 Scenarios
 - trigger:
-    name: 'daisy-os-odl_l2-nofeature-noha-baremetal-daily-master-trigger'
+    name: 'daisy-os-odl_l2-nofeature-ha-baremetal-daily-master-trigger'
     triggers:
         - timed: ''
 #-----------------------------------------------
 - trigger:
     name: 'daisy-os-nosdn-nofeature-ha-virtual-daily-master-trigger'
     triggers:
-        - timed: ''
+        - timed: '0 12 * * *'
 # NOHA Scenarios
 - trigger:
     name: 'daisy-os-nosdn-nofeature-noha-virtual-daily-master-trigger'
     triggers:
-        - timed: 'H 12 * * *'
+        - timed: ''
 # ODL_L3 Scenarios
 - trigger:
-    name: 'daisy-os-odl_l3-nofeature-noha-virtual-daily-master-trigger'
+    name: 'daisy-os-odl_l3-nofeature-ha-virtual-daily-master-trigger'
     triggers:
-        - timed: 'H 16 * * *'
+        - timed: '0 16 * * *'
 # ODL_L2 Scenarios
 - trigger:
-    name: 'daisy-os-odl_l2-nofeature-noha-virtual-daily-master-trigger'
+    name: 'daisy-os-odl_l2-nofeature-ha-virtual-daily-master-trigger'
     triggers:
         - timed: ''
 
index 57e44e3..bae75dd 100644 (file)
             description: 'Git URL to use on this Jenkins Slave'
         - string:
             name: DEPLOY_SCENARIO
-            default: 'os-nosdn-nofeature-noha'
+            default: 'os-nosdn-nofeature-ha'
         - '{installer}-project-parameter':
             gs-pathname: '{gs-pathname}'
 
index 5bb8f74..c6b2cb6 100644 (file)
             wget https://git.opnfv.org/functest/plain/functest/ci/download_images.sh -O functest/ci/download_images.sh
         - 'functest-suite-builder'
         - shell: |
-            functest_log="$HOME/opnfv/functest/results/{stream}/{project}.log"
+            functest_log="$HOME/opnfv/functest/results/{stream}/$FUNCTEST_SUITE_NAME.log"
             # NOTE: checking the test result, as the previous job could return
             #       0 regardless the result of doctor test scenario.
             grep -e ' OK$' $functest_log || exit 1
         - archive:
             artifacts: 'tests/*.log'
         - archive:
-            artifacts: 'functest_results/{project}.log'
+            artifacts: 'functest_results/$FUNCTEST_SUITE_NAME.log'
 
 
 #####################################
index 43978f6..bcda2b7 100644 (file)
@@ -25,7 +25,7 @@
         branch: 'stable/{stream}'
         dovetail-branch: master
         gs-pathname: '/{stream}'
-        docker-tag: 'cvp.0.2.0'
+        docker-tag: 'cvp.0.3.0'
 
 #-----------------------------------
 # POD, PLATFORM, AND BRANCH MAPPING
 # that have not been switched using labels for slaves
 #--------------------------------
 #apex PODs
-        - lf-pod1:
-            slave-label: '{pod}'
+        - virtual:
+            slave-label: apex-virtual-master
             SUT: apex
             auto-trigger-name: 'daily-trigger-disabled'
             <<: *master
-        - lf-pod1:
-            slave-label: '{pod}'
+        - baremetal:
+            slave-label: apex-baremetal-master
+            SUT: apex
+            auto-trigger-name: 'daily-trigger-disabled'
+            <<: *master
+        - virtual:
+            slave-label: apex-virtual-danube
+            SUT: apex
+            auto-trigger-name: 'daily-trigger-disabled'
+            <<: *danube
+        - baremetal:
+            slave-label: apex-baremetal-danube
             SUT: apex
             auto-trigger-name: 'daily-trigger-disabled'
             <<: *danube
             <<: *danube
 #--------------------------------
     testsuite:
-        - 'debug'
         - 'compliance_set'
         - 'proposed_tests'
 
index bf96fd4..f75cdde 100755 (executable)
@@ -83,6 +83,8 @@ if [[ ${INSTALLER_TYPE} == compass ]]; then
     options="-u root -p root"
 elif [[ ${INSTALLER_TYPE} == fuel ]]; then
     options="-u root -p r00tme"
+elif [[ ${INSTALLER_TYPE} == apex ]]; then
+    options="-u stack -k /root/.ssh/id_rsa"
 else
     echo "Don't support to generate pod.yaml on ${INSTALLER_TYPE} currently."
     echo "HA test cases may not run properly."
@@ -115,6 +117,11 @@ if [ "$INSTALLER_TYPE" == "fuel" ]; then
     sshpass -p r00tme sudo scp $ssh_options root@${INSTALLER_IP}:~/.ssh/id_rsa ${DOVETAIL_CONFIG}/id_rsa
 fi
 
+if [ "$INSTALLER_TYPE" == "apex" ]; then
+    echo "Fetching id_rsa file from jump_server $INSTALLER_IP..."
+    sudo scp $ssh_options stack@${INSTALLER_IP}:~/.ssh/id_rsa ${DOVETAIL_CONFIG}/id_rsa
+fi
+
 # sdnvpn test case needs to download this image first before running
 echo "Download image ubuntu-16.04-server-cloudimg-amd64-disk1.img ..."
 wget -q -nc http://artifacts.opnfv.org/sdnvpn/ubuntu-16.04-server-cloudimg-amd64-disk1.img -P ${DOVETAIL_CONFIG}
index 7a57cb5..2d37f4a 100644 (file)
@@ -44,9 +44,6 @@
         - zte-pod1:
             slave-label: zte-pod1
             <<: *master
-        - zte-pod2:
-            slave-label: zte-pod2
-            <<: *master
         - zte-pod3:
             slave-label: zte-pod3
             <<: *master
 - trigger:
     name: 'fuel-os-nosdn-ovs-ha-zte-pod1-daily-master-trigger'
     triggers:
-        - timed: ''
+        - timed: '0 18 * * *'
 - trigger:
     name: 'fuel-os-nosdn-kvm_ovs_dpdk-ha-zte-pod1-daily-master-trigger'
     triggers:
     name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-zte-pod1-daily-master-trigger'
     triggers:
         - timed: ''
-
-#-----------------------------------------------
-# ZTE POD2 Triggers running against master branch
-#-----------------------------------------------
-- trigger:
-    name: 'fuel-os-nosdn-nofeature-ha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-odl_l2-nofeature-ha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-odl_l3-nofeature-ha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-onos-sfc-ha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-onos-nofeature-ha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-odl_l2-bgpvpn-ha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-odl_l2-sfc-ha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-kvm-ha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-ovs-ha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-kvm_ovs_dpdk-ha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-ha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-# NOHA Scenarios
-- trigger:
-    name: 'fuel-os-nosdn-nofeature-noha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-odl_l2-nofeature-noha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-odl_l3-nofeature-noha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-onos-sfc-noha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-onos-nofeature-noha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-odl_l2-sfc-noha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-odl_l2-bgpvpn-noha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-kvm-noha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-ovs-noha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
 #-----------------------------------------------
 # ZTE POD3 Triggers running against master branch
 #-----------------------------------------------
 - trigger:
     name: 'fuel-os-nosdn-nofeature-ha-zte-pod3-daily-master-trigger'
     triggers:
-        - timed: ''
+        - timed: '0 10 * * *'
 - trigger:
     name: 'fuel-os-odl_l2-nofeature-ha-zte-pod3-daily-master-trigger'
     triggers:
 - trigger:
     name: 'fuel-os-nosdn-kvm-ha-zte-pod3-daily-master-trigger'
     triggers:
-        - timed: '0 10 * * *'
+        - timed: ''
 - trigger:
     name: 'fuel-os-nosdn-ovs-ha-zte-pod3-daily-master-trigger'
     triggers:
     name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-zte-pod1-daily-danube-trigger'
     triggers:
         - timed: ''
-
-#-----------------------------------------------
-# ZTE POD2 Triggers running against danube branch
-#-----------------------------------------------
-- trigger:
-    name: 'fuel-os-nosdn-nofeature-ha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-odl_l2-nofeature-ha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-odl_l3-nofeature-ha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-onos-sfc-ha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-onos-nofeature-ha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-odl_l2-bgpvpn-ha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-odl_l2-sfc-ha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-kvm-ha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-ovs-ha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-kvm_ovs_dpdk-ha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-ha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-# NOHA Scenarios
-- trigger:
-    name: 'fuel-os-nosdn-nofeature-noha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-odl_l2-nofeature-noha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-odl_l3-nofeature-noha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-onos-sfc-noha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-onos-nofeature-noha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-odl_l2-sfc-noha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-odl_l2-bgpvpn-noha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-kvm-noha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-ovs-noha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
 #-----------------------------------------------
 # ZTE POD3 Triggers running against danube branch
 #-----------------------------------------------
index 512a01e..9084cca 100644 (file)
@@ -64,7 +64,7 @@ if [[ ${INSTALLER_TYPE} == 'compass' && ${DEPLOY_SCENARIO} == *'os-nosdn-openo-h
     envs=${env}" -e OPENO_MSB_ENDPOINT=${openo_msb_endpoint}"
 fi
 
-volumes="${images_vol} ${results_vol} ${sshkey_vol} ${rc_file_vol}"
+volumes="${images_vol} ${results_vol} ${sshkey_vol} ${rc_file_vol} ${cacert_file_vol}"
 
 
 tiers=(healthcheck smoke)
index cc9bac0..f14ca75 100644 (file)
             slave-label: '{alpine-pod}'
             installer: fuel
             <<: *master
+        - huawei-virtual5:
+            slave-label: '{alpine-pod}'
+            installer: compass
+            <<: *master
 
     testsuite:
         - 'suite':
index ee154af..5e07a11 100644 (file)
@@ -75,8 +75,8 @@
             description: 'Model to deploy (os|k8)'
         - string:
             name: OS_RELEASE
-            default: 'newton'
-            description: 'OpenStack release (mitaka|newton)'
+            default: 'ocata'
+            description: 'OpenStack release (mitaka|newton|ocata)'
         - string:
             name: EXTERNAL_NETWORK
             default: ext-net
index 3694c0b..7a257f1 100644 (file)
         - string:
             name: GIT_BASE
             default: https://gerrit.opnfv.org/gerrit/$PROJECT
+- parameter:
+    name: 'huawei-virtual5-defaults'
+    parameters:
+        - label:
+            name: SLAVE_LABEL
+            default: 'huawei-virtual5'
+        - string:
+            name: GIT_BASE
+            default: https://gerrit.opnfv.org/gerrit/$PROJECT
 - parameter:
     name: 'huawei-virtual7-defaults'
     parameters:
index 13ea9b3..1ff260a 100644 (file)
         branch: '{stream}'
         disabled: false
         gs-pathname: ''
-    danube: &danube
-        stream: danube
-        branch: 'stable/{stream}'
-        disabled: false
-        gs-pathname: '/{stream}'
 #--------------------------------
 # POD, INSTALLER, AND BRANCH MAPPING
 #--------------------------------
         - virtual:
             slave-label: joid-virtual
             <<: *master
-        - baremetal:
-            slave-label: joid-baremetal
-            <<: *danube
-        - virtual:
-            slave-label: joid-virtual
-            <<: *danube
 #--------------------------------
 #        None-CI PODs
 #--------------------------------
@@ -62,7 +51,7 @@
         - 'os-nosdn-lxd-noha':
             auto-trigger-name: 'joid-{scenario}-{pod}-{stream}-trigger'
         - 'os-odl_l2-nofeature-ha':
-            auto-trigger-name: 'joid-{scenario}-{pod}-{stream}-trigger'
+            auto-trigger-name: 'daily-trigger-disabled'
         - 'os-onos-nofeature-ha':
             auto-trigger-name: 'daily-trigger-disabled'
         - 'os-odl_l2-nofeature-noha':
         - 'os-ocl-nofeature-noha':
             auto-trigger-name: 'daily-trigger-disabled'
         - 'k8-nosdn-nofeature-noha':
-            auto-trigger-name: 'joid-{scenario}-{pod}-{stream}-trigger'
+            auto-trigger-name: 'daily-trigger-disabled'
         - 'k8-nosdn-lb-noha':
             auto-trigger-name: 'joid-{scenario}-{pod}-{stream}-trigger'
+        - 'k8-ovn-lb-noha':
+            auto-trigger-name: 'joid-{scenario}-{pod}-{stream}-trigger'
 
     jobs:
         - 'joid-{scenario}-{pod}-daily-{stream}'
     name: 'joid-os-nosdn-nofeature-ha-cengn-pod1-master-trigger'
     triggers:
         - timed: ''
-# os-nosdn-nofeature-ha trigger - branch: danube
-- trigger:
-    name: 'joid-os-nosdn-nofeature-ha-baremetal-danube-trigger'
-    triggers:
-        - timed: '0 2 * * *'
-- trigger:
-    name: 'joid-os-nosdn-nofeature-ha-virtual-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'joid-os-nosdn-nofeature-ha-orange-pod1-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'joid-os-nosdn-nofeature-ha-cengn-pod1-danube-trigger'
-    triggers:
-        - timed: ''
 # os-odl_l2-nofeature-ha trigger - branch: master
 - trigger:
     name: 'joid-os-odl_l2-nofeature-ha-baremetal-master-trigger'
     name: 'joid-os-odl_l2-nofeature-ha-cengn-pod1-master-trigger'
     triggers:
         - timed: ''
-# os-odl_l2-nofeature-ha trigger - branch: danube
-- trigger:
-    name: 'joid-os-odl_l2-nofeature-ha-baremetal-danube-trigger'
-    triggers:
-        - timed: '0 7 * * *'
-- trigger:
-    name: 'joid-os-odl_l2-nofeature-ha-virtual-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'joid-os-odl_l2-nofeature-ha-orange-pod1-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'joid-os-odl_l2-nofeature-ha-cengn-pod1-danube-trigger'
-    triggers:
-        - timed: ''
 # os-onos-nofeature-ha trigger - branch: master
 - trigger:
     name: 'joid-os-onos-nofeature-ha-baremetal-master-trigger'
     name: 'joid-os-onos-nofeature-ha-cengn-pod1-master-trigger'
     triggers:
         - timed: ''
-# os-onos-nofeature-ha trigger - branch: danube
-- trigger:
-    name: 'joid-os-onos-nofeature-ha-baremetal-danube-trigger'
-    triggers:
-        - timed: '0 12 * * *'
-- trigger:
-    name: 'joid-os-onos-nofeature-ha-virtual-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'joid-os-onos-nofeature-ha-orange-pod1-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'joid-os-onos-nofeature-ha-cengn-pod1-danube-trigger'
-    triggers:
-        - timed: ''
 # os-onos-sfc-ha trigger - branch: master
 - trigger:
     name: 'joid-os-onos-sfc-ha-baremetal-master-trigger'
     name: 'joid-os-onos-sfc-ha-cengn-pod1-master-trigger'
     triggers:
         - timed: ''
-# os-onos-sfc-ha trigger - branch: danube
-- trigger:
-    name: 'joid-os-onos-sfc-ha-baremetal-danube-trigger'
-    triggers:
-        - timed: '0 17 * * *'
-- trigger:
-    name: 'joid-os-onos-sfc-ha-virtual-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'joid-os-onos-sfc-ha-orange-pod1-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'joid-os-onos-sfc-ha-cengn-pod1-danube-trigger'
-    triggers:
-        - timed: ''
 # os-nosdn-lxd-noha trigger - branch: master
 - trigger:
     name: 'joid-os-nosdn-lxd-noha-baremetal-master-trigger'
     name: 'joid-os-nosdn-lxd-noha-cengn-pod1-master-trigger'
     triggers:
         - timed: ''
-# os-nosdn-lxd-noha trigger - branch: danube
-- trigger:
-    name: 'joid-os-nosdn-lxd-noha-baremetal-danube-trigger'
-    triggers:
-        - timed: '0 22 * * *'
-- trigger:
-    name: 'joid-os-nosdn-lxd-noha-virtual-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'joid-os-nosdn-lxd-noha-orange-pod1-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'joid-os-nosdn-lxd-noha-cengn-pod1-danube-trigger'
-    triggers:
-        - timed: ''
 # os-nosdn-lxd-ha trigger - branch: master
 - trigger:
     name: 'joid-os-nosdn-lxd-ha-baremetal-master-trigger'
     name: 'joid-os-nosdn-lxd-ha-cengn-pod1-master-trigger'
     triggers:
         - timed: ''
-# os-nosdn-lxd-ha trigger - branch: danube
-- trigger:
-    name: 'joid-os-nosdn-lxd-ha-baremetal-danube-trigger'
-    triggers:
-        - timed: '0 10 * * *'
-- trigger:
-    name: 'joid-os-nosdn-lxd-ha-virtual-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'joid-os-nosdn-lxd-ha-orange-pod1-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'joid-os-nosdn-lxd-ha-cengn-pod1-danube-trigger'
-    triggers:
-        - timed: ''
 # os-nosdn-nofeature-noha trigger - branch: master
 - trigger:
     name: 'joid-os-nosdn-nofeature-noha-baremetal-master-trigger'
     name: 'joid-os-nosdn-nofeature-noha-cengn-pod1-master-trigger'
     triggers:
         - timed: ''
-# os-nosdn-nofeature-noha trigger - branch: danube
-- trigger:
-    name: 'joid-os-nosdn-nofeature-noha-baremetal-danube-trigger'
-    triggers:
-        - timed: '0 4 * * *'
-- trigger:
-    name: 'joid-os-nosdn-nofeature-noha-virtual-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'joid-os-nosdn-nofeature-noha-orange-pod1-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'joid-os-nosdn-nofeature-noha-cengn-pod1-danube-trigger'
-    triggers:
-        - timed: ''
 # k8-nosdn-nofeature-noha trigger - branch: master
 - trigger:
     name: 'joid-k8-nosdn-nofeature-noha-baremetal-master-trigger'
     name: 'joid-k8-nosdn-nofeature-noha-cengn-pod1-master-trigger'
     triggers:
         - timed: ''
-# k8-nosdn-nofeature-noha trigger - branch: danube
-- trigger:
-    name: 'joid-k8-nosdn-nofeature-noha-baremetal-danube-trigger'
-    triggers:
-        - timed: '0 15 * * *'
-- trigger:
-    name: 'joid-k8-nosdn-nofeature-noha-virtual-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'joid-k8-nosdn-nofeature-noha-orange-pod1-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'joid-k8-nosdn-nofeature-noha-cengn-pod1-danube-trigger'
-    triggers:
-        - timed: ''
 # k8-nosdn-lb-noha trigger - branch: master
 - trigger:
     name: 'joid-k8-nosdn-lb-noha-baremetal-master-trigger'
     name: 'joid-k8-nosdn-lb-noha-cengn-pod1-master-trigger'
     triggers:
         - timed: ''
-# k8-nosdn-lb-noha trigger - branch: danube
+# k8-ovn-lb-noha trigger - branch: master
 - trigger:
-    name: 'joid-k8-nosdn-lb-noha-baremetal-danube-trigger'
+    name: 'joid-k8-ovn-lb-noha-baremetal-master-trigger'
     triggers:
-        - timed: '0 20 * * *'
+        - timed: '5 17 * * *'
 - trigger:
-    name: 'joid-k8-nosdn-lb-noha-virtual-danube-trigger'
+    name: 'joid-k8-ovn-lb-noha-virtual-master-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'joid-k8-nosdn-lb-noha-orange-pod1-danube-trigger'
+    name: 'joid-k8-ovn-lb-noha-orange-pod1-master-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'joid-k8-nosdn-lb-noha-cengn-pod1-danube-trigger'
+    name: 'joid-k8-ovn-lb-noha-cengn-pod1-master-trigger'
     triggers:
         - timed: ''
diff --git a/jjb/orchestra/orchestra-daily-jobs.yml b/jjb/orchestra/orchestra-daily-jobs.yml
new file mode 100644 (file)
index 0000000..6baaab8
--- /dev/null
@@ -0,0 +1,98 @@
+###################################
+# job configuration for orchestra
+###################################
+- project:
+    name: 'orchestra-daily-jobs'
+
+    project: 'orchestra'
+
+#--------------------------------
+# BRANCH ANCHORS
+#--------------------------------
+    master: &master
+        stream: master
+        branch: '{stream}'
+        gs-pathname: ''
+        disabled: false
+
+#-------------------------------------------------------
+# POD, INSTALLER, AND BRANCH MAPPING
+#-------------------------------------------------------
+    pod:
+        - virtual:
+            slave-label: 'joid-virtual'
+            os-version: 'xenial'
+            <<: *master
+
+    jobs:
+        - 'orchestra-{pod}-daily-{stream}'
+
+################################
+# job template
+################################
+- job-template:
+    name: 'orchestra-{pod}-daily-{stream}'
+
+    project-type: multijob
+
+    disabled: '{obj:disabled}'
+
+    concurrent: false
+
+    properties:
+        - logrotate-default
+        - throttle:
+            enabled: true
+            max-total: 1
+            max-per-node: 1
+            option: 'project'
+
+    scm:
+        - git-scm
+
+    wrappers:
+        - ssh-agent-wrapper
+
+        - timeout:
+            timeout: 240
+            fail: true
+
+    triggers:
+        - timed: '@daily'
+
+    parameters:
+        - project-parameter:
+            project: '{project}'
+            branch: '{branch}'
+        - string:
+            name: DEPLOY_SCENARIO
+            default: os-nosdn-openbaton-ha
+        - '{slave-label}-defaults'
+
+    builders:
+        - description-setter:
+            description: "Built on $NODE_NAME"
+        - multijob:
+            name: deploy
+            condition: SUCCESSFUL
+            projects:
+                - name: 'joid-deploy-{pod}-daily-{stream}'
+                  current-parameters: false
+                  predefined-parameters: |
+                    DEPLOY_SCENARIO=os-nosdn-openbaton-ha
+                    COMPASS_OS_VERSION=xenial
+                  node-parameters: true
+                  kill-phase-on: FAILURE
+                  abort-all-job: true
+        - multijob:
+            name: functest
+            condition: SUCCESSFUL
+            projects:
+                - name: 'functest-joid-{pod}-daily-{stream}'
+                  current-parameters: false
+                  predefined-parameters: |
+                    DEPLOY_SCENARIO=os-nosdn-openbaton-ha
+                    FUNCTEST_SUITE_NAME=orchestra_ims
+                  node-parameters: true
+                  kill-phase-on: NEVER
+                  abort-all-job: true
diff --git a/jjb/orchestra/orchestra-project-jobs.yml b/jjb/orchestra/orchestra-project-jobs.yml
new file mode 100644 (file)
index 0000000..0f0c0f6
--- /dev/null
@@ -0,0 +1,50 @@
+- project:
+
+    name: orchestra-project
+
+    project: 'orchestra'
+
+    stream:
+        - master:
+            branch: '{stream}'
+            gs-pathname: ''
+
+    jobs:
+        - 'orchestra-build-{stream}'
+
+- job-template:
+    name: 'orchestra-build-{stream}'
+
+    concurrent: true
+
+    properties:
+        - logrotate-default
+        - throttle:
+            enabled: true
+            max-total: 1
+            max-per-node: 1
+            option: 'project'
+
+    parameters:
+        - project-parameter:
+            project: '{project}'
+            branch: '{branch}'
+
+    scm:
+        - git-scm
+
+    triggers:
+        - timed: 'H 23 * * *'
+
+    builders:
+        - 'orchestra-build-macro'
+
+- builder:
+    name: 'orchestra-build-macro'
+    builders:
+        - shell: |
+            #!/bin/bash
+
+            echo "Hello world!"
+
+
diff --git a/jjb/ovn4nfv/ovn4nfv-daily-jobs.yml b/jjb/ovn4nfv/ovn4nfv-daily-jobs.yml
new file mode 100644 (file)
index 0000000..ed6df41
--- /dev/null
@@ -0,0 +1,87 @@
+- project:
+    name: 'ovn4nfv-daily-jobs'
+
+    project: 'ovn4nfv'
+
+    master: &master
+        stream: master
+        branch: '{stream}'
+        gs-pathname: ''
+        disabled: false
+
+    pod:
+        - virtual:
+            slave-label: 'joid-virtual'
+            os-version: 'xenial'
+            <<: *master
+
+    jobs:
+        - 'ovn4nfv-{pod}-daily-{stream}'
+
+- job-template:
+    name: 'ovn4nfv-{pod}-daily-{stream}'
+
+    project-type: multijob
+
+    disabled: '{obj:disabled}'
+
+    concurrent: false
+
+    properties:
+        - logrotate-default
+        - throttle:
+            enabled: true
+            max-total: 1
+            max-per-node: 1
+            option: 'project'
+
+    scm:
+        - git-scm
+
+    wrappers:
+        - ssh-agent-wrapper
+
+        - timeout:
+            timeout: 240
+            fail: true
+
+    triggers:
+        - timed: '@daily'
+
+    parameters:
+        - project-parameter:
+            project: '{project}'
+            branch: '{branch}'
+        - string:
+            name: DEPLOY_SCENARIO
+            default: os-ovn-nofeature-noha
+        - '{slave-label}-defaults'
+
+    builders:
+        - description-setter:
+            description: "Built on $NODE_NAME"
+        - multijob:
+            name: deploy
+            condition: SUCCESSFUL
+            projects:
+                - name: 'joid-deploy-{pod}-daily-{stream}'
+                  current-parameters: false
+                  predefined-parameters: |
+                    DEPLOY_SCENARIO=os-ovn-nofeature-noha
+                    COMPASS_OS_VERSION=xenial
+                  node-parameters: true
+                  kill-phase-on: FAILURE
+                  abort-all-job: true
+        - multijob:
+            name: functest
+            condition: SUCCESSFUL
+            projects:
+                - name: 'functest-joid-{pod}-daily-{stream}'
+                  current-parameters: false
+                  predefined-parameters: |
+                    DEPLOY_SCENARIO=os-ovn-nofeature-noha
+                    FUNCTEST_SUITE_NAME=ovn4nfv_test_suite
+                  node-parameters: true
+                  kill-phase-on: NEVER
+                  abort-all-job: true
+
diff --git a/jjb/ovn4nfv/ovn4nfv-project-jobs.yml b/jjb/ovn4nfv/ovn4nfv-project-jobs.yml
new file mode 100644 (file)
index 0000000..805aa04
--- /dev/null
@@ -0,0 +1,51 @@
+- project:
+    name: ovn4nfv
+
+    project: '{name}'
+
+
+    stream:
+        - master:
+            branch: '{stream}'
+            gs-pathname: ''
+            disabled: false
+
+    jobs:
+        - 'ovn4nfv-build-{stream}'
+
+- job-template:
+    name: 'ovn4nfv-build-{stream}'
+
+    concurrent: true
+
+    disabled: '{obj:disabled}'
+
+    properties:
+        - logrotate-default
+        - throttle:
+            enabled: true
+            max-total: 1
+            max-per-node: 1
+            option: 'project'
+
+    parameters:
+        - project-parameter:
+            project: '{project}'
+            branch: '{branch}'
+
+    scm:
+        - git-scm
+
+    triggers:
+        - timed: 'H 23 * * *'
+
+    builders:
+        - 'ovn4nfv-build-macro'
+
+- builder:
+    name: 'ovn4nfv-build-macro'
+    builders:
+        - shell: |
+            #!/bin/bash
+
+            echo "hello world"
index 57d24b4..4a7dd45 100644 (file)
 
             echo "Document link(s):" >> gerrit_comment.txt
             find "$local_path" | grep -e 'ipynb$' | \
-                sed -e "s|^$local_path|    https://nbviewer.jupyter.org/urls/$gs_path|" >> gerrit_comment.txt
+                sed -e "s|^$local_path|    https://nbviewer.jupyter.org/url/$gs_path|" >> gerrit_comment.txt
index 417fc70..7502b17 100644 (file)
             name: RELEASE_VERSION
             default: ""
             description: "Release version, e.g. 1.0, 2.0, 3.0"
+        - string:
+            name: DOCKER_DIR
+            default: "docker"
+            description: "Directory containing files needed by the Dockerfile"
         - string:
             name: DOCKERFILE
             default: "Dockerfile.aarch64"
index ebd0c9f..0de3df2 100644 (file)
@@ -54,7 +54,7 @@ if [[ -n "$(docker images | grep $DOCKER_REPO_NAME)" ]]; then
     done
 fi
 
-cd $WORKSPACE/docker
+cd $WORKSPACE/$DOCKER_DIR
 HOST_ARCH=$(uname -m)
 if [ ! -f "${DOCKERFILE}" ]; then
     # If this is expected to be a Dockerfile for other arch than x86
index 095ba41..fa9c441 100644 (file)
     other-receivers: &other-receivers
         receivers: ''
 
-    project:
+    dockerfile: "Dockerfile"
+    dockerdir: "docker"
+
+    # This is the dockerhub repo the image will be pushed to as
+    # 'opnfv/{dockerrepo}'. See: DOCKER_REPO_NAME parameter.
+    # 'project' is the OPNFV repo we expect to contain the Dockerfile
+    dockerrepo:
         # projects with jobs for master
         - 'releng-anteater':
+            project: 'releng-anteater'
             <<: *master
             <<: *other-receivers
         - 'bottlenecks':
+            project: 'bottlenecks'
             <<: *master
             <<: *other-receivers
         - 'cperf':
+            project: 'cperf'
             <<: *master
             <<: *other-receivers
         - 'dovetail':
+            project: 'dovetail'
             <<: *master
             <<: *other-receivers
         - 'functest':
+            project: 'functest'
             <<: *master
             <<: *functest-receivers
         - 'qtip':
+            project: 'qtip'
             <<: *master
             <<: *other-receivers
-        - 'storperf':
+        - 'storperf-master':
+            project: 'storperf'
+            dockerdir: 'docker/storperf-master'
+            <<: *master
+            <<: *other-receivers
+        - 'storperf-httpfrontend':
+            project: 'storperf'
+            dockerdir: 'docker/storperf-httpfrontend'
+            <<: *master
+            <<: *other-receivers
+        - 'storperf-reporting':
+            project: 'storperf'
+            dockerdir: 'docker/storperf-reporting'
             <<: *master
             <<: *other-receivers
         - 'yardstick':
+            project: 'yardstick'
             <<: *master
             <<: *other-receivers
         # projects with jobs for stable
         - 'bottlenecks':
+            project: 'bottlenecks'
             <<: *danube
             <<: *other-receivers
         - 'functest':
+            project: 'functest'
             <<: *danube
             <<: *functest-receivers
         - 'qtip':
+            project: 'qtip'
             <<: *danube
             <<: *other-receivers
         - 'storperf':
+            project: 'storperf'
             <<: *danube
             <<: *other-receivers
         - 'yardstick':
+            project: 'yardstick'
             <<: *danube
             <<: *other-receivers
 
     jobs:
-        - '{project}-docker-build-push-{stream}'
+        - "{dockerrepo}-docker-build-push-{stream}"
 
 
 - project:
 
     name: opnfv-monitor-docker        # projects which only monitor dedicated file or path
 
+    dockerfile: "Dockerfile"
+    dockerdir: "docker"
+
     project:
         # projects with jobs for master
         - 'daisy':
+            dockerrepo: 'daisy'
             <<: *master
         - 'escalator':
+            dockerrepo: 'escalator'
             <<: *master
 
     jobs:
 # job templates
 ########################
 - job-template:
-    name: '{project}-docker-build-push-{stream}'
+    name: '{dockerrepo}-docker-build-push-{stream}'
 
     disabled: '{obj:disabled}'
 
             description: "To enable/disable pushing the image to Dockerhub."
         - string:
             name: DOCKER_REPO_NAME
-            default: "opnfv/{project}"
+            default: "opnfv/{dockerrepo}"
             description: "Dockerhub repo to be pushed to."
+        - string:
+            name: DOCKER_DIR
+            default: "{dockerdir}"
+            description: "Directory containing files needed by the Dockerfile"
         - string:
             name: COMMIT_ID
             default: ""
             description: "Release version, e.g. 1.0, 2.0, 3.0"
         - string:
             name: DOCKERFILE
-            default: "Dockerfile"
+            default: "{dockerfile}"
             description: "Dockerfile to use for creating the image."
 
     scm:
index cf37ac2..56d0874 100755 (executable)
@@ -3,6 +3,7 @@ set -e
 [[ $CI_DEBUG == true ]] && redirect="/dev/stdout" || redirect="/dev/null"
 
 rc_file_vol=""
+cacert_file_vol=""
 sshkey=""
 
 if [[ ${INSTALLER_TYPE} == 'apex' ]]; then
@@ -23,6 +24,10 @@ if [[ ${INSTALLER_TYPE} == 'joid' ]]; then
     rc_file_vol="-v $LAB_CONFIG/admin-openrc:/etc/yardstick/openstack.creds"
     # If dev lab, credentials may not be the default ones, just provide a path to put them into docker
     # replace the default one by the customized one provided by jenkins config
+elif [[ ${INSTALLER_TYPE} == 'compass' && ${BRANCH} == 'master' ]]; then
+    cacert_file_vol="-v ${HOME}/os_cacert:/etc/yardstick/os_cacert"
+    echo "export OS_CACERT=/etc/yardstick/os_cacert" >> ${HOME}/opnfv-openrc.sh
+    rc_file_vol="-v ${HOME}/opnfv-openrc.sh:/etc/yardstick/openstack.creds"
 else
     rc_file_vol="-v ${HOME}/opnfv-openrc.sh:/etc/yardstick/openstack.creds"
 fi
@@ -50,7 +55,7 @@ sudo rm -rf ${dir_result}/*
 map_log_dir="-v ${dir_result}:/tmp/yardstick"
 
 # Run docker
-cmd="sudo docker run ${opts} ${envs} ${rc_file_vol} ${map_log_dir} ${sshkey} opnfv/yardstick:${DOCKER_TAG} \
+cmd="sudo docker run ${opts} ${envs} ${rc_file_vol} ${cacert_file_vol} ${map_log_dir} ${sshkey} opnfv/yardstick:${DOCKER_TAG} \
     exec_tests.sh ${YARDSTICK_DB_BACKEND} ${YARDSTICK_SCENARIO_SUITE_NAME}"
 echo "Yardstick: Running docker cmd: ${cmd}"
 ${cmd}
index 197e493..e2c57d2 100644 (file)
@@ -49,7 +49,7 @@ def get_with_passwd():
                                        args.user, installer_pwd=args.password)
 
 
-def create_file(handler):
+def create_file(handler, INSTALLER_TYPE):
     """
     Create the yaml file of nodes info.
     As Yardstick required, node name must be node1, node2, ... and node1 must
@@ -62,27 +62,30 @@ def create_file(handler):
     nodes = handler.nodes
     node_list = []
     index = 1
+    user = 'root'
+    if INSTALLER_TYPE == 'apex':
+        user = 'heat-admin'
     for node in nodes:
         try:
             if node.roles[0].lower() == "controller":
                 node_info = {'name': "node%s" % index, 'role': node.roles[0],
-                             'ip': node.ip, 'user': 'root'}
+                             'ip': node.ip, 'user': user}
                 node_list.append(node_info)
                 index += 1
         except Exception:
             node_info = {'name': node.name, 'role': 'unknown', 'ip': node.ip,
-                         'user': 'root'}
+                         'user': user}
             node_list.append(node_info)
     for node in nodes:
         try:
             if node.roles[0].lower() == "compute":
                 node_info = {'name': "node%s" % index, 'role': node.roles[0],
-                             'ip': node.ip, 'user': 'root'}
+                             'ip': node.ip, 'user': user}
                 node_list.append(node_info)
                 index += 1
         except Exception:
             node_info = {'name': node.name, 'role': 'unknown', 'ip': node.ip,
-                         'user': 'root'}
+                         'user': user}
             node_list.append(node_info)
     if args.INSTALLER_TYPE == 'compass':
         for item in node_list:
@@ -105,7 +108,7 @@ def main():
     if not handler:
         print("Error: failed to get the node's handler.")
         return 1
-    create_file(handler)
+    create_file(handler, args.INSTALLER_TYPE)
 
 
 if __name__ == '__main__':
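
The change above threads the installer type down to the node entries so that
Apex overcloud nodes are addressed as heat-admin instead of root. A condensed
sketch of the selection logic (values are illustrative):

    def node_entry(index, role, ip, installer_type):
        # Apex overcloud nodes are reached as 'heat-admin'; every other
        # installer keeps the previous 'root' default.
        user = 'heat-admin' if installer_type == 'apex' else 'root'
        return {'name': 'node%s' % index, 'role': role, 'ip': ip, 'user': user}

    print(node_entry(1, 'controller', '192.0.2.10', 'apex'))
    # {'name': 'node1', 'role': 'controller', 'ip': '192.0.2.10', 'user': 'heat-admin'}
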
index 8c701c3..bb31ab0 100644 (file)
@@ -54,8 +54,8 @@
                 controller: 'ResultsController as ctrl'
             }).
             state('userResults', {
-                url: 'user_results',
-                templateUrl: '/testapi-ui/components/results/results.html',
+                url: '/user_results',
+                templateUrl: 'testapi-ui/components/results/results.html',
                 controller: 'ResultsController as ctrl'
             }).
             state('resultsDetail', {
@@ -66,7 +66,7 @@
             }).
             state('profile', {
                 url: '/profile',
-                templateUrl: '/testapi-ui/components/profile/profile.html',
+                templateUrl: 'testapi-ui/components/profile/profile.html',
                 controller: 'ProfileController as ctrl'
             }).
             state('authFailure', {
index 545d5e3..a5ac5eb 100644 (file)
@@ -29,40 +29,18 @@ TODOs :
 
 """
 
-import argparse
-import sys
-
-import motor
 import tornado.ioloop
 
-from opnfv_testapi.common import config
+from opnfv_testapi.common.config import CONF
 from opnfv_testapi.router import url_mappings
 from opnfv_testapi.tornado_swagger import swagger
 
-CONF = None
-
-
-def parse_config(argv=[]):
-    global CONF
-    parser = argparse.ArgumentParser()
-    parser.add_argument("-c", "--config-file", dest='config_file',
-                        help="Config file location")
-    args = parser.parse_args(argv)
-    if args.config_file:
-        config.Config.CONFIG = args.config_file
-    CONF = config.Config()
-
-
-def get_db():
-    return motor.MotorClient(CONF.mongo_url)[CONF.mongo_dbname]
-
 
 def make_app():
     swagger.docs(base_url=CONF.swagger_base_url,
                  static_path=CONF.static_path)
     return swagger.Application(
         url_mappings.mappings,
-        db=get_db(),
         debug=CONF.api_debug,
         auth=CONF.api_authenticate,
         cookie_secret='opnfv-testapi',
@@ -70,7 +48,6 @@ def make_app():
 
 
 def main():
-    parse_config(sys.argv[1:])
     application = make_app()
     application.listen(CONF.api_port)
     tornado.ioloop.IOLoop.current().start()
index 67e8fbd..24ba876 100644 (file)
@@ -13,6 +13,7 @@ from tornado import web
 
 from opnfv_testapi.common import message
 from opnfv_testapi.common import raises
+from opnfv_testapi.db import api as dbapi
 
 
 def authenticate(method):
@@ -26,7 +27,7 @@ def authenticate(method):
             except KeyError:
                 raises.Unauthorized(message.unauthorized())
             query = {'access_token': token}
-            check = yield self._eval_db_find_one(query, 'tokens')
+            check = yield dbapi.db_find_one('tokens', query)
             if not check:
                 raises.Forbidden(message.invalid_token())
         ret = yield gen.coroutine(method)(self, *args, **kwargs)
@@ -38,7 +39,7 @@ def not_exist(xstep):
     @functools.wraps(xstep)
     def wrap(self, *args, **kwargs):
         query = kwargs.get('query')
-        data = yield self._eval_db_find_one(query)
+        data = yield dbapi.db_find_one(self.table, query)
         if not data:
             raises.NotFound(message.not_found(self.table, query))
         ret = yield gen.coroutine(xstep)(self, data, *args, **kwargs)
@@ -78,7 +79,7 @@ def carriers_exist(xstep):
         carriers = kwargs.pop('carriers', {})
         if carriers:
             for table, query in carriers:
-                exist = yield self._eval_db_find_one(query(), table)
+                exist = yield dbapi.db_find_one(table, query())
                 if not exist:
                     raises.Forbidden(message.not_found(table, query()))
         ret = yield gen.coroutine(xstep)(self, *args, **kwargs)
@@ -91,7 +92,7 @@ def new_not_exists(xstep):
     def wrap(self, *args, **kwargs):
         query = kwargs.get('query')
         if query:
-            to_data = yield self._eval_db_find_one(query())
+            to_data = yield dbapi.db_find_one(self.table, query())
             if to_data:
                 raises.Forbidden(message.exist(self.table, query()))
         ret = yield gen.coroutine(xstep)(self, *args, **kwargs)
@@ -105,7 +106,7 @@ def updated_one_not_exist(xstep):
         db_keys = kwargs.pop('db_keys', [])
         query = self._update_query(db_keys, data)
         if query:
-            to_data = yield self._eval_db_find_one(query)
+            to_data = yield dbapi.db_find_one(self.table, query)
             if to_data:
                 raises.Forbidden(message.exist(self.table, query))
         ret = yield gen.coroutine(xstep)(self, data, *args, **kwargs)
index f73c0ab..4cd53c6 100644 (file)
@@ -8,14 +8,16 @@
 # feng.xiaowei@zte.com.cn remove prepare_put_request            5-30-2016
 ##############################################################################
 import ConfigParser
+import argparse
 import os
+import sys
 
 
 class Config(object):
-    CONFIG = None
 
     def __init__(self):
-        self.file = self.CONFIG if self.CONFIG else self._default_config()
+        self.config_file = None
+        self._set_config_file()
         self._parse()
         self._parse_per_page()
         self.static_path = os.path.join(
@@ -24,11 +26,11 @@ class Config(object):
             'static')
 
     def _parse(self):
-        if not os.path.exists(self.file):
-            raise Exception("%s not found" % self.file)
+        if not os.path.exists(self.config_file):
+            raise Exception("%s not found" % self.config_file)
 
         config = ConfigParser.RawConfigParser()
-        config.read(self.file)
+        config.read(self.config_file)
         self._parse_section(config)
 
     def _parse_section(self, config):
@@ -53,8 +55,24 @@ class Config(object):
                 value = False
         return value
 
-    @staticmethod
-    def _default_config():
+    def _set_config_file(self):
+        if not self._set_sys_config_file():
+            self._set_default_config_file()
+
+    def _set_sys_config_file(self):
+        parser = argparse.ArgumentParser()
+        parser.add_argument("-c", "--config-file", dest='config_file',
+                            help="Config file location", metavar="FILE")
+        args, _ = parser.parse_known_args(sys.argv)
+        try:
+            self.config_file = args.config_file
+        finally:
+            return self.config_file is not None
+
+    def _set_default_config_file(self):
         is_venv = os.getenv('VIRTUAL_ENV')
-        return os.path.join('/' if not is_venv else is_venv,
-                            'etc/opnfv_testapi/config.ini')
+        self.config_file = os.path.join('/' if not is_venv else is_venv,
+                                        'etc/opnfv_testapi/config.ini')
+
+
+CONF = Config()
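
With this refactor the module builds a CONF singleton at import time.
parse_known_args ignores any flags it does not recognize, so --config-file can
sit alongside unrelated command-line arguments, and every consumer simply
imports the shared instance. A minimal sketch, assuming a valid config.ini is
in place (the values shown are illustrative):

    # Importing triggers Config(): it honours a -c/--config-file flag on
    # sys.argv and otherwise falls back to etc/opnfv_testapi/config.ini
    # under $VIRTUAL_ENV (or /etc outside a virtualenv).
    from opnfv_testapi.common.config import CONF

    print(CONF.mongo_url)   # e.g. 'mongodb://127.0.0.1:27017/'
    print(CONF.api_port)    # e.g. 8000
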
diff --git a/utils/test/testapi/opnfv_testapi/db/__init__.py b/utils/test/testapi/opnfv_testapi/db/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/utils/test/testapi/opnfv_testapi/db/api.py b/utils/test/testapi/opnfv_testapi/db/api.py
new file mode 100644 (file)
index 0000000..c057480
--- /dev/null
@@ -0,0 +1,38 @@
+import motor
+
+from opnfv_testapi.common.config import CONF
+
+DB = motor.MotorClient(CONF.mongo_url)[CONF.mongo_dbname]
+
+
+def db_update(collection, query, update_req):
+    return _eval_db(collection, 'update', query, update_req, check_keys=False)
+
+
+def db_delete(collection, query):
+    return _eval_db(collection, 'remove', query)
+
+
+def db_aggregate(collection, pipelines):
+    return _eval_db(collection, 'aggregate', pipelines, allowDiskUse=True)
+
+
+def db_list(collection, query):
+    return _eval_db(collection, 'find', query)
+
+
+def db_save(collection, data):
+    return _eval_db(collection, 'insert', data, check_keys=False)
+
+
+def db_find_one(collection, query):
+    return _eval_db(collection, 'find_one', query)
+
+
+def _eval_db(collection, method, *args, **kwargs):
+    exec_collection = DB.__getattr__(collection)
+    return exec_collection.__getattribute__(method)(*args, **kwargs)
+
+
+def _eval_db_find_one(query, table=None):
+    return _eval_db(table, 'find_one', query)
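
The new module replaces the per-handler _eval_db* methods (removed further
below) with one shared set of Motor-backed helpers. Inside a Tornado coroutine
they are consumed with yield; a minimal sketch (collection name and query are
illustrative):

    from tornado import gen

    from opnfv_testapi.db import api as dbapi


    @gen.coroutine
    def fetch_pod(name):
        # db_find_one returns a Motor future; yield resolves it to a dict,
        # or None when no document matches the query.
        pod = yield dbapi.db_find_one('pods', {'name': name})
        raise gen.Return(pod)
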
index f23cc57..8a3a2db 100644 (file)
@@ -20,8 +20,8 @@
 # feng.xiaowei@zte.com.cn remove DashboardHandler            5-30-2016
 ##############################################################################
 
-from datetime import datetime
 import json
+from datetime import datetime
 
 from tornado import gen
 from tornado import web
@@ -29,6 +29,7 @@ from tornado import web
 from opnfv_testapi.common import check
 from opnfv_testapi.common import message
 from opnfv_testapi.common import raises
+from opnfv_testapi.db import api as dbapi
 from opnfv_testapi.resources import models
 from opnfv_testapi.tornado_swagger import swagger
 
@@ -38,7 +39,6 @@ DEFAULT_REPRESENTATION = "application/json"
 class GenericApiHandler(web.RequestHandler):
     def __init__(self, application, request, **kwargs):
         super(GenericApiHandler, self).__init__(application, request, **kwargs)
-        self.db = self.settings["db"]
         self.json_args = None
         self.table = None
         self.table_cls = None
@@ -90,8 +90,7 @@ class GenericApiHandler(web.RequestHandler):
 
         if self.table != 'results':
             data.creation_date = datetime.now()
-        _id = yield self._eval_db(self.table, 'insert', data.format(),
-                                  check_keys=False)
+        _id = yield dbapi.db_save(self.table, data.format())
         if 'name' in self.json_args:
             resource = data.name
         else:
@@ -110,17 +109,14 @@ class GenericApiHandler(web.RequestHandler):
 
         total_pages = 0
         if page > 0:
-            cursor = self._eval_db(self.table, 'find', query)
+            cursor = dbapi.db_list(self.table, query)
             records_count = yield cursor.count()
             total_pages = self._calc_total_pages(records_count,
                                                  last,
                                                  page,
                                                  per_page)
         pipelines = self._set_pipelines(query, sort, last, page, per_page)
-        cursor = self._eval_db(self.table,
-                               'aggregate',
-                               pipelines,
-                               allowDiskUse=True)
+        cursor = dbapi.db_aggregate(self.table, pipelines)
         data = list()
         while (yield cursor.fetch_next):
             data.append(self.format_data(cursor.next_object()))
@@ -176,7 +172,7 @@ class GenericApiHandler(web.RequestHandler):
     @check.authenticate
     @check.not_exist
     def _delete(self, data, query=None):
-        yield self._eval_db(self.table, 'remove', query)
+        yield dbapi.db_delete(self.table, query)
         self.finish_request()
 
     @check.authenticate
@@ -186,8 +182,7 @@ class GenericApiHandler(web.RequestHandler):
     def _update(self, data, query=None, **kwargs):
         data = self.table_cls.from_dict(data)
         update_req = self._update_requests(data)
-        yield self._eval_db(self.table, 'update', query, update_req,
-                            check_keys=False)
+        yield dbapi.db_update(self.table, query, update_req)
         update_req['_id'] = str(data._id)
         self.finish_request(update_req)
 
@@ -230,23 +225,6 @@ class GenericApiHandler(web.RequestHandler):
             query[key] = new
         return query if not equal else dict()
 
-    def _eval_db(self, table, method, *args, **kwargs):
-        exec_collection = self.db.__getattr__(table)
-        return exec_collection.__getattribute__(method)(*args, **kwargs)
-
-    def _eval_db_find_one(self, query, table=None):
-        if table is None:
-            table = self.table
-        return self._eval_db(table, 'find_one', query)
-
-    def db_save(self, collection, data):
-        self._eval_db(collection, 'insert', data, check_keys=False)
-
-    def db_find_one(self, query, collection=None):
-        if not collection:
-            collection = self.table
-        return self._eval_db(collection, 'find_one', query)
-
 
 class VersionHandler(GenericApiHandler):
     @swagger.operation(nickname='listAllVersions')
index 5eb1b92..2bf1792 100644 (file)
@@ -13,7 +13,7 @@ import json
 
 from bson import objectid
 
-from opnfv_testapi.common import config
+from opnfv_testapi.common.config import CONF
 from opnfv_testapi.common import message
 from opnfv_testapi.common import raises
 from opnfv_testapi.resources import handlers
@@ -21,8 +21,6 @@ from opnfv_testapi.resources import result_models
 from opnfv_testapi.tornado_swagger import swagger
 from opnfv_testapi.ui.auth import constants as auth_const
 
-CONF = config.Config()
-
 
 class GenericResultHandler(handlers.GenericApiHandler):
     def __init__(self, application, request, **kwargs):
@@ -68,7 +66,7 @@ class GenericResultHandler(handlers.GenericApiHandler):
                     del query['public']
                     if role != "reviewer":
                         query['user'] = openid
-            elif k != 'last' and k != 'page':
+            elif k not in ['last', 'page', 'descend']:
                 query[k] = v
             if date_range:
                 query['start_date'] = date_range
@@ -169,18 +167,27 @@ class ResultsCLHandler(GenericResultHandler):
             @type signed: L{string}
             @in signed: query
             @required signed: False
+            @param descend: true for newest first; false for oldest first
+            @type descend: L{string}
+            @in descend: query
+            @required descend: False
         """
-        limitations = {'sort': {'_id': -1}}
-        last = self.get_query_argument('last', 0)
-        if last is not None:
-            last = self.get_int('last', last)
-            limitations.update({'last': last})
+        def descend_limit():
+            descend = self.get_query_argument('descend', 'true')
+            return -1 if descend.lower() == 'true' else 1
+
+        def last_limit():
+            return self.get_int('last', self.get_query_argument('last', 0))
+
+        def page_limit():
+            return self.get_int('page', self.get_query_argument('page', 0))
 
-        page = self.get_query_argument('page', None)
-        if page is not None:
-            page = self.get_int('page', page)
-            limitations.update({'page': page,
-                                'per_page': CONF.api_results_per_page})
+        limitations = {
+            'sort': {'_id': descend_limit()},
+            'last': last_limit(),
+            'page': page_limit(),
+            'per_page': CONF.api_results_per_page
+        }
 
         self._list(query=self.set_query(), **limitations)
 
index 37e719b..562fa5e 100644 (file)
@@ -8,7 +8,7 @@
 ##############################################################################
 import tornado.web
 
-from opnfv_testapi.common import config
+from opnfv_testapi.common.config import CONF
 from opnfv_testapi.resources import handlers
 from opnfv_testapi.resources import pod_handlers
 from opnfv_testapi.resources import project_handlers
@@ -58,7 +58,7 @@ mappings = [
     # static path
     (r'/(.*\.(css|png|gif|js|html|json|map|woff2|woff|ttf))',
      tornado.web.StaticFileHandler,
-     {'path': config.Config().static_path}),
+     {'path': CONF.static_path}),
 
     (r'/', root.RootHandler),
     (r'/api/v1/auth/signin', sign.SigninHandler),
index 446b944..cc8743c 100644 (file)
@@ -1,16 +1,15 @@
-import os
+import argparse
 
-from opnfv_testapi.common import config
 
-
-def test_config_success():
-    config_file = os.path.join(os.path.dirname(__file__),
-                               '../../../../etc/config.ini')
-    config.Config.CONFIG = config_file
-    conf = config.Config()
-    assert conf.mongo_url == 'mongodb://127.0.0.1:27017/'
-    assert conf.mongo_dbname == 'test_results_collection'
-    assert conf.api_port == 8000
-    assert conf.api_debug is True
-    assert conf.api_authenticate is False
-    assert conf.swagger_base_url == 'http://localhost:8000'
+def test_config_normal(mocker, config_normal):
+    mocker.patch(
+        'argparse.ArgumentParser.parse_known_args',
+        return_value=(argparse.Namespace(config_file=config_normal), None))
+    from opnfv_testapi.common import config
+    CONF = config.Config()
+    assert CONF.mongo_url == 'mongodb://127.0.0.1:27017/'
+    assert CONF.mongo_dbname == 'test_results_collection'
+    assert CONF.api_port == 8000
+    assert CONF.api_debug is True
+    assert CONF.api_authenticate is False
+    assert CONF.swagger_base_url == 'http://localhost:8000'
diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/conftest.py b/utils/test/testapi/opnfv_testapi/tests/unit/conftest.py
new file mode 100644 (file)
index 0000000..feff1da
--- /dev/null
@@ -0,0 +1,8 @@
+from os import path
+
+import pytest
+
+
+@pytest.fixture
+def config_normal():
+    return path.join(path.dirname(__file__), 'common/normal.ini')
index d95ff37..0ca83df 100644 (file)
@@ -6,9 +6,10 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
+from operator import itemgetter
+
 from bson.objectid import ObjectId
 from concurrent.futures import ThreadPoolExecutor
-from operator import itemgetter
 
 
 def thread_execute(method, *args, **kwargs):
index 6e4d454..dcec4e9 100644 (file)
@@ -12,13 +12,9 @@ from os import path
 import mock
 from tornado import testing
 
-from opnfv_testapi.common import config
 from opnfv_testapi.resources import models
 from opnfv_testapi.tests.unit import fake_pymongo
 
-config.Config.CONFIG = path.join(path.dirname(__file__),
-                                 '../../../../etc/config.ini')
-
 
 class TestBase(testing.AsyncHTTPTestCase):
     headers = {'Content-Type': 'application/json; charset=UTF-8'}
@@ -37,20 +33,21 @@ class TestBase(testing.AsyncHTTPTestCase):
 
     def tearDown(self):
         self.db_patcher.stop()
+        self.config_patcher.stop()
 
     def _patch_server(self):
-        from opnfv_testapi.cmd import server
-        server.parse_config([
-            '--config-file',
-            path.join(path.dirname(__file__), path.pardir, 'common/normal.ini')
-        ])
-        self.db_patcher = mock.patch('opnfv_testapi.cmd.server.get_db',
-                                     self._fake_pymongo)
+        import argparse
+        config = path.join(path.dirname(__file__), '../common/normal.ini')
+        self.config_patcher = mock.patch(
+            'argparse.ArgumentParser.parse_known_args',
+            return_value=(argparse.Namespace(config_file=config), None))
+        self.db_patcher = mock.patch('opnfv_testapi.db.api.DB',
+                                     fake_pymongo)
+        self.config_patcher.start()
         self.db_patcher.start()
 
-    @staticmethod
-    def _fake_pymongo():
-        return fake_pymongo
+    def set_config_file(self):
+        self.config_file = 'normal.ini'
 
     def get_app(self):
         from opnfv_testapi.cmd import server
index f199bc7..1e83ed3 100644 (file)
@@ -61,9 +61,9 @@ class TestResultBase(base.TestBase):
         self.scenario = 'odl-l2'
         self.criteria = 'passed'
         self.trust_indicator = result_models.TI(0.7)
-        self.start_date = "2016-05-23 07:16:09.477097"
-        self.stop_date = "2016-05-23 07:16:19.477097"
-        self.update_date = "2016-05-24 07:16:19.477097"
+        self.start_date = str(datetime.now())
+        self.stop_date = str(datetime.now() + timedelta(minutes=1))
+        self.update_date = str(datetime.now() + timedelta(days=1))
         self.update_step = -0.05
         super(TestResultBase, self).setUp()
         self.details = Details(timestart='0', duration='9s', status='OK')
@@ -275,7 +275,7 @@ class TestResultGet(TestResultBase):
 
     @executor.query(httplib.OK, '_query_period_one', 1)
     def test_queryPeriodSuccess(self):
-        return self._set_query('period=11')
+        return self._set_query('period=5')
 
     @executor.query(httplib.BAD_REQUEST, message.must_int('last'))
     def test_queryLastNotInt(self):
@@ -306,7 +306,7 @@ class TestResultGet(TestResultBase):
                                'scenario',
                                'trust_indicator',
                                'criteria',
-                               'period=11')
+                               'period=5')
 
     @executor.query(httplib.OK, '_query_success', 0)
     def test_notFound(self):
@@ -324,10 +324,10 @@ class TestResultGet(TestResultBase):
     @executor.query(httplib.OK, '_query_success', 1)
     def test_filterErrorStartdate(self):
         self._create_error_start_date(None)
-        self._create_error_start_date('None')
+        self._create_error_start_date('None')
         self._create_error_start_date('null')
         self._create_error_start_date('')
-        return self._set_query('period=11')
+        return self._set_query('period=5')
 
     def _query_success(self, body, number):
         self.assertEqual(number, len(body.results))
@@ -338,7 +338,7 @@ class TestResultGet(TestResultBase):
 
     def _query_period_one(self, body, number):
         self.assertEqual(number, len(body.results))
-        self.assert_res(body.results[0], self.req_10d_before)
+        self.assert_res(body.results[0], self.req_d)
 
     def _create_error_start_date(self, start_date):
         req = copy.deepcopy(self.req_d)
index b4ba887..940e256 100644 (file)
@@ -10,7 +10,6 @@ from tornado import web
 
 from opnfv_testapi.common import message
 from opnfv_testapi.resources import project_models
-from opnfv_testapi.router import url_mappings
 from opnfv_testapi.tests.unit import executor
 from opnfv_testapi.tests.unit import fake_pymongo
 from opnfv_testapi.tests.unit.resources import test_base as base
@@ -18,6 +17,7 @@ from opnfv_testapi.tests.unit.resources import test_base as base
 
 class TestToken(base.TestBase):
     def get_app(self):
+        from opnfv_testapi.router import url_mappings
         return web.Application(
             url_mappings.mappings,
             db=fake_pymongo,
index 5b36225..4623952 100644 (file)
@@ -1,14 +1,12 @@
 from six.moves.urllib import parse
 from tornado import gen
 from tornado import web
-import logging
 
-from opnfv_testapi.common import config
+from opnfv_testapi.common.config import CONF
+from opnfv_testapi.db import api as dbapi
 from opnfv_testapi.ui.auth import base
 from opnfv_testapi.ui.auth import constants as const
 
-CONF = config.Config()
-
 
 class SigninHandler(base.BaseHandler):
     def get(self):
@@ -48,10 +46,9 @@ class SigninReturnHandler(base.BaseHandler):
             'fullname': self.get_query_argument(const.OPENID_NS_SREG_FULLNAME),
             const.ROLE: role
         }
-        user = yield self.db_find_one({'openid': openid})
+        user = yield dbapi.db_find_one(self.table, {'openid': openid})
         if not user:
-            self.db_save(self.table, new_user_info)
-            logging.info('save to db:%s', new_user_info)
+            dbapi.db_save(self.table, new_user_info)
         else:
             role = user.get(const.ROLE)
 
index 2fca2a8..955cdee 100644 (file)
@@ -2,6 +2,7 @@ from tornado import gen
 from tornado import web
 
 from opnfv_testapi.common import raises
+from opnfv_testapi.db import api as dbapi
 from opnfv_testapi.ui.auth import base
 
 
@@ -12,7 +13,7 @@ class ProfileHandler(base.BaseHandler):
         openid = self.get_secure_cookie('openid')
         if openid:
             try:
-                user = yield self.db_find_one({'openid': openid})
+                user = yield dbapi.db_find_one(self.table, {'openid': openid})
                 self.finish_request({
                     "openid": user.get('openid'),
                     "email": user.get('email'),
index bba7a86..5b2c922 100644 (file)
@@ -1,10 +1,10 @@
 from opnfv_testapi.resources.handlers import GenericApiHandler
-from opnfv_testapi.common import config
+from opnfv_testapi.common.config import CONF
 
 
 class RootHandler(GenericApiHandler):
     def get_template_path(self):
-        return config.Config().static_path
+        return CONF.static_path
 
     def get(self):
         self.render('testapi-ui/index.html')
diff --git a/utils/test/testapi/run_test.sh b/utils/test/testapi/run_test.sh
deleted file mode 100755 (executable)
index 1e05dd6..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/bin/bash
-
-set -o errexit
-
-# Get script directory
-SCRIPTDIR=`dirname $0`
-
-echo "Running unit tests..."
-
-# Creating virtual environment
-if [ ! -z $VIRTUAL_ENV ]; then
-    venv=$VIRTUAL_ENV
-else
-    venv=$SCRIPTDIR/.venv
-    virtualenv $venv
-fi
-source $venv/bin/activate
-
-# Install requirements
-pip install -r $SCRIPTDIR/requirements.txt
-pip install -r $SCRIPTDIR/test-requirements.txt
-
-find . -type f -name "*.pyc" -delete
-
-nosetests --with-xunit \
-    --with-coverage \
-    --cover-erase \
-    --cover-package=$SCRIPTDIR/opnfv_testapi/cmd \
-    --cover-package=$SCRIPTDIR/opnfv_testapi/common \
-    --cover-package=$SCRIPTDIR/opnfv_testapi/resources \
-    --cover-package=$SCRIPTDIR/opnfv_testapi/router \
-    --cover-xml \
-    --cover-html \
-    $SCRIPTDIR/opnfv_testapi/tests
-
-exit_code=$?
-
-deactivate
-
-exit $exit_code