Merge "Adding POSCA build job for Bottlenecks"
Author:     mei mei <meimei@huawei.com>
AuthorDate: Wed, 8 Feb 2017 01:33:49 +0000
Commit:     Gerrit Code Review <gerrit@opnfv.org>
CommitDate: Wed, 8 Feb 2017 01:33:49 +0000
18 files changed:
jjb/apex/apex-snapshot-create.sh
jjb/apex/apex.yml
jjb/armband/armband-deploy.sh
jjb/compass4nfv/compass-dovetail-jobs.yml [new file with mode: 0644]
jjb/doctor/doctor.yml
jjb/fuel/fuel-daily-jobs.yml
jjb/global/releng-macros.yml
jjb/kvmfornfv/kvmfornfv-upload-artifact.sh
jjb/kvmfornfv/kvmfornfv.yml
jjb/opera/opera-daily-jobs.yml
jjb/releng/testapi-automate.yml
jjb/releng/testapi-docker-deploy.sh [new file with mode: 0644]
jjb/yardstick/yardstick-project-jobs.yml
prototypes/bifrost/scripts/destroy-env.sh
utils/test/testapi/htmlize/htmlize.py
utils/test/vnfcatalogue/helpers/README.md [new file with mode: 0644]
utils/test/vnfcatalogue/helpers/migrate.js [new file with mode: 0644]
utils/test/vnfcatalogue/helpers/schema.js [new file with mode: 0644]

diff --git a/jjb/apex/apex-snapshot-create.sh b/jjb/apex/apex-snapshot-create.sh
index 09c6a11..5725ac6 100644
@@ -26,10 +26,10 @@ mkdir -p ${tmp_dir}
 pushd ${tmp_dir} > /dev/null
 echo "Copying overcloudrc and ssh key from Undercloud..."
 # Store overcloudrc
-UNDERCLOUD=$(sudo virsh domifaddr undercloud | grep -Eo '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]')
-scp ${SSH_OPTIONS[@]} stack@${UNDERCLOUD}:overcloudrc ./
+UNDERCLOUD=$(sudo virsh domifaddr undercloud | grep -Eo '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+')
+sudo scp ${SSH_OPTIONS[@]} stack@${UNDERCLOUD}:overcloudrc ./
 # Copy out ssh key of stack from undercloud
-scp ${SSH_OPTIONS[@]} stack@${UNDERCLOUD}:.ssh/id_rsa ./
+sudo scp ${SSH_OPTIONS[@]} stack@${UNDERCLOUD}:.ssh/id_rsa ./
 popd > /dev/null
 
 echo "Gathering introspection information"
diff --git a/jjb/apex/apex.yml b/jjb/apex/apex.yml
index fcf08ed..512112e 100644
                 build-step-failure-threshold: 'never'
                 failure-threshold: 'never'
                 unstable-threshold: 'FAILURE'
-
-
+        - trigger-builds:
+          - project: 'apex-deploy-baremetal-os-odl-bgpvpn-ha-{stream}'
+            predefined-parameters: |
+              BUILD_DIRECTORY=apex-build-{stream}/.build
+              OPNFV_CLEAN=yes
+            git-revision: true
+            same-node: true
+            block-thresholds:
+                build-step-failure-threshold: 'never'
+            block: true
+        - trigger-builds:
+          - project: 'functest-apex-{daily-slave}-daily-{stream}'
+            predefined-parameters:
+              DEPLOY_SCENARIO=os-odl-bgpvpn-ha
+            block: true
+            same-node: true
+            block-thresholds:
+                build-step-failure-threshold: 'never'
+                failure-threshold: 'never'
+                unstable-threshold: 'FAILURE'
+        - trigger-builds:
+          - project: 'yardstick-apex-{slave}-daily-{stream}'
+            predefined-parameters:
+              DEPLOY_SCENARIO=os-odl-bgpvpn-ha
+            block: true
+            same-node: true
+            block-thresholds:
+                build-step-failure-threshold: 'never'
+                failure-threshold: 'never'
+                unstable-threshold: 'FAILURE'
 # Colorado Build
 - job-template:
     name: 'apex-build-colorado'
diff --git a/jjb/armband/armband-deploy.sh b/jjb/armband/armband-deploy.sh
index 6ddd2e9..adabfca 100755
@@ -8,7 +8,6 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
-set -o errexit
 set -o nounset
 set -o pipefail
 
diff --git a/jjb/compass4nfv/compass-dovetail-jobs.yml b/jjb/compass4nfv/compass-dovetail-jobs.yml
new file mode 100644
index 0000000..46570cc
--- /dev/null
@@ -0,0 +1,214 @@
+- project:
+
+    name: 'compass-dovetail-jobs'
+    installer: 'compass'
+    project: 'compass4nfv'
+#----------------------------------
+# BRANCH ANCHORS
+#----------------------------------
+    colorado: &colorado
+        stream: colorado
+        branch: 'stable/{stream}'
+        gs-pathname: '/{stream}'
+        disabled: false
+        dovetail-branch: master
+#------------------------------------
+# POD, INSTALLER, AND BRANCH MAPPING
+#------------------------------------
+#        CI PODs
+#------------------------------------
+    pod:
+        - baremetal:
+            slave-label: compass-baremetal
+            os-version: 'trusty'
+            <<: *colorado
+#-----------------------------------
+# scenarios
+#-----------------------------------
+    scenario:
+        - 'os-nosdn-nofeature-ha':
+            disabled: false
+            auto-trigger-name: 'compass-{scenario}-{pod}-weekly-{stream}-trigger'
+
+    jobs:
+        - 'compass-{scenario}-{pod}-weekly-{stream}'
+        - 'compass-deploy-{pod}-weekly-{stream}'
+
+########################
+# job templates
+########################
+- job-template:
+    name: 'compass-{scenario}-{pod}-weekly-{stream}'
+
+    disabled: '{obj:disabled}'
+
+    concurrent: false
+
+    properties:
+        - build-blocker:
+            use-build-blocker: true
+            blocking-jobs:
+                - 'compass-os-.*?-{pod}-daily-.*?'
+                - 'compass-os-.*?-{pod}-weekly-.*?'
+            block-level: 'NODE'
+
+    wrappers:
+        - build-name:
+            name: '$BUILD_NUMBER - Scenario: $DEPLOY_SCENARIO'
+
+    triggers:
+        - '{auto-trigger-name}'
+
+    parameters:
+        - project-parameter:
+            project: '{project}'
+            branch: '{branch}'
+        - compass-ci-parameter:
+            installer: '{installer}'
+            gs-pathname: '{gs-pathname}'
+        - string:
+            name: DEPLOY_SCENARIO
+            default: '{scenario}'
+        - '{slave-label}-defaults'
+        - '{installer}-defaults'
+
+
+    builders:
+        - description-setter:
+            description: "POD: $NODE_NAME"
+        - trigger-builds:
+            - project: 'compass-deploy-{pod}-weekly-{stream}'
+              current-parameters: false
+              predefined-parameters: |
+                DEPLOY_SCENARIO={scenario}
+                COMPASS_OS_VERSION={os-version}
+              same-node: true
+              block: true
+        - trigger-builds:
+            - project: 'dovetail-compass-{pod}-compliance_set-weekly-{stream}'
+              current-parameters: false
+              predefined-parameters:
+                DEPLOY_SCENARIO={scenario}
+              block: true
+              same-node: true
+              block-thresholds:
+                build-step-failure-threshold: 'never'
+                failure-threshold: 'never'
+                unstable-threshold: 'FAILURE'
+        - trigger-builds:
+            - project: 'dovetail-compass-{pod}-debug-weekly-{stream}'
+              current-parameters: false
+              predefined-parameters:
+                DEPLOY_SCENARIO={scenario}
+              block: true
+              same-node: true
+              block-thresholds:
+                build-step-failure-threshold: 'never'
+                failure-threshold: 'never'
+                unstable-threshold: 'FAILURE'
+        - trigger-builds:
+            - project: 'dovetail-compass-{pod}-proposed_tests-weekly-{stream}'
+              current-parameters: false
+              predefined-parameters:
+                DEPLOY_SCENARIO={scenario}
+              block: true
+              same-node: true
+              block-thresholds:
+                build-step-failure-threshold: 'never'
+                failure-threshold: 'never'
+                unstable-threshold: 'FAILURE'
+
+- job-template:
+    name: 'compass-deploy-{pod}-weekly-{stream}'
+
+    disabled: false
+
+    concurrent: true
+
+    properties:
+        - logrotate-default
+        - throttle:
+            enabled: true
+            max-total: 4
+            max-per-node: 1
+            option: 'project'
+        - build-blocker:
+            use-build-blocker: true
+            blocking-jobs:
+                - 'compass-deploy-{pod}-daily-.*?'
+                - 'compass-deploy-{pod}-weekly-.*'
+                - 'compass-verify-deploy-.*?'
+            block-level: 'NODE'
+
+    wrappers:
+        - build-name:
+            name: '$BUILD_NUMBER - Scenario: $DEPLOY_SCENARIO'
+        - timeout:
+            timeout: 120
+            abort: true
+
+    parameters:
+        - project-parameter:
+            project: '{project}'
+            branch: '{branch}'
+        - compass-ci-parameter:
+            installer: '{installer}'
+            gs-pathname: '{gs-pathname}'
+        - '{slave-label}-defaults'
+        - '{installer}-defaults'
+
+    scm:
+        - git-scm
+
+    builders:
+        - description-setter:
+            description: "POD: $NODE_NAME"
+        - shell:
+            !include-raw-escape: ./compass-download-artifact.sh
+        - shell:
+            !include-raw-escape: ./compass-deploy.sh
+
+    publishers:
+        - archive:
+            artifacts: 'ansible.log'
+            allow-empty: 'true'
+            fingerprint: true
+
+########################
+# parameter macros
+########################
+- parameter:
+    name: compass-dovetail-parameter
+    parameters:
+        - string:
+            name: BUILD_DIRECTORY
+            default: $WORKSPACE/build_output
+            description: "Directory where the build artifact will be located upon the completion of the build."
+        - string:
+            name: GS_URL
+            default: '$GS_BASE{gs-pathname}'
+            description: "URL to Google Storage."
+        - choice:
+            name: COMPASS_OPENSTACK_VERSION
+            choices:
+                - 'mitaka'
+
+########################
+# trigger macros
+########################
+- trigger:
+    name: 'compass-os-nosdn-nofeature-ha-baremetal-weekly-colorado-trigger'
+    triggers:
+        - timed: 'H H * * 0'
+
+- trigger:
+    name: 'dovetail-weekly-trigger'
+    triggers:
+        - timed: 'H H * * 0'
diff --git a/jjb/doctor/doctor.yml b/jjb/doctor/doctor.yml
index 11b4ffb..2333fca 100644
         - fuel:
             slave-label: 'ool-virtual2'
             pod: 'ool-virtual2'
-        # TODO(r-mibu): enable this once joid is ready
-        #- joid:
-        #    slave-label: 'ool-virtual3'
-        #    pod: 'ool-virtual3'
+        - joid:
+            slave-label: 'ool-virtual3'
+            pod: 'ool-virtual3'
 
     inspector:
         - 'sample'
             branch: '{branch}'
 
     builders:
+        - 'clean-workspace-log'
         - 'functest-suite-builder'
         - shell: |
             functest_log="$HOME/opnfv/functest/results/{stream}/{project}.log"
diff --git a/jjb/fuel/fuel-daily-jobs.yml b/jjb/fuel/fuel-daily-jobs.yml
index a9af1bc..02267bd 100644
@@ -83,6 +83,8 @@
             auto-trigger-name: 'daily-trigger-disabled'
         - 'os-nosdn-kvm_ovs_dpdk-ha':
             auto-trigger-name: 'fuel-{scenario}-{pod}-daily-{stream}-trigger'
+        - 'os-nosdn-kvm_ovs_dpdk_bar-ha':
+            auto-trigger-name: 'fuel-{scenario}-{pod}-daily-{stream}-trigger'
         # NOHA scenarios
         - 'os-nosdn-nofeature-noha':
             auto-trigger-name: 'fuel-{scenario}-{pod}-daily-{stream}-trigger'
     name: 'fuel-os-nosdn-kvm_ovs_dpdk-ha-baremetal-daily-master-trigger'
     triggers:
         - timed: '30 12 * * *'
+- trigger:
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-ha-baremetal-daily-master-trigger'
+    triggers:
+        - timed: '30 8 * * *'
 # NOHA Scenarios
 - trigger:
     name: 'fuel-os-nosdn-nofeature-noha-baremetal-daily-master-trigger'
     name: 'fuel-os-nosdn-kvm_ovs_dpdk-ha-baremetal-daily-danube-trigger'
     triggers:
         - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-ha-baremetal-daily-danube-trigger'
+    triggers:
+        - timed: ''
 # NOHA Scenarios
 - trigger:
     name: 'fuel-os-nosdn-nofeature-noha-baremetal-daily-danube-trigger'
     name: 'fuel-os-nosdn-kvm_ovs_dpdk-ha-virtual-daily-master-trigger'
     triggers:
         - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-ha-virtual-daily-master-trigger'
+    triggers:
+        - timed: ''
 # NOHA Scenarios
 - trigger:
     name: 'fuel-os-nosdn-nofeature-noha-virtual-daily-master-trigger'
     name: 'fuel-os-nosdn-kvm_ovs_dpdk-ha-virtual-daily-danube-trigger'
     triggers:
         - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-ha-virtual-daily-danube-trigger'
+    triggers:
+        - timed: ''
 # NOHA Scenarios
 - trigger:
     name: 'fuel-os-nosdn-nofeature-noha-virtual-daily-danube-trigger'
     name: 'fuel-os-nosdn-kvm_ovs_dpdk-ha-zte-pod1-daily-master-trigger'
     triggers:
         - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-ha-zte-pod1-daily-master-trigger'
+    triggers:
+        - timed: ''
 # NOHA Scenarios
 - trigger:
     name: 'fuel-os-nosdn-nofeature-noha-zte-pod1-daily-master-trigger'
     name: 'fuel-os-nosdn-kvm_ovs_dpdk-ha-zte-pod2-daily-master-trigger'
     triggers:
         - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-ha-zte-pod2-daily-master-trigger'
+    triggers:
+        - timed: ''
 # NOHA Scenarios
 - trigger:
     name: 'fuel-os-nosdn-nofeature-noha-zte-pod2-daily-master-trigger'
     name: 'fuel-os-nosdn-kvm_ovs_dpdk-ha-zte-pod3-daily-master-trigger'
     triggers:
         - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-ha-zte-pod3-daily-master-trigger'
+    triggers:
+        - timed: ''
 # NOHA Scenarios
 - trigger:
     name: 'fuel-os-nosdn-nofeature-noha-zte-pod3-daily-master-trigger'
     name: 'fuel-os-nosdn-kvm_ovs_dpdk-ha-zte-pod1-daily-danube-trigger'
     triggers:
         - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-ha-zte-pod1-daily-danube-trigger'
+    triggers:
+        - timed: ''
 # NOHA Scenarios
 - trigger:
     name: 'fuel-os-nosdn-nofeature-noha-zte-pod1-daily-danube-trigger'
     name: 'fuel-os-nosdn-kvm_ovs_dpdk-ha-zte-pod2-daily-danube-trigger'
     triggers:
         - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-ha-zte-pod2-daily-danube-trigger'
+    triggers:
+        - timed: ''
 # NOHA Scenarios
 - trigger:
     name: 'fuel-os-nosdn-nofeature-noha-zte-pod2-daily-danube-trigger'
     name: 'fuel-os-nosdn-kvm_ovs_dpdk-ha-zte-pod3-daily-danube-trigger'
     triggers:
         - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-ha-zte-pod3-daily-danube-trigger'
+    triggers:
+        - timed: ''
 # NOHA Scenarios
 - trigger:
     name: 'fuel-os-nosdn-nofeature-noha-zte-pod3-daily-danube-trigger'
diff --git a/jjb/global/releng-macros.yml b/jjb/global/releng-macros.yml
index e644096..7e11d92 100644
               sed -r -i '4,$s/^/ /g' lint.log
             fi
 
+- builder:
+    name: clean-workspace-log
+    builders:
+        - shell: |
+            find $WORKSPACE -type f -name '*.log' -print | xargs rm -f
+
 - publisher:
     name: archive-artifacts
     publishers:
diff --git a/jjb/kvmfornfv/kvmfornfv-upload-artifact.sh b/jjb/kvmfornfv/kvmfornfv-upload-artifact.sh
index 6f8fff3..c6b8005 100755
@@ -11,16 +11,17 @@ fi
 
 case "$JOB_TYPE" in
     verify)
-        OPNFV_ARTIFACT_VERSION="gerrit-$GERRIT_CHANGE_NUMBER"
-        GS_UPLOAD_LOCATION="gs://artifacts.opnfv.org/$PROJECT/review/$GERRIT_CHANGE_NUMBER"
-        echo "Removing outdated artifacts produced for the previous patch for the change $GERRIT_CHANGE_NUMBER"
-        gsutil ls $GS_UPLOAD_LOCATION > /dev/null 2>&1 && gsutil rm -r $GS_UPLOAD_LOCATION
-        echo "Uploading artifacts for the change $GERRIT_CHANGE_NUMBER. This could take some time..."
-        ;;
+       OPNFV_ARTIFACT_VERSION="gerrit-$GERRIT_CHANGE_NUMBER"
+       GS_UPLOAD_LOCATION="gs://artifacts.opnfv.org/$PROJECT/review/$GERRIT_CHANGE_NUMBER"
+       echo "Removing outdated artifacts produced for the previous patch for the change $GERRIT_CHANGE_NUMBER"
+       gsutil ls $GS_UPLOAD_LOCATION > /dev/null 2>&1 && gsutil rm -r $GS_UPLOAD_LOCATION
+       echo "Uploading artifacts for the change $GERRIT_CHANGE_NUMBER. This could take some time..."
+       ;;
     daily)
         echo "Uploading daily artifacts This could take some time..."
         OPNFV_ARTIFACT_VERSION=$(date -u +"%Y-%m-%d_%H-%M-%S")
         GS_UPLOAD_LOCATION="gs://$GS_URL/$OPNFV_ARTIFACT_VERSION"
+        GS_LOG_LOCATION="gs://$GS_URL/logs-$(date -u +"%Y-%m-%d")/"
         ;;
     *)
         echo "Artifact upload is not enabled for $JOB_TYPE jobs"
@@ -38,10 +39,23 @@ esac
 source $WORKSPACE/opnfv.properties
 
 # upload artifacts
-gsutil cp -r $WORKSPACE/build_output/* $GS_UPLOAD_LOCATION > $WORKSPACE/gsutil.log 2>&1
-gsutil -m setmeta -r \
-    -h "Cache-Control:private, max-age=0, no-transform" \
-    $GS_UPLOAD_LOCATION > /dev/null 2>&1
+if [[ "$PHASE" == "build" ]]; then
+    gsutil cp -r $WORKSPACE/build_output/* $GS_UPLOAD_LOCATION > $WORKSPACE/gsutil.log 2>&1
+    gsutil -m setmeta -r \
+        -h "Cache-Control:private, max-age=0, no-transform" \
+        $GS_UPLOAD_LOCATION > /dev/null 2>&1
+else
+    if [[ "$JOB_TYPE" == "daily" ]]; then
+        log_dir=$WORKSPACE/build_output/log
+        if [[ -d "$log_dir" ]]; then
+            #Uploading logs to artifacts
+            echo "Uploading artifacts for future debugging needs...."
+            gsutil cp -r $WORKSPACE/build_output/log-*.tar.gz $GS_LOG_LOCATION > $WORKSPACE/gsutil.log 2>&1
+        else
+            echo "No test logs/artifacts available for uploading"
+        fi
+    fi
+fi
 
 # upload metadata file for the artifacts built by daily job
 if [[ "$JOB_TYPE" == "daily" ]]; then
diff --git a/jjb/kvmfornfv/kvmfornfv.yml b/jjb/kvmfornfv/kvmfornfv.yml
index 157f2dc..522e971 100644
             name: TEST_NAME
             default: '{testname}'
             description: "Daily job to execute kvmfornfv '{testname}' testcase."
+        - string:
+            name: PHASE
+            default: '{phase}'
+            description: "Execution of kvmfornfv daily '{phase}' job ."
 
     builders:
         - description-setter:
             !include-raw: ./kvmfornfv-download-artifact.sh
         - shell:
             !include-raw: ./kvmfornfv-test.sh
+        - shell:
+            !include-raw: ./kvmfornfv-upload-artifact.sh
 - builder:
     name: 'kvmfornfv-packet_forward-daily-build-macro'
     builders:
diff --git a/jjb/opera/opera-daily-jobs.yml b/jjb/opera/opera-daily-jobs.yml
index 47aa2a4..f1ea1aa 100644
@@ -63,9 +63,6 @@
             project: '{project}'
             branch: '{branch}'
         - 'huawei-virtual7-defaults'
-        - 'compass-defaults'
-        - 'opera-compass-parameter':
-            gs-pathname: '{gs-pathname}'
 
     builders:
         - description-setter:
             condition: SUCCESSFUL
             projects:
                 - name: 'compass-deploy-virtual-daily-{stream}'
-                  current-parameters: true
+                  current-parameters: false
+                  predefined-parameters: |
+                    DEPLOY_SCENARIO=os-nosdn-openo-noha
+                    COMPASS_OS_VERSION=xenial
                   node-parameters: true
                   kill-phase-on: FAILURE
                   abort-all-job: true
@@ -93,7 +93,7 @@
 #            condition: SUCCESSFUL
 #            projects:
 #                - name: 'functest-compass-baremetal-suite-{stream}'
-#                  current-parameters: true
+#                  current-parameters: false
 #                  predefined-parameters:
 #                    FUNCTEST_SUITE_NAME=opera
 #                  node-parameters: true
             #!/bin/bash
             echo "Hello world!"
 
-########################
-# parameter macros
-########################
-- parameter:
-    name: opera-compass-parameter
-    parameters:
-        - string:
-            name: BUILD_DIRECTORY
-            default: $WORKSPACE/build_output
-            description: "Directory where the build artifact will be located upon the completion of the build."
-        - string:
-            name: GS_URL
-            default: '$GS_BASE{gs-pathname}'
-            description: "URL to Google Storage."
-        - choice:
-            name: COMPASS_OPENSTACK_VERSION
-            choices:
-                - 'newton'
-        - string:
-            name: DEPLOY_SCENARIO
-            default: 'os-nosdn-openo-noha'
-        - string:
-            name: COMPASS_OS_VERSION
-            default: 'xenial'
diff --git a/jjb/releng/testapi-automate.yml b/jjb/releng/testapi-automate.yml
index 47d217e..8cb4acb 100644
@@ -4,8 +4,16 @@
         - master:
             branch: '{stream}'
             gs-pathname: ''
+
+    phase:
+        - 'docker-update'
+        - 'docker-deploy':
+            slave-label: 'testresults'
+        - 'generate-doc'
+
     jobs:
         - 'testapi-automate-{stream}'
+        - 'testapi-automate-{phase}-{stream}'
         - 'testapi-verify-{stream}'
 
     project: 'releng'
 
     slave-label: 'testresults'
 
+    parameters:
+        - project-parameter:
+            project: '{project}'
+            branch: '{branch}'
+
     scm:
         - git-scm
 
                     healthy: 50
                     unhealthy: 40
                     failing: 30
+        - 'email-publisher'
 
 - job-template:
     name: 'testapi-automate-{stream}'
 
+    project-type: multijob
+
+    properties:
+        - throttle:
+            enabled: true
+            max-total: 1
+            max-per-node: 1
+            option: 'project'
+
     parameters:
         - project-parameter:
             project: '{project}'
     scm:
         - git-scm
 
+    wrappers:
+        - ssh-agent-wrapper
+        - timeout:
+            timeout: 360
+            fail: true
+
     triggers:
         - gerrit:
             server-name: 'gerrit.opnfv.org'
                     branch-pattern: '**/{branch}'
                 file-paths:
                   - compare-type: 'ANT'
-                    pattern: 'utils/**'
+                    pattern: 'utils/test/testapi/**'
+
+    builders:
+        - description-setter:
+            description: "Built on $NODE_NAME"
+        - multijob:
+            name: docker-update
+            condition: SUCCESSFUL
+            projects:
+                - name: 'testapi-automate-docker-update-{stream}'
+                  current-parameters: true
+                  kill-phase-on: FAILURE
+                  abort-all-job: true
+        - multijob:
+            name: docker-deploy
+            condition: SUCCESSFUL
+            projects:
+                - name: 'testapi-automate-docker-deploy-{stream}'
+                  current-parameters: false
+                  predefined-parameters: |
+                    GIT_BASE=$GIT_BASE
+                  node-label-name: SLAVE_LABEL
+                  node-label: testresults
+                  kill-phase-on: FAILURE
+                  abort-all-job: true
+        - multijob:
+            name: generate-doc
+            condition: SUCCESSFUL
+            projects:
+                - name: 'testapi-automate-generate-doc-{stream}'
+                  current-parameters: true
+                  kill-phase-on: FAILURE
+                  abort-all-job: true
+
+    publishers:
+        - 'email-publisher'
+
+- job-template:
+    name: 'testapi-automate-{phase}-{stream}'
+
+    properties:
+        - throttle:
+            enabled: true
+            max-per-node: 1
+            option: 'project'
+
+    parameters:
+        - project-parameter:
+            project: '{project}'
+            branch: '{branch}'
+        - string:
+            name: DOCKER_TAG
+            default: "latest"
+            description: "Tag name for testapi docker image"
+
+    wrappers:
+        - ssh-agent-wrapper
+        - timeout:
+            timeout: 120
+            fail: true
+
+    scm:
+        - git-scm
 
     builders:
-        - docker-update
-        - testapi-doc-build
-        - upload-doc-artifact
+        - description-setter:
+            description: "Built on $NODE_NAME"
+        - 'testapi-automate-{phase}-macro'
 
 ################################
 # job builders
             bash ./jjb/releng/testapi-backup-mongodb.sh
 
 - builder:
-    name: run-unit-tests
+    name: 'run-unit-tests'
     builders:
         - shell: |
             bash ./utils/test/testapi/run_test.sh
 
 - builder:
-    name: docker-update
+    name: 'testapi-automate-docker-update-macro'
     builders:
         - shell: |
             bash ./jjb/releng/testapi-docker-update.sh
 
 - builder:
-    name: testapi-doc-build
+    name: 'testapi-automate-generate-doc-macro'
+    builders:
+        - 'testapi-doc-build'
+        - 'upload-doc-artifact'
+
+- builder:
+    name: 'testapi-doc-build'
     builders:
         - shell: |
             bash ./utils/test/testapi/htmlize/doc-build.sh
 
 - builder:
-    name: upload-doc-artifact
+    name: 'upload-doc-artifact'
     builders:
         - shell: |
             bash ./utils/test/testapi/htmlize/push-doc-artifact.sh
+
+- builder:
+    name: 'testapi-automate-docker-deploy-macro'
+    builders:
+        - shell: |
+            bash ./jjb/releng/testapi-docker-deploy.sh
+
+################################
+# job publishers
+################################
+
+- publisher:
+    name: 'email-publisher'
+    publishers:
+        - email:
+            recipients: rohitsakala@gmail.com serena.feng.711@gmail.com
+            notify-every-unstable-build: false
+            send-to-individuals: true
diff --git a/jjb/releng/testapi-docker-deploy.sh b/jjb/releng/testapi-docker-deploy.sh
new file mode 100644
index 0000000..04d71f7
--- /dev/null
@@ -0,0 +1,81 @@
+#!/bin/bash
+
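+# check() polls the hosted swagger spec endpoint to verify that the TestAPI
+# container is up and serving requests.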
+function check() {
+
+    # Verify hosted
+    sleep 5
+    curl -s --head --request GET http://testresults.opnfv.org/auto/swagger/spec | grep '200 OK' > /dev/null
+    rc=$?
+    echo $rc
+
+    if [[ $rc == 0 ]]
+    then
+        return 0
+    else
+        return 1
+    fi
+
+}
+
+echo "Getting contianer Id of the currently running one"
+contId=$(sudo docker ps | grep "opnfv/testapi:latest" | awk '{print $1}')
+
+echo "Pulling the latest image"
+sudo docker pull opnfv/testapi:latest
+
+echo "Deleting old containers of opnfv/testapi:old"
+sudo docker ps -a | grep "opnfv/testapi" | grep "old" | awk '{print $1}' | xargs -r sudo docker rm -f
+
+echo "Deleting old images of opnfv/testapi:latest"
+sudo docker images | grep "opnfv/testapi" | grep "old" | awk '{print $3}' | xargs -r sudo docker rmi -f
+
+
+if [[ -z "$contId" ]]
+then
+    echo "No running testapi container"
+
+    echo "Removing stopped testapi containers in the previous iterations"
+    sudo docker ps -f status=exited | grep "opnfv_testapi" | awk '{print $1}' | xargs -r sudo docker rm -f
+else
+    echo $contId
+
+    echo "Get the image id of the currently running conatiner"
+    currImgId=$(sudo docker ps | grep "$contId" | awk '{print $2}')
+    echo $currImgId
+
+    if [[ -z "$currImgId" ]]
+    then
+        echo "No image id found for the container id"
+        exit 1
+    fi
+
+    echo "Changing current image tag to old"
+    sudo docker tag "$currImgId" opnfv/testapi:old
+
+    echo "Removing stopped testapi containers in the previous iteration"
+    sudo docker ps -f status=exited | grep "opnfv_testapi" | awk '{print $1}' | xargs -r sudo docker rm -f
+
+    echo "Renaming the running container name to opnfv_testapi as to identify it."
+    sudo docker rename $contId opnfv_testapi
+
+    echo "Stop the currently running container"
+    sudo docker stop $contId
+fi
+
+echo "Running a container with the new image"
+sudo docker run -dti -p "8711:8000" -e "mongodb_url=mongodb://172.17.0.1:27017" -e "swagger_url=http://testresults.opnfv.org/auto" opnfv/testapi:latest
+
+if check; then
+    echo "TestResults Hosted."
+else
+    echo "TestResults Hosting Failed"
+    if [[ $(sudo docker images | grep "opnfv/testapi" | grep "old" | awk '{print $3}') ]]; then
+        echo "Running old Image"
+        sudo docker run -dti -p "8711:8000" -e "mongodb_url=mongodb://172.17.0.1:27017" -e "swagger_url=http://testresults.opnfv.org/auto" opnfv/testapi:old
+        exit 1
+    fi
+fi
+
+# Echo Images and Containers
+sudo docker images
+sudo docker ps -a
diff --git a/jjb/yardstick/yardstick-project-jobs.yml b/jjb/yardstick/yardstick-project-jobs.yml
index 4b7ff6f..bbfa152 100644
             set -o errexit
             set -o pipefail
 
+            sudo apt-get install -y build-essential python-dev python3-dev
+
             echo "Running unit tests..."
             cd $WORKSPACE
-            virtualenv $WORKSPACE/yardstick_venv
-            source $WORKSPACE/yardstick_venv/bin/activate
-
-            # install python packages
-            sudo apt-get install -y build-essential python-dev python-pip python-pkg-resources
-            easy_install -U setuptools==33.1.1
-            easy_install -U pip
-            pip install -r requirements.txt || pip install -r tests/ci/requirements.txt
-            pip install -e .
-
-            # unit tests
-            ./run_tests.sh
-
-            deactivate
+            tox
diff --git a/prototypes/bifrost/scripts/destroy-env.sh b/prototypes/bifrost/scripts/destroy-env.sh
index 9920046..b73092b 100755
@@ -14,26 +14,23 @@ if [[ $(whoami) != "root" ]]; then
     exit 1
 fi
 
-virsh destroy jumphost.opnfvlocal || true
-virsh destroy controller00.opnfvlocal || true
-virsh destroy compute00.opnfvlocal || true
-virsh undefine jumphost.opnfvlocal || true
-virsh undefine controller00.opnfvlocal || true
-virsh undefine compute00.opnfvlocal || true
+# Delete all VMs on the slave since proposed patchsets
+# may leave undesired VM leftovers
+for vm in $(virsh list --all --name); do
+    virsh destroy $vm || true
+    virsh undefine $vm || true
+done
 
 service ironic-conductor stop || true
 
-echo "removing from database"
+echo "removing ironic database"
 if $(which mysql &> /dev/null); then
-    mysql -u root ironic --execute "truncate table ports;"
-    mysql -u root ironic --execute "delete from node_tags;"
-    mysql -u root ironic --execute "delete from nodes;"
-    mysql -u root ironic --execute "delete from conductors;"
+    mysql -u root ironic --execute "drop database ironic;"
 fi
 echo "removing leases"
 [[ -e /var/lib/misc/dnsmasq/dnsmasq.leases ]] && > /var/lib/misc/dnsmasq/dnsmasq.leases
 echo "removing logs"
-rm -rf /var/log/libvirt/baremetal_logs/*.log
+rm -rf /var/log/libvirt/baremetal_logs/*
 
 # clean up dib images only if requested explicitly
 CLEAN_DIB_IMAGES=${CLEAN_DIB_IMAGES:-false}
diff --git a/utils/test/testapi/htmlize/htmlize.py b/utils/test/testapi/htmlize/htmlize.py
index 075e31f..70976d2 100644
@@ -39,12 +39,12 @@ if __name__ == '__main__':
     parser.add_argument('-ru', '--resource-listing-url',
                         type=str,
                         required=False,
-                        default='http://testresults.opnfv.org/test/swagger/spec.json',
+                        default='http://testresults.opnfv.org/auto/swagger/spec.json',
                         help='Resource Listing Spec File')
     parser.add_argument('-au', '--api-declaration-url',
                         type=str,
                         required=False,
-                        default='http://testresults.opnfv.org/test/swagger/spec',
+                        default='http://testresults.opnfv.org/auto/swagger/spec',
                         help='API Declaration Spec File')
     parser.add_argument('-o', '--output-directory',
                         required=True,
diff --git a/utils/test/vnfcatalogue/helpers/README.md b/utils/test/vnfcatalogue/helpers/README.md
new file mode 100644
index 0000000..6c0ca78
--- /dev/null
@@ -0,0 +1,22 @@
+# Helper Directory
+
+## Helper to migrate database
+
+First make sure Node.js and MySQL are installed. Then use
+
+```bash
+npm install bookshelf mysql knex when lodash --save
+```
+
+Create a database named **vnf_catalogue**.
+Enter the MySQL credentials in migrate.js.
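+
+For example, assuming a local MySQL server (adjust the user and auth flags to
+your setup):
+
+```bash
+mysql -u root -p -e "CREATE DATABASE vnf_catalogue CHARACTER SET utf8;"
+```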
+
+Then use
+
+```bash
+node migrate
+```
+
+If successful, the script will print a success message. The current script is
+idempotent in nature; if run twice it will just return an error and write
+nothing.
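+
+To verify the migration, you can list the created tables (photo, user, vnf,
+tag, vnf_tags, vnf_contributors); the exact invocation depends on your MySQL
+setup:
+
+```bash
+mysql -u root -p vnf_catalogue -e "SHOW TABLES;"
+```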
+
diff --git a/utils/test/vnfcatalogue/helpers/migrate.js b/utils/test/vnfcatalogue/helpers/migrate.js
new file mode 100644
index 0000000..ec20905
--- /dev/null
@@ -0,0 +1,78 @@
+/*******************************************************************************
+ * Copyright (c) 2017 Kumar Rishabh(penguinRaider) and others.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Apache License, Version 2.0
+ * which accompanies this distribution, and is available at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *******************************************************************************/
+
+var knex = require('knex')({
+    client: 'mysql',
+    connection: {
+        host     : 'localhost',
+        user     : '*',
+        password : '*',
+        database : 'vnf_catalogue',
+        charset  : 'utf8'
+    }
+});
+var Schema = require('./schema');
+var sequence = require('when/sequence');
+var _ = require('lodash');
+function createTable(tableName) {
+    return knex.schema.createTable(tableName, function (table) {
+        var column;
+        var columnKeys = _.keys(Schema[tableName]);
+        _.each(columnKeys, function (key) {
+            if (Schema[tableName][key].type === 'text' && Schema[tableName][key].hasOwnProperty('fieldtype')) {
+                column = table[Schema[tableName][key].type](key, Schema[tableName][key].fieldtype);
+            }
+            else if (Schema[tableName][key].type === 'string' && Schema[tableName][key].hasOwnProperty('maxlength')) {
+                column = table[Schema[tableName][key].type](key, Schema[tableName][key].maxlength);
+            }
+            else {
+                column = table[Schema[tableName][key].type](key);
+            }
+            if (Schema[tableName][key].hasOwnProperty('nullable') && Schema[tableName][key].nullable === true) {
+                column.nullable();
+            }
+            else {
+                column.notNullable();
+            }
+            if (Schema[tableName][key].hasOwnProperty('primary') && Schema[tableName][key].primary === true) {
+                column.primary();
+            }
+            if (Schema[tableName][key].hasOwnProperty('unique') && Schema[tableName][key].unique) {
+                column.unique();
+            }
+            if (Schema[tableName][key].hasOwnProperty('unsigned') && Schema[tableName][key].unsigned) {
+                column.unsigned();
+            }
+            if (Schema[tableName][key].hasOwnProperty('references')) {
+                column.references(Schema[tableName][key].references);
+            }
+            if (Schema[tableName][key].hasOwnProperty('defaultTo')) {
+                column.defaultTo(Schema[tableName][key].defaultTo);
+            }
+        });
+    });
+}
+function createTables () {
+    var tables = [];
+    var tableNames = _.keys(Schema);
+    tables = _.map(tableNames, function (tableName) {
+        return function () {
+            return createTable(tableName);
+        };
+    });
+    return sequence(tables);
+}
+createTables()
+.then(function() {
+    console.log('Tables created!!');
+    process.exit(0);
+})
+.catch(function (error) {
+    throw error;
+});
diff --git a/utils/test/vnfcatalogue/helpers/schema.js b/utils/test/vnfcatalogue/helpers/schema.js
new file mode 100644
index 0000000..2aaf99a
--- /dev/null
@@ -0,0 +1,51 @@
+/*******************************************************************************
+ * Copyright (c) 2017 Kumar Rishabh(penguinRaider) and others.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Apache License, Version 2.0
+ * which accompanies this distribution, and is available at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *******************************************************************************/
+var Schema = {
+    photo: {
+        photo_id: {type: 'increments', nullable: false, primary: true},
+        photo_url: {type: 'string', maxlength: 254, nullable: false}
+    },
+    user: {
+        user_id: {type: 'increments', nullable: false, primary: true},
+        user_name: {type: 'string', maxlength: 254, nullable: false},
+        password: {type: 'string', maxlength: 150, nullable: false},
+        email_id: {type: 'string', maxlength: 254, nullable: false, unique: true, validations: {isEmail: true}},
+        photo_id: {type: 'integer', nullable: true, unsigned: true, references: 'photo.photo_id'},
+        company: {type: 'string', maxlength: 254, nullable: false},
+        introduction: {type: 'string', maxlength: 510, nullable: false},
+        last_login: {type: 'dateTime', nullable: true},
+        created_at: {type: 'dateTime', nullable: false},
+    },
+    vnf: {
+        vnf_id: {type: 'increments', nullable: false, primary: true},
+        vnf_name: {type: 'string', maxlength: 254, nullable: false},
+        repo_url: {type: 'string', maxlength: 254, nullable: false},
+        photo_id: {type: 'integer', nullable: true, unsigned: true, references: 'photo.photo_id'},
+        submitter_id: {type: 'integer', nullable: false, unsigned: true, references: 'user.user_id'},
+        lines_of_code: {type: 'integer', nullable: true, unsigned: true},
+        versions: {type: 'integer', nullable: true, unsigned: true},
+        no_of_developers: {type: 'integer', nullable: true, unsigned: true},
+    },
+    tag: {
+        tag_id: {type: 'increments', nullable: false, primary: true},
+        name: {type: 'string', maxlength: 150, nullable: false}
+    },
+    vnf_tags: {
+        vnf_tag_id: {type: 'increments', nullable: false, primary: true},
+        tag_id: {type: 'integer', nullable: false, unsigned: true, references: 'tag.tag_id'},
+        vnf_id: {type: 'integer', nullable: false, unsigned: true, references: 'vnf.vnf_id'},
+    },
+    vnf_contributors: {
+        vnf_contributors_id: {type: 'increments', nullable: false, primary: true},
+        user_id: {type: 'integer', nullable: false, unsigned: true, references: 'user.user_id'},
+        vnf_id: {type: 'integer', nullable: false, unsigned: true, references: 'vnf.vnf_id'},
+        created_at: {type: 'dateTime', nullable: false},
+    }
+};
+module.exports = Schema;