Merge "Add the function of yardstick testcase reporter"
author Morgan Richomme <morgan.richomme@orange.com>
Tue, 30 Aug 2016 06:21:12 +0000 (06:21 +0000)
committer Gerrit Code Review <gerrit@172.30.200.206>
Tue, 30 Aug 2016 06:21:12 +0000 (06:21 +0000)
89 files changed:
jjb/apex/apex-build.sh
jjb/apex/apex.yml
jjb/armband/armband-ci-jobs.yml
jjb/armband/armband-download-artifact.sh
jjb/armband/armband-project-jobs.yml
jjb/armband/build.sh
jjb/availability/availability.yml
jjb/bottlenecks/bottlenecks-ci-jobs.yml
jjb/bottlenecks/bottlenecks-project-jobs.yml
jjb/compass4nfv/compass-build.sh
jjb/compass4nfv/compass-ci-jobs.yml
jjb/compass4nfv/compass-deploy.sh
jjb/compass4nfv/compass-makeppa.sh
jjb/compass4nfv/compass-project-jobs.yml
jjb/conductor/conductor.yml
jjb/copper/copper.yml
jjb/doctor/doctor.yml
jjb/domino/domino.yml
jjb/dpacc/dpacc.yml
jjb/fastpathmetrics/fastpathmetrics.yml
jjb/fuel/fuel-build.sh
jjb/fuel/fuel-ci-jobs.yml
jjb/fuel/fuel-download-artifact.sh
jjb/fuel/fuel-project-jobs.yml
jjb/fuel/fuel-verify-jobs.yml
jjb/functest/functest-ci-jobs.yml
jjb/functest/functest-project-jobs.yml
jjb/ipv6/ipv6.yml
jjb/joid/joid-ci-jobs.yml
jjb/joid/joid-deploy.sh
jjb/joid/joid-verify-jobs.yml
jjb/kvmfornfv/kvmfornfv-download-artifact.sh
jjb/kvmfornfv/kvmfornfv-test.sh
jjb/kvmfornfv/kvmfornfv-upload-artifact.sh
jjb/kvmfornfv/kvmfornfv.yml
jjb/multisite/multisite.yml
jjb/netready/netready.yml
jjb/octopus/octopus.yml
jjb/onosfw/onosfw.yml
jjb/opnfv/installer-params.yml
jjb/opnfv/opnfv-docker.yml
jjb/opnfv/opnfv-docs.yml
jjb/opnfv/opnfv-lint.yml
jjb/opnfv/slave-params.yml
jjb/opnfvdocs/opnfvdocs.yml
jjb/ovsnfv/ovsnfv.yml
jjb/parser/parser.yml
jjb/pharos/pharos.yml
jjb/prediction/prediction.yml
jjb/promise/promise.yml
jjb/qtip/qtip-ci-jobs.yml
jjb/qtip/qtip-project-jobs.yml
jjb/releng-macros.yaml
jjb/storperf/storperf.yml
jjb/vnf_forwarding_graph/vnf_forwarding_graph.yml
jjb/vswitchperf/vswitchperf.yml
jjb/yardstick/yardstick-ci-jobs.yml
jjb/yardstick/yardstick-daily.sh
jjb/yardstick/yardstick-project-jobs.yml
prototypes/bifrost/README.md [new file with mode: 0644]
prototypes/bifrost/playbooks/roles/bifrost-prepare-for-test-dynamic/defaults/main.yml [new file with mode: 0644]
prototypes/bifrost/playbooks/test-bifrost-infracloud.yaml [new file with mode: 0644]
prototypes/bifrost/scripts/destroy_env.sh [new file with mode: 0755]
prototypes/bifrost/scripts/test-bifrost-deployment.sh [new file with mode: 0755]
prototypes/puppet-infracloud/.gitkeep [new file with mode: 0644]
prototypes/puppet-infracloud/README.md [new file with mode: 0644]
prototypes/puppet-infracloud/creds/clouds.yaml [new file with mode: 0644]
prototypes/puppet-infracloud/hiera/common.yaml [new file with mode: 0644]
prototypes/puppet-infracloud/install_modules.sh [new file with mode: 0755]
prototypes/puppet-infracloud/manifests/site.pp [new file with mode: 0644]
prototypes/puppet-infracloud/modules.env [new file with mode: 0644]
prototypes/puppet-infracloud/modules/opnfv/manifests/compute.pp [new file with mode: 0644]
prototypes/puppet-infracloud/modules/opnfv/manifests/controller.pp [new file with mode: 0644]
prototypes/puppet-infracloud/modules/opnfv/manifests/server.pp [new file with mode: 0644]
utils/fetch_os_creds.sh
utils/jenkins-jnlp-connect.sh
utils/test/reporting/functest/reporting-status.py [changed mode: 0644->0755]
utils/test/reporting/functest/reporting-tempest.py [changed mode: 0644->0755]
utils/test/reporting/functest/reporting-vims.py [changed mode: 0644->0755]
utils/test/reporting/functest/reportingConf.py
utils/test/reporting/functest/template/index-status-tmpl.html
utils/test/reporting/functest/template/index-tempest-tmpl.html
utils/test/reporting/functest/template/index-vims-tmpl.html
utils/test/reporting/functest/testCase.py
utils/test/result_collection_api/update/README.md
utils/test/result_collection_api/update/playbook-update.sh [new file with mode: 0755]
utils/test/result_collection_api/update/templates/rm_images.sh [new file with mode: 0755]
utils/test/result_collection_api/update/test.yml [new file with mode: 0644]
utils/test/result_collection_api/update/update.yml

diff --git a/jjb/apex/apex-build.sh b/jjb/apex/apex-build.sh
index f6b2e32..e3e3f61 100755 (executable)
@@ -23,7 +23,7 @@ fi
 # start the build
 cd $WORKSPACE/ci
 ./build.sh $BUILD_ARGS
-RPM_VERSION=$(grep Version: $BUILD_DIRECTORY/opnfv-apex.spec | awk '{ print $2 }')-$(echo $OPNFV_ARTIFACT_VERSION | tr -d '_-')
+RPM_VERSION=$(grep Version: $BUILD_DIRECTORY/rpm_specs/opnfv-apex.spec | awk '{ print $2 }')-$(echo $OPNFV_ARTIFACT_VERSION | tr -d '_-')
 # list the contents of BUILD_OUTPUT directory
 echo "Build Directory is ${BUILD_DIRECTORY}"
 echo "Build Directory Contents:"
@@ -44,10 +44,10 @@ if ! echo $BUILD_TAG | grep "apex-verify" 1> /dev/null; then
     echo "OPNFV_GIT_URL=$(git config --get remote.origin.url)"
     echo "OPNFV_GIT_SHA1=$(git rev-parse HEAD)"
     echo "OPNFV_ARTIFACT_URL=$GS_URL/opnfv-$OPNFV_ARTIFACT_VERSION.iso"
-    echo "OPNFV_ARTIFACT_MD5SUM=$(md5sum $BUILD_DIRECTORY/release/OPNFV-CentOS-7-x86_64-$OPNFV_ARTIFACT_VERSION.iso | cut -d' ' -f1)"
+    echo "OPNFV_ARTIFACT_SHA512SUM=$(sha512sum $BUILD_DIRECTORY/release/OPNFV-CentOS-7-x86_64-$OPNFV_ARTIFACT_VERSION.iso | cut -d' ' -f1)"
     echo "OPNFV_SRPM_URL=$GS_URL/opnfv-apex-$RPM_VERSION.src.rpm"
     echo "OPNFV_RPM_URL=$GS_URL/opnfv-apex-$RPM_VERSION.noarch.rpm"
-    echo "OPNFV_RPM_MD5SUM=$(md5sum $BUILD_DIRECTORY/noarch/opnfv-apex-$RPM_VERSION.noarch.rpm | cut -d' ' -f1)"
+    echo "OPNFV_RPM_SHA512SUM=$(sha512sum $BUILD_DIRECTORY/noarch/opnfv-apex-$RPM_VERSION.noarch.rpm | cut -d' ' -f1)"
     echo "OPNFV_BUILD_URL=$BUILD_URL"
   ) > $WORKSPACE/opnfv.properties
 fi
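
The hunks above replace the MD5 digests recorded in opnfv.properties with SHA512. A minimal sketch of how a consumer of that file might verify a downloaded RPM against the recorded digest (the property name matches the hunk above; paths and defaults are illustrative assumptions):

    #!/bin/bash
    # Sketch: check a local artifact against the SHA512 digest recorded
    # in opnfv.properties. Paths and defaults below are assumptions.
    set -o errexit

    PROPERTIES=${1:-opnfv.properties}
    ARTIFACT=${2:-opnfv-apex.noarch.rpm}

    expected=$(grep '^OPNFV_RPM_SHA512SUM=' "$PROPERTIES" | cut -d'=' -f2)
    actual=$(sha512sum "$ARTIFACT" | cut -d' ' -f1)

    if [[ "$expected" == "$actual" ]]; then
        echo "checksum OK: $ARTIFACT"
    else
        echo "checksum MISMATCH: $ARTIFACT" >&2
        exit 1
    fi
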
diff --git a/jjb/apex/apex.yml b/jjb/apex/apex.yml
index da9089c..8a5a82f 100644 (file)
@@ -1,15 +1,14 @@
 - project:
     name: apex
     jobs:
-        - 'apex-verify-{stream1}'
-        - 'apex-verify-{stream2}'
+        - 'apex-verify-{stream}'
+        - 'apex-verify-unit-tests-{stream}'
         - 'apex-runner-{platform}-{scenario}-{stream}'
-        - 'apex-runner-cperf-{stream1}'
+        - 'apex-runner-cperf-{stream}'
         - 'apex-build-{stream}'
         - 'apex-deploy-virtual-{scenario}-{stream}'
         - 'apex-deploy-baremetal-{scenario}-{stream}'
-        - 'apex-daily-{stream1}'
-        - 'apex-daily-{stream2}'
+        - 'apex-daily-{stream}'
 
     # stream:    branch with - in place of / (eg. stable-arno)
     # branch:    branch (eg. stable/arno)
         - master:
             branch: 'master'
             gs-pathname: ''
-            block-stream: 'brahmaputra'
-            slave: 'intel-pod7'
+            block-stream: 'colorado'
+            slave: 'lf-pod1'
             verify-slave: 'apex-verify-master'
             daily-slave: 'apex-daily-master'
-        - brahmaputra:
-            branch: 'stable/brahmaputra'
-            gs-pathname: '/brahmaputra'
+        - colorado:
+            branch: 'stable/colorado'
+            gs-pathname: '/colorado'
             block-stream: 'master'
             slave: 'lf-pod1'
-            verify-slave: 'apex-verify-brahmaputra'
-            daily-slave: 'apex-daily-brahmaputra'
-            disabled: true
+            verify-slave: 'apex-verify-colorado'
+            daily-slave: 'apex-daily-colorado'
+            disabled: false
 
     stream1:
         - master:
             branch: 'master'
             gs-pathname: ''
-            block-stream: 'brahmaputra'
+            block-stream: 'colorado'
             slave: 'lf-pod1'
             verify-slave: 'apex-verify-master'
             daily-slave: 'apex-daily-master'
 
     stream2:
-        - brahmaputra:
-            branch: 'stable/brahmaputra'
-            gs-pathname: '/brahmaputra'
+        - colorado:
+            branch: 'stable/colorado'
+            gs-pathname: '/colorado'
             block-stream: 'master'
             slave: 'lf-pod1'
-            verify-slave: 'apex-verify-brahmaputra'
-            daily-slave: 'apex-daily-brahmaputra'
-            disabled: true
+            verify-slave: 'apex-verify-colorado'
+            daily-slave: 'apex-daily-colorado'
+            disabled: false
 
     project: 'apex'
 
@@ -55,6 +54,7 @@
          - 'os-nosdn-nofeature-noha'
          - 'os-nosdn-nofeature-ha'
          - 'os-nosdn-nofeature-ha-ipv6'
+         - 'os-nosdn-ovs-noha'
          - 'os-nosdn-fdio-noha'
          - 'os-odl_l2-nofeature-ha'
          - 'os-odl_l2-bgpvpn-ha'
          - 'baremetal'
          - 'virtual'
 
-# Brahmaputra Verify
+# Unit Test
 - job-template:
-    name: 'apex-verify-{stream2}'
+    name: 'apex-verify-unit-tests-{stream}'
 
-    node: '{slave}'
+    node: '{verify-slave}'
+
+    concurrent: true
 
     parameters:
         - apex-parameter:
                     branch-pattern: '**/{branch}'
                 file-paths:
                   - compare-type: ANT
-                    pattern: 'ci/**'
-                  - compare-type: ANT
-                    pattern: 'build/**'
-                  - compare-type: ANT
-                    pattern: 'lib/**'
-                  - compare-type: ANT
-                    pattern: 'config/**'
-
+                    pattern: 'tests/**'
     properties:
-        - build-blocker:
-            use-build-blocker: true
-            blocking-jobs:
-                - 'apex-daily.*{stream2}'
-                - 'apex-deploy.*{stream2}'
-                - 'apex-build.*{stream2}'
-                - 'apex-runner.*{stream2}'
-                - 'apex-verify-{stream2}'
+        - throttle:
+            max-per-node: 1
+            max-total: 10
+            option: 'project'
 
     builders:
         - 'apex-unit-test'
-        - 'apex-build'
-        - trigger-builds:
-          - project: 'apex-deploy-virtual-os-odl_l2-nofeature-ha-{stream2}'
-            predefined-parameters: |
-              BUILD_DIRECTORY=apex-verify-{stream2}/build_output
-              OPNFV_CLEAN=yes
-            git-revision: false
-            block: true
-        - trigger-builds:
-          - project: 'apex-deploy-virtual-os-onos-nofeature-ha-{stream2}'
-            predefined-parameters: |
-              BUILD_DIRECTORY=apex-verify-{stream2}/build_output
-              OPNFV_CLEAN=yes
-            git-revision: false
-            block: true
-        - trigger-builds:
-          - project: 'apex-deploy-virtual-os-odl_l3-nofeature-ha-{stream2}'
-            predefined-parameters: |
-              BUILD_DIRECTORY=apex-verify-{stream2}/build_output
-              OPNFV_CLEAN=yes
-            git-revision: false
-            block: true
-        - trigger-builds:
-          - project: 'apex-deploy-virtual-os-odl_l2-sfc-noha-{stream2}'
-            predefined-parameters: |
-              BUILD_DIRECTORY=apex-verify-{stream2}/build_output
-              OPNFV_CLEAN=yes
-            git-revision: false
-            block: true
-        - 'apex-workspace-cleanup'
 
-# Master Verify
+# Verify
 - job-template:
-    name: 'apex-verify-{stream1}'
+    name: 'apex-verify-{stream}'
 
     node: '{verify-slave}'
 
             use-build-blocker: true
             block-level: 'NODE'
             blocking-jobs:
-                - 'apex-daily.*{stream1}'
-                - 'apex-deploy.*{stream1}'
-                - 'apex-build.*{stream1}'
-                - 'apex-runner.*{stream1}'
-                - 'apex-verify-{stream1}'
+                - 'apex-daily.*'
+                - 'apex-deploy.*'
+                - 'apex-build.*'
+                - 'apex-runner.*'
+                - 'apex-verify.*'
         - throttle:
             max-per-node: 1
             max-total: 10
         - 'apex-unit-test'
         - 'apex-build'
         - trigger-builds:
-          - project: 'apex-deploy-virtual-os-nosdn-nofeature-ha-{stream1}'
+          - project: 'apex-deploy-virtual-os-nosdn-nofeature-ha-{stream}'
             predefined-parameters: |
-              BUILD_DIRECTORY=apex-verify-{stream1}
+              BUILD_DIRECTORY=apex-verify-{stream}
               OPNFV_CLEAN=yes
             git-revision: false
             block: true
             same-node: true
         - trigger-builds:
-          - project: 'functest-apex-{verify-slave}-suite-{stream1}'
+          - project: 'functest-apex-{verify-slave}-suite-{stream}'
             predefined-parameters: |
               DEPLOY_SCENARIO=os-nosdn-nofeature-ha
               FUNCTEST_SUITE_NAME=healthcheck
             block: true
             same-node: true
         - trigger-builds:
-          - project: 'apex-deploy-virtual-os-odl_l2-nofeature-ha-{stream1}'
+          - project: 'apex-deploy-virtual-os-odl_l2-nofeature-ha-{stream}'
             predefined-parameters: |
-              BUILD_DIRECTORY=apex-verify-{stream1}
+              BUILD_DIRECTORY=apex-verify-{stream}
               OPNFV_CLEAN=yes
             git-revision: false
             block: true
             same-node: true
         - trigger-builds:
-          - project: 'functest-apex-{verify-slave}-suite-{stream1}'
+          - project: 'functest-apex-{verify-slave}-suite-{stream}'
             predefined-parameters: |
               DEPLOY_SCENARIO=os-odl_l2-nofeature-ha
               FUNCTEST_SUITE_NAME=healthcheck
         - build-blocker:
             use-build-blocker: true
             blocking-jobs:
-                - 'apex-daily.*{stream}'
-                - 'apex-verify.*{stream}'
+                - 'apex-daily.*'
+                - 'apex-verify.*'
 
     builders:
         - trigger-builds:
                 unstable-threshold: 'FAILURE'
 
 - job-template:
-    name: 'apex-runner-cperf-{stream1}'
+    name: 'apex-runner-cperf-{stream}'
 
     # runner cperf job
 
             use-build-blocker: false
             block-level: 'NODE'
             blocking-jobs:
-                - 'apex-deploy.*{stream}'
+                - 'apex-deploy.*'
         - throttle:
             max-per-node: 1
             max-total: 10
 
     builders:
         - trigger-builds:
-          - project: 'apex-deploy-baremetal-os-odl_l2-nofeature-ha-{stream1}'
+          - project: 'apex-deploy-baremetal-os-odl_l2-nofeature-ha-{stream}'
             predefined-parameters:
               OPNFV_CLEAN=yes
             git-revision: false
             block: true
         - trigger-builds:
-          - project: 'cperf-apex-intel-pod2-daily-{stream1}'
+          - project: 'cperf-apex-intel-pod2-daily-{stream}'
             predefined-parameters:
               DEPLOY_SCENARIO=os-odl_l2-nofeature-ha
             block: true
             use-build-blocker: true
             block-level: 'NODE'
             blocking-jobs:
-                - 'apex-deploy.*{stream}'
+                - 'apex-deploy.*'
         - throttle:
             max-per-node: 1
             max-total: 10
             use-build-blocker: true
             block-level: 'NODE'
             blocking-jobs:
-                - 'apex-deploy.*{stream}'
+                - 'apex-deploy.*'
         - throttle:
             max-per-node: 1
             max-total: 10
             use-build-blocker: true
             block-level: 'NODE'
             blocking-jobs:
-                - 'apex-verify.*{stream}'
-                - 'apex-deploy.*{stream}'
-                - 'apex-build.*{stream}'
+                - 'apex-verify.*'
+                - 'apex-deploy.*'
+                - 'apex-build.*'
 
 
     builders:
         - 'apex-deploy'
         - 'apex-workspace-cleanup'
 
-# Brahmaputra Daily
+# Daily
 - job-template:
-    name: 'apex-daily-{stream2}'
-
-    # Job template for daily build
-    #
-    # Required Variables:
-    #     stream:    branch with - in place of / (eg. stable)
-    #     branch:    branch (eg. stable)
-    node: '{slave}'
-
-    disabled: true
-
-    scm:
-        - git-scm:
-            credentials-id: '{ssh-credentials}'
-            refspec: ''
-            branch: '{branch}'
-
-    parameters:
-        - project-parameter:
-            project: '{project}'
-        - apex-parameter:
-            gs-pathname: '{gs-pathname}'
-
-    properties:
-        - build-blocker:
-            use-build-blocker: true
-            blocking-jobs:
-                - 'apex-verify.*{stream2}'
-                - 'apex-deploy.*{stream2}'
-                - 'apex-build.*{stream2}'
-                - 'apex-runner.*{stream2}'
-
-    triggers:
-        - 'apex-{stream2}'
-
-    builders:
-        - trigger-builds:
-          - project: 'apex-build-{stream2}'
-            git-revision: true
-            current-parameters: true
-            block: true
-        - trigger-builds:
-          - project: 'apex-deploy-baremetal-os-odl_l2-nofeature-ha-{stream2}'
-            predefined-parameters: |
-              BUILD_DIRECTORY=apex-build-{stream2}/build_output
-              OPNFV_CLEAN=yes
-            git-revision: true
-            block: true
-        - trigger-builds:
-          - project: 'functest-apex-{slave}-daily-{stream2}'
-            predefined-parameters:
-              DEPLOY_SCENARIO=os-odl_l2-nofeature-ha
-            block: true
-            block-thresholds:
-                build-step-failure-threshold: 'never'
-                failure-threshold: 'never'
-                unstable-threshold: 'FAILURE'
-        - trigger-builds:
-          - project: 'yardstick-apex-{slave}-daily-{stream2}'
-            predefined-parameters:
-              DEPLOY_SCENARIO=os-odl_l2-nofeature-ha
-            block: true
-            block-thresholds:
-                build-step-failure-threshold: 'never'
-                failure-threshold: 'never'
-                unstable-threshold: 'FAILURE'
-        - trigger-builds:
-          - project: 'apex-deploy-baremetal-os-onos-nofeature-ha-{stream2}'
-            predefined-parameters:
-              BUILD_DIRECTORY=apex-build-{stream2}/build_output
-            git-revision: true
-            block: true
-        - trigger-builds:
-          - project: 'functest-apex-{slave}-daily-{stream2}'
-            predefined-parameters:
-              DEPLOY_SCENARIO=os-onos-nofeature-ha
-            block: true
-            block-thresholds:
-                build-step-failure-threshold: 'never'
-                failure-threshold: 'never'
-                unstable-threshold: 'FAILURE'
-        - trigger-builds:
-          - project: 'yardstick-apex-{slave}-daily-{stream2}'
-            predefined-parameters:
-              DEPLOY_SCENARIO=os-onos-nofeature-ha
-            block: true
-            block-thresholds:
-                build-step-failure-threshold: 'never'
-                failure-threshold: 'never'
-                unstable-threshold: 'FAILURE'
-        - trigger-builds:
-          - project: 'apex-deploy-baremetal-os-odl_l3-nofeature-ha-{stream2}'
-            predefined-parameters:
-              BUILD_DIRECTORY=apex-build-{stream2}/build_output
-            git-revision: true
-            block: true
-        - trigger-builds:
-          - project: 'functest-apex-{slave}-daily-{stream2}'
-            predefined-parameters:
-              DEPLOY_SCENARIO=os-odl_l3-nofeature-ha
-            block: true
-            block-thresholds:
-                build-step-failure-threshold: 'never'
-                failure-threshold: 'never'
-                unstable-threshold: 'FAILURE'
-        - trigger-builds:
-          - project: 'yardstick-apex-{slave}-daily-{stream2}'
-            predefined-parameters:
-              DEPLOY_SCENARIO=os-odl_l3-nofeature-ha
-            block: true
-            block-thresholds:
-                build-step-failure-threshold: 'never'
-                failure-threshold: 'never'
-                unstable-threshold: 'FAILURE'
-        - trigger-builds:
-          - project: 'apex-deploy-baremetal-os-odl_l2-bgpvpn-ha-{stream2}'
-            predefined-parameters:
-              BUILD_DIRECTORY=apex-build-{stream2}/build_output
-            git-revision: true
-            block: true
-        - trigger-builds:
-          - project: 'functest-apex-{slave}-daily-{stream2}'
-            predefined-parameters:
-              DEPLOY_SCENARIO=os-odl_l2-bgpvpn-ha
-            block: true
-            block-thresholds:
-                build-step-failure-threshold: 'never'
-                failure-threshold: 'never'
-                unstable-threshold: 'FAILURE'
-        - trigger-builds:
-          - project: 'yardstick-apex-{slave}-daily-{stream2}'
-            predefined-parameters:
-              DEPLOY_SCENARIO=os-odl_l2-bgpvpn-ha
-            block: true
-            block-thresholds:
-                build-step-failure-threshold: 'never'
-                failure-threshold: 'never'
-                unstable-threshold: 'FAILURE'
-
-# Master Daily
-- job-template:
-    name: 'apex-daily-{stream1}'
+    name: 'apex-daily-{stream}'
 
     # Job template for daily build
     #
             use-build-blocker: true
             block-level: 'NODE'
             blocking-jobs:
-                - 'apex-verify.*{stream1}'
-                - 'apex-deploy.*{stream1}'
-                - 'apex-build.*{stream1}'
-                - 'apex-runner.*{stream1}'
+                - 'apex-verify.*'
+                - 'apex-deploy.*'
+                - 'apex-build.*'
+                - 'apex-runner.*'
 
     triggers:
-        - 'apex-{stream1}'
+        - 'apex-{stream}'
 
     builders:
         - trigger-builds:
-          - project: 'apex-build-{stream1}'
+          - project: 'apex-build-{stream}'
             git-revision: true
             current-parameters: true
             same-node: true
             block: true
         - trigger-builds:
-          - project: 'apex-deploy-baremetal-os-nosdn-nofeature-ha-{stream1}'
+          - project: 'apex-deploy-baremetal-os-nosdn-nofeature-ha-{stream}'
             predefined-parameters: |
-              BUILD_DIRECTORY=apex-build-{stream1}/build
+              BUILD_DIRECTORY=apex-build-{stream}/build
               OPNFV_CLEAN=yes
             git-revision: true
             same-node: true
                 build-step-failure-threshold: 'never'
             block: true
         - trigger-builds:
-          - project: 'functest-apex-{daily-slave}-daily-{stream1}'
+          - project: 'functest-apex-{daily-slave}-daily-{stream}'
             predefined-parameters:
               DEPLOY_SCENARIO=os-nosdn-nofeature-ha
             block: true
                 failure-threshold: 'never'
                 unstable-threshold: 'FAILURE'
         - trigger-builds:
-          - project: 'yardstick-apex-{slave}-daily-{stream1}'
+          - project: 'yardstick-apex-{slave}-daily-{stream}'
             predefined-parameters:
               DEPLOY_SCENARIO=os-nosdn-nofeature-ha
             block: true
                 failure-threshold: 'never'
                 unstable-threshold: 'FAILURE'
         - trigger-builds:
-          - project: 'apex-deploy-baremetal-os-odl_l2-nofeature-ha-{stream1}'
+          - project: 'apex-deploy-baremetal-os-odl_l2-nofeature-ha-{stream}'
             predefined-parameters: |
-              BUILD_DIRECTORY=apex-build-{stream1}/build
+              BUILD_DIRECTORY=apex-build-{stream}/build
               OPNFV_CLEAN=yes
             git-revision: true
             same-node: true
                 build-step-failure-threshold: 'never'
             block: true
         - trigger-builds:
-          - project: 'functest-apex-{daily-slave}-daily-{stream1}'
+          - project: 'functest-apex-{daily-slave}-daily-{stream}'
             predefined-parameters:
               DEPLOY_SCENARIO=os-odl_l2-nofeature-ha
             block: true
                 failure-threshold: 'never'
                 unstable-threshold: 'FAILURE'
         - trigger-builds:
-          - project: 'yardstick-apex-{slave}-daily-{stream1}'
+          - project: 'yardstick-apex-{slave}-daily-{stream}'
             predefined-parameters:
               DEPLOY_SCENARIO=os-odl_l2-nofeature-ha
             block: true
                 failure-threshold: 'never'
                 unstable-threshold: 'FAILURE'
         - trigger-builds:
-          - project: 'apex-deploy-baremetal-os-odl_l3-nofeature-ha-{stream1}'
+          - project: 'apex-deploy-baremetal-os-odl_l3-nofeature-ha-{stream}'
             predefined-parameters: |
-              BUILD_DIRECTORY=apex-build-{stream1}/build
+              BUILD_DIRECTORY=apex-build-{stream}/build
               OPNFV_CLEAN=yes
             git-revision: true
             same-node: true
                 build-step-failure-threshold: 'never'
             block: true
         - trigger-builds:
-          - project: 'functest-apex-{daily-slave}-daily-{stream1}'
+          - project: 'functest-apex-{daily-slave}-daily-{stream}'
             predefined-parameters:
               DEPLOY_SCENARIO=os-odl_l3-nofeature-ha
             block: true
                 failure-threshold: 'never'
                 unstable-threshold: 'FAILURE'
         - trigger-builds:
-          - project: 'yardstick-apex-{slave}-daily-{stream1}'
+          - project: 'yardstick-apex-{slave}-daily-{stream}'
             predefined-parameters:
               DEPLOY_SCENARIO=os-odl_l3-nofeature-ha
             block: true
                 failure-threshold: 'never'
                 unstable-threshold: 'FAILURE'
         - trigger-builds:
-          - project: 'apex-deploy-baremetal-os-onos-nofeature-ha-{stream1}'
+          - project: 'apex-deploy-baremetal-os-onos-nofeature-ha-{stream}'
             predefined-parameters: |
-              BUILD_DIRECTORY=apex-build-{stream1}/build
+              BUILD_DIRECTORY=apex-build-{stream}/build
               OPNFV_CLEAN=yes
             git-revision: true
             same-node: true
                 build-step-failure-threshold: 'never'
             block: true
         - trigger-builds:
-          - project: 'functest-apex-{daily-slave}-daily-{stream1}'
+          - project: 'functest-apex-{daily-slave}-daily-{stream}'
             predefined-parameters:
               DEPLOY_SCENARIO=os-onos-nofeature-ha
             block: true
                 failure-threshold: 'never'
                 unstable-threshold: 'FAILURE'
         - trigger-builds:
-          - project: 'yardstick-apex-{slave}-daily-{stream1}'
+          - project: 'yardstick-apex-{slave}-daily-{stream}'
             predefined-parameters:
               DEPLOY_SCENARIO=os-onos-nofeature-ha
             block: true
                 failure-threshold: 'never'
                 unstable-threshold: 'FAILURE'
         - trigger-builds:
-          - project: 'apex-deploy-baremetal-os-odl_l2-bgpvpn-ha-{stream1}'
+          - project: 'apex-deploy-baremetal-os-odl_l2-bgpvpn-ha-{stream}'
             predefined-parameters: |
-              BUILD_DIRECTORY=apex-build-{stream1}/build
+              BUILD_DIRECTORY=apex-build-{stream}/build
               OPNFV_CLEAN=yes
             git-revision: true
             same-node: true
                 build-step-failure-threshold: 'never'
             block: true
         - trigger-builds:
-          - project: 'functest-apex-{daily-slave}-daily-{stream1}'
+          - project: 'functest-apex-{daily-slave}-daily-{stream}'
             predefined-parameters:
               DEPLOY_SCENARIO=os-odl_l2-bgpvpn-ha
             block: true
                 failure-threshold: 'never'
                 unstable-threshold: 'FAILURE'
         - trigger-builds:
-          - project: 'yardstick-apex-{slave}-daily-{stream1}'
+          - project: 'yardstick-apex-{slave}-daily-{stream}'
             predefined-parameters:
               DEPLOY_SCENARIO=os-odl_l2-bgpvpn-ha
             block: true
                 failure-threshold: 'never'
                 unstable-threshold: 'FAILURE'
         - trigger-builds:
-          - project: 'apex-deploy-baremetal-os-onos-sfc-ha-{stream1}'
+          - project: 'apex-deploy-baremetal-os-onos-sfc-ha-{stream}'
             predefined-parameters: |
-              BUILD_DIRECTORY=apex-build-{stream1}/build
+              BUILD_DIRECTORY=apex-build-{stream}/build
               OPNFV_CLEAN=yes
             git-revision: true
             same-node: true
                 build-step-failure-threshold: 'never'
             block: true
         - trigger-builds:
-          - project: 'functest-apex-{daily-slave}-daily-{stream1}'
+          - project: 'functest-apex-{daily-slave}-daily-{stream}'
             predefined-parameters:
               DEPLOY_SCENARIO=os-onos-sfc-ha
             block: true
                 failure-threshold: 'never'
                 unstable-threshold: 'FAILURE'
         - trigger-builds:
-          - project: 'yardstick-apex-{slave}-daily-{stream1}'
+          - project: 'yardstick-apex-{slave}-daily-{stream}'
             predefined-parameters:
               DEPLOY_SCENARIO=os-onos-sfc-ha
             block: true
                 failure-threshold: 'never'
                 unstable-threshold: 'FAILURE'
         - trigger-builds:
-          - project: 'apex-deploy-baremetal-os-odl_l2-sfc-noha-{stream1}'
+          - project: 'apex-deploy-baremetal-os-odl_l2-sfc-noha-{stream}'
             predefined-parameters: |
-              BUILD_DIRECTORY=apex-build-{stream1}/build
+              BUILD_DIRECTORY=apex-build-{stream}/build
               OPNFV_CLEAN=yes
             git-revision: true
             same-node: true
                 build-step-failure-threshold: 'never'
             block: true
         - trigger-builds:
-          - project: 'functest-apex-{daily-slave}-daily-{stream1}'
+          - project: 'functest-apex-{daily-slave}-daily-{stream}'
             predefined-parameters:
               DEPLOY_SCENARIO=os-odl_l2-sfc-noha
             block: true
                 failure-threshold: 'never'
                 unstable-threshold: 'FAILURE'
         - trigger-builds:
-          - project: 'yardstick-apex-{slave}-daily-{stream1}'
+          - project: 'yardstick-apex-{slave}-daily-{stream}'
             predefined-parameters:
               DEPLOY_SCENARIO=os-odl_l2-sfc-noha
             block: true
                 failure-threshold: 'never'
                 unstable-threshold: 'FAILURE'
         - trigger-builds:
-          - project: 'apex-deploy-baremetal-os-odl_l2-fdio-noha-{stream1}'
+          - project: 'apex-deploy-baremetal-os-odl_l2-fdio-noha-{stream}'
             predefined-parameters: |
-              BUILD_DIRECTORY=apex-build-{stream1}/build
+              BUILD_DIRECTORY=apex-build-{stream}/build
               OPNFV_CLEAN=yes
             git-revision: true
             same-node: true
                 build-step-failure-threshold: 'never'
             block: true
         - trigger-builds:
-          - project: 'functest-apex-{daily-slave}-daily-{stream1}'
+          - project: 'functest-apex-{daily-slave}-daily-{stream}'
             predefined-parameters:
               DEPLOY_SCENARIO=os-odl_l2-fdio-noha
             block: true
                 failure-threshold: 'never'
                 unstable-threshold: 'FAILURE'
         - trigger-builds:
-          - project: 'yardstick-apex-{slave}-daily-{stream1}'
+          - project: 'yardstick-apex-{slave}-daily-{stream}'
             predefined-parameters:
               DEPLOY_SCENARIO=os-odl_l2-fdio-noha
             block: true
                 failure-threshold: 'never'
                 unstable-threshold: 'FAILURE'
         - trigger-builds:
-          - project: 'apex-deploy-baremetal-os-nosdn-fdio-noha-{stream1}'
+          - project: 'apex-deploy-baremetal-os-nosdn-fdio-noha-{stream}'
             predefined-parameters: |
-              BUILD_DIRECTORY=apex-build-{stream1}/build
+              BUILD_DIRECTORY=apex-build-{stream}/build
               OPNFV_CLEAN=yes
             git-revision: true
             same-node: true
                 build-step-failure-threshold: 'never'
             block: true
         - trigger-builds:
-          - project: 'functest-apex-{daily-slave}-daily-{stream1}'
+          - project: 'functest-apex-{daily-slave}-daily-{stream}'
             predefined-parameters:
               DEPLOY_SCENARIO=os-nosdn-fdio-noha
             block: true
                 failure-threshold: 'never'
                 unstable-threshold: 'FAILURE'
         - trigger-builds:
-          - project: 'yardstick-apex-{slave}-daily-{stream1}'
+          - project: 'yardstick-apex-{slave}-daily-{stream}'
             predefined-parameters:
               DEPLOY_SCENARIO=os-nosdn-fdio-noha
             block: true
                 failure-threshold: 'never'
                 unstable-threshold: 'FAILURE'
         - trigger-builds:
-          - project: 'apex-deploy-baremetal-os-nosdn-fdio-noha-{stream1}'
+          - project: 'apex-deploy-virtual-os-nosdn-nofeature-ha-ipv6-{stream}'
             predefined-parameters: |
-              BUILD_DIRECTORY=apex-build-{stream1}/build
+              BUILD_DIRECTORY=apex-build-{stream}/build
               OPNFV_CLEAN=yes
             git-revision: true
             same-node: true
                 build-step-failure-threshold: 'never'
             block: true
         - trigger-builds:
-          - project: 'functest-apex-{daily-slave}-daily-{stream1}'
-            predefined-parameters:
-              DEPLOY_SCENARIO=os-nosdn-fdio-noha
-            block: true
+          - project: 'apex-deploy-baremetal-os-nosdn-ovs-noha-{stream}'
+            predefined-parameters: |
+              BUILD_DIRECTORY=apex-build-{stream}/build
+              OPNFV_CLEAN=yes
+            git-revision: true
             same-node: true
             block-thresholds:
                 build-step-failure-threshold: 'never'
-                failure-threshold: 'never'
-                unstable-threshold: 'FAILURE'
+            block: true
         - trigger-builds:
-          - project: 'yardstick-apex-{slave}-daily-{stream1}'
+          - project: 'functest-apex-{daily-slave}-daily-{stream}'
             predefined-parameters:
-              DEPLOY_SCENARIO=os-nosdn-fdio-noha
+              DEPLOY_SCENARIO=os-nosdn-ovs-noha
             block: true
             same-node: true
             block-thresholds:
                 failure-threshold: 'never'
                 unstable-threshold: 'FAILURE'
         - trigger-builds:
-          - project: 'apex-deploy-virtual-os-nosdn-nofeature-ha-ipv6-{stream1}'
-            predefined-parameters: |
-              BUILD_DIRECTORY=apex-build-{stream1}/build
-              OPNFV_CLEAN=yes
-            git-revision: true
+          - project: 'yardstick-apex-{slave}-daily-{stream}'
+            predefined-parameters:
+              DEPLOY_SCENARIO=os-nosdn-ovs-noha
+            block: true
             same-node: true
             block-thresholds:
                 build-step-failure-threshold: 'never'
-            block: true
+                failure-threshold: 'never'
+                unstable-threshold: 'FAILURE'
 
 - job-template:
     name: 'apex-gs-clean-{stream}'
             name: GIT_BASE
             default: https://gerrit.opnfv.org/gerrit/$PROJECT
             description: "Used for overriding the GIT URL coming from Global Jenkins configuration in case if the stuff is done on none-LF HW."
+        - string:
+            name: GS_PATHNAME
+            default: '{gs-pathname}'
+            description: "Version directory where opnfv artifacts are stored in gs repository"
         - string:
             name: GS_URL
             default: artifacts.opnfv.org/$PROJECT{gs-pathname}
 - trigger:
     name: 'apex-master'
     triggers:
-        - timed: '0 3 * * *'
+        - timed: '0 0 20 8 *'
 - trigger:
-    name: 'apex-brahmaputra'
+    name: 'apex-colorado'
     triggers:
         - timed: '0 3 * * *'
 - trigger:
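
This diff collapses the duplicated per-branch job variants (stream1/stream2) into a single {stream} axis. The expansion can be sanity-checked locally with Jenkins Job Builder's test mode, which renders templates without contacting a Jenkins server (the output directory is an illustrative assumption; rendering the whole jjb/ tree pulls in the shared macros the apex templates reference):

    # Sketch: render the templates and confirm the {stream} expansion
    # now yields apex-verify-master and apex-verify-colorado.
    pip install --user jenkins-job-builder
    jenkins-jobs test jjb/ -o /tmp/rendered-jobs
    ls /tmp/rendered-jobs | grep '^apex-verify-'
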
diff --git a/jjb/armband/armband-ci-jobs.yml b/jjb/armband/armband-ci-jobs.yml
index 9d7c198..6ea73e1 100644 (file)
@@ -2,7 +2,6 @@
 - project:
     name: 'armband-ci'
     project: 'armband'
-    installer: 'fuel'
 
 #--------------------------------
 # BRANCH ANCHORS
@@ -11,8 +10,8 @@
         stream: master
         branch: '{stream}'
         gs-pathname: ''
-    brahmaputra: &brahmaputra
-        stream: brahmaputra
+    colorado: &colorado
+        stream: colorado
         branch: 'stable/{stream}'
         gs-pathname: '/{stream}'
 #--------------------------------
 #--------------------------------
     pod:
         - arm-pod1:
-            <<: *brahmaputra
+            installer: fuel
+            <<: *colorado
         - arm-pod2:
-            <<: *brahmaputra
+            installer: fuel
+            <<: *colorado
 #--------------------------------
 #        master
 #--------------------------------
     pod:
         - arm-pod1:
+            installer: fuel
             <<: *master
         - arm-pod2:
+            installer: fuel
             <<: *master
 #--------------------------------
 #       scenarios
     scenario:
         # HA scenarios
         - 'os-nosdn-nofeature-ha':
-            auto-trigger-name: 'daily-trigger-disabled'
+            auto-trigger-name: 'armband-{installer}-{scenario}-{pod}-{stream}-trigger'
         - 'os-odl_l2-nofeature-ha':
-            auto-trigger-name: 'armband-{scenario}-{pod}-{stream}-trigger'
+            auto-trigger-name: 'armband-{installer}-{scenario}-{pod}-{stream}-trigger'
         - 'os-odl_l3-nofeature-ha':
-            auto-trigger-name: 'daily-trigger-disabled'
+            auto-trigger-name: 'armband-{installer}-{scenario}-{pod}-{stream}-trigger'
         - 'os-odl_l2-bgpvpn-ha':
-            auto-trigger-name: 'daily-trigger-disabled'
+            auto-trigger-name: 'armband-{installer}-{scenario}-{pod}-{stream}-trigger'
 
         # NOHA scenarios
         - 'os-odl_l2-nofeature-noha':
-            auto-trigger-name: 'daily-trigger-disabled'
+            auto-trigger-name: 'armband-{installer}-{scenario}-{pod}-{stream}-trigger'
 
     jobs:
-        - 'armband-{scenario}-{pod}-daily-{stream}'
-        - 'armband-deploy-{pod}-daily-{stream}'
+        - 'armband-{installer}-{scenario}-{pod}-daily-{stream}'
+        - 'armband-{installer}-deploy-{pod}-daily-{stream}'
 
 ########################
 # job templates
 ########################
 - job-template:
-    name: 'armband-{scenario}-{pod}-daily-{stream}'
+    name: 'armband-{installer}-{scenario}-{pod}-daily-{stream}'
 
     concurrent: false
 
@@ -72,7 +75,7 @@
         - build-blocker:
             use-build-blocker: true
             blocking-jobs:
-                - 'armband-os-.*?-{pod}-daily-{stream}'
+                - 'armband-{installer}-os-.*?-{pod}-daily-.*'
             block-level: 'NODE'
 
     wrappers:
@@ -96,7 +99,7 @@
 
     builders:
         - trigger-builds:
-            - project: 'armband-deploy-{pod}-daily-{stream}'
+            - project: 'armband-{installer}-deploy-{pod}-daily-{stream}'
               current-parameters: false
               predefined-parameters:
                 DEPLOY_SCENARIO={scenario}
                 unstable-threshold: 'FAILURE'
 
 - job-template:
-    name: 'armband-deploy-{pod}-daily-{stream}'
+    name: 'armband-{installer}-deploy-{pod}-daily-{stream}'
 
     concurrent: false
 
         - build-blocker:
             use-build-blocker: true
             blocking-jobs:
-                - 'armband-deploy-{pod}-daily-{stream}'
-                - 'armband-deploy-generic-daily-.*'
+                - 'armband-{installer}-deploy-{pod}-daily-{stream}'
+                - 'armband-{installer}-deploy-generic-daily-.*'
             block-level: 'NODE'
 
     parameters:
 
     publishers:
         - email:
-            recipients: josep.puigdemont@enea.com armband@enea.com
+            recipients: armband@enea.com
 
 ########################
 # parameter macros
 #----------------------------------------------------------
 # Enea Armband POD 1 Triggers running against master branch
 #----------------------------------------------------------
-# No triggers for master for now
 - trigger:
-    name: 'armband-os-odl_l2-nofeature-ha-arm-pod1-master-trigger'
+    name: 'armband-fuel-os-odl_l2-nofeature-ha-arm-pod1-master-trigger'
     triggers:
-        - timed: ''
+        - timed: '0 3 * * 1,4'
+- trigger:
+    name: 'armband-fuel-os-nosdn-nofeature-ha-arm-pod1-master-trigger'
+    triggers:
+        - timed: '0 15 * * 1,4'
+- trigger:
+    name: 'armband-fuel-os-odl_l3-nofeature-ha-arm-pod1-master-trigger'
+    triggers:
+        - timed: '0 3 * * 2,5'
+- trigger:
+    name: 'armband-fuel-os-odl_l2-bgpvpn-ha-arm-pod1-master-trigger'
+    triggers:
+        - timed: '0 15 * * 2,5'
+- trigger:
+    name: 'armband-fuel-os-odl_l2-nofeature-noha-arm-pod1-master-trigger'
+    triggers:
+        - timed: '0 3 * * 3,6'
 #---------------------------------------------------------------
 # Enea Armband POD 1 Triggers running against colorado branch
 #---------------------------------------------------------------
 - trigger:
-    name: 'armband-os-odl_l2-nofeature-ha-arm-pod1-brahmaputra-trigger'
+    name: 'armband-fuel-os-odl_l2-nofeature-ha-arm-pod1-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'armband-fuel-os-nosdn-nofeature-ha-arm-pod1-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'armband-fuel-os-odl_l3-nofeature-ha-arm-pod1-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'armband-fuel-os-odl_l2-bgpvpn-ha-arm-pod1-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'armband-fuel-os-odl_l2-nofeature-noha-arm-pod1-colorado-trigger'
     triggers:
         - timed: ''
 #----------------------------------------------------------
 #----------------------------------------------------------
 # No triggers for master for now
 - trigger:
-    name: 'armband-os-odl_l2-nofeature-ha-arm-pod2-master-trigger'
+    name: 'armband-fuel-os-odl_l2-nofeature-ha-arm-pod2-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'armband-fuel-os-nosdn-nofeature-ha-arm-pod2-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'armband-fuel-os-odl_l3-nofeature-ha-arm-pod2-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'armband-fuel-os-odl_l2-bgpvpn-ha-arm-pod2-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'armband-fuel-os-odl_l2-nofeature-noha-arm-pod2-master-trigger'
     triggers:
         - timed: ''
 #---------------------------------------------------------------
 # Enea Armband POD 2 Triggers running against colorado branch
 #---------------------------------------------------------------
 - trigger:
-    name: 'armband-os-odl_l2-nofeature-ha-arm-pod2-brahmaputra-trigger'
+    name: 'armband-fuel-os-odl_l2-nofeature-ha-arm-pod2-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'armband-fuel-os-nosdn-nofeature-ha-arm-pod2-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'armband-fuel-os-odl_l3-nofeature-ha-arm-pod2-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'armband-fuel-os-odl_l2-bgpvpn-ha-arm-pod2-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'armband-fuel-os-odl_l2-nofeature-noha-arm-pod2-colorado-trigger'
     triggers:
         - timed: ''
diff --git a/jjb/armband/armband-download-artifact.sh b/jjb/armband/armband-download-artifact.sh
index 7d01c09..ed7897b 100755 (executable)
@@ -10,6 +10,8 @@
 set -o errexit
 set -o pipefail
 
+echo "Host info: $(hostname) $(hostname -I)"
+
 # Configurable environment variables:
 # ISOSTORE (/iso_mount/opnfv_ci)
 
diff --git a/jjb/armband/armband-project-jobs.yml b/jjb/armband/armband-project-jobs.yml
index 764a5d4..10f8d65 100644 (file)
@@ -7,15 +7,17 @@
 
     project: '{name}'
 
+    installer: 'fuel'
+
     jobs:
         - 'armband-verify-{stream}'
-        - 'armband-build-daily-{stream}'
+        - 'armband-{installer}-build-daily-{stream}'
 
     stream:
         - master:
             branch: '{stream}'
             gs-pathname: ''
-        - brahmaputra:
+        - colorado:
             branch: 'stable/{stream}'
             gs-pathname: '/{stream}'
 
@@ -62,7 +64,7 @@
             echo "Nothing to verify!"
 
 - job-template:
-    name: 'armband-build-daily-{stream}'
+    name: 'armband-{installer}-build-daily-{stream}'
 
     concurrent: false
 
@@ -77,6 +79,7 @@
         - project-parameter:
             project: '{project}'
         - 'opnfv-build-arm-defaults'
+        - '{installer}-defaults'
         - armband-project-parameter:
             gs-pathname: '{gs-pathname}'
 
 
     publishers:
         - email:
-            recipients: josep.puigdemont@enea.com armband@enea.com
+            recipients: armband@enea.com
 
 ########################
 # parameter macros
             name: BUILD_DIRECTORY
             default: $WORKSPACE/build_output
             description: "Directory where the build artifact will be located upon the completion of the build."
+        - string:
+            name: CACHE_DIRECTORY
+            default: $HOME/opnfv/cache/$INSTALLER_TYPE
+            description: "Directory where the cache to be used during the build is located."
         - string:
             name: GS_URL
             default: artifacts.opnfv.org/$PROJECT{gs-pathname}
diff --git a/jjb/armband/build.sh b/jjb/armband/build.sh
index 81917f6..300306f 100755 (executable)
@@ -1,6 +1,8 @@
 #!/bin/bash
+# SPDX-license-identifier: Apache-2.0
 ##############################################################################
 # Copyright (c) 2016 Ericsson AB and others.
+# Copyright (c) 2016 Enea AB.
 # All rights reserved. This program and the accompanying materials
 # are made available under the terms of the Apache License, Version 2.0
 # which accompanies this distribution, and is available at
@@ -10,16 +12,58 @@ set -o errexit
 set -o nounset
 set -o pipefail
 
+echo "Host info: $(hostname) $(hostname -I)"
+
 cd $WORKSPACE
 
+# Armband requires initializing git submodules (e.g. for Fuel's clean_cache.sh)
+make submodules-init
+
+# remove the expired items from cache
+test -f $WORKSPACE/ci/clean_cache.sh && $WORKSPACE/ci/clean_cache.sh $CACHE_DIRECTORY
+
+LATEST_ISO_PROPERTIES=$WORKSPACE/latest.iso.properties
+if [[ "$JOB_NAME" =~ "daily" ]]; then
+    # check to see if we already have an artifact on artifacts.opnfv.org
+    # for this commit during daily builds
+    echo "Checking to see if we already built and stored Armband Fuel ISO for this commit"
+
+    curl -s -o $LATEST_ISO_PROPERTIES http://$GS_URL/latest.properties 2>/dev/null
+fi
+
+# get metadata of latest ISO
+if grep -q OPNFV_GIT_SHA1 $LATEST_ISO_PROPERTIES 2>/dev/null; then
+    LATEST_ISO_SHA1=$(grep OPNFV_GIT_SHA1 $LATEST_ISO_PROPERTIES | cut -d'=' -f2)
+    LATEST_ISO_URL=$(grep OPNFV_ARTIFACT_URL $LATEST_ISO_PROPERTIES | cut -d'=' -f2)
+else
+    LATEST_ISO_SHA1=none
+fi
+
 # get current SHA1
 CURRENT_SHA1=$(git rev-parse HEAD)
 
+# set FORCE_BUILD to false for non-daily builds
+FORCE_BUILD=${FORCE_BUILD:-false}
+
+if [[ "$CURRENT_SHA1" == "$LATEST_ISO_SHA1" && "$FORCE_BUILD" == "false" ]]; then
+    echo "***************************************************"
+    echo "   An ISO has already been built for this commit"
+    echo "   $LATEST_ISO_URL"
+    echo "***************************************************"
+else
+    echo "This commit has not been built yet or forced build! Proceeding with the build."
+    /bin/rm -f $LATEST_ISO_PROPERTIES
+    echo
+fi
+
 # log info to console
-echo "Starting the build of Armband. This could take some time..."
+echo "Starting the build of Armband $INSTALLER_TYPE. This could take some time..."
 echo "-----------------------------------------------------------"
 echo
 
+# create the cache directory if it doesn't exist
+mkdir -p $CACHE_DIRECTORY
+
 # set OPNFV_ARTIFACT_VERSION
 if [[ "$JOB_NAME" =~ "merge" ]]; then
     echo "Building Fuel ISO for a merged change"
@@ -39,7 +83,7 @@ NOCACHE_ARG=${NOCACHE_ARG:-}
 
 # start the build
 cd $WORKSPACE/ci
-./build.sh $BUILD_DIRECTORY
+./build.sh -v $OPNFV_ARTIFACT_VERSION $NOCACHE_ARG -c file://$CACHE_DIRECTORY $BUILD_DIRECTORY
 
 # list the build artifacts
 ls -al $BUILD_DIRECTORY
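
Note that the rebuild is skipped only when HEAD matches the SHA1 recorded in the published latest.properties and no rebuild was forced, so forcing a rebuild of an already-published commit reduces to setting the variable the script defaults to "false". A sketch, assuming the usual Jenkins environment (WORKSPACE, GS_URL and friends) is already in place:

    # Sketch: rebuild even though latest.properties already records an ISO
    # for the current commit. FORCE_BUILD is the variable defaulted above;
    # the invocation path is an assumption.
    FORCE_BUILD=true ./jjb/armband/build.sh
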
diff --git a/jjb/availability/availability.yml b/jjb/availability/availability.yml
index 0d887e7..c42efff 100644 (file)
         - master:
             branch: '{stream}'
             gs-pathname: ''
-        - brahmaputra:
+            disabled: 'false'
+        - colorado:
             branch: 'stable/{stream}'
             gs-pathname: '/{stream}'
+            disabled: 'false'
 
 - job-template:
     name: 'availability-verify-{stream}'
 
+    disabled: '{obj:disabled}'
+
     parameters:
         - project-parameter:
             project: '{project}'
diff --git a/jjb/bottlenecks/bottlenecks-ci-jobs.yml b/jjb/bottlenecks/bottlenecks-ci-jobs.yml
index 686b7d4..4bc56ab 100644 (file)
@@ -18,8 +18,8 @@
         gs-packagepath: '/{suite}'
         #docker tag used for version control
         docker-tag: 'latest'
-    brahmaputra: &brahmaputra
-        stream: brahmaputra
+    colorado: &colorado
+        stream: colorado
         branch: 'stable/{stream}'
         gs-pathname: '/{stream}'
         gs-packagepath: '/{stream}/{suite}'
             slave-label: compass-baremetal
             installer: compass
             auto-trigger-name: 'daily-trigger-disabled'
-            <<: *brahmaputra
+            <<: *colorado
         - virtual:
             slave-label: compass-virtual
             installer: compass
             auto-trigger-name: 'daily-trigger-disabled'
-            <<: *brahmaputra
+            <<: *colorado
 
 #--------------------------------
 #        None-CI PODs
 #--------------------------------
-        - orange-pod2:
-            slave-label: '{pod}'
-            installer: joid
-            auto-trigger-name: 'daily-trigger-disabled'
-            <<: *brahmaputra
-        - orange-pod2:
-            slave-label: '{pod}'
-            installer: joid
-            auto-trigger-name: 'daily-trigger-disabled'
-            <<: *master
+       # - orange-pod2:
+       #     slave-label: '{pod}'
+       #     installer: joid
+       #     auto-trigger-name: 'daily-trigger-disabled'
+       #     <<: *colorado
+       # - orange-pod2:
+       #     slave-label: '{pod}'
+       #     installer: joid
+       #     auto-trigger-name: 'daily-trigger-disabled'
+       #     <<: *master
         - huawei-pod2:
             slave-label: '{pod}'
             installer: compass
diff --git a/jjb/bottlenecks/bottlenecks-project-jobs.yml b/jjb/bottlenecks/bottlenecks-project-jobs.yml
index 28b49bc..fffc22d 100644 (file)
             gs-pathname: ''
             #This is used for different test suite dependent packages storage
             gs-packagepath: '/{suite}'
-        - brahmaputra:
+            disabled: false
+        - colorado:
             branch: 'stable/{stream}'
             gs-pathname: '/{stream}'
             gs-packagepath: '/{stream}/{suite}'
+            disabled: false
 
     suite:
         - 'rubbos'
@@ -35,6 +37,8 @@
 - job-template:
     name: 'bottlenecks-verify-{stream}'
 
+    disabled: '{obj:disabled}'
+
     parameters:
         - project-parameter:
             project: '{project}'
@@ -74,6 +78,8 @@
 - job-template:
     name: 'bottlenecks-merge-{stream}'
 
+    disabled: '{obj:disabled}'
+
     parameters:
         - project-parameter:
             project: '{project}'
 - job-template:
     name: 'bottlenecks-{suite}-upload-artifacts-{stream}'
 
+
+    disabled: '{obj:disabled}'
+
     concurrent: true
 
     properties:
diff --git a/jjb/compass4nfv/compass-build.sh b/jjb/compass4nfv/compass-build.sh
index d08c39c..093debb 100644 (file)
@@ -35,7 +35,7 @@ ls -al $BUILD_DIRECTORY
     echo "OPNFV_GIT_URL=$(git config --get remote.origin.url)"
     echo "OPNFV_GIT_SHA1=$(git rev-parse HEAD)"
     echo "OPNFV_ARTIFACT_URL=$GS_URL/opnfv-$OPNFV_ARTIFACT_VERSION.iso"
-    echo "OPNFV_ARTIFACT_MD5SUM=$(md5sum $BUILD_DIRECTORY/compass.iso | cut -d' ' -f1)"
+    echo "OPNFV_ARTIFACT_SHA512SUM=$(sha512sum $BUILD_DIRECTORY/compass.iso | cut -d' ' -f1)"
     echo "OPNFV_BUILD_URL=$BUILD_URL"
 ) > $BUILD_DIRECTORY/opnfv.properties
 echo
diff --git a/jjb/compass4nfv/compass-ci-jobs.yml b/jjb/compass4nfv/compass-ci-jobs.yml
index bb24fdf..426e597 100644 (file)
@@ -10,8 +10,8 @@
         stream: master
         branch: '{stream}'
         gs-pathname: ''
-    brahmaputra: &brahmaputra
-        stream: brahmaputra
+    colorado: &colorado
+        stream: colorado
         branch: 'stable/{stream}'
         gs-pathname: '/{stream}'
 #--------------------------------
         - baremetal:
             slave-label: compass-baremetal
             os-version: 'trusty'
-            <<: *brahmaputra
+            <<: *colorado
         - virtual:
             slave-label: compass-virtual
             os-version: 'trusty'
-            <<: *brahmaputra
+            <<: *colorado
 #--------------------------------
 #        master
 #--------------------------------
         - 'os-nosdn-nofeature-ha':
             disabled: false
             auto-trigger-name: 'compass-{scenario}-{pod}-{stream}-trigger'
+            openstack-os-version: ''
         - 'os-odl_l2-nofeature-ha':
             disabled: false
             auto-trigger-name: 'compass-{scenario}-{pod}-{stream}-trigger'
+            openstack-os-version: ''
         - 'os-odl_l3-nofeature-ha':
             disabled: false
             auto-trigger-name: 'compass-{scenario}-{pod}-{stream}-trigger'
+            openstack-os-version: ''
         - 'os-onos-nofeature-ha':
             disabled: false
             auto-trigger-name: 'compass-{scenario}-{pod}-{stream}-trigger'
+            openstack-os-version: ''
         - 'os-ocl-nofeature-ha':
             disabled: false
             auto-trigger-name: 'compass-{scenario}-{pod}-{stream}-trigger'
+            openstack-os-version: ''
         - 'os-onos-sfc-ha':
             disabled: false
             auto-trigger-name: 'compass-{scenario}-{pod}-{stream}-trigger'
+            openstack-os-version: ''
         - 'os-odl_l2-moon-ha':
             disabled: false
             auto-trigger-name: 'compass-{scenario}-{pod}-{stream}-trigger'
+            openstack-os-version: 'xenial'
         - 'os-nosdn-kvm-ha':
             disabled: false
             auto-trigger-name: 'compass-{scenario}-{pod}-{stream}-trigger'
+            openstack-os-version: ''
 
     jobs:
         - 'compass-{scenario}-{pod}-daily-{stream}'
               predefined-parameters: |
                 DEPLOY_SCENARIO={scenario}
                 COMPASS_OS_VERSION={os-version}
+                COMPASS_OS_VERSION_OPTION={openstack-os-version}
               same-node: true
               block: true
         - trigger-builds:
 - trigger:
     name: 'compass-os-nosdn-nofeature-ha-huawei-pod2-master-trigger'
     triggers:
-        - timed: '0 3 * * *'
+        - timed: '0 19 * * *'
 - trigger:
     name: 'compass-os-odl_l2-nofeature-ha-huawei-pod2-master-trigger'
     triggers:
 - trigger:
     name: 'compass-os-odl_l3-nofeature-ha-huawei-pod2-master-trigger'
     triggers:
-        - timed: '0 19 * * *'
+        - timed: '0 15 * * *'
 - trigger:
     name: 'compass-os-onos-nofeature-ha-huawei-pod2-master-trigger'
     triggers:
-        - timed: '0 15 * * *'
+        - timed: ''
 - trigger:
     name: 'compass-os-ocl-nofeature-ha-huawei-pod2-master-trigger'
     triggers:
 - trigger:
     name: 'compass-os-onos-sfc-ha-huawei-pod2-master-trigger'
     triggers:
-        - timed: '0 7 * * *'
+        - timed: ''
 - trigger:
     name: 'compass-os-odl_l2-moon-ha-huawei-pod2-master-trigger'
     triggers:
 - trigger:
     name: 'compass-os-nosdn-nofeature-ha-baremetal-master-trigger'
     triggers:
-        - timed: '0 3 * * *'
+        - timed: '0 2 * * *'
 - trigger:
     name: 'compass-os-odl_l2-nofeature-ha-baremetal-master-trigger'
     triggers:
-        - timed: '0 23 * * *'
+        - timed: '0 22 * * *'
 - trigger:
     name: 'compass-os-odl_l3-nofeature-ha-baremetal-master-trigger'
     triggers:
-        - timed: '0 19 * * *'
+        - timed: '0 18 * * *'
 - trigger:
     name: 'compass-os-onos-nofeature-ha-baremetal-master-trigger'
     triggers:
-        - timed: '0 15 * * *'
+        - timed: '0 14 * * *'
 - trigger:
     name: 'compass-os-ocl-nofeature-ha-baremetal-master-trigger'
     triggers:
-        - timed: '0 11 * * *'
+        - timed: '0 10 * * *'
 - trigger:
     name: 'compass-os-onos-sfc-ha-baremetal-master-trigger'
     triggers:
-        - timed: '0 7 * * *'
+        - timed: '0 6 * * *'
 - trigger:
     name: 'compass-os-odl_l2-moon-ha-baremetal-master-trigger'
     triggers:
         - timed: ''
 
 - trigger:
-    name: 'compass-os-nosdn-nofeature-ha-baremetal-brahmaputra-trigger'
+    name: 'compass-os-nosdn-nofeature-ha-baremetal-colorado-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'compass-os-odl_l2-nofeature-ha-baremetal-brahmaputra-trigger'
+    name: 'compass-os-odl_l2-nofeature-ha-baremetal-colorado-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'compass-os-odl_l3-nofeature-ha-baremetal-brahmaputra-trigger'
+    name: 'compass-os-odl_l3-nofeature-ha-baremetal-colorado-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'compass-os-onos-nofeature-ha-baremetal-brahmaputra-trigger'
+    name: 'compass-os-onos-nofeature-ha-baremetal-colorado-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'compass-os-ocl-nofeature-ha-baremetal-brahmaputra-trigger'
+    name: 'compass-os-ocl-nofeature-ha-baremetal-colorado-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'compass-os-onos-sfc-ha-baremetal-brahmaputra-trigger'
+    name: 'compass-os-onos-sfc-ha-baremetal-colorado-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'compass-os-odl_l2-moon-ha-baremetal-brahmaputra-trigger'
+    name: 'compass-os-odl_l2-moon-ha-baremetal-colorado-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'compass-os-nosdn-kvm-ha-baremetal-brahmaputra-trigger'
+    name: 'compass-os-nosdn-kvm-ha-baremetal-colorado-trigger'
     triggers:
         - timed: ''
 
 - trigger:
     name: 'compass-os-nosdn-nofeature-ha-virtual-master-trigger'
     triggers:
-        - timed: '0 3 * * *'
+        - timed: '0 21 * * *'
 - trigger:
     name: 'compass-os-odl_l2-nofeature-ha-virtual-master-trigger'
     triggers:
-        - timed: '0 23 * * *'
+        - timed: '0 20 * * *'
 - trigger:
     name: 'compass-os-odl_l3-nofeature-ha-virtual-master-trigger'
     triggers:
 - trigger:
     name: 'compass-os-onos-nofeature-ha-virtual-master-trigger'
     triggers:
-        - timed: '0 15 * * *'
+        - timed: '0 18 * * *'
 - trigger:
     name: 'compass-os-ocl-nofeature-ha-virtual-master-trigger'
     triggers:
-        - timed: '0 11 * * *'
+        - timed: '0 16 * * *'
 - trigger:
     name: 'compass-os-onos-sfc-ha-virtual-master-trigger'
     triggers:
-        - timed: '0 7 * * *'
+        - timed: '0 15 * * *'
 - trigger:
     name: 'compass-os-odl_l2-moon-ha-virtual-master-trigger'
     triggers:
-        - timed: ''
+        - timed: '0 14 * * *'
 - trigger:
     name: 'compass-os-nosdn-kvm-ha-virtual-master-trigger'
     triggers:
         - timed: ''
 
 - trigger:
-    name: 'compass-os-nosdn-nofeature-ha-virtual-brahmaputra-trigger'
+    name: 'compass-os-nosdn-nofeature-ha-virtual-colorado-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'compass-os-odl_l2-nofeature-ha-virtual-brahmaputra-trigger'
+    name: 'compass-os-odl_l2-nofeature-ha-virtual-colorado-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'compass-os-odl_l3-nofeature-ha-virtual-brahmaputra-trigger'
+    name: 'compass-os-odl_l3-nofeature-ha-virtual-colorado-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'compass-os-onos-nofeature-ha-virtual-brahmaputra-trigger'
+    name: 'compass-os-onos-nofeature-ha-virtual-colorado-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'compass-os-ocl-nofeature-ha-virtual-brahmaputra-trigger'
+    name: 'compass-os-ocl-nofeature-ha-virtual-colorado-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'compass-os-onos-sfc-ha-virtual-brahmaputra-trigger'
+    name: 'compass-os-onos-sfc-ha-virtual-colorado-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'compass-os-odl_l2-moon-ha-virtual-brahmaputra-trigger'
+    name: 'compass-os-odl_l2-moon-ha-virtual-colorado-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'compass-os-nosdn-kvm-ha-virtual-brahmaputra-trigger'
+    name: 'compass-os-nosdn-kvm-ha-virtual-colorado-trigger'
     triggers:
         - timed: ''
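
Note: the `timed` values above are standard Jenkins cron expressions (minute hour day-of-month month day-of-week), evaluated in the Jenkins master's timezone; an empty string disables the schedule entirely. A minimal decode of one entry:

    # field order: minute hour day-of-month month day-of-week
    # '0 19 * * *'  -> fires once a day at 19:00
    # ''            -> no timer; the job runs only when started
    #                  manually or by an upstream job
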
index d713164..65e44b6 100644 (file)
@@ -25,8 +25,18 @@ echo 1 > /proc/sys/vm/drop_caches
 export CONFDIR=$WORKSPACE/deploy/conf
 export ISO_URL=file://$BUILD_DIRECTORY/compass.iso
 
+cd $WORKSPACE
+
+export OS_VERSION=${COMPASS_OS_VERSION}
+export OPENSTACK_VERSION=${COMPASS_OPENSTACK_VERSION}
+if [[ "${COMPASS_OS_VERSION_OPTION}" = "xenial" ]] && [[ "${OPENSTACK_VERSION}" = "mitaka" ]]; then
+    export OPENSTACK_VERSION=${OPENSTACK_VERSION}_${COMPASS_OS_VERSION_OPTION}
+    export OS_VERSION=${COMPASS_OS_VERSION_OPTION}
+fi
+
 if [[ "${DEPLOY_SCENARIO}" =~ "-ocl" ]]; then
     export NETWORK_CONF_FILE=network_ocl.yml
+    export OPENSTACK_VERSION=liberty
 elif [[ "${DEPLOY_SCENARIO}" =~ "-onos" ]]; then
     export NETWORK_CONF_FILE=network_onos.yml
 else
@@ -42,14 +52,6 @@ else
     export DHA_CONF=$CONFDIR/hardware_environment/$NODE_NAME/${DEPLOY_SCENARIO}.yml
 fi
 
-cd $WORKSPACE
-
-export OS_VERSION=${COMPASS_OS_VERSION}
-export OPENSTACK_VERSION=${COMPASS_OPENSTACK_VERSION}
-if [[ "${COMPASS_OS_VERSION_OPTION}" = "xenial" ]] && [[ "${OPENSTACK_VERSION}" = "mitaka" ]]; then
-    export OPENSTACK_VERSION=${OPENSTACK_VERSION}_${COMPASS_OS_VERSION_OPTION}
-    export OS_VERSION=${COMPASS_OS_VERSION_OPTION}
-fi
 ./deploy.sh --dha ${DHA_CONF} --network ${NETWORK_CONF}
 if [ $? -ne 0 ]; then
     echo "depolyment failed!"
index 83cc059..fc5db23 100644 (file)
@@ -6,12 +6,12 @@ set -o pipefail
 # make ppa
 cd $WORKSPACE/
 ./build/make_repo.sh
-# calc MD5 of ppa
+# calc SHA512 of ppa
 cd $PPA_CACHE
 for i in $(find *.gz *.iso *.img -type f)
 do
-    md5=$(md5sum $i | cut -d ' ' -f1)
-    echo $md5 > $i.md5
+    sha512=$(sha512sum $i | cut -d ' ' -f1)
+    echo $sha512 > $i.sha512
     curl -T $i $PPA_REPO
-    curl -T $i.md5 $PPA_REPO
-done
\ No newline at end of file
+    curl -T $i.sha512 $PPA_REPO
+done
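
Note: the `.sha512` files above store only the bare digest, not the two-column `sha512sum` output, so `sha512sum -c` cannot consume them directly. A hedged verification sketch (the download URL shape is assumed from the `curl -T ... $PPA_REPO` upload target; the artifact name is hypothetical):

    f=compass.tar.gz                                      # hypothetical artifact
    curl -sO $PPA_REPO/$f && curl -sO $PPA_REPO/$f.sha512
    [ "$(sha512sum $f | cut -d ' ' -f1)" = "$(cat $f.sha512)" ] \
        && echo "checksum OK" || echo "checksum MISMATCH"
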
index e92c765..bede7de 100644 (file)
@@ -12,7 +12,7 @@
         - master:
             branch: '{stream}'
             gs-pathname: ''
-        - brahmaputra:
+        - colorado:
             branch: 'stable/{stream}'
             gs-pathname: '/{stream}'
 
index 247f4f2..a5f556a 100644 (file)
         - master:
             branch: '{stream}'
             gs-pathname: ''
-        - brahmaputra:
+            disabled: false
+        - colorado:
             branch: 'stable/{stream}'
             gs-pathname: '/{stream}'
+            disabled: false
 
 - job-template:
     name: 'conductor-verify-{stream}'
 
+    disabled: '{obj:disabled}'
+
     parameters:
         - project-parameter:
             project: '{project}'
index 8aad3f2..b504578 100644 (file)
         - master:
             branch: '{stream}'
             gs-pathname: ''
-        - brahmaputra:
+            disabled: false
+        - colorado:
             branch: 'stable/{stream}'
             gs-pathname: '/{stream}'
+            disabled: false
 
 - job-template:
     name: 'copper-verify-{stream}'
 
+    disabled: '{obj:disabled}'
+
     parameters:
         - project-parameter:
             project: '{project}'
index cf9f643..3b407ef 100644 (file)
         - master:
             branch: '{stream}'
             gs-pathname: ''
-        - brahmaputra:
+            disabled: false
+        - colorado:
             branch: 'stable/{stream}'
             gs-pathname: '/{stream}'
+            disabled: false
 
 - job-template:
     name: 'doctor-verify-{stream}'
 
+    disabled: '{obj:disabled}'
+
     parameters:
         - project-parameter:
             project: '{project}'
index 29e171b..7cee984 100644 (file)
         - master:
             branch: '{stream}'
             gs-pathname: ''
+            disabled: false
+        - colorado:
+            branch: 'stable/{stream}'
+            gs-pathname: '/{stream}'
+            disabled: false
 
 - job-template:
     name: 'domino-verify-{stream}'
 
+    disabled: '{obj:disabled}'
+
     parameters:
         - project-parameter:
             project: '{project}'
index 9d788ba..c660af5 100644 (file)
         - master:
             branch: '{stream}'
             gs-pathname: ''
-        - brahmaputra:
+            disabled: false
+        - colorado:
             branch: 'stable/{stream}'
             gs-pathname: '/{stream}'
+            disabled: false
 
 - job-template:
     name: 'dpacc-verify-{stream}'
 
+    disabled: '{obj:disabled}'
+
     parameters:
         - project-parameter:
             project: '{project}'
index 40549e3..504e07f 100644 (file)
             branch: '{stream}'
             gs-pathname: ''
             disabled: false
-        - brahmaputra:
+        - colorado:
             branch: '{stream}'
-            gs-pathname: ''
-            disabled: true
+            gs-pathname: '/{stream}'
+            disabled: false
 
 - job-template:
     name: 'fastpathmetrics-verify-{stream}'
index 662a1f4..7e36a0c 100755 (executable)
@@ -87,7 +87,7 @@ ls -al $BUILD_DIRECTORY
     echo "OPNFV_GIT_URL=$(git config --get remote.origin.url)"
     echo "OPNFV_GIT_SHA1=$(git rev-parse HEAD)"
     echo "OPNFV_ARTIFACT_URL=$GS_URL/opnfv-$OPNFV_ARTIFACT_VERSION.iso"
-    echo "OPNFV_ARTIFACT_MD5SUM=$(md5sum $BUILD_DIRECTORY/opnfv-$OPNFV_ARTIFACT_VERSION.iso | cut -d' ' -f1)"
+    echo "OPNFV_ARTIFACT_SHA512SUM=$(sha512sum $BUILD_DIRECTORY/opnfv-$OPNFV_ARTIFACT_VERSION.iso | cut -d' ' -f1)"
     echo "OPNFV_BUILD_URL=$BUILD_URL"
 ) > $WORKSPACE/opnfv.properties
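
Note: downstream jobs consume `opnfv.properties` as shell; a hedged sketch of how a consumer could verify a downloaded ISO against the new SHA512 field (assumes `OPNFV_ARTIFACT_VERSION` is also recorded in the file, as the URL line above implies):

    source $WORKSPACE/opnfv.properties
    curl -sLO http://$OPNFV_ARTIFACT_URL
    iso=opnfv-$OPNFV_ARTIFACT_VERSION.iso
    [ "$(sha512sum $iso | cut -d' ' -f1)" = "$OPNFV_ARTIFACT_SHA512SUM" ] \
        || { echo "ISO checksum mismatch"; exit 1; }
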
 
index b9e201d..0d31c99 100644 (file)
     master: &master
         stream: master
         branch: '{stream}'
+        disabled: false
         gs-pathname: ''
+    colorado: &colorado
+        stream: colorado
+        branch: 'stable/{stream}'
+        disabled: false
+        gs-pathname: '/{stream}'
 #--------------------------------
 # POD, INSTALLER, AND BRANCH MAPPING
 #--------------------------------
         - virtual:
             slave-label: fuel-virtual
             <<: *master
+        - baremetal:
+            slave-label: fuel-baremetal
+            <<: *colorado
+        - virtual:
+            slave-label: fuel-virtual
+            <<: *colorado
 #--------------------------------
 #        Non-CI PODs
 #--------------------------------
         - zte-pod1:
             slave-label: zte-pod1
             <<: *master
+        - zte-pod2:
+            slave-label: zte-pod2
+            <<: *master
         - zte-pod3:
             slave-label: zte-pod3
             <<: *master
 - job-template:
     name: 'fuel-{scenario}-{pod}-daily-{stream}'
 
+    disabled: '{obj:disabled}'
+
     concurrent: false
 
     properties:
         - build-blocker:
             use-build-blocker: true
             blocking-jobs:
-                - 'fuel-os-.*?-{pod}-daily-{stream}'
+                - 'fuel-os-.*?-{pod}-daily-.*'
             block-level: 'NODE'
 
     wrappers:
               same-node: true
               block: true
         - trigger-builds:
-            - project: 'yardstick-fuel-{pod}-daily-{stream}'
+            - project: 'functest-fuel-{pod}-daily-{stream}'
               current-parameters: false
               predefined-parameters:
                 DEPLOY_SCENARIO={scenario}
                 failure-threshold: 'never'
                 unstable-threshold: 'FAILURE'
         - trigger-builds:
-            - project: 'functest-fuel-{pod}-daily-{stream}'
+            - project: 'yardstick-fuel-{pod}-daily-{stream}'
               current-parameters: false
               predefined-parameters:
                 DEPLOY_SCENARIO={scenario}
 - job-template:
     name: 'fuel-deploy-{pod}-daily-{stream}'
 
+    disabled: '{obj:disabled}'
+
     concurrent: true
 
     properties:
         - build-blocker:
             use-build-blocker: true
             blocking-jobs:
-                - 'fuel-deploy-{pod}-daily-{stream}'
+                - 'fuel-deploy-{pod}-daily-.*'
                 - 'fuel-deploy-generic-daily-.*'
             block-level: 'NODE'
 
     wrappers:
         - build-name:
             name: '$BUILD_NUMBER - Scenario: $DEPLOY_SCENARIO'
+        - timeout:
+            timeout: 150
+            abort: true
 
     builders:
         - description-setter:
 ########################
 # trigger macros
 ########################
-# os-nosdn-nofeature-ha trigger
-# CI PODs
 #-----------------------------------------------
 # Triggers for job running on fuel-baremetal against master branch
 #-----------------------------------------------
-
 # HA Scenarios
 - trigger:
     name: 'fuel-os-nosdn-nofeature-ha-baremetal-daily-master-trigger'
     triggers:
-        - timed: '0 20 * * *'
+        - timed: '5 20 * * *'
 - trigger:
     name: 'fuel-os-odl_l2-nofeature-ha-baremetal-daily-master-trigger'
     triggers:
-        - timed: '0 23 * * *'
+        - timed: '5 23 * * *'
 - trigger:
     name: 'fuel-os-odl_l3-nofeature-ha-baremetal-daily-master-trigger'
     triggers:
-        - timed: '0 2 * * *'
+        - timed: '5 2 * * *'
 - trigger:
     name: 'fuel-os-onos-sfc-ha-baremetal-daily-master-trigger'
     triggers:
-        - timed: '0 5 * * *'
+        - timed: '5 5 * * *'
 - trigger:
     name: 'fuel-os-onos-nofeature-ha-baremetal-daily-master-trigger'
     triggers:
-        - timed: '0 8 * * *'
+        - timed: '5 8 * * *'
 - trigger:
     name: 'fuel-os-odl_l2-sfc-ha-baremetal-daily-master-trigger'
     triggers:
-        - timed: '0 11 * * *'
+        - timed: '5 11 * * *'
 - trigger:
     name: 'fuel-os-odl_l2-bgpvpn-ha-baremetal-daily-master-trigger'
     triggers:
-        - timed: '0 14 * * *'
+        - timed: '5 14 * * *'
 - trigger:
     name: 'fuel-os-nosdn-kvm-ha-baremetal-daily-master-trigger'
     triggers:
-        - timed: '0 17 * * *'
+        - timed: '5 17 * * *'
 - trigger:
     name: 'fuel-os-nosdn-ovs-ha-baremetal-daily-master-trigger'
     triggers:
-        - timed: '0 20 * * *'
+        - timed: '5 20 * * *'
 
 # NOHA Scenarios
 - trigger:
     triggers:
         - timed: ''
 #-----------------------------------------------
+# Triggers for job running on fuel-baremetal against colorado branch
+#-----------------------------------------------
+# HA Scenarios
+- trigger:
+    name: 'fuel-os-nosdn-nofeature-ha-baremetal-daily-colorado-trigger'
+    triggers:
+        - timed: '0 20 * * *'
+- trigger:
+    name: 'fuel-os-odl_l2-nofeature-ha-baremetal-daily-colorado-trigger'
+    triggers:
+        - timed: '0 23 * * *'
+- trigger:
+    name: 'fuel-os-odl_l3-nofeature-ha-baremetal-daily-colorado-trigger'
+    triggers:
+        - timed: '0 2 * * *'
+- trigger:
+    name: 'fuel-os-onos-sfc-ha-baremetal-daily-colorado-trigger'
+    triggers:
+        - timed: '0 5 * * *'
+- trigger:
+    name: 'fuel-os-onos-nofeature-ha-baremetal-daily-colorado-trigger'
+    triggers:
+        - timed: '0 8 * * *'
+- trigger:
+    name: 'fuel-os-odl_l2-sfc-ha-baremetal-daily-colorado-trigger'
+    triggers:
+        - timed: '0 11 * * *'
+- trigger:
+    name: 'fuel-os-odl_l2-bgpvpn-ha-baremetal-daily-colorado-trigger'
+    triggers:
+        - timed: '0 14 * * *'
+- trigger:
+    name: 'fuel-os-nosdn-kvm-ha-baremetal-daily-colorado-trigger'
+    triggers:
+        - timed: '0 17 * * *'
+- trigger:
+    name: 'fuel-os-nosdn-ovs-ha-baremetal-daily-colorado-trigger'
+    triggers:
+        - timed: '0 20 * * *'
+
+# NOHA Scenarios
+- trigger:
+    name: 'fuel-os-nosdn-nofeature-noha-baremetal-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-nofeature-noha-baremetal-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l3-nofeature-noha-baremetal-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-onos-sfc-noha-baremetal-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-onos-nofeature-noha-baremetal-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-sfc-noha-baremetal-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-bgpvpn-noha-baremetal-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-kvm-noha-baremetal-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-ovs-noha-baremetal-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+#-----------------------------------------------
 # Triggers for job running on fuel-virtual against master branch
 #-----------------------------------------------
 - trigger:
     name: 'fuel-os-nosdn-ovs-ha-virtual-daily-master-trigger'
     triggers:
         - timed: ''
-
 # NOHA Scenarios
 - trigger:
     name: 'fuel-os-nosdn-nofeature-noha-virtual-daily-master-trigger'
     triggers:
-        - timed: '0 13 * * *'
+        - timed: '5 13 * * *'
 - trigger:
     name: 'fuel-os-odl_l2-nofeature-noha-virtual-daily-master-trigger'
     triggers:
-        - timed: '30 15 * * *'
+        - timed: '35 15 * * *'
 - trigger:
     name: 'fuel-os-odl_l3-nofeature-noha-virtual-daily-master-trigger'
     triggers:
-        - timed: '0 18 * * *'
+        - timed: '5 18 * * *'
 - trigger:
     name: 'fuel-os-onos-sfc-noha-virtual-daily-master-trigger'
     triggers:
-        - timed: '30 20 * * *'
+        - timed: '35 20 * * *'
 - trigger:
     name: 'fuel-os-onos-nofeature-noha-virtual-daily-master-trigger'
     triggers:
-        - timed: '0 23 * * *'
+        - timed: '5 23 * * *'
 - trigger:
     name: 'fuel-os-odl_l2-sfc-noha-virtual-daily-master-trigger'
     triggers:
-        - timed: '30 1 * * *'
+        - timed: '35 1 * * *'
 - trigger:
     name: 'fuel-os-odl_l2-bgpvpn-noha-virtual-daily-master-trigger'
     triggers:
-        - timed: '0 4 * * *'
+        - timed: '5 4 * * *'
 - trigger:
     name: 'fuel-os-nosdn-kvm-noha-virtual-daily-master-trigger'
     triggers:
-        - timed: '30 6 * * *'
+        - timed: '35 6 * * *'
 - trigger:
     name: 'fuel-os-nosdn-ovs-noha-virtual-daily-master-trigger'
+    triggers:
+        - timed: '5 9 * * *'
+#-----------------------------------------------
+# Triggers for job running on fuel-virtual against colorado branch
+#-----------------------------------------------
+- trigger:
+    name: 'fuel-os-nosdn-nofeature-ha-virtual-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-nofeature-ha-virtual-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l3-nofeature-ha-virtual-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-onos-sfc-ha-virtual-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-onos-nofeature-ha-virtual-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-bgpvpn-ha-virtual-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-sfc-ha-virtual-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-kvm-ha-virtual-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-ovs-ha-virtual-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+# NOHA Scenarios
+- trigger:
+    name: 'fuel-os-nosdn-nofeature-noha-virtual-daily-colorado-trigger'
+    triggers:
+        - timed: '0 13 * * *'
+- trigger:
+    name: 'fuel-os-odl_l2-nofeature-noha-virtual-daily-colorado-trigger'
+    triggers:
+        - timed: '30 15 * * *'
+- trigger:
+    name: 'fuel-os-odl_l3-nofeature-noha-virtual-daily-colorado-trigger'
+    triggers:
+        - timed: '0 18 * * *'
+- trigger:
+    name: 'fuel-os-onos-sfc-noha-virtual-daily-colorado-trigger'
+    triggers:
+        - timed: '30 20 * * *'
+- trigger:
+    name: 'fuel-os-onos-nofeature-noha-virtual-daily-colorado-trigger'
+    triggers:
+        - timed: '0 23 * * *'
+- trigger:
+    name: 'fuel-os-odl_l2-sfc-noha-virtual-daily-colorado-trigger'
+    triggers:
+        - timed: '30 1 * * *'
+- trigger:
+    name: 'fuel-os-odl_l2-bgpvpn-noha-virtual-daily-colorado-trigger'
+    triggers:
+        - timed: '0 4 * * *'
+- trigger:
+    name: 'fuel-os-nosdn-kvm-noha-virtual-daily-colorado-trigger'
+    triggers:
+        - timed: '30 6 * * *'
+- trigger:
+    name: 'fuel-os-nosdn-ovs-noha-virtual-daily-colorado-trigger'
     triggers:
         - timed: '0 9 * * *'
-
 #-----------------------------------------------
 # ZTE POD1 Triggers running against master branch
 #-----------------------------------------------
         - timed: ''
 
 #-----------------------------------------------
+# ZTE POD2 Triggers running against master branch
+#-----------------------------------------------
+- trigger:
+    name: 'fuel-os-nosdn-nofeature-ha-zte-pod2-daily-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-nofeature-ha-zte-pod2-daily-master-trigger'
+    triggers:
+        - timed: '0 18 * * *'
+- trigger:
+    name: 'fuel-os-odl_l3-nofeature-ha-zte-pod2-daily-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-onos-sfc-ha-zte-pod2-daily-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-onos-nofeature-ha-zte-pod2-daily-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-bgpvpn-ha-zte-pod2-daily-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-sfc-ha-zte-pod2-daily-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-kvm-ha-zte-pod2-daily-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-ovs-ha-zte-pod2-daily-master-trigger'
+    triggers:
+        - timed: ''
+# NOHA Scenarios
+- trigger:
+    name: 'fuel-os-nosdn-nofeature-noha-zte-pod2-daily-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-nofeature-noha-zte-pod2-daily-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l3-nofeature-noha-zte-pod2-daily-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-onos-sfc-noha-zte-pod2-daily-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-onos-nofeature-noha-zte-pod2-daily-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-sfc-noha-zte-pod2-daily-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-bgpvpn-noha-zte-pod2-daily-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-kvm-noha-zte-pod2-daily-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-ovs-noha-zte-pod2-daily-master-trigger'
+    triggers:
+        - timed: ''
+#-----------------------------------------------
 # ZTE POD3 Triggers running against master branch
 #-----------------------------------------------
 - trigger:
 - trigger:
     name: 'fuel-os-nosdn-ovs-noha-zte-pod3-daily-master-trigger'
     triggers:
-        - timed: ''
\ No newline at end of file
+        - timed: ''
+#-----------------------------------------------
+# ZTE POD1 Triggers running against colorado branch
+#-----------------------------------------------
+- trigger:
+    name: 'fuel-os-nosdn-nofeature-ha-zte-pod1-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-nofeature-ha-zte-pod1-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l3-nofeature-ha-zte-pod1-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-onos-sfc-ha-zte-pod1-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-onos-nofeature-ha-zte-pod1-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-bgpvpn-ha-zte-pod1-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-sfc-ha-zte-pod1-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-kvm-ha-zte-pod1-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-ovs-ha-zte-pod1-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+# NOHA Scenarios
+- trigger:
+    name: 'fuel-os-nosdn-nofeature-noha-zte-pod1-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-nofeature-noha-zte-pod1-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l3-nofeature-noha-zte-pod1-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-onos-sfc-noha-zte-pod1-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-onos-nofeature-noha-zte-pod1-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-sfc-noha-zte-pod1-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-bgpvpn-noha-zte-pod1-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-kvm-noha-zte-pod1-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-ovs-noha-zte-pod1-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+
+#-----------------------------------------------
+# ZTE POD2 Triggers running against colorado branch
+#-----------------------------------------------
+- trigger:
+    name: 'fuel-os-nosdn-nofeature-ha-zte-pod2-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-nofeature-ha-zte-pod2-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l3-nofeature-ha-zte-pod2-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-onos-sfc-ha-zte-pod2-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-onos-nofeature-ha-zte-pod2-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-bgpvpn-ha-zte-pod2-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-sfc-ha-zte-pod2-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-kvm-ha-zte-pod2-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-ovs-ha-zte-pod2-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+# NOHA Scenarios
+- trigger:
+    name: 'fuel-os-nosdn-nofeature-noha-zte-pod2-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-nofeature-noha-zte-pod2-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l3-nofeature-noha-zte-pod2-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-onos-sfc-noha-zte-pod2-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-onos-nofeature-noha-zte-pod2-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-sfc-noha-zte-pod2-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-bgpvpn-noha-zte-pod2-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-kvm-noha-zte-pod2-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-ovs-noha-zte-pod2-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+#-----------------------------------------------
+# ZTE POD3 Triggers running against colorado branch
+#-----------------------------------------------
+- trigger:
+    name: 'fuel-os-nosdn-nofeature-ha-zte-pod3-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-nofeature-ha-zte-pod3-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l3-nofeature-ha-zte-pod3-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-onos-sfc-ha-zte-pod3-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-onos-nofeature-ha-zte-pod3-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-bgpvpn-ha-zte-pod3-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-sfc-ha-zte-pod3-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-kvm-ha-zte-pod3-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-ovs-ha-zte-pod3-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+# NOHA Scenarios
+- trigger:
+    name: 'fuel-os-nosdn-nofeature-noha-zte-pod3-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-nofeature-noha-zte-pod3-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l3-nofeature-noha-zte-pod3-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-onos-sfc-noha-zte-pod3-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-onos-nofeature-noha-zte-pod3-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-sfc-noha-zte-pod3-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-bgpvpn-noha-zte-pod3-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-kvm-noha-zte-pod3-daily-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-ovs-noha-zte-pod3-daily-colorado-trigger'
+    triggers:
+        - timed: ''
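
Note: two things keep master and colorado from colliding on the same POD: the master triggers were staggered to minute 5 while colorado inherits the old minute-0 slots, and the widened build-blocker pattern (`fuel-os-.*?-{pod}-daily-.*` instead of `...-{stream}`) makes runs from either stream block each other. A quick bash check of the widened match (job name illustrative; `.*` used instead of Jenkins' lazy `.*?`, which POSIX ERE lacks):

    pattern='fuel-os-.*-baremetal-daily-.*'
    [[ "fuel-os-odl_l2-nofeature-ha-baremetal-daily-colorado" =~ $pattern ]] \
        && echo "blocked together"
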
index d78ddc5..5685444 100755 (executable)
@@ -10,6 +10,9 @@
 set -o errexit
 set -o pipefail
 
+# use the proxy URL in place of the normal URL, since googleusercontent.com may be blocked at random
+[[ "$NODE_NAME" =~ (zte) ]] && GS_URL=$GS_BASE_PROXY
+
 if [[ "$JOB_NAME" =~ "merge" ]]; then
     echo "Downloading http://$GS_URL/opnfv-gerrit-$GERRIT_CHANGE_NUMBER.properties"
     # get the properties file for the Fuel ISO built for a merged change
@@ -47,6 +50,8 @@ if [[ ! "$JOB_NAME" =~ (verify|merge) ]]; then
     fi
 fi
 
+[[ "$NODE_NAME" =~ (zte) ]] && OPNFV_ARTIFACT_URL=${GS_BASE_PROXY%%/*}/$OPNFV_ARTIFACT_URL
+
 # log info to console
 echo "Downloading the $INSTALLER_TYPE artifact using URL http://$OPNFV_ARTIFACT_URL"
 echo "This could take some time..."
index c160fb8..cf89383 100644 (file)
@@ -13,7 +13,7 @@
             branch: '{stream}'
             gs-pathname: ''
             disabled: false
-        - brahmaputra:
+        - colorado:
             branch: 'stable/{stream}'
             gs-pathname: '/{stream}'
             disabled: false
@@ -30,6 +30,8 @@
 - job-template:
     name: 'fuel-build-daily-{stream}'
 
+    disabled: '{obj:disabled}'
+
     concurrent: false
 
     properties:
 
     concurrent: true
 
-    disabled: false
+    disabled: '{obj:disabled}'
 
     properties:
         - throttle:
index 2b62394..f4bdbdd 100644 (file)
         - colorado:
             branch: 'stable/{stream}'
             gs-pathname: '/{stream}'
-            disabled: true
+            disabled: false
 #####################################
 # patch verification phases
 #####################################
     phase:
         - 'basic':
-            slave-label: 'opnfv-build-ubuntu'
+            slave-label: 'opnfv-build'
         - 'build':
             slave-label: 'opnfv-build-ubuntu'
         - 'deploy-virtual':
-            slave-label: 'fuel-virtual'
+            slave-label: 'opnfv-build'
         - 'smoke-test':
-            slave-label: 'fuel-virtual'
+            slave-label: 'opnfv-build'
 #####################################
 # jobs
 #####################################
         - throttle:
             enabled: true
             max-total: 4
-            max-per-node: 1
             option: 'project'
-        - build-blocker:
-            use-build-blocker: true
-            blocking-jobs:
-                - 'fuel-verify-master'
-                - 'fuel-verify-colorado'
-            block-level: 'NODE'
 
     scm:
         - gerrit-trigger-scm:
             project: '{project}'
         - gerrit-parameter:
             branch: '{branch}'
-        - 'fuel-virtual-defaults'
+        - 'opnfv-build-defaults'
         - 'fuel-verify-defaults':
             gs-pathname: '{gs-pathname}'
 
                     GERRIT_REFSPEC=$GERRIT_REFSPEC
                     GERRIT_CHANGE_NUMBER=$GERRIT_CHANGE_NUMBER
                     GERRIT_CHANGE_COMMIT_MESSAGE=$GERRIT_CHANGE_COMMIT_MESSAGE
-                  node-parameters: true
+                  node-parameters: false
                   kill-phase-on: FAILURE
                   abort-all-job: true
         - multijob:
                     GERRIT_REFSPEC=$GERRIT_REFSPEC
                     GERRIT_CHANGE_NUMBER=$GERRIT_CHANGE_NUMBER
                     GERRIT_CHANGE_COMMIT_MESSAGE=$GERRIT_CHANGE_COMMIT_MESSAGE
-                  node-parameters: true
+                  node-parameters: false
                   kill-phase-on: FAILURE
                   abort-all-job: true
 
index 2917e5b..8e3de3d 100644 (file)
@@ -14,8 +14,8 @@
         branch: '{stream}'
         gs-pathname: ''
         docker-tag: 'latest'
-    brahmaputra: &brahmaputra
-        stream: brahmaputra
+    colorado: &colorado
+        stream: colorado
         branch: 'stable/{stream}'
         gs-pathname: '/{stream}'
         docker-tag: 'stable'
         - baremetal:
             slave-label: fuel-baremetal
             installer: fuel
-            <<: *brahmaputra
+            <<: *colorado
         - virtual:
             slave-label: fuel-virtual
             installer: fuel
-            <<: *brahmaputra
+            <<: *colorado
 # joid CI PODs
         - baremetal:
             slave-label: joid-baremetal
         - baremetal:
             slave-label: joid-baremetal
             installer: joid
-            <<: *brahmaputra
+            <<: *colorado
         - virtual:
             slave-label: joid-virtual
             installer: joid
-            <<: *brahmaputra
+            <<: *colorado
 # compass CI PODs
         - baremetal:
             slave-label: compass-baremetal
         - baremetal:
             slave-label: compass-baremetal
             installer: compass
-            <<: *brahmaputra
+            <<: *colorado
         - virtual:
             slave-label: compass-virtual
             installer: compass
-            <<: *brahmaputra
+            <<: *colorado
 # apex CI PODs
         - apex-verify-master:
             slave-label: '{pod}'
             slave-label: '{pod}'
             installer: apex
             <<: *master
+        - apex-verify-colorado:
+            slave-label: '{pod}'
+            installer: apex
+            <<: *colorado
+        - apex-daily-colorado:
+            slave-label: '{pod}'
+            installer: apex
+            <<: *colorado
 #--------------------------------
 #        Non-CI PODs
 #--------------------------------
-        - orange-pod2:
+        - orange-pod1:
             slave-label: '{pod}'
             installer: joid
-            <<: *brahmaputra
+            <<: *master
         - orange-pod5:
             slave-label: '{pod}'
             installer: fuel
             slave-label: '{pod}'
             installer: fuel
             <<: *master
+        - zte-pod2:
+            slave-label: '{pod}'
+            installer: fuel
+            <<: *master
+        - zte-pod3:
+            slave-label: '{pod}'
+            installer: fuel
+            <<: *master
         - arm-pod1:
             slave-label: '{pod}'
             installer: fuel
-            <<: *brahmaputra
+            <<: *colorado
 #--------------------------------
 
     testsuite:
     parameters:
         - project-parameter:
             project: '{project}'
-        - '{slave-label}-defaults'
         - '{installer}-defaults'
+        - '{slave-label}-defaults'
         - 'functest-{testsuite}-parameter'
         - string:
             name: DEPLOY_SCENARIO
                 - 'rally_full'
                 - 'vims'
                 - 'multisite'
+                - 'parser'
 - parameter:
     name: functest-parameter
     parameters:
index 7f86281..a984545 100644 (file)
         - master:
             branch: '{stream}'
             gs-pathname: ''
-        - brahmaputra:
+            disabled: false
+        - colorado:
             branch: 'stable/{stream}'
             gs-pathname: '/{stream}'
+            disabled: false
 
 - job-template:
     name: 'functest-verify-{stream}'
 
+    disabled: '{obj:disabled}'
+
     parameters:
         - project-parameter:
             project: '{project}'
index 5b96912..d9dafdf 100644 (file)
         - master:
             branch: '{stream}'
             gs-pathname: ''
-        - brahmaputra:
+            disabled: false
+        - colorado:
             branch: 'stable/{stream}'
             gs-pathname: '/{stream}'
+            disabled: false
 
 - job-template:
     name: 'ipv6-verify-{stream}'
 
+    disabled: '{obj:disabled}'
+
     parameters:
         - project-parameter:
             project: '{project}'
index ae03eab..6d03709 100644 (file)
     master: &master
         stream: master
         branch: '{stream}'
+        disabled: false
         gs-pathname: ''
-    brahmaputra: &brahmaputra
-        stream: brahmaputra
+    colorado: &colorado
+        stream: colorado
         branch: 'stable/{stream}'
+        disabled: false
         gs-pathname: '/{stream}'
 #--------------------------------
 # POD, INSTALLER, AND BRANCH MAPPING
             <<: *master
         - baremetal:
             slave-label: joid-baremetal
-            <<: *brahmaputra
+            <<: *colorado
         - virtual:
             slave-label: joid-virtual
-            <<: *brahmaputra
+            <<: *colorado
 #--------------------------------
 #        None-CI PODs
 #--------------------------------
-        - orange-pod2:
-            slave-label: orange-pod2
-            <<: *brahmaputra
-        - orange-pod2:
-            slave-label: orange-pod2
-            <<: *master
-        - juniper-pod1:
-            slave-label: juniper-pod1
+        - orange-pod1:
+            slave-label: orange-pod1
             <<: *master
 #--------------------------------
-# new scenario descriptions
+# scenarios
+#--------------------------------
     scenario:
         - 'os-nosdn-nofeature-noha':
-            auto-trigger-name: 'daily-trigger-disabled'
+            auto-trigger-name: 'joid-{scenario}-{pod}-{stream}-trigger'
         - 'os-nosdn-nofeature-ha':
             auto-trigger-name: 'joid-{scenario}-{pod}-{stream}-trigger'
         - 'os-nosdn-lxd-ha':
-            auto-trigger-name: 'daily-trigger-disabled'
+            auto-trigger-name: 'joid-{scenario}-{pod}-{stream}-trigger'
         - 'os-nosdn-lxd-noha':
             auto-trigger-name: 'joid-{scenario}-{pod}-{stream}-trigger'
         - 'os-odl_l2-nofeature-ha':
@@ -86,6 +83,8 @@
 - job-template:
     name: 'joid-{scenario}-{pod}-daily-{stream}'
 
+    disabled: '{obj:disabled}'
+
     concurrent: false
 
     properties:
@@ -97,7 +96,7 @@
         - build-blocker:
             use-build-blocker: true
             blocking-jobs:
-                - 'joid-os-.*?-{pod}-daily-{stream}'
+                - 'joid-os-.*?-{pod}-daily-.*'
             block-level: 'NODE'
 
     wrappers:
 - job-template:
     name: 'joid-deploy-{pod}-daily-{stream}'
 
+    disabled: '{obj:disabled}'
+
     concurrent: true
 
     properties:
         - build-blocker:
             use-build-blocker: true
             blocking-jobs:
-                - 'joid-deploy-{pod}-daily-{stream}'
+                - 'joid-deploy-{pod}-daily-.*'
             block-level: 'NODE'
 
     wrappers:
 - trigger:
     name: 'joid-os-nosdn-nofeature-ha-baremetal-master-trigger'
     triggers:
-        - timed: '0 2 * * *'
+        - timed: '5 2 * * *'
 - trigger:
     name: 'joid-os-nosdn-nofeature-ha-virtual-master-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'joid-os-nosdn-nofeature-ha-orange-pod2-master-trigger'
+    name: 'joid-os-nosdn-nofeature-ha-orange-pod1-master-trigger'
     triggers:
         - timed: ''
+# os-nosdn-nofeature-ha trigger - branch: colorado
 - trigger:
-    name: 'joid-os-nosdn-nofeature-ha-juniper-pod1-master-trigger'
+    name: 'joid-os-nosdn-nofeature-ha-baremetal-colorado-trigger'
     triggers:
-        - timed: ''
-
-# os-nosdn-nofeature-ha trigger - branch: stable/brahmaputra
-- trigger:
-    name: 'joid-os-nosdn-nofeature-ha-baremetal-brahmaputra-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'joid-os-nosdn-nofeature-ha-virtual-brahmaputra-trigger'
-    triggers:
-        - timed: ''
+        - timed: '0 2 * * *'
 - trigger:
-    name: 'joid-os-nosdn-nofeature-ha-orange-pod2-brahmaputra-trigger'
+    name: 'joid-os-nosdn-nofeature-ha-virtual-colorado-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'joid-os-nosdn-nofeature-ha-juniper-pod1-brahmaputra-trigger'
+    name: 'joid-os-nosdn-nofeature-ha-orange-pod1-colorado-trigger'
     triggers:
         - timed: ''
-
 # os-odl_l2-nofeature-ha trigger - branch: master
 - trigger:
     name: 'joid-os-odl_l2-nofeature-ha-baremetal-master-trigger'
     triggers:
-        - timed: '0 10 * * *'
+        - timed: '5 7 * * *'
 - trigger:
     name: 'joid-os-odl_l2-nofeature-ha-virtual-master-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'joid-os-odl_l2-nofeature-ha-orange-pod2-master-trigger'
+    name: 'joid-os-odl_l2-nofeature-ha-orange-pod1-master-trigger'
     triggers:
         - timed: ''
+# os-odl_l2-nofeature-ha trigger - branch: colorado
 - trigger:
-    name: 'joid-os-odl_l2-nofeature-ha-juniper-pod1-master-trigger'
+    name: 'joid-os-odl_l2-nofeature-ha-baremetal-colorado-trigger'
     triggers:
-        - timed: ''
-
-# os-odl_l2-nofeature-ha trigger - branch: stable/brahmaputra
+        - timed: '0 7 * * *'
 - trigger:
-    name: 'joid-os-odl_l2-nofeature-ha-baremetal-brahmaputra-trigger'
+    name: 'joid-os-odl_l2-nofeature-ha-virtual-colorado-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'joid-os-odl_l2-nofeature-ha-virtual-brahmaputra-trigger'
+    name: 'joid-os-odl_l2-nofeature-ha-orange-pod1-colorado-trigger'
     triggers:
         - timed: ''
-- trigger:
-    name: 'joid-os-odl_l2-nofeature-ha-orange-pod2-brahmaputra-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'joid-os-odl_l2-nofeature-ha-juniper-pod1-brahmaputra-trigger'
-    triggers:
-        - timed: ''
-
 # os-onos-nofeature-ha trigger - branch: master
 - trigger:
     name: 'joid-os-onos-nofeature-ha-baremetal-master-trigger'
     triggers:
-        - timed: '0 18 * * *'
+        - timed: '5 12 * * *'
 - trigger:
     name: 'joid-os-onos-nofeature-ha-virtual-master-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'joid-os-onos-nofeature-ha-orange-pod2-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'joid-os-onos-nofeature-ha-juniper-pod1-master-trigger'
+    name: 'joid-os-onos-nofeature-ha-orange-pod1-master-trigger'
     triggers:
         - timed: ''
-
-# os-onos-sfc-ha trigger - branch: master
-- trigger:
-    name: 'joid-os-onos-sfc-ha-baremetal-master-trigger'
-    triggers:
-        - timed: '0 22 * * *'
+# os-onos-nofeature-ha trigger - branch: colorado
 - trigger:
-    name: 'joid-os-onos-sfc-ha-virtual-master-trigger'
+    name: 'joid-os-onos-nofeature-ha-baremetal-colorado-trigger'
     triggers:
-        - timed: ''
+        - timed: '0 12 * * *'
 - trigger:
-    name: 'joid-os-onos-sfc-ha-orange-pod2-master-trigger'
+    name: 'joid-os-onos-nofeature-ha-virtual-colorado-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'joid-os-onos-sfc-ha-juniper-pod1-master-trigger'
+    name: 'joid-os-onos-nofeature-ha-orange-pod1-colorado-trigger'
     triggers:
         - timed: ''
-
-# os-onos-nofeature-ha trigger - branch: stable/brahmaputra
+# os-onos-sfc-ha trigger - branch: master
 - trigger:
-    name: 'joid-os-onos-nofeature-ha-baremetal-brahmaputra-trigger'
+    name: 'joid-os-onos-sfc-ha-baremetal-master-trigger'
     triggers:
-        - timed: ''
+        - timed: '5 17 * * *'
 - trigger:
-    name: 'joid-os-onos-nofeature-ha-virtual-brahmaputra-trigger'
+    name: 'joid-os-onos-sfc-ha-virtual-master-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'joid-os-onos-nofeature-ha-orange-pod2-brahmaputra-trigger'
+    name: 'joid-os-onos-sfc-ha-orange-pod1-master-trigger'
     triggers:
         - timed: ''
+# os-onos-sfc-ha trigger - branch: colorado
 - trigger:
-    name: 'joid-os-onos-nofeature-ha-juniper-pod1-brahmaputra-trigger'
+    name: 'joid-os-onos-sfc-ha-baremetal-colorado-trigger'
     triggers:
-        - timed: ''
+        - timed: '0 17 * * *'
 - trigger:
-    name: 'joid-os-onos-sfc-ha-baremetal-brahmaputra-trigger'
+    name: 'joid-os-onos-sfc-ha-virtual-colorado-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'joid-os-onos-sfc-ha-virtual-brahmaputra-trigger'
+    name: 'joid-os-onos-sfc-ha-orange-pod1-colorado-trigger'
     triggers:
         - timed: ''
+# os-nosdn-lxd-noha trigger - branch: master
 - trigger:
-    name: 'joid-os-onos-sfc-ha-orange-pod2-brahmaputra-trigger'
+    name: 'joid-os-nosdn-lxd-noha-baremetal-master-trigger'
     triggers:
-        - timed: ''
+        - timed: '5 22 * * *'
 - trigger:
-    name: 'joid-os-onos-sfc-ha-juniper-pod1-brahmaputra-trigger'
+    name: 'joid-os-nosdn-lxd-noha-virtual-master-trigger'
     triggers:
         - timed: ''
-
-# os-nosdn-lxd-noha trigger - branch: master
 - trigger:
-    name: 'joid-os-nosdn-lxd-noha-baremetal-master-trigger'
+    name: 'joid-os-nosdn-lxd-noha-orange-pod1-master-trigger'
     triggers:
         - timed: ''
+# os-nosdn-lxd-noha trigger - branch: colorado
 - trigger:
-    name: 'joid-os-nosdn-lxd-noha-virtual-master-trigger'
+    name: 'joid-os-nosdn-lxd-noha-baremetal-colorado-trigger'
     triggers:
-        - timed: ''
+        - timed: '0 22 * * *'
 - trigger:
-    name: 'joid-os-nosdn-lxd-noha-orange-pod2-master-trigger'
+    name: 'joid-os-nosdn-lxd-noha-virtual-colorado-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'joid-os-nosdn-lxd-noha-juniper-pod1-master-trigger'
+    name: 'joid-os-nosdn-lxd-noha-orange-pod1-colorado-trigger'
     triggers:
         - timed: ''
-
-# os-nosdn-lxd-noha trigger - branch: stable/brahmaputra
+# os-nosdn-lxd-ha trigger - branch: master
 - trigger:
-    name: 'joid-os-nosdn-lxd-noha-baremetal-brahmaputra-trigger'
+    name: 'joid-os-nosdn-lxd-ha-baremetal-master-trigger'
     triggers:
-        - timed: ''
+        - timed: '5 10 * * *'
 - trigger:
-    name: 'joid-os-nosdn-lxd-noha-virtual-brahmaputra-trigger'
+    name: 'joid-os-nosdn-lxd-ha-virtual-master-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'joid-os-nosdn-lxd-noha-orange-pod2-brahmaputra-trigger'
+    name: 'joid-os-nosdn-lxd-ha-orange-pod1-master-trigger'
     triggers:
         - timed: ''
+# os-nosdn-lxd-ha trigger - branch: colorado
 - trigger:
-    name: 'joid-os-nosdn-lxd-noha-juniper-pod1-brahmaputra-trigger'
+    name: 'joid-os-nosdn-lxd-ha-baremetal-colorado-trigger'
     triggers:
-        - timed: ''
-
-# os-nosdn-lxd-ha trigger - branch: master
+        - timed: '0 10 * * *'
 - trigger:
-    name: 'joid-os-nosdn-lxd-ha-baremetal-master-trigger'
+    name: 'joid-os-nosdn-lxd-ha-virtual-colorado-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'joid-os-nosdn-lxd-ha-virtual-master-trigger'
+    name: 'joid-os-nosdn-lxd-ha-orange-pod1-colorado-trigger'
     triggers:
         - timed: ''
+# os-nosdn-nofeature-noha trigger - branch: master
 - trigger:
-    name: 'joid-os-nosdn-lxd-ha-orange-pod2-master-trigger'
+    name: 'joid-os-nosdn-nofeature-noha-baremetal-master-trigger'
     triggers:
-        - timed: ''
+        - timed: '5 4 * * *'
 - trigger:
-    name: 'joid-os-nosdn-lxd-ha-juniper-pod1-master-trigger'
+    name: 'joid-os-nosdn-nofeature-noha-virtual-master-trigger'
     triggers:
         - timed: ''
-
-# os-nosdn-lxd-ha trigger - branch: stable/brahmaputra
 - trigger:
-    name: 'joid-os-nosdn-lxd-ha-baremetal-brahmaputra-trigger'
+    name: 'joid-os-nosdn-nofeature-noha-orange-pod1-master-trigger'
     triggers:
         - timed: ''
+# os-nosdn-nofeature-noha trigger - branch: colorado
 - trigger:
-    name: 'joid-os-nosdn-lxd-ha-virtual-brahmaputra-trigger'
+    name: 'joid-os-nosdn-nofeature-noha-baremetal-colorado-trigger'
     triggers:
-        - timed: ''
+        - timed: '0 4 * * *'
 - trigger:
-    name: 'joid-os-nosdn-lxd-ha-orange-pod2-brahmaputra-trigger'
+    name: 'joid-os-nosdn-nofeature-noha-virtual-colorado-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'joid-os-nosdn-lxd-ha-juniper-pod1-brahmaputra-trigger'
+    name: 'joid-os-nosdn-nofeature-noha-orange-pod1-colorado-trigger'
     triggers:
         - timed: ''
index da3676e..05c2de1 100644 (file)
 set +e
 set -o nounset
 
-PWD_FILENAME="passwords.sh"
-
-##
 ##
+## Functions
 ##
 function exit_on_error {
     RES=$1
@@ -24,50 +22,23 @@ function exit_on_error {
     fi
 }
 
-
 ##
 ## Create LAB_CONFIG folder if not exists
 ##
-mkdir -p $LAB_CONFIG
-
-##
-## Override default passwords with local settings if needed
-##
 
-if [ -e "$LAB_CONFIG/$PWD_FILENAME" ]; then
-    echo "------ Load local passwords ------"
-    source $LAB_CONFIG/$PWD_FILENAME
-else
-    export MAAS_USER=ubuntu
-    export MAAS_PASSWORD=ubuntu
-    export OS_ADMIN_PASSWORD=openstack
-fi
+mkdir -p $LAB_CONFIG
 
 ##
 ## Set Joid pod config name
 ##
-    # This part will be removed when pod names will be synced between jenkins and joid config
-    case $NODE_NAME in
-        *virtual*)
-            POD=default ;;
-        *)
-            POD=$NODE_NAME ;;
-    esac
-    export POD_NAME=${POD/-}
 
-##
-## Parse Network config
-##
-
-EXTERNAL_NETWORK=${EXTERNAL_NETWORK:-}
-# split EXTERNAL_NETWORK=name;type;first ip;last ip; gateway;network
-IFS=';' read -r -a EXTNET <<< "$EXTERNAL_NETWORK"
-EXTNET_NAME=${EXTNET[0]}
-EXTNET_TYPE=${EXTNET[1]}
-EXTNET_FIP=${EXTNET[2]}
-EXTNET_LIP=${EXTNET[3]}
-EXTNET_GW=${EXTNET[4]}
-EXTNET_NET=${EXTNET[5]}
+case $NODE_NAME in
+    *virtual*)
+        POD=default ;;
+    *)
+        POD=$NODE_NAME ;;
+esac
+export POD_NAME=${POD/-}
 
 ##
 ## Redeploy MAAS or recover the previous config
@@ -77,11 +48,11 @@ cd $WORKSPACE/ci
 if [ -e "$LAB_CONFIG/environments.yaml" ] && [ "$MAAS_REINSTALL" == "false" ]; then
     echo "------ Recover Juju environment to use MAAS ------"
     cp $LAB_CONFIG/environments.yaml .
+    cp $LAB_CONFIG/deployment.yaml .
+    if [ -e $LAB_CONFIG/deployconfig.yaml ]; then
+        cp $LAB_CONFIG/deployconfig.yaml .
+    fi
 else
-    MAASCONFIG=$WORKSPACE/ci/maas/${POD/-*}/${POD/*-}/deployment.yaml
-    echo "------ Set MAAS password ------"
-    sed -i -- "s/user: ubuntu/user: $MAAS_USER/" $MAASCONFIG
-    sed -i -- "s/password: ubuntu/password: $MAAS_PASSWORD/" $MAASCONFIG
     echo "------ Redeploy MAAS ------"
     ./00-maasdeploy.sh $POD_NAME
     exit_on_error $? "MAAS Deploy FAILED"
@@ -117,24 +88,12 @@ fi
 if [ "$HA_MODE" == 'noha' ]; then
     HA_MODE='nonha'
 fi
-SRCBUNDLE="${WORKSPACE}/ci/${SDN_CONTROLLER}/juju-deployer/"
-SRCBUNDLE="${SRCBUNDLE}/ovs-${SDN_CONTROLLER}-${HA_MODE}.yaml"
-
-
-# Modify Bundle
-echo "------ Set openstack password ------"
-sed -i -- "s/admin-password: openstack/admin-password: $OS_ADMIN_PASSWORD/" $SRCBUNDLE
 
-if [ -n "$EXTNET_NAME" ]; then
-    echo "------ Set openstack default network ------"
-    sed -i -- "s/neutron-external-network: ext_net/neutron-external-network: $EXTNET_NAME/" $SRCBUNDLE
+# Append EXTRA to the NFV feature list
+if [ "$EXTRA" != "" ]; then
+    NFV_FEATURES="${NFV_FEATURES}_${EXTRA}"
 fi
 
-echo "------ Set ceph disks ------"
-#Find the first line of osd-devices to change the one for ceph, then the other for ceph-osd
-sed -i -- "s@osd-devices: /srv@osd-devices: $CEPH_DISKS@" $SRCBUNDLE
-sed -i -r -- "s/^(\s+osd-reformat: )'no'/\1'$CEPH_REFORMAT'/" $SRCBUNDLE
-
 # temporarily, the sfc feature is available only on onos and trusty
 if [ "$NFV_FEATURES" == 'sfc' ] && [ "$SDN_CONTROLLER" == 'onos' ];then
     UBUNTU_DISTRO=trusty
@@ -156,17 +115,6 @@ exit_on_error $? "Main deploy FAILED"
 JOID_ADMIN_OPENRC=$LAB_CONFIG/admin-openrc
 echo "------ Create OpenRC file [$JOID_ADMIN_OPENRC] ------"
 
-# get Keystone ip
-case "$HA_MODE" in
-    "ha")
-        KEYSTONE=$(cat bundles.yaml |shyaml get-value openstack-phase1.services.keystone.options.vip)
-        ;;
-    *)
-        KEYSTONE=$(juju status keystone |grep public-address|sed -- 's/.*\: //')
-        ;;
-esac
-
-
 # get controller IP
 case "$SDN_CONTROLLER" in
     "odl")
@@ -181,22 +129,12 @@ case "$SDN_CONTROLLER" in
 esac
 SDN_PASSWORD='admin'
 
-# export the openrc file
-cat << EOF > $JOID_ADMIN_OPENRC
-export OS_USERNAME=admin
-export OS_PASSWORD=$OS_ADMIN_PASSWORD
-export OS_TENANT_NAME=admin
-export OS_AUTH_URL=http://$KEYSTONE:35357/v2.0
-export OS_REGION_NAME=RegionOne
-export OS_ENDPOINT_TYPE='adminURL'
-export CINDER_ENDPOINT_TYPE='adminURL'
-export GLANCE_ENDPOINT_TYPE='adminURL'
-export KEYSTONE_ENDPOINT_TYPE='adminURL'
-export NEUTRON_ENDPOINT_TYPE='adminURL'
-export NOVA_ENDPOINT_TYPE='adminURL'
+# reuse the openrc file generated by joid and append the SDN
+# controller settings for Functest
+cp ./cloud/admin-openrc $JOID_ADMIN_OPENRC
+cat << EOF >> $JOID_ADMIN_OPENRC
 export SDN_CONTROLLER=$SDN_CONTROLLER_IP
 export SDN_PASSWORD=$SDN_PASSWORD
-export OS_INTERFACE=admin
 EOF
 
 ##
@@ -205,28 +143,10 @@ EOF
 
 echo "------ Backup Juju environment ------"
 cp environments.yaml $LAB_CONFIG/
-
-##
-## Basic test to return a realistic result to jenkins
-##
-
-echo "------ Do basic test ------"
-source $JOID_ADMIN_OPENRC
-curl -i -sw '%{http_code}' -H "Content-Type: application/json"   -d "
-{ \"auth\": {
-    \"identity\": {
-      \"methods\": [\"password\"],
-      \"password\": {
-        \"user\": {
-          \"name\": \"admin\",
-          \"domain\": { \"id\": \"default\" },
-          \"password\": \"$OS_ADMIN_PASSWORD\"
-        }
-      }
-    }
-  }
-}"   http://$KEYSTONE:5000/v3/auth/tokens |grep "HTTP/1.1 20" 2>&1 >/dev/null;
-exit_on_error $? "Deploy FAILED to auth to openstack"
+cp deployment.yaml $LAB_CONFIG/
+if [ -e deployconfig.yaml ]; then
+    cp deployconfig.yaml $LAB_CONFIG
+fi
 
 ##
 ## Exit success
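
Note: `export POD_NAME=${POD/-}` above removes only the first hyphen from the node name; `${POD//-}` would remove all of them. A quick illustration (second node name invented):

    POD=zte-pod2; echo "${POD/-}"    # -> ztepod2
    POD=a-b-c;    echo "${POD/-}"    # -> ab-c  (later hyphens survive)
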
index aa5fc67..9d362d8 100644 (file)
@@ -15,7 +15,7 @@
         - colorado:
             branch: 'stable/{stream}'
             gs-pathname: '/{stream}'
-            disabled: true
+            disabled: false
 #####################################
 # patch verification phases
 #####################################
 - job-template:
     name: 'joid-verify-{stream}'
 
-    project-type: multijob
-
     disabled: '{obj:disabled}'
 
+    project-type: multijob
+
     concurrent: true
 
     properties:
index 89b7e31..1f99f17 100755 (executable)
@@ -1,4 +1,7 @@
 #!/bin/bash
+set -o errexit
+set -o nounset
+set -o pipefail
 
 if [[ "$JOB_NAME" =~ (verify|merge|daily|weekly) ]]; then
     JOB_TYPE=${BASH_REMATCH[0]}
index 7a47f9a..868de13 100755 (executable)
@@ -1,3 +1,10 @@
 #!/bin/bash
-
+##########################################################
+## This script runs the cyclictest test scripts.
+##########################################################
+# The latest build packages are stored in build_output
 ls -al $WORKSPACE/build_output
+
+# start the test
+cd $WORKSPACE
+./ci/test_kvmfornfv.sh
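
Note: `ci/test_kvmfornfv.sh` lives in the kvmfornfv repo and is not shown here; a hedged sketch of the kind of cyclictest run (from the rt-tests package) such a latency suite typically wraps — flags and thresholds below are illustrative, not what the OPNFV script necessarily uses:

    # one SCHED_FIFO prio-99 measurement thread per CPU, 100k loops,
    # memory locked, clock_nanosleep-based, quiet summary with histogram
    sudo cyclictest --smp -p 99 -m -n -l 100000 -q -h 60 > cyclictest.log
    grep 'Max Latencies' cyclictest.log   # a suite would compare this to a budget
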
index 190ab4b..327ea97 100755 (executable)
@@ -1,4 +1,6 @@
 #!/bin/bash
+set -o errexit
+set -o nounset
 
 if [[ "$JOB_NAME" =~ (verify|merge|daily|weekly) ]]; then
     JOB_TYPE=${BASH_REMATCH[0]}
@@ -9,11 +11,13 @@ fi
 
 case "$JOB_TYPE" in
     verify)
-        echo "Uploading artifacts for the change $GERRIT_CHANGE_NUMBER. This could take some time..."
         GS_UPLOAD_LOCATION="gs://artifacts.opnfv.org/$PROJECT/review/$GERRIT_CHANGE_NUMBER"
+        echo "Removing outdated artifacts produced for the previous patch for the change $GERRIT_CHANGE_NUMBER"
+        gsutil ls $GS_UPLOAD_LOCATION > /dev/null 2>&1 && gsutil rm -r $GS_UPLOAD_LOCATION
+        echo "Uploading artifacts for the change $GERRIT_CHANGE_NUMBER. This could take some time..."
         ;;
     daily)
-        echo "Uploding daily artifacts This could take some time..."
+        echo "Uploading daily artifacts This could take some time..."
         OPNFV_ARTIFACT_VERSION=$(date -u +"%Y-%m-%d_%H-%M-%S")
         GS_UPLOAD_LOCATION="gs://$GS_URL/$OPNFV_ARTIFACT_VERSION"
         ;;
@@ -22,7 +26,7 @@ case "$JOB_TYPE" in
         exit 1
 esac
 
-gsutil cp -r $WORKSPACE/build_output $GS_UPLOAD_LOCATION > $WORKSPACE/gsutil.log 2>&1
+gsutil cp -r $WORKSPACE/build_output/* $GS_UPLOAD_LOCATION > $WORKSPACE/gsutil.log 2>&1
 gsutil -m setmeta -r \
     -h "Cache-Control:private, max-age=0, no-transform" \
     $GS_UPLOAD_LOCATION > /dev/null 2>&1
index 5fcb270..4bb0a15 100644 (file)
@@ -8,10 +8,10 @@
             branch: '{stream}'
             gs-pathname: ''
             disabled: false
-        - brahmaputra:
+        - colorado:
             branch: 'stable/{stream}'
             gs-pathname: '/{stream}'
-            disabled: true
+            disabled: false
 #####################################
 # patch verification phases
 #####################################
 - job-template:
     name: 'kvmfornfv-merge-{stream}'
 
+    disabled: '{obj:disabled}'
+
     parameters:
         - project-parameter:
             project: '{project}'
index ab99eef..21b9730 100644 (file)
             branch: '{stream}'
             gs-pathname: ''
             disabled: false
+            timed: '@midnight'
+        - colorado:
+            branch: 'stable/{stream}'
+            gs-pathname: '/{stream}'
+            disabled: false
+            timed: ''
 
 - job-template:
     name: 'multisite-verify-{stream}'
 
     disabled: '{obj:disabled}'
 
+    concurrent: true
+
     parameters:
         - project-parameter:
             project: '{project}'
@@ -82,6 +90,9 @@
             name: KINGBIRD_LOG_FILE
             default: $WORKSPACE/kingbird.log
         - 'intel-virtual6-defaults'
+        - string:
+            name: DEPLOY_SCENARIO
+            default: 'os-nosdn-multisite-ha'
 
     scm:
         - gerrit-trigger-scm:
             choosing-strategy: 'default'
 
     triggers:
-         - timed: '@midnight'
+         - timed: '{timed}'
 
     builders:
         - trigger-builds:
index bc8f666..3e2f95a 100644 (file)
         - master:
             branch: '{stream}'
             gs-pathname: ''
-        - brahmaputra:
+            disabled: false
+        - colorado:
             branch: 'stable/{stream}'
             gs-pathname: '/{stream}'
+            disabled: false
 
 - job-template:
     name: 'netready-verify-{stream}'
 
+    disabled: '{obj:disabled}'
+
     parameters:
         - project-parameter:
             project: '{project}'
index 1cb71cc..c4e34ca 100644 (file)
         - master:
             branch: '{stream}'
             gs-pathname: ''
-        - brahmaputra:
+            disabled: false
+        - colorado:
             branch: 'stable/{stream}'
             gs-pathname: '/{stream}'
+            disabled: false
 
 - job-template:
     name: 'octopus-verify-{stream}'
 
+    disabled: '{obj:disabled}'
+
     parameters:
         - project-parameter:
             project: '{project}'
index 0335b71..0c90c57 100644 (file)
 # only master branch is enabled at the moment to keep no of jobs sane
     stream:
         - master:
-            branch: 'master'
+            branch: '{stream}'
             gs-pathname: ''
-#        - brahmaputra:
-#            branch: 'stable/brahmaputra'
-#            gs-pathname: '/brahmaputra'
+            disabled: false
+        - colorado:
+            branch: 'stable/{stream}'
+            gs-pathname: '/{stream}'
+            disabled: false
 
     project: 'onosfw'
 
@@ -24,6 +26,8 @@
 - job-template:
     name: 'onosfw-verify-{stream}'
 
+    disabled: '{obj:disabled}'
+
     parameters:
         - project-parameter:
             project: '{project}'
@@ -65,6 +69,8 @@
 - job-template:
     name: 'onosfw-daily-{stream}'
 
+    disabled: '{obj:disabled}'
+
     parameters:
         - project-parameter:
             project: '{project}'
@@ -88,6 +94,8 @@
 - job-template:
     name: 'onosfw-build-{stream}'
 
+    disabled: '{obj:disabled}'
+
     parameters:
         - project-parameter:
             project: '{project}'
                 echo "OPNFV_GIT_URL=$(git config --get remote.origin.url)"
                 echo "OPNFV_GIT_SHA1=$(git rev-parse HEAD)"
                 echo "OPNFV_ARTIFACT_URL=$GS_URL/opnfv-$OPNFV_ARTIFACT_VERSION.iso"
-                echo "OPNFV_ARTIFACT_MD5SUM=$(md5sum $BUILD_DIRECTORY/onosfw.iso | cut -d' ' -f1)"
+                echo "OPNFV_ARTIFACT_SHA512SUM=$(sha512sum $BUILD_DIRECTORY/onosfw.iso | cut -d' ' -f1)"
                 echo "OPNFV_BUILD_URL=$BUILD_URL"
             ) > $BUILD_DIRECTORY/opnfv.properties
             echo
index 833a1d4..fbcaa4b 100644 (file)
             description: 'OpenStack release (liberty|mitaka)'
         - string:
             name: EXTERNAL_NETWORK
-            default: ext-net;flat;192.168.0.2;192.168.0.253;192.168.0.1;192.168.0.0/24
-            description: "External network to create (name;type;first ip;last ip; gateway;network)"
-        - string:
-            name: CEPH_DISKS
-            default: '/srv'
-            description: "Disks to use by ceph by default (space separated list)"
+            default: ext-net4
+            description: "External network used for Floating ips."
         - string:
             name: LAB_CONFIG
             default: "$HOME/joid_config"
             description: "Local lab config and Openstack openrc location"
-        - string:
-            name: CEPH_REFORMAT
-            default: 'false'
-            description: "Format or not disk before using ceph [true/false] (must be done the first time)"
         - string:
             name: MAAS_REINSTALL
             default: 'false'
index 6b4861c..4250eef 100644 (file)
     stream:
         - master:
             branch: '{stream}'
-        - brahmaputra:
+            disabled: false
+        - colorado:
             branch: 'stable/{stream}'
+            disabled: false
 
 ########################
 # job templates
@@ -29,7 +31,7 @@
 - job-template:
     name: '{project}-docker-build-push-{stream}'
 
-    disabled: false
+    disabled: '{obj:disabled}'
 
     parameters:
         - project-parameter:
@@ -41,7 +43,7 @@
             description: "To enable/disable pushing the image to Dockerhub."
         - string:
             name: BASE_VERSION
-            default: "brahmaputra.3"
+            default: "colorado.0"
             description: "Base version to be used."
         - string:
             name: DOCKER_REPO_NAME
@@ -72,7 +74,7 @@
 - job-template:
     name: 'yardstick-docker-build-push-{stream}'
 
-    disabled: false
+    disabled: '{obj:disabled}'
 
     parameters:
         - project-parameter:
@@ -84,7 +86,7 @@
             description: "To enable/disable pushing the image to Dockerhub."
         - string:
             name: BASE_VERSION
-            default: "brahmaputra.3"
+            default: "colorado.0"
             description: "Base version to be used."
         - string:
             name: DOCKER_REPO_NAME
index 2b80b84..7436573 100644 (file)
         - master:
             branch: '{stream}'
             gs-pathname: ''
-        - brahmaputra:
+            disabled: false
+        - colorado:
             branch: 'stable/{stream}'
             gs-pathname: '/{stream}'
+            disabled: false
 
 ########################
 # job templates
@@ -26,6 +28,8 @@
 - job-template:
     name: 'opnfv-docs-verify-{stream}'
 
+    disabled: '{obj:disabled}'
+
     parameters:
         - project-parameter:
             project: $GERRIT_PROJECT
@@ -68,6 +72,8 @@
 - job-template:
     name: 'opnfv-docs-merge-{stream}'
 
+    disabled: '{obj:disabled}'
+
     parameters:
         - project-parameter:
             project: $GERRIT_PROJECT
index 9611a38..f90f95d 100644 (file)
         - master:
             branch: '{stream}'
             gs-pathname: ''
-        - brahmaputra:
+            disabled: false
+        - colorado:
             branch: 'stable/{stream}'
             gs-pathname: '/{stream}'
+            disabled: false
 
 ########################
 # job templates
@@ -25,6 +27,8 @@
 - job-template:
     name: 'opnfv-lint-verify-{stream}'
 
+    disabled: '{obj:disabled}'
+
     parameters:
         - project-parameter:
             project: $GERRIT_PROJECT
@@ -51,7 +55,7 @@
                     comment-contains-value: 'reverify'
             projects:
               - project-compare-type: 'REG_EXP'
-                project-pattern: 'functest|sdnvpn|qtip'
+                project-pattern: 'functest|sdnvpn|qtip|daisy'
                 branches:
                   - branch-compare-type: 'ANT'
                     branch-pattern: '**/{branch}'
index 4d73ad8..73c7c61 100644 (file)
             name: SSH_KEY
             default: /root/.ssh/id_rsa
             description: 'SSH key to use for Apex'
+        - node:
+            name: SLAVE_NAME
+            description: 'Slave name on Jenkins'
+            allowed-slaves:
+                - lf-pod1
+            default-slaves:
+                - lf-pod1
+- parameter:
+    name: 'apex-daily-colorado-defaults'
+    parameters:
+        - label:
+            name: SLAVE_LABEL
+            default: 'apex-daily-colorado'
+        - string:
+            name: GIT_BASE
+            default: https://gerrit.opnfv.org/gerrit/$PROJECT
+            description: 'Git URL to use on this Jenkins Slave'
+        - string:
+            name: SSH_KEY
+            default: /root/.ssh/id_rsa
+            description: 'SSH key to use for Apex'
+        - node:
+            name: SLAVE_NAME
+            description: 'Slave name on Jenkins'
+            allowed-slaves:
+                - lf-pod1
+            default-slaves:
+                - lf-pod1
 - parameter:
     name: 'apex-verify-master-defaults'
     parameters:
             name: SSH_KEY
             default: /root/.ssh/id_rsa
             description: 'SSH key to use for Apex'
+        - node:
+            name: SLAVE_NAME
+            description: 'Slave name on Jenkins'
+            allowed-slaves:
+                - intel-virtual3
+                - intel-virtual4
+                - intel-virtual5
+            default-slaves:
+                - intel-virtual3
+                - intel-virtual4
+                - intel-virtual5
+- parameter:
+    name: 'apex-verify-colorado-defaults'
+    parameters:
+        - label:
+            name: SLAVE_LABEL
+            default: 'apex-verify-colorado'
+        - string:
+            name: GIT_BASE
+            default: https://gerrit.opnfv.org/gerrit/$PROJECT
+            description: 'Git URL to use on this Jenkins Slave'
+        - string:
+            name: SSH_KEY
+            default: /root/.ssh/id_rsa
+            description: 'SSH key to use for Apex'
+        - node:
+            name: SLAVE_NAME
+            description: 'Slave name on Jenkins'
+            allowed-slaves:
+                - intel-virtual3
+                - intel-virtual4
+                - intel-virtual5
+            default-slaves:
+                - intel-virtual3
+                - intel-virtual4
+                - intel-virtual5
 - parameter:
     name: 'lf-pod1-defaults'
     parameters:
             name: GIT_BASE
             default: https://gerrit.opnfv.org/gerrit/$PROJECT
             description: 'Git URL to use on this Jenkins Slave'
-        - string:
-            name: CEPH_DISKS
-            default: /srv
-            description: "Disks to use by ceph (comma separated list)"
         - string:
             name: EXTERNAL_NETWORK
-            default: ext-net;flat;10.5.15.5;10.5.15.250;10.5.15.254;10.5.15.0/24
-            description: "External network to create for pod5 (name;type;first ip;last ip; gateway;network)"
+            default: ext-net
+            description: "External network floating ips"
 #####################################################
 # Parameters for CI virtual PODs
 #####################################################
             name: BUILD_DIRECTORY
             default: $WORKSPACE/build_output
             description: "Directory where the build artifact will be located upon the completion of the build."
+- parameter:
+    name: 'opnfv-build-defaults'
+    parameters:
+        - label:
+            name: SLAVE_LABEL
+            default: 'opnfv-build'
+            description: 'Slave label on Jenkins'
+        - string:
+            name: GIT_BASE
+            default: https://gerrit.opnfv.org/gerrit/$PROJECT
+            description: 'Git URL to use on this Jenkins Slave'
+        - string:
+            name: BUILD_DIRECTORY
+            default: $WORKSPACE/build_output
+            description: "Directory where the build artifact will be located upon the completion of the build."
 - parameter:
     name: 'huawei-build-defaults'
     parameters:
             name: GIT_BASE
             default: https://gerrit.opnfv.org/gerrit/$PROJECT
             description: 'Git URL to use on this Jenkins Slave'
+        - string:
+            name: INSTALLER_IP
+            default: '10.20.6.2'
+            description: 'IP of the installer'
+        - string:
+            name: BRIDGE
+            default: 'br6'
+            description: 'PXE bridge for booting the Fuel master'
 - parameter:
     name: 'zte-pod2-defaults'
     parameters:
             description: 'Git URL to use on this Jenkins Slave'
         - string:
             name: INSTALLER_IP
-            default: '10.20.1.2'
+            default: '10.20.7.2'
             description: 'IP of the installer'
+        - string:
+            name: BRIDGE
+            default: 'br7'
+            description: 'PXE bridge for booting the Fuel master'
 - parameter:
     name: 'zte-pod3-defaults'
     parameters:
             name: CEPH_DISKS
             default: /srv
             description: "Disks to use by ceph (comma separated list)"
+- parameter:
+    name: 'orange-pod1-defaults'
+    parameters:
+        - node:
+            name: SLAVE_NAME
+            description: 'Slave name on Jenkins'
+            allowed-slaves:
+                - orange-pod1
+            default-slaves:
+                - orange-pod1
+        - string:
+            name: GIT_BASE
+            default: https://gerrit.opnfv.org/gerrit/$PROJECT
+            description: 'Git URL to use on this Jenkins Slave'
 - parameter:
     name: 'orange-pod2-defaults'
     parameters:
             name: GIT_BASE
             default: https://gerrit.opnfv.org/gerrit/$PROJECT
             description: 'Git URL to use on this Jenkins Slave'
-        - string:
-            name: CEPH_DISKS
-            default: /dev/sdb /dev/sdc
-            description: "Disks to use by ceph by default (space separated list)"
-        - string:
-            name: EXTERNAL_NETWORK
-            default: ext-net;flat;161.105.231.2;161.105.231.62;161.105.231.1;161.105.231.0/26
-            description: "External network to create (name;type;first ip;last ip; gateway;network)"
 - parameter:
     name: 'orange-pod5-defaults'
     parameters:
index f3e776c..2bf87c2 100644 (file)
         - master:
             branch: '{stream}'
             gs-pathname: ''
-        - brahmaputra:
+            disabled: false
+        - colorado:
             branch: 'stable/{stream}'
             gs-pathname: '/{stream}'
+            disabled: false
 
 ########################
 # job templates
@@ -27,6 +29,8 @@
 - job-template:
     name: 'opnfvdocs-verify-{stream}'
 
+    disabled: '{obj:disabled}'
+
     parameters:
         - project-parameter:
             project: $GERRIT_PROJECT
@@ -68,6 +72,8 @@
 - job-template:
     name: 'opnfvdocs-merge-{stream}'
 
+    disabled: '{obj:disabled}'
+
     parameters:
         - project-parameter:
             project: $GERRIT_PROJECT
index ec5761b..c6f3e4a 100644 (file)
             branch: '{stream}'
             gs-pathname: ''
             disabled: false
-        - brahmaputra:
+        - colorado:
             branch: 'stable/{stream}'
             gs-pathname: '/{stream}'
-            disabled: true
+            disabled: false
 
 - job-template:
     name: 'ovsnfv-verify-{stream}'
 
+    disabled: '{obj:disabled}'
+
     parameters:
         - project-parameter:
             project: '{project}'
@@ -68,6 +70,8 @@
 - job-template:
     name: 'ovsnfv-merge-{stream}'
 
+    disabled: '{obj:disabled}'
+
     parameters:
         - project-parameter:
             project: '{project}'
index 8c72838..7f73a13 100644 (file)
         - master:
             branch: '{stream}'
             gs-pathname: ''
-        - brahmaputra:
+            disabled: false
+        - colorado:
             branch: 'stable/{stream}'
             gs-pathname: '/{stream}'
+            disabled: false
 
 - job-template:
     name: 'parser-verify-{stream}'
 
+    disabled: '{obj:disabled}'
+
     parameters:
         - project-parameter:
             project: '{project}'
index 98d7128..f7ea622 100644 (file)
         - master:
             branch: '{stream}'
             gs-pathname: ''
-        - brahmaputra:
+            disabled: false
+        - colorado:
             branch: 'stable/{stream}'
             gs-pathname: '/{stream}'
+            disabled: false
 
 - job-template:
     name: 'pharos-verify-{stream}'
 
+    disabled: '{obj:disabled}'
+
     parameters:
         - project-parameter:
             project: '{project}'
index b6471bf..a95cd98 100644 (file)
         - master:
             branch: '{stream}'
             gs-pathname: ''
-        - brahmaputra:
+            disabled: false
+        - colorado:
             branch: 'stable/{stream}'
             gs-pathname: '/{stream}'
+            disabled: false
 
 - job-template:
     name: 'prediction-verify-{stream}'
 
+    disabled: '{obj:disabled}'
+
     parameters:
         - project-parameter:
             project: '{project}'
index 97a8b3e..1a4d628 100644 (file)
         - master:
             branch: '{stream}'
             gs-pathname: ''
-        - brahmaputra:
+            disabled: false
+        - colorado:
             branch: 'stable/{stream}'
             gs-pathname: '/{stream}'
+            disabled: false
 
 - job-template:
     name: 'promise-verify-{stream}'
 
+    disabled: '{obj:disabled}'
+
     parameters:
         - project-parameter:
             project: '{project}'
index ef455f8..d454b0f 100644 (file)
         stream: master
         branch: '{stream}'
         gs-pathname: ''
-    brahmaputra: &brahmaputra
-        stream: brahmaputra
-        branch: 'stable/{stream}'
-        gs-pathname: '{stream}'
 #--------------------------------
 # POD, INSTALLER, AND BRANCH MAPPING
 #--------------------------------
-#        brahmaputra
+#        master
 #--------------------------------
     pod:
         - dell-pod1:
             installer: compass
-            auto-trigger-name: 'qtip-daily-dell-pod1-trigger'
-            <<: *brahmaputra
+            auto-trigger-name: 'daily-trigger-disabled'
+            <<: *master
         - orange-pod2:
             installer: joid
             auto-trigger-name: 'daily-trigger-disabled'
-            <<: *brahmaputra
-#--------------------------------
-#        master
-#--------------------------------
+            <<: *master
         - juniper-pod1:
             installer: joid
             <<: *master
             auto-trigger-name: 'daily-trigger-disabled'
         - zte-pod1:
             installer: fuel
-            auto-trigger-name: 'qtip-daily-zte-pod1-trigger'
+            auto-trigger-name: 'daily-trigger-disabled'
             <<: *master
         - zte-pod2:
             installer: fuel
-            auto-trigger-name: 'daily-trigger-disabled'
+            auto-trigger-name: 'qtip-daily-zte-pod2-trigger'
             <<: *master
 
 #--------------------------------
 #trigger macros
 #################
 
-- trigger:
-    name: 'qtip-daily-dell-pod1-trigger'
-    triggers:
-        - timed: '0 3 * * *'
+#- trigger:
+#    name: 'qtip-daily-dell-pod1-trigger'
+#    triggers:
+#        - timed: '0 3 * * *'
 
 #- trigger:
 #    name: 'qtip-daily-juniper-pod1-trigger'
 #       - timed : ' 0 0 * * *'
 
 - trigger:
-    name: 'qtip-daily-zte-pod1-trigger'
+    name: 'qtip-daily-zte-pod2-trigger'
     triggers:
         - timed: '0 5 * * *'
+
index 75f7511..722a9be 100644 (file)
         - master:
             branch: '{stream}'
             gs-pathname: ''
-        - brahmaputra:
+            disabled: false
+        - colorado:
             branch: 'stable/{stream}'
             gs-pathname: '/{stream}'
+            disabled: false
 
 - job-template:
     name: 'qtip-verify-{stream}'
 
+    disabled: '{obj:disabled}'
+
     parameters:
         - project-parameter:
             project: '{project}'
index c14ea6c..8328aec 100644 (file)
@@ -13,7 +13,7 @@
             description: "URL to Google Storage."
         - string:
             name: GS_BASE_PROXY
-            default: build.opnfv.org/artifacts/$PROJECT
+            default: build.opnfv.org/artifacts.opnfv.org/$PROJECT
             description: "URL to Google Storage proxy"
 
 - parameter:
index fb70df7..026b643 100644 (file)
         - master:
             branch: '{stream}'
             gs-pathname: ''
-        - brahmaputra:
+            disabled: false
+        - colorado:
             branch: 'stable/{stream}'
             gs-pathname: '/{stream}'
+            disabled: false
 
 - job-template:
     name: 'storperf-verify-{stream}'
 
+    disabled: '{obj:disabled}'
+
     node: opnfv-build-ubuntu
 
     parameters:
index 0123fcd..c988c06 100644 (file)
         - master:
             branch: '{stream}'
             gs-pathname: ''
-        - brahmaputra:
+            disabled: false
+        - colorado:
             branch: 'stable/{stream}'
             gs-pathname: '/{stream}'
+            disabled: false
 
 - job-template:
     name: 'vnf_forwarding_graph-verify-{stream}'
 
+    disabled: '{obj:disabled}'
+
     parameters:
         - project-parameter:
             project: '{project}'
index bf2fe8f..363423d 100644 (file)
         - master:
             branch: '{stream}'
             gs-pathname: ''
-        - brahmaputra:
+            disabled: false
+        - colorado:
             branch: 'stable/{stream}'
             gs-pathname: '/{stream}'
+            disabled: false
 
 - job-template:
 
     name: 'vswitchperf-daily-{stream}'
 
+    disabled: '{obj:disabled}'
+
     parameters:
         - project-parameter:
             project: '{project}'
@@ -50,6 +54,8 @@
 - job-template:
     name: 'vswitchperf-verify-{stream}'
 
+    disabled: '{obj:disabled}'
+
     project-type: freestyle
 
     concurrent: true
 - job-template:
     name: 'vswitchperf-merge-{stream}'
 
+    disabled: '{obj:disabled}'
+
     project-type: freestyle
 
     concurrent: true
index 041eabd..d9fb435 100644 (file)
@@ -1,5 +1,5 @@
 ###################################
-# job configuration for functest
+# job configuration for yardstick
 ###################################
 - project:
     name: yardstick
         branch: '{stream}'
         gs-pathname: ''
         docker-tag: 'latest'
-    brahmaputra: &brahmaputra
-        stream: brahmaputra
+    colorado: &colorado
+        stream: colorado
         branch: 'stable/{stream}'
         gs-pathname: '{stream}'
-        docker-tag: 'brahmaputra.1.0'
+        docker-tag: 'stable'
 #--------------------------------
 # POD, INSTALLER, AND BRANCH MAPPING
 #--------------------------------
             slave-label: fuel-baremetal
             installer: fuel
             auto-trigger-name: 'daily-trigger-disabled'
-            <<: *brahmaputra
+            <<: *colorado
         - virtual:
             slave-label: fuel-virtual
             installer: fuel
             auto-trigger-name: 'daily-trigger-disabled'
-            <<: *brahmaputra
+            <<: *colorado
 # joid CI PODs
         - baremetal:
             slave-label: joid-baremetal
             slave-label: joid-baremetal
             installer: joid
             auto-trigger-name: 'daily-trigger-disabled'
-            <<: *brahmaputra
+            <<: *colorado
         - virtual:
             slave-label: joid-virtual
             installer: joid
             auto-trigger-name: 'daily-trigger-disabled'
-            <<: *brahmaputra
+            <<: *colorado
 
 # compass CI PODs
         - baremetal:
             slave-label: compass-baremetal
             installer: compass
             auto-trigger-name: 'daily-trigger-disabled'
-            <<: *brahmaputra
+            <<: *colorado
         - virtual:
             slave-label: compass-virtual
             installer: compass
             auto-trigger-name: 'daily-trigger-disabled'
-            <<: *brahmaputra
+            <<: *colorado
 #--------------------------------
 #    Installers not using labels
 #            CI PODs
             slave-label: '{pod}'
             installer: apex
             auto-trigger-name: 'daily-trigger-disabled'
-            <<: *brahmaputra
+            <<: *colorado
 #--------------------------------
 #        None-CI PODs
 #--------------------------------
-        - orange-pod2:
+        - orange-pod1:
             slave-label: '{pod}'
             installer: joid
             auto-trigger-name: 'daily-trigger-disabled'
-            <<: *brahmaputra
+            <<: *master
         - zte-pod1:
             slave-label: '{pod}'
             installer: fuel
             auto-trigger-name: 'daily-trigger-disabled'
             <<: *master
+        - zte-pod2:
+            slave-label: '{pod}'
+            installer: fuel
+            auto-trigger-name: 'daily-trigger-disabled'
+            <<: *master
+        - zte-pod3:
+            slave-label: '{pod}'
+            installer: fuel
+            auto-trigger-name: 'daily-trigger-disabled'
+            <<: *master
+        - arm-pod1:
+            slave-label: '{pod}'
+            installer: fuel
+            auto-trigger-name: 'daily-trigger-disabled'
+            <<: *master
         - orange-pod2:
             slave-label: '{pod}'
             installer: joid
     parameters:
         - project-parameter:
             project: '{project}'
-        - '{slave-label}-defaults'
         - '{installer}-defaults'
+        - '{slave-label}-defaults'
         - 'yardstick-params-{slave-label}'
         - string:
             name: DEPLOY_SCENARIO
             default: '-i 104.197.68.199:8086'
             description: 'Arguments to use in order to choose the backend DB'
 
+- parameter:
+    name: 'yardstick-params-zte-pod2'
+    parameters:
+        - string:
+            name: YARDSTICK_DB_BACKEND
+            default: '-i 104.197.68.199:8086'
+            description: 'Arguments to use in order to choose the backend DB'
+
 - parameter:
     name: 'yardstick-params-zte-pod3'
     parameters:
             default: '-i 104.197.68.199:8086'
             description: 'Arguments to use in order to choose the backend DB'
 
+- parameter:
+    name: 'yardstick-params-orange-pod1'
+    parameters:
+        - string:
+            name: YARDSTICK_DB_BACKEND
+            default: '-i 104.197.68.199:8086'
+            description: 'Arguments to use in order to choose the backend DB'
+
 - parameter:
     name: 'yardstick-params-orange-pod2'
     parameters:
             default: '-i 104.197.68.199:8086'
             description: 'Arguments to use in order to choose the backend DB'
 
+- parameter:
+    name: 'yardstick-params-arm-pod1'
+    parameters:
+        - string:
+            name: YARDSTICK_DB_BACKEND
+            default: '-i 104.197.68.199:8086'
+            description: 'Arguments to use in order to choose the backend DB'
+
 - parameter:
     name: 'yardstick-params-virtual'
     parameters:
index e8df9be..b370541 100755 (executable)
@@ -23,16 +23,22 @@ elif [[ ${INSTALLER_TYPE} == 'joid' ]]; then
     # replace the default one by the customized one provided by jenkins config
 fi
 
+# Set iptables rule to allow forwarding return traffic for container
+if ! sudo iptables -C FORWARD -j RETURN 2> ${redirect} || ! sudo iptables -L FORWARD | awk 'NR==3' | grep RETURN 2> ${redirect}; then
+    sudo iptables -I FORWARD -j RETURN
+fi
+
 opts="--privileged=true --rm"
 envs="-e INSTALLER_TYPE=${INSTALLER_TYPE} -e INSTALLER_IP=${INSTALLER_IP} \
     -e NODE_NAME=${NODE_NAME} -e EXTERNAL_NETWORK=${EXTERNAL_NETWORK} \
     -e YARDSTICK_BRANCH=${GIT_BRANCH##origin/} -e DEPLOY_SCENARIO=${DEPLOY_SCENARIO}"
 
-# Pull the latest image
+# Pull the image with the correct tag
+echo "Yardstick: Pulling image opnfv/yardstick:${DOCKER_TAG}"
 docker pull opnfv/yardstick:$DOCKER_TAG >$redirect
 
 # Run docker
-cmd="sudo docker run ${opts} ${envs} ${labconfig} ${sshkey} opnfv/yardstick \
+cmd="sudo docker run ${opts} ${envs} ${labconfig} ${sshkey} opnfv/yardstick:${DOCKER_TAG} \
     exec_tests.sh ${YARDSTICK_DB_BACKEND} ${YARDSTICK_SCENARIO_SUITE_NAME}"
 echo "Yardstick: Running docker cmd: ${cmd}"
 ${cmd}
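
The iptables block above keeps the FORWARD/RETURN insert idempotent: `iptables -C` checks for an existing rule and returns non-zero when it is absent, so repeated daily runs do not stack duplicate rules. The check-then-insert pattern on its own:

    # insert the rule only if an identical one is not already present
    if ! sudo iptables -C FORWARD -j RETURN 2>/dev/null; then
        sudo iptables -I FORWARD -j RETURN
    fi
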
index 64031b7..db07e9d 100644 (file)
         - master:
             branch: '{stream}'
             gs-pathname: ''
-        - brahmaputra:
+            disabled: false
+        - colorado:
             branch: 'stable/{stream}'
             gs-pathname: '/{stream}'
+            disabled: false
 
 ################################
 # job templates
@@ -26,6 +28,8 @@
 - job-template:
     name: 'yardstick-verify-{stream}'
 
+    disabled: '{obj:disabled}'
+
     parameters:
         - project-parameter:
             project: '{project}'
@@ -63,6 +67,8 @@
 - job-template:
     name: 'yardstick-merge-{stream}'
 
+    disabled: '{obj:disabled}'
+
     parameters:
         - project-parameter:
             project: '{project}'
diff --git a/prototypes/bifrost/README.md b/prototypes/bifrost/README.md
new file mode 100644 (file)
index 0000000..fffd1de
--- /dev/null
@@ -0,0 +1,48 @@
+=====================
+How to deploy bifrost
+=====================
+The scripts and playbooks defined in this repo need to be combined with the proper `Bifrost <http://git.openstack.org/cgit/openstack/bifrost>`_ code.
+
+Please follow these steps:
+
+1. Clone bifrost::
+
+    git clone https://git.openstack.org/openstack/bifrost /opt/bifrost
+
+2. Clone releng::
+
+    git clone https://gerrit.opnfv.org/gerrit/releng /opt/releng
+
+3. Clone infracloud::
+
+    git clone https://git.openstack.org/openstack-infra/puppet-infracloud /opt/puppet-infracloud
+
+4. Combine releng scripts and playbooks with bifrost::
+
+    cp -R /opt/releng/prototypes/bifrost/* /opt/bifrost/
+
+5. Run destroy script if you need to cleanup previous environment::
+
+    cd /opt/bifrost
+    ./scripts/destroy_env.sh
+
+6. Run deployment script to spin up 3 vms with bifrost: jumphost, controller and compute::
+
+    cd /opt/bifrost
+    ./scripts/test-bifrost-deployment.sh
+
+It is likely that the script will show some errors due to timeouts. Ignore them and wait until the VMs are completely bootstrapped. To verify this, check with ironic::
+
+    cd /opt/bifrost
+    source env-vars
+    ironic node-list
+
+Then wait until all the VMs are in the **active** Provisioning State.
+
+7. Check the IPs assigned to each of the VMs by looking at the inventory::
+
+    cat /tmp/baremetal.csv
+
+8. You can enter into the vms with devuser login/pass:
+
+    ssh devuser@192.168.122.2
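
The README tells the reader to wait for the **active** provisioning state without showing how to poll for it. One possible helper, assuming the env-vars file and ironic CLI from step 6 and the three-node layout above:

    cd /opt/bifrost
    source env-vars
    # poll every 30s until all three nodes report "active"
    until [ "$(ironic node-list | grep -c ' active ')" -eq 3 ]; do
        echo "waiting for nodes to become active..."
        sleep 30
    done
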
diff --git a/prototypes/bifrost/playbooks/roles/bifrost-prepare-for-test-dynamic/defaults/main.yml b/prototypes/bifrost/playbooks/roles/bifrost-prepare-for-test-dynamic/defaults/main.yml
new file mode 100644 (file)
index 0000000..69eb787
--- /dev/null
@@ -0,0 +1,4 @@
+---
+node_ssh_pause: 10
+wait_timeout: 1900
+multinode_testing: false
diff --git a/prototypes/bifrost/playbooks/test-bifrost-infracloud.yaml b/prototypes/bifrost/playbooks/test-bifrost-infracloud.yaml
new file mode 100644 (file)
index 0000000..ba548b3
--- /dev/null
@@ -0,0 +1,66 @@
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2016 RedHat and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+- hosts: localhost
+  connection: local
+  name: "Setting pre-test conditions"
+  become: yes
+  ignore_errors: yes
+  tasks:
+  - name: Remove pre-existing leases file
+    file: path=/var/lib/misc/dnsmasq.leases state=absent
+- hosts: localhost
+  connection: local
+  name: "Executes install, enrollment, and testing in one playbook"
+  become: no
+  gather_facts: yes
+  pre_tasks:
+    - name: "Override the ipv4_gateway setting"
+      set_fact:
+         ipv4_gateway: "192.168.122.1"
+  roles:
+    - { role: bifrost-prep-for-install, when: skip_install is not defined }
+  environment:
+    http_proxy: "{{ lookup('env','http_proxy') }}"
+    https_proxy: "{{ lookup('env','https_proxy') }}"
+- hosts: localhost
+  connection: local
+  name: "Executes install, enrollment, and testing in one playbook"
+  become: yes
+  gather_facts: yes
+  roles:
+    - role: bifrost-ironic-install
+      cleaning: false
+      testing: true
+    # NOTE(TheJulia): While the next step creates a ramdisk, some elements
+    # do not support ramdisk-image-create as they invoke steps to cleanup
+    # the ramdisk which causes ramdisk-image-create to believe it failed.
+    - { role: bifrost-create-dib-image, dib_imagename: "{{ http_boot_folder }}/ipa", build_ramdisk: false, dib_os_element: "{{ ipa_dib_os_element|default('debian') }}", dib_os_release: "jessie", dib_elements: "ironic-agent {{ ipa_extra_dib_elements | default('') }}", when: create_ipa_image | bool == true }
+    - { role: bifrost-create-dib-image, dib_imagetype: "qcow2", dib_imagename: "{{deploy_image}}", dib_os_element: "ubuntu-minimal", dib_os_release: "trusty", dib_elements: "vm serial-console simple-init devuser infra-cloud-bridge puppet growroot {{ extra_dib_elements|default('') }}", dib_packages: "openssh-server,vlan,vim,less,bridge-utils,language-pack-en,iputils-ping,rsyslog,curl", when: create_image_via_dib | bool == true and transform_boot_image | bool == false }
+  environment:
+    http_proxy: "{{ lookup('env','http_proxy') }}"
+    https_proxy: "{{ lookup('env','https_proxy') }}"
+- hosts: baremetal
+  name: "Enroll node with Ironic"
+  become: no
+  connection: local
+  roles:
+    - role: ironic-enroll-dynamic
+    - { role: ironic-inspect-node, when: inspect_nodes | default('false') | bool == true }
+- hosts: baremetal
+  vars:
+    multinode_testing: "{{ inventory_dhcp | bool == true }}"
+  name: "Create configuration drive files and deploy machines."
+  become: no
+  connection: local
+  roles:
+    - role: bifrost-configdrives-dynamic
+    - role: bifrost-deploy-nodes-dynamic
+    - role: bifrost-prepare-for-test-dynamic
+      serial: 1
diff --git a/prototypes/bifrost/scripts/destroy_env.sh b/prototypes/bifrost/scripts/destroy_env.sh
new file mode 100755 (executable)
index 0000000..aef4ccb
--- /dev/null
@@ -0,0 +1,38 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2016 RedHat and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+virsh destroy jumphost.opnfvlocal || true
+virsh destroy controller00.opnfvlocal || true
+virsh destroy compute00.opnfvlocal || true
+virsh undefine jumphost.opnfvlocal || true
+virsh undefine controller00.opnfvlocal || true
+virsh undefine compute00.opnfvlocal || true
+
+service ironic-conductor stop
+
+echo "removing from database"
+mysql -u root ironic --execute "truncate table ports;"
+mysql -u root ironic --execute "delete from node_tags;"
+mysql -u root ironic --execute "delete from nodes;"
+mysql -u root ironic --execute "delete from conductors;"
+echo "removing leases"
+> /var/lib/dnsmasq/dnsmasq.leases
+echo "removing logs"
+rm -rf /var/log/libvirt/baremetal_logs/*.log
+
+# clean up images
+rm -rf /httpboot/*
+rm -rf /tftpboot/*
+rm -rf /var/lib/libvirt/images/*.qcow2
+
+echo "restarting services"
+service libvirtd restart
+service ironic-api restart
+service ironic-conductor start
+service ironic-inspector restart
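
destroy_env.sh truncates the ironic database, leases, logs, and images without asking. On shared hosts, a small confirmation wrapper (hypothetical, not part of the script) can guard against accidental runs:

    read -r -p "This wipes all bifrost VMs and ironic state. Continue? [y/N] " answer
    case "$answer" in
        [yY]*) /opt/bifrost/scripts/destroy_env.sh ;;
        *)     echo "aborted"; exit 1 ;;
    esac
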
diff --git a/prototypes/bifrost/scripts/test-bifrost-deployment.sh b/prototypes/bifrost/scripts/test-bifrost-deployment.sh
new file mode 100755 (executable)
index 0000000..5df58f8
--- /dev/null
@@ -0,0 +1,121 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2016 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -eux
+set -o pipefail
+export PYTHONUNBUFFERED=1
+SCRIPT_HOME="$(cd "$(dirname "$0")" && pwd)"
+BIFROST_HOME=$SCRIPT_HOME/..
+ANSIBLE_INSTALL_ROOT=${ANSIBLE_INSTALL_ROOT:-/opt/stack}
+ENABLE_VENV="false"
+USE_DHCP="false"
+USE_VENV="false"
+BUILD_IMAGE=true
+
+# Set defaults for ansible command-line options to drive the different
+# tests.
+
+# NOTE(TheJulia/cinerama): The variables defined on the command line
+# for the default and DHCP tests are to drive the use of Cirros as the
+# deployed operating system, and as such sets the test user to cirros,
+# and writes a debian style interfaces file out to the configuration
+# drive as cirros does not support the network_info.json format file
+# placed in the configuration drive. The "build image" test does not
+# use cirros.
+
+TEST_VM_NUM_NODES=3
+export TEST_VM_NODE_NAMES="jumphost.opnfvlocal controller00.opnfvlocal compute00.opnfvlocal"
+export VM_DOMAIN_TYPE="kvm"
+export VM_CPU=4
+export VM_DISK=100
+TEST_PLAYBOOK="test-bifrost-infracloud.yaml"
+USE_INSPECTOR=true
+USE_CIRROS=false
+TESTING_USER=root
+VM_MEMORY_SIZE="8192"
+DOWNLOAD_IPA=true
+CREATE_IPA_IMAGE=false
+INSPECT_NODES=true
+INVENTORY_DHCP=false
+INVENTORY_DHCP_STATIC_IP=false
+WRITE_INTERFACES_FILE=true
+
+# Set BIFROST_INVENTORY_SOURCE
+export BIFROST_INVENTORY_SOURCE=/tmp/baremetal.csv
+
+# DIB custom elements path
+export ELEMENTS_PATH=/usr/share/diskimage-builder/elements:/opt/puppet-infracloud/files/elements
+
+# settings for console access
+export DIB_DEV_USER_PWDLESS_SUDO=yes
+export DIB_DEV_USER_PASSWORD=devuser
+
+# Source Ansible
+# NOTE(TheJulia): Ansible stable-1.9 source method tosses an error deep
+# under the hood which -x will detect, so for this step, we need to suspend
+# and then re-enable the feature.
+set +x +o nounset
+$SCRIPT_HOME/env-setup.sh
+source ${ANSIBLE_INSTALL_ROOT}/ansible/hacking/env-setup
+ANSIBLE=$(which ansible-playbook)
+set -x -o nounset
+
+# Change working directory
+cd $BIFROST_HOME/playbooks
+
+# Syntax check of dynamic inventory test path
+${ANSIBLE} -vvvv \
+       -i inventory/localhost \
+       test-bifrost-create-vm.yaml \
+       --syntax-check \
+       --list-tasks
+${ANSIBLE} -vvvv \
+       -i inventory/localhost \
+       ${TEST_PLAYBOOK} \
+       --syntax-check \
+       --list-tasks \
+       -e testing_user=${TESTING_USER}
+
+# Create the test VMS
+${ANSIBLE} -vvvv \
+       -i inventory/localhost \
+       test-bifrost-create-vm.yaml \
+       -e test_vm_num_nodes=${TEST_VM_NUM_NODES} \
+       -e test_vm_memory_size=${VM_MEMORY_SIZE} \
+       -e enable_venv=${ENABLE_VENV} \
+       -e test_vm_domain_type=${VM_DOMAIN_TYPE}
+
+# Execute the installation and VM startup test.
+# Suspend errexit here so a failing play still reaches the log
+# collection and exit-code reporting below.
+set +e
+${ANSIBLE} -vvvv \
+    -i inventory/bifrost_inventory.py \
+    ${TEST_PLAYBOOK} \
+    -e use_cirros=${USE_CIRROS} \
+    -e testing_user=${TESTING_USER} \
+    -e test_vm_num_nodes=${TEST_VM_NUM_NODES} \
+    -e inventory_dhcp=${INVENTORY_DHCP} \
+    -e inventory_dhcp_static_ip=${INVENTORY_DHCP_STATIC_IP} \
+    -e enable_venv=${ENABLE_VENV} \
+    -e enable_inspector=${USE_INSPECTOR} \
+    -e inspect_nodes=${INSPECT_NODES} \
+    -e download_ipa=${DOWNLOAD_IPA} \
+    -e create_ipa_image=${CREATE_IPA_IMAGE} \
+    -e write_interfaces_file=${WRITE_INTERFACES_FILE} \
+    -e ipv4_gateway=192.168.122.1
+EXITCODE=$?
+set -e
+
+if [ $EXITCODE != 0 ]; then
+    echo "****************************"
+    echo "Test failed. See logs folder"
+    echo "****************************"
+fi
+
+$SCRIPT_HOME/collect-test-info.sh
+
+exit $EXITCODE
diff --git a/prototypes/puppet-infracloud/.gitkeep b/prototypes/puppet-infracloud/.gitkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/prototypes/puppet-infracloud/README.md b/prototypes/puppet-infracloud/README.md
new file mode 100644 (file)
index 0000000..f3bd672
--- /dev/null
@@ -0,0 +1,52 @@
+===============================
+How to deploy puppet-infracloud
+===============================
+The manifest and modules defined in this repo will deploy an OpenStack cloud based on the `Infra Cloud <http://docs.openstack.org/infra/system-config/infra-cloud.html>`_ project.
+
+Once all the hardware is provisioned, log into the controller and compute nodes and follow these steps:
+
+1. Clone releng::
+
+    git clone https://gerrit.opnfv.org/gerrit/releng /opt/releng
+
+2. Copy hiera to the right place::
+
+    cp /opt/releng/prototypes/puppet-infracloud/hiera/common.yaml /var/lib/hiera/
+
+3. Install modules::
+
+    cd /opt/releng/prototypes/puppet-infracloud
+    ./install_modules.sh
+
+4. Apply the infracloud manifest::
+
+    cd /opt/releng/prototypes/puppet-infracloud
+    puppet apply manifests/site.pp --modulepath=/etc/puppet/modules:/opt/releng/prototypes/puppet-infracloud/modules
+
+5. Once you finish these operations on the controller and compute nodes, you will have a functional OpenStack cloud.
+
+On the jumphost, follow these steps:
+
+1. Clone releng::
+
+    git clone https://gerrit.opnfv.org/gerrit/releng /opt/releng
+
+2. Create the OpenStack clouds config directory::
+
+    mkdir -p /root/.config/openstack
+
+3. Copy credentials file::
+
+    cp /opt/releng/prototypes/puppet-infracloud/creds/clouds.yaml /root/.config/openstack/
+
+4. Install the OpenStack client::
+
+    pip install python-openstackclient
+
+5. Export the desired cloud::
+
+    export OS_CLOUD=opnfv
+
+6. Start using it::
+
+    openstack server list
diff --git a/prototypes/puppet-infracloud/creds/clouds.yaml b/prototypes/puppet-infracloud/creds/clouds.yaml
new file mode 100644 (file)
index 0000000..eb44db6
--- /dev/null
@@ -0,0 +1,12 @@
+clouds:
+  opnfv:
+    verify: False
+    auth:
+      auth_url: https://controller00.opnfvlocal:5000
+      project_name: opnfv
+      username: opnfv
+      password: pass
+    identity_api_version: '3'
+    region_name: RegionOne
+    user_domain_name: opnfv
+    project_domain_name: opnfv
diff --git a/prototypes/puppet-infracloud/hiera/common.yaml b/prototypes/puppet-infracloud/hiera/common.yaml
new file mode 100644 (file)
index 0000000..6c28f19
--- /dev/null
@@ -0,0 +1,77 @@
+keystone_rabbit_password: pass
+neutron_rabbit_password: pass
+nova_rabbit_password: pass
+root_mysql_password: pass
+keystone_mysql_password: pass
+glance_mysql_password: pass
+neutron_mysql_password: pass
+nova_mysql_password: pass
+keystone_admin_password: pass
+glance_admin_password: pass
+neutron_admin_password: pass
+nova_admin_password: pass
+keystone_admin_token: token
+ssl_key_file_contents: |
+  -----BEGIN PRIVATE KEY-----
+  MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC0YX6wsA/Jhe3q
+  ByoiLsyagO5rOCIyzDsMTV0YMWVIa/QybvS1vI+pK9FIoYPbqWFGHXmQF0DJYulb
+  GnB6A0GlT3YXuaKPucaaANr5hTjuEBF6LuQeq+OIO5u7+l56HGWbbVeB7+vnIxK9
+  43G545aBZSGlUnVfFg+v+IQtmRr36iEa5UDd4sahDXcp2Dm3zGgkFhFKie6AJ4UU
+  TzrH2SL6Nhl7i+AenuoUEDdgDWfGnCXozLngfmhKDi6lHDmh5zJhFS7cKz14wLgF
+  37fsWxxxEX8a6gtGYEEHqXV3x3AXO+U98pr15/xQM9O2O3mrqc/zkmcCRUwCjEeD
+  jEHey3UJAgMBAAECggEAGqapBEwPGRRbsY87b2+AtXdFQrw5eU3pj4jCr3dk4o1o
+  uCbiqxNgGnup4VRT2hmtkKF8O4jj/p1JozdF1RE0GsuhxCGeXiPxrwFfWSyQ28Ou
+  AWJ6O/njlVZRTTXRzbLyZEOEgWNEdJMfCsVXIUL6EsYxcW68fr8QtExAo0gSzvwe
+  IVyhopBy4A1jr5jWqjjlgJhoTHQCkp1e9pHiaW5WWHtk2DFdy6huw5PoDRppG42P
+  soMzqHy9AIWXrYaTGNjyybdJvbaiF0X5Bkr6k8ZxMlRuEb3Vpyrj7SsBrUifRJM3
+  +yheSq3drdQHlw5VrukoIgXGYB4zAQq3LndLoL5YTQKBgQDlzz/hB1IuGOKBXRHy
+  p0j+Lyoxt5EiOW2mdEkbTUYyYnD9EDbJ0wdQ5ijtWLw0J3AwhASkH8ZyljOVHKlY
+  Sq2Oo/uroIH4M8cVIBOJQ2/ak98ItLZ1OMMnDxlZva52jBfYwOEkg6OXeLOLmay6
+  ADfxQ56RFqreVHi9J0/jvpn9UwKBgQDI8CZrM4udJTP7gslxeDcRZw6W34CBBFds
+  49d10Tfd05sysOludzWAfGFj27wqIacFcIyYQmnSga9lBhowv+RwdSjcb2QCCjOb
+  b2GdH+qSFU8BTOcd5FscCBV3U8Y1f/iYp0EQ1/GiG2AYcQC67kjWOO4/JZEXsmtq
+  LisFlWTcswKBgQCC/bs/nViuhei2LELKuafVmzTF2giUJX/m3Wm+cjGNDqew18kj
+  CXKmHks93tKIN+KvBNFQa/xF3G/Skt/EP+zl3XravUbYH0tfM0VvfE0JnjgHUlqe
+  PpiebvDYQlJrqDb/ihHLKm3ZLSfKbvIRo4Y/s3dy5CTJTgT0bLAQ9Nf5mQKBgGqb
+  Dqb9d+rtnACqSNnMn9q5xIHDHlhUx1VcJCm70Fn+NG7WcWJMGLSMSNdD8zafGA/I
+  wK7fPWmTqEx+ylJm3HnVjtI0vuheJTcoBq/oCPlsGLhl5pBzYOskVs8yQQyNUoUa
+  52haSTZqM7eD7JFAbqBJIA2cjrf1zwtMZ0LVGegFAoGBAIFSkI+y4tDEEaSsxrMM
+  OBYEZDkffVar6/mDJukvyn0Q584K3I4eXIDoEEfMGgSN2Tza6QamuNFxOPCH+AAv
+  UKvckK4yuYkc7mQIgjCE8N8UF4kgsXjPek61TZT1QVI1aYFb78ZAZ0miudqWkx4t
+  YSNDj7llArylrPGHBLQ38X4/
+  -----END PRIVATE KEY-----
+ssl_cert_file_contents: |
+  -----BEGIN CERTIFICATE-----
+  MIIDcTCCAlmgAwIBAgIJAJsHSxF0u/oaMA0GCSqGSIb3DQEBCwUAME8xCzAJBgNV
+  BAYTAlVTMQ4wDAYDVQQHDAVXb3JsZDEOMAwGA1UECgwFT1BORlYxIDAeBgNVBAMM
+  F2NvbnRyb2xsZXIwMC5vcG5mdmxvY2FsMB4XDTE2MDgxNzE2MzQwOFoXDTE3MDgx
+  NzE2MzQwOFowTzELMAkGA1UEBhMCVVMxDjAMBgNVBAcMBVdvcmxkMQ4wDAYDVQQK
+  DAVPUE5GVjEgMB4GA1UEAwwXY29udHJvbGxlcjAwLm9wbmZ2bG9jYWwwggEiMA0G
+  CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC0YX6wsA/Jhe3qByoiLsyagO5rOCIy
+  zDsMTV0YMWVIa/QybvS1vI+pK9FIoYPbqWFGHXmQF0DJYulbGnB6A0GlT3YXuaKP
+  ucaaANr5hTjuEBF6LuQeq+OIO5u7+l56HGWbbVeB7+vnIxK943G545aBZSGlUnVf
+  Fg+v+IQtmRr36iEa5UDd4sahDXcp2Dm3zGgkFhFKie6AJ4UUTzrH2SL6Nhl7i+Ae
+  nuoUEDdgDWfGnCXozLngfmhKDi6lHDmh5zJhFS7cKz14wLgF37fsWxxxEX8a6gtG
+  YEEHqXV3x3AXO+U98pr15/xQM9O2O3mrqc/zkmcCRUwCjEeDjEHey3UJAgMBAAGj
+  UDBOMB0GA1UdDgQWBBQyFVbU5s2ihD0hX3W7GyHiHZGG1TAfBgNVHSMEGDAWgBQy
+  FVbU5s2ihD0hX3W7GyHiHZGG1TAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBCwUA
+  A4IBAQB+xf7I9RVWzRNjMbWBDE6pBvOWnSksv7Jgr4cREvyOxBDaIoO3uQRDDu6r
+  RCgGs1CuwEaFX1SS/OVrKRFiy9kCU/LBZEFwaHRaL2Kj57Z2yNInPIiKB4h9jen2
+  75fYrpq42XUDSI0NpsqAJpmcQqXOOo8V08FlH0/6h8mWdsfQfbyaf+g73+aRZds8
+  Q4ttmBrqY4Pi5CJW46w7LRCA5o92Di3GI9dAh9MVZ3023cTTjDkW04QbluphuTFj
+  O07Npz162/fHTXut+piV78t+1HlfYWY5TOSQMIVwenftA/Bn8+TQAgnLR+nGo/wu
+  oEaxLtj3Jr07+yIjL88ewT+c3fpq
+  -----END CERTIFICATE-----
+infracloud_mysql_password: pass
+opnfv_password: pass
+
+rabbitmq::package_gpg_key: 'https://www.rabbitmq.com/rabbitmq-release-signing-key.asc'
+rabbitmq::repo::apt::key: '0A9AF2115F4687BD29803A206B73A36E6026DFCA'
+
+hosts:
+  jumphost.opnfvlocal:
+    ip: 192.168.122.2
+  controller00.opnfvlocal:
+    ip: 192.168.122.3
+  compute00.opnfvlocal:
+    ip: 192.168.122.4
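
The ssl_key_file_contents/ssl_cert_file_contents pair above is a self-signed certificate for controller00.opnfvlocal; a real deployment would substitute its own. An equivalent pair can be generated with openssl (subject fields chosen to match the sample certificate):

    openssl req -x509 -nodes -newkey rsa:2048 -days 365 \
        -keyout ssl.key -out ssl.crt \
        -subj "/C=US/L=World/O=OPNFV/CN=controller00.opnfvlocal"
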
diff --git a/prototypes/puppet-infracloud/install_modules.sh b/prototypes/puppet-infracloud/install_modules.sh
new file mode 100755 (executable)
index 0000000..5d5acd9
--- /dev/null
@@ -0,0 +1,121 @@
+#!/bin/bash
+# Copyright 2014 OpenStack Foundation.
+# Copyright 2014 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+MODULE_PATH=`puppet config print modulepath | cut -d ':' -f 1`
+SCRIPT_NAME=$(basename $0)
+SCRIPT_DIR=$(readlink -f "$(dirname $0)")
+JUST_CLONED=0
+
+function remove_module {
+    local SHORT_MODULE_NAME=$1
+    if [ -n "$SHORT_MODULE_NAME" ]; then
+        rm -Rf "$MODULE_PATH/$SHORT_MODULE_NAME"
+    else
+        echo "ERROR: remove_module requires a SHORT_MODULE_NAME."
+    fi
+}
+
+function git_clone {
+    local MOD=$1
+    local DEST=$2
+
+    JUST_CLONED=1
+    for attempt in $(seq 0 3); do
+        clone_error=0
+        git clone $MOD $DEST && break || true
+        rm -rf $DEST
+        clone_error=1
+    done
+    return $clone_error
+}
+
+# Array of modules to be installed; key:value is module:version.
+declare -A MODULES
+
+# Array of modules to be installed from source and without dependency resolution.
+# key:value is source location, revision to checkout
+declare -A SOURCE_MODULES
+
+# Array of modules to be installed from source and without dependency resolution, from openstack git.
+# key:value is source location, revision to checkout
+declare -A INTEGRATION_MODULES
+
+# load modules.env to populate MODULES[*] and SOURCE_MODULES[*]
+# for processing.
+MODULE_ENV_FILE=${MODULE_FILE:-modules.env}
+MODULE_ENV_PATH=${MODULE_ENV_PATH:-${SCRIPT_DIR}}
+if [ -f "${MODULE_ENV_PATH}/${MODULE_ENV_FILE}" ] ; then
+    . "${MODULE_ENV_PATH}/${MODULE_ENV_FILE}"
+fi
+
+if [ -z "${!MODULES[*]}" ] && [ -z "${!SOURCE_MODULES[*]}" ] ; then
+    echo ""
+    echo "WARNING: nothing to do, unable to find MODULES or SOURCE_MODULES"
+    echo "  export options, try setting MODULE_ENV_PATH or MODULE_ENV_FILE"
+    echo "  export to the proper location of modules.env file."
+    echo ""
+    exit 0
+fi
+
+MODULE_LIST=`puppet module list --color=false`
+
+# Install modules from source
+for MOD in ${!SOURCE_MODULES[*]} ; do
+    JUST_CLONED=0
+    # get the name of the module directory
+    if [ `echo $MOD | awk -F. '{print $NF}'` = 'git' ]; then
+        echo "Remote repos of the form repo.git are not supported: ${MOD}"
+        exit 1
+    fi
+
+    MODULE_NAME=`echo $MOD | awk -F- '{print $NF}'`
+
+    # set up git base command to use the correct path
+    GIT_CMD_BASE="git --git-dir=${MODULE_PATH}/${MODULE_NAME}/.git --work-tree ${MODULE_PATH}/${MODULE_NAME}"
+    # treat any occurrence of the module as a match
+    if ! echo $MODULE_LIST | grep "${MODULE_NAME}" >/dev/null 2>&1; then
+        # clone modules that are not installed
+        git_clone $MOD "${MODULE_PATH}/${MODULE_NAME}"
+    else
+        if [ ! -d ${MODULE_PATH}/${MODULE_NAME}/.git ]; then
+            echo "Found directory ${MODULE_PATH}/${MODULE_NAME} that is not a git repo, deleting it and reinstalling from source"
+            remove_module $MODULE_NAME
+            git_clone $MOD "${MODULE_PATH}/${MODULE_NAME}"
+        elif [ `${GIT_CMD_BASE} remote show origin | grep 'Fetch URL' | awk -F'URL: ' '{print $2}'` != $MOD ]; then
+            echo "Found remote in ${MODULE_PATH}/${MODULE_NAME} that does not match desired remote ${MOD}, deleting dir and re-cloning"
+            remove_module $MODULE_NAME
+            git_clone $MOD "${MODULE_PATH}/${MODULE_NAME}"
+        fi
+    fi
+
+    # fetch the latest refs from the repo
+    if [[ $JUST_CLONED -eq 0 ]] ; then
+        # If we just cloned the repo, we do not need to remote update
+        for attempt in $(seq 0 3); do
+            clone_error=0
+            $GIT_CMD_BASE remote update && break || true
+            clone_error=1
+        done
+        if [[ $clone_error -ne 0 ]] ; then
+            exit $clone_error
+        fi
+    fi
+    # Make sure the correct revision is installed; rev-list is used because rev-parse does not work with tags.
+    if [ `${GIT_CMD_BASE} rev-list HEAD --max-count=1` != `${GIT_CMD_BASE} rev-list ${SOURCE_MODULES[$MOD]} --max-count=1` ]; then
+        # checkout correct revision
+        $GIT_CMD_BASE checkout ${SOURCE_MODULES[$MOD]}
+    fi
+done
diff --git a/prototypes/puppet-infracloud/manifests/site.pp b/prototypes/puppet-infracloud/manifests/site.pp
new file mode 100644 (file)
index 0000000..e524918
--- /dev/null
@@ -0,0 +1,63 @@
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2016 RedHat and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+node 'controller00.opnfvlocal' {
+  $group = 'infracloud'
+  class { 'opnfv::server':
+    iptables_public_tcp_ports => [80,5000,5671,8774,9292,9696,35357], # logs,keystone,rabbit,nova,glance,neutron,keystone
+    sysadmins                 => hiera('sysadmins', []),
+    enable_unbound            => false,
+    purge_apt_sources         => false,
+  }
+  class { 'opnfv::controller':
+    keystone_rabbit_password         => hiera('keystone_rabbit_password'),
+    neutron_rabbit_password          => hiera('neutron_rabbit_password'),
+    nova_rabbit_password             => hiera('nova_rabbit_password'),
+    root_mysql_password              => hiera('infracloud_mysql_password'),
+    keystone_mysql_password          => hiera('keystone_mysql_password'),
+    glance_mysql_password            => hiera('glance_mysql_password'),
+    neutron_mysql_password           => hiera('neutron_mysql_password'),
+    nova_mysql_password              => hiera('nova_mysql_password'),
+    keystone_admin_password          => hiera('keystone_admin_password'),
+    glance_admin_password            => hiera('glance_admin_password'),
+    neutron_admin_password           => hiera('neutron_admin_password'),
+    nova_admin_password              => hiera('nova_admin_password'),
+    keystone_admin_token             => hiera('keystone_admin_token'),
+    ssl_key_file_contents            => hiera('ssl_key_file_contents'),
+    ssl_cert_file_contents           => hiera('ssl_cert_file_contents'),
+    br_name                          => 'br-eth0',
+    controller_public_address        => $::fqdn,
+    neutron_subnet_cidr              => '192.168.122.0/24',
+    neutron_subnet_gateway           => '192.168.122.1',
+    neutron_subnet_allocation_pools  => [
+                                          'start=192.168.122.50,end=192.168.122.254',
+                                        ],
+    opnfv_password                   => hiera('opnfv_password'),
+  }
+}
+
+node 'compute00.opnfvlocal' {
+  $group = 'infracloud'
+  class { 'opnfv::server':
+    sysadmins                 => hiera('sysadmins', []),
+    enable_unbound            => false,
+    purge_apt_sources         => false,
+  }
+
+  class { 'opnfv::compute':
+    nova_rabbit_password             => hiera('nova_rabbit_password'),
+    neutron_rabbit_password          => hiera('neutron_rabbit_password'),
+    neutron_admin_password           => hiera('neutron_admin_password'),
+    ssl_cert_file_contents           => hiera('ssl_cert_file_contents'),
+    ssl_key_file_contents            => hiera('ssl_key_file_contents'),
+    br_name                          => 'br-eth0',
+    controller_public_address        => 'controller00.opnfvlocal',
+    virt_type                        => 'qemu',
+  }
+}
+
diff --git a/prototypes/puppet-infracloud/modules.env b/prototypes/puppet-infracloud/modules.env
new file mode 100644 (file)
index 0000000..2df81ec
--- /dev/null
@@ -0,0 +1,81 @@
+# Copyright 2014 OpenStack Foundation.
+# Copyright 2016 RedHat.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# load additional modules from modules.env
+# modules.env should exist in the same folder as install_modules.sh
+#
+# - use export MODULE_FILE to specify an alternate config
+#   when calling install_modules.sh.
+#   This allows for testing environments that are configured with alternate
+#   module configuration.
+
+# Source modules should use tags, explicit refs or remote branches because
+# we do not update local branches in this script.
+# Keep sorted
+
+OPENSTACK_GIT_ROOT=https://git.openstack.org
+
+# InfraCloud modules
+SOURCE_MODULES["$OPENSTACK_GIT_ROOT/openstack/puppet-cinder"]="origin/stable/mitaka"
+SOURCE_MODULES["$OPENSTACK_GIT_ROOT/openstack/puppet-glance"]="origin/stable/mitaka"
+SOURCE_MODULES["$OPENSTACK_GIT_ROOT/openstack/puppet-ironic"]="origin/stable/mitaka"
+SOURCE_MODULES["$OPENSTACK_GIT_ROOT/openstack/puppet-keystone"]="origin/stable/mitaka"
+SOURCE_MODULES["$OPENSTACK_GIT_ROOT/openstack/puppet-neutron"]="origin/stable/mitaka"
+SOURCE_MODULES["$OPENSTACK_GIT_ROOT/openstack/puppet-nova"]="origin/stable/mitaka"
+SOURCE_MODULES["$OPENSTACK_GIT_ROOT/openstack/puppet-openstack_extras"]="origin/stable/mitaka"
+SOURCE_MODULES["$OPENSTACK_GIT_ROOT/openstack/puppet-openstacklib"]="origin/stable/mitaka"
+
+SOURCE_MODULES["https://github.com/duritong/puppet-sysctl"]="v0.0.11"
+SOURCE_MODULES["https://github.com/nanliu/puppet-staging"]="1.0.0"
+SOURCE_MODULES["https://github.com/jfryman/puppet-selinux"]="v0.2.5"
+SOURCE_MODULES["https://github.com/maestrodev/puppet-wget"]="v1.6.0"
+SOURCE_MODULES["https://github.com/puppetlabs/puppetlabs-apache"]="1.8.1"
+SOURCE_MODULES["https://github.com/puppetlabs/puppetlabs-apt"]="2.1.0"
+SOURCE_MODULES["https://github.com/puppetlabs/puppetlabs-concat"]="1.2.5"
+SOURCE_MODULES["https://github.com/puppetlabs/puppetlabs-firewall"]="1.1.3"
+SOURCE_MODULES["https://github.com/puppetlabs/puppetlabs-haproxy"]="1.5.0"
+SOURCE_MODULES["https://github.com/puppetlabs/puppetlabs-inifile"]="1.1.3"
+SOURCE_MODULES["https://github.com/puppetlabs/puppetlabs-mysql"]="3.6.2"
+SOURCE_MODULES["https://github.com/puppetlabs/puppetlabs-ntp"]="3.2.1"
+SOURCE_MODULES["https://github.com/puppetlabs/puppetlabs-rabbitmq"]="5.2.3"
+SOURCE_MODULES["https://github.com/puppetlabs/puppetlabs-stdlib"]="4.10.0"
+SOURCE_MODULES["https://github.com/rafaelfelix/puppet-pear"]="1.0.3"
+SOURCE_MODULES["https://github.com/saz/puppet-memcached"]="v2.6.0"
+SOURCE_MODULES["https://github.com/saz/puppet-timezone"]="v3.3.0"
+SOURCE_MODULES["https://github.com/stankevich/puppet-python"]="1.9.4"
+SOURCE_MODULES["https://github.com/vamsee/puppet-solr"]="0.0.8"
+SOURCE_MODULES["https://github.com/voxpupuli/puppet-alternatives"]="0.3.0"
+SOURCE_MODULES["https://github.com/voxpupuli/puppet-archive"]="v0.5.1"
+SOURCE_MODULES["https://github.com/voxpupuli/puppet-git_resource"]="0.3.0"
+SOURCE_MODULES["https://github.com/voxpupuli/puppet-nodejs"]="1.2.0"
+SOURCE_MODULES["https://github.com/voxpupuli/puppet-puppetboard"]="2.4.0"
+
+
+INTEGRATION_MODULES["$OPENSTACK_GIT_ROOT/openstack-infra/puppet-httpd"]="origin/master"
+INTEGRATION_MODULES["$OPENSTACK_GIT_ROOT/openstack-infra/puppet-infracloud"]="origin/master"
+INTEGRATION_MODULES["$OPENSTACK_GIT_ROOT/openstack-infra/puppet-iptables"]="origin/master"
+INTEGRATION_MODULES["$OPENSTACK_GIT_ROOT/openstack-infra/puppet-pip"]="origin/master"
+INTEGRATION_MODULES["$OPENSTACK_GIT_ROOT/openstack-infra/puppet-snmpd"]="origin/master"
+INTEGRATION_MODULES["$OPENSTACK_GIT_ROOT/openstack-infra/puppet-ssh"]="origin/master"
+INTEGRATION_MODULES["$OPENSTACK_GIT_ROOT/openstack-infra/puppet-ssl_cert_check"]="origin/master"
+INTEGRATION_MODULES["$OPENSTACK_GIT_ROOT/openstack-infra/puppet-sudoers"]="origin/master"
+INTEGRATION_MODULES["$OPENSTACK_GIT_ROOT/openstack-infra/puppet-ulimit"]="origin/master"
+INTEGRATION_MODULES["$OPENSTACK_GIT_ROOT/openstack-infra/puppet-unattended_upgrades"]="origin/master"
+INTEGRATION_MODULES["$OPENSTACK_GIT_ROOT/openstack-infra/puppet-unbound"]="origin/master"
+INTEGRATION_MODULES["$OPENSTACK_GIT_ROOT/openstack-infra/puppet-user"]="origin/master"
+
+for MOD in ${!INTEGRATION_MODULES[*]}; do
+ SOURCE_MODULES[$MOD]=${INTEGRATION_MODULES[$MOD]}
+done
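modules.env is not a script: it only fills two bash associative arrays, which is why the loop above folds INTEGRATION_MODULES into SOURCE_MODULES (a single iteration then installs both sets), and why the consumer must declare the arrays before sourcing the file. A minimal sketch of that consuming side; the real install_modules.sh may differ, and the module-name derivation is an illustration only:

```bash
#!/bin/bash
# Sketch of a modules.env consumer (the role install_modules.sh plays).
set -e

# The arrays must be associative *before* sourcing, otherwise the
# SOURCE_MODULES[...]= assignments would create plain scalar variables.
declare -A SOURCE_MODULES
declare -A INTEGRATION_MODULES

# export MODULE_FILE to point at an alternate module configuration.
MODULE_FILE=${MODULE_FILE:-modules.env}
source "$MODULE_FILE"

MODULE_PATH=/etc/puppet/modules
for URL in "${!SOURCE_MODULES[@]}"; do
    REF=${SOURCE_MODULES[$URL]}    # tag, explicit ref or remote branch
    NAME=$(basename "$URL" | sed -e 's/^puppet-//' -e 's/^puppetlabs-//')
    [ -d "$MODULE_PATH/$NAME" ] || git clone "$URL" "$MODULE_PATH/$NAME"
    git -C "$MODULE_PATH/$NAME" checkout "$REF"
done
```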
diff --git a/prototypes/puppet-infracloud/modules/opnfv/manifests/compute.pp b/prototypes/puppet-infracloud/modules/opnfv/manifests/compute.pp
new file mode 100644 (file)
index 0000000..ca548a5
--- /dev/null
@@ -0,0 +1,23 @@
+class opnfv::compute (
+  $nova_rabbit_password,
+  $neutron_rabbit_password,
+  $neutron_admin_password,
+  $ssl_cert_file_contents,
+  $ssl_key_file_contents,
+  $br_name,
+  $controller_public_address,
+  $virt_type = 'kvm',
+) {
+  class { '::infracloud::compute':
+    nova_rabbit_password          => $nova_rabbit_password,
+    neutron_rabbit_password       => $neutron_rabbit_password,
+    neutron_admin_password        => $neutron_admin_password,
+    ssl_cert_file_contents        => $ssl_cert_file_contents,
+    ssl_key_file_contents         => $ssl_key_file_contents,
+    br_name                       => $br_name,
+    controller_public_address     => $controller_public_address,
+    virt_type                     => $virt_type,
+  }
+
+}
+
diff --git a/prototypes/puppet-infracloud/modules/opnfv/manifests/controller.pp b/prototypes/puppet-infracloud/modules/opnfv/manifests/controller.pp
new file mode 100644 (file)
index 0000000..7522692
--- /dev/null
@@ -0,0 +1,85 @@
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2016 RedHat and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+class opnfv::controller (
+  $keystone_rabbit_password,
+  $neutron_rabbit_password,
+  $nova_rabbit_password,
+  $root_mysql_password,
+  $keystone_mysql_password,
+  $glance_mysql_password,
+  $neutron_mysql_password,
+  $nova_mysql_password,
+  $glance_admin_password,
+  $keystone_admin_password,
+  $neutron_admin_password,
+  $nova_admin_password,
+  $keystone_admin_token,
+  $ssl_key_file_contents,
+  $ssl_cert_file_contents,
+  $br_name,
+  $controller_public_address = $::fqdn,
+  $neutron_subnet_cidr,
+  $neutron_subnet_gateway,
+  $neutron_subnet_allocation_pools,
+  $opnfv_password,
+  $opnfv_email = 'opnfvuser@gmail.com',
+) {
+  class { '::infracloud::controller':
+    keystone_rabbit_password         => $keystone_rabbit_password,
+    neutron_rabbit_password          => $neutron_rabbit_password,
+    nova_rabbit_password             => $nova_rabbit_password,
+    root_mysql_password              => $root_mysql_password,
+    keystone_mysql_password          => $keystone_mysql_password,
+    glance_mysql_password            => $glance_mysql_password,
+    neutron_mysql_password           => $neutron_mysql_password,
+    nova_mysql_password              => $nova_mysql_password,
+    keystone_admin_password          => $keystone_admin_password,
+    glance_admin_password            => $glance_admin_password,
+    neutron_admin_password           => $neutron_admin_password,
+    nova_admin_password              => $nova_admin_password,
+    keystone_admin_token             => $keystone_admin_token,
+    ssl_key_file_contents            => $ssl_key_file_contents,
+    ssl_cert_file_contents           => $ssl_cert_file_contents,
+    br_name                          => $br_name,
+    controller_public_address        => $controller_public_address,
+    neutron_subnet_cidr              => $neutron_subnet_cidr,
+    neutron_subnet_gateway           => $neutron_subnet_gateway,
+    neutron_subnet_allocation_pools  => $neutron_subnet_allocation_pools,
+  }
+
+  # create keystone creds
+  keystone_domain { 'opnfv':
+    ensure  => present,
+    enabled => true,
+  }
+
+  keystone_tenant { 'opnfv':
+    ensure      => present,
+    enabled     => true,
+    description => 'OPNFV cloud',
+    domain      => 'opnfv',
+    require     => Keystone_domain['opnfv'],
+  }
+
+  keystone_user { 'opnfv':
+    ensure   => present,
+    enabled  => true,
+    domain   => 'opnfv',
+    email    => $opnfv_email,
+    password => $opnfv_password,
+    require  => Keystone_tenant['opnfv'],
+  }
+
+  keystone_role { 'user': ensure => present }
+
+  keystone_user_role { 'opnfv::opnfv@opnfv::opnfv':
+    roles => [ 'user', 'admin', ],
+  }
+}
+
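For reference, the keystone_* resources above amount to the following OpenStack CLI sequence; a sketch assuming an admin openrc is already sourced, with OPNFV_EMAIL and OPNFV_PASSWORD standing in for the class parameters:

```bash
#!/bin/bash
# Hand-rolled equivalent of the keystone resources in opnfv::controller.
set -e
openstack domain create opnfv
openstack project create --domain opnfv --description 'OPNFV cloud' opnfv
openstack user create --domain opnfv --email "$OPNFV_EMAIL" \
    --password "$OPNFV_PASSWORD" opnfv
openstack role create user
# keystone_user_role grants both roles on the project in one resource:
openstack role add --project opnfv --user opnfv user
openstack role add --project opnfv --user opnfv admin
```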
diff --git a/prototypes/puppet-infracloud/modules/opnfv/manifests/server.pp b/prototypes/puppet-infracloud/modules/opnfv/manifests/server.pp
new file mode 100644 (file)
index 0000000..5bbcd75
--- /dev/null
@@ -0,0 +1,222 @@
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2016 RedHat and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+class opnfv::server (
+  $iptables_public_tcp_ports = [],
+  $iptables_public_udp_ports = [],
+  $iptables_rules4           = [],
+  $iptables_rules6           = [],
+  $sysadmins                 = [],
+  $enable_unbound            = true,
+  $purge_apt_sources         = true,
+) {
+  ###########################################################
+  # Classes for all hosts
+
+  include snmpd
+  include sudoers
+
+  class { 'iptables':
+    public_tcp_ports => $iptables_public_tcp_ports,
+    public_udp_ports => $iptables_public_udp_ports,
+    rules4           => $iptables_rules4,
+    rules6           => $iptables_rules6,
+  }
+
+  class { 'timezone':
+    timezone => 'Etc/UTC',
+  }
+
+  if ($enable_unbound) {
+    # $install_resolv_conf is never set in this class; omit it so the
+    # unbound module falls back to its own default instead of explicit undef
+    class { 'unbound': }
+  }
+
+  if ($::in_chroot) {
+    notify { 'rsyslog in chroot':
+      message => 'rsyslog not refreshed, running in chroot',
+    }
+    $rsyslog_notify = []
+  } else {
+    service { 'rsyslog':
+      ensure     => running,
+      enable     => true,
+      hasrestart => true,
+      require    => Package['rsyslog'],
+    }
+    $rsyslog_notify = [ Service['rsyslog'] ]
+  }
+
+  ###########################################################
+  # System tweaks
+
+  # Increase syslog message size in order to capture
+  # python tracebacks with syslog.
+  file { '/etc/rsyslog.d/99-maxsize.conf':
+    ensure  => present,
+    # Note MaxMessageSize is not a puppet variable.
+    content => '$MaxMessageSize 6k',
+    owner   => 'root',
+    group   => 'root',
+    mode    => '0644',
+    notify  => $rsyslog_notify,
+    require => Package['rsyslog'],
+  }
+
+  # We don't like byobu
+  file { '/etc/profile.d/Z98-byobu.sh':
+    ensure => absent,
+  }
+
+  if $::osfamily == 'Debian' {
+
+    # Ubuntu installs their whoopsie package by default, but it eats through
+    # memory and we don't need it on servers
+    package { 'whoopsie':
+      ensure => absent,
+    }
+
+    package { 'popularity-contest':
+      ensure => absent,
+    }
+  }
+
+  ###########################################################
+  # Package resources for all operating systems
+
+  package { 'at':
+    ensure => present,
+  }
+
+  package { 'lvm2':
+    ensure => present,
+  }
+
+  package { 'strace':
+    ensure => present,
+  }
+
+  package { 'tcpdump':
+    ensure => present,
+  }
+
+  package { 'rsyslog':
+    ensure => present,
+  }
+
+  package { 'git':
+    ensure => present,
+  }
+
+  package { 'rsync':
+    ensure => present,
+  }
+
+  case $::osfamily {
+    'RedHat': {
+      $packages = ['parted', 'puppet', 'wget', 'iputils']
+      $user_packages = ['emacs-nox', 'vim-enhanced']
+      $update_pkg_list_cmd = ''
+    }
+    'Debian': {
+      $packages = ['parted', 'puppet', 'wget', 'iputils-ping']
+      case $::operatingsystemrelease {
+        /^(12|14)\.(04|10)$/: {
+          $user_packages = ['emacs23-nox', 'vim-nox', 'iftop',
+                            'sysstat', 'iotop']
+        }
+        default: {
+          $user_packages = ['emacs-nox', 'vim-nox']
+        }
+      }
+      $update_pkg_list_cmd = 'apt-get update >/dev/null 2>&1;'
+    }
+    default: {
+      fail("Unsupported osfamily: ${::osfamily} The 'openstack_project' module only supports osfamily Debian or RedHat (slaves only).")
+    }
+  }
+  package { $packages:
+    ensure => present
+  }
+
+  ###########################################################
+  # Package resources for specific operating systems
+
+  case $::osfamily {
+    'Debian': {
+      # Purge and augment existing /etc/apt/sources.list if requested, and make
+      # sure apt-get update is run before any packages are installed
+      class { '::apt':
+        purge => { 'sources.list' => $purge_apt_sources }
+      }
+
+      # Make sure dig is installed
+      package { 'dnsutils':
+        ensure => present,
+      }
+    }
+    'RedHat': {
+      # Make sure dig is installed
+      package { 'bind-utils':
+        ensure => present,
+      }
+    }
+  }
+
+  ###########################################################
+  # Manage ntp
+
+  include '::ntp'
+
+  if ($::osfamily == 'RedHat') {
+    # Utils in ntp-perl are included in Debian's ntp package; we
+    # add it here for consistency. See also
+    # https://tickets.puppetlabs.com/browse/MODULES-3660
+    package { 'ntp-perl':
+      ensure => present
+    }
+    # NOTE(pabelanger): We need to ensure the ntpdate service starts on
+    # boot for centos-7. Currently, ntpd explicitly requires ntpdate to
+    # be running before ntpd can sync. As a result, if ntpdate is not
+    # running, ntpd will start but fail to sync because DNS is not
+    # properly set up.
+    package { 'ntpdate':
+      ensure => present,
+    }
+    service { 'ntpdate':
+      enable  => true,
+      require => Package['ntpdate'],
+    }
+  }
+
+  ###########################################################
+  # Manage python/pip
+
+  $desired_virtualenv = '13.1.0'
+  class { '::pip':
+    optional_settings => {
+      'extra-index-url' => '',
+    },
+    manage_pip_conf   => true,
+  }
+
+  if (( versioncmp($::virtualenv_version, $desired_virtualenv) < 0 )) {
+    $virtualenv_ensure = $desired_virtualenv
+  } else {
+    $virtualenv_ensure = present
+  }
+  package { 'virtualenv':
+    ensure   => $virtualenv_ensure,
+    provider => openstack_pip,
+    require  => Class['pip'],
+  }
+
+  # add hosts entries
+  create_resources('host', hiera_hash('hosts'))
+}
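One detail worth noting in opnfv::server is the virtualenv pin at the end: versioncmp() forces an upgrade only when the installed version is older than $desired_virtualenv, otherwise ensure stays at present. The same comparison can be replayed from a shell when debugging; a sketch assuming GNU sort with -V is available:

```bash
#!/bin/bash
# Shell equivalent of the versioncmp() gate in opnfv::server (sketch).
desired=13.1.0
current=$(virtualenv --version 2>/dev/null | awk '{print $NF}')

# sort -V orders version strings; if $current sorts first and differs from
# $desired, the installed copy is older and puppet would pin the new version.
if [ "$(printf '%s\n' "$current" "$desired" | sort -V | head -n1)" = "$current" ] \
       && [ "$current" != "$desired" ]; then
    echo "virtualenv $current < $desired: ensure => $desired"
else
    echo "virtualenv $current is recent enough: ensure => present"
fi
```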
diff --git a/utils/fetch_os_creds.sh b/utils/fetch_os_creds.sh
index e11df59..47fbc91 100755 (executable)
@@ -142,7 +142,6 @@ elif [ "$installer_type" == "compass" ]; then
     sshpass -p root ssh 2>/dev/null $ssh_options root@${installer_ip} \
         "scp $ssh_options ${controller_ip}:/opt/admin-openrc.sh ." &> /dev/null
     sshpass -p root scp 2>/dev/null $ssh_options root@${installer_ip}:~/admin-openrc.sh $dest_path &> /dev/null
-    echo 'export OS_REGION_NAME=regionOne' >> $dest_path
 
     info "This file contains the mgmt keystone API, we need the public one for our rc file"
     public_ip=$(sshpass -p root ssh $ssh_options root@${installer_ip} \
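Dropping the hard-coded OS_REGION_NAME means the rc file fetched from compass is now taken as-is. Whether a region ends up defined can be checked after the fetch; a one-liner sketch, with the rc path being an assumption:

```bash
# Verify the fetched rc file defines a region (sketch; path is an assumption).
source /home/opnfv/openrc && echo "${OS_REGION_NAME:-OS_REGION_NAME unset}"
```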
diff --git a/utils/jenkins-jnlp-connect.sh b/utils/jenkins-jnlp-connect.sh
index d268a28..4b710ca 100755 (executable)
@@ -38,6 +38,11 @@ main () {
         exit 1
     fi
 
+    if [[ $(pwd) != "$jenkinshome" ]]; then
+        echo "This script needs to be run from the jenkins users home dir"
+        exit 1
+    fi
+
     if [[ -z $slave_name || -z $slave_secret ]]; then
         echo "slave name or secret not defined, please edit this file to define it"
         exit 1
@@ -49,8 +54,8 @@ main () {
     fi
 
     if [[ $(whoami) != "root" ]]; then
-      if grep "^Defaults requiretty" /etc/sudoers
-        then echo "please comment out Defaults requiretty from /etc/sudoers"
+      if sudo -l | grep "requiretty"; then
+        echo "please comment out Defaults requiretty from /etc/sudoers"
         exit 1
       fi
     fi
diff --git a/utils/test/reporting/functest/reporting-status.py b/utils/test/reporting/functest/reporting-status.py
old mode 100644 (file)
new mode 100755 (executable)
index 622c375..7c943d8
@@ -99,8 +99,9 @@ for version in conf.versions:
                 for test_case in testValid:
                     test_case.checkRunnable(installer, s,
                                             test_case.getConstraints())
-                    logger.debug("testcase %s is %s" %
+                    logger.debug("testcase %s (%s) is %s" %
                                  (test_case.getDisplayName(),
+                                  test_case.getName(),
                                   test_case.isRunnable))
                     time.sleep(1)
                     if test_case.isRunnable:
@@ -131,8 +132,10 @@ for version in conf.versions:
                 for test_case in otherTestCases:
                     test_case.checkRunnable(installer, s,
                                             test_case.getConstraints())
-                    logger.info("testcase %s is %s" %
-                                (test_case.getName(), test_case.isRunnable))
+                    logger.debug("testcase %s (%s) is %s" %
+                                 (test_case.getDisplayName(),
+                                  test_case.getName(),
+                                  test_case.isRunnable))
                     time.sleep(1)
                     if test_case.isRunnable:
                         dbName = test_case.getDbName()
diff --git a/utils/test/reporting/functest/reporting-tempest.py b/utils/test/reporting/functest/reporting-tempest.py
old mode 100644 (file)
new mode 100755 (executable)
index e3f4e33..0dc1dd3
@@ -24,104 +24,108 @@ logger.info("nb tests executed > %s s " % criteria_nb_test)
 logger.info("test duration < %s s " % criteria_duration)
 logger.info("success rate > %s " % criteria_success_rate)
 
-for installer in installers:
-    # we consider the Tempest results of the last PERIOD days
-    url = conf.URL_BASE + "?case=tempest_smoke_serial"
-    request = Request(url + '&period=' + str(PERIOD) +
-                      '&installer=' + installer + '&version=master')
-    logger.info("Search tempest_smoke_serial results for installer %s"
-                % installer)
-    try:
-        response = urlopen(request)
-        k = response.read()
-        results = json.loads(k)
-    except URLError, e:
-        logger.error("Error code: %s" % e)
-
-    test_results = results['results']
-
-    scenario_results = {}
-    criteria = {}
-    errors = {}
-
-    for r in test_results:
-        # Retrieve all the scenarios per installer
-        # In Brahmaputra use version
-        # Since Colorado use scenario
-        if not r['scenario'] in scenario_results.keys():
-            scenario_results[r['scenario']] = []
-        scenario_results[r['scenario']].append(r)
-
-    for s, s_result in scenario_results.items():
-        scenario_results[s] = s_result[0:5]
-        # For each scenario, we build a result object to deal with
-        # results, criteria and error handling
-        for result in scenario_results[s]:
-            result["start_date"] = result["start_date"].split(".")[0]
-
-            # retrieve results
-            # ****************
-            nb_tests_run = result['details']['tests']
-            nb_tests_failed = result['details']['failures']
-            if nb_tests_run != 0:
-                success_rate = 100*(int(nb_tests_run) -
-                                    int(nb_tests_failed)) / int(nb_tests_run)
-            else:
-                success_rate = 0
-
-            result['details']["tests"] = nb_tests_run
-            result['details']["Success rate"] = str(success_rate) + "%"
-
-            # Criteria management
-            # *******************
-            crit_tests = False
-            crit_rate = False
-            crit_time = False
-
-            # Expect that at least 165 tests are run
-            if nb_tests_run >= criteria_nb_test:
-                crit_tests = True
-
-            # Expect that at least 90% of success
-            if success_rate >= criteria_success_rate:
-                crit_rate = True
-
-            # Expect that the suite duration is inferior to 30m
-            if result['details']['duration'] < criteria_duration:
-                crit_time = True
-
-            result['criteria'] = {'tests': crit_tests,
-                                  'Success rate': crit_rate,
-                                  'duration': crit_time}
-            try:
-                logger.debug("Scenario %s, Installer %s"
-                             % (s_result[1]['scenario'], installer))
-                logger.debug("Nb Test run: %s" % nb_tests_run)
-                logger.debug("Test duration: %s"
-                             % result['details']['duration'])
-                logger.debug("Success rate: %s" % success_rate)
-            except:
-                logger.error("Data format error")
-
-            # Error management
-            # ****************
-            try:
-                errors = result['details']['errors']
-                result['errors'] = errors.replace('{0}', '')
-            except:
-                logger.error("Error field not present (Brahamputra runs?)")
-
-    templateLoader = jinja2.FileSystemLoader(conf.REPORTING_PATH)
-    templateEnv = jinja2.Environment(loader=templateLoader)
-
-    TEMPLATE_FILE = "/template/index-tempest-tmpl.html"
-    template = templateEnv.get_template(TEMPLATE_FILE)
-
-    outputText = template.render(scenario_results=scenario_results,
-                                 items=items,
-                                 installer=installer)
-
-    with open(conf.REPORTING_PATH + "/release/master/index-tempest-" +
-              installer + ".html", "wb") as fh:
-        fh.write(outputText)
+# For all the versions
+for version in conf.versions:
+    for installer in conf.installers:
+        # we consider the Tempest results of the last PERIOD days
+        url = conf.URL_BASE + "?case=tempest_smoke_serial"
+        request = Request(url + '&period=' + str(PERIOD) +
+                          '&installer=' + installer +
+                          '&version=' + version)
+        logger.info("Search tempest_smoke_serial results for installer %s"
+                    " for version %s"
+                    % (installer, version))
+        try:
+            response = urlopen(request)
+            k = response.read()
+            results = json.loads(k)
+        except URLError, e:
+            logger.error("Error code: %s" % e)
+
+        test_results = results['results']
+
+        scenario_results = {}
+        criteria = {}
+        errors = {}
+
+        for r in test_results:
+            # Retrieve all the scenarios per installer
+            # In Brahmaputra use version
+            # Since Colorado use scenario
+            if not r['scenario'] in scenario_results.keys():
+                scenario_results[r['scenario']] = []
+            scenario_results[r['scenario']].append(r)
+
+        for s, s_result in scenario_results.items():
+            scenario_results[s] = s_result[0:5]
+            # For each scenario, we build a result object to deal with
+            # results, criteria and error handling
+            for result in scenario_results[s]:
+                result["start_date"] = result["start_date"].split(".")[0]
+
+                # retrieve results
+                # ****************
+                nb_tests_run = result['details']['tests']
+                nb_tests_failed = result['details']['failures']
+                if nb_tests_run != 0:
+                    success_rate = 100*(int(nb_tests_run) -
+                                        int(nb_tests_failed)) / int(nb_tests_run)
+                else:
+                    success_rate = 0
+
+                result['details']["tests"] = nb_tests_run
+                result['details']["Success rate"] = str(success_rate) + "%"
+
+                # Criteria management
+                # *******************
+                crit_tests = False
+                crit_rate = False
+                crit_time = False
+
+                # Expect that at least 165 tests are run
+                if nb_tests_run >= criteria_nb_test:
+                    crit_tests = True
+
+                # Expect a success rate of at least 90%
+                if success_rate >= criteria_success_rate:
+                    crit_rate = True
+
+                # Expect the suite duration to stay under 30 minutes
+                if result['details']['duration'] < criteria_duration:
+                    crit_time = True
+
+                result['criteria'] = {'tests': crit_tests,
+                                      'Success rate': crit_rate,
+                                      'duration': crit_time}
+                try:
+                    logger.debug("Scenario %s, Installer %s"
+                                 % (s_result[1]['scenario'], installer))
+                    logger.debug("Nb Test run: %s" % nb_tests_run)
+                    logger.debug("Test duration: %s"
+                                 % result['details']['duration'])
+                    logger.debug("Success rate: %s" % success_rate)
+                except:
+                    logger.error("Data format error")
+
+                # Error management
+                # ****************
+                try:
+                    errors = result['details']['errors']
+                    result['errors'] = errors.replace('{0}', '')
+                except:
+                    logger.error("Error field not present (Brahamputra runs?)")
+
+        templateLoader = jinja2.FileSystemLoader(conf.REPORTING_PATH)
+        templateEnv = jinja2.Environment(loader=templateLoader)
+
+        TEMPLATE_FILE = "/template/index-tempest-tmpl.html"
+        template = templateEnv.get_template(TEMPLATE_FILE)
+
+        outputText = template.render(scenario_results=scenario_results,
+                                     items=items,
+                                     installer=installer)
+
+        with open(conf.REPORTING_PATH + "/release/" + version +
+                  "/index-tempest-" + installer + ".html", "wb") as fh:
+            fh.write(outputText)
 logger.info("Tempest automatic reporting succesfully generated.")
diff --git a/utils/test/reporting/functest/reporting-vims.py b/utils/test/reporting/functest/reporting-vims.py
old mode 100644 (file)
new mode 100755 (executable)
index d0436ed..a83d92f
@@ -33,81 +33,87 @@ logger.info("****************************************")
 installers = conf.installers
 step_order = ["initialisation", "orchestrator", "vIMS", "sig_test"]
 logger.info("Start processing....")
-for installer in installers:
-    logger.info("Search vIMS results for installer %s" % installer)
-    request = Request(conf.URL_BASE + '?case=vims&installer=' + installer)
-
-    try:
-        response = urlopen(request)
-        k = response.read()
-        results = json.loads(k)
-    except URLError, e:
-        logger.error("Error code: %s" % e)
-
-    test_results = results['results']
-
-    logger.debug("Results found: %s" % test_results)
-
-    scenario_results = {}
-    for r in test_results:
-        if not r['scenario'] in scenario_results.keys():
-            scenario_results[r['scenario']] = []
-        scenario_results[r['scenario']].append(r)
-
-    for s, s_result in scenario_results.items():
-        scenario_results[s] = s_result[0:5]
-        logger.debug("Search for success criteria")
-        for result in scenario_results[s]:
-            result["start_date"] = result["start_date"].split(".")[0]
-            sig_test = result['details']['sig_test']['result']
-            if not sig_test == "" and isinstance(sig_test, list):
-                format_result = sig_test_format(sig_test)
-                if format_result['failures'] > format_result['passed']:
-                    result['details']['sig_test']['duration'] = 0
-                result['details']['sig_test']['result'] = format_result
-            nb_step_ok = 0
-            nb_step = len(result['details'])
-
-            for step_name, step_result in result['details'].items():
-                if step_result['duration'] != 0:
-                    nb_step_ok += 1
-                m, s = divmod(step_result['duration'], 60)
-                m_display = ""
-                if int(m) != 0:
-                    m_display += str(int(m)) + "m "
-                step_result['duration_display'] = m_display + str(int(s)) + "s"
-
-            result['pr_step_ok'] = 0
-            if nb_step != 0:
-                result['pr_step_ok'] = (float(nb_step_ok)/nb_step)*100
-            try:
-                logger.debug("Scenario %s, Installer %s"
-                             % (s_result[1]['scenario'], installer))
-                logger.debug("Orchestrator deployment: %s s"
-                             % result['details']['orchestrator']['duration'])
-                logger.debug("vIMS deployment: %s s"
-                             % result['details']['vIMS']['duration'])
-                logger.debug("Signaling testing: %s s"
-                             % result['details']['sig_test']['duration'])
-                logger.debug("Signaling testing results: %s"
-                             % format_result)
-            except:
-                logger.error("Data badly formatted")
-            logger.debug("------------------------------------------------")
-
-    templateLoader = jinja2.FileSystemLoader(conf.REPORTING_PATH)
-    templateEnv = jinja2.Environment(loader=templateLoader)
-
-    TEMPLATE_FILE = "/template/index-vims-tmpl.html"
-    template = templateEnv.get_template(TEMPLATE_FILE)
-
-    outputText = template.render(scenario_results=scenario_results,
-                                 step_order=step_order,
-                                 installer=installer)
-
-    with open(conf.REPORTING_PATH +
-              "/release/master/index-vims-" +
-              installer + ".html", "wb") as fh:
-        fh.write(outputText)
+
+# For all the versions
+for version in conf.versions:
+    for installer in installers:
+        logger.info("Search vIMS results for installer: %s, version: %s"
+                    % (installer, version))
+        request = Request(conf.URL_BASE + '?case=vims&installer=' +
+                          installer + '&version=' + version)
+
+        try:
+            response = urlopen(request)
+            k = response.read()
+            results = json.loads(k)
+        except URLError, e:
+            logger.error("Error code: %s" % e)
+
+        test_results = results['results']
+
+        logger.debug("Results found: %s" % test_results)
+
+        scenario_results = {}
+        for r in test_results:
+            if not r['scenario'] in scenario_results.keys():
+                scenario_results[r['scenario']] = []
+            scenario_results[r['scenario']].append(r)
+
+        for s, s_result in scenario_results.items():
+            scenario_results[s] = s_result[0:5]
+            logger.debug("Search for success criteria")
+            for result in scenario_results[s]:
+                result["start_date"] = result["start_date"].split(".")[0]
+                sig_test = result['details']['sig_test']['result']
+                if not sig_test == "" and isinstance(sig_test, list):
+                    format_result = sig_test_format(sig_test)
+                    if format_result['failures'] > format_result['passed']:
+                        result['details']['sig_test']['duration'] = 0
+                    result['details']['sig_test']['result'] = format_result
+                nb_step_ok = 0
+                nb_step = len(result['details'])
+
+                for step_name, step_result in result['details'].items():
+                    if step_result['duration'] != 0:
+                        nb_step_ok += 1
+                    m, s = divmod(step_result['duration'], 60)
+                    m_display = ""
+                    if int(m) != 0:
+                        m_display += str(int(m)) + "m "
+
+                    step_result['duration_display'] = m_display + str(int(s)) + "s"
+
+                result['pr_step_ok'] = 0
+                if nb_step != 0:
+                    result['pr_step_ok'] = (float(nb_step_ok)/nb_step)*100
+                try:
+                    logger.debug("Scenario %s, Installer %s"
+                                 % (s_result[1]['scenario'], installer))
+                    logger.debug("Orchestrator deployment: %s s"
+                                 % result['details']['orchestrator']['duration'])
+                    logger.debug("vIMS deployment: %s s"
+                                 % result['details']['vIMS']['duration'])
+                    logger.debug("Signaling testing: %s s"
+                                 % result['details']['sig_test']['duration'])
+                    logger.debug("Signaling testing results: %s"
+                                 % format_result)
+                except:
+                    logger.error("Data badly formatted")
+                logger.debug("----------------------------------------")
+
+        templateLoader = jinja2.FileSystemLoader(conf.REPORTING_PATH)
+        templateEnv = jinja2.Environment(loader=templateLoader)
+
+        TEMPLATE_FILE = "/template/index-vims-tmpl.html"
+        template = templateEnv.get_template(TEMPLATE_FILE)
+
+        outputText = template.render(scenario_results=scenario_results,
+                                     step_order=step_order,
+                                     installer=installer)
+
+        with open(conf.REPORTING_PATH +
+                  "/release/" + version + "/index-vims-" +
+                  installer + ".html", "wb") as fh:
+            fh.write(outputText)
 
 logger.info("vIMS report succesfully generated")
diff --git a/utils/test/reporting/functest/reportingConf.py b/utils/test/reporting/functest/reportingConf.py
index a58eeec..9230cb2 100644 (file)
 #
 # ****************************************************
 installers = ["apex", "compass", "fuel", "joid"]
-# installers = ["apex"]
 # list of test cases declared in testcases.yaml but that must not be
 # taken into account for the scoring
-blacklist = ["odl", "ovno", "security_scan", "copper", "moon"]
+blacklist = ["ovno", "security_scan", 'odl-sfc']
 # versions = ["brahmaputra", "master"]
-versions = ["master"]
-PERIOD = 10
-MAX_SCENARIO_CRITERIA = 18
+versions = ["master", "colorado"]
+PERIOD = 50
+MAX_SCENARIO_CRITERIA = 50
 # get the last 5 test results to determine the success criteria
 NB_TESTS = 5
 # REPORTING_PATH = "/usr/share/nginx/html/reporting/functest"
diff --git a/utils/test/reporting/functest/template/index-status-tmpl.html b/utils/test/reporting/functest/template/index-status-tmpl.html
index 0c3fa94..da2213b 100644 (file)
@@ -21,7 +21,7 @@
         <h3 class="text-muted">Functest status page ({{version}})</h3>
         <nav>
           <ul class="nav nav-justified">
-            <li class="active"><a href="index.html">Home</a></li>
+            <li class="active"><a href="http://testresults.opnfv.org/reporting/index.html">Home</a></li>
             <li><a href="index-status-apex.html">Apex</a></li>
             <li><a href="index-status-compass.html">Compass</a></li>
             <li><a href="index-status-fuel.html">Fuel</a></li>
                 </div>
         </div>
         {%- endfor %}
-    *: not used for scenario validation
+    see <a href="https://wiki.opnfv.org/pages/viewpage.action?pageId=6828617">Functest scoring wiki page</a> for details on scenario scoring
     </div>
     <div class="col-md-1"></div>
 </div>
diff --git a/utils/test/reporting/functest/template/index-tempest-tmpl.html b/utils/test/reporting/functest/template/index-tempest-tmpl.html
index c562143..42d7ed3 100644 (file)
@@ -21,7 +21,7 @@
         <h3 class="text-muted">Tempest status page</h3>
         <nav>
           <ul class="nav nav-justified">
-            <li class="active"><a href="index.html">Home</a></li>
+            <li class="active"><a href="http://testresults.opnfv.org/reporting/index.html">Home</a></li>
             <li><a href="index-tempest-apex.html">Apex</a></li>
             <li><a href="index-tempest-compass.html">Compass</a></li>
             <li><a href="index-tempest-fuel.html">Fuel</a></li>
diff --git a/utils/test/reporting/functest/template/index-vims-tmpl.html b/utils/test/reporting/functest/template/index-vims-tmpl.html
index 25499dc..3836be9 100644 (file)
@@ -21,7 +21,7 @@
         <h3 class="text-muted">vIMS status page</h3>
         <nav>
           <ul class="nav nav-justified">
-            <li class="active"><a href="index.html">Home</a></li>
+            <li class="active"><a href="http://testresults.opnfv.org/reporting/index.html">Home</a></li>
             <li><a href="index-vims-fuel.html">Fuel</a></li>
             <li><a href="index-vims-compass.html">Compass</a></li>
             <li><a href="index-vims-joid.html">JOID</a></li>
diff --git a/utils/test/reporting/functest/testCase.py b/utils/test/reporting/functest/testCase.py
index e19853a..a906f0d 100644 (file)
@@ -35,7 +35,12 @@ class TestCase(object):
                                'promise': 'Promise',
                                'moon': 'moon',
                                'copper': 'copper',
-                               'security_scan': 'security'
+                               'security_scan': 'security',
+                               'multisite': 'multisite',
+                               'domino': 'domino',
+                               'odl-sfc': 'SFC',
+                               'onos_sfc': 'SFC',
+                               'parser': 'parser'
                                }
         try:
             self.displayName = display_name_matrix[self.name]
@@ -122,8 +127,13 @@ class TestCase(object):
                              'doctor': 'doctor-notification',
                              'promise': 'promise',
                              'moon': 'moon',
-                             'copper': 'copper',
-                             'security_scan': 'security'
+                             'copper': 'copper-notification',
+                             'security_scan': 'security',
+                             'multisite': 'multisite',
+                             'domino': 'domino-multinode',
+                             'odl-sfc': 'odl-sfc',
+                             'onos_sfc': 'onos_sfc',
+                             'parser': 'parser-basics'
                              }
         try:
             return test_match_matrix[self.name]
diff --git a/utils/test/result_collection_api/update/README.md b/utils/test/result_collection_api/update/README.md
index d3aef7e..cb0e67b 100644 (file)
@@ -79,26 +79,21 @@ install ansible, please refer:
 ```
 http://docs.ansible.com/ansible/intro_installation.html
 ```
-run update.yml
+
+playbook-update.sh
+
 arguments:
-: host: remote server, must provide
-user: user used to access to remote server, default to root
-port: exposed port used to access to testapi, default to 8000
-image: testapi's docker image, default to opnfv/testapi:latest
-update_path: templates directory in remote server, default to /tmp/testapi
-mongodb_url: url of mongodb, default to 172.17.0.1, docker0 ip
-swagger_url: swagger access url, default to http://host:port
+: -h|--help           show this help text
+-r|--remote         remote server
+-u|--user           ssh username used to access the remote server
+-i|--identity       ssh public key file used to access the remote server
+-e|--execute        execute the update; if not set, only check the ansible connectivity
 
 usage:
 ```
-ansible-playbook update.yml --extra-vars "
-host=10.63.243.17
-user=zte
-port=8000
-image=opnfv/testapi
-update_path=/tmp/testapi
-mongodb_url=mongodb://172.17.0.1:27017
-swagger_url=http://10.63.243.17:8000"```
+ssh-agent ./playbook-update.sh -r testresults.opnfv.org -u serena -i ~/.ssh/id_rsa -e
+```
+
 > **Note:**
 
 > - If documents need to be changed, please modify file
diff --git a/utils/test/result_collection_api/update/playbook-update.sh b/utils/test/result_collection_api/update/playbook-update.sh
new file mode 100755 (executable)
index 0000000..86d30e4
--- /dev/null
@@ -0,0 +1,89 @@
+#!/bin/bash
+
+#
+# Author: Serena Feng (feng.xiaoewi@zte.com.cn)
+# Update testapi on remote server using ansible playbook automatically
+#
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+
+set -e
+
+usage="Script to trigger update automatically.
+
+usage:
+    bash $(basename "$0") [-h|--help] [-r <remote>] [-u <username>] [-i <identityfile>] [-e|--execute]
+
+where:
+    -h|--help           show this help text
+    -r|--remote         remote server
+    -u|--user           ssh username used to access the remote server
+    -i|--identity       ssh public key file used to access the remote server
+    -e|--execute        execute the update; if not set, only check the ansible connectivity"
+
+remote=testresults.opnfv.org
+user=root
+identity=~/.ssh/id_rsa
+hosts=./hosts
+execute=false
+
+# Parse parameters
+while [[ $# > 0 ]]
+    do
+    key="$1"
+    case $key in
+        -h|--help)
+            echo "$usage"
+            exit 0
+        ;;
+        -r|--remote)
+            remote="$2"
+            shift
+        ;;
+        -u|--user)
+            user="$2"
+            shift
+        ;;
+        -i|--identity)
+            identity="$2"
+            shift
+        ;;
+        -e|--execute)
+            execute=true
+        ;;
+        *)
+            echo "unknown option"
+            exit 1
+        ;;
+    esac
+    shift # past argument or value
+done
+
+echo $remote > $hosts
+
+echo "add authentication"
+ssh-add $identity
+
+echo "test ansible connectivity"
+ansible -i $hosts $remote -m ping -u $user
+
+echo "test playbook connectivity"
+ansible-playbook -i $hosts test.yml -e "host=$remote user=$user"
+
+if [ $execute == true ]; then
+    echo "do update"
+    ansible-playbook -i $hosts update.yml -e "host=$remote \
+    user=$user \
+    port=8082 \
+    image=opnfv/testapi \
+    update_path=/home/$user/testapi \
+    mongodb_url=mongodb://172.17.0.1:27017 \
+    swagger_url=http://testresults.opnfv.org/test"
+fi
+
+rm -fr $hosts
+ssh-agent -k
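The script assumes it runs inside an ssh agent: ssh-add needs one, and ssh-agent -k kills it on the way out, which is why the README wraps the call in ssh-agent. Two typical invocations, a connectivity check first and the real update second:

```bash
# Dry run: only the ansible ping and the test.yml playbook are executed.
ssh-agent ./playbook-update.sh -r testresults.opnfv.org -u serena -i ~/.ssh/id_rsa

# Real update: -e additionally runs update.yml with the settings baked in above.
ssh-agent ./playbook-update.sh -r testresults.opnfv.org -u serena -i ~/.ssh/id_rsa -e
```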
diff --git a/utils/test/result_collection_api/update/templates/rm_images.sh b/utils/test/result_collection_api/update/templates/rm_images.sh
new file mode 100755 (executable)
index 0000000..6722573
--- /dev/null
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+# Select candidate images by repository name once, then remove them by ID.
+image_ids=`docker images | awk 'NR != 1' | grep testapi | awk '{print $3}'`
+if [ -n "$image_ids" ]; then
+    echo "begin to rm images $image_ids"
+    echo "$image_ids" | xargs docker rmi -f &>/dev/null
+fi
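Since rm_images.sh selects images by repository name and removes them by ID, the selection is easy to preview without deleting anything; the same filter, read-only:

```bash
# Preview what rm_images.sh would remove (no rmi is issued).
docker images | awk 'NR != 1' | grep testapi | awk '{print $1":"$2" ("$3")"}'
```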
diff --git a/utils/test/result_collection_api/update/test.yml b/utils/test/result_collection_api/update/test.yml
new file mode 100644 (file)
index 0000000..a886872
--- /dev/null
@@ -0,0 +1,12 @@
+---
+- hosts: "{{ host }}"
+  remote_user: "{{ user }}"
+  become: yes
+  become_method: sudo
+  vars:
+    user: "root"
+  tasks:
+    - name: test connectivity
+      command: "echo hello {{ host }}"
+      register: result
+    - debug: msg="{{ result }}"
diff --git a/utils/test/result_collection_api/update/update.yml b/utils/test/result_collection_api/update/update.yml
index 0883956..e6663d9 100644 (file)
@@ -8,6 +8,7 @@
     port: "8000"
     update_path: "/tmp/testapi"
     image: "opnfv/testapi"
+    mode: "pull"
     mongodb_url: "mongodb://172.17.0.1:27017"
     swagger_url: "http://{{ host }}:{{ port }}"
   tasks:
       copy:
         src: templates/
         dest: "{{ update_path }}"
+    - name: transfer Dockerfile
+      copy:
+        src: ../docker/Dockerfile
+        dest: "{{ update_path }}"
+      when: mode == "build"
     - name: backup mongodb database
       command: "python {{ update_path }}/backup_mongodb.py -u {{ mongodb_url }} -o {{ update_path }}"
     - name: stop and remove old versions
       register: rm_result
     - debug: msg="{{ rm_result.stderr }}"
     - name: delete old docker images
-      command: docker rmi "{{ image }}"
+      command: bash "{{ update_path }}/rm_images.sh"
       ignore_errors: true
     - name: update mongodb
       command: "python {{ update_path }}/update_mongodb.py -u {{ mongodb_url }}"
+    - name: docker build image
+      command: "docker build -t {{ image }} {{ update_path }}"
+      when: mode == "build"
     - name: docker start testapi server
       command: docker run -dti -p "{{ port }}:8000"
                -e "mongodb_url={{ mongodb_url }}"