Merge "Apex: changes build result using groovy-postbuild"
author Aric Gardner <agardner@linuxfoundation.org>
Mon, 31 Jul 2017 14:58:15 +0000 (14:58 +0000)
committer Gerrit Code Review <gerrit@opnfv.org>
Mon, 31 Jul 2017 14:58:15 +0000 (14:58 +0000)
16 files changed:
jjb/apex/apex.yml
jjb/apex/scenarios.yaml.hidden
jjb/bottlenecks/bottlenecks-ci-jobs.yml
jjb/bottlenecks/bottlenecks-cleanup.sh
jjb/bottlenecks/bottlenecks-run-suite.sh
jjb/compass4nfv/compass-ci-jobs.yml
jjb/daisy4nfv/daisy-daily-jobs.yml
jjb/daisy4nfv/daisy-project-jobs.yml
jjb/dovetail/dovetail-run.sh
jjb/fuel/fuel-daily-jobs.yml
jjb/orchestra/orchestra-daily-jobs.yml [new file with mode: 0644]
jjb/orchestra/orchestra-project-jobs.yml [new file with mode: 0644]
jjb/ovn4nfv/ovn4nfv-daily-jobs.yml [new file with mode: 0644]
jjb/ovn4nfv/ovn4nfv-project-jobs.yml [new file with mode: 0644]
jjb/qtip/qtip-verify-jobs.yml
utils/create_pod_file.py

diff --git a/jjb/apex/apex.yml b/jjb/apex/apex.yml
index 8f8aa56..f47775f 100644 (file)
               <<: *master
         - 'os-odl-bgpvpn-ha':
               <<: *master
+        - 'os-ovn-nofeature-noha':
+              <<: *master
 
     platform:
          - 'baremetal'
                   abort-all-job: true
                   git-revision: false
 
+                - name: 'apex-os-ovn-nofeature-noha-baremetal-master'
+                  node-parameters: false
+                  current-parameters: false
+                  predefined-parameters: |
+                    OPNFV_CLEAN=yes
+                  kill-phase-on: NEVER
+                  abort-all-job: true
+                  git-revision: false
+
 
 
 # snapshot create
diff --git a/jjb/apex/scenarios.yaml.hidden b/jjb/apex/scenarios.yaml.hidden
index 748cd21..9f5b069 100644 (file)
@@ -4,6 +4,7 @@ master:
   - 'os-odl-nofeature-ha'
   - 'os-odl-nofeature-noha'
   - 'os-odl-bgpvpn-ha'
+  - 'os-ovn-nofeature-noha'
 danube:
   - 'os-nosdn-nofeature-noha'
   - 'os-nosdn-nofeature-ha'
diff --git a/jjb/bottlenecks/bottlenecks-ci-jobs.yml b/jjb/bottlenecks/bottlenecks-ci-jobs.yml
index c56ca19..455fa72 100644 (file)
@@ -70,8 +70,6 @@
        #     <<: *master
 #--------------------------------------------
     suite:
-        - 'rubbos'
-        - 'vstf'
         - 'posca_stress_traffic'
         - 'posca_stress_ping'
 
 
     publishers:
         - email:
-            recipients: hongbo.tianhongbo@huawei.com matthew.lijun@huawei.com liangqi1@huawei.com sunshine.wang@huawei.com
+            recipients: gabriel.yuyang@huawei.com, liyin11@huawei.com
 
 ########################
 # builder macros
diff --git a/jjb/bottlenecks/bottlenecks-cleanup.sh b/jjb/bottlenecks/bottlenecks-cleanup.sh
index 04e620c..d0e2088 100644 (file)
@@ -10,6 +10,7 @@
 
 #clean up correlated dockers and their images
 bash $WORKSPACE/docker/docker_cleanup.sh -d bottlenecks --debug
+bash $WORKSPACE/docker/docker_cleanup.sh -d Bottlenecks --debug
 bash $WORKSPACE/docker/docker_cleanup.sh -d yardstick --debug
 bash $WORKSPACE/docker/docker_cleanup.sh -d kibana --debug
 bash $WORKSPACE/docker/docker_cleanup.sh -d elasticsearch --debug
diff --git a/jjb/bottlenecks/bottlenecks-run-suite.sh b/jjb/bottlenecks/bottlenecks-run-suite.sh
index e6f8d1b..b81f4ca 100644 (file)
@@ -1,66 +1,70 @@
 #!/bin/bash
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
 #set -e
 [[ $GERRIT_REFSPEC_DEBUG == true ]] && redirect="/dev/stdout" || redirect="/dev/null"
 BOTTLENECKS_IMAGE=opnfv/bottlenecks
 REPORT="True"
 
-if [[ $SUITE_NAME == rubbos || $SUITE_NAME == vstf ]]; then
-    echo "Bottlenecks: to pull image $BOTTLENECKS_IMAGE:${DOCKER_TAG}"
-    docker pull $BOTTLENECKS_IMAGE:$DOCKER_TAG >${redirect}
+RELENG_REPO=${WORKSPACE}/releng
+[ -d ${RELENG_REPO} ] && rm -rf ${RELENG_REPO}
+git clone https://gerrit.opnfv.org/gerrit/releng ${RELENG_REPO} >${redirect}
 
-    echo "Bottlenecks: docker start running"
-    opts="--privileged=true -id"
-    envs="-e INSTALLER_TYPE=${INSTALLER_TYPE} -e INSTALLER_IP=${INSTALLER_IP} \
-          -e NODE_NAME=${NODE_NAME} -e EXTERNAL_NET=${EXTERNAL_NETWORK} \
-          -e BOTTLENECKS_BRANCH=${BOTTLENECKS_BRANCH} -e GERRIT_REFSPEC_DEBUG=${GERRIT_REFSPEC_DEBUG} \
-          -e BOTTLENECKS_DB_TARGET=${BOTTLENECKS_DB_TARGET} -e PACKAGE_URL=${PACKAGE_URL}"
-    cmd="sudo docker run ${opts} ${envs} $BOTTLENECKS_IMAGE:${DOCKER_TAG} /bin/bash"
-    echo "Bottlenecks: docker cmd running ${cmd}"
-    ${cmd} >${redirect}
+OPENRC=/tmp/admin_rc.sh
+OS_CACERT=/tmp/os_cacert
+
+if [[ $SUITE_NAME == *posca* ]]; then
+    POSCA_SCRIPT=/home/opnfv/bottlenecks/testsuites/posca
+
+    echo "BOTTLENECKS INFO: fetching os credentials from $INSTALLER_TYPE"
+    if [[ $INSTALLER_TYPE == 'compass' ]]; then
+        if [[ ${BRANCH} == 'master' ]]; then
+            ${RELENG_REPO}/utils/fetch_os_creds.sh -d ${OPENRC} -i ${INSTALLER_TYPE} -a ${INSTALLER_IP} -o ${OS_CACERT} >${redirect}
+            if [[ -f ${OS_CACERT} ]]; then
+                echo "BOTTLENECKS INFO: successfully fetching os_cacert for openstack: ${OS_CACERT}"
+            else
+                echo "BOTTLENECKS ERROR: couldn't find os_cacert file: ${OS_CACERT}, please check if the it's been properly provided."
+                exit 1
+            fi
+        else
+            ${RELENG_REPO}/utils/fetch_os_creds.sh -d ${OPENRC} -i ${INSTALLER_TYPE} -a ${INSTALLER_IP}  >${redirect}
+        fi
+    fi
 
-    echo "Bottlenecks: obtain docker id"
-    container_id=$(docker ps | grep "$BOTTLENECKS_IMAGE:${DOCKER_TAG}" | awk '{print $1}' | head -1)
-    if [ -z ${container_id} ]; then
-        echo "Cannot find $BOTTLENECKS_IMAGE container ID ${container_id}. Please check if it exists."
-        docker ps -a
+    if [[ -f ${OPENRC} ]]; then
+        echo "BOTTLENECKS INFO: openstack credentials path is ${OPENRC}"
+        if [[ $INSTALLER_TYPE == 'compass' && ${BRANCH} == 'master' ]]; then
+            echo "BOTTLENECKS INFO: writing ${OS_CACERT} to ${OPENRC}"
+            echo "export OS_CACERT=${OS_CACERT}" >> ${OPENRC}
+        fi
+        cat ${OPENRC}
+    else
+        echo "BOTTLENECKS ERROR: couldn't find openstack rc file: ${OPENRC}, please check if the it's been properly provided."
         exit 1
     fi
 
-    echo "Bottlenecks: to prepare openstack environment"
-    prepare_env="${REPO_DIR}/ci/prepare_env.sh"
-    echo "Bottlenecks: docker cmd running: ${prepare_env}"
-    sudo docker exec ${container_id} ${prepare_env}
+    echo "INFO: pulling Bottlenecks docker ${DOCKER_TAG}"
+    docker pull opnfv/bottlenecks:${DOCKER_TAG} >$redirect
 
-    echo "Bottlenecks: to run testsuite ${SUITE_NAME}"
-    run_testsuite="${REPO_DIR}/run_tests.sh -s ${SUITE_NAME}"
-    echo "Bottlenecks: docker cmd running: ${run_testsuite}"
-    sudo docker exec ${container_id} ${run_testsuite}
-else
-    echo "Bottlenecks: installing POSCA docker-compose"
-    if [ -d usr/local/bin/docker-compose ]; then
-        rm -rf usr/local/bin/docker-compose
-    fi
-    curl -L https://github.com/docker/compose/releases/download/1.11.0/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose
-    chmod +x /usr/local/bin/docker-compose
+    opts="--privileged=true -id"
+    docker_volume="-v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp"
 
-    echo "Bottlenecks: composing up dockers"
-    cd $WORKSPACE
-    docker-compose -f $WORKSPACE/docker/bottleneck-compose/docker-compose.yml up -d
+    cmd="docker run ${opts} --name bottlenecks-load-master ${docker_volume} opnfv/bottlenecks:${DOCKER_TAG} /bin/bash"
+    echo "BOTTLENECKS INFO: running docker run commond: ${cmd}"
+    ${cmd} >$redirect
+    sleep 5
 
-    echo "Bottlenecks: running traffic stress/factor testing in posca testsuite "
-    POSCA_SCRIPT=/home/opnfv/bottlenecks/testsuites/posca
     if [[ $SUITE_NAME == posca_stress_traffic ]]; then
         TEST_CASE=posca_factor_system_bandwidth
-        echo "Bottlenecks: pulling tutum/influxdb for yardstick"
-        docker pull tutum/influxdb:0.13
-        sleep 5
-        docker exec bottleneckcompose_bottlenecks_1 python ${POSCA_SCRIPT}/run_posca.py testcase $TEST_CASE $REPORT
+        docker exec bottlenecks-load-master python ${POSCA_SCRIPT}/../run_testsuite.py testcase $TEST_CASE $REPORT
     elif [[ $SUITE_NAME == posca_stress_ping ]]; then
         TEST_CASE=posca_factor_ping
-        sleep 5
-        docker exec bottleneckcompose_bottlenecks_1 python ${POSCA_SCRIPT}/run_posca.py testcase $TEST_CASE $REPORT
+        docker exec bottlenecks-load-master python ${POSCA_SCRIPT}/../run_testsuite.py testcase $TEST_CASE $REPORT
     fi
-
-    echo "Bottlenecks: cleaning up docker-compose images and dockers"
-    docker-compose -f $WORKSPACE/docker/bottleneck-compose/docker-compose.yml down --rmi all
-fi
\ No newline at end of file
+fi
diff --git a/jjb/compass4nfv/compass-ci-jobs.yml b/jjb/compass4nfv/compass-ci-jobs.yml
index 310347d..c98fd36 100644 (file)
 - trigger:
     name: 'compass-os-odl_l2-moon-ha-baremetal-master-trigger'
     triggers:
-        - timed: ''
+        - timed: '0 12 * * *'
 - trigger:
     name: 'compass-os-nosdn-kvm-ha-baremetal-master-trigger'
     triggers:
 - trigger:
     name: 'compass-k8-nosdn-nofeature-ha-baremetal-master-trigger'
     triggers:
-        - timed: '0 20 * * *'
+        - timed: ''
 - trigger:
     name: 'compass-os-odl-sfc-ha-baremetal-master-trigger'
     triggers:
 - trigger:
     name: 'compass-os-odl_l2-moon-ha-virtual-master-trigger'
     triggers:
-        - timed: ''
+        - timed: '0 22 * * *'
 - trigger:
     name: 'compass-os-nosdn-kvm-ha-virtual-master-trigger'
     triggers:
 - trigger:
     name: 'compass-k8-nosdn-nofeature-ha-virtual-master-trigger'
     triggers:
-        - timed: '0 18 * * *'
+        - timed: ''
 - trigger:
     name: 'compass-os-odl-sfc-ha-virtual-master-trigger'
     triggers:
diff --git a/jjb/daisy4nfv/daisy-daily-jobs.yml b/jjb/daisy4nfv/daisy-daily-jobs.yml
index 592e54d..6524d20 100644 (file)
         - 'os-nosdn-nofeature-noha':
             auto-trigger-name: 'daisy-{scenario}-{pod}-daily-{stream}-trigger'
         # ODL_L3 scenarios
-        - 'os-odl_l3-nofeature-noha':
+        - 'os-odl_l3-nofeature-ha':
             auto-trigger-name: 'daisy-{scenario}-{pod}-daily-{stream}-trigger'
         # ODL_L2 scenarios
-        - 'os-odl_l2-nofeature-noha':
+        - 'os-odl_l2-nofeature-ha':
             auto-trigger-name: 'daisy-{scenario}-{pod}-daily-{stream}-trigger'
 
     jobs:
             installer: '{installer}'
         - string:
             name: DEPLOY_SCENARIO
-            default: 'os-nosdn-nofeature-noha'
+            default: 'os-nosdn-nofeature-ha'
         - 'daisy-project-parameter':
             gs-pathname: '{gs-pathname}'
         - string:
 - trigger:
     name: 'daisy-os-nosdn-nofeature-ha-baremetal-daily-master-trigger'
     triggers:
-        - timed: ''
+        - timed: '0 12 * * *'
 # NOHA Scenarios
 - trigger:
     name: 'daisy-os-nosdn-nofeature-noha-baremetal-daily-master-trigger'
     triggers:
-        - timed: 'H 12 * * *'
+        - timed: ''
 # ODL_L3 Scenarios
 - trigger:
-    name: 'daisy-os-odl_l3-nofeature-noha-baremetal-daily-master-trigger'
+    name: 'daisy-os-odl_l3-nofeature-ha-baremetal-daily-master-trigger'
     triggers:
-        - timed: 'H 16 * * *'
+        - timed: '0 16 * * *'
 # ODL_L2 Scenarios
 - trigger:
-    name: 'daisy-os-odl_l2-nofeature-noha-baremetal-daily-master-trigger'
+    name: 'daisy-os-odl_l2-nofeature-ha-baremetal-daily-master-trigger'
     triggers:
         - timed: ''
 #-----------------------------------------------
 - trigger:
     name: 'daisy-os-nosdn-nofeature-ha-virtual-daily-master-trigger'
     triggers:
-        - timed: ''
+        - timed: '0 12 * * *'
 # NOHA Scenarios
 - trigger:
     name: 'daisy-os-nosdn-nofeature-noha-virtual-daily-master-trigger'
     triggers:
-        - timed: 'H 12 * * *'
+        - timed: ''
 # ODL_L3 Scenarios
 - trigger:
-    name: 'daisy-os-odl_l3-nofeature-noha-virtual-daily-master-trigger'
+    name: 'daisy-os-odl_l3-nofeature-ha-virtual-daily-master-trigger'
     triggers:
-        - timed: 'H 16 * * *'
+        - timed: '0 16 * * *'
 # ODL_L2 Scenarios
 - trigger:
-    name: 'daisy-os-odl_l2-nofeature-noha-virtual-daily-master-trigger'
+    name: 'daisy-os-odl_l2-nofeature-ha-virtual-daily-master-trigger'
     triggers:
         - timed: ''
 
diff --git a/jjb/daisy4nfv/daisy-project-jobs.yml b/jjb/daisy4nfv/daisy-project-jobs.yml
index 57e44e3..bae75dd 100644 (file)
             description: 'Git URL to use on this Jenkins Slave'
         - string:
             name: DEPLOY_SCENARIO
-            default: 'os-nosdn-nofeature-noha'
+            default: 'os-nosdn-nofeature-ha'
         - '{installer}-project-parameter':
             gs-pathname: '{gs-pathname}'
 
diff --git a/jjb/dovetail/dovetail-run.sh b/jjb/dovetail/dovetail-run.sh
index bf96fd4..418d66f 100755 (executable)
@@ -83,6 +83,8 @@ if [[ ${INSTALLER_TYPE} == compass ]]; then
     options="-u root -p root"
 elif [[ ${INSTALLER_TYPE} == fuel ]]; then
     options="-u root -p r00tme"
+elif [[ ${INSTALLER_TYPE} == apex ]]; then
+    options="-u stack -k /root/.ssh/id_rsa"
 else
     echo "Don't support to generate pod.yaml on ${INSTALLER_TYPE} currently."
     echo "HA test cases may not run properly."
@@ -115,6 +117,11 @@ if [ "$INSTALLER_TYPE" == "fuel" ]; then
     sshpass -p r00tme sudo scp $ssh_options root@${INSTALLER_IP}:~/.ssh/id_rsa ${DOVETAIL_CONFIG}/id_rsa
 fi
 
+if [ "$INSTALLER_TYPE" == "apex" ]; then
+    echo "Fetching id_rsa file from jump_server $INSTALLER_IP..."
+    scp $ssh_options stack@${INSTALLER_IP}:~/.ssh/id_rsa ${DOVETAIL_CONFIG}/id_rsa
+fi
+
 # sdnvpn test case needs to download this image first before running
 echo "Download image ubuntu-16.04-server-cloudimg-amd64-disk1.img ..."
 wget -q -nc http://artifacts.opnfv.org/sdnvpn/ubuntu-16.04-server-cloudimg-amd64-disk1.img -P ${DOVETAIL_CONFIG}
diff --git a/jjb/fuel/fuel-daily-jobs.yml b/jjb/fuel/fuel-daily-jobs.yml
index 7a57cb5..2d37f4a 100644 (file)
@@ -44,9 +44,6 @@
         - zte-pod1:
             slave-label: zte-pod1
             <<: *master
-        - zte-pod2:
-            slave-label: zte-pod2
-            <<: *master
         - zte-pod3:
             slave-label: zte-pod3
             <<: *master
 - trigger:
     name: 'fuel-os-nosdn-ovs-ha-zte-pod1-daily-master-trigger'
     triggers:
-        - timed: ''
+        - timed: '0 18 * * *'
 - trigger:
     name: 'fuel-os-nosdn-kvm_ovs_dpdk-ha-zte-pod1-daily-master-trigger'
     triggers:
     name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-zte-pod1-daily-master-trigger'
     triggers:
         - timed: ''
-
-#-----------------------------------------------
-# ZTE POD2 Triggers running against master branch
-#-----------------------------------------------
-- trigger:
-    name: 'fuel-os-nosdn-nofeature-ha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-odl_l2-nofeature-ha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-odl_l3-nofeature-ha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-onos-sfc-ha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-onos-nofeature-ha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-odl_l2-bgpvpn-ha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-odl_l2-sfc-ha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-kvm-ha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-ovs-ha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-kvm_ovs_dpdk-ha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-ha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-# NOHA Scenarios
-- trigger:
-    name: 'fuel-os-nosdn-nofeature-noha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-odl_l2-nofeature-noha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-odl_l3-nofeature-noha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-onos-sfc-noha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-onos-nofeature-noha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-odl_l2-sfc-noha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-odl_l2-bgpvpn-noha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-kvm-noha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-ovs-noha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
 #-----------------------------------------------
 # ZTE POD3 Triggers running against master branch
 #-----------------------------------------------
 - trigger:
     name: 'fuel-os-nosdn-nofeature-ha-zte-pod3-daily-master-trigger'
     triggers:
-        - timed: ''
+        - timed: '0 10 * * *'
 - trigger:
     name: 'fuel-os-odl_l2-nofeature-ha-zte-pod3-daily-master-trigger'
     triggers:
 - trigger:
     name: 'fuel-os-nosdn-kvm-ha-zte-pod3-daily-master-trigger'
     triggers:
-        - timed: '0 10 * * *'
+        - timed: ''
 - trigger:
     name: 'fuel-os-nosdn-ovs-ha-zte-pod3-daily-master-trigger'
     triggers:
     name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-zte-pod1-daily-danube-trigger'
     triggers:
         - timed: ''
-
-#-----------------------------------------------
-# ZTE POD2 Triggers running against danube branch
-#-----------------------------------------------
-- trigger:
-    name: 'fuel-os-nosdn-nofeature-ha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-odl_l2-nofeature-ha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-odl_l3-nofeature-ha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-onos-sfc-ha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-onos-nofeature-ha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-odl_l2-bgpvpn-ha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-odl_l2-sfc-ha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-kvm-ha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-ovs-ha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-kvm_ovs_dpdk-ha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-ha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-# NOHA Scenarios
-- trigger:
-    name: 'fuel-os-nosdn-nofeature-noha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-odl_l2-nofeature-noha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-odl_l3-nofeature-noha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-onos-sfc-noha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-onos-nofeature-noha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-odl_l2-sfc-noha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-odl_l2-bgpvpn-noha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-kvm-noha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-ovs-noha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
 #-----------------------------------------------
 # ZTE POD3 Triggers running against danube branch
 #-----------------------------------------------
diff --git a/jjb/orchestra/orchestra-daily-jobs.yml b/jjb/orchestra/orchestra-daily-jobs.yml
new file mode 100644 (file)
index 0000000..6baaab8
--- /dev/null
@@ -0,0 +1,98 @@
+###################################
+# job configuration for orchestra
+###################################
+- project:
+    name: 'orchestra-daily-jobs'
+
+    project: 'orchestra'
+
+#--------------------------------
+# BRANCH ANCHORS
+#--------------------------------
+    master: &master
+        stream: master
+        branch: '{stream}'
+        gs-pathname: ''
+        disabled: false
+
+#-------------------------------------------------------
+# POD, INSTALLER, AND BRANCH MAPPING
+#-------------------------------------------------------
+    pod:
+        - virtual:
+            slave-label: 'joid-virtual'
+            os-version: 'xenial'
+            <<: *master
+
+    jobs:
+        - 'orchestra-{pod}-daily-{stream}'
+
+################################
+# job template
+################################
+- job-template:
+    name: 'orchestra-{pod}-daily-{stream}'
+
+    project-type: multijob
+
+    disabled: '{obj:disabled}'
+
+    concurrent: false
+
+    properties:
+        - logrotate-default
+        - throttle:
+            enabled: true
+            max-total: 1
+            max-per-node: 1
+            option: 'project'
+
+    scm:
+        - git-scm
+
+    wrappers:
+        - ssh-agent-wrapper
+
+        - timeout:
+            timeout: 240
+            fail: true
+
+    triggers:
+         - timed: '@daily'
+
+    parameters:
+        - project-parameter:
+            project: '{project}'
+            branch: '{branch}'
+        - string:
+            name: DEPLOY_SCENARIO
+            default: os-nosdn-openbaton-ha
+        - '{slave-label}-defaults'
+
+    builders:
+        - description-setter:
+            description: "Built on $NODE_NAME"
+        - multijob:
+            name: deploy
+            condition: SUCCESSFUL
+            projects:
+                - name: 'joid-deploy-{pod}-daily-{stream}'
+                  current-parameters: false
+                  predefined-parameters: |
+                    DEPLOY_SCENARIO=os-nosdn-openbaton-ha
+                    COMPASS_OS_VERSION=xenial
+                  node-parameters: true
+                  kill-phase-on: FAILURE
+                  abort-all-job: true
+        - multijob:
+            name: functest
+            condition: SUCCESSFUL
+            projects:
+                - name: 'functest-joid-{pod}-daily-{stream}'
+                  current-parameters: false
+                  predefined-parameters: |
+                    DEPLOY_SCENARIO=os-nosdn-openbaton-ha
+                    FUNCTEST_SUITE_NAME=orchestra_ims
+                  node-parameters: true
+                  kill-phase-on: NEVER
+                  abort-all-job: true
diff --git a/jjb/orchestra/orchestra-project-jobs.yml b/jjb/orchestra/orchestra-project-jobs.yml
new file mode 100644 (file)
index 0000000..0f0c0f6
--- /dev/null
@@ -0,0 +1,50 @@
+- project:
+
+    name: orchestra-project
+
+    project: 'orchestra'
+
+    stream:
+        - master:
+            branch: '{stream}'
+            gs-pathname: ''
+
+    jobs:
+        - 'orchestra-build-{stream}'
+
+- job-template:
+    name: 'orchestra-build-{stream}'
+
+    concurrent: true
+
+    properties:
+        - logrotate-default
+        - throttle:
+            enabled: true
+            max-total: 1
+            max-per-node: 1
+            option: 'project'
+
+    parameters:
+        - project-parameter:
+            project: '{project}'
+            branch: '{branch}'
+
+    scm:
+        - git-scm
+
+    triggers:
+        - timed: 'H 23 * * *'
+
+    builders:
+        - 'orchestra-build-macro'
+
+- builder:
+    name: 'orchestra-build-macro'
+    builders:
+        - shell: |
+            #!/bin/bash
+
+            echo "Hello world!"
+
+
diff --git a/jjb/ovn4nfv/ovn4nfv-daily-jobs.yml b/jjb/ovn4nfv/ovn4nfv-daily-jobs.yml
new file mode 100644 (file)
index 0000000..ed6df41
--- /dev/null
@@ -0,0 +1,87 @@
+- project:
+    name: 'ovn4nfv-daily-jobs'
+
+    project: 'ovn4nfv'
+
+    master: &master
+        stream: master
+        branch: '{stream}'
+        gs-pathname: ''
+        disabled: false
+
+    pod:
+        - virtual:
+            slave-label: 'joid-virtual'
+            os-version: 'xenial'
+            <<: *master
+
+    jobs:
+        - 'ovn4nfv-{pod}-daily-{stream}'
+
+- job-template:
+    name: 'ovn4nfv-{pod}-daily-{stream}'
+
+    project-type: multijob
+
+    disabled: '{obj:disabled}'
+
+    concurrent: false
+
+    properties:
+        - logrotate-default
+        - throttle:
+            enabled: true
+            max-total: 1
+            max-per-node: 1
+            option: 'project'
+
+    scm:
+        - git-scm
+
+    wrappers:
+        - ssh-agent-wrapper
+
+        - timeout:
+            timeout: 240
+            fail: true
+
+    triggers:
+         - timed: '@daily'
+
+    parameters:
+        - project-parameter:
+            project: '{project}'
+            branch: '{branch}'
+        - string:
+            name: DEPLOY_SCENARIO
+            default: os-ovn-nofeature-noha
+        - '{slave-label}-defaults'
+
+    builders:
+        - description-setter:
+            description: "Built on $NODE_NAME"
+        - multijob:
+            name: deploy
+            condition: SUCCESSFUL
+            projects:
+                - name: 'joid-deploy-{pod}-daily-{stream}'
+                  current-parameters: false
+                  predefined-parameters: |
+                    DEPLOY_SCENARIO=os-ovn-nofeature-noha
+                    COMPASS_OS_VERSION=xenial
+                  node-parameters: true
+                  kill-phase-on: FAILURE
+                  abort-all-job: true
+        - multijob:
+            name: functest
+            condition: SUCCESSFUL
+            projects:
+                - name: 'functest-joid-{pod}-daily-{stream}'
+                  current-parameters: false
+                  predefined-parameters: |
+                    DEPLOY_SCENARIO=os-ovn-nofeature-noha
+                    FUNCTEST_SUITE_NAME=ovn4nfv_test_suite
+                  node-parameters: true
+                  kill-phase-on: NEVER
+                  abort-all-job: true
+
diff --git a/jjb/ovn4nfv/ovn4nfv-project-jobs.yml b/jjb/ovn4nfv/ovn4nfv-project-jobs.yml
new file mode 100644 (file)
index 0000000..805aa04
--- /dev/null
@@ -0,0 +1,51 @@
+- project:
+    name: ovn4nfv
+
+    project: '{name}'
+
+
+    stream:
+        - master:
+            branch: '{stream}'
+            gs-pathname: ''
+            disabled: false
+
+    jobs:
+        - 'ovn4nfv-build-{stream}'
+
+- job-template:
+    name: 'ovn4nfv-build-{stream}'
+
+    concurrent: true
+
+    disabled: '{obj:disabled}'
+
+    properties:
+        - logrotate-default
+        - throttle:
+            enabled: true
+            max-total: 1
+            max-per-node: 1
+            option: 'project'
+
+    parameters:
+        - project-parameter:
+            project: '{project}'
+            branch: '{branch}'
+
+    scm:
+        - git-scm
+
+    triggers:
+        - timed: 'H 23 * * *'
+
+    builders:
+        - 'ovn4nfv-build-macro'
+
+- builder:
+    name: 'ovn4nfv-build-macro'
+    builders:
+        - shell: |
+            #!/bin/bash
+
+            echo "hello world"
diff --git a/jjb/qtip/qtip-verify-jobs.yml b/jjb/qtip/qtip-verify-jobs.yml
index 57d24b4..4a7dd45 100644 (file)
 
             echo "Document link(s):" >> gerrit_comment.txt
             find "$local_path" | grep -e 'ipynb$' | \
-                sed -e "s|^$local_path|    https://nbviewer.jupyter.org/urls/$gs_path|" >> gerrit_comment.txt
+                sed -e "s|^$local_path|    https://nbviewer.jupyter.org/url/$gs_path|" >> gerrit_comment.txt
diff --git a/utils/create_pod_file.py b/utils/create_pod_file.py
index 197e493..e2c57d2 100644 (file)
@@ -49,7 +49,7 @@ def get_with_passwd():
                                        args.user, installer_pwd=args.password)
 
 
-def create_file(handler):
+def create_file(handler, INSTALLER_TYPE):
     """
     Create the yaml file of nodes info.
     As Yardstick required, node name must be node1, node2, ... and node1 must
@@ -62,27 +62,30 @@ def create_file(handler):
     nodes = handler.nodes
     node_list = []
     index = 1
+    user = 'root'
+    if INSTALLER_TYPE == 'apex':
+        user = 'heat-admin'
     for node in nodes:
         try:
             if node.roles[0].lower() == "controller":
                 node_info = {'name': "node%s" % index, 'role': node.roles[0],
-                             'ip': node.ip, 'user': 'root'}
+                             'ip': node.ip, 'user': user}
                 node_list.append(node_info)
                 index += 1
         except Exception:
             node_info = {'name': node.name, 'role': 'unknown', 'ip': node.ip,
-                         'user': 'root'}
+                         'user': user}
             node_list.append(node_info)
     for node in nodes:
         try:
             if node.roles[0].lower() == "compute":
                 node_info = {'name': "node%s" % index, 'role': node.roles[0],
-                             'ip': node.ip, 'user': 'root'}
+                             'ip': node.ip, 'user': user}
                 node_list.append(node_info)
                 index += 1
         except Exception:
             node_info = {'name': node.name, 'role': 'unknown', 'ip': node.ip,
-                         'user': 'root'}
+                         'user': user}
             node_list.append(node_info)
     if args.INSTALLER_TYPE == 'compass':
         for item in node_list:
@@ -105,7 +108,7 @@ def main():
     if not handler:
         print("Error: failed to get the node's handler.")
         return 1
-    create_file(handler)
+    create_file(handler, args.INSTALLER_TYPE)
 
 
 if __name__ == '__main__':