--- /dev/null
+---
+extends: default
+
+rules:
+  # 120 chars should be enough; don't fail if a line is longer
+  line-length:
+    max: 120
+    level: warning
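# Illustrative usage sketch (editorial aside, not part of the change): with this
# file saved as .yamllint at the repository root, yamllint picks it up
# automatically, e.g.:
#   yamllint jjb/
#   yamllint -c .yamllint path/to/file.yaml   (path is a placeholder)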
set -o nounset
set -o pipefail
-APEX_PKGS="common undercloud onos"
IPV6_FLAG=False
# log info to console
echo "--------------------------------------------------------"
echo
+sudo rm -rf /tmp/tmp*
+
if [ -z "$DEPLOY_SCENARIO" ]; then
echo "Deploy scenario not set!"
exit 1
BASE=$CONFIG
IMAGES=$RESOURCES
LIB="/var/opt/opnfv/lib"
-
+ sudo mkdir -p /var/log/apex
+ sudo chmod 777 /var/log/apex
+ cd /var/log/apex
fi
# Install Dependencies
set -o nounset
set -o pipefail
-APEX_PKGS="common undercloud onos"
-
# log info to console
echo "Downloading the Apex artifact. This could take some time..."
echo "--------------------------------------------------------"
# find version of RPM
VERSION_EXTENSION=$(echo $(basename $RPM_LIST) | grep -Eo '[0-9]+\.[0-9]+-([0-9]{8}|[a-z]+-[0-9]\.[0-9]+)')
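# Illustrative note (hypothetical file names): for an artifact such as
# opnfv-apex-5.0-20171005.noarch.rpm the grep above yields "5.0-20171005",
# while a name like opnfv-apex-5.0-danube-3.0.noarch.rpm would yield "5.0-danube-3.0".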
# build RPM List which already includes base Apex RPM
- for pkg in ${APEX_PKGS}; do
- RPM_LIST+=" ${RPM_INSTALL_PATH}/opnfv-apex-${pkg}-${VERSION_EXTENSION}.noarch.rpm"
- done
+ RPM_LIST+=" ${RPM_INSTALL_PATH}/opnfv-apex-undercloud-${VERSION_EXTENSION}.noarch.rpm"
+ RPM_LIST+=" ${RPM_INSTALL_PATH}/python34-opnfv-apex-${VERSION_EXTENSION}.noarch.rpm"
# remove old / install new RPMs
if rpm -q opnfv-apex > /dev/null; then
RPM_INSTALL_PATH=$BUILD_DIRECTORY/noarch
RPM_LIST=$RPM_INSTALL_PATH/$(basename $OPNFV_RPM_URL)
VERSION_EXTENSION=$(echo $(basename $OPNFV_RPM_URL) | sed 's/opnfv-apex-//')
- for pkg in common undercloud onos; do
- RPM_LIST+=" ${RPM_INSTALL_PATH}/opnfv-apex-${pkg}-${VERSION_EXTENSION}"
- done
+ RPM_LIST+=" ${RPM_INSTALL_PATH}/opnfv-apex-undercloud-${VERSION_EXTENSION}"
+ RPM_LIST+=" ${RPM_INSTALL_PATH}/python34-opnfv-apex-${VERSION_EXTENSION}"
SRPM_INSTALL_PATH=$BUILD_DIRECTORY
SRPM_LIST=$SRPM_INSTALL_PATH/$(basename $OPNFV_SRPM_URL)
VERSION_EXTENSION=$(echo $(basename $OPNFV_SRPM_URL) | sed 's/opnfv-apex-//')
- for pkg in common undercloud onos; do
- SRPM_LIST+=" ${SRPM_INSTALL_PATH}/opnfv-apex-${pkg}-${VERSION_EXTENSION}"
- done
+ SRPM_LIST+=" ${SRPM_INSTALL_PATH}/opnfv-apex-undercloud-${VERSION_EXTENSION}"
+ SRPM_LIST+=" ${SRPM_INSTALL_PATH}/python34-opnfv-apex-${VERSION_EXTENSION}"
if [[ -n "$SIGN_ARTIFACT" && "$SIGN_ARTIFACT" == "true" ]]; then
signrpm
branch-pattern: '**/{branch}'
file-paths:
- compare-type: ANT
- pattern: 'tests/**'
+ pattern: 'apex/tests/**'
properties:
- logrotate-default
- throttle:
pattern: 'lib/**'
- compare-type: ANT
pattern: 'config/**'
+ - compare-type: ANT
+ pattern: 'apex/**'
properties:
- logrotate-default
+ - build-blocker:
+ use-build-blocker: true
+ block-level: 'NODE'
+ blocking-jobs:
+ - 'apex-verify.*'
- throttle:
- max-per-node: 3
+ max-per-node: 1
max-total: 10
option: 'project'
pattern: 'lib/**'
- compare-type: ANT
pattern: 'config/**'
+ - compare-type: ANT
+ pattern: 'apex/**'
properties:
- logrotate-default
+ - build-blocker:
+ use-build-blocker: true
+ block-level: 'NODE'
+ blocking-jobs:
+ - 'apex-verify.*'
- throttle:
- max-per-node: 3
+ max-per-node: 1
max-total: 10
option: 'project'
kill-phase-on: FAILURE
abort-all-job: true
git-revision: true
+ - shell: |
+ echo DEPLOY_SCENARIO=$(echo $GERRIT_EVENT_COMMENT_TEXT | grep start-gate-scenario | grep -Eo 'os-.*') > detected_scenario
+ - inject:
+ properties-file: detected_scenario
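# Illustrative note (hypothetical comment text): a gate comment such as
#   "start-gate-scenario: os-nosdn-nofeature-ha"
# makes the shell step above write
#   DEPLOY_SCENARIO=os-nosdn-nofeature-ha
# to detected_scenario, which the inject step then exposes as a build
# parameter to the multijob phases below.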
- multijob:
name: functest-smoke
condition: SUCCESSFUL
- name: 'functest-apex-virtual-suite-{stream}'
current-parameters: false
predefined-parameters: |
- DEPLOY_SCENARIO={verify-scenario}
+ DEPLOY_SCENARIO=$DEPLOY_SCENARIO
FUNCTEST_SUITE_NAME=healthcheck
GERRIT_BRANCH=$GERRIT_BRANCH
GERRIT_REFSPEC=$GERRIT_REFSPEC
concurrent: true
disabled: false
-
+ quiet-period: 30
scm:
- git-scm-gerrit
wrappers:
- timeout:
- timeout: 120
+ timeout: 140
fail: true
parameters:
enable-condition: "def m = '$DEPLOY_SCENARIO' ==~ /os-(nosdn-nofeature|nosdn-kvm|odl_l3-fdio)-ha/"
abort-all-job: false
git-revision: false
- - multijob:
- name: StorPerf
- condition: ALWAYS
- projects:
- - name: 'storperf-apex-baremetal-daily-{scenario_stream}'
- node-parameters: true
- current-parameters: false
- predefined-parameters:
- DEPLOY_SCENARIO=$DEPLOY_SCENARIO
- kill-phase-on: NEVER
- abort-all-job: false
- git-revision: false
+# - multijob:
+# name: StorPerf
+# condition: ALWAYS
+# projects:
+# - name: 'storperf-apex-baremetal-daily-{scenario_stream}'
+# node-parameters: true
+# current-parameters: false
+# predefined-parameters:
+# DEPLOY_SCENARIO=$DEPLOY_SCENARIO
+# kill-phase-on: NEVER
+# abort-all-job: false
+# git-revision: false
# Build status is always success due to the conditional plugin prefetching
# the build status before multijob phases execute
# - conditional-step:
branch-pattern: '**/{branch}'
file-paths:
- compare-type: ANT
- pattern: 'tests/**'
+ pattern: 'apex/tests/**'
properties:
- logrotate-default
- throttle:
pattern: 'lib/**'
- compare-type: ANT
pattern: 'config/**'
+ - compare-type: ANT
+ pattern: 'apex/**'
properties:
- logrotate-default
+ - build-blocker:
+ use-build-blocker: true
+ block-level: 'NODE'
+ blocking-jobs:
+ - 'apex-verify.*'
- throttle:
- max-per-node: 3
+ max-per-node: 1
max-total: 10
option: 'project'
pattern: 'lib/**'
- compare-type: ANT
pattern: 'config/**'
+ - compare-type: ANT
+ pattern: 'apex/**'
properties:
- logrotate-default
+ - build-blocker:
+ use-build-blocker: true
+ block-level: 'NODE'
+ blocking-jobs:
+ - 'apex-verify.*'
- throttle:
- max-per-node: 3
+ max-per-node: 1
max-total: 10
option: 'project'
kill-phase-on: FAILURE
abort-all-job: true
git-revision: true
+ - shell: |
+ echo DEPLOY_SCENARIO=$(echo $GERRIT_EVENT_COMMENT_TEXT | grep start-gate-scenario | grep -Eo 'os-.*') > detected_scenario
+ - inject:
+ properties-file: detected_scenario
- multijob:
name: functest-smoke
condition: SUCCESSFUL
- name: 'functest-apex-virtual-suite-{stream}'
current-parameters: false
predefined-parameters: |
- DEPLOY_SCENARIO={verify-scenario}
+ DEPLOY_SCENARIO=$DEPLOY_SCENARIO
FUNCTEST_SUITE_NAME=healthcheck
GERRIT_BRANCH=$GERRIT_BRANCH
GERRIT_REFSPEC=$GERRIT_REFSPEC
concurrent: true
disabled: false
-
+ quiet-period: 30
scm:
- git-scm-gerrit
wrappers:
- timeout:
- timeout: 120
+ timeout: 140
fail: true
parameters:
enable-condition: "def m = '$DEPLOY_SCENARIO' ==~ /os-(nosdn-nofeature|nosdn-kvm|odl_l3-fdio)-ha/"
abort-all-job: false
git-revision: false
- - multijob:
- name: StorPerf
- condition: ALWAYS
- projects:
- - name: 'storperf-apex-baremetal-daily-{scenario_stream}'
- node-parameters: true
- current-parameters: false
- predefined-parameters:
- DEPLOY_SCENARIO=$DEPLOY_SCENARIO
- kill-phase-on: NEVER
- abort-all-job: false
- git-revision: false
+# - multijob:
+# name: StorPerf
+# condition: ALWAYS
+# projects:
+# - name: 'storperf-apex-baremetal-daily-{scenario_stream}'
+# node-parameters: true
+# current-parameters: false
+# predefined-parameters:
+# DEPLOY_SCENARIO=$DEPLOY_SCENARIO
+# kill-phase-on: NEVER
+# abort-all-job: false
+# git-revision: false
# Build status is always success due to the conditional plugin prefetching
# the build status before multijob phases execute
# - conditional-step:
slave-label: arm-pod2
installer: fuel
<<: *euphrates
- - arm-pod3:
- slave-label: arm-pod3
+ - arm-pod5:
+ slave-label: arm-pod5
installer: fuel
<<: *euphrates
- arm-pod4:
slave-label: arm-pod4
installer: fuel
<<: *euphrates
- - arm-virtual1:
- slave-label: arm-virtual1
+ - arm-virtual2:
+ slave-label: arm-virtual2
installer: fuel
<<: *euphrates
#--------------------------------
slave-label: arm-pod2
installer: fuel
<<: *master
- - arm-pod3:
- slave-label: arm-pod3
+ - arm-pod5:
+ slave-label: arm-pod5
installer: fuel
<<: *master
- arm-pod4:
slave-label: arm-pod4
installer: fuel
<<: *master
- - arm-virtual1:
- slave-label: arm-virtual1
+ - arm-virtual2:
+ slave-label: arm-virtual2
installer: fuel
<<: *master
#--------------------------------
- trigger:
name: 'fuel-os-nosdn-nofeature-ha-armband-baremetal-master-trigger'
triggers:
- - timed: ''
+ - timed: '0 1 * * *'
- trigger:
name: 'fuel-os-odl_l3-nofeature-ha-armband-baremetal-master-trigger'
triggers:
- - timed: ''
+ - timed: '0 16 * * *'
- trigger:
name: 'fuel-os-odl_l2-bgpvpn-ha-armband-baremetal-master-trigger'
triggers:
# Enea Armband Non CI Virtual Triggers running against euphrates branch
#--------------------------------------------------------------------
- trigger:
- name: 'fuel-os-odl_l2-nofeature-ha-arm-virtual1-euphrates-trigger'
+ name: 'fuel-os-odl_l2-nofeature-ha-arm-virtual2-euphrates-trigger'
triggers:
- timed: ''
- trigger:
- name: 'fuel-os-nosdn-nofeature-ha-arm-virtual1-euphrates-trigger'
+ name: 'fuel-os-nosdn-nofeature-ha-arm-virtual2-euphrates-trigger'
triggers:
- timed: ''
- trigger:
- name: 'fuel-os-odl_l3-nofeature-ha-arm-virtual1-euphrates-trigger'
+ name: 'fuel-os-odl_l3-nofeature-ha-arm-virtual2-euphrates-trigger'
triggers:
- timed: ''
- trigger:
- name: 'fuel-os-odl_l2-bgpvpn-ha-arm-virtual1-euphrates-trigger'
+ name: 'fuel-os-odl_l2-bgpvpn-ha-arm-virtual2-euphrates-trigger'
triggers:
- timed: ''
- trigger:
- name: 'fuel-os-odl_l2-nofeature-noha-arm-virtual1-euphrates-trigger'
+ name: 'fuel-os-odl_l2-nofeature-noha-arm-virtual2-euphrates-trigger'
triggers:
- timed: ''
- trigger:
- name: 'fuel-os-odl_l2-sfc-ha-arm-virtual1-euphrates-trigger'
+ name: 'fuel-os-odl_l2-sfc-ha-arm-virtual2-euphrates-trigger'
triggers:
- timed: ''
- trigger:
- name: 'fuel-os-odl_l2-sfc-noha-arm-virtual1-euphrates-trigger'
+ name: 'fuel-os-odl_l2-sfc-noha-arm-virtual2-euphrates-trigger'
triggers:
- timed: ''
# Enea Armband Non CI Virtual Triggers running against master branch
#--------------------------------------------------------------------
- trigger:
- name: 'fuel-os-odl_l2-nofeature-ha-arm-virtual1-master-trigger'
+ name: 'fuel-os-odl_l2-nofeature-ha-arm-virtual2-master-trigger'
triggers:
- timed: ''
- trigger:
- name: 'fuel-os-nosdn-nofeature-ha-arm-virtual1-master-trigger'
+ name: 'fuel-os-nosdn-nofeature-ha-arm-virtual2-master-trigger'
triggers:
- timed: ''
- trigger:
- name: 'fuel-os-odl_l3-nofeature-ha-arm-virtual1-master-trigger'
+ name: 'fuel-os-odl_l3-nofeature-ha-arm-virtual2-master-trigger'
triggers:
- timed: ''
- trigger:
- name: 'fuel-os-odl_l2-bgpvpn-ha-arm-virtual1-master-trigger'
+ name: 'fuel-os-odl_l2-bgpvpn-ha-arm-virtual2-master-trigger'
triggers:
- timed: ''
- trigger:
- name: 'fuel-os-odl_l2-nofeature-noha-arm-virtual1-master-trigger'
+ name: 'fuel-os-odl_l2-nofeature-noha-arm-virtual2-master-trigger'
triggers:
- timed: ''
- trigger:
- name: 'fuel-os-odl_l2-sfc-ha-arm-virtual1-master-trigger'
+ name: 'fuel-os-odl_l2-sfc-ha-arm-virtual2-master-trigger'
triggers:
- timed: ''
- trigger:
- name: 'fuel-os-odl_l2-sfc-noha-arm-virtual1-master-trigger'
+ name: 'fuel-os-odl_l2-sfc-noha-arm-virtual2-master-trigger'
triggers:
- timed: ''
# Enea Armband POD 5 Triggers running against master branch
#----------------------------------------------------------
- trigger:
- name: 'fuel-os-odl_l2-nofeature-ha-arm-pod3-master-trigger'
+ name: 'fuel-os-odl_l2-nofeature-ha-arm-pod5-master-trigger'
triggers:
- timed: ''
- trigger:
- name: 'fuel-os-nosdn-nofeature-ha-arm-pod3-master-trigger'
+ name: 'fuel-os-nosdn-nofeature-ha-arm-pod5-master-trigger'
triggers:
- timed: ''
- trigger:
- name: 'fuel-os-odl_l3-nofeature-ha-arm-pod3-master-trigger'
+ name: 'fuel-os-odl_l3-nofeature-ha-arm-pod5-master-trigger'
triggers:
- timed: ''
- trigger:
- name: 'fuel-os-odl_l2-bgpvpn-ha-arm-pod3-master-trigger'
+ name: 'fuel-os-odl_l2-bgpvpn-ha-arm-pod5-master-trigger'
triggers:
- timed: ''
- trigger:
- name: 'fuel-os-odl_l2-nofeature-noha-arm-pod3-master-trigger'
+ name: 'fuel-os-odl_l2-nofeature-noha-arm-pod5-master-trigger'
triggers:
- timed: ''
- trigger:
- name: 'fuel-os-odl_l2-sfc-ha-arm-pod3-master-trigger'
+ name: 'fuel-os-odl_l2-sfc-ha-arm-pod5-master-trigger'
triggers:
- timed: ''
- trigger:
- name: 'fuel-os-odl_l2-sfc-noha-arm-pod3-master-trigger'
+ name: 'fuel-os-odl_l2-sfc-noha-arm-pod5-master-trigger'
triggers:
- timed: ''
#---------------------------------------------------------------
# Enea Armband POD 5 Triggers running against euphrates branch
#---------------------------------------------------------------
- trigger:
- name: 'fuel-os-odl_l2-nofeature-ha-arm-pod3-euphrates-trigger'
+ name: 'fuel-os-odl_l2-nofeature-ha-arm-pod5-euphrates-trigger'
triggers:
- timed: ''
- trigger:
- name: 'fuel-os-nosdn-nofeature-ha-arm-pod3-euphrates-trigger'
+ name: 'fuel-os-nosdn-nofeature-ha-arm-pod5-euphrates-trigger'
triggers:
- timed: ''
- trigger:
- name: 'fuel-os-odl_l3-nofeature-ha-arm-pod3-euphrates-trigger'
+ name: 'fuel-os-odl_l3-nofeature-ha-arm-pod5-euphrates-trigger'
triggers:
- timed: ''
- trigger:
- name: 'fuel-os-odl_l2-bgpvpn-ha-arm-pod3-euphrates-trigger'
+ name: 'fuel-os-odl_l2-bgpvpn-ha-arm-pod5-euphrates-trigger'
triggers:
- timed: ''
- trigger:
- name: 'fuel-os-odl_l2-nofeature-noha-arm-pod3-euphrates-trigger'
+ name: 'fuel-os-odl_l2-nofeature-noha-arm-pod5-euphrates-trigger'
triggers:
- timed: ''
- trigger:
- name: 'fuel-os-odl_l2-sfc-ha-arm-pod3-euphrates-trigger'
+ name: 'fuel-os-odl_l2-sfc-ha-arm-pod5-euphrates-trigger'
triggers:
- timed: ''
- trigger:
- name: 'fuel-os-odl_l2-sfc-noha-arm-pod3-euphrates-trigger'
+ name: 'fuel-os-odl_l2-sfc-noha-arm-pod5-euphrates-trigger'
triggers:
- timed: ''
#--------------------------------------------------------------------------
# set deployment parameters
export TMPDIR=${WORKSPACE}/tmpdir
-# arm-pod4 is an aarch64 jenkins slave for the same POD as the
-# x86 jenkins slave arm-pod3; therefore we use the same pod name
-# to deploy the pod from both jenkins slaves
-if [[ "${NODE_NAME}" == "arm-pod4" ]]; then
- NODE_NAME="arm-pod3"
-fi
-
LAB_NAME=${NODE_NAME/-*}
POD_NAME=${NODE_NAME/*-}
[ -d ${RELENG_REPO} ] && rm -rf ${RELENG_REPO}
git clone https://gerrit.opnfv.org/gerrit/releng ${RELENG_REPO} >${redirect}
+YARDSTICK_REPO=${WORKSPACE}/yardstick
+[ -d ${YARDSTICK_REPO} ] && rm -rf ${YARDSTICK_REPO}
+git clone https://gerrit.opnfv.org/gerrit/yardstick ${YARDSTICK_REPO} >${redirect}
+
OPENRC=/tmp/admin_rc.sh
OS_CACERT=/tmp/os_cacert
echo "Don't support to generate pod.yaml on ${INSTALLER_TYPE} currently."
fi
- cmd="sudo python ${RELENG_REPO}/utils/create_pod_file.py -t ${INSTALLER_TYPE} \
+ if [[ ${INSTALLER_TYPE} != compass ]]; then
+ cmd="sudo python ${RELENG_REPO}/utils/create_pod_file.py -t ${INSTALLER_TYPE} \
-i ${INSTALLER_IP} ${options} -f ${BOTTLENECKS_CONFIG}/pod.yaml \
-s ${BOTTLENECKS_CONFIG}/id_rsa"
- echo ${cmd}
- ${cmd}
+ echo ${cmd}
+ ${cmd}
+ else
+ cmd="sudo cp ${YARDSTICK_REPO}/etc/yardstick/nodes/compass_sclab_virtual/pod.yaml \
+ ${BOTTLENECKS_CONFIG}"
+ echo ${cmd}
+ ${cmd}
+ fi
deactivate
#------------------------------------
pod:
- baremetal:
- slave-label: compass-baremetal
+ slave-label: compass-baremetal-branch
os-version: 'xenial'
<<: *danube
#-----------------------------------
- trigger:
name: 'daisy-os-nosdn-nofeature-ha-baremetal-daily-master-trigger'
triggers:
- - timed: '0 12 * * *'
+ - timed: '0 16 * * *'
# Basic NOHA Scenarios
- trigger:
name: 'daisy-os-nosdn-nofeature-noha-baremetal-daily-master-trigger'
- trigger:
name: 'daisy-os-odl-nofeature-ha-baremetal-daily-master-trigger'
triggers:
- - timed: '0 16 * * *'
+ - timed: '0 12 * * *'
#-----------------------------------------------
# Triggers for job running on daisy-virtual against master branch
#-----------------------------------------------
pod:
- arm-pod2:
slave-label: '{pod}'
- - arm-pod3:
+ - arm-pod5:
slave-label: '{pod}'
jobs:
SUT: fuel
auto-trigger-name: 'daily-trigger-disabled'
<<: *master
- - arm-pod3:
+ - arm-pod5:
slave-label: '{pod}'
SUT: fuel
auto-trigger-name: 'daily-trigger-disabled'
<<: *master
- - arm-virtual1:
+ - arm-virtual2:
slave-label: '{pod}'
SUT: fuel
auto-trigger-name: 'daily-trigger-disabled'
- build-name:
name: '$BUILD_NUMBER - Scenario: $DEPLOY_SCENARIO'
- timeout:
- timeout: 180
+ timeout: 240
abort: true
- fix-workspace-permissions
# clean up dependent project docker images which have no containers and whose image tag is None
clean_images=(opnfv/functest opnfv/yardstick opnfv/testapi mongo)
for clean_image in "${clean_images[@]}"; do
- echo "Removing image $image_id, which has no containers and image tag is None"
dangling_images=($(docker images -f "dangling=true" | grep ${clean_image} | awk '{print $3}'))
if [[ -n ${dangling_images} ]]; then
for image_id in "${dangling_images[@]}"; do
+            echo "Removing image $image_id, which has no containers and whose image tag is None"
docker rmi $image_id >${redirect}
done
fi
done
-echo "Remove containers with image opnfv/dovetail:<None>..."
+echo "Remove dovetail images with tag None and containers with these images ..."
dangling_images=($(docker images -f "dangling=true" | grep opnfv/dovetail | awk '{print $3}'))
if [[ -n ${dangling_images} ]]; then
for image_id in "${dangling_images[@]}"; do
docker ps -a | grep opnfv/dovetail | awk '{print $1}' | xargs docker rm -f >${redirect}
fi
-echo "Remove dovetail existing images if exist..."
-if [[ ! -z $(docker images | grep opnfv/dovetail) ]]; then
- echo "Docker images to remove:"
- docker images | head -1 && docker images | grep opnfv/dovetail >${redirect}
- image_tags=($(docker images | grep opnfv/dovetail | awk '{print $2}'))
- for tag in "${image_tags[@]}"; do
- echo "Removing docker image opnfv/dovetail:$tag..."
- docker rmi opnfv/dovetail:$tag >${redirect}
- done
-fi
+#echo "Remove dovetail existing images if exist..."
+#if [[ ! -z $(docker images | grep opnfv/dovetail) ]]; then
+# echo "Docker images to remove:"
+# docker images | head -1 && docker images | grep opnfv/dovetail >${redirect}
+# image_tags=($(docker images | grep opnfv/dovetail | awk '{print $2}'))
+# for tag in "${image_tags[@]}"; do
+# echo "Removing docker image opnfv/dovetail:$tag..."
+# docker rmi opnfv/dovetail:$tag >${redirect}
+# done
+#fi
sudo scp $ssh_options stack@${INSTALLER_IP}:~/.ssh/id_rsa ${DOVETAIL_CONFIG}/id_rsa
fi
+image_path=${HOME}/opnfv/dovetail/images
+if [[ ! -d ${image_path} ]]; then
+ mkdir -p ${image_path}
+fi
# the sdnvpn test case needs this image to be downloaded before running
-echo "Download image ubuntu-16.04-server-cloudimg-amd64-disk1.img ..."
-wget -q -nc http://artifacts.opnfv.org/sdnvpn/ubuntu-16.04-server-cloudimg-amd64-disk1.img -P ${DOVETAIL_CONFIG}
+ubuntu_image=${image_path}/ubuntu-16.04-server-cloudimg-amd64-disk1.img
+if [[ ! -f ${ubuntu_image} ]]; then
+ echo "Download image ubuntu-16.04-server-cloudimg-amd64-disk1.img ..."
+ wget -q -nc http://artifacts.opnfv.org/sdnvpn/ubuntu-16.04-server-cloudimg-amd64-disk1.img -P ${image_path}
+fi
+sudo cp ${ubuntu_image} ${DOVETAIL_CONFIG}
# functest needs this image to be downloaded before running
-echo "Download image cirros-0.3.5-x86_64-disk.img ..."
-wget -q -nc http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img -P ${DOVETAIL_CONFIG}
+cirros_image=${image_path}/cirros-0.3.5-x86_64-disk.img
+if [[ ! -f ${cirros_image} ]]; then
+ echo "Download image cirros-0.3.5-x86_64-disk.img ..."
+ wget -q -nc http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img -P ${image_path}
+fi
+sudo cp ${cirros_image} ${DOVETAIL_CONFIG}
+
opts="--privileged=true -id"
branch: 'stable/{stream}'
disabled: true
gs-pathname: '/{stream}'
+ danube: &danube
+ stream: danube
+ branch: 'stable/{stream}'
+ disabled: false
+ gs-pathname: '/{stream}'
#--------------------------------
# POD, INSTALLER, AND BRANCH MAPPING
#--------------------------------
- zte-pod3:
slave-label: zte-pod3
<<: *euphrates
+ - zte-pod1:
+ slave-label: zte-pod1
+ <<: *danube
#--------------------------------
# scenarios
#--------------------------------
# 2.here the stream means the SUT stream, dovetail stream is defined in its own job
# 3.only debug testsuite here(refstack, ha, ipv6, bgpvpn)
# 4.not used for release criteria or compliance,
- # only to debug the dovetail tool bugs with bgpvpn
- # 5,only run against scenario os-odl_l2-bgpvpn-ha(regex used here, can extend to more scenarios future)
+ # only to debug the dovetail tool bugs with bgpvpn and nosdn-nofeature
+  # 5.only run against scenario os-odl_l2-bgpvpn-ha (regex used here, can be extended to more scenarios in the future)
+ # 6.ZTE pod1, os-nosdn-nofeature-ha and os-odl_l2-bgpvpn-ha, run against danube
- conditional-step:
condition-kind: regex-match
- regex: os-odl_l2-bgpvpn-ha
+ regex: os-(nosdn-nofeature|odl_l2-bgpvpn)-ha
label: '{scenario}'
steps:
- trigger-builds:
build-step-failure-threshold: 'never'
failure-threshold: 'never'
unstable-threshold: 'FAILURE'
- # ZTE pod1 weekly(Sunday), os-odl_l2-nofeature-ha, run against master and euphrates
- - conditional-step:
- condition-kind: and
- condition-operands:
- - condition-kind: regex-match
- regex: os-odl_l2-nofeature-ha
- label: '{scenario}'
- - condition-kind: regex-match
- regex: zte-pod1
- label: '{pod}'
- - condition-kind: day-of-week
- day-selector: select-days
- days:
- SAT: true
- use-build-time: true
- steps:
- - trigger-builds:
- - project: 'dovetail-fuel-zte-pod1-proposed_tests-{stream}'
- current-parameters: false
- predefined-parameters:
- DEPLOY_SCENARIO={scenario}
- block: true
- same-node: true
- block-thresholds:
- build-step-failure-threshold: 'never'
- failure-threshold: 'never'
- unstable-threshold: 'FAILURE'
publishers:
- email:
- trigger:
name: 'fuel-os-nosdn-nofeature-ha-baremetal-daily-master-trigger'
triggers:
- - timed: '' # '5 20 * * *'
+ - timed: '5 20 * * *'
- trigger:
name: 'fuel-os-odl_l2-nofeature-ha-baremetal-daily-master-trigger'
triggers:
- trigger:
name: 'fuel-os-odl_l3-nofeature-ha-baremetal-daily-master-trigger'
triggers:
- - timed: '' # '5 2 * * *'
+ - timed: '5 2 * * *'
- trigger:
name: 'fuel-os-nosdn-ovs-ha-baremetal-daily-master-trigger'
triggers:
- - timed: '' # '5 5 * * *'
+ - timed: '5 5 * * *'
- trigger:
name: 'fuel-os-onos-sfc-ha-baremetal-daily-master-trigger'
triggers:
name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-zte-pod3-daily-euphrates-trigger'
triggers:
- timed: ''
+#------------------------------------------------
+# ZTE POD1 Triggers running against danube branch
+#------------------------------------------------
+- trigger:
+ name: 'fuel-os-nosdn-nofeature-ha-zte-pod1-daily-danube-trigger'
+ triggers:
+ - timed: '0 2 * * 6'
+- trigger:
+ name: 'fuel-os-odl_l2-nofeature-ha-zte-pod1-daily-danube-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l3-nofeature-ha-zte-pod1-daily-danube-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-onos-sfc-ha-zte-pod1-daily-danube-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-onos-nofeature-ha-zte-pod1-daily-danube-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l2-bgpvpn-ha-zte-pod1-daily-danube-trigger'
+ triggers:
+ - timed: '0 2 * * 1,3,5'
+- trigger:
+ name: 'fuel-os-odl_l2-sfc-ha-zte-pod1-daily-danube-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-nosdn-kvm-ha-zte-pod1-daily-danube-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-nosdn-ovs-ha-zte-pod1-daily-danube-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-nosdn-kvm_ovs_dpdk-ha-zte-pod1-daily-danube-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-ha-zte-pod1-daily-danube-trigger'
+ triggers:
+ - timed: ''
+# NOHA Scenarios
+- trigger:
+ name: 'fuel-os-nosdn-nofeature-noha-zte-pod1-daily-danube-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l2-nofeature-noha-zte-pod1-daily-danube-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l3-nofeature-noha-zte-pod1-daily-danube-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-onos-sfc-noha-zte-pod1-daily-danube-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-onos-nofeature-noha-zte-pod1-daily-danube-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l2-sfc-noha-zte-pod1-daily-danube-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l2-bgpvpn-noha-zte-pod1-daily-danube-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-nosdn-kvm-noha-zte-pod1-daily-danube-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-nosdn-ovs-noha-zte-pod1-daily-danube-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-zte-pod1-daily-danube-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-zte-pod1-daily-danube-trigger'
+ triggers:
+ - timed: ''
volumes="${images_vol} ${results_vol} ${sshkey_vol} ${rc_file_vol} ${cacert_file_vol}"
+set +e
tiers=(healthcheck smoke features vnf)
for tier in ${tiers[@]}; do
slave-label: '{pod}'
installer: fuel
<<: *master
- - arm-pod3:
+ - arm-pod5:
slave-label: '{pod}'
installer: fuel
<<: *master
slave-label: '{pod}'
installer: fuel
<<: *master
- - arm-virtual1:
+ - arm-virtual2:
slave-label: '{pod}'
installer: fuel
<<: *master
slave-label: '{pod}'
installer: fuel
<<: *danube
- - arm-pod3:
+ - arm-pod5:
slave-label: '{pod}'
installer: fuel
<<: *danube
slave-label: '{pod}'
installer: fuel
<<: *danube
- - arm-virtual1:
+ - arm-virtual2:
slave-label: '{pod}'
installer: fuel
<<: *danube
- ./functest-env-presetup.sh
- ../../utils/fetch_os_creds.sh
- ./functest-alpine.sh
+ - ../../utils/push-test-logs.sh
- builder:
name: functest-daily
[[ $CI_DEBUG == true ]] && redirect="/dev/stdout" || redirect="/dev/null"
+DEPLOY_TYPE=baremetal
+[[ $BUILD_TAG =~ "virtual" ]] && DEPLOY_TYPE=virt
+HOST_ARCH=$(uname -m)
+
# Prepare OpenStack credentials volume
+rc_file_vol="-v ${HOME}/opnfv-openrc.sh:/home/opnfv/functest/conf/openstack.creds"
+
if [[ ${INSTALLER_TYPE} == 'joid' ]]; then
rc_file_vol="-v $LAB_CONFIG/admin-openrc:/home/opnfv/functest/conf/openstack.creds"
elif [[ ${INSTALLER_TYPE} == 'compass' && ${BRANCH} == 'master' ]]; then
cacert_file_vol="-v ${HOME}/os_cacert:/home/opnfv/functest/conf/os_cacert"
echo "export OS_CACERT=/home/opnfv/functest/conf/os_cacert" >> ${HOME}/opnfv-openrc.sh
- rc_file_vol="-v ${HOME}/opnfv-openrc.sh:/home/opnfv/functest/conf/openstack.creds"
-else
- rc_file_vol="-v ${HOME}/opnfv-openrc.sh:/home/opnfv/functest/conf/openstack.creds"
+elif [[ ${INSTALLER_TYPE} == 'fuel' && ${DEPLOY_TYPE} == 'baremetal' ]]; then
+ cacert_file_vol="-v ${HOME}/os_cacert:/etc/ssl/certs/mcp_os_cacert"
fi
sudo iptables -I FORWARD -j RETURN
fi
-DEPLOY_TYPE=baremetal
-[[ $BUILD_TAG =~ "virtual" ]] && DEPLOY_TYPE=virt
-HOST_ARCH=$(uname -m)
-
echo "Functest: Start Docker and prepare environment"
if [ "$BRANCH" != 'stable/danube' ]; then
echo "Functest: Download images that will be used by test cases"
images_dir="${HOME}/opnfv/functest/images"
chmod +x ${WORKSPACE}/functest/ci/download_images.sh
- ${WORKSPACE}/functest/ci/download_images.sh ${images_dir}
+ ${WORKSPACE}/functest/ci/download_images.sh ${images_dir} > ${redirect}
images_vol="-v ${images_dir}:/home/opnfv/functest/images"
echo "Functest: Images successfully downloaded"
fi
+---
- parameter:
name: 'apex-defaults'
parameters:
- - string:
- name: INSTALLER_IP
- default: '192.168.X.X'
- description: 'IP of the installer'
- - string:
- name: INSTALLER_TYPE
- default: apex
- description: 'Installer used for deploying OPNFV on this POD'
- - string:
- name: EXTERNAL_NETWORK
- default: 'external'
- description: 'external network for test'
+ - string:
+ name: INSTALLER_IP
+ default: '192.168.X.X'
+ description: 'IP of the installer'
+ - string:
+ name: INSTALLER_TYPE
+ default: apex
+ description: 'Installer used for deploying OPNFV on this POD'
+ - string:
+ name: EXTERNAL_NETWORK
+ default: 'external'
+ description: 'external network for test'
- parameter:
name: 'compass-defaults'
parameters:
- - string:
- name: INSTALLER_IP
- default: '192.168.200.2'
- description: 'IP of the installer'
- - string:
- name: INSTALLER_TYPE
- default: compass
- description: 'Installer used for deploying OPNFV on this POD'
- - string:
- name: EXTERNAL_NETWORK
- default: 'ext-net'
- description: 'external network for test'
+ - string:
+ name: INSTALLER_IP
+ default: '192.168.200.2'
+ description: 'IP of the installer'
+ - string:
+ name: INSTALLER_TYPE
+ default: compass
+ description: 'Installer used for deploying OPNFV on this POD'
+ - string:
+ name: EXTERNAL_NETWORK
+ default: 'ext-net'
+ description: 'external network for test'
- parameter:
name: 'fuel-defaults'
parameters:
- - string:
- name: INSTALLER_IP
- default: '10.20.0.2'
- description: 'IP of the installer'
- - string:
- name: SALT_MASTER_IP
- default: '192.168.10.100'
- description: 'IP of the salt master (for mcp deployments)'
- - string:
- name: SSH_KEY
- default: '/tmp/mcp.rsa'
- description: 'Path to private SSH key to access environment nodes'
- - string:
- name: INSTALLER_TYPE
- default: fuel
- description: 'Installer used for deploying OPNFV on this POD'
- - string:
- name: EXTERNAL_NETWORK
- default: 'admin_floating_net'
- description: 'external network for test'
- - string:
- name: BRIDGE
- default: 'pxebr'
- description: 'Bridge(s) to be used by salt master'
+ - string:
+ name: INSTALLER_IP
+ default: '10.20.0.2'
+ description: 'IP of the installer'
+ - string:
+ name: SALT_MASTER_IP
+ default: '192.168.10.100'
+ description: 'IP of the salt master (for mcp deployments)'
+ - string:
+ name: SSH_KEY
+ default: '/tmp/mcp.rsa'
+ description: 'Path to private SSH key to access environment nodes'
+ - string:
+ name: INSTALLER_TYPE
+ default: fuel
+ description: 'Installer used for deploying OPNFV on this POD'
+ - string:
+ name: EXTERNAL_NETWORK
+ default: 'admin_floating_net'
+ description: 'external network for test'
+ - string:
+ name: BRIDGE
+ default: 'pxebr'
+ description: 'Bridge(s) to be used by salt master'
- parameter:
name: 'joid-defaults'
parameters:
- - string:
- name: INSTALLER_IP
- default: '192.168.122.5'
- description: 'IP of the installer'
- - string:
- name: INSTALLER_TYPE
- default: joid
- description: 'Installer used for deploying OPNFV on this POD'
- - string:
- name: MODEL
- default: 'os'
- description: 'Model to deploy (os|k8)'
- - string:
- name: OS_RELEASE
- default: 'ocata'
- description: 'OpenStack release (mitaka|newton|ocata)'
- - string:
- name: EXTERNAL_NETWORK
- default: ext-net
- description: "External network used for Floating ips."
- - string:
- name: LAB_CONFIG
- default: "$HOME/joid_config"
- description: "Local lab config and Openstack openrc location"
- - string:
- name: MAAS_REINSTALL
- default: 'false'
- description: "Reinstall MAAS and Bootstrap before deploy [true/false]"
- - string:
- name: UBUNTU_DISTRO
- default: 'xenial'
- description: "Ubuntu distribution to use for Openstack (xenial)"
- - string:
- name: CPU_ARCHITECTURE
- default: 'amd64'
- description: "CPU Architecture to use for Ubuntu distro "
+ - string:
+ name: INSTALLER_IP
+ default: '192.168.122.5'
+ description: 'IP of the installer'
+ - string:
+ name: INSTALLER_TYPE
+ default: joid
+ description: 'Installer used for deploying OPNFV on this POD'
+ - string:
+ name: MODEL
+ default: 'os'
+ description: 'Model to deploy (os|k8)'
+ - string:
+ name: OS_RELEASE
+ default: 'ocata'
+ description: 'OpenStack release (mitaka|newton|ocata)'
+ - string:
+ name: EXTERNAL_NETWORK
+ default: ext-net
+ description: "External network used for Floating ips."
+ - string:
+ name: LAB_CONFIG
+ default: "$HOME/joid_config"
+ description: "Local lab config and Openstack openrc location"
+ - string:
+ name: MAAS_REINSTALL
+ default: 'false'
+ description: "Reinstall MAAS and Bootstrap before deploy [true/false]"
+ - string:
+ name: UBUNTU_DISTRO
+ default: 'xenial'
+ description: "Ubuntu distribution to use for Openstack (xenial)"
+ - string:
+ name: CPU_ARCHITECTURE
+ default: 'amd64'
+ description: "CPU Architecture to use for Ubuntu distro "
- parameter:
name: 'daisy-defaults'
parameters:
- - string:
- name: INSTALLER_IP
- default: '10.20.7.3'
- description: 'IP of the installer'
- - string:
- name: INSTALLER_TYPE
- default: daisy
- description: 'Installer used for deploying OPNFV on this POD'
- - string:
- name: BRIDGE
- default: 'br7'
- description: 'pxe bridge for booting of Daisy master'
+ - string:
+ name: INSTALLER_IP
+ default: '10.20.7.3'
+ description: 'IP of the installer'
+ - string:
+ name: INSTALLER_TYPE
+ default: daisy
+ description: 'Installer used for deploying OPNFV on this POD'
+ - string:
+ name: BRIDGE
+ default: 'br7'
+ description: 'pxe bridge for booting of Daisy master'
- parameter:
name: 'infra-defaults'
parameters:
- - string:
- name: INSTALLER_IP
- default: '192.168.122.2'
- description: 'IP of the installer'
- - string:
- name: INSTALLER_TYPE
- default: infra
- description: 'Installer used for deploying OPNFV on this POD'
+ - string:
+ name: INSTALLER_IP
+ default: '192.168.122.2'
+ description: 'IP of the installer'
+ - string:
+ name: INSTALLER_TYPE
+ default: infra
+ description: 'Installer used for deploying OPNFV on this POD'
+
- parameter:
name: 'netvirt-defaults'
parameters:
- - string:
- name: INSTALLER_IP
- default: '192.168.X.X'
- description: 'IP of the installer'
- - string:
- name: INSTALLER_TYPE
- default: apex
- description: 'Installer used for deploying OPNFV on this POD'
- - string:
- name: EXTERNAL_NETWORK
- default: 'external'
- description: 'external network for test'
+ - string:
+ name: INSTALLER_IP
+ default: '192.168.X.X'
+ description: 'IP of the installer'
+ - string:
+ name: INSTALLER_TYPE
+ default: apex
+ description: 'Installer used for deploying OPNFV on this POD'
+ - string:
+ name: EXTERNAL_NETWORK
+ default: 'external'
+ description: 'external network for test'
+---
# jjb defaults
- defaults:
name: global
wrappers:
- - ssh-agent-wrapper
+ - ssh-agent-wrapper
project-type: freestyle
node: master
properties:
- - logrotate-default
+ - logrotate-default
publishers:
# Any project that has a publisher will not have this macro
+---
# Releng macros
#
# NOTE: make sure macros are listed in execution order.
- parameter:
name: project-parameter
parameters:
- - string:
- name: PROJECT
- default: '{project}'
- description: "JJB configured PROJECT parameter to identify an opnfv Gerrit project"
- - string:
- name: GS_BASE
- default: artifacts.opnfv.org/$PROJECT
- description: "URL to Google Storage."
- - string:
- name: GS_BASE_PROXY
- default: build.opnfv.org/artifacts.opnfv.org/$PROJECT
- description: "URL to Google Storage proxy"
- - string:
- name: BRANCH
- default: '{branch}'
- description: "JJB configured BRANCH parameter (e.g. master, stable/danube)"
- - string:
- name: GERRIT_BRANCH
- default: '{branch}'
- description: "JJB configured GERRIT_BRANCH parameter (deprecated)"
+ - string:
+ name: PROJECT
+ default: '{project}'
+ description: "JJB configured PROJECT parameter to identify an opnfv Gerrit project"
+ - string:
+ name: GS_BASE
+ default: artifacts.opnfv.org/$PROJECT
+ description: "URL to Google Storage."
+ - string:
+ name: GS_BASE_PROXY
+ default: build.opnfv.org/artifacts.opnfv.org/$PROJECT
+ description: "URL to Google Storage proxy"
+ - string:
+ name: BRANCH
+ default: '{branch}'
+ description: "JJB configured BRANCH parameter (e.g. master, stable/danube)"
+ - string:
+ name: GERRIT_BRANCH
+ default: '{branch}'
+ description: "JJB configured GERRIT_BRANCH parameter (deprecated)"
- property:
name: logrotate-default
properties:
- - build-discarder:
- days-to-keep: 60
- num-to-keep: 200
- artifact-days-to-keep: 60
- artifact-num-to-keep: 200
+ - build-discarder:
+ days-to-keep: 60
+ num-to-keep: 200
+ artifact-days-to-keep: 60
+ artifact-num-to-keep: 200
- scm:
name: git-scm
scm:
- - git: &git-scm-defaults
- credentials-id: '$SSH_CREDENTIAL_ID'
- url: '$GIT_BASE'
- branches:
- - 'origin/$BRANCH'
- timeout: 15
+ - git: &git-scm-defaults
+ credentials-id: '$SSH_CREDENTIAL_ID'
+ url: '$GIT_BASE'
+ branches:
+ - 'origin/$BRANCH'
+ timeout: 15
- scm:
name: git-scm-gerrit
scm:
- - git:
- choosing-strategy: 'gerrit'
- refspec: '$GERRIT_REFSPEC'
- <<: *git-scm-defaults
+ - git:
+ choosing-strategy: 'gerrit'
+ refspec: '$GERRIT_REFSPEC'
+ <<: *git-scm-defaults
- scm:
name: git-scm-with-submodules
scm:
- - git:
- credentials-id: '$SSH_CREDENTIAL_ID'
- url: '$GIT_BASE'
- refspec: ''
- branches:
- - 'refs/heads/{branch}'
- skip-tag: true
- wipe-workspace: true
- submodule:
- recursive: true
- timeout: 20
+ - git:
+ credentials-id: '$SSH_CREDENTIAL_ID'
+ url: '$GIT_BASE'
+ refspec: ''
+ branches:
+ - 'refs/heads/{branch}'
+ skip-tag: true
+ wipe-workspace: true
+ submodule:
+ recursive: true
+ timeout: 20
- trigger:
name: 'daily-trigger-disabled'
triggers:
- - timed: ''
+ - timed: ''
- trigger:
name: 'weekly-trigger-disabled'
triggers:
- - timed: ''
+ - timed: ''
- trigger:
name: gerrit-trigger-patchset-created
triggers:
- - gerrit:
- server-name: 'gerrit.opnfv.org'
- trigger-on:
- - patchset-created-event:
- exclude-drafts: 'false'
- exclude-trivial-rebase: 'false'
- exclude-no-code-change: 'false'
- - draft-published-event
- - comment-added-contains-event:
- comment-contains-value: 'recheck'
- - comment-added-contains-event:
- comment-contains-value: 'reverify'
- projects:
- - project-compare-type: 'ANT'
- project-pattern: '{project}'
- branches:
- - branch-compare-type: 'ANT'
- branch-pattern: '**/{branch}'
- file-paths:
- - compare-type: 'ANT'
- pattern: '{files}'
- skip-vote:
- successful: false
- failed: false
- unstable: false
- notbuilt: false
+ - gerrit:
+ server-name: 'gerrit.opnfv.org'
+ trigger-on:
+ - patchset-created-event:
+ exclude-drafts: 'false'
+ exclude-trivial-rebase: 'false'
+ exclude-no-code-change: 'false'
+ - draft-published-event
+ - comment-added-contains-event:
+ comment-contains-value: 'recheck'
+ - comment-added-contains-event:
+ comment-contains-value: 'reverify'
+ projects:
+ - project-compare-type: 'ANT'
+ project-pattern: '{project}'
+ branches:
+ - branch-compare-type: 'ANT'
+ branch-pattern: '**/{branch}'
+ file-paths:
+ - compare-type: 'ANT'
+ pattern: '{files}'
+ skip-vote:
+ successful: false
+ failed: false
+ unstable: false
+ notbuilt: false
- trigger:
name: gerrit-trigger-change-merged
triggers:
- - gerrit:
- server-name: 'gerrit.opnfv.org'
- trigger-on:
- - change-merged-event
- - comment-added-contains-event:
- comment-contains-value: 'remerge'
- projects:
- - project-compare-type: 'ANT'
- project-pattern: '{project}'
- branches:
- - branch-compare-type: 'ANT'
- branch-pattern: '**/{branch}'
- file-paths:
- - compare-type: 'ANT'
- pattern: '{files}'
+ - gerrit:
+ server-name: 'gerrit.opnfv.org'
+ trigger-on:
+ - change-merged-event
+ - comment-added-contains-event:
+ comment-contains-value: 'remerge'
+ projects:
+ - project-compare-type: 'ANT'
+ project-pattern: '{project}'
+ branches:
+ - branch-compare-type: 'ANT'
+ branch-pattern: '**/{branch}'
+ file-paths:
+ - compare-type: 'ANT'
+ pattern: '{files}'
- trigger:
name: 'experimental'
triggers:
- - gerrit:
- server-name: 'gerrit.opnfv.org'
- trigger-on:
- - comment-added-contains-event:
- comment-contains-value: 'check-experimental'
- projects:
- - project-compare-type: 'ANT'
- project-pattern: '{project}'
- branches:
- - branch-compare-type: 'ANT'
- branch-pattern: '**/{branch}'
- file-paths:
- - compare-type: 'ANT'
- pattern: '{files}'
- skip-vote:
- successful: true
- failed: true
- unstable: true
- notbuilt: true
+ - gerrit:
+ server-name: 'gerrit.opnfv.org'
+ trigger-on:
+ - comment-added-contains-event:
+ comment-contains-value: 'check-experimental'
+ projects:
+ - project-compare-type: 'ANT'
+ project-pattern: '{project}'
+ branches:
+ - branch-compare-type: 'ANT'
+ branch-pattern: '**/{branch}'
+ file-paths:
+ - compare-type: 'ANT'
+ pattern: '{files}'
+ skip-vote:
+ successful: true
+ failed: true
+ unstable: true
+ notbuilt: true
- wrapper:
name: ssh-agent-wrapper
wrappers:
- - ssh-agent-credentials:
- users:
- - 'd42411ac011ad6f3dd2e1fa34eaa5d87f910eb2e'
+ - ssh-agent-credentials:
+ users:
+ - 'd42411ac011ad6f3dd2e1fa34eaa5d87f910eb2e'
- wrapper:
name: build-timeout
- wrapper:
name: fix-workspace-permissions
wrappers:
- - pre-scm-buildstep:
+ - pre-scm-buildstep:
- shell: |
- #!/bin/bash
- sudo chown -R $USER:$USER $WORKSPACE || exit 1
+ #!/bin/bash
+ sudo chown -R $USER:$USER $WORKSPACE || exit 1
- builder:
name: build-html-and-pdf-docs-output
builders:
- - shell: |
- #!/bin/bash
- set -o errexit
- set -o xtrace
- export PATH=$PATH:/usr/local/bin/
- git clone ssh://gerrit.opnfv.org:29418/opnfvdocs docs_build/_opnfvdocs
- GERRIT_COMMENT=gerrit_comment.txt ./docs_build/_opnfvdocs/scripts/docs-build.sh
+ - shell: |
+ #!/bin/bash
+ set -o errexit
+ set -o xtrace
+ export PATH=$PATH:/usr/local/bin/
+ git clone ssh://gerrit.opnfv.org:29418/opnfvdocs docs_build/_opnfvdocs
+ GERRIT_COMMENT=gerrit_comment.txt ./docs_build/_opnfvdocs/scripts/docs-build.sh
- builder:
name: upload-under-review-docs-to-opnfv-artifacts
builders:
- - shell: |
- #!/bin/bash
- set -o errexit
- set -o pipefail
- set -o xtrace
- export PATH=$PATH:/usr/local/bin/
-
- [[ $GERRIT_CHANGE_NUMBER =~ .+ ]]
- [[ -d docs_output ]] || exit 0
-
- echo
- echo "###########################"
- echo "UPLOADING DOCS UNDER REVIEW"
- echo "###########################"
- echo
-
- gs_base="artifacts.opnfv.org/$PROJECT/review"
- gs_path="$gs_base/$GERRIT_CHANGE_NUMBER"
- local_path="upload/$GERRIT_CHANGE_NUMBER"
-
- mkdir -p upload
- mv docs_output "$local_path"
- gsutil -m cp -r "$local_path" "gs://$gs_base"
-
- gsutil -m setmeta \
- -h "Content-Type:text/html" \
- -h "Cache-Control:private, max-age=0, no-transform" \
- "gs://$gs_path"/**.html > /dev/null 2>&1
-
- echo "Document link(s):" >> gerrit_comment.txt
- find "$local_path" | grep -e 'index.html$' -e 'pdf$' | \
- sed -e "s|^$local_path| http://$gs_path|" >> gerrit_comment.txt
+ - shell: |
+ #!/bin/bash
+ set -o errexit
+ set -o pipefail
+ set -o xtrace
+ export PATH=$PATH:/usr/local/bin/
+
+ [[ $GERRIT_CHANGE_NUMBER =~ .+ ]]
+ [[ -d docs_output ]] || exit 0
+
+ echo
+ echo "###########################"
+ echo "UPLOADING DOCS UNDER REVIEW"
+ echo "###########################"
+ echo
+
+ gs_base="artifacts.opnfv.org/$PROJECT/review"
+ gs_path="$gs_base/$GERRIT_CHANGE_NUMBER"
+ local_path="upload/$GERRIT_CHANGE_NUMBER"
+
+ mkdir -p upload
+ mv docs_output "$local_path"
+ gsutil -m cp -r "$local_path" "gs://$gs_base"
+
+ gsutil -m setmeta \
+ -h "Content-Type:text/html" \
+ -h "Cache-Control:private, max-age=0, no-transform" \
+ "gs://$gs_path"/**.html > /dev/null 2>&1
+
+ echo "Document link(s):" >> gerrit_comment.txt
+ find "$local_path" | grep -e 'index.html$' -e 'pdf$' | \
+ sed -e "s|^$local_path| http://$gs_path|" >> gerrit_comment.txt
- builder:
name: upload-generated-docs-to-opnfv-artifacts
builders:
- - shell: |
- #!/bin/bash
- set -o errexit
- set -o pipefail
- set -o xtrace
- export PATH=$PATH:/usr/local/bin/
+ - shell: |
+ #!/bin/bash
+ set -o errexit
+ set -o pipefail
+ set -o xtrace
+ export PATH=$PATH:/usr/local/bin/
- [[ -d docs_output ]] || exit 0
+ [[ -d docs_output ]] || exit 0
- echo
- echo "########################"
- echo "UPLOADING GENERATED DOCS"
- echo "########################"
- echo
+ echo
+ echo "########################"
+ echo "UPLOADING GENERATED DOCS"
+ echo "########################"
+ echo
- echo "gs_path="$GS_URL/docs""
- echo "local_path="upload/docs""
+ echo "gs_path="$GS_URL/docs""
+ echo "local_path="upload/docs""
- gs_path="$GS_URL/docs"
- local_path="upload/docs"
+ gs_path="$GS_URL/docs"
+ local_path="upload/docs"
- mkdir -p upload
- mv docs_output "$local_path"
- ls "$local_path"
+ mkdir -p upload
+ mv docs_output "$local_path"
+ ls "$local_path"
- echo "gsutil -m cp -r "$local_path"/* "gs://$gs_path""
- gsutil -m cp -r "$local_path"/* "gs://$gs_path"
+ echo "gsutil -m cp -r "$local_path"/* "gs://$gs_path""
+ gsutil -m cp -r "$local_path"/* "gs://$gs_path"
- gsutil -m setmeta \
- -h "Content-Type:text/html" \
- -h "Cache-Control:private, max-age=0, no-transform" \
- "gs://$gs_path"/**.html > /dev/null 2>&1
+ gsutil -m setmeta \
+ -h "Content-Type:text/html" \
+ -h "Cache-Control:private, max-age=0, no-transform" \
+ "gs://$gs_path"/**.html > /dev/null 2>&1
- echo "Document link(s):" >> gerrit_comment.txt
- find "$local_path" | grep -e 'index.html$' -e 'pdf$' | \
- sed -e "s|^$local_path| http://$gs_path|" >> gerrit_comment.txt
+ echo "Document link(s):" >> gerrit_comment.txt
+ find "$local_path" | grep -e 'index.html$' -e 'pdf$' | \
+ sed -e "s|^$local_path| http://$gs_path|" >> gerrit_comment.txt
# To take advantage of this macro, have your build write
# out the file 'gerrit_comment.txt' with information to post
- builder:
name: report-build-result-to-gerrit
builders:
- - shell: |
- #!/bin/bash
- set -o errexit
- set -o pipefail
- set -o xtrace
- export PATH=$PATH:/usr/local/bin/
- if [[ -e gerrit_comment.txt ]] ; then
- echo
- echo "posting review comment to gerrit..."
- echo
- cat gerrit_comment.txt
- echo
- ssh -p 29418 gerrit.opnfv.org \
- "gerrit review -p $GERRIT_PROJECT \
- -m '$(cat gerrit_comment.txt)' \
- $GERRIT_PATCHSET_REVISION \
- --notify NONE"
- fi
+ - shell: |
+ #!/bin/bash
+ set -o errexit
+ set -o pipefail
+ set -o xtrace
+ export PATH=$PATH:/usr/local/bin/
+ if [[ -e gerrit_comment.txt ]] ; then
+ echo
+ echo "posting review comment to gerrit..."
+ echo
+ cat gerrit_comment.txt
+ echo
+ ssh -p 29418 gerrit.opnfv.org \
+ "gerrit review -p $GERRIT_PROJECT \
+ -m '$(cat gerrit_comment.txt)' \
+ $GERRIT_PATCHSET_REVISION \
+ --notify NONE"
+ fi
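# Illustrative usage sketch for the macro above (hypothetical job, editorial
# aside): any builder that writes gerrit_comment.txt before this macro runs
# will have that text posted as a review comment, e.g.:
#
#   builders:
#     - shell: |
#         #!/bin/bash
#         echo "Docs built successfully; see artifact links above." > gerrit_comment.txt
#     - report-build-result-to-gerrit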
- builder:
name: remove-old-docs-from-opnfv-artifacts
builders:
- - shell: |
- #!/bin/bash
- set -o errexit
- set -o pipefail
- set -o xtrace
- export PATH=$PATH:/usr/local/bin/
-
- [[ $GERRIT_CHANGE_NUMBER =~ .+ ]]
-
- gs_path="artifacts.opnfv.org/$PROJECT/review/$GERRIT_CHANGE_NUMBER"
-
- if gsutil ls "gs://$gs_path" > /dev/null 2>&1 ; then
- echo
- echo "Deleting Out-of-dated Documents..."
- gsutil -m rm -r "gs://$gs_path"
- fi
- gs_path="artifacts.opnfv.org/review/$GERRIT_CHANGE_NUMBER"
-
- if gsutil ls "gs://$gs_path" > /dev/null 2>&1 ; then
- echo
- echo "Deleting Out-of-dated Documents..."
- gsutil -m rm -r "gs://$gs_path"
- fi
+ - shell: |
+ #!/bin/bash
+ set -o errexit
+ set -o pipefail
+ set -o xtrace
+ export PATH=$PATH:/usr/local/bin/
+
+ [[ $GERRIT_CHANGE_NUMBER =~ .+ ]]
+
+ gs_path="artifacts.opnfv.org/$PROJECT/review/$GERRIT_CHANGE_NUMBER"
+
+ if gsutil ls "gs://$gs_path" > /dev/null 2>&1 ; then
+ echo
+            echo "Deleting out-of-date documents..."
+ gsutil -m rm -r "gs://$gs_path"
+ fi
+ gs_path="artifacts.opnfv.org/review/$GERRIT_CHANGE_NUMBER"
+
+ if gsutil ls "gs://$gs_path" > /dev/null 2>&1 ; then
+ echo
+            echo "Deleting out-of-date documents..."
+ gsutil -m rm -r "gs://$gs_path"
+ fi
- builder:
name: build-and-upload-artifacts-json-api
builders:
- - shell: |
- #!/bin/bash
- set -o errexit
- set -o pipefail
- export PATH=$PATH:/usr/local/bin/
+ - shell: |
+ #!/bin/bash
+ set -o errexit
+ set -o pipefail
+ export PATH=$PATH:/usr/local/bin/
- virtualenv -p python2.7 $WORKSPACE/releng_artifacts
- source $WORKSPACE/releng_artifacts/bin/activate
+ virtualenv -p python2.7 $WORKSPACE/releng_artifacts
+ source $WORKSPACE/releng_artifacts/bin/activate
- # install python packages
- pip install google-api-python-client
+ # install python packages
+ pip install google-api-python-client
- # generate and upload index file
- echo "Generating Artifacts API ..."
- python $WORKSPACE/utils/opnfv-artifacts.py > index.json
- gsutil cp index.json gs://artifacts.opnfv.org/index.json
+ # generate and upload index file
+ echo "Generating Artifacts API ..."
+ python $WORKSPACE/utils/opnfv-artifacts.py > index.json
+ gsutil cp index.json gs://artifacts.opnfv.org/index.json
- deactivate
+ deactivate
- builder:
name: lint-python-code
builders:
- - shell: |
- #!/bin/bash
- set -o errexit
- set -o pipefail
- set -o xtrace
- export PATH=$PATH:/usr/local/bin/
-
- virtualenv -p python2.7 $WORKSPACE/releng_flake8
- source $WORKSPACE/releng_flake8/bin/activate
-
- # install python packages
- pip install "flake8==2.6.2"
-
- # generate and upload lint log
- echo "Running flake8 code on $PROJECT ..."
-
- # Get number of flake8 violations. If none, this will be an
- # empty string: ""
- FLAKE_COUNT="$(find . \
+ - shell: |
+ #!/bin/bash
+ set -o errexit
+ set -o pipefail
+ set -o xtrace
+ export PATH=$PATH:/usr/local/bin/
+
+ virtualenv -p python2.7 $WORKSPACE/releng_flake8
+ source $WORKSPACE/releng_flake8/bin/activate
+
+ # install python packages
+ pip install "flake8==2.6.2"
+
+ # generate and upload lint log
+          echo "Running flake8 on $PROJECT code ..."
+
+ # Get number of flake8 violations. If none, this will be an
+ # empty string: ""
+ FLAKE_COUNT="$(find . \
+ -path './releng_flake8' -prune -o \
+ -path './.tox' -prune -o \
+ -type f -name "*.py" -print | \
+ xargs flake8 --exit-zero -qq --count 2>&1)"
+
+ # Ensure we start with a clean environment
+ rm -f lint.log
+
+ if [ ! -z $FLAKE_COUNT ]; then
+ echo "Flake8 Violations: $FLAKE_COUNT" > lint.log
+ find . \
-path './releng_flake8' -prune -o \
-path './.tox' -prune -o \
-type f -name "*.py" -print | \
- xargs flake8 --exit-zero -qq --count 2>&1)"
-
- # Ensure we start with a clean environment
- rm -f lint.log
-
- if [ ! -z $FLAKE_COUNT ]; then
- echo "Flake8 Violations: $FLAKE_COUNT" > lint.log
- find . \
- -path './releng_flake8' -prune -o \
- -path './.tox' -prune -o \
- -type f -name "*.py" -print | \
- xargs flake8 --exit-zero --first >> violation.log
- SHOWN=$(wc -l violation.log | cut -d' ' -f1)
- echo -e "First $SHOWN shown\n---" >> lint.log
- cat violation.log >> lint.log
- sed -r -i '4,$s/^/ /g' lint.log
- rm violation.log
- fi
-
- deactivate
+ xargs flake8 --exit-zero --first >> violation.log
+ SHOWN=$(wc -l violation.log | cut -d' ' -f1)
+ echo -e "First $SHOWN shown\n---" >> lint.log
+ cat violation.log >> lint.log
+ sed -r -i '4,$s/^/ /g' lint.log
+ rm violation.log
+ fi
+
+ deactivate
- builder:
name: report-lint-result-to-gerrit
builders:
- - shell: |
- #!/bin/bash
- set -o errexit
- set -o pipefail
- set -o xtrace
- export PATH=$PATH:/usr/local/bin/
+ - shell: |
+ #!/bin/bash
+ set -o errexit
+ set -o pipefail
+ set -o xtrace
+ export PATH=$PATH:/usr/local/bin/
- # If no violations were found, no lint log will exist.
- if [[ -e lint.log ]] ; then
- echo -e "\nposting linting report to gerrit...\n"
+ # If no violations were found, no lint log will exist.
+ if [[ -e lint.log ]] ; then
+ echo -e "\nposting linting report to gerrit...\n"
- cat lint.log
- echo
+ cat lint.log
+ echo
- ssh -p 29418 gerrit.opnfv.org \
- "gerrit review -p $GERRIT_PROJECT \
- -m \"$(cat lint.log)\" \
- $GERRIT_PATCHSET_REVISION \
- --notify NONE"
+ ssh -p 29418 gerrit.opnfv.org \
+ "gerrit review -p $GERRIT_PROJECT \
+ -m \"$(cat lint.log)\" \
+ $GERRIT_PATCHSET_REVISION \
+ --notify NONE"
- exit 1
- fi
+ exit 1
+ fi
- builder:
name: upload-review-docs
builders:
- - build-html-and-pdf-docs-output
- - upload-under-review-docs-to-opnfv-artifacts
- - report-build-result-to-gerrit
+ - build-html-and-pdf-docs-output
+ - upload-under-review-docs-to-opnfv-artifacts
+ - report-build-result-to-gerrit
- builder:
name: upload-merged-docs
builders:
- - build-html-and-pdf-docs-output
- - upload-generated-docs-to-opnfv-artifacts
- - report-build-result-to-gerrit
- - remove-old-docs-from-opnfv-artifacts
+ - build-html-and-pdf-docs-output
+ - upload-generated-docs-to-opnfv-artifacts
+ - report-build-result-to-gerrit
+ - remove-old-docs-from-opnfv-artifacts
- builder:
name: check-bash-syntax
builders:
- - shell: "find . -name '*.sh' | xargs bash -n"
+ - shell: "find . -name '*.sh' | xargs bash -n"
- builder:
name: lint-yaml-code
builders:
- - shell: |
- #!/bin/bash
- set -o errexit
- set -o pipefail
- set -o xtrace
- export PATH=$PATH:/usr/local/bin/
-
- # install python packages
- pip install "yamllint==1.6.0"
-
- # generate and upload lint log
- echo "Running yaml code on $PROJECT ..."
-
- # Ensure we start with a clean environment
- rm -f yaml-violation.log lint.log
-
- # Get number of yaml violations. If none, this will be an
- # empty string: ""
- find . \
- -type f -name "*.yml" -print \
- -o -name "*.yaml" -print | \
- xargs yamllint > yaml-violation.log || true
-
- if [ -s "yaml-violation.log" ]; then
- SHOWN=$(cat yaml-violation.log| grep -v "^$" |wc -l)
- echo -e "First $SHOWN shown\n---" > lint.log
- cat yaml-violation.log >> lint.log
- sed -r -i '4,$s/^/ /g' lint.log
- fi
+ - shell: |
+ #!/bin/bash
+ set -o errexit
+ set -o pipefail
+ set -o xtrace
+ export PATH=$PATH:/usr/local/bin/
+
+ # install python packages
+ pip install "yamllint==1.6.0"
+
+ # generate and upload lint log
+          echo "Running yamllint on $PROJECT ..."
+
+ # Ensure we start with a clean environment
+ rm -f yaml-violation.log lint.log
+
+ # Get number of yaml violations. If none, this will be an
+ # empty string: ""
+ find . \
+ -type f -name "*.yml" -print \
+ -o -name "*.yaml" -print | \
+ xargs yamllint > yaml-violation.log || true
+
+ if [ -s "yaml-violation.log" ]; then
+ SHOWN=$(cat yaml-violation.log| grep -v "^$" |wc -l)
+ echo -e "First $SHOWN shown\n---" > lint.log
+ cat yaml-violation.log >> lint.log
+ sed -r -i '4,$s/^/ /g' lint.log
+ fi
- builder:
name: clean-workspace-log
builders:
- - shell: |
- find $WORKSPACE -type f -name '*.log' | xargs rm -f
+ - shell: |
+ find $WORKSPACE -type f -name '*.log' | xargs rm -f
- publisher:
name: archive-artifacts
publishers:
- - archive:
- artifacts: '{artifacts}'
- allow-empty: true
- fingerprint: true
- latest-only: true
+ - archive:
+ artifacts: '{artifacts}'
+ allow-empty: true
+ fingerprint: true
+ latest-only: true
- publisher:
name: publish-coverage
+---
#####################################################
# Parameters for slaves using old labels
# This will be cleaned up once the new job structure and
- parameter:
name: 'apex-baremetal-master-defaults'
parameters:
- - label:
- name: SLAVE_LABEL
- default: 'apex-baremetal-master'
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
- - string:
- name: SSH_KEY
- default: /root/.ssh/id_rsa
- description: 'SSH key to use for Apex'
- - node:
- name: SLAVE_NAME
- description: 'Slave name on Jenkins'
- allowed-slaves:
- - lf-pod1
- default-slaves:
- - lf-pod1
+ - label:
+ name: SLAVE_LABEL
+ default: 'apex-baremetal-master'
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+ - string:
+ name: SSH_KEY
+ default: /root/.ssh/id_rsa
+ description: 'SSH key to use for Apex'
+ - node:
+ name: SLAVE_NAME
+ description: 'Slave name on Jenkins'
+ allowed-slaves:
+ - lf-pod1
+ default-slaves:
+ - lf-pod1
+
- parameter:
name: 'apex-baremetal-danube-defaults'
parameters:
- - label:
- name: SLAVE_LABEL
- default: 'apex-baremetal-danube'
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
- - string:
- name: SSH_KEY
- default: /root/.ssh/id_rsa
- description: 'SSH key to use for Apex'
- - node:
- name: SLAVE_NAME
- description: 'Slave name on Jenkins'
- allowed-slaves:
- - lf-pod1
- default-slaves:
- - lf-pod1
+ - label:
+ name: SLAVE_LABEL
+ default: 'apex-baremetal-danube'
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+ - string:
+ name: SSH_KEY
+ default: /root/.ssh/id_rsa
+ description: 'SSH key to use for Apex'
+ - node:
+ name: SLAVE_NAME
+ description: 'Slave name on Jenkins'
+ allowed-slaves:
+ - lf-pod1
+ default-slaves:
+ - lf-pod1
+
- parameter:
name: 'apex-virtual-master-defaults'
parameters:
- - label:
- name: SLAVE_LABEL
- default: 'apex-virtual-master'
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
- - string:
- name: SSH_KEY
- default: /root/.ssh/id_rsa
- description: 'SSH key to use for Apex'
- - node:
- name: SLAVE_NAME
- description: 'Slave name on Jenkins'
- allowed-slaves:
- - lf-virtual2
- - lf-virtual3
- default-slaves:
- - lf-virtual2
- - lf-virtual3
+ - label:
+ name: SLAVE_LABEL
+ default: 'apex-virtual-master'
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+ - string:
+ name: SSH_KEY
+ default: /root/.ssh/id_rsa
+ description: 'SSH key to use for Apex'
+ - node:
+ name: SLAVE_NAME
+ description: 'Slave name on Jenkins'
+ allowed-slaves:
+ - lf-virtual2
+ - lf-virtual3
+ default-slaves:
+ - lf-virtual2
+ - lf-virtual3
- parameter:
name: 'apex-virtual-danube-defaults'
parameters:
- - label:
- name: SLAVE_LABEL
- default: 'apex-virtual-danube'
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
- - string:
- name: SSH_KEY
- default: /root/.ssh/id_rsa
- description: 'SSH key to use for Apex'
- - node:
- name: SLAVE_NAME
- description: 'Slave name on Jenkins'
- allowed-slaves:
- - lf-pod3
- default-slaves:
- - lf-pod3
+ - label:
+ name: SLAVE_LABEL
+ default: 'apex-virtual-danube'
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+ - string:
+ name: SSH_KEY
+ default: /root/.ssh/id_rsa
+ description: 'SSH key to use for Apex'
+ - node:
+ name: SLAVE_NAME
+ description: 'Slave name on Jenkins'
+ allowed-slaves:
+ - lf-pod3
+ default-slaves:
+ - lf-pod3
+
- parameter:
name: 'lf-pod1-defaults'
parameters:
- - node:
- name: SLAVE_NAME
- description: 'Slave name on Jenkins'
- allowed-slaves:
- - lf-pod1
- default-slaves:
- - lf-pod1
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
- - string:
- name: SSH_KEY
- default: /root/.ssh/id_rsa
- description: 'SSH key to use for Apex'
+ - node:
+ name: SLAVE_NAME
+ description: 'Slave name on Jenkins'
+ allowed-slaves:
+ - lf-pod1
+ default-slaves:
+ - lf-pod1
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+ - string:
+ name: SSH_KEY
+ default: /root/.ssh/id_rsa
+ description: 'SSH key to use for Apex'
+
- parameter:
name: 'lf-pod3-defaults'
parameters:
- - node:
- name: SLAVE_NAME
- description: 'Slave name on Jenkins'
- allowed-slaves:
- - lf-pod3
- default-slaves:
- - lf-pod3
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
- - string:
- name: SSH_KEY
- default: /root/.ssh/id_rsa
- description: 'SSH key to use for Apex'
+ - node:
+ name: SLAVE_NAME
+ description: 'Slave name on Jenkins'
+ allowed-slaves:
+ - lf-pod3
+ default-slaves:
+ - lf-pod3
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+ - string:
+ name: SSH_KEY
+ default: /root/.ssh/id_rsa
+ description: 'SSH key to use for Apex'
+
#####################################################
# Parameters for CI baremetal PODs
#####################################################
- parameter:
name: 'apex-baremetal-defaults'
parameters:
- - label:
- name: SLAVE_LABEL
- default: 'apex-baremetal'
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
- - string:
- name: SSH_KEY
- default: /root/.ssh/id_rsa
- description: 'SSH key to use for Apex'
+ - label:
+ name: SLAVE_LABEL
+ default: 'apex-baremetal'
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+ - string:
+ name: SSH_KEY
+ default: /root/.ssh/id_rsa
+ description: 'SSH key to use for Apex'
- parameter:
name: 'compass-baremetal-defaults'
parameters:
- - label:
- name: SLAVE_LABEL
- default: 'compass-baremetal'
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
+ - label:
+ name: SLAVE_LABEL
+ default: 'compass-baremetal'
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+
- parameter:
name: 'compass-baremetal-master-defaults'
parameters:
- - label:
- name: SLAVE_LABEL
- default: 'compass-baremetal-master'
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
+ - label:
+ name: SLAVE_LABEL
+ default: 'compass-baremetal-master'
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+
- parameter:
name: 'compass-baremetal-branch-defaults'
parameters:
- - label:
- name: SLAVE_LABEL
- default: 'compass-baremetal-branch'
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
+ - label:
+ name: SLAVE_LABEL
+ default: 'compass-baremetal-branch'
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+
- parameter:
name: 'fuel-baremetal-defaults'
parameters:
- - label:
- name: SLAVE_LABEL
- default: 'fuel-baremetal'
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
+ - label:
+ name: SLAVE_LABEL
+ default: 'fuel-baremetal'
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+
- parameter:
name: 'armband-baremetal-defaults'
parameters:
- - label:
- name: SLAVE_LABEL
- default: 'armband-baremetal'
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
- - string:
- name: LAB_CONFIG_URL
- default: ssh://jenkins-enea@gerrit.opnfv.org:29418/securedlab
- description: 'Base URI to the configuration directory'
+ - label:
+ name: SLAVE_LABEL
+ default: 'armband-baremetal'
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+ - string:
+ name: LAB_CONFIG_URL
+ default: ssh://jenkins-enea@gerrit.opnfv.org:29418/securedlab
+ description: 'Base URI to the configuration directory'
+
- parameter:
name: 'joid-baremetal-defaults'
parameters:
- - label:
- name: SLAVE_LABEL
- default: 'joid-baremetal'
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
- - string:
- name: EXTERNAL_NETWORK
- default: ext-net
- description: "External network floating ips"
+ - label:
+ name: SLAVE_LABEL
+ default: 'joid-baremetal'
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+ - string:
+ name: EXTERNAL_NETWORK
+ default: ext-net
+ description: "External network floating ips"
+
- parameter:
name: 'daisy-baremetal-defaults'
parameters:
- - node:
- name: SLAVE_NAME
- description: 'Slave name on Jenkins'
- allowed-slaves:
- - zte-pod2
- default-slaves:
- - zte-pod2
- - label:
- name: SLAVE_LABEL
- default: 'daisy-baremetal'
- - string:
- name: INSTALLER_IP
- default: '10.20.7.3'
- description: 'IP of the installer'
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
+ - node:
+ name: SLAVE_NAME
+ description: 'Slave name on Jenkins'
+ allowed-slaves:
+ - zte-pod2
+ default-slaves:
+ - zte-pod2
+ - label:
+ name: SLAVE_LABEL
+ default: 'daisy-baremetal'
+ - string:
+ name: INSTALLER_IP
+ default: '10.20.7.3'
+ description: 'IP of the installer'
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+
#####################################################
# Parameters for CI virtual PODs
#####################################################
- parameter:
name: 'apex-virtual-defaults'
parameters:
- - label:
- name: SLAVE_LABEL
- default: 'apex-virtual'
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
- - string:
- name: SSH_KEY
- default: /root/.ssh/id_rsa
- description: 'SSH key to use for Apex'
+ - label:
+ name: SLAVE_LABEL
+ default: 'apex-virtual'
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+ - string:
+ name: SSH_KEY
+ default: /root/.ssh/id_rsa
+ description: 'SSH key to use for Apex'
+
- parameter:
name: 'compass-virtual-defaults'
parameters:
- - label:
- name: SLAVE_LABEL
- default: 'compass-virtual'
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
+ - label:
+ name: SLAVE_LABEL
+ default: 'compass-virtual'
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+
- parameter:
name: 'compass-virtual-master-defaults'
parameters:
- - label:
- name: SLAVE_LABEL
- default: 'compass-virtual-master'
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
+ - label:
+ name: SLAVE_LABEL
+ default: 'compass-virtual-master'
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+
- parameter:
name: 'compass-virtual-branch-defaults'
parameters:
- - label:
- name: SLAVE_LABEL
- default: 'compass-virtual-branch'
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
+ - label:
+ name: SLAVE_LABEL
+ default: 'compass-virtual-branch'
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+
- parameter:
name: 'fuel-virtual-defaults'
parameters:
- - label:
- name: SLAVE_LABEL
- default: 'fuel-virtual'
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
+ - label:
+ name: SLAVE_LABEL
+ default: 'fuel-virtual'
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+
- parameter:
name: 'armband-virtual-defaults'
parameters:
- - label:
- name: SLAVE_LABEL
- default: 'armband-virtual'
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
- - string:
- name: LAB_CONFIG_URL
- default: ssh://jenkins-enea@gerrit.opnfv.org:29418/securedlab
- description: 'Base URI to the configuration directory'
+ - label:
+ name: SLAVE_LABEL
+ default: 'armband-virtual'
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+ - string:
+ name: LAB_CONFIG_URL
+ default: ssh://jenkins-enea@gerrit.opnfv.org:29418/securedlab
+ description: 'Base URI to the configuration directory'
+
- parameter:
name: 'joid-virtual-defaults'
parameters:
- - label:
- name: SLAVE_LABEL
- default: 'joid-virtual'
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
+ - label:
+ name: SLAVE_LABEL
+ default: 'joid-virtual'
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+
- parameter:
name: 'daisy-virtual-defaults'
parameters:
- - node:
- name: SLAVE_NAME
- description: 'Slave name on Jenkins'
- allowed-slaves:
- - zte-virtual1
- - zte-virtual2
- default-slaves:
- - zte-virtual1
- - label:
- name: SLAVE_LABEL
- default: 'daisy-virtual'
- - string:
- name: INSTALLER_IP
- default: '10.20.11.2'
- description: 'IP of the installer'
- - string:
- name: BRIDGE
- default: 'daisy1'
- description: 'pxe bridge for booting of Daisy master'
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
+ - node:
+ name: SLAVE_NAME
+ description: 'Slave name on Jenkins'
+ allowed-slaves:
+ - zte-virtual1
+ - zte-virtual2
+ default-slaves:
+ - zte-virtual1
+ - label:
+ name: SLAVE_LABEL
+ default: 'daisy-virtual'
+ - string:
+ name: INSTALLER_IP
+ default: '10.20.11.2'
+ description: 'IP of the installer'
+ - string:
+ name: BRIDGE
+ default: 'daisy1'
+ description: 'pxe bridge for booting of Daisy master'
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+
#####################################################
# Parameters for build slaves
#####################################################
- parameter:
name: 'opnfv-build-enea-defaults'
parameters:
- - label:
- name: SLAVE_LABEL
- default: 'opnfv-build-enea'
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
- - string:
- name: BUILD_DIRECTORY
- default: $WORKSPACE/build_output
- description: "Directory where the build artifact will be located upon the completion of the build."
+ - label:
+ name: SLAVE_LABEL
+ default: 'opnfv-build-enea'
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+ - string:
+ name: BUILD_DIRECTORY
+ default: $WORKSPACE/build_output
+ description: "Directory where the build artifact will be located upon the completion of the build."
+
- parameter:
name: 'opnfv-build-centos-defaults'
parameters:
- - label:
- name: SLAVE_LABEL
- default: 'opnfv-build-centos'
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
- - string:
- name: BUILD_DIRECTORY
- default: $WORKSPACE/build_output
- description: "Directory where the build artifact will be located upon the completion of the build."
+ - label:
+ name: SLAVE_LABEL
+ default: 'opnfv-build-centos'
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+ - string:
+ name: BUILD_DIRECTORY
+ default: $WORKSPACE/build_output
+ description: "Directory where the build artifact will be located upon the completion of the build."
+
- parameter:
name: 'opnfv-build-ubuntu-defaults'
parameters:
- - label:
- name: SLAVE_LABEL
- default: 'opnfv-build-ubuntu'
- description: 'Slave label on Jenkins'
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
- - string:
- name: BUILD_DIRECTORY
- default: $WORKSPACE/build_output
- description: "Directory where the build artifact will be located upon the completion of the build."
+ - label:
+ name: SLAVE_LABEL
+ default: 'opnfv-build-ubuntu'
+ description: 'Slave label on Jenkins'
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+ - string:
+ name: BUILD_DIRECTORY
+ default: $WORKSPACE/build_output
+ description: "Directory where the build artifact will be located upon the completion of the build."
+
- parameter:
name: 'opnfv-build-defaults'
parameters:
- - label:
- name: SLAVE_LABEL
- default: 'opnfv-build'
- description: 'Slave label on Jenkins'
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
- - string:
- name: BUILD_DIRECTORY
- default: $WORKSPACE/build_output
- description: "Directory where the build artifact will be located upon the completion of the build."
+ - label:
+ name: SLAVE_LABEL
+ default: 'opnfv-build'
+ description: 'Slave label on Jenkins'
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+ - string:
+ name: BUILD_DIRECTORY
+ default: $WORKSPACE/build_output
+ description: "Directory where the build artifact will be located upon the completion of the build."
+
- parameter:
name: 'huawei-build-defaults'
parameters:
- - node:
- name: SLAVE_NAME
- description: 'Slave name on Jenkins'
- allowed-slaves:
- - huawei-build
- default-slaves:
- - huawei-build
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
+ - node:
+ name: SLAVE_NAME
+ description: 'Slave name on Jenkins'
+ allowed-slaves:
+ - huawei-build
+ default-slaves:
+ - huawei-build
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+
- parameter:
name: 'opnfv-build-ubuntu-arm-defaults'
parameters:
- - label:
- name: SLAVE_LABEL
- default: 'opnfv-build-ubuntu-arm'
- description: 'Slave label on Jenkins'
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
- - string:
- name: BUILD_DIRECTORY
- default: $WORKSPACE/build_output
- description: "Directory where the build artifact will be located upon the completion of the build."
+ - label:
+ name: SLAVE_LABEL
+ default: 'opnfv-build-ubuntu-arm'
+ description: 'Slave label on Jenkins'
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+ - string:
+ name: BUILD_DIRECTORY
+ default: $WORKSPACE/build_output
+ description: "Directory where the build artifact will be located upon the completion of the build."
+
#####################################################
# Parameters for none-CI PODs
#####################################################
- parameter:
name: 'ericsson-pod1-defaults'
parameters:
- - node:
- name: SLAVE_NAME
- description: 'Slave name on Jenkins'
- allowed-slaves:
- - ericsson-pod1
- default-slaves:
- - ericsson-pod1
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
+ - node:
+ name: SLAVE_NAME
+ description: 'Slave name on Jenkins'
+ allowed-slaves:
+ - ericsson-pod1
+ default-slaves:
+ - ericsson-pod1
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+
- parameter:
name: 'cengn-pod1-defaults'
parameters:
- - node:
- name: SLAVE_NAME
- description: 'Slave name on Jenkins'
- allowed-slaves:
- - cengn-pod1
- default-slaves:
- - cengn-pod1
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
+ - node:
+ name: SLAVE_NAME
+ description: 'Slave name on Jenkins'
+ allowed-slaves:
+ - cengn-pod1
+ default-slaves:
+ - cengn-pod1
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+
- parameter:
name: 'intel-pod1-defaults'
parameters:
- - node:
- name: SLAVE_NAME
- description: 'Slave name on Jenkins'
- allowed-slaves:
- - intel-pod1
- default-slaves:
- - intel-pod1
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
+ - node:
+ name: SLAVE_NAME
+ description: 'Slave name on Jenkins'
+ allowed-slaves:
+ - intel-pod1
+ default-slaves:
+ - intel-pod1
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+
- parameter:
name: 'intel-pod2-defaults'
parameters:
- - node:
- name: SLAVE_NAME
- description: 'Slave name on Jenkins'
- allowed-slaves:
- - intel-pod2
- default-slaves:
- - intel-pod2
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
- - string:
- name: SSH_KEY
- default: /root/.ssh/id_rsa
- description: 'SSH key to use for Apex'
+ - node:
+ name: SLAVE_NAME
+ description: 'Slave name on Jenkins'
+ allowed-slaves:
+ - intel-pod2
+ default-slaves:
+ - intel-pod2
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+ - string:
+ name: SSH_KEY
+ default: /root/.ssh/id_rsa
+ description: 'SSH key to use for Apex'
+
- parameter:
name: 'intel-pod9-defaults'
parameters:
- - node:
- name: SLAVE_NAME
- description: 'Slave name on Jenkins'
- allowed-slaves:
- - intel-pod9
- default-slaves:
- - intel-pod9
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
+ - node:
+ name: SLAVE_NAME
+ description: 'Slave name on Jenkins'
+ allowed-slaves:
+ - intel-pod9
+ default-slaves:
+ - intel-pod9
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+
- parameter:
name: 'intel-pod10-defaults'
parameters:
- - node:
- name: SLAVE_NAME
- description: 'Slave name on Jenkins'
- allowed-slaves:
- - intel-pod10
- default-slaves:
- - intel-pod10
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
+ - node:
+ name: SLAVE_NAME
+ description: 'Slave name on Jenkins'
+ allowed-slaves:
+ - intel-pod10
+ default-slaves:
+ - intel-pod10
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+
- parameter:
name: 'intel-pod12-defaults'
parameters:
- - node:
- name: SLAVE_NAME
- description: 'Slave name on Jenkins'
- allowed-slaves:
- - intel-pod12
- default-slaves:
- - intel-pod12
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
+ - node:
+ name: SLAVE_NAME
+ description: 'Slave name on Jenkins'
+ allowed-slaves:
+ - intel-pod12
+ default-slaves:
+ - intel-pod12
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+
- parameter:
name: 'huawei-pod3-defaults'
parameters:
- - node:
- name: SLAVE_NAME
- description: 'Slave name on Jenkins'
- allowed-slaves:
- - huawei-pod3
- default-slaves:
- - huawei-pod3
- - label:
- name: SLAVE_LABEL
- default: 'huawei-test'
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
+ - node:
+ name: SLAVE_NAME
+ description: 'Slave name on Jenkins'
+ allowed-slaves:
+ - huawei-pod3
+ default-slaves:
+ - huawei-pod3
+ - label:
+ name: SLAVE_LABEL
+ default: 'huawei-test'
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+
- parameter:
name: 'huawei-pod4-defaults'
parameters:
- - node:
- name: SLAVE_NAME
- description: 'Slave name on Jenkins'
- allowed-slaves:
- - huawei-pod4
- default-slaves:
- - huawei-pod4
- - label:
- name: SLAVE_LABEL
- default: 'huawei-test'
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
+ - node:
+ name: SLAVE_NAME
+ description: 'Slave name on Jenkins'
+ allowed-slaves:
+ - huawei-pod4
+ default-slaves:
+ - huawei-pod4
+ - label:
+ name: SLAVE_LABEL
+ default: 'huawei-test'
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+
- parameter:
name: 'intel-pod8-defaults'
parameters:
- - node:
- name: SLAVE_NAME
- description: 'Slave name on Jenkins'
- allowed-slaves:
- - intel-pod8
- default-slaves:
- - intel-pod8
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ - node:
+ name: SLAVE_NAME
+ description: 'Slave name on Jenkins'
+ allowed-slaves:
+ - intel-pod8
+ default-slaves:
+ - intel-pod8
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+
- parameter:
name: 'huawei-virtual5-defaults'
parameters:
- - label:
- name: SLAVE_LABEL
- default: 'huawei-virtual5'
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ - label:
+ name: SLAVE_LABEL
+ default: 'huawei-virtual5'
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+
- parameter:
name: 'huawei-virtual7-defaults'
parameters:
- - node:
- name: SLAVE_NAME
- description: 'Slave name on Jenkins'
- allowed-slaves:
- - huawei-virtual7
- default-slaves:
- - huawei-virtual7
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ - node:
+ name: SLAVE_NAME
+ description: 'Slave name on Jenkins'
+ allowed-slaves:
+ - huawei-virtual7
+ default-slaves:
+ - huawei-virtual7
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+
- parameter:
name: 'huawei-pod7-defaults'
parameters:
- - node:
- name: SLAVE_NAME
- description: 'Slave name on Jenkins'
- allowed-slaves:
- - huawei-pod7
- default-slaves:
- - huawei-pod7
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ - node:
+ name: SLAVE_NAME
+ description: 'Slave name on Jenkins'
+ allowed-slaves:
+ - huawei-pod7
+ default-slaves:
+ - huawei-pod7
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+
- parameter:
name: 'zte-pod1-defaults'
parameters:
- - node:
- name: SLAVE_NAME
- description: 'Slave name on Jenkins'
- allowed-slaves:
- - zte-pod1
- default-slaves:
- - zte-pod1
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
- - string:
- name: INSTALLER_IP
- default: '10.20.6.2'
- description: 'IP of the installer'
- - string:
- name: BRIDGE
- default: 'br6'
- description: 'pxe bridge for booting of Fuel master'
+ - node:
+ name: SLAVE_NAME
+ description: 'Slave name on Jenkins'
+ allowed-slaves:
+ - zte-pod1
+ default-slaves:
+ - zte-pod1
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+ - string:
+ name: INSTALLER_IP
+ default: '10.20.6.2'
+ description: 'IP of the installer'
+ - string:
+ name: BRIDGE
+ default: 'br6'
+ description: 'pxe bridge for booting of Fuel master'
+
- parameter:
name: 'zte-pod2-defaults'
parameters:
- - node:
- name: SLAVE_NAME
- description: 'Slave name on Jenkins'
- allowed-slaves:
- - zte-pod2
- default-slaves:
- - zte-pod2
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
- - string:
- name: INSTALLER_IP
- default: '10.20.7.3'
- description: 'IP of the installer'
- - string:
- name: BRIDGE
- default: 'br7'
- description: 'pxe bridge for booting of Fuel master'
+ - node:
+ name: SLAVE_NAME
+ description: 'Slave name on Jenkins'
+ allowed-slaves:
+ - zte-pod2
+ default-slaves:
+ - zte-pod2
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+ - string:
+ name: INSTALLER_IP
+ default: '10.20.7.3'
+ description: 'IP of the installer'
+ - string:
+ name: BRIDGE
+ default: 'br7'
+ description: 'pxe bridge for booting of Fuel master'
+
- parameter:
name: 'zte-pod3-defaults'
parameters:
- - node:
- name: SLAVE_NAME
- description: 'Slave name on Jenkins'
- allowed-slaves:
- - zte-pod3
- default-slaves:
- - zte-pod3
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
- - string:
- name: BRIDGE
- default: 'br0'
- description: 'pxe bridge for booting of Fuel master'
+ - node:
+ name: SLAVE_NAME
+ description: 'Slave name on Jenkins'
+ allowed-slaves:
+ - zte-pod3
+ default-slaves:
+ - zte-pod3
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+ - string:
+ name: BRIDGE
+ default: 'br0'
+ description: 'pxe bridge for booting of Fuel master'
+
- parameter:
name: zte-pod4-defaults
parameters:
- - node:
- name: SLAVE_NAME
- description: 'Slave name on Jenkins'
- allowed-slaves:
- - zte-pod4
- default-slaves:
- - zte-pod4
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
+ - node:
+ name: SLAVE_NAME
+ description: 'Slave name on Jenkins'
+ allowed-slaves:
+ - zte-pod4
+ default-slaves:
+ - zte-pod4
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+
- parameter:
name: 'juniper-pod1-defaults'
parameters:
- - node:
- name: SLAVE_NAME
- description: 'Slave name on Jenkins'
- allowed-slaves:
- - juniper-pod1
- default-slaves:
- - juniper-pod1
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
- - string:
- name: CEPH_DISKS
- default: /srv
- description: "Disks to use by ceph (comma separated list)"
+ - node:
+ name: SLAVE_NAME
+ description: 'Slave name on Jenkins'
+ allowed-slaves:
+ - juniper-pod1
+ default-slaves:
+ - juniper-pod1
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+ - string:
+ name: CEPH_DISKS
+ default: /srv
+ description: "Disks to use by ceph (comma separated list)"
+
- parameter:
name: 'orange-pod1-defaults'
parameters:
- - node:
- name: SLAVE_NAME
- description: 'Slave name on Jenkins'
- allowed-slaves:
- - orange-pod1
- default-slaves:
- - orange-pod1
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
+ - node:
+ name: SLAVE_NAME
+ description: 'Slave name on Jenkins'
+ allowed-slaves:
+ - orange-pod1
+ default-slaves:
+ - orange-pod1
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+
- parameter:
name: 'orange-pod2-defaults'
parameters:
- - node:
- name: SLAVE_NAME
- description: 'Slave name on Jenkins'
- allowed-slaves:
- - orange-pod2
- default-slaves:
- - orange-pod2
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
+ - node:
+ name: SLAVE_NAME
+ description: 'Slave name on Jenkins'
+ allowed-slaves:
+ - orange-pod2
+ default-slaves:
+ - orange-pod2
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+
- parameter:
name: 'orange-pod5-defaults'
parameters:
- - node:
- name: SLAVE_NAME
- description: 'Slave name on Jenkins'
- allowed-slaves:
- - orange-pod5
- default-slaves:
- - orange-pod5
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
+ - node:
+ name: SLAVE_NAME
+ description: 'Slave name on Jenkins'
+ allowed-slaves:
+ - orange-pod5
+ default-slaves:
+ - orange-pod5
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+
- parameter:
name: 'dell-pod1-defaults'
parameters:
- - node:
- name: SLAVE_NAME
- description: 'Slave name on Jenkins'
- allowed-slaves:
- - dell-pod1
- default-slaves:
- - dell-pod1
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
+ - node:
+ name: SLAVE_NAME
+ description: 'Slave name on Jenkins'
+ allowed-slaves:
+ - dell-pod1
+ default-slaves:
+ - dell-pod1
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+
- parameter:
name: 'dell-pod2-defaults'
parameters:
- - node:
- name: SLAVE_NAME
- description: 'Slave name on Jenkins'
- allowed-slaves:
- - dell-pod2
- default-slaves:
- - dell-pod2
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
+ - node:
+ name: SLAVE_NAME
+ description: 'Slave name on Jenkins'
+ allowed-slaves:
+ - dell-pod2
+ default-slaves:
+ - dell-pod2
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+
- parameter:
name: 'nokia-pod1-defaults'
parameters:
- - node:
- name: SLAVE_NAME
- description: 'Slave name on Jenkins'
- allowed-slaves:
- - nokia-pod1
- default-slaves:
- - nokia-pod1
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
- - string:
- name: SSH_KEY
- default: /root/.ssh/id_rsa
- description: 'SSH key to use for Apex'
+ - node:
+ name: SLAVE_NAME
+ description: 'Slave name on Jenkins'
+ allowed-slaves:
+ - nokia-pod1
+ default-slaves:
+ - nokia-pod1
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+ - string:
+ name: SSH_KEY
+ default: /root/.ssh/id_rsa
+ description: 'SSH key to use for Apex'
+
- parameter:
name: 'arm-pod2-defaults'
parameters:
- - node:
- name: SLAVE_NAME
- description: 'Slave name on Jenkins'
- allowed-slaves:
- - arm-pod2
- default-slaves:
- - arm-pod2
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
- - string:
- name: LAB_CONFIG_URL
- default: ssh://jenkins-enea@gerrit.opnfv.org:29418/securedlab
- description: 'Base URI to the configuration directory'
-- parameter:
- name: 'arm-pod3-defaults'
- parameters:
- - node:
- name: SLAVE_NAME
- description: 'Slave name on Jenkins'
- allowed-slaves:
- - arm-pod3
- default-slaves:
- - arm-pod3
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
- - string:
- name: LAB_CONFIG_URL
- default: ssh://jenkins-enea@gerrit.opnfv.org:29418/securedlab
- description: 'Base URI to the configuration directory'
+ - node:
+ name: SLAVE_NAME
+ description: 'Slave name on Jenkins'
+ allowed-slaves:
+ - arm-pod2
+ default-slaves:
+ - arm-pod2
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+ - string:
+ name: LAB_CONFIG_URL
+ default: ssh://jenkins-enea@gerrit.opnfv.org:29418/securedlab
+ description: 'Base URI to the configuration directory'
+
+- parameter:
+ name: 'arm-pod5-defaults'
+ parameters:
+ - node:
+ name: SLAVE_NAME
+ description: 'Slave name on Jenkins'
+ allowed-slaves:
+ - arm-pod5
+ default-slaves:
+ - arm-pod5
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+ - string:
+ name: LAB_CONFIG_URL
+ default: ssh://jenkins-enea@gerrit.opnfv.org:29418/securedlab
+ description: 'Base URI to the configuration directory'
+
- parameter:
name: 'arm-pod4-defaults'
parameters:
- - node:
- name: SLAVE_NAME
- description: 'Slave name on Jenkins'
- allowed-slaves:
- - arm-pod4
- default-slaves:
- - arm-pod4
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
- - string:
- name: LAB_CONFIG_URL
- default: ssh://jenkins-enea@gerrit.opnfv.org:29418/securedlab
- description: 'Base URI to the configuration directory'
-- parameter:
- name: 'arm-virtual1-defaults'
- parameters:
- - node:
- name: SLAVE_NAME
- description: 'Slave name on Jenkins'
- allowed-slaves:
- - arm-virtual1
- default-slaves:
- - arm-virtual1
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
- - string:
- name: LAB_CONFIG_URL
- default: ssh://jenkins-enea@gerrit.opnfv.org:29418/securedlab
- description: 'Base URI to the configuration directory'
+ - node:
+ name: SLAVE_NAME
+ description: 'Slave name on Jenkins'
+ allowed-slaves:
+ - arm-pod4
+ default-slaves:
+ - arm-pod4
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+ - string:
+ name: LAB_CONFIG_URL
+ default: ssh://jenkins-enea@gerrit.opnfv.org:29418/securedlab
+ description: 'Base URI to the configuration directory'
+
+- parameter:
+ name: 'arm-virtual2-defaults'
+ parameters:
+ - node:
+ name: SLAVE_NAME
+ description: 'Slave name on Jenkins'
+ allowed-slaves:
+ - arm-virtual2
+ default-slaves:
+ - arm-virtual2
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+ - string:
+ name: LAB_CONFIG_URL
+ default: ssh://jenkins-enea@gerrit.opnfv.org:29418/securedlab
+ description: 'Base URI to the configuration directory'
+
- parameter:
name: 'intel-virtual6-defaults'
parameters:
- - node:
- name: SLAVE_NAME
- description: 'Slave name on Jenkins'
- allowed-slaves:
- - intel-virtual6
- default-slaves:
- - intel-virtual6
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
+ - node:
+ name: SLAVE_NAME
+ description: 'Slave name on Jenkins'
+ allowed-slaves:
+ - intel-virtual6
+ default-slaves:
+ - intel-virtual6
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+
+- parameter:
+ name: 'intel-virtual10-defaults'
+ parameters:
+ - node:
+ name: SLAVE_NAME
+ description: 'Slave name on Jenkins'
+ allowed-slaves:
+ - intel-virtual10
+ default-slaves:
+ - intel-virtual10
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+
- parameter:
name: 'doctor-defaults'
parameters:
- - node:
- name: SLAVE_NAME
- description: 'Slave name on Jenkins'
- allowed-slaves:
- - '{default-slave}'
- default-slaves:
- - '{default-slave}'
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
- - string:
- name: SSH_KEY
- default: /root/.ssh/id_rsa
- description: 'SSH key to be used'
+ - node:
+ name: SLAVE_NAME
+ description: 'Slave name on Jenkins'
+ allowed-slaves:
+ - '{default-slave}'
+ default-slaves:
+ - '{default-slave}'
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+ - string:
+ name: SSH_KEY
+ default: /root/.ssh/id_rsa
+ description: 'SSH key to be used'
+
- parameter:
name: 'doctor-apex-verify-defaults'
parameters:
- - 'doctor-defaults':
- default-slave: 'doctor-apex-verify'
+ - 'doctor-defaults':
+ default-slave: 'doctor-apex-verify'
+
- parameter:
name: 'doctor-fuel-verify-defaults'
parameters:
- - 'doctor-defaults':
- default-slave: 'doctor-fuel-verify'
+ - 'doctor-defaults':
+ default-slave: 'doctor-fuel-verify'
+
- parameter:
name: 'doctor-joid-verify-defaults'
parameters:
- - 'doctor-defaults':
- default-slave: 'doctor-joid-verify'
+ - 'doctor-defaults':
+ default-slave: 'doctor-joid-verify'
+
- parameter:
name: 'multisite-virtual-defaults'
parameters:
- - label:
- name: SLAVE_LABEL
- default: 'multisite-virtual'
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
+ - label:
+ name: SLAVE_LABEL
+ default: 'multisite-virtual'
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+
- parameter:
name: 'ericsson-virtual5-defaults'
parameters:
- - label:
- name: SLAVE_LABEL
- default: 'ericsson-virtual5'
- - string:
- name: GIT_BASE
- default: https://git.opendaylight.org/gerrit/p/$PROJECT.git
- description: 'Git URL to use on this Jenkins Slave'
+ - label:
+ name: SLAVE_LABEL
+ default: 'ericsson-virtual5'
+ - string:
+ name: GIT_BASE
+ default: https://git.opendaylight.org/gerrit/p/$PROJECT.git
+ description: 'Git URL to use on this Jenkins Slave'
+
- parameter:
name: 'ericsson-virtual12-defaults'
parameters:
- - label:
- name: SLAVE_LABEL
- default: 'ericsson-virtual12'
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
+ - label:
+ name: SLAVE_LABEL
+ default: 'ericsson-virtual12'
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+
- parameter:
name: 'ericsson-virtual13-defaults'
parameters:
- - label:
- name: SLAVE_LABEL
- default: 'ericsson-virtual13'
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
+ - label:
+ name: SLAVE_LABEL
+ default: 'ericsson-virtual13'
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+
- parameter:
name: 'ericsson-virtual-pod1bl01-defaults'
parameters:
- - label:
- name: SLAVE_LABEL
- default: 'ericsson-virtual-pod1bl01'
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
+ - label:
+ name: SLAVE_LABEL
+ default: 'ericsson-virtual-pod1bl01'
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+
- parameter:
name: 'odl-netvirt-virtual-defaults'
parameters:
- - label:
- name: SLAVE_LABEL
- default: 'odl-netvirt-virtual'
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
+ - label:
+ name: SLAVE_LABEL
+ default: 'odl-netvirt-virtual'
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+
- parameter:
name: 'odl-netvirt-virtual-intel-defaults'
parameters:
- - label:
- name: SLAVE_LABEL
- default: 'odl-netvirt-virtual-intel'
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
+ - label:
+ name: SLAVE_LABEL
+ default: 'odl-netvirt-virtual-intel'
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+
#####################################################
# These slaves are just dummy slaves for sandbox jobs
#####################################################
- parameter:
name: 'sandbox-baremetal-defaults'
parameters:
- - label:
- name: SLAVE_LABEL
- default: 'sandbox-baremetal'
- description: 'Slave label on Jenkins'
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
- - string:
- name: BUILD_DIRECTORY
- default: $WORKSPACE/build_output
- description: "Directory where the build artifact will be located upon the completion of the build."
+ - label:
+ name: SLAVE_LABEL
+ default: 'sandbox-baremetal'
+ description: 'Slave label on Jenkins'
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+ - string:
+ name: BUILD_DIRECTORY
+ default: $WORKSPACE/build_output
+ description: "Directory where the build artifact will be located upon the completion of the build."
+
- parameter:
name: 'sandbox-virtual-defaults'
parameters:
- - label:
- name: SLAVE_LABEL
- default: 'sandbox-virtual'
- description: 'Slave label on Jenkins'
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
- - string:
- name: BUILD_DIRECTORY
- default: $WORKSPACE/build_output
- description: "Directory where the build artifact will be located upon the completion of the build."
+ - label:
+ name: SLAVE_LABEL
+ default: 'sandbox-virtual'
+ description: 'Slave label on Jenkins'
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+ - string:
+ name: BUILD_DIRECTORY
+ default: $WORKSPACE/build_output
+ description: "Directory where the build artifact will be located upon the completion of the build."
+
- parameter:
name: 'dummy-pod1-defaults'
parameters:
- - label:
- name: SLAVE_LABEL
- default: 'dummy-pod1'
- description: 'Slave label on Jenkins'
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
- - string:
- name: BUILD_DIRECTORY
- default: $WORKSPACE/build_output
- description: "Directory where the build artifact will be located upon the completion of the build."
+ - label:
+ name: SLAVE_LABEL
+ default: 'dummy-pod1'
+ description: 'Slave label on Jenkins'
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+ - string:
+ name: BUILD_DIRECTORY
+ default: $WORKSPACE/build_output
+ description: "Directory where the build artifact will be located upon the completion of the build."
dockerdir: 'docker/storperf-master'
<<: *master
<<: *other-receivers
+ - 'storperf-graphite':
+ project: 'storperf'
+ dockerdir: 'docker/storperf-graphite'
+ <<: *master
+ <<: *other-receivers
- 'storperf-httpfrontend':
project: 'storperf'
dockerdir: 'docker/storperf-httpfrontend'
--- /dev/null
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2016 Linux Foundation and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+set -o errexit
+set -o pipefail
+export PATH=$PATH:/usr/local/bin/
+
+DATE="$(date +%Y%m%d)"
+
+declare -a PROJECT_LIST
+EXCLUDE_PROJECTS="All-Projects|All-Users|securedlab"
+CLONE_PATH="$WORKSPACE/opnfv-repos"
+
+# Generate project list from gerrit
+PROJECT_LIST=($(ssh -p 29418 jenkins-ci@gerrit.opnfv.org gerrit ls-projects | egrep -v "$EXCLUDE_PROJECTS"))
+
+echo "Cloning all OPNFV repositories"
+echo "------------------------------"
+
+for PROJECT in "${PROJECT_LIST[@]}"; do
+ echo "> Cloning $PROJECT"
+ if [ ! -d "$CLONE_PATH/$PROJECT" ]; then
+ git clone "https://gerrit.opnfv.org/gerrit/$PROJECT.git" $CLONE_PATH/$PROJECT
+ else
+ pushd "$CLONE_PATH/$PROJECT" &>/dev/null
+ git pull -f
+ popd &> /dev/null
+ fi
+
+ # Don't license scan kernel or qemu in kvmfornfv
+ if [ "$PROJECT" == "kvmfornfv" ]; then
+ rm -rf "$CLONE_PATH/$PROJECT/{kernel,qemu}"
+ fi
+done
+
+echo "Finished cloning OPNFV repositories"
+echo "-----------------------------------"
+
+# Copy repos and clear git data
+echo "Copying repos to $WORKSPACE/opnfv-archive and removing .git files"
+cp -R $CLONE_PATH $WORKSPACE/opnfv-archive
+find $WORKSPACE/opnfv-archive -type d -iname '.git' -exec rm -rf {} +
+find $WORKSPACE/opnfv-archive -type f -iname '.git*' -exec rm -rf {} +
+
+# Create archive
+echo "Creating archive: opnfv-archive-$DATE.tar.gz"
+echo "--------------------------------------"
+cd $WORKSPACE
+tar -czf "opnfv-archive-$DATE.tar.gz" opnfv-archive && rm -rf opnfv-archive
+echo "Archiving Complete."
+
+echo "Uploading artifacts"
+echo "--------------------------------------"
+
+gsutil cp "$WORKSPACE/opnfv-archive-$DATE.tar.gz" \
+ "gs://opnfv-archive/opnfv-archive-$DATE.tar.gz" 2>&1
+
+rm -f opnfv-archive-$DATE.tar.gz
+
+echo "Finished"
jobs:
- 'prune-docker-images'
+ - 'archive-repositories'
+
########################
# job templates
########################
triggers:
- timed: '@midnight'
+
+- job-template:
+ name: 'archive-repositories'
+
+ disabled: false
+
+ concurrent: true
+
+ parameters:
+ - node:
+ name: SLAVE_NAME
+ description: Where to create the archive
+ default-slaves:
+ - master
+ allowed-multiselect: false
+ ignore-offline-nodes: true
+
+ triggers:
+ - timed: '@monthly'
+
+ builders:
+ - shell:
+ !include-raw-escape: opnfv-repo-archiver.sh
pattern: '**/*.jinja2'
- compare-type: ANT
pattern: '**/*.yaml'
+ skip-vote:
+ successful: true
+ failed: true
+ unstable: true
+ notbuilt: true
+
builders:
- check-jinja
# They will only be enabled on request by projects!
###################################################
- project:
- name: snaps
+ name: sfc-project-jobs
- project: '{name}'
+ project: 'sfc'
jobs:
- - 'snaps-verify-{stream}'
-
+ - 'sfc-verify-{stream}'
stream:
- master:
branch: '{stream}'
- danube:
branch: 'stable/{stream}'
gs-pathname: '/{stream}'
- disabled: false
+ disabled: true
- job-template:
- name: 'snaps-verify-{stream}'
+ name: 'sfc-verify-{stream}'
disabled: '{obj:disabled}'
project: '{project}'
branch: '{branch}'
- 'opnfv-build-ubuntu-defaults'
-
scm:
- git-scm-gerrit
- compare-type: ANT
pattern: 'docs/**|.gitignore'
+ builders:
+ - sfc-unit-tests
+
+################################
+# job builders
+################################
+
+- builder:
+ name: sfc-unit-tests
builders:
- shell: |
- echo "Nothing to verify!"
+ cd $WORKSPACE && yamllint $(git ls-tree -r HEAD --name-only | egrep 'yml$|yaml$')
--- /dev/null
+###################################################
+# All the jobs except verify have been removed!
+# They will only be enabled on request by projects!
+###################################################
+- project:
+ name: snaps
+
+ project: '{name}'
+
+ jobs:
+ - 'snaps-verify-{stream}'
+
+ stream:
+ - master:
+ branch: '{stream}'
+ gs-pathname: ''
+ disabled: false
+
+- job-template:
+ name: 'snaps-verify-{stream}'
+
+ disabled: '{obj:disabled}'
+
+ concurrent: false
+
+ parameters:
+ - project-parameter:
+ project: '{project}'
+ branch: '{branch}'
+ - string:
+ name: DEPLOYMENT_HOST_IP
+ default: 192.168.122.2
+ description: 'IP of the deployment node'
+ - string:
+ name: CONTROLLER_IP
+ default: 192.168.122.3
+ description: 'IP of the controller node'
+ - 'intel-virtual10-defaults'
+
+ scm:
+ - git-scm-gerrit
+
+ triggers:
+ - gerrit:
+ server-name: 'gerrit.opnfv.org'
+ trigger-on:
+ - patchset-created-event:
+ exclude-drafts: 'false'
+ exclude-trivial-rebase: 'false'
+ exclude-no-code-change: 'false'
+ - draft-published-event
+ - comment-added-contains-event:
+ comment-contains-value: 'recheck'
+ - comment-added-contains-event:
+ comment-contains-value: 'reverify'
+ projects:
+ - project-compare-type: 'ANT'
+ project-pattern: '{project}'
+ branches:
+ - branch-compare-type: 'ANT'
+ branch-pattern: '**/{branch}'
+ disable-strict-forbidden-file-verification: 'true'
+ forbidden-file-paths:
+ - compare-type: ANT
+ pattern: 'docs/**|.gitignore'
+
+ builders:
+ - shell: |
+ #!/bin/bash
+
+ cd $WORKSPACE/ci
+ ./run_tests.sh $DEPLOYMENT_HOST_IP $CONTROLLER_IP
--- /dev/null
+- project:
+ name: storperf-verify
+
+ project: 'storperf'
+
+#--------------------------------
+# branches
+#--------------------------------
+ stream:
+ - master:
+ branch: '{stream}'
+ gs-pathname: ''
+ disabled: false
+ docker-tag: 'latest'
+#--------------------------------
+# patch verification phases
+#--------------------------------
+ phase:
+ - 'unit-test':
+ slave-label: 'opnfv-build-ubuntu'
+ - 'build-x86_64':
+ slave-label: 'opnfv-build-ubuntu'
+ - 'build-aarch64':
+ slave-label: 'opnfv-build-ubuntu-arm'
+#--------------------------------
+# jobs
+#--------------------------------
+ jobs:
+ - 'storperf-verify-{stream}'
+ - 'storperf-verify-{phase}-{stream}'
+#--------------------------------
+# job templates
+#--------------------------------
+- job-template:
+ name: 'storperf-verify-{stream}'
+
+ disabled: '{obj:disabled}'
+
+ project-type: 'multijob'
+
+ parameters:
+ - project-parameter:
+ project: '{project}'
+ branch: '{branch}'
+ - 'opnfv-build-defaults'
+
+ scm:
+ - git-scm-gerrit
+
+ triggers:
+ - gerrit:
+ server-name: 'gerrit.opnfv.org'
+ trigger-on:
+ - patchset-created-event:
+ exclude-drafts: 'false'
+ exclude-trivial-rebase: 'false'
+ exclude-no-code-change: 'false'
+ - draft-published-event
+ - comment-added-contains-event:
+ comment-contains-value: 'recheck'
+ - comment-added-contains-event:
+ comment-contains-value: 'reverify'
+ projects:
+ - project-compare-type: 'ANT'
+ project-pattern: '{project}'
+ branches:
+ - branch-compare-type: 'ANT'
+ branch-pattern: '**/{branch}'
+
+ builders:
+ - shell: |
+ #!/bin/bash
+
+ # we do nothing here as the main stuff will be done
+ # in phase jobs
+ echo "Triggering phase jobs!"
+ - multijob:
+ name: 'storperf-build-and-unittest'
+ execution-type: PARALLEL
+ projects:
+ - name: 'storperf-verify-unit-test-{stream}'
+ current-parameters: false
+ predefined-parameters: |
+ GERRIT_BRANCH=$GERRIT_BRANCH
+ GERRIT_REFSPEC=$GERRIT_REFSPEC
+ GERRIT_CHANGE_NUMBER=$GERRIT_CHANGE_NUMBER
+ GERRIT_CHANGE_COMMIT_MESSAGE=$GERRIT_CHANGE_COMMIT_MESSAGE
+ git-revision: true
+ node-parameters: false
+ kill-phase-on: FAILURE
+ abort-all-job: false
+ - name: 'storperf-verify-build-x86_64-{stream}'
+ current-parameters: false
+ predefined-parameters: |
+ GERRIT_BRANCH=$GERRIT_BRANCH
+ GERRIT_REFSPEC=$GERRIT_REFSPEC
+ GERRIT_CHANGE_NUMBER=$GERRIT_CHANGE_NUMBER
+ GERRIT_CHANGE_COMMIT_MESSAGE=$GERRIT_CHANGE_COMMIT_MESSAGE
+ ARCH=x86_64
+ git-revision: true
+ node-parameters: false
+ kill-phase-on: FAILURE
+ abort-all-job: false
+ - name: 'storperf-verify-build-aarch64-{stream}'
+ current-parameters: false
+ predefined-parameters: |
+ GERRIT_BRANCH=$GERRIT_BRANCH
+ GERRIT_REFSPEC=$GERRIT_REFSPEC
+ GERRIT_CHANGE_NUMBER=$GERRIT_CHANGE_NUMBER
+ GERRIT_CHANGE_COMMIT_MESSAGE=$GERRIT_CHANGE_COMMIT_MESSAGE
+ ARCH=aarch64
+ git-revision: true
+ node-parameters: false
+ kill-phase-on: FAILURE
+ abort-all-job: false
+
+- job-template:
+ name: 'storperf-verify-{phase}-{stream}'
+
+ disabled: '{obj:disabled}'
+
+ wrappers:
+ - ssh-agent-wrapper
+ - build-timeout:
+ timeout: 30
+
+ parameters:
+ - project-parameter:
+ project: '{project}'
+ branch: '{branch}'
+ - '{slave-label}-defaults'
+
+ scm:
+ - git-scm-gerrit
+
+ builders:
+ - 'storperf-verify-{phase}-builders-macro'
+
+ publishers:
+ - 'storperf-verify-{phase}-publishers-macro'
+#--------------------------------
+# builder macros
+#--------------------------------
+- builder:
+ name: 'storperf-verify-unit-test-builders-macro'
+ builders:
+ - shell: |
+ $WORKSPACE/ci/verify.sh
+- builder:
+ name: 'storperf-verify-build-x86_64-builders-macro'
+ builders:
+ - shell: |
+ $WORKSPACE/ci/verify-build.sh
+- builder:
+ name: 'storperf-verify-build-aarch64-builders-macro'
+ builders:
+ - shell: |
+ $WORKSPACE/ci/verify-build.sh
+#--------------------------------
+# publisher macros
+#--------------------------------
+- publisher:
+ name: 'storperf-verify-unit-test-publishers-macro'
+ publishers:
+ - junit:
+ results: nosetests.xml
+ - cobertura:
+ report-file: "coverage.xml"
+ only-stable: "true"
+ health-auto-update: "true"
+ stability-auto-update: "true"
+ zoom-coverage-chart: "true"
+ targets:
+ - files:
+ healthy: 10
+ unhealthy: 20
+ failing: 30
+ - method:
+ healthy: 50
+ unhealthy: 40
+ failing: 30
+ - email-jenkins-admins-on-failure
+- publisher:
+ name: 'storperf-verify-build-x86_64-publishers-macro'
+ publishers:
+ - email-jenkins-admins-on-failure
+- publisher:
+ name: 'storperf-verify-build-aarch64-publishers-macro'
+ publishers:
+ - email-jenkins-admins-on-failure
project: '{name}'
jobs:
- - 'storperf-verify-{stream}'
- 'storperf-merge-{stream}'
stream:
disabled: false
docker-tag: 'stable'
-- job-template:
- name: 'storperf-verify-{stream}'
-
- disabled: '{obj:disabled}'
-
- node: opnfv-build-ubuntu
-
- parameters:
- - project-parameter:
- project: '{project}'
- branch: '{branch}'
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: "Used for overriding the GIT URL coming from Global Jenkins configuration in case if the stuff is done on none-LF HW."
-
- scm:
- - git-scm-gerrit
-
- triggers:
- - gerrit:
- server-name: 'gerrit.opnfv.org'
- trigger-on:
- - patchset-created-event:
- exclude-drafts: 'false'
- exclude-trivial-rebase: 'false'
- exclude-no-code-change: 'false'
- - draft-published-event
- - comment-added-contains-event:
- comment-contains-value: 'recheck'
- - comment-added-contains-event:
- comment-contains-value: 'reverify'
- projects:
- - project-compare-type: 'ANT'
- project-pattern: '{project}'
- branches:
- - branch-compare-type: 'ANT'
- branch-pattern: '**/{branch}'
-
- builders:
- - shell: |
- $WORKSPACE/ci/verify.sh
-
- publishers:
- - junit:
- results: nosetests.xml
- - cobertura:
- report-file: "coverage.xml"
- only-stable: "true"
- health-auto-update: "true"
- stability-auto-update: "true"
- zoom-coverage-chart: "true"
- targets:
- - files:
- healthy: 10
- unhealthy: 20
- failing: 30
- - method:
- healthy: 50
- unhealthy: 40
- failing: 30
- - email-jenkins-admins-on-failure
-
- job-template:
name: 'storperf-merge-{stream}'
- project:
- project: 'releng-xci'
+ name: 'opnfv-osa-periodic'
- name: 'os-periodic'
-#--------------------------------
-# Branch Anchors
-#--------------------------------
-# the versions stated here default to branches which then later
-# on used for checking out the branches, pulling in head of the branch.
- master: &master
- stream: master
- openstack-osa-version: '{stream}'
- opnfv-releng-version: 'master'
- gs-pathname: ''
- ocata: &ocata
- stream: ocata
- openstack-osa-version: 'stable/{stream}'
- opnfv-releng-version: 'master'
- gs-pathname: '/{stream}'
+ project: 'releng-xci'
#--------------------------------
-# XCI PODs
+# branches
#--------------------------------
- pod:
- - virtual:
- <<: *master
- - virtual:
- <<: *ocata
+ stream:
+ - master:
+ branch: '{stream}'
#--------------------------------
-# Supported Distros
+# distros
#--------------------------------
distro:
- 'xenial':
disabled: false
- slave-label: xci-xenial-virtual
- dib-os-release: 'xenial'
- dib-os-element: 'ubuntu-minimal'
- dib-os-packages: 'vlan,vim,less,bridge-utils,sudo,language-pack-en,iputils-ping,rsyslog,curl,python,debootstrap,ifenslave,ifenslave-2.6,lsof,lvm2,tcpdump,nfs-kernel-server,chrony,iptables'
- extra-dib-elements: 'openssh-server'
- 'centos7':
disabled: true
- slave-label: xci-centos7-virtual
- dib-os-release: '7'
- dib-os-element: 'centos7'
- dib-os-packages: 'vim,less,bridge-utils,iputils,rsyslog,curl'
- extra-dib-elements: 'openssh-server'
- 'suse':
disabled: true
- slave-label: xci-suse-virtual
- dib-os-release: '42.2'
- dib-os-element: 'opensuse-minimal'
- dib-os-packages: 'vim,less,bridge-utils,iputils,rsyslog,curl'
- extra-dib-elements: 'openssh-server'
-
+#--------------------------------
+# type
+#--------------------------------
+ type:
+ - virtual
+#--------------------------------
+# phases
+#--------------------------------
+ phase:
+ - 'deploy'
+ - 'healthcheck'
#--------------------------------
# jobs
#--------------------------------
jobs:
- - 'osa-deploy-{pod}-{distro}-periodic-{stream}'
-
+ - 'osa-periodic-{distro}-{type}-{stream}'
+ - 'osa-periodic-{phase}-{type}-{stream}'
#--------------------------------
# job templates
#--------------------------------
- job-template:
- name: 'osa-deploy-{pod}-{distro}-periodic-{stream}'
+ name: 'osa-periodic-{distro}-{type}-{stream}'
+
+ project-type: multijob
disabled: '{obj:disabled}'
concurrent: false
properties:
+ - logrotate-default
- build-blocker:
use-build-blocker: true
blocking-jobs:
- - '^xci-os.*'
- - '^xci-deploy.*'
- - '^xci-functest.*'
- - '^bifrost-.*periodic.*'
- - '^osa-.*periodic.*'
+ - 'xci-verify-.*'
+ - 'bifrost-verify-.*'
+ - 'bifrost-periodic-.*'
+ - 'osa-verify-.*'
+ - 'osa-periodic-.*'
block-level: 'NODE'
+
+ wrappers:
+ - ssh-agent-wrapper
+ - build-timeout:
+ timeout: 240
+ - fix-workspace-permissions
+
+ scm:
+ - git-scm-osa
+
+ triggers:
+ - pollscm:
+ cron: "@midnight"
+ ignore-post-commit-hooks: True
+
+ parameters:
+ - project-parameter:
+ project: '{project}'
+ branch: '{branch}'
+ - label:
+ name: SLAVE_LABEL
+ default: 'xci-virtual-{distro}'
+
+ builders:
+ - description-setter:
+ description: "Built on $NODE_NAME"
+ - multijob:
+ name: deploy
+ condition: SUCCESSFUL
+ projects:
+ - name: 'osa-periodic-deploy-{type}-{stream}'
+ current-parameters: true
+ predefined-parameters: |
+ DISTRO={distro}
+ DEPLOY_SCENARIO=os-nosdn-nofeature-noha
+ git-revision: true
+ node-parameters: true
+ kill-phase-on: FAILURE
+ abort-all-job: true
+ - multijob:
+ name: healthcheck
+ condition: SUCCESSFUL
+ projects:
+ - name: 'osa-periodic-healthcheck-{type}-{stream}'
+ current-parameters: true
+ predefined-parameters: |
+ DISTRO={distro}
+ DEPLOY_SCENARIO=os-nosdn-nofeature-noha
+ FUNCTEST_SUITE_NAME=healthcheck
+ node-parameters: true
+ kill-phase-on: NEVER
+ abort-all-job: false
+
+- job-template:
+ name: 'osa-periodic-{phase}-{type}-{stream}'
+
+ disabled: false
+
+ concurrent: true
+
+ properties:
- logrotate-default
+ - build-blocker:
+ use-build-blocker: true
+ blocking-jobs:
+ - 'xci-verify-deploy-.*'
+ - 'xci-verify-healthcheck-.*'
+ - 'bifrost-verify-.*'
+ - 'bifrost-periodic-.*'
+ - 'osa-verify-.*'
+ - 'osa-periodic-.*'
+ block-level: 'NODE'
parameters:
- project-parameter:
project: '{project}'
- branch: '{opnfv-releng-version}'
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- - string:
- name: XCI_FLAVOR
- default: 'ha'
+ branch: '{branch}'
+ - label:
+ name: SLAVE_LABEL
+ default: 'xci-virtual-{distro}'
- string:
name: OPENSTACK_OSA_VERSION
- default: '{openstack-osa-version}'
- - string:
- name: OPNFV_RELENG_VERSION
- default: '{opnfv-releng-version}'
+ default: 'master'
- string:
name: DISTRO
- default: '{distro}'
+ default: 'xenial'
- string:
- name: DIB_OS_RELEASE
- default: '{dib-os-release}'
+ name: DEPLOY_SCENARIO
+ default: 'os-nosdn-nofeature-noha'
- string:
- name: DIB_OS_ELEMENT
- default: '{dib-os-element}'
+ name: XCI_FLAVOR
+ default: 'mini'
- string:
- name: DIB_OS_PACKAGES
- default: '{dib-os-packages}'
+ name: XCI_LOOP
+ default: 'periodic'
- string:
- name: EXTRA_DIB_ELEMENTS
- default: '{extra-dib-elements}'
+ name: OPNFV_RELENG_DEV_PATH
+ default: $WORKSPACE/releng-xci
- string:
- name: CLEAN_DIB_IMAGES
- default: 'true'
- - label:
- name: SLAVE_LABEL
- default: '{slave-label}'
+ name: FUNCTEST_SUITE_NAME
+ default: 'healthcheck'
- string:
name: ANSIBLE_VERBOSITY
- default: ''
+ default: '-vvvv'
- string:
- name: XCI_LOOP
- default: 'periodic'
-
- wrappers:
- - fix-workspace-permissions
+ name: FORCE_MASTER
+ default: 'true'
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
scm:
- - git-scm
+ - git-scm-osa
- # trigger is disabled until we know which jobs we will have
- # and adjust stuff accordingly
- triggers:
- - timed: '' # '@midnight'
+ wrappers:
+ - ssh-agent-wrapper
+ - build-timeout:
+ timeout: 240
+ - fix-workspace-permissions
builders:
- description-setter:
- description: "Built on $NODE_NAME - Scenario: $DEPLOY_SCENARIO"
- - 'osa-deploy-builder'
+ description: "Built on $NODE_NAME"
+ - 'osa-periodic-{phase}-macro'
-#---------------------------
+#--------------------------------
# builder macros
-#---------------------------
+#--------------------------------
- builder:
- name: osa-deploy-builder
+ name: 'osa-periodic-deploy-macro'
builders:
- - shell:
- !include-raw: ./xci-deploy.sh
+ - shell: |
+ #!/bin/bash
+
+ # here we will
+ # - clone releng-xci repo as the jobs are running against openstack gerrit
+ # and we need to clone releng-xci ourselves to $OPNFV_RELENG_DEV_PATH
+ # - run sources-branch-updater.sh from osa to update/pin the role versions
+      #   at the time this job gets triggered against osa master, in case the
+      #   deployment succeeds and we decide to bump the version used by xci
+ # - copy generated role versions into $OPNFV_RELENG_DEV_PATH/xci/file
+ # - start the deployment by executing xci-deploy.sh as usual
+ #
+      # we might also need to pin the versions of openstack services.
+
+ echo "Hello World!"
+
+- builder:
+ name: 'osa-periodic-healthcheck-macro'
+ builders:
+ - shell: |
+ #!/bin/bash
+
+ echo "Hello World!"
+#--------------------------------
+# scm macro
+#--------------------------------
+- scm:
+ name: git-scm-osa
+ scm:
+ - git:
+ url: https://review.openstack.org/p/openstack/openstack-ansible.git
+ branches:
+ - master
+ timeout: 15
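The osa-periodic-deploy-macro above is still a placeholder; its comment block only sketches the intended flow. A minimal bash sketch of that flow, assuming the usual releng-xci layout and that sources-branch-updater.sh is run from the openstack-ansible checkout in $WORKSPACE (both assumptions, not wired up by this change), could look like:

    #!/bin/bash
    set -o errexit -o pipefail

    # clone releng-xci ourselves, since this job is triggered from openstack gerrit
    git clone https://gerrit.opnfv.org/gerrit/releng-xci "${OPNFV_RELENG_DEV_PATH}"

    # pin the role versions as they are at trigger time; the updater script and
    # its default arguments come from the openstack-ansible checkout (assumption)
    cd "${WORKSPACE}" && ./scripts/sources-branch-updater.sh

    # hand the generated role versions to xci, then deploy as usual
    cp ansible-role-requirements.yml "${OPNFV_RELENG_DEV_PATH}/xci/file/"
    cd "${OPNFV_RELENG_DEV_PATH}/xci" && ./xci-deploy.sh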
installer: fuel
auto-trigger-name: 'daily-trigger-disabled'
<<: *danube
- - arm-pod3:
+ - arm-pod5:
slave-label: '{pod}'
installer: fuel
auto-trigger-name: 'daily-trigger-disabled'
<<: *master
- - arm-pod3:
+ - arm-pod5:
slave-label: '{pod}'
installer: fuel
auto-trigger-name: 'daily-trigger-disabled'
<<: *danube
- - arm-virtual1:
+ - arm-virtual2:
slave-label: '{pod}'
installer: fuel
auto-trigger-name: 'daily-trigger-disabled'
<<: *master
- - arm-virtual1:
+ - arm-virtual2:
slave-label: '{pod}'
installer: fuel
auto-trigger-name: 'daily-trigger-disabled'
description: "POD: $NODE_NAME"
- 'yardstick-cleanup'
- 'yardstick-fetch-os-creds'
+ - 'yardstick-fetch-k8s-conf'
- 'yardstick-{testsuite}'
- 'yardstick-store-results'
- shell:
!include-raw: ../../utils/fetch_os_creds.sh
+- builder:
+ name: yardstick-fetch-k8s-conf
+ builders:
+ - shell:
+ !include-raw: ./yardstick-get-k8s-conf.sh
+
- builder:
name: yardstick-store-results
builders:
default: '-i 104.197.68.199:8086'
description: 'Arguments to use in order to choose the backend DB'
- parameter:
- name: 'yardstick-params-arm-virtual1'
+ name: 'yardstick-params-arm-virtual2'
parameters:
- string:
name: YARDSTICK_DB_BACKEND
description: 'Arguments to use in order to choose the backend DB'
- parameter:
- name: 'yardstick-params-arm-pod3'
+ name: 'yardstick-params-arm-pod5'
parameters:
- string:
name: YARDSTICK_DB_BACKEND
- trigger:
name: 'yardstick-daily-huawei-pod4-trigger'
triggers:
- - timed: '0 1 * * *'
+ - timed: ''
fi
if [[ ${INSTALLER_TYPE} == 'joid' ]]; then
- # If production lab then creds may be retrieved dynamically
- # creds are on the jumphost, always in the same folder
- rc_file_vol="-v $LAB_CONFIG/admin-openrc:/etc/yardstick/openstack.creds"
- # If dev lab, credentials may not be the default ones, just provide a path to put them into docker
- # replace the default one by the customized one provided by jenkins config
+ if [[ "${DEPLOY_SCENARIO:0:2}" == "k8" ]];then
+ rc_file_vol="-v /home/ubuntu/config:/etc/yardstick/admin.conf"
+ else
+ # If production lab then creds may be retrieved dynamically
+ # creds are on the jumphost, always in the same folder
+ rc_file_vol="-v $LAB_CONFIG/admin-openrc:/etc/yardstick/openstack.creds"
+        # If dev lab, credentials may not be the default ones; just provide a path to mount them into docker
+        # and replace the default file with the customized one provided by the jenkins config
+ fi
elif [[ ${INSTALLER_TYPE} == 'compass' && ${BRANCH} == 'master' ]]; then
cacert_file_vol="-v ${HOME}/os_cacert:/etc/yardstick/os_cacert"
echo "export OS_CACERT=/etc/yardstick/os_cacert" >> ${HOME}/opnfv-openrc.sh
map_log_dir="-v ${dir_result}:/tmp/yardstick"
# Run docker
-cmd="sudo docker run ${opts} ${envs} ${rc_file_vol} ${cacert_file_vol} ${map_log_dir} ${sshkey} opnfv/yardstick:${DOCKER_TAG} \
+if [[ ${INSTALLER_TYPE} == "joid" && "${DEPLOY_SCENARIO:0:2}" == "k8" ]];then
+    juju ssh kubernetes-master/0 sudo apt-get install -y docker.io
+    cmd="juju ssh kubernetes-master/0 sudo docker run ${opts} ${envs} ${rc_file_vol} ${cacert_file_vol} ${map_log_dir} ${sshkey} \
+        opnfv/yardstick:${DOCKER_TAG} exec_tests.sh ${YARDSTICK_DB_BACKEND} ${YARDSTICK_SCENARIO_SUITE_NAME}"
+else
+ cmd="sudo docker run ${opts} ${envs} ${rc_file_vol} ${cacert_file_vol} ${map_log_dir} ${sshkey} opnfv/yardstick:${DOCKER_TAG} \
exec_tests.sh ${YARDSTICK_DB_BACKEND} ${YARDSTICK_SCENARIO_SUITE_NAME}"
+fi
+
echo "Yardstick: Running docker cmd: ${cmd}"
${cmd}
--- /dev/null
+#!/bin/bash
+set -e
+
+dest_path="$HOME/admin.conf"
+
+if [[ "${DEPLOY_SCENARIO:0:2}" == "k8" ]];then
+ juju scp kubernetes-master/0:config "${dest_path}"
+fi
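As a quick sanity check of the fetched kubeconfig (hypothetical, not part of this change; assumes kubectl is available on the jumphost):

    # confirm the admin.conf copied from kubernetes-master/0 is usable
    KUBECONFIG="${HOME}/admin.conf" kubectl get nodes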
info "Fetching rc file from controller $controller_ip..."
ssh ${ssh_options} ubuntu@${controller_ip} "sudo cat /root/keystonercv3" > $dest_path
+
+ if [[ $BUILD_TAG =~ "baremetal" ]]; then
+ ssh ${ssh_options} ubuntu@${installer_ip} "cat /etc/ssl/certs/os_cacert" > $os_cacert
+ fi
else
#ip_fuel="10.20.0.2"
env=$(sshpass -p r00tme ssh 2>/dev/null ${ssh_options} root@${installer_ip} \
'ericsson-pod1' 'ericsson-pod2' \
'ericsson-virtual1' 'ericsson-virtual2' 'ericsson-virtual3' \
'ericsson-virtual4' 'ericsson-virtual5' 'ericsson-virtual12' \
-'arm-pod1' 'arm-pod3' \
+'arm-pod1' 'arm-pod5' \
'huawei-pod1' 'huawei-pod2' 'huawei-pod3' 'huawei-pod4' 'huawei-pod5' \
'huawei-pod6' 'huawei-pod7' 'huawei-pod12' \
'huawei-virtual1' 'huawei-virtual2' 'huawei-virtual3' 'huawei-virtual4' \
WORKDIR ${HOME}
# Packaged dependencies
RUN apt-get update && apt-get install -y \
+build-essential \
ssh \
+curl \
+gnupg \
python-pip \
+python-dev \
+python-setuptools \
git-core \
-nodejs \
-npm \
supervisor \
--no-install-recommends
-RUN pip install --upgrade pip
+RUN pip install --upgrade pip && easy_install -U setuptools==30.0.0
-RUN git clone --depth 1 https://gerrit.opnfv.org/gerrit/releng ${HOME}/releng
+RUN git clone --depth 1 https://gerrit.opnfv.org/gerrit/releng /home/opnfv/releng
RUN pip install -r ${working_dir}/requirements.txt
+RUN sh -c 'curl -sL https://deb.nodesource.com/setup_8.x | bash -' \
+ && apt-get install -y nodejs \
+ && npm install -g bower \
+ && npm install -g grunt \
+ && npm install -g grunt-cli
+
WORKDIR ${working_dir}
RUN python setup.py install
RUN docker/reporting.sh
# supervisor config
cp /home/opnfv/releng/utils/test/reporting/docker/supervisor.conf /etc/supervisor/conf.d/
-ln -s /usr/bin/nodejs /usr/bin/node
-
# Manage Angular front end
cd pages && /bin/bash angular.sh
<div class="panel-heading"><h4><b>List of last scenarios ({{version}}) run over the last {{period}} days </b></h4></div>
<table class="table">
<tr>
- <th width="40%">Scenario</th>
+ <th width="40%">HA Scenario</th>
<th width="20%">Status</th>
<th width="20%">Trend</th>
<th width="10%">Score</th>
</tr>
{% for scenario,iteration in scenario_stats.iteritems() -%}
<tr class="tr-ok">
+ {% if '-ha' in scenario -%}
<td><a href={{scenario_results[scenario].getUrlLastRun()}}>{{scenario}}</a></td>
<td><div id="gaugeScenario{{loop.index}}"></div></td>
<td><div id="trend_svg{{loop.index}}"></div></td>
<td>{{scenario_results[scenario].getScore()}}</td>
<td>{{iteration}}</td>
+ {%- endif %}
+ </tr>
+ {%- endfor %}
+    </table>
+ <br>
+ <table class="table">
+ <tr>
+ <th width="40%">NOHA Scenario</th>
+ <th width="20%">Status</th>
+ <th width="20%">Trend</th>
+ <th width="10%">Score</th>
+ <th width="10%">Iteration</th>
+ </tr>
+ {% for scenario,iteration in scenario_stats.iteritems() -%}
+ <tr class="tr-ok">
+ {% if '-noha' in scenario -%}
+ <td><a href={{scenario_results[scenario].getUrlLastRun()}}>{{scenario}}</a></td>
+ <td><div id="gaugeScenario{{loop.index}}"></div></td>
+ <td><div id="trend_svg{{loop.index}}"></div></td>
+ <td>{{scenario_results[scenario].getScore()}}</td>
+ <td>{{iteration}}</td>
+ {%- endif %}
</tr>
{%- endfor %}
- </table>
+ </table>
+
</div>
if not os.path.exists(profile_dir):
os.makedirs(profile_dir)
- profile_file = "{}/{}/scenario_history.txt".format(profile_dir,
- version)
+ profile_file = "{}/scenario_history.txt".format(profile_dir)
if not os.path.exists(profile_file):
with open(profile_file, 'w') as f:
info = 'date,scenario,installer,details,score\n'
<nav>
<ul class="nav nav-justified">
<li class="active"><a href="http://testresults.opnfv.org/reporting/index.html">Home</a></li>
- <li><a href="index-status-apex.html">Apex</a></li>
- <li><a href="index-status-compass.html">Compass</a></li>
- <li><a href="index-status-fuel.html">Fuel</a></li>
- <li><a href="index-status-joid.html">Joid</a></li>
+ <li><a href="status-apex.html">Apex</a></li>
+ <li><a href="status-compass.html">Compass</a></li>
+ <li><a href="status-daisy.html">Daisy</a></li>
+ <li><a href="status-fuel.html">Fuel</a></li>
+ <li><a href="status-joid.html">Joid</a></li>
</ul>
</nav>
</div>
# http://www.apache.org/licenses/LICENSE-2.0
# pylint: disable=missing-docstring
-
import setuptools
# In python < 2.7.4, a lazy loading of package `pbr` will break
AUTHORS
ChangeLog
setup.cfg-e
+opnfv_testapi/static
+build
+*.egg-info
WORKDIR /home/releng/utils/test/testapi/
RUN pip install -r requirements.txt
-RUN bash install.sh
+RUN python setup.py install
CMD ["bash", "docker/start-server.sh"]
+++ /dev/null
-#!/bin/bash
-
-usage="
-Script to install opnfv_tesgtapi automatically.
-This script should be run under root.
-
-usage:
- bash $(basename "$0") [-h|--help] [-t <test_name>]
-
-where:
- -h|--help show this help text"
-
-# Ref :- https://openstack.nimeyo.com/87286/openstack-packaging-all-definition-data-files-config-setup
-if [ -z "$VIRTUAL_ENV" ];
-then
- if [[ $(whoami) != "root" ]];
- then
- echo "Error: This script must be run as root!"
- exit 1
- fi
-else
- sed -i -e 's#/etc/opnfv_testapi =#etc/opnfv_testapi =#g' setup.cfg
-fi
-
-cp -fr 3rd_party/static opnfv_testapi/static
-python setup.py install
-rm -fr opnfv_testapi/static
-if [ ! -z "$VIRTUAL_ENV" ]; then
- sed -i -e 's#etc/opnfv_testapi =#/etc/opnfv_testapi =#g' setup.cfg
-fi
\ No newline at end of file
'{} has no attribute {}'.format(cls.__name__, k))
value = v
if isinstance(v, dict) and k in attr_parser:
- value = attr_parser[k].from_dict(v)
+ value = attr_parser[k].from_dict_with_raise(v)
elif isinstance(v, list) and k in attr_parser:
value = []
for item in v:
- value.append(attr_parser[k].from_dict(item))
+ value.append(attr_parser[k].from_dict_with_raise(item))
t.__setattr__(k, value)
self._get_one(query={'name': name})
pass
+ @swagger.operation(nickname="updateScenarioName")
def put(self, name):
- pass
+ """
+ @description: update scenario, only rename is supported currently
+ @param body: fields to be updated
+ @type body: L{ScenarioUpdateRequest}
+ @in body: body
+ @rtype: L{Scenario}
+ @return 200: update success
+ @raise 404: scenario not exist
+ @raise 403: nothing to update
+ """
+ query = {'name': name}
+ db_keys = ['name']
+ self._update(query=query, db_keys=db_keys)
@swagger.operation(nickname="deleteScenarioByName")
def delete(self, name):
('projects', 'put'): self._update_requests_update_projects,
('projects', 'delete'): self._update_requests_delete_projects,
('owner', 'put'): self._update_requests_change_owner,
+ ('versions', 'post'): self._update_requests_add_versions,
+ ('versions', 'put'): self._update_requests_update_versions,
+ ('versions', 'delete'): self._update_requests_delete_versions,
+ ('installers', 'post'): self._update_requests_add_installers,
+ ('installers', 'put'): self._update_requests_update_installers,
+ ('installers', 'delete'): self._update_requests_delete_installers,
}
updates[(item, action)](self.data)
@iter_installers
@iter_versions
def _update_requests_add_projects(self, version):
- exists = list()
- malformat = list()
- for n in self.body:
- try:
- f_n = models.ScenarioProject.from_dict_with_raise(n)
- if not any(o.project == f_n.project for o in version.projects):
- version.projects.append(f_n)
- else:
- exists.append(n['project'])
- except Exception as e:
- malformat.append(e.message)
- if malformat:
- raises.BadRequest(message.bad_format(malformat))
- elif exists:
- raises.Conflict(message.exist('projects', exists))
+ version.projects = self._update_with_body(models.ScenarioProject,
+ 'project',
+ version.projects)
@iter_installers
@iter_versions
def _update_requests_update_projects(self, version):
- exists = list()
- malformat = list()
- projects = list()
- for n in self.body:
- try:
- f_n = models.ScenarioProject.from_dict_with_raise(n)
- if not any(o.project == f_n.project for o in projects):
- projects.append(models.ScenarioProject.from_dict(n))
- else:
- exists.append(n['project'])
- except:
- malformat.append(n)
- if malformat:
- raises.BadRequest(message.bad_format(malformat))
- elif exists:
- raises.Forbidden(message.exist('projects', exists))
- version.projects = projects
+ version.projects = self._update_with_body(models.ScenarioProject,
+ 'project',
+ list())
@iter_installers
@iter_versions
@iter_installers
@iter_versions
def _update_requests_change_owner(self, version):
- version.owner = self.body
+ version.owner = self.body.get('owner')
+
+ @iter_installers
+ def _update_requests_add_versions(self, installer):
+ installer.versions = self._update_with_body(models.ScenarioVersion,
+ 'version',
+ installer.versions)
+
+ @iter_installers
+ def _update_requests_update_versions(self, installer):
+ installer.versions = self._update_with_body(models.ScenarioVersion,
+ 'version',
+ list())
+
+ @iter_installers
+ def _update_requests_delete_versions(self, installer):
+ installer.versions = self._remove_versions(installer.versions)
+
+ def _update_requests_add_installers(self, scenario):
+ scenario.installers = self._update_with_body(models.ScenarioInstaller,
+ 'installer',
+ scenario.installers)
+
+ def _update_requests_update_installers(self, scenario):
+ scenario.installers = self._update_with_body(models.ScenarioInstaller,
+ 'installer',
+ list())
+
+ def _update_requests_delete_installers(self, scenario):
+ scenario.installers = self._remove_installers(scenario.installers)
+
+ def _update_with_body(self, clazz, field, withs):
+ exists = list()
+ malformat = list()
+ for new in self.body:
+ try:
+ format_new = clazz.from_dict_with_raise(new)
+ new_name = getattr(format_new, field)
+ if not any(getattr(o, field) == new_name for o in withs):
+ withs.append(format_new)
+ else:
+ exists.append(new_name)
+ except Exception as error:
+ malformat.append(error.message)
+ if malformat:
+ raises.BadRequest(message.bad_format(malformat))
+ elif exists:
+ raises.Conflict(message.exist('{}s'.format(field), exists))
+ return withs
def _filter_installers(self, installers):
return self._filter('installer', installers)
+ def _remove_installers(self, installers):
+ return self._remove('installer', installers)
+
def _filter_versions(self, versions):
return self._filter('version', versions)
+ def _remove_versions(self, versions):
+ return self._remove('version', versions)
+
def _filter_projects(self, projects):
return self._filter('project', projects)
installer=<installer_name>& \
version=<version_name>
@param body: new owner
- @type body: L{string}
+ @type body: L{ScenarioChangeOwnerRequest}
@in body: body
@param installer: installer type
@type installer: L{string}
locators={'scenario': scenario,
'installer': None,
'version': None})
+
+
+class ScenarioVersionsHandler(GenericScenarioUpdateHandler):
+ @swagger.operation(nickname="addVersionsUnderScenario")
+ def post(self, scenario):
+ """
+ @description: add versions to scenario
+ @notes: add one or multiple versions
+ POST /api/v1/scenarios/<scenario_name>/versions? \
+ installer=<installer_name>
+ @param body: versions to be added
+ @type body: C{list} of L{ScenarioVersion}
+ @in body: body
+ @param installer: installer type
+ @type installer: L{string}
+ @in installer: query
+ @required installer: True
+ @return 200: versions are added.
+ @raise 400: bad schema
+ @raise 409: conflict, version already exists
+ @raise 404: scenario/installer not exist
+ """
+ self.do_update('versions',
+ 'post',
+ locators={'scenario': scenario,
+ 'installer': None})
+
+ @swagger.operation(nickname="updateVersionsUnderScenario")
+ def put(self, scenario):
+ """
+ @description: replace all versions
+ @notes: substitute all versions as a totality
+ PUT /api/v1/scenarios/<scenario_name>/versions? \
+ installer=<installer_name>
+ @param body: new versions
+ @type body: C{list} of L{ScenarioVersion}
+ @in body: body
+ @param installer: installer type
+ @type installer: L{string}
+ @in installer: query
+ @required installer: True
+ @return 200: replace versions success.
+ @raise 400: bad schema
+ @raise 404: scenario/installer not exist
+ """
+ self.do_update('versions',
+ 'put',
+ locators={'scenario': scenario,
+ 'installer': None})
+
+ @swagger.operation(nickname="deleteVersionsUnderScenario")
+ def delete(self, scenario):
+ """
+ @description: delete one or multiple versions
+ @notes: delete one or multiple versions
+ DELETE /api/v1/scenarios/<scenario_name>/versions? \
+ installer=<installer_name>
+ @param body: versions(names) to be deleted
+ @type body: C{list} of L{string}
+ @in body: body
+ @param installer: installer type
+ @type installer: L{string}
+ @in installer: query
+ @required installer: True
+ @return 200: delete versions success.
+ @raise 404: scenario/installer not exist
+ """
+ self.do_update('versions',
+ 'delete',
+ locators={'scenario': scenario,
+ 'installer': None})
+
+
+class ScenarioInstallersHandler(GenericScenarioUpdateHandler):
+ @swagger.operation(nickname="addInstallersUnderScenario")
+ def post(self, scenario):
+ """
+ @description: add installers to scenario
+ @notes: add one or multiple installers
+ POST /api/v1/scenarios/<scenario_name>/installers
+ @param body: installers to be added
+ @type body: C{list} of L{ScenarioInstaller}
+ @in body: body
+ @return 200: installers are added.
+ @raise 400: bad schema
+ @raise 409: conflict, installer already exists
+ @raise 404: scenario not exist
+ """
+ self.do_update('installers',
+ 'post',
+ locators={'scenario': scenario})
+
+ @swagger.operation(nickname="updateInstallersUnderScenario")
+ def put(self, scenario):
+ """
+ @description: replace all installers
+ @notes: substitute all installers as a totality
+ PUT /api/v1/scenarios/<scenario_name>/installers
+ @param body: new installers
+ @type body: C{list} of L{ScenarioInstaller}
+ @in body: body
+        @return 200: replace installers success.
+ @raise 400: bad schema
+ @raise 404: scenario/installer not exist
+ """
+ self.do_update('installers',
+ 'put',
+ locators={'scenario': scenario})
+
+ @swagger.operation(nickname="deleteInstallersUnderScenario")
+ def delete(self, scenario):
+ """
+ @description: delete one or multiple installers
+ @notes: delete one or multiple installers
+ DELETE /api/v1/scenarios/<scenario_name>/installers
+ @param body: installers(names) to be deleted
+ @type body: C{list} of L{string}
+ @in body: body
+        @return 200: delete installers success.
+ @raise 404: scenario/installer not exist
+ """
+ self.do_update('installers',
+ 'delete',
+ locators={'scenario': scenario})
return {'installers': ScenarioInstaller}
+@swagger.model()
+class ScenarioChangeOwnerRequest(models.ModelBase):
+ def __init__(self, owner=None):
+ self.owner = owner
+
+
+@swagger.model()
+class ScenarioUpdateRequest(models.ModelBase):
+ def __init__(self, name=None):
+ self.name = name
+
+
@swagger.model()
class Scenario(models.ModelBase):
"""
scenario_handlers.ScenarioProjectsHandler),
(r"/api/v1/scenarios/([^/]+)/owner",
scenario_handlers.ScenarioOwnerHandler),
+ (r"/api/v1/scenarios/([^/]+)/versions",
+ scenario_handlers.ScenarioVersionsHandler),
+ (r"/api/v1/scenarios/([^/]+)/installers",
+ scenario_handlers.ScenarioInstallersHandler),
# static path
(r'/(.*\.(css|png|gif|js|html|json|map|woff2|woff|ttf))',
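For reference, hypothetical calls against a testapi instance (host, port and names are placeholders) exercising the new versions route and the updated rename handler:

    # add a version under the apex installer of an existing scenario
    curl -X POST -H "Content-Type: application/json" \
         -d '[{"version": "euphrates"}]' \
         "http://localhost:8000/api/v1/scenarios/nosdn-nofeature-ha/versions?installer=apex"

    # rename a scenario (only rename is supported by the PUT handler)
    curl -X PUT -H "Content-Type: application/json" \
         -d '{"name": "new-scenario-name"}' \
         "http://localhost:8000/api/v1/scenarios/nosdn-nofeature-ha"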
import httplib
import json
import os
-from copy import deepcopy
+
from datetime import datetime
from opnfv_testapi.common import message
self.assertEqual(scenario, models.Scenario.from_dict(req))
@staticmethod
- def _set_query(*args):
+ def set_query(*args):
uri = ''
for arg in args:
uri += arg + '&'
return uri[0: -1]
- def _get_and_assert(self, name, req=None):
+ def get_and_assert(self, name):
code, body = self.get(name)
- self.assert_res(code, body, req)
+ self.assert_res(code, body, self.req_d)
class TestScenarioCreate(TestScenarioBase):
self.scenario_2 = self.create_return_name(self.req_2)
def test_getByName(self):
- self._get_and_assert(self.scenario_1, self.req_d)
+ self.get_and_assert(self.scenario_1)
def test_getAll(self):
self._query_and_assert(query=None, reqs=[self.req_d, self.req_2])
def test_queryName(self):
- query = self._set_query('name=nosdn-nofeature-ha')
+ query = self.set_query('name=nosdn-nofeature-ha')
self._query_and_assert(query, reqs=[self.req_d])
def test_queryInstaller(self):
- query = self._set_query('installer=apex')
+ query = self.set_query('installer=apex')
self._query_and_assert(query, reqs=[self.req_d])
def test_queryVersion(self):
- query = self._set_query('version=master')
+ query = self.set_query('version=master')
self._query_and_assert(query, reqs=[self.req_d])
def test_queryProject(self):
- query = self._set_query('project=functest')
+ query = self.set_query('project=functest')
self._query_and_assert(query, reqs=[self.req_d, self.req_2])
# close due to random fail, open again after solve it in another patch
def update_url_fixture(item):
def _update_url_fixture(xstep):
def wrapper(self, *args, **kwargs):
+ self.update_url = '{}/{}'.format(self.scenario_url, item)
locator = None
if item in ['projects', 'owner']:
locator = 'installer={}&version={}'.format(
self.installer,
self.version)
- self.update_url = '{}/{}?{}'.format(self.scenario_url,
- item,
- locator)
+ elif item in ['versions']:
+ locator = 'installer={}'.format(
+ self.installer)
+ elif item in ['rename']:
+ self.update_url = self.scenario_url
+
+ if locator:
+ self.update_url = '{}?{}'.format(self.update_url, locator)
+
xstep(self, *args, **kwargs)
return wrapper
return _update_url_fixture
def _update_partial(set_update):
@functools.wraps(set_update)
def wrapper(self):
- update, scenario = set_update(self, deepcopy(self.req_d))
- code, body = getattr(self, operate)(update, self.scenario)
- getattr(self, expected)(code, scenario)
+ update = set_update(self)
+ code, body = getattr(self, operate)(update)
+ getattr(self, expected)(code)
return wrapper
return _update_partial
@update_partial('_add', '_success')
- def test_addScore(self, scenario):
+ def test_addScore(self):
add = models.ScenarioScore(date=str(datetime.now()), score='11/12')
- projects = scenario['installers'][0]['versions'][0]['projects']
+ projects = self.req_d['installers'][0]['versions'][0]['projects']
functest = filter(lambda f: f['project'] == 'functest', projects)[0]
functest['scores'].append(add.format())
self.update_url = '{}/scores?{}'.format(self.scenario_url,
self.locate_project)
- return add, scenario
+ return add
@update_partial('_add', '_success')
- def test_addTrustIndicator(self, scenario):
+ def test_addTrustIndicator(self):
add = models.ScenarioTI(date=str(datetime.now()), status='gold')
- projects = scenario['installers'][0]['versions'][0]['projects']
+ projects = self.req_d['installers'][0]['versions'][0]['projects']
functest = filter(lambda f: f['project'] == 'functest', projects)[0]
functest['trust_indicators'].append(add.format())
self.update_url = '{}/trust_indicators?{}'.format(self.scenario_url,
self.locate_project)
- return add, scenario
+ return add
@update_partial('_add', '_success')
- def test_addCustoms(self, scenario):
- add = ['odl', 'parser', 'vping_ssh']
- projects = scenario['installers'][0]['versions'][0]['projects']
+ def test_addCustoms(self):
+ adds = ['odl', 'parser', 'vping_ssh']
+ projects = self.req_d['installers'][0]['versions'][0]['projects']
functest = filter(lambda f: f['project'] == 'functest', projects)[0]
- functest['customs'] = list(set(functest['customs'] + add))
+ functest['customs'] = list(set(functest['customs'] + adds))
self.update_url = '{}/customs?{}'.format(self.scenario_url,
self.locate_project)
- return add, scenario
+ return adds
@update_partial('_update', '_success')
- def test_updateCustoms(self, scenario):
- news = ['odl', 'parser', 'vping_ssh']
- projects = scenario['installers'][0]['versions'][0]['projects']
+ def test_updateCustoms(self):
+ updates = ['odl', 'parser', 'vping_ssh']
+ projects = self.req_d['installers'][0]['versions'][0]['projects']
functest = filter(lambda f: f['project'] == 'functest', projects)[0]
- functest['customs'] = news
+ functest['customs'] = updates
self.update_url = '{}/customs?{}'.format(self.scenario_url,
self.locate_project)
- return news, scenario
+ return updates
@update_partial('_delete', '_success')
- def test_deleteCustoms(self, scenario):
- obsoletes = ['vping_ssh']
- projects = scenario['installers'][0]['versions'][0]['projects']
+ def test_deleteCustoms(self):
+ deletes = ['vping_ssh']
+ projects = self.req_d['installers'][0]['versions'][0]['projects']
functest = filter(lambda f: f['project'] == 'functest', projects)[0]
functest['customs'] = ['healthcheck']
self.update_url = '{}/customs?{}'.format(self.scenario_url,
self.locate_project)
- return obsoletes, scenario
+ return deletes
@update_url_fixture('projects')
@update_partial('_add', '_success')
- def test_addProjects_succ(self, scenario):
+ def test_addProjects_succ(self):
add = models.ScenarioProject(project='qtip').format()
- scenario['installers'][0]['versions'][0]['projects'].append(add)
- return [add], scenario
+ self.req_d['installers'][0]['versions'][0]['projects'].append(add)
+ return [add]
@update_url_fixture('projects')
@update_partial('_add', '_conflict')
- def test_addProjects_already_exist(self, scenario):
+ def test_addProjects_already_exist(self):
add = models.ScenarioProject(project='functest').format()
- scenario['installers'][0]['versions'][0]['projects'].append(add)
- return [add], scenario
+ return [add]
@update_url_fixture('projects')
@update_partial('_add', '_bad_request')
- def test_addProjects_bad_schema(self, scenario):
+ def test_addProjects_bad_schema(self):
add = models.ScenarioProject(project='functest').format()
add['score'] = None
- scenario['installers'][0]['versions'][0]['projects'].append(add)
- return [add], scenario
+ return [add]
@update_url_fixture('projects')
@update_partial('_update', '_success')
- def test_updateProjects_succ(self, scenario):
+ def test_updateProjects_succ(self):
update = models.ScenarioProject(project='qtip').format()
- scenario['installers'][0]['versions'][0]['projects'] = [update]
- return [update], scenario
+ self.req_d['installers'][0]['versions'][0]['projects'] = [update]
+ return [update]
+
+ @update_url_fixture('projects')
+ @update_partial('_update', '_conflict')
+ def test_updateProjects_duplicated(self):
+ update = models.ScenarioProject(project='qtip').format()
+ return [update, update]
@update_url_fixture('projects')
@update_partial('_update', '_bad_request')
- def test_updateProjects_bad_schema(self, scenario):
+ def test_updateProjects_bad_schema(self):
update = models.ScenarioProject(project='functest').format()
update['score'] = None
- scenario['installers'][0]['versions'][0]['projects'] = [update]
- return [update], scenario
+ return [update]
@update_url_fixture('projects')
@update_partial('_delete', '_success')
- def test_deleteProjects(self, scenario):
+ def test_deleteProjects(self):
deletes = ['functest']
- projects = scenario['installers'][0]['versions'][0]['projects']
- scenario['installers'][0]['versions'][0]['projects'] = filter(
+ projects = self.req_d['installers'][0]['versions'][0]['projects']
+ self.req_d['installers'][0]['versions'][0]['projects'] = filter(
lambda f: f['project'] != 'functest',
projects)
- return deletes, scenario
+ return deletes
@update_url_fixture('owner')
@update_partial('_update', '_success')
- def test_changeOwner(self, scenario):
+ def test_changeOwner(self):
new_owner = 'new_owner'
- scenario['installers'][0]['versions'][0]['owner'] = new_owner
- return new_owner, scenario
+ update = models.ScenarioChangeOwnerRequest(new_owner).format()
+ self.req_d['installers'][0]['versions'][0]['owner'] = new_owner
+ return update
+
+ @update_url_fixture('versions')
+ @update_partial('_add', '_success')
+ def test_addVersions_succ(self):
+ add = models.ScenarioVersion(version='Euphrates').format()
+ self.req_d['installers'][0]['versions'].append(add)
+ return [add]
+
+ @update_url_fixture('versions')
+ @update_partial('_add', '_conflict')
+ def test_addVersions_already_exist(self):
+ add = models.ScenarioVersion(version='master').format()
+ return [add]
- def _add(self, update_req, new_scenario):
+ @update_url_fixture('versions')
+ @update_partial('_add', '_bad_request')
+ def test_addVersions_bad_schema(self):
+ add = models.ScenarioVersion(version='euphrates').format()
+ add['notexist'] = None
+ return [add]
+
+ @update_url_fixture('versions')
+ @update_partial('_update', '_success')
+ def test_updateVersions_succ(self):
+ update = models.ScenarioVersion(version='euphrates').format()
+ self.req_d['installers'][0]['versions'] = [update]
+ return [update]
+
+ @update_url_fixture('versions')
+ @update_partial('_update', '_conflict')
+ def test_updateVersions_duplicated(self):
+ update = models.ScenarioVersion(version='euphrates').format()
+ return [update, update]
+
+ @update_url_fixture('versions')
+ @update_partial('_update', '_bad_request')
+ def test_updateVersions_bad_schema(self):
+ update = models.ScenarioVersion(version='euphrates').format()
+ update['not_owner'] = 'Iam'
+ return [update]
+
+ @update_url_fixture('versions')
+ @update_partial('_delete', '_success')
+ def test_deleteVersions(self):
+ deletes = ['master']
+ versions = self.req_d['installers'][0]['versions']
+ self.req_d['installers'][0]['versions'] = filter(
+ lambda f: f['version'] != 'master',
+ versions)
+ return deletes
+
+ @update_url_fixture('installers')
+ @update_partial('_add', '_success')
+ def test_addInstallers_succ(self):
+ add = models.ScenarioInstaller(installer='daisy').format()
+ self.req_d['installers'].append(add)
+ return [add]
+
+ @update_url_fixture('installers')
+ @update_partial('_add', '_conflict')
+ def test_addInstallers_already_exist(self):
+ add = models.ScenarioInstaller(installer='apex').format()
+ return [add]
+
+ @update_url_fixture('installers')
+ @update_partial('_add', '_bad_request')
+ def test_addInstallers_bad_schema(self):
+ add = models.ScenarioInstaller(installer='daisy').format()
+ add['not_exist'] = 'not_exist'
+ return [add]
+
+ @update_url_fixture('installers')
+ @update_partial('_update', '_success')
+ def test_updateInstallers_succ(self):
+ update = models.ScenarioInstaller(installer='daisy').format()
+ self.req_d['installers'] = [update]
+ return [update]
+
+ @update_url_fixture('installers')
+ @update_partial('_update', '_conflict')
+ def test_updateInstallers_duplicated(self):
+ update = models.ScenarioInstaller(installer='daisy').format()
+ return [update, update]
+
+ @update_url_fixture('installers')
+ @update_partial('_update', '_bad_request')
+ def test_updateInstallers_bad_schema(self):
+ update = models.ScenarioInstaller(installer='daisy').format()
+ update['not_exist'] = 'not_exist'
+ return [update]
+
+ @update_url_fixture('installers')
+ @update_partial('_delete', '_success')
+ def test_deleteInstallers(self):
+ deletes = ['apex']
+ installers = self.req_d['installers']
+ self.req_d['installers'] = filter(
+ lambda f: f['installer'] != 'apex',
+ installers)
+ return deletes
+
+ @update_url_fixture('rename')
+ @update_partial('_update', '_success')
+ def test_renameScenario(self):
+ new_name = 'new_scenario_name'
+ update = models.ScenarioUpdateRequest(name=new_name)
+ self.req_d['name'] = new_name
+ return update
+
+ @update_url_fixture('rename')
+ @update_partial('_update', '_forbidden')
+ def test_renameScenario_exist(self):
+ new_name = self.req_d['name']
+ update = models.ScenarioUpdateRequest(name=new_name)
+ return update
+
+ def _add(self, update_req):
return self.post_direct_url(self.update_url, update_req)
- def _update(self, update_req, new_scenario):
+ def _update(self, update_req):
return self.update_direct_url(self.update_url, update_req)
- def _delete(self, update_req, new_scenario):
+ def _delete(self, update_req):
return self.delete_direct_url(self.update_url, update_req)
- def _success(self, status, new_scenario):
+ def _success(self, status):
self.assertEqual(status, httplib.OK)
- self._get_and_assert(new_scenario.get('name'), new_scenario)
+ self.get_and_assert(self.req_d['name'])
- def _forbidden(self, status, new_scenario):
+ def _forbidden(self, status):
self.assertEqual(status, httplib.FORBIDDEN)
- def _bad_request(self, status, new_scenario):
+ def _bad_request(self, status):
self.assertEqual(status, httplib.BAD_REQUEST)
- def _conflict(self, status, new_scenario):
+ def _conflict(self, status):
self.assertEqual(status, httplib.CONFLICT)
-import setuptools
+import os
+import subprocess
+import setuptools
__author__ = 'serena'
except ImportError:
pass
+dirpath = os.path.dirname(os.path.abspath(__file__))
+subprocess.call(['ln', '-s',
+ '{}/3rd_party/static'.format(dirpath),
+ '{}/opnfv_testapi/static'.format(dirpath)])
+
setuptools.setup(
setup_requires=['pbr==2.0.0'],
pbr=True)