#--------------------------------
# POD, INSTALLER, AND BRANCH MAPPING
#--------------------------------
-# brahmaputra
+# CI PODs
+#--------------------------------
+# colorado
#--------------------------------
pod:
- - arm-pod1:
+ - armband-baremetal:
+ slave-label: armband-baremetal
installer: fuel
<<: *colorado
- - arm-pod2:
+ - armband-virtual:
+ slave-label: armband-virtual
installer: fuel
<<: *colorado
#--------------------------------
# master
#--------------------------------
- pod:
- - arm-pod1:
+ - armband-baremetal:
+ slave-label: armband-baremetal
installer: fuel
<<: *master
+ - armband-virtual:
+ slave-label: armband-virtual
+ installer: fuel
+ <<: *master
+#--------------------------------
+# Non-CI PODs
+#--------------------------------
+# colorado
+#--------------------------------
+ - arm-pod2:
+ slave-label: arm-pod2
+ installer: fuel
+ <<: *colorado
+#--------------------------------
+# master
+#--------------------------------
- arm-pod2:
+ slave-label: arm-pod2
installer: fuel
<<: *master
#--------------------------------
scenario:
# HA scenarios
- 'os-nosdn-nofeature-ha':
- auto-trigger-name: 'armband-{installer}-{scenario}-{pod}-{stream}-trigger'
+ auto-trigger-name: '{installer}-{scenario}-{pod}-{stream}-trigger'
- 'os-odl_l2-nofeature-ha':
- auto-trigger-name: 'armband-{installer}-{scenario}-{pod}-{stream}-trigger'
+ auto-trigger-name: '{installer}-{scenario}-{pod}-{stream}-trigger'
- 'os-odl_l3-nofeature-ha':
- auto-trigger-name: 'armband-{installer}-{scenario}-{pod}-{stream}-trigger'
+ auto-trigger-name: '{installer}-{scenario}-{pod}-{stream}-trigger'
- 'os-odl_l2-bgpvpn-ha':
- auto-trigger-name: 'armband-{installer}-{scenario}-{pod}-{stream}-trigger'
+ auto-trigger-name: '{installer}-{scenario}-{pod}-{stream}-trigger'
# NOHA scenarios
- 'os-odl_l2-nofeature-noha':
- auto-trigger-name: 'armband-{installer}-{scenario}-{pod}-{stream}-trigger'
+ auto-trigger-name: '{installer}-{scenario}-{pod}-{stream}-trigger'
jobs:
- - 'armband-{installer}-{scenario}-{pod}-daily-{stream}'
- - 'armband-{installer}-deploy-{pod}-daily-{stream}'
+ - '{installer}-{scenario}-{pod}-daily-{stream}'
+ - '{installer}-deploy-{pod}-daily-{stream}'
########################
# job templates
########################
- job-template:
- name: 'armband-{installer}-{scenario}-{pod}-daily-{stream}'
+ name: '{installer}-{scenario}-{pod}-daily-{stream}'
concurrent: false
- build-blocker:
use-build-blocker: true
blocking-jobs:
- - 'armband-{installer}-os-.*?-{pod}-daily-.*'
+ - '{installer}-os-.*?-{pod}-daily-.*'
block-level: 'NODE'
wrappers:
- project-parameter:
project: '{project}'
- '{installer}-defaults'
- - '{pod}-defaults':
+ - '{slave-label}-defaults':
installer: '{installer}'
- string:
name: DEPLOY_SCENARIO
builders:
- trigger-builds:
- - project: 'armband-{installer}-deploy-{pod}-daily-{stream}'
+ - project: '{installer}-deploy-{pod}-daily-{stream}'
current-parameters: false
predefined-parameters:
DEPLOY_SCENARIO={scenario}
build-step-failure-threshold: 'never'
failure-threshold: 'never'
unstable-threshold: 'FAILURE'
+ - trigger-builds:
+ - project: 'yardstick-{installer}-{pod}-daily-{stream}'
+ current-parameters: false
+ predefined-parameters:
+ DEPLOY_SCENARIO={scenario}
+ block: true
+ same-node: true
+ block-thresholds:
+ build-step-failure-threshold: 'never'
+ failure-threshold: 'never'
+ unstable-threshold: 'FAILURE'
- job-template:
- name: 'armband-{installer}-deploy-{pod}-daily-{stream}'
+ name: '{installer}-deploy-{pod}-daily-{stream}'
concurrent: false
- build-blocker:
use-build-blocker: true
blocking-jobs:
- - 'armband-{installer}-deploy-{pod}-daily-{stream}'
- - 'armband-{installer}-deploy-generic-daily-.*'
+ - '{installer}-deploy-{pod}-daily-{stream}'
+ - '{installer}-deploy-generic-daily-.*'
block-level: 'NODE'
parameters:
- project-parameter:
project: '{project}'
- '{installer}-defaults'
- - '{pod}-defaults':
+ - '{slave-label}-defaults':
installer: '{installer}'
- string:
name: DEPLOY_SCENARIO
# trigger macros
########################
# CI PODs
-#----------------------------------------------------------
-# Enea Armband POD 1 Triggers running against master branch
-#----------------------------------------------------------
+#-----------------------------------------------------------------
+# Enea Armband CI Baremetal Triggers running against master branch
+#-----------------------------------------------------------------
- trigger:
- name: 'armband-fuel-os-odl_l2-nofeature-ha-arm-pod1-master-trigger'
+ name: 'fuel-os-odl_l2-nofeature-ha-armband-baremetal-master-trigger'
triggers:
- - timed: '0 3 * * 1,4'
+ - timed: '0 3,15 * * 1'
- trigger:
- name: 'armband-fuel-os-nosdn-nofeature-ha-arm-pod1-master-trigger'
+ name: 'fuel-os-nosdn-nofeature-ha-armband-baremetal-master-trigger'
triggers:
- - timed: '0 15 * * 1,4'
+ - timed: '0 3,15 * * 2'
- trigger:
- name: 'armband-fuel-os-odl_l3-nofeature-ha-arm-pod1-master-trigger'
+ name: 'fuel-os-odl_l3-nofeature-ha-armband-baremetal-master-trigger'
triggers:
- - timed: '0 3 * * 2,5'
+ - timed: '0 3,15 * * 3'
- trigger:
- name: 'armband-fuel-os-odl_l2-bgpvpn-ha-arm-pod1-master-trigger'
+ name: 'fuel-os-odl_l2-bgpvpn-ha-armband-baremetal-master-trigger'
triggers:
- - timed: '0 15 * * 2,5'
+ - timed: '0 3,15 * * 4'
- trigger:
- name: 'armband-fuel-os-odl_l2-nofeature-noha-arm-pod1-master-trigger'
+ name: 'fuel-os-odl_l2-nofeature-noha-armband-baremetal-master-trigger'
triggers:
- - timed: '0 3 * * 3,6'
+ - timed: '0 3,15 * * 5'
+#----------------------------------------------------------------------
+# Enea Armband CI Baremetal Triggers running against colorado branch
+#----------------------------------------------------------------------
+- trigger:
+ name: 'fuel-os-odl_l2-nofeature-ha-armband-baremetal-colorado-trigger'
+ triggers:
+ - timed: '0 4,16 * * 1'
+- trigger:
+ name: 'fuel-os-nosdn-nofeature-ha-armband-baremetal-colorado-trigger'
+ triggers:
+ - timed: '0 4,16 * * 2'
+- trigger:
+ name: 'fuel-os-odl_l3-nofeature-ha-armband-baremetal-colorado-trigger'
+ triggers:
+ - timed: '0 4,16 * * 3'
+- trigger:
+ name: 'fuel-os-odl_l2-bgpvpn-ha-armband-baremetal-colorado-trigger'
+ triggers:
+ - timed: '0 4,16 * * 4'
+- trigger:
+ name: 'fuel-os-odl_l2-nofeature-noha-armband-baremetal-colorado-trigger'
+ triggers:
+ - timed: '0 4,16 * * 5'
#---------------------------------------------------------------
-# Enea Armband POD 1 Triggers running against brahmaputra branch
+# Enea Armband CI Virtual Triggers running against master branch
#---------------------------------------------------------------
- trigger:
- name: 'armband-fuel-os-odl_l2-nofeature-ha-arm-pod1-colorado-trigger'
+ name: 'fuel-os-odl_l2-nofeature-ha-armband-virtual-master-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-nosdn-nofeature-ha-armband-virtual-master-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l3-nofeature-ha-armband-virtual-master-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l2-bgpvpn-ha-armband-virtual-master-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l2-nofeature-noha-armband-virtual-master-trigger'
+ triggers:
+ - timed: ''
+#--------------------------------------------------------------------
+# Enea Armband CI Virtual Triggers running against colorado branch
+#--------------------------------------------------------------------
+- trigger:
+ name: 'fuel-os-odl_l2-nofeature-ha-armband-virtual-colorado-trigger'
triggers:
- timed: ''
- trigger:
- name: 'armband-fuel-os-nosdn-nofeature-ha-arm-pod1-colorado-trigger'
+ name: 'fuel-os-nosdn-nofeature-ha-armband-virtual-colorado-trigger'
triggers:
- timed: ''
- trigger:
- name: 'armband-fuel-os-odl_l3-nofeature-ha-arm-pod1-colorado-trigger'
+ name: 'fuel-os-odl_l3-nofeature-ha-armband-virtual-colorado-trigger'
triggers:
- timed: ''
- trigger:
- name: 'armband-fuel-os-odl_l2-bgpvpn-ha-arm-pod1-colorado-trigger'
+ name: 'fuel-os-odl_l2-bgpvpn-ha-armband-virtual-colorado-trigger'
triggers:
- timed: ''
- trigger:
- name: 'armband-fuel-os-odl_l2-nofeature-noha-arm-pod1-colorado-trigger'
+ name: 'fuel-os-odl_l2-nofeature-noha-armband-virtual-colorado-trigger'
triggers:
- timed: ''
#----------------------------------------------------------
# Enea Armband POD 2 Triggers running against master branch
#----------------------------------------------------------
-# No triggers for master for now
- trigger:
- name: 'armband-fuel-os-odl_l2-nofeature-ha-arm-pod2-master-trigger'
+ name: 'fuel-os-odl_l2-nofeature-ha-arm-pod2-master-trigger'
triggers:
- timed: ''
- trigger:
- name: 'armband-fuel-os-nosdn-nofeature-ha-arm-pod2-master-trigger'
+ name: 'fuel-os-nosdn-nofeature-ha-arm-pod2-master-trigger'
triggers:
- timed: ''
- trigger:
- name: 'armband-fuel-os-odl_l3-nofeature-ha-arm-pod2-master-trigger'
+ name: 'fuel-os-odl_l3-nofeature-ha-arm-pod2-master-trigger'
triggers:
- timed: ''
- trigger:
- name: 'armband-fuel-os-odl_l2-bgpvpn-ha-arm-pod2-master-trigger'
+ name: 'fuel-os-odl_l2-bgpvpn-ha-arm-pod2-master-trigger'
triggers:
- timed: ''
- trigger:
- name: 'armband-fuel-os-odl_l2-nofeature-noha-arm-pod2-master-trigger'
+ name: 'fuel-os-odl_l2-nofeature-noha-arm-pod2-master-trigger'
triggers:
- timed: ''
#---------------------------------------------------------------
-# Enea Armband POD 2 Triggers running against brahmaputra branch
+# Enea Armband POD 2 Triggers running against colorado branch
#---------------------------------------------------------------
- trigger:
- name: 'armband-fuel-os-odl_l2-nofeature-ha-arm-pod2-colorado-trigger'
+ name: 'fuel-os-odl_l2-nofeature-ha-arm-pod2-colorado-trigger'
triggers:
- timed: ''
- trigger:
- name: 'armband-fuel-os-nosdn-nofeature-ha-arm-pod2-colorado-trigger'
+ name: 'fuel-os-nosdn-nofeature-ha-arm-pod2-colorado-trigger'
triggers:
- timed: ''
- trigger:
- name: 'armband-fuel-os-odl_l3-nofeature-ha-arm-pod2-colorado-trigger'
+ name: 'fuel-os-odl_l3-nofeature-ha-arm-pod2-colorado-trigger'
triggers:
- timed: ''
- trigger:
- name: 'armband-fuel-os-odl_l2-bgpvpn-ha-arm-pod2-colorado-trigger'
+ name: 'fuel-os-odl_l2-bgpvpn-ha-arm-pod2-colorado-trigger'
triggers:
- timed: ''
- trigger:
- name: 'armband-fuel-os-odl_l2-nofeature-noha-arm-pod2-colorado-trigger'
+ name: 'fuel-os-odl_l2-nofeature-noha-arm-pod2-colorado-trigger'
triggers:
- timed: ''
echo "cloning $LAB_CONFIG_URL"
git clone --quiet --branch ${GIT_BRANCH##origin/} $LAB_CONFIG_URL lab-config
LAB_CONFIG_URL=file://${WORKSPACE}/lab-config
+
+ # Source local_env if present, which contains POD-specific config
+ local_env="${WORKSPACE}/lab-config/labs/$LAB_NAME/$POD_NAME/fuel/config/local_env"
+ if [ -e $local_env ]; then
+ echo "-- Sourcing local environment file"
+ source $local_env
+ fi
fi
# releng wants us to use nothing else but opnfv.iso for now. We comply.
- zte-pod3:
slave-label: zte-pod3
<<: *master
+ - zte-pod1:
+ slave-label: zte-pod1
+ <<: *colorado
+ - zte-pod3:
+ slave-label: zte-pod3
+ <<: *colorado
#--------------------------------
# scenarios
#--------------------------------
publishers:
- email:
- recipients: peter.barabas@ericsson.com
+ recipients: peter.barabas@ericsson.com fzhadaev@mirantis.com
- job-template:
name: 'fuel-deploy-{pod}-daily-{stream}'
publishers:
- email:
- recipients: jonas.bjurel@ericsson.com stefan.k.berg@ericsson.com peter.barabas@ericsson.com
+ recipients: jonas.bjurel@ericsson.com stefan.k.berg@ericsson.com peter.barabas@ericsson.com fzhadaev@mirantis.com
########################
# parameter macros
- trigger:
name: 'fuel-os-odl_l2-nofeature-ha-zte-pod1-daily-colorado-trigger'
triggers:
- - timed: ''
+ - timed: '0 2 * * *'
- trigger:
name: 'fuel-os-odl_l3-nofeature-ha-zte-pod1-daily-colorado-trigger'
triggers:
- trigger:
name: 'fuel-os-nosdn-kvm-ha-zte-pod3-daily-colorado-trigger'
triggers:
- - timed: ''
+ - timed: '0 18 * * *'
- trigger:
name: 'fuel-os-nosdn-ovs-ha-zte-pod3-daily-colorado-trigger'
triggers:
export TACKER_SCRIPT_URL="https://git.opnfv.org/cgit/fuel/plain/prototypes/sfc_tacker/poc.tacker-up.sh?h=${GIT_BRANCH#*/}"
export CONTROLLER_NODE_IP=$(sshpass -pr00tme /usr/bin/ssh -o UserKnownHostsFile=/dev/null \
-o StrictHostKeyChecking=no root@$FUEL_MASTER_IP 'fuel node list' | \
- grep opendaylight | cut -d'|' -f5)
+ grep controller | head -1 | cut -d'|' -f5)
# we can't do much if we do not have the controller IP
if [[ ! "$CONTROLLER_NODE_IP" =~ "10.20.0" ]]; then
expect "# "
send "git clone https://gerrit.opnfv.org/gerrit/fuel && cd fuel\r"
expect "# "
-send "git fetch https://gerrit.opnfv.org/gerrit/fuel refs/changes/97/10597/2 && git checkout FETCH_HEAD\r"
-expect "# "
send "/bin/bash /root/sfc-poc/fuel/prototypes/sfc_tacker/poc.tacker-up.sh\r"
expect "# "
send "exit\r"
publishers:
- email:
- recipients: jonas.bjurel@ericsson.com stefan.k.berg@ericsson.com
+ recipients: jonas.bjurel@ericsson.com stefan.k.berg@ericsson.com fzhadaev@mirantis.com
- job-template:
name: 'fuel-merge-build-{stream}'
publishers:
- email:
- recipients: jonas.bjurel@ericsson.com stefan.k.berg@ericsson.com
+ recipients: jonas.bjurel@ericsson.com stefan.k.berg@ericsson.com fzhadaev@mirantis.com
- job-template:
name: 'fuel-deploy-generic-daily-{stream}'
slave-label: '{pod}'
installer: apex
<<: *colorado
+# armband CI PODs
+ - armband-baremetal:
+ slave-label: armband-baremetal
+ installer: fuel
+ <<: *master
+ - armband-virtual:
+ slave-label: armband-virtual
+ installer: fuel
+ <<: *master
+ - armband-baremetal:
+ slave-label: armband-baremetal
+ installer: fuel
+ <<: *colorado
+ - armband-virtual:
+ slave-label: armband-virtual
+ installer: fuel
+ <<: *colorado
#--------------------------------
# None-CI PODs
#--------------------------------
slave-label: '{pod}'
installer: apex
<<: *master
- - arm-pod1:
+ - arm-pod2:
slave-label: '{pod}'
installer: fuel
<<: *master
slave-label: '{pod}'
installer: fuel
<<: *master
+ - zte-pod1:
+ slave-label: '{pod}'
+ installer: fuel
+ <<: *colorado
- zte-pod2:
slave-label: '{pod}'
installer: fuel
slave-label: '{pod}'
installer: fuel
<<: *master
- - arm-pod1:
+ - zte-pod3:
+ slave-label: '{pod}'
+ installer: fuel
+ <<: *colorado
+ - arm-pod2:
slave-label: '{pod}'
installer: fuel
<<: *colorado
- 'functest-cleanup'
- 'set-functest-env'
- 'functest-suite'
- - 'functest-exit'
- builder:
name: functest-daily
name: functest-exit
builders:
- shell:
- !include-raw: ./functest-exit.sh
\ No newline at end of file
+ !include-raw: ./functest-exit.sh
failure-threshold: 'never'
unstable-threshold: 'FAILURE'
+ publishers:
+ - email:
+ recipients: fatih.degirmenci@ericsson.com yroblamo@redhat.com mchandras@suse.de jack.morgan@intel.com zhang.jun3g@zte.com.cn
+
- job-template:
name: 'infra-{phase}-{pod}-daily-{stream}'
- string:
name: DEPLOY_SCENARIO
default: 'os-nosdn-nofeature-noha'
+ - string:
+ name: CLEAN_DIB_IMAGES
+ default: 'false'
scm:
- git-scm:
- shell: |
#!/bin/bash
- sudo $WORKSPACE/jjb/infra/infra-provision.sh
+ echo "Not activated!"
- builder:
name: 'infra-smoketest-daily-builder'
builders:
- shell: |
#!/bin/bash
- sudo $WORKSPACE/jjb/infra/infra-provision.sh
+ echo "Not activated!"
# cleanup remnants of previous deployment
cd /opt/bifrost
-./scripts/destroy_env.sh
+./scripts/destroy-env.sh
# provision 3 VMs; jumphost, controller, and compute
cd /opt/bifrost
case "$JOB_TYPE" in
verify)
+ OPNFV_ARTIFACT_VERSION="gerrit-$GERRIT_CHANGE_NUMBER"
GS_UPLOAD_LOCATION="gs://artifacts.opnfv.org/$PROJECT/review/$GERRIT_CHANGE_NUMBER"
echo "Removing outdated artifacts produced for the previous patch for the change $GERRIT_CHANGE_NUMBER"
gsutil ls $GS_UPLOAD_LOCATION > /dev/null 2>&1 && gsutil rm -r $GS_UPLOAD_LOCATION
exit 1
esac
+# save information regarding artifacts into file
+(
+ echo "OPNFV_ARTIFACT_VERSION=$OPNFV_ARTIFACT_VERSION"
+ echo "OPNFV_GIT_URL=$(git config --get remote.origin.url)"
+ echo "OPNFV_GIT_SHA1=$(git rev-parse HEAD)"
+ echo "OPNFV_ARTIFACT_URL=$GS_UPLOAD_LOCATION"
+ echo "OPNFV_BUILD_URL=$BUILD_URL"
+) > $WORKSPACE/opnfv.properties
+source $WORKSPACE/opnfv.properties
+
+# upload artifacts
gsutil cp -r $WORKSPACE/build_output/* $GS_UPLOAD_LOCATION > $WORKSPACE/gsutil.log 2>&1
gsutil -m setmeta -r \
-h "Cache-Control:private, max-age=0, no-transform" \
$GS_UPLOAD_LOCATION > /dev/null 2>&1
+# upload metadata file for the artifacts built by daily job
+if [[ "$JOB_TYPE" == "daily" ]]; then
+ gsutil cp $WORKSPACE/opnfv.properties $GS_UPLOAD_LOCATION/opnfv.properties > $WORKSPACE/gsutil.log 2>&1
+ gsutil cp $WORKSPACE/opnfv.properties gs://$GS_URL/latest.properties > $WORKSPACE/gsutil.log 2>&1
+ gsutil -m setmeta -r \
+ -h "Cache-Control:private, max-age=0, no-transform" \
+ $GS_UPLOAD_LOCATION/opnfv.properties \
+ gs://$GS_URL/latest.properties > /dev/null 2>&1
+fi
+
gsutil ls $GS_UPLOAD_LOCATION > /dev/null 2>&1
if [[ $? -ne 0 ]]; then
echo "Problem while uploading artifacts!"
# Start the build
echo "Building docker image: $DOCKER_REPO_NAME:$DOCKER_BRANCH_TAG"
-docker build --no-cache -t $DOCKER_REPO_NAME:$DOCKER_BRANCH_TAG .
+if [[ $DOCKER_REPO_NAME == *"functest"* ]]; then
+ docker build --no-cache -t $DOCKER_REPO_NAME:$DOCKER_BRANCH_TAG --build-arg BRANCH=$branch .
+else
+ docker build --no-cache -t $DOCKER_REPO_NAME:$DOCKER_BRANCH_TAG .
+fi
+
echo "Creating tag '$DOCKER_TAG'..."
docker tag -f $DOCKER_REPO_NAME:$DOCKER_BRANCH_TAG $DOCKER_REPO_NAME:$DOCKER_TAG
name: GIT_BASE
default: https://gerrit.opnfv.org/gerrit/$PROJECT
description: 'Git URL to use on this Jenkins Slave'
+- parameter:
+ name: 'armband-baremetal-defaults'
+ parameters:
+ - label:
+ name: SLAVE_LABEL
+ default: 'armband-baremetal'
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+ - string:
+ name: LAB_CONFIG_URL
+ default: ssh://git@git.enea.com/pharos/lab-config
+ description: 'Base URI to the configuration directory'
- parameter:
name: 'joid-baremetal-defaults'
parameters:
name: GIT_BASE
default: https://gerrit.opnfv.org/gerrit/$PROJECT
description: 'Git URL to use on this Jenkins Slave'
+- parameter:
+ name: 'armband-virtual-defaults'
+ parameters:
+ - label:
+ name: SLAVE_LABEL
+ default: 'armband-virtual'
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+ - string:
+ name: LAB_CONFIG_URL
+ default: ssh://git@git.enea.com/pharos/lab-config
+ description: 'Base URI to the configuration directory'
- parameter:
name: 'joid-virtual-defaults'
parameters:
name: SSH_KEY
default: /root/.ssh/id_rsa
description: 'SSH key to use for Apex'
-- parameter:
- name: 'arm-pod1-defaults'
- parameters:
- - node:
- name: SLAVE_NAME
- description: 'Slave name on Jenkins'
- allowed-slaves:
- - arm-pod1
- default-slaves:
- - arm-pod1
- - string:
- name: GIT_BASE
- default: https://gerrit.opnfv.org/gerrit/$PROJECT
- description: 'Git URL to use on this Jenkins Slave'
- - string:
- name: DEFAULT_BRIDGE
- default: 'admin6_br0,public6_br0'
- desciption: 'The bridge to use for Fuel PXE booting. It can be a comma sparated list of bridges, in which case the first is the PXE boot bridge, and all subsequent interfaces that will be added to the VM. If left empty, most deploy scripts will default to pxebr.'
- - string:
- name: DEPLOY_TIMEOUT
- default: '360'
- description: 'Deployment timeout in minutes'
- - string:
- name: LAB_CONFIG_URL
- default: ssh://git@git.enea.com/pharos/lab-config
- description: 'Base URI to the configuration directory'
- parameter:
name: 'arm-pod2-defaults'
parameters:
name: GIT_BASE
default: https://gerrit.opnfv.org/gerrit/$PROJECT
description: 'Git URL to use on this Jenkins Slave'
- - string:
- name: DEFAULT_BRIDGE
- default: 'admin_br0,public_br0'
- desciption: 'The bridge to use for Fuel PXE booting. It can be a comma sparated list of bridges, in which case the first is the PXE boot bridge, and all subsequent interfaces that will be added to the VM. If left empty, most deploy scripts will default to pxebr.'
- - string:
- name: DEPLOY_TIMEOUT
- default: '360'
- description: 'Deployment timeout in minutes'
- string:
name: LAB_CONFIG_URL
default: ssh://git@git.enea.com/pharos/lab-config
stream: master
branch: '{stream}'
gs-pathname: ''
+ docker-tag: 'latest'
#--------------------------------
# POD, INSTALLER, AND BRANCH MAPPING
#--------------------------------
- string:
name: DEPLOY_SCENARIO
default: 'os-nosdn-nofeature-ha'
+ - string:
+ name: DOCKER_TAG
+ default: '{docker-tag}'
+ description: 'Tag to pull docker image'
scm:
- git-scm:
builders:
- 'qtip-cleanup'
- - 'qtip-set-env'
- - 'qtip-run-suite'
- - 'qtip-pushtoDB'
+ - 'qtip-daily-ci'
publishers:
- email:
#biuilder macros
###########################
- builder:
- name: qtip-set-env
- builders:
- - shell: |
- #!/bin/bash
- echo "Qtip: Start Docker and prepare environment"
- envs="INSTALLER_TYPE=${INSTALLER_TYPE} -e INSTALLER_IP=${INSTALLER_IP} -e NODE_NAME=${NODE_NAME}"
- suite="TEST_CASE=all"
- dir_imgstore="${HOME}/imgstore"
- img_volume="${dir_imgstore}:/home/opnfv/imgstore"
- docker pull opnfv/qtip:latest
- cmd=" docker run -id -e $envs -e $suite -v ${img_volume} opnfv/qtip:latest /bin/bash"
- echo "Qtip: Running docker run command: ${cmd}"
- ${cmd}
- docker ps -a
- container_id=$(docker ps | grep 'opnfv/qtip:latest' | awk '{print $1}' | head -1)
- if [ $(docker ps | grep 'opnfv/qtip' | wc -l) == 0 ]; then
- echo "The container opnfv/qtip with ID=${container_id} has not been properly started. Exiting..."
- exit 1
- fi
-- builder:
- name: qtip-run-suite
- builders:
- - shell: |
- #!/bin/bash
- container_id=$(docker ps | grep 'opnfv/qtip:latest' | awk '{print $1}' | head -1)
- if [[ ! -z ${container_id} ]]; then
- echo "The container ID is: ${container_id}"
- QTIP_REPO=/home/opnfv/repos/qtip
- docker exec -t ${container_id} $QTIP_REPO/docker/run_qtip.sh
- else
- echo "Container ID not available"
- fi
-
-- builder:
- name: qtip-pushtoDB
+ name: qtip-daily-ci
builders:
- - shell: |
- #!/bin/bash
-
- echo "Pushing available results to DB"
- echo "The container id is:"
- container_id=$(docker ps | grep 'opnfv/qtip:latest' | awk '{print $1}' | head -1)
- if [[ ! -z ${container_id} ]]; then
- echo "The condiner ID is: ${container_id}"
- QTIP_REPO=/home/opnfv/repos/qtip
- docker exec -t ${container_id} $QTIP_REPO/docker/push_db.sh
- else
- echo "Container ID not available"
- fi
+ - shell:
+ !include-raw: ./qtip-daily-ci.sh
- builder:
name: qtip-cleanup
builders:
- - shell: |
- #!/bin/bash
-
- echo "Cleaning up QTIP docker containers/images..."
- # Remove previous running containers if exist
- if [[ ! -z $(docker ps -a | grep opnfv/qtip) ]]; then
- echo "Removing existing opnfv/qtip containers..."
- running_containers=$(docker ps | grep opnfv/qtip | awk '{print $1}')
- docker stop ${running_containers}
- all_containers=$(docker ps -a | grep opnfv/qtip | awk '{print $1}')
- docker rm ${all_containers}
- fi
-
- # Remove existing images if exist
- if [[ ! -z $(docker images | grep opnfv/qtip) ]]; then
- echo "Docker images to remove:"
- docker images | head -1 && docker images | grep opnfv/qtip
- image_tags=($(docker images | grep opnfv/qtip | awk '{print $2}'))
- for tag in "${image_tags[@]}"; do
- echo "Removing docker image opnfv/qtip:$tag..."
- docker rmi opnfv/qtip:$tag
- done
- fi
+ - shell:
+ !include-raw: ./qtip-cleanup.sh
#################
#trigger macros
--- /dev/null
+#!/bin/bash
+##############################################################################
+# Copyright (c) 2016 ZTE and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+echo "Cleaning up QTIP docker containers/images..."
+
+# Remove previous running containers if exist
+if [[ ! -z $(docker ps -a | grep opnfv/qtip) ]]; then
+ echo "Removing existing opnfv/qtip containers..."
+ running_containers=$(docker ps | grep opnfv/qtip | awk '{print $1}')
+ docker stop ${running_containers}
+ all_containers=$(docker ps -a | grep opnfv/qtip | awk '{print $1}')
+ docker rm ${all_containers}
+fi
+
+# Remove existing images if exist
+if [[ ! -z $(docker images | grep opnfv/qtip) ]]; then
+ echo "Docker images to remove:"
+ docker images | head -1 && docker images | grep opnfv/qtip
+ image_tags=($(docker images | grep opnfv/qtip | awk '{print $2}'))
+ for tag in "${image_tags[@]}"; do
+ echo "Removing docker image opnfv/qtip:$tag..."
+ docker rmi opnfv/qtip:$tag
+ done
+fi
+
--- /dev/null
+#!/bin/bash
+##############################################################################
+# Copyright (c) 2016 ZTE and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+set -e
+
+envs="INSTALLER_TYPE=${INSTALLER_TYPE} -e INSTALLER_IP=${INSTALLER_IP} -e NODE_NAME=${NODE_NAME}"
+suite="TEST_CASE=all"
+dir_imgstore="${HOME}/imgstore"
+img_volume="${dir_imgstore}:/home/opnfv/imgstore"
+
+echo "Qtip: Pulling docker image: opnfv/qtip:${DOCKER_TAG}"
+docker pull opnfv/qtip:$DOCKER_TAG
+
+cmd=" docker run -id -e $envs -e $suite -v ${img_volume} opnfv/qtip:${DOCKER_TAG} /bin/bash"
+echo "Qtip: Running docker command: ${cmd}"
+${cmd}
+
+container_id=$(docker ps | grep "opnfv/qtip:${DOCKER_TAG}" | awk '{print $1}' | head -1)
+if [ $(docker ps | grep 'opnfv/qtip' | wc -l) == 0 ]; then
+ echo "The container opnfv/qtip with ID=${container_id} has not been properly started. Exiting..."
+ exit 1
+else
+ echo "The container ID is: ${container_id}"
+ QTIP_REPO=/home/opnfv/repos/qtip
+
+ echo "Run Qtip test"
+ docker exec -t ${container_id} $QTIP_REPO/docker/run_qtip.sh
+
+ echo "Pushing available results to DB"
+ docker exec -t ${container_id} $QTIP_REPO/docker/push_db.sh
+fi
+
+echo "Qtip done!"
- 'origin/$GERRIT_BRANCH'
skip-tag: true
choosing-strategy: '{choosing-strategy}'
+ timeout: 15
- wrapper:
name: build-timeout
mv docs_output "$local_path"
gsutil -m cp -r "$local_path" "gs://$gs_base"
- if gsutil ls "gs://$gs_path" | grep -e 'html$' > /dev/null 2>&1 ; then
- gsutil -m setmeta \
- -h "Content-Type:text/html" \
- -h "Cache-Control:private, max-age=0, no-transform" \
- "gs://$gs_path"/**.html
- fi
+ gsutil -m setmeta \
+ -h "Content-Type:text/html" \
+ -h "Cache-Control:private, max-age=0, no-transform" \
+ "gs://$gs_path"/**.html > /dev/null 2>&1
echo "Document link(s):" >> gerrit_comment.txt
find "$local_path" | grep -e 'index.html$' -e 'pdf$' | \
mv docs_output "$local_path"
gsutil -m cp -r "$local_path" "gs://$GS_URL"
- if gsutil ls "gs://$gs_path" | grep -e 'html$' > /dev/null 2>&1 ; then
- gsutil -m setmeta \
- -h "Content-Type:text/html" \
- -h "Cache-Control:private, max-age=0, no-transform" \
- "gs://$gs_path"/**.html
- fi
+ gsutil -m setmeta \
+ -h "Content-Type:text/html" \
+ -h "Cache-Control:private, max-age=0, no-transform" \
+ "gs://$gs_path"/**.html > /dev/null 2>&1
echo "Document link(s):" >> gerrit_comment.txt
find "$local_path" | grep -e 'index.html$' -e 'pdf$' | \
installer: fuel
auto-trigger-name: 'daily-trigger-disabled'
<<: *colorado
+# armband CI PODs
+ - armband-baremetal:
+ slave-label: armband-baremetal
+ installer: fuel
+ auto-trigger-name: 'daily-trigger-disabled'
+ <<: *master
+ - armband-virtual:
+ slave-label: armband-virtual
+ installer: fuel
+ auto-trigger-name: 'daily-trigger-disabled'
+ <<: *master
+ - armband-baremetal:
+ slave-label: armband-baremetal
+ installer: fuel
+ auto-trigger-name: 'daily-trigger-disabled'
+ <<: *colorado
+ - armband-virtual:
+ slave-label: armband-virtual
+ installer: fuel
+ auto-trigger-name: 'daily-trigger-disabled'
+ <<: *colorado
# joid CI PODs
- baremetal:
slave-label: joid-baremetal
installer: fuel
auto-trigger-name: 'daily-trigger-disabled'
<<: *master
+ - zte-pod1:
+ slave-label: '{pod}'
+ installer: fuel
+ auto-trigger-name: 'daily-trigger-disabled'
+ <<: *colorado
- zte-pod2:
slave-label: '{pod}'
installer: fuel
installer: fuel
auto-trigger-name: 'daily-trigger-disabled'
<<: *master
- - arm-pod1:
+ - zte-pod3:
+ slave-label: '{pod}'
+ installer: fuel
+ auto-trigger-name: 'daily-trigger-disabled'
+ <<: *colorado
+ - arm-pod2:
slave-label: '{pod}'
installer: fuel
auto-trigger-name: 'daily-trigger-disabled'
name: YARDSTICK_DB_BACKEND
default: ''
description: 'Arguments to use in order to choose the backend DB'
+- parameter:
+ name: 'yardstick-params-armband-baremetal'
+ parameters:
+ - string:
+ name: YARDSTICK_DB_BACKEND
+ default: '-i 104.197.68.199:8086'
+ description: 'Arguments to use in order to choose the backend DB'
+- parameter:
+ name: 'yardstick-params-armband-virtual'
+ parameters:
+ - string:
+ name: YARDSTICK_DB_BACKEND
+ default: ''
+ description: 'Arguments to use in order to choose the backend DB'
- parameter:
name: 'yardstick-params-joid-baremetal'
parameters:
description: 'Arguments to use in order to choose the backend DB'
- parameter:
- name: 'yardstick-params-arm-pod1'
+ name: 'yardstick-params-arm-pod2'
parameters:
- string:
name: YARDSTICK_DB_BACKEND
5. Run destroy script if you need to cleanup previous environment::
cd /opt/bifrost
- ./scripts/destroy_env.sh
+ ./scripts/destroy-env.sh
6. Run deployment script to spin up 3 vms with bifrost: jumphost, controller and compute::
echo "removing logs"
rm -rf /var/log/libvirt/baremetal_logs/*.log
-# clean up images
-rm -rf /httpboot/*
-rm -rf /tftpboot/*
+# clean up dib images only if requested explicitly
+if [ $CLEAN_DIB_IMAGES = "true" ]; then
+ rm -rf /httpboot/*
+ rm -rf /tftpboot/*
+fi
+
+# remove VM disk images
rm -rf /var/lib/libvirt/images/*.qcow2
echo "restarting services"
cd $BIFROST_HOME/playbooks
# Syntax check of dynamic inventory test path
-${ANSIBLE} -vvvv \
- -i inventory/localhost \
- test-bifrost-create-vm.yaml \
- --syntax-check \
- --list-tasks
-${ANSIBLE} -vvvv \
- -i inventory/localhost \
- ${TEST_PLAYBOOK} \
- --syntax-check \
- --list-tasks \
- -e testing_user=${TESTING_USER}
+for task in syntax-check list-tasks; do
+ ${ANSIBLE} -vvvv \
+ -i inventory/localhost \
+ test-bifrost-create-vm.yaml \
+ --${task}
+ ${ANSIBLE} -vvvv \
+ -i inventory/localhost \
+ ${TEST_PLAYBOOK} \
+ --${task} \
+ -e testing_user=${TESTING_USER}
+done
# Create the test VMS
${ANSIBLE} -vvvv \
2. Copy hiera to the right place::
- cp /opt/releng/prototypes/puppet-infracloud/hiera/common.yaml /var/lib/hiera/
+ cp /opt/releng/prototypes/puppet-infracloud/hiera/common.yaml /var/lib/hiera
3. Install modules::
4. Apply the infracloud manifest::
cd /opt/releng/prototypes/puppet-infracloud
- puppet apply --manifests/site.pp --modulepath=/etc/puppet/modules:/opt/releng/prototypes/puppet-infracloud/modules
+ puppet apply manifests/site.pp --modulepath=/etc/puppet/modules:/opt/releng/prototypes/puppet-infracloud/modules
5. Once you finish this operation on controller and compute nodes, you will have a functional OpenStack cloud.
git clone https://gerrit.opnfv.org/gerrit/releng /opt/releng
-2. Create OpenStack clouds config directory:
+2. Create OpenStack clouds config directory::
mkdir -p /root/.config/openstack
cp /opt/releng/prototypes/puppet-infracloud/creds/clouds.yaml /root/.config/openstack/
-4. Install openstack-client:
+4. Install the python-dev package, as the installation of python-openstackclient depends on it::
+
+ apt-get install -y python-dev
+
+5. Install openstack-client (version 3.2.0 is known to work)::
pip install python-openstackclient
-5. Export the desired cloud::
+6. Update /etc/hosts and add controller00::
+
+ 192.168.122.3 controller00
+ 192.168.122.3 controller00.opnfvlocal controller00
+
+7. Export the desired cloud::
export OS_CLOUD=opnfv
-6. Start using it::
+8. Start using it::
- openstack server list
+ openstack service list
ip: 192.168.122.3
compute00.opnfvlocal:
ip: 192.168.122.4
+
+# br-eth0 for debian, br-ens3 for RHEL
+bridge_name: br-eth0
keystone_admin_token => hiera('keystone_admin_token'),
ssl_key_file_contents => hiera('ssl_key_file_contents'),
ssl_cert_file_contents => hiera('ssl_cert_file_contents'),
- br_name => 'br-eth0',
+ br_name => hiera('bridge_name'),
controller_public_address => $::fqdn,
neutron_subnet_cidr => '192.168.122.0/24',
neutron_subnet_gateway => '192.168.122.1',
neutron_admin_password => hiera('neutron_admin_password'),
ssl_cert_file_contents => hiera('ssl_cert_file_contents'),
ssl_key_file_contents => hiera('ssl_key_file_contents'),
- br_name => 'br-eth0',
+ br_name => hiera('bridge_name'),
controller_public_address => 'controller00.opnfvlocal',
virt_type => 'qemu',
}
}
+node 'jumphost.opnfvlocal' {
+ class { 'opnfv::server':
+ sysadmins => hiera('sysadmins', []),
+ enable_unbound => false,
+ purge_apt_sources => false,
+ }
+}
$controller_public_address,
$virt_type = 'kvm',
) {
+ # disable selinux if needed
+ if $::osfamily == 'RedHat' {
+ class { 'selinux':
+ mode => 'permissive',
+ before => Class['::infracloud::compute'],
+ }
+ }
+
class { '::infracloud::compute':
nova_rabbit_password => $nova_rabbit_password,
neutron_rabbit_password => $neutron_rabbit_password,
$opnfv_password,
$opnfv_email = 'opnfvuser@gmail.com',
) {
+ # disable selinux if needed
+ if $::osfamily == 'RedHat' {
+ class { 'selinux':
+ mode => 'permissive',
+ before => Class['::infracloud::controller'],
+ }
+ }
+
class { '::infracloud::controller':
keystone_rabbit_password => $keystone_rabbit_password,
neutron_rabbit_password => $neutron_rabbit_password,
if [ $? != 0 ]; then
echo "Not possible to push results to artifact: gsutil not installed.";
else
- echo "copy result files to artifact $project_artifact"
- gsutil -m cp -r "$dir_result" gs://artifacts.opnfv.org/"$project_artifact"/ >/dev/null 2>&1
+ echo "Uploading logs to artifact $project_artifact"
+ gsutil -m cp -r "$dir_result"/* gs://artifacts.opnfv.org/"$project_artifact"/ >/dev/null 2>&1
+ echo "Logs can be found in http://artifacts.opnfv.org/logs_${project}_${testbed}.html"
+ cd $dir_result
+ files=($(find . -name \* -print|sed 's/^\.//'|sed '/^\s*$/d'))
+ for f in ${files[@]}; do
+ echo "http://artifacts.opnfv.org/${project_artifact}${f}"
+ done
fi
fi
else
<h3 class="text-muted">Functest reporting page</h3>
<nav>
<ul class="nav nav-justified">
- <li class="active"><a href="#">Home</a></li>
- <li><a href="./index-status-apex.html">Status</a></li>
- <li><a href="./index-tempest-apex.html">Tempest</a></li>
- <li><a href="./index-vims-apex.html">vIMS</a></li>
+ <li class="active"><a href="http://testresults.opnfv.org/reporting/index.html">Home</a></li>
+ <li><a href="index-status-apex.html">Apex</a></li>
+ <li><a href="index-status-compass.html">Compass</a></li>
+ <li><a href="index-status-fuel.html">Fuel</a></li>
+ <li><a href="index-status-joid.html">Joid</a></li>
</ul>
</nav>
</div>
<h3 class="text-muted">Yardstick reporting page</h3>
<nav>
<ul class="nav nav-justified">
- <li class="active"><a href="#">Home</a></li>
- <li><a href="./index-status-apex.html">Status</a></li>
+ <li class="active"><a href="http://testresults.opnfv.org/reporting/index.html">Home</a></li>
+ <li><a href="index-status-apex.html">Apex</a></li>
+ <li><a href="index-status-compass.html">Compass</a></li>
+ <li><a href="index-status-fuel.html">Fuel</a></li>
+ <li><a href="index-status-joid.html">Joid</a></li>
</ul>
</nav>
</div>
</div>
</div>
<div class="col-md-1"></div>
-</div>
\ No newline at end of file
+</div>
# ****************************************************
installers = ["apex", "compass", "fuel", "joid"]
-versions = ["master"]
+versions = ["master", "stable/colorado"]
# get data in the past 7 days
PERIOD = 7
mkdir -p $TARGET_DIR
echo "Export results"
-mongoexport -db test_results_collection -c test_results --out $TARGET_DIR/results.json
+mongoexport --db test_results_collection -c results --out $TARGET_DIR/backup-results.json
echo "Export test cases"
-mongoexport --db test_results_collection -c test_cases --out $TARGET_DIR/backup-cases.json
+mongoexport --db test_results_collection -c testcases --out $TARGET_DIR/backup-cases.json
echo "Export projects"
-mongoexport --db test_results_collection -c test_projects --out $TARGET_DIR/backup-projects.json
+mongoexport --db test_results_collection -c projects --out $TARGET_DIR/backup-projects.json
echo "Export pods"
-mongoexport --db test_results_collection -c pod --out $TARGET_DIR/backup-pod.json
+mongoexport --db test_results_collection -c pods --out $TARGET_DIR/backup-pod.json
echo "Create tar.gz"
-tar -cvzf $TEST_RESULT_DB_BACKUP $TARGET_DIR
+#tar -cvzf $TEST_RESULT_DB_BACKUP $TARGET_DIR
echo "Delete temp directory"
-rm -Rf $TARGET_DIR
+#rm -Rf $TARGET_DIR
#! /usr/bin/env python
+import json
import logging
+import urlparse
+
import argparse
+import yaml
+
import shared_utils
-import json
-import urlparse
logger = logging.getLogger('create_kibana_dashboards')
logger.setLevel(logging.DEBUG)
-file_handler = logging.FileHandler('/var/log/{}.log'.format('create_kibana_dashboards'))
+file_handler = logging.FileHandler('./{}.log'.format('create_kibana_dashboards'))
file_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s'))
logger.addHandler(file_handler)
_installers = {'fuel', 'apex', 'compass', 'joid'}
-# see class VisualizationState for details on format
-_testcases = [
- ('functest', 'tempest_smoke_serial',
- [
- {
- "metrics": [
- {
- "type": "avg",
- "params": {
- "field": "details.duration"
- }
- }
- ],
- "type": "line",
- "metadata": {
- "label": "tempest_smoke_serial duration",
- "test_family": "VIM"
- }
- },
-
- {
- "metrics": [
- {
- "type": "sum",
- "params": {
- "field": "details.tests"
- }
- },
- {
- "type": "sum",
- "params": {
- "field": "details.failures"
- }
- }
- ],
- "type": "histogram",
- "metadata": {
- "label": "tempest_smoke_serial nr of tests/failures",
- "test_family": "VIM"
- }
- },
-
- {
- "metrics": [
- {
- "type": "avg",
- "params": {
- "field": "details.success_percentage"
- }
- }
- ],
- "type": "line",
- "metadata": {
- "label": "tempest_smoke_serial success percentage",
- "test_family": "VIM"
- }
- }
- ]
- ),
-
- ('functest', 'rally_sanity',
- [
- {
- "metrics": [
- {
- "type": "avg",
- "params": {
- "field": "details.duration"
- }
- }
- ],
- "type": "line",
- "metadata": {
- "label": "rally_sanity duration",
- "test_family": "VIM"
- }
- },
-
- {
- "metrics": [
- {
- "type": "avg",
- "params": {
- "field": "details.tests"
- }
- }
- ],
- "type": "histogram",
- "metadata": {
- "label": "rally_sanity nr of tests",
- "test_family": "VIM"
- }
- },
-
- {
- "metrics": [
- {
- "type": "avg",
- "params": {
- "field": "details.success_percentage"
- }
- }
- ],
- "type": "line",
- "metadata": {
- "label": "rally_sanity success percentage",
- "test_family": "VIM"
- }
- }
- ]
- ),
-
- ('functest', 'vping_ssh',
- [
- {
- "metrics": [
- {
- "type": "avg",
- "params": {
- "field": "details.duration"
- }
- }
- ],
- "type": "line",
- "metadata": {
- "label": "vPing duration",
- "test_family": "VIM"
- }
- }
- ]
- ),
-
- ('functest', 'vping_userdata',
- [
- {
- "metrics": [
- {
- "type": "avg",
- "params": {
- "field": "details.duration"
- }
- }
- ],
- "type": "line",
- "metadata": {
- "label": "vPing_userdata duration",
- "test_family": "VIM"
- }
- }
- ]
- ),
-
- ('functest', 'odl',
- [
- {
- "metrics": [
- {
- "type": "sum",
- "params": {
- "field": "details.tests"
- }
- },
- {
- "type": "sum",
- "params": {
- "field": "details.failures"
- }
- }
- ],
- "type": "histogram",
- "metadata": {
- "label": "ODL nr of tests/failures",
- "test_family": "Controller"
- }
- },
-
- {
- "metrics": [
- {
- "type": "avg",
- "params": {
- "field": "details.success_percentage"
- }
- }
- ],
- "type": "line",
- "metadata": {
- "label": "ODL success percentage",
- "test_family": "Controller"
- }
- }
- ]
- ),
-
- ('functest', 'onos',
- [
- {
- "metrics": [
- {
- "type": "avg",
- "params": {
- "field": "details.FUNCvirNet.duration"
- }
- }
- ],
- "type": "line",
- "metadata": {
- "label": "ONOS FUNCvirNet duration",
- "test_family": "Controller"
- }
- },
-
- {
- "metrics": [
- {
- "type": "sum",
- "params": {
- "field": "details.FUNCvirNet.tests"
- }
- },
- {
- "type": "sum",
- "params": {
- "field": "details.FUNCvirNet.failures"
- }
- }
- ],
- "type": "histogram",
- "metadata": {
- "label": "ONOS FUNCvirNet nr of tests/failures",
- "test_family": "Controller"
- }
- },
-
- {
- "metrics": [
- {
- "type": "avg",
- "params": {
- "field": "details.FUNCvirNetL3.duration"
- }
- }
- ],
- "type": "line",
- "metadata": {
- "label": "ONOS FUNCvirNetL3 duration",
- "test_family": "Controller"
- }
- },
-
- {
- "metrics": [
- {
- "type": "sum",
- "params": {
- "field": "details.FUNCvirNetL3.tests"
- }
- },
- {
- "type": "sum",
- "params": {
- "field": "details.FUNCvirNetL3.failures"
- }
- }
- ],
- "type": "histogram",
- "metadata": {
- "label": "ONOS FUNCvirNetL3 nr of tests/failures",
- "test_family": "Controller"
- }
- }
- ]
- ),
-
- ('functest', 'vims',
- [
- {
- "metrics": [
- {
- "type": "sum",
- "params": {
- "field": "details.sig_test.tests"
- }
- },
- {
- "type": "sum",
- "params": {
- "field": "details.sig_test.failures"
- }
- },
- {
- "type": "sum",
- "params": {
- "field": "details.sig_test.passed"
- }
- },
- {
- "type": "sum",
- "params": {
- "field": "details.sig_test.skipped"
- }
- }
- ],
- "type": "histogram",
- "metadata": {
- "label": "vIMS nr of tests/failures/passed/skipped",
- "test_family": "Features"
- }
- },
-
- {
- "metrics": [
- {
- "type": "avg",
- "params": {
- "field": "details.vIMS.duration"
- }
- },
- {
- "type": "avg",
- "params": {
- "field": "details.orchestrator.duration"
- }
- },
- {
- "type": "avg",
- "params": {
- "field": "details.sig_test.duration"
- }
- }
- ],
- "type": "histogram",
- "metadata": {
- "label": "vIMS/ochestrator/test duration",
- "test_family": "Features"
- }
- }
- ]
- ),
-
- ('promise', 'promise',
- [
- {
- "metrics": [
- {
- "type": "avg",
- "params": {
- "field": "details.duration"
- }
- }
- ],
- "type": "line",
- "metadata": {
- "label": "promise duration",
- "test_family": "Features"
- }
- },
-
- {
- "metrics": [
- {
- "type": "sum",
- "params": {
- "field": "details.tests"
- }
- },
- {
- "type": "sum",
- "params": {
- "field": "details.failures"
- }
- }
- ],
- "type": "histogram",
- "metadata": {
- "label": "promise nr of tests/failures",
- "test_family": "Features"
- }
- }
- ]
- ),
-
- ('doctor', 'doctor-notification',
- [
- {
- "metrics": [
- {
- "type": "avg",
- "params": {
- "field": "details.duration"
- }
- }
- ],
- "type": "line",
- "metadata": {
- "label": "doctor-notification duration",
- "test_family": "Features"
- }
- }
- ]
- )
-]
-
class KibanaDashboard(dict):
- def __init__(self, project_name, case_name, installer, pod, scenarios, visualization_detail):
+ def __init__(self, project_name, case_name, family, installer, pod, scenarios, visualization):
super(KibanaDashboard, self).__init__()
self.project_name = project_name
self.case_name = case_name
+ self.family = family
self.installer = installer
self.pod = pod
self.scenarios = scenarios
- self.visualization_detail = visualization_detail
+ self.visualization = visualization
self._visualization_title = None
self._kibana_visualizations = []
self._kibana_dashboard = None
self.installer,
self.pod,
scenario,
- self.visualization_detail))
+ self.visualization))
self._visualization_title = self._kibana_visualizations[0].vis_state_title
},
separators=(',', ':'))
}
- self['metadata'] = self.visualization_detail['metadata']
+
+ label = self.case_name
+ if 'label' in self.visualization:
+ label += " %s" % self.visualization.get('label')
+ label += " %s" % self.visualization.get('name')
+ self['metadata'] = {
+ "label": label,
+ "test_family": self.family
+ }
def _publish(self):
url = urlparse.urljoin(base_elastic_url, '/.kibana/dashboard/{}'.format(self.id))
class VisualizationState(dict):
- def __init__(self, input_dict):
- """
- dict structure:
- {
- "metrics":
- [
- {
- "type": type, # default sum
- "params": {
- "field": field # mandatory, no default
- },
- {metric2}
- ],
- "segments":
- [
- {
- "type": type, # default date_histogram
- "params": {
- "field": field # default start_date
- },
- {segment2}
- ],
- "type": type, # default area
- "mode": mode, # default grouped for type 'histogram', stacked for other types
- "metadata": {
- "label": "tempest_smoke_serial duration",# mandatory, no default
- "test_family": "VIM" # mandatory, no default
- }
- }
-
- default modes:
- type histogram: grouped
- type area: stacked
-
- :param input_dict:
- :return:
- """
+ def __init__(self, visualization):
super(VisualizationState, self).__init__()
- metrics = input_dict['metrics']
- segments = [] if 'segments' not in input_dict else input_dict['segments']
-
- graph_type = 'area' if 'type' not in input_dict else input_dict['type']
- self['type'] = graph_type
-
- if 'mode' not in input_dict:
- if graph_type == 'histogram':
- mode = 'grouped'
- else:
- # default
- mode = 'stacked'
+ name = visualization.get('name')
+ fields = visualization.get('fields')
+
+ if name == 'tests_failures':
+ mode = 'grouped'
+ metric_type = 'sum'
+ self['type'] = 'histogram'
else:
- mode = input_dict['mode']
+ # duration or success_percentage
+ mode = 'stacked'
+ metric_type = 'avg'
+ self['type'] = 'line'
+
self['params'] = {
"shareYAxis": True,
"addTooltip": True,
self['aggs'] = []
i = 1
- for metric in metrics:
+ for field in fields:
self['aggs'].append({
"id": str(i),
- "type": 'sum' if 'type' not in metric else metric['type'],
+ "type": metric_type,
"schema": "metric",
"params": {
- "field": metric['params']['field']
+ "field": field.get('field')
}
})
i += 1
- if len(segments) > 0:
- for segment in segments:
- self['aggs'].append({
- "id": str(i),
- "type": 'date_histogram' if 'type' not in segment else segment['type'],
- "schema": "metric",
- "params": {
- "field": "start_date" if ('params' not in segment or 'field' not in segment['params'])
- else segment['params']['field'],
- "interval": "auto",
- "customInterval": "2h",
- "min_doc_count": 1,
- "extended_bounds": {}
- }
- })
- i += 1
- else:
- self['aggs'].append({
+ self['aggs'].append({
"id": str(i),
"type": 'date_histogram',
"schema": "segment",
class KibanaVisualization(dict):
- def __init__(self, project_name, case_name, installer, pod, scenario, detail):
+ def __init__(self, project_name, case_name, installer, pod, scenario, visualization):
"""
We need two things
1. filter created from
:return:
"""
super(KibanaVisualization, self).__init__()
- vis_state = VisualizationState(detail)
+ vis_state = VisualizationState(visualization)
self.vis_state_title = vis_state['title']
self['title'] = '{} {} {} {} {} {}'.format(project_name,
case_name,
:return: list of KibanaDashboards
"""
kibana_dashboards = []
- for project_name, case_name, visualization_details in _testcases:
- for installer in _installers:
- pods_and_scenarios = _get_pods_and_scenarios(project_name, case_name, installer)
- for visualization_detail in visualization_details:
- for pod, scenarios in pods_and_scenarios.iteritems():
- kibana_dashboards.append(KibanaDashboard(project_name, case_name, installer, pod, scenarios,
- visualization_detail))
+ with open('./testcases.yaml') as f:
+ testcases_yaml = yaml.safe_load(f)
+
+ for project, case_dicts in testcases_yaml.items():
+ for case in case_dicts:
+ case_name = case.get('name')
+ visualizations = case.get('visualizations')
+ family = case.get('test_family')
+ for installer in _installers:
+ pods_and_scenarios = _get_pods_and_scenarios(project, case_name, installer)
+ for visualization in visualizations:
+ for pod, scenarios in pods_and_scenarios.iteritems():
+ kibana_dashboards.append(KibanaDashboard(project,
+ case_name,
+ family,
+ installer,
+ pod,
+ scenarios,
+ visualization))
return kibana_dashboards
if generate_inputs:
generate_js_inputs(input_file_path, kibana_url, dashboards)
-
#! /usr/bin/env python
-import logging
-import argparse
-import shared_utils
+import datetime
import json
-import urlparse
-import uuid
+import logging
import os
import subprocess
-import datetime
+import traceback
+import urlparse
+import uuid
+
+import argparse
+
+import shared_utils
logger = logging.getLogger('mongo_to_elasticsearch')
logger.setLevel(logging.DEBUG)
project = testcase['project_name']
case_name = testcase['case_name']
logger.info("Processing mongo test case '{}'".format(case_name))
- if project == 'functest':
- if case_name == 'rally_sanity':
- return modify_functest_rally(testcase)
- elif case_name.lower() == 'odl':
- return modify_functest_odl(testcase)
- elif case_name.lower() == 'onos':
- return modify_functest_onos(testcase)
- elif case_name.lower() == 'vims':
- return modify_functest_vims(testcase)
- elif case_name == 'tempest_smoke_serial':
- return modify_functest_tempest(testcase)
- return modify_default_entry(testcase)
+ try:
+ if project == 'functest':
+ if case_name == 'rally_sanity':
+ return modify_functest_rally(testcase)
+ elif case_name.lower() == 'odl':
+ return modify_functest_odl(testcase)
+ elif case_name.lower() == 'onos':
+ return modify_functest_onos(testcase)
+ elif case_name.lower() == 'vims':
+ return modify_functest_vims(testcase)
+ elif case_name == 'tempest_smoke_serial':
+ return modify_functest_tempest(testcase)
+ return modify_default_entry(testcase)
+ except Exception:
+ logger.error("Fail in modify testcase[%s]\nerror message: %s" % (testcase, traceback.format_exc()))
else:
return False
--- /dev/null
+functest:
+ -
+ name: tempest_smoke_serial
+ test_family: VIM
+ visualizations:
+ -
+ name: duration
+ fields:
+ - field: details.duration
+ -
+ name: tests_failures
+ fields:
+ - field: details.tests
+ - field: details.failures
+ -
+ name: success_percentage
+ fields:
+ - field: details.success_percentage
+ -
+ name: rally_sanity
+ test_family: VIM
+ visualizations:
+ -
+ name: duration
+ fields:
+ - field: details.duration
+ -
+ name: tests_failures
+ fields:
+ - field: details.tests
+ -
+ name: success_percentage
+ fields:
+ - field: details.success_percentage
+ -
+ name: vping_ssh
+ test_family: VIM
+ visualizations:
+ -
+ name: duration
+ fields:
+ - field: details.duration
+ -
+ name: vping_userdata
+ test_family: VIM
+ visualizations:
+ -
+ name: duration
+ fields:
+ - field: details.duration
+ -
+ name: odl
+ test_family: Controller
+ visualizations:
+ -
+ name: tests_failures
+ fields:
+ - field: details.tests
+ - field: details.failures
+ -
+ name: success_percentage
+ fields:
+ - field: details.success_percentage
+ -
+ name: onos
+ test_family: Controller
+ visualizations:
+ -
+ name: duration
+ label: FUNCvirNet
+ fields:
+ - field: details.FUNCvirNet.duration
+ -
+ name: duration
+ label: FUNCvirNetL3
+ fields:
+ - field: details.FUNCvirNetL3.duration
+ -
+ name: tests_failures
+ label: FUNCvirNet
+ fields:
+ - field: details.FUNCvirNet.tests
+ - field: details.FUNCvirNet.failures
+ -
+ name: tests_failures
+ label: FUNCvirNetL3
+ fields:
+ - field: details.FUNCvirNetL3.tests
+ - field: details.FUNCvirNetL3.failures
+ -
+ name: vims
+ test_family: Features
+ visualizations:
+ -
+ name: duration
+ fields:
+ - field: details.vIMS.duration
+ - field: details.orchestrator.duration
+ - field: details.sig_test.duration
+ -
+ name: tests_failures
+ fields:
+ - field: details.sig_test.tests
+ - field: details.sig_test.failures
+ - field: details.sig_test.passed
+ - field: details.sig_test.skipped
+promise:
+ -
+ name: promise
+ test_family: Features
+ visualizations:
+ -
+ name: duration
+ fields:
+ - field: details.duration
+ -
+ name: tests_failures
+ fields:
+ - field: details.tests
+ - field: details.failures
+doctor:
+ -
+ name: doctor-notification
+ test_family: Features
+ visualizations:
+ -
+ name: duration
+ fields:
+ - field: details.duration