- job-template:
name: 'apex-verify-{stream}'
- node: 'apex-virtual-master'
-
concurrent: true
disabled: '{obj:disabled}'
project-type: 'multijob'
parameters:
+ - '{project}-virtual-{stream}-defaults'
- apex-parameter:
gs-pathname: '{gs-pathname}/dev'
- project-parameter:
GERRIT_REFSPEC=$GERRIT_REFSPEC
GERRIT_CHANGE_NUMBER=$GERRIT_CHANGE_NUMBER
GERRIT_CHANGE_COMMIT_MESSAGE=$GERRIT_CHANGE_COMMIT_MESSAGE
- node-parameters: false
+ node-parameters: true
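+ # node-parameters: true makes the triggered phase jobs run on the
+ # same node as the parent multijob, so the slave is selected once by
+ # the '{project}-virtual-{stream}-defaults' parameter above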
kill-phase-on: FAILURE
abort-all-job: true
git-revision: true
- job-template:
name: 'apex-verify-gate-{stream}'
- node: 'apex-build-{stream}'
-
concurrent: true
disabled: '{obj:disabled}'
project-type: 'multijob'
parameters:
+ - '{project}-virtual-{stream}-defaults'
- apex-parameter:
gs-pathname: '{gs-pathname}/dev'
- project-parameter:
- job-template:
name: 'apex-deploy-{platform}-{stream}'
- node: 'apex-{platform}-{stream}'
-
concurrent: true
disabled: false
- job-template:
name: 'apex-virtual-{stream}'
- node: 'apex-virtual-master'
-
project-type: 'multijob'
disabled: false
# branch: branch (e.g. stable)
project-type: 'multijob'
- node: '{baremetal-slave}'
-
disabled: '{obj:disable_daily}'
scm:
use-build-blocker: true
blocking-jobs:
- 'compass-os-.*?-{pod}-daily-.*?'
+ - 'compass-k8-.*?-{pod}-daily-.*?'
- 'compass-os-.*?-baremetal-daily-.*?'
+ - 'compass-k8-.*?-baremetal-daily-.*?'
- 'compass-verify-[^-]*-[^-]*'
block-level: 'NODE'
blocking-jobs:
- 'compass-verify-[^-]*-[^-]*'
- 'compass-os-.*?-virtual-daily-.*?'
+ - 'compass-k8-.*?-virtual-daily-.*?'
block-level: 'NODE'
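+ # 'NODE' only blocks jobs scheduled onto the same slave; the new
+ # compass-k8 patterns mirror the compass-os ones so the k8s dailies
+ # are serialized just like the OpenStack dailies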
wrappers:
installer_mac=$(sudo virsh domiflist daisy | grep vnet | \
grep -Eo "[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+")
- export INSTALLER_IP=$(/usr/sbin/arp -e -i $bridge_name | grep ${installer_mac} | awk {'print $1'})
+ export INSTALLER_IP=$(/usr/sbin/arp -e -i $bridge_name | grep ${installer_mac} | head -n 1 | awk '{print $1}')
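+ # head -n 1 keeps a single match in case the ARP cache holds more
+ # than one entry for the installer MAC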
echo "Installer ip is ${INSTALLER_IP}"
else
if [[ -s violation.log ]]; then
echo "Reporting lint result..."
msg="Found syntax error and/or coding style violation(s) in the files modified by your patchset."
- sed -i -e '1s/^//$msg\n\n/' violation.log
+ sed -i -e "1s#^#${msg}\n\n#" violation.log
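+ # double quotes let ${msg} expand, and '#' as the sed delimiter
+ # avoids clashing with the '/' in "and/or"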
cmd="gerrit review -p $GERRIT_PROJECT -m \"$(cat violation.log)\" $GERRIT_PATCHSET_REVISION --notify NONE"
ssh -p 29418 gerrit.opnfv.org "$cmd"
builders:
- shell: |
#!/bin/bash
- # Install python package
+ # Install python package
sudo pip install "flake8==2.6.2"
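# the version is pinned so verify results do not drift as new flake8
# releases add or change checks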
echo "Checking python code..."
name: SSH_KEY
default: /root/.ssh/id_rsa
description: 'SSH key to use for Apex'
- - node:
- name: SLAVE_NAME
- description: 'Slave name on Jenkins'
- allowed-slaves:
- - lf-pod1
- default-slaves:
- - lf-pod1
- parameter:
name: 'apex-baremetal-euphrates-defaults'
name: SSH_KEY
default: /root/.ssh/id_rsa
description: 'SSH key to use for Apex'
- - node:
- name: SLAVE_NAME
- description: 'Slave name on Jenkins'
- allowed-slaves:
- - lf-pod1
- default-slaves:
- - lf-pod1
- parameter:
name: 'apex-baremetal-danube-defaults'
name: SSH_KEY
default: /root/.ssh/id_rsa
description: 'SSH key to use for Apex'
- - node:
- name: SLAVE_NAME
- description: 'Slave name on Jenkins'
- allowed-slaves:
- - lf-pod1
- default-slaves:
- - lf-pod1
+
- parameter:
name: 'apex-virtual-master-defaults'
name: SSH_KEY
default: /root/.ssh/id_rsa
description: 'SSH key to use for Apex'
- - node:
- name: SLAVE_NAME
- description: 'Slave name on Jenkins'
- allowed-slaves:
- - lf-virtual2
- - lf-virtual3
- default-slaves:
- - lf-virtual2
- - lf-virtual3
- parameter:
name: 'apex-virtual-euphrates-defaults'
name: SSH_KEY
default: /root/.ssh/id_rsa
description: 'SSH key to use for Apex'
- - node:
- name: SLAVE_NAME
- description: 'Slave name on Jenkins'
- allowed-slaves:
- - lf-virtual2
- - lf-virtual3
- default-slaves:
- - lf-virtual2
- - lf-virtual3
- parameter:
name: 'apex-virtual-danube-defaults'
name: SSH_KEY
default: /root/.ssh/id_rsa
description: 'SSH key to use for Apex'
- - node:
- name: SLAVE_NAME
- description: 'Slave name on Jenkins'
- allowed-slaves:
- - lf-pod3
- default-slaves:
- - lf-pod3
- parameter:
name: 'lf-pod1-defaults'
- 'prune-docker-images'
- 'archive-repositories'
- 'check-status-of-slaves'
+ - 'ansible-build-server'
########################
# job templates
name: SLAVE_NAME
description: Slaves to prune docker images
default-slaves:
- - arm-build2
+ - arm-build3
+ - arm-build4
+ - arm-build5
+ - arm-build6
- ericsson-build3
- ericsson-build4
- lf-build2
builders:
- description-setter:
description: "Built on $NODE_NAME"
+ # yamllint disable rule:line-length
- shell: |
#!/bin/bash
-
(docker ps -q; docker ps -aq) | sort | uniq -u | xargs --no-run-if-empty docker rm
docker images -f dangling=true -q | xargs --no-run-if-empty docker rmi
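# note: uniq -u keeps IDs that appear only once, i.e. containers that
# are stopped, since running container IDs show up in both ps listings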
+
+ # yamllint enable rule:line-length
triggers:
- timed: '@midnight'
parameters:
- node:
name: SLAVE_NAME
- description: We don't want workspace wiped. so I just threw the script on the master
+ description: 'script lives on master node'
default-slaves:
- master
allowed-multiselect: false
- shell: |
cd /opt/jenkins-ci/slavemonitor
bash slave-monitor-0.1.sh | sort
+
+- job-template:
+ name: 'ansible-build-server'
+
+ project-type: freestyle
+
+ disabled: false
+ concurrent: true
+
+ parameters:
+ - node:
+ name: SLAVE_NAME
+ description: Build Servers
+ default-slaves:
+ - lf-build1
+ - lf-build2
+ allowed-multiselect: true
+ ignore-offline-nodes: true
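+ # with multiselect allowed and offline nodes ignored, the timed
+ # trigger can fan the job out to every listed server that is online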
+ - project-parameter:
+ project: releng
+ branch: master
+
+ scm:
+ - git-scm
+
+ triggers:
+ - timed: '@midnight'
+
+ builders:
+ - install-ansible
+ - run-ansible-build-server-playbook
+
+
+- builder:
+ name: install-ansible
+ builders:
+ - shell: |
+ # Install ansible here
+ if [ -f /etc/centos-release ] \
+ || [ -f /etc/redhat-release ] \
+ || [ -f /etc/system-release ]; then
+ sudo yum -y install ansible
+ fi
+ if [ -f /etc/debian_version ] \
+ || grep -qi ubuntu /etc/lsb-release \
+ || grep -qi ubuntu /etc/os-release; then
+ sudo apt-get -y install ansible
+ fi
+
+- builder:
+ name: run-ansible-build-server-playbook
+ builders:
+ - shell: |
+ # run playbook
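+ # -C (check) and -D (diff) make this a dry run that only reports
+ # what would change; drop -C to actually apply the playbook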
+ sudo ansible-playbook -C -D -i \
+ $WORKSPACE/utils/build-server-ansible/inventory.ini \
+ $WORKSPACE/utils/build-server-ansible/main.yml
# run basic sanity test
make sanity
cd ../ci
- scl enable python33 "source ~/vsperfenv/bin/activate ; ./build-vsperf.sh daily"
+ scl enable rh-python34 "source ~/vsperfenv/bin/activate ; ./build-vsperf.sh daily"
- job-template:
name: 'vswitchperf-verify-{stream}'
cd $WORKSPACE/releng-xci
cat > bifrost_test.sh << EOF
+#!/bin/bash
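+# stop on the first error, unset variable, or failed pipeline stage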
+set -o errexit
+set -o nounset
+set -o pipefail
+
cd ~/bifrost
# provision 3 VMs; xcimaster, controller, and compute
./scripts/bifrost-provision.sh
notbuilt: false
- centos:
disabled: false
- successful: false
- failed: false
- unstable: false
- notbuilt: false
+ successful: true
+ failed: true
+ unstable: true
+ notbuilt: true
- opensuse:
disabled: false
successful: false