Merge "[Dovetail] process name configuration for HA test cases on Fuel"
author mei mei <meimei@huawei.com>
Wed, 18 Jul 2018 06:31:02 +0000 (06:31 +0000)
committer Gerrit Code Review <gerrit@opnfv.org>
Wed, 18 Jul 2018 06:31:02 +0000 (06:31 +0000)
20 files changed:
jjb/3rd_party_ci/detect-snapshot.sh [new file with mode: 0755]
jjb/3rd_party_ci/install-netvirt.sh
jjb/3rd_party_ci/odl-netvirt.yaml
jjb/apex/apex-snapshot-deploy.sh
jjb/apex/apex-verify-jobs.yaml
jjb/apex/apex.yaml
jjb/apex/apex.yaml.j2
jjb/apex/scenarios.yaml.hidden
jjb/auto/auto.yaml
jjb/compass4nfv/compass-verify-jobs.yaml
jjb/cperf/cperf-ci-jobs.yaml
jjb/cperf/cperf-prepare-robot.sh [new file with mode: 0755]
jjb/cperf/cperf-robot-netvirt-csit.sh [new file with mode: 0755]
jjb/cperf/parse-node-yaml.py [new file with mode: 0644]
jjb/dovetail-webportal/dovetail-webportal-project-jobs.yaml [new file with mode: 0644]
jjb/functest/functest-alpine.sh
jjb/functest/functest-daily-jobs.yaml
jjb/releng/opnfv-docker.sh
jjb/releng/opnfv-docker.yaml
jjb/releng/opnfv-utils.yaml

diff --git a/jjb/3rd_party_ci/detect-snapshot.sh b/jjb/3rd_party_ci/detect-snapshot.sh
new file mode 100755 (executable)
index 0000000..4949cb2
--- /dev/null
@@ -0,0 +1,31 @@
+#!/bin/bash
+set -o errexit
+set -o nounset
+set -o pipefail
+
+echo "Detecting requested OpenStack branch and topology type in gerrit comment"
+parsed_comment=$(echo $GERRIT_EVENT_COMMENT_TEXT | sed -n 's/^.*check-opnfv\s*//p')
+parsed_comment=$(echo $parsed_comment | sed -n 's/\s*$//p')
+if [ ! -z "$parsed_comment" ]; then
+  if echo $parsed_comment | grep -E '^[a-z]+-(no)?ha'; then
+    IFS='-' read -r -a array <<< "$parsed_comment"
+    os_version=${array[0]}
+    topo=${array[1]}
+    echo "OS version detected in gerrit comment: ${os_version}"
+    echo "Topology type detected in gerrit comment: ${topo}"
+  else
+    echo "Invalid format given for scenario in gerrit comment: ${parsed_comment}...aborting"
+    exit 1
+  fi
+else
+  echo "No scenario given in gerrit comment, will use default (master OpenStack, noha)"
+  os_version='master'
+  topo='noha'
+fi
+
+echo "Writing variables to file"
+cat > detected_snapshot << EOI
+OS_VERSION=$os_version
+TOPOLOGY=$topo
+SNAP_CACHE=$HOME/snap_cache/$os_version/$topo
+EOI
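
For reference, a short sketch of the comment formats the parser above accepts; the trigger keyword and the scenario regex come from the script itself, while the sample values are only illustrative:

    # Gerrit comment              ->  resulting variables
    #   "check-opnfv queens-ha"   ->  OS_VERSION=queens   TOPOLOGY=ha
    #   "check-opnfv master-noha" ->  OS_VERSION=master   TOPOLOGY=noha
    #   "check-opnfv"             ->  OS_VERSION=master   TOPOLOGY=noha   (defaults)
    #   "check-opnfv queens"      ->  rejected: token must match '^[a-z]+-(no)?ha', job exits 1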
diff --git a/jjb/3rd_party_ci/install-netvirt.sh b/jjb/3rd_party_ci/install-netvirt.sh
index 07bbe77..232d60e 100755 (executable)
@@ -3,7 +3,7 @@ set -o errexit
 set -o nounset
 set -o pipefail
 
-SNAP_CACHE=$HOME/snap_cache
+SNAP_CACHE=$HOME/snap_cache/$OS_VERSION/$TOPOLOGY
 # clone opnfv sdnvpn repo
 git clone https://gerrit.opnfv.org/gerrit/p/sdnvpn.git $WORKSPACE/sdnvpn
 
diff --git a/jjb/3rd_party_ci/odl-netvirt.yaml b/jjb/3rd_party_ci/odl-netvirt.yaml
index 3a46e79..62e4ca1 100644 (file)
@@ -92,7 +92,7 @@
             #     comment-contains-value: 'https://jenkins.opendaylight.org/releng/job/netvirt-patch-test-current-carbon/.*?/ : UNSTABLE'
             # yamllint enable rule:line-length
             - comment-added-contains-event:
-                comment-contains-value: 'opnfv-test'
+                comment-contains-value: 'check-opnfv'
           projects:
             - project-compare-type: 'ANT'
               project-pattern: '{project}'
     builders:
       - description-setter:
           description: "Built on $NODE_NAME"
+      - detect-opnfv-snapshot
+      - inject:
+          properties-file: detected_snapshot
       - multijob:
           name: create-apex-vms
           condition: SUCCESSFUL
                 GERRIT_PATCHSET_REVISION=$GERRIT_PATCHSET_REVISION
                 NETVIRT_ARTIFACT=$NETVIRT_ARTIFACT
                 APEX_ENV_NUMBER=$APEX_ENV_NUMBER
+                GERRIT_EVENT_COMMENT_TEXT=$GERRIT_EVENT_COMMENT_TEXT
+                TOPOLOGY=$TOPOLOGY
+                OS_VERSION=$OS_VERSION
               node-parameters: true
               kill-phase-on: FAILURE
               abort-all-job: true
             - name: 'odl-netvirt-verify-virtual-install-netvirt-{stream}'
               current-parameters: false
               predefined-parameters: |
-                ODL_BRANCH={branch}
+                ODL_BRANCH=$BRANCH
                 BRANCH=$BRANCH
                 GERRIT_REFSPEC=$GERRIT_REFSPEC
                 GERRIT_CHANGE_NUMBER=$GERRIT_CHANGE_NUMBER
                 GERRIT_PATCHSET_NUMBER=$GERRIT_PATCHSET_NUMBER
                 GERRIT_PATCHSET_REVISION=$GERRIT_PATCHSET_REVISION
                 NETVIRT_ARTIFACT=$NETVIRT_ARTIFACT
+                TOPOLOGY=$TOPOLOGY
+                OS_VERSION=$OS_VERSION
               node-parameters: true
               kill-phase-on: FAILURE
               abort-all-job: true
           projects:
             - name: 'functest-netvirt-virtual-suite-master'
               predefined-parameters: |
-                DEPLOY_SCENARIO=os-odl-nofeature-ha
+                DEPLOY_SCENARIO=os-odl-nofeature-$TOPOLOGY
                 FUNCTEST_MODE=testcase
-                FUNCTEST_SUITE_NAME=tempest_smoke_serial
+                FUNCTEST_SUITE_NAME=tempest_smoke
                 RC_FILE_PATH=$HOME/cloner-info/overcloudrc
               node-parameters: true
               kill-phase-on: FAILURE
               abort-all-job: false
+      - multijob:
+          name: csit
+          condition: ALWAYS
+          projects:
+            - name: cperf-apex-csit-{stream}
+              predefined-parameters: |
+                ODL_BRANCH=$BRANCH
+                RC_FILE_PATH=$SNAP_CACHE/overcloudrc
+                NODE_FILE_PATH=$SNAP_CACHE/node.yaml
+                SSH_KEY_PATH=$SNAP_CACHE/id_rsa
+                ODL_CONTAINERIZED=false
+                OS_VERSION=$OS_VERSION
+              node-parameters: true
+              kill-phase-on: NEVER
+              abort-all-job: false
       - multijob:
           name: postprocess
           condition: ALWAYS
     builders:
       - shell:
           !include-raw: ./postprocess-netvirt.sh
+
+- builder:
+    name: 'detect-opnfv-snapshot'
+    builders:
+      - shell:
+          !include-raw: ./detect-snapshot.sh
diff --git a/jjb/apex/apex-snapshot-deploy.sh b/jjb/apex/apex-snapshot-deploy.sh
index 0760626..9738ecb 100644 (file)
@@ -25,9 +25,11 @@ pushd ci > /dev/null
 sudo opnfv-clean
 popd > /dev/null
 
+full_snap_url=http://$GS_URL/${OS_VERSION}/${TOPOLOGY}
+
 echo "Downloading latest snapshot properties file"
-if ! wget -O $WORKSPACE/opnfv.properties http://$GS_URL/snapshot.properties; then
-  echo "ERROR: Unable to find snapshot.properties at ${GS_URL}...exiting"
+if ! wget -O $WORKSPACE/opnfv.properties ${full_snap_url}/snapshot.properties; then
+  echo "ERROR: Unable to find snapshot.properties at ${full_snap_url}...exiting"
   exit 1
 fi
 
@@ -39,6 +41,7 @@ if [ -z "$latest_snap_checksum" ]; then
 fi
 
 local_snap_checksum=""
+SNAP_CACHE=${SNAP_CACHE}/${OS_VERSION}/${TOPOLOGY}
 
 # check snap cache directory exists
 # if snapshot cache exists, find the checksum
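
Assuming illustrative values (the GS_URL below is hypothetical, not taken from this change), the injected OS_VERSION and TOPOLOGY variables change the lookup like this:

    # OS_VERSION=queens TOPOLOGY=ha GS_URL=artifacts.example.org/apex   (hypothetical values)
    # full_snap_url  -> http://artifacts.example.org/apex/queens/ha
    # properties     -> ${full_snap_url}/snapshot.properties
    # SNAP_CACHE     -> ${SNAP_CACHE}/queens/ha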
diff --git a/jjb/apex/apex-verify-jobs.yaml b/jjb/apex/apex-verify-jobs.yaml
index 81e59bd..819e17b 100644 (file)
                 GERRIT_REFSPEC=$GERRIT_REFSPEC
                 GERRIT_CHANGE_NUMBER=$GERRIT_CHANGE_NUMBER
                 GERRIT_CHANGE_COMMIT_MESSAGE=$GERRIT_CHANGE_COMMIT_MESSAGE
-                FUNCTEST_SUITE_NAME=tempest_smoke_serial
+                FUNCTEST_SUITE_NAME=tempest_smoke
                 FUNCTEST_MODE=testcase
               node-parameters: false
               kill-phase-on: FAILURE
               current-parameters: false
               predefined-parameters: |
                 DEPLOY_SCENARIO=$DEPLOY_SCENARIO
-                FUNCTEST_SUITE_NAME=tempest_smoke_serial
+                FUNCTEST_SUITE_NAME=tempest_smoke
                 FUNCTEST_MODE=testcase
                 REPO=$REPO
                 GERRIT_BRANCH=$GERRIT_BRANCH
diff --git a/jjb/apex/apex.yaml b/jjb/apex/apex.yaml
index e24a2c3..720d5bd 100644 (file)
@@ -8,7 +8,7 @@
       - 'apex-virtual-{stream}'
       - 'apex-deploy-{platform}-{stream}'
       - 'apex-daily-{stream}'
-      - 'apex-csit-promote-daily-{stream}-os-{os_version}'
+      - 'apex-csit-promote-daily-{stream}-os-{os_version}-{topology}'
       - 'apex-fdio-promote-daily-{stream}'
       - 'apex-{scenario}-baremetal-{scenario_stream}'
       - 'apex-testsuite-{scenario}-baremetal-{scenario_stream}'
@@ -37,7 +37,7 @@
           baremetal-slave: 'apex-baremetal-master'
           verify-scenario: 'os-nosdn-nofeature-ha'
           scenario_stream: 'fraser'
-          disable_daily: false
+          disable_daily: true
           disable_promote: true
       - euphrates: &euphrates
           branch: 'stable/euphrates'
           <<: *master
       - 'os-odl-queens-ha':
           <<: *master
+      - 'k8s-nosdn-nofeature-noha':
+          <<: *master
       - 'os-nosdn-nofeature-noha':
           <<: *euphrates
       - 'os-nosdn-nofeature-ha':
       - 'master':
           os_scenario: 'nofeature'
 
+    topology:
+      - 'noha'
+      - 'ha'
 
 # Fetch Logs Job
 - job-template:
 
 # CSIT promote
 - job-template:
-    name: 'apex-csit-promote-daily-{stream}-os-{os_version}'
+    name: 'apex-csit-promote-daily-{stream}-os-{os_version}-{topology}'
 
     # Job template for promoting CSIT Snapshots
     #
           name: PROMOTE
           default: 'True'
           description: "Used for overriding the PROMOTE"
+      - string:
+          name: GS_URL
+          default: $GS_BASE{gs-pathname}/{os_version}/{topology}
+          description: "User for overriding GS_URL from apex params"
 
     properties:
       - build-blocker:
             - 'apex-verify.*'
             - 'apex-runner.*'
             - 'apex-daily.*'
+            - 'apex-csit-promote.*'
+      - throttle:
+          max-per-node: 1
+          max-total: 10
+          option: 'project'
 
     triggers:
       - timed: '0 12 * * 0'
             - name: 'apex-virtual-{stream}'
               current-parameters: true
               predefined-parameters: |
-                DEPLOY_SCENARIO=os-odl-{os_scenario}-noha
+                DEPLOY_SCENARIO=os-odl-{os_scenario}-{topology}
                 OPNFV_CLEAN=yes
                 GERRIT_BRANCH=$GERRIT_BRANCH
                 GERRIT_REFSPEC=$GERRIT_REFSPEC
                 GERRIT_CHANGE_NUMBER=$GERRIT_CHANGE_NUMBER
                 GERRIT_CHANGE_COMMIT_MESSAGE=$GERRIT_CHANGE_COMMIT_MESSAGE
-                FUNCTEST_SUITE_NAME=tempest_smoke_serial
+                FUNCTEST_SUITE_NAME=tempest_smoke
                 FUNCTEST_MODE=testcase
               node-parameters: true
               kill-phase-on: FAILURE
               kill-phase-on: NEVER
               abort-all-job: true
               git-revision: false
+            - name: 'apex-k8s-nosdn-nofeature-noha-baremetal-master'
+              node-parameters: false
+              current-parameters: false
+              predefined-parameters: |
+                OPNFV_CLEAN=yes
+              kill-phase-on: NEVER
+              abort-all-job: true
+              git-revision: false
 
 # euphrates Builder
 - builder:
diff --git a/jjb/apex/apex.yaml.j2 b/jjb/apex/apex.yaml.j2
index 1f5232b..ffe8c57 100644 (file)
@@ -8,7 +8,7 @@
       - 'apex-virtual-{stream}'
       - 'apex-deploy-{platform}-{stream}'
       - 'apex-daily-{stream}'
-      - 'apex-csit-promote-daily-{stream}-os-{os_version}'
+      - 'apex-csit-promote-daily-{stream}-os-{os_version}-{topology}'
       - 'apex-fdio-promote-daily-{stream}'
       - 'apex-{scenario}-baremetal-{scenario_stream}'
       - 'apex-testsuite-{scenario}-baremetal-{scenario_stream}'
@@ -37,7 +37,7 @@
           baremetal-slave: 'apex-baremetal-master'
           verify-scenario: 'os-nosdn-nofeature-ha'
           scenario_stream: 'fraser'
-          disable_daily: false
+          disable_daily: true
           disable_promote: true
       - euphrates: &euphrates
           branch: 'stable/euphrates'
@@ -81,6 +81,9 @@
       - 'master':
           os_scenario: 'nofeature'
 
+    topology:
+      - 'noha'
+      - 'ha'
 
 # Fetch Logs Job
 - job-template:
 
 # CSIT promote
 - job-template:
-    name: 'apex-csit-promote-daily-{stream}-os-{os_version}'
+    name: 'apex-csit-promote-daily-{stream}-os-{os_version}-{topology}'
 
     # Job template for promoting CSIT Snapshots
     #
           name: PROMOTE
           default: 'True'
           description: "Used for overriding the PROMOTE"
+      - string:
+          name: GS_URL
+          default: $GS_BASE{gs-pathname}/{os_version}/{topology}
+          description: "User for overriding GS_URL from apex params"
 
     properties:
       - build-blocker:
             - 'apex-verify.*'
             - 'apex-runner.*'
             - 'apex-daily.*'
+            - 'apex-csit-promote.*'
+      - throttle:
+          max-per-node: 1
+          max-total: 10
+          option: 'project'
 
     triggers:
       - timed: '0 12 * * 0'
             - name: 'apex-virtual-{stream}'
               current-parameters: true
               predefined-parameters: |
-                DEPLOY_SCENARIO=os-odl-{os_scenario}-noha
+                DEPLOY_SCENARIO=os-odl-{os_scenario}-{topology}
                 OPNFV_CLEAN=yes
                 GERRIT_BRANCH=$GERRIT_BRANCH
                 GERRIT_REFSPEC=$GERRIT_REFSPEC
                 GERRIT_CHANGE_NUMBER=$GERRIT_CHANGE_NUMBER
                 GERRIT_CHANGE_COMMIT_MESSAGE=$GERRIT_CHANGE_COMMIT_MESSAGE
-                FUNCTEST_SUITE_NAME=tempest_smoke_serial
+                FUNCTEST_SUITE_NAME=tempest_smoke
                 FUNCTEST_MODE=testcase
               node-parameters: true
               kill-phase-on: FAILURE
diff --git a/jjb/apex/scenarios.yaml.hidden b/jjb/apex/scenarios.yaml.hidden
index 71a6c3d..3474fdb 100644 (file)
@@ -7,6 +7,7 @@ master:
   - 'os-nosdn-queens-ha'
   - 'os-odl-queens-noha'
   - 'os-odl-queens-ha'
+  - 'k8s-nosdn-nofeature-noha'
 fraser:
   - 'os-nosdn-nofeature-noha'
   - 'os-nosdn-nofeature-ha'
diff --git a/jjb/auto/auto.yaml b/jjb/auto/auto.yaml
index 0cfe88f..ee72082 100644 (file)
 - trigger:
     name: 'fuel-os-nosdn-onap-ha-auto-baremetal-master-trigger'
     triggers:
-      - timed: '0 1 * * *'
+      - timed: ''
 # ---------------------------------------------------------------------
 # Auto CI Baremetal Triggers running against fraser branch
 # ---------------------------------------------------------------------
diff --git a/jjb/compass4nfv/compass-verify-jobs.yaml b/jjb/compass4nfv/compass-verify-jobs.yaml
index c357ff6..beb857d 100644 (file)
               node-parameters: true
               kill-phase-on: NEVER
               abort-all-job: true
-            - name: 'functest-compass-virtual-suite-{stream}'
-              current-parameters: false
-              predefined-parameters: |
-                FUNCTEST_MODE=testcase
-                FUNCTEST_SUITE_NAME=vping_ssh
-                DEPLOY_SCENARIO=os-nosdn-nofeature-ha
-              node-parameters: true
-              kill-phase-on: NEVER
-              abort-all-job: true
 
 - job-template:
     name: 'compass-verify-k8-{distro}-{stream}'
diff --git a/jjb/cperf/cperf-ci-jobs.yaml b/jjb/cperf/cperf-ci-jobs.yaml
index fdd3509..59afb89 100644 (file)
@@ -9,47 +9,29 @@
     # -------------------------------
     # BRANCH ANCHORS
     # -------------------------------
-    master: &master
-      stream: master
-      branch: '{stream}'
-      gs-pathname: ''
-      docker-tag: 'latest'
-    danube: &danube
-      stream: danube
-      branch: 'stable/{stream}'
-      gs-pathname: '/{stream}'
-      docker-tag: 'stable'
+    stream: master
+    branch: '{stream}'
+    gs-pathname: ''
+    docker-tag: 'latest'
 
-    # -------------------------------
-    # POD, INSTALLER, AND BRANCH MAPPING
-    # -------------------------------
-    pod:
-      # -------------------------------
-      #        master
-      # -------------------------------
-      - intel-pod2:
-          installer: apex
-          <<: *master
-      - intel-pod2:
-          installer: apex
-          <<: *danube
+    installer: apex
 
     testsuite:
-      - 'daily'
+      - csit
+      - cbench
 
     jobs:
-      - 'cperf-{installer}-{pod}-{testsuite}-{stream}'
+      - 'cperf-{installer}-{testsuite}-{stream}'
 
 ################################
 # job template
 ################################
 - job-template:
-    name: 'cperf-{installer}-{pod}-{testsuite}-{stream}'
+    name: 'cperf-{installer}-{testsuite}-{stream}'
 
     concurrent: true
 
     properties:
-      - logrotate-default
       - throttle:
           enabled: true
           max-per-node: 1
 
     wrappers:
       - build-name:
-          name: '$BUILD_NUMBER Suite: $CPERF_SUITE_NAME Scenario: $DEPLOY_SCENARIO'
+          name: '$BUILD_NUMBER Suite: $CPERF_SUITE_NAME ODL BRANCH: $ODL_BRANCH'
       - timeout:
           timeout: 400
           abort: true
 
     parameters:
-      - project-parameter:
-          project: '{project}'
-          branch: '{branch}'
-      - '{pod}-defaults'
-      - '{installer}-defaults'
       - cperf-parameter:
           testsuite: '{testsuite}'
           gs-pathname: '{gs-pathname}'
           docker-tag: '{docker-tag}'
-
-    scm:
-      - git-scm
+          stream: '{stream}'
 
     builders:
       - 'cperf-{testsuite}-builder'
           name: CPERF_SUITE_NAME
           default: '{testsuite}'
           description: "Suite name to run"
+      - string:
+          name: ODL_BRANCH
+          default: 'master'
+          description: "Branch that OpenDaylight is running"
+      - string:
+          name: OS_VERSION
+          default: 'master'
+          description: "OpenStack version (short name, no stable/ prefix)"
       - string:
           name: GS_PATHNAME
           default: '{gs-pathname}'
           name: DOCKER_TAG
           default: '{docker-tag}'
           description: 'Tag to pull docker image'
+      - string:
+          name: RC_FILE_PATH
+          default: ''
+          description: "Path to the OS credentials file if given"
+      - string:
+          name: SSH_KEY_PATH
+          default: ''
+          description: "Path to the private SSH key to access OPNFV nodes"
+      - string:
+          name: NODE_FILE_PATH
+          default: ''
+          description: "Path to the yaml file describing overcloud nodes"
+      - string:
+          name: ODL_CONTAINERIZED
+          default: 'true'
+          description: "boolean set true if ODL on overcloud is a container"
 
 ########################
 # trigger macros
 # builder macros
 ########################
 - builder:
-    name: cperf-daily-builder
+    name: cperf-csit-builder
+    builders:
+      - 'cperf-cleanup'
+      - 'cperf-prepare-robot'
+      - 'cperf-robot-netvirt-csit'
+
+- builder:
+    name: cperf-cbench-builder
     builders:
       - 'cperf-cleanup'
+      - 'cperf-prepare-robot'
       - 'cperf-robot-cbench'
 
+- builder:
+    name: cperf-prepare-robot
+    builders:
+      - shell:
+          !include-raw: ./cperf-prepare-robot.sh
+
 - builder:
     name: cperf-robot-cbench
     builders:
           set -o errexit
           set -o nounset
           set -o pipefail
-          undercloud_mac=$(sudo virsh domiflist undercloud | grep default | \
-                            grep -Eo "[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+")
-          INSTALLER_IP=$(/usr/sbin/arp -e | grep ${undercloud_mac} | awk {'print $1'})
-
-          sudo scp -o StrictHostKeyChecking=no root@$INSTALLER_IP:/home/stack/overcloudrc /tmp/overcloudrc
-          sudo chmod 755 /tmp/overcloudrc
-          source /tmp/overcloudrc
-
-          # robot suites need the ssh key to log in to controller nodes, so throwing it
-          # in tmp, and mounting /tmp as $HOME as far as robot is concerned
-          sudo rm -rf /tmp/.ssh
-          sudo mkdir /tmp/.ssh
-          sudo chmod 0700 /tmp/.ssh
-          sudo scp -o StrictHostKeyChecking=no root@$INSTALLER_IP:/home/stack/.ssh/id_rsa /tmp/.ssh/
-          sudo chown -R jenkins-ci:jenkins-ci /tmp/.ssh
-          # done with sudo. jenkins-ci is the user from this point
-          chmod 0600 /tmp/.ssh/id_rsa
 
           # cbench requires the openflow drop test feature to be installed.
           sshpass -p karaf ssh -o StrictHostKeyChecking=no \
                                -p 8101 karaf@$SDN_CONTROLLER_IP \
                                 feature:install odl-openflowplugin-flow-services-ui odl-openflowplugin-drop-test
 
-          docker pull opnfv/cperf:$DOCKER_TAG
-
           robot_cmd="pybot -e exclude -L TRACE -d /tmp \
                       -v ODL_SYSTEM_1_IP:${SDN_CONTROLLER_IP} \
                       -v ODL_SYSTEM_IP:${SDN_CONTROLLER_IP} \
 
           docker run -i -v /tmp:/tmp opnfv/cperf:$DOCKER_TAG ${robot_cmd} ${robot_suite}
 
+- builder:
+    name: cperf-robot-netvirt-csit
+    builders:
+      - shell:
+          !include-raw: ./cperf-robot-netvirt-csit.sh
+
 - builder:
     name: cperf-cleanup
     builders:
diff --git a/jjb/cperf/cperf-prepare-robot.sh b/jjb/cperf/cperf-prepare-robot.sh
new file mode 100755 (executable)
index 0000000..d88c6d5
--- /dev/null
@@ -0,0 +1,32 @@
+#!/usr/bin/env bash
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+if [ -z "${RC_FILE_PATH:-}" ]; then
+  undercloud_mac=$(sudo virsh domiflist undercloud | grep default | \
+                   grep -Eo "[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+")
+  INSTALLER_IP=$(/usr/sbin/arp -e | grep ${undercloud_mac} | awk {'print $1'})
+  sudo scp -o StrictHostKeyChecking=no root@$INSTALLER_IP:/home/stack/overcloudrc ${WORKSPACE}/overcloudrc
+else
+  cp -f $RC_FILE_PATH ${WORKSPACE}/overcloudrc
+fi
+
+sudo chmod 755 ${WORKSPACE}/overcloudrc
+source ${WORKSPACE}/overcloudrc
+
+# copy ssh key for robot
+
+if [ -z "${SSH_KEY_PATH:-}" ]; then
+  sudo scp -o StrictHostKeyChecking=no root@$INSTALLER_IP:/home/stack/.ssh/id_rsa ${WORKSPACE}/
+  sudo chown -R jenkins-ci:jenkins-ci ${WORKSPACE}/
+  # done with sudo. jenkins-ci is the user from this point
+  chmod 0600 ${WORKSPACE}/id_rsa
+else
+  cp -f ${SSH_KEY_PATH} ${WORKSPACE}/
+fi
+
+docker pull opnfv/cperf:$DOCKER_TAG
+
+sudo mkdir -p /tmp/robot_results
diff --git a/jjb/cperf/cperf-robot-netvirt-csit.sh b/jjb/cperf/cperf-robot-netvirt-csit.sh
new file mode 100755 (executable)
index 0000000..3ef7471
--- /dev/null
@@ -0,0 +1,105 @@
+#!/usr/bin/env bash
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+source ${WORKSPACE}/overcloudrc
+# note SDN_CONTROLLER_IP is set in overcloudrc, which is the VIP
+# for admin/public network (since we are running single network deployment)
+
+if [ "$OS_VERSION" == 'master' ]; then
+  FULL_OS_VER='master'
+else
+  FULL_OS_VER="stable/${OS_VERSION}"
+fi
+
+if [ "$ODL_BRANCH" == 'master' ]; then
+  ODL_STREAM='fluorine'
+else
+  ODL_STREAM=${ODL_BRANCH}
+fi
+
+NUM_CONTROL_NODES=$(python ./parse-node-yaml.py num_nodes --file $NODE_FILE_PATH)
+NUM_COMPUTE_NODES=$(python ./parse-node-yaml.py num_nodes --node-type compute --file $NODE_FILE_PATH)
+
+idx=1
+EXTRA_ROBOT_ARGS=""
+for idx in `seq 1 $NUM_CONTROL_NODES`; do
+  CONTROLLER_IP=$(python ./parse-node-yaml.py get_value -k address --node-number ${idx} --file $NODE_FILE_PATH)
+  EXTRA_ROBOT_ARGS+=" -v ODL_SYSTEM_${idx}_IP:${CONTROLLER_IP} \
+                      -v OS_CONTROL_NODE_${idx}_IP:${CONTROLLER_IP} \
+                      -v HA_PROXY_${idx}_IP:${SDN_CONTROLLER_IP}"
+done
+
+idx=1
+for idx in `seq 1 $NUM_COMPUTE_NODES`; do
+  COMPUTE_IP=$(python ./parse-node-yaml.py get_value -k address --node-type compute --node-number ${idx} --file $NODE_FILE_PATH)
+  EXTRA_ROBOT_ARGS+=" -v OS_COMPUTE_${idx}_IP:${COMPUTE_IP}"
+done
+
+CONTROLLER_1_IP=$(python ./parse-node-yaml.py get_value -k address --node-number 1 --file $NODE_FILE_PATH)
+
+if [ "$ODL_CONTAINERIZED" == 'false' ]; then
+  EXTRA_ROBOT_ARGS+=" -v NODE_KARAF_COUNT_COMMAND:'ps axf | grep org.apache.karaf | grep -v grep | wc -l || echo 0' \
+                      -v NODE_START_COMMAND:'sudo systemctl start opendaylight_api' \
+                      -v NODE_KILL_COMMAND:'sudo systemctl stop opendaylight_api' \
+                      -v NODE_STOP_COMMAND:'sudo systemctl stop opendaylight_api' \
+                      -v NODE_FREEZE_COMMAND:'sudo systemctl stop opendaylight_api' "
+else
+  EXTRA_ROBOT_ARGS+=" -v NODE_KARAF_COUNT_COMMAND:\"sudo docker exec opendaylight_api /bin/bash -c 'ps axf | \
+                                grep org.apache.karaf | grep -v grep | wc -l' || echo 0\" \
+                      -v NODE_START_COMMAND:\"sudo docker start opendaylight_api\" \
+                      -v NODE_KILL_COMMAND:\"sudo docker stop opendaylight_api\" \
+                      -v NODE_STOP_COMMAND:\"sudo docker stop opendaylight_api\" \
+                      -v NODE_FREEZE_COMMAND:\"sudo docker stop opendaylight_api\" "
+fi
+
+robot_cmd="pybot \
+  --removekeywords wuks \
+  --xunit robotxunit.xml \
+  -c critical \
+  -e exclude \
+  -d /tmp/robot_results \
+  -v BUNDLEFOLDER:/opt/opendaylight \
+  -v CONTROLLER_USER:heat-admin \
+  -v DEFAULT_LINUX_PROMPT:\$ \
+  -v DEFAULT_LINUX_PROMPT_STRICT:]\$ \
+  -v DEFAULT_USER:heat-admin \
+  -v DEVSTACK_DEPLOY_PATH:/tmp \
+  -v HA_PROXY_IP:$SDN_CONTROLLER_IP \
+  -v NUM_ODL_SYSTEM:$NUM_CONTROL_NODES \
+  -v NUM_OS_SYSTEM:$NUM_CONTROL_NODES \
+  -v NUM_TOOLS_SYSTEM:0 \
+  -v ODL_SNAT_MODE:conntrack \
+  -v ODL_STREAM:$ODL_STREAM \
+  -v ODL_SYSTEM_IP:$CONTROLLER_1_IP \
+  -v OS_CONTROL_NODE_IP:$CONTROLLER_1_IP \
+  -v OPENSTACK_BRANCH:$FULL_OS_VER \
+  -v OS_USER:heat-admin \
+  -v ODL_ENABLE_L3_FWD:yes \
+  -v ODL_SYSTEM_USER:heat-admin \
+  -v ODL_SYSTEM_PROMPT:\$ \
+  -v PRE_CLEAN_OPENSTACK_ALL:True \
+  -v PUBLIC_PHYSICAL_NETWORK:datacentre \
+  -v RESTCONFPORT:8081 \
+  -v ODL_RESTCONF_USER:admin \
+  -v ODL_RESTCONF_PASSWORD:admin \
+  -v KARAF_PROMPT_LOGIN:'opendaylight-user' \
+  -v KARAF_PROMPT:'opendaylight-user.*root.*>' \
+  -v SECURITY_GROUP_MODE:stateful \
+  -v USER:heat-admin \
+  -v USER_HOME:\$HOME \
+  -v TOOLS_SYSTEM_IP:'' \
+  -v NODE_ROLE_INDEX_START:0 \
+  -v WORKSPACE:/tmp  \
+  $EXTRA_ROBOT_ARGS \
+  -v of_port:6653 "
+
+docker run -i --net=host \
+  -v ${WORKSPACE}/id_rsa:/tmp/id_rsa \
+  -v ${WORKSPACE}/overcloudrc:/tmp/overcloudrc \
+  opnfv/cperf:$DOCKER_TAG \
+  /bin/bash -c "source /tmp/overcloudrc; mkdir -p \$HOME/.ssh; cp /tmp/id_rsa \$HOME/.ssh; \
+  $robot_cmd /home/opnfv/repos/odl_test/csit/suites/openstack/connectivity/l2.robot;"
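
As a rough illustration (node counts and addresses below are hypothetical), with two controller nodes and one compute node the loops above would extend EXTRA_ROBOT_ARGS roughly as follows:

    # -v ODL_SYSTEM_1_IP:192.0.2.11 -v OS_CONTROL_NODE_1_IP:192.0.2.11 -v HA_PROXY_1_IP:$SDN_CONTROLLER_IP
    # -v ODL_SYSTEM_2_IP:192.0.2.12 -v OS_CONTROL_NODE_2_IP:192.0.2.12 -v HA_PROXY_2_IP:$SDN_CONTROLLER_IP
    # -v OS_COMPUTE_1_IP:192.0.2.21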
diff --git a/jjb/cperf/parse-node-yaml.py b/jjb/cperf/parse-node-yaml.py
new file mode 100644 (file)
index 0000000..5a75755
--- /dev/null
@@ -0,0 +1,71 @@
+##############################################################################
+# Copyright (c) 2018 Tim Rozet (trozet@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import argparse
+import sys
+import yaml
+
+
+def get_node_data_by_number(node_type, node_number):
+    node_idx = 1
+    for node_name, node_data in data['servers'].items():
+        if node_type == node_data['type']:
+            if node_idx == node_number:
+                return node_name, node_data
+            node_idx += 1
+    return None, None
+
+
+def get_node_value(node_type, node_number, key):
+    node_name, node_data = get_node_data_by_number(node_type, node_number)
+    if not key and node_name is not None:
+        return node_name
+    elif node_data and isinstance(node_data, dict) and key in node_data:
+        return node_data[key]
+
+
+def get_number_of_nodes(node_type):
+    nodes = data['servers']
+    num_nodes = 0
+    for node_name, node_data in nodes.items():
+        if node_data['type'] == node_type:
+            num_nodes += 1
+    return num_nodes
+
+
+FUNCTION_MAP = {'num_nodes':
+                {'func': get_number_of_nodes,
+                 'args': ['node_type']},
+                'get_value':
+                    {'func': get_node_value,
+                     'args': ['node_type', 'node_number', 'key']},
+                }
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument('command', choices=FUNCTION_MAP.keys())
+    parser.add_argument('-f', '--file',
+                        dest='node_file',
+                        required=True)
+    parser.add_argument('--node-type',
+                        default='controller',
+                        required=False)
+    parser.add_argument('--node-number',
+                        default=1,
+                        type=int,
+                        required=False)
+    parser.add_argument('-k', '--key',
+                        required=False)
+    args = parser.parse_args(sys.argv[1:])
+    with open(args.node_file, 'r') as fh:
+        data = yaml.safe_load(fh)
+    assert 'servers' in data
+    func = FUNCTION_MAP[args.command]['func']
+    args = [getattr(args, x) for x in FUNCTION_MAP[args.command]['args']]
+    print(func(*args))
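
A minimal sketch of the node file this helper expects and how the CSIT script calls it; the file path and addresses are made up for illustration, but the 'servers', 'type' and 'address' keys are the ones parse-node-yaml.py and cperf-robot-netvirt-csit.sh actually read:

    cat > /tmp/node.yaml <<'EOF'
    servers:
      overcloud-controller-0:
        type: controller
        address: 192.0.2.11
      overcloud-novacompute-0:
        type: compute
        address: 192.0.2.21
    EOF
    python parse-node-yaml.py num_nodes --file /tmp/node.yaml                            # 1 (controller is the default type)
    python parse-node-yaml.py num_nodes --node-type compute --file /tmp/node.yaml        # 1
    python parse-node-yaml.py get_value -k address --node-number 1 --file /tmp/node.yaml # 192.0.2.11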
diff --git a/jjb/dovetail-webportal/dovetail-webportal-project-jobs.yaml b/jjb/dovetail-webportal/dovetail-webportal-project-jobs.yaml
new file mode 100644 (file)
index 0000000..f7d3213
--- /dev/null
@@ -0,0 +1,101 @@
+---
+###################################################
+# Non-CI jobs for the Dovetail web portal project
+# They will only be enabled on request by projects!
+###################################################
+- project:
+    name: dovetail-webportal-project-jobs
+
+    project: 'dovetail-webportal'
+
+    jobs:
+      - 'dovetail-webportal-verify-{stream}'
+      - 'dovetail-webportal-merge-{stream}'
+
+    stream:
+      - master:
+          branch: '{stream}'
+          disabled: false
+
+################################
+# job templates
+################################
+
+- job-template:
+    name: 'dovetail-webportal-verify-{stream}'
+
+    disabled: '{obj:disabled}'
+
+    parameters:
+      - project-parameter:
+          project: '{project}'
+          branch: '{branch}'
+      - 'opnfv-build-ubuntu-defaults'
+
+    scm:
+      - git-scm-gerrit
+
+    triggers:
+      - gerrit:
+          server-name: 'gerrit.opnfv.org'
+          trigger-on:
+            - patchset-created-event:
+                exclude-drafts: 'false'
+                exclude-trivial-rebase: 'false'
+                exclude-no-code-change: 'false'
+            - draft-published-event
+            - comment-added-contains-event:
+                comment-contains-value: 'recheck'
+            - comment-added-contains-event:
+                comment-contains-value: 'reverify'
+          projects:
+            - project-compare-type: 'ANT'
+              project-pattern: '{project}'
+              branches:
+                - branch-compare-type: 'ANT'
+                  branch-pattern: '**/{branch}'
+    builders:
+      - dovetail-webportal-hello-world
+
+- job-template:
+    name: 'dovetail-webportal-merge-{stream}'
+
+    disabled: '{obj:disabled}'
+
+    parameters:
+      - project-parameter:
+          project: '{project}'
+          branch: '{branch}'
+      - 'opnfv-build-ubuntu-defaults'
+
+    scm:
+      - git-scm
+
+    triggers:
+      - gerrit:
+          server-name: 'gerrit.opnfv.org'
+          trigger-on:
+            - change-merged-event
+            - comment-added-contains-event:
+                comment-contains-value: 'remerge'
+          projects:
+            - project-compare-type: 'ANT'
+              project-pattern: '{project}'
+              branches:
+                - branch-compare-type: 'ANT'
+                  branch-pattern: '**/{branch}'
+
+    builders:
+      - dovetail-webportal-hello-world
+
+################################
+# builders for dovetail project
+###############################
+- builder:
+    name: dovetail-webportal-hello-world
+    builders:
+      - shell: |
+          #!/bin/bash
+          set -o errexit
+
+          echo "hello world"
diff --git a/jjb/functest/functest-alpine.sh b/jjb/functest/functest-alpine.sh
index cf63bb5..7adfdff 100755 (executable)
@@ -59,18 +59,16 @@ run_test() {
     echo 0 > ${ret_val_file}
     # Determine which Functest image should be used for the test case
     case ${test_name} in
-        connection_check|api_check|snaps_health_check)
+        connection_check|tenantnetwork1|tenantnetwork2|vmready1|vmready2|singlevm1|singlevm2|vping_ssh|vping_userdata|cinder_test|odl|api_check|snaps_health_check)
             FUNCTEST_IMAGE=${REPO}/functest-healthcheck:${DOCKER_TAG} ;;
-        vping_ssh|vping_userdata|cinder_test|tempest_smoke_serial|rally_sanity|refstack_defcore|patrole|odl|snaps_smoke|neutron_trunk)
+        tempest_smoke|rally_sanity|refstack_defcore|patrole|shaker|snaps_smoke|neutron_trunk|barbican)
             FUNCTEST_IMAGE=${REPO}/functest-smoke:${DOCKER_TAG} ;;
-        tempest_full_parallel|rally_full)
+        tempest_full|rally_full)
             FUNCTEST_IMAGE=${REPO}/functest-components:${DOCKER_TAG} ;;
         cloudify_ims|vyos_vrouter|juju_epc)
             FUNCTEST_IMAGE=${REPO}/functest-vnf:${DOCKER_TAG} ;;
         doctor-notification|bgpvpn|functest-odl-sfc|barometercollectd|fds)
             FUNCTEST_IMAGE=${REPO}/functest-features:${DOCKER_TAG} ;;
-        parser-basics)
-            FUNCTEST_IMAGE=${REPO}/functest-parser:${DOCKER_TAG} ;;
         *)
             echo "Unkown test case $test_name"
             exit 1
diff --git a/jjb/functest/functest-daily-jobs.yaml b/jjb/functest/functest-daily-jobs.yaml
index 0300b99..1239db5 100644 (file)
             - 'vping_ssh'
             - 'vping_userdata'
             - 'cinder_test'
-            - 'tempest_smoke_serial'
+            - 'tempest_smoke'
             - 'rally_sanity'
             - 'refstack_defcore'
             - 'patrole'
             - 'functest-odl-sfc'
             - 'barometercollectd'
             - 'fds'
-            - 'tempest_full_parallel'
+            - 'tempest_full'
             - 'rally_full'
             - 'cloudify_ims'
             - 'vyos_vrouter'
diff --git a/jjb/releng/opnfv-docker.sh b/jjb/releng/opnfv-docker.sh
index 70baf16..e647641 100644 (file)
@@ -20,12 +20,20 @@ echo
 function remove_containers_images()
 {
     # Remove previous running containers if exist
-    if [[ -n "$(docker ps -a | grep $DOCKER_REPO_NAME)" ]]; then
+    #
+    # $ docker ps -a
+    # CONTAINER ID        IMAGE                            COMMAND      ...
+    # 6a796ed40b8e        opnfv/compass-tasks:latest       "/bin/bash"  ...
+    # 99fcb59f4787        opnfv/compass-tasks-base:latest  "/bin/bash"  ...
+    # cc5eee16b995        opnfv/compass-tasks-k8s          "/bin/bash"  ...
+    #
+    # Cut image name by leading space and ending space or colon(tag)
+    if [[ -n "$(docker ps -a | grep " $DOCKER_REPO_NAME[ :]")" ]]; then
         echo "Removing existing $DOCKER_REPO_NAME containers..."
-        docker ps -a | grep $DOCKER_REPO_NAME | awk '{print $1}' | xargs docker rm -f
+        docker ps -a | grep " $DOCKER_REPO_NAME[ :]" | awk '{print $1}' | xargs docker rm -f
         t=60
         # Wait max 60 sec for containers to be removed
-        while [[ $t -gt 0 ]] && [[ -n "$(docker ps| grep $DOCKER_REPO_NAME)" ]]; do
+        while [[ $t -gt 0 ]] && [[ -n "$(docker ps| grep " $DOCKER_REPO_NAME[ :]")" ]]; do
             sleep 1
             let t=t-1
         done
@@ -33,12 +41,20 @@ function remove_containers_images()
 
 
     # Remove existing images if exist
-    if [[ -n "$(docker images | grep $DOCKER_REPO_NAME)" ]]; then
+    #
+    # $ docker images
+    # REPOSITORY                    TAG                 IMAGE ID        ...
+    # opnfv/compass-tasks           latest              6501569fd328    ...
+    # opnfv/compass-tasks-base      latest              8764fe29c434    ...
+    # opnfv/compass-tasks-k8s       latest              61094cac9e65    ...
+    #
+    # Cut image name by start of line and ending space
+    if [[ -n "$(docker images | grep "^$DOCKER_REPO_NAME ")" ]]; then
         echo "Docker images to remove:"
-        docker images | head -1 && docker images | grep $DOCKER_REPO_NAME
-        image_ids=($(docker images | grep $DOCKER_REPO_NAME | awk '{print $3}'))
+        docker images | head -1 && docker images | grep "^$DOCKER_REPO_NAME "
+        image_ids=($(docker images | grep "^$DOCKER_REPO_NAME " | awk '{print $3}'))
         for id in "${image_ids[@]}"; do
-            if [[ -n "$(docker images|grep $DOCKER_REPO_NAME|grep $id)" ]]; then
+            if [[ -n "$(docker images|grep "^$DOCKER_REPO_NAME "|grep $id)" ]]; then
                 echo "Removing docker image $DOCKER_REPO_NAME:$id..."
                 docker rmi -f $id
             fi
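
A quick way to sanity-check the anchored patterns, assuming the image names from the comments above (illustrative only):

    printf '%s\n' 'opnfv/compass-tasks      latest  6501569fd328' \
                  'opnfv/compass-tasks-k8s  latest  61094cac9e65' \
      | grep "^opnfv/compass-tasks "   # matches only the first line; the -base/-k8s variants are skipped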
diff --git a/jjb/releng/opnfv-docker.yaml b/jjb/releng/opnfv-docker.yaml
index c73bae3..0d06598 100644 (file)
           project: 'dovetail'
           <<: *master
           <<: *other-receivers
+      - 'dovetail-webportal-api':
+          project: 'dovetail-webportal'
+          dockerfile: 'Dockerfile.api'
+          <<: *master
+          <<: *other-receivers
+      - 'dovetail-webportal-web':
+          project: 'dovetail-webportal'
+          dockerfile: 'Dockerfile.web'
+          <<: *master
+          <<: *other-receivers
       - 'nfvbench':
           project: 'nfvbench'
           <<: *master
           project: 'yardstick'
           <<: *fraser
           <<: *other-receivers
-      # projects with jobs for danube
-      - 'dovetail':
-          project: 'dovetail'
-          <<: *danube
-          <<: *other-receivers
       # projects with jobs for fraser
       - 'bottlenecks':
           project: 'bottlenecks'
diff --git a/jjb/releng/opnfv-utils.yaml b/jjb/releng/opnfv-utils.yaml
index 19fb4b5..1d50eb4 100644 (file)
@@ -41,6 +41,7 @@
       # yamllint disable rule:line-length
       - shell: |
           #!/bin/bash
+          sudo systemctl restart docker
           (docker ps -q; docker ps -aq) | sort | uniq -u | xargs --no-run-if-empty docker rm
           docker images -f dangling=true -q | xargs --no-run-if-empty docker rmi
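
The cleanup pipeline relies on uniq -u keeping only IDs that occur once; running containers are printed by both 'docker ps -q' and 'docker ps -aq', so only stopped ones survive and get removed. A commented sketch of the same idea:

    # docker ps -q    -> IDs of running containers (appear twice once the two listings are concatenated)
    # docker ps -aq   -> IDs of all containers
    # sort | uniq -u  -> IDs seen exactly once = stopped/exited containers, which are then passed to 'docker rm'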