Merge "[dovetail] Run mandatory and optional test cases with 2 jobs"
author  mei mei <meimei@huawei.com>
Wed, 18 Jul 2018 06:31:46 +0000 (06:31 +0000)
committer  Gerrit Code Review <gerrit@opnfv.org>
Wed, 18 Jul 2018 06:31:46 +0000 (06:31 +0000)
14 files changed:
jjb/3rd_party_ci/detect-snapshot.sh [new file with mode: 0755]
jjb/3rd_party_ci/install-netvirt.sh
jjb/3rd_party_ci/odl-netvirt.yaml
jjb/apex/apex-snapshot-deploy.sh
jjb/apex/apex.yaml
jjb/apex/scenarios.yaml.hidden
jjb/cperf/cperf-ci-jobs.yaml
jjb/cperf/cperf-prepare-robot.sh [new file with mode: 0755]
jjb/cperf/cperf-robot-netvirt-csit.sh [new file with mode: 0755]
jjb/cperf/parse-node-yaml.py [new file with mode: 0644]
jjb/dovetail-webportal/dovetail-webportal-project-jobs.yaml
jjb/dovetail/dovetail-ci-jobs.yaml
jjb/dovetail/dovetail-run.sh
jjb/releng/opnfv-utils.yaml

diff --git a/jjb/3rd_party_ci/detect-snapshot.sh b/jjb/3rd_party_ci/detect-snapshot.sh
new file mode 100755 (executable)
index 0000000..4949cb2
--- /dev/null
@@ -0,0 +1,31 @@
+#!/bin/bash
+set -o errexit
+set -o nounset
+set -o pipefail
+
+echo "Detecting requested OpenStack branch and topology type in gerrit comment"
+parsed_comment=$(echo $GERRIT_EVENT_COMMENT_TEXT | sed -n 's/^.*check-opnfv\s*//p')
+parsed_comment=$(echo $parsed_comment | sed -n 's/\s*$//p')
+if [ ! -z "$parsed_comment" ]; then
+  if echo $parsed_comment | grep -E '^[a-z]+-(no)?ha'; then
+    IFS='-' read -r -a array <<< "$parsed_comment"
+    os_version=${array[0]}
+    topo=${array[1]}
+    echo "OS version detected in gerrit comment: ${os_version}"
+    echo "Topology type detected in gerrit comment: ${topo}"
+  else
+    echo "Invalid format given for scenario in gerrit comment: ${parsed_comment}...aborting"
+    exit 1
+  fi
+else
+  echo "No scenario given in gerrit comment, will use default (master OpenStack, noha)"
+  os_version='master'
+  topo='noha'
+fi
+
+echo "Writing variables to file"
+cat > detected_snapshot << EOI
+OS_VERSION=$os_version
+TOPOLOGY=$topo
+SNAP_CACHE=$HOME/snap_cache/$os_version/$topo
+EOI
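
For reference, a worked example of the parsing above (the comment text and scenario are hypothetical): a gerrit comment ending in "check-opnfv queens-noha" leaves parsed_comment="queens-noha", so os_version=queens and topo=noha, and the builder writes

    OS_VERSION=queens
    TOPOLOGY=noha
    SNAP_CACHE=$HOME/snap_cache/queens/noha

to detected_snapshot, which the odl-netvirt.yaml change below injects as build parameters for the downstream phases.
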
diff --git a/jjb/3rd_party_ci/install-netvirt.sh b/jjb/3rd_party_ci/install-netvirt.sh
index 07bbe77..232d60e 100755 (executable)
@@ -3,7 +3,7 @@ set -o errexit
 set -o nounset
 set -o pipefail
 
-SNAP_CACHE=$HOME/snap_cache
+SNAP_CACHE=$HOME/snap_cache/$OS_VERSION/$TOPOLOGY
 # clone opnfv sdnvpn repo
 git clone https://gerrit.opnfv.org/gerrit/p/sdnvpn.git $WORKSPACE/sdnvpn
 
diff --git a/jjb/3rd_party_ci/odl-netvirt.yaml b/jjb/3rd_party_ci/odl-netvirt.yaml
index c78de9b..62e4ca1 100644 (file)
     builders:
       - description-setter:
           description: "Built on $NODE_NAME"
+      - detect-opnfv-snapshot
+      - inject:
+          properties-file: detected_snapshot
       - multijob:
           name: create-apex-vms
           condition: SUCCESSFUL
                 NETVIRT_ARTIFACT=$NETVIRT_ARTIFACT
                 APEX_ENV_NUMBER=$APEX_ENV_NUMBER
                 GERRIT_EVENT_COMMENT_TEXT=$GERRIT_EVENT_COMMENT_TEXT
+                TOPOLOGY=$TOPOLOGY
+                OS_VERSION=$OS_VERSION
               node-parameters: true
               kill-phase-on: FAILURE
               abort-all-job: true
             - name: 'odl-netvirt-verify-virtual-install-netvirt-{stream}'
               current-parameters: false
               predefined-parameters: |
-                ODL_BRANCH={branch}
+                ODL_BRANCH=$BRANCH
                 BRANCH=$BRANCH
                 GERRIT_REFSPEC=$GERRIT_REFSPEC
                 GERRIT_CHANGE_NUMBER=$GERRIT_CHANGE_NUMBER
                 GERRIT_PATCHSET_NUMBER=$GERRIT_PATCHSET_NUMBER
                 GERRIT_PATCHSET_REVISION=$GERRIT_PATCHSET_REVISION
                 NETVIRT_ARTIFACT=$NETVIRT_ARTIFACT
+                TOPOLOGY=$TOPOLOGY
+                OS_VERSION=$OS_VERSION
               node-parameters: true
               kill-phase-on: FAILURE
               abort-all-job: true
           projects:
             - name: 'functest-netvirt-virtual-suite-master'
               predefined-parameters: |
-                DEPLOY_SCENARIO=os-odl-nofeature-ha
+                DEPLOY_SCENARIO=os-odl-nofeature-$TOPOLOGY
                 FUNCTEST_MODE=testcase
                 FUNCTEST_SUITE_NAME=tempest_smoke
                 RC_FILE_PATH=$HOME/cloner-info/overcloudrc
               node-parameters: true
               kill-phase-on: FAILURE
               abort-all-job: false
+      - multijob:
+          name: csit
+          condition: ALWAYS
+          projects:
+            - name: cperf-apex-csit-{stream}
+              predefined-parameters: |
+                ODL_BRANCH=$BRANCH
+                RC_FILE_PATH=$SNAP_CACHE/overcloudrc
+                NODE_FILE_PATH=$SNAP_CACHE/node.yaml
+                SSH_KEY_PATH=$SNAP_CACHE/id_rsa
+                ODL_CONTAINERIZED=false
+                OS_VERSION=$OS_VERSION
+              node-parameters: true
+              kill-phase-on: NEVER
+              abort-all-job: false
       - multijob:
           name: postprocess
           condition: ALWAYS
     builders:
       - shell:
           !include-raw: ./postprocess-netvirt.sh
+
+- builder:
+    name: 'detect-opnfv-snapshot'
+    builders:
+      - shell:
+          !include-raw: ./detect-snapshot.sh
diff --git a/jjb/apex/apex-snapshot-deploy.sh b/jjb/apex/apex-snapshot-deploy.sh
index 0a47506..9738ecb 100644 (file)
@@ -25,27 +25,7 @@ pushd ci > /dev/null
 sudo opnfv-clean
 popd > /dev/null
 
-echo "Detecting requested OpenStack branch and topology type in gerrit comment"
-parsed_comment=$(echo $GERRIT_EVENT_COMMENT_TEXT | sed -n 's/^opnfv-check\s*//p')
-parsed_comment=$(echo $parsed_comment | sed -n 's/\s*$//p')
-if [ ! -z "$parsed_comment" ]; then
-  if echo $parsed_comment | grep -E '^[a-z]+-(no)?ha'; then
-    IFS='-' read -r -a array <<< "$parsed_comment"
-    os_version=${array[0]}
-    topo=${array[1]}
-    echo "OS version detected in gerrit comment: ${os_version}"
-    echo "Topology type detected in gerrit comment: ${topo}"
-  else
-    echo "Invalid format given for scenario in gerrit comment: ${parsed_comment}...aborting"
-    exit 1
-  fi
-else
-  echo "No scenario given in gerrit comment, will use default (master OpenStack, noha)"
-  os_version='master'
-  topo='noha'
-fi
-
-full_snap_url=http://$GS_URL/${os_version}/${topo}
+full_snap_url=http://$GS_URL/${OS_VERSION}/${TOPOLOGY}
 
 echo "Downloading latest snapshot properties file"
 if ! wget -O $WORKSPACE/opnfv.properties ${full_snap_url}/snapshot.properties; then
@@ -61,7 +41,7 @@ if [ -z "$latest_snap_checksum" ]; then
 fi
 
 local_snap_checksum=""
-SNAP_CACHE=${SNAP_CACHE}/${os_version}/${topo}
+SNAP_CACHE=${SNAP_CACHE}/${OS_VERSION}/${TOPOLOGY}
 
 # check snap cache directory exists
 # if snapshot cache exists, find the checksum
diff --git a/jjb/apex/apex.yaml b/jjb/apex/apex.yaml
index 45f6fd8..720d5bd 100644 (file)
           <<: *master
       - 'os-odl-queens-ha':
           <<: *master
+      - 'k8s-nosdn-nofeature-noha':
+          <<: *master
       - 'os-nosdn-nofeature-noha':
           <<: *euphrates
       - 'os-nosdn-nofeature-ha':
               kill-phase-on: NEVER
               abort-all-job: true
               git-revision: false
+            - name: 'apex-k8s-nosdn-nofeature-noha-baremetal-master'
+              node-parameters: false
+              current-parameters: false
+              predefined-parameters: |
+                OPNFV_CLEAN=yes
+              kill-phase-on: NEVER
+              abort-all-job: true
+              git-revision: false
 
 # euphrates Builder
 - builder:
diff --git a/jjb/apex/scenarios.yaml.hidden b/jjb/apex/scenarios.yaml.hidden
index 71a6c3d..3474fdb 100644 (file)
@@ -7,6 +7,7 @@ master:
   - 'os-nosdn-queens-ha'
   - 'os-odl-queens-noha'
   - 'os-odl-queens-ha'
+  - 'k8s-nosdn-nofeature-noha'
 fraser:
   - 'os-nosdn-nofeature-noha'
   - 'os-nosdn-nofeature-ha'
diff --git a/jjb/cperf/cperf-ci-jobs.yaml b/jjb/cperf/cperf-ci-jobs.yaml
index fdd3509..59afb89 100644 (file)
@@ -9,47 +9,29 @@
     # -------------------------------
     # BRANCH ANCHORS
     # -------------------------------
-    master: &master
-      stream: master
-      branch: '{stream}'
-      gs-pathname: ''
-      docker-tag: 'latest'
-    danube: &danube
-      stream: danube
-      branch: 'stable/{stream}'
-      gs-pathname: '/{stream}'
-      docker-tag: 'stable'
+    stream: master
+    branch: '{stream}'
+    gs-pathname: ''
+    docker-tag: 'latest'
 
-    # -------------------------------
-    # POD, INSTALLER, AND BRANCH MAPPING
-    # -------------------------------
-    pod:
-      # -------------------------------
-      #        master
-      # -------------------------------
-      - intel-pod2:
-          installer: apex
-          <<: *master
-      - intel-pod2:
-          installer: apex
-          <<: *danube
+    installer: apex
 
     testsuite:
-      - 'daily'
+      - csit
+      - cbench
 
     jobs:
-      - 'cperf-{installer}-{pod}-{testsuite}-{stream}'
+      - 'cperf-{installer}-{testsuite}-{stream}'
 
 ################################
 # job template
 ################################
 - job-template:
-    name: 'cperf-{installer}-{pod}-{testsuite}-{stream}'
+    name: 'cperf-{installer}-{testsuite}-{stream}'
 
     concurrent: true
 
     properties:
-      - logrotate-default
       - throttle:
           enabled: true
           max-per-node: 1
 
     wrappers:
       - build-name:
-          name: '$BUILD_NUMBER Suite: $CPERF_SUITE_NAME Scenario: $DEPLOY_SCENARIO'
+          name: '$BUILD_NUMBER Suite: $CPERF_SUITE_NAME ODL BRANCH: $ODL_BRANCH'
       - timeout:
           timeout: 400
           abort: true
 
     parameters:
-      - project-parameter:
-          project: '{project}'
-          branch: '{branch}'
-      - '{pod}-defaults'
-      - '{installer}-defaults'
       - cperf-parameter:
           testsuite: '{testsuite}'
           gs-pathname: '{gs-pathname}'
           docker-tag: '{docker-tag}'
-
-    scm:
-      - git-scm
+          stream: '{stream}'
 
     builders:
       - 'cperf-{testsuite}-builder'
           name: CPERF_SUITE_NAME
           default: '{testsuite}'
           description: "Suite name to run"
+      - string:
+          name: ODL_BRANCH
+          default: 'master'
+          description: "Branch that OpenDaylight is running"
+      - string:
+          name: OS_VERSION
+          default: 'master'
+          description: "OpenStack version (short name, no stable/ prefix)"
       - string:
           name: GS_PATHNAME
           default: '{gs-pathname}'
           name: DOCKER_TAG
           default: '{docker-tag}'
           description: 'Tag to pull docker image'
+      - string:
+          name: RC_FILE_PATH
+          default: ''
+          description: "Path to the OS credentials file if given"
+      - string:
+          name: SSH_KEY_PATH
+          default: ''
+          description: "Path to the private SSH key to access OPNFV nodes"
+      - string:
+          name: NODE_FILE_PATH
+          default: ''
+          description: "Path to the yaml file describing overcloud nodes"
+      - string:
+          name: ODL_CONTAINERIZED
+          default: 'true'
+          description: "boolean set true if ODL on overcloud is a container"
 
 ########################
 # trigger macros
 # builder macros
 ########################
 - builder:
-    name: cperf-daily-builder
+    name: cperf-csit-builder
+    builders:
+      - 'cperf-cleanup'
+      - 'cperf-prepare-robot'
+      - 'cperf-robot-netvirt-csit'
+
+- builder:
+    name: cperf-cbench-builder
     builders:
       - 'cperf-cleanup'
+      - 'cperf-prepare-robot'
       - 'cperf-robot-cbench'
 
+- builder:
+    name: cperf-prepare-robot
+    builders:
+      - shell:
+          !include-raw: ./cperf-prepare-robot.sh
+
 - builder:
     name: cperf-robot-cbench
     builders:
           set -o errexit
           set -o nounset
           set -o pipefail
-          undercloud_mac=$(sudo virsh domiflist undercloud | grep default | \
-                            grep -Eo "[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+")
-          INSTALLER_IP=$(/usr/sbin/arp -e | grep ${undercloud_mac} | awk {'print $1'})
-
-          sudo scp -o StrictHostKeyChecking=no root@$INSTALLER_IP:/home/stack/overcloudrc /tmp/overcloudrc
-          sudo chmod 755 /tmp/overcloudrc
-          source /tmp/overcloudrc
-
-          # robot suites need the ssh key to log in to controller nodes, so throwing it
-          # in tmp, and mounting /tmp as $HOME as far as robot is concerned
-          sudo rm -rf /tmp/.ssh
-          sudo mkdir /tmp/.ssh
-          sudo chmod 0700 /tmp/.ssh
-          sudo scp -o StrictHostKeyChecking=no root@$INSTALLER_IP:/home/stack/.ssh/id_rsa /tmp/.ssh/
-          sudo chown -R jenkins-ci:jenkins-ci /tmp/.ssh
-          # done with sudo. jenkins-ci is the user from this point
-          chmod 0600 /tmp/.ssh/id_rsa
 
           # cbench requires the openflow drop test feature to be installed.
           sshpass -p karaf ssh -o StrictHostKeyChecking=no \
                                -p 8101 karaf@$SDN_CONTROLLER_IP \
                                 feature:install odl-openflowplugin-flow-services-ui odl-openflowplugin-drop-test
 
-          docker pull opnfv/cperf:$DOCKER_TAG
-
           robot_cmd="pybot -e exclude -L TRACE -d /tmp \
                       -v ODL_SYSTEM_1_IP:${SDN_CONTROLLER_IP} \
                       -v ODL_SYSTEM_IP:${SDN_CONTROLLER_IP} \
 
           docker run -i -v /tmp:/tmp opnfv/cperf:$DOCKER_TAG ${robot_cmd} ${robot_suite}
 
+- builder:
+    name: cperf-robot-netvirt-csit
+    builders:
+      - shell:
+          !include-raw: ./cperf-robot-netvirt-csit.sh
+
 - builder:
     name: cperf-cleanup
     builders:
diff --git a/jjb/cperf/cperf-prepare-robot.sh b/jjb/cperf/cperf-prepare-robot.sh
new file mode 100755 (executable)
index 0000000..d88c6d5
--- /dev/null
@@ -0,0 +1,32 @@
+#!/usr/bin/env bash
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+if [ -z "${RC_FILE_PATH:-}" ]; then
+  undercloud_mac=$(sudo virsh domiflist undercloud | grep default | \
+                   grep -Eo "[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+")
+  INSTALLER_IP=$(/usr/sbin/arp -e | grep ${undercloud_mac} | awk {'print $1'})
+  sudo scp -o StrictHostKeyChecking=no root@$INSTALLER_IP:/home/stack/overcloudrc ${WORKSPACE}/overcloudrc
+else
+  cp -f $RC_FILE_PATH ${WORKSPACE}/overcloudrc
+fi
+
+sudo chmod 755 ${WORKSPACE}/overcloudrc
+source ${WORKSPACE}/overcloudrc
+
+# copy ssh key for robot
+
+if [ -z "${SSH_KEY_PATH:-}" ]; then
+  sudo scp -o StrictHostKeyChecking=no root@$INSTALLER_IP:/home/stack/.ssh/id_rsa ${WORKSPACE}/
+  sudo chown -R jenkins-ci:jenkins-ci ${WORKSPACE}/
+  # done with sudo. jenkins-ci is the user from this point
+  chmod 0600 ${WORKSPACE}/id_rsa
+else
+  cp -f ${SSH_KEY_PATH} ${WORKSPACE}/
+fi
+
+docker pull opnfv/cperf:$DOCKER_TAG
+
+sudo mkdir -p /tmp/robot_results
diff --git a/jjb/cperf/cperf-robot-netvirt-csit.sh b/jjb/cperf/cperf-robot-netvirt-csit.sh
new file mode 100755 (executable)
index 0000000..3ef7471
--- /dev/null
@@ -0,0 +1,105 @@
+#!/usr/bin/env bash
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+source ${WORKSPACE}/overcloudrc
+# note SDN_CONTROLLER_IP is set in overcloudrc, which is the VIP
+# for admin/public network (since we are running single network deployment)
+
+if [ "$OS_VERSION" == 'master' ]; then
+  FULL_OS_VER='master'
+else
+  FULL_OS_VER="stable/${OS_VERSION}"
+fi
+
+if [ "$ODL_BRANCH" == 'master' ]; then
+  ODL_STREAM='fluorine'
+else
+  ODL_STREAM=${ODL_BRANCH}
+fi
+
+NUM_CONTROL_NODES=$(python ./parse-node-yaml.py num_nodes --file $NODE_FILE_PATH)
+NUM_COMPUTE_NODES=$(python ./parse-node-yaml.py num_nodes --node-type compute --file $NODE_FILE_PATH)
+
+idx=1
+EXTRA_ROBOT_ARGS=""
+for idx in `seq 1 $NUM_CONTROL_NODES`; do
+  CONTROLLER_IP=$(python ./parse-node-yaml.py get_value -k address --node-number ${idx} --file $NODE_FILE_PATH)
+  EXTRA_ROBOT_ARGS+=" -v ODL_SYSTEM_${idx}_IP:${CONTROLLER_IP} \
+                      -v OS_CONTROL_NODE_${idx}_IP:${CONTROLLER_IP} \
+                      -v ODL_SYSTEM_${idx}_IP:${CONTROLLER_IP} \
+                      -v HA_PROXY_${idx}_IP:${SDN_CONTROLLER_IP}"
+done
+
+idx=1
+for idx in `seq 1 $NUM_COMPUTE_NODES`; do
+  COMPUTE_IP=$(python ./parse-node-yaml.py get_value -k address --node-type compute --node-number ${idx} --file $NODE_FILE_PATH)
+  EXTRA_ROBOT_ARGS+=" -v OS_COMPUTE_${idx}_IP:${COMPUTE_IP}"
+done
+
+CONTROLLER_1_IP=$(python ./parse-node-yaml.py get_value -k address --node-number 1 --file $NODE_FILE_PATH)
+
+if [ "$ODL_CONTAINERIZED" == 'false' ]; then
+  EXTRA_ROBOT_ARGS+=" -v NODE_KARAF_COUNT_COMMAND:'ps axf | grep org.apache.karaf | grep -v grep | wc -l || echo 0' \
+                      -v NODE_START_COMMAND:'sudo systemctl start opendaylight_api' \
+                      -v NODE_KILL_COMMAND:'sudo systemctl stop opendaylight_api' \
+                      -v NODE_STOP_COMMAND:'sudo systemctl stop opendaylight_api' \
+                      -v NODE_FREEZE_COMMAND:'sudo systemctl stop opendaylight_api' "
+else
+  EXTRA_ROBOT_ARGS+=" -v NODE_KARAF_COUNT_COMMAND:\"sudo docker exec opendaylight_api /bin/bash -c 'ps axf | \
+                                grep org.apache.karaf | grep -v grep | wc -l' || echo 0\" \
+                      -v NODE_START_COMMAND:\"sudo docker start opendaylight_api\" \
+                      -v NODE_KILL_COMMAND:\"sudo docker stop opendaylight_api\" \
+                      -v NODE_STOP_COMMAND:\"sudo docker stop opendaylight_api\" \
+                      -v NODE_FREEZE_COMMAND:\"sudo docker stop opendaylight_api\" "
+fi
+
+robot_cmd="pybot \
+  --removekeywords wuks \
+  --xunit robotxunit.xml \
+  -c critical \
+  -e exclude \
+  -d /tmp/robot_results \
+  -v BUNDLEFOLDER:/opt/opendaylight \
+  -v CONTROLLER_USER:heat-admin \
+  -v DEFAULT_LINUX_PROMPT:\$ \
+  -v DEFAULT_LINUX_PROMPT_STRICT:]\$ \
+  -v DEFAULT_USER:heat-admin \
+  -v DEVSTACK_DEPLOY_PATH:/tmp \
+  -v HA_PROXY_IP:$SDN_CONTROLLER_IP \
+  -v NUM_ODL_SYSTEM:$NUM_CONTROL_NODES \
+  -v NUM_OS_SYSTEM:$NUM_CONTROL_NODES \
+  -v NUM_TOOLS_SYSTEM:0 \
+  -v ODL_SNAT_MODE:conntrack \
+  -v ODL_STREAM:$ODL_STREAM \
+  -v ODL_SYSTEM_IP:$CONTROLLER_1_IP \
+  -v OS_CONTROL_NODE_IP:$CONTROLLER_1_IP \
+  -v OPENSTACK_BRANCH:$FULL_OS_VER \
+  -v OS_USER:heat-admin \
+  -v ODL_ENABLE_L3_FWD:yes \
+  -v ODL_SYSTEM_USER:heat-admin \
+  -v ODL_SYSTEM_PROMPT:\$ \
+  -v PRE_CLEAN_OPENSTACK_ALL:True \
+  -v PUBLIC_PHYSICAL_NETWORK:datacentre \
+  -v RESTCONFPORT:8081 \
+  -v ODL_RESTCONF_USER:admin \
+  -v ODL_RESTCONF_PASSWORD:admin \
+  -v KARAF_PROMPT_LOGIN:'opendaylight-user' \
+  -v KARAF_PROMPT:'opendaylight-user.*root.*>' \
+  -v SECURITY_GROUP_MODE:stateful \
+  -v USER:heat-admin \
+  -v USER_HOME:\$HOME \
+  -v TOOLS_SYSTEM_IP:'' \
+  -v NODE_ROLE_INDEX_START:0 \
+  -v WORKSPACE:/tmp  \
+  $EXTRA_ROBOT_ARGS \
+  -v of_port:6653 "
+
+docker run -i --net=host \
+  -v ${WORKSPACE}/id_rsa:/tmp/id_rsa \
+  -v ${WORKSPACE}/overcloudrc:/tmp/overcloudrc \
+  opnfv/cperf:$DOCKER_TAG \
+  /bin/bash -c "source /tmp/overcloudrc; mkdir -p \$HOME/.ssh; cp /tmp/id_rsa \$HOME/.ssh; \
+  $robot_cmd /home/opnfv/repos/odl_test/csit/suites/openstack/connectivity/l2.robot;"
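
To illustrate the two loops above, assume a node file with one controller at 192.0.2.10 and one compute at 192.0.2.20 (hypothetical addresses); EXTRA_ROBOT_ARGS would then expand to roughly

    -v ODL_SYSTEM_1_IP:192.0.2.10 -v OS_CONTROL_NODE_1_IP:192.0.2.10
    -v HA_PROXY_1_IP:$SDN_CONTROLLER_IP -v OS_COMPUTE_1_IP:192.0.2.20

plus either the systemd or the docker NODE_*_COMMAND overrides, depending on ODL_CONTAINERIZED.
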
diff --git a/jjb/cperf/parse-node-yaml.py b/jjb/cperf/parse-node-yaml.py
new file mode 100644 (file)
index 0000000..5a75755
--- /dev/null
@@ -0,0 +1,71 @@
+##############################################################################
+# Copyright (c) 2018 Tim Rozet (trozet@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import argparse
+import sys
+import yaml
+
+
+def get_node_data_by_number(node_type, node_number):
+    node_idx = 1
+    for node_name, node_data in data['servers'].items():
+        if node_type == node_data['type']:
+            if node_idx == node_number:
+                return node_name, node_data
+            else:
+                node_idx += 1
+
+
+def get_node_value(node_type, node_number, key):
+    node_name, node_data = get_node_data_by_number(node_type, node_number)
+    if not key and node_name is not None:
+        return node_name
+    elif node_data and isinstance(node_data, dict) and key in node_data:
+        return node_data[key]
+
+
+def get_number_of_nodes(node_type):
+    nodes = data['servers']
+    num_nodes = 0
+    for node_name, node_data in nodes.items():
+        if node_data['type'] == node_type:
+            num_nodes += 1
+    return num_nodes
+
+
+FUNCTION_MAP = {'num_nodes':
+                {'func': get_number_of_nodes,
+                 'args': ['node_type']},
+                'get_value':
+                    {'func': get_node_value,
+                     'args': ['node_type', 'node_number', 'key']},
+                }
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument('command', choices=FUNCTION_MAP.keys())
+    parser.add_argument('-f', '--file',
+                        dest='node_file',
+                        required=True)
+    parser.add_argument('--node-type',
+                        default='controller',
+                        required=False)
+    parser.add_argument('--node-number',
+                        default=1,
+                        type=int,
+                        required=False)
+    parser.add_argument('-k', '--key',
+                        required=False)
+    args = parser.parse_args(sys.argv[1:])
+    with open(args.node_file, 'r') as fh:
+        data = yaml.safe_load(fh)
+    assert 'servers' in data
+    func = FUNCTION_MAP[args.command]['func']
+    args = [getattr(args, x) for x in FUNCTION_MAP[args.command]['args']]
+    print(func(*args))
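
For illustration, the node file parse-node-yaml.py expects is a YAML document with a top-level "servers" map whose entries carry at least a "type" and an "address" key (names and addresses below are hypothetical):

    servers:
      overcloud-controller-0:
        type: controller
        address: 192.0.2.10
      overcloud-novacompute-0:
        type: compute
        address: 192.0.2.20

With such a file, "python parse-node-yaml.py num_nodes --file node.yaml" prints 1 (controller is the default node type) and "python parse-node-yaml.py get_value -k address --node-number 1 --file node.yaml" prints 192.0.2.10.
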
diff --git a/jjb/dovetail-webportal/dovetail-webportal-project-jobs.yaml b/jjb/dovetail-webportal/dovetail-webportal-project-jobs.yaml
index 8e09942..f7d3213 100644 (file)
@@ -55,7 +55,7 @@
                 - branch-compare-type: 'ANT'
                   branch-pattern: '**/{branch}'
     builders:
-      - dovetail-unit-tests
+      - dovetail-webportal-hello-world
 
 - job-template:
     name: 'dovetail-webportal-merge-{stream}'
diff --git a/jjb/dovetail/dovetail-ci-jobs.yaml b/jjb/dovetail/dovetail-ci-jobs.yaml
index a95617a..8d47616 100644 (file)
       - 'default'
       - 'proposed_tests'
 
+    testarea:
+      - 'mandatory'
+      - 'optional'
+
     jobs:
       - 'dovetail-{SUT}-{pod}-{testsuite}-{stream}'
+      - 'dovetail-{SUT}-{pod}-{testsuite}-{testarea}-{stream}'
 
 ################################
 # job templates
           name: TESTSUITE
           default: '{testsuite}'
           description: "dovetail testsuite to run"
+      - string:
+          name: TESTAREA
+          default: 'all'
+          description: "dovetail testarea to run"
+      - string:
+          name: DOVETAIL_REPO_DIR
+          default: "/home/opnfv/dovetail"
+          description: "Directory where the dovetail repository is cloned"
+      - string:
+          name: SUT_BRANCH
+          default: '{branch}'
+          description: "SUT branch"
+
+    scm:
+      - git-scm
+
+    builders:
+      - description-setter:
+          description: "POD: $NODE_NAME"
+      - 'dovetail-cleanup'
+      - 'dovetail-run'
+
+    publishers:
+      - archive:
+          artifacts: 'results/**/*'
+          allow-empty: true
+          fingerprint: true
+      - email-jenkins-admins-on-failure
+
+- job-template:
+    name: 'dovetail-{SUT}-{pod}-{testsuite}-{testarea}-{stream}'
+
+    disabled: false
+
+    concurrent: true
+
+    properties:
+      - logrotate-default
+      - throttle:
+          enabled: true
+          max-per-node: 1
+          option: 'project'
+
+    wrappers:
+      - build-name:
+          name: '$BUILD_NUMBER - Scenario: $DEPLOY_SCENARIO'
+      - timeout:
+          timeout: 300
+          abort: true
+      - fix-workspace-permissions
+
+    triggers:
+      - '{auto-trigger-name}'
+
+    parameters:
+      - project-parameter:
+          project: '{project}'
+          branch: '{dovetail-branch}'
+      - '{SUT}-defaults'
+      - '{slave-label}-defaults'
+      - string:
+          name: DEPLOY_SCENARIO
+          default: 'os-nosdn-nofeature-ha'
+      - string:
+          name: DOCKER_TAG
+          default: '{docker-tag}'
+          description: 'Tag to pull dovetail docker image'
+      - string:
+          name: CI_DEBUG
+          default: 'true'
+          description: "Show debug output information"
+      - string:
+          name: TESTSUITE
+          default: '{testsuite}'
+          description: "dovetail testsuite to run"
+      - string:
+          name: TESTAREA
+          default: '{testarea}'
+          description: "dovetail testarea to run"
       - string:
           name: DOVETAIL_REPO_DIR
           default: "/home/opnfv/dovetail"
diff --git a/jjb/dovetail/dovetail-run.sh b/jjb/dovetail/dovetail-run.sh
index 2a9c73c..c92ebba 100755 (executable)
@@ -130,8 +130,18 @@ if [[ ! "${SUT_BRANCH}" =~ "danube" && ${INSTALLER_TYPE} == 'fuel' && ${DEPLOY_T
     fuel_ctl_ip=$(ssh 2>/dev/null ${fuel_ctl_ssh_options} "${ssh_user}@${INSTALLER_IP}" \
             "sudo salt 'cfg*' pillar.get _param:openstack_control_address --out text| \
                 cut -f2 -d' '")
+    fuel_cmp_ip=$(ssh 2>/dev/null ${fuel_ctl_ssh_options} "${ssh_user}@${INSTALLER_IP}" \
+            "sudo salt 'cmp001*' pillar.get _param:openstack_control_address --out text| \
+                cut -f2 -d' '")
+    fuel_dbs_ip=$(ssh 2>/dev/null ${fuel_ctl_ssh_options} "${ssh_user}@${INSTALLER_IP}" \
+            "sudo salt 'dbs01*' pillar.get _param:openstack_control_address --out text| \
+                cut -f2 -d' '")
+    fuel_msg_ip=$(ssh 2>/dev/null ${fuel_ctl_ssh_options} "${ssh_user}@${INSTALLER_IP}" \
+            "sudo salt 'msg01*' pillar.get _param:openstack_control_address --out text| \
+                cut -f2 -d' '")
     ipmi_index=$(ssh 2>/dev/null ${fuel_ctl_ssh_options} "${ssh_user}@${INSTALLER_IP}" \
             "sudo salt 'ctl*' network.ip_addrs cidr=${fuel_ctl_ip} --out text | grep ${fuel_ctl_ip} | cut -c 5")
+
     organization="$(cut -d'-' -f1 <<< "${NODE_NAME}")"
     pod_name="$(cut -d'-' -f2 <<< "${NODE_NAME}")"
     pdf_file=${pharos_repo}/labs/${organization}/${pod_name}.yaml
@@ -148,7 +158,9 @@ nodes:
    role: Jumpserver, user: ${ssh_user}}
 - {ip: ${fuel_ctl_ip}, name: node1, key_filename: /home/opnfv/userconfig/pre_config/id_rsa,
    role: controller, user: ${ssh_user}, ipmi_ip: ${ipmiIp}, ipmi_user: ${ipmiUser}, ipmi_password: ${ipmiPass}}
-
+- {ip: ${fuel_msg_ip}, name: msg01, key_filename: /home/opnfv/userconfig/pre_config/id_rsa, role: controller, user: ${ssh_user}}
+- {ip: ${fuel_cmp_ip}, name: cmp01, key_filename: /home/opnfv/userconfig/pre_config/id_rsa, role: controller, user: ${ssh_user}}
+- {ip: ${fuel_dbs_ip}, name: dbs01, key_filename: /home/opnfv/userconfig/pre_config/id_rsa, role: controller, user: ${ssh_user}}
 EOF
 fi
 
@@ -193,15 +205,20 @@ if [ -f ${DOVETAIL_CONFIG}/pod.yaml ]; then
     sudo chmod 666 ${DOVETAIL_CONFIG}/pod.yaml
     echo "Adapt process info for $INSTALLER_TYPE ..."
     if [ "$INSTALLER_TYPE" == "apex" ]; then
-        attack_process='rabbitmq_server'
-    else
-        attack_process='rabbitmq'
-    fi
-    cat << EOF >> ${DOVETAIL_CONFIG}/pod.yaml
+        cat << EOF >> ${DOVETAIL_CONFIG}/pod.yaml
 process_info:
-- {testcase_name: dovetail.ha.rabbitmq, attack_process: ${attack_process}}
-
+- {testcase_name: dovetail.ha.rabbitmq, attack_process: rabbitmq_server}
 EOF
+    elif [ "$INSTALLER_TYPE" == "fuel" ]; then
+        cat << EOF >> ${DOVETAIL_CONFIG}/pod.yaml
+process_info:
+- {testcase_name: dovetail.ha.cinder_api, attack_process: cinder_wsgi}
+- {testcase_name: dovetail.ha.rabbitmq, attack_process: rabbitmq-server, attack_host: msg01}
+- {testcase_name: dovetail.ha.neutron_l3_agent, attack_process: neutron-l3-agent, attack_host: cmp01}
+- {testcase_name: dovetail.ha.database, attack_process: mysqld, attack_host: dbs01}
+EOF
+    fi
+
     echo "file ${DOVETAIL_CONFIG}/pod.yaml:"
     cat ${DOVETAIL_CONFIG}/pod.yaml
 else
@@ -345,7 +362,17 @@ else
     testsuite="--testsuite ${TESTSUITE}"
 fi
 
-run_cmd="dovetail run ${testsuite} -d"
+if [[ ${TESTAREA} == 'mandatory' ]]; then
+    testarea='--mandatory'
+elif [[ ${TESTAREA} == 'optional' ]]; then
+    testarea="--optional"
+elif [[ ${TESTAREA} == 'all' ]]; then
+    testarea=""
+else
+    testarea="--testarea ${TESTAREA}"
+fi
+
+run_cmd="dovetail run ${testsuite} ${testarea} -d"
 echo "Container exec command: ${run_cmd}"
 docker exec $container_id ${run_cmd}
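
For illustration, with TESTSUITE=proposed_tests the container command assembled above becomes

    TESTAREA=mandatory  ->  dovetail run --testsuite proposed_tests --mandatory -d
    TESTAREA=optional   ->  dovetail run --testsuite proposed_tests --optional -d
    TESTAREA=all        ->  dovetail run --testsuite proposed_tests -d

which is what lets the new per-testarea job templates run mandatory and optional test cases as two separate jobs.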
 
diff --git a/jjb/releng/opnfv-utils.yaml b/jjb/releng/opnfv-utils.yaml
index 19fb4b5..1d50eb4 100644 (file)
@@ -41,6 +41,7 @@
       # yamllint disable rule:line-length
       - shell: |
           #!/bin/bash
+          sudo systemctl restart docker
           (docker ps -q; docker ps -aq) | sort | uniq -u | xargs --no-run-if-empty docker rm
           docker images -f dangling=true -q | xargs --no-run-if-empty docker rmi
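
A note on the cleanup one-liner above: "(docker ps -q; docker ps -aq) | sort | uniq -u" keeps only container IDs that appear exactly once across the two listings. For example (hypothetical IDs), if "docker ps -q" prints abc123 and "docker ps -aq" prints abc123 and def456, only def456, a stopped container, is passed to "docker rm"; dangling images are then pruned with "docker rmi".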