Merge "Add arm support in functest reporting pages"
author Morgan Richomme <morgan.richomme@orange.com>
Wed, 10 May 2017 06:04:25 +0000 (06:04 +0000)
committer Gerrit Code Review <gerrit@opnfv.org>
Wed, 10 May 2017 06:04:25 +0000 (06:04 +0000)
15 files changed:
jjb/3rd_party_ci/download-netvirt-artifact.sh
jjb/3rd_party_ci/odl-netvirt.yml
jjb/daisy4nfv/daisy4nfv-download-artifact.sh
jjb/dovetail/dovetail-ci-jobs.yml
jjb/fuel/fuel-daily-jobs.yml
jjb/functest/functest-loop.sh
jjb/functest/set-functest-env.sh
jjb/securedlab/check-jinja2.sh
jjb/xci/bifrost-provision.sh
jjb/xci/bifrost-verify.sh
prototypes/bifrost/playbooks/opnfv-virtual.yaml
prototypes/xci/file/install-ansible.sh [new file with mode: 0644]
prototypes/xci/xci-deploy.sh
utils/create_pod_file.py
utils/fetch_os_creds.sh

diff --git a/jjb/3rd_party_ci/download-netvirt-artifact.sh b/jjb/3rd_party_ci/download-netvirt-artifact.sh
index 6aea01d..7ecf8d7 100755 (executable)
@@ -6,11 +6,18 @@ set -o pipefail
 ODL_ZIP=distribution-karaf-0.6.0-SNAPSHOT.zip
 
 echo "Attempting to fetch the artifact location from ODL Jenkins"
-CHANGE_DETAILS_URL="https://git.opendaylight.org/gerrit/changes/netvirt~master~$GERRIT_CHANGE_ID/detail"
+if [ "$ODL_BRANCH" != 'master' ]; then
+  DIST=$(echo ${ODL_BRANCH} | sed -rn 's#([a-zA-Z]+)/([a-zA-Z]+)#\2#p')
+  ODL_BRANCH=$(echo ${ODL_BRANCH} | sed -rn 's#([a-zA-Z]+)/([a-zA-Z]+)#\1%2F\2#p')
+else
+  DIST='nitrogen'
+fi
+CHANGE_DETAILS_URL="https://git.opendaylight.org/gerrit/changes/netvirt~${ODL_BRANCH}~${GERRIT_CHANGE_ID}/detail"
 # due to a limitation with the Jenkins Gerrit Trigger, we need to use the Gerrit REST API to get the change details
-ODL_BUILD_JOB_NUM=$(curl -s $CHANGE_DETAILS_URL | grep -Eo 'netvirt-distribution-check-carbon/[0-9]+' | tail -1 | grep -Eo [0-9]+)
+ODL_BUILD_JOB_NUM=$(curl --fail -s ${CHANGE_DETAILS_URL} | grep -Eo "netvirt-distribution-check-${DIST}/[0-9]+" | tail -1 | grep -Eo [0-9]+)
+DISTRO_CHECK_CONSOLE_LOG="https://logs.opendaylight.org/releng/jenkins092/netvirt-distribution-check-${DIST}/${ODL_BUILD_JOB_NUM}/console.log.gz"
+NETVIRT_ARTIFACT_URL=$(curl --fail -s --compressed ${DISTRO_CHECK_CONSOLE_LOG} | grep 'BUNDLE_URL' | cut -d = -f 2)
 
-NETVIRT_ARTIFACT_URL="https://jenkins.opendaylight.org/releng/job/netvirt-distribution-check-carbon/${ODL_BUILD_JOB_NUM}/artifact/${ODL_ZIP}"
 echo -e "URL to artifact is\n\t$NETVIRT_ARTIFACT_URL"
 
 echo "Downloading the artifact. This could take time..."
diff --git a/jjb/3rd_party_ci/odl-netvirt.yml b/jjb/3rd_party_ci/odl-netvirt.yml
index 470e433..a937acb 100644 (file)
             branch: '{stream}'
             gs-pathname: ''
             disabled: false
+        - carbon:
+            branch: 'stable/carbon'
+            gs-pathname: ''
+            disabled: false
 #####################################
 # patch verification phases
 #####################################
                 - name: 'odl-netvirt-verify-virtual-install-netvirt-{stream}'
                   current-parameters: false
                   predefined-parameters: |
+                    ODL_BRANCH={branch}
                     BRANCH=$BRANCH
                     GERRIT_REFSPEC=$GERRIT_REFSPEC
                     GERRIT_CHANGE_NUMBER=$GERRIT_CHANGE_NUMBER
             name: functest
             condition: SUCCESSFUL
             projects:
-                - name: 'functest-netvirt-virtual-suite-{stream}'
+                - name: 'functest-netvirt-virtual-suite-master'
                   predefined-parameters: |
                     DEPLOY_SCENARIO=os-odl_l3-nofeature-ha
-                    FUNCTEST_SUITE_NAME=tempest_smoke_serial
+                    FUNCTEST_SUITE_NAME=odl_netvirt
                     RC_FILE_PATH=$HOME/cloner-info/overcloudrc
                   node-parameters: true
                   kill-phase-on: FAILURE
diff --git a/jjb/daisy4nfv/daisy4nfv-download-artifact.sh b/jjb/daisy4nfv/daisy4nfv-download-artifact.sh
index 1cc0443..a64c80e 100755 (executable)
@@ -57,12 +57,18 @@ fi
 
 # log info to console
 echo "Downloading the $INSTALLER_TYPE artifact using URL http://$OPNFV_ARTIFACT_URL"
-echo "This could take some time..."
+echo "This could take some time... Now the time is $(date -u)"
 echo "--------------------------------------------------------"
 echo
 
 # download the file
-curl -L -s -o $WORKSPACE/opnfv.bin http://$OPNFV_ARTIFACT_URL > gsutil.bin.log 2>&1
+if [[ "$NODE_NAME" =~ (zte) ]] && [ -x "$(command -v aria2c)" ]; then
+    DOWNLOAD_CMD="aria2c -x 3 --allow-overwrite=true -d $WORKSPACE -o opnfv.bin"
+else
+    DOWNLOAD_CMD="curl -L -s -o $WORKSPACE/opnfv.bin"
+fi
+
+$DOWNLOAD_CMD http://$OPNFV_ARTIFACT_URL > gsutil.bin.log 2>&1
 
 # list the file
 ls -al $WORKSPACE/opnfv.bin
diff --git a/jjb/dovetail/dovetail-ci-jobs.yml b/jjb/dovetail/dovetail-ci-jobs.yml
index 4998278..682948d 100644 (file)
             SUT: fuel
             auto-trigger-name: 'daily-trigger-disabled'
             <<: *master
+        - zte-pod1:
+            slave-label: zte-pod1
+            SUT: fuel
+            auto-trigger-name: 'daily-trigger-disabled'
+            <<: *master
+        - zte-pod2:
+            slave-label: zte-pod2
+            SUT: fuel
+            auto-trigger-name: 'daily-trigger-disabled'
+            <<: *master
+        - zte-pod3:
+            slave-label: zte-pod3
+            SUT: fuel
+            auto-trigger-name: 'daily-trigger-disabled'
+            <<: *master
+        - zte-pod1:
+            slave-label: zte-pod1
+            SUT: fuel
+            auto-trigger-name: 'daily-trigger-disabled'
+            <<: *danube
+        - zte-pod3:
+            slave-label: zte-pod3
+            SUT: fuel
+            auto-trigger-name: 'daily-trigger-disabled'
+            <<: *danube
 #--------------------------------
     testsuite:
         - 'debug'
diff --git a/jjb/fuel/fuel-daily-jobs.yml b/jjb/fuel/fuel-daily-jobs.yml
index 32abad6..2fa8687 100644 (file)
@@ -73,8 +73,8 @@
             auto-trigger-name: 'fuel-{scenario}-{pod}-daily-{stream}-trigger'
         - 'os-odl_l2-sfc-ha':
             auto-trigger-name: 'fuel-{scenario}-{pod}-daily-{stream}-trigger'
-        - 'os-odl_l2-bgpvpn-ha':
-            auto-trigger-name: 'fuel-{scenario}-{pod}-daily-{stream}-trigger'
+        - 'os-odl_l2-bgpvpn-ha':
+        #    auto-trigger-name: 'fuel-{scenario}-{pod}-daily-{stream}-trigger'
         - 'os-nosdn-kvm-ha':
             auto-trigger-name: 'fuel-{scenario}-{pod}-daily-{stream}-trigger'
         - 'os-nosdn-ovs-ha':
     jobs:
         - 'fuel-{scenario}-{pod}-daily-{stream}'
         - 'fuel-deploy-{pod}-daily-{stream}'
+        - 'fuel-os-odl_l2-bgpvpn-ha-{pod}-daily-{stream}'
 
 ########################
 # job templates
         - email:
             recipients: peter.barabas@ericsson.com fzhadaev@mirantis.com
 
+- job-template:
+    name: 'fuel-os-odl_l2-bgpvpn-ha-{pod}-daily-{stream}'
+
+    disabled: '{obj:disabled}'
+
+    concurrent: false
+
+    properties:
+        - logrotate-default
+        - throttle:
+            enabled: true
+            max-total: 4
+            max-per-node: 1
+            option: 'project'
+        - build-blocker:
+            use-build-blocker: true
+            blocking-jobs:
+                - 'fuel-os-.*?-{pod}-daily-.*'
+                - 'fuel-os-.*?-{pod}-weekly-.*'
+            block-level: 'NODE'
+
+    wrappers:
+        - build-name:
+            name: '$BUILD_NUMBER - Scenario: os-odl_l2-bgpvpn-ha'
+
+    triggers:
+        - 'fuel-os-odl_l2-bgpvpn-ha-{pod}-daily-{stream}-trigger'
+
+    parameters:
+        - project-parameter:
+            project: '{project}'
+            branch: '{branch}'
+        - '{installer}-defaults'
+        - '{slave-label}-defaults':
+            installer: '{installer}'
+        - string:
+            name: DEPLOY_SCENARIO
+            default: "os-odl_l2-bgpvpn-ha"
+        - fuel-ci-parameter:
+            gs-pathname: '{gs-pathname}'
+
+    builders:
+        - description-setter:
+            description: "Built on $NODE_NAME"
+        - trigger-builds:
+            - project: 'fuel-deploy-{pod}-daily-{stream}'
+              current-parameters: false
+              predefined-parameters:
+                DEPLOY_SCENARIO=os-odl_l2-bgpvpn-ha
+              same-node: true
+              block: true
+        - trigger-builds:
+            - project: 'functest-fuel-{pod}-daily-{stream}'
+              current-parameters: false
+              predefined-parameters:
+                DEPLOY_SCENARIO=os-odl_l2-bgpvpn-ha
+              same-node: true
+              block: true
+              block-thresholds:
+                build-step-failure-threshold: 'never'
+                failure-threshold: 'never'
+                unstable-threshold: 'FAILURE'
+        - trigger-builds:
+            - project: 'yardstick-fuel-{pod}-daily-{stream}'
+              current-parameters: false
+              predefined-parameters:
+                DEPLOY_SCENARIO=os-odl_l2-bgpvpn-ha
+              block: true
+              same-node: true
+              block-thresholds:
+                build-step-failure-threshold: 'never'
+                failure-threshold: 'never'
+                unstable-threshold: 'FAILURE'
+        # 1. dovetail only tracks master for now; it is not synced with the A/B/C branches
+        # 2. here 'stream' means the SUT stream; the dovetail stream is defined in its own job
+        # 3. only the debug testsuite runs here (it includes basic test cases,
+        #    i.e. the refstack, ipv6 and vpn test cases from functest and the HA test
+        #    cases from yardstick)
+        # 4. not used for release criteria or compliance; it only serves to debug
+        #    dovetail tool bugs with the fuel bgpvpn scenario
+        - trigger-builds:
+            - project: 'dovetail-fuel-{pod}-proposed_tests-{stream}'
+              current-parameters: false
+              predefined-parameters:
+                DEPLOY_SCENARIO=os-odl_l2-bgpvpn-ha
+              block: true
+              same-node: true
+              block-thresholds:
+                build-step-failure-threshold: 'never'
+                failure-threshold: 'never'
+                unstable-threshold: 'FAILURE'
+
+    publishers:
+        - email:
+            recipients: peter.barabas@ericsson.com fzhadaev@mirantis.com matthew.lijun@huawei.com
+
+
 - job-template:
     name: 'fuel-deploy-{pod}-daily-{stream}'
 
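Editor's note: the new 'fuel-os-odl_l2-bgpvpn-ha-{pod}-daily-{stream}' template above serializes the deploy, functest, yardstick and dovetail builds on the same node via blocking trigger-builds. One common way to sanity-check JJB changes like this locally is jenkins-job-builder's test mode; a minimal sketch, assuming jenkins-job-builder is installed and using a hypothetical output directory:

    # Render all job templates under jjb/ to XML without contacting a Jenkins server
    jenkins-jobs test jjb/ -o /tmp/jjb-xml
    # Inspect the expanded bgpvpn jobs for the configured pod/stream combinations
    ls /tmp/jjb-xml | grep 'fuel-os-odl_l2-bgpvpn-ha'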
diff --git a/jjb/functest/functest-loop.sh b/jjb/functest/functest-loop.sh
index 893c428..869c395 100755 (executable)
@@ -1,15 +1,9 @@
 #!/bin/bash
 set +e
 
-branch=${GIT_BRANCH##*/}
-[[ "$PUSH_RESULTS_TO_DB" == "true" ]] && flags+="-r"
-if [[ "$BRANCH" =~ 'brahmaputra' ]]; then
-    cmd="${FUNCTEST_REPO_DIR}/docker/run_tests.sh -s ${flags}"
-elif [[ "$BRANCH" =~ 'colorado' ]]; then
-    cmd="python ${FUNCTEST_REPO_DIR}/ci/run_tests.py -t all ${flags}"
-else
-    cmd="python ${FUNCTEST_REPO_DIR}/functest/ci/run_tests.py -t all ${flags}"
-fi
+
+cmd="python ${FUNCTEST_REPO_DIR}/functest/ci/run_tests.py -t all ${flags}"
+
 container_id=$(docker ps -a | grep opnfv/functest | awk '{print $1}' | head -1)
 docker exec $container_id $cmd
 
diff --git a/jjb/functest/set-functest-env.sh b/jjb/functest/set-functest-env.sh
index 569f371..1acf0a2 100755 (executable)
@@ -112,12 +112,8 @@ if [ $(docker ps | grep "${FUNCTEST_IMAGE}:${DOCKER_TAG}" | wc -l) == 0 ]; then
     echo "The container ${FUNCTEST_IMAGE} with ID=${container_id} has not been properly started. Exiting..."
     exit 1
 fi
-if [[ "$BRANCH" =~ 'brahmaputra' ]]; then
-    cmd="${FUNCTEST_REPO_DIR}/docker/prepare_env.sh"
-elif [[ "$BRANCH" =~ 'colorado' ]]; then
-    cmd="python ${FUNCTEST_REPO_DIR}/ci/prepare_env.py start"
-else
-    cmd="python ${FUNCTEST_REPO_DIR}/functest/ci/prepare_env.py start"
-fi
+
+cmd="python ${FUNCTEST_REPO_DIR}/functest/ci/prepare_env.py start"
+
 echo "Executing command inside the docker: ${cmd}"
 docker exec ${container_id} ${cmd}
diff --git a/jjb/securedlab/check-jinja2.sh b/jjb/securedlab/check-jinja2.sh
index 84907e5..57650ec 100755 (executable)
@@ -5,5 +5,5 @@ for lab_configs in $(find labs/ -name 'pod.yaml'); do
         while IFS= read -r jinja_templates; do
           echo "./utils/generate_config.py -y $lab_configs -j $jinja_templates"
           ./utils/generate_config.py -y $lab_configs -j $jinja_templates
-        done < <(find installers/ -name 'pod_config.yaml.j2')
+        done < <(find installers/ -name '*.j2')
 done
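Editor's note on the broadened find pattern above: every Jinja2 template under installers/ is now rendered against each lab's pod.yaml, not only files named pod_config.yaml.j2. A rough sketch of the effect (the extra template name is hypothetical):

    # Before: only templates named exactly pod_config.yaml.j2 were exercised
    find installers/ -name 'pod_config.yaml.j2'
    # After: any *.j2 template is picked up, e.g. a hypothetical network_config.yaml.j2
    find installers/ -name '*.j2'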
diff --git a/jjb/xci/bifrost-provision.sh b/jjb/xci/bifrost-provision.sh
index 4724c2e..b37da90 100755 (executable)
@@ -82,13 +82,13 @@ sudo -E ./scripts/destroy-env.sh
 
 # provision VMs for the flavor
 cd /opt/bifrost
-sudo -E ./scripts/bifrost-provision.sh
+./scripts/bifrost-provision.sh
 
 # list the provisioned VMs
 cd /opt/bifrost
 source env-vars
 ironic node-list
-virsh list
+sudo -H -E virsh list
 
 echo "OpenStack nodes are provisioned!"
 # here we have to do something in order to capture what was the working sha1
diff --git a/jjb/xci/bifrost-verify.sh b/jjb/xci/bifrost-verify.sh
index 29af7ca..18019a7 100755 (executable)
@@ -117,10 +117,10 @@ sudo -H -E ./scripts/destroy-env.sh
 
 # provision 3 VMs; xcimaster, controller, and compute
 cd /opt/bifrost
-sudo -H -E ./scripts/bifrost-provision.sh
+./scripts/bifrost-provision.sh
 
 # list the provisioned VMs
 cd /opt/bifrost
 source env-vars
 ironic node-list
-virsh list
+sudo -H -E virsh list
diff --git a/prototypes/bifrost/playbooks/opnfv-virtual.yaml b/prototypes/bifrost/playbooks/opnfv-virtual.yaml
index 699c966..94de628 100644 (file)
       dib_packages: "{{ lookup('env', 'DIB_OS_PACKAGES') }}"
       when: create_image_via_dib | bool == true and transform_boot_image | bool == false
     - role: bifrost-keystone-client-config
-      # NOTE(hwoarang): This should be ansible_env.SUDO_USER like in the
-      # upstream playbook. However, we run ansible as root (ie with sudo)
-      # so clouds.yaml will be placed in the user's home directory (see
-      # the bifrost-keystone-client-config role) and then ansible will look
-      # for one in /root and fail. As such we hardcode the user to be 'root'.
-      user: "root"
+      user: "{{ ansible_env.SUDO_USER }}"
       clouds:
         bifrost:
           config_username: "{{ ironic.keystone.default_username }}"
diff --git a/prototypes/xci/file/install-ansible.sh b/prototypes/xci/file/install-ansible.sh
new file mode 100644 (file)
index 0000000..daa7f51
--- /dev/null
@@ -0,0 +1,136 @@
+#!/bin/bash
+# NOTE(hwoarang): Most parts of this file were taken from the
+# bifrost repository (scripts/install-deps.sh). This script contains all
+# the necessary distro-specific code to install ansible and its dependencies.
+
+set -eu
+
+declare -A PKG_MAP
+
+CHECK_CMD_PKGS=(
+    libffi
+    libopenssl
+    net-tools
+    python-devel
+)
+
+# Check zypper before apt-get in case zypper-aptitude
+# is installed
+if [ -x '/usr/bin/zypper' ]; then
+    OS_FAMILY="Suse"
+    INSTALLER_CMD="sudo -H -E zypper install -y"
+    CHECK_CMD="zypper search --match-exact --installed"
+    PKG_MAP=(
+        [gcc]=gcc
+        [git]=git
+        [libffi]=libffi-devel
+        [libopenssl]=libopenssl-devel
+        [net-tools]=net-tools
+        [python]=python
+        [python-devel]=python-devel
+        [venv]=python-virtualenv
+        [wget]=wget
+    )
+    EXTRA_PKG_DEPS=( python-xml )
+    # NOTE (cinerama): we can't install python without removing this package
+    # if it exists
+    if $(${CHECK_CMD} patterns-openSUSE-minimal_base-conflicts &> /dev/null); then
+        sudo -H zypper remove -y patterns-openSUSE-minimal_base-conflicts
+    fi
+elif [ -x '/usr/bin/apt-get' ]; then
+    OS_FAMILY="Debian"
+    INSTALLER_CMD="sudo -H -E apt-get -y install"
+    CHECK_CMD="dpkg -l"
+    PKG_MAP=( [gcc]=gcc
+              [git]=git
+              [libffi]=libffi-dev
+              [libopenssl]=libssl-dev
+              [net-tools]=net-tools
+              [python]=python-minimal
+              [python-devel]=libpython-dev
+              [venv]=python-virtualenv
+              [wget]=wget
+            )
+    EXTRA_PKG_DEPS=()
+elif [ -x '/usr/bin/dnf' ] || [ -x '/usr/bin/yum' ]; then
+    OS_FAMILY="RedHat"
+    PKG_MANAGER=$(which dnf || which yum)
+    INSTALLER_CMD="sudo -H -E ${PKG_MANAGER} -y install"
+    CHECK_CMD="rpm -q"
+    PKG_MAP=(
+        [gcc]=gcc
+        [git]=git
+        [libffi]=libffi-devel
+        [libopenssl]=openssl-devel
+        [net-tools]=net-tools
+        [python]=python
+        [python-devel]=python-devel
+        [venv]=python-virtualenv
+        [wget]=wget
+    )
+    EXTRA_PKG_DEPS=()
+else
+    echo "ERROR: Supported package manager not found.  Supported: apt,yum,zypper"
+fi
+
+if ! $(python --version &>/dev/null); then
+    ${INSTALLER_CMD} ${PKG_MAP[python]}
+fi
+if ! $(gcc -v &>/dev/null); then
+    ${INSTALLER_CMD} ${PKG_MAP[gcc]}
+fi
+if ! $(git --version &>/dev/null); then
+    ${INSTALLER_CMD} ${PKG_MAP[git]}
+fi
+if ! $(wget --version &>/dev/null); then
+    ${INSTALLER_CMD} ${PKG_MAP[wget]}
+fi
+
+for pkg in ${CHECK_CMD_PKGS[@]}; do
+    if ! $(${CHECK_CMD} ${PKG_MAP[$pkg]} &>/dev/null); then
+        ${INSTALLER_CMD} ${PKG_MAP[$pkg]}
+    fi
+done
+
+if [ -n "${EXTRA_PKG_DEPS-}" ]; then
+    for pkg in ${EXTRA_PKG_DEPS}; do
+        if ! $(${CHECK_CMD} ${pkg} &>/dev/null); then
+            ${INSTALLER_CMD} ${pkg}
+        fi
+    done
+fi
+
+# If we're using a venv, we need to work around sudo not
+# keeping the path even with -E.
+PYTHON=$(which python)
+
+# To install python packages, we need pip.
+#
+# We can't use the apt packaged version of pip since
+# older versions of pip are incompatible with
+# requests, one of our indirect dependencies (bug 1459947).
+#
+# Note(cinerama): We use pip to install an updated pip plus our
+# other python requirements. pip breakages can seriously impact us,
+# so we've chosen to install/upgrade pip here rather than in
+# requirements (which are synced automatically from the global ones)
+# so we can quickly and easily adjust version parameters.
+# See bug 1536627.
+#
+# Note(cinerama): If pip is linked to pip3, the rest of the install
+# won't work. Remove the alternatives. This is due to ansible's
+# python 2.x requirement.
+if [[ $(readlink -f /etc/alternatives/pip) =~ "pip3" ]]; then
+    sudo -H update-alternatives --remove pip $(readlink -f /etc/alternatives/pip)
+fi
+
+if ! which pip; then
+    wget -O /tmp/get-pip.py https://bootstrap.pypa.io/get-pip.py
+    sudo -H -E ${PYTHON} /tmp/get-pip.py
+fi
+
+PIP=$(which pip)
+
+sudo -H -E ${PIP} install "pip>6.0"
+
+pip install ansible==$XCI_ANSIBLE_PIP_VERSION
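Editor's note: the heart of install-ansible.sh is an associative array that maps a distro-neutral package alias to the distro-specific package name, so one install loop covers the Suse, Debian and RedHat families. A stripped-down sketch of that pattern, using the script's own Debian values as the example:

    #!/bin/bash
    # Map generic aliases to Debian package names (subset, for illustration)
    declare -A PKG_MAP=( [libopenssl]=libssl-dev [python-devel]=libpython-dev )
    CHECK_CMD="dpkg -l"
    INSTALLER_CMD="sudo -H -E apt-get -y install"
    for pkg in libopenssl python-devel; do
        # install only if the mapped package is not already present
        ${CHECK_CMD} ${PKG_MAP[$pkg]} &>/dev/null || ${INSTALLER_CMD} ${PKG_MAP[$pkg]}
    done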
diff --git a/prototypes/xci/xci-deploy.sh b/prototypes/xci/xci-deploy.sh
index 2fd9be0..718ed73 100755 (executable)
@@ -50,7 +50,7 @@ echo "-------------------------------------------------------------------------"
 #-------------------------------------------------------------------------------
 # Install ansible on localhost
 #-------------------------------------------------------------------------------
-pip install ansible==$XCI_ANSIBLE_PIP_VERSION
+source file/install-ansible.sh
 
 # TODO: The xci playbooks can be put into a playbook which will be done later.
 
diff --git a/utils/create_pod_file.py b/utils/create_pod_file.py
index 22943fc..7e30cc6 100644 (file)
@@ -63,17 +63,27 @@ def create_file(handler):
     node_list = []
     index = 1
     for node in nodes:
-        if node.roles[0].lower() == "controller":
-            node_info = {'name': "node%s" % index, 'role': node.roles[0],
-                         'ip': node.ip, 'user': 'root'}
+        try:
+            if node.roles[0].lower() == "controller":
+                node_info = {'name': "node%s" % index, 'role': node.roles[0],
+                             'ip': node.ip, 'user': 'root'}
+                node_list.append(node_info)
+                index += 1
+        except Exception:
+            node_info = {'name': node.name, 'role': 'unknown', 'ip': node.ip,
+                         'user': 'root'}
             node_list.append(node_info)
-            index += 1
     for node in nodes:
-        if node.roles[0].lower() == "compute":
-            node_info = {'name': "node%s" % index, 'role': node.roles[0],
-                         'ip': node.ip, 'user': 'root'}
+        try:
+            if node.roles[0].lower() == "compute":
+                node_info = {'name': "node%s" % index, 'role': node.roles[0],
+                             'ip': node.ip, 'user': 'root'}
+                node_list.append(node_info)
+                index += 1
+        except Exception:
+            node_info = {'name': node.name, 'role': 'unknown', 'ip': node.ip,
+                         'user': 'root'}
             node_list.append(node_info)
-            index += 1
     if args.INSTALLER_TYPE == 'compass':
         for item in node_list:
             item['password'] = 'root'
diff --git a/utils/fetch_os_creds.sh b/utils/fetch_os_creds.sh
index 3ec2d1e..6a382a5 100755 (executable)
@@ -138,7 +138,7 @@ elif [ "$installer_type" == "apex" ]; then
     if [ -f /root/.ssh/id_rsa ]; then
         chmod 600 /root/.ssh/id_rsa
     fi
-    sudo scp $ssh_options root@$installer_ip:/home/stack/overcloudrc_v3 $dest_path
+    sudo scp $ssh_options root@$installer_ip:/home/stack/overcloudrc.v3 $dest_path
 
 elif [ "$installer_type" == "compass" ]; then
     verify_connectivity $installer_ip