Merge "compass: Align scenario short names"
authorJun Li <matthew.lijun@huawei.com>
Sat, 16 Jan 2016 02:24:35 +0000 (02:24 +0000)
committerGerrit Code Review <gerrit@172.30.200.206>
Sat, 16 Jan 2016 02:24:35 +0000 (02:24 +0000)
jjb/apex/apex.yml
jjb/bottlenecks/bottlenecks-ci-jobs.yml [new file with mode: 0644]
jjb/functest/functest-ci-jobs.yml
jjb/joid/joid-deploy.sh
utils/test/result_collection_api/dashboard/qtip2Dashboard.py [new file with mode: 0644]

index e6d196a..8e2f511 100644 (file)
@@ -4,8 +4,8 @@
         - 'apex-verify-{stream}'
         - 'apex-merge-{stream}'
         - 'apex-build-{stream}'
-        - 'apex-deploy-virtual-{stream}'
-        - 'apex-deploy-baremetal-{stream}'
+        - 'apex-deploy-virtual-{scenario}-{stream}'
+        - 'apex-deploy-baremetal-{scenario}-{stream}'
         - 'apex-daily-{stream}'
 
     # stream:    branch with - in place of / (eg. stable-arno)
         - brahmaputra:
             branch: 'stable/brahmaputra'
             gs-pathname: '/brahmaputra'
+            disabled: true
 
     project: 'apex'
 
+    scenario:
+         - 'os-odl_l2-nofeature-ha'
+         - 'os-odl_l2-sfc-ha'
+         - 'os-odl_l3-nofeature-ha'
+         - 'os-onos-nofeature-ha'
+         - 'os-opencontrail-nofeature-ha'
+
 - job-template:
     name: 'apex-verify-{stream}'
 
@@ -28,7 +36,6 @@
     parameters:
         - apex-parameter:
             gs-pathname: '{gs-pathname}'
-            ARTIFACT_VERSION: 'dev'
         - project-parameter:
             project: '{project}'
         - gerrit-parameter:
 
     builders:
         - 'apex-build'
-        - 'apex-deploy-virtual'
+        - trigger-builds:
+          - project: 'apex-deploy-virtual-os-odl_l2-nofeature-ha-{stream}'
+            predefined-parameters:
+              BUILD_DIRECTORY=apex-verify-master/build_output
+            git-revision: false
+            block: true
+        - trigger-builds:
+          - project: 'apex-deploy-virtual-os-onos-nofeature-ha-{stream}'
+            predefined-parameters:
+              BUILD_DIRECTORY=apex-verify-master/build_output
+            git-revision: false
+            block: true
         - 'apex-workspace-cleanup'
 
 - job-template:
         - 'apex-workspace-cleanup'
 
 - job-template:
-    name: 'apex-deploy-virtual-{stream}'
+    name: 'apex-deploy-virtual-{scenario}-{stream}'
 
     # Job template for virtual deployment
     #
             project: '{project}'
         - apex-parameter:
             gs-pathname: '{gs-pathname}'
+        - string:
+            name: DEPLOY_SCENARIO
+            default: '{scenario}'
+            description: "Scenario to deploy with."
 
     properties:
         - build-blocker:
             use-build-blocker: true
             blocking-jobs:
-                - "apex-verify.*"
                 - "apex-deploy.*"
                 - "apex-build.*"
 
         - 'apex-workspace-cleanup'
 
 - job-template:
-    name: 'apex-deploy-baremetal-{stream}'
+    name: 'apex-deploy-baremetal-{scenario}-{stream}'
 
     # Job template for baremetal deployment
     #
             project: '{project}'
         - apex-parameter:
             gs-pathname: '{gs-pathname}'
+        - string:
+            name: DEPLOY_SCENARIO
+            default: '{scenario}'
+            description: "Scenario to deploy with."
 
     properties:
         - build-blocker:
             current-parameters: true
             block: true
         - trigger-builds:
-          - project: 'apex-deploy-baremetal-{stream}'
+          - project: 'apex-deploy-baremetal-os-odl_l2-nofeature-ha-{stream}'
+            predefined-parameters:
+              DEPLOY_SCENARIO="os-odl_l2-nofeature-ha"
             git-revision: true
             block: true
         - trigger-builds:
                 # specific artifact from artifacts.opnfv.org
                 RPM_INSTALL_PATH=$GS_URL/$ARTIFACT_NAME
             else
-                if [[ -f opnfv.properties ]]; then
+                if [[ -f ${BUILD_DIRECTORY}/../opnfv.properties ]]; then
                     # if opnfv.properties exists then use the
                     # local build. Source the file so we get local OPNFV vars
-                    source opnfv.properties
-                    RPM_INSTALL_PATH=build_output/$(basename $OPNFV_RPM_URL)
+                    source ${BUILD_DIRECTORY}/../opnfv.properties
+                    RPM_INSTALL_PATH=${BUILD_DIRECTORY}/$(basename $OPNFV_RPM_URL)
                 else
+                    if [[ $BUILD_DIRECTORY == *verify* ]]; then
+                      echo "BUILD_DIRECTORY is from a verify job, so will not use latest from URL"
+                      echo "Check that the slave has opnfv.properties in $BUILD_DIRECTORY"
+                      exit 1
+                    fi
                     # no opnfv.properties means use the latest from artifacts.opnfv.org
                     # get the latest.properties to get the link to the latest artifact
                     curl -s -o $WORKSPACE/opnfv.properties http://$GS_URL/latest.properties
                 fi
             fi
 
-            source opnfv.properties
-            RPM_INSTALL_PATH=build_output/$(basename $OPNFV_RPM_URL)
+            RPM_LIST=$RPM_INSTALL_PATH
+            for pkg in common undercloud; do
+                RPM_LIST+=" ${RPM_INSTALL_PATH/opnfv-apex/opnfv-apex-${pkg}}"
+            done
 
             # update / install the new rpm
             if rpm -q opnfv-apex > /dev/null; then
                if [ $(basename $OPNFV_RPM_URL) == $(rpm -q opnfv-apex).rpm ]; then
                  echo "RPM is already installed"
-               elif sudo yum update -y $RPM_INSTALL_PATH | grep "does not update installed package"; then
-                   sudo yum downgrade -y $RPM_INSTALL_PATH;
+               elif sudo yum update -y $RPM_LIST | grep "does not update installed package"; then
+                   if ! sudo yum downgrade -y $RPM_LIST; then
+                     sudo yum remove -y opnfv-undercloud opnfv-common
+                     sudo yum downgrade -y $RPM_INSTALL_PATH
+                   fi
                fi
             else
-               sudo yum install -y $RPM_INSTALL_PATH;
+               sudo yum install -y $RPM_LIST;
             fi
 
             # cleanup virtual machines before we start
             sudo opnfv-clean
             # initiate virtual deployment
-            if [ -e /usr/share/doc/opnfv/network_settings.yaml.example ]; then
-              sudo opnfv-deploy -v -d /usr/share/doc/opnfv/deploy_settings.yaml.example -n /usr/share/doc/opnfv/network_settings.yaml.example
+            if [ -e /etc/opnfv-apex/network_settings.yaml ]; then
+              if [ -n "$DEPLOY_SCENARIO" ]; then
+                echo "Deploy Scenario set to ${DEPLOY_SCENARIO}"
+                if [ -e /etc/opnfv-apex/${DEPLOY_SCENARIO}.yaml ]; then
+                  sudo opnfv-deploy -v -d /etc/opnfv-apex/${DEPLOY_SCENARIO}.yaml -n /etc/opnfv-apex/network_settings.yaml
+                else
+                  echo "File does not exist /etc/opnfv-apex/${DEPLOY_SCENARIO}.yaml"
+                  exit 1
+                fi
+              else
+                echo "Deploy scenario not set!"
+                exit 1
+              fi
             else
               sudo opnfv-deploy -v
             fi
                RPM_INSTALL_PATH=http://${OPNFV_RPM_URL}
             fi
 
+            RPM_LIST=$RPM_INSTALL_PATH
+            for pkg in common undercloud; do
+                RPM_LIST+=" ${RPM_INSTALL_PATH/opnfv-apex/opnfv-apex-${pkg}}"
+            done
+
             # update / install the new rpm
             if rpm -q opnfv-apex > /dev/null; then
                if [ $(basename $OPNFV_RPM_URL) == $(rpm -q opnfv-apex).rpm ]; then
                  echo "RPM is already installed"
-               elif sudo yum update -y $RPM_INSTALL_PATH | grep "does not update installed package"; then
-                   sudo yum downgrade -y $RPM_INSTALL_PATH;
+               elif sudo yum update -y $RPM_LIST | grep "does not update installed package"; then
+                   if ! sudo yum downgrade -y $RPM_LIST; then
+                     sudo yum remove -y opnfv-undercloud opnfv-common
+                     sudo yum downgrade -y $RPM_INSTALL_PATH
+                   fi
                fi
             else
-               sudo yum install -y $RPM_INSTALL_PATH;
+               sudo yum install -y $RPM_LIST;
             fi
 
             # cleanup environment before we start
             sudo opnfv-clean
             # initiate baremetal deployment
-            sudo opnfv-deploy -i  /root/inventory/pod_settings.yaml \
-            -d /usr/share/doc/opnfv/deploy_settings.yaml.example \
-            -n /root/network/network_settings.yaml
+            if [ -e /etc/opnfv-apex/network_settings.yaml ]; then
+              if [ -n "$DEPLOY_SCENARIO" ]; then
+                echo "Deploy Scenario set to ${DEPLOY_SCENARIO}"
+                if [ -e /etc/opnfv-apex/${DEPLOY_SCENARIO}.yaml ]; then
+                  sudo opnfv-deploy -i  /root/inventory/pod_settings.yaml \
+                  -d /etc/opnfv-apex/${DEPLOY_SCENARIO}.yaml \
+                  -n /root/network_settings.yaml
+                else
+                  echo "File does not exist /etc/opnfv-apex/${DEPLOY_SCENARIO}.yaml"
+                  exit 1
+                fi
+              else
+                echo "Deploy scenario not set!"
+                exit 1
+              fi
+            else
+              echo "File /etc/opnfv-apex/network_settings.yaml does not exist!"
+              exit 1
+            fi
 
             echo
             echo "--------------------------------------------------------"
diff --git a/jjb/bottlenecks/bottlenecks-ci-jobs.yml b/jjb/bottlenecks/bottlenecks-ci-jobs.yml
new file mode 100644 (file)
index 0000000..b4b736f
--- /dev/null
@@ -0,0 +1,205 @@
+####################################
+# job configuration for bottlenecks
+####################################
+- project:
+    name: bottlenecks-ci-jobs
+
+    project: 'bottlenecks'
+
+#--------------------------------
+# BRANCH ANCHORS
+#--------------------------------
+    master: &master
+        stream: master
+        branch: '{stream}'
+        gs-pathname: ''
+    brahmaputra: &brahmaputra
+        stream: brahmaputra
+        branch: 'stable/{stream}'
+        gs-pathname: '{stream}'
+#--------------------------------
+# POD, INSTALLER, AND BRANCH MAPPING
+#--------------------------------
+#        brahmaputra
+#--------------------------------
+    pod:
+        - opnfv-jump-2:
+            installer: fuel
+            <<: *brahmaputra
+        - intel-pod5:
+            installer: joid
+            <<: *brahmaputra
+        - huawei-us-deploy-bare-1:
+            installer: compass
+            <<: *brahmaputra
+#--------------------------------
+#        master
+#--------------------------------
+        - ericsson-pod1:
+            installer: fuel
+            <<: *master
+        - ericsson-pod2:
+            installer: fuel
+            <<: *master
+        - intel-pod6:
+            installer: joid
+            <<: *master
+        - intel-pod8:
+            installer: compass
+            <<: *master
+        - zte-build-1:
+            installer: fuel
+            <<: *master
+        - orange-pod2:
+            installer: joid
+            <<: *master
+#--------------------------------
+    suite:
+        - 'rubbos'
+        - 'vstf'
+
+    jobs:
+        - 'bottlenecks-{installer}-{suite}-{pod}-daily-{stream}'
+
+################################
+# job templates
+################################
+- job-template:
+    name: 'bottlenecks-{installer}-{suite}-{pod}-daily-{stream}'
+
+    disabled: false
+
+    wrappers:
+        - build-name:
+            name: '$BUILD_NUMBER - Scenario: $DEPLOY_SCENARIO'
+
+    parameters:
+        - project-parameter:
+            project: '{project}'
+        - '{pod}-defaults'
+        - '{installer}-defaults'
+        - 'bottlenecks-params-{pod}'
+        - string:
+            name: GERRIT_REFSPEC_DEBUG
+            default: ''
+            description: "Gerrit refspec for debug."
+
+    scm:
+        - git-scm:
+            credentials-id: '{ssh-credentials}'
+            refspec: ''
+            branch: '{branch}'
+
+    builders:
+        - 'bottlenecks-fetch-os-creds'
+        - 'bottlenecks-run-{suite}'
+
+    publishers:
+        - email:
+            recipients: hongbo.tianhongbo@huawei.com matthew.lijun@huawei.com liangqi1@huawei.com liyiting@huawei.com
+
+########################
+# builder macros
+########################
+#- builder:
+#    name: bottlenecks-fetch-os-creds
+#    builders:
+#        - shell:
+#            !include-raw ../../utils/fetch_os_creds.sh
+
+#- builder:
+#    name: bottlenecks-run-rubbos
+#    builders:
+#        - shell: |
+#            #!/bin/bash
+#            set -o errexit
+#
+#            echo "Bottlenecks: rubbos running now..."
+#            cd $WORKSPACE
+#            ./ci/run.sh $GERRIT_REFSPEC_DEBUG
+
+#- builder:
+#    name: bottlenecks-run-vstf
+#    builders:
+#        - shell: |
+#            #!/bin/bash
+#            set -o errexit
+
+#            echo "Bottlenecks: vstf running now..."
+#            cd $WORKSPACE
+#            ./ci/vstf_run.sh $GERRIT_REFSPEC_DEBUG
+
+########################
+# parameter macros
+########################
+- parameter:
+    name: 'bottlenecks-params-intel-pod5'
+    parameters:
+        - string:
+            name: BOTTLENECKS_DB_TARGET
+            default: '213.77.62.197'
+            description: 'Arguments to use in order to choose the backend DB'
+
+- parameter:
+    name: 'bottlenecks-params-intel-pod6'
+    parameters:
+        - string:
+            name: BOTTLENECKS_DB_TARGET
+            default: '213.77.62.197'
+            description: 'Arguments to use in order to choose the backend DB'
+
+- parameter:
+    name: 'bottlenecks-params-intel-pod8'
+    parameters:
+        - string:
+            name: BOTTLENECKS_DB_TARGET
+            default: '213.77.62.197'
+            description: 'Arguments to use in order to choose the backend DB'
+
+- parameter:
+    name: 'bottlenecks-params-ericsson-pod1'
+    parameters:
+        - string:
+            name: BOTTLENECKS_DB_TARGET
+            default: '213.77.62.197'
+            description: 'Arguments to use in order to choose the backend DB'
+
+- parameter:
+    name: 'bottlenecks-params-ericsson-pod2'
+    parameters:
+        - string:
+            name: BOTTLENECKS_DB_TARGET
+            default: '213.77.62.197'
+            description: 'Arguments to use in order to choose the backend DB'
+
+- parameter:
+    name: 'bottlenecks-params-opnfv-jump-2'
+    parameters:
+        - string:
+            name: BOTTLENECKS_DB_TARGET
+            default: '213.77.62.197'
+            description: 'Arguments to use in order to choose the backend DB'
+
+- parameter:
+    name: 'bottlenecks-params-huawei-us-deploy-bare-1'
+    parameters:
+        - string:
+            name: BOTTLENECKS_DB_TARGET
+            default: '213.77.62.197'
+            description: 'Arguments to use in order to choose the backend DB'
+
+- parameter:
+    name: 'bottlenecks-params-zte-build-1'
+    parameters:
+        - string:
+            name: BOTTLENECKS_DB_TARGET
+            default: '213.77.62.197'
+            description: 'Arguments to use in order to choose the backend DB'
+
+- parameter:
+    name: 'bottlenecks-params-orange-pod2'
+    parameters:
+        - string:
+            name: BOTTLENECKS_DB_TARGET
+            default: '213.77.62.197'
+            description: 'Arguments to use in order to choose the backend DB'
index ee6e7d6..49d29cb 100644 (file)
         - string:
             name: CI_DEBUG
             default: 'false'
-            description: "Show debut output information"
+            description: "Show debug output information"
 ########################
 # trigger macros
 ########################
 
             dir_result="${HOME}/opnfv/functest/reports"
             mkdir -p ${dir_result}
-            rm -rf ${dir_result}/*
+            sudo rm -rf ${dir_result}/*
             res_volume="-v ${dir_result}:/home/opnfv/functest/results"
 
             docker pull opnfv/functest:latest_stable >$redirect
index 5ed33de..51ddb31 100644 (file)
@@ -91,33 +91,15 @@ NFV_FEATURES=${DEPLOY_OPTIONS[2]}
 HA_MODE=${DEPLOY_OPTIONS[3]}
 EXTRA=${DEPLOY_OPTIONS[4]}
 
-# Get the juju config path with those options, later we will directly use
-# scenario name
-case $SDN_CONTROLLER in
-    odl_l2)
-        SRCBUNDLE="ovs-odl"
-        SDN_CONTROLLER="odl"
-        ;;
-    onos)
-        SRCBUNDLE="onos"
-        ;;
-    ocl)
-        SRCBUNDLE="contrail"
-        SDN_CONTROLLER="opencontrail"
-        ;;
-    *)
-        SRCBUNDLE="ovs"
-        echo "${SDN_CONTROLLER} not in SDN controllers list, using 'nosdn' setting"
-        SDN_CONTROLLER="nosdn"
-        ;;
-    esac
-SRCBUNDLE="${WORKSPACE}/ci/${SDN_CONTROLLER}/juju-deployer/${SRCBUNDLE}"
+if [ "$SDN_CONTROLLER" == 'odl_l2' ] || [ "$SDN_CONTROLLER" == 'odl_l3' ]; then
+    SDN_CONTROLLER='odl'
+fi
 if [ "$HA_MODE" == 'noha' ]; then
-    SRCBUNDLE="${SRCBUNDLE}.yaml"
-    HA_MODE == 'nonha'
-else
-    SRCBUNDLE="${SRCBUNDLE}-${HA_MODE}.yaml"
+    HA_MODE='nonha'
 fi
+# Build the juju-deployer bundle path (no trailing slash, so the joined
+# path does not contain a double '/').
+SRCBUNDLE="${WORKSPACE}/ci/${SDN_CONTROLLER}/juju-deployer"
+SRCBUNDLE="${SRCBUNDLE}/ovs-${SDN_CONTROLLER}-${HA_MODE}.yaml"
+
 
 # Modify files
 
@@ -217,6 +199,8 @@ else
       --allocation-pool start=$EXTNET_FIP,end=$EXTNET_LIP \
       --disable-dhcp --gateway $EXTNET_GW $EXTNET_NET
     exit_on_error $? "External subnet creation failed"
+    neutron net-update $EXTNET_NAME --shared
+    exit_on_error $? "External network sharing failed"
 fi
 
 ##
diff --git a/utils/test/result_collection_api/dashboard/qtip2Dashboard.py b/utils/test/result_collection_api/dashboard/qtip2Dashboard.py
new file mode 100644 (file)
index 0000000..138244d
--- /dev/null
@@ -0,0 +1,121 @@
+#!/usr/bin/python
+
+##############################################################################
+# Copyright (c) 2015 Dell Inc  and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+
+def get_qtip_cases():
+    """
+    Return the list of QTIP test cases supported by the dashboard.
+    TODO: update the list when adding a new test case for the dashboard
+    """
+    supported = [
+        "compute_test_suite",
+        "storage_test_suite",
+        "network_test_suite",
+    ]
+    return supported
+
+def check_qtip_case_exist(case):
+    """
+    Return True when *case* is one of the declared QTIP test cases.
+    An undefined (None) or unknown case yields False; None can never be
+    a member of the declared list, so a plain membership test suffices.
+    """
+    return case in get_qtip_cases()
+
+def format_qtip_for_dashboard(case, results):
+    """
+    Entry point for dashboard formatting: verify that *case* is a
+    declared QTIP test case, then delegate to the common formatter.
+    Undeclared cases are reported and yield an empty list.
+    """
+    if not check_qtip_case_exist(case):
+        print "Test cases not declared"
+        return []
+    return format_common_for_dashboard(case, results)
+
+def format_common_for_dashboard(case, results):
+    """
+    Common post processing: build the dashboard payload for *case*
+    from an iterable of (date, index) result pairs.
+    """
+    payload = [{'description': case + " results for Dashboard"}]
+
+    # The network suite reports throughput; everything else is a plain index.
+    if "network_test_suite" in case:
+        graph_name = "Throughput index"
+    else:
+        graph_name = "Index"
+
+    # Graph 1: index value over time.
+    # ********************************
+    points = [{'x': date, 'y1': index} for date, index in results]
+
+    payload.append({'name': graph_name,
+                    'info': {'type': "graph",
+                             'xlabel': 'time',
+                             'y1label': 'Index Number'},
+                    'data_set': points})
+
+    return payload
+
+
+############################  For local test  ################################
+import os
+import requests
+import json
+from collections import defaultdict
+
+def _get_results(db_url, testcase):
+
+    """
+    Fetch QTIP results from the result collection API.
+
+    db_url:   base URL of the result collection API
+    testcase: dict with 'project' and 'testcase' keys
+    Returns a dict mapping str(creation_date) -> index value.
+    """
+    testproject = testcase["project"]
+    testcase = testcase["testcase"]
+    resultarray = defaultdict()
+    #header
+    header = {'Content-Type': 'application/json'}
+    #url
+    url = db_url + "/results?project="+testproject+"&case="+testcase
+    # BUG FIX: requests.get()'s second positional argument is 'params',
+    # not headers — the header dict was silently sent as query params.
+    data = requests.get(url, headers=header)
+    datajson = data.json()
+    for entry in datajson['test_results']:
+        rawresults = entry['details']
+        index = rawresults['index']
+        resultarray[str(entry['creation_date'])] = index
+
+    return resultarray
+
+def _test():
+
+    """
+    Ad-hoc local smoke test: fetch, format and print the dashboard
+    payload for every supported QTIP suite in declaration order.
+    """
+    db_url = "http://213.77.62.197"
+
+    for suite in get_qtip_cases():
+        raw_result = _get_results(db_url, {"project": "qtip", "testcase": suite})
+        resultitems = raw_result.items()
+        result = format_qtip_for_dashboard(suite, resultitems)
+        print result
+
+if __name__ == '__main__':
+    _test()
\ No newline at end of file