Merge "Enable daisy euphrates jobs of daily and functest"
author Morgan Richomme <morgan.richomme@orange.com>
Mon, 25 Sep 2017 12:58:05 +0000 (12:58 +0000)
committer Gerrit Code Review <gerrit@opnfv.org>
Mon, 25 Sep 2017 12:58:05 +0000 (12:58 +0000)
25 files changed:
jjb/3rd_party_ci/odl-netvirt.yml
jjb/apex/apex-build.sh
jjb/apex/apex-iso-verify.sh
jjb/apex/apex.yml
jjb/apex/apex.yml.j2
jjb/compass4nfv/compass-ci-jobs.yml
jjb/compass4nfv/compass-project-jobs.yml
jjb/compass4nfv/compass-verify-jobs.yml
jjb/doctor/doctor.yml
jjb/dovetail/dovetail-ci-jobs.yml
jjb/fuel/fuel-deploy.sh
jjb/functest/functest-alpine.sh
jjb/functest/functest-daily-jobs.yml
jjb/kvmfornfv/kvmfornfv.yml
jjb/opera/opera-daily-jobs.yml
jjb/orchestra/orchestra-daily-jobs.yml
jjb/ovn4nfv/ovn4nfv-daily-jobs.yml
jjb/releng/opnfv-docker.yml
jjb/xci/osa-periodic-jobs.yml
jjb/xci/xci-daily-jobs.yml
jjb/xci/xci-verify-jobs.yml
utils/test/reporting/reporting/functest/reporting-status.py
utils/test/reporting/reporting/functest/reporting-vims.py
utils/test/reporting/reporting/functest/template/index-vims-tmpl.html
utils/test/reporting/reporting/functest/testCase.py

jjb/3rd_party_ci/odl-netvirt.yml
index 01017f3..863eb94 100644 (file)
             - name: 'functest-netvirt-virtual-suite-master'
               predefined-parameters: |
                 DEPLOY_SCENARIO=os-odl_l3-nofeature-ha
+                FUNCTEST_MODE=testcase
                 FUNCTEST_SUITE_NAME=odl_netvirt
                 RC_FILE_PATH=$HOME/cloner-info/overcloudrc
               node-parameters: true
jjb/apex/apex-build.sh
index 23ce810..cf59998 100755 (executable)
@@ -46,6 +46,12 @@ echo "Cache Directory Contents:"
 echo "-------------------------"
 ls -al $CACHE_DIRECTORY
 
+if [[ "$BUILD_ARGS" =~ '--iso' ]]; then
+  mkdir -p /tmp/apex-iso/
+  rm -f /tmp/apex-iso/*.iso
+  cp -f $BUILD_DIRECTORY/../.build/release/OPNFV-CentOS-7-x86_64-$OPNFV_ARTIFACT_VERSION.iso /tmp/apex-iso/
+fi
+
 if ! echo $ARTIFACT_VERSION | grep "dev" 1> /dev/null; then
   echo "Writing opnfv.properties file"
   # save information regarding artifact into file
jjb/apex/apex-iso-verify.sh
index f102421..4faeb60 100755 (executable)
@@ -8,7 +8,14 @@ echo "Starting the Apex iso verify."
 echo "--------------------------------------------------------"
 echo
 
-source $BUILD_DIRECTORY/../opnfv.properties
+# Must be RPMs/ISO
+echo "Downloading latest properties file"
+
+# get the properties file in order to get info regarding artifacts
+curl --fail -s -o opnfv.properties http://$GS_URL/latest.properties
+
+# source the file so we get OPNFV vars
+source opnfv.properties
 
 if ! rpm -q virt-install > /dev/null; then
   sudo yum -y install virt-install
@@ -35,9 +42,9 @@ sudo rm -f /var/log/libvirt/qemu/apex-iso-verify-console.log
 sudo virt-install -n apex-iso-verify -r 4096 --vcpus 4 --os-variant=rhel7 \
  --accelerate -v --noautoconsole \
  --disk path=/var/lib/libvirt/images/apex-iso-verify.qcow2,size=30,format=qcow2 \
- -l $BUILD_DIRECTORY/release/OPNFV-CentOS-7-x86_64-$OPNFV_ARTIFACT_VERSION.iso \
+ -l /tmp/apex-iso/OPNFV-CentOS-7-x86_64-$OPNFV_ARTIFACT_VERSION.iso \
  --extra-args 'console=ttyS0 console=ttyS0,115200n8 serial inst.ks=file:/iso-verify.ks inst.stage2=hd:LABEL=OPNFV\x20CentOS\x207\x20x86_64:/' \
- --initrd-inject $BUILD_DIRECTORY/../ci/iso-verify.ks \
+ --initrd-inject ci/iso-verify.ks \
  --serial file,path=/var/log/libvirt/qemu/apex-iso-verify-console.log
 
 echo "Waiting for install to finish..."
jjb/apex/apex.yml
index bed67f4..058f18a 100644 (file)
                 current-parameters: false
                 predefined-parameters: |
                   DEPLOY_SCENARIO=$DEPLOY_SCENARIO
-                  FUNCTEST_SUITE_NAME=healthcheck
+                  FUNCTEST_MODE=tier
+                  FUNCTEST_TIER=healthcheck
                   GERRIT_BRANCH=$GERRIT_BRANCH
                   GERRIT_REFSPEC=$GERRIT_REFSPEC
                   GERRIT_CHANGE_NUMBER=$GERRIT_CHANGE_NUMBER
 
     properties:
         - logrotate-default
+        - build-blocker:
+            use-build-blocker: true
+            block-level: 'NODE'
+            blocking-jobs:
+                - 'apex-daily.*'
 
     triggers:
         - 'apex-{stream}'
jjb/apex/apex.yml.j2
index 3c36e8f..09c2f8c 100644 (file)
                 current-parameters: false
                 predefined-parameters: |
                   DEPLOY_SCENARIO=$DEPLOY_SCENARIO
-                  FUNCTEST_SUITE_NAME=healthcheck
+                  FUNCTEST_MODE=tier
+                  FUNCTEST_TIER=healthcheck
                   GERRIT_BRANCH=$GERRIT_BRANCH
                   GERRIT_REFSPEC=$GERRIT_REFSPEC
                   GERRIT_CHANGE_NUMBER=$GERRIT_CHANGE_NUMBER
 
     properties:
         - logrotate-default
+        - build-blocker:
+            use-build-blocker: true
+            block-level: 'NODE'
+            blocking-jobs:
+                - 'apex-daily.*'
 
     triggers:
         - 'apex-{stream}'
jjb/compass4nfv/compass-ci-jobs.yml
index 9f266b0..74a134e 100644 (file)
@@ -16,7 +16,7 @@
     euphrates: &euphrates
         stream: euphrates
         branch: 'stable/{stream}'
-        disabled: true
+        disabled: false
         gs-pathname: '/{stream}'
         ppa-pathname: '/{stream}'
         openstack-version: ocata
 - trigger:
     name: 'compass-os-nosdn-nofeature-ha-baremetal-euphrates-trigger'
     triggers:
-        - timed: '' #'0 9 * * *'
+        - timed: '0 1 * * *'
 - trigger:
     name: 'compass-os-nosdn-openo-ha-baremetal-euphrates-trigger'
     triggers:
-        - timed: '' #'0 13 * * *'
+        - timed: ''
 - trigger:
     name: 'compass-os-odl_l2-nofeature-ha-baremetal-euphrates-trigger'
     triggers:
-        - timed: '' #'0 17 * * *'
+        - timed: ''
 - trigger:
     name: 'compass-os-odl_l3-nofeature-ha-baremetal-euphrates-trigger'
     triggers:
-        - timed: '' #'0 21 * * *'
+        - timed: '0 21 * * *'
 - trigger:
     name: 'compass-os-onos-nofeature-ha-baremetal-euphrates-trigger'
     triggers:
-        - timed: '' #'0 1 * * *'
+        - timed: ''
 - trigger:
     name: 'compass-os-ocl-nofeature-ha-baremetal-euphrates-trigger'
     triggers:
 - trigger:
     name: 'compass-os-onos-sfc-ha-baremetal-euphrates-trigger'
     triggers:
-        - timed: '' #'0 5 * * *'
+        - timed: ''
 - trigger:
     name: 'compass-os-odl_l2-moon-ha-baremetal-euphrates-trigger'
     triggers:
-        - timed: ''
+        - timed: '0 5 * * *'
 - trigger:
     name: 'compass-os-nosdn-kvm-ha-baremetal-euphrates-trigger'
     triggers:
-        - timed: ''
+        - timed: '0 13 * * *'
 - trigger:
     name: 'compass-os-nosdn-dpdk-ha-baremetal-euphrates-trigger'
     triggers:
-        - timed: ''
+        - timed: '0 9 * * *'
 - trigger:
     name: 'compass-k8-nosdn-nofeature-ha-baremetal-euphrates-trigger'
     triggers:
 - trigger:
     name: 'compass-os-odl-sfc-ha-baremetal-euphrates-trigger'
     triggers:
-        - timed: ''
+        - timed: '0 17 * * *'
 
 #----------------------
 # noha-baremetal-euphrates
 - trigger:
     name: 'compass-os-odl_l2-moon-ha-virtual-master-trigger'
     triggers:
-        - timed: '0 12 * * *'
+        - timed: '30 12 * * *'
 - trigger:
     name: 'compass-os-nosdn-kvm-ha-virtual-master-trigger'
     triggers:
 - trigger:
     name: 'compass-os-nosdn-kvm-noha-virtual-master-trigger'
     triggers:
-        - timed: '0 13 * * *'
+        - timed: '30 13 * * *'
 - trigger:
     name: 'compass-os-nosdn-nofeature-noha-virtual-master-trigger'
     triggers:
 - trigger:
     name: 'compass-os-nosdn-nofeature-ha-virtual-euphrates-trigger'
     triggers:
-        - timed: '' #'0 21 * * *'
+        - timed: '0 23 * * *'
 - trigger:
     name: 'compass-os-nosdn-openo-ha-virtual-euphrates-trigger'
     triggers:
-        - timed: '' #'0 22 * * *'
+        - timed: ''
 - trigger:
     name: 'compass-os-odl_l2-nofeature-ha-virtual-euphrates-trigger'
     triggers:
-        - timed: '' #'0 20 * * *'
+        - timed: ''
 - trigger:
     name: 'compass-os-odl_l3-nofeature-ha-virtual-euphrates-trigger'
     triggers:
-        - timed: '' #'0 19 * * *'
+        - timed: '0 22 * * *'
 - trigger:
     name: 'compass-os-onos-nofeature-ha-virtual-euphrates-trigger'
     triggers:
-        - timed: '' #'0 18 * * *'
+        - timed: ''
 - trigger:
     name: 'compass-os-ocl-nofeature-ha-virtual-euphrates-trigger'
     triggers:
 - trigger:
     name: 'compass-os-onos-sfc-ha-virtual-euphrates-trigger'
     triggers:
-        - timed: '' #'0 15 * * *'
+        - timed: ''
 - trigger:
     name: 'compass-os-odl_l2-moon-ha-virtual-euphrates-trigger'
     triggers:
-        - timed: '' #'0 14 * * *'
+        - timed: '0 20 * * *'
 - trigger:
     name: 'compass-os-nosdn-kvm-ha-virtual-euphrates-trigger'
     triggers:
-        - timed: ''
+        - timed: '0 16 * * *'
 - trigger:
     name: 'compass-os-nosdn-dpdk-ha-virtual-euphrates-trigger'
     triggers:
-        - timed: ''
+        - timed: '0 14 * * *'
 - trigger:
     name: 'compass-os-odl-sfc-ha-virtual-euphrates-trigger'
     triggers:
-        - timed: ''
+        - timed: '0 18 * * *'
 - trigger:
     name: 'compass-k8-nosdn-nofeature-ha-virtual-euphrates-trigger'
     triggers:
-        - timed: ''
+        - timed: '0 13 * * *'
 
 #--------------------
 # noha-virtual-euphrates
 - trigger:
     name: 'compass-os-nosdn-kvm-noha-virtual-euphrates-trigger'
     triggers:
-        - timed: ''
+        - timed: '0 15 * * *'
 - trigger:
     name: 'compass-os-nosdn-nofeature-noha-virtual-euphrates-trigger'
     triggers:
-        - timed: ''
+        - timed: '0 17 * * *'
 - trigger:
     name: 'compass-os-odl_l3-nofeature-noha-virtual-euphrates-trigger'
     triggers:
-        - timed: ''
+        - timed: '0 23 * * *'
 - trigger:
     name: 'compass-os-odl_l2-moon-noha-virtual-euphrates-trigger'
     triggers:
-        - timed: ''
+        - timed: '0 21 * * *'
 - trigger:
     name: 'compass-os-odl-sfc-noha-virtual-euphrates-trigger'
     triggers:
-        - timed: ''
+        - timed: '0 19 * * *'
 - trigger:
     name: 'compass-os-nosdn-dpdk-noha-virtual-euphrates-trigger'
     triggers:
-        - timed: ''
+        - timed: '0 12 * * *'
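
Note on the `timed` values above: they follow standard cron field order (minute, hour, day-of-month, month, day-of-week), and an empty string simply removes the schedule. A quick reference using values from the triggers above (general cron semantics, not specific to these jobs):

    # '0 1 * * *'   -> every day at 01:00 (Jenkins server time)
    # '30 12 * * *' -> every day at 12:30
    # ''            -> no timer; the job is not triggered on a schedule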
jjb/compass4nfv/compass-project-jobs.yml
index e612ef6..fe91705 100644 (file)
             gs-pathname: '/{stream}'
             ppa-pathname: '/{stream}'
             disabled: false
+        - euphrates:
+            branch: 'stable/{stream}'
+            gs-pathname: '/{stream}'
+            ppa-pathname: '/{stream}'
+            disabled: false
 
     jobs:
         - '{installer}-build-daily-{stream}'
jjb/compass4nfv/compass-verify-jobs.yml
index ee91e02..f4fe8f6 100644 (file)
                 - name: 'functest-compass-virtual-suite-{stream}'
                   current-parameters: false
                   predefined-parameters: |
-                    FUNCTEST_SUITE_NAME=healthcheck
+                    FUNCTEST_MODE=tier
+                    FUNCTEST_TIER=healthcheck
                     DEPLOY_SCENARIO=os-nosdn-nofeature-ha
                   node-parameters: true
                   kill-phase-on: NEVER
                 - name: 'functest-compass-virtual-suite-{stream}'
                   current-parameters: false
                   predefined-parameters: |
+                    FUNCTEST_MODE=testcase
                     FUNCTEST_SUITE_NAME=vping_ssh
                     DEPLOY_SCENARIO=os-nosdn-nofeature-ha
                   node-parameters: true
jjb/doctor/doctor.yml
index 012d373..c806429 100644 (file)
             default: 'os-nosdn-nofeature-ha'
             description: 'Scenario to deploy and test'
         # functest-suite-parameter
+        - string:
+            name: FUNCTEST_MODE
+            default: 'testcase'
         - string:
             name: FUNCTEST_SUITE_NAME
             default: 'doctor-notification'
jjb/dovetail/dovetail-ci-jobs.yml
index 4c3d72f..1474803 100644 (file)
             SUT: apex
             auto-trigger-name: 'daily-trigger-disabled'
             <<: *danube
+        - huawei-pod7:
+            slave-label: huawei-pod7
+            SUT: compass
+            auto-trigger-name: 'daily-trigger-disabled'
+            <<: *danube
 #--------------------------------
     testsuite:
         - 'compliance_set'
jjb/fuel/fuel-deploy.sh
index d44c060..be45604 100755 (executable)
@@ -103,7 +103,7 @@ echo "--------------------------------------------------------"
 echo "Scenario: ${DEPLOY_SCENARIO}"
 echo "Lab: ${LAB_NAME}"
 echo "POD: ${POD_NAME}"
-[[ "${BRANCH}" != 'master' ]] && echo "ISO: ${OPNFV_ARTIFACT_URL/*\/}"
+[[ "${BRANCH}" =~ 'danube' ]] && echo "ISO: ${OPNFV_ARTIFACT_URL/*\/}"
 echo
 echo "Starting the deployment using ${INSTALLER_TYPE}. This could take some time..."
 echo "--------------------------------------------------------"
jjb/functest/functest-alpine.sh
index c948430..fe1888f 100755 (executable)
@@ -4,6 +4,60 @@ set -e
 set +u
 set +o pipefail
 
+run_tiers() {
+    cmd_opt='prepare_env start && run_tests -r -t all'
+    ret_val_file="${HOME}/opnfv/functest/results/${BRANCH##*/}/return_value"
+    echo 0 > ${ret_val_file}
+
+    for tier in ${tiers[@]}; do
+        FUNCTEST_IMAGE=opnfv/functest-${tier}
+        echo "Functest: Pulling Functest Docker image ${FUNCTEST_IMAGE} ..."
+        docker pull ${FUNCTEST_IMAGE}>/dev/null
+        cmd="docker run --privileged=true ${envs} ${volumes} ${FUNCTEST_IMAGE} /bin/bash -c '${cmd_opt}'"
+        echo "Running Functest tier '${tier}'. CMD: ${cmd}"
+        eval ${cmd}
+        ret_value=$?
+        if [ ${ret_value} != 0 ]; then
+          echo ${ret_value} > ${ret_val_file}
+        fi
+    done
+}
+
+run_test() {
+    test_name=$1
+    cmd_opt="prepare_env start && run_tests -r -t ${test_name}"
+    ret_val_file="${HOME}/opnfv/functest/results/${BRANCH##*/}/return_value"
+    echo 0 > ${ret_val_file}
+    # Determine which Functest image should be used for the test case
+    case ${test_name} in
+        connection_check|api_check|snaps_health_check)
+            FUNCTEST_IMAGE=opnfv/functest-healthcheck ;;
+        vping_ssh|vping_userdata|tempest_smoke_serial|rally_sanity|refstack_defcore|odl|odl_netvirt|fds|snaps_smoke)
+            FUNCTEST_IMAGE=opnfv/functest-smoke ;;
+        tempest_full_parallel|tempest_custom|rally_full)
+            FUNCTEST_IMAGE=opnfv/functest-components ;;
+        cloudify_ims|orchestra_openims|orchestra_clearwaterims|vyos_vrouter)
+            FUNCTEST_IMAGE=opnfv/functest-vnf ;;
+        promise|doctor-notification|bgpvpn|functest-odl-sfc|domino-multinode|barometercollectd)
+            FUNCTEST_IMAGE=opnfv/functest-features ;;
+        parser)
+            FUNCTEST_IMAGE=opnfv/functest-parser ;;
+        *)
+            echo "Unknown test case $test_name"
+            exit 1 ;;
+    esac
+    echo "Functest: Pulling Functest Docker image ${FUNCTEST_IMAGE} ..."
+    docker pull ${FUNCTEST_IMAGE}>/dev/null
+    cmd="docker run --privileged=true ${envs} ${volumes} ${FUNCTEST_IMAGE} /bin/bash -c '${cmd_opt}'"
+    echo "Running Functest test case '${test_name}'. CMD: ${cmd}"
+    eval ${cmd}
+    ret_value=$?
+    if [ ${ret_value} != 0 ]; then
+      echo ${ret_value} > ${ret_val_file}
+    fi
+}
+
+
 [[ $CI_DEBUG == true ]] && redirect="/dev/stdout" || redirect="/dev/null"
 FUNCTEST_DIR=/home/opnfv/functest
 DEPLOY_TYPE=baremetal
@@ -15,7 +69,7 @@ rc_file=${HOME}/opnfv-openrc.sh
 
 if [[ ${INSTALLER_TYPE} == 'joid' ]]; then
     rc_file=$LAB_CONFIG/admin-openrc
-elif [[ ${INSTALLER_TYPE} == 'compass' && ${BRANCH} == 'master' ]]; then
+elif [[ ${INSTALLER_TYPE} == 'compass' ]]; then
     cacert_file_vol="-v ${HOME}/os_cacert:${FUNCTEST_DIR}/conf/os_cacert"
     echo "export OS_CACERT=${FUNCTEST_DIR}/conf/os_cacert" >> ${HOME}/opnfv-openrc.sh
 elif [[ ${INSTALLER_TYPE} == 'fuel' && ${DEPLOY_TYPE} == 'baremetal' ]]; then
@@ -56,16 +110,8 @@ envs="-e INSTALLER_TYPE=${INSTALLER_TYPE} -e INSTALLER_IP=${INSTALLER_IP} \
 
 ssh_options="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
 
-if [[ ${INSTALLER_TYPE} == 'compass' && ${DEPLOY_SCENARIO} == *'os-nosdn-openo-ha'* ]]; then
-    openo_msb_port=${openo_msb_port:-80}
-    openo_msb_endpoint="$(sshpass -p'root' ssh 2>/dev/null $ssh_options root@${installer_ip} \
-    'mysql -ucompass -pcompass -Dcompass -e "select package_config from cluster;" \
-    | sed s/,/\\n/g | grep openo_ip | cut -d \" -f 4'):$openo_msb_port"
 
-    envs=${env}" -e OPENO_MSB_ENDPOINT=${openo_msb_endpoint}"
-fi
-
-if [ "${INSTALLER_TYPE}" == 'fuel' ] && [ "$BRANCH" != 'stable/danube' ]; then
+if [ "${INSTALLER_TYPE}" == 'fuel' ]; then
     COMPUTE_ARCH=$(ssh -l ubuntu ${INSTALLER_IP} -i ${SSH_KEY} ${ssh_options} \
         "sudo salt 'cmp*' grains.get cpuarch --out yaml | awk '{print \$2; exit}'")
     envs="${envs} -e POD_ARCH=${COMPUTE_ARCH}"
@@ -75,29 +121,23 @@ volumes="${images_vol} ${results_vol} ${sshkey_vol} ${rc_file_vol} ${cacert_file
 
 set +e
 
-if [ ${FUNCTEST_SUITE_NAME} == 'healthcheck' ]; then
-    tiers=(healthcheck)
-else
-    if [ ${DEPLOY_TYPE} == 'baremetal' ]; then
-        tiers=(healthcheck smoke features vnf parser)
+
+if [[ ${DEPLOY_SCENARIO} =~ ^os-.* ]]; then
+    if [ ${FUNCTEST_MODE} == 'testcase' ]; then
+        echo "FUNCTEST_MODE=testcase, FUNCTEST_SUITE_NAME=${FUNCTEST_SUITE_NAME}"
+        run_test ${FUNCTEST_SUITE_NAME}
+    elif [ ${FUNCTEST_MODE} == 'tier' ]; then
+        echo "FUNCTEST_MODE=tier, FUNCTEST_TIER=${FUNCTEST_TIER}"
+        tiers=(${FUNCTEST_TIER})
+        run_tiers ${tiers}
     else
-        tiers=(healthcheck smoke features)
+        if [ ${DEPLOY_TYPE} == 'baremetal' ]; then
+            tiers=(healthcheck smoke features vnf parser)
+        else
+            tiers=(healthcheck smoke features)
+        fi
+        run_tiers ${tiers}
     fi
+else
+    echo "k8 deployment has not been supported by functest yet"
 fi
-
-cmd_opt='prepare_env start && run_tests -r -t all'
-ret_val_file="${HOME}/opnfv/functest/results/${BRANCH##*/}/return_value"
-echo 0 > ${ret_val_file}
-
-for tier in ${tiers[@]}; do
-    FUNCTEST_IMAGE=opnfv/functest-${tier}
-    echo "Functest: Pulling Functest Docker image ${FUNCTEST_IMAGE} ..."
-    docker pull ${FUNCTEST_IMAGE}>/dev/null
-    cmd="docker run --privileged=true ${envs} ${volumes} ${FUNCTEST_IMAGE} /bin/bash -c '${cmd_opt}'"
-    echo "Running Functest tier '${tier}'. CMD: ${cmd}"
-    eval ${cmd}
-    ret_value=$?
-    if [ ${ret_value} != 0 ]; then
-      echo ${ret_value} > ${ret_val_file}
-    fi
-done
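
To summarise the new dispatch added to functest-alpine.sh above, a hedged usage sketch (values are illustrative only; in practice the Jenkins builders export these variables, together with the usual INSTALLER_TYPE/INSTALLER_IP settings, before running the script):

    #!/bin/bash
    # Illustrative only; not part of the change itself.
    export DEPLOY_SCENARIO=os-nosdn-nofeature-ha   # must start with 'os-' to run

    # Tier mode: run_tiers() runs the selected tier with its Alpine image
    export FUNCTEST_MODE=tier
    export FUNCTEST_TIER=healthcheck
    bash jjb/functest/functest-alpine.sh

    # Test-case mode: run_test() picks the image for the case,
    # e.g. opnfv/functest-smoke for vping_ssh
    export FUNCTEST_MODE=testcase
    export FUNCTEST_SUITE_NAME=vping_ssh
    bash jjb/functest/functest-alpine.sh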
jjb/functest/functest-daily-jobs.yml
index f47120b..04a2936 100644 (file)
 
     wrappers:
         - build-name:
-            name: '$BUILD_NUMBER Suite: $FUNCTEST_SUITE_NAME Scenario: $DEPLOY_SCENARIO'
+            name: '$BUILD_NUMBER Suite: $FUNCTEST_MODE Scenario: $DEPLOY_SCENARIO'
         - timeout:
             timeout: '{job-timeout}'
             abort: true
     name: functest-daily-parameter
     parameters:
         - string:
-            name: FUNCTEST_SUITE_NAME
+            name: FUNCTEST_MODE
             default: 'daily'
             description: "Daily suite name to run"
 - parameter:
     name: functest-arm-daily-parameter
     parameters:
         - string:
-            name: FUNCTEST_SUITE_NAME
+            name: FUNCTEST_MODE
             default: 'arm-daily'
             description: "Daily suite name (Aarch64) to run"
 - parameter:
     name: functest-suite-parameter
     parameters:
+        - choice:
+            name: FUNCTEST_MODE
+            choices:
+                - 'tier'
+                - 'testcase'
+            default: 'tier'
+            description: "Test case or Tier to be run"
         - choice:
             name: FUNCTEST_SUITE_NAME
             choices:
                 - 'rally_sanity'
                 - 'odl'
                 - 'odl_netvirt'
-                - 'onos'
                 - 'snaps_smoke'
                 - 'refstack_defcore'
                 - 'promise'
                 - 'cloudify_vrouter'
                 - 'orchestra_openims'
                 - 'orchestra_clearwaterims'
+            default: 'connection_check'
+        - choice:
+            name: FUNCTEST_TIER
+            choices:
+                - 'healthcheck'
+                - 'smoke'
+                - 'features'
+                - 'components'
+                - 'vnf'
+            default: 'healthcheck'
         - string:
             name: TESTCASE_OPTIONS
             default: ''
     name: functest-suite-builder
     builders:
         - 'functest-cleanup'
-        - 'set-functest-env'
+        - 'set-functest-env-alpine'
         - 'functest-suite'
         - 'functest-store-results'
         - 'functest-exit'
         - 'set-functest-env-alpine'
         - 'functest-daily'
         - 'functest-store-results'
+        - 'functest-exit'
 
 - builder:
     name: functest-daily
     name: functest-suite
     builders:
         - shell:
-            !include-raw: ./functest-suite.sh
+            !include-raw:
+                - ./functest-alpine.sh
 
 - builder:
     name: set-functest-env
jjb/kvmfornfv/kvmfornfv.yml
index ad497e9..2055da9 100644 (file)
@@ -8,19 +8,19 @@
       - master:
           branch: '{stream}'
           gs-pathname: ''
-          disabled: false
       - euphrates:
           branch: 'stable/{stream}'
           gs-pathname: '/{stream}'
-          disabled: true
     #####################################
     # patch verification phases
     #####################################
     phase:
       - 'build':
           slave-label: 'opnfv-build-ubuntu'
+          disabled: false
       - 'test':
           slave-label: 'intel-pod10'
+          disabled: true
     #####################################
     # patch verification phases
     #####################################
jjb/opera/opera-daily-jobs.yml
index d0dd052..5d7526b 100644 (file)
@@ -92,6 +92,7 @@
               current-parameters: false
               predefined-parameters: |
                 DEPLOY_SCENARIO=os-nosdn-openo-ha
+                FUNCTEST_MODE=testcase
                 FUNCTEST_SUITE_NAME=opera_vims
               node-parameters: true
               kill-phase-on: NEVER
jjb/orchestra/orchestra-daily-jobs.yml
index 74c997c..7c2deae 100644 (file)
@@ -93,6 +93,7 @@
               current-parameters: false
               predefined-parameters: |
                 DEPLOY_SCENARIO=os-nosdn-openbaton-ha
+                FUNCTEST_MODE=testcase
                 FUNCTEST_SUITE_NAME=orchestra_ims
               node-parameters: true
               kill-phase-on: NEVER
jjb/ovn4nfv/ovn4nfv-daily-jobs.yml
index a4c5865..ca4cfee 100644 (file)
@@ -81,6 +81,7 @@
               current-parameters: false
               predefined-parameters: |
                 DEPLOY_SCENARIO=os-ovn-nofeature-ha
+                FUNCTEST_MODE=testcase
                 FUNCTEST_SUITE_NAME=ovn4nfv_test_suite
               node-parameters: true
               kill-phase-on: NEVER
jjb/releng/opnfv-docker.yml
index 2b8fd27..714d8cf 100644 (file)
             arch_tag: 'x86_64'
             <<: *euphrates
             <<: *storperf-receivers
+        - 'nfvbench':
+            project: 'nfvbench'
+            <<: *euphrates
+            <<: *other-receivers
 
     jobs:
         - "{dockerrepo}-docker-build-push-{stream}"
jjb/xci/osa-periodic-jobs.yml
index 86910ac..048825e 100644 (file)
                   predefined-parameters: |
                     DISTRO={distro}
                     DEPLOY_SCENARIO=os-nosdn-nofeature-noha
-                    FUNCTEST_SUITE_NAME=healthcheck
+                    FUNCTEST_MODE=tier
+                    FUNCTEST_TIER=healthcheck
                   node-parameters: true
                   kill-phase-on: NEVER
                   abort-all-job: false
         - string:
             name: OPNFV_RELENG_DEV_PATH
             default: $WORKSPACE/releng-xci
+        - string:
+            name: FUNCTEST_MODE
+            default: 'tier'
         - string:
             name: FUNCTEST_SUITE_NAME
             default: 'healthcheck'
jjb/xci/xci-daily-jobs.yml
index 64659da..7d95429 100644 (file)
         - string:
             name: INSTALLER_TYPE
             default: 'osa'
+        - string:
+            name: FUNCTEST_MODE
+            default: 'daily'
         - string:
             name: FUNCTEST_SUITE_NAME
             default: 'daily'
jjb/xci/xci-verify-jobs.yml
index 324bfd1..440fac7 100644 (file)
                     DISTRO={distro}
                     DEPLOY_SCENARIO=os-nosdn-nofeature-noha
                     CLEAN_DIB_IMAGES=$CLEAN_DIB_IMAGES
-                    FUNCTEST_SUITE_NAME=healthcheck
+                    FUNCTEST_MODE=tier
+                    FUNCTEST_TIER=healthcheck
                   node-parameters: true
                   kill-phase-on: NEVER
                   abort-all-job: true
         - string:
             name: DEPLOY_SCENARIO
             default: 'os-nosdn-nofeature-noha'
+        - string:
+            name: FUNCTEST_MODE
+            default: 'tier'
         - string:
             name: FUNCTEST_SUITE_NAME
             default: 'healthcheck'
utils/test/reporting/reporting/functest/reporting-status.py
index 02bf67d..808c841 100755 (executable)
@@ -22,7 +22,7 @@ Functest reporting status
 """
 
 # Logger
-logger = rp_utils.getLogger("Functest-Status")
+LOGGER = rp_utils.getLogger("Functest-Status")
 
 # Initialization
 testValid = []
@@ -46,16 +46,16 @@ exclude_virtual = rp_utils.get_config('functest.exclude_virtual')
 
 functest_yaml_config = rp_utils.getFunctestConfig()
 
-logger.info("*******************************************")
-logger.info("*                                         *")
-logger.info("*   Generating reporting scenario status  *")
-logger.info("*   Data retention: %s days               *" % period)
-logger.info("*   Log level: %s                         *" % log_level)
-logger.info("*                                         *")
-logger.info("*   Virtual PODs exluded: %s              *" % exclude_virtual)
-logger.info("*   NOHA scenarios excluded: %s           *" % exclude_noha)
-logger.info("*                                         *")
-logger.info("*******************************************")
+LOGGER.info("*******************************************")
+LOGGER.info("*                                         *")
+LOGGER.info("*   Generating reporting scenario status  *")
+LOGGER.info("*   Data retention: %s days               *", period)
+LOGGER.info("*   Log level: %s                         *", log_level)
+LOGGER.info("*                                         *")
+LOGGER.info("*   Virtual PODs exluded: %s              *", exclude_virtual)
+LOGGER.info("*   NOHA scenarios excluded: %s           *", exclude_noha)
+LOGGER.info("*                                         *")
+LOGGER.info("*******************************************")
 
 # Retrieve test cases of Tier 1 (smoke)
 config_tiers = functest_yaml_config.get("tiers")
@@ -75,9 +75,9 @@ for tier in config_tiers:
     elif tier['order'] == 2:
         for case in tier['testcases']:
             if case['case_name'] not in blacklist:
-                testValid.append(tc.TestCase(case['case_name'],
-                                             case['case_name'],
-                                             case['dependencies']))
+                otherTestCases.append(tc.TestCase(case['case_name'],
+                                                  case['case_name'],
+                                                  case['dependencies']))
     elif tier['order'] > 2:
         for case in tier['testcases']:
             if case['case_name'] not in blacklist:
@@ -85,7 +85,7 @@ for tier in config_tiers:
                                                   "functest",
                                                   case['dependencies']))
 
-logger.debug("Functest reporting start")
+LOGGER.debug("Functest reporting start")
 
 # For all the versions
 for version in versions:
@@ -101,7 +101,7 @@ for version in versions:
     # initiate scenario file if it does not exist
     if not os.path.isfile(scenario_file_name):
         with open(scenario_file_name, "a") as my_file:
-            logger.debug("Create scenario file: %s" % scenario_file_name)
+            LOGGER.debug("Create scenario file: %s", scenario_file_name)
             my_file.write("date,scenario,installer,detail,score\n")
 
     for installer in installers:
@@ -113,10 +113,10 @@ for version in versions:
                                                  version)
         # get nb of supported architecture (x86, aarch64)
         architectures = rp_utils.getArchitectures(scenario_results)
-        logger.info("Supported architectures: {}".format(architectures))
+        LOGGER.info("Supported architectures: %s", architectures)
 
         for architecture in architectures:
-            logger.info("architecture: {}".format(architecture))
+            LOGGER.info("Architecture: %s", architecture)
             # Consider only the results for the selected architecture
             # i.e drop x86 for aarch64 and vice versa
             filter_results = rp_utils.filterArchitecture(scenario_results,
@@ -133,10 +133,10 @@ for version in versions:
 
             # For all the scenarios get results
             for s, s_result in filter_results.items():
-                logger.info("---------------------------------")
-                logger.info("installer %s, version %s, scenario %s:" %
-                            (installer, version, s))
-                logger.debug("Scenario results: %s" % s_result)
+                LOGGER.info("---------------------------------")
+                LOGGER.info("installer %s, version %s, scenario %s:",
+                            installer, version, s)
+                LOGGER.debug("Scenario results: %s", s_result)
 
                 # Green or Red light for a given scenario
                 nb_test_runnable_for_this_scenario = 0
@@ -146,11 +146,11 @@ for version in versions:
                 s_url = ""
                 if len(s_result) > 0:
                     build_tag = s_result[len(s_result)-1]['build_tag']
-                    logger.debug("Build tag: %s" % build_tag)
+                    LOGGER.debug("Build tag: %s", build_tag)
                     s_url = rp_utils.getJenkinsUrl(build_tag)
                     if s_url is None:
                         s_url = "http://testresultS.opnfv.org/reporting"
-                    logger.info("last jenkins url: %s" % s_url)
+                    LOGGER.info("last jenkins url: %s", s_url)
                 testCases2BeDisplayed = []
                 # Check if test case is runnable / installer, scenario
                 # for the test case used for Scenario validation
@@ -160,24 +160,24 @@ for version in versions:
                     for test_case in testValid:
                         test_case.checkRunnable(installer, s,
                                                 test_case.getConstraints())
-                        logger.debug("testcase %s (%s) is %s" %
-                                     (test_case.getDisplayName(),
-                                      test_case.getName(),
-                                      test_case.isRunnable))
+                        LOGGER.debug("testcase %s (%s) is %s",
+                                     test_case.getDisplayName(),
+                                     test_case.getName(),
+                                     test_case.isRunnable)
                         time.sleep(1)
                         if test_case.isRunnable:
                             name = test_case.getName()
                             displayName = test_case.getDisplayName()
                             project = test_case.getProject()
                             nb_test_runnable_for_this_scenario += 1
-                            logger.info(" Searching results for case %s " %
-                                        (displayName))
+                            LOGGER.info(" Searching results for case %s ",
+                                        displayName)
                             result = rp_utils.getResult(name, installer,
                                                         s, version)
                             # if no result set the value to 0
                             if result < 0:
                                 result = 0
-                            logger.info(" >>>> Test score = " + str(result))
+                            LOGGER.info(" >>>> Test score = " + str(result))
                             test_case.setCriteria(result)
                             test_case.setIsRunnable(True)
                             testCases2BeDisplayed.append(tc.TestCase(name,
@@ -193,17 +193,17 @@ for version in versions:
                     for test_case in otherTestCases:
                         test_case.checkRunnable(installer, s,
                                                 test_case.getConstraints())
-                        logger.debug("testcase %s (%s) is %s" %
-                                     (test_case.getDisplayName(),
-                                      test_case.getName(),
-                                      test_case.isRunnable))
+                        LOGGER.debug("testcase %s (%s) is %s",
+                                     test_case.getDisplayName(),
+                                     test_case.getName(),
+                                     test_case.isRunnable)
                         time.sleep(1)
                         if test_case.isRunnable:
                             name = test_case.getName()
                             displayName = test_case.getDisplayName()
                             project = test_case.getProject()
-                            logger.info(" Searching results for case %s " %
-                                        (displayName))
+                            LOGGER.info(" Searching results for case %s ",
+                                        displayName)
                             result = rp_utils.getResult(name, installer,
                                                         s, version)
                             # at least 1 result for the test
@@ -218,13 +218,13 @@ for version in versions:
                                     True,
                                     4))
                             else:
-                                logger.debug("No results found")
+                                LOGGER.debug("No results found")
 
                         items[s] = testCases2BeDisplayed
                 except Exception:
-                    logger.error("Error: installer %s, version %s, scenario %s"
-                                 % (installer, version, s))
-                    logger.error("No data available: %s" % (sys.exc_info()[0]))
+                    LOGGER.error("Error installer %s, version %s, scenario %s",
+                                 installer, version, s)
+                    LOGGER.error("No data available: %s", sys.exc_info()[0])
 
                 # **********************************************
                 # Evaluate the results for scenario validation
@@ -243,11 +243,11 @@ for version in versions:
 
                 s_status = "KO"
                 if scenario_score < scenario_criteria:
-                    logger.info(">>>> scenario not OK, score = %s/%s" %
-                                (scenario_score, scenario_criteria))
+                    LOGGER.info(">>>> scenario not OK, score = %s/%s",
+                                scenario_score, scenario_criteria)
                     s_status = "KO"
                 else:
-                    logger.info(">>>>> scenario OK, save the information")
+                    LOGGER.info(">>>>> scenario OK, save the information")
                     s_status = "OK"
                     path_validation_file = ("./display/" + version +
                                             "/functest/" +
@@ -270,7 +270,7 @@ for version in versions:
                     s_score,
                     s_score_percent,
                     s_url)
-                logger.info("--------------------------")
+                LOGGER.info("--------------------------")
 
             templateLoader = jinja2.FileSystemLoader(".")
             templateEnv = jinja2.Environment(
@@ -294,9 +294,9 @@ for version in versions:
                       installer_display + ".html", "wb") as fh:
                 fh.write(outputText)
 
-            logger.info("Manage export CSV & PDF")
+            LOGGER.info("Manage export CSV & PDF")
             rp_utils.export_csv(scenario_file_name, installer_display, version)
-            logger.error("CSV generated...")
+            LOGGER.error("CSV generated...")
 
             # Generate outputs for export
             # pdf
@@ -306,4 +306,4 @@ for version in versions:
             pdf_doc_name = ("./display/" + version +
                             "/functest/status-" + installer_display + ".pdf")
             rp_utils.export_pdf(pdf_path, pdf_doc_name)
-            logger.info("PDF generated...")
+            LOGGER.info("PDF generated...")
utils/test/reporting/reporting/functest/reporting-vims.py
index 14fddbe..3b25e91 100755 (executable)
+#!/usr/bin/python
+#
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+"""
+vIMS reporting status
+"""
 from urllib2 import Request, urlopen, URLError
 import json
 import jinja2
 
-# manage conf
-import utils.reporting_utils as rp_utils
-
-logger = rp_utils.getLogger("vIMS")
-
-
-def sig_test_format(sig_test):
-    nbPassed = 0
-    nbFailures = 0
-    nbSkipped = 0
-    for data_test in sig_test:
-        if data_test['result'] == "Passed":
-            nbPassed += 1
-        elif data_test['result'] == "Failed":
-            nbFailures += 1
-        elif data_test['result'] == "Skipped":
-            nbSkipped += 1
-    total_sig_test_result = {}
-    total_sig_test_result['passed'] = nbPassed
-    total_sig_test_result['failures'] = nbFailures
-    total_sig_test_result['skipped'] = nbSkipped
-    return total_sig_test_result
-
-period = rp_utils.get_config('general.period')
-versions = rp_utils.get_config('general.versions')
-url_base = rp_utils.get_config('testapi.url')
-
-logger.info("****************************************")
-logger.info("*   Generating reporting vIMS          *")
-logger.info("*   Data retention = %s days           *" % period)
-logger.info("*                                      *")
-logger.info("****************************************")
-
-installers = rp_utils.get_config('general.installers')
-step_order = ["initialisation", "orchestrator", "vIMS", "sig_test"]
-logger.info("Start processing....")
+import reporting.utils.reporting_utils as rp_utils
+
+LOGGER = rp_utils.getLogger("vIMS")
+
+PERIOD = rp_utils.get_config('general.period')
+VERSIONS = rp_utils.get_config('general.versions')
+URL_BASE = rp_utils.get_config('testapi.url')
+
+LOGGER.info("****************************************")
+LOGGER.info("*   Generating reporting vIMS          *")
+LOGGER.info("*   Data retention = %s days           *", PERIOD)
+LOGGER.info("*                                      *")
+LOGGER.info("****************************************")
+
+INSTALLERS = rp_utils.get_config('general.installers')
+STEP_ORDER = ["initialisation", "orchestrator", "vnf", "test_vnf"]
+LOGGER.info("Start vIMS reporting processing....")
 
 # For all the versions
-for version in versions:
-    for installer in installers:
-        logger.info("Search vIMS results for installer: %s, version: %s"
-                    % (installer, version))
-        request = Request("http://" + url_base + '?case=vims&installer=' +
-                          installer + '&version=' + version)
-
-        try:
-            response = urlopen(request)
-            k = response.read()
-            results = json.loads(k)
-        except URLError as e:
-            logger.error("Error code: %s" % e)
-
-        test_results = results['results']
-
-        logger.debug("Results found: %s" % test_results)
-
-        scenario_results = {}
-        for r in test_results:
-            if not r['scenario'] in scenario_results.keys():
-                scenario_results[r['scenario']] = []
-            scenario_results[r['scenario']].append(r)
-
-        for s, s_result in scenario_results.items():
-            scenario_results[s] = s_result[0:5]
-            logger.debug("Search for success criteria")
-            for result in scenario_results[s]:
-                result["start_date"] = result["start_date"].split(".")[0]
-                sig_test = result['details']['sig_test']['result']
-                if not sig_test == "" and isinstance(sig_test, list):
-                    format_result = sig_test_format(sig_test)
-                    if format_result['failures'] > format_result['passed']:
-                        result['details']['sig_test']['duration'] = 0
-                    result['details']['sig_test']['result'] = format_result
-                nb_step_ok = 0
-                nb_step = len(result['details'])
-
-                for step_name, step_result in result['details'].items():
-                    if step_result['duration'] != 0:
-                        nb_step_ok += 1
-                    m, s = divmod(step_result['duration'], 60)
-                    m_display = ""
-                    if int(m) != 0:
-                        m_display += str(int(m)) + "m "
-
-                    step_result['duration_display'] = (m_display +
-                                                       str(int(s)) + "s")
-
-                result['pr_step_ok'] = 0
-                if nb_step != 0:
-                    result['pr_step_ok'] = (float(nb_step_ok) / nb_step) * 100
-                try:
-                    logger.debug("Scenario %s, Installer %s"
-                                 % (s_result[1]['scenario'], installer))
-                    res = result['details']['orchestrator']['duration']
-                    logger.debug("Orchestrator deployment: %s s"
-                                 % res)
-                    logger.debug("vIMS deployment: %s s"
-                                 % result['details']['vIMS']['duration'])
-                    logger.debug("Signaling testing: %s s"
-                                 % result['details']['sig_test']['duration'])
-                    logger.debug("Signaling testing results: %s"
-                                 % format_result)
-                except Exception:
-                    logger.error("Data badly formatted")
-                logger.debug("----------------------------------------")
+for version in VERSIONS:
+    for installer in INSTALLERS:
+
+        # get nb of supported architecture (x86, aarch64)
+        # get scenarios
+        scenario_results = rp_utils.getScenarios("functest",
+                                                 "cloudify_ims",
+                                                 installer,
+                                                 version)
+
+        architectures = rp_utils.getArchitectures(scenario_results)
+        LOGGER.info("Supported architectures: %s", architectures)
+
+        for architecture in architectures:
+            LOGGER.info("Architecture: %s", architecture)
+            # Consider only the results for the selected architecture
+            # i.e drop x86 for aarch64 and vice versa
+            filter_results = rp_utils.filterArchitecture(scenario_results,
+                                                         architecture)
+            scenario_stats = rp_utils.getScenarioStats(filter_results)
+            items = {}
+            scenario_result_criteria = {}
+
+            # in case of more than 1 architecture supported
+            # precise the architecture
+            installer_display = installer
+            if "fuel" in installer:
+                installer_display = installer + "@" + architecture
+
+            LOGGER.info("Search vIMS results for installer: %s, version: %s",
+                        installer, version)
+            request = Request("http://" + URL_BASE + '?case=cloudify_ims&'
+                              'installer=' + installer + '&version=' + version)
+            try:
+                response = urlopen(request)
+                k = response.read()
+                results = json.loads(k)
+            except URLError as err:
+                LOGGER.error("Error code: %s", err)
+
+            test_results = results['results']
+
+            # LOGGER.debug("Results found: %s" % test_results)
+
+            scenario_results = {}
+            for r in test_results:
+                if not r['scenario'] in scenario_results.keys():
+                    scenario_results[r['scenario']] = []
+                scenario_results[r['scenario']].append(r)
+
+            # LOGGER.debug("scenario result: %s" % scenario_results)
+
+            for s, s_result in scenario_results.items():
+                scenario_results[s] = s_result[0:5]
+                for result in scenario_results[s]:
+                    try:
+                        format_result = result['details']['test_vnf']['result']
+
+                        # round durations of the different steps
+                        result['details']['orchestrator']['duration'] = round(
+                            result['details']['orchestrator']['duration'], 1)
+                        result['details']['vnf']['duration'] = round(
+                            result['details']['vnf']['duration'], 1)
+                        result['details']['test_vnf']['duration'] = round(
+                            result['details']['test_vnf']['duration'], 1)
+
+                        res_orch = \
+                            result['details']['orchestrator']['duration']
+                        res_vnf = result['details']['vnf']['duration']
+                        res_test_vnf = \
+                            result['details']['test_vnf']['duration']
+                        res_signaling = \
+                            result['details']['test_vnf']['result']['failures']
+
+                        # Manage test result status
+                        if res_signaling != 0:
+                            LOGGER.debug("At least 1 signalig test FAIL")
+                            result['details']['test_vnf']['status'] = "FAIL"
+                        else:
+                            LOGGER.debug("All signalig tests PASS")
+                            result['details']['test_vnf']['status'] = "PASS"
+
+                        LOGGER.debug("Scenario %s, Installer %s",
+                                     s_result[1]['scenario'], installer)
+                        LOGGER.debug("Orchestrator deployment: %ss", res_orch)
+                        LOGGER.debug("vIMS deployment: %ss", res_vnf)
+                        LOGGER.debug("VNF testing: %ss", res_test_vnf)
+                        LOGGER.debug("VNF testing results: %s", format_result)
+                    except Exception as err:  # pylint: disable=broad-except
+                        LOGGER.error("Uncomplete data %s", err)
+                    LOGGER.debug("----------------------------------------")
 
         templateLoader = jinja2.FileSystemLoader(".")
         templateEnv = jinja2.Environment(loader=templateLoader,
@@ -116,11 +132,11 @@ for version in versions:
         template = templateEnv.get_template(TEMPLATE_FILE)
 
         outputText = template.render(scenario_results=scenario_results,
-                                     step_order=step_order,
-                                     installer=installer)
-
+                                     step_order=STEP_ORDER,
+                                     installer=installer_display)
+        LOGGER.debug("Generate html page for %s", installer_display)
         with open("./display/" + version + "/functest/vims-" +
-                  installer + ".html", "wb") as fh:
+                  installer_display + ".html", "wb") as fh:
             fh.write(outputText)
 
-logger.info("vIMS report succesfully generated")
+LOGGER.info("vIMS report succesfully generated")
utils/test/reporting/reporting/functest/template/index-vims-tmpl.html
index cd51607..9bd2b2f 100644 (file)
         <nav>
           <ul class="nav nav-justified">
             <li class="active"><a href="../../index.html">Home</a></li>
-            <li><a href="vims-fuel.html">Fuel</a></li>
+            <li><a href="vims-apex.html">Apex</a></li>
             <li><a href="vims-compass.html">Compass</a></li>
             <li><a href="vims-daisy.html">Daisy</a></li>
-            <li><a href="vims-joid.html">JOID</a></li>
-            <li><a href="vims-apex.html">APEX</a></li>
+            <li><a href="vims-fuel@x86.html">Fuel@x86</a></li>
+            <li><a href="vims-fuel@aarch64.html">Fuel@aarch64</a></li>
+            <li><a href="vims-joid.html">Joid</a></li>
           </ul>
         </nav>
       </div>
                             <tr>
                                 <th width="20%">Step</th>
                                 <th width="10%">Status</th>
-                                <th width="10%">Duration</th>
+                                <th width="10%">Duration(s)</th>
                                 <th width="60%">Result</th>
                             </tr>
                             {% for step_od_name in step_order -%}
                                 {% if step_od_name in result.details.keys() -%}
                                     {% set step_result = result.details[step_od_name] -%}
-                                    {% if step_result.duration != 0 -%}
+                                    {% if step_result.status == "PASS" -%}
                                         <tr class="tr-ok">
                                             <td>{{step_od_name}}</td>
                                             <td><span class="glyphicon glyphicon-ok"></td>
-                                            <td><b>{{step_result.duration_display}}</b></td>
+                                            <td><b>{{step_result.duration}}</b></td>
                                             <td>{{step_result.result}}</td>
                                         </tr>
                                     {%- else -%}
utils/test/reporting/reporting/functest/testCase.py
index 9834f07..a182dd4 100644 (file)
@@ -50,9 +50,10 @@ class TestCase(object):
                                'gluon_vping': 'Netready',
                                'fds': 'FDS',
                                'cloudify_ims': 'vIMS (Cloudify)',
-                               'orchestra_ims': 'OpenIMS (OpenBaton)',
+                               'orchestra_openims': 'OpenIMS (OpenBaton)',
+                               'orchestra_clearwaterims': 'vIMS (OpenBaton)',
                                'opera_ims': 'vIMS (Open-O)',
-                               'vyos_vrouter': 'vyos',
+                               'vyos_vrouter': 'vyos (Cloudify)',
                                'barometercollectd': 'Barometer',
                                'odl_netvirt': 'Netvirt',
                                'security_scan': 'Security'}