Adapt reporting after Functest refactoring 75/14375/1
author Morgan Richomme <morgan.richomme@orange.com>
Thu, 19 May 2016 12:41:23 +0000 (14:41 +0200)
committer Morgan Richomme <morgan.richomme@orange.com>
Thu, 19 May 2016 12:44:36 +0000 (14:44 +0200)
Get the test cases from the cases declared in the tiers
Consider only Tiers 1-3 to validate a scenario
Display results for Tiers > 3
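
In short, the bucketing now done in reporting-status.py amounts to
(sketch only, names as in the patch):

    for tier in config_tiers:
        if 0 < tier['order'] <= 3:   # smoke, sdn suites, features
            testCases4Validation.append(...)
        elif tier['order'] > 3:      # displayed, not used for validation
            otherTestCases.append(...)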

Change-Id: I581702bd7f2cc323d38b82a2404b301fb8fd7840
Signed-off-by: Morgan Richomme <morgan.richomme@orange.com>
utils/test/reporting/functest/__init__.py [new file with mode: 0644]
utils/test/reporting/functest/default.css
utils/test/reporting/functest/reporting-status.py
utils/test/reporting/functest/reportingUtils.py [new file with mode: 0644]
utils/test/reporting/functest/scenarioResult.py [new file with mode: 0644]
utils/test/reporting/functest/template/index-status-tmpl.html
utils/test/reporting/functest/testCase.py [new file with mode: 0644]

diff --git a/utils/test/reporting/functest/__init__.py b/utils/test/reporting/functest/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/utils/test/reporting/functest/default.css b/utils/test/reporting/functest/default.css
index 0e330e9..897c3b1 100644 (file)
@@ -53,4 +53,25 @@ td{
 .btn-more {
     color: white;
     background-color: #0095a2;
-}
\ No newline at end of file
+}
+
+h1 {
+    display: block;
+    font-size: 2em;
+    margin-top: 0.67em;
+    margin-bottom: 0.67em;
+    margin-left: 0;
+    margin-right: 0;
+    font-weight: bold;
+}
+
+h2 {
+    display: block;
+    font-size: 1.5em;
+    margin-top: 0.83em;
+    margin-bottom: 0.83em;
+    margin-left: 0;
+    margin-right: 0;
+    font-weight: bold;
+    color: rgb(128, 128, 128);
+}
diff --git a/utils/test/reporting/functest/reporting-status.py b/utils/test/reporting/functest/reporting-status.py
index 9271717..e6ee8d3 100644 (file)
-from urllib2 import Request, urlopen, URLError
+#!/usr/bin/python
+#
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
 import datetime
-import json
 import jinja2
 import os
-import re
 import requests
 import sys
 import time
 import yaml
 
-# Declaration of the variables
-functest_test_list = ['vPing', 'vPing_userdata',
-                      'Tempest', 'Rally',
-                      'ODL', 'ONOS', 'vIMS']
-# functest_test_list = ['vPing']
-companion_test_list = ['doctor/doctor-notification', 'promise/promise']
-# companion_test_list = []
-installers = ["apex", "compass", "fuel", "joid"]
-# installers = ["fuel"]
-versions = ["brahmaputra", "master"]
-# versions = ["master"]
-PERIOD = 10
-MAX_SCENARIO_CRITERIA = 18
+import reportingUtils as utils
+import reportingConf as conf
+import testCase as tc
+import scenarioResult as sr
 
-# Correspondance between the name of the test case and the name in the DB
-# ideally we should modify the DB to avoid such interface....
-# '<name in the DB':'<name in the config'>
-# I know it is uggly...
-test_match_matrix = {'healthcheck': 'healthcheck',
-                     'vPing': 'vping_ssh',
-                     'vPing_userdata': 'vping_userdata',
-                     'ODL': 'odl',
-                     'ONOS': 'onos',
-                     'Tempest': 'tempest',
-                     'Rally': 'rally',
-                     'vIMS': 'vims',
-                     'doctor-notification': 'doctor',
-                     'promise': 'promise'}
-
-
-class TestCase(object):
-    def __init__(self, name, project, criteria=-1, isRunnable=True):
-        self.name = name
-        self.project = project
-        self.criteria = criteria
-        self.isRunnable = isRunnable
-
-    def getName(self):
-        return self.name
-
-    def getProject(self):
-        return self.project
-
-    def getCriteria(self):
-        return self.criteria
-
-    def setCriteria(self, criteria):
-        self.criteria = criteria
-
-    def setIsRunnable(self, isRunnable):
-        self.isRunnable = isRunnable
-
-    def checkRunnable(self, installer, scenario, config):
-        # Re-use Functest declaration
-        # Retrieve Functest configuration file functest_config.yaml
-        is_runnable = True
-        config_test = ""
-        TEST_ENV = functest_yaml_config.get("test-dependencies")
-
-        # print " *********************** "
-        # print TEST_ENV
-        # print " ---------------------- "
-        # print "case = " + self.name
-        # print "installer = " + installer
-        # print "scenario = " + scenario
-        # print "project = " + self.project
-
-        # Retrieve test constraints
-        case_name_formated = test_match_matrix[self.name]
-
-        try:
-            config_test = TEST_ENV[self.project][case_name_formated]
-        except KeyError:
-            # if not defined in dependencies => no dependencies
-            config_test = TEST_ENV[case_name_formated]
-        except Exception, e:
-            print "Error [getTestEnv]:", e
-
-        # Retrieve test execution param
-        test_execution_context = {"installer": installer,
-                                  "scenario": scenario}
-        # By default we assume that all the tests are always runnable...
-        # if test_env not empty => dependencies to be checked
-        if config_test is not None and len(config_test) > 0:
-            # possible criteria = ["installer", "scenario"]
-            # consider test criteria from config file
-            # compare towards CI env through CI en variable
-            for criteria in config_test:
-                if re.search(config_test[criteria],
-                             test_execution_context[criteria]) is None:
-                    # print "Test "+ test + " cannot be run on the environment"
-                    is_runnable = False
-        # print is_runnable
-        self.isRunnable = is_runnable
-
-
-class ScenarioResult(object):
-    def __init__(self, status, score=0):
-        self.status = status
-        self.score = score
-
-    def getStatus(self):
-        return self.status
-
-    def getScore(self):
-        return self.score
-
-# *****************************************************************************
-
-
-def getApiResults(case, installer, scenario, version):
-    case = case.getName()
-    results = json.dumps([])
-    # to remove proxy (to be removed at the end for local test only)
-    # proxy_handler = urllib2.ProxyHandler({})
-    # opener = urllib2.build_opener(proxy_handler)
-    # urllib2.install_opener(opener)
-    # url = "http://127.0.0.1:8000/results?case=" + case + \
-    #       "&period=30&installer=" + installer
-    url = ("http://testresults.opnfv.org/testapi/results?case=" + case +
-           "&period=" + str(PERIOD) + "&installer=" + installer +
-           "&scenario=" + scenario + "&version=" + version)
-    request = Request(url)
-
-    try:
-        response = urlopen(request)
-        k = response.read()
-        results = json.loads(k)
-    except URLError, e:
-        print 'No kittez. Got an error code:', e
-
-    return results
-
-
-def getScenarios(case, installer, version):
-
-    case = case.getName()
-    url = "http://testresults.opnfv.org/testapi/results?case=" + case + \
-          "&period=" + str(PERIOD) + "&installer=" + installer + \
-          "&version=" + version
-    request = Request(url)
-
-    try:
-        response = urlopen(request)
-        k = response.read()
-        results = json.loads(k)
-    except URLError, e:
-        print 'Got an error code:', e
-
-    test_results = results['test_results']
-
-    if test_results is not None:
-        test_results.reverse()
-
-        scenario_results = {}
-
-        for r in test_results:
-            # Retrieve all the scenarios per installer
-            if not r['scenario'] in scenario_results.keys():
-                scenario_results[r['scenario']] = []
-            scenario_results[r['scenario']].append(r)
-
-    return scenario_results
-
-
-def getScenarioStats(scenario_results):
-    scenario_stats = {}
-    for k, v in scenario_results.iteritems():
-        scenario_stats[k] = len(v)
-
-    return scenario_stats
-
-
-def getNbtestOk(results):
-    nb_test_ok = 0
-    for r in results:
-        for k, v in r.iteritems():
-            try:
-                if "passed" in v:
-                    nb_test_ok += 1
-            except:
-                print "Cannot retrieve test status"
-    return nb_test_ok
-
-
-def getResult(testCase, installer, scenario, version):
-
-    # retrieve raw results
-    results = getApiResults(testCase, installer, scenario, version)
-    # let's concentrate on test results only
-    test_results = results['test_results']
-
-    # if results found, analyze them
-    if test_results is not None:
-        test_results.reverse()
-
-        scenario_results = []
-
-        # print " ---------------- "
-        # print test_results
-        # print " ---------------- "
-        # print "nb of results:" + str(len(test_results))
-
-        for r in test_results:
-            # print r["creation_date"]
-            # print r["criteria"]
-            scenario_results.append({r["creation_date"]: r["criteria"]})
-        # sort results
-        scenario_results.sort()
-        # 4 levels for the results
-        # 3: 4+ consecutive runs passing the success criteria
-        # 2: <4 successful consecutive runs but passing the criteria
-        # 1: close to pass the success criteria
-        # 0: 0% success, not passing
-        test_result_indicator = 0
-        nbTestOk = getNbtestOk(scenario_results)
-        # print "Nb test OK:"+ str(nbTestOk)
-        # check that we have at least 4 runs
-        if nbTestOk < 1:
-            test_result_indicator = 0
-        elif nbTestOk < 2:
-            test_result_indicator = 1
-        else:
-            # Test the last 4 run
-            if (len(scenario_results) > 3):
-                last4runResults = scenario_results[-4:]
-                if getNbtestOk(last4runResults):
-                    test_result_indicator = 3
-                else:
-                    test_result_indicator = 2
-            else:
-                test_result_indicator = 2
-    print "        >>>> Test indicator:" + str(test_result_indicator)
-    return test_result_indicator
-
-# ******************************************************************************
-# ******************************************************************************
-# ******************************************************************************
-# ******************************************************************************
-# ******************************************************************************
+testCases4Validation = []
+otherTestCases = []
 
 # init just tempest to get the list of scenarios
 # as all the scenarios run Tempest
-tempest = TestCase("Tempest", "functest", -1)
+tempest = tc.TestCase("Tempest", "functest", -1)
 
 # Retrieve the Functest configuration to detect which tests are relevant
 # according to the installer, scenario
-cf = "https://git.opnfv.org/cgit/functest/plain/ci/config_functest.yaml"
+# cf = "https://git.opnfv.org/cgit/functest/plain/ci/config_functest.yaml"
+cf = "https://git.opnfv.org/cgit/functest/plain/ci/testcases.yaml"
 response = requests.get(cf)
 functest_yaml_config = yaml.load(response.text)
 
 print "****************************************"
 print "*   Generating reporting.....          *"
-print ("*   Data retention = %s days           *" % PERIOD)
+print ("*   Data retention = %s days           *" % conf.PERIOD)
 print "*                                      *"
 print "****************************************"
 
+# Retrieve the test cases declared in the tiers
+config_tiers = functest_yaml_config.get("tiers")
+
+# we consider Tier 1 (smoke), 2 (sdn suites) and 3 (features)
+# to validate scenarios
+# Tiers > 3 are not used to validate scenarios,
+# but their results are displayed anyway
+# tricky thing for the API as some tests are Functest tests
+# while others are declared directly in the feature projects
+for tier in config_tiers:
+    if tier['order'] > 0 and tier['order'] < 3:
+        for case in tier['testcases']:
+            testCases4Validation.append(tc.TestCase(case['name'],
+                                                    "functest",
+                                                    case['dependencies']))
+    elif tier['order'] == 3:
+        for case in tier['testcases']:
+            testCases4Validation.append(tc.TestCase(case['name'],
+                                                    case['name'],
+                                                    case['dependencies']))
+    elif tier['order'] > 3:
+        for case in tier['testcases']:
+            otherTestCases.append(tc.TestCase(case['name'],
+                                              "functest",
+                                              case['dependencies']))
+
 # For all the versions
-for version in versions:
+for version in conf.versions:
     # For all the installers
-    for installer in installers:
+    for installer in conf.installers:
         # get scenarios
-        scenario_results = getScenarios(tempest, installer, version)
-        scenario_stats = getScenarioStats(scenario_results)
+        scenario_results = utils.getScenarios(tempest, installer, version)
+        scenario_stats = utils.getScenarioStats(scenario_results)
         items = {}
         scenario_result_criteria = {}
 
         # For all the scenarios get results
         for s, s_result in scenario_results.items():
-            testCases = []
             # Green or Red light for a given scenario
             nb_test_runnable_for_this_scenario = 0
             scenario_score = 0
 
-            # For each scenario declare the test cases
-            # Functest cases
-            for test_case in functest_test_list:
-                testCases.append(TestCase(test_case, "functest"))
-
-            # project/case
-            for test_case in companion_test_list:
-                test_split = test_case.split("/")
-                test_project = test_split[0]
-                test_case = test_split[1]
-                testCases.append(TestCase(test_case, test_project))
-
+            testCases2BeDisplayed = []
             # Check if test case is runnable / installer, scenario
+            # for the test cases used for scenario validation
             try:
-                for test_case in testCases:
-                    test_case.checkRunnable(installer, s, functest_yaml_config)
-                    # print "testcase %s is %s" % (test_case.getName(),
-                    #                              test_case.isRunnable)
+                print ("---------------------------------")
                 print ("installer %s, version %s, scenario %s:" %
                        (installer, version, s))
-                for testCase in testCases:
+
+                # 1) Manage the test cases for the scenario validation
+                # concretely Tiers 1-3
+                for test_case in testCases4Validation:
+                    test_case.checkRunnable(installer, s,
+                                            test_case.getConstraints())
+                    print ("testcase %s is %s" % (test_case.getName(),
+                                                  test_case.isRunnable))
                     time.sleep(1)
-                    if testCase.isRunnable:
+                    if test_case.isRunnable:
+                        dbName = test_case.getDbName()
+                        name = test_case.getName()
+                        project = test_case.getProject()
                         nb_test_runnable_for_this_scenario += 1
                         print (" Searching results for case %s " %
-                               (testCase.getName()))
-                        result = getResult(testCase, installer, s, version)
-                        testCase.setCriteria(result)
-                        items[s] = testCases
+                               (dbName))
+                        result = utils.getResult(dbName, installer, s, version)
+                        print " >>>> Test result=" + str(result)
+                        test_case.setCriteria(result)
+                        test_case.setIsRunnable(True)
+                        testCases2BeDisplayed.append(tc.TestCase(name,
+                                                                 project,
+                                                                 "",
+                                                                 result,
+                                                                 True,
+                                                                 1))
                         scenario_score = scenario_score + result
+
+                # 2) Manage the test cases for the scenario qualification
+                # concretely Tiers > 3
+                for test_case in otherTestCases:
+                    test_case.checkRunnable(installer, s,
+                                            test_case.getConstraints())
+                    print ("testcase %s is %s" % (test_case.getName(),
+                                                  test_case.isRunnable))
+                    time.sleep(1)
+                    if test_case.isRunnable:
+                        dbName = test_case.getDbName()
+                        name = test_case.getName()
+                        project = test_case.getProject()
+                        print (" Searching results for case %s " %
+                               (dbName))
+                        result = utils.getResult(dbName, installer, s, version)
+                        test_case.setCriteria(result)
+                        test_case.setIsRunnable(True)
+                        testCases2BeDisplayed.append(tc.TestCase(name,
+                                                                 project,
+                                                                 "",
+                                                                 result,
+                                                                 True,
+                                                                 4))
+
+                # store the whole list of test cases to be displayed
+                # once both loops are done (not per runnable case)
+                items[s] = testCases2BeDisplayed
             except:
-                print ("installer %s, version %s, scenario %s" %
+                print ("Error: installer %s, version %s, scenario %s" %
                        (installer, version, s))
                 print "No data available , error %s " % (sys.exc_info()[0])
 
+            # **********************************************
+            # Evaluate the results for scenario validation
+            # **********************************************
             # the validation criteria = nb runnable tests x 3
+            # because each test case can score 0, 1, 2 or 3
             scenario_criteria = nb_test_runnable_for_this_scenario * 3
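+            # e.g. 6 runnable test cases => scenario_criteria = 18
+            # (3 being the best indicator a single case can reach)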
             # if 0 runnable tests set criteria at a high value
             if scenario_criteria < 1:
-                scenario_criteria = MAX_SCENARIO_CRITERIA
+                scenario_criteria = conf.MAX_SCENARIO_CRITERIA
 
             s_score = str(scenario_score) + "/" + str(scenario_criteria)
             s_status = "KO"
@@ -333,14 +164,15 @@ for version in versions:
             else:
                 print ">>>>> scenario OK, save the information"
                 s_status = "OK"
-                with open("./release/" + version +
-                          "/validated_scenario_history.txt", "a") as f:
+                path_validation_file = ("./release/" + version +
+                                        "/validated_scenario_history.txt")
+                with open(path_validation_file, "a") as f:
                     time_format = "%Y-%m-%d %H:%M"
                     info = (datetime.datetime.now().strftime(time_format) +
                             ";" + installer + ";" + s + "\n")
                     f.write(info)
 
-            scenario_result_criteria[s] = ScenarioResult(s_status, s_score)
+            scenario_result_criteria[s] = sr.ScenarioResult(s_status, s_score)
             print "--------------------------"
 
         templateLoader = jinja2.FileSystemLoader(os.path.dirname
@@ -355,7 +187,7 @@ for version in versions:
                                      scenario_results=scenario_result_criteria,
                                      items=items,
                                      installer=installer,
-                                     period=PERIOD,
+                                     period=conf.PERIOD,
                                      version=version)
 
         with open("./release/" + version +
diff --git a/utils/test/reporting/functest/reportingUtils.py b/utils/test/reporting/functest/reportingUtils.py
new file mode 100644 (file)
index 0000000..f8d64aa
--- /dev/null
+++ b/utils/test/reporting/functest/reportingUtils.py
@@ -0,0 +1,135 @@
+#!/usr/bin/python
+#
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+from urllib2 import Request, urlopen, URLError
+import json
+import reportingConf
+
+
+def getApiResults(case, installer, scenario, version):
+    # default value returned when the API call fails
+    results = {'test_results': None}
+    # to remove proxy (to be removed at the end for local test only)
+    # proxy_handler = urllib2.ProxyHandler({})
+    # opener = urllib2.build_opener(proxy_handler)
+    # urllib2.install_opener(opener)
+    # url = "http://127.0.0.1:8000/results?case=" + case + \
+    #       "&period=30&installer=" + installer
+    url = ("http://testresults.opnfv.org/testapi/results?case=" + case +
+           "&period=" + str(reportingConf.PERIOD) + "&installer=" + installer +
+           "&scenario=" + scenario + "&version=" + version)
+    request = Request(url)
+
+    try:
+        response = urlopen(request)
+        k = response.read()
+        results = json.loads(k)
+    except URLError, e:
+        print 'Got an error code:', e
+
+    return results
+
+
+def getScenarios(case, installer, version):
+
+    case = case.getName()
+    url = ("http://testresults.opnfv.org/testapi/results?case=" + case +
+           "&period=" + str(reportingConf.PERIOD) + "&installer=" + installer +
+           "&version=" + version)
+    request = Request(url)
+
+    try:
+        response = urlopen(request)
+        k = response.read()
+        results = json.loads(k)
+    except URLError, e:
+        print 'Got an error code:', e
+
+    test_results = results['test_results']
+
+    # initialise to avoid a NameError when no result is returned
+    scenario_results = {}
+
+    if test_results is not None:
+        test_results.reverse()
+
+        for r in test_results:
+            # Retrieve all the scenarios per installer
+            if not r['scenario'] in scenario_results.keys():
+                scenario_results[r['scenario']] = []
+            scenario_results[r['scenario']].append(r)
+
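+    # at this point scenario_results maps each scenario seen over the
+    # period to its raw results, e.g.
+    # {'os-odl_l2-nofeature-ha': [r1, r2, ...], 'os-nosdn-nofeature-ha': [...]}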
+    return scenario_results
+
+
+def getScenarioStats(scenario_results):
+    scenario_stats = {}
+    for k, v in scenario_results.iteritems():
+        scenario_stats[k] = len(v)
+
+    return scenario_stats
+
+
+def getNbtestOk(results):
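+    # results is a list of {creation_date: criteria} dicts; count the
+    # entries whose criteria contains "passed", e.g.
+    # [{'d1': 'passed'}, {'d2': 'failed'}] => 1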
+    nb_test_ok = 0
+    for r in results:
+        for k, v in r.iteritems():
+            try:
+                if "passed" in v:
+                    nb_test_ok += 1
+            except:
+                print "Cannot retrieve test status"
+    return nb_test_ok
+
+
+def getResult(case, installer, scenario, version):
+
+    # retrieve raw results (case is the name used in the results DB)
+    results = getApiResults(case, installer, scenario, version)
+    # let's concentrate on test results only
+    test_results = results['test_results']
+
+    # default indicator when no results are found
+    test_result_indicator = 0
+
+    # if results found, analyze them
+    if test_results is not None:
+        test_results.reverse()
+
+        scenario_results = []
+
+        # print " ---------------- "
+        # print test_results
+        # print " ---------------- "
+        # print "nb of results:" + str(len(test_results))
+
+        for r in test_results:
+            # print r["creation_date"]
+            # print r["criteria"]
+            scenario_results.append({r["creation_date"]: r["criteria"]})
+        # sort results
+        scenario_results.sort()
+        # 4 levels for the results
+        # 3: 4+ consecutive runs passing the success criteria
+        # 2: <4 successful consecutive runs but passing the criteria
+        # 1: close to pass the success criteria
+        # 0: 0% success, not passing
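+        # e.g. 10 results with 5 "passed" and the last 4 runs all
+        # passing => test_result_indicator = 3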
+        nbTestOk = getNbtestOk(scenario_results)
+        # print "Nb test OK:"+ str(nbTestOk)
+        # grade according to the number of successful runs
+        if nbTestOk < 1:
+            test_result_indicator = 0
+        elif nbTestOk < 2:
+            test_result_indicator = 1
+        else:
+            # Test the last 4 runs
+            if (len(scenario_results) > 3):
+                last4runResults = scenario_results[-4:]
+                # indicator 3 only if the last 4 runs all passed
+                if getNbtestOk(last4runResults) == 4:
+                    test_result_indicator = 3
+                else:
+                    test_result_indicator = 2
+            else:
+                test_result_indicator = 2
+    return test_result_indicator
diff --git a/utils/test/reporting/functest/scenarioResult.py b/utils/test/reporting/functest/scenarioResult.py
new file mode 100644 (file)
index 0000000..743346a
--- /dev/null
+++ b/utils/test/reporting/functest/scenarioResult.py
@@ -0,0 +1,20 @@
+#!/usr/bin/python
+#
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+
+
+class ScenarioResult(object):
+    def __init__(self, status, score=0):
+        self.status = status
+        self.score = score
+
+    def getStatus(self):
+        return self.status
+
+    def getScore(self):
+        return self.score
diff --git a/utils/test/reporting/functest/template/index-status-tmpl.html b/utils/test/reporting/functest/template/index-status-tmpl.html
index 7a0656b..89a1d15 100644 (file)
                     <table class="table">
                         <tr>
                             {% for test in items[scenario] -%}
-                            <th>{{test.getName() }}</th>
+                            <th>
+                            {% if test.getCriteria() > -1 -%}
+                            {{test.getDbName() }}
+                            {%- endif %}
+                            {% if test.getTier() > 3 -%}
+                            *
+                            {%- endif %}
+                            </th>
                             {%- endfor %}
                         </tr>
                         <tr class="tr-weather-weather">
-                            {% for test in items[scenario] -%}
-                            {% if test.isRunnable is sameas false -%}
-                                <td>N.R</td>
-                            {% elif test.getCriteria() > 2 -%}
+                            {% for test in items[scenario] -%}
+                            {% if test.getCriteria() > 2 -%}
                                 <td><img src="../../img/weather-clear.png"></td>
                             {%- elif test.getCriteria() > 1 -%}
                                 <td><img src="../../img/weather-few-clouds.png"></td>
                             {%- elif test.getCriteria() > 0 -%}
                                 <td><img src="../../img/weather-overcast.png"></td>
-                            {%- else -%}
+                            {%- elif test.getCriteria() > -1 -%}
                                 <td><img src="../../img/weather-storm.png"></td>
                             {%- endif %}
                             {%- endfor %}
                 </div>
         </div>
         {%- endfor %}
+    *: not used for scenario validation
     </div>
     <div class="col-md-1"></div>
 </div>
diff --git a/utils/test/reporting/functest/testCase.py b/utils/test/reporting/functest/testCase.py
new file mode 100644 (file)
index 0000000..f6ab95a
--- /dev/null
+++ b/utils/test/reporting/functest/testCase.py
@@ -0,0 +1,106 @@
+#!/usr/bin/python
+#
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+import re
+
+
+class TestCase(object):
+
+    def __init__(self, name, project, constraints,
+                 criteria=-1, isRunnable=True, tier=-1):
+        self.name = name
+        self.project = project
+        self.constraints = constraints
+        self.criteria = criteria
+        self.isRunnable = isRunnable
+        self.tier = tier
+
+    def getName(self):
+        return self.name
+
+    def getProject(self):
+        return self.project
+
+    def getConstraints(self):
+        return self.constraints
+
+    def getCriteria(self):
+        return self.criteria
+
+    def getTier(self):
+        return self.tier
+
+    def setCriteria(self, criteria):
+        self.criteria = criteria
+
+    def setIsRunnable(self, isRunnable):
+        self.isRunnable = isRunnable
+
+    def checkRunnable(self, installer, scenario, config):
+        # Re-use the Functest declarations: config carries the
+        # constraints declared for the case in testcases.yaml
+        is_runnable = True
+        config_test = config
+        # print " *********************** "
+        # print config_test
+        # print " ---------------------- "
+        # print "case = " + self.name
+        # print "installer = " + installer
+        # print "scenario = " + scenario
+        # print "project = " + self.project
+
+        # Retrieve the test execution context
+        test_execution_context = {"installer": installer,
+                                  "scenario": scenario}
+
+        # By default we assume that all the tests are always runnable...
+        # if test_env not empty => dependencies to be checked
+        if config_test is not None and len(config_test) > 0:
+            # possible criteria = ["installer", "scenario"]
+            # consider the test constraints from the config file
+            # and compare them with the CI context
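+            # e.g. the constraint {'scenario': 'odl'} matches the CI
+            # scenario 'os-odl_l2-nofeature-ha' => the case is runnable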
+            for criteria in config_test:
+                if re.search(config_test[criteria],
+                             test_execution_context[criteria]) is None:
+                    # print "Test "+ test + " cannot be run on the environment"
+                    is_runnable = False
+        # print is_runnable
+        self.isRunnable = is_runnable
+
+    def toString(self):
+        testcase = ("Name=" + self.name + ";Criteria=" + str(self.criteria)
+                    + ";Project=" + self.project + ";Constraints="
+                    + str(self.constraints) + ";IsRunnable="
+                    + str(self.isRunnable))
+        return testcase
+
+    def getDbName(self):
+        # Correspondence between the name of the test case in the config
+        # and its name in the results DB
+        # ideally we should modify the DB to avoid such an interface...
+        # '<name in the config>': '<name in the DB>'
+        test_match_matrix = {'healthcheck': 'healthcheck',
+                             'vping_ssh': 'vPing',
+                             'vping_userdata': 'vPing_userdata',
+                             'odl': 'ODL',
+                             'onos': 'ONOS',
+                             'ovno': 'ovno',
+                             'tempest_smoke_serial': 'Tempest',
+                             'tempest_full_parallel': 'tempest_full_parallel',
+                             'rally_sanity': 'Rally',
+                             'bgpvpn': 'bgpvpn',
+                             'rally_full': 'rally_full',
+                             'vims': 'vIMS',
+                             'doctor': 'doctor-notification',
+                             'promise': 'promise'
+                             }
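+        # e.g. self.name 'vping_ssh' maps to 'vPing' in the results DB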
+        try:
+            return test_match_matrix[self.name]
+        except KeyError:
+            # no known alias in the results DB
+            return "unknown"