Adapt functest testcase to API refactoring 91/15291/2
authorMorgan Richomme <morgan.richomme@orange.com>
Wed, 8 Jun 2016 13:56:54 +0000 (15:56 +0200)
committerMorgan Richomme <morgan.richomme@orange.com>
Wed, 8 Jun 2016 14:43:47 +0000 (16:43 +0200)
JIRA: FUNCTEST-303

Change-Id: Ia276d9ca6e8d62b496c3b5f81561b14b02c43fd7
Signed-off-by: Morgan Richomme <morgan.richomme@orange.com>
12 files changed:
ci/config_functest.yaml
testcases/Controllers/ODL/odlreport2db.py
testcases/Controllers/ONOS/Teston/onosfunctest.py
testcases/OpenStack/rally/run_rally-cert.py
testcases/OpenStack/tempest/run_tempest.py
testcases/OpenStack/vPing/vPing_ssh.py
testcases/OpenStack/vPing/vPing_userdata.py
testcases/features/bgpvpn.py
testcases/features/doctor.py
testcases/features/promise.py
testcases/vIMS/vIMS.py
utils/functest_utils.py

index ab620f0..69f263f 100644 (file)
@@ -155,7 +155,7 @@ promise:
         router_name: promise-router
 
 results:
-    test_db_url: http://testresults.opnfv.org/testapi
+    test_db_url: http://testresults.opnfv.org/test/api/v1
 
 # to be maintained...
 # the execution order is important as some tests may be more destructive than others
index 8eb78b1..0288084 100644 (file)
@@ -18,6 +18,7 @@
 # Later, the VM2 boots then execute cloud-init to ping VM1.
 # After successful ping, both the VMs are deleted.
 # 0.2: measure test duration and publish results under json format
+# 0.3: adapt push to DB after Test API refactoring
 #
 #
 
@@ -25,6 +26,7 @@ import getopt
 import json
 import os
 import sys
+import time
 import xmltodict
 import yaml
 
@@ -125,19 +127,19 @@ def main(argv):
         functest_yaml = yaml.safe_load(f)
         f.close()
 
-    database = functest_yaml.get("results").get("test_db_url")
-    build_tag = functest_utils.get_build_tag()
-
     try:
         # example:
         # python odlreport2db.py -x ~/Pictures/Perso/odl/output3.xml
         #                        -i fuel
         #                        -p opnfv-jump-2
         #                        -s os-odl_l2-ha
-        version = functest_utils.get_version()
 
         # success criteria for ODL = 100% of tests OK
-        status = "failed"
+        status = "FAIL"
+        # TODO: since part of the tests run earlier in bash,
+        # start and stop times have no real meaning here
+        start_time = time.time()
+        stop_time = start_time
         try:
             tests_passed = 0
             tests_failed = 0
@@ -148,19 +150,18 @@ def main(argv):
                     tests_failed += 1
 
             if (tests_failed < 1):
-                status = "passed"
+                status = "PASS"
         except:
             print("Unable to set criteria" % sys.exc_info()[0])
-        functest_utils.push_results_to_db(database,
-                                          "functest",
+
+        functest_utils.push_results_to_db("functest",
                                           data['case_name'],
                                           None,
-                                          data['pod_name'],
-                                          version,
-                                          scenario,
+                                          start_time,
+                                          stop_time,
                                           status,
-                                          build_tag,
                                           data)
+
     except:
         print("Error pushing results into Database '%s'" % sys.exc_info()[0])
 
index 07ecacc..38935c5 100644 (file)
@@ -164,7 +164,8 @@ def CleanOnosTest():
 
 
 def main():
-
+    start_time = time.time()
+    stop_time = start_time
     DownloadCodes()
     if args.installer == "joid":
         logger.debug("Installer is Joid")
@@ -175,11 +176,10 @@ def main():
     RunScript("FUNCvirNetNBL3")
 
     try:
-        logger.debug("Push result into DB")
+        logger.debug("Push ONOS results into DB")
         # TODO check path result for the file
-        scenario = functest_utils.get_scenario(logger)
-        version = functest_utils.get_version(logger)
         result = GetResult()
+        stop_time = time.time()
 
         # ONOS success criteria = all tests OK
         # i.e. FUNCvirNet & FUNCvirNetL3
@@ -191,13 +191,14 @@ def main():
         except:
             logger.error("Unable to set ONOS criteria")
 
-        pod_name = functest_utils.get_pod_name(logger)
-        build_tag = functest_utils.get_build_tag(logger)
-        functest_utils.push_results_to_db(TEST_DB,
-                                          "functest",
+        functest_utils.push_results_to_db("functest",
                                           "ONOS",
-                                          logger, pod_name, version, scenario,
-                                          status, build_tag, payload=result)
+                                          logger,
+                                          start_time,
+                                          stop_time,
+                                          status,
+                                          result)
+
     except:
         logger.error("Error pushing results into Database")
 
index c3dd304..6bb29b8 100755 (executable)
@@ -18,7 +18,6 @@ import iniparse
 import json
 import os
 import re
-import requests
 import subprocess
 import time
 import yaml
@@ -125,26 +124,6 @@ CINDER_VOLUME_TYPE_NAME = "volume_test"
 SUMMARY = []
 
 
-def push_results_to_db(case, payload, criteria):
-
-    url = TEST_DB + "/results"
-    installer = functest_utils.get_installer_type(logger)
-    scenario = functest_utils.get_scenario(logger)
-    version = functest_utils.get_version(logger)
-    pod_name = functest_utils.get_pod_name(logger)
-
-    # evalutate success criteria
-
-    params = {"project_name": "functest", "case_name": case,
-              "pod_name": pod_name, "installer": installer,
-              "version": version, "scenario": scenario,
-              "criteria": criteria, "details": payload}
-
-    headers = {'Content-Type': 'application/json'}
-    r = requests.post(url, data=json.dumps(params), headers=headers)
-    logger.debug(r)
-
-
 def get_task_id(cmd_raw):
     """
     get task id from command rally result
@@ -303,6 +282,8 @@ def run_task(test_name):
     #
     global SUMMARY
     logger.info('Starting test scenario "{}" ...'.format(test_name))
+    start_time = time.time()
+    stop_time = start_time
 
     task_file = '{}task.yaml'.format(RALLY_DIR)
     if not os.path.exists(task_file):
@@ -376,13 +357,23 @@ def run_task(test_name):
 
     # Push results in payload of testcase
     if args.report:
-        logger.debug("Push result into DB")
-        push_results_to_db("Rally_details", json_data, status)
+        stop_time = time.time()
+        logger.debug("Push Rally detailed results into DB")
+        functest_utils.push_results_to_db("functest",
+                                          "Rally_details",
+                                          logger,
+                                          start_time,
+                                          stop_time,
+                                          status,
+                                          json_data)
 
 
 def main():
     global SUMMARY
     global network_dict
+    start_time = time.time()
+    stop_time = start_time
+
     # configure script
     if not (args.test_name in tests):
         logger.error('argument not valid')
@@ -482,6 +473,7 @@ def main():
               "+===================+============+===============+===========+"
               "\n")
     payload = []
+    stop_time = time.time()
 
     # for each scenario we draw a row for the table
     total_duration = 0.0
@@ -538,8 +530,13 @@ def main():
 
     if args.report:
         logger.debug("Pushing Rally summary into DB...")
-        push_results_to_db("Rally", payload, status)
-
+        functest_utils.push_results_to_db("functest",
+                                          "Rally",
+                                          logger,
+                                          start_time,
+                                          stop_time,
+                                          status,
+                                          payload)
     if args.noclean:
         exit(0)
 
index d8a8a1a..46b0189 100644 (file)
 # http://www.apache.org/licenses/LICENSE-2.0
 #
 import argparse
-import json
 import os
 import re
-import requests
 import shutil
 import subprocess
+import sys
 import time
 import yaml
 import ConfigParser
@@ -114,27 +113,6 @@ def get_info(file_result):
     logger.debug("duration:" + duration)
 
 
-def push_results_to_db(case, payload, criteria):
-
-    # TODO move DB creds into config file
-    url = TEST_DB + "/results"
-    installer = ft_utils.get_installer_type(logger)
-    scenario = ft_utils.get_scenario(logger)
-    version = ft_utils.get_version(logger)
-    pod_name = ft_utils.get_pod_name(logger)
-
-    logger.info("Pushing results to DB: '%s'." % url)
-
-    params = {"project_name": "functest", "case_name": case,
-              "pod_name": str(pod_name), 'installer': installer,
-              "version": version, "scenario": scenario, "criteria": criteria,
-              'details': payload}
-    headers = {'Content-Type': 'application/json'}
-
-    r = requests.post(url, data=json.dumps(params), headers=headers)
-    logger.debug(r)
-
-
 def create_tempest_resources():
     ks_creds = os_utils.get_credentials("keystone")
     logger.debug("Creating tenant and user for Tempest suite")
@@ -253,6 +231,8 @@ def run_tempest(OPTION):
     # :return: void
     #
     logger.info("Starting Tempest test suite: '%s'." % OPTION)
+    start_time = time.time()
+    stop_time = start_time
     cmd_line = "rally verify start " + OPTION + " --system-wide"
 
     header = ("Tempest environment:\n"
@@ -293,11 +273,12 @@ def run_tempest(OPTION):
     dur_sec_float = float(duration.split(':')[2])
     dur_sec_int = int(round(dur_sec_float, 0))
     dur_sec_int = dur_sec_int + 60 * dur_min
-
+    stop_time = time.time()
     # Push results in payload of testcase
     if args.report:
+        logger.debug("Pushing tempest results into DB...")
         # Note criteria hardcoded...TODO move to testcase.yaml
-        status = "failed"
+        status = "FAIL"
         try:
             diff = (int(num_tests) - int(num_failures))
             success_rate = 100 * diff / int(num_tests)
@@ -306,7 +287,7 @@ def run_tempest(OPTION):
 
         # For Tempest we assume that the success rate is above 90%
         if success_rate >= 90:
-            status = "passed"
+            status = "PASS"
 
         # add the test in error in the details sections
         # should be possible to do it during the test
@@ -322,9 +303,18 @@ def run_tempest(OPTION):
                         "tests": int(num_tests), "failures": int(num_failures),
                         "errors": error_logs}
         logger.info("Results: " + str(json_results))
-
-        logger.debug("Push result into DB")
-        push_results_to_db("Tempest", json_results, status)
+        # TODO split Tempest smoke and full
+        try:
+            ft_utils.push_results_to_db("functest",
+                                        "Tempest",
+                                        logger,
+                                        start_time,
+                                        stop_time,
+                                        status,
+                                        json_results)
+        except:
+            logger.error("Error pushing results into Database '%s'"
+                         % sys.exc_info()[0])
 
 
 def main():
index 1188784..2a417eb 100644 (file)
@@ -11,7 +11,7 @@
 # Later, the VM2 boots then execute cloud-init to ping VM1.
 # After successful ping, both the VMs are deleted.
 # 0.2: measure test duration and publish results under json format
-#
+# 0.3: adapt push to DB after Test API refactoring
 #
 import argparse
 import datetime
@@ -19,6 +19,7 @@ import os
 import paramiko
 import pprint
 import re
+import sys
 import time
 import yaml
 from scp import SCPClient
@@ -176,30 +177,6 @@ def create_security_group(neutron_client):
     return sg_id
 
 
-def push_results(start_time_ts, duration, status):
-    try:
-        logger.debug("Pushing result into DB...")
-        scenario = functest_utils.get_scenario(logger)
-        version = functest_utils.get_version(logger)
-        criteria = "failed"
-        test_criteria = functest_utils.get_criteria_by_test("vping_ssh")
-        if eval(test_criteria):  # evaluates the regex 'status == "PASS"' 
-            criteria = "passed"
-        pod_name = functest_utils.get_pod_name(logger)
-        build_tag = functest_utils.get_build_tag(logger)
-        functest_utils.push_results_to_db(TEST_DB,
-                                          "functest",
-                                          "vPing",
-                                          logger, pod_name, version, scenario,
-                                          criteria, build_tag,
-                                          payload={'timestart': start_time_ts,
-                                                   'duration': duration,
-                                                   'status': status})
-    except:
-        logger.error("Error pushing results into Database '%s'"
-                     % sys.exc_info()[0])
-
-
 def main():
 
     creds_nova = openstack_utils.get_credentials("nova")
@@ -268,10 +245,10 @@ def main():
             server.delete()
 
     # boot VM 1
-    start_time_ts = time.time()
-    end_time_ts = start_time_ts
+    start_time = time.time()
+    stop_time = start_time
     logger.info("vPing Start Time:'%s'" % (
-        datetime.datetime.fromtimestamp(start_time_ts).strftime(
+        datetime.datetime.fromtimestamp(start_time).strftime(
             '%Y-%m-%d %H:%M:%S')))
 
     logger.info("Creating instance '%s'..." % NAME_VM_1)
@@ -409,10 +386,12 @@ def main():
 
     logger.info("Waiting for ping...")
     sec = 0
+    stop_time = time.time()
     duration = 0
 
     cmd = '~/ping.sh ' + test_ip
     flag = False
+
     while True:
         time.sleep(1)
         (stdin, stdout, stderr) = ssh.exec_command(cmd)
@@ -423,8 +402,8 @@ def main():
                 logger.info("vPing detected!")
 
                 # we consider start time at VM1 booting
-                end_time_ts = time.time()
-                duration = round(end_time_ts - start_time_ts, 1)
+                stop_time = time.time()
+                duration = round(stop_time - start_time, 1)
                 logger.info("vPing duration:'%s' s." % duration)
                 EXIT_CODE = 0
                 flag = True
@@ -440,7 +419,9 @@ def main():
         sec += 1
 
     test_status = "FAIL"
-    if EXIT_CODE == 0:
+    test_criteria = functest_utils.get_criteria_by_test("vping_ssh")
+
+    if eval(test_criteria):
         logger.info("vPing OK")
         test_status = "PASS"
     else:
@@ -448,7 +429,20 @@ def main():
         logger.error("vPing FAILED")
 
     if args.report:
-        push_results(start_time_ts, duration, test_status)
+        try:
+            logger.debug("Pushing vPing SSH results into DB...")
+            functest_utils.push_results_to_db("functest",
+                                              "vPing",
+                                              logger,
+                                              start_time,
+                                              stop_time,
+                                              test_status,
+                                              details={'timestart': start_time,
+                                                       'duration': duration,
+                                                       'status': test_status})
+        except:
+            logger.error("Error pushing results into Database '%s'"
+                         % sys.exc_info()[0])
 
     exit(EXIT_CODE)
 
index 2b29631..5b7d2d9 100644 (file)
@@ -11,6 +11,7 @@
 # Later, the VM2 boots then execute cloud-init to ping VM1.
 # After successful ping, both the VMs are deleted.
 # 0.2: measure test duration and publish results under json format
+# 0.3: adapt push to DB after Test API refactoring
 #
 #
 
@@ -18,6 +19,7 @@ import argparse
 import datetime
 import os
 import pprint
+import sys
 import time
 import yaml
 
@@ -174,29 +176,6 @@ def create_security_group(neutron_client):
     return sg_id
 
 
-def push_results(start_time_ts, duration, test_status):
-    try:
-        logger.debug("Pushing result into DB...")
-        scenario = functest_utils.get_scenario(logger)
-        version = functest_utils.get_version(logger)
-        criteria = "failed"
-        if test_status == "OK":
-            criteria = "passed"
-        pod_name = functest_utils.get_pod_name(logger)
-        build_tag = functest_utils.get_build_tag(logger)
-        functest_utils.push_results_to_db(TEST_DB,
-                                          "functest",
-                                          "vPing_userdata",
-                                          logger, pod_name, version, scenario,
-                                          criteria, build_tag,
-                                          payload={'timestart': start_time_ts,
-                                                   'duration': duration,
-                                                   'status': test_status})
-    except:
-        logger.error("Error pushing results into Database '%s'"
-                     % sys.exc_info()[0])
-
-
 def main():
 
     creds_nova = openstack_utils.get_credentials("nova")
@@ -268,10 +247,10 @@ def main():
     # tune (e.g. flavor, images, network) to your specific
     # openstack configuration here
     # we consider start time at VM1 booting
-    start_time_ts = time.time()
-    end_time_ts = start_time_ts
+    start_time = time.time()
+    stop_time = start_time
     logger.info("vPing Start Time:'%s'" % (
-        datetime.datetime.fromtimestamp(start_time_ts).strftime(
+        datetime.datetime.fromtimestamp(start_time).strftime(
             '%Y-%m-%d %H:%M:%S')))
 
     # create VM
@@ -336,6 +315,7 @@ def main():
     metadata_tries = 0
     console_log = vm2.get_console_output()
     duration = 0
+    stop_time = time.time()
 
     while True:
         time.sleep(1)
@@ -346,8 +326,8 @@ def main():
             logger.info("vPing detected!")
 
             # we consider start time at VM1 booting
-            end_time_ts = time.time()
-            duration = round(end_time_ts - start_time_ts, 1)
+            stop_time = time.time()
+            duration = round(stop_time - start_time, 1)
             logger.info("vPing duration:'%s'" % duration)
             EXIT_CODE = 0
             break
@@ -379,7 +359,20 @@ def main():
         logger.error("vPing FAILED")
 
     if args.report:
-        push_results(start_time_ts, duration, test_status)
+        try:
+            logger.debug("Pushing vPing userdata results into DB...")
+            functest_utils.push_results_to_db("functest",
+                                              "vPing_userdata",
+                                              logger,
+                                              start_time,
+                                              stop_time,
+                                              test_status,
+                                              details={'timestart': start_time,
+                                                       'duration': duration,
+                                                       'status': test_status})
+        except:
+            logger.error("Error pushing results into Database '%s'"
+                         % sys.exc_info()[0])
 
     exit(EXIT_CODE)
 
index e3db920..bac2559 100644 (file)
@@ -12,6 +12,7 @@
 import argparse
 import os
 import re
+import time
 import yaml
 import ConfigParser
 
@@ -41,6 +42,7 @@ logger = ft_logger.Logger("bgpvpn").getLogger()
 
 def main():
     logger.info("Running BGPVPN Tempest test case...")
+    start_time = time.time()
 
     cmd = 'cd ' + BGPVPN_REPO + ';pip install --no-deps -e .'
     ft_utils.execute_command(cmd, logger, exit_on_error=False)
@@ -93,8 +95,9 @@ def main():
                     "errors": error_logs}
 
     logger.info("Results: " + str(json_results))
-    criteria = "failed"
+    criteria = "FAIL"
     # criteria = success rate = 100% (i.e all tests passed)
+    # TODO use criteria defined in config file
     criteria_run = int(tests)
     if not failed:
         criteria_failed = 0
@@ -102,20 +105,19 @@ def main():
         criteria_failed = int(failed)
 
     if criteria_run > 0 and criteria_failed < 1:
-        criteria = "passed"
+        criteria = "PASS"
 
     # Push results in payload of testcase
     if args.report:
-        logger.debug("Push result into DB")
-        url = TEST_DB_URL
-        scenario = ft_utils.get_scenario(logger)
-        version = ft_utils.get_version(logger)
-        pod_name = ft_utils.get_pod_name(logger)
-        build_tag = ft_utils.get_build_tag(logger)
-
-        ft_utils.push_results_to_db(url, "sdnvpn", "bgpvpn_api", logger,
-                                    pod_name, version, scenario, criteria,
-                                    build_tag, json_results)
+        logger.debug("Push bgpvpn results into DB")
+        stop_time = time.time()
+        ft_utils.push_results_to_db("sdnvpn",
+                                    "bgpvpn_api",
+                                    logger,
+                                    start_time,
+                                    stop_time,
+                                    criteria,
+                                    json_results)
 
 if __name__ == '__main__':
     main()
index ac68430..c7fc848 100644 (file)
@@ -34,12 +34,12 @@ logger = ft_logger.Logger("doctor").getLogger()
 
 def main():
     cmd = 'cd %s/tests && ./run.sh' % DOCTOR_REPO
-    start_time_ts = time.time()
+    start_time = time.time()
 
     ret = functest_utils.execute_command(cmd, logger, exit_on_error=False)
 
-    end_time_ts = time.time()
-    duration = round(end_time_ts - start_time_ts, 1)
+    stop_time = time.time()
+    duration = round(stop_time - start_time, 1)
     if ret:
         logger.info("doctor OK")
         test_status = 'OK'
@@ -48,7 +48,7 @@ def main():
         test_status = 'NOK'
 
     details = {
-        'timestart': start_time_ts,
+        'timestart': start_time,
         'duration': duration,
         'status': test_status,
     }
@@ -61,7 +61,7 @@ def main():
     if details['status'] == "OK":
         status = "passed"
 
-    logger.info("Pushing result: TEST_DB_URL=%(db)s pod_name=%(pod)s "
+    logger.info("Pushing Doctor results: TEST_DB_URL=%(db)s pod_name=%(pod)s "
                 "version=%(v)s scenario=%(s)s criteria=%(c)s details=%(d)s" % {
                     'db': TEST_DB_URL,
                     'pod': pod_name,
@@ -71,11 +71,13 @@ def main():
                     'b': build_tag,
                     'd': details,
                 })
-    functest_utils.push_results_to_db(TEST_DB_URL,
-                                      'doctor', 'doctor-notification',
-                                      logger, pod_name, version, scenario,
-                                      status, build_tag, details)
-
+    functest_utils.push_results_to_db("doctor",
+                                      "doctor-notification",
+                                      logger,
+                                      start_time,
+                                      stop_time,
+                                      status,
+                                      details)
 
 if __name__ == '__main__':
     main()
index c74c7cf..7f03464 100644 (file)
@@ -12,8 +12,8 @@
 import argparse
 import json
 import os
-import requests
 import subprocess
+import time
 import yaml
 
 import keystoneclient.v2_0.client as ksclient
@@ -71,6 +71,7 @@ logger = ft_logger.Logger("promise").getLogger()
 
 
 def main():
+    start_time = time.time()
     ks_creds = openstack_utils.get_credentials("keystone")
     nv_creds = openstack_utils.get_credentials("nova")
     nt_creds = openstack_utils.get_credentials("neutron")
@@ -235,33 +236,23 @@ def main():
                    start_time, end_time, duration))
 
     if args.report:
-        pod_name = functest_utils.get_pod_name(logger)
-        installer = functest_utils.get_installer_type(logger)
-        scenario = functest_utils.get_scenario(logger)
-        version = functest_utils.get_version(logger)
-        build_tag = functest_utils.get_build_tag(logger)
-        # git_version = functest_utils.get_git_branch(PROMISE_REPO)
-        url = TEST_DB + "/results"
-
+        stop_time = time.time()
         json_results = {"timestart": start_time, "duration": duration,
                         "tests": int(tests), "failures": int(failures)}
-        logger.debug("Results json: " + str(json_results))
+        logger.debug("Promise Results json: " + str(json_results))
 
         # criteria for Promise in Release B was 100% of tests OK
-        status = "failed"
+        status = "FAIL"
         if int(tests) > 32 and int(failures) < 1:
-            status = "passed"
-
-        params = {"project_name": "promise", "case_name": "promise",
-                  "pod_name": str(pod_name), 'installer': installer,
-                  "version": version, "scenario": scenario,
-                  "criteria": status, "build_tag": build_tag,
-                  'details': json_results}
-        headers = {'Content-Type': 'application/json'}
-
-        logger.info("Pushing results to DB...")
-        r = requests.post(url, data=json.dumps(params), headers=headers)
-        logger.debug(r)
+            status = "PASS"
+
+        functest_utils.push_results_to_db("promise",
+                                          "promise",
+                                          logger,
+                                          start_time,
+                                          stop_time,
+                                          status,
+                                          json_results)
 
 
 if __name__ == '__main__':
index 2430af1..3cdbab9 100644 (file)
@@ -115,33 +115,22 @@ def download_and_add_image_on_glance(glance, image_name, image_url):
 def step_failure(step_name, error_msg):
     logger.error(error_msg)
     set_result(step_name, 0, error_msg)
-    status = "failed"
+    status = "FAIL"
+    # in case of failure, start and stop times are not meaningful
+    start_time = time.time()
+    stop_time = start_time
     if step_name == "sig_test":
-        status = "passed"
-    push_results(status)
+        status = "PASS"
+    functest_utils.push_results_to_db("functest",
+                                      "vIMS",
+                                      logger,
+                                      start_time,
+                                      stop_time,
+                                      status,
+                                      RESULTS)
     exit(-1)
 
 
-def push_results(status):
-    if args.report:
-        logger.debug("Pushing results to DB....")
-
-        scenario = functest_utils.get_scenario(logger)
-        version = functest_utils.get_version(logger)
-        pod_name = functest_utils.get_pod_name(logger)
-        build_tag = functest_utils.get_build_tag(logger)
-
-        functest_utils.push_results_to_db(db_url=DB_URL,
-                                          project="functest",
-                                          case_name="vIMS",
-                                          logger=logger, pod_name=pod_name,
-                                          version=version,
-                                          scenario=scenario,
-                                          criteria=status,
-                                          build_tag=build_tag,
-                                          payload=RESULTS)
-
-
 def set_result(step_name, duration=0, result=""):
     RESULTS[step_name] = {'duration': duration, 'result': result}
 
@@ -245,14 +234,22 @@ def test_clearwater():
         # success criteria for vIMS (for Brahmaputra)
         # - orchestrator deployed
         # - VNF deployed
-        status = "failed"
+        # TODO use test criteria defined in config file
+        status = "FAIL"
         try:
             if (RESULTS['orchestrator']['duration'] > 0 and
                     RESULTS['vIMS']['duration'] > 0):
-                status = "passed"
+                status = "PASS"
         except:
             logger.error("Unable to set test status")
-        push_results(status)
+
+        functest_utils.push_results_to_db("functest",
+                                          "vIMS",
+                                          logger,
+                                          start_time_ts,
+                                          end_time_ts,
+                                          status,
+                                          RESULTS)
 
         try:
             os.remove(VIMS_TEST_DIR + "temp.json")
index b43a63c..7b68244 100644 (file)
@@ -149,17 +149,35 @@ def get_build_tag(logger=None):
     return build_tag
 
 
-def push_results_to_db(db_url, project, case_name, logger, pod_name,
-                       version, scenario, criteria, build_tag, payload):
+def get_db_url(logger=None):
+    """
+    Returns DB URL
+    """
+    with open(os.environ["CONFIG_FUNCTEST_YAML"]) as f:
+        functest_yaml = yaml.safe_load(f)
+    f.close()
+    db_url = functest_yaml.get("results").get("test_db_url")
+    return db_url
+
+
+def push_results_to_db(project, case_name, logger,
+                       start_date, stop_date, criteria, details):
     """
     POST results to the Result target DB
     """
-    url = db_url + "/results"
+    # Retrieve params from CI and conf
+    url = get_db_url(logger) + "/results"
     installer = get_installer_type(logger)
+    scenario = get_scenario(logger)
+    version = get_version(logger)
+    pod_name = get_pod_name(logger)
+    build_tag = get_build_tag(logger)
+
     params = {"project_name": project, "case_name": case_name,
               "pod_name": pod_name, "installer": installer,
               "version": version, "scenario": scenario, "criteria": criteria,
-              "build_tag": build_tag, "details": payload}
+              "build_tag": build_tag, "start_date": start_date,
+              "stop_date": stop_date, "details": details}
 
     headers = {'Content-Type': 'application/json'}
     try:
@@ -170,8 +188,8 @@ def push_results_to_db(db_url, project, case_name, logger, pod_name,
     except Exception, e:
         print ("Error [push_results_to_db('%s', '%s', '%s', " +
                "'%s', '%s', '%s', '%s', '%s', '%s')]:" %
-               (db_url, project, case_name, pod_name, version,
-                scenario, criteria, build_tag, payload)), e
+               (url, project, case_name, pod_name, version,
+                scenario, criteria, build_tag, details)), e
         return False