Merge "Move from snaps_healthcheck to connection_check to retrieve scenario lxd based...
[releng.git] / utils / test / reporting / functest / reporting-status.py
index b527b78..95f9e66 100755 (executable)
 #
 import datetime
 import jinja2
-import pdfkit
+import os
 import requests
 import sys
 import time
 import yaml
 
-import reportingUtils as utils
-import reportingConf as conf
 import testCase as tc
 import scenarioResult as sr
 
+# manage conf
+import utils.reporting_utils as rp_utils
+
 # Logger
-logger = utils.getLogger("Status")
+logger = rp_utils.getLogger("Functest-Status")
 
 # Initialization
 testValid = []
 otherTestCases = []
 reportingDate = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
 
-# init just tempest to get the list of scenarios
-# as all the scenarios run Tempest
-tempest = tc.TestCase("tempest_smoke_serial", "functest", -1)
+# init just connection_check to get the list of scenarios
+# as all the scenarios run connection_check
+healthcheck = tc.TestCase("connection_check", "functest", -1)
 
 # Retrieve the Functest configuration to detect which tests are relevant
 # according to the installer, scenario
-cf = conf.TEST_CONF
+cf = rp_utils.get_config('functest.test_conf')
+period = rp_utils.get_config('general.period')
+versions = rp_utils.get_config('general.versions')
+installers = rp_utils.get_config('general.installers')
+blacklist = rp_utils.get_config('functest.blacklist')
+log_level = rp_utils.get_config('general.log.log_level')
+exclude_noha = rp_utils.get_config('functest.exclude_noha')
+exclude_virtual = rp_utils.get_config('functest.exclude_virtual')
+
 response = requests.get(cf)
 
 functest_yaml_config = yaml.safe_load(response.text)
 
 logger.info("*******************************************")
+logger.info("*                                         *")
 logger.info("*   Generating reporting scenario status  *")
-logger.info("*   Data retention = %s days              *" % conf.PERIOD)
+logger.info("*   Data retention: %s days               *" % period)
+logger.info("*   Log level: %s                         *" % log_level)
+logger.info("*                                         *")
+logger.info("*   Virtual PODs exluded: %s              *" % exclude_virtual)
+logger.info("*   NOHA scenarios excluded: %s           *" % exclude_noha)
 logger.info("*                                         *")
 logger.info("*******************************************")
 
 # Retrieve test cases of Tier 1 (smoke)
 config_tiers = functest_yaml_config.get("tiers")
 
-# we consider Tier 1 (smoke),2 (sdn suites) and 3 (features)
+# we consider Tier 0 (Healthcheck), Tier 1 (smoke),2 (features)
 # to validate scenarios
-# Tier > 4 are not used to validate scenarios but we display the results anyway
+# Tier > 2 are not used to validate scenarios but we display the results anyway
 # tricky thing for the API as some tests are Functest tests
 # other tests are declared directly in the feature projects
 for tier in config_tiers:
-    if tier['order'] > 0 and tier['order'] < 3:
+    if tier['order'] >= 0 and tier['order'] < 2:
         for case in tier['testcases']:
-            if case['name'] not in conf.blacklist:
+            if case['name'] not in blacklist:
                 testValid.append(tc.TestCase(case['name'],
                                              "functest",
                                              case['dependencies']))
-    elif tier['order'] == 3:
+    elif tier['order'] == 2:
         for case in tier['testcases']:
-            if case['name'] not in conf.blacklist:
+            if case['name'] not in blacklist:
                 testValid.append(tc.TestCase(case['name'],
                                              case['name'],
                                              case['dependencies']))
-    elif tier['order'] > 3:
+    elif tier['order'] > 2:
         for case in tier['testcases']:
-            if case['name'] not in conf.blacklist:
+            if case['name'] not in blacklist:
                 otherTestCases.append(tc.TestCase(case['name'],
                                                   "functest",
                                                   case['dependencies']))
 
+logger.debug("Functest reporting start")
 # For all the versions
-for version in conf.versions:
+for version in versions:
     # For all the installers
-    for installer in conf.installers:
+    for installer in installers:
         # get scenarios
-        scenario_results = utils.getScenarios(tempest, installer, version)
-        scenario_stats = utils.getScenarioStats(scenario_results)
+        scenario_results = rp_utils.getScenarios(healthcheck,
+                                                 installer,
+                                                 version)
+        scenario_stats = rp_utils.getScenarioStats(scenario_results)
         items = {}
         scenario_result_criteria = {}
+        scenario_file_name = ("./display/" + version +
+                              "/functest/scenario_history.txt")
+        # initiate scenario file if it does not exist
+        if not os.path.isfile(scenario_file_name):
+            with open(scenario_file_name, "a") as my_file:
+                logger.debug("Create scenario file: %s" % scenario_file_name)
+                my_file.write("date,scenario,installer,detail,score\n")
 
         # For all the scenarios get results
         for s, s_result in scenario_results.items():
@@ -98,7 +122,7 @@ for version in conf.versions:
             if len(s_result) > 0:
                 build_tag = s_result[len(s_result)-1]['build_tag']
                 logger.debug("Build tag: %s" % build_tag)
-                s_url = s_url = utils.getJenkinsUrl(build_tag)
+                s_url = s_url = rp_utils.getJenkinsUrl(build_tag)
                 logger.info("last jenkins url: %s" % s_url)
             testCases2BeDisplayed = []
             # Check if test case is runnable / installer, scenario
@@ -122,7 +146,8 @@ for version in conf.versions:
                         nb_test_runnable_for_this_scenario += 1
                         logger.info(" Searching results for case %s " %
                                     (displayName))
-                        result = utils.getResult(dbName, installer, s, version)
+                        result = rp_utils.getResult(dbName, installer,
+                                                    s, version)
                         # if no result set the value to 0
                         if result < 0:
                             result = 0
@@ -154,7 +179,8 @@ for version in conf.versions:
                         project = test_case.getProject()
                         logger.info(" Searching results for case %s " %
                                     (displayName))
-                        result = utils.getResult(dbName, installer, s, version)
+                        result = rp_utils.getResult(dbName, installer,
+                                                    s, version)
                         # at least 1 result for the test
                         if result > -1:
                             test_case.setCriteria(result)
@@ -182,14 +208,11 @@ for version in conf.versions:
             scenario_criteria = nb_test_runnable_for_this_scenario * 3
             # if 0 runnable tests set criteria at a high value
             if scenario_criteria < 1:
-                scenario_criteria = conf.MAX_SCENARIO_CRITERIA
+                scenario_criteria = 50  # conf.MAX_SCENARIO_CRITERIA
 
             s_score = str(scenario_score) + "/" + str(scenario_criteria)
-            s_score_percent = 0.0
-            try:
-                s_score_percent = float(scenario_score) / float(scenario_criteria) * 100
-            except:
-                logger.error("cannot calculate the score percent")
+            s_score_percent = rp_utils.getScenarioPercent(scenario_score,
+                                                          scenario_criteria)
 
             s_status = "KO"
             if scenario_score < scenario_criteria:
@@ -199,9 +222,9 @@ for version in conf.versions:
             else:
                 logger.info(">>>>> scenario OK, save the information")
                 s_status = "OK"
-                path_validation_file = (conf.REPORTING_PATH +
-                                        "/functest/release/" + version +
-                                        "/validated_scenario_history.txt")
+                path_validation_file = ("./display/" + version +
+                                        "/functest/" +
+                                        "validated_scenario_history.txt")
                 with open(path_validation_file, "a") as f:
                     time_format = "%Y-%m-%d %H:%M"
                     info = (datetime.datetime.now().strftime(time_format) +
@@ -209,10 +232,7 @@ for version in conf.versions:
                     f.write(info)
 
             # Save daily results in a file
-            path_validation_file = (conf.REPORTING_PATH +
-                                    "/functest/release/" + version +
-                                    "/scenario_history.txt")
-            with open(path_validation_file, "a") as f:
+            with open(scenario_file_name, "a") as f:
                 info = (reportingDate + "," + s + "," + installer +
                         "," + s_score + "," +
                         str(round(s_score_percent)) + "\n")
@@ -224,49 +244,37 @@ for version in conf.versions:
                                                             s_url)
             logger.info("--------------------------")
 
-        templateLoader = jinja2.FileSystemLoader(conf.REPORTING_PATH)
+        templateLoader = jinja2.FileSystemLoader(".")
         templateEnv = jinja2.Environment(
             loader=templateLoader, autoescape=True)
 
-        TEMPLATE_FILE = "/functest/template/index-status-tmpl.html"
+        TEMPLATE_FILE = "./functest/template/index-status-tmpl.html"
         template = templateEnv.get_template(TEMPLATE_FILE)
 
         outputText = template.render(scenario_stats=scenario_stats,
                                      scenario_results=scenario_result_criteria,
                                      items=items,
                                      installer=installer,
-                                     period=conf.PERIOD,
+                                     period=period,
                                      version=version,
                                      date=reportingDate)
 
-    with open(conf.REPORTING_PATH + "/functest/release/" + version +
-              "/index-status-" + installer + ".html", "wb") as fh:
-        fh.write(outputText)
+        with open("./display/" + version +
+                  "/functest/status-" + installer + ".html", "wb") as fh:
+            fh.write(outputText)
+
+        logger.info("Manage export CSV & PDF")
+        rp_utils.export_csv(scenario_file_name, installer, version)
+        logger.error("CSV generated...")
 
-    # Generate outputs for export
-    # pdf
-    try:
-        pdf_path = ("http://testresults.opnfv.org/reporting/" +
-                    "functest/release/" + version +
-                    "/index-status-" + installer + ".html")
-        pdf_doc_name = (conf.REPORTING_PATH +
-                        "/functest/release/" + version +
-                        "/status-" + installer + ".pdf")
-        pdfkit.from_url(pdf_path, pdf_doc_name)
-    except IOError:
-        logger.info("pdf generated anyway...")
-    except:
-        logger.error("impossible to generate PDF")
-    # csv
-    # generate sub files based on scenario_history.txt
-    scenario_installer_file_name = (conf.REPORTING_PATH +
-                                    "/functest/release/" + version +
-                                    "/scenario_history_" +
-                                    installer + ".txt")
-    scenario_installer_file = open(scenario_installer_file_name, "w")
-    with open(path_validation_file, "r") as f:
-        for line in f:
-            if installer in line:
-                scenario_installer_file.write(line)
-    scenario_installer_file.close
+        # Generate outputs for export
+        # pdf
+        # TODO Change once web site updated...use the current one
+        # to test pdf production
+        url_pdf = rp_utils.get_config('general.url')
+        pdf_path = ("./display/" + version +
+                    "/functest/status-" + installer + ".html")
+        pdf_doc_name = ("./display/" + version +
+                        "/functest/status-" + installer + ".pdf")
+        rp_utils.export_pdf(pdf_path, pdf_doc_name)
+        logger.info("PDF generated...")