# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
#
# http://www.apache.org/licenses/LICENSE-2.0
import datetime
import os
import sys

import jinja2
import requests
import yaml

import reportingUtils as utils
import reportingConf as conf
import testCase as tc
import scenarioResult as sr
testCases4Validation = []
otherTestCases = []
# init just Tempest to get the list of scenarios
# as all the scenarios run Tempest
tempest = tc.TestCase("Tempest", "functest", -1)
# Retrieve the Functest configuration to detect which tests are relevant
# according to the installer and scenario
# cf = "https://git.opnfv.org/cgit/functest/plain/ci/config_functest.yaml"
cf = "https://git.opnfv.org/cgit/functest/plain/ci/testcases.yaml"
response = requests.get(cf)
functest_yaml_config = yaml.safe_load(response.text)
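
# Expected structure of testcases.yaml (assumption, inferred from the keys
# used below): a top-level 'tiers' list, each tier carrying an 'order' field
# and a 'testcases' list whose entries provide 'name' and 'dependencies'.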
print "****************************************"
print "*   Generating reporting.....          *"
print ("*   Data retention = %s days          *" % conf.PERIOD)
print "****************************************"
# Retrieve the test cases declared in each Functest tier
config_tiers = functest_yaml_config.get("tiers")
# We consider Tier 1 (smoke), Tier 2 (sdn suites) and Tier 3 (features)
# to validate scenarios.
# Tiers 4 and above are not used to validate scenarios, but their results
# are displayed anyway.
# Tricky thing for the API: some tests are Functest tests,
# other tests are declared directly in the feature projects.
for tier in config_tiers:
    if tier['order'] > 0 and tier['order'] < 3:
        for case in tier['testcases']:
            # TestCase(name, project, constraints), mirroring the Tempest
            # case created above; the project is assumed to be "functest"
            testCases4Validation.append(tc.TestCase(case['name'],
                                                    "functest",
                                                    case['dependencies']))
    elif tier['order'] == 3:
        for case in tier['testcases']:
            testCases4Validation.append(tc.TestCase(case['name'],
                                                    "functest",
                                                    case['dependencies']))
    elif tier['order'] > 3:
        for case in tier['testcases']:
            otherTestCases.append(tc.TestCase(case['name'],
                                              "functest",
                                              case['dependencies']))
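
# At this point testCases4Validation holds the Tier 1-3 cases that gate
# scenario validation, while otherTestCases holds the Tier > 3 cases that
# are only reported for information.
# Optional sanity check: show how many cases were collected in each group.
print ("%s cases used for validation, %s displayed for information only" %
       (len(testCases4Validation), len(otherTestCases)))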

# For all the versions
for version in conf.versions:
    # For all the installers
    for installer in conf.installers:
        # Get the results of all the scenarios for this installer/version
        scenario_results = utils.getScenarios(tempest, installer, version)
        scenario_stats = utils.getScenarioStats(scenario_results)
        items = {}
        scenario_result_criteria = {}
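
        # scenario_results is expected to be a dict keyed by scenario name
        # (iterated with .items() below); items will map each scenario to the
        # list of test cases shown on the status page.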
        # For all the scenarios, get the results
        for s, s_result in scenario_results.items():
            # Green or red light for a given scenario
            nb_test_runnable_for_this_scenario = 0
            scenario_score = 0

            testCases2BeDisplayed = []
            # Check if the test case is runnable for this installer/scenario
            # (test cases used for scenario validation)
            try:
                print ("---------------------------------")
                print ("installer %s, version %s, scenario %s:" %
                       (installer, version, s))

                # 1) Manage the test cases for the scenario validation
                # concretely Tiers 0-3
                for test_case in testCases4Validation:
                    test_case.checkRunnable(installer, s,
                                            test_case.getConstraints())
                    print ("testcase %s is %s" % (test_case.getName(),
                                                  test_case.isRunnable))
                    if test_case.isRunnable:
                        dbName = test_case.getDbName()
                        name = test_case.getName()
                        project = test_case.getProject()
                        nb_test_runnable_for_this_scenario += 1
                        print (" Searching results for case %s " %
                               dbName)
                        result = utils.getResult(dbName, installer, s, version)
                        print " >>>> Test result = " + str(result)
                        test_case.setCriteria(result)
                        test_case.setIsRunnable(True)
                        # assumption: TestCase takes (name, project, criteria),
                        # mirroring the Tempest case created above
                        testCases2BeDisplayed.append(tc.TestCase(name,
                                                                 project,
                                                                 result))
                        scenario_score = scenario_score + result

                # 2) Manage the test cases for the scenario qualification
                # concretely Tiers > 3
                for test_case in otherTestCases:
                    test_case.checkRunnable(installer, s,
                                            test_case.getConstraints())
                    print ("testcase %s is %s" % (test_case.getName(),
                                                  test_case.isRunnable))
                    if test_case.isRunnable:
                        dbName = test_case.getDbName()
                        name = test_case.getName()
                        project = test_case.getProject()
                        print (" Searching results for case %s " %
                               dbName)
                        result = utils.getResult(dbName, installer, s, version)
                        test_case.setCriteria(result)
                        test_case.setIsRunnable(True)
                        # these cases are displayed but do not count
                        # towards the scenario score
                        testCases2BeDisplayed.append(tc.TestCase(name,
                                                                 project,
                                                                 result))

                items[s] = testCases2BeDisplayed
144 print ("Error: installer %s, version %s, scenario %s" %
145 (installer, version, s))
146 print "No data available , error %s " % (sys.exc_info()[0])

            # **********************************************
            # Evaluate the results for scenario validation
            # **********************************************
            # the validation criteria = nb of runnable tests x 3,
            # because each test case scores 0, 1, 2 or 3
            scenario_criteria = nb_test_runnable_for_this_scenario * 3
            # if there is no runnable test, set the criteria to a high value
            if scenario_criteria < 1:
                scenario_criteria = conf.MAX_SCENARIO_CRITERIA

            s_score = str(scenario_score) + "/" + str(scenario_criteria)
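            # Example: with 8 runnable tests the criteria is 24; a scenario
            # scoring 22/24 stays KO, only a perfect 24/24 is validated below.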
            if scenario_score < scenario_criteria:
                print (">>>> scenario not OK, score = %s/%s" %
                       (scenario_score, scenario_criteria))
                s_status = "KO"
            else:
                print ">>>>> scenario OK, save the information"
                s_status = "OK"
                path_validation_file = ("./release/" + version +
                                        "/validated_scenario_history.txt")
                with open(path_validation_file, "a") as f:
                    time_format = "%Y-%m-%d %H:%M"
                    info = (datetime.datetime.now().strftime(time_format) +
                            ";" + installer + ";" + s + "\n")
                    f.write(info)

            scenario_result_criteria[s] = sr.ScenarioResult(s_status, s_score)
            print "--------------------------"
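
        # Render one HTML status page per installer and version from the
        # Jinja2 template below; the page aggregates the per-scenario
        # scores and statuses computed above.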
        templateLoader = jinja2.FileSystemLoader(
            os.path.dirname(os.path.abspath(__file__)))
        templateEnv = jinja2.Environment(loader=templateLoader)

        TEMPLATE_FILE = "./template/index-status-tmpl.html"
        template = templateEnv.get_template(TEMPLATE_FILE)

        # variables passed to the template; the exact set expected by
        # index-status-tmpl.html is assumed here
        outputText = template.render(scenario_stats=scenario_stats,
                                     scenario_results=scenario_result_criteria,
                                     items=items,
                                     installer=installer,
                                     period=conf.PERIOD,
                                     version=version)

        with open("./release/" + version +
                  "/index-status-" + installer + ".html", "wb") as fh:
            fh.write(outputText)