sleep 10 # to let the instances terminate
;;
"doctor")
- python ${FUNCTEST_REPO_DIR}/testcases/features/doctor.py
+ python ${FUNCTEST_REPO_DIR}/testcases/features/doctor.py $report
;;
"ovno")
# suite under rewriting for colorado
python ${repos_dir}/securityscanning/security_scan.py --config ${repos_dir}/securityscanning/config.ini
;;
"copper")
- python ${FUNCTEST_REPO_DIR}/testcases/features/copper.py
+ python ${FUNCTEST_REPO_DIR}/testcases/features/copper.py $report
;;
"moon")
python ${repos_dir}/moon/tests/run_tests.py
-c ${FUNCTEST_REPO_DIR}/testcases/OpenStack/tempest/tempest_multisite.conf
;;
"domino")
- python ${FUNCTEST_REPO_DIR}/testcases/features/domino.py
+ python ${FUNCTEST_REPO_DIR}/testcases/features/domino.py $report
;;
"odl-sfc")
bash ${FUNCTEST_REPO_DIR}/testcases/features/sfc/server_presetup_CI.bash
exit $ret_val
fi
source ${FUNCTEST_REPO_DIR}/testcases/features/sfc/tackerc
- python ${FUNCTEST_REPO_DIR}/testcases/features/sfc/sfc.py
+ python ${FUNCTEST_REPO_DIR}/testcases/features/sfc/sfc.py $report
;;
"parser")
python ${FUNCTEST_REPO_DIR}/testcases/vnf/vRNC/parser.py $report
# See the License for the specific language governing permissions and\r
# limitations under the License.\r
#\r
-\r
+import argparse\r
import os\r
import sys\r
import time\r
import functest.utils.functest_utils as functest_utils\r
import yaml\r
\r
+parser = argparse.ArgumentParser()\r
+parser.add_argument("-r", "--report",\r
+ help="Create json result file",\r
+ action="store_true")\r
+args = parser.parse_args()\r
\r
with open(os.environ["CONFIG_FUNCTEST_YAML"]) as f:\r
functest_yaml = yaml.safe_load(f)\r
dirs = functest_yaml.get('general').get('directories')\r
FUNCTEST_REPO = dirs.get('dir_repo_functest')\r
COPPER_REPO = dirs.get('dir_repo_copper')\r
-TEST_DB_URL = functest_yaml.get('results').get('test_db_url')\r
\r
logger = ft_logger.Logger("copper").getLogger()\r
\r
'duration': duration,\r
'status': test_status,\r
}\r
- pod_name = functest_utils.get_pod_name(logger)\r
- scenario = functest_utils.get_scenario(logger)\r
- version = functest_utils.get_version(logger)\r
- build_tag = functest_utils.get_build_tag(logger)\r
-\r
+ functest_utils.logger_test_results(logger, "Copper",\r
+ "copper-notification",\r
+ details['status'], details)\r
try:\r
- logger.info("Pushing COPPER results: TEST_DB_URL=%(db)s "\r
- "pod_name=%(pod)s version=%(v)s scenario=%(s)s "\r
- "criteria=%(c)s details=%(d)s" % {\r
- 'db': TEST_DB_URL,\r
- 'pod': pod_name,\r
- 'v': version,\r
- 's': scenario,\r
- 'c': details['status'],\r
- 'b': build_tag,\r
- 'd': details,\r
- })\r
- functest_utils.push_results_to_db("copper",\r
- "copper-notification",\r
- logger,\r
- start_time,\r
- stop_time,\r
- details['status'],\r
- details)\r
+ if args.report:\r
+ functest_utils.push_results_to_db("copper",\r
+ "copper-notification",\r
+ logger,\r
+ start_time,\r
+ stop_time,\r
+ details['status'],\r
+ details)\r
+ logger.info("COPPER results pushed to DB")\r
except:\r
logger.error("Error pushing results into Database '%s'"\r
% sys.exc_info()[0])\r
# 0.2: measure test duration and publish results under json format
#
#
-
+import argparse
import os
import time
import yaml
import functest.utils.functest_logger as ft_logger
import functest.utils.functest_utils as functest_utils
+parser = argparse.ArgumentParser()
+parser.add_argument("-r", "--report",
+ help="Create json result file",
+ action="store_true")
+args = parser.parse_args()
+
with open(os.environ["CONFIG_FUNCTEST_YAML"]) as f:
functest_yaml = yaml.safe_load(f)
dirs = functest_yaml.get('general').get('directories')
FUNCTEST_REPO = dirs.get('dir_repo_functest')
DOCTOR_REPO = dirs.get('dir_repo_doctor')
-TEST_DB_URL = functest_yaml.get('results').get('test_db_url')
logger = ft_logger.Logger("doctor").getLogger()
'duration': duration,
'status': test_status,
}
- pod_name = functest_utils.get_pod_name(logger)
- scenario = functest_utils.get_scenario(logger)
- version = functest_utils.get_version(logger)
- build_tag = functest_utils.get_build_tag(logger)
-
status = "FAIL"
if details['status'] == "OK":
status = "PASS"
-
- logger.info("Pushing Doctor results: TEST_DB_URL=%(db)s pod_name=%(pod)s "
- "version=%(v)s scenario=%(s)s criteria=%(c)s details=%(d)s" % {
- 'db': TEST_DB_URL,
- 'pod': pod_name,
- 'v': version,
- 's': scenario,
- 'c': status,
- 'b': build_tag,
- 'd': details,
- })
- functest_utils.push_results_to_db("doctor",
- "doctor-notification",
- logger,
- start_time,
- stop_time,
- status,
- details)
+ functest_utils.logger_test_results(logger, "Doctor",
+ "doctor-notification",
+ status, details)
+ if args.report:
+ functest_utils.push_results_to_db("doctor",
+ "doctor-notification",
+ logger,
+ start_time,
+ stop_time,
+ status,
+ details)
+ logger.info("Doctor results pushed to DB")
exit(exit_code)
# Later, the VM2 boots then execute cloud-init to ping VM1.
# After successful ping, both the VMs are deleted.
# 0.2: measure test duration and publish results under json format
-#
+# 0.3: add report flag to push results when needed
#
+import argparse
import os
import time
import yaml
import functest.utils.functest_logger as ft_logger
import functest.utils.functest_utils as functest_utils
+parser = argparse.ArgumentParser()
+
+parser.add_argument("-r", "--report",
+ help="Create json result file",
+ action="store_true")
+args = parser.parse_args()
+
with open(os.environ["CONFIG_FUNCTEST_YAML"]) as f:
functest_yaml = yaml.safe_load(f)
dirs = functest_yaml.get('general').get('directories')
FUNCTEST_REPO = dirs.get('dir_repo_functest')
DOMINO_REPO = dirs.get('dir_repo_domino')
-TEST_DB_URL = functest_yaml.get('results').get('test_db_url')
logger = ft_logger.Logger("domino").getLogger()
'duration': duration,
'status': test_status,
}
- pod_name = functest_utils.get_pod_name(logger)
- scenario = functest_utils.get_scenario(logger)
- version = functest_utils.get_version(logger)
- build_tag = functest_utils.get_build_tag(logger)
status = "FAIL"
if details['status'] == "OK":
elif details['status'] == "SKIPPED":
status = "SKIP"
- logger.info("Pushing Domino results: TEST_DB_URL=%(db)s pod_name=%(pod)s "
- "version=%(v)s scenario=%(s)s criteria=%(c)s details=%(d)s" % {
- 'db': TEST_DB_URL,
- 'pod': pod_name,
- 'v': version,
- 's': scenario,
- 'c': status,
- 'b': build_tag,
- 'd': details,
- })
-
- if status is not "SKIP":
- functest_utils.push_results_to_db("domino",
- "domino-multinode",
- logger,
- start_time,
- stop_time,
- status,
- details)
+ functest_utils.logger_test_results(logger, "Domino",
+ "domino-multinode",
+ status, details)
+ if args.report:
+ if status is not "SKIP":
+ functest_utils.push_results_to_db("domino",
+ "domino-multinode",
+ logger,
+ start_time,
+ stop_time,
+ status,
+ details)
+ logger.info("Domino results pushed to DB")
+
if __name__ == '__main__':
main()
logger.info('\033[92m' + "SFC TEST WORKED"
" :) \n" + '\033[0m')
+ # TODO report results to DB
+ # functest_utils.logger_test_results(logger, "SFC",
+ # "odl-sfc",
+ # status, details)
+ # see doctor, promise, domino, ...
+ # if args.report:
+ # logger.info("Pushing odl-SFC results")
+ # functest_utils.push_results_to_db("functest",
+ # "odl-sfc",
+ # logger,
+ # start_time,
+ # stop_time,
+ # status,
+ # details)
+
sys.exit(0)
if __name__ == '__main__':
version = get_version(logger)
build_tag = get_build_tag(logger)
- logger.info("Pushing %(p)s/%(n)s results: TEST_DB_URL=%(db)s "
- "pod_name=%(pod)s version=%(v)s scenario=%(s)s "
- "criteria=%(c)s details=%(d)s" % {
+ logger.info("\n"
+ "****************************************\n"
+ "\t %(p)s/%(n)s results \n\n"
+ "****************************************\n"
+ "DB:\t%(db)s\n"
+ "pod:\t%(pod)s\n"
+ "version:\t%(v)s\n"
+ "scenario:\t%(s)s\n"
+ "status:\t%(c)s\n"
+ "build tag:\t%(b)s\n"
+ "details:\t%(d)s\n"
+ % {
'p': project,
'n': case_name,
'db': get_db_url(),