From: Morgan Richomme
Date: Fri, 1 Sep 2017 07:51:05 +0000 (+0000)
Subject: Merge "cleanup of obsolete non-CI arm PODs"
X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?a=commitdiff_plain;h=fc84a25aa2eae77a0a2a11659867026c55fb49a1;hp=c0ba4a5e01f2cd517f770a7a2a2360717927a4ad;p=releng.git

Merge "cleanup of obsolete non-CI arm PODs"
---

diff --git a/jjb/apex/apex-download-artifact.sh b/jjb/apex/apex-download-artifact.sh
index 860cd60a5..7293e8243 100755
--- a/jjb/apex/apex-download-artifact.sh
+++ b/jjb/apex/apex-download-artifact.sh
@@ -21,7 +21,7 @@ if [[ "$ARTIFACT_VERSION" =~ dev ]]; then
   tar -xvf apex-${OPNFV_ARTIFACT_VERSION}.tar.gz
   popd > /dev/null
 else
-  echo "Will download RPMs..."
+  echo "Will use RPMs..."

   # Must be RPMs/ISO
   echo "Downloading latest properties file"
@@ -33,13 +33,13 @@ else
   source $BUILD_DIRECTORY/opnfv.properties

   RPM_INSTALL_PATH=$(echo "http://"$OPNFV_RPM_URL | sed 's/\/'"$(basename $OPNFV_RPM_URL)"'//')
-  RPM_LIST=${RPM_INSTALL_PATH}/$(basename $OPNFV_RPM_URL)
+  RPM_LIST=$(basename $OPNFV_RPM_URL)

   # find version of RPM
   VERSION_EXTENSION=$(echo $(basename $RPM_LIST) | grep -Eo '[0-9]+\.[0-9]+-([0-9]{8}|[a-z]+-[0-9]\.[0-9]+)')

   # build RPM List which already includes base Apex RPM
-  RPM_LIST+=" ${RPM_INSTALL_PATH}/opnfv-apex-undercloud-${VERSION_EXTENSION}.noarch.rpm"
-  RPM_LIST+=" ${RPM_INSTALL_PATH}/python34-opnfv-apex-${VERSION_EXTENSION}.noarch.rpm"
+  RPM_LIST+=" opnfv-apex-undercloud-${VERSION_EXTENSION}.noarch.rpm"
+  RPM_LIST+=" python34-opnfv-apex-${VERSION_EXTENSION}.noarch.rpm"

   # remove old / install new RPMs
   if rpm -q opnfv-apex > /dev/null; then
@@ -48,10 +48,20 @@ else
       sudo yum remove -y ${INSTALLED_RPMS}
     fi
   fi
+  # Create an rpms dir on slave
+  mkdir -p ~/apex_rpms
+  pushd ~/apex_rpms
+  # Remove older rpms which do not match this version
+  find . ! -name *${VERSION_EXTENSION}.noarch.rpm -type f -exec rm -f {} +
+  # Download RPM only if changed on server
+  for rpm in $RPM_LIST; do
+    wget -N ${RPM_INSTALL_PATH}/${rpm}
+  done
   if ! sudo yum install -y $RPM_LIST; then
     echo "Unable to install new RPMs: $RPM_LIST"
     exit 1
   fi
+  popd
 fi

 # TODO: Uncomment these lines to verify SHA512SUMs once the sums are
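
The block added above caches RPMs on the Jenkins slave and relies on wget's timestamping mode: with -N, wget re-downloads a file only when the server copy is newer than the local one. A minimal standalone sketch of the same cache-and-refresh pattern follows; BASE_URL, PKG_LIST and the ~/rpm_cache directory are illustrative assumptions, not names from the job.

    #!/bin/bash
    # Sketch of the cache-and-refresh pattern, under assumed names.
    BASE_URL="http://artifacts.example.org/rpms"    # hypothetical mirror
    PKG_LIST="foo-1.0-20170901.noarch.rpm bar-1.0-20170901.noarch.rpm"
    VERSION_EXTENSION="1.0-20170901"

    mkdir -p ~/rpm_cache
    pushd ~/rpm_cache > /dev/null
    # Drop cached packages from other versions; the glob is quoted so that
    # find, not the shell, performs the match.
    find . -type f ! -name "*${VERSION_EXTENSION}.noarch.rpm" -exec rm -f {} +
    for pkg in $PKG_LIST; do
        wget -N "${BASE_URL}/${pkg}"   # -N: download only if remote is newer
    done
    popd > /dev/null
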
diff --git a/jjb/apex/apex-unit-test.sh b/jjb/apex/apex-unit-test.sh
index 12cb862b0..abcddcab4 100755
--- a/jjb/apex/apex-unit-test.sh
+++ b/jjb/apex/apex-unit-test.sh
@@ -9,7 +9,6 @@ echo

 pushd ci/ > /dev/null

-sudo BASE="${WORKSPACE}/build" LIB="${WORKSPACE}/lib" ./clean.sh
 ./test.sh

 popd
diff --git a/jjb/ci_gate_security/anteater-report-to-gerrit.sh b/jjb/ci_gate_security/anteater-report-to-gerrit.sh
index 00a78ceba..a749d1db4 100644
--- a/jjb/ci_gate_security/anteater-report-to-gerrit.sh
+++ b/jjb/ci_gate_security/anteater-report-to-gerrit.sh
@@ -13,7 +13,7 @@ if [[ -e securityaudit.log ]] ; then
         EXITSTATUS=1
     fi

-    cat securityaudit.log | awk -F"ERROR - " '{print $2}' | sed -e "s/\"/\\\\\"/g;s/\'/\\\\\'/g"> shortlog
+    awk -F"ERROR - " '{print $2}' securityaudit.log | sed -e "s/\"/\\\\\"/g;s/\'/\\\\\'/g"> shortlog

     ssh -p 29418 gerrit.opnfv.org \
         "gerrit review -p $GERRIT_PROJECT \
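
The anteater change above removes a useless use of cat (awk reads securityaudit.log directly) and keeps the sed stage that backslash-escapes quotes, so the short log can be embedded safely in the double-quoted gerrit review command. A minimal demonstration of the escaping idea, with an invented log line:

    # Hypothetical input line; the pipeline prints:  Found \"suspicious\" pattern
    echo '2017-09-01 ERROR - Found "suspicious" pattern' \
        | awk -F"ERROR - " '{print $2}' \
        | sed -e 's/"/\\"/g'
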
diff --git a/utils/test/reporting/reporting/functest/reporting-tempest.py b/utils/test/reporting/reporting/functest/reporting-tempest.py
index bc2885639..d78d9a19d 100755
--- a/utils/test/reporting/reporting/functest/reporting-tempest.py
+++ b/utils/test/reporting/reporting/functest/reporting-tempest.py
@@ -8,58 +8,57 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 # SPDX-license-identifier: Apache-2.0

-from urllib2 import Request, urlopen, URLError
 from datetime import datetime
 import json
-import jinja2
 import os

-# manage conf
-import utils.reporting_utils as rp_utils
+from urllib2 import Request, urlopen, URLError
+
+import jinja2
+
+import reporting.utils.reporting_utils as rp_utils

-installers = rp_utils.get_config('general.installers')
-items = ["tests", "Success rate", "duration"]
+INSTALLERS = rp_utils.get_config('general.installers')
+ITEMS = ["tests", "Success rate", "duration"]
 CURRENT_DIR = os.getcwd()

 PERIOD = rp_utils.get_config('general.period')
-criteria_nb_test = 165
-criteria_duration = 1800
-criteria_success_rate = 90
+CRITERIA_NB_TEST = 100
+CRITERIA_DURATION = 1800
+CRITERIA_SUCCESS_RATE = 100

 logger = rp_utils.getLogger("Tempest")
 logger.info("************************************************")
 logger.info("* Generating reporting Tempest_smoke_serial *")
-logger.info("* Data retention = %s days *" % PERIOD)
+logger.info("* Data retention = %s days *", PERIOD)
 logger.info("* *")
 logger.info("************************************************")

 logger.info("Success criteria:")
-logger.info("nb tests executed > %s s " % criteria_nb_test)
-logger.info("test duration < %s s " % criteria_duration)
-logger.info("success rate > %s " % criteria_success_rate)
+logger.info("nb tests executed > %s s ", CRITERIA_NB_TEST)
+logger.info("test duration < %s s ", CRITERIA_DURATION)
+logger.info("success rate > %s ", CRITERIA_SUCCESS_RATE)

 # For all the versions
 for version in rp_utils.get_config('general.versions'):
-    for installer in installers:
+    for installer in INSTALLERS:
         # we consider the Tempest results of the last PERIOD days
         url = ("http://" + rp_utils.get_config('testapi.url') +
-               "?case=tempest_smoke_serial")
-        request = Request(url + '&period=' + str(PERIOD) +
-                          '&installer=' + installer +
-                          '&version=' + version)
-        logger.info("Search tempest_smoke_serial results for installer %s"
-                    " for version %s"
-                    % (installer, version))
+               "?case=tempest_smoke_serial&period=" + str(PERIOD) +
+               "&installer=" + installer + "&version=" + version)
+        request = Request(url)
+        logger.info(("Search tempest_smoke_serial results for installer %s"
+                     " for version %s"), installer, version)
         try:
             response = urlopen(request)
             k = response.read()
             results = json.loads(k)
-        except URLError as e:
-            logger.error("Error code: %s" % e)
-
+        except URLError as err:
+            logger.error("Error code: %s", err)
+        logger.debug("request sent: %s", url)
+        logger.debug("Results from API: %s", results)
         test_results = results['results']
-
+        logger.debug("Test results: %s", test_results)
         scenario_results = {}
         criteria = {}
         errors = {}
@@ -72,27 +71,37 @@ for version in rp_utils.get_config('general.versions'):
                 scenario_results[r['scenario']] = []
             scenario_results[r['scenario']].append(r)

+        logger.debug("Scenario results: %s", scenario_results)
+
         for s, s_result in scenario_results.items():
             scenario_results[s] = s_result[0:5]
             # For each scenario, we build a result object to deal with
             # results, criteria and error handling
             for result in scenario_results[s]:
                 result["start_date"] = result["start_date"].split(".")[0]
+                logger.debug("start_date= %s", result["start_date"])

                 # retrieve results
                 # ****************
                 nb_tests_run = result['details']['tests']
                 nb_tests_failed = result['details']['failures']
-                if nb_tests_run != 0:
-                    success_rate = 100 * ((int(nb_tests_run) -
+                logger.debug("nb_tests_run= %s", nb_tests_run)
+                logger.debug("nb_tests_failed= %s", nb_tests_failed)
+
+                try:
+                    success_rate = (100 * (int(nb_tests_run) -
                                            int(nb_tests_failed)) /
-                                          int(nb_tests_run))
-                else:
+                                    int(nb_tests_run))
+                except ZeroDivisionError:
                     success_rate = 0

                 result['details']["tests"] = nb_tests_run
                 result['details']["Success rate"] = str(success_rate) + "%"
+                logger.info("nb_tests_run= %s", result['details']["tests"])
+                logger.info("test rate = %s",
+                            result['details']["Success rate"])
+
                 # Criteria management
                 # *******************
                 crit_tests = False
@@ -100,11 +109,11 @@ for version in rp_utils.get_config('general.versions'):
                 crit_time = False

                 # Expect that at least 165 tests are run
-                if nb_tests_run >= criteria_nb_test:
+                if nb_tests_run >= CRITERIA_NB_TEST:
                     crit_tests = True

                 # Expect that at least 90% of success
-                if success_rate >= criteria_success_rate:
+                if success_rate >= CRITERIA_SUCCESS_RATE:
                     crit_rate = True

                 # Expect that the suite duration is inferior to 30m
@@ -114,28 +123,27 @@ for version in rp_utils.get_config('general.versions'):
                                               '%Y-%m-%d %H:%M:%S')
                 delta = stop_date - start_date
-                if (delta.total_seconds() < criteria_duration):
+
+                if delta.total_seconds() < CRITERIA_DURATION:
                     crit_time = True

                 result['criteria'] = {'tests': crit_tests,
                                       'Success rate': crit_rate,
                                       'duration': crit_time}
                 try:
-                    logger.debug("Scenario %s, Installer %s"
-                                 % (s_result[1]['scenario'], installer))
-                    logger.debug("Nb Test run: %s" % nb_tests_run)
-                    logger.debug("Test duration: %s"
-                                 % result['details']['duration'])
-                    logger.debug("Success rate: %s" % success_rate)
-                except:
+                    logger.debug("Nb Test run: %s", nb_tests_run)
+                    logger.debug("Test duration: %s", delta)
+                    logger.debug("Success rate: %s", success_rate)
+                except Exception:  # pylint: disable=broad-except
                     logger.error("Data format error")

                 # Error management
                 # ****************
                 try:
                     errors = result['details']['errors']
-                    result['errors'] = errors.replace('{0}', '')
-                except:
+                    logger.info("errors: %s", errors)
+                    result['errors'] = errors
+                except Exception:  # pylint: disable=broad-except
                     logger.error("Error field not present (Brahamputra runs?)")

         templateLoader = jinja2.FileSystemLoader(".")
@@ -146,7 +154,7 @@ for version in rp_utils.get_config('general.versions'):
         template = templateEnv.get_template(TEMPLATE_FILE)

         outputText = template.render(scenario_results=scenario_results,
-                                     items=items,
+                                     items=ITEMS,
                                      installer=installer)

         with open("./display/" + version +
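
Two idioms recur in the reporting refactor above: the explicit zero check around the division becomes EAFP-style (attempt the division, catch ZeroDivisionError), and logger calls move from eager % formatting to passing arguments, so interpolation is deferred until a record is actually emitted. A minimal self-contained sketch of both patterns, with invented sample numbers:

    import logging

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger("Tempest")

    def success_rate(nb_run, nb_failed):
        """Percentage of passed tests; 0 when no test ran."""
        try:
            return 100 * (int(nb_run) - int(nb_failed)) / int(nb_run)
        except ZeroDivisionError:
            return 0

    # Lazy formatting: %s interpolation happens inside the logging call,
    # and only if the message is emitted at the active log level.
    logger.info("success rate = %s", success_rate(165, 12))
    logger.info("success rate = %s", success_rate(0, 0))
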
diff --git a/utils/test/testapi/3rd_party/static/testapi-ui/components/pods/pods.html b/utils/test/testapi/3rd_party/static/testapi-ui/components/pods/pods.html
index cdfcfaf36..7ce36ca7c 100644
--- a/utils/test/testapi/3rd_party/static/testapi-ui/components/pods/pods.html
+++ b/utils/test/testapi/3rd_party/static/testapi-ui/components/pods/pods.html
@@ -63,7 +63,7 @@
-
+