Merge "refstack: bugfix of testcase failure"
authorJose Lausuch <jose.lausuch@ericsson.com>
Wed, 22 Mar 2017 09:57:15 +0000 (09:57 +0000)
committerGerrit Code Review <gerrit@opnfv.org>
Wed, 22 Mar 2017 09:57:15 +0000 (09:57 +0000)
25 files changed:
docker/Dockerfile
docker/Dockerfile.aarch64
functest/ci/__init__.py [changed mode: 0755->0644]
functest/ci/testcases.yaml
functest/ci/tier_builder.py [changed mode: 0755->0644]
functest/ci/tier_handler.py [changed mode: 0755->0644]
functest/opnfv_tests/features/copper.py [changed mode: 0755->0644]
functest/opnfv_tests/features/doctor.py [changed mode: 0755->0644]
functest/opnfv_tests/features/domino.py [changed mode: 0755->0644]
functest/opnfv_tests/features/promise.py [changed mode: 0755->0644]
functest/opnfv_tests/features/sdnvpn.py [changed mode: 0755->0644]
functest/opnfv_tests/features/security_scan.py [changed mode: 0755->0644]
functest/opnfv_tests/mano/orchestra.py [changed mode: 0755->0644]
functest/opnfv_tests/openstack/healthcheck/healthcheck.sh [changed mode: 0755->0644]
functest/opnfv_tests/openstack/refstack_client/refstack_client.py
functest/opnfv_tests/vnf/ims/cloudify_ims.py
functest/opnfv_tests/vnf/ims/cloudify_ims.yaml
functest/opnfv_tests/vnf/ims/opera_ims.py [changed mode: 0644->0755]
functest/opnfv_tests/vnf/ims/orchestra_ims.py [changed mode: 0644->0755]
functest/opnfv_tests/vnf/router/__init__.py [changed mode: 0755->0644]
functest/opnfv_tests/vnf/router/vyos_vrouter.py [changed mode: 0755->0644]
functest/tests/unit/utils/test_functest_utils.py
functest/utils/functest_logger.py [changed mode: 0755->0644]
functest/utils/functest_utils.py
functest/utils/openstack_utils.py [changed mode: 0755->0644]

index 30c31da..176c109 100644 (file)
@@ -120,8 +120,20 @@ RUN cd ${REPOS_DIR}/barometer \
     && pip install .
 
 RUN find ${FUNCTEST_REPO_DIR} -name "*.py" \
-    -not -path "*tests/unit*" |xargs grep __main__ |cut -d\: -f 1 |xargs chmod -c 755 \
-    && find ${FUNCTEST_REPO_DIR} -name "*.sh" |xargs grep \#\! |cut -d\:  -f 1 |xargs chmod -c 755
+    -not -path "*tests/unit*" \
+    -not -path "*functest_venv*" \
+    |xargs grep -L __main__ |cut -d\: -f 1 |xargs chmod -c 644 && \
+    find ${FUNCTEST_REPO_DIR} -name "*.sh" \
+    -not -path "*functest_venv*" \
+    |xargs grep -L \#\! |cut -d\:  -f 1 |xargs chmod -c 644
+
+RUN find ${FUNCTEST_REPO_DIR} -name "*.py" \
+    -not -path "*tests/unit*" \
+    -not -path "*functest_venv*" \
+    |xargs grep __main__ |cut -d\: -f 1 |xargs chmod -c 755 && \
+    find ${FUNCTEST_REPO_DIR} -name "*.sh" \
+    -not -path "*functest_venv*" \
+    |xargs grep \#\! |cut -d\:  -f 1 |xargs chmod -c 755
 
 RUN /bin/bash ${REPOS_DIR}/parser/tests/parser_install.sh ${REPOS_DIR}
 RUN ${REPOS_DIR}/rally/install_rally.sh --yes
index 15f0bdc..60f72a2 100644 (file)
@@ -112,8 +112,20 @@ RUN cd ${RELENG_MODULE_DIR} \
     && pip install -e .
 
 RUN find ${FUNCTEST_REPO_DIR} -name "*.py" \
-    -not -path "*tests/unit*" |xargs grep __main__ |cut -d\: -f 1 |xargs chmod -c 755 \
-    && find ${FUNCTEST_REPO_DIR} -name "*.sh" |xargs grep \#\! |cut -d\:  -f 1 |xargs chmod -c 755
+    -not -path "*tests/unit*" \
+    -not -path "*functest_venv*" \
+    |xargs grep -L __main__ |cut -d\: -f 1 |xargs chmod -c 644 && \
+    find ${FUNCTEST_REPO_DIR} -name "*.sh" \
+    -not -path "*functest_venv*" \
+    |xargs grep -L \#\! |cut -d\:  -f 1 |xargs chmod -c 644
+
+RUN find ${FUNCTEST_REPO_DIR} -name "*.py" \
+    -not -path "*tests/unit*" \
+    -not -path "*functest_venv*" \
+    |xargs grep __main__ |cut -d\: -f 1 |xargs chmod -c 755 && \
+    find ${FUNCTEST_REPO_DIR} -name "*.sh" \
+    -not -path "*functest_venv*" \
+    |xargs grep \#\! |cut -d\:  -f 1 |xargs chmod -c 755
 
 RUN /bin/bash ${REPOS_DIR}/parser/tests/parser_install.sh ${REPOS_DIR}
 RUN ${REPOS_DIR}/rally/install_rally.sh --yes
old mode 100755 (executable)
new mode 100644 (file)
index e3d5ffa..1497755 100755 (executable)
@@ -132,7 +132,7 @@ tiers:
 
             -
                 name: refstack_defcore
-                criteria: 'success_rate >= 80%'
+                criteria: 'success_rate == 100%'
                 blocking: false
                 clean_flag: false
                 description: >-
@@ -435,20 +435,6 @@ tiers:
 #                run:
 #                    module: 'functest.opnfv_tests.openstack.tempest.tempest'
 #                    class: 'TempestFullParallel'
-            -
-                name: tempest_defcore
-                criteria: 'success_rate == 100%'
-                blocking: false
-                clean_flag: false
-                description: >-
-                    This is the set of Tempest test cases created by OpenStack
-                    Interop Working Group for certification purposes.
-                dependencies:
-                    installer: ''
-                    scenario: 'nosdn-nofeature-ha'
-                run:
-                    module: 'functest.opnfv_tests.openstack.tempest.tempest'
-                    class: 'TempestDefcore'
             -
                 name: tempest_custom
                 criteria: 'success_rate == 100%'
old mode 100755 (executable)
new mode 100644 (file)
old mode 100755 (executable)
new mode 100644 (file)
old mode 100755 (executable)
new mode 100644 (file)
old mode 100755 (executable)
new mode 100644 (file)
old mode 100755 (executable)
new mode 100644 (file)
old mode 100755 (executable)
new mode 100644 (file)
old mode 100755 (executable)
new mode 100644 (file)
old mode 100755 (executable)
new mode 100644 (file)
old mode 100755 (executable)
new mode 100644 (file)
index c9f0f27..7d4c568 100755 (executable)
@@ -129,10 +129,10 @@ class RefstackClient(testcase_base.TestcaseBase):
             num_executed = int(num_tests) - int(num_skipped)
             success_rate = 100 * int(num_success) / int(num_executed)
 
-            self.details = {"num_tests": int(num_tests),
-                            "num_failures": int(num_failures),
+            self.details = {"tests": int(num_tests),
+                            "failures": int(num_failures),
                             "success": success_testcases,
-                            "failed": failed_testcases,
+                            "errors": failed_testcases,
                             "skipped": skipped_testcases}
         except Exception:
             success_rate = 0
index 74470ad..f7dfd53 100644 (file)
@@ -203,7 +203,7 @@ class ImsVnf(vnf_base.VnfOnBoardingBase):
         flavor_exist, flavor_id = os_utils.get_or_create_flavor(
             "m1.small",
             self.vnf['requirements']['ram_min'],
-            '20',
+            '30',
             '1',
             public=True)
         self.logger.debug("Flavor id: %s" % flavor_id)
index b84ef8f..74b9e95 100644 (file)
@@ -6,7 +6,7 @@ cloudify:
         url: https://github.com/boucherv-orange/cloudify-manager-blueprints.git
         branch: '3.3.1-build'
     requirements:
-        ram_min: 4000
+        ram_min: 4096
         os_image: centos_7
     inputs:
       keystone_username: ""
@@ -29,7 +29,7 @@ clearwater:
         branch: stable
     deployment_name: clearwater-opnfv
     requirements:
-        ram_min: 2000
+        ram_min: 2048
         os_image: ubuntu_14.04
     inputs:
         image_id: ''
old mode 100644 (file)
new mode 100755 (executable)
old mode 100644 (file)
new mode 100755 (executable)
old mode 100755 (executable)
new mode 100644 (file)
old mode 100755 (executable)
new mode 100644 (file)
index e5bae62..eb241e5 100644 (file)
@@ -295,25 +295,6 @@ class FunctestUtilsTesting(unittest.TestCase):
     def test_push_results_to_db_missing_buildtag(self):
         self._test_push_results_to_db_missing_env('BUILD_TAG')
 
-    def test_push_results_to_db_incorrect_buildtag(self):
-        dic = self._get_env_dict(None)
-        dic['BUILD_TAG'] = 'incorrect_build_tag'
-        with mock.patch('functest.utils.functest_utils.get_db_url',
-                        return_value=self.db_url), \
-                mock.patch.dict(os.environ,
-                                dic,
-                                clear=True), \
-                mock.patch('functest.utils.functest_utils.logger.error') \
-                as mock_logger_error:
-            self.assertFalse(functest_utils.
-                             push_results_to_db(self.project, self.case_name,
-                                                self.start_date,
-                                                self.stop_date,
-                                                self.criteria, self.details))
-            mock_logger_error.assert_called_once_with("Please fix BUILD_TAG"
-                                                      " env var: incorrect_"
-                                                      "build_tag")
-
     def test_push_results_to_db_request_post_failed(self):
         dic = self._get_env_dict(None)
         with mock.patch('functest.utils.functest_utils.get_db_url',
old mode 100755 (executable)
new mode 100644 (file)
index 9e13ffe..7cc5029 100644 (file)
@@ -207,13 +207,7 @@ def push_results_to_db(project, case_name,
     except KeyError as e:
         logger.error("Please set env var: " + str(e))
         return False
-    rule = "daily-(.+?)-[0-9]*"
-    m = re.search(rule, build_tag)
-    if m:
-        version = m.group(1)
-    else:
-        logger.error("Please fix BUILD_TAG env var: " + build_tag)
-        return False
+    version = get_version()
     test_start = dt.fromtimestamp(start_date).strftime('%Y-%m-%d %H:%M:%S')
     test_stop = dt.fromtimestamp(stop_date).strftime('%Y-%m-%d %H:%M:%S')
 
old mode 100755 (executable)
new mode 100644 (file)