Refactor API module 73/68773/3
author    xudan <xudan16@huawei.com>
Sun, 3 Nov 2019 14:11:00 +0000 (09:11 -0500)
committer xudan <xudan16@huawei.com>
Tue, 5 Nov 2019 03:51:14 +0000 (22:51 -0500)
Change-Id: Ifa01fcbb848d3218619c1f8e726f26476773ce2f
Signed-off-by: xudan <xudan16@huawei.com>
dovetail/api/app/routes.py
dovetail/api/app/server.py
dovetail/api/app/utils.py

diff --git a/dovetail/api/app/routes.py b/dovetail/api/app/routes.py
index e60f10a..352d69f 100644
@@ -9,7 +9,7 @@ import uuid
 from flask import Flask, jsonify, request
 from flask_cors import CORS
 
-import app.server as server
+from app.server import Server
 
 app = Flask(__name__)
 CORS(app)
@@ -17,13 +17,13 @@ CORS(app)
 
 @app.route('/api/v1/scenario/nfvi/testsuites', methods=['GET'])
 def get_all_testsuites():
-    testsuites = server.list_testsuites()
+    testsuites = Server.list_testsuites()
     return jsonify({'testsuites': testsuites}), 200
 
 
 @app.route('/api/v1/scenario/nfvi/testcases', methods=['GET'])
 def get_testcases():
-    testcases = server.list_testcases()
+    testcases = Server.list_testcases()
     return jsonify({'testcases': testcases}), 200
 
 
@@ -37,15 +37,17 @@ def run_testcases():
     else:
         return 'No DOVETAIL_HOME found in env.\n', 500
 
-    msg, ret = server.set_conf_files(request.json, dovetail_home, requestId)
+    server = Server(dovetail_home, requestId, request.json)
+
+    msg, ret = server.set_conf_files()
     if not ret:
         return msg, 500
 
-    msg, ret = server.set_vm_images(request.json, dovetail_home, requestId)
+    msg, ret = server.set_vm_images()
     if not ret:
         return msg, 500
 
-    input_str = server.parse_request(request.json)
+    input_str = server.parse_request()
 
     repo_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                os.pardir, os.pardir))
@@ -72,8 +74,7 @@ def run_testcases():
         testcases = data['testcases']
         testsuite = data['testsuite']
 
-    result = server.get_execution_status(dovetail_home, testsuite,
-                                         testcases, testcases, requestId)
+    result = server.get_execution_status(testsuite, testcases, testcases)
 
     return jsonify({'result': result}), 200
 
@@ -87,6 +88,7 @@ def get_testcases_status(exec_id):
     testcases = request.json['testcase']
     dovetail_home = os.getenv('DOVETAIL_HOME')
 
+    server = Server(dovetail_home, exec_id, request.json)
     testcases_file = os.path.join(dovetail_home, str(exec_id),
                                   'results', 'testcases.json')
     with open(testcases_file, "r") as f:
@@ -94,6 +96,7 @@ def get_testcases_status(exec_id):
             data = json.loads(jsonfile)
         testsuite = data['testsuite']
 
-    result = server.get_execution_status(dovetail_home, testsuite,
-                                         testcases, data['testcases'], exec_id)
+    result = server.get_execution_status(testsuite, testcases,
+                                         data['testcases'])
+
     return jsonify({'result': result}), 200
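
The routes.py hunks above switch from calling module-level functions in app.server to constructing a per-request Server object and calling instance methods. A minimal sketch of the new call pattern, assuming DOVETAIL_HOME is set; the request body and the way the UUID is produced are illustrative stand-ins, not values taken from this change:

    # Sketch only: mirrors the flow of run_testcases() after this change.
    import os
    import uuid

    from app.server import Server

    dovetail_home = os.getenv('DOVETAIL_HOME')
    request_id = uuid.uuid4()          # stand-in; routes.py creates its own requestId
    request_json = {'testcase': ['functest.vping.ssh'],
                    'conf': {'envs': {'OS_AUTH_URL': 'http://10.0.0.1:5000/v3'}}}

    server = Server(dovetail_home, request_id, request_json)

    msg, ret = server.set_conf_files()     # was server.set_conf_files(request.json, ...)
    if ret:
        msg, ret = server.set_vm_images()  # was server.set_vm_images(request.json, ...)
    input_str = server.parse_request()     # builds the dovetail CLI argument string
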
diff --git a/dovetail/api/app/server.py b/dovetail/api/app/server.py
index 312657d..d44e2ee 100644
@@ -3,267 +3,295 @@ import os
 import shutil
 
 import app.constants as constants
-import app.utils as utils
+from app.utils import Utils
 
 from dovetail.testcase import Testsuite, Testcase
 
 
-def list_testsuites():
-    return Testsuite.load()
-
-
-def list_testcases():
-    testcases = Testcase.load()
-    testcase_list = []
-    for key, value in testcases.items():
-        testcase = {'testCaseName': key,
-                    'description': value.objective(),
-                    'subTestCase': value.sub_testcase()}
-        if value.validate_type() in constants.NFVI_PROJECT:
-            testcase['scenario'] = 'nfvi'
-        elif value.validate_type() in constants.VNF_PROJECT:
-            testcase['scenario'] = 'vnf'
-        else:
-            testcase['scenario'] = 'unknown'
-        testcase_list.append(testcase)
-    return testcase_list
-
-
-def set_vm_images(data, dovetail_home, requestId):
-    image_path = os.path.join(dovetail_home, str(requestId), 'images')
-    try:
-        origin_image_path = data['conf']['vm_images']
-    except KeyError:
-        origin_image_path = os.path.join(dovetail_home, 'images')
-    if os.path.exists(origin_image_path):
-        try:
-            shutil.copytree(origin_image_path, image_path)
-        except Exception as e:
-            return str(e), False
-        return "Success to set vm images.\n", True
-    else:
-        return "Could not find vm images.\n", False
-
+class Server(object):
+
+    def __init__(self, dovetail_home=None, requestId=None, requestData=None):
+        self.dovetail_home = dovetail_home
+        self.requestId = requestId
+        self.requestData = requestData
+
+    @staticmethod
+    def list_testsuites():
+        return Testsuite.load()
+
+    @staticmethod
+    def list_testcases():
+        testcases = Testcase.load()
+        testcase_list = []
+        for key, value in testcases.items():
+            testcase = {'testCaseName': key,
+                        'description': value.objective(),
+                        'subTestCase': value.sub_testcase()}
+            if value.validate_type() in constants.NFVI_PROJECT:
+                testcase['scenario'] = 'nfvi'
+            elif value.validate_type() in constants.VNF_PROJECT:
+                testcase['scenario'] = 'vnf'
+            else:
+                testcase['scenario'] = 'unknown'
+            testcase_list.append(testcase)
+        return testcase_list
 
-def set_conf_files(data, dovetail_home, requestId):
-    config_path = os.path.join(dovetail_home, str(requestId), 'pre_config')
-    origin_config_path = os.path.join(dovetail_home, 'pre_config')
-    if os.path.exists(origin_config_path):
+    def set_vm_images(self):
+        image_path = os.path.join(self.dovetail_home, str(self.requestId),
+                                  'images')
         try:
-            shutil.copytree(origin_config_path, config_path)
-        except Exception as e:
-            return str(e), False
-
-    # check and prepare mandatory env_config.sh file
-    # if there are envs in request body, use it
-    # otherwise, use the file in pre_config
-    # if don't have this file, return False with error message
-    env_file = os.path.join(config_path, 'env_config.sh')
-    try:
-        utils.write_env_file(data['conf']['envs'], env_file)
-    except KeyError:
-        if not os.path.isfile(env_file):
-            return "No 'envs' found in the request body.\n", False
+            origin_image_path = self.requestData['conf']['vm_images']
+        except KeyError:
+            origin_image_path = os.path.join(self.dovetail_home, 'images')
+        if os.path.exists(origin_image_path):
+            try:
+                shutil.copytree(origin_image_path, image_path)
+            except Exception as e:
+                return str(e), False
+            return "Success to set vm images.\n", True
         else:
-            pass
-    except Exception as e:
-        return str(e), False
+            return "Could not find vm images.\n", False
 
-    # check and prepare other optional yaml files
-    for key, value in constants.CONFIG_YAML_FILES.items():
-        config_file = os.path.join(config_path, value)
+    def set_conf_files(self):
+        config_path = os.path.join(self.dovetail_home, str(self.requestId),
+                                   'pre_config')
+        origin_config_path = os.path.join(self.dovetail_home, 'pre_config')
+        if os.path.exists(origin_config_path):
+            try:
+                shutil.copytree(origin_config_path, config_path)
+            except Exception as e:
+                return str(e), False
+
+        # check and prepare mandatory env_config.sh file
+        # if there are envs in request body, use it
+        # otherwise, use the file in pre_config
+        # if don't have this file, return False with error message
+        env_file = os.path.join(config_path, 'env_config.sh')
         try:
-            utils.write_yaml_file(data['conf'][key], config_file)
+            Utils.write_env_file(self.requestData['conf']['envs'], env_file)
         except KeyError:
-            pass
+            if not os.path.isfile(env_file):
+                return "No 'envs' found in the request body.\n", False
+            else:
+                pass
         except Exception as e:
             return str(e), False
 
-    return 'Success to prepare all config files.\n', True
+        # check and prepare other optional yaml files
+        for key, value in constants.CONFIG_YAML_FILES.items():
+            config_file = os.path.join(config_path, value)
+            try:
+                Utils.write_yaml_file(self.requestData['conf'][key],
+                                      config_file)
+            except KeyError:
+                pass
+            except Exception as e:
+                return str(e), False
+
+        return 'Success to prepare all config files.\n', True
 
+    def parse_request(self):
+        output = ''
+        default_args = constants.RUN_TEST_ITEMS['arguments']
+        default_options = constants.RUN_TEST_ITEMS['options']
 
-def parse_request(request_json):
-    output = ''
-    default_args = constants.RUN_TEST_ITEMS['arguments']
-    default_options = constants.RUN_TEST_ITEMS['options']
+        for arg in default_args['no_multiple']:
+            if arg in self.requestData.keys():
+                output = output + ' --{} {}'.format(arg, self.requestData[arg])
+        for arg in default_args['multiple']:
+            if arg in self.requestData.keys() and self.requestData[arg]:
+                for item in self.requestData[arg]:
+                    output = output + ' --{} {}'.format(arg, item)
 
-    for arg in default_args['no_multiple']:
-        if arg in request_json.keys():
-            output = output + ' --{} {}'.format(arg, request_json[arg])
-    for arg in default_args['multiple']:
-        if arg in request_json.keys() and request_json[arg]:
-            for item in request_json[arg]:
-                output = output + ' --{} {}'.format(arg, item)
+        if 'options' not in self.requestData.keys():
+            return output
 
-    if 'options' not in request_json.keys():
-        return output
+        for option in default_options:
+            if option in self.requestData['options']:
+                output = output + ' --{}'.format(option)
 
-    for option in default_options:
-        if option in request_json['options']:
-            output = output + ' --{}'.format(option)
+        return output
 
-    return output
+    def get_execution_status(self, testsuite, request_testcases,
+                             exec_testcases):
+        results_dir = os.path.join(self.dovetail_home, str(self.requestId),
+                                   'results')
+        results = []
+        for tc in request_testcases:
+            if tc not in exec_testcases:
+                res = {'testCaseName': tc, 'status': 'NOT_EXECUTED'}
+                results.append(res)
+                continue
 
+            tc_type = tc.split('.')[0]
+            checker = CheckerFactory.create(tc_type)
+            status, result = checker.get_status(results_dir, tc)
 
-def get_execution_status(dovetail_home, testsuite, request_testcases,
-                         exec_testcases, requestId):
-    results_dir = os.path.join(dovetail_home, str(requestId), 'results')
-    results = []
-    for tc in request_testcases:
-        if tc not in exec_testcases:
-            res = {'testCaseName': tc, 'status': 'NOT_EXECUTED'}
-            results.append(res)
-            continue
-        if tc.startswith('functest'):
-            status, result = get_functest_status(results_dir, tc)
             res = {'testCaseName': tc, 'testSuiteName': testsuite,
-                   'scenario': 'nfvi', 'executionId': requestId,
-                   'results': result, 'status': status}
-            if not result:
-                res['timestart'] = None
-                res['endTime'] = None
-            else:
+                   'scenario': 'nfvi', 'executionId': self.requestId,
+                   'results': result, 'status': status, 'timestart': None,
+                   'endTime': None}
+            try:
                 res['timestart'] = result['timestart']
                 res['endTime'] = result['timestop']
+            except Exception:
+                pass
+
             results.append(res)
-        if tc.startswith('yardstick'):
-            status, result = get_yardstick_status(results_dir, tc)
-            res = {'testCaseName': tc, 'testSuiteName': testsuite,
-                   'scenario': 'nfvi', 'executionId': requestId,
-                   'results': result, 'status': status,
-                   'timestart': None, 'endTime': None}
-            results.append(res)
-        if tc.startswith('bottlenecks'):
-            status, result = get_bottlenecks_status(results_dir, tc)
-            res = {'testCaseName': tc, 'testSuiteName': testsuite,
-                   'scenario': 'nfvi', 'executionId': requestId,
-                   'results': result, 'status': status,
-                   'timestart': None, 'endTime': None}
-            results.append(res)
-    return results
 
+        return results
 
-def get_status_from_total_file(total_file, testcase):
-    with open(total_file, 'r') as f:
-        for jsonfile in f:
-            try:
-                data = json.loads(jsonfile)
-                for item in data['testcases_list']:
-                    if item['name'] == testcase:
-                        return item['result'], item['sub_testcase']
-            except KeyError as e:
-                return 'FAILED', None
-            except ValueError:
-                continue
-    return 'FAILED', None
 
+class Checker(object):
+
+    def __init__(self):
+        pass
 
-def get_functest_status(results_dir, testcase):
-    functest_file = os.path.join(results_dir, 'functest_results.txt')
-    total_file = os.path.join(results_dir, 'results.json')
-    if not os.path.isfile(functest_file):
-        if not os.path.isfile(total_file):
-            return 'IN_PROGRESS', None
+    @staticmethod
+    def get_status_from_total_file(total_file, testcase):
+        with open(total_file, 'r') as f:
+            for jsonfile in f:
+                try:
+                    data = json.loads(jsonfile)
+                    for item in data['testcases_list']:
+                        if item['name'] == testcase:
+                            return item['result'], item['sub_testcase']
+                except KeyError as e:
+                    return 'FAILED', None
+                except ValueError:
+                    continue
         return 'FAILED', None
-    criteria = None
-    sub_testcase = []
-    timestart = None
-    timestop = None
-
-    # get criteria and sub_testcase from results.json when all tests completed
-    if os.path.isfile(total_file):
-        criteria, sub_testcase = get_status_from_total_file(total_file,
-                                                            testcase)
-        if criteria == 'FAILED':
-            return 'FAILED', None
 
-    # get detailed results from functest_results.txt
-    with open(functest_file, 'r') as f:
-        for jsonfile in f:
-            try:
-                data = json.loads(jsonfile)
-                if data['build_tag'].endswith(testcase):
-                    criteria = data['criteria'] if not criteria else criteria
-                    timestart = data['start_date']
-                    timestop = data['stop_date']
-                    break
-            except KeyError:
-                return 'FAILED', None
-            except ValueError:
-                continue
-        else:
-            if not criteria:
+
+class FunctestChecker(Checker):
+
+    def get_status(self, results_dir, testcase):
+        functest_file = os.path.join(results_dir, 'functest_results.txt')
+        total_file = os.path.join(results_dir, 'results.json')
+        if not os.path.isfile(functest_file):
+            if not os.path.isfile(total_file):
                 return 'IN_PROGRESS', None
+            return 'FAILED', None
+        criteria = None
+        sub_testcase = []
+        timestart = None
+        timestop = None
+
+        # get criteria and sub_testcase when all tests completed
+        if os.path.isfile(total_file):
+            criteria, sub_testcase = self.get_status_from_total_file(
+                total_file, testcase)
+            if criteria == 'FAILED':
+                return 'FAILED', None
 
-    status = 'COMPLETED' if criteria == 'PASS' else 'FAILED'
-    results = {'criteria': criteria, 'sub_testcase': sub_testcase,
-               'timestart': timestart, 'timestop': timestop}
-    return status, results
+        # get detailed results from functest_results.txt
+        with open(functest_file, 'r') as f:
+            for jsonfile in f:
+                try:
+                    data = json.loads(jsonfile)
+                    if data['build_tag'].endswith(testcase):
+                        criteria = data['criteria'] if not criteria \
+                            else criteria
+                        timestart = data['start_date']
+                        timestop = data['stop_date']
+                        break
+                except KeyError:
+                    return 'FAILED', None
+                except ValueError:
+                    continue
+            else:
+                if not criteria:
+                    return 'IN_PROGRESS', None
 
+        status = 'COMPLETED' if criteria == 'PASS' else 'FAILED'
+        results = {'criteria': criteria, 'sub_testcase': sub_testcase,
+                   'timestart': timestart, 'timestop': timestop}
+        return status, results
 
-def get_yardstick_status(results_dir, testcase):
-    yardstick_file = os.path.join(results_dir, 'ha_logs',
-                                  '{}.out'.format(testcase))
-    total_file = os.path.join(results_dir, 'results.json')
-    if not os.path.isfile(yardstick_file):
-        if not os.path.isfile(total_file):
-            return 'IN_PROGRESS', None
-        return 'FAILED', None
 
-    criteria = None
+class YardstickChecker(Checker):
 
-    # get criteria and sub_testcase from results.json when all tests completed
-    if os.path.isfile(total_file):
-        criteria, _ = get_status_from_total_file(total_file, testcase)
-        if criteria == 'FAILED':
+    def get_status(self, results_dir, testcase):
+        yardstick_file = os.path.join(results_dir, 'ha_logs',
+                                      '{}.out'.format(testcase))
+        total_file = os.path.join(results_dir, 'results.json')
+        if not os.path.isfile(yardstick_file):
+            if not os.path.isfile(total_file):
+                return 'IN_PROGRESS', None
             return 'FAILED', None
 
-    with open(yardstick_file, 'r') as f:
-        for jsonfile in f:
-            data = json.loads(jsonfile)
-            try:
-                if not criteria:
-                    criteria = data['result']['criteria']
-                if criteria == 'PASS':
-                    details = data['result']['testcases']
-                    for key, value in details.items():
-                        sla_pass = value['tc_data'][0]['data']['sla_pass']
-                        if not 1 == sla_pass:
-                            criteria = 'FAIL'
-            except KeyError:
+        criteria = None
+
+        # get criteria and sub_testcase when all tests completed
+        if os.path.isfile(total_file):
+            criteria, _ = self.get_status_from_total_file(total_file, testcase)
+            if criteria == 'FAILED':
                 return 'FAILED', None
 
-    status = 'COMPLETED' if criteria == 'PASS' else 'FAILED'
-    results = {'criteria': criteria, 'timestart': None, 'timestop': None}
-    return status, results
+        with open(yardstick_file, 'r') as f:
+            for jsonfile in f:
+                data = json.loads(jsonfile)
+                try:
+                    if not criteria:
+                        criteria = data['result']['criteria']
+                    if criteria == 'PASS':
+                        details = data['result']['testcases']
+                        for key, value in details.items():
+                            sla_pass = value['tc_data'][0]['data']['sla_pass']
+                            if not 1 == sla_pass:
+                                criteria = 'FAIL'
+                except KeyError:
+                    return 'FAILED', None
+
+        status = 'COMPLETED' if criteria == 'PASS' else 'FAILED'
+        results = {'criteria': criteria, 'timestart': None, 'timestop': None}
+        return status, results
+
+
+class BottlenecksChecker(Checker):
+
+    def get_status(self, results_dir, testcase):
+        bottlenecks_file = os.path.join(results_dir, 'stress_logs',
+                                        '{}.out'.format(testcase))
+        total_file = os.path.join(results_dir, 'results.json')
+        if not os.path.isfile(bottlenecks_file):
+            if not os.path.isfile(total_file):
+                return 'IN_PROGRESS', None
+            return 'FAILED', None
 
+        criteria = None
 
-def get_bottlenecks_status(results_dir, testcase):
-    bottlenecks_file = os.path.join(results_dir, 'stress_logs',
-                                    '{}.out'.format(testcase))
-    total_file = os.path.join(results_dir, 'results.json')
-    if not os.path.isfile(bottlenecks_file):
-        if not os.path.isfile(total_file):
-            return 'IN_PROGRESS', None
-        return 'FAILED', None
+        # get criteria and sub_testcase when all tests completed
+        if os.path.isfile(total_file):
+            criteria, _ = self.get_status_from_total_file(total_file, testcase)
+            if criteria == 'FAILED':
+                return 'FAILED', None
 
-    criteria = None
+        with open(bottlenecks_file, 'r') as f:
+            for jsonfile in f:
+                data = json.loads(jsonfile)
+                try:
+                    if not criteria:
+                        criteria = data['data_body']['result']
+                except KeyError:
+                    return 'FAILED', None
 
-    # get criteria and sub_testcase from results.json when all tests completed
-    if os.path.isfile(total_file):
-        criteria, _ = get_status_from_total_file(total_file, testcase)
-        if criteria == 'FAILED':
-            return 'FAILED', None
+        status = 'COMPLETED' if criteria == 'PASS' else 'FAILED'
+        results = {'criteria': criteria, 'timestart': None, 'timestop': None}
+        return status, results
 
-    with open(bottlenecks_file, 'r') as f:
-        for jsonfile in f:
-            data = json.loads(jsonfile)
-            try:
-                if not criteria:
-                    criteria = data['data_body']['result']
-            except KeyError:
-                return 'FAILED', None
 
-    status = 'COMPLETED' if criteria == 'PASS' else 'FAILED'
-    results = {'criteria': criteria, 'timestart': None, 'timestop': None}
-    return status, results
+class CheckerFactory(object):
+
+    CHECKER_MAP = {
+        'functest': FunctestChecker,
+        'yardstick': YardstickChecker,
+        'bottlenecks': BottlenecksChecker
+    }
+
+    @classmethod
+    def create(cls, tc_type):
+        try:
+            return cls.CHECKER_MAP[tc_type]()
+        except KeyError:
+            return None
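
With the three get_*_status() helpers turned into FunctestChecker, YardstickChecker, and BottlenecksChecker, get_execution_status() now selects a checker by the test case prefix through CheckerFactory instead of an if-chain. A minimal sketch of that dispatch, using only names introduced in this diff; the results directory and test case name are placeholders:

    # Sketch only: how get_execution_status() resolves and uses a checker.
    import os

    from app.server import CheckerFactory

    results_dir = os.path.join('/tmp/dovetail', 'some-request-id', 'results')  # placeholder
    tc = 'functest.vping.ssh'                                                  # placeholder

    checker = CheckerFactory.create(tc.split('.')[0])  # 'functest' -> FunctestChecker()
    if checker is not None:                            # create() returns None for unknown prefixes
        status, result = checker.get_status(results_dir, tc)
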
diff --git a/dovetail/api/app/utils.py b/dovetail/api/app/utils.py
index 1708dfb..9f35ee0 100644
@@ -2,20 +2,23 @@ import json
 import os
 
 
-def write_env_file(envs, file_path):
-    file_dir = os.path.dirname(file_path)
-    if not os.path.exists(file_dir):
-        os.makedirs(file_dir)
-    with open(file_path, "w") as f:
-        for key, value in envs.items():
-            f.write("export {}={}\n".format(key, value))
-    return True
+class Utils(object):
 
+    @staticmethod
+    def write_env_file(envs, file_path):
+        file_dir = os.path.dirname(file_path)
+        if not os.path.exists(file_dir):
+            os.makedirs(file_dir)
+        with open(file_path, "w") as f:
+            for key, value in envs.items():
+                f.write("export {}={}\n".format(key, value))
+        return True
 
-def write_yaml_file(data, file_path):
-    file_dir = os.path.dirname(file_path)
-    if not os.path.exists(file_dir):
-        os.makedirs(file_dir)
-    with open(file_path, "w") as f:
-        f.write(json.dumps(data) + '\n')
-    return True
+    @staticmethod
+    def write_yaml_file(data, file_path):
+        file_dir = os.path.dirname(file_path)
+        if not os.path.exists(file_dir):
+            os.makedirs(file_dir)
+        with open(file_path, "w") as f:
+            f.write(json.dumps(data) + '\n')
+        return True
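
The utils.py helpers keep their behaviour but are now static methods on a Utils class, so callers (see server.py above) switch from utils.write_env_file(...) to Utils.write_env_file(...). A minimal usage sketch; the paths and payloads below are illustrative only:

    # Sketch only: same helpers, now namespaced under Utils.
    from app.utils import Utils

    Utils.write_env_file({'OS_AUTH_URL': 'http://10.0.0.1:5000/v3'},
                         '/tmp/pre_config/env_config.sh')
    Utils.write_yaml_file({'hosts_info': {}},            # hypothetical config payload
                          '/tmp/pre_config/hosts.yaml')  # hypothetical file name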