Add run tests API 22/68222/6
author xudan <xudan16@huawei.com>
Thu, 11 Jul 2019 01:59:21 +0000 (21:59 -0400)
committer Dan Xu <xudan16@huawei.com>
Fri, 23 Aug 2019 09:28:42 +0000 (09:28 +0000)
Change-Id: I456d5d1459faa20d9dd8afd0c235a4bb6cbfbcce
Signed-off-by: xudan <xudan16@huawei.com>
dovetail/api/app/constants.py
dovetail/api/app/routes.py
dovetail/api/app/server.py
dovetail/api/app/utils.py [new file with mode: 0644]
dovetail/api/swagger.yaml
dovetail/cli/commands/cli_testcase.py
dovetail/run.py
dovetail/tests/unit/test_run.py

diff --git a/dovetail/api/app/constants.py b/dovetail/api/app/constants.py
index 14d9145..f6ffd1b 100644 (file)
@@ -1,2 +1,15 @@
 NFVI_PROJECT = ['bottlenecks', 'functest', 'yardstick']
 VNF_PROJECT = ['onap-vtp', 'onap-vvp']
+RUN_TEST_ITEMS = {
+    'arguments': {
+        'no_multiple': ['testsuite', 'deploy_scenario'],
+        'multiple': ['testarea', 'testcase']
+    },
+    'options': ['mandatory', 'no_api_validation', 'no_clean', 'stop', 'debug',
+                'opnfv_ci', 'report', 'offline', 'optional']
+}
+CONFIG_YAML_FILES = {
+    'hosts': 'hosts.yaml',
+    'pods': 'pod.yaml',
+    'tempest_conf': 'tempest_conf.yaml'
+}
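
These constants drive how a run request is turned into run.py arguments and
config files (see server.parse_request and server.set_conf_files below). A
minimal sketch of a request body whose keys line up with them; all concrete
values are illustrative only:

    # hypothetical request body: 'testsuite' and 'deploy_scenario' are
    # single-valued, 'testarea'/'testcase' may repeat, 'options' are flags,
    # and the 'conf' keys map onto CONFIG_YAML_FILES and env_config.sh
    example_request = {
        'testsuite': 'ovp.2019.0x',
        'deploy_scenario': 'os-nosdn-ovs-ha',
        'testcase': ['functest.vping.ssh', 'yardstick.ha.rabbitmq'],
        'options': ['debug', 'report'],
        'conf': {'envs': {'OS_AUTH_URL': 'https://192.168.117.222:5000/v3'}},
    }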
diff --git a/dovetail/api/app/routes.py b/dovetail/api/app/routes.py
index 6c32732..b1557b6 100644 (file)
@@ -1,6 +1,12 @@
 #!flask/bin/python
 
-from flask import Flask, jsonify
+import json
+import os
+import subprocess
+import time
+import uuid
+
+from flask import Flask, jsonify, request
 from flask_cors import CORS
 
 import server
@@ -19,3 +25,74 @@ def get_all_testsuites():
 def get_testcases():
     testcases = server.list_testcases()
     return jsonify({'testcases': testcases}), 200
+
+
+@app.route('/api/v1/scenario/nfvi/execution', methods=['POST'])
+def run_testcases():
+    requestId = request.args.get('requestId')
+    if not requestId:
+        requestId = uuid.uuid1()
+    if os.getenv('DOVETAIL_HOME'):
+        dovetail_home = os.getenv('DOVETAIL_HOME')
+    else:
+        return 'No DOVETAIL_HOME found in env.\n', 500
+
+    msg, ret = server.set_conf_files(request.json, dovetail_home, requestId)
+    if not ret:
+        return msg, 500
+
+    msg, ret = server.set_vm_images(request.json, dovetail_home, requestId)
+    if not ret:
+        return msg, 500
+
+    input_str = server.parse_request(request.json)
+
+    repo_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),
+                               os.pardir, os.pardir))
+    run_script = os.path.join(repo_dir, 'run.py')
+
+    cmd = 'python {} {}'.format(run_script, input_str)
+    api_home = os.path.join(dovetail_home, str(requestId))
+    subprocess.Popen(cmd, shell=True, env={'DOVETAIL_HOME': api_home})
+
+    testcases_file = os.path.join(dovetail_home, str(requestId),
+                                  'results', 'testcases.json')
+    for loop in range(60):
+        if not os.path.isfile(testcases_file):
+            time.sleep(1)
+        else:
+            break
+    else:
+        return 'Cannot get file testcases.json.\n', 500
+
+    with open(testcases_file, "r") as f:
+        for jsonfile in f:
+            data = json.loads(jsonfile)
+        testcases = data['testcases']
+        testsuite = data['testsuite']
+
+    result = server.get_execution_status(dovetail_home, testsuite,
+                                         testcases, testcases, requestId)
+
+    return jsonify({'result': result}), 200
+
+
+@app.route('/api/v1/scenario/nfvi/execution/status/<exec_id>',
+           methods=['POST'])
+def get_testcases_status(exec_id):
+    if 'testcase' not in request.json:
+        return 'Need a list of test cases as input.\n', 400
+
+    testcases = request.json['testcase']
+    dovetail_home = os.getenv('DOVETAIL_HOME')
+
+    testcases_file = os.path.join(dovetail_home, str(exec_id),
+                                  'results', 'testcases.json')
+    with open(testcases_file, "r") as f:
+        for jsonfile in f:
+            data = json.loads(jsonfile)
+        testsuite = data['testsuite']
+
+    result = server.get_execution_status(dovetail_home, testsuite,
+                                         testcases, data['testcases'], exec_id)
+    return jsonify({'result': result}), 200
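
A minimal client sketch for the two new endpoints, using the requests library
and assuming the API is reachable at http://localhost:5000 (host, port and the
'run-001' ID are assumptions, not part of this change):

    import requests

    body = {'testcase': ['functest.vping.ssh'],
            'options': ['debug', 'report'],
            'conf': {'envs': {'OS_USERNAME': 'admin'}}}

    # start a run; requestId is optional and is generated when omitted
    resp = requests.post(
        'http://localhost:5000/api/v1/scenario/nfvi/execution',
        params={'requestId': 'run-001'}, json=body)
    print(resp.json())

    # poll the status of selected test cases for the same execution ID
    status = requests.post(
        'http://localhost:5000/api/v1/scenario/nfvi/execution/status/run-001',
        json={'testcase': ['functest.vping.ssh']})
    print(status.json())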
diff --git a/dovetail/api/app/server.py b/dovetail/api/app/server.py
index 4428c25..e6b1df4 100644 (file)
@@ -1,4 +1,9 @@
+import json
+import os
+import shutil
+
 import constants
+import utils
 
 from dovetail.testcase import Testsuite, Testcase
 
@@ -22,3 +27,193 @@ def list_testcases():
             testcase['scenario'] = 'unknown'
         testcase_list.append(testcase)
     return testcase_list
+
+
+def set_vm_images(data, dovetail_home, requestId):
+    image_path = os.path.join(dovetail_home, str(requestId), 'images')
+    try:
+        origin_image_path = data['conf']['vm_images']
+    except KeyError:
+        origin_image_path = os.path.join(dovetail_home, 'images')
+    if os.path.exists(origin_image_path):
+        try:
+            shutil.copytree(origin_image_path, image_path)
+        except Exception as e:
+            return str(e), False
+        return "Success to set vm images.\n", True
+    else:
+        return "Could not find vm images.\n", False
+
+
+def set_conf_files(data, dovetail_home, requestId):
+    config_path = os.path.join(dovetail_home, str(requestId), 'pre_config')
+    origin_config_path = os.path.join(dovetail_home, 'pre_config')
+    if os.path.exists(origin_config_path):
+        try:
+            shutil.copytree(origin_config_path, config_path)
+        except Exception as e:
+            return str(e), False
+
+    # check and prepare the mandatory env_config.sh file
+    # if 'envs' is given in the request body, use it;
+    # otherwise fall back to the existing file in pre_config
+    # if neither is available, return False with an error message
+    env_file = os.path.join(config_path, 'env_config.sh')
+    try:
+        utils.write_env_file(data['conf']['envs'], env_file)
+    except KeyError:
+        if not os.path.isfile(env_file):
+            return "No 'envs' found in the request body.\n", False
+        else:
+            pass
+    except Exception as e:
+        return str(e), False
+
+    # check and prepare other optional yaml files
+    for key, value in constants.CONFIG_YAML_FILES.items():
+        config_file = os.path.join(config_path, value)
+        try:
+            utils.write_yaml_file(data['conf'][key], config_file)
+        except KeyError:
+            pass
+        except Exception as e:
+            return str(e), False
+
+    return 'Successfully prepared all config files.\n', True
+
+
+def parse_request(request_json):
+    output = ''
+    default_args = constants.RUN_TEST_ITEMS['arguments']
+    default_options = constants.RUN_TEST_ITEMS['options']
+
+    for arg in default_args['no_multiple']:
+        if arg in request_json.keys():
+            output = output + ' --{} {}'.format(arg, request_json[arg])
+    for arg in default_args['multiple']:
+        if arg in request_json.keys() and request_json[arg]:
+            for item in request_json[arg]:
+                output = output + ' --{} {}'.format(arg, item)
+
+    if 'options' not in request_json.keys():
+        return output
+
+    for option in default_options:
+        if option in request_json['options']:
+            output = output + ' --{}'.format(option)
+
+    return output
+
+
+def get_execution_status(dovetail_home, testsuite, request_testcases,
+                         exec_testcases, requestId):
+    results_dir = os.path.join(dovetail_home, str(requestId), 'results')
+    results = []
+    for tc in request_testcases:
+        if tc not in exec_testcases:
+            res = {'testCaseName': tc, 'status': 'NOT_EXECUTED'}
+            results.append(res)
+            continue
+        if tc.startswith('functest'):
+            status, result = get_functest_status(results_dir, tc)
+            res = {'testCaseName': tc, 'testSuiteName': testsuite,
+                   'scenario': 'nfvi', 'executionId': requestId,
+                   'results': result, 'status': status}
+            if not result:
+                res['timestart'] = None
+                res['endTime'] = None
+            else:
+                res['timestart'] = result['timestart']
+                res['endTime'] = result['timestop']
+            results.append(res)
+        if tc.startswith('yardstick'):
+            status, result = get_yardstick_status(results_dir, tc)
+            res = {'testCaseName': tc, 'testSuiteName': testsuite,
+                   'scenario': 'nfvi', 'executionId': requestId,
+                   'results': result, 'status': status,
+                   'timestart': None, 'endTime': None}
+            results.append(res)
+        if tc.startswith('bottlenecks'):
+            pass
+    return results
+
+
+def get_functest_status(results_dir, testcase):
+    functest_file = os.path.join(results_dir, 'functest_results.txt')
+    total_file = os.path.join(results_dir, 'results.json')
+    if not os.path.isfile(functest_file):
+        if not os.path.isfile(total_file):
+            return 'IN_PROGRESS', None
+        return 'FAILED', None
+    criteria = None
+    sub_testcase = []
+    timestart = None
+    timestop = None
+
+    # get criteria and sub_testcase from results.json once all tests have completed
+    if os.path.isfile(total_file):
+        with open(total_file, 'r') as f:
+            for jsonfile in f:
+                try:
+                    data = json.loads(jsonfile)
+                    for item in data['testcases_list']:
+                        if item['name'] == testcase:
+                            criteria = item['result']
+                            sub_testcase = item['sub_testcase']
+                            break
+                    else:
+                        return 'FAILED', None
+                except KeyError:
+                    return 'FAILED', None
+                except ValueError:
+                    continue
+
+    # get detailed results from functest_results.txt
+    with open(functest_file, 'r') as f:
+        for jsonfile in f:
+            try:
+                data = json.loads(jsonfile)
+                if data['build_tag'].endswith(testcase):
+                    criteria = data['criteria'] if not criteria else criteria
+                    timestart = data['start_date']
+                    timestop = data['stop_date']
+                    break
+            except KeyError:
+                return 'FAILED', None
+            except ValueError:
+                continue
+        else:
+            if not criteria:
+                return 'IN_PROGRESS', None
+
+    status = 'COMPLETED' if criteria == 'PASS' else 'FAILED'
+    results = {'criteria': criteria, 'sub_testcase': sub_testcase,
+               'timestart': timestart, 'timestop': timestop}
+    return status, results
+
+
+def get_yardstick_status(results_dir, testcase):
+    yardstick_file = os.path.join(results_dir, 'ha_logs',
+                                  '{}.out'.format(testcase))
+    total_file = os.path.join(results_dir, 'results.json')
+    if not os.path.isfile(yardstick_file):
+        if not os.path.isfile(total_file):
+            return 'IN_PROGRESS', None
+        return 'FAILED', None
+    with open(yardstick_file, 'r') as f:
+        for jsonfile in f:
+            data = json.loads(jsonfile)
+            try:
+                criteria = data['result']['criteria']
+                if criteria == 'PASS':
+                    details = data['result']['testcases']
+                    for key, value in details.items():
+                        sla_pass = value['tc_data'][0]['data']['sla_pass']
+                        if sla_pass != 1:
+                            criteria = 'FAIL'
+            except KeyError:
+                return 'FAILED', None
+
+    status = 'COMPLETED' if criteria == 'PASS' else 'FAILED'
+    results = {'criteria': criteria, 'timestart': None, 'timestop': None}
+    return status, results
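
A standalone sketch of the argument mapping that parse_request performs (it
skips the whitelist check against RUN_TEST_ITEMS['options'] for brevity, and
the input dict is illustrative):

    def build_cli_args(request_json):
        args = ''
        # single-valued arguments become one '--key value' each
        for arg in ('testsuite', 'deploy_scenario'):
            if arg in request_json:
                args += ' --{} {}'.format(arg, request_json[arg])
        # repeatable arguments become one '--key value' per item
        for arg in ('testarea', 'testcase'):
            for item in request_json.get(arg) or []:
                args += ' --{} {}'.format(arg, item)
        # options become bare flags
        for option in request_json.get('options', []):
            args += ' --{}'.format(option)
        return args

    print(build_cli_args({'testsuite': 'ovp.2019.0x',
                          'testcase': ['functest.vping.ssh'],
                          'options': ['debug']}))
    # -> ' --testsuite ovp.2019.0x --testcase functest.vping.ssh --debug'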
diff --git a/dovetail/api/app/utils.py b/dovetail/api/app/utils.py
new file mode 100644 (file)
index 0000000..1708dfb
--- /dev/null
@@ -0,0 +1,21 @@
+import json
+import os
+
+
+def write_env_file(envs, file_path):
+    file_dir = os.path.dirname(file_path)
+    if not os.path.exists(file_dir):
+        os.makedirs(file_dir)
+    with open(file_path, "w") as f:
+        for key, value in envs.items():
+            f.write("export {}={}\n".format(key, value))
+    return True
+
+
+def write_yaml_file(data, file_path):
+    file_dir = os.path.dirname(file_path)
+    if not os.path.exists(file_dir):
+        os.makedirs(file_dir)
+    with open(file_path, "w") as f:
+        f.write(json.dumps(data) + '\n')
+    return True
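
A quick usage sketch of the helpers above (run from dovetail/api/app so the
plain "import utils" resolves; the /tmp paths are illustrative):

    import utils

    utils.write_env_file({'OS_USERNAME': 'admin', 'OS_PASSWORD': 'admin'},
                         '/tmp/ovp_demo/pre_config/env_config.sh')
    # env_config.sh now contains lines such as: export OS_USERNAME=admin

    utils.write_yaml_file({'node1': ['192.168.117.222']},
                          '/tmp/ovp_demo/pre_config/hosts.yaml')
    # the dict is serialized as JSON, which is also valid YAML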
diff --git a/dovetail/api/swagger.yaml b/dovetail/api/swagger.yaml
index 5df0dcc..98291d2 100644 (file)
@@ -15,6 +15,8 @@ tags:
   description: "Operations about testsuites"
 - name: "testcases"
   description: "Operations about test cases"
+- name: "execution"
+  description: "Operations about running test cases"
 schemes:
 - "http"
 paths:
@@ -52,3 +54,293 @@ paths:
           description: "successful operation"
         default:
           description: Unexpected error
+  /execution:
+    post:
+      tags:
+      - "execution"
+      summary: "Run test cases"
+      description: ""
+      operationId: "runTestCases"
+      consumes:
+      - "application/json"
+      produces:
+      - "application/json"
+      parameters:
+      - name: "body"
+        in: "body"
+        description: "All info used to run tests"
+        required: false
+        schema:
+          $ref: "#/definitions/RunInfo"
+      responses:
+        200:
+          description: "successful operation"
+          schema:
+            $ref: "#/definitions/StatusResponse"
+        500:
+          description: "internal error"
+        default:
+          description: Unexpected error
+  /execution/{exec_id}:
+    post:
+      tags:
+      - "execution"
+      summary: "Run test cases with exec_id"
+      description: ""
+      operationId: "runTestCasesWithID"
+      consumes:
+      - "application/json"
+      produces:
+      - "application/json"
+      parameters:
+      - name: "exec_id"
+        in: "path"
+        description: "ID of this run, will generate randomly if not given"
+        required: true
+        schema:
+          type: "integer"
+          format: "uuid"
+      - name: "body"
+        in: "body"
+        description: "All info used to run tests"
+        required: false
+        schema:
+          $ref: "#/definitions/RunInfo"
+      responses:
+        200:
+          description: "successful operation"
+          schema:
+            $ref: "#/definitions/StatusResponse"
+        500:
+          description: "internal error"
+        default:
+          description: Unexpected error
+  /execution/status/{exec_id}:
+    post:
+      tags:
+      - "execution/status"
+      summary: "Get status of running test cases"
+      description: ""
+      operationId: "getTestCasesStatus"
+      consumes:
+      - "application/json"
+      produces:
+      - "application/json"
+      parameters:
+      - name: "exec_id"
+        in: "path"
+        description: "exec_id used to get the status of test cases"
+        required: true
+        schema:
+          type: "integer"
+          format: "uuid"
+      - name: "body"
+        in: "body"
+        description: "Test case list used to get status"
+        required: true
+        schema:
+          $ref: "#/definitions/TestCaseList"
+      responses:
+        200:
+          description: "successful operation"
+          schema:
+            $ref: "#/definitions/StatusResponse"
+        500:
+          description: "internal error"
+        default:
+          description: Unexpected error
+definitions:
+  TestCaseList:
+    type: "object"
+    properties:
+      testcase:
+        type: "array"
+        items:
+          type: "string"
+        example:
+        - "functest.vping.ssh"
+        - "yardstick.ha.rabbitmq"
+  Node:
+    type: "object"
+    required:
+    - "name"
+    - "role"
+    - "ip"
+    - "user"
+    properties:
+      name:
+        type: "string"
+        example: "node1"
+      role:
+        type: "string"
+        enum:
+        - "Controller"
+        - "Compute"
+        - "Jumpserver"
+      ip:
+        type: "string"
+        example: "192.168.117.222"
+      user:
+        type: "string"
+        example: "root"
+      password:
+        type: "string"
+        example: "root"
+      key_filename:
+        type: "string"
+        example: "/home/ovp/pre_config/id_rsa"
+  ProcessInfo:
+    type: "object"
+    required:
+    - "testcase_name"
+    properties:
+      testcase_name:
+        type: "string"
+        example: "yardstick.ha.rabbitmq"
+      attack_host:
+        type: "string"
+        example: "node1"
+      attack_process:
+        type: "string"
+        example: "rabbitmq"
+  Pods:
+    type: "object"
+    properties:
+      nodes:
+        type: "array"
+        items:
+          $ref: '#/definitions/Node'
+      process_info:
+        type: "array"
+        items:
+          $ref: "#/definitions/ProcessInfo"
+  tempestconf:
+    type: "object"
+    additionalProperties:
+      type: string
+  TempestConf:
+    type: "object"
+    additionalProperties:
+      $ref: "#/definitions/tempestconf"
+  Hosts:
+    type: "object"
+    additionalProperties:
+      type: "array"
+      items:
+        type: "string"
+  Envs:
+    type: "object"
+    additionalProperties:
+      type: string
+    example:
+      OS_USERNAME: "admin"
+      OS_PASSWORD: "admin"
+      OS_AUTH_URL: "https://192.168.117.222:5000/v3"
+      EXTERNAL_NETWORK: "ext-net"
+  Conf:
+    type: "object"
+    properties:
+      vm_images:
+        type: "string"
+        example: "/home/ovp/images"
+      pods:
+        $ref: "#/definitions/Pods"
+      tempest_conf:
+        $ref: "#/definitions/TempestConf"
+      hosts:
+        $ref: "#/definitions/Hosts"
+      envs:
+        $ref: "#/definitions/Envs"
+  RunInfo:
+    type: "object"
+    properties:
+      conf:
+        $ref: "#/definitions/Conf"
+      testcase:
+        type: "array"
+        items:
+          type: "string"
+        example:
+        - "functest.vping.ssh"
+        - "yardstick.ha.rabbitmq"
+      testsuite:
+        type: "string"
+        example: "ovp.2019.0x"
+      testarea:
+        type: "array"
+        items:
+          type: "string"
+        example:
+        - "vping"
+        - "ha"
+      deploy_scenario:
+        type: "string"
+        example: "os-nosdn-ovs-ha"
+      options:
+        type: "array"
+        items:
+          type: "string"
+          enum:
+          - "opnfv-ci"
+          - "optional"
+          - "offline"
+          - "report"
+          - "debug"
+          - "stop"
+          - "no-clean"
+          - "no-api-validation"
+          - "mandatory"
+        example:
+        - "debug"
+        - "report"
+  Results:
+    type: "object"
+    properties:
+      criteria:
+        type: "string"
+        enum:
+        - "PASS"
+        - "FAIL"
+      timestart:
+        type: "string"
+        format: "date-time"
+      timestop:
+        type: "string"
+        format: "date-time"
+  TestCaseStatus:
+    type: "object"
+    properties:
+      endTime:
+        type: "string"
+        format: "date-time"
+      executionId:
+        type: "string"
+        format: "uuid"
+      results:
+        $ref: "#/definitions/Results"
+      scenario:
+        type: "string"
+        example: "nfvi"
+      status:
+        type: "string"
+        enum:
+        - "IN_PROGRESS"
+        - "COMPLETED"
+        - "FAILED"
+        - "NOT_EXECUTED"
+      testCaseName:
+        type: "string"
+        example: "functest.vping.ssh"
+      testSuiteName:
+        type: "string"
+        example: "ovp.2019.0x"
+      timestart:
+        type: "string"
+        format: "date-time"
+  StatusResponse:
+    type: "object"
+    properties:
+      result:
+        type: "array"
+        items:
+          $ref: "#/definitions/TestCaseStatus"
diff --git a/dovetail/cli/commands/cli_testcase.py b/dovetail/cli/commands/cli_testcase.py
index e91d88e..2725c89 100644 (file)
@@ -19,12 +19,13 @@ import dovetail.utils.dovetail_utils as dt_utils
 
 class CliTestcase(object):
 
-    @classmethod
-    def testsuite_load(cls):
+    @staticmethod
+    def testsuite_load():
         dt_cfg.load_config_files(constants.CONF_PATH)
         Testsuite.load()
 
-    def list_one_testsuite(self, testsuite):
+    @staticmethod
+    def list_one_testsuite(testsuite):
         testsuite_stream = Testsuite.get(testsuite)
         if testsuite_stream:
             mandatory = dt_utils.get_value_from_dict(
@@ -59,7 +60,8 @@ class CliTestcase(object):
             else:
                 click.echo("No testsuite defined yet in dovetail!!!")
 
-    def show_testcase(self, name):
+    @staticmethod
+    def show_testcase(name):
         tc_path = os.path.join(constants.TESTCASE_PATH, "{}.yml".format(name))
         if os.path.isfile(tc_path):
             with open(tc_path, 'r') as stream:
@@ -70,7 +72,8 @@ class CliTestcase(object):
         else:
             click.echo("testcase %s not exist or not supported" % name)
 
-    def run(self, args_str):
+    @staticmethod
+    def run(args_str):
         options = ''
         if args_str:
             options = options + args_str
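
With the @staticmethod conversion, these helpers can be called on the class
directly, without constructing a CliTestcase instance; a minimal sketch,
assuming a Dovetail checkout with its test case YAML files available:

    from dovetail.cli.commands.cli_testcase import CliTestcase

    CliTestcase.testsuite_load()
    CliTestcase.show_testcase('functest.vping.ssh')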
diff --git a/dovetail/run.py b/dovetail/run.py
index c83c197..0ea3cb1 100755 (executable)
@@ -12,6 +12,7 @@
 
 import copy
 from datetime import datetime
+import json
 import os
 import time
 import uuid
@@ -275,6 +276,13 @@ def main(*args, **kwargs):
     dt_utils.check_docker_version(logger)
 
     testcase_list = get_testcase_list(logger, **kwargs)
+
+    dovetail_home = os.environ['DOVETAIL_HOME']
+    testcases_file = os.path.join(dovetail_home, 'results', 'testcases.json')
+    with open(testcases_file, "w") as f:
+        data = {'testsuite': kwargs['testsuite'], 'testcases': testcase_list}
+        f.write(json.dumps(data) + '\n')
+
     if not testcase_list:
         raise SystemExit(EXIT_RUN_FAILED)
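
A sketch of the results/testcases.json record written above, and the
line-oriented way the API reads it back (one JSON object per line); the /tmp
path and the test case names are illustrative:

    import json

    record = {'testsuite': 'ovp.2019.0x',
              'testcases': ['functest.vping.ssh', 'yardstick.ha.rabbitmq']}
    with open('/tmp/testcases.json', 'w') as f:
        f.write(json.dumps(record) + '\n')

    with open('/tmp/testcases.json') as f:
        for line in f:
            data = json.loads(line)
    print(data['testsuite'], data['testcases'])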
 
diff --git a/dovetail/tests/unit/test_run.py b/dovetail/tests/unit/test_run.py
index 654d8c9..497cd06 100644 (file)
@@ -490,7 +490,9 @@ class RunTesting(unittest.TestCase):
             'Test area area is not defined.')
         self.assertEquals(None, result)
 
+    @patch('__builtin__.open')
     @patch('dovetail.run.os')
+    @patch('dovetail.run.json')
     @patch('dovetail.run.uuid')
     @patch('dovetail.run.dt_logger')
     @patch('dovetail.run.dt_cfg')
@@ -507,13 +509,16 @@ class RunTesting(unittest.TestCase):
     def test_main(self, mock_create_logs, mock_run, mock_get_list,
                   mock_copy_patch, mock_copy_userconf, mock_update, mock_parse,
                   mock_clean, mock_get_result, mock_utils, mock_config,
-                  mock_logger, mock_uuid, mock_os):
+                  mock_logger, mock_uuid, mock_json, mock_os, mock_open):
         mock_config.dovetail_config = {}
-        mock_os.environ = {}
+        mock_os.environ = {'DOVETAIL_HOME': 'dovetail_home'}
         logger_obj = Mock()
         logger_temp_obj = Mock()
+        file_obj = Mock()
         logger_temp_obj.getLogger.return_value = logger_obj
         mock_logger.Logger.return_value = logger_temp_obj
+        mock_open.return_value.__enter__.return_value = file_obj
+        mock_json.dumps.return_value = 'results text'
         mock_uuid.uuid1.return_value = 42
         mock_get_result.return_value = True
         testcase_list = ['testcase']
@@ -538,8 +543,8 @@ class RunTesting(unittest.TestCase):
                           mock_config.dovetail_config)
         mock_get_result.assert_called_once_with()
         mock_clean.assert_called_once_with()
-        self.assertEquals({'DEBUG': 'true', 'OPNFV_CI': 'true'},
-                          mock_os.environ)
+        self.assertEquals({'DOVETAIL_HOME': 'dovetail_home', 'DEBUG': 'true',
+                           'OPNFV_CI': 'true'}, mock_os.environ)
         mock_create_logs.assert_called_once_with()
         logger_obj.info.assert_has_calls([
             call('================================================'),
@@ -575,6 +580,8 @@ class RunTesting(unittest.TestCase):
         mock_get_result.assert_called_once_with()
         self.assertEquals(expected.code, 0)
 
+    @patch('__builtin__.open')
+    @patch('dovetail.run.json')
     @patch('dovetail.run.os')
     @patch('dovetail.run.uuid')
     @patch('dovetail.run.dt_logger')
@@ -593,13 +600,17 @@ class RunTesting(unittest.TestCase):
                                   mock_get_list, mock_copy_patch,
                                   mock_copy_userconf, mock_update, mock_clean,
                                   mock_get_result, mock_utils, mock_config,
-                                  mock_logger, mock_uuid, mock_os):
+                                  mock_logger, mock_uuid, mock_os, mock_json,
+                                  mock_open):
         mock_config.dovetail_config = {}
-        mock_os.environ = {}
+        mock_os.environ = {'DOVETAIL_HOME': 'dovetail_home'}
         logger_obj = Mock()
         logger_temp_obj = Mock()
+        file_obj = Mock()
         logger_temp_obj.getLogger.return_value = logger_obj
         mock_logger.Logger.return_value = logger_temp_obj
+        mock_open.return_value.__enter__.return_value = file_obj
+        mock_json.dumps.return_value = 'results text'
         mock_uuid.uuid1.return_value = 42
         mock_get_result.return_value = True
         mock_get_list.return_value = None
@@ -624,8 +635,8 @@ class RunTesting(unittest.TestCase):
                           mock_config.dovetail_config)
         mock_get_result.assert_called_once_with()
         mock_clean.assert_called_once_with()
-        self.assertEquals({'DEBUG': 'true', 'OPNFV_CI': 'false'},
-                          mock_os.environ)
+        self.assertEquals({'DOVETAIL_HOME': 'dovetail_home', 'DEBUG': 'true',
+                           'OPNFV_CI': 'false'}, mock_os.environ)
         mock_create_logs.assert_called_once_with()
         logger_obj.info.assert_has_calls([
             call('================================================'),