Merge "Add CI job for traffic stress test in POSCA testsuite"
author mei mei <meimei@huawei.com>
Fri, 17 Feb 2017 09:28:46 +0000 (09:28 +0000)
committer Gerrit Code Review <gerrit@opnfv.org>
Fri, 17 Feb 2017 09:28:46 +0000 (09:28 +0000)
17 files changed:
jjb/doctor/doctor.yml
jjb/functest/set-functest-env.sh
jjb/global/releng-macros.yml
jjb/infra/bifrost-verify-jobs.yml
jjb/opnfvdocs/docs-post-rtd.sh [new file with mode: 0644]
jjb/opnfvdocs/docs-rtd.yaml [new file with mode: 0644]
modules/opnfv/deployment/apex/adapter.py
modules/opnfv/deployment/fuel/adapter.py
modules/opnfv/deployment/manager.py
modules/opnfv/utils/ovs_logger.py
prototypes/bifrost/scripts/test-bifrost-deployment.sh
utils/test/reporting/functest/reporting-status.py
utils/test/reporting/functest/testCase.py
utils/test/reporting/reporting.yaml
utils/test/reporting/utils/reporting_utils.py
utils/test/testapi/opnfv_testapi/resources/scenario_handlers.py
utils/test/testapi/opnfv_testapi/tests/unit/test_scenario.py

diff --git a/jjb/doctor/doctor.yml b/jjb/doctor/doctor.yml
index 2333fca..28888d6 100644 (file)
@@ -22,9 +22,9 @@
         - fuel:
             slave-label: 'ool-virtual2'
             pod: 'ool-virtual2'
-        - joid:
-            slave-label: 'ool-virtual3'
-            pod: 'ool-virtual3'
+        #- joid:
+        #    slave-label: 'ool-virtual3'
+        #    pod: 'ool-virtual3'
 
     inspector:
         - 'sample'
diff --git a/jjb/functest/set-functest-env.sh b/jjb/functest/set-functest-env.sh
index abec480..05e3d57 100755 (executable)
@@ -17,32 +17,34 @@ if [[ ${RC_FILE_PATH} != '' ]] && [[ -f ${RC_FILE_PATH} ]] ; then
     echo "Credentials file detected: ${RC_FILE_PATH}"
     # volume if credentials file path is given to Functest
     rc_file_vol="-v ${RC_FILE_PATH}:/home/opnfv/functest/conf/openstack.creds"
+    RC_FLAG=1
 fi
 
 
 if [[ ${INSTALLER_TYPE} == 'apex' ]]; then
     ssh_options="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
-    if sudo virsh list | grep instack; then
-        instack_mac=$(sudo virsh domiflist instack | grep default | \
-                      grep -Eo "[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+")
-    elif sudo virsh list | grep undercloud; then
-        instack_mac=$(sudo virsh domiflist undercloud | grep default | \
+    if sudo virsh list | grep undercloud; then
+        echo "Installer VM detected"
+        undercloud_mac=$(sudo virsh domiflist undercloud | grep default | \
                       grep -Eo "[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+")
+        INSTALLER_IP=$(/usr/sbin/arp -e | grep ${undercloud_mac} | awk {'print $1'})
+        sshkey_vol="-v /root/.ssh/id_rsa:/root/.ssh/id_rsa"
+        sudo scp $ssh_options root@${INSTALLER_IP}:/home/stack/stackrc ${HOME}/stackrc
+        stackrc_vol="-v ${HOME}/stackrc:/home/opnfv/functest/conf/stackrc"
+
+        if sudo iptables -C FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable 2> ${redirect}; then
+            sudo iptables -D FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable
+        fi
+        if sudo iptables -C FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable 2> ${redirect}; then
+          sudo iptables -D FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable
+        fi
+    elif [[ "$RC_FLAG" == 1 ]]; then
+        echo "No available installer VM, but credentials provided...continuing"
     else
-        echo "No available installer VM exists...exiting"
+        echo "No available installer VM exists and no credentials provided...exiting"
         exit 1
     fi
-    INSTALLER_IP=$(/usr/sbin/arp -e | grep ${instack_mac} | awk {'print $1'})
-    sshkey_vol="-v /root/.ssh/id_rsa:/root/.ssh/id_rsa"
-    sudo scp $ssh_options root@${INSTALLER_IP}:/home/stack/stackrc ${HOME}/stackrc
-    stackrc_vol="-v ${HOME}/stackrc:/home/opnfv/functest/conf/stackrc"
 
-    if sudo iptables -C FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable 2> ${redirect}; then
-        sudo iptables -D FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable
-    fi
-    if sudo iptables -C FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable 2> ${redirect}; then
-        sudo iptables -D FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable
-    fi
 fi
 
 
diff --git a/jjb/global/releng-macros.yml b/jjb/global/releng-macros.yml
index 9b09e31..b584ae5 100644 (file)
             choosing-strategy: 'gerrit'
             refspec: '$GERRIT_REFSPEC'
             <<: *git-scm-defaults
-
+- scm:
+    name: git-scm-with-submodules
+    scm:
+        - git:
+            credentials-id: '$SSH_CREDENTIAL_ID'
+            url: '$GIT_BASE'
+            refspec: ''
+            branches:
+                - 'refs/heads/{branch}'
+            skip-tag: true
+            wipe-workspace: true
+            submodule:
+                recursive: true
 - trigger:
     name: 'daily-trigger-disabled'
     triggers:
@@ -72,7 +84,6 @@
     triggers:
         - timed: ''
 
-# NOTE: unused macro, but we may use this for some jobs.
 - trigger:
     name: gerrit-trigger-patchset-created
     triggers:
                 - draft-published-event
                 - comment-added-contains-event:
                     comment-contains-value: 'recheck'
+                - comment-added-contains-event:
+                    comment-contains-value: 'reverify'
             projects:
               - project-compare-type: 'ANT'
                 project-pattern: '{project}'
                 branches:
                   - branch-compare-type: 'ANT'
                     branch-pattern: '**/{branch}'
+                file-paths:
+                  - compare-type: 'ANT'
+                    pattern: '{files}'
+            skip-vote:
+                successful: true
+                failed: true
+                unstable: true
+                notbuilt: true
 
 - trigger:
     name: gerrit-trigger-change-merged
     name: clean-workspace-log
     builders:
         - shell: |
-            find $WORKSPACE -type f -print -name '*.log' | xargs rm -f
+            find $WORKSPACE -type f -name '*.log' | xargs rm -f
 
 - publisher:
     name: archive-artifacts
diff --git a/jjb/infra/bifrost-verify-jobs.yml b/jjb/infra/bifrost-verify-jobs.yml
index c99023e..d595d4b 100644 (file)
 
     publishers:
         - email:
-            recipients: fatih.degirmenci@ericsson.com yroblamo@redhat.com mchandras@suse.de jack.morgan@intel.com zhang.jun3g@zte.com.cn
+            recipients: fatih.degirmenci@ericsson.com yroblamo@redhat.com mchandras@suse.de jack.morgan@intel.com julienjut@gmail.com
 #--------------------------------
 # trigger macros
 #--------------------------------
diff --git a/jjb/opnfvdocs/docs-post-rtd.sh b/jjb/opnfvdocs/docs-post-rtd.sh
new file mode 100644 (file)
index 0000000..7faa26f
--- /dev/null
@@ -0,0 +1,7 @@
+#!/bin/bash
+if [ $GERRIT_BRANCH == "master" ]; then
+    RTD_BUILD_VERSION=latest
+else
+    RTD_BUILD_VERSION=${{GERRIT_BRANCH/\//-}}
+fi
+curl -X POST --data "version_slug=$RTD_BUILD_VERSION" https://readthedocs.org/build/{rtdproject}
diff --git a/jjb/opnfvdocs/docs-rtd.yaml b/jjb/opnfvdocs/docs-rtd.yaml
new file mode 100644 (file)
index 0000000..32ef732
--- /dev/null
@@ -0,0 +1,87 @@
+- project:
+    name: docs-rtd
+    jobs:
+        - 'docs-merge-rtd-{stream}'
+        - 'docs-verify-rtd-{stream}'
+
+    stream:
+        - master:
+            branch: 'master'
+
+    project: 'opnfvdocs'
+    rtdproject: 'opnfv'
+    # TODO: Archive Artifacts
+
+- job-template:
+    name: 'docs-merge-rtd-{stream}'
+
+    project-type: freestyle
+
+    parameters:
+        - label:
+            name: SLAVE_LABEL
+            default: 'lf-build1'
+            description: 'Slave label on Jenkins'
+        - project-parameter:
+            project: '{project}'
+            branch: '{branch}'
+        - string:
+            name: GIT_BASE
+            default: https://gerrit.opnfv.org/gerrit/releng
+            description: 'Git URL to use on this Jenkins Slave'
+    scm:
+        - git-scm
+
+    triggers:
+        - gerrit-trigger-change-merged
+
+    builders:
+        - shell: !include-raw: docs-post-rtd.sh
+
+- job-template:
+    name: 'docs-verify-rtd-{stream}'
+
+    project-type: freestyle
+
+    parameters:
+        - label:
+            name: SLAVE_LABEL
+            default: 'lf-build1'
+            description: 'Slave label on Jenkins'
+        - project-parameter:
+            project: '{project}'
+            branch: '{branch}'
+        - string:
+            name: GIT_BASE
+            default: https://gerrit.opnfv.org/gerrit/opnfvdocs
+            description: 'Git URL to use on this Jenkins Slave'
+    scm:
+        - git-scm-with-submodules:
+            branch: '{branch}'
+
+    triggers:
+        - gerrit-trigger-patchset-created:
+            server: 'gerrit.opnfv.org'
+            project: '**'
+            branch: '{branch}'
+            files: 'docs/**/*.rst'
+        - timed: 'H H * * *'
+
+    builders:
+        - shell: |
+            if [ "$GERRIT_PROJECT" != "opnfvdocs" ]; then
+                cd opnfvdocs/docs/submodules/$GERRIT_PROJECT
+                git fetch origin $GERRIT_REFSPEC && git checkout FETCH_HEAD
+            else
+                git fetch origin $GERRIT_REFSPEC && git checkout FETCH_HEAD
+            fi
+        - shell: |
+            sudo pip install virtualenv virtualenvwrapper
+            export WORKON_HOME=$HOME/.virtualenvs
+            source /usr/local/bin/virtualenvwrapper.sh
+            mkvirtualenv $WORKSPACE/venv
+            workon $WORKSPACE/venv
+            pip install --upgrade pip
+            pip freeze
+            pip install tox
+            tox -edocs
diff --git a/modules/opnfv/deployment/apex/adapter.py b/modules/opnfv/deployment/apex/adapter.py
index cb827d8..225e174 100644 (file)
@@ -35,28 +35,34 @@ class ApexAdapter(manager.DeploymentHandler):
             return None
 
         for line in lines:
-            if 'controller' in line:
-                roles = "controller"
-            elif 'compute' in line:
-                roles = "compute"
-            else:
+            roles = []
+            if any(x in line for x in ['-----', 'Networks']):
                 continue
-            if 'Daylight' in line:
-                roles += ", OpenDaylight"
+            if 'controller' in line:
+                roles.append(manager.Role.CONTROLLER)
+            if 'compute' in line:
+                roles.append(manager.Role.COMPUTE)
+            if 'opendaylight' in line.lower():
+                roles.append(manager.Role.ODL)
+
             fields = line.split('|')
             id = re.sub('[!| ]', '', fields[1]).encode()
             name = re.sub('[!| ]', '', fields[2]).encode()
-            status_node = re.sub('[!| ]', '', fields[3]).encode()
+            status_node = re.sub('[!| ]', '', fields[3]).encode().lower()
             ip = re.sub('[!| ctlplane=]', '', fields[4]).encode()
 
-            if status_node.lower() == 'active':
-                status = manager.Node.STATUS_OK
+            ssh_client = None
+            if 'active' in status_node:
+                status = manager.NodeStatus.STATUS_OK
                 ssh_client = ssh_utils.get_ssh_client(hostname=ip,
                                                       username='heat-admin',
                                                       pkey_file=self.pkey_file)
+            elif 'error' in status_node:
+                status = manager.NodeStatus.STATUS_ERROR
+            elif 'off' in status_node:
+                status = manager.NodeStatus.STATUS_OFFLINE
             else:
-                status = manager.Node.STATUS_INACTIVE
-                ssh_client = None
+                status = manager.NodeStatus.STATUS_INACTIVE
 
             node = manager.Node(id, ip, name, status, roles, ssh_client)
             nodes.append(node)
@@ -73,8 +79,9 @@ class ApexAdapter(manager.DeploymentHandler):
                      "grep Description|sed 's/^.*\: //'")
         cmd_ver = ("sudo yum info opendaylight 2>/dev/null|"
                    "grep Version|sed 's/^.*\: //'")
+        description = None
         for node in self.nodes:
-            if 'controller' in node.get_attribute('roles'):
+            if node.is_controller():
                 description = node.run_cmd(cmd_descr)
                 version = node.run_cmd(cmd_ver)
                 break
diff --git a/modules/opnfv/deployment/fuel/adapter.py b/modules/opnfv/deployment/fuel/adapter.py
index 3e6ef50..aa5ad7a 100644 (file)
@@ -66,7 +66,7 @@ class FuelAdapter(manager.DeploymentHandler):
         if options and options['cluster'] and len(self.nodes) > 0:
             n = []
             for node in self.nodes:
-                if node.info['cluster'] == options['cluster']:
+                if str(node.info['cluster']) == str(options['cluster']):
                     n.append(node)
             return n
 
@@ -124,26 +124,36 @@ class FuelAdapter(manager.DeploymentHandler):
             fields = lines[i].rsplit(' | ')
             id = fields[index_id].strip().encode()
             ip = fields[index_ip].strip().encode()
-            status_node = fields[index_status].strip().encode()
+            status_node = fields[index_status].strip().encode().lower()
             name = fields[index_name].strip().encode()
-            roles = fields[index_roles].strip().encode()
+            roles_all = fields[index_roles].strip().encode().lower()
+
+            roles = [x for x in [manager.Role.CONTROLLER,
+                                 manager.Role.COMPUTE,
+                                 manager.Role.ODL] if x in roles_all]
 
             dict = {"cluster": fields[index_cluster].strip().encode(),
                     "mac": fields[index_mac].strip().encode(),
                     "status_node": status_node,
                     "online": fields[index_online].strip().encode()}
 
+            ssh_client = None
             if status_node == 'ready':
-                status = manager.Node.STATUS_OK
+                status = manager.NodeStatus.STATUS_OK
                 proxy = {'ip': self.installer_ip,
                          'username': self.installer_user,
                          'password': self.installer_pwd}
                 ssh_client = ssh_utils.get_ssh_client(hostname=ip,
                                                       username='root',
                                                       proxy=proxy)
+            elif 'error' in status_node:
+                status = manager.NodeStatus.STATUS_ERROR
+            elif 'off' in status_node:
+                status = manager.NodeStatus.STATUS_OFFLINE
+            elif 'discover' in status_node:
+                status = manager.NodeStatus.STATUS_UNUSED
             else:
-                status = manager.Node.STATUS_INACTIVE
-                ssh_client = None
+                status = manager.NodeStatus.STATUS_INACTIVE
 
             node = manager.Node(
                 id, ip, name, status, roles, ssh_client, dict)
@@ -160,7 +170,7 @@ class FuelAdapter(manager.DeploymentHandler):
         cmd = 'source openrc;nova-manage version 2>/dev/null'
         version = None
         for node in self.nodes:
-            if 'controller' in node.get_attribute('roles'):
+            if node.is_controller():
                 version = node.run_cmd(cmd)
                 break
         return version
@@ -169,7 +179,7 @@ class FuelAdapter(manager.DeploymentHandler):
         cmd = "apt-cache show opendaylight|grep Version|sed 's/^.*\: //'"
         version = None
         for node in self.nodes:
-            if 'controller' in node.get_attribute('roles'):
+            if node.is_controller():
                 odl_version = node.run_cmd(cmd)
                 if odl_version:
                     version = 'OpenDaylight ' + odl_version
diff --git a/modules/opnfv/deployment/manager.py b/modules/opnfv/deployment/manager.py
index 8c9599b..43a7948 100644 (file)
@@ -89,25 +89,35 @@ class Deployment(object):
             sdn_controller=self.deployment_info['sdn_controller'])
 
         for node in self.deployment_info['nodes']:
-            s += '\t\t{node_object}\n'.format(node_object=node)
+            s += '{node_object}\n'.format(node_object=node)
 
         return s
 
 
-class Node(object):
+class Role():
+    CONTROLLER = 'controller'
+    COMPUTE = 'compute'
+    ODL = 'opendaylight'
+    ONOS = 'onos'
+
 
+class NodeStatus():
     STATUS_OK = 'active'
     STATUS_INACTIVE = 'inactive'
     STATUS_OFFLINE = 'offline'
-    STATUS_FAILED = 'failed'
+    STATUS_ERROR = 'error'
+    STATUS_UNUSED = 'unused'
+
+
+class Node(object):
 
     def __init__(self,
                  id,
                  ip,
                  name,
                  status,
-                 roles,
-                 ssh_client,
+                 roles=[],
+                 ssh_client=None,
                  info={}):
         self.id = id
         self.ip = ip
@@ -121,7 +131,7 @@ class Node(object):
         '''
         SCP file from a node
         '''
-        if self.status is not Node.STATUS_OK:
+        if self.status is not NodeStatus.STATUS_OK:
             logger.info("The node %s is not active" % self.ip)
             return 1
         logger.info("Fetching %s from %s" % (src, self.ip))
@@ -137,7 +147,7 @@ class Node(object):
         '''
         SCP file to a node
         '''
-        if self.status is not Node.STATUS_OK:
+        if self.status is not NodeStatus.STATUS_OK:
             logger.info("The node %s is not active" % self.ip)
             return 1
         logger.info("Copying %s to %s" % (src, self.ip))
@@ -153,9 +163,9 @@ class Node(object):
         '''
         Run command remotely on a node
         '''
-        if self.status is not Node.STATUS_OK:
-            logger.info("The node %s is not active" % self.ip)
-            return 1
+        if self.status is not NodeStatus.STATUS_OK:
+            logger.error("The node %s is not active" % self.ip)
+            return None
         _, stdout, stderr = (self.ssh_client.exec_command(cmd))
         error = stderr.readlines()
         if len(error) > 0:
@@ -187,7 +197,7 @@ class Node(object):
         '''
         Returns if the node is a controller
         '''
-        if 'controller' in self.get_attribute('roles'):
+        if 'controller' in self.roles:
             return True
         return False
 
@@ -195,12 +205,32 @@ class Node(object):
         '''
         Returns if the node is a compute
         '''
-        if 'compute' in self.get_attribute('roles'):
+        if 'compute' in self.roles:
             return True
         return False
 
+    def get_ovs_info(self):
+        '''
+        Returns the ovs version installed
+        '''
+        cmd = "ovs-vsctl --version|head -1| sed 's/^.*) //'"
+        return self.run_cmd(cmd)
+
     def __str__(self):
-        return str(self.get_dict())
+        return '''
+            name:   {name}
+            id:     {id}
+            ip:     {ip}
+            status: {status}
+            roles:  {roles}
+            ovs:    {ovs}
+            info:   {info}'''.format(name=self.name,
+                                     id=self.id,
+                                     ip=self.ip,
+                                     status=self.status,
+                                     roles=self.roles,
+                                     ovs=self.get_ovs_info(),
+                                     info=self.info)
 
 
 class DeploymentHandler(object):
@@ -236,7 +266,7 @@ class DeploymentHandler(object):
             self.installer_node = Node(id='',
                                        ip=installer_ip,
                                        name=installer,
-                                       status='active',
+                                       status=NodeStatus.STATUS_OK,
                                        ssh_client=self.installer_connection,
                                        roles='installer node')
         else:
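
The manager.py hunks above replace the old Node.STATUS_* constants with the new Role and NodeStatus holders, make roles/ssh_client/info optional on Node, and have run_cmd() return None instead of 1 for inactive nodes. A minimal sketch of the new interface, assuming the modules/ tree is importable as the opnfv package (the node values below are hypothetical):

    from opnfv.deployment import manager

    # Hypothetical controller node built without an SSH connection.
    node = manager.Node(id='1',
                        ip='192.0.2.10',
                        name='controller-0',
                        status=manager.NodeStatus.STATUS_OFFLINE,
                        roles=[manager.Role.CONTROLLER, manager.Role.ODL])

    print(node.is_controller())   # True: Role.CONTROLLER is in node.roles
    print(node.is_compute())      # False
    # run_cmd() now returns None when the node is not active, so
    # get_ovs_info() degrades gracefully without an ssh_client.
    print(node.get_ovs_info())    # None, with an error logged
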
diff --git a/modules/opnfv/utils/ovs_logger.py b/modules/opnfv/utils/ovs_logger.py
index 75b4cec..d650eb9 100644 (file)
@@ -7,7 +7,7 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 
-import opnfv.utils.OPNFVLogger as OPNFVLogger
+import opnfv.utils.opnfv_logger as OPNFVLogger
 import os
 import time
 import shutil
diff --git a/prototypes/bifrost/scripts/test-bifrost-deployment.sh b/prototypes/bifrost/scripts/test-bifrost-deployment.sh
index 914a906..3e2381f 100755 (executable)
@@ -79,6 +79,11 @@ source ${ANSIBLE_INSTALL_ROOT}/ansible/hacking/env-setup
 ANSIBLE=$(which ansible-playbook)
 set -x -o nounset
 
+logs_on_exit() {
+    $SCRIPT_HOME/collect-test-info.sh
+}
+trap logs_on_exit EXIT
+
 # Change working directory
 cd $BIFROST_HOME/playbooks
 
@@ -129,6 +134,4 @@ if [ $EXITCODE != 0 ]; then
     echo "****************************"
 fi
 
-$SCRIPT_HOME/collect-test-info.sh
-
 exit $EXITCODE
diff --git a/utils/test/reporting/functest/reporting-status.py b/utils/test/reporting/functest/reporting-status.py
index 158ee59..df56323 100755 (executable)
@@ -61,13 +61,13 @@ logger.info("*******************************************")
 # Retrieve test cases of Tier 1 (smoke)
 config_tiers = functest_yaml_config.get("tiers")
 
-# we consider Tier 1 (smoke),2 (features)
+# we consider Tier 0 (Healthcheck), Tier 1 (smoke),2 (features)
 # to validate scenarios
-# Tier > 4 are not used to validate scenarios but we display the results anyway
+# Tier > 2 are not used to validate scenarios but we display the results anyway
 # tricky thing for the API as some tests are Functest tests
 # other tests are declared directly in the feature projects
 for tier in config_tiers:
-    if tier['order'] > 0 and tier['order'] < 2:
+    if tier['order'] >= 0 and tier['order'] < 2:
         for case in tier['testcases']:
             if case['name'] not in blacklist:
                 testValid.append(tc.TestCase(case['name'],
diff --git a/utils/test/reporting/functest/testCase.py b/utils/test/reporting/functest/testCase.py
index df0874e..22196c8 100644 (file)
@@ -43,7 +43,8 @@ class TestCase(object):
                                'parser': 'Parser',
                                'connection_check': 'Health (connection)',
                                'api_check': 'Health (api)',
-                               'snaps_smoke': 'SNAPS'}
+                               'snaps_smoke': 'SNAPS',
+                               'snaps_health_check': 'Health (dhcp)'}
         try:
             self.displayName = display_name_matrix[self.name]
         except:
@@ -138,7 +139,8 @@ class TestCase(object):
                              'parser': 'parser-basics',
                              'connection_check': 'connection_check',
                              'api_check': 'api_check',
-                             'snaps_smoke': 'snaps_smoke'
+                             'snaps_smoke': 'snaps_smoke',
+                             'snaps_health_check': 'snaps_health_check'
                              }
         try:
             return test_match_matrix[self.name]
diff --git a/utils/test/reporting/reporting.yaml b/utils/test/reporting/reporting.yaml
index 9db0890..2fb6b78 100644 (file)
@@ -36,12 +36,20 @@ functest:
         - ovno
         - security_scan
         - rally_sanity
+        - healthcheck
+        - odl_netvirt
+        - aaa
+        - cloudify_ims
+        - orchestra_ims
+        - juju_epc
+        - orchestra
+        - promise
     max_scenario_criteria: 50
     test_conf: https://git.opnfv.org/cgit/functest/plain/functest/ci/testcases.yaml
     log_level: ERROR
     jenkins_url: https://build.opnfv.org/ci/view/functest/job/
     exclude_noha: False
-    exclude_virtual: True
+    exclude_virtual: False
 
 yardstick:
     test_conf: https://git.opnfv.org/cgit/yardstick/plain/tests/ci/report_config.yaml
diff --git a/utils/test/reporting/utils/reporting_utils.py b/utils/test/reporting/utils/reporting_utils.py
index fc5d188..1879fb6 100644 (file)
@@ -269,7 +269,8 @@ def getJenkinsUrl(build_tag):
     url_base = get_config('functest.jenkins_url')
     try:
         build_id = [int(s) for s in build_tag.split("-") if s.isdigit()]
-        url_id = build_tag[8:-(len(build_id) + 3)] + "/" + str(build_id[0])
+        url_id = (build_tag[8:-(len(str(build_id[0])) + 1)] +
+                  "/" + str(build_id[0]))
         jenkins_url = url_base + url_id + "/console"
     except:
         print('Impossible to get jenkins url:')
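
The reporting_utils.py hunk above fixes the slice that strips the trailing build number from build_tag: the old code subtracted len(build_id) (the length of the list, always 1) rather than the number of digits, so the result was only correct for three-digit build numbers. A standalone sketch with a hypothetical build_tag (the "jenkins-<job>-<number>" format is an assumption) illustrates the difference:

    # Hypothetical Jenkins build tag: "jenkins-" + job name + "-" + build number.
    build_tag = "jenkins-functest-fuel-baremetal-daily-master-45"

    build_id = [int(s) for s in build_tag.split("-") if s.isdigit()]   # [45]

    old_url_id = build_tag[8:-(len(build_id) + 3)] + "/" + str(build_id[0])
    new_url_id = (build_tag[8:-(len(str(build_id[0])) + 1)] +
                  "/" + str(build_id[0]))

    print(old_url_id)  # functest-fuel-baremetal-daily-maste/45  (one char short)
    print(new_url_id)  # functest-fuel-baremetal-daily-master/45
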
diff --git a/utils/test/testapi/opnfv_testapi/resources/scenario_handlers.py b/utils/test/testapi/opnfv_testapi/resources/scenario_handlers.py
index a8c1a94..7c8c333 100644 (file)
@@ -116,6 +116,17 @@ class ScenarioGURHandler(GenericScenarioHandler):
         db_keys = ['name']
         self._update(query, db_keys)
 
+    @swagger.operation(nickname="deleteScenarioByName")
+    def delete(self, name):
+        """
+        @description: delete a scenario by name
+        @return 200: delete success
+        @raise 404: scenario not exist:
+        """
+
+        query = {'name': name}
+        self._delete(query)
+
     def _update_query(self, keys, data):
         query = dict()
         equal = True
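
The new delete() handler above removes a scenario by name and answers 200 on success or 404 when it does not exist. A hedged sketch of exercising it over HTTP with python-requests; the host, port, /api/v1/scenarios/<name> route and the scenario name are assumptions based on the handler, not taken from this change:

    import requests

    # Assumed testapi endpoint and route; adjust to the actual deployment.
    TESTAPI = "http://localhost:8000/api/v1"
    name = "nosdn-nofeature-ha"   # hypothetical scenario name

    resp = requests.delete("{}/scenarios/{}".format(TESTAPI, name))
    if resp.status_code == 200:
        print("scenario deleted")
    elif resp.status_code == 404:
        print("scenario does not exist")
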
diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/test_scenario.py b/utils/test/testapi/opnfv_testapi/tests/unit/test_scenario.py
index c15dc32..3a0abf9 100644 (file)
@@ -1,11 +1,9 @@
 from copy import deepcopy
+from datetime import datetime
 import json
 import os
-from datetime import datetime
 
-from opnfv_testapi.common.constants import HTTP_BAD_REQUEST
-from opnfv_testapi.common.constants import HTTP_FORBIDDEN
-from opnfv_testapi.common.constants import HTTP_OK
+from opnfv_testapi.common import constants
 import opnfv_testapi.resources.scenario_models as models
 from test_testcase import TestBase
 
@@ -38,7 +36,7 @@ class TestScenarioBase(TestBase):
         return res.href.split('/')[-1]
 
     def assert_res(self, code, scenario, req=None):
-        self.assertEqual(code, HTTP_OK)
+        self.assertEqual(code, constants.HTTP_OK)
         if req is None:
             req = self.req_d
         scenario_dict = scenario.format_http()
@@ -61,29 +59,29 @@ class TestScenarioBase(TestBase):
 class TestScenarioCreate(TestScenarioBase):
     def test_withoutBody(self):
         (code, body) = self.create()
-        self.assertEqual(code, HTTP_BAD_REQUEST)
+        self.assertEqual(code, constants.HTTP_BAD_REQUEST)
 
     def test_emptyName(self):
         req_empty = models.ScenarioCreateRequest('')
         (code, body) = self.create(req_empty)
-        self.assertEqual(code, HTTP_BAD_REQUEST)
+        self.assertEqual(code, constants.HTTP_BAD_REQUEST)
         self.assertIn('name missing', body)
 
     def test_noneName(self):
         req_none = models.ScenarioCreateRequest(None)
         (code, body) = self.create(req_none)
-        self.assertEqual(code, HTTP_BAD_REQUEST)
+        self.assertEqual(code, constants.HTTP_BAD_REQUEST)
         self.assertIn('name missing', body)
 
     def test_success(self):
         (code, body) = self.create_d()
-        self.assertEqual(code, HTTP_OK)
+        self.assertEqual(code, constants.HTTP_OK)
         self.assert_create_body(body)
 
     def test_alreadyExist(self):
         self.create_d()
         (code, body) = self.create_d()
-        self.assertEqual(code, HTTP_FORBIDDEN)
+        self.assertEqual(code, constants.HTTP_FORBIDDEN)
         self.assertIn('already exists', body)
 
 
@@ -126,7 +124,7 @@ class TestScenarioGet(TestScenarioBase):
     def _query_and_assert(self, query, found=True, reqs=None):
         code, body = self.query(query)
         if not found:
-            self.assertEqual(code, HTTP_OK)
+            self.assertEqual(code, constants.HTTP_OK)
             self.assertEqual(0, len(body.scenarios))
         else:
             self.assertEqual(len(reqs), len(body.scenarios))
@@ -296,10 +294,23 @@ class TestScenarioUpdate(TestScenarioBase):
 
     def _update_and_assert(self, update_req, new_scenario, name=None):
         code, _ = self.update(update_req, self.scenario)
-        self.assertEqual(code, HTTP_OK)
+        self.assertEqual(code, constants.HTTP_OK)
         self._get_and_assert(self._none_default(name, self.scenario),
                              new_scenario)
 
     @staticmethod
     def _none_default(check, default):
         return check if check else default
+
+
+class TestScenarioDelete(TestScenarioBase):
+    def test_notFound(self):
+        code, body = self.delete('notFound')
+        self.assertEqual(code, constants.HTTP_NOT_FOUND)
+
+    def test_success(self):
+        scenario = self.create_return_name(self.req_d)
+        code, _ = self.delete(scenario)
+        self.assertEqual(code, constants.HTTP_OK)
+        code, _ = self.get(scenario)
+        self.assertEqual(code, constants.HTTP_NOT_FOUND)