Merge "Adapt vping testcases to the new template"
author: Morgan Richomme <morgan.richomme@orange.com>
Wed, 23 Nov 2016 10:13:08 +0000 (10:13 +0000)
committer: Gerrit Code Review <gerrit@opnfv.org>
Wed, 23 Nov 2016 10:13:08 +0000 (10:13 +0000)
27 files changed:
docker/Dockerfile
functest/ci/exec_test.sh
functest/ci/testcases.yaml
functest/core/TestCasesBase.py
functest/opnfv_tests/features/__init__.py [new file with mode: 0644]
functest/opnfv_tests/features/domino.py
functest/opnfv_tests/features/promise.py
functest/opnfv_tests/features/sfc/SSHUtils.py [deleted file]
functest/opnfv_tests/features/sfc/compute_presetup_CI.bash [deleted file]
functest/opnfv_tests/features/sfc/correct_classifier.bash [deleted file]
functest/opnfv_tests/features/sfc/delete.sh [deleted file]
functest/opnfv_tests/features/sfc/ovs_utils.py [deleted file]
functest/opnfv_tests/features/sfc/prepare_odl_sfc.bash [deleted file]
functest/opnfv_tests/features/sfc/prepare_odl_sfc.py [deleted file]
functest/opnfv_tests/features/sfc/server_presetup_CI.bash [deleted file]
functest/opnfv_tests/features/sfc/sfc.py [deleted file]
functest/opnfv_tests/features/sfc/sfc_change_classi.bash [deleted file]
functest/opnfv_tests/features/sfc/sfc_tacker.bash [deleted file]
functest/opnfv_tests/features/sfc/tacker_client_install.sh [deleted file]
functest/opnfv_tests/features/sfc/test-vnfd1.yaml [deleted file]
functest/opnfv_tests/features/sfc/test-vnfd2.yaml [deleted file]
functest/utils/openstack/cinder.py [new file with mode: 0644]
functest/utils/openstack/glance.py [new file with mode: 0644]
functest/utils/openstack/keystone.py [new file with mode: 0644]
functest/utils/openstack/neutron.py [new file with mode: 0644]
functest/utils/openstack/nova.py [new file with mode: 0644]
run_unit_tests.sh

index 1adfe6e..8d8bc04 100644 (file)
@@ -86,7 +86,8 @@ RUN git clone --depth 1 -b $BRANCH https://gerrit.opnfv.org/gerrit/domino ${repo
 RUN git clone --depth 1 -b $BRANCH https://gerrit.opnfv.org/gerrit/parser ${repos_dir}/parser
 RUN git clone --depth 1 -b $BRANCH https://gerrit.opnfv.org/gerrit/doctor ${repos_dir}/doctor
 RUN git clone --depth 1 -b $BRANCH https://gerrit.opnfv.org/gerrit/ovno ${repos_dir}/ovno
-RUN git clone --depth 1 https://github.com/opnfv/promise ${repos_dir}/promise
+RUN git clone --depth 1 -b $BRANCH https://gerrit.opnfv.org/gerrit/promise ${repos_dir}/promise
+RUN git clone --depth 1 -b $BRANCH https://gerrit.opnfv.org/gerrit/sfc ${repos_dir}/sfc
 RUN git clone --depth 1 https://gerrit.opnfv.org/gerrit/securityscanning ${repos_dir}/securityscanning
 RUN git clone --depth 1 https://gerrit.opnfv.org/gerrit/releng ${repos_dir}/releng
 
@@ -125,7 +126,7 @@ RUN curl -L https://get.rvm.io | bash -s stable
 RUN git clone --depth 1 https://gerrit.cablelabs.com/snaps-provisioning ${repos_dir}/snaps
 RUN pip install -e ${repos_dir}/snaps/
 
-RUN /bin/bash -c ". /home/opnfv/repos/functest/functest/opnfv_tests/features/sfc/tacker_client_install.sh"
+RUN /bin/bash -c ". /home/opnfv/repos/sfc/tests/functest/odl-sfc/tacker_client_install.sh"
 RUN cd ${repos_dir}/bgpvpn && pip install .
 #RUN cd ${repos_dir}/kingbird && pip install -e .
 RUN cd ${repos_dir}/moon/moonclient/ && python setup.py install
@@ -146,7 +147,7 @@ RUN /bin/bash -c ". /etc/profile.d/rvm.sh \
 RUN sh -c 'curl -sL https://deb.nodesource.com/setup_4.x | sudo -E bash -'
 RUN sudo apt-get install -y nodejs
 RUN cd ${repos_dir}/promise && sudo npm -g install npm@latest
-RUN cd ${repos_dir}/promise && npm install
+RUN cd ${repos_dir}/promise/source && npm install
 
 RUN echo "set nocompatible \n\
 set backspace=2" \
index fece3b9..4d913c5 100644 (file)
@@ -150,17 +150,11 @@ function run_test(){
                 $clean_flag -s -m feature_multisite $report \
                 -c ${FUNCTEST_TEST_DIR}/OpenStack/tempest/tempest_multisite.conf
         ;;
-        "domino")
-            python ${FUNCTEST_TEST_DIR}/features/domino.py $report
-        ;;
         "odl-sfc")
-            pip install --upgrade python-keystoneclient==1.7.4
-            ODL_SFC_DIR=${FUNCTEST_TEST_DIR}/features/sfc
-            # pass FUNCTEST_REPO_DIR inside prepare_odl_sfc.bash
+            ODL_SFC_DIR=${repos_dir}/sfc/tests/functest/odl-sfc
             FUNCTEST_REPO_DIR=${FUNCTEST_REPO_DIR} python ${ODL_SFC_DIR}/prepare_odl_sfc.py || exit $?
             source ${ODL_SFC_DIR}/tackerc
             python ${ODL_SFC_DIR}/sfc.py $report
-            pip install --upgrade python-keystoneclient==3.5.0
         ;;
         "parser")
             python ${FUNCTEST_TEST_DIR}/vnf/vRNC/parser.py $report
index 1fb3511..1b7747c 100644 (file)
@@ -198,6 +198,9 @@ tiers:
                 dependencies:
                     installer: 'joid'
                     scenario: ''
+                run:
+                    module: 'functest.opnfv_tests.features.domino'
+                    class: 'DominoCases'
             -
                 name: odl-sfc
                 criteria: 'status == "PASS"'
index 4c156b2..bd02e13 100644 (file)
@@ -21,11 +21,10 @@ class TestCasesBase(object):
 
     logger = ft_logger.Logger(__name__).getLogger()
 
-    project = "functest"
-
     def __init__(self):
         self.functest_repo = ft_utils.FUNCTEST_REPO
         self.details = {}
+        self.project_name = "functest"
         self.case_name = ""
         self.criteria = ""
         self.start_time = ""
@@ -37,12 +36,13 @@ class TestCasesBase(object):
 
     def push_to_db(self):
         try:
+            assert self.project_name
             assert self.case_name
             assert self.criteria
             assert self.start_time
             assert self.stop_time
             if ft_utils.push_results_to_db(
-                    TestCasesBase.project, self.case_name, self.start_time,
+                    self.project_name, self.case_name, self.start_time,
                     self.stop_time, self.criteria, self.details):
                 self.logger.info("The results were successfully pushed to DB")
                 return TestCasesBase.EX_OK
diff --git a/functest/opnfv_tests/features/__init__.py b/functest/opnfv_tests/features/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
index 7705c07..be3dd54 100755 (executable)
 # After successful ping, both the VMs are deleted.
 # 0.2: measure test duration and publish results under json format
 # 0.3: add report flag to push results when needed
-#
+# 0.4: refactoring to match Test abstraction class
 
 import argparse
+import sys
 import time
 
+from functest.core import TestCasesBase
 import functest.utils.functest_logger as ft_logger
 import functest.utils.functest_utils as ft_utils
 
-parser = argparse.ArgumentParser()
-
-parser.add_argument("-r", "--report",
-                    help="Create json result file",
-                    action="store_true")
-args = parser.parse_args()
-
-
-DOMINO_REPO = \
-    ft_utils.get_functest_config('general.directories.dir_repo_domino')
-RESULTS_DIR = \
-    ft_utils.get_functest_config('general.directories.dir_results')
-
-logger = ft_logger.Logger("domino").getLogger()
-
-
-def main():
-    cmd = 'cd %s && ./tests/run_multinode.sh' % DOMINO_REPO
-    log_file = RESULTS_DIR + "/domino.log"
-    start_time = time.time()
-
-    ret = ft_utils.execute_command(cmd,
-                                   output_file=log_file)
-
-    stop_time = time.time()
-    duration = round(stop_time - start_time, 1)
-    if ret == 0 and duration > 1:
-        logger.info("domino OK")
-        test_status = 'OK'
-    elif ret == 0 and duration <= 1:
-        logger.info("domino TEST SKIPPED")
-        test_status = 'SKIPPED'
-    else:
-        logger.info("domino FAILED")
-        test_status = 'NOK'
-
-    details = {
-        'timestart': start_time,
-        'duration': duration,
-        'status': test_status,
-    }
-
-    status = "FAIL"
-    if details['status'] == "OK":
-        status = "PASS"
-    elif details['status'] == "SKIPPED":
-        status = "SKIP"
 
-    ft_utils.logger_test_results("Domino",
-                                 "domino-multinode",
-                                 status,
-                                 details)
-    if args.report:
+class DominoCases(TestCasesBase.TestCasesBase):
+    DOMINO_REPO = \
+        ft_utils.get_functest_config('general.directories.dir_repo_domino')
+    RESULTS_DIR = \
+        ft_utils.get_functest_config('general.directories.dir_results')
+    logger = ft_logger.Logger("domino").getLogger()
+
+    def __init__(self):
+        super(DominoCases, self).__init__()
+        self.project_name = "domino"
+        self.case_name = "domino-multinode"
+
+    def main(self, **kwargs):
+        cmd = 'cd %s && ./tests/run_multinode.sh' % self.DOMINO_REPO
+        log_file = self.RESULTS_DIR + "/domino.log"
+        start_time = time.time()
+
+        ret = ft_utils.execute_command(cmd,
+                                       output_file=log_file)
+
+        stop_time = time.time()
+        duration = round(stop_time - start_time, 1)
+        if ret == 0 and duration > 1:
+            self.logger.info("domino OK")
+            status = 'PASS'
+        elif ret == 0 and duration <= 1:
+            self.logger.info("domino TEST SKIPPED")
+            status = 'SKIP'
+        else:
+            self.logger.info("domino FAILED")
+            status = "FAIL"
+
+        # report status only if tests run (FAIL OR PASS)
         if status is not "SKIP":
-            ft_utils.push_results_to_db("domino",
-                                        "domino-multinode",
-                                        start_time,
-                                        stop_time,
-                                        status,
-                                        details)
-            logger.info("Domino results pushed to DB")
+            self.criteria = status
+            self.start_time = start_time
+            self.stop_time = stop_time
+            self.details = {}
 
+    def run(self):
+        kwargs = {}
+        return self.main(**kwargs)
 
 if __name__ == '__main__':
-    main()
+    parser = argparse.ArgumentParser()
+    parser.add_argument("-r", "--report",
+                        help="Create json result file",
+                        action="store_true")
+    args = vars(parser.parse_args())
+    domino = DominoCases()
+    try:
+        result = domino.main(**args)
+        if result != TestCasesBase.TestCasesBase.EX_OK:
+            sys.exit(result)
+        if args['report']:
+            sys.exit(domino.push_to_db())
+    except Exception:
+        sys.exit(TestCasesBase.TestCasesBase.EX_RUN_ERROR)
index cce0f5d..e198bde 100755 (executable)
@@ -181,7 +181,7 @@ def main():
     os.environ["OS_TEST_FLAVOR"] = flavor_id
     os.environ["OS_TEST_NETWORK"] = network_dic["net_id"]
 
-    os.chdir(PROMISE_REPO)
+    os.chdir(PROMISE_REPO + '/source/')
     results_file_name = RESULTS_DIR + '/' + 'promise-results.json'
     results_file = open(results_file_name, 'w+')
     cmd = 'npm run -s test -- --reporter json'
diff --git a/functest/opnfv_tests/features/sfc/SSHUtils.py b/functest/opnfv_tests/features/sfc/SSHUtils.py
deleted file mode 100644 (file)
index 9c8c2c7..0000000
+++ /dev/null
@@ -1,120 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# Authors: George Paraskevopoulos (geopar@intracom-telecom.com)
-#          Jose Lausuch (jose.lausuch@ericsson.com)
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-
-import paramiko
-import functest.utils.functest_logger as rl
-import os
-
-logger = rl.Logger('SSHUtils').getLogger()
-
-
-def get_ssh_client(hostname, username, password=None, proxy=None):
-    client = None
-    try:
-        if proxy is None:
-            client = paramiko.SSHClient()
-        else:
-            client = ProxyHopClient()
-            client.configure_jump_host(proxy['ip'],
-                                       proxy['username'],
-                                       proxy['password'])
-
-        if client is None:
-            raise Exception('Could not connect to client')
-
-        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
-        client.connect(hostname,
-                       username=username,
-                       password=password)
-        return client
-    except Exception, e:
-        logger.error(e)
-        return None
-
-
-def get_file(ssh_conn, src, dest):
-    try:
-        sftp = ssh_conn.open_sftp()
-        sftp.get(src, dest)
-        return True
-    except Exception, e:
-        logger.error("Error [get_file(ssh_conn, '%s', '%s']: %s" %
-                     (src, dest, e))
-        return None
-
-
-def put_file(ssh_conn, src, dest):
-    try:
-        sftp = ssh_conn.open_sftp()
-        sftp.put(src, dest)
-        return True
-    except Exception, e:
-        logger.error("Error [put_file(ssh_conn, '%s', '%s']: %s" %
-                     (src, dest, e))
-        return None
-
-
-class ProxyHopClient(paramiko.SSHClient):
-    '''
-    Connect to a remote server using a proxy hop
-    '''
-    def __init__(self, *args, **kwargs):
-        self.logger = rl.Logger("ProxyHopClient").getLogger()
-        self.proxy_ssh = None
-        self.proxy_transport = None
-        self.proxy_channel = None
-        self.proxy_ip = None
-        self.proxy_ssh_key = None
-        self.local_ssh_key = os.path.join(os.getcwd(), 'id_rsa')
-        super(ProxyHopClient, self).__init__(*args, **kwargs)
-
-    def configure_jump_host(self, jh_ip, jh_user, jh_pass,
-                            jh_ssh_key='/root/.ssh/id_rsa'):
-        self.proxy_ip = jh_ip
-        self.proxy_ssh_key = jh_ssh_key
-        self.proxy_ssh = paramiko.SSHClient()
-        self.proxy_ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
-        self.proxy_ssh.connect(jh_ip,
-                               username=jh_user,
-                               password=jh_pass)
-        self.proxy_transport = self.proxy_ssh.get_transport()
-
-    def connect(self, hostname, port=22, username='root', password=None,
-                pkey=None, key_filename=None, timeout=None, allow_agent=True,
-                look_for_keys=True, compress=False, sock=None, gss_auth=False,
-                gss_kex=False, gss_deleg_creds=True, gss_host=None,
-                banner_timeout=None):
-        try:
-            if self.proxy_ssh is None:
-                raise Exception('You must configure the jump '
-                                'host before calling connect')
-
-            get_file_res = get_file(self.proxy_ssh,
-                                    self.proxy_ssh_key,
-                                    self.local_ssh_key)
-            if get_file_res is None:
-                raise Exception('Could\'t fetch SSH key from jump host')
-            proxy_key = (paramiko.RSAKey
-                         .from_private_key_file(self.local_ssh_key))
-
-            self.proxy_channel = self.proxy_transport.open_channel(
-                "direct-tcpip",
-                (hostname, 22),
-                (self.proxy_ip, 22))
-
-            self.set_missing_host_key_policy(paramiko.AutoAddPolicy())
-            super(ProxyHopClient, self).connect(hostname,
-                                                username=username,
-                                                pkey=proxy_key,
-                                                sock=self.proxy_channel)
-            os.remove(self.local_ssh_key)
-        except Exception, e:
-            self.logger.error(e)
diff --git a/functest/opnfv_tests/features/sfc/compute_presetup_CI.bash b/functest/opnfv_tests/features/sfc/compute_presetup_CI.bash
deleted file mode 100755 (executable)
index 36148aa..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-
-# This script must be use with vxlan-gpe + nsh. Once we have eth + nsh support
-# in ODL, we will not need it anymore
-
-set -e
-ssh_options='-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
-BASEDIR=`dirname $0`
-INSTALLER_IP=${INSTALLER_IP:-10.20.0.2}
-
-pushd $BASEDIR
-#ip=`sshpass -p r00tme ssh $ssh_options root@${INSTALLER_IP} 'fuel node'|grep compute|\
-#awk '{print $10}' | head -1`
-
-ip=$1
-echo $ip
-#sshpass -p r00tme scp $ssh_options correct_classifier.bash ${INSTALLER_IP}:/root
-#sshpass -p r00tme ssh $ssh_options root@${INSTALLER_IP} 'scp correct_classifier.bash '"$ip"':/root'
-
-sshpass -p r00tme ssh $ssh_options root@${INSTALLER_IP} 'ssh root@'"$ip"' ifconfig br-int up'
-output=$(sshpass -p r00tme ssh $ssh_options root@${INSTALLER_IP} 'ssh root@'"$ip"' ip route | \
-cut -d" " -f1 | grep 11.0.0.0' ; exit 0)
-
-if [ -z "$output" ]; then
-sshpass -p r00tme ssh $ssh_options root@${INSTALLER_IP} 'ssh root@'"$ip"' ip route add 11.0.0.0/24 \
-dev br-int'
-fi
diff --git a/functest/opnfv_tests/features/sfc/correct_classifier.bash b/functest/opnfv_tests/features/sfc/correct_classifier.bash
deleted file mode 100755 (executable)
index fb08af5..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/bin/bash
-
-#This scripts correct the current ODL bug which does not detect
-#when SFF and classifier are in the same swtich
-
-nsp=`ovs-ofctl -O Openflow13 dump-flows br-int table=11 | \
-grep "NXM_NX_NSP" | head -1 | cut -d',' -f13 | cut -d':' -f2 \
-| cut -d'-' -f1`
-
-ip=`ovs-ofctl -O Openflow13 dump-flows br-int table=11 | \
-grep NXM_NX_NSH_C1 | head -1 | cut -d':' -f5 | cut -d'-' -f1`
-
-output_port=`ovs-ofctl -O Openflow13 show br-int | \
-grep vxgpe | cut -d'(' -f1`
-
-output_port2=`echo $output_port`
-
-echo "This is the nsp =$(($nsp))"
-echo "This is the ip=$ip"
-echo "This is the vxlan-gpe port=$output_port2"
-
-ovs-ofctl -O Openflow13 del-flows br-int "table=11,tcp,reg0=0x1,tp_dst=80"
-ovs-ofctl -O Openflow13 del-flows br-int "table=11,tcp,reg0=0x1,tp_dst=22"
-
-ovs-ofctl -O Openflow13 add-flow br-int "table=11,tcp,reg0=0x1,tp_dst=80 \
-actions=move:NXM_NX_TUN_ID[0..31]->NXM_NX_NSH_C2[],push_nsh,\
-load:0x1->NXM_NX_NSH_MDTYPE[],load:0x3->NXM_NX_NSH_NP[],\
-load:$ip->NXM_NX_NSH_C1[],load:$nsp->NXM_NX_NSP[0..23],\
-load:0xff->NXM_NX_NSI[],load:$ip->NXM_NX_TUN_IPV4_DST[],\
-load:$nsp->NXM_NX_TUN_ID[0..31],resubmit($output_port,0)"
-
-ovs-ofctl -O Openflow13 add-flow br-int "table=11,tcp,reg0=0x1,tp_dst=22\
- actions=move:NXM_NX_TUN_ID[0..31]->NXM_NX_NSH_C2[],push_nsh,\
-load:0x1->NXM_NX_NSH_MDTYPE[],load:0x3->NXM_NX_NSH_NP[],\
-load:$ip->NXM_NX_NSH_C1[],load:$nsp->NXM_NX_NSP[0..23],\
-load:0xff->NXM_NX_NSI[],load:$ip->NXM_NX_TUN_IPV4_DST[],\
-load:$nsp->NXM_NX_TUN_ID[0..31],resubmit($output_port,0)"
diff --git a/functest/opnfv_tests/features/sfc/delete.sh b/functest/opnfv_tests/features/sfc/delete.sh
deleted file mode 100755 (executable)
index c04ae63..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-tacker sfc-classifier-delete red_http
-tacker sfc-classifier-delete blue_ssh
-tacker sfc-classifier-delete red_ssh
-tacker sfc-classifier-delete blue_http
-tacker sfc-delete red
-tacker sfc-delete blue
-tacker vnf-delete testVNF1
-tacker vnf-delete testVNF2
-tacker vnfd-delete test-vnfd1
-tacker vnfd-delete test-vnfd2
-openstack stack delete sfc --y
-openstack stack delete sfc_test1 --y
-openstack stack delete sfc_test2 --y
-nova delete client
-nova delete server
diff --git a/functest/opnfv_tests/features/sfc/ovs_utils.py b/functest/opnfv_tests/features/sfc/ovs_utils.py
deleted file mode 100644 (file)
index af1f232..0000000
+++ /dev/null
@@ -1,137 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# Author: George Paraskevopoulos (geopar@intracom-telecom.com)
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-import functest.utils.functest_logger as rl
-import os
-import time
-import shutil
-import re
-
-logger = rl.Logger('ovs_utils').getLogger()
-
-
-class OVSLogger(object):
-    def __init__(self, basedir, ft_resdir):
-        self.ovs_dir = basedir
-        self.ft_resdir = ft_resdir
-        self.__mkdir_p(self.ovs_dir)
-
-    def __mkdir_p(self, dirpath):
-        if not os.path.exists(dirpath):
-            os.makedirs(dirpath)
-
-    def __ssh_host(self, ssh_conn, host_prefix='10.20.0'):
-        try:
-            _, stdout, _ = ssh_conn.exec_command('hostname -I')
-            hosts = stdout.readline().strip().split(' ')
-            found_host = [h for h in hosts if h.startswith(host_prefix)][0]
-            return found_host
-        except Exception, e:
-            logger.error(e)
-
-    def __dump_to_file(self, operation, host, text, timestamp=None):
-        ts = (timestamp if timestamp is not None
-              else time.strftime("%Y%m%d-%H%M%S"))
-        dumpdir = os.path.join(self.ovs_dir, ts)
-        self.__mkdir_p(dumpdir)
-        fname = '{0}_{1}'.format(operation, host)
-        with open(os.path.join(dumpdir, fname), 'w') as f:
-            f.write(text)
-
-    def __remote_cmd(self, ssh_conn, cmd):
-        try:
-            _, stdout, stderr = ssh_conn.exec_command(cmd)
-            errors = stderr.readlines()
-            if len(errors) > 0:
-                host = self.__ssh_host(ssh_conn)
-                logger.error(''.join(errors))
-                raise Exception('Could not execute {0} in {1}'
-                                .format(cmd, host))
-            output = ''.join(stdout.readlines())
-            return output
-        except Exception, e:
-            logger.error('[__remote_command(ssh_client, {0})]: {1}'
-                         .format(cmd, e))
-            return None
-
-    def create_artifact_archive(self):
-        shutil.make_archive(self.ovs_dir,
-                            'zip',
-                            root_dir=os.path.dirname(self.ovs_dir),
-                            base_dir=self.ovs_dir)
-        shutil.copy2('{0}.zip'.format(self.ovs_dir), self.ft_resdir)
-
-    def ofctl_dump_flows(self, ssh_conn, br='br-int',
-                         choose_table=None, timestamp=None):
-        try:
-            cmd = 'ovs-ofctl -OOpenFlow13 dump-flows {0}'.format(br)
-            if choose_table is not None:
-                cmd = '{0} table={1}'.format(cmd, choose_table)
-            output = self.__remote_cmd(ssh_conn, cmd)
-            operation = 'ofctl_dump_flows'
-            host = self.__ssh_host(ssh_conn)
-            self.__dump_to_file(operation, host, output, timestamp=timestamp)
-            return output
-        except Exception, e:
-            logger.error('[ofctl_dump_flows(ssh_client, {0}, {1})]: {2}'
-                         .format(br, choose_table, e))
-            return None
-
-    def vsctl_show(self, ssh_conn, timestamp=None):
-        try:
-            cmd = 'ovs-vsctl show'
-            output = self.__remote_cmd(ssh_conn, cmd)
-            operation = 'vsctl_show'
-            host = self.__ssh_host(ssh_conn)
-            self.__dump_to_file(operation, host, output, timestamp=timestamp)
-            return output
-        except Exception, e:
-            logger.error('[vsctl_show(ssh_client)]: {0}'.format(e))
-            return None
-
-    def dump_ovs_logs(self, controller_clients, compute_clients,
-                      related_error=None, timestamp=None):
-        if timestamp is None:
-            timestamp = time.strftime("%Y%m%d-%H%M%S")
-
-        for controller_client in controller_clients:
-            self.ofctl_dump_flows(controller_client,
-                                  timestamp=timestamp)
-            self.vsctl_show(controller_client,
-                            timestamp=timestamp)
-
-        for compute_client in compute_clients:
-            self.ofctl_dump_flows(compute_client,
-                                  timestamp=timestamp)
-            self.vsctl_show(compute_client,
-                            timestamp=timestamp)
-
-        if related_error is not None:
-            dumpdir = os.path.join(self.ovs_dir, timestamp)
-            with open(os.path.join(dumpdir, 'error'), 'w') as f:
-                f.write(related_error)
-
-    def ofctl_time_counter(self, ssh_conn):
-        try:
-            # We get the flows from table 11
-            table = 11
-            br = "br-int"
-            output = self.ofctl_dump_flows(ssh_conn, br, table)
-            pattern = "NXM_NX_NSP"
-            rsps = []
-            lines = output.split(",")
-            for line in lines:
-                is_there = re.findall(pattern, line)
-                if is_there:
-                    value = line.split(":")[1].split("-")[0]
-                    rsps.append(value)
-            return rsps
-        except Exception, e:
-            logger.error('Error when countering %s' % e)
-            return None
diff --git a/functest/opnfv_tests/features/sfc/prepare_odl_sfc.bash b/functest/opnfv_tests/features/sfc/prepare_odl_sfc.bash
deleted file mode 100755 (executable)
index c4d8f4f..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/bin/bash
-
-#
-# Author: George Paraskevopoulos (geopar@intracom-telecom.com)
-#         Manuel Buil (manuel.buil@ericsson.com)
-# Prepares the controller and the compute nodes for the odl-sfc testcase
-#
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-
-ODL_SFC_LOG=/home/opnfv/functest/results/odl-sfc.log
-ODL_SFC_DIR=${FUNCTEST_REPO_DIR}/opnfv_tests/features/sfc
-
-# Split the output to the log file and redirect STDOUT and STDERR to /dev/null
-bash ${ODL_SFC_DIR}/server_presetup_CI.bash |& \
-    tee -a ${ODL_SFC_LOG} 1>/dev/null 2>&1
-
-# Get return value from PIPESTATUS array (bash specific feature)
-ret_val=${PIPESTATUS[0]}
-if [ $ret_val != 0 ]; then
-    echo "The tacker server deployment failed"
-    exit $ret_val
-fi
-echo "The tacker server was deployed successfully"
-
-bash ${ODL_SFC_DIR}/compute_presetup_CI.bash |& \
-    tee -a ${ODL_SFC_LOG} 1>/dev/null 2>&1
-
-ret_val=${PIPESTATUS[0]}
-if [ $ret_val != 0 ]; then
-    exit $ret_val
-fi
-
-exit 0
diff --git a/functest/opnfv_tests/features/sfc/prepare_odl_sfc.py b/functest/opnfv_tests/features/sfc/prepare_odl_sfc.py
deleted file mode 100755 (executable)
index 8ba287e..0000000
+++ /dev/null
@@ -1,97 +0,0 @@
-#
-# Author: George Paraskevopoulos (geopar@intracom-telecom.com)
-#         Manuel Buil (manuel.buil@ericsson.com)
-# Prepares the controller and the compute nodes for the odl-sfc testcase
-#
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-
-import os
-import sys
-import subprocess
-import paramiko
-import functest.utils.functest_logger as ft_logger
-
-logger = ft_logger.Logger("ODL_SFC").getLogger()
-
-try:
-    FUNCTEST_REPO_DIR = os.environ['FUNCTEST_REPO_DIR']
-except:
-    logger.debug("FUNCTEST_REPO_DIR does not exist!!!!!")
-
-FUNCTEST_REPO_DIR = "/home/opnfv/repos/functest"
-
-try:
-    INSTALLER_IP = os.environ['INSTALLER_IP']
-
-except:
-    logger.debug("INSTALLER_IP does not exist. We create 10.20.0.2")
-    INSTALLER_IP = "10.20.0.2"
-
-os.environ['ODL_SFC_LOG'] = "/home/opnfv/functest/results/odl-sfc.log"
-os.environ['ODL_SFC_DIR'] = os.path.join(FUNCTEST_REPO_DIR,
-                                         "functest/opnfv_tests/features/sfc")
-
-command = os.environ['ODL_SFC_DIR'] + ("/server_presetup_CI.bash | "
-                                       "tee -a ${ODL_SFC_LOG} "
-                                       "1>/dev/null 2>&1")
-
-output = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
-
-# This code is for debugging purposes
-# for line in iter(output.stdout.readline, ''):
-#    i = line.rstrip()
-#    print(i)
-
-# Make sure the process is finished before checking the returncode
-if not output.poll():
-    output.wait()
-
-# Get return value
-if output.returncode:
-    print("The presetup of the server did not work")
-    sys.exit(output.returncode)
-
-logger.info("The presetup of the server worked ")
-
-ssh_options = "-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
-ssh = paramiko.SSHClient()
-ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
-
-try:
-    ssh.connect(INSTALLER_IP, username="root",
-                password="r00tme", timeout=2)
-    command = "fuel node | grep compute | awk '{print $10}'"
-    logger.info("Executing ssh to collect the compute IPs")
-    (stdin, stdout, stderr) = ssh.exec_command(command)
-except:
-    logger.debug("Something went wrong in the ssh to collect the computes IP")
-
-output = stdout.readlines()
-for ip in output:
-    command = os.environ['ODL_SFC_DIR'] + ("/compute_presetup_CI.bash "
-                                           "" + ip.rstrip() + "| tee -a "
-                                           "${ODL_SFC_LOG} 1>/dev/null 2>&1")
-
-    output = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
-
-# This code is for debugging purposes
-#    for line in iter(output.stdout.readline, ''):
-#        print(line)
-#        sys.stdout.flush()
-
-    output.stdout.close()
-
-    if not (output.poll()):
-        output.wait()
-
-    # Get return value
-    if output.returncode:
-        print("The compute config did not work on compute %s" % ip)
-        sys.exit(output.returncode)
-
-sys.exit(0)
diff --git a/functest/opnfv_tests/features/sfc/server_presetup_CI.bash b/functest/opnfv_tests/features/sfc/server_presetup_CI.bash
deleted file mode 100755 (executable)
index 240353f..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/bash
-set -e
-ssh_options='-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
-BASEDIR=`dirname $0`
-INSTALLER_IP=${INSTALLER_IP:-10.20.0.2}
-
-pushd $BASEDIR
-ip=$(sshpass -p r00tme ssh $ssh_options root@${INSTALLER_IP} 'fuel node'|grep controller|awk '{print $10}' | head -1)
-echo $ip
-
-sshpass -p r00tme scp $ssh_options delete.sh ${INSTALLER_IP}:/root
-sshpass -p r00tme ssh $ssh_options root@${INSTALLER_IP} 'scp '"$ip"':/root/tackerc .'
-sshpass -p r00tme scp $ssh_options ${INSTALLER_IP}:/root/tackerc $BASEDIR
diff --git a/functest/opnfv_tests/features/sfc/sfc.py b/functest/opnfv_tests/features/sfc/sfc.py
deleted file mode 100755 (executable)
index e048d90..0000000
+++ /dev/null
@@ -1,589 +0,0 @@
-import argparse
-import os
-import subprocess
-import sys
-import time
-import functest.utils.functest_logger as ft_logger
-import functest.utils.functest_utils as ft_utils
-import functest.utils.openstack_utils as os_utils
-import re
-import json
-import SSHUtils as ssh_utils
-import ovs_utils
-import thread
-
-parser = argparse.ArgumentParser()
-
-parser.add_argument("-r", "--report",
-                    help="Create json result file",
-                    action="store_true")
-
-args = parser.parse_args()
-
-""" logging configuration """
-logger = ft_logger.Logger("ODL_SFC").getLogger()
-
-FUNCTEST_RESULTS_DIR = '/home/opnfv/functest/results/odl-sfc'
-FUNCTEST_REPO = ft_utils.FUNCTEST_REPO
-REPO_PATH = os.path.join(os.environ['repos_dir'], 'functest/')
-CLIENT = "client"
-SERVER = "server"
-FLAVOR = "custom"
-IMAGE_NAME = "sf_nsh_colorado"
-IMAGE_FILENAME = "sf_nsh_colorado.qcow2"
-IMAGE_FORMAT = "qcow2"
-IMAGE_DIR = "/home/opnfv/functest/data"
-IMAGE_PATH = os.path.join(IMAGE_DIR, IMAGE_FILENAME)
-IMAGE_URL = "http://artifacts.opnfv.org/sfc/demo/" + IMAGE_FILENAME
-
-# NEUTRON Private Network parameters
-NET_NAME = "example-net"
-SUBNET_NAME = "example-subnet"
-SUBNET_CIDR = "11.0.0.0/24"
-ROUTER_NAME = "example-router"
-SECGROUP_NAME = "example-sg"
-SECGROUP_DESCR = "Example Security group"
-SFC_TEST_DIR = os.path.join(REPO_PATH, "functest/opnfv_tests/features/sfc/")
-TACKER_SCRIPT = SFC_TEST_DIR + "sfc_tacker.bash"
-TACKER_CHANGECLASSI = SFC_TEST_DIR + "sfc_change_classi.bash"
-ssh_options = '-q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
-json_results = {"tests": 4, "failures": 0}
-
-PROXY = {
-    'ip': '10.20.0.2',
-    'username': 'root',
-    'password': 'r00tme'
-}
-
-# run given command locally and return commands output if success
-
-
-def run_cmd(cmd, wdir=None, ignore_stderr=False, ignore_no_output=True):
-    pipe = subprocess.Popen(cmd, shell=True,
-                            stdin=subprocess.PIPE,
-                            stdout=subprocess.PIPE,
-                            stderr=subprocess.PIPE, cwd=wdir)
-
-    (output, errors) = pipe.communicate()
-    if output:
-        output = output.strip()
-    if pipe.returncode < 0:
-        logger.error(errors)
-        return False
-    if errors:
-        logger.error(errors)
-        if ignore_stderr:
-            return True
-        else:
-            return False
-
-    if ignore_no_output:
-        if not output:
-            return True
-
-    return output
-
-# run given command on OpenStack controller
-
-
-def run_cmd_on_cntlr(cmd):
-    ip_cntlrs = get_openstack_node_ips("controller")
-    if not ip_cntlrs:
-        return None
-
-    ssh_cmd = "ssh %s %s %s" % (ssh_options, ip_cntlrs[0], cmd)
-    return run_cmd_on_fm(ssh_cmd)
-
-# run given command on OpenStack Compute node
-
-
-def run_cmd_on_compute(cmd):
-    ip_computes = get_openstack_node_ips("compute")
-    if not ip_computes:
-        return None
-
-    ssh_cmd = "ssh %s %s %s" % (ssh_options, ip_computes[0], cmd)
-    return run_cmd_on_fm(ssh_cmd)
-
-# run given command on Fuel Master
-
-
-def run_cmd_on_fm(cmd, username="root", passwd="r00tme"):
-    ip = os.environ.get("INSTALLER_IP")
-    ssh_cmd = "sshpass -p %s ssh %s %s@%s %s" % (
-        passwd, ssh_options, username, ip, cmd)
-    return run_cmd(ssh_cmd)
-
-# run given command on Remote Machine, Can be VM
-
-
-def run_cmd_remote(ip, cmd, username="root", passwd="opnfv"):
-    ssh_opt_append = "%s -o ConnectTimeout=50 " % ssh_options
-    ssh_cmd = "sshpass -p %s ssh %s %s@%s %s" % (
-        passwd, ssh_opt_append, username, ip, cmd)
-    return run_cmd(ssh_cmd)
-
-# Get OpenStack Nodes IP Address
-
-
-def get_openstack_node_ips(role):
-    fuel_env = os.environ.get("FUEL_ENV")
-    if fuel_env is not None:
-        cmd = "fuel2 node list -f json -e %s" % fuel_env
-    else:
-        cmd = "fuel2 node list -f json"
-
-    nodes = run_cmd_on_fm(cmd)
-    ips = []
-    nodes = json.loads(nodes)
-    for node in nodes:
-        if role in node["roles"]:
-            ips.append(node["ip"])
-
-    return ips
-
-# Configures IPTABLES on OpenStack Controller
-
-
-def configure_iptables():
-    iptable_cmds = ["iptables -P INPUT ACCEPT",
-                    "iptables -t nat -P INPUT ACCEPT",
-                    "iptables -A INPUT -m state \
-                    --state NEW,ESTABLISHED,RELATED -j ACCEPT"]
-
-    for cmd in iptable_cmds:
-        logger.info("Configuring %s on contoller" % cmd)
-        run_cmd_on_cntlr(cmd)
-
-    return
-
-
-def download_image():
-    if not os.path.isfile(IMAGE_PATH):
-        logger.info("Downloading image")
-        ft_utils.download_url(IMAGE_URL, IMAGE_DIR)
-
-    logger.info("Using old image")
-    return
-
-
-def setup_glance(glance_client):
-    image_id = os_utils.create_glance_image(glance_client,
-                                            IMAGE_NAME,
-                                            IMAGE_PATH,
-                                            disk=IMAGE_FORMAT,
-                                            container="bare",
-                                            public=True)
-
-    return image_id
-
-
-def setup_neutron(neutron_client):
-    n_dict = os_utils.create_network_full(neutron_client,
-                                          NET_NAME,
-                                          SUBNET_NAME,
-                                          ROUTER_NAME,
-                                          SUBNET_CIDR)
-    if not n_dict:
-        logger.error("failed to create neutron network")
-        sys.exit(-1)
-
-    network_id = n_dict["net_id"]
-    return network_id
-
-
-def setup_ingress_egress_secgroup(neutron_client, protocol,
-                                  min_port=None, max_port=None):
-    secgroups = os_utils.get_security_groups(neutron_client)
-    for sg in secgroups:
-        os_utils.create_secgroup_rule(neutron_client, sg['id'],
-                                      'ingress', protocol,
-                                      port_range_min=min_port,
-                                      port_range_max=max_port)
-        os_utils.create_secgroup_rule(neutron_client, sg['id'],
-                                      'egress', protocol,
-                                      port_range_min=min_port,
-                                      port_range_max=max_port)
-    return
-
-
-def setup_security_groups(neutron_client):
-    sg_id = os_utils.create_security_group_full(neutron_client,
-                                                SECGROUP_NAME, SECGROUP_DESCR)
-    setup_ingress_egress_secgroup(neutron_client, "icmp")
-    setup_ingress_egress_secgroup(neutron_client, "udp", 67, 68)
-    setup_ingress_egress_secgroup(neutron_client, "tcp", 22, 22)
-    setup_ingress_egress_secgroup(neutron_client, "tcp", 80, 80)
-    return sg_id
-
-
-def boot_instance(nova_client, name, flavor, image_id, network_id, sg_id):
-    logger.info("Creating instance '%s'..." % name)
-    logger.debug(
-        "Configuration:\n name=%s \n flavor=%s \n image=%s \n "
-        "network=%s \n" % (name, flavor, image_id, network_id))
-
-    instance = os_utils.create_instance_and_wait_for_active(flavor,
-                                                            image_id,
-                                                            network_id,
-                                                            name)
-
-    if instance is None:
-        logger.error("Error while booting instance.")
-        sys.exit(-1)
-
-    instance_ip = instance.networks.get(NET_NAME)[0]
-    logger.debug("Instance '%s' got private ip '%s'." %
-                 (name, instance_ip))
-
-    logger.info("Adding '%s' to security group %s" % (name, SECGROUP_NAME))
-    os_utils.add_secgroup_to_instance(nova_client, instance.id, sg_id)
-
-    return instance_ip
-
-
-def ping(remote, pkt_cnt=1, iface=None, retries=100, timeout=None):
-    ping_cmd = 'ping'
-
-    if timeout:
-        ping_cmd = ping_cmd + ' -w %s' % timeout
-
-    grep_cmd = "grep -e 'packet loss' -e rtt"
-
-    if iface is not None:
-        ping_cmd = ping_cmd + ' -I %s' % iface
-
-    ping_cmd = ping_cmd + ' -i 0 -c %d %s' % (pkt_cnt, remote)
-    cmd = ping_cmd + '|' + grep_cmd
-
-    while retries > 0:
-        output = run_cmd(cmd)
-        if not output:
-            return False
-
-        match = re.search('(\d*)% packet loss', output)
-        if not match:
-            return False
-
-        packet_loss = int(match.group(1))
-        if packet_loss == 0:
-            return True
-
-        retries = retries - 1
-
-    return False
-
-
-def get_floating_ips(nova_client, neutron_client):
-    ips = []
-    instances = nova_client.servers.list(search_opts={'all_tenants': 1})
-    for instance in instances:
-        floatip_dic = os_utils.create_floating_ip(neutron_client)
-        floatip = floatip_dic['fip_addr']
-        instance.add_floating_ip(floatip)
-        logger.info("Instance name and ip %s:%s " % (instance.name, floatip))
-        logger.info("Waiting for instance %s:%s to come up" %
-                    (instance.name, floatip))
-        if not ping(floatip):
-            logger.info("Instance %s:%s didn't come up" %
-                        (instance.name, floatip))
-            sys.exit(1)
-
-        if instance.name == "server":
-            logger.info("Server:%s is reachable" % floatip)
-            server_ip = floatip
-        elif instance.name == "client":
-            logger.info("Client:%s is reachable" % floatip)
-            client_ip = floatip
-        else:
-            logger.info("SF:%s is reachable" % floatip)
-            ips.append(floatip)
-
-    return server_ip, client_ip, ips[1], ips[0]
-
-# Start http server on a give machine, Can be VM
-
-
-def start_http_server(ip):
-    cmd = "\'python -m SimpleHTTPServer 80"
-    cmd = cmd + " > /dev/null 2>&1 &\'"
-    return run_cmd_remote(ip, cmd)
-
-# Set firewall using vxlan_tool.py on a give machine, Can be VM
-
-
-def vxlan_firewall(sf, iface="eth0", port="22", block=True):
-    cmd = "python vxlan_tool.py"
-    cmd = cmd + " -i " + iface + " -d forward -v off"
-    if block:
-        cmd = "python vxlan_tool.py -i eth0 -d forward -v off -b " + port
-
-    cmd = "sh -c 'cd /root;nohup " + cmd + " > /dev/null 2>&1 &'"
-    run_cmd_remote(sf, cmd)
-
-# Run netcat on a give machine, Can be VM
-
-
-def netcat(s_ip, c_ip, port="80", timeout=5):
-    cmd = "nc -zv "
-    cmd = cmd + " -w %s %s %s" % (timeout, s_ip, port)
-    cmd = cmd + " 2>&1"
-    output = run_cmd_remote(c_ip, cmd)
-    logger.info("%s" % output)
-    return output
-
-
-def is_ssh_blocked(srv_prv_ip, client_ip):
-    res = netcat(srv_prv_ip, client_ip, port="22")
-    match = re.search("nc:.*timed out:.*", res, re.M)
-    if match:
-        return True
-
-    return False
-
-
-def is_http_blocked(srv_prv_ip, client_ip):
-    res = netcat(srv_prv_ip, client_ip, port="80")
-    match = re.search(".* 80 port.* succeeded!", res, re.M)
-    if match:
-        return False
-
-    return True
-
-
-def capture_err_logs(controller_clients, compute_clients, error):
-    ovs_logger = ovs_utils.OVSLogger(
-        os.path.join(os.getcwd(), 'ovs-logs'),
-        FUNCTEST_RESULTS_DIR)
-
-    timestamp = time.strftime("%Y%m%d-%H%M%S")
-    ovs_logger.dump_ovs_logs(controller_clients,
-                             compute_clients,
-                             related_error=error,
-                             timestamp=timestamp)
-    return
-
-
-def update_json_results(name, result):
-    json_results.update({name: result})
-    if result is not "Passed":
-        json_results["failures"] += 1
-
-    return
-
-
-def get_ssh_clients(role):
-    clients = []
-    for ip in get_openstack_node_ips(role):
-        s_client = ssh_utils.get_ssh_client(ip,
-                                            'root',
-                                            proxy=PROXY)
-        clients.append(s_client)
-
-    return clients
-
-# Check SSH connectivity to VNFs
-
-
-def check_ssh(ips, retries=100):
-    check = [False, False]
-    logger.info("Checking SSH connectivity to the SFs with ips %s" % str(ips))
-    while retries and not all(check):
-        for index, ip in enumerate(ips):
-            check[index] = run_cmd_remote(ip, "exit")
-
-        if all(check):
-            logger.info("SSH connectivity to the SFs established")
-            return True
-
-        time.sleep(3)
-        retries -= 1
-
-    return False
-
-# Measure the time it takes to update the classification rules
-
-
-def capture_time_log(compute_clients):
-    ovs_logger = ovs_utils.OVSLogger(
-        os.path.join(os.getcwd(), 'ovs-logs'),
-        "test")
-    i = 0
-    first_RSP = ""
-    start_time = time.time()
-    while True:
-        rsps = ovs_logger.ofctl_time_counter(compute_clients[0])
-        if not i:
-            if len(rsps) > 0:
-                first_RSP = rsps[0]
-                i = i + 1
-            else:
-                first_RSP = 0
-                i = i + 1
-        if (len(rsps) > 1):
-            if(first_RSP != rsps[0]):
-                if (rsps[0] == rsps[1]):
-                    stop_time = time.time()
-                    logger.info("classification rules updated")
-                    difference = stop_time - start_time
-                    logger.info("It took %s seconds" % difference)
-                    break
-        time.sleep(1)
-    return
-
-
-def main():
-    installer_type = os.environ.get("INSTALLER_TYPE")
-    if installer_type != "fuel":
-        logger.error(
-            '\033[91mCurrently supported only Fuel Installer type\033[0m')
-        sys.exit(1)
-
-    installer_ip = os.environ.get("INSTALLER_IP")
-    if not installer_ip:
-        logger.error(
-            '\033[91minstaller ip is not set\033[0m')
-        logger.error(
-            '\033[91mexport INSTALLER_IP=<ip>\033[0m')
-        sys.exit(1)
-
-    env_list = run_cmd_on_fm("fuel2 env list -f json")
-    fuel_env = os.environ.get("FUEL_ENV")
-    if len(eval(env_list)) > 1 and fuel_env is None:
-        out = run_cmd_on_fm("fuel env")
-        logger.error(
-            '\033[91mMore than one fuel env found\033[0m\n %s' % out)
-        logger.error(
-            '\033[91mexport FUEL_ENV=<env-id> to set ENV\033[0m')
-        sys.exit(1)
-
-    start_time = time.time()
-    status = "PASS"
-    configure_iptables()
-    download_image()
-    _, custom_flv_id = os_utils.get_or_create_flavor(
-        FLAVOR, 1500, 10, 1, public=True)
-    if not custom_flv_id:
-        logger.error("Failed to create custom flavor")
-        sys.exit(1)
-
-    glance_client = os_utils.get_glance_client()
-    neutron_client = os_utils.get_neutron_client()
-    nova_client = os_utils.get_nova_client()
-
-    controller_clients = get_ssh_clients("controller")
-    compute_clients = get_ssh_clients("compute")
-
-    image_id = setup_glance(glance_client)
-    network_id = setup_neutron(neutron_client)
-    sg_id = setup_security_groups(neutron_client)
-
-    boot_instance(
-        nova_client, CLIENT, FLAVOR, image_id, network_id, sg_id)
-    srv_prv_ip = boot_instance(
-        nova_client, SERVER, FLAVOR, image_id, network_id, sg_id)
-
-    subprocess.call(TACKER_SCRIPT, shell=True)
-
-    # Start measuring the time it takes to implement the classification rules
-    try:
-        thread.start_new_thread(capture_time_log, (compute_clients,))
-    except Exception, e:
-        logger.error("Unable to start the thread that counts time %s" % e)
-
-    server_ip, client_ip, sf1, sf2 = get_floating_ips(
-        nova_client, neutron_client)
-
-    if not check_ssh([sf1, sf2]):
-        logger.error("Cannot establish SSH connection to the SFs")
-        sys.exit(1)
-
-    logger.info("Starting HTTP server on %s" % server_ip)
-    if not start_http_server(server_ip):
-        logger.error(
-            '\033[91mFailed to start HTTP server on %s\033[0m' % server_ip)
-        sys.exit(1)
-
-    logger.info("Starting HTTP firewall on %s" % sf2)
-    vxlan_firewall(sf2, port="80")
-    logger.info("Starting SSH firewall on %s" % sf1)
-    vxlan_firewall(sf1, port="22")
-
-    logger.info("Wait for ODL to update the classification rules in OVS")
-    time.sleep(120)
-
-    logger.info("Test SSH")
-    if is_ssh_blocked(srv_prv_ip, client_ip):
-        logger.info('\033[92mTEST 1 [PASSED] ==> SSH BLOCKED\033[0m')
-        update_json_results("Test 1: SSH Blocked", "Passed")
-    else:
-        error = ('\033[91mTEST 1 [FAILED] ==> SSH NOT BLOCKED\033[0m')
-        logger.error(error)
-        capture_err_logs(controller_clients, compute_clients, error)
-        update_json_results("Test 1: SSH Blocked", "Failed")
-
-    logger.info("Test HTTP")
-    if not is_http_blocked(srv_prv_ip, client_ip):
-        logger.info('\033[92mTEST 2 [PASSED] ==> HTTP WORKS\033[0m')
-        update_json_results("Test 2: HTTP works", "Passed")
-    else:
-        error = ('\033[91mTEST 2 [FAILED] ==> HTTP BLOCKED\033[0m')
-        logger.error(error)
-        capture_err_logs(controller_clients, compute_clients, error)
-        update_json_results("Test 2: HTTP works", "Failed")
-
-    logger.info("Changing the classification")
-    subprocess.call(TACKER_CHANGECLASSI, shell=True)
-
-    # Start measuring the time it takes to implement the classification rules
-    try:
-        thread.start_new_thread(capture_time_log, (compute_clients,))
-    except Exception, e:
-        logger.error("Unable to start the thread that counts time %s" % e)
-
-    logger.info("Wait for ODL to update the classification rules in OVS")
-    time.sleep(100)
-
-    logger.info("Test HTTP")
-    if is_http_blocked(srv_prv_ip, client_ip):
-        logger.info('\033[92mTEST 3 [PASSED] ==> HTTP Blocked\033[0m')
-        update_json_results("Test 3: HTTP Blocked", "Passed")
-    else:
-        error = ('\033[91mTEST 3 [FAILED] ==> HTTP WORKS\033[0m')
-        logger.error(error)
-        capture_err_logs(controller_clients, compute_clients, error)
-        update_json_results("Test 3: HTTP Blocked", "Failed")
-
-    logger.info("Test SSH")
-    if not is_ssh_blocked(srv_prv_ip, client_ip):
-        logger.info('\033[92mTEST 4 [PASSED] ==> SSH Works\033[0m')
-        update_json_results("Test 4: SSH Works", "Passed")
-    else:
-        error = ('\033[91mTEST 4 [FAILED] ==> SSH BLOCKED\033[0m')
-        logger.error(error)
-        capture_err_logs(controller_clients, compute_clients, error)
-        update_json_results("Test 4: SSH Works", "Failed")
-
-    if json_results["failures"]:
-        status = "FAIL"
-        logger.error('\033[91mSFC TESTS: %s :( FOUND %s FAIL \033[0m' % (
-            status, json_results["failures"]))
-
-    if args.report:
-        stop_time = time.time()
-        logger.debug("Promise Results json: " + str(json_results))
-        ft_utils.push_results_to_db("sfc",
-                                    "functest-odl-sfc",
-                                    start_time,
-                                    stop_time,
-                                    status,
-                                    json_results)
-
-    if status == "PASS":
-        logger.info('\033[92mSFC ALL TESTS: %s :)\033[0m' % status)
-        sys.exit(0)
-
-    sys.exit(1)
-
-if __name__ == '__main__':
-    main()
diff --git a/functest/opnfv_tests/features/sfc/sfc_change_classi.bash b/functest/opnfv_tests/features/sfc/sfc_change_classi.bash
deleted file mode 100755 (executable)
index 70375ab..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-tacker sfc-classifier-delete red_http
-tacker sfc-classifier-delete red_ssh
-
-tacker sfc-classifier-create --name blue_http --chain blue --match source_port=0,dest_port=80,protocol=6
-tacker sfc-classifier-create --name blue_ssh  --chain blue --match source_port=0,dest_port=22,protocol=6
-
-tacker sfc-classifier-list
diff --git a/functest/opnfv_tests/features/sfc/sfc_tacker.bash b/functest/opnfv_tests/features/sfc/sfc_tacker.bash
deleted file mode 100755 (executable)
index 690d5f5..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/bash
-BASEDIR=`dirname $0`
-
-#import VNF descriptor
-tacker vnfd-create --vnfd-file ${BASEDIR}/test-vnfd1.yaml
-tacker vnfd-create --vnfd-file ${BASEDIR}/test-vnfd2.yaml
-
-#create instances of the imported VNF
-tacker vnf-create --name testVNF1 --vnfd-name test-vnfd1
-tacker vnf-create --name testVNF2 --vnfd-name test-vnfd2
-
-key=true
-while $key;do
-        sleep 3
-        active=`tacker vnf-list | grep -E 'PENDING|ERROR'`
-        echo -e "checking if SFs are up:  $active"
-        if [ -z "$active" ]; then
-                key=false
-        fi
-done
-
-#create service chain
-tacker sfc-create --name red --chain testVNF1
-tacker sfc-create --name blue --chain testVNF2
-
-#create classifier
-tacker sfc-classifier-create --name red_http --chain red --match source_port=0,dest_port=80,protocol=6
-tacker sfc-classifier-create --name red_ssh --chain red --match source_port=0,dest_port=22,protocol=6
-
-tacker sfc-list
-tacker sfc-classifier-list
diff --git a/functest/opnfv_tests/features/sfc/tacker_client_install.sh b/functest/opnfv_tests/features/sfc/tacker_client_install.sh
deleted file mode 100755 (executable)
index adb9a44..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-MYDIR=$(dirname $(readlink -f "$0"))
-CLIENT=$(echo python-python-tackerclient_*_all.deb)
-CLIREPO="tacker-client"
-
-# Function checks whether a python egg is available, if not, installs
-function chkPPkg() {
-    PKG="$1"
-    IPPACK=$(python - <<'____EOF'
-import pip
-from os.path import join
-for package in pip.get_installed_distributions():
-    print(package.location)
-    print(join(package.location, *package._get_metadata("top_level.txt")))
-____EOF
-)
-    echo "$IPPACK" | grep -q "$PKG"
-    if [ $? -ne 0 ];then
-        pip install "$PKG"
-    fi
-}
-
-function envSetup() {
-    apt-get install -y python-all debhelper fakeroot
-    #pip install --upgrade python-keystoneclient==1.7.4
-    chkPPkg stdeb
-}
-
-# Function installs python-tackerclient from github
-function deployTackerClient() {
-    cd $MYDIR
-    git clone -b 'SFC_refactor' https://github.com/trozet/python-tackerclient.git $CLIREPO
-    cd $CLIREPO
-    python setup.py --command-packages=stdeb.command bdist_deb
-    cd "deb_dist"
-    CLIENT=$(echo python-python-tackerclient_*_all.deb)
-    cp $CLIENT $MYDIR
-    dpkg -i "${MYDIR}/${CLIENT}"
-    apt-get -f -y install
-    dpkg -i "${MYDIR}/${CLIENT}"
-}
-
-envSetup
-deployTackerClient
diff --git a/functest/opnfv_tests/features/sfc/test-vnfd1.yaml b/functest/opnfv_tests/features/sfc/test-vnfd1.yaml
deleted file mode 100644 (file)
index 5c672e3..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-template_name: test-vnfd1
-description: firewall1-example
-
-service_properties:
-  Id: firewall1-vnfd
-  vendor: tacker
-  version: 1
-  type:
-      - firewall1
-vdus:
-  vdu1:
-    id: vdu1
-    vm_image: sf_nsh_colorado
-    instance_type: custom
-    service_type: firewall1
-
-    network_interfaces:
-      management:
-        network: example-net
-        management: true
-
-    placement_policy:
-      availability_zone: nova
-
-    auto-scaling: noop
-    monitoring_policy: noop
-    failure_policy: respawn
-
-    config:
-      param0: key0
-      param1: key1
diff --git a/functest/opnfv_tests/features/sfc/test-vnfd2.yaml b/functest/opnfv_tests/features/sfc/test-vnfd2.yaml
deleted file mode 100644 (file)
index 8a570ab..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-template_name: test-vnfd2
-description: firewall2-example
-
-service_properties:
-  Id: firewall2-vnfd
-  vendor: tacker
-  version: 1
-  type:
-      - firewall2
-vdus:
-  vdu1:
-    id: vdu1
-    vm_image: sf_nsh_colorado
-    instance_type: custom
-    service_type: firewall2
-
-    network_interfaces:
-      management:
-        network: example-net
-        management: true
-
-    placement_policy:
-      availability_zone: nova
-
-    auto-scaling: noop
-    monitoring_policy: noop
-    failure_policy: respawn
-
-    config:
-      param0: key0
-      param1: key1
diff --git a/functest/utils/openstack/cinder.py b/functest/utils/openstack/cinder.py
new file mode 100644 (file)
index 0000000..f966468
--- /dev/null
@@ -0,0 +1,7 @@
+#!/usr/bin/env python
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
diff --git a/functest/utils/openstack/glance.py b/functest/utils/openstack/glance.py
new file mode 100644 (file)
index 0000000..f966468
--- /dev/null
@@ -0,0 +1,7 @@
+#!/usr/bin/env python
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
diff --git a/functest/utils/openstack/keystone.py b/functest/utils/openstack/keystone.py
new file mode 100644 (file)
index 0000000..f966468
--- /dev/null
@@ -0,0 +1,7 @@
+#!/usr/bin/env python
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
diff --git a/functest/utils/openstack/neutron.py b/functest/utils/openstack/neutron.py
new file mode 100644 (file)
index 0000000..f966468
--- /dev/null
@@ -0,0 +1,7 @@
+#!/usr/bin/env python
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
diff --git a/functest/utils/openstack/nova.py b/functest/utils/openstack/nova.py
new file mode 100644 (file)
index 0000000..f966468
--- /dev/null
@@ -0,0 +1,7 @@
+#!/usr/bin/env python
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
index e050418..0abf3c2 100755 (executable)
@@ -55,7 +55,7 @@ nosetests --with-xunit \
          --with-coverage \
          --cover-erase \
          --cover-package=functest.core.TestCasesBase \
-         --cover-package=functest.testcases.Controllers.ODL.OpenDaylightTesting \
+         --cover-package=functest.opnfv_tests.Controllers.ODL.OpenDaylightTesting \
          --cover-xml \
          --cover-html \
          functest/tests/unit