debug:
name: debug
testcases_list:
+ - dovetail.example.tc002
- dovetail.ipv6.tc001
- dovetail.nfvi.tc001
- dovetail.nfvi.tc002
--- /dev/null
+example_set:
+ name: example_set
+ testcases_list:
+ # Temporarily, a couple of test cases are kept here as the default to run
+ # for use in software development/debug
+ # TODO: will amend when the compliance set is settled
+ - dovetail.example.tc001
+ - dovetail.example.tc002
cls.update_config_envs('yardstick', key, options[item])
@classmethod
- def update_config_envs(cls, script_type, key, value):
- envs = cls.dovetail_config[script_type]['envs']
+ def update_config_envs(cls, validate_type, key, value):
+ envs = cls.dovetail_config[validate_type]['envs']
old_value = re.findall(r'\s+%s=(.*?)(\s+|$)' % key, envs)
if old_value == []:
envs += ' -e ' + key + '=' + value
else:
envs = envs.replace(old_value[0][0], value)
- cls.dovetail_config[script_type]['envs'] = envs
+ cls.dovetail_config[validate_type]['envs'] = envs
return envs
parameters:
- name: testcase
path: '("name",)'
- - name: script_testcase
- path: '("scripts", "testcase")'
+ - name: validate_testcase
+ path: '("validate", "testcase")'
include_config:
- functest_config.yml
-e BUILD_TAG=dovetail -e CI_DEBUG=true -e DEPLOY_TYPE=baremetal'
opts: '-id --privileged=true'
pre_condition:
- cmds:
- - 'echo test for precondition'
- testcase:
- cmds:
- - 'functest env prepare'
- - 'functest testcase run {{script_testcase}}'
+ - 'echo test for precondition'
+ cmds:
+ - 'functest env prepare'
+ - 'functest testcase run {{validate_testcase}}'
post_condition:
- cmds:
- - ''
+ - ''
result:
dir: '/home/opnfv/functest/results'
store_type: 'file'
-e EXTERNAL_NETWORK=ext-net'
opts: '-id --privileged=true'
pre_condition:
- cmds:
- - 'source /home/opnfv/repos/yardstick/tests/ci/prepare_env.sh &&
+ - 'source /home/opnfv/repos/yardstick/tests/ci/prepare_env.sh &&
source /home/opnfv/repos/yardstick/tests/ci/clean_images.sh && cleanup'
- - 'source /home/opnfv/repos/yardstick/tests/ci/prepare_env.sh &&
+ - 'source /home/opnfv/repos/yardstick/tests/ci/prepare_env.sh &&
cd /home/opnfv/repos/yardstick && source tests/ci/load_images.sh'
- testcase:
- cmds:
- - 'mkdir -p /home/opnfv/yardstick/results/'
- - 'cd /home/opnfv/repos/yardstick && source tests/ci/prepare_env.sh &&
- yardstick task start tests/opnfv/test_cases/{{script_testcase}}.yaml
- --output-file /home/opnfv/yardstick/results/{{script_testcase}}.out &>
+ cmds:
+ - 'mkdir -p /home/opnfv/yardstick/results/'
+ - 'cd /home/opnfv/repos/yardstick && source tests/ci/prepare_env.sh &&
+ yardstick task start tests/opnfv/test_cases/{{validate_testcase}}.yaml
+ --output-file /home/opnfv/yardstick/results/{{validate_testcase}}.out &>
/home/opnfv/yardstick/results/yardstick.log'
post_condition:
- cmds:
- - ''
+ - ''
result:
dir: '/home/opnfv/yardstick/results'
store_type: 'file'
class Report:
- results = {'functest': {}, 'yardstick': {}}
+ results = {'functest': {}, 'yardstick': {}, 'shell': {}}
logger = None
@staticmethod
def check_result(testcase, db_result):
- checker = CheckerFactory.create(testcase.script_type())
- checker.check(testcase, db_result)
+ checker = CheckerFactory.create(testcase.validate_type())
+ if checker is not None:
+ checker.check(testcase, db_result)
@classmethod
def generate_json(cls, testsuite_yaml, testarea, duration):
@classmethod
def get_result(cls, testcase):
- script_testcase = testcase.script_testcase()
- type = testcase.script_type()
+ validate_testcase = testcase.validate_testcase()
+ type = testcase.validate_type()
crawler = CrawlerFactory.create(type)
+ if crawler is None:
+ return None
- if script_testcase in cls.results[type]:
- return cls.results[type][script_testcase]
+ if validate_testcase in cls.results[type]:
+ return cls.results[type][validate_testcase]
- result = crawler.crawl(script_testcase)
+ result = crawler.crawl(validate_testcase)
if result is not None:
- cls.results[type][script_testcase] = result
+ cls.results[type][validate_testcase] = result
testcase.script_result_acquired(True)
cls.logger.debug('testcase: %s -> result acquired' %
- script_testcase)
+ validate_testcase)
else:
retry = testcase.increase_retry()
cls.logger.debug('testcase: %s -> result acquired retry:%d' %
- (script_testcase, retry))
+ (validate_testcase, retry))
return result
import click
import sys
import os
-import time
import utils.dovetail_logger as dt_logger
import utils.dovetail_utils as dt_utils
from report import FunctestCrawler, YardstickCrawler
from report import FunctestChecker, YardstickChecker
from conf.dovetail_config import DovetailConfig as dt_cfg
+from test_runner import DockerRunner, ShellRunner
def load_testsuite(testsuite):
def set_container_tags(option_str):
for script_tag_opt in option_str.split(','):
option_str = script_tag_opt.split(':')
- script_type = option_str[0].strip()
+ validate_type = option_str[0].strip()
script_tag = option_str[1].strip()
- dt_cfg.dovetail_config[script_type]['docker_tag'] = script_tag
+ dt_cfg.dovetail_config[validate_type]['docker_tag'] = script_tag
def load_testcase():
run_testcase = False
if run_testcase:
- Container.pull_image(testcase.script_type())
- container_id = Container.create(testcase.script_type())
- logger.debug('container id:%s' % container_id)
-
- if not Testcase.prepared(testcase.script_type()):
- cmds = testcase.pre_condition()['cmds']
- if cmds:
- for cmd in cmds:
- Container.exec_cmd(container_id, cmd)
- Testcase.prepared(testcase.script_type(), True)
-
- if not testcase.prepare_cmd():
- logger.error('failed to prepare testcase:%s' % testcase.name())
- else:
- start_time = time.time()
- for cmd in testcase.cmds:
- Container.exec_cmd(container_id, cmd)
- end_time = time.time()
- duration = end_time - start_time
-
- # testcase.post_condition()
-
- Container.clean(container_id)
+ testcase.run()
db_result = Report.get_result(testcase)
Report.check_result(testcase, db_result)
YardstickChecker.create_log()
Testcase.create_log()
Testsuite.create_log()
+ DockerRunner.create_log()
+ ShellRunner.create_log()
def clean_results_dir():
--- /dev/null
+#!/usr/bin/env python
+#
+# Copyright (c) 2016 grakiss.wanglei@huawei.com and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+
+import utils.dovetail_utils as dt_utils
+import utils.dovetail_logger as dt_logger
+
+from container import Container
+
+
+class DockerRunner(object):
+
+ logger = None
+
+ def __init__(self, testcase):
+ self.testcase = testcase
+
+ @classmethod
+ def create_log(cls):
+ cls.logger = dt_logger.Logger(__file__).getLogger()
+
+ def run(self):
+ Container.pull_image(self.testcase.validate_type())
+ container_id = Container.create(self.testcase.validate_type())
+ self.logger.debug('container id:%s' % container_id)
+
+ if not self.testcase.prepared():
+ cmds = self.testcase.pre_condition()
+ if cmds:
+ for cmd in cmds:
+ Container.exec_cmd(container_id, cmd)
+ self.testcase.prepared(True)
+
+ if not self.testcase.prepare_cmd():
+ self.logger.error('failed to prepare testcase:%s',
+ self.testcase.name())
+ else:
+ for cmd in self.testcase.cmds:
+ Container.exec_cmd(container_id, cmd)
+
+ cmds = self.testcase.post_condition()
+ if cmds:
+ for cmd in cmds:
+ Container.exec_cmd(container_id, cmd)
+ self.testcase.cleaned(True)
+
+ Container.clean(container_id)
+
+
+class FunctestRunner(DockerRunner):
+
+ def __init__(self, testcase):
+ super(FunctestRunner, self).__init__(testcase)
+ self.name = 'functest'
+
+
+class YardstickRunner(DockerRunner):
+
+ def __init__(self, testcase):
+ super(YardstickRunner, self).__init__(testcase)
+ self.name = 'yardstick'
+
+
+class ShellRunner(object):
+
+ logger = None
+
+ @classmethod
+ def create_log(cls):
+ cls.logger = dt_logger.Logger(__file__).getLogger()
+
+ def __init__(self, testcase):
+ super(ShellRunner, self).__init__()
+ self.testcase = testcase
+ self.name = 'shell'
+
+ def run(self):
+ for cmd in self.testcase.cmds:
+ dt_utils.exec_cmd(cmd, self.logger)
+
+
+class TestRunnerFactory(object):
+
+ TEST_RUNNER_MAP = {
+ "functest": FunctestRunner,
+ "yardstick": YardstickRunner,
+ "shell": ShellRunner,
+ }
+
+ @classmethod
+ def create(cls, testcase):
+ try:
+ return cls.TEST_RUNNER_MAP[testcase.validate_type()](testcase)
+ except KeyError:
+ return None
from parser import Parser
from conf.dovetail_config import DovetailConfig as dt_cfg
+from test_runner import TestRunnerFactory
-class Testcase:
+class Testcase(object):
logger = None
def __init__(self, testcase_yaml):
self.testcase = testcase_yaml.values()[0]
+ self.logger.debug('testcase:%s', self.testcase)
self.testcase['passed'] = False
self.cmds = []
self.sub_testcase_status = {}
- self.update_script_testcase(self.script_type(),
- self.script_testcase())
+ self.update_validate_testcase(self.validate_testcase())
@classmethod
def create_log(cls):
cls.logger = dt_logger.Logger(__name__ + '.Testcase').getLogger()
def prepare_cmd(self):
- script_type = self.script_type()
- for cmd in dt_cfg.dovetail_config[script_type]['testcase']['cmds']:
- cmd_lines = Parser.parse_cmd(cmd, self)
- if not cmd_lines:
- return False
- self.cmds.append(cmd_lines)
-
- return True
+ try:
+ self.cmds = self.testcase['validate']['cmds']
+ return True
+ except KeyError:
+ return False
def __str__(self):
return self.testcase
return self.testcase['objective']
def sub_testcase(self):
- return self.testcase['scripts']['sub_testcase_list']
+ try:
+ return self.testcase['report']['sub_testcase_list']
+ except KeyError:
+ return []
def sub_testcase_passed(self, name, passed=None):
if passed is not None:
- self.logger.debug('sub_testcase_passed:%s %s' % (name, passed))
+ self.logger.debug('sub_testcase_passed:%s %s', name, passed)
self.sub_testcase_status[name] = passed
return self.sub_testcase_status[name]
- def script_type(self):
- return self.testcase['scripts']['type']
+ def validate_type(self):
+ return self.testcase['validate']['type']
- def script_testcase(self):
- return self.testcase['scripts']['testcase']
+ def validate_testcase(self):
+ return self.testcase['validate']['testcase']
def exceed_max_retry_times(self):
# logger.debug('retry times:%d' % self.testcase['retry'])
- return self._exceed_max_retry_times(self.script_type(),
- self.script_testcase())
+ return self._exceed_max_retry_times(self.validate_testcase())
def increase_retry(self):
# self.testcase['retry'] = self.testcase['retry'] + 1
# return self.testcase['retry']
- return self._increase_retry(self.script_type(), self.script_testcase())
+ return self._increase_retry(self.validate_testcase())
def passed(self, passed=None):
if passed is not None:
return self.testcase['passed']
def script_result_acquired(self, acquired=None):
- return self._result_acquired(self.script_type(),
- self.script_testcase(), acquired)
+ return self._result_acquired(self.validate_testcase(), acquired)
def pre_condition(self):
- return self.pre_condition_cls(self.script_type())
+ return self.pre_condition_cls(self.validate_type())
def post_condition(self):
- return self.post_condition_cls(self.script_type())
+ return self.post_condition_cls(self.validate_type())
- # testcase in upstream testing project
- script_testcase_list = {'functest': {}, 'yardstick': {}}
+ def run(self):
+ runner = TestRunnerFactory.create(self)
+ try:
+ runner.run()
+ except AttributeError:
+ pass
+ # testcase in upstream testing project
+ # validate_testcase_list = {'functest': {}, 'yardstick': {}, 'shell': {}}
+ validate_testcase_list = {}
# testcase in dovetail
testcase_list = {}
@classmethod
- def prepared(cls, script_type, prepared=None):
+ def prepared(cls, prepared=None):
if prepared is not None:
- cls.script_testcase_list[script_type]['prepared'] = prepared
- return cls.script_testcase_list[script_type]['prepared']
+ cls.validate_testcase_list['prepared'] = prepared
+ return cls.validate_testcase_list['prepared']
@classmethod
- def cleaned(cls, script_type, cleaned=None):
+ def cleaned(cls, cleaned=None):
if cleaned is not None:
- cls.scrpit_testcase_list[script_type]['cleaned'] = cleaned
- return cls.script_testcase_list[script_type]['cleaned']
+ cls.validate_testcase_list['cleaned'] = cleaned
+ return cls.validate_testcase_list['cleaned']
@staticmethod
- def pre_condition_cls(script_type):
- return dt_cfg.dovetail_config[script_type]['pre_condition']
+ def pre_condition_cls(validate_type):
+ return dt_cfg.dovetail_config[validate_type]['pre_condition']
@staticmethod
- def post_condition_cls(script_type):
- return dt_cfg.dovetail_config[script_type]['post_condition']
+ def post_condition_cls(validate_type):
+ return dt_cfg.dovetail_config[validate_type]['post_condition']
@classmethod
- def update_script_testcase(cls, script_type, script_testcase):
- if script_testcase not in cls.script_testcase_list[script_type]:
- cls.script_testcase_list[script_type][script_testcase] = \
+ def update_validate_testcase(cls, testcase_name):
+ if testcase_name not in cls.validate_testcase_list:
+ cls.validate_testcase_list[testcase_name] = \
{'retry': 0, 'acquired': False}
- cls.script_testcase_list[script_type]['prepared'] = False
- cls.script_testcase_list[script_type]['cleaned'] = False
+ cls.validate_testcase_list['prepared'] = False
+ cls.validate_testcase_list['cleaned'] = False
@classmethod
- def _exceed_max_retry_times(cls, script_type, script_testcase):
- retry = cls.script_testcase_list[script_type][script_testcase]['retry']
+ def _exceed_max_retry_times(cls, validate_testcase):
+ retry = cls.validate_testcase_list[validate_testcase]['retry']
return retry > 1
@classmethod
- def _increase_retry(cls, script_type, script_testcase):
- cls.script_testcase_list[script_type][script_testcase]['retry'] += 1
- return cls.script_testcase_list[script_type][script_testcase]['retry']
+ def _increase_retry(cls, validate_testcase):
+ cls.validate_testcase_list[validate_testcase]['retry'] += 1
+ return cls.validate_testcase_list[validate_testcase]['retry']
@classmethod
- def _result_acquired(cls, script_type, testcase, acquired=None):
+ def _result_acquired(cls, testcase, acquired=None):
if acquired is not None:
- cls.script_testcase_list[script_type][testcase]['acquired'] = \
+ cls.validate_testcase_list[testcase]['acquired'] = \
acquired
- return cls.script_testcase_list[script_type][testcase]['acquired']
+ return cls.validate_testcase_list[testcase]['acquired']
@classmethod
def load(cls):
for root, dirs, files in \
- os.walk(dt_cfg.dovetail_config['TESTCASE_PATH']):
+ os.walk(dt_cfg.dovetail_config['TESTCASE_PATH']):
for testcase_file in files:
with open(os.path.join(root, testcase_file)) as f:
testcase_yaml = yaml.safe_load(f)
- cls.testcase_list[testcase_yaml.keys()[0]] = \
- cls(testcase_yaml)
+ case_type = testcase_yaml.values()[0]['validate']['type']
+ testcase = TestcaseFactory.create(case_type, testcase_yaml)
+ if testcase is not None:
+ cls.testcase_list[next(testcase_yaml.iterkeys())] = \
+ testcase
+ else:
+ cls.logger.error('failed to create testcase: %s',
+ testcase_file)
cls.logger.debug(cls.testcase_list)
@classmethod
return None
+class FunctestTestcase(Testcase):
+
+ validate_testcase_list = {}
+
+ def __init__(self, testcase_yaml):
+ super(FunctestTestcase, self).__init__(testcase_yaml)
+ self.name = 'functest'
+
+ def prepare_cmd(self):
+ ret = super(FunctestTestcase, self).prepare_cmd()
+ if not ret:
+ for cmd in \
+ dt_cfg.dovetail_config[self.name]['cmds']:
+ cmd_lines = Parser.parse_cmd(cmd, self)
+ if not cmd_lines:
+ return False
+ self.cmds.append(cmd_lines)
+ return True
+ return ret
+
+
+class YardstickTestcase(Testcase):
+
+ validate_testcase_list = {}
+
+ def __init__(self, testcase_yaml):
+ super(YardstickTestcase, self).__init__(testcase_yaml)
+ self.name = 'yardstick'
+
+
+class ShellTestcase(Testcase):
+
+ validate_testcase_list = {}
+
+ def __init__(self, testcase_yaml):
+ super(ShellTestcase, self).__init__(testcase_yaml)
+ self.name = 'shell'
+
+
+class TestcaseFactory(object):
+ TESTCASE_TYPE_MAP = {
+ 'functest': FunctestTestcase,
+ 'yardstick': YardstickTestcase,
+ 'shell': ShellTestcase,
+ }
+
+ @classmethod
+ def create(cls, testcase_type, testcase_yaml):
+ try:
+ return cls.TESTCASE_TYPE_MAP[testcase_type](testcase_yaml)
+ except KeyError:
+ return None
+
+
class Testsuite:
logger = None
@classmethod
def load(cls):
for root, dirs, files in \
- os.walk(dt_cfg.dovetail_config['COMPLIANCE_PATH']):
+ os.walk(dt_cfg.dovetail_config['COMPLIANCE_PATH']):
for testsuite_yaml in files:
with open(os.path.join(root, testsuite_yaml)) as f:
testsuite_yaml = yaml.safe_load(f)
--- /dev/null
+dovetail.example.tc001:
+ name: dovetail.example.tc001
+ objective: Bulk creation and deletion of IPv6 networks, ports and subnets
+ validate:
+ type: functest
+ testcase: tempest_smoke_serial
+ pre_condition:
+ - 'echo test for precondition'
+ cmds:
+ - 'functest env prepare'
+ - 'functest testcase run {{validate_testcase}}'
+ post_condition:
+ - 'echo test for postcondition'
+ report:
+ sub_testcase_list:
+ - tempest.api.network.test_networks.BulkNetworkOpsIpV6Test.test_bulk_create_delete_network
+ - tempest.api.network.test_networks.BulkNetworkOpsIpV6Test.test_bulk_create_delete_port
+ - tempest.api.network.test_networks.BulkNetworkOpsIpV6Test.test_bulk_create_delete_subnet
--- /dev/null
+dovetail.example.tc002:
+ name: dovetail.example.tc002
+ objective: VIM ipv6 operations, to create/update/delete an IPv6 network and subnet
+ validate:
+ type: shell
+ testcase: "run shell"
+ pre_condition:
+ - "echo pre_condition"
+ cmds:
+ - "echo test2"
+ post_condition:
+ - "echo post_condition"
+ report:
+ sub_testcase_list:
+
dovetail.ipv6.tc001:
name: dovetail.ipv6.tc001
objective: Bulk creation and deletion of IPv6 networks, ports and subnets
- scripts:
+ validate:
type: functest
testcase: tempest_smoke_serial
+ pre_condition:
+ - 'echo test for precondition'
+ cmds:
+ - 'functest env prepare'
+ - 'functest testcase run {{validate_testcase}}'
+ post_condition:
+ - 'echo test for postcondition'
+ report:
sub_testcase_list:
- tempest.api.network.test_networks.BulkNetworkOpsIpV6Test.test_bulk_create_delete_network
- tempest.api.network.test_networks.BulkNetworkOpsIpV6Test.test_bulk_create_delete_port
dovetail.ipv6.tc002:
name: dovetail.ipv6.tc002
objective: VIM ipv6 operations, to create/update/delete an IPv6 network and subnet
- scripts:
+ validate:
type: functest
testcase: tempest_smoke_serial
+ report:
sub_testcase_list:
- tempest.api.network.test_networks.NetworksIpV6Test.test_create_update_delete_network_subnet
- tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_create_update_delete_network_subnet
+
dovetail.ipv6.tc003:
name: dovetail.ipv6.tc003
objective: VIM ipv6 operations, to check external network visibility
- scripts:
+ validate:
type: functest
testcase: tempest_smoke_serial
+ report:
sub_testcase_list:
- tempest.api.network.test_networks.NetworksIpV6Test.test_external_network_visibility
- tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_external_network_visibility
dovetail.ipv6.tc004:
name: dovetail.ipv6.tc004
objective: VIM ipv6 operations, to list IPv6 networks and subnets of a tenant
- scripts:
+ validate:
type: functest
testcase: tempest_smoke_serial
+ report:
sub_testcase_list:
- tempest.api.network.test_networks.NetworksIpV6Test.test_list_networks
- tempest.api.network.test_networks.NetworksIpV6Test.test_list_subnets
dovetail.ipv6.tc005:
name: dovetail.ipv6.tc005
objective: VIM ipv6 operations, to show information of an IPv6 network and subnet
- scripts:
+ validate:
type: functest
testcase: tempest_smoke_serial
+ report:
sub_testcase_list:
- tempest.api.network.test_networks.NetworksIpV6Test.test_show_network
- tempest.api.network.test_networks.NetworksIpV6Test.test_show_subnet
dovetail.ipv6.tc006:
name: dovetail.ipv6.tc006
objective: VIM ipv6 operations, to create an IPv6 port in allowed allocation pools
- scripts:
+ validate:
type: functest
testcase: tempest_smoke_serial
+ report:
sub_testcase_list:
- tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_port_in_allowed_allocation_pools
dovetail.ipv6.tc007:
name: dovetail.ipv6.tc007
objective: VIM ipv6 operations, to create an IPv6 port without security groups
- scripts:
+ validate:
type: functest
testcase: tempest_smoke_serial
+ report:
sub_testcase_list:
- tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_port_with_no_securitygroups
dovetail.ipv6.tc008:
name: dovetail.ipv6.tc008
objective: VIM ipv6 operations, to create/update/delete an IPv6 port
- scripts:
+ validate:
type: functest
testcase: tempest_smoke_serial
+ report:
sub_testcase_list:
- tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_update_delete_port
dovetail.ipv6.tc009:
name: dovetail.ipv6.tc009
objective: VIM ipv6 operations, to list IPv6 ports of a tenant
- scripts:
+ validate:
type: functest
testcase: tempest_smoke_serial
+ report:
sub_testcase_list:
- tempest.api.network.test_ports.PortsIpV6TestJSON.test_list_ports
dovetail.ipv6.tc010:
name: dovetail.ipv6.tc010
objective: VIM ipv6 operations, to show information of an IPv6 port
- scripts:
+ validate:
type: functest
testcase: tempest_smoke_serial
+ report:
sub_testcase_list:
- tempest.api.network.test_ports.PortsIpV6TestJSON.test_show_port
dovetail.ipv6.tc011:
name: dovetail.ipv6.tc011
objective: VIM ipv6 operations, to add multiple interfaces for an IPv6 router
- scripts:
+ validate:
type: functest
testcase: tempest_smoke_serial
+ report:
sub_testcase_list:
- tempest.api.network.test_routers.RoutersIpV6Test.test_add_multiple_router_interfaces
dovetail.ipv6.tc012:
name: dovetail.ipv6.tc012
objective: VIM ipv6 operations, to add and remove an IPv6 router interface with port_id
- scripts:
+ validate:
type: functest
testcase: tempest_smoke_serial
+ report:
sub_testcase_list:
- tempest.api.network.test_routers.RoutersIpV6Test.test_add_remove_router_interface_with_port_id
dovetail.ipv6.tc013:
name: dovetail.ipv6.tc013
objective: VIM ipv6 operations, to add and remove an IPv6 router interface with subnet_id
- scripts:
+ validate:
type: functest
testcase: tempest_smoke_serial
+ report:
sub_testcase_list:
- tempest.api.network.test_routers.RoutersIpV6Test.test_add_remove_router_interface_with_subnet_id
dovetail.ipv6.tc014:
name: dovetail.ipv6.tc014
objective: VIM ipv6 operations, to create, update, delete, list and show an IPv6 router
- scripts:
+ validate:
type: functest
testcase: tempest_smoke_serial
+ report:
sub_testcase_list:
- tempest.api.network.test_routers.RoutersIpV6Test.test_create_show_list_update_delete_router
dovetail.ipv6.tc015:
name: dovetail.ipv6.tc015
objective: VIM ipv6 operations, to create, update, delete, list and show an IPv6 security group
- scripts:
+ validate:
type: functest
testcase: tempest_smoke_serial
+ report:
sub_testcase_list:
- tempest.api.network.test_security_groups.SecGroupIPv6Test.test_create_list_update_show_delete_security_group
dovetail.ipv6.tc016:
name: dovetail.ipv6.tc016
objective: VIM ipv6 operations, to create, delete and show security group rules
- scripts:
+ validate:
type: functest
testcase: tempest_smoke_serial
+ report:
sub_testcase_list:
- tempest.api.network.test_security_groups.SecGroupIPv6Test.test_create_show_delete_security_group_rule
dovetail.ipv6.tc017:
name: dovetail.ipv6.tc017
objective: VIM ipv6 operations, to list all security groups
- scripts:
+ validate:
type: functest
testcase: tempest_smoke_serial
+ report:
sub_testcase_list:
- tempest.api.network.test_security_groups.SecGroupIPv6Test.test_list_security_groups
dovetail.ipv6.tc018:
name: dovetail.ipv6.tc018
objective: VIM ipv6 operations, to show information of an IPv6 port
- scripts:
+ validate:
type: functest
testcase: tempest_full_parallel
+ report:
sub_testcase_list:
- tempest.scenario.test_network_v6.TestGettingAddress.test_dhcp6_stateless_from_os
dovetail.ipv6.tc019:
name: dovetail.ipv6.tc019
objective: VIM ipv6 operations, to do IPv6 address assignment - dual stack, DHCPv6 stateless
- scripts:
+ validate:
type: functest
testcase: tempest_full_parallel
+ report:
sub_testcase_list:
- tempest.scenario.test_network_v6.TestGettingAddress.test_dualnet_dhcp6_stateless_from_os
dovetail.ipv6.tc020:
name: dovetail.ipv6.tc020
objective: VIM ipv6 operations, to do IPv6 Address Assignment - Multiple Prefixes, DHCPv6 Stateless
- scripts:
+ validate:
type: functest
testcase: tempest_full_parallel
+ report:
sub_testcase_list:
- tempest.scenario.test_network_v6.TestGettingAddress.test_multi_prefix_dhcpv6_stateless
dovetail.ipv6.tc021:
name: dovetail.ipv6.tc021
objective: VIM ipv6 operations, to do IPv6 Address Assignment - Dual Stack, Multiple Prefixes, DHCPv6 Stateless
- scripts:
+ validate:
type: functest
testcase: tempest_full_parallel
+ report:
sub_testcase_list:
- tempest.scenario.test_network_v6.TestGettingAddress.test_dualnet_multi_prefix_dhcpv6_stateless
dovetail.ipv6.tc022:
name: dovetail.ipv6.tc022
objective: VIM ipv6 operations, to do IPv6 Address Assignment - SLAAC
- scripts:
+ validate:
type: functest
testcase: tempest_full_parallel
+ report:
sub_testcase_list:
- tempest.scenario.test_network_v6.TestGettingAddress.test_slaac_from_os
dovetail.ipv6.tc023:
name: dovetail.ipv6.tc023
objective: VIM ipv6 operations, to do IPv6 Address Assignment - Dual Stack, SLAAC
- scripts:
+ validate:
type: functest
testcase: tempest_full_parallel
+ report:
sub_testcase_list:
- tempest.scenario.test_network_v6.TestGettingAddress.test_dualnet_dhcp6_stateless_from_os
dovetail.ipv6.tc024:
name: dovetail.ipv6.tc024
objective: VIM ipv6 operations, to do IPv6 address assignment - multiple prefixes, SLAAC
- scripts:
+ validate:
type: functest
testcase: tempest_full_parallel
+ report:
sub_testcase_list:
- tempest.scenario.test_network_v6.TestGettingAddress.test_multi_prefix_slaac
dovetail.ipv6.tc025:
name: dovetail.ipv6.tc025
objective: VIM ipv6 operations, to do IPv6 address assignment - dual stack, multiple prefixes, SLAAC
- scripts:
+ validate:
type: functest
testcase: tempest_full_parallel
+ report:
sub_testcase_list:
- tempest.scenario.test_network_v6.TestGettingAddress.test_dualnet_multi_prefix_slaac
dovetail.nfvi.tc001:
name: dovetail.nfvi.tc001
objective: testing for vping using ssh
- scripts:
+ validate:
type: functest
testcase: vping_ssh
+ report:
sub_testcase_list:
dovetail.nfvi.tc002:
name: dovetail.nfvi.tc002
objective: testing for vping using userdata
- scripts:
+ validate:
type: functest
testcase: vping_userdata
+ report:
sub_testcase_list:
dovetail.vimops.tc001:
name: dovetail.vimops.tc001
objective: Glance images v2 index
- scripts:
+ validate:
type: functest
testcase: tempest_full_parallel
+ report:
sub_testcase_list:
- tempest.api.image.v2.test_images.ListImagesTest.test_list_no_params
dovetail.vimops.tc002:
name: dovetail.vimops.tc002
objective: Glance Images v2 Delete
- scripts:
+ validate:
type: functest
testcase: tempest_full_parallel
+ report:
sub_testcase_list:
- tempest.api.image.v2.test_images.BasicOperationsImagesTest.test_delete_image
- tempest.api.image.v2.test_images_negative.ImagesNegativeTest.test_delete_image_null_id
dovetail.vimops.tc003:
name: dovetail.vimops.tc003
objective: Glance images v2 list
- scripts:
+ validate:
type: functest
testcase: tempest_full_parallel
+ report:
sub_testcase_list:
- tempest.api.image.v2.test_images.ListImagesTest.test_get_image_schema
- tempest.api.image.v2.test_images.ListImagesTest.test_get_images_schema
dovetail.vimops.tc004:
name: dovetail.vimops.tc004
objective: Glance images v2 list
- scripts:
+ validate:
type: functest
testcase: tempest_full_parallel
+ report:
sub_testcase_list:
- tempest.api.image.v2.test_images.ListImagesTest.test_list_images_param_container_format
- tempest.api.image.v2.test_images.ListImagesTest.test_list_images_param_disk_format
dovetail.vimops.tc005:
name: dovetail.vimops.tc005
objective: Glance images v2 import
- scripts:
+ validate:
type: functest
testcase: tempest_full_parallel
+ report:
sub_testcase_list:
- tempest.api.image.v2.test_images.BasicOperationsImagesTest.test_register_upload_get_image_file
- tempest.api.image.v2.test_images_negative.ImagesNegativeTest.test_register_with_invalid_container_format
dovetail.vimops.tc006:
name: dovetail.vimops.tc006
objective: Glance images v2 update
- scripts:
+ validate:
type: functest
testcase: tempest_full_parallel
+ report:
sub_testcase_list:
- tempest.api.image.v2.test_images.BasicOperationsImagesTest.test_update_image
- tempest.api.image.v2.test_images_tags.ImagesTagsTest.test_update_delete_tags_for_image
def test_parser_cmd(self):
"""Test whether the command is correctly parsed."""
- mock_cmd = "python /functest/ci/run_tests.py -t {{script_testcase}} -r"
+ mock_cmd = "python /functest/ci/run_tests.py "\
+ "-t {{validate_testcase}} -r"
with open(os.path.join(self.test_path, 'test_testcase.yaml')) as f:
mock_testcase_yaml = yaml.safe_load(f)
MockTestcase = type('Testcase', (object,), {})
def test_parser_cmd_fail(self):
"""Test whether the command is correctly parsed."""
- mock_cmd = "python /functest/ci/run_tests.py -t {{script_testcase}} -r"
+ mock_cmd = "python /functest/ci/run_tests.py "\
+ "-t {{validate_testcase}} -r"
mock_testcase_yaml = {}
MockTestcase = type('Testcase', (object,), {})
mock_testcase = MockTestcase()
"None -r")
self.assertEqual(expected_output, output)
+
if __name__ == '__main__':
unittest.main()
dovetail.ipv6.tc001:
name: dovetail.ipv6.tc001
objective: VIM ipv6 operations, to create/delete network, port and subnet in bulk operation
- scripts:
+ validate:
type: functest
testcase: tempest_smoke_serial
+ report:
sub_testcase_list:
- tempest.api.network.test_networks.BulkNetworkOpsIpV6Test.test_bulk_create_delete_network
- tempest.api.network.test_networks.BulkNetworkOpsIpV6Test.test_bulk_create_delete_port