--- /dev/null
+##############################################################################
+# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import logging
+import requests
+
+from oslo_serialization import jsonutils
+
+from yardstick.common import constants as consts
+
+logger = logging.getLogger(__name__)
+
+
+def post(url, data=None):
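+    """POST 'data' as JSON to consts.BASE_URL + 'url'.
+
+    Returns the decoded JSON reply, e.g. {'status': 1, 'result': ...}.
+    """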
+ url = '{}{}'.format(consts.BASE_URL, url)
+    data = jsonutils.dumps(data or {})  # treat a missing payload as {}
+ headers = {'Content-Type': 'application/json'}
+ try:
+ response = requests.post(url, data=data, headers=headers)
+ result = response.json()
+ logger.debug('The result is: %s', result)
+
+ return result
+ except Exception as e:
+ logger.exception('Failed: %s', e)
+ raise
+
+
+def get(url):
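+    """GET consts.BASE_URL + 'url' and return the decoded JSON reply.
+
+    Illustrative use: get('/yardstick/testcases')['result'] yields the
+    test case list served by the API.
+    """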
+ url = '{}{}'.format(consts.BASE_URL, url)
+ response = requests.get(url)
+ return response.json()
--- /dev/null
+# ############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+# ############################################################################
+from yardstick.benchmark.core.testcase import Testcase
+from yardstick.benchmark.core import Param
+from api.utils import common as common_utils
+
+
+def default(args):
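+    """Default GET handler: fall through to listing all test cases."""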
+ return listAllTestcases(args)
+
+
+def listAllTestcases(args):
+ param = Param(args)
+ testcase_list = Testcase().list_all(param)
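+    # result_handler wraps the payload as a JSON reply carrying a status
+    # flag (1 for success) and the data under the 'result' key.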
+ return common_utils.result_handler(1, testcase_list)
urlpatterns = [
Url('/yardstick/asynctask', views.Asynctask, 'asynctask'),
+ Url('/yardstick/testcases', views.Testcases, 'testcases'),
Url('/yardstick/testcases/release/action', views.ReleaseAction, 'release'),
Url('/yardstick/testcases/samples/action', views.SamplesAction, 'samples'),
Url('/yardstick/testsuites/action', views.TestsuitesAction, 'testsuites'),
return self._dispatch_get()
+
+
+class Testcases(ApiResource):
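+    """The /yardstick/testcases resource; GET returns the test case list."""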
+ def get(self):
+ return self._dispatch_get()
+
+
class ReleaseAction(ApiResource):
@swag_from(os.getcwd() + '/swagger/docs/release_action.yaml')
def post(self):
---
-# Yardstick TC001 config file
-# Measure network throughput using pktgen
-# Different amounts of flows are tested with, from 2 up to 1001000
-# All tests are run twice. First twice with the least amount of
-#ports and further on.
schema: "yardstick:task:0.1"
+description: >
+ Yardstick TC001 config file;
+ Measure network throughput using pktgen;
+  Different amounts of flows are tested, from 2 up to 1001000;
+  All tests are run twice each, starting with the least amount of ports and moving upwards.
scenarios:
{% for num_ports in [1, 10, 50, 100, 500, 1000] %}
---
-# measure network latency using ping
schema: "yardstick:task:0.1"
+description: >
+ Yardstick TC002 config file;
+  Measure network latency using ping.
+
{% set image = image or "cirros-0.3.3" %}
scenarios:
{% for i in range(2) %}
---
-# Yardstick TC005 config file
-# Measure Storage IOPS, throughput and latency using fio
schema: "yardstick:task:0.1"
+description: >
+ Yardstick TC005 config file;
+ Measure Storage IOPS, throughput and latency using fio.
+
scenarios:
{% for rw in ['read', 'write', 'randwrite', 'randread', 'rw'] %}
{% for bs in ['4k', '64k', '1024k'] %}
---
+
schema: "yardstick:task:0.1"
+description: >
+ Yardstick TC006 config file.
scenarios:
-
---
-# Sample benchmark task config file
-# vTC
schema: "yardstick:task:0.1"
+description: >
+ Sample benchmark task config file;
+ vTC.
scenarios:
-
---
-# Yardstick TC008 config file
-# Measure network throughput and packet loss using Pktgen.
-# Different amount of flows, from 2 up to 1001000, in combination
-# with different packet sizes are run in each test.
-# Each combination of packet size and flow amount is run 10 times.
-# First 10 times with the smallest packet size, starting with the
-# least amount of ports/flows, then next amount of ports with same
-# packet size, and so on. The test sequence continues with the next
-# packet size, with same ports/flows sequence as before.
schema: "yardstick:task:0.1"
+description: >
+ Yardstick TC008 config file;
+ Measure network throughput and packet loss using Pktgen;
+  Different amounts of flows, from 2 up to 1001000, in combination
+ with different packet sizes are run in each test.
+ Each combination of packet size and flow amount is run 10 times.
+ First 10 times with the smallest packet size, starting with the
+  least amount of ports/flows, then the next amount of ports with the
+  same packet size, and so on. The test sequence continues with the next
+  packet size, with the same ports/flows sequence as before.
scenarios:
{% for pkt_size in [64, 128, 256, 512, 1024, 1280, 1518] %}
---
-# Yardstick TC009 config file
-# Measure network throughput and packet loss using pktgen.
-# Different amounts of flows are tested with, from 2 up to 1001000.
-# All tests are run 10 times each. First 10 times with the least
-# amount of ports, then 10 times with the next amount of ports,
-# and so on until all packet sizes have been run with.
schema: "yardstick:task:0.1"
+description: >
+ Yardstick TC009 config file;
+ Measure network throughput and packet loss using pktgen;
+  Different amounts of flows are tested, from 2 up to 1001000;
+  All tests are run 10 times each. First 10 times with the least
+  amount of ports, then 10 times with the next amount of ports,
+  and so on until all amounts of ports have been tested.
scenarios:
{% for num_ports in [1, 10, 50, 100, 500, 1000] %}
---
-# Yardstick TC010 config file
-# measure memory read latency using lmbench
schema: "yardstick:task:0.1"
+description: >
+ Yardstick TC010 config file;
+  Measure memory read latency using lmbench.
scenarios:
-
---
-# Yardstick TC011 config file
-# Measure packet delay variation (jitter) using iperf3
schema: "yardstick:task:0.1"
+description: >
+ Yardstick TC011 config file;
+ Measure packet delay variation (jitter) using iperf3.
scenarios:
-
---
-# Yardstick TC012 config file
-# Measure memory read and write bandwidth using lmbench
schema: "yardstick:task:0.1"
+description: >
+ Yardstick TC012 config file;
+ Measure memory read and write bandwidth using lmbench.
scenarios:
-
---
-# Yardstick TC014 config file
-# Measure Processing speed using unixbench
schema: "yardstick:task:0.1"
+description: >
+ Yardstick TC014 config file;
+  Measure processing speed using unixbench.
scenarios:
-
---
-# Sample test case for the HA of controller node Openstack service
schema: "yardstick:task:0.1"
+description: >
+  Sample test case for the HA of controller node OpenStack service.
scenarios:
-
---
+
schema: "yardstick:task:0.1"
+description: >
+ Yardstick TC020 config file.
scenarios:
-
---
+
schema: "yardstick:task:0.1"
+description: >
+ Yardstick TC021 config file.
scenarios:
-
---
-# Sample test case for the HA of OpenStack Controll Node abnormally shutdown
schema: "yardstick:task:0.1"
+description: >
+  Sample test case for the HA of OpenStack Controller Node abnormally shutdown.
scenarios:
-
---
-# Yardstick TC027 config file
-# Measure IPV6 network latency using ping6
schema: "yardstick:task:0.1"
+description: >
+ Yardstick TC027 config file;
+  Measure IPv6 network latency using ping6.
+
{% set openrc = openrc or "/opt/admin-openrc.sh" %}
{% set external_network = external_network or "ext-net" %}
{% set pod_info = pod_info or "etc/yardstick/nodes/compass_sclab_physical/pod.yaml" %}
---
-# Yardstick TC037 config file
-# Measure network throughput and packet loss using pktgen.
-# Different amounts of flows are tested with, from 2 up to 1001000.
-# All tests are run 2 times each. First 2 times with the least
-# amount of ports, then 2 times with the next amount of ports,
-# and so on until all packet sizes have been run with.
-#
-# During the measurements system load and network latency are
-# recorded/measured using ping and mpstat, respectively.
schema: "yardstick:task:0.1"
+description: >
+ Yardstick TC037 config file;
+ Measure network throughput and packet loss using pktgen;
+  Different amounts of flows are tested, from 2 up to 1001000;
+  All tests are run 2 times each. First 2 times with the least
+  amount of ports, then 2 times with the next amount of ports,
+  and so on until all amounts of ports have been tested;
+  During the measurements system load and network latency are
+  recorded/measured using ping and mpstat, respectively.
scenarios:
-
---
-# Yardstick TC038 config file
-# Measure network throughput and packet loss using pktgen.
-# Different amounts of flows are tested with, from 2 up to 1001000.
-# All tests are run 10 times each. First 10 times with the least
-# amount of ports, then 10 times with the next amount of ports,
-# and so on until all packet sizes have been run with.
-#
-# During the measurements system load and network latency are
-# recorded/measured using ping and mpstat, respectively.
schema: "yardstick:task:0.1"
+description: >
+ Yardstick TC038 config file;
+ Measure network throughput and packet loss using pktgen;
+  Different amounts of flows are tested, from 2 up to 1001000;
+  All tests are run 10 times each. First 10 times with the least
+  amount of ports, then 10 times with the next amount of ports,
+  and so on until all amounts of ports have been tested;
+  During the measurements system load and network latency are
+  recorded/measured using ping and mpstat, respectively.
scenarios:
-
---
-# Yardstick TC040 config file
-# Running Parser Yang-to-Tosca module as a tool, validating output against expected outcome
schema: "yardstick:task:0.1"
+description: >
+ Yardstick TC040 config file;
+  Run the Parser Yang-to-Tosca module as a tool, validating output against the expected outcome.
scenarios:
---
-# Yardstick TC042 config file
-# Measure network latency using testpmd and pktgen-dpdk
schema: "yardstick:task:0.1"
+description: >
+ Yardstick TC042 config file;
+ Measure network latency using testpmd and pktgen-dpdk.
scenarios:
-
---
-# Yardstick TC043 config file
-# Measure latency between NFVI nodes using ping
schema: "yardstick:task:0.1"
+description: >
+ Yardstick TC043 config file;
+ Measure latency between NFVI nodes using ping.
+
{% set host = host or "node1.LF" %}
{% set target = target or "node2.LF" %}
{% set pod_info = pod_info or "etc/yardstick/nodes/compass_sclab_physical/pod.yaml" %}
---
-# Test case for TC045 :Control node Openstack service down - neutron server
schema: "yardstick:task:0.1"
+description: >
+  Test case for TC045: Control node OpenStack service down - neutron server.
scenarios:
-
---
-# Test case for TC046 :Control node Openstack service down - keystone
schema: "yardstick:task:0.1"
+description: >
+  Test case for TC046: Control node OpenStack service down - keystone.
scenarios:
-
---
-# Test case for TC047 :Control node Openstack service down - glance api
schema: "yardstick:task:0.1"
+description: >
+  Test case for TC047: Control node OpenStack service down - glance api.
scenarios:
-
---
-# Test case for TC048 :Control node Openstack service down - cinder api
schema: "yardstick:task:0.1"
+description: >
+  Test case for TC048: Control node OpenStack service down - cinder api.
scenarios:
-
---
-# Test case for TC049 :Control node Openstack service down - swift proxy
schema: "yardstick:task:0.1"
+description: >
+  Test case for TC049: Control node OpenStack service down - swift proxy.
scenarios:
-
---
-# Test case for TC050 :OpenStack Controller Node Network High Availability
-# This test case is written by new scenario-based HA testing framework
schema: "yardstick:task:0.1"
+description: >
+  Test case for TC050: OpenStack Controller Node Network High Availability;
+  This test case is written with the new scenario-based HA testing framework.
+
scenarios:
-
type: "GeneralHA"
---
-# Test case for TC051 :OpenStack Controller Node CPU Overload High Availability
-# This test case is written by new scenario-based HA testing framework
schema: "yardstick:task:0.1"
+description: >
+  Test case for TC051: OpenStack Controller Node CPU Overload High Availability;
+  This test case is written with the new scenario-based HA testing framework.
+
scenarios:
-
type: "GeneralHA"
---
-# Test case for TC052 :OpenStack Controller Node Disk I/O Block High Availability
-# This test case is written by new scenario-based HA testing framework
schema: "yardstick:task:0.1"
+description: >
+  Test case for TC052: OpenStack Controller Node Disk I/O Block High Availability;
+  This test case is written with the new scenario-based HA testing framework.
+
scenarios:
-
type: "GeneralHA"
---
-# Test case for TC053 :Openstack Controller Load Balance Service High Availability
-# This test case is written by new scenario-based HA testing framework
schema: "yardstick:task:0.1"
+description: >
+  Test case for TC053: OpenStack Controller Load Balance Service High Availability;
+  This test case is written with the new scenario-based HA testing framework.
+
scenarios:
-
type: "GeneralHA"
---
-# Test case for TC054 :OpenStack VIP Master Node abnormally shutdown High Availability
-# This test case is written by new scenario-based HA testing framework
schema: "yardstick:task:0.1"
+description: >
+  Test case for TC054: OpenStack VIP Master Node abnormally shutdown High Availability;
+  This test case is written with the new scenario-based HA testing framework.
+
scenarios:
-
type: "GeneralHA"
---
-# Yardstick TC055 config file
-# Collect hardware specification from /proc/cpuinfo /proc/meminfo
-# compute capacity and scale.
-
-# the results have
-# number of CPUs, number of physical cores in a single CPU
-# number of logical cores, total memory size
-# cache size per CPU, total cache size
-# HT (Hyper-Thread) support status, 1 for open, 0 for close
schema: "yardstick:task:0.1"
+description: >
+ Yardstick TC055 config file;
+  Collect hardware specification from /proc/cpuinfo and /proc/meminfo;
+  compute capacity and scale.
+  The results include:
+  number of CPUs, number of physical cores in a single CPU,
+  number of logical cores, total memory size,
+  cache size per CPU, total cache size,
+  HT (Hyper-Thread) support status (1 for open, 0 for close).
+
{% set host = host or "node5.yardstick-TC055" %}
{% set pod_info = pod_info or "etc/yardstick/nodes/compass_sclab_virtual/pod.yaml" %}
scenarios:
---
-# Yardstick TC063 config file
-# Measure disk size, block size and disk utilization using fdisk and iostat
schema: "yardstick:task:0.1"
+description: >
+ Yardstick TC063 config file;
+ Measure disk size, block size and disk utilization using fdisk and iostat.
+
{% set host = host or "node5.yardstick-TC063" %}
{% set pod_info = pod_info or "etc/yardstick/nodes/compass_sclab_virtual/pod.yaml" %}
---
-# Yardstick TC069 config file
-# Measure memory read and write bandwidth using ramspeed
schema: "yardstick:task:0.1"
+description: >
+ Yardstick TC069 config file;
+ Measure memory read and write bandwidth using ramspeed.
scenarios:
-
---
-# Yardstick TC070 config file
-# Measure network throughput and packet loss using pktgen.
-# Different amounts of flows are tested with, from 2 up to 1001000.
-# All tests are run 2 times each. First 2 times with the least
-# amount of ports, then 2 times with the next amount of ports,
-# and so on until all packet sizes have been run with.
-#
-# During the measurements memory usage statistics and network latency are
-# recorded/measured using free and ping, respectively.
schema: "yardstick:task:0.1"
+description: >
+ Yardstick TC070 config file;
+ Measure network throughput and packet loss using pktgen;
+  Different amounts of flows are tested, from 2 up to 1001000;
+  All tests are run 2 times each. First 2 times with the least
+  amount of ports, then 2 times with the next amount of ports,
+  and so on until all amounts of ports have been tested;
+  During the measurements memory usage statistics and network latency are
+  recorded/measured using free and ping, respectively.
scenarios:
-
---
-# Yardstick TC071 config file
-# Measure cache hit/miss ratio and usage, network throughput and latency.
-# Different amounts of flows are tested with, from 2 up to 1001000.
-# All tests are run 2 times each. First 2 times with the least
-# amount of ports, then 2 times with the next amount of ports,
-# and so on until all packet sizes have been run with.
-#
-# During the measurements cache hit/miss ration, cache usage statistics and
-# network latency are recorded/measured using cachestat and ping, respectively.
schema: "yardstick:task:0.1"
+description: >
+ Yardstick TC071 config file;
+ Measure cache hit/miss ratio and usage, network throughput and latency;
+  Different amounts of flows are tested, from 2 up to 1001000;
+  All tests are run 2 times each. First 2 times with the least
+  amount of ports, then 2 times with the next amount of ports,
+  and so on until all amounts of ports have been tested;
+  During the measurements cache hit/miss ratio, cache usage statistics and
+  network latency are recorded/measured using cachestat and ping, respectively.
scenarios:
-
---
-# Yardstick TC072 config file
-# Measure network throughput and packet loss using pktgen.
-# Different amounts of flows are tested with, from 2 up to 1001000.
-# All tests are run 2 times each. First 2 times with the least
-# amount of ports, then 2 times with the next amount of ports,
-# and so on until all packet sizes have been run with.
-#
-# During the measurements network usage statistics and network latency are
-# recorded/measured using sar and ping, respectively.
schema: "yardstick:task:0.1"
+description: >
+ Yardstick TC072 config file;
+ Measure network throughput and packet loss using pktgen;
+  Different amounts of flows are tested, from 2 up to 1001000;
+  All tests are run 2 times each. First 2 times with the least
+  amount of ports, then 2 times with the next amount of ports,
+  and so on until all amounts of ports have been tested;
+  During the measurements network usage statistics and network latency are
+  recorded/measured using sar and ping, respectively.
scenarios:
-
---
-# Yardstick TC073 config file
-# measure network latency and throughput using netperf
-# There are two sample scenarios: bulk test and request/response test
-# In bulk test, UDP_STREAM and TCP_STREAM can be used
-# send_msg_size and recv_msg_size are options of bulk test
-# In req/rsp test, TCP_RR TCP_CRR UDP_RR can be used
-# req_rsp_size is option of req/rsp test
schema: "yardstick:task:0.1"
+description: >
+ Yardstick TC073 config file;
+  Measure network latency and throughput using netperf;
+  There are two sample scenarios: bulk test and request/response test;
+  In the bulk test, UDP_STREAM and TCP_STREAM can be used;
+  send_msg_size and recv_msg_size are options of the bulk test;
+  In the req/rsp test, TCP_RR, TCP_CRR and UDP_RR can be used;
+  req_rsp_size is an option of the req/rsp test.
+
{% set host = host or "node1.LF" %}
{% set target = target or "node2.LF" %}
{% set pod_info = pod_info or "etc/yardstick/nodes/compass_sclab_physical/pod.yaml" %}
---
-# Test case for TC074 StorPerf benchmark task config file
-# StorPerf is a tool to measure block and object storage performance in an NFVI
schema: "yardstick:task:0.1"
+description: >
+ Test case for TC074 StorPerf benchmark task config file;
+ StorPerf is a tool to measure block and object storage performance in an NFVI.
+
{% set public_network = public_network or "ext-net" %}
{% set StorPerf_ip = StorPerf_ip or "192.168.200.1" %}
scenarios:
---
-# Yardstick TC075 config file
-# Measure network capacity and scale.
-# Measure number of connections, number of frames received
schema: "yardstick:task:0.1"
+description: >
+ Yardstick TC075 config file;
+ Measure network capacity and scale.
+  Measure number of connections, number of frames received.
+
{% set host = host or "node1.LF" %}
{% set pod_info = pod_info or "etc/yardstick/nodes/compass_sclab_virtual/pod.yaml" %}
def test_list_all(self):
t = testcase.Testcase()
result = t.list_all("")
- self.assertEqual(result, True)
+ self.assertIsInstance(result, list)
def test_show(self):
t = testcase.Testcase()
casename = Arg()
result = t.show(casename)
- self.assertEqual(result, True)
+ self.assertTrue(result)
def main():
--- /dev/null
+import unittest
+from mock import patch
+
+from yardstick.cmd.commands.testcase import TestcaseCommands
+
+
+class TestcaseCommandsUT(unittest.TestCase):
+ @patch('yardstick.cmd.commands.testcase.TestcaseCommands._format_print')
+ @patch('yardstick.cmd.commands.client')
+ def test_do_list(self, mock_client, mock_print):
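+        # The HTTP client is mocked out, so no live API server is needed;
+        # do_list should unpack 'result' and hand it to _format_print.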
+ mock_client.get.return_value = {'result': []}
+ TestcaseCommands().do_list({})
+ self.assertTrue(mock_print.called)
+
+
+def main():
+ unittest.main()
+
+
+if __name__ == '__main__':
+ main()
""" Handler for yardstick command 'testcase' """
from __future__ import absolute_import
from __future__ import print_function
+
import os
import yaml
-import sys
+import logging
-from yardstick.benchmark.core import print_hbar
from yardstick.common.task_template import TaskTemplate
-from yardstick.definitions import YARDSTICK_ROOT_PATH
+from yardstick.common import constants as consts
+
+LOG = logging.getLogger(__name__)
class Testcase(object):
Set of commands to discover and display test cases.
"""
- def __init__(self):
- self.test_case_path = YARDSTICK_ROOT_PATH + 'tests/opnfv/test_cases/'
- self.testcase_list = []
-
def list_all(self, args):
"""List existing test cases"""
- try:
- testcase_files = os.listdir(self.test_case_path)
- except Exception as e:
- print("Failed to list dir:\n%(path)s\n%(err)s\n"
- % {"path": self.test_case_path, "err": e})
- raise e
- testcase_files.sort()
-
- for testcase_file in testcase_files:
- record = self._get_record(testcase_file)
- self.testcase_list.append(record)
-
- self._format_print(self.testcase_list)
- return True
+ testcase_files = self._get_testcase_file_list()
+ testcase_list = [self._get_record(f) for f in testcase_files]
- def show(self, args):
- """Show details of a specific test case"""
- testcase_name = args.casename[0]
- testcase_path = self.test_case_path + testcase_name + ".yaml"
+ return testcase_list
+
+ def _get_testcase_file_list(self):
try:
- with open(testcase_path) as f:
- try:
- testcase_info = f.read()
- print(testcase_info)
-
- except Exception as e:
- print("Failed to load test cases:"
- "\n%(testcase_file)s\n%(err)s\n"
- % {"testcase_file": testcase_path, "err": e})
- raise e
- except IOError as ioerror:
- sys.exit(ioerror)
- return True
+ testcase_files = sorted(os.listdir(consts.TESTCASE_DIR))
+ except OSError:
+ LOG.exception('Failed to list dir:\n%s\n', consts.TESTCASE_DIR)
+ raise
+
+ return testcase_files
def _get_record(self, testcase_file):
- try:
- with open(self.test_case_path + testcase_file) as f:
- try:
- testcase_info = f.read()
- except Exception as e:
- print("Failed to load test cases:"
- "\n%(testcase_file)s\n%(err)s\n"
- % {"testcase_file": testcase_file, "err": e})
- raise e
- description, installer, deploy_scenarios = \
- self._parse_testcase(testcase_info)
-
- record = {'Name': testcase_file.split(".")[0],
- 'Description': description,
- 'installer': installer,
- 'deploy_scenarios': deploy_scenarios}
- return record
- except IOError as ioerror:
- sys.exit(ioerror)
+ file_path = os.path.join(consts.TESTCASE_DIR, testcase_file)
+        try:
+            with open(file_path) as f:
+                testcase_info = f.read()
+        except IOError:
+            LOG.exception('Failed to load test case:\n%s\n', testcase_file)
+            raise
+
+ description, installer, deploy_scenarios = self._parse_testcase(
+ testcase_info)
+
+ record = {
+ 'Name': testcase_file.split(".")[0],
+ 'Description': description,
+ 'installer': installer,
+ 'deploy_scenarios': deploy_scenarios
+ }
+
+ return record
def _parse_testcase(self, testcase_info):
- kw = {}
- rendered_testcase = TaskTemplate.render(testcase_info, **kw)
+ rendered_testcase = TaskTemplate.render(testcase_info)
testcase_cfg = yaml.load(rendered_testcase)
- test_precondition = testcase_cfg.get('precondition', None)
- installer_type = 'all'
- deploy_scenarios = 'all'
- if test_precondition is not None:
- installer_type = test_precondition.get('installer_type', 'all')
- deploy_scenarios = test_precondition.get('deploy_scenarios', 'all')
-
- description = testcase_info.split("\n")[2][1:].strip()
+
+ test_precondition = testcase_cfg.get('precondition', {})
+ installer_type = test_precondition.get('installer_type', 'all')
+ deploy_scenarios = test_precondition.get('deploy_scenarios', 'all')
+
+ description = self._get_description(testcase_cfg)
+
return description, installer_type, deploy_scenarios
- def _format_print(self, testcase_list):
- """format output"""
+ def _get_description(self, testcase_cfg):
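+        # By convention the YAML description reads
+        # "<TC name> config file; <summary>; <details>", so the second
+        # ';'-separated field is the summary shown in listings; fall back
+        # to the first field when there is no ';'.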
+ try:
+ description_list = testcase_cfg['description'].split(';')
+ except KeyError:
+ return ''
+ else:
+ try:
+ return description_list[1].replace(os.linesep, '').strip()
+ except IndexError:
+ return description_list[0].replace(os.linesep, '').strip()
- print_hbar(88)
- print("| %-21s | %-60s" % ("Testcase Name", "Description"))
- print_hbar(88)
- for testcase_record in testcase_list:
- print("| %-16s | %-60s" % (testcase_record['Name'],
- testcase_record['Description']))
- print_hbar(88)
+ def show(self, args):
+ """Show details of a specific test case"""
+ testcase_name = args.casename[0]
+ testcase_path = os.path.join(consts.TESTCASE_DIR,
+ testcase_name + ".yaml")
+        try:
+            with open(testcase_path) as f:
+                testcase_info = f.read()
+        except IOError:
+            LOG.exception('Failed to load test case:\n%s\n', testcase_path)
+            raise
+
+ print(testcase_info)
+ return True
from __future__ import absolute_import
from yardstick.benchmark.core import Param
+from api import client
def change_osloobj_to_paras(args):
param = Param({})
- for k in param.__dict__:
+ for k in vars(param):
if hasattr(args, k):
setattr(param, k, getattr(args, k))
return param
+
+
+class Commands(object):
+ def __init__(self):
+ self.client = client
+
+ def _change_to_dict(self, args):
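+        """Collect the attributes Param({}) defines from 'args' as a dict."""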
+ p = Param({})
+ return {k: getattr(args, k) for k in vars(p) if hasattr(args, k)}
""" Handler for yardstick command 'testcase' """
from __future__ import print_function
-
from __future__ import absolute_import
+
from yardstick.benchmark.core.testcase import Testcase
+from yardstick.benchmark.core import print_hbar
from yardstick.common.utils import cliargs
from yardstick.cmd.commands import change_osloobj_to_paras
+from yardstick.cmd.commands import Commands
-class TestcaseCommands(object):
+class TestcaseCommands(Commands):
"""Testcase commands.
Set of commands to discover and display test cases.
def do_list(self, args):
"""List existing test cases"""
- param = change_osloobj_to_paras(args)
- Testcase().list_all(param)
+ testcase_list = self.client.get('/yardstick/testcases')['result']
+ self._format_print(testcase_list)
@cliargs("casename", type=str, help="test case name", nargs=1)
def do_show(self, args):
"""Show details of a specific test case"""
param = change_osloobj_to_paras(args)
Testcase().show(param)
+
+ def _format_print(self, testcase_list):
+ """format output"""
+
+ print_hbar(88)
+ print("| %-21s | %-60s" % ("Testcase Name", "Description"))
+ print_hbar(88)
+ for testcase_record in testcase_list:
+            print("| %-21s | %-60s" % (testcase_record['Name'],
+                                       testcase_record['Description']))
+ print_hbar(88)
YARDSTICK_ROOT_PATH = dirname(dirname(dirname(abspath(__file__)))) + sep
+TESTCASE_DIR = join(YARDSTICK_ROOT_PATH, 'tests/opnfv/test_cases/')
+
YARDSTICK_REPOS_DIR = '/home/opnfv/repos/yardstick'
YARDSTICK_CONFIG_DIR = '/etc/yardstick/'