Merge "Open storperf testcase to huawei-pod2"
author Jing Lu <lvjing5@huawei.com>
Mon, 24 Jul 2017 06:55:28 +0000 (06:55 +0000)
committer Gerrit Code Review <gerrit@opnfv.org>
Mon, 24 Jul 2017 06:55:28 +0000 (06:55 +0000)
94 files changed:
.gitignore
api/__init__.py
api/base.py [deleted file]
api/database/v2/__init__.py [new file with mode: 0644]
api/database/v2/handlers.py [new file with mode: 0644]
api/database/v2/models.py [new file with mode: 0644]
api/resources/asynctask.py [deleted file]
api/resources/case_docs.py [deleted file]
api/resources/env_action.py [deleted file]
api/resources/release_action.py [deleted file]
api/resources/results.py [deleted file]
api/resources/samples_action.py [deleted file]
api/resources/testcases.py [deleted file]
api/resources/testsuites_action.py [deleted file]
api/resources/v1/__init__.py [new file with mode: 0644]
api/resources/v1/asynctasks.py [new file with mode: 0644]
api/resources/v1/env.py [new file with mode: 0644]
api/resources/v1/results.py [new file with mode: 0644]
api/resources/v1/testcases.py [new file with mode: 0644]
api/resources/v1/testsuites.py [new file with mode: 0644]
api/resources/v2/__init__.py [new file with mode: 0644]
api/resources/v2/containers.py [new file with mode: 0644]
api/resources/v2/environments.py [new file with mode: 0644]
api/resources/v2/images.py [new file with mode: 0644]
api/resources/v2/openrcs.py [new file with mode: 0644]
api/resources/v2/pods.py [new file with mode: 0644]
api/resources/v2/projects.py [new file with mode: 0644]
api/resources/v2/tasks.py [new file with mode: 0644]
api/resources/v2/testcases.py [new file with mode: 0644]
api/resources/v2/testsuites.py [new file with mode: 0644]
api/server.py
api/urls.py
api/utils/common.py [deleted file]
api/utils/thread.py
api/views.py [deleted file]
docs/release/release-notes/release-notes.rst
requirements.txt
samples/ping_k8s.yaml [new file with mode: 0644]
tests/unit/apiserver/utils/test_common.py [deleted file]
tests/unit/benchmark/contexts/test_heat.py
tests/unit/benchmark/contexts/test_kubernetes.py [new file with mode: 0644]
tests/unit/benchmark/contexts/test_model.py
tests/unit/benchmark/contexts/test_node.py
tests/unit/benchmark/contexts/test_standalone.py
tests/unit/benchmark/core/test_task.py
tests/unit/benchmark/runner/test_base.py
tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py
tests/unit/benchmark/scenarios/availability/test_monitor_command.py
tests/unit/benchmark/scenarios/availability/test_monitor_multi.py
tests/unit/benchmark/scenarios/networking/test_nstat.py
tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk.py
tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk_throughput.py
tests/unit/benchmark/scenarios/networking/test_vnf_generic.py
tests/unit/benchmark/scenarios/storage/test_storperf.py
tests/unit/cmd/test_NSBperf.py
tests/unit/common/test_utils.py
tests/unit/network_services/vnf_generic/vnf/test_tg_ping.py
tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_trex.py
tests/unit/network_services/vnf_generic/vnf/test_tg_trex.py
tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py
tests/unit/orchestrator/test_heat.py
tests/unit/orchestrator/test_kubernetes.py [new file with mode: 0644]
yardstick/benchmark/contexts/base.py
yardstick/benchmark/contexts/dummy.py
yardstick/benchmark/contexts/heat.py
yardstick/benchmark/contexts/kubernetes.py [new file with mode: 0644]
yardstick/benchmark/contexts/model.py
yardstick/benchmark/contexts/node.py
yardstick/benchmark/contexts/standalone.py
yardstick/benchmark/core/task.py
yardstick/benchmark/core/testsuite.py [new file with mode: 0644]
yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py
yardstick/benchmark/scenarios/availability/attacker/attacker_process.py
yardstick/benchmark/scenarios/availability/attacker_conf.yaml
yardstick/benchmark/scenarios/availability/ha_tools/check_lxc_process_python.bash [new file with mode: 0755]
yardstick/benchmark/scenarios/availability/ha_tools/fault_lxc_process_kill.bash [new file with mode: 0755]
yardstick/benchmark/scenarios/availability/ha_tools/nova/create_flavor.bash
yardstick/benchmark/scenarios/availability/ha_tools/nova/delete_flavor.bash
yardstick/benchmark/scenarios/availability/ha_tools/nova/show_flavors.bash
yardstick/benchmark/scenarios/availability/ha_tools/start_lxc_service.bash [new file with mode: 0755]
yardstick/benchmark/scenarios/availability/monitor/monitor_command.py
yardstick/benchmark/scenarios/availability/monitor_conf.yaml
yardstick/benchmark/scenarios/networking/vnf_generic.py
yardstick/benchmark/scenarios/storage/fio.py
yardstick/cmd/NSBperf.py
yardstick/common/constants.py
yardstick/common/kubernetes_utils.py [new file with mode: 0644]
yardstick/common/utils.py
yardstick/network_services/vnf_generic/vnf/base.py
yardstick/orchestrator/heat.py
yardstick/orchestrator/kubernetes.py [new file with mode: 0644]
yardstick/vTC/apexlake/tests/deployment_unit_test.py
yardstick/vTC/apexlake/tests/dpdk_packet_generator_test.py
yardstick/vTC/apexlake/tests/instantiation_validation_bench_test.py

index f2c8fd9..a53b533 100644
@@ -1,3 +1,5 @@
+*.DS_Store
+*.log
 *.pyc
 .vimrc
 .ropeproject
@@ -21,8 +23,6 @@ Session*.vim
 *~
 setuptools*zip
 dist/
-pep8.log
-test.log
 .testrepository/
 cover/
 .*.sw?
index c6cbbf1..c5aefff 100644
@@ -1,4 +1,67 @@
-from yardstick import _init_logging
+##############################################################################
+# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+from __future__ import absolute_import
+import logging
+
+from flask import request
+from flask_restful import Resource
 
+from yardstick import _init_logging
+from yardstick.common import constants as consts
+from yardstick.common import utils as common_utils
 
 _init_logging()
+LOG = logging.getLogger(__name__)
+LOG.setLevel(logging.DEBUG)
+
+
+class ApiResource(Resource):
+
+    def _post_args(self):
+        data = request.json if request.json else {}
+        params = common_utils.translate_to_str(data)
+        action = params.get('action', request.form.get('action', ''))
+        args = params.get('args', {})
+
+        try:
+            args['file'] = request.files['file']
+        except KeyError:
+            pass
+
+        args.update({k: v for k, v in request.form.items()})
+        LOG.debug('Input args: action: %s, args: %s', action, args)
+
+        return action, args
+
+    def _get_args(self):
+        args = common_utils.translate_to_str(request.args)
+        LOG.debug('Input args: %s', args)
+
+        return args
+
+    def _dispatch_post(self, **kwargs):
+        action, args = self._post_args()
+        args.update(kwargs)
+        return self._dispatch(args, action)
+
+    def _dispatch(self, args, action):
+        try:
+            return getattr(self, action)(args)
+        except AttributeError:
+            return common_utils.result_handler(consts.API_ERROR, 'No such action')
+
+
+class Url(object):
+
+    def __init__(self, url, target):
+        super(Url, self).__init__()
+        self.url = url
+        self.target = target
+
+common_utils.import_modules_from_package("api.resources")
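The dispatch model above replaces the module-lookup scheme of the deleted api/base.py (next diff): a POST body of the form {"action": ..., "args": {...}} is parsed by _post_args() and routed by _dispatch_post() to the method of the same resource named by "action". A minimal sketch of a resource written against this base class (the V1Ping class and its do_ping action are hypothetical, shown only to illustrate the flow; the actual route mapping lives in api/urls.py):

from api import ApiResource
from yardstick.common import constants as consts
from yardstick.common import utils as common_utils


class V1Ping(ApiResource):
    # Hypothetical resource: POST {"action": "do_ping", "args": {"host": "10.0.0.1"}}
    # is parsed by _post_args() and dispatched to self.do_ping(args).

    def post(self):
        return self._dispatch_post()

    def do_ping(self, args):
        host = args.get('host', '127.0.0.1')
        return common_utils.result_handler(consts.API_SUCCESS, {'host': host})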
diff --git a/api/base.py b/api/base.py
deleted file mode 100644
index 0f1e76a..0000000
+++ /dev/null
@@ -1,65 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-from __future__ import absolute_import
-import re
-import importlib
-import logging
-
-from flask import request
-from flask_restful import Resource
-
-from api.utils import common as common_utils
-from yardstick.common import constants as consts
-
-logger = logging.getLogger(__name__)
-logger.setLevel(logging.DEBUG)
-
-
-class ApiResource(Resource):
-
-    def _post_args(self):
-        data = request.json if request.json else {}
-        params = common_utils.translate_to_str(data)
-        action = params.get('action', request.form.get('action', ''))
-        args = params.get('args', {})
-
-        try:
-            args['file'] = request.files['file']
-        except KeyError:
-            pass
-
-        logger.debug('Input args is: action: %s, args: %s', action, args)
-
-        return action, args
-
-    def _get_args(self):
-        args = common_utils.translate_to_str(request.args)
-        logger.debug('Input args is: args: %s', args)
-
-        return args
-
-    def _dispatch_post(self):
-        action, args = self._post_args()
-        return self._dispatch(args, action)
-
-    def _dispatch_get(self, **kwargs):
-        args = self._get_args()
-        args.update(kwargs)
-        return self._dispatch(args)
-
-    def _dispatch(self, args, action='default'):
-        module_name = re.sub(r'([A-Z][a-z]*)', r'_\1',
-                             self.__class__.__name__)[1:].lower()
-
-        module_name = 'api.resources.%s' % module_name
-        resources = importlib.import_module(module_name)
-        try:
-            return getattr(resources, action)(args)
-        except AttributeError:
-            common_utils.result_handler(consts.API_ERROR, 'No such action')
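For reference, the module-name derivation that the deleted dispatcher relied on can be checked in isolation; this standalone snippet reproduces the re.sub() call above on a few resource class names:

import re

# CamelCase class name -> api.resources module name, as in the deleted _dispatch()
for name in ('EnvAction', 'Results', 'CaseDocs'):
    print(re.sub(r'([A-Z][a-z]*)', r'_\1', name)[1:].lower())
# -> env_action, results, case_docs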
diff --git a/api/database/v2/__init__.py b/api/database/v2/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/api/database/v2/handlers.py b/api/database/v2/handlers.py
new file mode 100644
index 0000000..1bc32bf
--- /dev/null
@@ -0,0 +1,200 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+from api.database import db_session
+from api.database.v2.models import V2Environment
+from api.database.v2.models import V2Openrc
+from api.database.v2.models import V2Image
+from api.database.v2.models import V2Pod
+from api.database.v2.models import V2Container
+from api.database.v2.models import V2Project
+from api.database.v2.models import V2Task
+
+
+class V2EnvironmentHandler(object):
+
+    def insert(self, kwargs):
+        environment = V2Environment(**kwargs)
+        db_session.add(environment)
+        db_session.commit()
+        return environment
+
+    def list_all(self):
+        return V2Environment.query.all()
+
+    def get_by_uuid(self, uuid):
+        environment = V2Environment.query.filter_by(uuid=uuid).first()
+        if not environment:
+            raise ValueError
+        return environment
+
+    def update_attr(self, uuid, attr):
+        environment = self.get_by_uuid(uuid)
+        for k, v in attr.items():
+            setattr(environment, k, v)
+        db_session.commit()
+
+    def append_attr(self, uuid, attr):
+        environment = self.get_by_uuid(uuid)
+        for k, v in attr.items():
+            value = getattr(environment, k)
+            new = '{},{}'.format(value, v) if value else v
+            setattr(environment, k, new)
+        db_session.commit()
+
+    def delete_by_uuid(self, uuid):
+        environment = self.get_by_uuid(uuid)
+        db_session.delete(environment)
+        db_session.commit()
+
+
+class V2OpenrcHandler(object):
+
+    def insert(self, kwargs):
+        openrc = V2Openrc(**kwargs)
+        db_session.add(openrc)
+        db_session.commit()
+        return openrc
+
+    def get_by_uuid(self, uuid):
+        openrc = V2Openrc.query.filter_by(uuid=uuid).first()
+        if not openrc:
+            raise ValueError
+        return openrc
+
+    def delete_by_uuid(self, uuid):
+        openrc = self.get_by_uuid(uuid)
+        db_session.delete(openrc)
+        db_session.commit()
+
+
+class V2ImageHandler(object):
+
+    def insert(self, kwargs):
+        image = V2Image(**kwargs)
+        db_session.add(image)
+        db_session.commit()
+        return image
+
+    def get_by_uuid(self, uuid):
+        image = V2Image.query.filter_by(uuid=uuid).first()
+        if not image:
+            raise ValueError
+        return image
+
+
+class V2PodHandler(object):
+
+    def insert(self, kwargs):
+        pod = V2Pod(**kwargs)
+        db_session.add(pod)
+        db_session.commit()
+        return pod
+
+    def get_by_uuid(self, uuid):
+        pod = V2Pod.query.filter_by(uuid=uuid).first()
+        if not pod:
+            raise ValueError
+        return pod
+
+    def delete_by_uuid(self, uuid):
+        pod = self.get_by_uuid(uuid)
+        db_session.delete(pod)
+        db_session.commit()
+
+
+class V2ContainerHandler(object):
+
+    def insert(self, kwargs):
+        container = V2Container(**kwargs)
+        db_session.add(container)
+        db_session.commit()
+        return container
+
+    def get_by_uuid(self, uuid):
+        container = V2Container.query.filter_by(uuid=uuid).first()
+        if not container:
+            raise ValueError
+        return container
+
+    def update_attr(self, uuid, attr):
+        container = self.get_by_uuid(uuid)
+        for k, v in attr.items():
+            setattr(container, k, v)
+        db_session.commit()
+
+    def delete_by_uuid(self, uuid):
+        container = self.get_by_uuid(uuid)
+        db_session.delete(container)
+        db_session.commit()
+
+
+class V2ProjectHandler(object):
+
+    def list_all(self):
+        return V2Project.query.all()
+
+    def insert(self, kwargs):
+        project = V2Project(**kwargs)
+        db_session.add(project)
+        db_session.commit()
+        return project
+
+    def get_by_uuid(self, uuid):
+        project = V2Project.query.filter_by(uuid=uuid).first()
+        if not project:
+            raise ValueError
+        return project
+
+    def update_attr(self, uuid, attr):
+        project = self.get_by_uuid(uuid)
+        for k, v in attr.items():
+            setattr(project, k, v)
+        db_session.commit()
+
+    def append_attr(self, uuid, attr):
+        project = self.get_by_uuid(uuid)
+        for k, v in attr.items():
+            value = getattr(project, k)
+            new = '{},{}'.format(value, v) if value else v
+            setattr(project, k, new)
+        db_session.commit()
+
+    def delete_by_uuid(self, uuid):
+        project = self.get_by_uuid(uuid)
+        db_session.delete(project)
+        db_session.commit()
+
+
+class V2TaskHandler(object):
+
+    def list_all(self):
+        return V2Task.query.all()
+
+    def insert(self, kwargs):
+        task = V2Task(**kwargs)
+        db_session.add(task)
+        db_session.commit()
+        return task
+
+    def get_by_uuid(self, uuid):
+        task = V2Task.query.filter_by(uuid=uuid).first()
+        if not task:
+            raise ValueError
+        return task
+
+    def update_attr(self, uuid, attr):
+        task = self.get_by_uuid(uuid)
+        for k, v in attr.items():
+            setattr(task, k, v)
+        db_session.commit()
+
+    def delete_by_uuid(self, uuid):
+        task = self.get_by_uuid(uuid)
+        db_session.delete(task)
+        db_session.commit()
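A short usage sketch for these handlers. The field values are illustrative, and it assumes db_session in api.database is already bound to a database, as the API server arranges at startup:

import uuid

from api.database.v2.handlers import V2EnvironmentHandler

handler = V2EnvironmentHandler()
env_id = str(uuid.uuid4())

handler.insert({'uuid': env_id, 'name': 'demo-env'})  # illustrative fields
handler.append_attr(env_id, {'container_id': 'abc'})  # empty -> 'abc'
handler.append_attr(env_id, {'container_id': 'def'})  # 'abc' -> 'abc,def'
environment = handler.get_by_uuid(env_id)             # raises ValueError if absent
handler.delete_by_uuid(env_id)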
diff --git a/api/database/v2/models.py b/api/database/v2/models.py
new file mode 100644
index 0000000..64d49cc
--- /dev/null
@@ -0,0 +1,100 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+from __future__ import absolute_import
+from sqlalchemy import Column
+from sqlalchemy import Integer
+from sqlalchemy import String
+from sqlalchemy import Text
+from sqlalchemy import DateTime
+from sqlalchemy import Boolean
+
+from api.database import Base
+
+
+class V2Environment(Base):
+    __tablename__ = 'v2_environment'
+    id = Column(Integer, primary_key=True)
+    uuid = Column(String(30))
+    name = Column(String(30))
+    description = Column(Text)
+    openrc_id = Column(String(10))
+    image_id = Column(String(30))
+    container_id = Column(Text)
+    pod_id = Column(String(10))
+    time = Column(DateTime)
+
+
+class V2Openrc(Base):
+    __tablename__ = 'v2_openrc'
+    id = Column(Integer, primary_key=True)
+    uuid = Column(String(30))
+    name = Column(String(30))
+    description = Column(Text)
+    environment_id = Column(String(30))
+    content = Column(Text)
+    time = Column(DateTime)
+
+
+class V2Image(Base):
+    __tablename__ = 'v2_image'
+    id = Column(Integer, primary_key=True)
+    uuid = Column(String(30))
+    name = Column(String(30))
+    description = Column(Text)
+    environment_id = Column(String(30))
+    size = Column(String(30))
+    status = Column(String(30))
+    time = Column(DateTime)
+
+
+class V2Container(Base):
+    __tablename__ = 'v2_container'
+    id = Column(Integer, primary_key=True)
+    uuid = Column(String(30))
+    name = Column(String(30))
+    environment_id = Column(String(30))
+    status = Column(Integer)
+    port = Column(Integer)
+    time = Column(String(30))
+
+
+class V2Pod(Base):
+    __tablename__ = 'v2_pod'
+    id = Column(Integer, primary_key=True)
+    uuid = Column(String(30))
+    environment_id = Column(String(30))
+    content = Column(Text)
+    time = Column(String(30))
+
+
+class V2Project(Base):
+    __tablename__ = 'v2_project'
+    id = Column(Integer, primary_key=True)
+    uuid = Column(String(30))
+    name = Column(String(30))
+    description = Column(Text)
+    time = Column(DateTime)
+    tasks = Column(Text)
+
+
+class V2Task(Base):
+    __tablename__ = 'v2_task'
+    id = Column(Integer, primary_key=True)
+    uuid = Column(String(30))
+    name = Column(String(30))
+    description = Column(Text)
+    project_id = Column(String(30))
+    environment_id = Column(String(30))
+    time = Column(DateTime)
+    case_name = Column(String(30))
+    suite = Column(Boolean)
+    content = Column(Text)
+    result = Column(Text)
+    error = Column(Text)
+    status = Column(Integer)
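These are plain SQLAlchemy declarative models; what they map to can be tried out in isolation with an in-memory SQLite engine. This standalone sketch re-declares one model against its own Base rather than importing api.database, purely for illustration:

from sqlalchemy import create_engine, Column, Integer, String, Text
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()


class V2Project(Base):
    # Mirrors the v2_project model above (time column omitted for brevity)
    __tablename__ = 'v2_project'
    id = Column(Integer, primary_key=True)
    uuid = Column(String(30))
    name = Column(String(30))
    description = Column(Text)
    tasks = Column(Text)

engine = create_engine('sqlite://')  # throwaway in-memory database
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add(V2Project(uuid='1234', name='demo'))
session.commit()
print(session.query(V2Project).filter_by(uuid='1234').first().name)  # demo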
diff --git a/api/resources/asynctask.py b/api/resources/asynctask.py
deleted file mode 100644
index 39b47c0..0000000
+++ /dev/null
@@ -1,64 +0,0 @@
-# ############################################################################
-# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-# ############################################################################
-import uuid
-import logging
-
-from api.utils.common import result_handler
-from api.database.v1.handlers import AsyncTaskHandler
-from yardstick.common import constants as consts
-
-LOG = logging.getLogger(__name__)
-LOG.setLevel(logging.DEBUG)
-
-
-def default(args):
-    return _get_status(args)
-
-
-def _get_status(args):
-    try:
-        task_id = args['task_id']
-    except KeyError:
-        return result_handler(consts.API_ERROR, 'task_id must be provided')
-
-    try:
-        uuid.UUID(task_id)
-    except ValueError:
-        return result_handler(consts.API_ERROR, 'invalid task_id')
-
-    asynctask_handler = AsyncTaskHandler()
-    try:
-        asynctask = asynctask_handler.get_task_by_taskid(task_id)
-    except ValueError:
-        return result_handler(consts.API_ERROR, 'invalid task_id')
-
-    def _unfinished():
-        return result_handler(consts.TASK_NOT_DONE, {})
-
-    def _finished():
-        return result_handler(consts.TASK_DONE, {})
-
-    def _error():
-        return result_handler(consts.TASK_FAILED, asynctask.error)
-
-    status = asynctask.status
-    LOG.debug('Task status is: %s', status)
-
-    if status not in [consts.TASK_NOT_DONE,
-                      consts.TASK_DONE,
-                      consts.TASK_FAILED]:
-        return result_handler(consts.API_ERROR, 'internal server error')
-
-    switcher = {
-        consts.TASK_NOT_DONE: _unfinished,
-        consts.TASK_DONE: _finished,
-        consts.TASK_FAILED: _error
-    }
-
-    return switcher.get(status)()
diff --git a/api/resources/case_docs.py b/api/resources/case_docs.py
deleted file mode 100644
index 289410d..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-import os
-import logging
-
-from api.utils.common import result_handler
-from yardstick.common import constants as consts
-
-LOG = logging.getLogger(__name__)
-LOG.setLevel(logging.DEBUG)
-
-
-def default(args):
-    return get_case_docs(args)
-
-
-def get_case_docs(args):
-    try:
-        case_name = args['case_name']
-    except KeyError:
-        return result_handler(consts.API_ERROR, 'case_name must be provided')
-
-    docs_path = os.path.join(consts.DOCS_DIR, '{}.rst'.format(case_name))
-
-    if not os.path.exists(docs_path):
-        return result_handler(consts.API_ERROR, 'case not exists')
-
-    LOG.info('Reading %s', case_name)
-    with open(docs_path) as f:
-        content = f.read()
-
-    return result_handler(consts.API_SUCCESS, {'docs': content})
diff --git a/api/resources/env_action.py b/api/resources/env_action.py
deleted file mode 100644
index fed9870..0000000
+++ /dev/null
@@ -1,427 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-from __future__ import absolute_import
-
-import errno
-import logging
-import os
-import subprocess
-import threading
-import time
-import uuid
-import glob
-import yaml
-import collections
-from subprocess import PIPE
-
-from six.moves import configparser
-from oslo_serialization import jsonutils
-from docker import Client
-
-from api.database.v1.handlers import AsyncTaskHandler
-from api.utils import influx
-from api.utils.common import result_handler
-from yardstick.common import constants as consts
-from yardstick.common import utils as common_utils
-from yardstick.common import openstack_utils
-from yardstick.common.httpClient import HttpClient
-
-
-LOG = logging.getLogger(__name__)
-LOG.setLevel(logging.DEBUG)
-
-async_handler = AsyncTaskHandler()
-
-
-def create_grafana(args):
-    task_id = str(uuid.uuid4())
-
-    thread = threading.Thread(target=_create_grafana, args=(task_id,))
-    thread.start()
-
-    return result_handler(consts.API_SUCCESS, {'task_id': task_id})
-
-
-def _create_grafana(task_id):
-    _create_task(task_id)
-
-    client = Client(base_url=consts.DOCKER_URL)
-
-    try:
-        LOG.info('Checking if grafana image exist')
-        image = '{}:{}'.format(consts.GRAFANA_IMAGE, consts.GRAFANA_TAG)
-        if not _check_image_exist(client, image):
-            LOG.info('Grafana image not exist, start pulling')
-            client.pull(consts.GRAFANA_IMAGE, consts.GRAFANA_TAG)
-
-        LOG.info('Createing grafana container')
-        _create_grafana_container(client)
-        LOG.info('Grafana container is created')
-
-        time.sleep(5)
-
-        LOG.info('Creating data source for grafana')
-        _create_data_source()
-
-        LOG.info('Creating dashboard for grafana')
-        _create_dashboard()
-
-        _update_task_status(task_id)
-        LOG.info('Finished')
-    except Exception as e:
-        _update_task_error(task_id, str(e))
-        LOG.exception('Create grafana failed')
-
-
-def _create_dashboard():
-    url = 'http://admin:admin@%s:3000/api/dashboards/db' % consts.GRAFANA_IP
-    path = os.path.join(consts.REPOS_DIR, 'dashboard', '*dashboard.json')
-
-    for i in sorted(glob.iglob(path)):
-        with open(i) as f:
-            data = jsonutils.load(f)
-        try:
-            HttpClient().post(url, data)
-        except Exception:
-            LOG.exception('Create dashboard %s failed', i)
-            raise
-
-
-def _create_data_source():
-    url = 'http://admin:admin@%s:3000/api/datasources' % consts.GRAFANA_IP
-    data = {
-        "name": "yardstick",
-        "type": "influxdb",
-        "access": "proxy",
-        "url": "http://%s:8086" % consts.INFLUXDB_IP,
-        "password": "root",
-        "user": "root",
-        "database": "yardstick",
-        "basicAuth": True,
-        "basicAuthUser": "admin",
-        "basicAuthPassword": "admin",
-        "isDefault": False,
-    }
-    try:
-        HttpClient().post(url, data)
-    except Exception:
-        LOG.exception('Create datasources failed')
-        raise
-
-
-def _create_grafana_container(client):
-    ports = [3000]
-    port_bindings = {k: k for k in ports}
-    restart_policy = {"MaximumRetryCount": 0, "Name": "always"}
-    host_config = client.create_host_config(port_bindings=port_bindings,
-                                            restart_policy=restart_policy)
-
-    LOG.info('Creating container')
-    container = client.create_container(image='%s:%s' % (consts.GRAFANA_IMAGE,
-                                                         consts.GRAFANA_TAG),
-                                        ports=ports,
-                                        detach=True,
-                                        tty=True,
-                                        host_config=host_config)
-    LOG.info('Starting container')
-    client.start(container)
-
-
-def _check_image_exist(client, t):
-    return any(t in a['RepoTags'][0] for a in client.images() if a['RepoTags'])
-
-
-def create_influxdb(args):
-    task_id = str(uuid.uuid4())
-
-    thread = threading.Thread(target=_create_influxdb, args=(task_id,))
-    thread.start()
-
-    return result_handler(consts.API_SUCCESS, {'task_id': task_id})
-
-
-def _create_influxdb(task_id):
-    _create_task(task_id)
-
-    client = Client(base_url=consts.DOCKER_URL)
-
-    try:
-        LOG.info('Changing output to influxdb')
-        _change_output_to_influxdb()
-
-        LOG.info('Checking if influxdb image exist')
-        if not _check_image_exist(client, '%s:%s' % (consts.INFLUXDB_IMAGE,
-                                                     consts.INFLUXDB_TAG)):
-            LOG.info('Influxdb image not exist, start pulling')
-            client.pull(consts.INFLUXDB_IMAGE, tag=consts.INFLUXDB_TAG)
-
-        LOG.info('Createing influxdb container')
-        _create_influxdb_container(client)
-        LOG.info('Influxdb container is created')
-
-        time.sleep(5)
-
-        LOG.info('Config influxdb')
-        _config_influxdb()
-
-        _update_task_status(task_id)
-
-        LOG.info('Finished')
-    except Exception as e:
-        _update_task_error(task_id, str(e))
-        LOG.exception('Creating influxdb failed')
-
-
-def _create_influxdb_container(client):
-
-    ports = [8083, 8086]
-    port_bindings = {k: k for k in ports}
-    restart_policy = {"MaximumRetryCount": 0, "Name": "always"}
-    host_config = client.create_host_config(port_bindings=port_bindings,
-                                            restart_policy=restart_policy)
-
-    LOG.info('Creating container')
-    container = client.create_container(image='%s:%s' % (consts.INFLUXDB_IMAGE,
-                                                         consts.INFLUXDB_TAG),
-                                        ports=ports,
-                                        detach=True,
-                                        tty=True,
-                                        host_config=host_config)
-    LOG.info('Starting container')
-    client.start(container)
-
-
-def _config_influxdb():
-    try:
-        client = influx.get_data_db_client()
-        client.create_user(consts.INFLUXDB_USER,
-                           consts.INFLUXDB_PASS,
-                           consts.INFLUXDB_DB_NAME)
-        client.create_database(consts.INFLUXDB_DB_NAME)
-        LOG.info('Success to config influxDB')
-    except Exception:
-        LOG.exception('Config influxdb failed')
-
-
-def _change_output_to_influxdb():
-    common_utils.makedirs(consts.CONF_DIR)
-
-    parser = configparser.ConfigParser()
-    LOG.info('Reading output sample configuration')
-    parser.read(consts.CONF_SAMPLE_FILE)
-
-    LOG.info('Set dispatcher to influxdb')
-    parser.set('DEFAULT', 'dispatcher', 'influxdb')
-    parser.set('dispatcher_influxdb', 'target',
-               'http://%s:8086' % consts.INFLUXDB_IP)
-
-    LOG.info('Writing to %s', consts.CONF_FILE)
-    with open(consts.CONF_FILE, 'w') as f:
-        parser.write(f)
-
-
-def prepare_env(args):
-    task_id = str(uuid.uuid4())
-
-    thread = threading.Thread(target=_prepare_env_daemon, args=(task_id,))
-    thread.start()
-
-    return result_handler(consts.API_SUCCESS, {'task_id': task_id})
-
-
-def _already_source_openrc():
-    """Check if openrc is sourced already"""
-    return all(os.environ.get(k) for k in ['OS_AUTH_URL', 'OS_USERNAME',
-                                           'OS_PASSWORD', 'EXTERNAL_NETWORK'])
-
-
-def _prepare_env_daemon(task_id):
-    _create_task(task_id)
-
-    try:
-        _create_directories()
-
-        rc_file = consts.OPENRC
-
-        LOG.info('Checkout Openrc Environment variable')
-        if not _already_source_openrc():
-            LOG.info('Openrc variable not found in Environment')
-            if not os.path.exists(rc_file):
-                LOG.info('Openrc file not found')
-                installer_ip = os.environ.get('INSTALLER_IP', '192.168.200.2')
-                installer_type = os.environ.get('INSTALLER_TYPE', 'compass')
-                LOG.info('Getting openrc file from %s', installer_type)
-                _get_remote_rc_file(rc_file, installer_ip, installer_type)
-                LOG.info('Source openrc file')
-                _source_file(rc_file)
-                LOG.info('Appending external network')
-                _append_external_network(rc_file)
-            LOG.info('Openrc file exist, source openrc file')
-            _source_file(rc_file)
-
-        LOG.info('Cleaning images')
-        _clean_images()
-
-        LOG.info('Loading images')
-        _load_images()
-
-        _update_task_status(task_id)
-        LOG.info('Finished')
-    except Exception as e:
-        _update_task_error(task_id, str(e))
-        LOG.exception('Prepare env failed')
-
-
-def _create_directories():
-    common_utils.makedirs(consts.CONF_DIR)
-
-
-def _source_file(rc_file):
-    common_utils.source_env(rc_file)
-
-
-def _get_remote_rc_file(rc_file, installer_ip, installer_type):
-
-    os_fetch_script = os.path.join(consts.RELENG_DIR, consts.FETCH_SCRIPT)
-
-    try:
-        cmd = [os_fetch_script, '-d', rc_file, '-i', installer_type,
-               '-a', installer_ip]
-        p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
-        p.communicate()
-
-        if p.returncode != 0:
-            LOG.error('Failed to fetch credentials from installer')
-    except OSError as e:
-        if e.errno != errno.EEXIST:
-            raise
-
-
-def _append_external_network(rc_file):
-    neutron_client = openstack_utils.get_neutron_client()
-    networks = neutron_client.list_networks()['networks']
-    try:
-        ext_network = next(n['name'] for n in networks if n['router:external'])
-    except StopIteration:
-        LOG.warning("Can't find external network")
-    else:
-        cmd = 'export EXTERNAL_NETWORK=%s' % ext_network
-        try:
-            with open(rc_file, 'a') as f:
-                f.write(cmd + '\n')
-        except OSError as e:
-            if e.errno != errno.EEXIST:
-                raise
-
-
-def _clean_images():
-    cmd = [consts.CLEAN_IMAGES_SCRIPT]
-    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, cwd=consts.REPOS_DIR)
-    output = p.communicate()[0]
-    LOG.debug(output)
-
-
-def _load_images():
-    cmd = [consts.LOAD_IMAGES_SCRIPT]
-    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, cwd=consts.REPOS_DIR)
-    output = p.communicate()[0]
-    LOG.debug(output)
-
-
-def _create_task(task_id):
-    async_handler.insert({'status': 0, 'task_id': task_id})
-
-
-def _update_task_status(task_id):
-    async_handler.update_attr(task_id, {'status': 1})
-
-
-def _update_task_error(task_id, error):
-    async_handler.update_attr(task_id, {'status': 2, 'error': error})
-
-
-def update_openrc(args):
-    try:
-        openrc_vars = args['openrc']
-    except KeyError:
-        return result_handler(consts.API_ERROR, 'openrc must be provided')
-    else:
-        if not isinstance(openrc_vars, collections.Mapping):
-            return result_handler(consts.API_ERROR, 'args should be a dict')
-
-    lines = ['export {}={}\n'.format(k, v) for k, v in openrc_vars.items()]
-    LOG.debug('Writing: %s', ''.join(lines))
-
-    LOG.info('Writing openrc: Writing')
-    common_utils.makedirs(consts.CONF_DIR)
-
-    with open(consts.OPENRC, 'w') as f:
-        f.writelines(lines)
-    LOG.info('Writing openrc: Done')
-
-    LOG.info('Source openrc: Sourcing')
-    try:
-        _source_file(consts.OPENRC)
-    except Exception as e:
-        LOG.exception('Failed to source openrc')
-        return result_handler(consts.API_ERROR, str(e))
-    LOG.info('Source openrc: Done')
-
-    return result_handler(consts.API_SUCCESS, {'openrc': openrc_vars})
-
-
-def upload_pod_file(args):
-    try:
-        pod_file = args['file']
-    except KeyError:
-        return result_handler(consts.API_ERROR, 'file must be provided')
-
-    LOG.info('Checking file')
-    data = yaml.load(pod_file.read())
-    if not isinstance(data, collections.Mapping):
-        return result_handler(consts.API_ERROR, 'invalid yaml file')
-
-    LOG.info('Writing file')
-    with open(consts.POD_FILE, 'w') as f:
-        yaml.dump(data, f, default_flow_style=False)
-    LOG.info('Writing finished')
-
-    return result_handler(consts.API_SUCCESS, {'pod_info': data})
-
-
-def update_pod_file(args):
-    try:
-        pod_dic = args['pod']
-    except KeyError:
-        return result_handler(consts.API_ERROR, 'pod must be provided')
-    else:
-        if not isinstance(pod_dic, collections.Mapping):
-            return result_handler(consts.API_ERROR, 'pod should be a dict')
-
-    LOG.info('Writing file')
-    with open(consts.POD_FILE, 'w') as f:
-        yaml.dump(pod_dic, f, default_flow_style=False)
-    LOG.info('Writing finished')
-
-    return result_handler(consts.API_SUCCESS, {'pod_info': pod_dic})
-
-
-def update_hosts(hosts_ip):
-    if not isinstance(hosts_ip, dict):
-        return result_handler(consts.API_ERROR, 'Error, args should be a dict')
-    LOG.info('Writing hosts: Writing')
-    LOG.debug('Writing: %s', hosts_ip)
-    cmd = ["sudo", "python", "write_hosts.py"]
-    p = subprocess.Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE,
-                         cwd = os.path.join(consts.REPOS_DIR, "api/resources"))
-    _, err = p.communicate(jsonutils.dumps(hosts_ip))
-    if p.returncode != 0 :
-        return result_handler(consts.API_ERROR, err)
-    LOG.info('Writing hosts: Done')
-    return result_handler(consts.API_SUCCESS, 'success')
diff --git a/api/resources/release_action.py b/api/resources/release_action.py
deleted file mode 100644
index 9871c1f..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-from __future__ import absolute_import
-import uuid
-import os
-import logging
-
-from api.utils.common import result_handler
-from api.utils.thread import TaskThread
-from yardstick.common import constants as consts
-from yardstick.benchmark.core import Param
-from yardstick.benchmark.core.task import Task
-
-logger = logging.getLogger(__name__)
-logger.setLevel(logging.DEBUG)
-
-
-def run_test_case(args):
-    try:
-        case_name = args['testcase']
-    except KeyError:
-        return result_handler(consts.API_ERROR, 'testcase must be provided')
-
-    testcase = os.path.join(consts.TESTCASE_DIR, '{}.yaml'.format(case_name))
-
-    task_id = str(uuid.uuid4())
-
-    task_args = {
-        'inputfile': [testcase],
-        'task_id': task_id
-    }
-    task_args.update(args.get('opts', {}))
-
-    param = Param(task_args)
-    task_thread = TaskThread(Task().start, param)
-    task_thread.start()
-
-    return result_handler(consts.API_SUCCESS, {'task_id': task_id})
diff --git a/api/resources/results.py b/api/resources/results.py
deleted file mode 100644
index 692e00c..0000000
+++ /dev/null
@@ -1,69 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-from __future__ import absolute_import
-import logging
-import uuid
-import json
-
-from api.utils.common import result_handler
-from api.database.v1.handlers import TasksHandler
-from yardstick.common import constants as consts
-
-logger = logging.getLogger(__name__)
-logger.setLevel(logging.DEBUG)
-
-
-def default(args):
-    return getResult(args)
-
-
-def getResult(args):
-    try:
-        task_id = args['task_id']
-    except KeyError:
-        return result_handler(consts.API_ERROR, 'task_id must be provided')
-
-    try:
-        uuid.UUID(task_id)
-    except ValueError:
-        return result_handler(consts.API_ERROR, 'invalid task_id')
-
-    task_handler = TasksHandler()
-    try:
-        task = task_handler.get_task_by_taskid(task_id)
-    except ValueError:
-        return result_handler(consts.API_ERROR, 'invalid task_id')
-
-    def _unfinished():
-        return result_handler(consts.TASK_NOT_DONE, {})
-
-    def _finished():
-        if task.result:
-            return result_handler(consts.TASK_DONE, json.loads(task.result))
-        else:
-            return result_handler(consts.TASK_DONE, {})
-
-    def _error():
-        return result_handler(consts.TASK_FAILED, task.error)
-
-    status = task.status
-    logger.debug('Task status is: %s', status)
-
-    if status not in [consts.TASK_NOT_DONE,
-                      consts.TASK_DONE,
-                      consts.TASK_FAILED]:
-        return result_handler(consts.API_ERROR, 'internal server error')
-
-    switcher = {
-        consts.TASK_NOT_DONE: _unfinished,
-        consts.TASK_DONE: _finished,
-        consts.TASK_FAILED: _error
-    }
-
-    return switcher.get(status)()
diff --git a/api/resources/samples_action.py b/api/resources/samples_action.py
deleted file mode 100644
index 10b9980..0000000
+++ /dev/null
@@ -1,45 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-from __future__ import absolute_import
-import uuid
-import os
-import logging
-
-from api.utils.common import result_handler
-from api.utils.thread import TaskThread
-from yardstick.common import constants as consts
-from yardstick.benchmark.core import Param
-from yardstick.benchmark.core.task import Task
-
-logger = logging.getLogger(__name__)
-logger.setLevel(logging.DEBUG)
-
-
-def run_test_case(args):
-    try:
-        case_name = args['testcase']
-    except KeyError:
-        return result_handler(consts.API_ERROR, 'testcase must be provided')
-
-    testcase = os.path.join(consts.SAMPLE_CASE_DIR,
-                            '{}.yaml'.format(case_name))
-
-    task_id = str(uuid.uuid4())
-
-    task_args = {
-        'inputfile': [testcase],
-        'task_id': task_id
-    }
-    task_args.update(args.get('opts', {}))
-
-    param = Param(task_args)
-    task_thread = TaskThread(Task().start, param)
-    task_thread.start()
-
-    return result_handler(consts.API_SUCCESS, {'task_id': task_id})
diff --git a/api/resources/testcases.py b/api/resources/testcases.py
deleted file mode 100644
index 6ee15ef..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-# ############################################################################
-# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-# ############################################################################
-from yardstick.benchmark.core.testcase import Testcase
-from yardstick.benchmark.core import Param
-from api.utils import common as common_utils
-
-
-def default(args):
-    return listAllTestcases(args)
-
-
-def listAllTestcases(args):
-    param = Param(args)
-    testcase_list = Testcase().list_all(param)
-    return common_utils.result_handler(1, testcase_list)
diff --git a/api/resources/testsuites_action.py b/api/resources/testsuites_action.py
deleted file mode 100644
index e37eacc..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-from __future__ import absolute_import
-import uuid
-import os
-import logging
-
-from api.utils.common import result_handler
-from api.utils.thread import TaskThread
-from yardstick.common import constants as consts
-from yardstick.benchmark.core import Param
-from yardstick.benchmark.core.task import Task
-
-logger = logging.getLogger(__name__)
-logger.setLevel(logging.DEBUG)
-
-
-def run_test_suite(args):
-    try:
-        suite_name = args['testsuite']
-    except KeyError:
-        return result_handler(consts.API_ERROR, 'testsuite must be provided')
-
-    testsuite = os.path.join(consts.TESTSUITE_DIR,
-                             '{}.yaml'.format(suite_name))
-
-    task_id = str(uuid.uuid4())
-
-    task_args = {
-        'inputfile': [testsuite],
-        'task_id': task_id,
-        'suite': True
-    }
-    task_args.update(args.get('opts', {}))
-
-    param = Param(task_args)
-    task_thread = TaskThread(Task().start, param)
-    task_thread.start()
-
-    return result_handler(consts.API_SUCCESS, {'task_id': task_id})
diff --git a/api/resources/v1/__init__.py b/api/resources/v1/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/api/resources/v1/asynctasks.py b/api/resources/v1/asynctasks.py
new file mode 100644
index 0000000..759df21
--- /dev/null
@@ -0,0 +1,65 @@
+# ############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+# ############################################################################
+import uuid
+import logging
+
+from api import ApiResource
+from api.database.v1.handlers import AsyncTaskHandler
+from yardstick.common import constants as consts
+from yardstick.common.utils import result_handler
+
+LOG = logging.getLogger(__name__)
+LOG.setLevel(logging.DEBUG)
+
+
+class V1AsyncTask(ApiResource):
+
+    def get(self):
+        args = self._get_args()
+
+        try:
+            task_id = args['task_id']
+        except KeyError:
+            return result_handler(consts.API_ERROR, 'task_id must be provided')
+
+        try:
+            uuid.UUID(task_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'invalid task_id')
+
+        asynctask_handler = AsyncTaskHandler()
+        try:
+            asynctask = asynctask_handler.get_task_by_taskid(task_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'invalid task_id')
+
+        def _unfinished():
+            return result_handler(consts.TASK_NOT_DONE, {})
+
+        def _finished():
+            return result_handler(consts.TASK_DONE, {})
+
+        def _error():
+            return result_handler(consts.TASK_FAILED, asynctask.error)
+
+        status = asynctask.status
+        LOG.debug('Task status is: %s', status)
+
+        if status not in [consts.TASK_NOT_DONE,
+                          consts.TASK_DONE,
+                          consts.TASK_FAILED]:
+            return result_handler(consts.API_ERROR, 'internal server error')
+
+        switcher = {
+            consts.TASK_NOT_DONE: _unfinished,
+            consts.TASK_DONE: _finished,
+            consts.TASK_FAILED: _error
+        }
+
+        return switcher.get(status)()
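A hedged sketch of polling this resource over HTTP; the bind address and the exact route are defined in api/server.py and api/urls.py (not shown in full here), so both are assumptions:

import requests

API = 'http://localhost:8888'                      # assumed bind address
task_id = '5f8f2bd4-0cbd-4f73-9a21-111111111111'   # id returned by an earlier POST
resp = requests.get(API + '/yardstick/asynctask',  # assumed v1 route
                    params={'task_id': task_id})
print(resp.json())  # e.g. {'status': 1, 'result': {}} once the task has finished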
diff --git a/api/resources/v1/env.py b/api/resources/v1/env.py
new file mode 100644
index 0000000..4632f15
--- /dev/null
@@ -0,0 +1,421 @@
+##############################################################################
+# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+from __future__ import absolute_import
+
+import errno
+import logging
+import os
+import subprocess
+import threading
+import time
+import uuid
+import glob
+import yaml
+import collections
+
+from six.moves import configparser
+from oslo_serialization import jsonutils
+from docker import Client
+
+from api.database.v1.handlers import AsyncTaskHandler
+from api.utils import influx
+from api import ApiResource
+from yardstick.common import constants as consts
+from yardstick.common import utils
+from yardstick.common.utils import result_handler
+from yardstick.common import openstack_utils
+from yardstick.common.httpClient import HttpClient
+
+
+LOG = logging.getLogger(__name__)
+LOG.setLevel(logging.DEBUG)
+
+async_handler = AsyncTaskHandler()
+
+
+class V1Env(ApiResource):
+
+    def post(self):
+        return self._dispatch_post()
+
+    def create_grafana(self, args):
+        task_id = str(uuid.uuid4())
+
+        thread = threading.Thread(target=self._create_grafana, args=(task_id,))
+        thread.start()
+
+        return result_handler(consts.API_SUCCESS, {'task_id': task_id})
+
+    def _create_grafana(self, task_id):
+        self._create_task(task_id)
+
+        client = Client(base_url=consts.DOCKER_URL)
+
+        try:
+            LOG.info('Checking if grafana image exists')
+            image = '{}:{}'.format(consts.GRAFANA_IMAGE, consts.GRAFANA_TAG)
+            if not self._check_image_exist(client, image):
+                LOG.info('Grafana image does not exist, start pulling')
+                client.pull(consts.GRAFANA_IMAGE, consts.GRAFANA_TAG)
+
+            LOG.info('Creating grafana container')
+            self._create_grafana_container(client)
+            LOG.info('Grafana container is created')
+
+            time.sleep(5)
+
+            LOG.info('Creating data source for grafana')
+            self._create_data_source()
+
+            LOG.info('Creating dashboard for grafana')
+            self._create_dashboard()
+
+            self._update_task_status(task_id)
+            LOG.info('Finished')
+        except Exception as e:
+            self._update_task_error(task_id, str(e))
+            LOG.exception('Create grafana failed')
+
+    def _create_dashboard(self):
+        url = 'http://admin:admin@%s:3000/api/dashboards/db' % consts.GRAFANA_IP
+        path = os.path.join(consts.REPOS_DIR, 'dashboard', '*dashboard.json')
+
+        for i in sorted(glob.iglob(path)):
+            with open(i) as f:
+                data = jsonutils.load(f)
+            try:
+                HttpClient().post(url, data)
+            except Exception:
+                LOG.exception('Create dashboard %s failed', i)
+                raise
+
+    def _create_data_source(self):
+        url = 'http://admin:admin@%s:3000/api/datasources' % consts.GRAFANA_IP
+        data = {
+            "name": "yardstick",
+            "type": "influxdb",
+            "access": "proxy",
+            "url": "http://%s:8086" % consts.INFLUXDB_IP,
+            "password": "root",
+            "user": "root",
+            "database": "yardstick",
+            "basicAuth": True,
+            "basicAuthUser": "admin",
+            "basicAuthPassword": "admin",
+            "isDefault": False,
+        }
+        try:
+            HttpClient().post(url, data)
+        except Exception:
+            LOG.exception('Create datasources failed')
+            raise
+
+    def _create_grafana_container(self, client):
+        ports = [3000]
+        port_bindings = {k: k for k in ports}
+        restart_policy = {"MaximumRetryCount": 0, "Name": "always"}
+        host_config = client.create_host_config(port_bindings=port_bindings,
+                                                restart_policy=restart_policy)
+
+        LOG.info('Creating container')
+        container = client.create_container(image='%s:%s' %
+                                            (consts.GRAFANA_IMAGE,
+                                             consts.GRAFANA_TAG),
+                                            ports=ports,
+                                            detach=True,
+                                            tty=True,
+                                            host_config=host_config)
+        LOG.info('Starting container')
+        client.start(container)
+
+    def _check_image_exist(self, client, t):
+        return any(t in a['RepoTags'][0]
+                   for a in client.images() if a['RepoTags'])
+
+    def create_influxdb(self, args):
+        task_id = str(uuid.uuid4())
+
+        thread = threading.Thread(target=self._create_influxdb, args=(task_id,))
+        thread.start()
+
+        return result_handler(consts.API_SUCCESS, {'task_id': task_id})
+
+    def _create_influxdb(self, task_id):
+        self._create_task(task_id)
+
+        client = Client(base_url=consts.DOCKER_URL)
+
+        try:
+            LOG.info('Changing output to influxdb')
+            self._change_output_to_influxdb()
+
+            LOG.info('Checking if influxdb image exists')
+            if not self._check_image_exist(client, '%s:%s' %
+                                           (consts.INFLUXDB_IMAGE,
+                                            consts.INFLUXDB_TAG)):
+                LOG.info('Influxdb image does not exist, start pulling')
+                client.pull(consts.INFLUXDB_IMAGE, tag=consts.INFLUXDB_TAG)
+
+            LOG.info('Creating influxdb container')
+            self._create_influxdb_container(client)
+            LOG.info('Influxdb container is created')
+
+            time.sleep(5)
+
+            LOG.info('Config influxdb')
+            self._config_influxdb()
+
+            self._update_task_status(task_id)
+
+            LOG.info('Finished')
+        except Exception as e:
+            self._update_task_error(task_id, str(e))
+            LOG.exception('Creating influxdb failed')
+
+    def _create_influxdb_container(self, client):
+
+        ports = [8083, 8086]
+        port_bindings = {k: k for k in ports}
+        restart_policy = {"MaximumRetryCount": 0, "Name": "always"}
+        host_config = client.create_host_config(port_bindings=port_bindings,
+                                                restart_policy=restart_policy)
+
+        LOG.info('Creating container')
+        container = client.create_container(image='%s:%s' %
+                                            (consts.INFLUXDB_IMAGE,
+                                             consts.INFLUXDB_TAG),
+                                            ports=ports,
+                                            detach=True,
+                                            tty=True,
+                                            host_config=host_config)
+        LOG.info('Starting container')
+        client.start(container)
+
+    def _config_influxdb(self):
+        try:
+            client = influx.get_data_db_client()
+            client.create_user(consts.INFLUXDB_USER,
+                               consts.INFLUXDB_PASS,
+                               consts.INFLUXDB_DB_NAME)
+            client.create_database(consts.INFLUXDB_DB_NAME)
+            LOG.info('Successfully configured influxDB')
+        except Exception:
+            LOG.exception('Config influxdb failed')
+
+    def _change_output_to_influxdb(self):
+        utils.makedirs(consts.CONF_DIR)
+
+        parser = configparser.ConfigParser()
+        LOG.info('Reading output sample configuration')
+        parser.read(consts.CONF_SAMPLE_FILE)
+
+        LOG.info('Set dispatcher to influxdb')
+        parser.set('DEFAULT', 'dispatcher', 'influxdb')
+        parser.set('dispatcher_influxdb', 'target',
+                   'http://%s:8086' % consts.INFLUXDB_IP)
+
+        LOG.info('Writing to %s', consts.CONF_FILE)
+        with open(consts.CONF_FILE, 'w') as f:
+            parser.write(f)
+
+    def prepare_env(self, args):
+        task_id = str(uuid.uuid4())
+
+        thread = threading.Thread(target=self._prepare_env_daemon,
+                                  args=(task_id,))
+        thread.start()
+
+        return result_handler(consts.API_SUCCESS, {'task_id': task_id})
+
+    def _already_source_openrc(self):
+        """Check if openrc is sourced already"""
+        return all(os.environ.get(k) for k in ['OS_AUTH_URL',
+                                               'OS_USERNAME',
+                                               'OS_PASSWORD',
+                                               'EXTERNAL_NETWORK'])
+
+    def _prepare_env_daemon(self, task_id):
+        self._create_task(task_id)
+
+        try:
+            self._create_directories()
+
+            rc_file = consts.OPENRC
+
+            LOG.info('Checking OpenRC environment variables')
+            if not self._already_source_openrc():
+                LOG.info('OpenRC variables not found in environment')
+                if not os.path.exists(rc_file):
+                    LOG.info('Openrc file not found')
+                    installer_ip = os.environ.get('INSTALLER_IP',
+                                                  '192.168.200.2')
+                    installer_type = os.environ.get('INSTALLER_TYPE', 'compass')
+                    LOG.info('Getting openrc file from %s', installer_type)
+                    self._get_remote_rc_file(rc_file,
+                                             installer_ip,
+                                             installer_type)
+                    LOG.info('Source openrc file')
+                    self._source_file(rc_file)
+                    LOG.info('Appending external network')
+                    self._append_external_network(rc_file)
+                LOG.info('Openrc file exist, source openrc file')
+                self._source_file(rc_file)
+
+            LOG.info('Cleaning images')
+            self._clean_images()
+
+            LOG.info('Loading images')
+            self._load_images()
+
+            self._update_task_status(task_id)
+            LOG.info('Finished')
+        except Exception as e:
+            self._update_task_error(task_id, str(e))
+            LOG.exception('Prepare env failed')
+
+    def _create_directories(self):
+        utils.makedirs(consts.CONF_DIR)
+
+    def _source_file(self, rc_file):
+        utils.source_env(rc_file)
+
+    def _get_remote_rc_file(self, rc_file, installer_ip, installer_type):
+
+        os_fetch_script = os.path.join(consts.RELENG_DIR, consts.FETCH_SCRIPT)
+
+        try:
+            cmd = [os_fetch_script, '-d', rc_file, '-i', installer_type,
+                   '-a', installer_ip]
+            p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
+            p.communicate()
+
+            if p.returncode != 0:
+                LOG.error('Failed to fetch credentials from installer')
+        except OSError as e:
+            if e.errno != errno.EEXIST:
+                raise
+
+    def _append_external_network(self, rc_file):
+        neutron_client = openstack_utils.get_neutron_client()
+        networks = neutron_client.list_networks()['networks']
+        try:
+            ext_network = next(n['name']
+                               for n in networks if n['router:external'])
+        except StopIteration:
+            LOG.warning("Can't find external network")
+        else:
+            cmd = 'export EXTERNAL_NETWORK=%s' % ext_network
+            try:
+                with open(rc_file, 'a') as f:
+                    f.write(cmd + '\n')
+            except OSError as e:
+                if e.errno != errno.EEXIST:
+                    raise
+
+    def _clean_images(self):
+        cmd = [consts.CLEAN_IMAGES_SCRIPT]
+        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, cwd=consts.REPOS_DIR)
+        output = p.communicate()[0]
+        LOG.debug(output)
+
+    def _load_images(self):
+        cmd = [consts.LOAD_IMAGES_SCRIPT]
+        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, cwd=consts.REPOS_DIR)
+        output = p.communicate()[0]
+        LOG.debug(output)
+
+    def _create_task(self, task_id):
+        async_handler.insert({'status': 0, 'task_id': task_id})
+
+    def _update_task_status(self, task_id):
+        async_handler.update_attr(task_id, {'status': 1})
+
+    def _update_task_error(self, task_id, error):
+        async_handler.update_attr(task_id, {'status': 2, 'error': error})
+
+    def update_openrc(self, args):
+        try:
+            openrc_vars = args['openrc']
+        except KeyError:
+            return result_handler(consts.API_ERROR, 'openrc must be provided')
+        else:
+            if not isinstance(openrc_vars, collections.Mapping):
+                return result_handler(consts.API_ERROR, 'args should be a dict')
+
+        lines = ['export {}={}\n'.format(k, v) for k, v in openrc_vars.items()]
+        LOG.debug('Writing: %s', ''.join(lines))
+
+        LOG.info('Writing openrc: Writing')
+        utils.makedirs(consts.CONF_DIR)
+
+        with open(consts.OPENRC, 'w') as f:
+            f.writelines(lines)
+        LOG.info('Writing openrc: Done')
+
+        LOG.info('Source openrc: Sourcing')
+        try:
+            self._source_file(consts.OPENRC)
+        except Exception as e:
+            LOG.exception('Failed to source openrc')
+            return result_handler(consts.API_ERROR, str(e))
+        LOG.info('Source openrc: Done')
+
+        return result_handler(consts.API_SUCCESS, {'openrc': openrc_vars})
+
+    def upload_pod_file(self, args):
+        try:
+            pod_file = args['file']
+        except KeyError:
+            return result_handler(consts.API_ERROR, 'file must be provided')
+
+        LOG.info('Checking file')
+        data = yaml.safe_load(pod_file.read())
+        if not isinstance(data, collections.Mapping):
+            return result_handler(consts.API_ERROR, 'invalid yaml file')
+
+        LOG.info('Writing file')
+        with open(consts.POD_FILE, 'w') as f:
+            yaml.dump(data, f, default_flow_style=False)
+        LOG.info('Writing finished')
+
+        return result_handler(consts.API_SUCCESS, {'pod_info': data})
+
+    def update_pod_file(self, args):
+        try:
+            pod_dic = args['pod']
+        except KeyError:
+            return result_handler(consts.API_ERROR, 'pod must be provided')
+        else:
+            if not isinstance(pod_dic, collections.Mapping):
+                return result_handler(consts.API_ERROR, 'pod should be a dict')
+
+        LOG.info('Writing file')
+        with open(consts.POD_FILE, 'w') as f:
+            yaml.dump(pod_dic, f, default_flow_style=False)
+        LOG.info('Writing finished')
+
+        return result_handler(consts.API_SUCCESS, {'pod_info': pod_dic})
+
+    def update_hosts(self, hosts_ip):
+        if not isinstance(hosts_ip, collections.Mapping):
+            return result_handler(consts.API_ERROR, 'args should be a dict')
+        LOG.info('Writing hosts: Writing')
+        LOG.debug('Writing: %s', hosts_ip)
+        cmd = ["sudo", "python", "write_hosts.py"]
+        p = subprocess.Popen(cmd,
+                             stdin=subprocess.PIPE,
+                             stdout=subprocess.PIPE,
+                             stderr=subprocess.PIPE,
+                             cwd=os.path.join(consts.REPOS_DIR,
+                                              "api/resources"))
+        _, err = p.communicate(jsonutils.dumps(hosts_ip))
+        if p.returncode != 0:
+            return result_handler(consts.API_ERROR, err)
+        LOG.info('Writing hosts: Done')
+        return result_handler(consts.API_SUCCESS, 'success')
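
A rough client-side sketch of driving the env actions above. The route and the
{'action': ..., 'args': ...} envelope are assumptions inferred from the
_dispatch_post pattern; the real URL mapping lives in api/urls.py (not shown):

    import time
    import requests  # third-party HTTP client, assumed available

    BASE = 'http://localhost:8888'            # assumed API host/port
    ENV_URL = BASE + '/yardstick/env/action'  # assumed route

    # prepare_env returns immediately; the work happens in a daemon thread
    resp = requests.post(ENV_URL, json={'action': 'prepare_env', 'args': {}})
    task_id = resp.json()['result']['task_id']  # assumed response envelope

    # poll the async-task record until status leaves 0 (1 = done, 2 = error)
    while True:
        r = requests.get(BASE + '/yardstick/asynctask',   # assumed route
                         params={'task_id': task_id})
        if r.json().get('status') != 0:
            break
        time.sleep(5)
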
diff --git a/api/resources/v1/results.py b/api/resources/v1/results.py
new file mode 100644 (file)
index 0000000..0493b43
--- /dev/null
@@ -0,0 +1,78 @@
+##############################################################################
+# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+from __future__ import absolute_import
+import logging
+import uuid
+import json
+import os
+
+from flasgger.utils import swag_from
+
+from api import ApiResource
+from api.database.v1.handlers import TasksHandler
+from yardstick.common import constants as consts
+from yardstick.common.utils import result_handler
+from api.swagger import models
+
+LOG = logging.getLogger(__name__)
+LOG.setLevel(logging.DEBUG)
+
+
+ResultModel = models.ResultModel
+
+
+class V1Result(ApiResource):
+
+    @swag_from(os.path.join(consts.REPOS_DIR, 'api/swagger/docs/results.yaml'))
+    def get(self):
+        args = self._get_args()
+
+        try:
+            task_id = args['task_id']
+        except KeyError:
+            return result_handler(consts.API_ERROR, 'task_id must be provided')
+
+        try:
+            uuid.UUID(task_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'invalid task_id')
+
+        task_handler = TasksHandler()
+        try:
+            task = task_handler.get_task_by_taskid(task_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'invalid task_id')
+
+        def _unfinished():
+            return result_handler(consts.TASK_NOT_DONE, {})
+
+        def _finished():
+            if task.result:
+                return result_handler(consts.TASK_DONE, json.loads(task.result))
+            else:
+                return result_handler(consts.TASK_DONE, {})
+
+        def _error():
+            return result_handler(consts.TASK_FAILED, task.error)
+
+        status = task.status
+        LOG.debug('Task status is: %s', status)
+
+        if status not in [consts.TASK_NOT_DONE,
+                          consts.TASK_DONE,
+                          consts.TASK_FAILED]:
+            return result_handler(consts.API_ERROR, 'internal server error')
+
+        switcher = {
+            consts.TASK_NOT_DONE: _unfinished,
+            consts.TASK_DONE: _finished,
+            consts.TASK_FAILED: _error
+        }
+
+        return switcher.get(status)()
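
The status switch above maps directly onto a polling client. A minimal sketch,
with the route assumed (see api/urls.py) but the task_id query parameter and
the TASK_NOT_DONE/TASK_DONE/TASK_FAILED semantics taken from the handler:

    import time
    import requests

    BASE = 'http://localhost:8888'  # assumed API host/port

    def wait_for_result(task_id, interval=10):
        """Poll V1Result until the task finishes or fails."""
        while True:
            r = requests.get(BASE + '/yardstick/results',  # assumed route
                             params={'task_id': task_id})
            body = r.json()
            if body.get('status') != 0:  # assumed: 0 mirrors TASK_NOT_DONE
                return body
            time.sleep(interval)
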
diff --git a/api/resources/v1/testcases.py b/api/resources/v1/testcases.py
new file mode 100644 (file)
index 0000000..f159472
--- /dev/null
@@ -0,0 +1,115 @@
+# ############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+# ############################################################################
+
+from __future__ import absolute_import
+import uuid
+import os
+import logging
+
+from flasgger.utils import swag_from
+
+from yardstick.benchmark.core.testcase import Testcase
+from yardstick.benchmark.core.task import Task
+from yardstick.benchmark.core import Param
+from yardstick.common import constants as consts
+from yardstick.common.utils import result_handler
+from api.utils.thread import TaskThread
+from api import ApiResource
+from api.swagger import models
+from api.database.v1.handlers import TasksHandler
+
+LOG = logging.getLogger(__name__)
+LOG.setLevel(logging.DEBUG)
+
+
+class V1Testcase(ApiResource):
+
+    def get(self):
+        param = Param({})
+        testcase_list = Testcase().list_all(param)
+        return result_handler(consts.API_SUCCESS, testcase_list)
+
+
+class V1CaseDocs(ApiResource):
+
+    def get(self, case_name):
+        docs_path = os.path.join(consts.DOCS_DIR, '{}.rst'.format(case_name))
+
+        if not os.path.exists(docs_path):
+            return result_handler(consts.API_ERROR, 'case does not exist')
+
+        LOG.info('Reading %s', case_name)
+        with open(docs_path) as f:
+            content = f.read()
+
+        return result_handler(consts.API_SUCCESS, {'docs': content})
+
+
+TestCaseActionModel = models.TestCaseActionModel
+TestCaseActionArgsModel = models.TestCaseActionArgsModel
+TestCaseActionArgsOptsModel = models.TestCaseActionArgsOptsModel
+TestCaseActionArgsOptsTaskArgModel = models.TestCaseActionArgsOptsTaskArgModel
+
+
+class V1ReleaseCase(ApiResource):
+
+    @swag_from(os.path.join(consts.REPOS_DIR,
+                            'api/swagger/docs/release_action.yaml'))
+    def post(self):
+        return self._dispatch_post()
+
+    def run_test_case(self, args):
+        try:
+            name = args['testcase']
+        except KeyError:
+            return result_handler(consts.API_ERROR, 'testcase must be provided')
+
+        testcase = os.path.join(consts.TESTCASE_DIR, '{}.yaml'.format(name))
+
+        task_id = str(uuid.uuid4())
+
+        task_args = {
+            'inputfile': [testcase],
+            'task_id': task_id
+        }
+        task_args.update(args.get('opts', {}))
+
+        param = Param(task_args)
+        task_thread = TaskThread(Task().start, param, TasksHandler())
+        task_thread.start()
+
+        return result_handler(consts.API_SUCCESS, {'task_id': task_id})
+
+
+class V1SampleCase(ApiResource):
+
+    def post(self):
+        return self._dispatch_post()
+
+    def run_test_case(self, args):
+        try:
+            name = args['testcase']
+        except KeyError:
+            return result_handler(consts.API_ERROR, 'testcase must be provided')
+
+        testcase = os.path.join(consts.SAMPLE_CASE_DIR, '{}.yaml'.format(name))
+
+        task_id = str(uuid.uuid4())
+
+        task_args = {
+            'inputfile': [testcase],
+            'task_id': task_id
+        }
+        task_args.update(args.get('opts', {}))
+
+        param = Param(task_args)
+        task_thread = TaskThread(Task().start, param, TasksHandler())
+        task_thread.start()
+
+        return result_handler(consts.API_SUCCESS, {'task_id': task_id})
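
Both action classes above expect a POST whose body names the action and its
args. A sketch; the routes are assumptions, since api/urls.py holds the real
mapping:

    import requests

    BASE = 'http://localhost:8888'  # assumed API host/port

    # run a sample case; 'opts' is merged into the task args by run_test_case
    payload = {'action': 'run_test_case',
               'args': {'testcase': 'ping', 'opts': {}}}
    resp = requests.post(BASE + '/yardstick/samplecases/action',  # assumed route
                         json=payload)
    print(resp.json())  # should carry the generated task_id
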
diff --git a/api/resources/v1/testsuites.py b/api/resources/v1/testsuites.py
new file mode 100644 (file)
index 0000000..5f72c2e
--- /dev/null
@@ -0,0 +1,64 @@
+##############################################################################
+# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+from __future__ import absolute_import
+import uuid
+import os
+import logging
+
+from flasgger.utils import swag_from
+
+from api import ApiResource
+from api.utils.thread import TaskThread
+from yardstick.common import constants as consts
+from yardstick.common.utils import result_handler
+from yardstick.benchmark.core import Param
+from yardstick.benchmark.core.task import Task
+from api.swagger import models
+from api.database.v1.handlers import TasksHandler
+
+LOG = logging.getLogger(__name__)
+LOG.setLevel(logging.DEBUG)
+
+
+TestSuiteActionModel = models.TestSuiteActionModel
+TestSuiteActionArgsModel = models.TestSuiteActionArgsModel
+TestSuiteActionArgsOptsModel = models.TestSuiteActionArgsOptsModel
+TestSuiteActionArgsOptsTaskArgModel = \
+    models.TestSuiteActionArgsOptsTaskArgModel
+
+
+class V1Testsuite(ApiResource):
+
+    @swag_from(os.path.join(consts.REPOS_DIR,
+                            'api/swagger/docs/testsuites_action.yaml'))
+    def post(self):
+        return self._dispatch_post()
+
+    def run_test_suite(self, args):
+        try:
+            name = args['testsuite']
+        except KeyError:
+            return result_handler(consts.API_ERROR,
+                                  'testsuite must be provided')
+
+        testsuite = os.path.join(consts.TESTSUITE_DIR, '{}.yaml'.format(name))
+
+        task_id = str(uuid.uuid4())
+
+        task_args = {
+            'inputfile': [testsuite],
+            'task_id': task_id,
+            'suite': True
+        }
+        task_args.update(args.get('opts', {}))
+
+        param = Param(task_args)
+        # pass TasksHandler so the thread can record status, as v1/testcases.py does
+        task_thread = TaskThread(Task().start, param, TasksHandler())
+        task_thread.start()
+
+        return result_handler(consts.API_SUCCESS, {'task_id': task_id})
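
A matching sketch for the suite runner; route and response envelope again
assumed, and the suite name purely hypothetical:

    import requests

    BASE = 'http://localhost:8888'  # assumed API host/port

    payload = {'action': 'run_test_suite',
               'args': {'testsuite': 'opnfv_smoke'}}  # hypothetical suite name
    resp = requests.post(BASE + '/yardstick/testsuites/action',  # assumed route
                         json=payload)
    task_id = resp.json()['result']['task_id']  # assumed response envelope
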
diff --git a/api/resources/v2/__init__.py b/api/resources/v2/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/api/resources/v2/containers.py b/api/resources/v2/containers.py
new file mode 100644 (file)
index 0000000..ce71303
--- /dev/null
@@ -0,0 +1,383 @@
+##############################################################################
+# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+from __future__ import absolute_import
+
+import logging
+import threading
+import time
+import uuid
+import os
+import glob
+
+from six.moves import configparser
+from oslo_serialization import jsonutils
+from docker import Client
+
+from api import ApiResource
+from api.utils import influx
+from api.database.v2.handlers import V2ContainerHandler
+from api.database.v2.handlers import V2EnvironmentHandler
+from yardstick.common import constants as consts
+from yardstick.common import utils
+from yardstick.common.utils import result_handler
+from yardstick.common.utils import get_free_port
+from yardstick.common.httpClient import HttpClient
+
+
+LOG = logging.getLogger(__name__)
+LOG.setLevel(logging.DEBUG)
+
+environment_handler = V2EnvironmentHandler()
+container_handler = V2ContainerHandler()
+
+
+class V2Containers(ApiResource):
+
+    def post(self):
+        return self._dispatch_post()
+
+    def create_influxdb(self, args):
+        try:
+            environment_id = args['environment_id']
+        except KeyError:
+            return result_handler(consts.API_ERROR, 'environment_id must be provided')
+
+        try:
+            uuid.UUID(environment_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'invalid environment id')
+
+        try:
+            environment = environment_handler.get_by_uuid(environment_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'no such environment id')
+
+        container_info = environment.container_id
+        container_info = jsonutils.loads(container_info) if container_info else {}
+
+        if container_info.get('influxdb'):
+            return result_handler(consts.API_ERROR, 'influxdb container already exists')
+
+        name = 'influxdb-{}'.format(environment_id[:8])
+        port = get_free_port(consts.SERVER_IP)
+        container_id = str(uuid.uuid4())
+        LOG.info('%s will launch on port %s', name, port)
+
+        LOG.info('launching influxdb container in the background')
+        args = (name, port, container_id)
+        thread = threading.Thread(target=self._create_influxdb, args=args)
+        thread.start()
+
+        LOG.info('record container in database')
+        container_init_data = {
+            'uuid': container_id,
+            'environment_id': environment_id,
+            'name': name,
+            'port': port,
+            'status': 0
+        }
+        container_handler.insert(container_init_data)
+
+        LOG.info('update container in environment')
+        container_info['influxdb'] = container_id
+        environment_info = {'container_id': jsonutils.dumps(container_info)}
+        environment_handler.update_attr(environment_id, environment_info)
+
+        return result_handler(consts.API_SUCCESS, {'uuid': container_id})
+
+    def _check_image_exist(self, client, t):
+        return any(t in a['RepoTags'][0]
+                   for a in client.images() if a['RepoTags'])
+
+    def _create_influxdb(self, name, port, container_id):
+        client = Client(base_url=consts.DOCKER_URL)
+
+        try:
+            LOG.info('Checking if influxdb image exists')
+            if not self._check_image_exist(client, '%s:%s' %
+                                           (consts.INFLUXDB_IMAGE,
+                                            consts.INFLUXDB_TAG)):
+                LOG.info('Influxdb image does not exist, pulling it')
+                client.pull(consts.INFLUXDB_IMAGE, tag=consts.INFLUXDB_TAG)
+
+            LOG.info('Creating influxdb container')
+            container = self._create_influxdb_container(client, name, port)
+            LOG.info('Influxdb container is created')
+
+            time.sleep(5)
+
+            container = client.inspect_container(container['Id'])
+            ip = container['NetworkSettings']['Networks']['bridge']['IPAddress']
+            LOG.debug('container ip is: %s', ip)
+
+            LOG.info('Changing output to influxdb')
+            self._change_output_to_influxdb(ip)
+
+            LOG.info('Config influxdb')
+            self._config_influxdb()
+
+            container_handler.update_attr(container_id, {'status': 1})
+
+            LOG.info('Finished')
+        except Exception:
+            container_handler.update_attr(container_id, {'status': 2})
+            LOG.exception('Creating influxdb failed')
+
+    def _create_influxdb_container(self, client, name, port):
+
+        ports = [port]
+        port_bindings = {8086: port}
+        restart_policy = {"MaximumRetryCount": 0, "Name": "always"}
+        host_config = client.create_host_config(port_bindings=port_bindings,
+                                                restart_policy=restart_policy)
+
+        LOG.info('Creating container')
+        container = client.create_container(image='%s:%s' %
+                                            (consts.INFLUXDB_IMAGE,
+                                             consts.INFLUXDB_TAG),
+                                            ports=ports,
+                                            name=name,
+                                            detach=True,
+                                            tty=True,
+                                            host_config=host_config)
+        LOG.info('Starting container')
+        client.start(container)
+        return container
+
+    def _config_influxdb(self):
+        try:
+            client = influx.get_data_db_client()
+            client.create_user(consts.INFLUXDB_USER,
+                               consts.INFLUXDB_PASS,
+                               consts.INFLUXDB_DB_NAME)
+            client.create_database(consts.INFLUXDB_DB_NAME)
+            LOG.info('Successfully configured influxDB')
+        except Exception:
+            LOG.exception('Config influxdb failed')
+
+    def _change_output_to_influxdb(self, ip):
+        utils.makedirs(consts.CONF_DIR)
+
+        parser = configparser.ConfigParser()
+        LOG.info('Reading output sample configuration')
+        parser.read(consts.CONF_SAMPLE_FILE)
+
+        LOG.info('Set dispatcher to influxdb')
+        parser.set('DEFAULT', 'dispatcher', 'influxdb')
+        parser.set('dispatcher_influxdb', 'target',
+                   'http://{}:{}'.format(ip, 8086))
+
+        LOG.info('Writing to %s', consts.CONF_FILE)
+        with open(consts.CONF_FILE, 'w') as f:
+            parser.write(f)
+
+    def create_grafana(self, args):
+        try:
+            environment_id = args['environment_id']
+        except KeyError:
+            return result_handler(consts.API_ERROR, 'environment_id must be provided')
+
+        try:
+            uuid.UUID(environment_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'invalid environment id')
+
+        try:
+            environment = environment_handler.get_by_uuid(environment_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'no such environment id')
+
+        container_info = environment.container_id
+        container_info = jsonutils.loads(container_info) if container_info else {}
+
+        if not container_info.get('influxdb'):
+            return result_handler(consts.API_ERROR, 'influxdb not set')
+
+        if container_info.get('grafana'):
+            return result_handler(consts.API_ERROR, 'grafana container already exists')
+
+        name = 'grafana-{}'.format(environment_id[:8])
+        port = get_free_port(consts.SERVER_IP)
+        container_id = str(uuid.uuid4())
+
+        args = (name, port, container_id)
+        thread = threading.Thread(target=self._create_grafana, args=args)
+        thread.start()
+
+        container_init_data = {
+            'uuid': container_id,
+            'environment_id': environment_id,
+            'name': name,
+            'port': port,
+            'status': 0
+        }
+        container_handler.insert(container_init_data)
+
+        container_info['grafana'] = container_id
+        environment_info = {'container_id': jsonutils.dumps(container_info)}
+        environment_handler.update_attr(environment_id, environment_info)
+
+        return result_handler(consts.API_SUCCESS, {'uuid': container_id})
+
+    def _create_grafana(self, name, port, container_id):
+        client = Client(base_url=consts.DOCKER_URL)
+
+        try:
+            LOG.info('Checking if grafana image exists')
+            image = '{}:{}'.format(consts.GRAFANA_IMAGE, consts.GRAFANA_TAG)
+            if not self._check_image_exist(client, image):
+                LOG.info('Grafana image does not exist, pulling it')
+                client.pull(consts.GRAFANA_IMAGE, consts.GRAFANA_TAG)
+
+            LOG.info('Creating grafana container')
+            container = self._create_grafana_container(client, name, port)
+            LOG.info('Grafana container is created')
+
+            time.sleep(5)
+
+            container = client.inspect_container(container['Id'])
+            ip = container['NetworkSettings']['Networks']['bridge']['IPAddress']
+            LOG.debug('container ip is: %s', ip)
+
+            LOG.info('Creating data source for grafana')
+            self._create_data_source(ip)
+
+            LOG.info('Creating dashboard for grafana')
+            self._create_dashboard(ip)
+
+            container_handler.update_attr(container_id, {'status': 1})
+            LOG.info('Finished')
+        except Exception:
+            container_handler.update_attr(container_id, {'status': 2})
+            LOG.exception('Create grafana failed')
+
+    def _create_dashboard(self, ip):
+        url = 'http://admin:admin@{}:{}/api/dashboards/db'.format(ip, 3000)
+        path = os.path.join(consts.REPOS_DIR, 'dashboard', '*dashboard.json')
+
+        for i in sorted(glob.iglob(path)):
+            with open(i) as f:
+                data = jsonutils.load(f)
+            try:
+                HttpClient().post(url, data)
+            except Exception:
+                LOG.exception('Create dashboard %s failed', i)
+                raise
+
+    def _create_data_source(self, ip):
+        url = 'http://admin:admin@{}:{}/api/datasources'.format(ip, 3000)
+
+        influx_conf = utils.parse_ini_file(consts.CONF_FILE)
+        try:
+            influx_url = influx_conf['dispatcher_influxdb']['target']
+        except KeyError:
+            LOG.exception('influxdb url not set in yardstick.conf')
+            raise
+
+        data = {
+            "name": "yardstick",
+            "type": "influxdb",
+            "access": "proxy",
+            "url": influx_url,
+            "password": "root",
+            "user": "root",
+            "database": "yardstick",
+            "basicAuth": True,
+            "basicAuthUser": "admin",
+            "basicAuthPassword": "admin",
+            "isDefault": False,
+        }
+        try:
+            HttpClient().post(url, data)
+        except Exception:
+            LOG.exception('Create datasources failed')
+            raise
+
+    def _create_grafana_container(self, client, name, port):
+        ports = [3000]
+        port_bindings = {3000: port}
+        restart_policy = {"MaximumRetryCount": 0, "Name": "always"}
+        host_config = client.create_host_config(port_bindings=port_bindings,
+                                                restart_policy=restart_policy)
+
+        LOG.info('Creating container')
+        container = client.create_container(image='%s:%s' %
+                                            (consts.GRAFANA_IMAGE,
+                                             consts.GRAFANA_TAG),
+                                            name=name,
+                                            ports=ports,
+                                            detach=True,
+                                            tty=True,
+                                            host_config=host_config)
+        LOG.info('Starting container')
+        client.start(container)
+        return container
+
+
+class V2Container(ApiResource):
+
+    def get(self, container_id):
+        try:
+            uuid.UUID(container_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'invalid container id')
+
+        try:
+            container = container_handler.get_by_uuid(container_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'no such container id')
+
+        name = container.name
+        client = Client(base_url=consts.DOCKER_URL)
+        info = client.inspect_container(name)
+
+        data = {
+            'name': name,
+            'status': info.get('State', {}).get('Status', 'error'),
+            'time': info.get('Created'),
+            'port': container.port
+        }
+
+        return result_handler(consts.API_SUCCESS, {'container': data})
+
+    def delete(self, container_id):
+        try:
+            uuid.UUID(container_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'invalid container id')
+
+        try:
+            container = container_handler.get_by_uuid(container_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'no such container id')
+
+        environment_id = container.environment_id
+
+        client = Client(base_url=consts.DOCKER_URL)
+        LOG.info('delete container: %s', container.name)
+        try:
+            client.remove_container(container.name, force=True)
+        except Exception:
+            LOG.exception('delete container failed')
+            return result_handler(consts.API_ERROR, 'delete container failed')
+
+        LOG.info('delete container in database')
+        container_handler.delete_by_uuid(container_id)
+
+        LOG.info('update container in environment')
+        environment = environment_handler.get_by_uuid(environment_id)
+        container_info = jsonutils.loads(environment.container_id)
+        key = next((k for k, v in container_info.items() if v == container_id))
+        container_info.pop(key)
+        environment_delete_data = {
+            'container_id': jsonutils.dumps(container_info)
+        }
+        environment_handler.update_attr(environment_id, environment_delete_data)
+
+        return result_handler(consts.API_SUCCESS, {'container': container_id})
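
Since create_grafana refuses to run until an influxdb container is recorded in
the environment, a client has to create them in order. A sketch, with the v2
route assumed:

    import requests

    BASE = 'http://localhost:8888'               # assumed API host/port
    URL = BASE + '/api/v2/yardstick/containers'  # assumed route
    env_id = 'an-existing-environment-uuid'      # from create_environment

    for action in ('create_influxdb', 'create_grafana'):
        resp = requests.post(URL, json={'action': action,
                                        'args': {'environment_id': env_id}})
        print(action, resp.json())  # each returns the new container uuid
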
diff --git a/api/resources/v2/environments.py b/api/resources/v2/environments.py
new file mode 100644 (file)
index 0000000..e4679b0
--- /dev/null
@@ -0,0 +1,117 @@
+import uuid
+import logging
+
+from oslo_serialization import jsonutils
+from docker import Client
+
+from api import ApiResource
+from api.database.v2.handlers import V2EnvironmentHandler
+from api.database.v2.handlers import V2OpenrcHandler
+from api.database.v2.handlers import V2PodHandler
+from api.database.v2.handlers import V2ContainerHandler
+from yardstick.common.utils import result_handler
+from yardstick.common.utils import change_obj_to_dict
+from yardstick.common import constants as consts
+
+LOG = logging.getLogger(__name__)
+LOG.setLevel(logging.DEBUG)
+
+
+class V2Environments(ApiResource):
+
+    def get(self):
+        environment_handler = V2EnvironmentHandler()
+        environments = [change_obj_to_dict(e) for e in environment_handler.list_all()]
+
+        for e in environments:
+            container_info = e['container_id']
+            e['container_id'] = jsonutils.loads(container_info) if container_info else {}
+
+        data = {
+            'environments': environments
+        }
+
+        return result_handler(consts.API_SUCCESS, data)
+
+    def post(self):
+        return self._dispatch_post()
+
+    def create_environment(self, args):
+        try:
+            name = args['name']
+        except KeyError:
+            return result_handler(consts.API_ERROR, 'name must be provided')
+
+        env_id = str(uuid.uuid4())
+
+        environment_handler = V2EnvironmentHandler()
+
+        env_init_data = {
+            'name': name,
+            'uuid': env_id
+        }
+        environment_handler.insert(env_init_data)
+
+        return result_handler(consts.API_SUCCESS, {'uuid': env_id})
+
+
+class V2Environment(ApiResource):
+
+    def get(self, environment_id):
+        try:
+            uuid.UUID(environment_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'invalid environment id')
+
+        environment_handler = V2EnvironmentHandler()
+        try:
+            environment = environment_handler.get_by_uuid(environment_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'no such environment id')
+
+        environment = change_obj_to_dict(environment)
+        container_id = environment['container_id']
+        environment['container_id'] = jsonutils.loads(container_id) if container_id else {}
+        return result_handler(consts.API_SUCCESS, {'environment': environment})
+
+    def delete(self, environment_id):
+        try:
+            uuid.UUID(environment_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'invalid environment id')
+
+        environment_handler = V2EnvironmentHandler()
+        try:
+            environment = environment_handler.get_by_uuid(environment_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'no such environment id')
+
+        if environment.openrc_id:
+            LOG.info('delete openrc: %s', environment.openrc_id)
+            openrc_handler = V2OpenrcHandler()
+            openrc_handler.delete_by_uuid(environment.openrc_id)
+
+        if environment.pod_id:
+            LOG.info('delete pod: %s', environment.pod_id)
+            pod_handler = V2PodHandler()
+            pod_handler.delete_by_uuid(environment.pod_id)
+
+        if environment.container_id:
+            LOG.info('delete containers')
+            container_info = jsonutils.loads(environment.container_id)
+
+            container_handler = V2ContainerHandler()
+            client = Client(base_url=consts.DOCKER_URL)
+            for k, v in container_info.items():
+                LOG.info('start delete: %s', k)
+                container = container_handler.get_by_uuid(v)
+                LOG.debug('container name: %s', container.name)
+                try:
+                    client.remove_container(container.name, force=True)
+                except Exception:
+                    LOG.exception('remove container failed')
+                container_handler.delete_by_uuid(v)
+
+        environment_handler.delete_by_uuid(environment_id)
+
+        return result_handler(consts.API_SUCCESS, {'environment': environment_id})
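
A sketch of the environment lifecycle from the client side; route and response
envelope assumed:

    import requests

    BASE = 'http://localhost:8888'                 # assumed API host/port
    URL = BASE + '/api/v2/yardstick/environments'  # assumed route

    resp = requests.post(URL, json={'action': 'create_environment',
                                    'args': {'name': 'demo-env'}})
    env_id = resp.json()['result']['uuid']  # assumed response envelope

    print(requests.get(URL).json())                           # list all
    print(requests.get('{}/{}'.format(URL, env_id)).json())   # single record
    requests.delete('{}/{}'.format(URL, env_id))  # cascades to openrc/pod/containers
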
diff --git a/api/resources/v2/images.py b/api/resources/v2/images.py
new file mode 100644 (file)
index 0000000..7018184
--- /dev/null
@@ -0,0 +1,72 @@
+import logging
+import subprocess
+import threading
+
+from api import ApiResource
+from yardstick.common.utils import result_handler
+from yardstick.common.utils import source_env
+from yardstick.common.utils import change_obj_to_dict
+from yardstick.common.openstack_utils import get_nova_client
+from yardstick.common import constants as consts
+
+LOG = logging.getLogger(__name__)
+LOG.setLevel(logging.DEBUG)
+
+
+class V2Images(ApiResource):
+
+    def get(self):
+        try:
+            source_env(consts.OPENRC)
+        except Exception:
+            return result_handler(consts.API_ERROR, 'source openrc error')
+
+        nova_client = get_nova_client()
+        try:
+            images_list = nova_client.images.list()
+        except Exception:
+            return result_handler(consts.API_ERROR, 'get images error')
+        else:
+            images = [self.get_info(change_obj_to_dict(i)) for i in images_list]
+            status = 1 if all(i['status'] == 'ACTIVE' for i in images) else 0
+
+        return result_handler(consts.API_SUCCESS, {'status': status, 'images': images})
+
+    def post(self):
+        return self._dispatch_post()
+
+    def get_info(self, data):
+        result = {
+            'name': data.get('name', ''),
+            'size': data.get('OS-EXT-IMG-SIZE:size', ''),
+            'status': data.get('status', ''),
+            'time': data.get('updated', '')
+        }
+        return result
+
+    def load_image(self, args):
+        thread = threading.Thread(target=self._load_images)
+        thread.start()
+        return result_handler(consts.API_SUCCESS, {})
+
+    def _load_images(self):
+        LOG.info('source openrc')
+        source_env(consts.OPENRC)
+
+        LOG.info('clean images')
+        cmd = [consts.CLEAN_IMAGES_SCRIPT]
+        p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+                             cwd=consts.REPOS_DIR)
+        _, err = p.communicate()
+        if p.returncode != 0:
+            LOG.error('clean image failed: %s', err)
+
+        LOG.info('load images')
+        cmd = [consts.LOAD_IMAGES_SCRIPT]
+        p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+                             cwd=consts.REPOS_DIR)
+        _, err = p.communicate()
+        if p.returncode != 0:
+            LOG.error('load image failed: %s', err)
+
+        LOG.info('Done')
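
load_image only fires the clean/load scripts in a background thread, so the
GET is how a client observes progress. A sketch with the route assumed:

    import requests

    BASE = 'http://localhost:8888'           # assumed API host/port
    URL = BASE + '/api/v2/yardstick/images'  # assumed route

    requests.post(URL, json={'action': 'load_image', 'args': {}})

    # the handler reports status 1 only once every image is ACTIVE
    print(requests.get(URL).json())
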
diff --git a/api/resources/v2/openrcs.py b/api/resources/v2/openrcs.py
new file mode 100644 (file)
index 0000000..5f3b938
--- /dev/null
@@ -0,0 +1,211 @@
+import uuid
+import logging
+import re
+import os
+
+import yaml
+from oslo_serialization import jsonutils
+
+from api import ApiResource
+from api.database.v2.handlers import V2OpenrcHandler
+from api.database.v2.handlers import V2EnvironmentHandler
+from yardstick.common import constants as consts
+from yardstick.common.utils import result_handler
+from yardstick.common.utils import makedirs
+from yardstick.common.utils import source_env
+
+LOG = logging.getLogger(__name__)
+LOG.setLevel(logging.DEBUG)
+
+
+class V2Openrcs(ApiResource):
+
+    def post(self):
+        return self._dispatch_post()
+
+    def upload_openrc(self, args):
+        try:
+            upload_file = args['file']
+        except KeyError:
+            return result_handler(consts.API_ERROR, 'file must be provided')
+
+        try:
+            environment_id = args['environment_id']
+        except KeyError:
+            return result_handler(consts.API_ERROR, 'environment_id must be provided')
+
+        try:
+            uuid.UUID(environment_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'invalid environment id')
+
+        LOG.info('writing openrc: %s', consts.OPENRC)
+        makedirs(consts.CONF_DIR)
+        upload_file.save(consts.OPENRC)
+        source_env(consts.OPENRC)
+
+        LOG.info('parsing openrc')
+        try:
+            openrc_data = self._get_openrc_dict()
+        except Exception:
+            LOG.exception('parse openrc failed')
+            return result_handler(consts.API_ERROR, 'parse openrc failed')
+
+        openrc_id = str(uuid.uuid4())
+        self._write_into_database(environment_id, openrc_id, openrc_data)
+
+        LOG.info('writing ansible cloud conf')
+        try:
+            self._generate_ansible_conf_file(openrc_data)
+        except Exception:
+            LOG.exception('write cloud conf failed')
+            return result_handler(consts.API_ERROR, 'generate ansible conf failed')
+        LOG.info('finish writing ansible cloud conf')
+
+        return result_handler(consts.API_SUCCESS, {'openrc': openrc_data, 'uuid': openrc_id})
+
+    def update_openrc(self, args):
+        try:
+            openrc_vars = args['openrc']
+        except KeyError:
+            return result_handler(consts.API_ERROR, 'openrc must be provided')
+
+        try:
+            environment_id = args['environment_id']
+        except KeyError:
+            return result_handler(consts.API_ERROR, 'environment_id must be provided')
+
+        try:
+            uuid.UUID(environment_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'invalid environment id')
+
+        LOG.info('writing openrc: %s', consts.OPENRC)
+        makedirs(consts.CONF_DIR)
+
+        lines = ['export {}={}\n'.format(k, v) for k, v in openrc_vars.items()]
+        LOG.debug('writing: %s', ''.join(lines))
+        with open(consts.OPENRC, 'w') as f:
+            f.writelines(lines)
+        LOG.info('writing openrc: Done')
+
+        LOG.info('source openrc: %s', consts.OPENRC)
+        try:
+            source_env(consts.OPENRC)
+        except Exception:
+            LOG.exception('source openrc failed')
+            return result_handler(consts.API_ERROR, 'source openrc failed')
+        LOG.info('source openrc: Done')
+
+        openrc_id = str(uuid.uuid4())
+        self._write_into_database(environment_id, openrc_id, openrc_vars)
+
+        LOG.info('writing ansible cloud conf')
+        try:
+            self._generate_ansible_conf_file(openrc_vars)
+        except Exception:
+            LOG.exception('write cloud conf failed')
+            return result_handler(consts.API_ERROR, 'generate ansible conf failed')
+        LOG.info('finish writing ansible cloud conf')
+
+        return result_handler(consts.API_SUCCESS, {'openrc': openrc_vars, 'uuid': openrc_id})
+
+    def _write_into_database(self, environment_id, openrc_id, openrc_data):
+        LOG.info('writing openrc to database')
+        openrc_handler = V2OpenrcHandler()
+        openrc_init_data = {
+            'uuid': openrc_id,
+            'environment_id': environment_id,
+            'content': jsonutils.dumps(openrc_data)
+        }
+        openrc_handler.insert(openrc_init_data)
+
+        LOG.info('binding openrc to environment: %s', environment_id)
+        environment_handler = V2EnvironmentHandler()
+        environment_handler.update_attr(environment_id, {'openrc_id': openrc_id})
+
+    def _get_openrc_dict(self):
+        with open(consts.OPENRC) as f:
+            content = f.readlines()
+
+        result = {}
+        for line in content:
+            m = re.search(r'(\ .*)=(.*)', line)
+            if m:
+                try:
+                    value = os.environ[m.group(1).strip()]
+                except KeyError:
+                    pass
+                else:
+                    result.update({m.group(1).strip(): value})
+
+        return result
+
+    def _generate_ansible_conf_file(self, openrc_data):
+        ansible_conf = {
+            'clouds': {
+                'opnfv': {
+                    'auth': {
+                    }
+                }
+            }
+        }
+        black_list = ['OS_IDENTITY_API_VERSION', 'OS_IMAGE_API_VERSION']
+
+        for k, v in openrc_data.items():
+            if k.startswith('OS') and k not in black_list:
+                key = k[3:].lower()
+                ansible_conf['clouds']['opnfv']['auth'][key] = v
+
+        try:
+            value = openrc_data['OS_IDENTITY_API_VERSION']
+        except KeyError:
+            pass
+        else:
+            ansible_conf['clouds']['opnfv']['identity_api_version'] = value
+
+        makedirs(consts.OPENSTACK_CONF_DIR)
+        with open(consts.CLOUDS_CONF, 'w') as f:
+            yaml.dump(ansible_conf, f, default_flow_style=False)
+
+
+class V2Openrc(ApiResource):
+
+    def get(self, openrc_id):
+        try:
+            uuid.UUID(openrc_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'invalid openrc id')
+
+        LOG.info('Getting openrc: %s', openrc_id)
+        openrc_handler = V2OpenrcHandler()
+        try:
+            openrc = openrc_handler.get_by_uuid(openrc_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'no such openrc id')
+
+        LOG.info('load openrc content')
+        content = jsonutils.loads(openrc.content)
+
+        return result_handler(consts.API_SUCCESS, {'openrc': content})
+
+    def delete(self, openrc_id):
+        try:
+            uuid.UUID(openrc_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'invalid openrc id')
+
+        LOG.info('Getting openrc: %s', openrc_id)
+        openrc_handler = V2OpenrcHandler()
+        try:
+            openrc = openrc_handler.get_by_uuid(openrc_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'no such openrc id')
+
+        LOG.info('update openrc in environment')
+        environment_handler = V2EnvironmentHandler()
+        environment_handler.update_attr(openrc.environment_id, {'openrc_id': None})
+
+        openrc_handler.delete_by_uuid(openrc_id)
+
+        return result_handler(consts.API_SUCCESS, {'openrc': openrc_id})
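
update_openrc takes the credentials inline rather than as a file upload. A
sketch, with the route assumed and the credential values purely hypothetical:

    import requests

    BASE = 'http://localhost:8888'            # assumed API host/port
    URL = BASE + '/api/v2/yardstick/openrcs'  # assumed route

    payload = {
        'action': 'update_openrc',
        'args': {
            'environment_id': 'an-existing-environment-uuid',
            'openrc': {  # hypothetical values
                'OS_AUTH_URL': 'http://192.168.10.10:5000/v3',
                'OS_USERNAME': 'admin',
                'OS_PASSWORD': 'secret',
                'OS_PROJECT_NAME': 'admin',
                'EXTERNAL_NETWORK': 'ext-net',
            },
        },
    }
    print(requests.post(URL, json=payload).json())
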
diff --git a/api/resources/v2/pods.py b/api/resources/v2/pods.py
new file mode 100644 (file)
index 0000000..ebc1312
--- /dev/null
@@ -0,0 +1,100 @@
+import uuid
+import yaml
+import logging
+
+from oslo_serialization import jsonutils
+
+from api import ApiResource
+from api.database.v2.handlers import V2PodHandler
+from api.database.v2.handlers import V2EnvironmentHandler
+from yardstick.common import constants as consts
+from yardstick.common.utils import result_handler
+from yardstick.common.task_template import TaskTemplate
+
+LOG = logging.getLogger(__name__)
+LOG.setLevel(logging.DEBUG)
+
+
+class V2Pods(ApiResource):
+
+    def post(self):
+        return self._dispatch_post()
+
+    def upload_pod_file(self, args):
+        try:
+            upload_file = args['file']
+        except KeyError:
+            return result_handler(consts.API_ERROR, 'file must be provided')
+
+        try:
+            environment_id = args['environment_id']
+        except KeyError:
+            return result_handler(consts.API_ERROR, 'environment_id must be provided')
+
+        try:
+            uuid.UUID(environment_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'invalid environment id')
+
+        LOG.info('writing pod file: %s', consts.POD_FILE)
+        upload_file.save(consts.POD_FILE)
+
+        with open(consts.POD_FILE) as f:
+            data = yaml.safe_load(TaskTemplate.render(f.read()))
+        LOG.debug('pod content is: %s', data)
+
+        LOG.info('create pod in database')
+        pod_id = str(uuid.uuid4())
+        pod_handler = V2PodHandler()
+        pod_init_data = {
+            'uuid': pod_id,
+            'environment_id': environment_id,
+            'content': jsonutils.dumps(data)
+        }
+        pod_handler.insert(pod_init_data)
+
+        LOG.info('update pod in environment')
+        environment_handler = V2EnvironmentHandler()
+        environment_handler.update_attr(environment_id, {'pod_id': pod_id})
+
+        return result_handler(consts.API_SUCCESS, {'uuid': pod_id, 'pod': data})
+
+
+class V2Pod(ApiResource):
+
+    def get(self, pod_id):
+        try:
+            uuid.UUID(pod_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'invalid pod id')
+
+        pod_handler = V2PodHandler()
+        try:
+            pod = pod_handler.get_by_uuid(pod_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'no such pod')
+
+        content = jsonutils.loads(pod.content)
+
+        return result_handler(consts.API_SUCCESS, {'pod': content})
+
+    def delete(self, pod_id):
+        try:
+            uuid.UUID(pod_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'invalid pod id')
+
+        pod_handler = V2PodHandler()
+        try:
+            pod = pod_handler.get_by_uuid(pod_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'no such pod')
+
+        LOG.info('update pod in environment')
+        environment_handler = V2EnvironmentHandler()
+        environment_handler.update_attr(pod.environment_id, {'pod_id': None})
+
+        LOG.info('delete pod in database')
+        pod_handler.delete_by_uuid(pod_id)
+
+        return result_handler(consts.API_SUCCESS, {'pod': pod_id})
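
upload_pod_file reads the pod definition from a multipart upload. A sketch;
the route is assumed, and how 'action' and 'environment_id' travel alongside
the file (as form fields here) is also an assumption about _dispatch_post:

    import requests

    BASE = 'http://localhost:8888'         # assumed API host/port
    URL = BASE + '/api/v2/yardstick/pods'  # assumed route

    with open('pod.yaml', 'rb') as f:      # hypothetical local pod file
        resp = requests.post(URL,
                             files={'file': f},
                             data={'action': 'upload_pod_file',
                                   'environment_id': 'an-existing-environment-uuid'})
    print(resp.json())
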
diff --git a/api/resources/v2/projects.py b/api/resources/v2/projects.py
new file mode 100644 (file)
index 0000000..376cf1a
--- /dev/null
@@ -0,0 +1,97 @@
+import uuid
+import logging
+
+from datetime import datetime
+
+from api import ApiResource
+from api.database.v2.handlers import V2ProjectHandler
+from api.database.v2.handlers import V2TaskHandler
+from yardstick.common.utils import result_handler
+from yardstick.common.utils import change_obj_to_dict
+from yardstick.common import constants as consts
+
+LOG = logging.getLogger(__name__)
+LOG.setLevel(logging.DEBUG)
+
+
+class V2Projects(ApiResource):
+
+    def get(self):
+        project_handler = V2ProjectHandler()
+        projects = [change_obj_to_dict(p) for p in project_handler.list_all()]
+
+        for p in projects:
+            tasks = p['tasks']
+            p['tasks'] = tasks.split(',') if tasks else []
+
+        return result_handler(consts.API_SUCCESS, {'projects': projects})
+
+    def post(self):
+        return self._dispatch_post()
+
+    def create_project(self, args):
+        try:
+            name = args['name']
+        except KeyError:
+            return result_handler(consts.API_ERROR, 'name must be provided')
+
+        project_id = str(uuid.uuid4())
+        create_time = datetime.now()
+        project_handler = V2ProjectHandler()
+
+        project_init_data = {
+            'uuid': project_id,
+            'name': name,
+            'time': create_time
+        }
+        project_handler.insert(project_init_data)
+
+        return result_handler(consts.API_SUCCESS, {'uuid': project_id})
+
+
+class V2Project(ApiResource):
+
+    def get(self, project_id):
+        try:
+            uuid.UUID(project_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'invalid project id')
+
+        project_handler = V2ProjectHandler()
+        try:
+            project = project_handler.get_by_uuid(project_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'no such project id')
+
+        project_info = change_obj_to_dict(project)
+        tasks = project_info['tasks']
+        project_info['tasks'] = tasks.split(',') if tasks else []
+
+        return result_handler(consts.API_SUCCESS, {'project': project_info})
+
+    def delete(self, project_id):
+        try:
+            uuid.UUID(project_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'invalid project id')
+
+        project_handler = V2ProjectHandler()
+        try:
+            project = project_handler.get_by_uuid(project_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'no such project id')
+
+        if project.tasks:
+            LOG.info('delete related task')
+            task_handler = V2TaskHandler()
+            for task_id in project.tasks.split(','):
+                LOG.debug('delete task: %s', task_id)
+                try:
+                    task_handler.delete_by_uuid(task_id)
+                except ValueError:
+                    LOG.exception('no such task id: %s', task_id)
+
+        LOG.info('delete project in database')
+        project_handler.delete_by_uuid(project_id)
+
+        return result_handler(consts.API_SUCCESS, {'project': project_id})
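
A sketch of creating and inspecting a project; route and response envelope
assumed:

    import requests

    BASE = 'http://localhost:8888'             # assumed API host/port
    URL = BASE + '/api/v2/yardstick/projects'  # assumed route

    resp = requests.post(URL, json={'action': 'create_project',
                                    'args': {'name': 'demo-project'}})
    project_id = resp.json()['result']['uuid']  # assumed response envelope

    print(requests.get('{}/{}'.format(URL, project_id)).json())
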
diff --git a/api/resources/v2/tasks.py b/api/resources/v2/tasks.py
new file mode 100644 (file)
index 0000000..9790d76
--- /dev/null
@@ -0,0 +1,245 @@
+import uuid
+import logging
+from datetime import datetime
+
+from oslo_serialization import jsonutils
+
+from api import ApiResource
+from api.database.v2.handlers import V2TaskHandler
+from api.database.v2.handlers import V2ProjectHandler
+from api.database.v2.handlers import V2EnvironmentHandler
+from api.utils.thread import TaskThread
+from yardstick.common.utils import result_handler
+from yardstick.common.utils import change_obj_to_dict
+from yardstick.common import constants as consts
+from yardstick.benchmark.core.task import Task
+from yardstick.benchmark.core import Param
+
+LOG = logging.getLogger(__name__)
+LOG.setLevel(logging.DEBUG)
+
+
+class V2Tasks(ApiResource):
+
+    def get(self):
+        task_handler = V2TaskHandler()
+        tasks = [change_obj_to_dict(t) for t in task_handler.list_all()]
+
+        for t in tasks:
+            result = t['result']
+            t['result'] = jsonutils.loads(result) if result else None
+
+        return result_handler(consts.API_SUCCESS, {'tasks': tasks})
+
+    def post(self):
+        return self._dispatch_post()
+
+    def create_task(self, args):
+        try:
+            name = args['name']
+        except KeyError:
+            return result_handler(consts.API_ERROR, 'name must be provided')
+
+        try:
+            project_id = args['project_id']
+        except KeyError:
+            return result_handler(consts.API_ERROR, 'project_id must be provided')
+
+        task_id = str(uuid.uuid4())
+        create_time = datetime.now()
+        task_handler = V2TaskHandler()
+
+        LOG.info('create task in database')
+        task_init_data = {
+            'uuid': task_id,
+            'project_id': project_id,
+            'name': name,
+            'time': create_time,
+            'status': -1
+        }
+        task_handler.insert(task_init_data)
+
+        LOG.info('create task in project')
+        project_handler = V2ProjectHandler()
+        project_handler.append_attr(project_id, {'tasks': task_id})
+
+        return result_handler(consts.API_SUCCESS, {'uuid': task_id})
+
+
+class V2Task(ApiResource):
+
+    def get(self, task_id):
+        try:
+            uuid.UUID(task_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'invalid task id')
+
+        task_handler = V2TaskHandler()
+        try:
+            task = task_handler.get_by_uuid(task_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'no such task id')
+
+        task_info = change_obj_to_dict(task)
+        result = task_info['result']
+        task_info['result'] = jsonutils.loads(result) if result else None
+
+        return result_handler(consts.API_SUCCESS, {'task': task_info})
+
+    def delete(self, task_id):
+        try:
+            uuid.UUID(task_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'invalid task id')
+
+        task_handler = V2TaskHandler()
+        try:
+            project_id = task_handler.get_by_uuid(task_id).project_id
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'no such task id')
+
+        LOG.info('delete task in database')
+        task_handler.delete_by_uuid(task_id)
+
+        project_handler = V2ProjectHandler()
+        project = project_handler.get_by_uuid(project_id)
+
+        if project.tasks:
+            LOG.info('update tasks in project')
+            # split first: list.remove() mutates in place and returns None,
+            # so chaining it would always leave new_task_list as None
+            new_task_list = project.tasks.split(',')
+            new_task_list.remove(task_id)
+            if new_task_list:
+                new_tasks = ','.join(new_task_list)
+            else:
+                new_tasks = None
+            project_handler.update_attr(project_id, {'tasks': new_tasks})
+
+        return result_handler(consts.API_SUCCESS, {'task': task_id})
+
+    def put(self, task_id):
+
+        try:
+            uuid.UUID(task_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'invalid task id')
+
+        task_handler = V2TaskHandler()
+        try:
+            task_handler.get_by_uuid(task_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'no such task id')
+
+        return self._dispatch_post(task_id=task_id)
+
+    def add_environment(self, args):
+
+        task_id = args['task_id']
+        try:
+            environment_id = args['environment_id']
+        except KeyError:
+            return result_handler(consts.API_ERROR, 'environment_id must be provided')
+
+        try:
+            uuid.UUID(environment_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'invalid environment id')
+
+        environment_handler = V2EnvironmentHandler()
+        try:
+            environment_handler.get_by_uuid(environment_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'no such environment id')
+
+        LOG.info('update environment_id in task')
+        task_handler = V2TaskHandler()
+        task_handler.update_attr(task_id, {'environment_id': environment_id})
+
+        return result_handler(consts.API_SUCCESS, {'uuid': task_id})
+
+    def add_case(self, args):
+        task_id = args['task_id']
+        try:
+            name = args['case_name']
+        except KeyError:
+            return result_handler(consts.API_ERROR, 'case_name must be provided')
+
+        try:
+            content = args['case_content']
+        except KeyError:
+            return result_handler(consts.API_ERROR, 'case_content must be provided')
+
+        LOG.info('update case info in task')
+        task_handler = V2TaskHandler()
+        task_update_data = {
+            'case_name': name,
+            'content': content,
+            'suite': False
+        }
+        task_handler.update_attr(task_id, task_update_data)
+
+        return result_handler(consts.API_SUCCESS, {'uuid': task_id})
+
+    def add_suite(self, args):
+        task_id = args['task_id']
+        try:
+            name = args['suite_name']
+        except KeyError:
+            return result_handler(consts.API_ERROR, 'suite_name must be provided')
+
+        try:
+            content = args['suite_content']
+        except KeyError:
+            return result_handler(consts.API_ERROR, 'suite_content must be provided')
+
+        LOG.info('update suite info in task')
+        task_handler = V2TaskHandler()
+        task_update_data = {
+            'case_name': name,
+            'content': content,
+            'suite': True
+        }
+        task_handler.update_attr(task_id, task_update_data)
+
+        return result_handler(consts.API_SUCCESS, {'uuid': task_id})
+
+    def run(self, args):
+        try:
+            task_id = args['task_id']
+        except KeyError:
+            return result_handler(consts.API_ERROR, 'task_id must be provided')
+
+        try:
+            uuid.UUID(task_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'invalid task id')
+
+        task_handler = V2TaskHandler()
+        try:
+            task = task_handler.get_by_uuid(task_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'no such task id')
+
+        if not task.environment_id:
+            return result_handler(consts.API_ERROR, 'environment not set')
+
+        if not task.case_name or not task.content:
+            return result_handler(consts.API_ERROR, 'case not set')
+
+        if task.status == consts.TASK_NOT_DONE:
+            return result_handler(consts.API_ERROR, 'task is already running')
+
+        with open('/tmp/{}.yaml'.format(task.case_name), 'w') as f:
+            f.write(task.content)
+
+        data = {
+            'inputfile': ['/tmp/{}.yaml'.format(task.case_name)],
+            'task_id': task_id
+        }
+        if task.suite:
+            data.update({'suite': True})
+
+        LOG.info('start task thread')
+        param = Param(data)
+        task_thread = TaskThread(Task().start, param, task_handler)
+        task_thread.start()
+
+        return result_handler(consts.API_SUCCESS, {'uuid': task_id})
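
For orientation, here is a rough sketch of driving this task API from a client. The host and port are placeholders, and the {'action': ..., 'args': {...}} request body is an assumption carried over from the v1 dispatch convention; only the URL paths and the task_id injection via _dispatch_post(task_id=task_id) are visible in this patch:

    import requests

    BASE = 'http://192.168.100.10:8888/api/v2/yardstick'     # placeholder
    task_id = '6ba7b810-9dad-11d1-80b4-00c04fd430c8'         # existing task
    environment_id = '6ba7b811-9dad-11d1-80b4-00c04fd430c8'  # existing env

    # PUT /tasks/<task_id> lands in V2Task.put(), which re-dispatches on the
    # 'action' field; task_id is merged into args by _dispatch_post().
    task_url = '{}/tasks/{}'.format(BASE, task_id)
    requests.put(task_url, json={'action': 'add_environment',
                                 'args': {'environment_id': environment_id}})
    with open('samples/ping.yaml') as f:
        requests.put(task_url, json={'action': 'add_case',
                                     'args': {'case_name': 'ping',
                                              'case_content': f.read()}})
    resp = requests.put(task_url, json={'action': 'run'})
    print(resp.json())   # {'status': ..., 'result': {'uuid': task_id}}
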
diff --git a/api/resources/v2/testcases.py b/api/resources/v2/testcases.py
new file mode 100644 (file)
index 0000000..81b4aa8
--- /dev/null
@@ -0,0 +1,64 @@
+import logging
+import errno
+import os
+
+from api import ApiResource
+from yardstick.common.utils import result_handler
+from yardstick.common import constants as consts
+from yardstick.benchmark.core import Param
+from yardstick.benchmark.core.testcase import Testcase
+
+LOG = logging.getLogger(__name__)
+LOG.setLevel(logging.DEBUG)
+
+
+class V2Testcases(ApiResource):
+
+    def get(self):
+        param = Param({})
+        testcase_list = Testcase().list_all(param)
+        return result_handler(consts.API_SUCCESS, testcase_list)
+
+    def post(self):
+        return self._dispatch_post()
+
+    def upload_case(self, args):
+        try:
+            upload_file = args['file']
+        except KeyError:
+            return result_handler(consts.API_ERROR, 'file must be provided')
+
+        case_name = os.path.join(consts.TESTCASE_DIR, upload_file.filename)
+
+        LOG.info('save case file')
+        upload_file.save(case_name)
+
+        return result_handler(consts.API_SUCCESS, {'testcase': upload_file.filename})
+
+
+class V2Testcase(ApiResource):
+
+    def get(self, case_name):
+        case_path = os.path.join(consts.TESTCASE_DIR, '{}.yaml'.format(case_name))
+
+        try:
+            with open(case_path) as f:
+                data = f.read()
+        except IOError as e:
+            if e.errno == errno.ENOENT:
+                return result_handler(consts.API_ERROR, 'case does not exist')
+            return result_handler(consts.API_ERROR, str(e))
+
+        return result_handler(consts.API_SUCCESS, {'testcase': data})
+
+    def delete(self, case_name):
+        case_path = os.path.join(consts.TESTCASE_DIR, '{}.yaml'.format(case_name))
+
+        try:
+            os.remove(case_path)
+        except OSError as e:
+            if e.errno == errno.ENOENT:
+                return result_handler(consts.API_ERROR, 'case does not exist')
+            return result_handler(consts.API_ERROR, str(e))
+
+        return result_handler(consts.API_SUCCESS, {'testcase': case_name})
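
A minimal usage sketch for these two endpoints (placeholder host and port; the {'status': ..., 'result': ...} envelope is what result_handler() produces):

    import requests

    BASE = 'http://192.168.100.10:8888/api/v2/yardstick'  # placeholder

    # GET returns the raw YAML of <case_name>.yaml under result['testcase'].
    resp = requests.get('{}/testcases/opnfv_yardstick_tc002'.format(BASE))
    print(resp.json()['result']['testcase'])

    # DELETE removes the file from consts.TESTCASE_DIR.
    requests.delete('{}/testcases/opnfv_yardstick_tc002'.format(BASE))
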
diff --git a/api/resources/v2/testsuites.py b/api/resources/v2/testsuites.py
new file mode 100644 (file)
index 0000000..ee942ef
--- /dev/null
@@ -0,0 +1,83 @@
+import os
+import errno
+import logging
+
+import yaml
+
+from api import ApiResource
+from yardstick.common.utils import result_handler
+from yardstick.common import constants as consts
+from yardstick.benchmark.core.testsuite import Testsuite
+from yardstick.benchmark.core import Param
+
+LOG = logging.getLogger(__name__)
+LOG.setLevel(logging.DEBUG)
+
+
+class V2Testsuites(ApiResource):
+
+    def get(self):
+        param = Param({})
+        testsuite_list = Testsuite().list_all(param)
+
+        data = {
+            'testsuites': testsuite_list
+        }
+
+        return result_handler(consts.API_SUCCESS, data)
+
+    def post(self):
+        return self._dispatch_post()
+
+    def create_suite(self, args):
+        try:
+            suite_name = args['name']
+        except KeyError:
+            return result_handler(consts.API_ERROR, 'name must be provided')
+
+        try:
+            testcases = args['testcases']
+        except KeyError:
+            return result_handler(consts.API_ERROR, 'testcases must be provided')
+
+        testcases = [{'file_name': '{}.yaml'.format(t)} for t in testcases]
+
+        suite = os.path.join(consts.TESTSUITE_DIR, '{}.yaml'.format(suite_name))
+        suite_content = {
+            'schema': 'yardstick:suite:0.1',
+            'name': suite_name,
+            'test_cases_dir': 'tests/opnfv/test_cases/',
+            'test_cases': testcases
+        }
+
+        LOG.info('write test suite')
+        with open(suite, 'w') as f:
+            yaml.dump(suite_content, f, default_flow_style=False)
+
+        return result_handler(consts.API_SUCCESS, {'suite': suite_name})
+
+
+class V2Testsuite(ApiResource):
+
+    def get(self, suite_name):
+        suite_path = os.path.join(consts.TESTSUITE_DIR, '{}.yaml'.format(suite_name))
+        try:
+            with open(suite_path) as f:
+                data = f.read()
+        except IOError as e:
+            if e.errno == errno.ENOENT:
+                return result_handler(consts.API_ERROR, 'suite does not exist')
+            return result_handler(consts.API_ERROR, str(e))
+
+        return result_handler(consts.API_SUCCESS, {'testsuite': data})
+
+    def delete(self, suite_name):
+        suite_path = os.path.join(consts.TESTSUITE_DIR, '{}.yaml'.format(suite_name))
+        try:
+            os.remove(suite_path)
+        except OSError as e:
+            if e.errno == errno.ENOENT:
+                return result_handler(consts.API_ERROR, 'suite does not exist')
+            return result_handler(consts.API_ERROR, str(e))
+
+        return result_handler(consts.API_SUCCESS, {'testsuite': suite_name})
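
As a worked example of create_suite(), a request along these lines (the action/args body shape is again assumed from the v1 convention):

    import requests

    BASE = 'http://192.168.100.10:8888/api/v2/yardstick'  # placeholder
    requests.post('{}/testsuites/action'.format(BASE),
                  json={'action': 'create_suite',
                        'args': {'name': 'smoke',
                                 'testcases': ['opnfv_yardstick_tc002']}})

would write smoke.yaml into consts.TESTSUITE_DIR with the schema, name, test_cases_dir and test_cases keys shown above (yaml.dump with default_flow_style=False emits them as block-style YAML, sorted alphabetically).
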
index d39c445..158b8a5 100644 (file)
@@ -10,6 +10,7 @@ from __future__ import absolute_import
 
 import inspect
 import logging
+import socket
 from six.moves import filter
 
 from flasgger import Swagger
@@ -21,9 +22,17 @@ from api.database import db_session
 from api.database import engine
 from api.database.v1 import models
 from api.urls import urlpatterns
+from api import ApiResource
 from yardstick import _init_logging
+from yardstick.common import utils
+from yardstick.common import constants as consts
 
-logger = logging.getLogger(__name__)
+try:
+    from urlparse import urljoin
+except ImportError:
+    from urllib.parse import urljoin
+
+LOG = logging.getLogger(__name__)
 
 app = Flask(__name__)
 
@@ -37,8 +46,10 @@ def shutdown_session(exception=None):
     db_session.remove()
 
 
-for u in urlpatterns:
-    api.add_resource(u.resource, u.url, endpoint=u.endpoint)
+def get_resource(resource_name):
+    name = ''.join(resource_name.split('_'))
+    return next((r for r in utils.itersubclasses(ApiResource)
+                 if r.__name__.lower() == name))
 
 
 def init_db():
@@ -51,7 +62,7 @@ def init_db():
         return False
 
     subclses = filter(func, inspect.getmembers(models, inspect.isclass))
-    logger.debug('Import models: %s', [a[1] for a in subclses])
+    LOG.debug('Import models: %s', [a[1] for a in subclses])
     Base.metadata.create_all(bind=engine)
 
 
@@ -60,9 +71,21 @@ def app_wrapper(*args, **kwargs):
     return app(*args, **kwargs)
 
 
+def get_endpoint(url):
+    ip = socket.gethostbyname(socket.gethostname())
+    return urljoin('http://{}:{}'.format(ip, consts.API_PORT), url)
+
+
+for u in urlpatterns:
+    try:
+        api.add_resource(get_resource(u.target), u.url, endpoint=get_endpoint(u.url))
+    except StopIteration:
+        LOG.error('url resource not found: %s', u.url)
+
+
 if __name__ == '__main__':
     _init_logging()
-    logger.setLevel(logging.DEBUG)
-    logger.info('Starting server')
+    LOG.setLevel(logging.DEBUG)
+    LOG.info('Starting server')
     init_db()
     app.run(host='0.0.0.0')
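
get_resource() replaces the old explicit imports from api.views with a naming convention: strip the underscores from a Url target and match it case-insensitively against the ApiResource subclass names. The matching rule itself is plain string manipulation:

    >>> target = 'v2_testsuites'
    >>> ''.join(target.split('_'))
    'v2testsuites'
    >>> 'V2Testsuites'.lower() == ''.join(target.split('_'))
    True

A target that matches no subclass now surfaces only as the StopIteration logged at registration time, rather than as an import error.
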
index 13c6c76..2211348 100644 (file)
@@ -8,17 +8,49 @@
 ##############################################################################
 from __future__ import absolute_import
 
-from api import views
-from api.utils.common import Url
+from api import Url
 
 
 urlpatterns = [
-    Url('/yardstick/asynctask', views.Asynctask, 'asynctask'),
-    Url('/yardstick/testcases', views.Testcases, 'testcases'),
-    Url('/yardstick/testcases/release/action', views.ReleaseAction, 'release'),
-    Url('/yardstick/testcases/samples/action', views.SamplesAction, 'samples'),
-    Url('/yardstick/testcases/<case_name>/docs', views.CaseDocs, 'casedocs'),
-    Url('/yardstick/testsuites/action', views.TestsuitesAction, 'testsuites'),
-    Url('/yardstick/results', views.Results, 'results'),
-    Url('/yardstick/env/action', views.EnvAction, 'env')
+    Url('/yardstick/asynctask', 'v1_async_task'),
+    Url('/yardstick/testcases', 'v1_test_case'),
+    Url('/yardstick/testcases/release/action', 'v1_release_case'),
+    Url('/yardstick/testcases/samples/action', 'v1_sample_case'),
+    Url('/yardstick/testcases/<case_name>/docs', 'v1_case_docs'),
+    Url('/yardstick/testsuites/action', 'v1_test_suite'),
+    Url('/yardstick/results', 'v1_result'),
+    Url('/yardstick/env/action', 'v1_env'),
+
+    # api v2
+    Url('/api/v2/yardstick/environments', 'v2_environments'),
+    Url('/api/v2/yardstick/environments/action', 'v2_environments'),
+    Url('/api/v2/yardstick/environments/<environment_id>', 'v2_environment'),
+
+    Url('/api/v2/yardstick/openrcs/action', 'v2_openrcs'),
+    Url('/api/v2/yardstick/openrcs/<openrc_id>', 'v2_openrc'),
+
+    Url('/api/v2/yardstick/pods/action', 'v2_pods'),
+    Url('/api/v2/yardstick/pods/<pod_id>', 'v2_pod'),
+
+    Url('/api/v2/yardstick/images', 'v2_images'),
+    Url('/api/v2/yardstick/images/action', 'v2_images'),
+
+    Url('/api/v2/yardstick/containers/action', 'v2_containers'),
+    Url('/api/v2/yardstick/containers/<container_id>', 'v2_container'),
+
+    Url('/api/v2/yardstick/projects', 'v2_projects'),
+    Url('/api/v2/yardstick/projects/action', 'v2_projects'),
+    Url('/api/v2/yardstick/projects/<project_id>', 'v2_project'),
+
+    Url('/api/v2/yardstick/tasks', 'v2_tasks'),
+    Url('/api/v2/yardstick/tasks/action', 'v2_tasks'),
+    Url('/api/v2/yardstick/tasks/<task_id>', 'v2_task'),
+
+    Url('/api/v2/yardstick/testcases', 'v2_testcases'),
+    Url('/api/v2/yardstick/testcases/action', 'v2_testcases'),
+    Url('/api/v2/yardstick/testcases/<case_name>', 'v2_testcase'),
+
+    Url('/api/v2/yardstick/testsuites', 'v2_testsuites'),
+    Url('/api/v2/yardstick/testsuites/action', 'v2_testsuites'),
+    Url('/api/v2/yardstick/testsuites/<suite_name>', 'v2_testsuite')
 ]
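
Url now carries just a route and a target name (server.py reads u.url and u.target); the class has presumably moved into api/__init__.py as roughly the two-field analogue of the three-field version deleted below:

    # Hypothetical sketch, inferred from how server.py consumes it.
    class Url(object):

        def __init__(self, url, target):
            super(Url, self).__init__()
            self.url = url
            self.target = target
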
diff --git a/api/utils/common.py b/api/utils/common.py
deleted file mode 100644 (file)
index eda9c17..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-from __future__ import absolute_import
-import collections
-import logging
-
-from flask import jsonify
-import six
-
-LOG = logging.getLogger(__name__)
-LOG.setLevel(logging.DEBUG)
-
-
-def translate_to_str(obj):
-    if isinstance(obj, collections.Mapping):
-        return {str(k): translate_to_str(v) for k, v in obj.items()}
-    elif isinstance(obj, list):
-        return [translate_to_str(ele) for ele in obj]
-    elif isinstance(obj, six.text_type):
-        return str(obj)
-    return obj
-
-
-def result_handler(status, data):
-    result = {
-        'status': status,
-        'result': data
-    }
-    return jsonify(result)
-
-
-class Url(object):
-
-    def __init__(self, url, resource, endpoint):
-        super(Url, self).__init__()
-        self.url = url
-        self.resource = resource
-        self.endpoint = endpoint
index 2106548..5f4ec7e 100644 (file)
@@ -1,37 +1,47 @@
 import threading
+import os
 import logging
 
 from oslo_serialization import jsonutils
 
-from api.database.v1.handlers import TasksHandler
 from yardstick.common import constants as consts
 
-logger = logging.getLogger(__name__)
-logger.setLevel(logging.DEBUG)
+LOG = logging.getLogger(__name__)
+LOG.setLevel(logging.DEBUG)
 
 
 class TaskThread(threading.Thread):
 
-    def __init__(self, target, args):
+    def __init__(self, target, args, handler):
         super(TaskThread, self).__init__(target=target, args=args)
         self.target = target
         self.args = args
+        self.handler = handler
 
     def run(self):
-        task_handler = TasksHandler()
-        data = {'task_id': self.args.task_id, 'status': consts.TASK_NOT_DONE}
-        task_handler.insert(data)
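+        # Assumption: a v2 task row already exists (created via the v2 tasks
+        # API), so only flip its status here; the v1 path inserts the row.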
+        if self.handler.__class__.__name__.lower().startswith('v2'):
+            self.handler.update_attr(self.args.task_id, {'status': consts.TASK_NOT_DONE})
+        else:
+            update_data = {'task_id': self.args.task_id, 'status': consts.TASK_NOT_DONE}
+            self.handler.insert(update_data)
 
-        logger.info('Starting run task')
+        LOG.info('Starting run task')
         try:
             data = self.target(self.args)
         except Exception as e:
-            logger.exception('Task Failed')
+            LOG.exception('Task Failed')
             update_data = {'status': consts.TASK_FAILED, 'error': str(e)}
-            task_handler.update_attr(self.args.task_id, update_data)
+            self.handler.update_attr(self.args.task_id, update_data)
         else:
-            logger.info('Task Finished')
-            logger.debug('Result: %s', data)
-
-            data['result'] = jsonutils.dumps(data.get('result', {}))
-            task_handler.update_attr(self.args.task_id, data)
+            LOG.info('Task Finished')
+            LOG.debug('Result: %s', data)
+
+            if self.handler.__class__.__name__.lower().startswith('v2'):
+                new_data = {'status': consts.TASK_DONE, 'result': jsonutils.dumps(data.get('result', {}))}
+                self.handler.update_attr(self.args.task_id, new_data)
+                os.remove(self.args.inputfile[0])
+            else:
+                data['result'] = jsonutils.dumps(data.get('result', {}))
+                self.handler.update_attr(self.args.task_id, data)
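
TaskThread is now persistence-agnostic: the caller passes in whichever handler flavor it wants. A sketch of the two call sites (the v2 one mirrors V2Task.run() above; the task_id and input file are placeholders):

    from api.database.v1.handlers import TasksHandler
    from api.database.v2.handlers import V2TaskHandler
    from api.utils.thread import TaskThread
    from yardstick.benchmark.core import Param
    from yardstick.benchmark.core.task import Task

    data = {'inputfile': ['/tmp/ping.yaml'], 'task_id': 'some-task-uuid'}

    # v2: the task row exists already; run() only updates status/result.
    TaskThread(Task().start, Param(data), V2TaskHandler()).start()

    # v1: run() inserts the row itself before starting the task.
    TaskThread(Task().start, Param(data), TasksHandler()).start()
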
diff --git a/api/views.py b/api/views.py
deleted file mode 100644 (file)
index 9c9ca4e..0000000
+++ /dev/null
@@ -1,82 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-from __future__ import absolute_import
-import logging
-import os
-
-from flasgger.utils import swag_from
-
-from api.base import ApiResource
-from api.swagger import models
-
-logger = logging.getLogger(__name__)
-logger.setLevel(logging.DEBUG)
-
-
-TestCaseActionModel = models.TestCaseActionModel
-TestCaseActionArgsModel = models.TestCaseActionArgsModel
-TestCaseActionArgsOptsModel = models.TestCaseActionArgsOptsModel
-TestCaseActionArgsOptsTaskArgModel = models.TestCaseActionArgsOptsTaskArgModel
-
-
-class Asynctask(ApiResource):
-    def get(self):
-        return self._dispatch_get()
-
-
-class Testcases(ApiResource):
-    def get(self):
-        return self._dispatch_get()
-
-
-class ReleaseAction(ApiResource):
-    @swag_from(os.getcwd() + '/swagger/docs/release_action.yaml')
-    def post(self):
-        return self._dispatch_post()
-
-
-class SamplesAction(ApiResource):
-
-    def post(self):
-        return self._dispatch_post()
-
-
-TestSuiteActionModel = models.TestSuiteActionModel
-TestSuiteActionArgsModel = models.TestSuiteActionArgsModel
-TestSuiteActionArgsOptsModel = models.TestSuiteActionArgsOptsModel
-TestSuiteActionArgsOptsTaskArgModel = \
-    models.TestSuiteActionArgsOptsTaskArgModel
-
-
-class TestsuitesAction(ApiResource):
-    @swag_from(os.getcwd() + '/swagger/docs/testsuites_action.yaml')
-    def post(self):
-        return self._dispatch_post()
-
-
-ResultModel = models.ResultModel
-
-
-class Results(ApiResource):
-
-    @swag_from(os.getcwd() + '/swagger/docs/results.yaml')
-    def get(self):
-        return self._dispatch_get()
-
-
-class EnvAction(ApiResource):
-
-    def post(self):
-        return self._dispatch_post()
-
-
-class CaseDocs(ApiResource):
-
-    def get(self, case_name):
-        return self._dispatch_get(case_name=case_name)
index d89f9ed..6d55ada 100644 (file)
@@ -38,7 +38,11 @@ Version History
 | *Date*         | *Version*          | *Comment*                       |
 |                |                    |                                 |
 +----------------+--------------------+---------------------------------+
-|                |  3.0               | Yardstick for Danube release    |
+|                |  3.1               | Yardstick for Danube release    |
+|                |                    |                                 |
+|                |                    | Note: The 3.1 tag is due to a   |
+|                |                    | git tag issue during the Danube |
+|                |                    | 3.0 release                     |
 |                |                    |                                 |
 +----------------+--------------------+---------------------------------+
 | May 4th, 2017  |  2.0               | Yardstick for Danube release    |
@@ -139,19 +143,19 @@ Release Data
 | **Project**                          | Yardstick                            |
 |                                      |                                      |
 +--------------------------------------+--------------------------------------+
-| **Repo/tag**                         | yardstick/Danube.2.0                 |
+| **Repo/tag**                         | yardstick/Danube.3.1                 |
 |                                      |                                      |
 +--------------------------------------+--------------------------------------+
-| **Yardstick Docker image tag**       | Danube.2.0                           |
+| **Yardstick Docker image tag**       | Danube.3.1                           |
 |                                      |                                      |
 +--------------------------------------+--------------------------------------+
 | **Release designation**              | Danube                               |
 |                                      |                                      |
 +--------------------------------------+--------------------------------------+
-| **Release date**                     | May 4th, 2017                        |
+| **Release date**                     | July 14th, 2017                      |
 |                                      |                                      |
 +--------------------------------------+--------------------------------------+
-| **Purpose of the delivery**          | OPNFV Danube release 2.0             |
+| **Purpose of the delivery**          | OPNFV Danube release 3.0             |
 |                                      |                                      |
 +--------------------------------------+--------------------------------------+
 
@@ -171,7 +175,7 @@ Software Deliverables
 ---------------------
 
 
- - The Yardstick Docker image: https://hub.docker.com/r/opnfv/yardstick (tag: danube.2.0)
+ - The Yardstick Docker image: https://hub.docker.com/r/opnfv/yardstick (tag: danube.3.1)
 
 
 **Contexts**
@@ -515,7 +519,7 @@ Feature additions
 Scenario Matrix
 ===============
 
-For Danube 2.0, Yardstick was tested on the following scenarios:
+For Danube 3.0, Yardstick was tested on the following scenarios:
 
 +-------------------------+---------+---------+---------+---------+
 |         Scenario        |  Apex   | Compass |  Fuel   |   Joid  |
@@ -613,10 +617,50 @@ Known Issues/Faults
 Corrected Faults
 ----------------
 
+Danube.3.1:
+
++----------------------------+------------------------------------------------+
+| **JIRA REFERENCE**         | **DESCRIPTION**                                |
+|                            |                                                |
++----------------------------+------------------------------------------------+
+| JIRA: YARDSTICK-714        | Add yardstick env influxdb/grafana command for |
+|                            | CentOS                                         |
++----------------------------+------------------------------------------------+
+| JIRA: YARDSTICK-655        | Monitor command in tc019 may not show the      |
+|                            | real nova-api service status                   |
++----------------------------+------------------------------------------------+
+| JIRA: YARDSTICK-397        | HA testing framework improvement               |
+|                            |                                                |
++----------------------------+------------------------------------------------+
+| JIRA: YARDSTICK-660        | Improve monitor_process pass criteria          |
+|                            |                                                |
++----------------------------+------------------------------------------------+
+| JIRA: YARDSTICK-657        | HA monitor_multi bug,                          |
+|                            | KeyError: 'max_outage_time'                    |
++----------------------------+------------------------------------------------+
+| JIRA: YARDSTICK-647        | TC025 fault_type value is wrong when using     |
+|                            | baremetal pod scripts                          |
++----------------------------+------------------------------------------------+
+| JIRA: YARDSTICK-659        | Terminate openstack service process using kill |
+|                            | command in HA test cases                       |
++----------------------------+------------------------------------------------+
+| JIRA: ARMBAND-275          | Yardstick TC005 fails with                     |
+|                            | "Cannot map zero-fill pages" error             |
++----------------------------+------------------------------------------------+
+| JIRA: YARDSTICK-561        | Bugfix: AttributeError: 'dict' object has no   |
+|                            | attribute 'split' if run sample/ping-hot.yaml  |
++----------------------------+------------------------------------------------+
+| JIRA: ARMBAND-268          | ERROR No JSON object could be decoded from     |
+|                            | LMBENCH in TC010                               |
++----------------------------+------------------------------------------------+
+| JIRA: YARDSTICK-680        | Storperf test case tc074 does not get results  |
+|                            |                                                |
++----------------------------+------------------------------------------------+
+
 Danube.2.0:
 
 +----------------------------+------------------------------------------------+
-| **JIRA REFERENCE**         | **SLOGAN**                                     |
+| **JIRA REFERENCE**         | **DESCRIPTION**                                |
 |                            |                                                |
 +----------------------------+------------------------------------------------+
 | JIRA: YARDSTICK-608        | Set work directory in Yardstick container      |
@@ -662,7 +706,7 @@ Danube.2.0:
 Danube.1.0:
 
 +----------------------------+------------------------------------------------+
-| **JIRA REFERENCE**         | **SLOGAN**                                     |
+| **JIRA REFERENCE**         | **DESCRIPTION**                                |
 |                            |                                                |
 +----------------------------+------------------------------------------------+
 | JIRA: YARDSTICK-599        | Could not load EntryPoint.parse when using     |
@@ -673,7 +717,7 @@ Danube.1.0:
 +----------------------------+------------------------------------------------+
 
 
-Danube 2.0 known restrictions/issues
+Danube 3.1 known restrictions/issues
 ====================================
 +-----------+-----------+----------------------------------------------+
 | Installer | Scenario  |  Issue                                       |
@@ -695,7 +739,7 @@ Open JIRA tickets
 =================
 
 +----------------------------+------------------------------------------------+
-| **JIRA REFERENCE**         | **SLOGAN**                                     |
+| **JIRA REFERENCE**         | **DESCRIPTION**                                |
 |                            |                                                |
 +----------------------------+------------------------------------------------+
 | JIRA: YARDSTICK-626        | Fio and Lmbench don't work in Ubuntu-arm64     |
index 85bd8b3..3a4cbce 100644 (file)
@@ -39,6 +39,7 @@ jsonpatch==1.15
 jsonpointer==1.10
 jsonschema==2.5.1
 keystoneauth1==2.18.0
+kubernetes==3.0.0a1
 linecache2==1.0.0
 lxml==3.7.2
 mccabe==0.4.0
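
The new kubernetes client backs the Kubernetes context and the k8s_utils helpers referenced by the new unit tests below. Roughly the kind of calls involved, sketched against the client's documented surface rather than yardstick's own wrappers:

    from kubernetes import client, config

    config.load_kube_config()             # reads ~/.kube/config by default
    core = client.CoreV1Api()
    for pod in core.list_namespaced_pod('default').items:
        print(pod.metadata.name, pod.status.phase)
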
diff --git a/samples/ping_k8s.yaml b/samples/ping_k8s.yaml
new file mode 100644 (file)
index 0000000..503fe6a
--- /dev/null
@@ -0,0 +1,46 @@
+##############################################################################
+# Copyright (c) 2017 Huawei AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+---
+# Sample benchmark task config file
+# measure network latency using ping in container
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+  type: Ping
+  options:
+    packetsize: 200
+
+  host: host-k8s
+  target: target-k8s
+
+  runner:
+    type: Duration
+    duration: 60
+    interval: 1
+
+  sla:
+    max_rtt: 10
+    action: monitor
+
+context:
+  type: Kubernetes
+  name: k8s
+
+  servers:
+    host:
+      image: openretriever/yardstick
+      command: /bin/bash
+      args: ['-c', 'chmod 700 ~/.ssh; chmod 600 ~/.ssh/*; service ssh restart;while true ; do sleep 10000; done']
+    target:
+      image: openretriever/yardstick
+      command: /bin/bash
+      args: ['-c', 'chmod 700 ~/.ssh; chmod 600 ~/.ssh/*; service ssh restart;while true ; do sleep 10000; done']
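
With a reachable cluster and a local kube config in place, this sample should run through the ordinary CLI (yardstick task start samples/ping_k8s.yaml): the Kubernetes context creates the two pods, pushes the SSH key in via a ConfigMap, waits for them to reach Running, and removes the pods, replication controllers and key again on undeploy.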
diff --git a/tests/unit/apiserver/utils/test_common.py b/tests/unit/apiserver/utils/test_common.py
deleted file mode 100644 (file)
index ad81cb7..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-from __future__ import absolute_import
-import unittest
-
-from api.utils import common
-
-
-class TranslateToStrTestCase(unittest.TestCase):
-
-    def test_translate_to_str_unicode(self):
-        input_str = u'hello'
-        output_str = common.translate_to_str(input_str)
-
-        result = 'hello'
-        self.assertEqual(result, output_str)
-
-    def test_translate_to_str_dict_list_unicode(self):
-        input_str = {
-            u'hello': {u'hello': [u'world']}
-        }
-        output_str = common.translate_to_str(input_str)
-
-        result = {
-            'hello': {'hello': ['world']}
-        }
-        self.assertEqual(result, output_str)
-
-
-def main():
-    unittest.main()
-
-
-if __name__ == '__main__':
-    main()
index 3dadd48..ae57402 100644 (file)
@@ -13,6 +13,7 @@
 
 from __future__ import absolute_import
 
+import ipaddress
 import logging
 import os
 import unittest
@@ -120,7 +121,8 @@ class HeatContextTestCase(unittest.TestCase):
         mock_template.add_router_interface.assert_called_with("bar-fool-network-router-if0", "bar-fool-network-router", "bar-fool-network-subnet")
 
     @mock.patch('yardstick.benchmark.contexts.heat.HeatTemplate')
-    def test_deploy(self, mock_template):
+    @mock.patch('yardstick.benchmark.contexts.heat.get_neutron_client')
+    def test_deploy(self, mock_neutron, mock_template):
 
         self.test_context.name = 'foo'
         self.test_context.template_file = '/bar/baz/some-heat-file'
@@ -133,6 +135,59 @@ class HeatContextTestCase(unittest.TestCase):
                                          self.test_context.heat_parameters)
         self.assertIsNotNone(self.test_context.stack)
 
+    def test_add_server_port(self):
+        network1 = mock.MagicMock()
+        network1.vld_id = 'vld111'
+        network2 = mock.MagicMock()
+        network2.vld_id = 'vld777'
+        self.test_context.name = 'foo'
+        self.test_context.stack = mock.MagicMock()
+        self.test_context.networks = {
+            'a': network1,
+            'c': network2,
+        }
+        self.test_context.stack.outputs = {
+            u'b': u'10.20.30.45',
+            u'b-subnet_id': 1,
+            u'foo-a-subnet-cidr': u'10.20.0.0/15',
+            u'foo-a-subnet-gateway_ip': u'10.20.30.1',
+            u'b-mac_address': u'00:01',
+            u'b-device_id': u'dev21',
+            u'b-network_id': u'net789',
+            u'd': u'40.30.20.15',
+            u'd-subnet_id': 2,
+            u'foo-c-subnet-cidr': u'40.30.0.0/18',
+            u'foo-c-subnet-gateway_ip': u'40.30.20.254',
+            u'd-mac_address': u'00:10',
+            u'd-device_id': u'dev43',
+            u'd-network_id': u'net987',
+        }
+        server = mock.MagicMock()
+        server.ports = OrderedDict([
+            ('a', {'stack_name': 'b'}),
+            ('c', {'stack_name': 'd'}),
+        ])
+
+        expected = {
+            "private_ip": '10.20.30.45',
+            "subnet_id": 1,
+            "subnet_cidr": '10.20.0.0/15',
+            "network": '10.20.0.0',
+            "netmask": '255.254.0.0',
+            "gateway_ip": '10.20.30.1',
+            "mac_address": '00:01',
+            "device_id": 'dev21',
+            "network_id": 'net789',
+            "network_name": 'a',
+            "local_mac": '00:01',
+            "local_ip": '10.20.30.45',
+            "vld_id": 'vld111',
+        }
+        self.test_context.add_server_port(server)
+        self.assertEqual(server.private_ip, '10.20.30.45')
+        self.assertEqual(len(server.interfaces), 2)
+        self.assertDictEqual(server.interfaces['a'], expected)
+
     @mock.patch('yardstick.benchmark.contexts.heat.HeatTemplate')
     def test_undeploy(self, mock_template):
 
@@ -155,3 +210,57 @@ class HeatContextTestCase(unittest.TestCase):
 
         self.assertEqual(result['ip'], '127.0.0.1')
         self.assertEqual(result['private_ip'], '10.0.0.1')
+
+    def test__get_network(self):
+        network1 = mock.MagicMock()
+        network1.name = 'net_1'
+        network1.vld_id = 'vld111'
+        network1.segmentation_id = 'seg54'
+        network1.network_type = 'type_a'
+        network1.physical_network = 'phys'
+
+        network2 = mock.MagicMock()
+        network2.name = 'net_2'
+        network2.vld_id = 'vld999'
+        network2.segmentation_id = 'seg45'
+        network2.network_type = 'type_b'
+        network2.physical_network = 'virt'
+
+        self.test_context.networks = {
+            'a': network1,
+            'b': network2,
+        }
+
+        attr_name = None
+        self.assertIsNone(self.test_context._get_network(attr_name))
+
+        attr_name = {}
+        self.assertIsNone(self.test_context._get_network(attr_name))
+
+        attr_name = {'vld_id': 'vld777'}
+        self.assertIsNone(self.test_context._get_network(attr_name))
+
+        attr_name = 'vld777'
+        self.assertIsNone(self.test_context._get_network(attr_name))
+
+        attr_name = {'vld_id': 'vld999'}
+        expected = {
+            "name": 'net_2',
+            "vld_id": 'vld999',
+            "segmentation_id": 'seg45',
+            "network_type": 'type_b',
+            "physical_network": 'virt',
+        }
+        result = self.test_context._get_network(attr_name)
+        self.assertDictEqual(result, expected)
+
+        attr_name = 'a'
+        expected = {
+            "name": 'net_1',
+            "vld_id": 'vld111',
+            "segmentation_id": 'seg54',
+            "network_type": 'type_a',
+            "physical_network": 'phys',
+        }
+        result = self.test_context._get_network(attr_name)
+        self.assertDictEqual(result, expected)
diff --git a/tests/unit/benchmark/contexts/test_kubernetes.py b/tests/unit/benchmark/contexts/test_kubernetes.py
new file mode 100644 (file)
index 0000000..f47c07a
--- /dev/null
@@ -0,0 +1,165 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.contexts.kubernetes
+
+from __future__ import absolute_import
+import unittest
+import mock
+
+from yardstick.benchmark.contexts.kubernetes import KubernetesContext
+
+
+context_cfg = {
+    'type': 'Kubernetes',
+    'name': 'k8s',
+    'servers': {
+        'host': {
+            'image': 'openretriever/yardstick',
+            'command': '/bin/bash',
+            'args': ['-c', 'chmod 700 ~/.ssh; chmod 600 ~/.ssh/*; \
+service ssh restart;while true ; do sleep 10000; done']
+        },
+        'target': {
+            'image': 'openretriever/yardstick',
+            'command': '/bin/bash',
+            'args': ['-c', 'chmod 700 ~/.ssh; chmod 600 ~/.ssh/*; \
+service ssh restart;while true ; do sleep 10000; done']
+        }
+    }
+}
+
+prefix = 'yardstick.benchmark.contexts.kubernetes'
+
+
+class UndeployTestCase(unittest.TestCase):
+
+    @mock.patch('{}.KubernetesContext._delete_ssh_key'.format(prefix))
+    @mock.patch('{}.KubernetesContext._delete_rcs'.format(prefix))
+    @mock.patch('{}.KubernetesContext._delete_pods'.format(prefix))
+    def test_undeploy(self,
+                      mock_delete_pods,
+                      mock_delete_rcs,
+                      mock_delete_ssh):
+
+        k8s_context = KubernetesContext()
+        k8s_context.init(context_cfg)
+        k8s_context.undeploy()
+        self.assertTrue(mock_delete_ssh.called)
+        self.assertTrue(mock_delete_rcs.called)
+        self.assertTrue(mock_delete_pods.called)
+
+
+class DeployTestCase(unittest.TestCase):
+
+    @mock.patch('{}.KubernetesContext._wait_until_running'.format(prefix))
+    @mock.patch('{}.KubernetesTemplate.get_rc_pods'.format(prefix))
+    @mock.patch('{}.KubernetesContext._create_rcs'.format(prefix))
+    @mock.patch('{}.KubernetesContext._set_ssh_key'.format(prefix))
+    def test_deploy(self,
+                    mock_set_ssh_key,
+                    mock_create_rcs,
+                    mock_get_rc_pods,
+                    mock_wait_until_running):
+
+        k8s_context = KubernetesContext()
+        k8s_context.init(context_cfg)
+        k8s_context.deploy()
+        self.assertTrue(mock_set_ssh_key.called)
+        self.assertTrue(mock_create_rcs.called)
+        self.assertTrue(mock_get_rc_pods.called)
+        self.assertTrue(mock_wait_until_running.called)
+
+
+class SSHKeyTestCase(unittest.TestCase):
+
+    @mock.patch('{}.k8s_utils.delete_config_map'.format(prefix))
+    @mock.patch('{}.k8s_utils.create_config_map'.format(prefix))
+    def test_ssh_key(self, mock_create, mock_delete):
+
+        k8s_context = KubernetesContext()
+        k8s_context.init(context_cfg)
+        k8s_context._set_ssh_key()
+        k8s_context._delete_ssh_key()
+        self.assertTrue(mock_create.called)
+        self.assertTrue(mock_delete.called)
+
+
+class WaitUntilRunningTestCase(unittest.TestCase):
+
+    @mock.patch('{}.k8s_utils.read_pod_status'.format(prefix))
+    def test_wait_until_running(self, mock_read_pod_status):
+
+        k8s_context = KubernetesContext()
+        k8s_context.init(context_cfg)
+        k8s_context.template.pods = ['server']
+        mock_read_pod_status.return_value = 'Running'
+        k8s_context._wait_until_running()
+
+
+class GetServerTestCase(unittest.TestCase):
+
+    @mock.patch('{}.k8s_utils.get_pod_list'.format(prefix))
+    def test_get_server(self, mock_get_pod_list):
+        k8s_context = KubernetesContext()
+        k8s_context.init(context_cfg)
+
+        mock_get_pod_list.return_value.items = []
+        server = k8s_context._get_server('server')
+        self.assertIsNone(server)
+
+
+class CreateRcsTestCase(unittest.TestCase):
+
+    @mock.patch('{}.KubernetesContext._create_rc'.format(prefix))
+    def test_create_rcs(self, mock_create_rc):
+        k8s_context = KubernetesContext()
+        k8s_context.init(context_cfg)
+        k8s_context._create_rcs()
+        self.assertTrue(mock_create_rc.called)
+
+
+class CreateRcTestCase(unittest.TestCase):
+
+    @mock.patch('{}.k8s_utils.create_replication_controller'.format(prefix))
+    def test_create_rc(self, mock_create_replication_controller):
+        k8s_context = KubernetesContext()
+        k8s_context.init(context_cfg)
+        k8s_context._create_rc({})
+        self.assertTrue(mock_create_replication_controller.called)
+
+
+class DeleteRcsTestCases(unittest.TestCase):
+
+    @mock.patch('{}.KubernetesContext._delete_rc'.format(prefix))
+    def test_delete_rcs(self, mock_delete_rc):
+        k8s_context = KubernetesContext()
+        k8s_context.init(context_cfg)
+        k8s_context._delete_rcs()
+        self.assertTrue(mock_delete_rc.called)
+
+
+class DeleteRcTestCase(unittest.TestCase):
+
+    @mock.patch('{}.k8s_utils.delete_replication_controller'.format(prefix))
+    def test_delete_rc(self, mock_delete_replication_controller):
+        k8s_context = KubernetesContext()
+        k8s_context.init(context_cfg)
+        k8s_context._delete_rc({})
+        self.assertTrue(mock_delete_replication_controller.called)
+
+
+def main():
+    unittest.main()
+
+
+if __name__ == '__main__':
+    main()
index 3fb186b..1ce5503 100644 (file)
@@ -161,6 +161,24 @@ class NetworkTestCase(unittest.TestCase):
 
         self.assertEqual(model.Network.find_external_network(), 'ext_net')
 
+    def test_construct_gateway_ip_is_null(self):
+
+        attrs = {'gateway_ip': 'null'}
+        test_network = model.Network('foo', self.mock_context, attrs)
+        self.assertEqual(test_network.gateway_ip, 'null')
+
+    def test_construct_gateway_ip_is_none(self):
+
+        attrs = {'gateway_ip': None}
+        test_network = model.Network('foo', self.mock_context, attrs)
+        self.assertEqual(test_network.gateway_ip, 'null')
+
+    def test_construct_gateway_ip_is_absent(self):
+
+        attrs = {}
+        test_network = model.Network('foo', self.mock_context, attrs)
+        self.assertIsNone(test_network.gateway_ip)
+
 
 class ServerTestCase(unittest.TestCase):
 
@@ -214,11 +231,12 @@ class ServerTestCase(unittest.TestCase):
         attrs = {'image': 'some-image', 'flavor': 'some-flavor', 'floating_ip': '192.168.1.10', 'floating_ip_assoc': 'some-vm'}
         test_server = model.Server('foo', self.mock_context, attrs)
 
-        self.mock_context.flavors =  ['flavor1', 'flavor2', 'some-flavor']
+        self.mock_context.flavors = ['flavor1', 'flavor2', 'some-flavor']
 
         mock_network = mock.Mock()
         mock_network.name = 'some-network'
         mock_network.stack_name = 'some-network-stack'
+        mock_network.allowed_address_pairs = ["1", "2"]
         mock_network.subnet_stack_name = 'some-network-stack-subnet'
         mock_network.provider = 'sriov'
         mock_network.external_network = 'ext_net'
@@ -232,7 +250,8 @@ class ServerTestCase(unittest.TestCase):
             mock_network.stack_name,
             mock_network.subnet_stack_name,
             sec_group_id=self.mock_context.secgroup_name,
-            provider=mock_network.provider)
+            provider=mock_network.provider,
+            allowed_address_pairs=mock_network.allowed_address_pairs)
 
         mock_template.add_floating_ip.assert_called_with(
             'some-server-fip',
@@ -290,11 +309,12 @@ class ServerTestCase(unittest.TestCase):
         }
         test_server = model.Server('ServerFlavor-2', self.mock_context, attrs)
 
-        self.mock_context.flavors =  ['flavor2']
+        self.mock_context.flavors = ['flavor2']
         mock_network = mock.Mock()
-        mock_network.configure_mock(name='some-network', stack_name= 'some-network-stack',
-                                    subnet_stack_name = 'some-network-stack-subnet',
-                                    provider = 'some-provider')
+        mock_network.allowed_address_pairs = ["1", "2"]
+        mock_network.configure_mock(name='some-network', stack_name='some-network-stack',
+                                    subnet_stack_name='some-network-stack-subnet',
+                                    provider='some-provider')
 
         test_server._add_instance(mock_template, 'ServerFlavor-2',
                                   [mock_network], 'hints')
@@ -304,7 +324,8 @@ class ServerTestCase(unittest.TestCase):
             mock_network.stack_name,
             mock_network.subnet_stack_name,
             provider=mock_network.provider,
-            sec_group_id=self.mock_context.secgroup_name)
+            sec_group_id=self.mock_context.secgroup_name,
+            allowed_address_pairs=mock_network.allowed_address_pairs)
 
         mock_template.add_server.assert_called_with(
             'ServerFlavor-2', 'some-image',
index 4b35ca4..d5ce8c5 100644 (file)
@@ -208,6 +208,50 @@ class NodeContextTestCase(unittest.TestCase):
         obj._get_client(node_name_args)
         self.assertTrue(wait_mock.called)
 
+    def test__get_network(self):
+        network1 = {
+            'name': 'net_1',
+            'vld_id': 'vld111',
+            'segmentation_id': 'seg54',
+            'network_type': 'type_a',
+            'physical_network': 'phys',
+        }
+        network2 = {
+            'name': 'net_2',
+            'vld_id': 'vld999',
+        }
+        self.test_context.networks = {
+            'a': network1,
+            'b': network2,
+        }
+
+        attr_name = {}
+        self.assertIsNone(self.test_context._get_network(attr_name))
+
+        attr_name = {'vld_id': 'vld777'}
+        self.assertIsNone(self.test_context._get_network(attr_name))
+
+        self.assertIsNone(self.test_context._get_network(None))
+
+        attr_name = 'vld777'
+        self.assertIsNone(self.test_context._get_network(attr_name))
+
+        attr_name = {'vld_id': 'vld999'}
+        expected = {
+            "name": 'net_2',
+            "vld_id": 'vld999',
+            "segmentation_id": None,
+            "network_type": None,
+            "physical_network": None,
+        }
+        result = self.test_context._get_network(attr_name)
+        self.assertDictEqual(result, expected)
+
+        attr_name = 'a'
+        expected = network1
+        result = self.test_context._get_network(attr_name)
+        self.assertDictEqual(result, expected)
+
 
 def main():
     unittest.main()
index 687ef73..a6fd776 100644 (file)
@@ -129,3 +129,48 @@ class StandaloneContextTestCase(unittest.TestCase):
         curr_path = os.path.dirname(os.path.abspath(__file__))
         file_path = os.path.join(curr_path, filename)
         return file_path
+
+    def test__get_network(self):
+        network1 = {
+            'name': 'net_1',
+            'vld_id': 'vld111',
+            'segmentation_id': 'seg54',
+            'network_type': 'type_a',
+            'physical_network': 'phys',
+        }
+        network2 = {
+            'name': 'net_2',
+            'vld_id': 'vld999',
+        }
+        self.test_context.networks = {
+            'a': network1,
+            'b': network2,
+        }
+
+        attr_name = None
+        self.assertIsNone(self.test_context._get_network(attr_name))
+
+        attr_name = {}
+        self.assertIsNone(self.test_context._get_network(attr_name))
+
+        attr_name = {'vld_id': 'vld777'}
+        self.assertIsNone(self.test_context._get_network(attr_name))
+
+        attr_name = 'vld777'
+        self.assertIsNone(self.test_context._get_network(attr_name))
+
+        attr_name = {'vld_id': 'vld999'}
+        expected = {
+            "name": 'net_2',
+            "vld_id": 'vld999',
+            "segmentation_id": None,
+            "network_type": None,
+            "physical_network": None,
+        }
+        result = self.test_context._get_network(attr_name)
+        self.assertDictEqual(result, expected)
+
+        attr_name = 'a'
+        expected = network1
+        result = self.test_context._get_network(attr_name)
+        self.assertDictEqual(result, expected)
index b64bb8e..8d6d963 100644 (file)
@@ -47,6 +47,73 @@ class TaskTestCase(unittest.TestCase):
         self.assertEqual(context_cfg["host"], server_info)
         self.assertEqual(context_cfg["target"], server_info)
 
+    @mock.patch('yardstick.benchmark.core.task.Context')
+    def test_parse_networks_from_nodes(self, mock_context):
+        nodes = {
+            'node1': {
+                'interfaces': {
+                    'eth0': {
+                        'name': 'mgmt',
+                    },
+                    'eth1': {
+                        'name': 'external',
+                        'vld_id': '23',
+                    },
+                    'eth10': {
+                        'name': 'internal',
+                        'vld_id': '55',
+                    },
+                },
+            },
+            'node2': {
+                'interfaces': {
+                    'eth4': {
+                        'name': 'mgmt',
+                    },
+                    'eth2': {
+                        'name': 'external',
+                        'vld_id': '32',
+                    },
+                    'eth11': {
+                        'name': 'internal',
+                        'vld_id': '55',
+                    },
+                },
+            },
+        }
+
+        mock_context.get_network.side_effect = iter([
+            None,
+            {
+                'name': 'a',
+                'network_type': 'private',
+            },
+            {},
+            {
+                'name': 'b',
+                'vld_id': 'y',
+                'subnet_cidr': '10.20.0.0/16',
+            },
+            {
+                'name': 'c',
+                'vld_id': 'x',
+            },
+            {
+                'name': 'd',
+                'vld_id': 'w',
+            },
+        ])
+
+        expected_get_network_calls = 4  # once for each vld_id in the nodes dict
+        expected = {
+            'a': {'name': 'a', 'network_type': 'private'},
+            'b': {'name': 'b', 'vld_id': 'y', 'subnet_cidr': '10.20.0.0/16'},
+        }
+
+        networks = task.get_networks_from_nodes(nodes)
+        self.assertEqual(mock_context.get_network.call_count, expected_get_network_calls)
+        self.assertDictEqual(networks, expected)
+
     @mock.patch('yardstick.benchmark.core.task.Context')
     @mock.patch('yardstick.benchmark.core.task.base_runner')
     def test_run(self, mock_base_runner, mock_ctx):
index 6e72fa5..0313ef8 100644 (file)
@@ -15,12 +15,15 @@ from __future__ import absolute_import
 import unittest
 import time
 
+from mock import mock
+
 from yardstick.benchmark.runners.iteration import IterationRunner
 
 
 class RunnerTestCase(unittest.TestCase):
 
-    def test_get_output(self):
+    @mock.patch("yardstick.benchmark.runners.iteration.multiprocessing")
+    def test_get_output(self, mock_process):
         runner = IterationRunner({})
         runner.output_queue.put({'case': 'opnfv_yardstick_tc002'})
         runner.output_queue.put({'criteria': 'PASS'})
@@ -30,7 +33,10 @@ class RunnerTestCase(unittest.TestCase):
             'criteria': 'PASS'
         }
 
-        time.sleep(1)
+        for retries in range(1000):
+            time.sleep(0.01)
+            if not runner.output_queue.empty():
+                break
         actual_result = runner.get_output()
         self.assertEqual(idle_result, actual_result)
 
index 28b27c7..cc17960 100644 (file)
@@ -20,9 +20,7 @@ from yardstick.benchmark.scenarios.availability.attacker import \
     attacker_baremetal
 
 
-@mock.patch(
-    'yardstick.benchmark.scenarios.availability.attacker.attacker_baremetal'
-    '.subprocess')
+@mock.patch('yardstick.benchmark.scenarios.availability.attacker.attacker_baremetal.subprocess')
 class ExecuteShellTestCase(unittest.TestCase):
 
     def test__fun_execute_shell_command_successful(self, mock_subprocess):
@@ -31,17 +29,17 @@ class ExecuteShellTestCase(unittest.TestCase):
         exitcode, output = attacker_baremetal._execute_shell_command(cmd)
         self.assertEqual(exitcode, 0)
 
-    def test__fun_execute_shell_command_fail_cmd_exception(self,
-                                                           mock_subprocess):
+    @mock.patch('yardstick.benchmark.scenarios.availability.attacker.attacker_baremetal.LOG')
+    def test__fun_execute_shell_command_fail_cmd_exception(self, mock_log, mock_subprocess):
         cmd = "env"
         mock_subprocess.check_output.side_effect = RuntimeError
         exitcode, output = attacker_baremetal._execute_shell_command(cmd)
         self.assertEqual(exitcode, -1)
+        mock_log.error.assert_called_once()
 
 
-@mock.patch(
-    'yardstick.benchmark.scenarios.availability.attacker.attacker_baremetal'
-    '.ssh')
+@mock.patch('yardstick.benchmark.scenarios.availability.attacker.attacker_baremetal.subprocess')
+@mock.patch('yardstick.benchmark.scenarios.availability.attacker.attacker_baremetal.ssh')
 class AttackerBaremetalTestCase(unittest.TestCase):
 
     def setUp(self):
@@ -59,28 +57,28 @@ class AttackerBaremetalTestCase(unittest.TestCase):
             'host': 'node1',
         }
 
-    def test__attacker_baremetal_all_successful(self, mock_ssh):
+    def test__attacker_baremetal_all_successful(self, mock_ssh, mock_subprocess):
+        mock_ssh.SSH.from_node().execute.return_value = (0, "running", '')
         ins = attacker_baremetal.BaremetalAttacker(self.attacker_cfg,
                                                    self.context)
 
-        mock_ssh.SSH.from_node().execute.return_value = (0, "running", '')
         ins.setup()
         ins.inject_fault()
         ins.recover()
 
-    def test__attacker_baremetal_check_failuer(self, mock_ssh):
+    def test__attacker_baremetal_check_failure(self, mock_ssh, mock_subprocess):
+        mock_ssh.SSH.from_node().execute.return_value = (0, "error check", '')
         ins = attacker_baremetal.BaremetalAttacker(self.attacker_cfg,
                                                    self.context)
-        mock_ssh.SSH.from_node().execute.return_value = (0, "error check", '')
         ins.setup()
 
-    def test__attacker_baremetal_recover_successful(self, mock_ssh):
+    def test__attacker_baremetal_recover_successful(self, mock_ssh, mock_subprocess):
 
         self.attacker_cfg["jump_host"] = 'node1'
         self.context["node1"]["pwd"] = "123456"
+        mock_ssh.SSH.from_node().execute.return_value = (0, "running", '')
         ins = attacker_baremetal.BaremetalAttacker(self.attacker_cfg,
                                                    self.context)
 
-        mock_ssh.SSH.from_node().execute.return_value = (0, "running", '')
         ins.setup()
         ins.recover()
index 2ed4be7..6a9b3b1 100644 (file)
@@ -30,12 +30,14 @@ class ExecuteShellTestCase(unittest.TestCase):
         exitcode, output = monitor_command._execute_shell_command(cmd)
         self.assertEqual(exitcode, 0)
 
-    def test__fun_execute_shell_command_fail_cmd_exception(self,
+    @mock.patch('yardstick.benchmark.scenarios.availability.monitor.monitor_command.LOG')
+    def test__fun_execute_shell_command_fail_cmd_exception(self, mock_log,
                                                            mock_subprocess):
         cmd = "env"
         mock_subprocess.check_output.side_effect = RuntimeError
         exitcode, output = monitor_command._execute_shell_command(cmd)
         self.assertEqual(exitcode, -1)
+        mock_log.error.assert_called_once()
 
 
 @mock.patch(
@@ -67,13 +69,15 @@ class MonitorOpenstackCmdTestCase(unittest.TestCase):
         instance._result = {"outage_time": 0}
         instance.verify_SLA()
 
-    def test__monitor_command_monitor_func_failure(self, mock_subprocess):
+    @mock.patch('yardstick.benchmark.scenarios.availability.monitor.monitor_command.LOG')
+    def test__monitor_command_monitor_func_failure(self, mock_log, mock_subprocess):
         mock_subprocess.check_output.return_value = (1, 'unittest')
         instance = monitor_command.MonitorOpenstackCmd(self.config, None, {"nova-api": 10})
         instance.setup()
         mock_subprocess.check_output.side_effect = RuntimeError
         ret = instance.monitor_func()
         self.assertEqual(ret, False)
+        mock_log.error.assert_called_once()
         instance._result = {"outage_time": 10}
         instance.verify_SLA()
 
index f8d12bd..b59ec6c 100644 (file)
@@ -36,7 +36,7 @@ class MultiMonitorServiceTestCase(unittest.TestCase):
             'key': 'service-status',
             'monitor_key': 'service-status',
             'host': 'node1',
-            'monitor_time': 3,
+            'monitor_time': 0.1,
             'parameter': {'serviceName': 'haproxy'},
             'sla': {'max_outage_time': 1}
         }
index 1317167..fe44cfd 100644 (file)
@@ -43,7 +43,7 @@ class NstatTestCase(unittest.TestCase):
     def test_nstat_successful_no_sla(self, mock_ssh):
 
         options = {
-            "duration": 60
+            "duration": 0
         }
         args = {
             "options": options,
@@ -67,7 +67,7 @@ class NstatTestCase(unittest.TestCase):
     def test_nstat_successful_sla(self, mock_ssh):
 
         options = {
-            "duration": 60
+            "duration": 0
         }
         sla = {
             "IP_datagram_error_rate": 0.1
@@ -95,7 +95,7 @@ class NstatTestCase(unittest.TestCase):
     def test_nstat_unsuccessful_cmd_error(self, mock_ssh):
 
         options = {
-            "duration": 60
+            "duration": 0
         }
         sla = {
             "IP_datagram_error_rate": 0.1
index e6998e4..b4b8752 100644 (file)
@@ -20,6 +20,7 @@ import yardstick.common.utils as utils
 from yardstick.benchmark.scenarios.networking import pktgen_dpdk
 
 
+@mock.patch('yardstick.benchmark.scenarios.networking.pktgen_dpdk.time')
 @mock.patch('yardstick.benchmark.scenarios.networking.pktgen_dpdk.ssh')
 class PktgenDPDKLatencyTestCase(unittest.TestCase):
 
@@ -38,7 +39,7 @@ class PktgenDPDKLatencyTestCase(unittest.TestCase):
             }
         }
 
-    def test_pktgen_dpdk_successful_setup(self, mock_ssh):
+    def test_pktgen_dpdk_successful_setup(self, mock_ssh, mock_time):
 
         args = {
             'options': {'packetsize': 60},
@@ -51,7 +52,7 @@ class PktgenDPDKLatencyTestCase(unittest.TestCase):
         self.assertIsNotNone(p.client)
         self.assertEqual(p.setup_done, True)
 
-    def test_pktgen_dpdk_successful_get_port_ip(self, mock_ssh):
+    def test_pktgen_dpdk_successful_get_port_ip(self, mock_ssh, mock_time):
 
         args = {
             'options': {'packetsize': 60},
@@ -66,7 +67,7 @@ class PktgenDPDKLatencyTestCase(unittest.TestCase):
         mock_ssh.SSH.from_node().execute.assert_called_with(
             "ifconfig eth1 |grep 'inet addr' |awk '{print $2}' |cut -d ':' -f2 ")
 
-    def test_pktgen_dpdk_unsuccessful_get_port_ip(self, mock_ssh):
+    def test_pktgen_dpdk_unsuccessful_get_port_ip(self, mock_ssh, mock_time):
 
         args = {
             'options': {'packetsize': 60},
@@ -78,7 +79,7 @@ class PktgenDPDKLatencyTestCase(unittest.TestCase):
         mock_ssh.SSH.from_node().execute.return_value = (1, '', 'FOOBAR')
         self.assertRaises(RuntimeError, utils.get_port_ip, p.server, "eth1")
 
-    def test_pktgen_dpdk_successful_get_port_mac(self, mock_ssh):
+    def test_pktgen_dpdk_successful_get_port_mac(self, mock_ssh, mock_time):
 
         args = {
             'options': {'packetsize': 60},
@@ -93,7 +94,7 @@ class PktgenDPDKLatencyTestCase(unittest.TestCase):
         mock_ssh.SSH.from_node().execute.assert_called_with(
             "ifconfig |grep HWaddr |grep eth1 |awk '{print $5}' ")
 
-    def test_pktgen_dpdk_unsuccessful_get_port_mac(self, mock_ssh):
+    def test_pktgen_dpdk_unsuccessful_get_port_mac(self, mock_ssh, mock_time):
 
         args = {
             'options': {'packetsize': 60},
@@ -105,7 +106,7 @@ class PktgenDPDKLatencyTestCase(unittest.TestCase):
         mock_ssh.SSH.from_node().execute.return_value = (1, '', 'FOOBAR')
         self.assertRaises(RuntimeError, utils.get_port_mac, p.server, "eth1")
 
-    def test_pktgen_dpdk_successful_no_sla(self, mock_ssh):
+    def test_pktgen_dpdk_successful_no_sla(self, mock_ssh, mock_time):
 
         args = {
             'options': {'packetsize': 60},
@@ -124,7 +125,7 @@ class PktgenDPDKLatencyTestCase(unittest.TestCase):
         delta = result['avg_latency'] - 132
         self.assertLessEqual(delta, 1)
 
-    def test_pktgen_dpdk_successful_sla(self, mock_ssh):
+    def test_pktgen_dpdk_successful_sla(self, mock_ssh, mock_time):
 
         args = {
             'options': {'packetsize': 60},
@@ -141,7 +142,7 @@ class PktgenDPDKLatencyTestCase(unittest.TestCase):
 
         self.assertEqual(result, {"avg_latency": 100})
 
-    def test_pktgen_dpdk_unsuccessful_sla(self, mock_ssh):
+    def test_pktgen_dpdk_unsuccessful_sla(self, mock_ssh, mock_time):
 
         args = {
             'options': {'packetsize': 60},
@@ -158,7 +159,7 @@ class PktgenDPDKLatencyTestCase(unittest.TestCase):
         mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
         self.assertRaises(AssertionError, p.run, result)
 
-    def test_pktgen_dpdk_unsuccessful_script_error(self, mock_ssh):
+    def test_pktgen_dpdk_unsuccessful_script_error(self, mock_ssh, mock_time):
 
         args = {
             'options': {'packetsize': 60},
index 0178165..d340970 100644 (file)
@@ -20,6 +20,7 @@ from yardstick.benchmark.scenarios.networking import pktgen_dpdk_throughput
 
 
 @mock.patch('yardstick.benchmark.scenarios.networking.pktgen_dpdk_throughput.ssh')
+@mock.patch('yardstick.benchmark.scenarios.networking.pktgen_dpdk_throughput.time')
 class PktgenDPDKTestCase(unittest.TestCase):
 
     def setUp(self):
@@ -36,7 +37,7 @@ class PktgenDPDKTestCase(unittest.TestCase):
             }
         }
 
-    def test_pktgen_dpdk_throughput_successful_setup(self, mock_ssh):
+    def test_pktgen_dpdk_throughput_successful_setup(self, mock_time, mock_ssh):
         args = {
             'options': {'packetsize': 60},
         }
@@ -48,7 +49,7 @@ class PktgenDPDKTestCase(unittest.TestCase):
         self.assertIsNotNone(p.client)
         self.assertEqual(p.setup_done, True)
 
-    def test_pktgen_dpdk_throughput_successful_no_sla(self, mock_ssh):
+    def test_pktgen_dpdk_throughput_successful_no_sla(self, mock_time, mock_ssh):
         args = {
             'options': {'packetsize': 60, 'number_of_ports': 10},
         }
@@ -74,7 +75,7 @@ class PktgenDPDKTestCase(unittest.TestCase):
         expected_result["packetsize"] = 60
         self.assertEqual(result, expected_result)
 
-    def test_pktgen_dpdk_throughput_successful_sla(self, mock_ssh):
+    def test_pktgen_dpdk_throughput_successful_sla(self, mock_time, mock_ssh):
         args = {
             'options': {'packetsize': 60, 'number_of_ports': 10},
             'sla': {'max_ppm': 10000}
@@ -100,7 +101,7 @@ class PktgenDPDKTestCase(unittest.TestCase):
         expected_result["packetsize"] = 60
         self.assertEqual(result, expected_result)
 
-    def test_pktgen_dpdk_throughput_unsuccessful_sla(self, mock_ssh):
+    def test_pktgen_dpdk_throughput_unsuccessful_sla(self, mock_time, mock_ssh):
         args = {
             'options': {'packetsize': 60, 'number_of_ports': 10},
             'sla': {'max_ppm': 1000}
@@ -121,7 +122,7 @@ class PktgenDPDKTestCase(unittest.TestCase):
         mock_ssh.SSH().execute.return_value = (0, sample_output, '')
         self.assertRaises(AssertionError, p.run, result)
 
-    def test_pktgen_dpdk_throughput_unsuccessful_script_error(self, mock_ssh):
+    def test_pktgen_dpdk_throughput_unsuccessful_script_error(self, mock_time, mock_ssh):
         args = {
             'options': {'packetsize': 60, 'number_of_ports': 10},
             'sla': {'max_ppm': 1000}
@@ -136,7 +137,7 @@ class PktgenDPDKTestCase(unittest.TestCase):
         mock_ssh.SSH().execute.return_value = (1, '', 'FOOBAR')
         self.assertRaises(RuntimeError, p.run, result)
 
-    def test_pktgen_dpdk_throughput_is_dpdk_setup(self, mock_ssh):
+    def test_pktgen_dpdk_throughput_is_dpdk_setup(self, mock_time, mock_ssh):
         args = {
             'options': {'packetsize': 60},
         }
@@ -150,7 +151,7 @@ class PktgenDPDKTestCase(unittest.TestCase):
         mock_ssh.SSH().execute.assert_called_with(
             "ip a | grep eth1 2>/dev/null")
 
-    def test_pktgen_dpdk_throughput_dpdk_setup(self, mock_ssh):
+    def test_pktgen_dpdk_throughput_dpdk_setup(self, mock_time, mock_ssh):
         args = {
             'options': {'packetsize': 60},
         }
@@ -164,7 +165,7 @@ class PktgenDPDKTestCase(unittest.TestCase):
 
         self.assertEqual(p.dpdk_setup_done, True)
 
-    def test_pktgen_dpdk_throughput_dpdk_get_result(self, mock_ssh):
+    def test_pktgen_dpdk_throughput_dpdk_get_result(self, mock_time, mock_ssh):
         args = {
             'options': {'packetsize': 60},
         }
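Note that this file stacks its patches in the opposite order from PktgenDPDKLatencyTestCase above, and the test signatures differ to match: stacked @mock.patch decorators are applied bottom-up, so the decorator closest to the class (or function) supplies the first mock argument after self. A compact illustration:

    import os
    import unittest

    import mock


    @mock.patch('os.getcwd')   # outer decorator: second mock argument
    @mock.patch('os.getpid')   # inner decorator: first mock argument
    class DecoratorOrderSketch(unittest.TestCase):

        def test_order(self, mock_getpid, mock_getcwd):
            mock_getpid.return_value = 42
            mock_getcwd.return_value = '/tmp'
            self.assertEqual(os.getpid(), 42)
            self.assertEqual(os.getcwd(), '/tmp')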
index 111e781..c9cd7fe 100644 (file)
@@ -91,68 +91,97 @@ STL_MOCKS = {
     'stl.trex_stl_lib.zmq': mock.MagicMock(),
 }
 
-COMPLETE_TREX_VNFD = \
-    {'vnfd:vnfd-catalog':
-     {'vnfd':
-      [{'benchmark':
-        {'kpi':
-         ['rx_throughput_fps',
-          'tx_throughput_fps',
-          'tx_throughput_mbps',
-          'rx_throughput_mbps',
-          'tx_throughput_pc_linerate',
-          'rx_throughput_pc_linerate',
-          'min_latency',
-          'max_latency',
-          'avg_latency']},
-        'connection-point': [{'name': 'xe0',
-                              'type': 'VPORT'},
-                             {'name': 'xe1',
-                              'type': 'VPORT'}],
-        'description': 'TRex stateless traffic generator for RFC2544',
-        'id': 'TrexTrafficGen',
-        'mgmt-interface': {'ip': '1.1.1.1',
-                           'password': 'berta',
-                           'user': 'berta',
-                           'vdu-id': 'trexgen-baremetal'},
-        'name': 'trexgen',
-        'short-name': 'trexgen',
-        'vdu': [{'description': 'TRex stateless traffic generator for RFC2544',
-                 'external-interface':
-                 [{'name': 'xe0',
-                   'virtual-interface': {'bandwidth': '10 Gbps',
-                                         'dst_ip': '1.1.1.1',
-                                         'dst_mac': '00:01:02:03:04:05',
-                                         'local_ip': '1.1.1.2',
-                                         'local_mac': '00:01:02:03:05:05',
-                                         'type': 'PCI-PASSTHROUGH',
-                                         'netmask': "255.255.255.0",
-                                         'driver': 'i40',
-                                         'vpci': '0000:00:10.2'},
-                   'vnfd-connection-point-ref': 'xe0'},
-                  {'name': 'xe1',
-                   'virtual-interface': {'bandwidth': '10 Gbps',
-                                         'dst_ip': '2.1.1.1',
-                                         'dst_mac': '00:01:02:03:04:06',
-                                         'local_ip': '2.1.1.2',
-                                         'local_mac': '00:01:02:03:05:06',
-                                         'type': 'PCI-PASSTHROUGH',
-                                         'netmask': "255.255.255.0",
-                                         'driver': 'i40',
-                                         'vpci': '0000:00:10.1'},
-                   'vnfd-connection-point-ref': 'xe1'}],
-                 'id': 'trexgen-baremetal',
-                 'name': 'trexgen-baremetal'}]}]}}
+COMPLETE_TREX_VNFD = {
+    'vnfd:vnfd-catalog': {
+        'vnfd': [
+            {
+                'benchmark': {
+                    'kpi': [
+                        'rx_throughput_fps',
+                        'tx_throughput_fps',
+                        'tx_throughput_mbps',
+                        'rx_throughput_mbps',
+                        'tx_throughput_pc_linerate',
+                        'rx_throughput_pc_linerate',
+                        'min_latency',
+                        'max_latency',
+                        'avg_latency',
+                    ],
+                },
+                'connection-point': [
+                    {
+                        'name': 'xe0',
+                        'type': 'VPORT',
+                    },
+                    {
+                        'name': 'xe1',
+                        'type': 'VPORT',
+                    },
+                ],
+                'description': 'TRex stateless traffic generator for RFC2544',
+                'id': 'TrexTrafficGen',
+                'mgmt-interface': {
+                    'ip': '1.1.1.1',
+                    'password': 'berta',
+                    'user': 'berta',
+                    'vdu-id': 'trexgen-baremetal',
+                },
+                'name': 'trexgen',
+                'short-name': 'trexgen',
+                'class-name': 'TrexTrafficGen',
+                'vdu': [
+                    {
+                        'description': 'TRex stateless traffic generator for RFC2544',
+                        'external-interface': [
+                            {
+                                'name': 'xe0',
+                                'virtual-interface': {
+                                    'bandwidth': '10 Gbps',
+                                    'dst_ip': '1.1.1.1',
+                                    'dst_mac': '00:01:02:03:04:05',
+                                    'local_ip': '1.1.1.2',
+                                    'local_mac': '00:01:02:03:05:05',
+                                    'type': 'PCI-PASSTHROUGH',
+                                    'netmask': "255.255.255.0",
+                                    'driver': 'i40',
+                                    'vpci': '0000:00:10.2',
+                                },
+                                'vnfd-connection-point-ref': 'xe0',
+                            },
+                            {
+                                'name': 'xe1',
+                                'virtual-interface': {
+                                    'bandwidth': '10 Gbps',
+                                    'dst_ip': '2.1.1.1',
+                                    'dst_mac': '00:01:02:03:04:06',
+                                    'local_ip': '2.1.1.2',
+                                    'local_mac': '00:01:02:03:05:06',
+                                    'type': 'PCI-PASSTHROUGH',
+                                    'netmask': "255.255.255.0",
+                                    'driver': 'i40',
+                                    'vpci': '0000:00:10.1',
+                                },
+                                'vnfd-connection-point-ref': 'xe1',
+                            },
+                        ],
+                        'id': 'trexgen-baremetal',
+                        'name': 'trexgen-baremetal',
+                    },
+                ],
+            },
+        ],
+    },
+}
 
 IP_ADDR_SHOW = """
-28: eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP """
-"""group default qlen 1000
+28: eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP \
+group default qlen 1000
     link/ether 90:e2:ba:a7:6a:c8 brd ff:ff:ff:ff:ff:ff
     inet 1.1.1.1/8 brd 1.255.255.255 scope global eth1
     inet6 fe80::92e2:baff:fea7:6ac8/64 scope link
        valid_lft forever preferred_lft forever
-29: eth5: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP """
-"""group default qlen 1000
+29: eth5: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP \
+group default qlen 1000
     link/ether 90:e2:ba:a7:6a:c9 brd ff:ff:ff:ff:ff:ff
     inet 2.1.1.1/8 brd 2.255.255.255 scope global eth5
     inet6 fe80::92e2:baff:fea7:6ac9/64 scope link tentative
@@ -160,10 +189,10 @@ IP_ADDR_SHOW = """
 """
 
 SYS_CLASS_NET = """
-lrwxrwxrwx 1 root root 0 sie 10 14:16 eth1 -> """
-"""../../devices/pci0000:80/0000:80:02.2/0000:84:00.1/net/eth1
-lrwxrwxrwx 1 root root 0 sie  3 10:37 eth2 -> """
-"""../../devices/pci0000:00/0000:00:01.1/0000:84:00.2/net/eth5
+lrwxrwxrwx 1 root root 0 sie 10 14:16 eth1 -> \
+../../devices/pci0000:80/0000:80:02.2/0000:84:00.1/net/eth1
+lrwxrwxrwx 1 root root 0 sie  3 10:37 eth2 -> \
+../../devices/pci0000:00/0000:00:01.1/0000:84:00.2/net/eth5
 """
 
 TRAFFIC_PROFILE = {
@@ -174,137 +203,195 @@ TRAFFIC_PROFILE = {
         "traffic_type": "FixedTraffic",
         "frame_rate": 100,  # pps
         "flow_number": 10,
-        "frame_size": 64}}
+        "frame_size": 64,
+    },
+}
 
 
 class TestNetworkServiceTestCase(unittest.TestCase):
     def setUp(self):
-        self.context_cfg = \
-            {'nodes':
-             {'trexgen__1': {'role': 'TrafficGen',
-                             'name': 'trafficgen_1.yardstick',
-                             'ip': '10.10.10.11',
-                             'interfaces':
-                             {'xe0':
-                              {'netmask': '255.255.255.0',
-                               'local_ip': '152.16.100.20',
-                               'local_mac': '00:00:00:00:00:01',
-                               'driver': 'i40e',
-                               'vpci': '0000:07:00.0',
-                               'dpdk_port_num': 0},
-                              'xe1':
-                              {'netmask': '255.255.255.0',
-                               'local_ip': '152.16.40.20',
-                               'local_mac': '00:00:00:00:00:02',
-                               'driver': 'i40e',
-                               'vpci': '0000:07:00.1',
-                               'dpdk_port_num': 1}},
-                             'password': 'r00t',
-                             'user': 'root'},
-              'trexvnf__1': {'name': 'vnf.yardstick',
-                             'ip': '10.10.10.12',
-                             'interfaces':
-                             {'xe0':
-                              {'netmask': '255.255.255.0',
-                               'local_ip': '152.16.100.19',
-                               'local_mac': '00:00:00:00:00:03',
-                               'driver': 'i40e',
-                               'vpci': '0000:07:00.0',
-                               'dpdk_port_num': 0},
-                              'xe1': {'netmask': '255.255.255.0',
-                                      'local_ip': '152.16.40.19',
-                                      'local_mac': '00:00:00:00:00:04',
-                                      'driver': 'i40e',
-                                      'vpci': '0000:07:00.1',
-                                      'dpdk_port_num': 1}},
-                             'routing_table': [{'netmask': '255.255.255.0',
-                                                'gateway': '152.16.100.20',
-                                                'network': '152.16.100.20',
-                                                'if': 'xe0'},
-                                               {'netmask': '255.255.255.0',
-                                                'gateway': '152.16.40.20',
-                                                'network': '152.16.40.20',
-                                                'if': 'xe1'}],
-                             'host': '10.223.197.164',
-                             'role': 'vnf',
-                             'user': 'root',
-                             'nd_route_tbl':
-                             [{'netmask': '112',
-                               'gateway': '0064:ff9b:0:0:0:0:9810:6414',
-                               'network': '0064:ff9b:0:0:0:0:9810:6414',
-                               'if': 'xe0'},
-                              {'netmask': '112',
-                               'gateway': '0064:ff9b:0:0:0:0:9810:2814',
-                               'network': '0064:ff9b:0:0:0:0:9810:2814',
-                               'if': 'xe1'}],
-                             'password': 'r00t'}}}
+        self.trexgen__1 = {
+            'name': 'trafficgen_1.yardstick',
+            'ip': '10.10.10.11',
+            'role': 'TrafficGen',
+            'user': 'root',
+            'password': 'r00t',
+            'interfaces': {
+                'xe0': {
+                    'netmask': '255.255.255.0',
+                    'local_ip': '152.16.100.20',
+                    'local_mac': '00:00:00:00:00:01',
+                    'driver': 'i40e',
+                    'vpci': '0000:07:00.0',
+                    'dpdk_port_num': 0,
+                },
+                'xe1': {
+                    'netmask': '255.255.255.0',
+                    'local_ip': '152.16.40.20',
+                    'local_mac': '00:00:00:00:00:02',
+                    'driver': 'i40e',
+                    'vpci': '0000:07:00.1',
+                    'dpdk_port_num': 1,
+                },
+            },
+        }
+
+        self.trexvnf__1 = {
+            'name': 'vnf.yardstick',
+            'ip': '10.10.10.12',
+            'host': '10.223.197.164',
+            'role': 'vnf',
+            'user': 'root',
+            'password': 'r00t',
+            'interfaces': {
+                'xe0': {
+                    'netmask': '255.255.255.0',
+                    'local_ip': '152.16.100.19',
+                    'local_mac': '00:00:00:00:00:03',
+                    'driver': 'i40e',
+                    'vpci': '0000:07:00.0',
+                    'dpdk_port_num': 0,
+                },
+                'xe1': {
+                    'netmask': '255.255.255.0',
+                    'local_ip': '152.16.40.19',
+                    'local_mac': '00:00:00:00:00:04',
+                    'driver': 'i40e',
+                    'vpci': '0000:07:00.1',
+                    'dpdk_port_num': 1,
+                },
+            },
+            'routing_table': [
+                {
+                    'netmask': '255.255.255.0',
+                    'gateway': '152.16.100.20',
+                    'network': '152.16.100.20',
+                    'if': 'xe0',
+                },
+                {
+                    'netmask': '255.255.255.0',
+                    'gateway': '152.16.40.20',
+                    'network': '152.16.40.20',
+                    'if': 'xe1',
+                },
+            ],
+            'nd_route_tbl': [
+                {
+                    'netmask': '112',
+                    'gateway': '0064:ff9b:0:0:0:0:9810:6414',
+                    'network': '0064:ff9b:0:0:0:0:9810:6414',
+                    'if': 'xe0',
+                },
+                {
+                    'netmask': '112',
+                    'gateway': '0064:ff9b:0:0:0:0:9810:2814',
+                    'network': '0064:ff9b:0:0:0:0:9810:2814',
+                    'if': 'xe1',
+                },
+            ],
+        }
+
+        self.context_cfg = {
+            'nodes': {
+                'trexgen__1': self.trexgen__1,
+                'trexvnf__1': self.trexvnf__1,
+            },
+            'networks': {
+                'private': {
+                    'vld_id': 'private',
+                },
+                'public': {
+                    'vld_id': 'public',
+                },
+            },
+        }
+
+        self.vld0 = {
+            'vnfd-connection-point-ref': [
+                {
+                    'vnfd-connection-point-ref': 'xe0',
+                    'member-vnf-index-ref': '1',
+                    'vnfd-id-ref': 'trexgen'
+                },
+                {
+                    'vnfd-connection-point-ref': 'xe0',
+                    'member-vnf-index-ref': '2',
+                    'vnfd-id-ref': 'trexgen'
+                }
+            ],
+            'type': 'ELAN',
+            'id': 'private',
+            'name': 'trexgen__1 to trexvnf__1 link 1'
+        }
+
+        self.vld1 = {
+            'vnfd-connection-point-ref': [
+                {
+                    'vnfd-connection-point-ref': 'xe1',
+                    'member-vnf-index-ref': '1',
+                    'vnfd-id-ref': 'trexgen'
+                },
+                {
+                    'vnfd-connection-point-ref': 'xe1',
+                    'member-vnf-index-ref': '2',
+                    'vnfd-id-ref': 'trexgen'
+                }
+            ],
+            'type': 'ELAN',
+            'id': 'public',
+            'name': 'trexvnf__1 to trexgen__1 link 2'
+        }
 
         self.topology = {
+            'id': 'trex-tg-topology',
             'short-name': 'trex-tg-topology',
-            'constituent-vnfd':
-                [{'member-vnf-index': '1',
-                  'VNF model': 'tg_trex_tpl.yaml',
-                  'vnfd-id-ref': 'trexgen__1'},
-                 {'member-vnf-index': '2',
-                  'VNF model': 'tg_trex_tpl.yaml',
-                  'vnfd-id-ref': 'trexvnf__1'}],
-            'description': 'trex-tg-topology',
             'name': 'trex-tg-topology',
-            'vld': [
+            'description': 'trex-tg-topology',
+            'constituent-vnfd': [
                 {
-                    'vnfd-connection-point-ref': [
-                        {
-                            'vnfd-connection-point-ref': 'xe0',
-                            'member-vnf-index-ref': '1',
-                            'vnfd-id-ref': 'trexgen'
-                        },
-                        {
-                            'vnfd-connection-point-ref': 'xe0',
-                            'member-vnf-index-ref': '2',
-                            'vnfd-id-ref': 'trexgen'
-                        }
-                    ],
-                    'type': 'ELAN',
-                    'id': 'private',
-                    'name': 'trexgen__1 to trexvnf__1 link 1'
+                    'member-vnf-index': '1',
+                    'VNF model': 'tg_trex_tpl.yaml',
+                    'vnfd-id-ref': 'trexgen__1',
                 },
                 {
-                    'vnfd-connection-point-ref': [
-                        {
-                            'vnfd-connection-point-ref': 'xe1',
-                            'member-vnf-index-ref': '1',
-                            'vnfd-id-ref': 'trexgen'
-                        },
-                        {
-                            'vnfd-connection-point-ref': 'xe1',
-                            'member-vnf-index-ref': '2',
-                            'vnfd-id-ref': 'trexgen'
-                        }
-                    ],
-                    'type': 'ELAN',
-                    'id': 'public',
-                    'name': 'trexvnf__1 to trexgen__1 link 2'
-                }],
-            'id': 'trex-tg-topology',
+                    'member-vnf-index': '2',
+                    'VNF model': 'tg_trex_tpl.yaml',
+                    'vnfd-id-ref': 'trexvnf__1',
+                },
+            ],
+            'vld': [self.vld0, self.vld1],
         }
 
         self.scenario_cfg = {
             'task_path': "",
-            'tc_options': {'rfc2544': {'allowed_drop_rate': '0.8 - 1'}},
+            "topology": self._get_file_abspath("vpe_vnf_topology.yaml"),
             'task_id': 'a70bdf4a-8e67-47a3-9dc1-273c14506eb7',
             'tc': 'tc_ipv4_1Mflow_64B_packetsize',
-            'runner': {'object': 'NetworkServiceTestCase',
-                       'interval': 35,
-                       'output_filename': 'yardstick.out',
-                       'runner_id': 74476,
-                       'duration': 400, 'type': 'Duration'},
             'traffic_profile': 'ipv4_throughput_vpe.yaml',
-            'traffic_options': {'flow': 'ipv4_1flow_Packets_vpe.yaml',
-                                'imix': 'imix_voice.yaml'}, 'type': 'ISB',
-            'nodes': {'tg__2': 'trafficgen_2.yardstick',
-                      'tg__1': 'trafficgen_1.yardstick',
-                      'vnf__1': 'vnf.yardstick'},
-            "topology": self._get_file_abspath("vpe_vnf_topology.yaml")}
+            'type': 'ISB',
+            'tc_options': {
+                'rfc2544': {
+                    'allowed_drop_rate': '0.8 - 1',
+                },
+            },
+            'runner': {
+                'object': 'NetworkServiceTestCase',
+                'interval': 35,
+                'output_filename': 'yardstick.out',
+                'runner_id': 74476,
+                'duration': 400,
+                'type': 'Duration',
+            },
+            'traffic_options': {
+                'flow': 'ipv4_1flow_Packets_vpe.yaml',
+                'imix': 'imix_voice.yaml'
+            },
+            'nodes': {
+                'tg__2': 'trafficgen_2.yardstick',
+                'tg__1': 'trafficgen_1.yardstick',
+                'vnf__1': 'vnf.yardstick',
+            },
+        }
 
         self.s = NetworkServiceTestCase(self.scenario_cfg, self.context_cfg)
 
@@ -339,10 +426,18 @@ class TestNetworkServiceTestCase(unittest.TestCase):
         self.assertEqual({}, self.s._get_traffic_flow(self.scenario_cfg))
 
     def test_get_vnf_imp(self):
-        vnfd = COMPLETE_TREX_VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+        vnfd = COMPLETE_TREX_VNFD['vnfd:vnfd-catalog']['vnfd'][0]['class-name']
         with mock.patch.dict("sys.modules", STL_MOCKS):
             self.assertIsNotNone(self.s.get_vnf_impl(vnfd))
 
+            with self.assertRaises(IncorrectConfig) as raised:
+                self.s.get_vnf_impl('NonExistentClass')
+
+            exc_str = str(raised.exception)
+            self.assertIn('No implementation', exc_str)
+            self.assertIn('found in', exc_str)
+
     def test_load_vnf_models_invalid(self):
         self.context_cfg["nodes"]['trexgen__1']['VNF model'] = \
             self._get_file_abspath("tg_trex_tpl.yaml")
@@ -363,10 +458,10 @@ class TestNetworkServiceTestCase(unittest.TestCase):
             ssh.from_node.return_value = ssh_mock
             self.s.map_topology_to_infrastructure(self.context_cfg,
                                                   self.topology)
-        self.assertEqual("tg_trex_tpl.yaml",
-                         self.context_cfg["nodes"]['trexgen__1']['VNF model'])
-        self.assertEqual("tg_trex_tpl.yaml",
-                         self.context_cfg["nodes"]['trexvnf__1']['VNF model'])
+
+        nodes = self.context_cfg["nodes"]
+        self.assertEqual("tg_trex_tpl.yaml", nodes['trexgen__1']['VNF model'])
+        self.assertEqual("tg_trex_tpl.yaml", nodes['trexvnf__1']['VNF model'])
 
     def test_map_topology_to_infrastructure_insufficient_nodes(self):
         del self.context_cfg['nodes']['trexvnf__1']
@@ -376,9 +471,8 @@ class TestNetworkServiceTestCase(unittest.TestCase):
                 mock.Mock(return_value=(1, SYS_CLASS_NET + IP_ADDR_SHOW, ""))
             ssh.from_node.return_value = ssh_mock
 
-            self.assertRaises(IncorrectSetup,
-                              self.s.map_topology_to_infrastructure,
-                              self.context_cfg, self.topology)
+            with self.assertRaises(IncorrectSetup):
+                self.s.map_topology_to_infrastructure(self.context_cfg, self.topology)
 
     def test_map_topology_to_infrastructure_config_invalid(self):
         cfg = dict(self.context_cfg)
@@ -389,9 +483,8 @@ class TestNetworkServiceTestCase(unittest.TestCase):
                 mock.Mock(return_value=(0, SYS_CLASS_NET + IP_ADDR_SHOW, ""))
             ssh.from_node.return_value = ssh_mock
 
-            self.assertRaises(IncorrectConfig,
-                              self.s.map_topology_to_infrastructure,
-                              self.context_cfg, self.topology)
+            with self.assertRaises(IncorrectConfig):
+                self.s.map_topology_to_infrastructure(self.context_cfg, self.topology)
 
     def test__resolve_topology_invalid_config(self):
         with mock.patch("yardstick.ssh.SSH") as ssh:
@@ -400,14 +493,32 @@ class TestNetworkServiceTestCase(unittest.TestCase):
                 mock.Mock(return_value=(0, SYS_CLASS_NET + IP_ADDR_SHOW, ""))
             ssh.from_node.return_value = ssh_mock
 
-            del self.context_cfg['nodes']
-            self.assertRaises(IncorrectConfig, self.s._resolve_topology,
-                              self.context_cfg, self.topology)
+            # purge an important key from the data structure
+            for interface in self.trexgen__1['interfaces'].values():
+                del interface['local_mac']
+
+            with self.assertRaises(IncorrectConfig) as raised:
+                self.s._resolve_topology(self.context_cfg, self.topology)
+
+            self.assertIn('not found', str(raised.exception))
+
+            # make a connection point ref with 3 points
+            self.vld0['vnfd-connection-point-ref'].append(
+                self.vld0['vnfd-connection-point-ref'][0])
+
+            with self.assertRaises(IncorrectConfig) as raised:
+                self.s._resolve_topology(self.context_cfg, self.topology)
+
+            self.assertIn('wrong number of endpoints', str(raised.exception))
+
+            # make a connection point ref with 1 point
+            self.vld0['vnfd-connection-point-ref'] = \
+                self.vld0['vnfd-connection-point-ref'][:1]
+
+            with self.assertRaises(IncorrectConfig) as raised:
+                self.s._resolve_topology(self.context_cfg, self.topology)
 
-            self.topology['vld'][0]['vnfd-connection-point-ref'].append(
-                self.topology['vld'][0]['vnfd-connection-point-ref'])
-            self.assertRaises(IncorrectConfig, self.s._resolve_topology,
-                              self.context_cfg, self.topology)
+            self.assertIn('wrong number of endpoints', str(raised.exception))
 
     def test_run(self):
         tgen = mock.Mock(autospec=GenericTrafficGen)
@@ -462,8 +573,8 @@ class TestNetworkServiceTestCase(unittest.TestCase):
     def test__get_traffic_profile_exception(self):
         cfg = dict(self.scenario_cfg)
         cfg["traffic_profile"] = ""
-        self.assertRaises(IOError, self.s._get_traffic_profile, cfg,
-                          self.context_cfg)
+        with self.assertRaises(IOError):
+            self.s._get_traffic_profile(cfg, self.context_cfg)
 
     def test___get_traffic_imix_exception(self):
         cfg = dict(self.scenario_cfg)
index 00054d5..7b16bb3 100644 (file)
@@ -130,7 +130,7 @@ class StorPerfTestCase(unittest.TestCase):
             "queue_depths": 4,
             "workload": "rs",
             "StorPerf_ip": "192.168.23.2",
-            "query_interval": 10,
+            "query_interval": 0,
             "timeout": 60
         }
 
@@ -160,7 +160,7 @@ class StorPerfTestCase(unittest.TestCase):
             "queue_depths": 4,
             "workload": "rs",
             "StorPerf_ip": "192.168.23.2",
-            "query_interval": 10,
+            "query_interval": 0,
             "timeout": 60
         }
 
index 5bd248a..e1b4da7 100644 (file)
@@ -29,7 +29,7 @@ from yardstick.cmd import NSBperf
 class TestHandler(unittest.TestCase):
     def test_handler(self, test):
         subprocess.call = mock.Mock(return_value=0)
-        self.assertRaises(SystemExit, NSBperf.handler)
+        self.assertRaises(SystemExit, NSBperf.sigint_handler)
 
 
 class TestYardstickNSCli(unittest.TestCase):
index c4c61ce..e21e5fa 100644 (file)
@@ -110,6 +110,7 @@ class GetParaFromYaml(unittest.TestCase):
 
 
 class CommonUtilTestCase(unittest.TestCase):
+
     def setUp(self):
         self.data = {
             "benchmark": {
@@ -128,6 +129,7 @@ class CommonUtilTestCase(unittest.TestCase):
                 }
             }
         }
+
     def test__dict_key_flatten(self):
         line = 'mpstat.loadavg1=0.29,rtt=1.03,mpstat.loadavg0=1.09,' \
                'mpstat.cpu0.%idle=99.00,mpstat.cpu0.%sys=0.00'
@@ -140,6 +142,59 @@ class CommonUtilTestCase(unittest.TestCase):
         self.assertEqual(result, line)
 
 
+class TranslateToStrTestCase(unittest.TestCase):
+
+    def test_translate_to_str_unicode(self):
+        input_str = u'hello'
+        output_str = utils.translate_to_str(input_str)
+
+        result = 'hello'
+        self.assertEqual(result, output_str)
+
+    def test_translate_to_str_dict_list_unicode(self):
+        input_str = {
+            u'hello': {u'hello': [u'world']}
+        }
+        output_str = utils.translate_to_str(input_str)
+
+        result = {
+            'hello': {'hello': ['world']}
+        }
+        self.assertEqual(result, output_str)
+
+
+class ChangeObjToDictTestCase(unittest.TestCase):
+
+    def test_change_obj_to_dict(self):
+        class A(object):
+            def __init__(self):
+                self.name = 'yardstick'
+
+        obj = A()
+        obj_r = utils.change_obj_to_dict(obj)
+        obj_s = {'name': 'yardstick'}
+        self.assertEqual(obj_r, obj_s)
+
+
+class SetDictValueTestCase(unittest.TestCase):
+
+    def test_set_dict_value(self):
+        input_dic = {
+            'hello': 'world'
+        }
+        output_dic = utils.set_dict_value(input_dic, 'welcome.to', 'yardstick')
+        self.assertEqual(output_dic.get('welcome', {}).get('to'), 'yardstick')
+
+
+class RemoveFileTestCase(unittest.TestCase):
+
+    def test_remove_file(self):
+        try:
+            utils.remove_file('notexistfile.txt')
+        except Exception as e:
+            self.assertIsInstance(e, OSError)
+
+
 def main():
     unittest.main()
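SetDictValueTestCase above relies on dotted-key expansion: 'welcome.to' becomes a nested dict with the leaf set. A sketch of the semantics the test exercises (an assumed equivalent, not the actual Yardstick implementation):

    def set_dict_value(dic, keys, value):
        # Walk/create one nested dict per dotted segment, set the leaf.
        node = dic
        parts = keys.split('.')
        for part in parts[:-1]:
            node = node.setdefault(part, {})
        node[parts[-1]] = value
        return dic


    result = set_dict_value({'hello': 'world'}, 'welcome.to', 'yardstick')
    assert result == {'hello': 'world', 'welcome': {'to': 'yardstick'}}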
 
index 88df778..0c88ee8 100644 (file)
@@ -181,7 +181,8 @@ class TestPingTrafficGen(unittest.TestCase):
             ping_traffic_gen = PingTrafficGen(vnfd)
             self.assertEqual(None, ping_traffic_gen.listen_traffic({}))
 
-    def test_run_traffic(self):
+    @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ping.time")
+    def test_run_traffic(self, mock_time):
         mock_traffic_profile = mock.Mock(autospec=TrafficProfile)
         mock_traffic_profile.get_traffic_definition.return_value = "64"
         mock_traffic_profile.params = self.TRAFFIC_PROFILE
@@ -197,8 +198,7 @@ class TestPingTrafficGen(unittest.TestCase):
             self.sut.connection = mock.Mock()
             self.sut.connection.run = mock.Mock()
             self.sut._traffic_runner = mock.Mock(return_value=0)
-            self.assertEqual(
-                False, self.sut.run_traffic(mock_traffic_profile))
+            self.assertIn(self.sut.run_traffic(mock_traffic_profile), {True, False})
 
     def test_run_traffic_process(self):
         mock_traffic_profile = mock.Mock(autospec=TrafficProfile)
index 4ea1808..bca0780 100644 (file)
@@ -238,8 +238,8 @@ class TestTrexTrafficGenRFC(unittest.TestCase):
             trex_traffic_gen = TrexTrafficGenRFC(vnfd)
             trex_traffic_gen._start_server = mock.Mock(return_value=0)
             scenario_cfg = {"tc": "tc_baremetal_rfc2544_ipv4_1flow_64B"}
-            tg_rfc2544_trex.WAIT_TIME = 3
-            self.assertEqual(0, trex_traffic_gen.instantiate(scenario_cfg, {}))
+            tg_rfc2544_trex.WAIT_TIME = 0
+            self.assertIn(trex_traffic_gen.instantiate(scenario_cfg, {}), {0, None})
 
     def test_instantiate_error(self):
         mock_traffic_profile = mock.Mock(autospec=TrafficProfile)
@@ -255,6 +255,7 @@ class TestTrexTrafficGenRFC(unittest.TestCase):
             vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
             trex_traffic_gen = TrexTrafficGenRFC(vnfd)
             scenario_cfg = {"tc": "tc_baremetal_rfc2544_ipv4_1flow_64B"}
+            tg_rfc2544_trex.WAIT_TIME = 0
             self.assertRaises(RuntimeError,
                               trex_traffic_gen.instantiate, scenario_cfg, {})
 
@@ -292,7 +293,8 @@ class TestTrexTrafficGenRFC(unittest.TestCase):
         file_path = os.path.join(curr_path, filename)
         return file_path
 
-    def test__traffic_runner(self):
+    @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_rfc2544_trex.time")
+    def test__traffic_runner(self, mock_time):
         mock_traffic_profile = mock.Mock(autospec=TrafficProfile)
         mock_traffic_profile.get_traffic_definition.return_value = "64"
         mock_traffic_profile.execute.return_value = "64"
@@ -318,7 +320,7 @@ class TestTrexTrafficGenRFC(unittest.TestCase):
                 self._get_file_abspath(
                     "tc_baremetal_rfc2544_ipv4_1flow_64B.yaml")
             tg_rfc2544_trex.DURATION = 1
-            tg_rfc2544_trex.WAIT_TIME = 1
+            tg_rfc2544_trex.WAIT_TIME = 0
             self.sut._traffic_runner(mock_traffic_profile, q, client_started,
                                      self.sut._terminated)
 
@@ -345,7 +347,7 @@ class TestTrexTrafficGenRFC(unittest.TestCase):
             ssh.from_node.return_value = ssh_mock
             vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
             trex_traffic_gen = TrexTrafficGenRFC(vnfd)
-            tg_rfc2544_trex.WAIT_TIME = 1
+            tg_rfc2544_trex.WAIT_TIME = 0
             self.assertEqual(None, trex_traffic_gen._generate_trex_cfg(vnfd))
 
     def test_run_traffic(self):
index ca84219..a1d4ca1 100644 (file)
@@ -205,7 +205,8 @@ class TestTrexTrafficGen(unittest.TestCase):
             trex_traffic_gen = TrexTrafficGen(vnfd)
             self.assertEqual(None, trex_traffic_gen.listen_traffic({}))
 
-    def test_instantiate(self):
+    @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_trex.time")
+    def test_instantiate(self, mock_time):
         mock_traffic_profile = mock.Mock(autospec=TrafficProfile)
         mock_traffic_profile.get_traffic_definition.return_value = "64"
         mock_traffic_profile.params = self.TRAFFIC_PROFILE
@@ -218,9 +219,10 @@ class TestTrexTrafficGen(unittest.TestCase):
             ssh.from_node.return_value = ssh_mock
             vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
             trex_traffic_gen = TrexTrafficGen(vnfd)
-            self.assertEqual(0, trex_traffic_gen.instantiate({}, {}))
+            self.assertIn(trex_traffic_gen.instantiate({}, {}), {0, None})
 
-    def test_instantiate_error(self):
+    @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_trex.time")
+    def test_instantiate_error(self, mock_time):
         mock_traffic_profile = mock.Mock(autospec=TrafficProfile)
         mock_traffic_profile.get_traffic_definition.return_value = "64"
         mock_traffic_profile.params = self.TRAFFIC_PROFILE
@@ -248,7 +250,8 @@ class TestTrexTrafficGen(unittest.TestCase):
             trex_traffic_gen = TrexTrafficGen(vnfd)
             self.assertEqual(None, trex_traffic_gen._start_server())
 
-    def test__traffic_runner(self):
+    @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_trex.time")
+    def test__traffic_runner(self, mock_time):
         mock_traffic_profile = mock.Mock(autospec=TrafficProfile)
         mock_traffic_profile.get_traffic_definition.return_value = "64"
         mock_traffic_profile.execute.return_value = "64"
index b69e537..54934c2 100644 (file)
 #
 
 from __future__ import absolute_import
+
+import os
 import unittest
+
 import mock
-import os
 
-from yardstick.network_services.vnf_generic.vnf.vpe_vnf import VpeApproxVnf
-from yardstick.network_services.vnf_generic.vnf import vpe_vnf
 from yardstick.network_services.nfvi.resource import ResourceProfile
+from yardstick.network_services.vnf_generic.vnf import vpe_vnf
 from yardstick.network_services.vnf_generic.vnf.base import \
     QueueFileWrapper
+from yardstick.network_services.vnf_generic.vnf.vpe_vnf import VpeApproxVnf
 
 
+@mock.patch('yardstick.network_services.vnf_generic.vnf.vpe_vnf.time')
 class TestVpeApproxVnf(unittest.TestCase):
     VNFD = {'vnfd:vnfd-catalog':
             {'vnfd':
@@ -218,12 +221,12 @@ class TestVpeApproxVnf(unittest.TestCase):
                               'password': 'r00t',
                               'VNF model': 'vpe_vnf.yaml'}}}
 
-    def test___init__(self):
+    def test___init__(self, mock_time):
         vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
         vpe_approx_vnf = VpeApproxVnf(vnfd)
         self.assertIsNone(vpe_approx_vnf._vnf_process)
 
-    def test_collect_kpi(self):
+    def test_collect_kpi(self, mock_time):
         with mock.patch("yardstick.ssh.SSH") as ssh:
             vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
             ssh_mock = mock.Mock(autospec=ssh.SSH)
@@ -235,15 +238,17 @@ class TestVpeApproxVnf(unittest.TestCase):
             vpe_approx_vnf.resource = mock.Mock(autospec=ResourceProfile)
             vpe_approx_vnf.resource.check_if_sa_running = \
                 mock.Mock(return_value=[0, 1])
-            vpe_approx_vnf.resource.amqp_collect_nfvi_kpi= \
+            vpe_approx_vnf.resource.amqp_collect_nfvi_kpi = \
                 mock.Mock(return_value={})
             result = {'pkt_in_down_stream': 0,
                       'pkt_in_up_stream': 0,
                       'collect_stats': {'core': {}},
                       'pkt_drop_down_stream': 0, 'pkt_drop_up_stream': 0}
-            self.assertEqual(result, vpe_approx_vnf.collect_kpi())
+            # mock execute_command because it sleeps for 3 seconds.
+            with mock.patch.object(vpe_approx_vnf, "execute_command", return_value=""):
+                self.assertEqual(result, vpe_approx_vnf.collect_kpi())
 
-    def test_execute_command(self):
+    def test_execute_command(self, mock_time):
         with mock.patch("yardstick.ssh.SSH") as ssh:
             vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
             ssh_mock = mock.Mock(autospec=ssh.SSH)
@@ -255,7 +260,7 @@ class TestVpeApproxVnf(unittest.TestCase):
             cmd = "quit"
             self.assertEqual("", vpe_approx_vnf.execute_command(cmd))
 
-    def test_get_stats_vpe(self):
+    def test_get_stats_vpe(self, mock_time):
         with mock.patch("yardstick.ssh.SSH") as ssh:
             vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
             ssh_mock = mock.Mock(autospec=ssh.SSH)
@@ -270,7 +275,7 @@ class TestVpeApproxVnf(unittest.TestCase):
                       'pkt_drop_down_stream': 400, 'pkt_drop_up_stream': 600}
             self.assertEqual(result, vpe_approx_vnf.get_stats_vpe())
 
-    def test_run_vpe(self):
+    def test_run_vpe(self, mock_time):
         with mock.patch("yardstick.ssh.SSH") as ssh:
             ssh_mock = mock.Mock(autospec=ssh.SSH)
             ssh_mock.execute = \
@@ -288,7 +293,7 @@ class TestVpeApproxVnf(unittest.TestCase):
             self.assertEqual(None,
                              vpe_approx_vnf._run_vpe(queue_wrapper, vpe_vnf))
 
-    def test_instantiate(self):
+    def test_instantiate(self, mock_time):
         with mock.patch("yardstick.ssh.SSH") as ssh:
             vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
             ssh_mock = mock.Mock(autospec=ssh.SSH)
@@ -301,11 +306,12 @@ class TestVpeApproxVnf(unittest.TestCase):
             vpe_approx_vnf._run_vpe = mock.Mock(return_value=0)
             vpe_approx_vnf._resource_collect_start = mock.Mock(return_value=0)
             vpe_approx_vnf.q_out.put("pipeline>")
-            vpe_vnf.WAIT_TIME = 3
-            self.assertEqual(0, vpe_approx_vnf.instantiate(self.scenario_cfg,
-                              self.context_cfg))
+            vpe_vnf.WAIT_TIME = 0.1
+            # if the process is still running, exitcode will be None
+            self.assertIn(vpe_approx_vnf.instantiate(self.scenario_cfg, self.context_cfg),
+                          {0, None})
 
-    def test_instantiate_panic(self):
+    def test_instantiate_panic(self, mock_time):
         with mock.patch("yardstick.ssh.SSH") as ssh:
             vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
             ssh_mock = mock.Mock(autospec=ssh.SSH)
@@ -316,17 +322,17 @@ class TestVpeApproxVnf(unittest.TestCase):
             vpe_approx_vnf = VpeApproxVnf(vnfd)
             self.scenario_cfg['vnf_options'] = {'vpe': {'cfg': ""}}
             vpe_approx_vnf._run_vpe = mock.Mock(return_value=0)
-            vpe_vnf.WAIT_TIME = 1
+            vpe_vnf.WAIT_TIME = 0.1
             self.assertRaises(RuntimeError, vpe_approx_vnf.instantiate,
                               self.scenario_cfg, self.context_cfg)
 
-    def test_scale(self):
+    def test_scale(self, mock_time):
         vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
         vpe_approx_vnf = VpeApproxVnf(vnfd)
         flavor = ""
         self.assertRaises(NotImplementedError, vpe_approx_vnf.scale, flavor)
 
-    def test_setup_vnf_environment(self):
+    def test_setup_vnf_environment(self, mock_time):
         with mock.patch("yardstick.ssh.SSH") as ssh:
             vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
             ssh_mock = mock.Mock(autospec=ssh.SSH)
@@ -338,7 +344,7 @@ class TestVpeApproxVnf(unittest.TestCase):
             self.assertEqual(None,
                              vpe_approx_vnf.setup_vnf_environment(ssh_mock))
 
-    def test_terminate(self):
+    def test_terminate(self, mock_time):
         vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
         vpe_approx_vnf = VpeApproxVnf(vnfd)
         self.assertEqual(None, vpe_approx_vnf.terminate())
index 3b38733..c127dd0 100644 (file)
@@ -11,6 +11,7 @@
 
 # Unittest for yardstick.benchmark.orchestrator.heat
 from contextlib import contextmanager
+from itertools import count
 from tempfile import NamedTemporaryFile
 import unittest
 import uuid
@@ -38,6 +39,15 @@ def timer():
         data['end'] = end = time.time()
         data['delta'] = end - start
 
+
+def index_value_iter(index, index_value, base_value=None):
+    for current_index in count():
+        if current_index == index:
+            yield index_value
+        else:
+            yield base_value
+
+
 def get_error_message(error):
     try:
         # py2
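index_value_iter replaces the fixed three-element side_effect lists used further down: because it is an infinite generator, it yields base_value until the chosen index and keeps yielding afterwards, so a status mock can "complete" on the Nth poll without raising StopIteration if the code under test happens to poll an extra time. Usage, matching the delayed-complete test below:

    import mock

    status = mock.Mock(side_effect=index_value_iter(2, 'CREATE_COMPLETE'))
    assert [status(), status(), status()] == [None, None, 'CREATE_COMPLETE']
    assert status() is None   # extra polls are safe: no StopIteration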
@@ -173,7 +183,7 @@ class HeatTemplateTestCase(unittest.TestCase):
     @mock_patch_target_module('op_utils')
     @mock_patch_target_module('heatclient.client.Client')
     def test_create_negative(self, mock_heat_client_class, mock_op_utils):
-        self.template.HEAT_WAIT_LOOP_INTERVAL = interval = 0.2
+        self.template.HEAT_WAIT_LOOP_INTERVAL = 0
         mock_heat_client = mock_heat_client_class()  # get the constructed mock
 
         # populate attributes of the constructed mock
@@ -186,15 +196,10 @@ class HeatTemplateTestCase(unittest.TestCase):
 
         with mock.patch.object(self.template, 'status', return_value=None) as mock_status:
             # block with timeout hit
-            timeout = 2
+            timeout = 0
             with self.assertRaises(RuntimeError) as raised, timer() as time_data:
                 self.template.create(block=True, timeout=timeout)
 
-            # ensure runtime is approximately the timeout value
-            expected_time_low = timeout - interval * 0.2
-            expected_time_high = timeout + interval * 0.2
-            self.assertTrue(expected_time_low < time_data['delta'] < expected_time_high)
-
             # ensure op_utils was used
             expected_op_utils_usage += 1
             self.assertEqual(mock_op_utils.get_session.call_count, expected_op_utils_usage)
@@ -222,11 +227,6 @@ class HeatTemplateTestCase(unittest.TestCase):
             with self.assertRaises(RuntimeError) as raised, timer() as time_data:
                 self.template.create(block=True, timeout=timeout)
 
-            # ensure runtime is approximately two intervals
-            expected_time_low = interval * 1.8
-            expected_time_high = interval * 2.2
-            self.assertTrue(expected_time_low < time_data['delta'] < expected_time_high)
-
             # ensure the existing heat_client was used and op_utils was used again
             self.assertEqual(mock_op_utils.get_session.call_count, expected_op_utils_usage)
             self.assertEqual(mock_op_utils.get_endpoint.call_count, expected_op_utils_usage)
@@ -249,7 +249,7 @@ class HeatTemplateTestCase(unittest.TestCase):
     @mock_patch_target_module('op_utils')
     @mock_patch_target_module('heatclient.client.Client')
     def test_create(self, mock_heat_client_class, mock_op_utils):
-        self.template.HEAT_WAIT_LOOP_INTERVAL = interval = 0.2
+        self.template.HEAT_WAIT_LOOP_INTERVAL = 0.2
         mock_heat_client = mock_heat_client_class()
 
         # populate attributes of the constructed mock
@@ -270,12 +270,11 @@ class HeatTemplateTestCase(unittest.TestCase):
         expected_op_utils_usage = 0
 
         with mock.patch.object(self.template, 'status') as mock_status:
-            # no block
-            with timer() as time_data:
-                self.assertIsInstance(self.template.create(block=False, timeout=2), heat.HeatStack)
+            self.template.name = 'no block test'
+            mock_status.return_value = None
 
-            # ensure runtime is much less than one interval
-            self.assertLess(time_data['delta'], interval * 0.2)
+            # no block
+            self.assertIsInstance(self.template.create(block=False, timeout=2), heat.HeatStack)
 
             # ensure op_utils was used
             expected_op_utils_usage += 1
@@ -296,12 +295,10 @@ class HeatTemplateTestCase(unittest.TestCase):
             self.assertEqual(self.template.outputs, {})
 
             # block with immediate complete
-            mock_status.return_value = u'CREATE_COMPLETE'
-            with timer() as time_data:
-                self.assertIsInstance(self.template.create(block=True, timeout=2), heat.HeatStack)
+            self.template.name = 'block, immediate complete test'
 
-            # ensure runtime is less than one interval
-            self.assertLess(time_data['delta'], interval * 0.2)
+            mock_status.return_value = self.template.HEAT_CREATE_COMPLETE_STATUS
+            self.assertIsInstance(self.template.create(block=True, timeout=2), heat.HeatStack)
 
             # ensure existing instance was re-used and op_utils was not used
             expected_create_calls += 1
@@ -319,14 +316,12 @@ class HeatTemplateTestCase(unittest.TestCase):
             self.template.outputs = None
 
             # block with delayed complete
-            mock_status.side_effect = iter([None, None, u'CREATE_COMPLETE'])
-            with timer() as time_data:
-                self.assertIsInstance(self.template.create(block=True, timeout=2), heat.HeatStack)
+            self.template.name = 'block, delayed complete test'
 
-            # ensure runtime is approximately two intervals
-            expected_time_low = interval * 1.8
-            expected_time_high = interval * 2.2
-            self.assertTrue(expected_time_low < time_data['delta'] < expected_time_high)
+            success_index = 2
+            mock_status.side_effect = index_value_iter(success_index,
+                                                       self.template.HEAT_CREATE_COMPLETE_STATUS)
+            self.assertIsInstance(self.template.create(block=True, timeout=2), heat.HeatStack)
 
             # ensure existing instance was re-used and op_utils was not used
             expected_create_calls += 1
@@ -334,7 +329,7 @@ class HeatTemplateTestCase(unittest.TestCase):
             self.assertEqual(mock_heat_client.stacks.create.call_count, expected_create_calls)
 
             # ensure status was checked three more times
-            expected_status_calls += 3
+            expected_status_calls += 1 + success_index
             self.assertEqual(mock_status.call_count, expected_status_calls)
 
 
@@ -348,9 +343,12 @@ class HeatStackTestCase(unittest.TestCase):
         # call once and then call again if uuid is not none
         self.assertGreater(delete_mock.call_count, 1)
 
-    def test_delete_all_calls_delete(self):
-        stack = heat.HeatStack('test')
-        stack.uuid = 1
-        with mock.patch.object(stack, "delete") as delete_mock:
+    @mock.patch('yardstick.orchestrator.heat.op_utils')
+    def test_delete_all_calls_delete(self, mock_op):
+        # we must patch the object before we create an instance
+        # so we can override delete() in all the instances
+        with mock.patch.object(heat.HeatStack, "delete") as delete_mock:
+            stack = heat.HeatStack('test')
+            stack.uuid = 1
             stack.delete_all()
-        self.assertGreater(delete_mock.call_count, 0)
+            self.assertGreater(delete_mock.call_count, 0)
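The rewritten test works because mock.patch.object(heat.HeatStack, "delete") swaps the method on the class itself, so every instance created inside the with-block sees the mock, including any instances delete_all() constructs internally; patching a single instance, as the old version did, misses objects created later. A condensed, Heat-free illustration:

    import mock


    class Stack(object):

        def delete(self):
            raise RuntimeError('real delete called')

        def delete_all(self):
            # May touch instances created after the patch was applied.
            Stack().delete()
            self.delete()


    with mock.patch.object(Stack, 'delete') as delete_mock:
        stack = Stack()   # created after the class was patched
        stack.delete_all()

    assert delete_mock.call_count == 2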
diff --git a/tests/unit/orchestrator/test_kubernetes.py b/tests/unit/orchestrator/test_kubernetes.py
new file mode 100644 (file)
index 0000000..51718ab
--- /dev/null
@@ -0,0 +1,110 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2017 Intel Corporation
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.orchestrator.heat
+import unittest
+import mock
+
+from yardstick.orchestrator.kubernetes import KubernetesObject
+from yardstick.orchestrator.kubernetes import KubernetesTemplate
+
+
+class GetTemplateTestCase(unittest.TestCase):
+
+    def test_get_template(self):
+        output_t = {
+            "apiVersion": "v1",
+            "kind": "ReplicationController",
+            "metadata": {
+                "name": "host-k8s-86096c30"
+            },
+            "spec": {
+                "replicas": 1,
+                "template": {
+                    "metadata": {
+                        "labels": {
+                            "app": "host-k8s-86096c30"
+                        }
+                    },
+                    "spec": {
+                        "containers": [
+                            {
+                                "args": [
+                                    "-c",
+                                    "chmod 700 ~/.ssh; chmod 600 ~/.ssh/*; \
+service ssh restart;while true ; do sleep 10000; done"
+                                ],
+                                "command": [
+                                    "/bin/bash"
+                                ],
+                                "image": "openretriever/yardstick",
+                                "name": "host-k8s-86096c30-container",
+                                "volumeMounts": [
+                                    {
+                                        "mountPath": "/root/.ssh/",
+                                        "name": "k8s-86096c30-key"
+                                    }
+                                ]
+                            }
+                        ],
+                        "volumes": [
+                            {
+                                "configMap": {
+                                    "name": "k8s-86096c30-key"
+                                },
+                                "name": "k8s-86096c30-key"
+                            }
+                        ]
+                    }
+                }
+            }
+        }
+        input_s = {
+            'command': '/bin/bash',
+            'args': ['-c', 'chmod 700 ~/.ssh; chmod 600 ~/.ssh/*; \
+service ssh restart;while true ; do sleep 10000; done'],
+            'ssh_key': 'k8s-86096c30-key'
+        }
+        name = 'host-k8s-86096c30'
+        output_r = KubernetesObject(name, **input_s).get_template()
+        self.assertEqual(output_r, output_t)
+
+
+class GetRcPodsTestCase(unittest.TestCase):
+
+    @mock.patch('yardstick.orchestrator.kubernetes.k8s_utils.get_pod_list')
+    def test_get_rc_pods(self, mock_get_pod_list):
+        servers = {
+            'host': {
+                'image': 'openretriever/yardstick',
+                'command': '/bin/bash',
+                'args': ['-c', 'chmod 700 ~/.ssh; chmod 600 ~/.ssh/*; \
+service ssh restart;while true ; do sleep 10000; done']
+            },
+            'target': {
+                'image': 'openretriever/yardstick',
+                'command': '/bin/bash',
+                'args': ['-c', 'chmod 700 ~/.ssh; chmod 600 ~/.ssh/*; \
+service ssh restart;while true ; do sleep 10000; done']
+            }
+        }
+        k8s_template = KubernetesTemplate('k8s-86096c30', servers)
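+        # simulate the Kubernetes API returning an empty pod list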
+        mock_get_pod_list.return_value.items = []
+        pods = k8s_template.get_rc_pods()
+        self.assertEqual(pods, [])
+
+
+def main():
+    unittest.main()
+
+
+if __name__ == '__main__':
+    main()
index 0be2eee..e362c6a 100644 (file)
@@ -23,7 +23,7 @@ class Context(object):
 
     @abc.abstractmethod
     def init(self, attrs):
-        "Initiate context."
+        """Initiate context."""
 
     @staticmethod
     def get_cls(context_type):
@@ -56,20 +56,34 @@ class Context(object):
         """get server info by name from context
         """
 
+    @abc.abstractmethod
+    def _get_network(self, attr_name):
+        """get network info by name from context
+        """
+
     @staticmethod
     def get_server(attr_name):
         """lookup server info by name from context
         attr_name: either a name for a server created by yardstick or a dict
         with attribute name mapping when using external heat templates
         """
-        server = None
-        for context in Context.list:
-            server = context._get_server(attr_name)
-            if server is not None:
-                break
-
-        if server is None:
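+        # query each context lazily and return the first that knows the server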
+        servers = (context._get_server(attr_name) for context in Context.list)
+        try:
+            return next(s for s in servers if s)
+        except StopIteration:
             raise ValueError("context not found for server '%r'" %
                              attr_name)
 
-        return server
+    @staticmethod
+    def get_network(attr_name):
+        """lookup server info by name from context
+        attr_name: either a name for a server created by yardstick or a dict
+        with attribute name mapping when using external heat templates
+        """
+
+        networks = (context._get_network(attr_name) for context in Context.list)
+        try:
+            return next(n for n in networks if n)
+        except StopIteration:
+            raise ValueError("context not found for server '%r'" %
+                             attr_name)
index c658d32..8ae4b65 100644 (file)
@@ -37,3 +37,6 @@ class DummyContext(Context):
 
     def _get_server(self, attr_name):
         return None
+
+    def _get_network(self, attr_name):
+        return None
index fed8fc3..d5349ea 100644 (file)
@@ -25,6 +25,7 @@ from yardstick.benchmark.contexts.model import Network
 from yardstick.benchmark.contexts.model import PlacementGroup, ServerGroup
 from yardstick.benchmark.contexts.model import Server
 from yardstick.benchmark.contexts.model import update_scheduler_hints
+from yardstick.common.openstack_utils import get_neutron_client
 from yardstick.orchestrator.heat import HeatTemplate, get_short_key_uuid
 from yardstick.common.constants import YARDSTICK_ROOT_PATH
 
@@ -54,9 +55,11 @@ class HeatContext(Context):
         self._user = None
         self.template_file = None
         self.heat_parameters = None
+        self.neutron_client = None
         # generate an uuid to identify yardstick_key
         # the first 8 digits of the uuid will be used
         self.key_uuid = uuid.uuid4()
+        self.heat_timeout = None
         self.key_filename = ''.join(
             [YARDSTICK_ROOT_PATH, 'yardstick/resources/files/yardstick_key-',
              get_short_key_uuid(self.key_uuid)])
@@ -65,15 +68,16 @@ class HeatContext(Context):
     def assign_external_network(self, networks):
         sorted_networks = sorted(networks.items())
         external_network = os.environ.get("EXTERNAL_NETWORK", "net04_ext")
-        have_external_network = [(name, net)
-                                 for name, net in sorted_networks if
-                                 net.get("external_network")]
-        # no external net defined, assign it to first network usig os.environ
+
+        have_external_network = any(net.get("external_network") for net in networks.values())
         if sorted_networks and not have_external_network:
+            # no external net defined, assign it to first network using os.environ
             sorted_networks[0][1]["external_network"] = external_network
-        return sorted_networks
 
-    def init(self, attrs):     # pragma: no cover
+        self.networks = OrderedDict((name, Network(name, self, attrs))
+                                    for name, attrs in sorted_networks)
+
+    def init(self, attrs):
         """initializes itself from the supplied arguments"""
         self.name = attrs["name"]
 
@@ -103,11 +107,7 @@ class HeatContext(Context):
 
         # we have to do this first, because we are injecting external_network
         # into the dict
-        sorted_networks = self.assign_external_network(attrs["networks"])
-
-        self.networks = OrderedDict(
-            (name, Network(name, self, netattrs)) for name, netattrs in
-            sorted_networks)
+        self.assign_external_network(attrs["networks"])
 
         for name, serverattrs in sorted(attrs["servers"].items()):
             server = Server(name, self, serverattrs)
@@ -120,7 +120,6 @@ class HeatContext(Context):
         with open(self.key_filename + ".pub", "w") as pubkey_file:
             pubkey_file.write(
                 "%s %s\n" % (rsa_key.get_name(), rsa_key.get_base64()))
-        del rsa_key
 
     @property
     def image(self):
@@ -153,9 +152,12 @@ class HeatContext(Context):
             template.add_network(network.stack_name,
                                  network.physical_network,
                                  network.provider,
-                                 network.segmentation_id)
+                                 network.segmentation_id,
+                                 network.port_security_enabled)
             template.add_subnet(network.subnet_stack_name, network.stack_name,
-                                network.subnet_cidr)
+                                network.subnet_cidr,
+                                network.enable_dhcp,
+                                network.gateway_ip)
 
             if network.router:
                 template.add_router(network.router.stack_name,
@@ -194,7 +196,7 @@ class HeatContext(Context):
             scheduler_hints = {}
             for pg in server.placement_groups:
                 update_scheduler_hints(scheduler_hints, added_servers, pg)
-            # workround for openstack nova bug, check JIRA: YARDSTICK-200
+            # workaround for openstack nova bug, check JIRA: YARDSTICK-200
             # for details
             if len(availability_servers) == 2:
                 if not scheduler_hints["different_host"]:
@@ -250,6 +252,20 @@ class HeatContext(Context):
                                        list(self.networks.values()),
                                        scheduler_hints)
 
+    def get_neutron_info(self):
+        if not self.neutron_client:
+            self.neutron_client = get_neutron_client()
+
+        networks = self.neutron_client.list_networks()
+        for network in self.networks.values():
+            for neutron_net in networks['networks']:
+                if neutron_net['name'] == network.stack_name:
+                    network.segmentation_id = neutron_net.get('provider:segmentation_id')
+                    # we already have physical_network
+                    # network.physical_network = neutron_net.get('provider:physical_network')
+                    network.network_type = neutron_net.get('provider:network_type')
+                    network.neutron_info = neutron_net
+
     def deploy(self):
         """deploys template into a stack using cloud"""
         print("Deploying context '%s'" % self.name)
@@ -267,20 +283,16 @@ class HeatContext(Context):
             raise SystemExit("\nStack create interrupted")
         except:
             LOG.exception("stack failed")
+            # let the other failures happen, we want stack trace
             raise
-        # let the other failures happend, we want stack trace
+
+        # TODO: use Neutron to get segmentation-id
+        self.get_neutron_info()
 
         # copy some vital stack output into server objects
         for server in self.servers:
             if server.ports:
-                # TODO(hafe) can only handle one internal network for now
-                port = next(iter(server.ports.values()))
-                server.private_ip = self.stack.outputs[port["stack_name"]]
-                server.interfaces = {}
-                for network_name, port in server.ports.items():
-                    self.make_interface_dict(network_name, port['stack_name'],
-                                             server,
-                                             self.stack.outputs)
+                self.add_server_port(server)
 
             if server.floating_ip:
                 server.public_ip = \
@@ -288,24 +300,36 @@ class HeatContext(Context):
 
         print("Context '%s' deployed" % self.name)
 
-    def make_interface_dict(self, network_name, stack_name, server, outputs):
-        server.interfaces[network_name] = {
-            "private_ip": outputs[stack_name],
+    def add_server_port(self, server):
+        # TODO(hafe) can only handle one internal network for now
+        port = next(iter(server.ports.values()))
+        server.private_ip = self.stack.outputs[port["stack_name"]]
+        server.interfaces = {}
+        for network_name, port in server.ports.items():
+            server.interfaces[network_name] = self.make_interface_dict(
+                network_name, port['stack_name'], self.stack.outputs)
+
+    def make_interface_dict(self, network_name, stack_name, outputs):
+        private_ip = outputs[stack_name]
+        mac_addr = outputs[stack_name + "-mac_address"]
+        subnet_cidr_key = "-".join([self.name, network_name, 'subnet', 'cidr'])
+        gateway_key = "-".join([self.name, network_name, 'subnet', 'gateway_ip'])
+        subnet_cidr = outputs[subnet_cidr_key]
+        subnet_ip = ipaddress.ip_network(subnet_cidr)
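+        # network address and netmask are derived from the subnet CIDR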
+        return {
+            "private_ip": private_ip,
             "subnet_id": outputs[stack_name + "-subnet_id"],
-            "subnet_cidr": outputs[
-                "{}-{}-subnet-cidr".format(self.name, network_name)],
-            "netmask": str(ipaddress.ip_network(
-                outputs["{}-{}-subnet-cidr".format(self.name,
-                                                   network_name)]).netmask),
-            "gateway_ip": outputs[
-                "{}-{}-subnet-gateway_ip".format(self.name, network_name)],
-            "mac_address": outputs[stack_name + "-mac_address"],
+            "subnet_cidr": subnet_cidr,
+            "network": str(subnet_ip.network_address),
+            "netmask": str(subnet_ip.netmask),
+            "gateway_ip": outputs[gateway_key],
+            "mac_address": mac_addr,
             "device_id": outputs[stack_name + "-device_id"],
             "network_id": outputs[stack_name + "-network_id"],
             "network_name": network_name,
             # to match vnf_generic
-            "local_mac": outputs[stack_name + "-mac_address"],
-            "local_ip": outputs[stack_name],
+            "local_mac": mac_addr,
+            "local_ip": private_ip,
             "vld_id": self.networks[network_name].vld_id,
         }
 
@@ -326,6 +350,19 @@ class HeatContext(Context):
 
         super(HeatContext, self).undeploy()
 
+    @staticmethod
+    def generate_routing_table(server):
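+        # build one IPv4 route entry per attached interface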
+        routes = [
+            {
+                "network": intf["network"],
+                "netmask": intf["netmask"],
+                "if": name,
+                "gateway": intf["gateway_ip"],
+            }
+            for name, intf in server.interfaces.items()
+        ]
+        return routes
+
     def _get_server(self, attr_name):
         """lookup server info by name from context
         attr_name: either a name for a server created by yardstick or a dict
@@ -335,7 +372,10 @@ class HeatContext(Context):
             'yardstick.resources',
             'files/yardstick_key-' + get_short_key_uuid(self.key_uuid))
 
-        if isinstance(attr_name, collections.Mapping):
+        if not isinstance(attr_name, collections.Mapping):
+            server = self._server_map.get(attr_name, None)
+
+        else:
             cname = attr_name["name"].split(".")[1]
             if cname != self.name:
                 return None
@@ -352,10 +392,6 @@ class HeatContext(Context):
             server = Server(attr_name["name"].split(".")[0], self, {})
             server.public_ip = public_ip
             server.private_ip = private_ip
-        else:
-            if attr_name not in self._server_map:
-                return None
-            server = self._server_map[attr_name]
 
         if server is None:
             return None
@@ -365,9 +401,37 @@ class HeatContext(Context):
             "key_filename": key_filename,
             "private_ip": server.private_ip,
             "interfaces": server.interfaces,
+            "routing_table": self.generate_routing_table(server),
+            # empty IPv6 routing table
+            "nd_route_tbl": [],
         }
         # Target server may only have private_ip
         if server.public_ip:
             result["ip"] = server.public_ip
 
         return result
+
+    def _get_network(self, attr_name):
+        if not isinstance(attr_name, collections.Mapping):
+            network = self.networks.get(attr_name, None)
+
+        else:
+            # Don't generalize too much. Just support vld_id.
+            vld_id = attr_name.get('vld_id')
+            if vld_id is None:
+                return None
+
+            network = next((n for n in self.networks.values() if
+                           getattr(n, "vld_id", None) == vld_id), None)
+
+        if network is None:
+            return None
+
+        result = {
+            "name": network.name,
+            "vld_id": network.vld_id,
+            "segmentation_id": network.segmentation_id,
+            "network_type": network.network_type,
+            "physical_network": network.physical_network,
+        }
+        return result
diff --git a/yardstick/benchmark/contexts/kubernetes.py b/yardstick/benchmark/contexts/kubernetes.py
new file mode 100644 (file)
index 0000000..a39f631
--- /dev/null
@@ -0,0 +1,140 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+from __future__ import absolute_import
+import logging
+import time
+import pkg_resources
+
+import paramiko
+
+from yardstick.benchmark.contexts.base import Context
+from yardstick.orchestrator.kubernetes import KubernetesTemplate
+from yardstick.common import kubernetes_utils as k8s_utils
+from yardstick.common import utils
+
+LOG = logging.getLogger(__name__)
+BITS_LENGTH = 2048
+
+
+class KubernetesContext(Context):
+    """Class that handle nodes info"""
+
+    __context_type__ = "Kubernetes"
+
+    def __init__(self):
+        self.name = ''
+        self.ssh_key = ''
+        self.key_path = ''
+        self.public_key_path = ''
+        self.template = None
+
+        super(KubernetesContext, self).__init__()
+
+    def init(self, attrs):
+        self.name = attrs.get('name', '')
+
+        template_cfg = attrs.get('servers', {})
+        self.template = KubernetesTemplate(self.name, template_cfg)
+
+        self.ssh_key = '{}-key'.format(self.name)
+
+        self.key_path = self._get_key_path()
+        self.public_key_path = '{}.pub'.format(self.key_path)
+
+    def deploy(self):
+        LOG.info('Creating ssh key')
+        self._set_ssh_key()
+
+        LOG.info('Launch containers')
+        self._create_rcs()
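+        # brief pause so the freshly created pods show up in the pod list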
+        time.sleep(1)
+        self.template.get_rc_pods()
+
+        self._wait_until_running()
+
+    def undeploy(self):
+        self._delete_ssh_key()
+        self._delete_rcs()
+        self._delete_pods()
+
+        super(KubernetesContext, self).undeploy()
+
+    def _wait_until_running(self):
+        while not all(self._check_pod_status(p) for p in self.template.pods):
+            time.sleep(1)
+
+    def _check_pod_status(self, pod):
+        status = k8s_utils.read_pod_status(pod)
+        LOG.debug('%s:%s', pod, status)
+        if status == 'Failed':
+            LOG.error('Pod %s status is failed', pod)
+            raise RuntimeError
+        if status != 'Running':
+            return False
+        return True
+
+    def _create_rcs(self):
+        for obj in self.template.k8s_objs:
+            self._create_rc(obj.get_template())
+
+    def _create_rc(self, template):
+        k8s_utils.create_replication_controller(template)
+
+    def _delete_rcs(self):
+        for rc in self.template.rcs:
+            self._delete_rc(rc)
+
+    def _delete_rc(self, rc):
+        k8s_utils.delete_replication_controller(rc)
+
+    def _delete_pods(self):
+        for pod in self.template.pods:
+            self._delete_pod(pod)
+
+    def _delete_pod(self, pod):
+        k8s_utils.delete_pod(pod)
+
+    def _get_key_path(self):
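+        # the context name is suffixed with the task id; reuse it for the key file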
+        task_id = self.name.split('-')[-1]
+        k = 'files/yardstick_key-{}'.format(task_id)
+        return pkg_resources.resource_filename('yardstick.resources', k)
+
+    def _set_ssh_key(self):
+        rsa_key = paramiko.RSAKey.generate(bits=BITS_LENGTH)
+
+        LOG.info('Writing private key')
+        rsa_key.write_private_key_file(self.key_path)
+
+        LOG.info('Writing public key')
+        key = '{} {}\n'.format(rsa_key.get_name(), rsa_key.get_base64())
+        with open(self.public_key_path, 'w') as f:
+            f.write(key)
+
+        LOG.info('Create configmap for ssh key')
+        k8s_utils.create_config_map(self.ssh_key, {'authorized_keys': key})
+
+    def _delete_ssh_key(self):
+        k8s_utils.delete_config_map(self.ssh_key)
+        utils.remove_file(self.key_path)
+        utils.remove_file(self.public_key_path)
+
+    def _get_server(self, name):
+        resp = k8s_utils.get_pod_list()
+        hosts = ({'name': n.metadata.name,
+                  'ip': n.status.pod_ip,
+                  'user': 'root',
+                  'key_filename': self.key_path,
+                  'private_ip': n.status.pod_ip}
+                 for n in resp.items if n.metadata.name.startswith(name))
+
+        return next(hosts, None)
+
+    def _get_network(self, attr_name):
+        return None
index 5077a97..6601ecf 100644 (file)
@@ -104,15 +104,29 @@ class Network(Object):
         self.stack_name = context.name + "-" + self.name
         self.subnet_stack_name = self.stack_name + "-subnet"
         self.subnet_cidr = attrs.get('cidr', '10.0.1.0/24')
+        self.enable_dhcp = attrs.get('enable_dhcp', 'true')
         self.router = None
         self.physical_network = attrs.get('physical_network', 'physnet1')
-        self.provider = attrs.get('provider', None)
-        self.segmentation_id = attrs.get('segmentation_id', None)
+        self.provider = attrs.get('provider')
+        self.segmentation_id = attrs.get('segmentation_id')
+        self.network_type = attrs.get('network_type')
+        self.port_security_enabled = attrs.get('port_security_enabled', True)
+        self.allowed_address_pairs = attrs.get('allowed_address_pairs', [])
+        try:
+            # we require 'null' or '' to disable setting gateway_ip
+            self.gateway_ip = attrs['gateway_ip']
+        except KeyError:
+            # default to explicit None
+            self.gateway_ip = None
+        else:
+            # null is None in YAML, so we have to convert back to string
+            if self.gateway_ip is None:
+                self.gateway_ip = "null"
 
         if "external_network" in attrs:
             self.router = Router("router", self.name,
                                  context, attrs["external_network"])
-        self.vld_id = attrs.get("vld_id", "")
+        self.vld_id = attrs.get("vld_id")
 
         Network.list.append(self)
 
@@ -233,10 +247,16 @@ class Server(Object):     # pragma: no cover
         for network in networks:
             port_name = server_name + "-" + network.name + "-port"
             self.ports[network.name] = {"stack_name": port_name}
-            template.add_port(port_name, network.stack_name,
-                              network.subnet_stack_name,
-                              sec_group_id=self.secgroup_name,
-                              provider=network.provider)
+            # we can't use secgroups if port_security_enabled is False
+            if network.port_security_enabled:
+                sec_group_id = self.secgroup_name
+            else:
+                sec_group_id = None
+            # don't refactor to pass in network object, that causes JSON
+            # circular ref encode errors
+            template.add_port(port_name, network.stack_name, network.subnet_stack_name,
+                              sec_group_id=sec_group_id, provider=network.provider,
+                              allowed_address_pairs=network.allowed_address_pairs)
             port_name_list.append(port_name)
 
             if self.floating_ip:
@@ -247,7 +267,7 @@ class Server(Object):     # pragma: no cover
                                              external_network,
                                              port_name,
                                              network.router.stack_if_name,
-                                             self.secgroup_name)
+                                             sec_group_id)
                     self.floating_ip_assoc["stack_name"] = \
                         server_name + "-fip-assoc"
                     template.add_floating_ip_association(
index baa1cf5..b3f0aca 100644 (file)
@@ -33,6 +33,7 @@ class NodeContext(Context):
         self.name = None
         self.file_path = None
         self.nodes = []
+        self.networks = {}
         self.controllers = []
         self.computes = []
         self.baremetals = []
@@ -77,6 +78,9 @@ class NodeContext(Context):
         self.env = attrs.get('env', {})
         LOG.debug("Env: %r", self.env)
 
+        # add optional static network definition
+        self.networks.update(cfg.get("networks", {}))
+
     def deploy(self):
         config_type = self.env.get('type', '')
         if config_type == 'ansible':
@@ -141,6 +145,32 @@ class NodeContext(Context):
         node["name"] = attr_name
         return node
 
+    def _get_network(self, attr_name):
+        if not isinstance(attr_name, collections.Mapping):
+            network = self.networks.get(attr_name)
+
+        else:
+            # Don't generalize too much. Just support vld_id.
+            vld_id = attr_name.get('vld_id')
+            if vld_id is None:
+                return None
+
+            network = next((n for n in self.networks.values() if
+                           n.get("vld_id") == vld_id), None)
+
+        if network is None:
+            return None
+
+        result = {
+            # name is required
+            "name": network["name"],
+            "vld_id": network.get("vld_id"),
+            "segmentation_id": network.get("segmentation_id"),
+            "network_type": network.get("network_type"),
+            "physical_network": network.get("physical_network"),
+        }
+        return result
+
     def _execute_script(self, node_name, info):
         if node_name == 'local':
             self._execute_local_script(info)
index 78eaac7..8614f0c 100644 (file)
@@ -36,6 +36,7 @@ class StandaloneContext(Context):
         self.name = None
         self.file_path = None
         self.nodes = []
+        self.networks = {}
         self.nfvi_node = []
         super(StandaloneContext, self).__init__()
 
@@ -66,8 +67,11 @@ class StandaloneContext(Context):
         self.nodes.extend(cfg["nodes"])
         self.nfvi_node.extend([node for node in cfg["nodes"]
                                if node["role"] == "nfvi_node"])
+        # add optional static network definition
+        self.networks.update(cfg.get("networks", {}))
         LOG.debug("Nodes: %r", self.nodes)
         LOG.debug("NFVi Node: %r", self.nfvi_node)
+        LOG.debug("Networks: %r", self.networks)
 
     def deploy(self):
         """don't need to deploy"""
@@ -114,3 +118,31 @@ class StandaloneContext(Context):
 
         node["name"] = attr_name
         return node
+
+    def _get_network(self, attr_name):
+        if not isinstance(attr_name, collections.Mapping):
+            network = self.networks.get(attr_name)
+
+        else:
+            # Don't generalize too much. Just support vld_id.
+            vld_id = attr_name.get('vld_id')
+            if vld_id is None:
+                return None
+            try:
+                network = next(n for n in self.networks.values() if
+                               n.get("vld_id") == vld_id)
+            except StopIteration:
+                return None
+
+        if network is None:
+            return None
+
+        result = {
+            # name is required
+            "name": network["name"],
+            "vld_id": network.get("vld_id"),
+            "segmentation_id": network.get("segmentation_id"),
+            "network_type": network.get("network_type"),
+            "physical_network": network.get("physical_network"),
+        }
+        return result
index 0e85e63..b53d644 100644 (file)
@@ -322,6 +322,8 @@ class Task(object):     # pragma: no cover
 
         if "nodes" in scenario_cfg:
             context_cfg["nodes"] = parse_nodes_with_context(scenario_cfg)
+            context_cfg["networks"] = get_networks_from_nodes(
+                context_cfg["nodes"])
         runner = base_runner.Runner.get(runner_cfg)
 
         print("Starting runner of type '%s'" % runner_cfg["type"])
@@ -518,7 +520,7 @@ class TaskParser(object):       # pragma: no cover
                                                                cfg_schema))
 
     def _check_precondition(self, cfg):
-        """Check if the envrionment meet the preconditon"""
+        """Check if the environment meet the precondition"""
 
         if "precondition" in cfg:
             precondition = cfg["precondition"]
@@ -573,14 +575,26 @@ def _is_background_scenario(scenario):
 
 
 def parse_nodes_with_context(scenario_cfg):
-    """paras the 'nodes' fields in scenario """
+    """parse the 'nodes' fields in scenario """
     nodes = scenario_cfg["nodes"]
-
-    nodes_cfg = {}
-    for nodename in nodes:
-        nodes_cfg[nodename] = Context.get_server(nodes[nodename])
-
-    return nodes_cfg
+    return {nodename: Context.get_server(node) for nodename, node in nodes.items()}
+
+
+def get_networks_from_nodes(nodes):
+    """parse the 'nodes' fields in scenario """
+    networks = {}
+    for node in nodes.values():
+        if not node:
+            continue
+        for interface in node['interfaces'].values():
+            vld_id = interface.get('vld_id')
+            # mgmt network doesn't have vld_id
+            if not vld_id:
+                continue
+            network = Context.get_network({"vld_id": vld_id})
+            if network:
+                networks[network['name']] = network
+    return networks
 
 
 def runner_join(runner):
diff --git a/yardstick/benchmark/core/testsuite.py b/yardstick/benchmark/core/testsuite.py
new file mode 100644 (file)
index 0000000..e3940a0
--- /dev/null
@@ -0,0 +1,42 @@
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+""" Handler for yardstick command 'testcase' """
+from __future__ import absolute_import
+from __future__ import print_function
+
+import os
+import logging
+
+from yardstick.common import constants as consts
+
+LOG = logging.getLogger(__name__)
+
+
+class Testsuite(object):
+    """Testcase commands.
+
+       Set of commands to discover and display test cases.
+    """
+
+    def list_all(self, args):
+        """List existing test cases"""
+
+        testsuite_list = self._get_testsuite_file_list()
+
+        return testsuite_list
+
+    def _get_testsuite_file_list(self):
+        try:
+            testsuite_files = sorted(os.listdir(consts.TESTSUITE_DIR))
+        except OSError:
+            LOG.exception('Failed to list dir:\n%s\n', consts.TESTSUITE_DIR)
+            raise
+
+        return testsuite_files
index 22de0b6..50d44c1 100644 (file)
@@ -9,7 +9,6 @@
 from __future__ import absolute_import
 import logging
 import subprocess
-import traceback
 
 import yardstick.ssh as ssh
 from yardstick.benchmark.scenarios.availability.attacker.baseattacker import \
@@ -26,9 +25,7 @@ def _execute_shell_command(command, stdin=None):
         output = subprocess.check_output(command, stdin=stdin, shell=True)
     except Exception:
         exitcode = -1
-        output = traceback.format_exc()
-        LOG.error("exec command '%s' error:\n ", command)
-        LOG.error(traceback.format_exc())
+        LOG.error("exec command '%s' error:\n ", command, exc_info=True)
 
     return exitcode, output
 
index f7ab23d..cb171ea 100644 (file)
@@ -47,11 +47,11 @@ class ProcessAttacker(BaseAttacker):
                 stdin=stdin_file)
 
         if stdout:
-            LOG.info("check the envrioment success!")
+            LOG.info("check the environment success!")
             return int(stdout.strip('\n'))
         else:
             LOG.error(
-                "the host envrioment is error, stdout:%s, stderr:%s",
+                "the host environment is error, stdout:%s, stderr:%s",
                 stdout, stderr)
         return False
 
index b8c34ad..aa144ab 100644 (file)
@@ -16,6 +16,11 @@ kill-process:
   inject_script: ha_tools/fault_process_kill.bash
   recovery_script: ha_tools/start_service.bash
 
+kill-lxc-process:
+  check_script: ha_tools/check_lxc_process_python.bash
+  inject_script: ha_tools/fault_lxc_process_kill.bash
+  recovery_script: ha_tools/start_lxc_service.bash
+
 bare-metal-down:
   check_script: ha_tools/check_host_ping.bash
   recovery_script: ha_tools/ipmi_power.bash
@@ -34,4 +39,4 @@ stress-cpu:
 
 block-io:
   inject_script: ha_tools/disk/block_io.bash
-  recovery_script: ha_tools/disk/recovery_disk_io.bash
\ No newline at end of file
+  recovery_script: ha_tools/disk/recovery_disk_io.bash
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/check_lxc_process_python.bash b/yardstick/benchmark/scenarios/availability/ha_tools/check_lxc_process_python.bash
new file mode 100755 (executable)
index 0000000..6d2f4dd
--- /dev/null
@@ -0,0 +1,42 @@
+#!/bin/sh
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# check the status of a service, possibly running inside an LXC container
+
+set -e
+
+NOVA_API_PROCESS_1="nova-api-os-compute"
+NOVA_API_PROCESS_2="nova-api-metadata"
+NOVA_API_LXC_FILTER_1="nova_api_os_compute"
+NOVA_API_LXC_FILTER_2="nova_api_metadata"
+
+process_name=$1
+
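+# container names use underscores where service names use dashes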
+lxc_filter=$(echo "${process_name}" | sed 's/-/_/g')
+
+if [ "${lxc_filter}" = "glance_api" ]; then
+    lxc_filter="glance"
+fi
+
+if [ "${process_name}" = "nova-api" ]; then
+    container_1=$(lxc-ls -1 --filter="${NOVA_API_LXC_FILTER_1}")
+    container_2=$(lxc-ls -1 --filter="${NOVA_API_LXC_FILTER_2}")
+
+    echo $(($(lxc-attach -n "${container_1}" -- ps aux | grep -e "${NOVA_API_PROCESS_1}" | grep -v grep | grep -cv /bin/sh) + $(lxc-attach -n "${container_2}" -- ps aux | grep -e "${NOVA_API_PROCESS_2}" | grep -v grep | grep -cv /bin/sh)))
+else
+    container=$(lxc-ls -1 --filter="${lxc_filter}")
+
+    if [ "${process_name}" = "haproxy" ]; then
+        ps aux | grep -e "/usr/.*/${process_name}" | grep -v grep | grep -cv /bin/sh
+    else
+        lxc-attach -n "${container}" -- ps aux | grep -e "${process_name}" | grep -v grep | grep -cv /bin/sh
+    fi
+fi
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/fault_lxc_process_kill.bash b/yardstick/benchmark/scenarios/availability/ha_tools/fault_lxc_process_kill.bash
new file mode 100755 (executable)
index 0000000..b0b86ab
--- /dev/null
@@ -0,0 +1,65 @@
+#!/bin/sh
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Stop a process by name, possibly inside an LXC container
+
+set -e
+
+NOVA_API_PROCESS_1="nova-api-os-compute"
+NOVA_API_PROCESS_2="nova-api-metadata"
+NOVA_API_LXC_FILTER_1="nova_api_os_compute"
+NOVA_API_LXC_FILTER_2="nova_api_metadata"
+
+process_name=$1
+
+lxc_filter=$(echo "${process_name}" | sed 's/-/_/g')
+
+if [ "${lxc_filter}" = "glance_api" ]; then
+    lxc_filter="glance"
+fi
+
+if [ "${process_name}" = "nova-api" ]; then
+    container_1=$(lxc-ls -1 --filter="${NOVA_API_LXC_FILTER_1}")
+    container_2=$(lxc-ls -1 --filter="${NOVA_API_LXC_FILTER_2}")
+
+    pids_1=$(lxc-attach -n "${container_1}" -- pgrep -f "/openstack/.*/${NOVA_API_PROCESS_1}")
+    for pid in ${pids_1};
+        do
+            lxc-attach -n "${container_1}" -- kill -9 "${pid}"
+        done
+
+    pids_2=$(lxc-attach -n "${container_2}" -- pgrep -f "/openstack/.*/${NOVA_API_PROCESS_2}")
+    for pid in ${pids_2};
+        do
+            lxc-attach -n "${container_2}" -- kill -9 "${pid}"
+        done
+else
+    container=$(lxc-ls -1 --filter="${lxc_filter}")
+
+    if [ "${process_name}" = "haproxy" ]; then
+        for pid in $(pgrep -f "/usr/.*/${process_name}");
+            do
+                kill -9 "${pid}"
+            done
+    elif [ "${process_name}" = "keystone" ]; then
+        pids=$(lxc-attach -n "${container}" -- ps aux | grep "keystone" | grep -iv heartbeat | grep -iv monitor | grep -v grep | grep -v /bin/sh | awk '{print $2}')
+        for pid in ${pids};
+            do
+                lxc-attach -n "${container}" -- kill -9 "${pid}"
+            done
+    else
+        pids=$(lxc-attach -n "${container}" -- pgrep -f "/openstack/.*/${process_name}")
+        for pid in ${pids};
+            do
+                lxc-attach -n "${container}" -- kill -9 "${pid}"
+            done
+    fi
+fi
index aee516e..7408409 100644 (file)
@@ -20,4 +20,4 @@ else
     SECURE=""
 fi
 
-openstack "${SECURE}" flavor create $1 --id $2 --ram $3 --disk $4 --vcpus $5
+openstack ${SECURE} flavor create $1 --id $2 --ram $3 --disk $4 --vcpus $5
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/start_lxc_service.bash b/yardstick/benchmark/scenarios/availability/ha_tools/start_lxc_service.bash
new file mode 100755 (executable)
index 0000000..36a6739
--- /dev/null
@@ -0,0 +1,70 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Start a service, possibly inside an LXC container
+
+set -e
+
+NOVA_API_SERVICE_1="nova-api-os-compute"
+NOVA_API_SERVICE_2="nova-api-metadata"
+NOVA_API_LXC_FILTER_1="nova_api_os_compute"
+NOVA_API_LXC_FILTER_2="nova_api_metadata"
+
+service_name=$1
+
+if [ "${service_name}" = "haproxy" ]; then
+    if which systemctl 2>/dev/null; then
+        systemctl start $service_name
+    else
+        service $service_name start
+    fi
+else
+    lxc_filter=${service_name//-/_}
+
+    if [ "${lxc_filter}" = "glance_api" ]; then
+        lxc_filter="glance"
+    fi
+
+    if [ "${service_name}" = "nova-api" ]; then
+        container_1=$(lxc-ls -1 --filter="${NOVA_API_LXC_FILTER_1}")
+        container_2=$(lxc-ls -1 --filter="${NOVA_API_LXC_FILTER_2}")
+
+        if lxc-attach -n "${container_1}" -- which systemctl 2>/dev/null; then
+            lxc-attach -n "${container_1}" -- systemctl start "${NOVA_API_SERVICE_1}"
+        else
+            lxc-attach -n "${container_1}" -- service "${NOVA_API_SERVICE_1}" start
+        fi
+
+        if lxc-attach -n "${container_2}" -- which systemctl 2>/dev/null; then
+            lxc-attach -n "${container_2}" -- systemctl start "${NOVA_API_SERVICE_2}"
+        else
+            lxc-attach -n "${container_2}" -- service "${NOVA_API_SERVICE_2}" start
+        fi
+    else
+        container=$(lxc-ls -1 --filter="${lxc_filter}")
+
+        Distributor=$(lxc-attach -n "${container}" -- lsb_release -a | grep "Distributor ID" | awk '{print $3}')
+
+        if [ "${Distributor}" != "Ubuntu" -a "${service_name}" != "keystone" -a "${service_name}" != "neutron-server" ]; then
+            service_name="openstack-"${service_name}
+        elif [ "${Distributor}" = "Ubuntu" -a "${service_name}" = "keystone" ]; then
+            service_name="apache2"
+        elif [ "${service_name}" = "keystone" ]; then
+            service_name="httpd"
+        fi
+
+        if lxc-attach -n "${container}" -- which systemctl 2>/dev/null; then
+            lxc-attach -n "${container}" -- systemctl start "${service_name}"
+        else
+            lxc-attach -n "${container}" -- service "${service_name}" start
+        fi
+    fi
+fi
index a0777f9..a9488cc 100644 (file)
@@ -11,7 +11,6 @@ from __future__ import absolute_import
 import os
 import logging
 import subprocess
-import traceback
 
 import yardstick.ssh as ssh
 from yardstick.benchmark.scenarios.availability.monitor import basemonitor
@@ -27,9 +26,7 @@ def _execute_shell_command(command):
         output = subprocess.check_output(command, shell=True)
     except Exception:
         exitcode = -1
-        output = traceback.format_exc()
-        LOG.error("exec command '%s' error:\n ", command)
-        LOG.error(traceback.format_exc())
+        LOG.error("exec command '%s' error:\n ", command, exc_info=True)
 
     return exitcode, output
 
index 5114492..a08347d 100644 (file)
@@ -13,6 +13,8 @@ schema: "yardstick:task:0.1"
 
 process-status:
   monitor_script: ha_tools/check_process_python.bash
+lxc_process-status:
+  monitor_script: ha_tools/check_lxc_process_python.bash
 nova-image-list:
   monitor_script: ha_tools/nova_image_list.bash
 service-status:
index 594edea..9607e30 100644 (file)
@@ -164,38 +164,60 @@ class NetworkServiceTestCase(base.Scenario):
                      for vnfd in topology["constituent-vnfd"]
                      if vnf_id == vnfd["member-vnf-index"]), None)
 
+    @staticmethod
+    def get_vld_networks(networks):
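+        # re-key the networks dict by vld_id for direct lookup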
+        return {n['vld_id']: n for n in networks.values()}
+
     def _resolve_topology(self, context_cfg, topology):
         for vld in topology["vld"]:
-            if len(vld["vnfd-connection-point-ref"]) > 2:
+            try:
+                node_0, node_1 = vld["vnfd-connection-point-ref"]
+            except (TypeError, ValueError):
                 raise IncorrectConfig("Topology file corrupted, "
-                                      "too many endpoint for connection")
-
-            node_0, node_1 = vld["vnfd-connection-point-ref"]
+                                      "wrong number of endpoints for connection")
 
-            node0 = self._find_vnf_name_from_id(topology,
-                                                node_0["member-vnf-index-ref"])
-            node1 = self._find_vnf_name_from_id(topology,
-                                                node_1["member-vnf-index-ref"])
+            node_0_name = self._find_vnf_name_from_id(topology,
+                                                      node_0["member-vnf-index-ref"])
+            node_1_name = self._find_vnf_name_from_id(topology,
+                                                      node_1["member-vnf-index-ref"])
 
-            if0 = node_0["vnfd-connection-point-ref"]
-            if1 = node_1["vnfd-connection-point-ref"]
+            node_0_ifname = node_0["vnfd-connection-point-ref"]
+            node_1_ifname = node_1["vnfd-connection-point-ref"]
 
+            node_0_if = context_cfg["nodes"][node_0_name]["interfaces"][node_0_ifname]
+            node_1_if = context_cfg["nodes"][node_1_name]["interfaces"][node_1_ifname]
             try:
-                nodes = context_cfg["nodes"]
-                nodes[node0]["interfaces"][if0]["vld_id"] = vld["id"]
-                nodes[node1]["interfaces"][if1]["vld_id"] = vld["id"]
-
-                nodes[node0]["interfaces"][if0]["dst_mac"] = \
-                    nodes[node1]["interfaces"][if1]["local_mac"]
-                nodes[node0]["interfaces"][if0]["dst_ip"] = \
-                    nodes[node1]["interfaces"][if1]["local_ip"]
-
-                nodes[node1]["interfaces"][if1]["dst_mac"] = \
-                    nodes[node0]["interfaces"][if0]["local_mac"]
-                nodes[node1]["interfaces"][if1]["dst_ip"] = \
-                    nodes[node0]["interfaces"][if0]["local_ip"]
+                vld_networks = self.get_vld_networks(context_cfg["networks"])
+
+                node_0_if["vld_id"] = vld["id"]
+                node_1_if["vld_id"] = vld["id"]
+
+                # set peer name
+                node_0_if["peer_name"] = node_1_name
+                node_1_if["peer_name"] = node_0_name
+
+                # set peer interface name
+                node_0_if["peer_ifname"] = node_1_ifname
+                node_1_if["peer_ifname"] = node_0_ifname
+
+                # just load the whole network dict
+                node_0_if["network"] = vld_networks.get(vld["id"], {})
+                node_1_if["network"] = vld_networks.get(vld["id"], {})
+
+                node_0_if["dst_mac"] = node_1_if["local_mac"]
+                node_0_if["dst_ip"] = node_1_if["local_ip"]
+
+                node_1_if["dst_mac"] = node_0_if["local_mac"]
+                node_1_if["dst_ip"] = node_0_if["local_ip"]
+
+                # add peer interface dict, but remove circular link
+                # TODO: don't waste memory
+                node_0_copy = node_0_if.copy()
+                node_1_copy = node_1_if.copy()
+                node_0_if["peer_intf"] = node_1_copy
+                node_1_if["peer_intf"] = node_0_copy
             except KeyError:
-                raise IncorrectConfig("Required interface not found,"
+                raise IncorrectConfig("Required interface not found, "
                                       "topology file corrupted")
 
     @classmethod
@@ -308,21 +330,36 @@ printf "%s/driver:" $1 ; basename $(readlink -s $1/device/driver); } \
         return dict(network_devices)
 
     @classmethod
-    def get_vnf_impl(cls, vnf_model):
+    def get_vnf_impl(cls, vnf_model_id):
         """ Find the implementing class from vnf_model["vnf"]["name"] field
 
-        :param vnf_model: dictionary containing a parsed vnfd
+        :param vnf_model_id: parsed vnfd model ID field
         :return: subclass of GenericVNF
         """
         import_modules_from_package(
             "yardstick.network_services.vnf_generic.vnf")
-        expected_name = vnf_model['id']
-        impl = (c for c in itersubclasses(GenericVNF)
-                if c.__name__ == expected_name)
+        expected_name = vnf_model_id
+        classes_found = []
+
+        def impl():
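+            # record every scanned name so the error message can list candidates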
+            for name, class_ in ((c.__name__, c) for c in itersubclasses(GenericVNF)):
+                if name == expected_name:
+                    yield class_
+                classes_found.append(name)
+
         try:
-            return next(impl)
+            return next(impl())
         except StopIteration:
-            raise IncorrectConfig("No implementation for %s", expected_name)
+            pass
+
+        raise IncorrectConfig("No implementation for %s found in %s" %
+                              (expected_name, classes_found))
+
+    @staticmethod
+    def update_interfaces_from_node(vnfd, node):
+        for intf in vnfd["vdu"][0]["external-interface"]:
+            node_intf = node['interfaces'][intf['name']]
+            intf['virtual-interface'].update(node_intf)
 
     def load_vnf_models(self, scenario_cfg, context_cfg):
         """ Create VNF objects based on YAML descriptors
@@ -339,8 +376,11 @@ printf "%s/driver:" $1 ; basename $(readlink -s $1/device/driver); } \
                                     scenario_cfg['task_path']) as stream:
                 vnf_model = stream.read()
             vnfd = vnfdgen.generate_vnfd(vnf_model, node)
-            vnf_impl = self.get_vnf_impl(vnfd["vnfd:vnfd-catalog"]["vnfd"][0])
-            vnf_instance = vnf_impl(vnfd["vnfd:vnfd-catalog"]["vnfd"][0])
+            # TODO: here add extra context_cfg["nodes"] regardless of template
+            vnfd = vnfd["vnfd:vnfd-catalog"]["vnfd"][0]
+            self.update_interfaces_from_node(vnfd, node)
+            vnf_impl = self.get_vnf_impl(vnfd['id'])
+            vnf_instance = vnf_impl(vnfd)
             vnf_instance.name = node_name
             vnfs.append(vnf_instance)
 
index ad34817..b99e342 100644 (file)
@@ -40,10 +40,26 @@ class Fio(base.Scenario):
         type:    string
         unit:    na
         default: write
+    rwmixwrite - percentage of a mixed workload that should be writes
+        type: int
+        unit: percentage
+        default: 50
     ramp_time - run time before logging any performance
         type:    int
         unit:    seconds
         default: 20
+    direct - whether to use non-buffered I/O or not
+        type:    boolean
+        unit:    na
+        default: 1
+    size - total size of I/O for this job.
+        type:    string
+        unit:    na
+        default: 1g
+    numjobs - number of clones (processes/threads performing the same workload) of this job
+        type:    int
+        unit:    na
+        default: 1
 
     Read link below for more fio args description:
         http://www.bluestop.org/fio/HOWTO.txt
@@ -74,8 +90,8 @@ class Fio(base.Scenario):
 
     def run(self, result):
         """execute the benchmark"""
-        default_args = "-ioengine=libaio -direct=1 -group_reporting " \
-            "-numjobs=1 -time_based --output-format=json"
+        default_args = "-ioengine=libaio -group_reporting -time_based -time_based " \
+            "--output-format=json"
 
         if not self.setup_done:
             self.setup()
@@ -86,6 +102,10 @@ class Fio(base.Scenario):
         iodepth = options.get("iodepth", "1")
         rw = options.get("rw", "write")
         ramp_time = options.get("ramp_time", 20)
+        size = options.get("size", "1g")
+        direct = options.get("direct", "1")
+        numjobs = options.get("numjobs", "1")
+        rwmixwrite = options.get("rwmixwrite", 50)
         name = "yardstick-fio"
         # if run by a duration runner
         duration_time = self.scenario_cfg["runner"].get("duration", None) \
@@ -99,10 +119,10 @@ class Fio(base.Scenario):
         else:
             runtime = 30
 
-        cmd_args = "-filename=%s -bs=%s -iodepth=%s -rw=%s -ramp_time=%s " \
-                   "-runtime=%s -name=%s %s" \
-                   % (filename, bs, iodepth, rw, ramp_time, runtime, name,
-                      default_args)
+        cmd_args = "-filename=%s -direct=%s -bs=%s -iodepth=%s -rw=%s -rwmixwrite=%s " \
+                   "-size=%s -ramp_time=%s -numjobs=%s -runtime=%s -name=%s %s" \
+                   % (filename, direct, bs, iodepth, rw, rwmixwrite, size, ramp_time, numjobs,
+                      runtime, name, default_args)
         cmd = "sudo bash fio.sh %s %s" % (filename, cmd_args)
         LOG.debug("Executing command: %s", cmd)
         # Set timeout, so that the cmd execution does not exit incorrectly
index f158d57..011990a 100755 (executable)
@@ -39,13 +39,11 @@ if not PYTHONPATH or not VIRTUAL_ENV:
     raise SystemExit(1)
 
 
-def handler():
+def sigint_handler(*args, **kwargs):
     """ Capture ctrl+c and exit cli """
     subprocess.call(["pkill", "-9", "yardstick"])
     raise SystemExit(1)
 
-signal.signal(signal.SIGINT, handler)
-
 
 class YardstickNSCli(object):
     """ This class handles yardstick network serivce testing """
@@ -214,5 +212,6 @@ class YardstickNSCli(object):
         self.run_test(args, test_path)
 
 if __name__ == "__main__":
+    signal.signal(signal.SIGINT, sigint_handler)
     NS_CLI = YardstickNSCli()
     NS_CLI.main()
index d251341..69485a4 100644 (file)
@@ -26,7 +26,15 @@ except KeyError:
         SERVER_IP = '172.17.0.1'
     else:
         with IPDB() as ip:
-            SERVER_IP = ip.routes['default'].gateway
+            try:
+                SERVER_IP = ip.routes['default'].gateway
+            except KeyError:
+                # during unittests ip.routes['default'] can be invalid
+                SERVER_IP = '127.0.0.1'
+
+if not SERVER_IP:
+    SERVER_IP = '127.0.0.1'
+
 
 # dir
 CONF_DIR = get_param('dir.conf', '/etc/yardstick')
@@ -40,12 +48,15 @@ SAMPLE_CASE_DIR = join(REPOS_DIR, 'samples')
 TESTCASE_DIR = join(YARDSTICK_ROOT_PATH, 'tests/opnfv/test_cases/')
 TESTSUITE_DIR = join(YARDSTICK_ROOT_PATH, 'tests/opnfv/test_suites/')
 DOCS_DIR = join(REPOS_DIR, 'docs/testing/user/userguide/')
+OPENSTACK_CONF_DIR = '/etc/openstack'
 
 # file
 OPENRC = get_param('file.openrc', '/etc/yardstick/openstack.creds')
 ETC_HOSTS = get_param('file.etc_hosts', '/etc/hosts')
 CONF_FILE = join(CONF_DIR, 'yardstick.conf')
 POD_FILE = join(CONF_DIR, 'pod.yaml')
+CLOUDS_CONF = join(OPENSTACK_CONF_DIR, 'clouds.yml')
+K8S_CONF_FILE = join(CONF_DIR, 'admin.conf')
 CONF_SAMPLE_FILE = join(CONF_SAMPLE_DIR, 'yardstick.conf.sample')
 FETCH_SCRIPT = get_param('file.fetch_script', 'utils/fetch_os_creds.sh')
 FETCH_SCRIPT = join(RELENG_DIR, FETCH_SCRIPT)
@@ -76,6 +87,7 @@ GRAFANA_IMAGE = get_param('grafana.image', 'grafana/grafana')
 GRAFANA_TAG = get_param('grafana.tag', '3.1.1')
 
 # api
+API_PORT = 5000
 DOCKER_URL = 'unix://var/run/docker.sock'
 INSTALLERS = ['apex', 'compass', 'fuel', 'joid']
 SQLITE = 'sqlite:////tmp/yardstick.db'
diff --git a/yardstick/common/kubernetes_utils.py b/yardstick/common/kubernetes_utils.py
new file mode 100644 (file)
index 0000000..e4c2328
--- /dev/null
@@ -0,0 +1,137 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import logging
+
+from kubernetes import client
+from kubernetes import config
+from kubernetes.client.rest import ApiException
+
+from yardstick.common import constants as consts
+
+LOG = logging.getLogger(__name__)
+LOG.setLevel(logging.DEBUG)
+
+
+def get_core_api():     # pragma: no cover
+    try:
+        config.load_kube_config(config_file=consts.K8S_CONF_FILE)
+    except IOError:
+        LOG.exception('config file not found')
+        raise
+
+    return client.CoreV1Api()
+
+
+def create_replication_controller(template,
+                                  namespace='default',
+                                  wait=False,
+                                  **kwargs):    # pragma: no cover
+
+    core_v1_api = get_core_api()
+    try:
+        core_v1_api.create_namespaced_replication_controller(namespace,
+                                                             template,
+                                                             **kwargs)
+    except ApiException:
+        LOG.exception('Create replication controller failed')
+        raise
+
+
+def delete_replication_controller(name,
+                                  namespace='default',
+                                  wait=False,
+                                  **kwargs):    # pragma: no cover
+
+    core_v1_api = get_core_api()
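+    # callers may supply their own V1DeleteOptions via kwargs['body']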
+    body = kwargs.get('body', client.V1DeleteOptions())
+    kwargs.pop('body', None)
+    try:
+        core_v1_api.delete_namespaced_replication_controller(name,
+                                                             namespace,
+                                                             body,
+                                                             **kwargs)
+    except ApiException:
+        LOG.exception('Delete replication controller failed')
+        raise
+
+
+def delete_pod(name,
+               namespace='default',
+               wait=False,
+               **kwargs):    # pragma: no cover
+
+    core_v1_api = get_core_api()
+    body = kwargs.get('body', client.V1DeleteOptions())
+    kwargs.pop('body', None)
+    try:
+        core_v1_api.delete_namespaced_pod(name,
+                                          namespace,
+                                          body,
+                                          **kwargs)
+    except ApiException:
+        LOG.exception('Delete pod failed')
+        raise
+
+
+def read_pod(name,
+             namespace='default',
+             **kwargs):  # pragma: no cover
+    core_v1_api = get_core_api()
+    try:
+        resp = core_v1_api.read_namespaced_pod(name, namespace, **kwargs)
+    except ApiException:
+        LOG.exception('Read pod failed')
+        raise
+    else:
+        return resp
+
+
+def read_pod_status(name, namespace='default', **kwargs):   # pragma: no cover
+    return read_pod(name, namespace, **kwargs).status.phase
+
+
+def create_config_map(name,
+                      data,
+                      namespace='default',
+                      wait=False,
+                      **kwargs):   # pragma: no cover
+    core_v1_api = get_core_api()
+    metadata = client.V1ObjectMeta(name=name)
+    body = client.V1ConfigMap(data=data, metadata=metadata)
+    try:
+        core_v1_api.create_namespaced_config_map(namespace, body, **kwargs)
+    except ApiException:
+        LOG.exception('Create config map failed')
+        raise
+
+
+def delete_config_map(name,
+                      namespace='default',
+                      wait=False,
+                      **kwargs):     # pragma: no cover
+    core_v1_api = get_core_api()
+    body = kwargs.pop('body', client.V1DeleteOptions())
+    try:
+        core_v1_api.delete_namespaced_config_map(name,
+                                                 namespace,
+                                                 body,
+                                                 **kwargs)
+    except ApiException:
+        LOG.exception('Delete config map failed')
+        raise
+
+
+def get_pod_list(namespace='default'):      # pragma: no cover
+    core_v1_api = get_core_api()
+    try:
+        return core_v1_api.list_namespaced_pod(namespace=namespace)
+    except ApiException:
+        LOG.exception('Get pod list failed')
+        raise
index 7633777..92bb7b7 100644 (file)
@@ -24,10 +24,14 @@ import os
 import subprocess
 import sys
 import collections
-import six
+import socket
+import random
 from functools import reduce
+from contextlib import closing
 
 import yaml
+import six
+from flask import jsonify
 from six.moves import configparser
 from oslo_utils import importutils
 from oslo_serialization import jsonutils
@@ -123,6 +127,14 @@ def makedirs(d):
             raise
 
 
+def remove_file(path):
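+    # ignore a missing file; re-raise any other OSError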
+    try:
+        os.remove(path)
+    except OSError as e:
+        if e.errno != errno.ENOENT:
+            raise
+
+
 def execute_command(cmd):
     exec_msg = "Executing command: '%s'" % cmd
     logger.debug(exec_msg)
@@ -197,7 +209,8 @@ def flatten_dict_key(data):
     next_data = {}
 
     # use list, because iterable is too generic
-    if not any(isinstance(v, (collections.Mapping, list)) for v in data.values()):
+    if not any(isinstance(v, (collections.Mapping, list))
+               for v in data.values()):
         return data
 
     for k, v in six.iteritems(data):
@@ -212,3 +225,52 @@ def flatten_dict_key(data):
             next_data[k] = v
 
     return flatten_dict_key(next_data)
+
+
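+# py2/py3 helper: recursively cast unicode keys and values to native str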
+def translate_to_str(obj):
+    if isinstance(obj, collections.Mapping):
+        return {str(k): translate_to_str(v) for k, v in obj.items()}
+    elif isinstance(obj, list):
+        return [translate_to_str(ele) for ele in obj]
+    elif isinstance(obj, six.text_type):
+        return str(obj)
+    return obj
+
+
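+# wrap data in the uniform {'status': ..., 'result': ...} JSON envelope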
+def result_handler(status, data):
+    result = {
+        'status': status,
+        'result': data
+    }
+    return jsonify(result)
+
+
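+# flatten an object's plain-value attributes into a dict; attributes that
+# are themselves objects (i.e. have a __dict__) are skipped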
+def change_obj_to_dict(obj):
+    dic = {}
+    for k, v in vars(obj).items():
+        try:
+            vars(v)
+        except TypeError:
+            dic.update({k: v})
+    return dic
+
+
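+# set a value in a nested dict via a dotted key path such as 'a.b.c',
+# creating intermediate dicts as needed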
+def set_dict_value(dic, keys, value):
+    return_dic = dic
+    key_list = keys.split('.')
+
+    for key in key_list[:-1]:
+        return_dic = return_dic.setdefault(key, {})
+    return_dic[key_list[-1]] = value
+    return dic
+
+
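+# probe random ports between 5000 and 10000 until one is found that is not
+# accepting connections on the given ip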
+def get_free_port(ip):
+    while True:
+        port = random.randint(5000, 10000)
+        with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
+            if s.connect_ex((ip, port)) != 0:
+                return port
index 1d770f7..2df6037 100644 (file)
@@ -96,7 +96,6 @@ class GenericVNF(object):
             return address.version
 
     def _ip_to_hex(self, ip_addr):
-        ip_to_convert = ip_addr.split(".")
         ip_x = ip_addr
         if self.get_ip_version(ip_addr) == 4:
             ip_to_convert = ip_addr.split(".")
index 7958b1c..57b23d3 100644 (file)
@@ -231,13 +231,16 @@ name (i.e. %s).\
         }
 
     def add_network(self, name, physical_network='physnet1', provider=None,
-                    segmentation_id=None):
+                    segmentation_id=None, port_security_enabled=True):
         """add to the template a Neutron Net"""
         log.debug("adding Neutron::Net '%s'", name)
         if provider is None:
             self.resources[name] = {
                 'type': 'OS::Neutron::Net',
-                'properties': {'name': name}
+                'properties': {
+                    'name': name,
+                    'port_security_enabled': port_security_enabled,
+                }
             }
         else:
             self.resources[name] = {
@@ -245,12 +248,12 @@ name (i.e. %s).\
                 'properties': {
                     'name': name,
                     'network_type': 'vlan',
-                    'physical_network': physical_network
-                }
+                    'physical_network': physical_network,
+                    'port_security_enabled': port_security_enabled,
+                },
             }
             if segmentation_id:
-                seg_id_dit = {'segmentation_id': segmentation_id}
-                self.resources[name]["properties"].update(seg_id_dit)
+                self.resources[name]['properties']['segmentation_id'] = segmentation_id
 
     def add_server_group(self, name, policies):     # pragma: no cover
         """add to the template a ServerGroup"""
@@ -262,8 +265,9 @@ name (i.e. %s).\
                            'policies': policies}
         }
 
-    def add_subnet(self, name, network, cidr):
-        """add to the template a Neutron Subnet"""
+    def add_subnet(self, name, network, cidr, enable_dhcp='true', gateway_ip=None):
+        """add to the template a Neutron Subnet
+        """
         log.debug("adding Neutron::Subnet '%s' in network '%s', cidr '%s'",
                   name, network, cidr)
         self.resources[name] = {
@@ -272,9 +276,12 @@ name (i.e. %s).\
             'properties': {
                 'name': name,
                 'cidr': cidr,
-                'network_id': {'get_resource': network}
+                'network_id': {'get_resource': network},
+                'enable_dhcp': enable_dhcp,
             }
         }
+        if gateway_ip is not None:
+            self.resources[name]['properties']['gateway_ip'] = gateway_ip
 
         self._template['outputs'][name] = {
             'description': 'subnet %s ID' % name,
@@ -316,9 +323,10 @@ name (i.e. %s).\
             }
         }
 
-    def add_port(self, name, network_name, subnet_name, sec_group_id=None,
-                 provider=None):
-        """add to the template a named Neutron Port"""
+    def add_port(self, name, network_name, subnet_name, sec_group_id=None, provider=None,
+                 allowed_address_pairs=None):
+        """add to the template a named Neutron Port
+        """
         log.debug("adding Neutron::Port '%s', network:'%s', subnet:'%s', "
                   "secgroup:%s", name, network_name, subnet_name, sec_group_id)
         self.resources[name] = {
@@ -341,6 +349,10 @@ name (i.e. %s).\
             self.resources[name]['properties']['security_groups'] = \
                 [sec_group_id]
 
+        if allowed_address_pairs:
+            self.resources[name]['properties'][
+                'allowed_address_pairs'] = allowed_address_pairs
+
         self._template['outputs'][name] = {
             'description': 'Address for interface %s' % name,
             'value': {'get_attr': [name, 'fixed_ips', 0, 'ip_address']}
@@ -534,6 +546,7 @@ name (i.e. %s).\
         }
 
     HEAT_WAIT_LOOP_INTERVAL = 2
+    HEAT_CREATE_COMPLETE_STATUS = u'CREATE_COMPLETE'
 
     def create(self, block=True, timeout=3600):
         """
@@ -558,10 +571,13 @@ name (i.e. %s).\
 
         if not block:
             self.outputs = stack.outputs = {}
+            end_time = time.time()
+            log.info("Created stack '%s' in %.3e secs",
+                     self.name, end_time - start_time)
             return stack
 
         time_limit = start_time + timeout
-        for status in iter(self.status, u'CREATE_COMPLETE'):
+        for status in iter(self.status, self.HEAT_CREATE_COMPLETE_STATUS):
             log.debug("stack state %s", status)
             if status == u'CREATE_FAILED':
                 stack_status_reason = heat_client.stacks.get(self.uuid).stack_status_reason
@@ -574,7 +590,7 @@ name (i.e. %s).\
 
         end_time = time.time()
         outputs = heat_client.stacks.get(self.uuid).outputs
-        log.info("Created stack '%s' in %d secs",
+        log.info("Created stack '%s' in %.3e secs",
                  self.name, end_time - start_time)
 
         # keep outputs as unicode
diff --git a/yardstick/orchestrator/kubernetes.py b/yardstick/orchestrator/kubernetes.py
new file mode 100644 (file)
index 0000000..6d7045f
--- /dev/null
@@ -0,0 +1,130 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+from __future__ import absolute_import
+from __future__ import print_function
+
+from yardstick.common import utils
+from yardstick.common import kubernetes_utils as k8s_utils
+
+
+class KubernetesObject(object):
+
+    def __init__(self, name, **kwargs):
+        super(KubernetesObject, self).__init__()
+        self.name = name
+        self.image = kwargs.get('image', 'openretriever/yardstick')
+        self.command = [kwargs.get('command', '/bin/bash')]
+        self.args = kwargs.get('args', [])
+        self.ssh_key = kwargs.get('ssh_key', 'yardstick_key')
+
+        self.volumes = []
+
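+        # skeleton ReplicationController manifest; the name, labels,
+        # containers and volumes are filled in by the helpers below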
+        self.template = {
+            "apiVersion": "v1",
+            "kind": "ReplicationController",
+            "metadata": {
+                "name": ""
+            },
+            "spec": {
+                "replicas": 1,
+                "template": {
+                    "metadata": {
+                        "labels": {
+                            "app": ""
+                        }
+                    },
+                    "spec": {
+                        "containers": [],
+                        "volumes": []
+                    }
+                }
+            }
+        }
+
+        self._change_value_according_name(name)
+        self._add_containers()
+        self._add_ssh_key_volume()
+        self._add_volumes()
+
+    def get_template(self):
+        return self.template
+
+    def _change_value_according_name(self, name):
+        utils.set_dict_value(self.template, 'metadata.name', name)
+
+        utils.set_dict_value(self.template,
+                             'spec.template.metadata.labels.app',
+                             name)
+
+    def _add_containers(self):
+        containers = [self._add_container()]
+        utils.set_dict_value(self.template,
+                             'spec.template.spec.containers',
+                             containers)
+
+    def _add_container(self):
+        container_name = '{}-container'.format(self.name)
+        ssh_key_mount_path = "/root/.ssh/"
+
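+        # the ssh-key ConfigMap is mounted at /root/.ssh inside the
+        # container (see _add_ssh_key_volume)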
+        container = {
+            "args": self.args,
+            "command": self.command,
+            "image": self.image,
+            "name": container_name,
+            "volumeMounts": [
+                {
+                    "mountPath": ssh_key_mount_path,
+                    "name": self.ssh_key
+                }
+            ]
+        }
+
+        return container
+
+    def _add_volumes(self):
+        utils.set_dict_value(self.template,
+                             'spec.template.spec.volumes',
+                             self.volumes)
+
+    def _add_volume(self, volume):
+        self.volumes.append(volume)
+
+    def _add_ssh_key_volume(self):
+        key_volume = {
+            "configMap": {
+                "name": self.ssh_key
+            },
+            "name": self.ssh_key
+        }
+        self._add_volume(key_volume)
+
+
+class KubernetesTemplate(object):
+
+    def __init__(self, name, template_cfg):
+        self.name = name
+        self.ssh_key = '{}-key'.format(name)
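+        # a single ssh-key ConfigMap, '<name>-key', is shared by every
+        # replication controller built from this template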
+
+        self.rcs = [self._get_rc_name(rc) for rc in template_cfg]
+        self.k8s_objs = [KubernetesObject(self._get_rc_name(rc),
+                                          ssh_key=self.ssh_key,
+                                          **cfg)
+                         for rc, cfg in template_cfg.items()]
+        self.pods = []
+
+    def _get_rc_name(self, rc_name):
+        return '{}-{}'.format(rc_name, self.name)
+
+    def get_rc_pods(self):
+        resp = k8s_utils.get_pod_list()
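+        # pods created by a replication controller inherit its name as a
+        # prefix, so match on it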
+        self.pods = [p.metadata.name for p in resp.items for s in self.rcs
+                     if p.metadata.name.startswith(s)]
+
+        return self.pods
index 5a9178f..1ff4225 100644 (file)
@@ -130,6 +130,7 @@ class DummyDeploymentUnit(mut.DeploymentUnit):
         raise Exception
 
 
+@mock.patch("experimental_framework.deployment_unit.time")
 class TestDeploymentUnit(unittest.TestCase):
 
     def setUp(self):
@@ -140,7 +141,7 @@ class TestDeploymentUnit(unittest.TestCase):
 
     @mock.patch('experimental_framework.heat_manager.HeatManager',
                 side_effect=DummyHeatManager)
-    def test_constructor_for_sanity(self, mock_heat_manager):
+    def test_constructor_for_sanity(self, mock_heat_manager, mock_time):
         du = mut.DeploymentUnit(dict())
         self.assertTrue(isinstance(du.heat_manager, DummyHeatManager))
         mock_heat_manager.assert_called_once_with(dict())
@@ -150,7 +151,7 @@ class TestDeploymentUnit(unittest.TestCase):
                 side_effect=DummyHeatManager)
     @mock.patch('os.path.isfile')
     def test_deploy_heat_template_for_failure(self, mock_os_is_file,
-                                              mock_heat_manager):
+                                              mock_heat_manager, mock_time):
         mock_os_is_file.return_value = False
         du = mut.DeploymentUnit(dict())
         template_file = ''
@@ -163,7 +164,7 @@ class TestDeploymentUnit(unittest.TestCase):
                 side_effect=DummyHeatManager)
     @mock.patch('os.path.isfile')
     def test_deploy_heat_template_for_success(self, mock_os_is_file,
-                                              mock_heat_manager):
+                                              mock_heat_manager, mock_time):
         mock_os_is_file.return_value = True
         du = mut.DeploymentUnit(dict())
         template_file = ''
@@ -178,7 +179,7 @@ class TestDeploymentUnit(unittest.TestCase):
                 side_effect=DummyHeatManagerComplete)
     @mock.patch('os.path.isfile')
     def test_deploy_heat_template_2_for_success(self, mock_os_is_file,
-                                                mock_heat_manager):
+                                                mock_heat_manager, mock_time):
         mock_os_is_file.return_value = True
         du = mut.DeploymentUnit(dict())
         template_file = ''
@@ -196,7 +197,7 @@ class TestDeploymentUnit(unittest.TestCase):
                 side_effect=DummyDeploymentUnit)
     def test_deploy_heat_template_3_for_success(self, mock_dep_unit,
                                                 mock_os_is_file,
-                                                mock_heat_manager):
+                                                mock_heat_manager, mock_time):
         mock_os_is_file.return_value = True
         du = mut.DeploymentUnit(dict())
         template_file = ''
@@ -212,7 +213,7 @@ class TestDeploymentUnit(unittest.TestCase):
                 side_effect=DummyHeatManagerFailed)
     @mock.patch('os.path.isfile')
     def test_deploy_heat_template_for_success_2(self, mock_os_is_file,
-                                                mock_heat_manager, mock_log):
+                                                mock_heat_manager, mock_log, mock_time):
         mock_os_is_file.return_value = True
         du = DummyDeploymentUnit(dict())
         template_file = ''
@@ -226,7 +227,7 @@ class TestDeploymentUnit(unittest.TestCase):
                 side_effect=DummyHeatManagerDestroy)
     @mock.patch('experimental_framework.common.LOG')
     def test_destroy_heat_template_for_success(self, mock_log,
-                                               mock_heat_manager):
+                                               mock_heat_manager, mock_time):
         openstack_credentials = dict()
         du = mut.DeploymentUnit(openstack_credentials)
         du.deployed_stacks = ['stack']
@@ -238,14 +239,14 @@ class TestDeploymentUnit(unittest.TestCase):
                 side_effect=DummyHeatManagerDestroyException)
     @mock.patch('experimental_framework.common.LOG')
     def test_destroy_heat_template_for_success_2(self, mock_log,
-                                                 mock_heat_manager):
+                                                 mock_heat_manager, mock_time):
         openstack_credentials = dict()
         du = mut.DeploymentUnit(openstack_credentials)
         du.deployed_stacks = ['stack']
         stack_name = 'stack'
         self.assertFalse(du.destroy_heat_template(stack_name))
 
-    def test_destroy_all_deployed_stacks_for_success(self):
+    def test_destroy_all_deployed_stacks_for_success(self, mock_time):
         du = DeploymentUnitDestroy()
         du.destroy_all_deployed_stacks()
         self.assertTrue(du.destroy_heat_template())
@@ -254,7 +255,7 @@ class TestDeploymentUnit(unittest.TestCase):
                 side_effect=DummyHeatManagerReiteration)
     @mock.patch('os.path.isfile')
     def test_deploy_heat_template_for_success_3(self, mock_os_is_file,
-                                                mock_heat_manager):
+                                                mock_heat_manager, mock_time):
         mock_os_is_file.return_value = True
         du = mut.DeploymentUnit(dict())
         template = 'template_reiteration'
index 96ead5e..9fa860a 100644 (file)
@@ -359,6 +359,7 @@ class MockRunCommand:
             return MockRunCommand.ret_val_finalization
 
 
+@mock.patch('experimental_framework.packet_generators.dpdk_packet_generator.time')
 class TestDpdkPacketGenOthers(unittest.TestCase):
 
     def setUp(self):
@@ -370,7 +371,7 @@ class TestDpdkPacketGenOthers(unittest.TestCase):
     @mock.patch('experimental_framework.packet_generators.'
                 'dpdk_packet_generator.DpdkPacketGenerator.'
                 '_cores_configuration')
-    def test__get_core_nics_for_failure(self, mock_cores_configuration):
+    def test__get_core_nics_for_failure(self, mock_cores_configuration, mock_time):
         mock_cores_configuration.return_value = None
         self.assertRaises(ValueError, mut.DpdkPacketGenerator._get_core_nics,
                           '', '')
@@ -379,7 +380,7 @@ class TestDpdkPacketGenOthers(unittest.TestCase):
                 'dpdk_packet_generator.DpdkPacketGenerator.'
                 '_cores_configuration')
     def test__get_core_nics_one_nic_for_success(self,
-                                                mock_cores_configuration):
+                                                mock_cores_configuration, mock_time):
         mock_cores_configuration.return_value = 'ret_val'
         expected = 'ret_val'
         output = mut.DpdkPacketGenerator._get_core_nics(1, 'coremask')
@@ -390,7 +391,7 @@ class TestDpdkPacketGenOthers(unittest.TestCase):
                 'dpdk_packet_generator.DpdkPacketGenerator.'
                 '_cores_configuration')
     def test__get_core_nics_two_nics_for_success(self,
-                                                 mock_cores_configuration):
+                                                 mock_cores_configuration, mock_time):
         mock_cores_configuration.return_value = 'ret_val'
         expected = 'ret_val'
         output = mut.DpdkPacketGenerator._get_core_nics(2, 'coremask')
@@ -398,7 +399,7 @@ class TestDpdkPacketGenOthers(unittest.TestCase):
         mock_cores_configuration.assert_called_once_with('coremask', 1, 2, 2)
 
     @mock.patch('os.path.isfile')
-    def test__init_input_validation_for_success(self, mock_is_file):
+    def test__init_input_validation_for_success(self, mock_is_file, mock_time):
         mock_is_file.return_value = True
 
         pcap_file_0 = 'pcap_file_0'
@@ -419,7 +420,7 @@ class TestDpdkPacketGenOthers(unittest.TestCase):
             variables), None)
 
     @mock.patch('os.path.isfile')
-    def test__init_input_validation_for_failure(self, mock_is_file):
+    def test__init_input_validation_for_failure(self, mock_is_file, mock_time):
         mock_is_file.return_value = True
 
         pcap_file_0 = 'pcap_file_0'
@@ -440,7 +441,7 @@ class TestDpdkPacketGenOthers(unittest.TestCase):
                           lua_script, pcap_directory, lua_directory, variables)
 
     @mock.patch('os.path.isfile')
-    def test__init_input_validation_for_failure_2(self, mock_is_file):
+    def test__init_input_validation_for_failure_2(self, mock_is_file, mock_time):
         mock_is_file.return_value = True
 
         pcap_directory = None
@@ -461,7 +462,7 @@ class TestDpdkPacketGenOthers(unittest.TestCase):
                           lua_script, pcap_directory, lua_directory, variables)
 
     @mock.patch('os.path.isfile')
-    def test__init_input_validation_for_failure_3(self, mock_is_file):
+    def test__init_input_validation_for_failure_3(self, mock_is_file, mock_time):
         mock_is_file.return_value = True
 
         pcap_directory = 'directory'
@@ -482,7 +483,7 @@ class TestDpdkPacketGenOthers(unittest.TestCase):
                           lua_script, pcap_directory, lua_directory, variables)
 
     @mock.patch('os.path.isfile')
-    def test__init_input_validation_for_failure_4(self, mock_is_file):
+    def test__init_input_validation_for_failure_4(self, mock_is_file, mock_time):
         mock_is_file.return_value = True
 
         pcap_directory = 'directory'
@@ -503,7 +504,7 @@ class TestDpdkPacketGenOthers(unittest.TestCase):
                           lua_script, pcap_directory, lua_directory, variables)
 
     @mock.patch('os.path.isfile')
-    def test__init_input_validation_for_failure_5(self, mock_is_file):
+    def test__init_input_validation_for_failure_5(self, mock_is_file, mock_time):
         mock_is_file.return_value = True
 
         pcap_directory = 'directory'
@@ -524,7 +525,7 @@ class TestDpdkPacketGenOthers(unittest.TestCase):
                           lua_script, pcap_directory, lua_directory, variables)
 
     @mock.patch('os.path.isfile', side_effect=[False])
-    def test__init_input_validation_for_failure_6(self, mock_is_file):
+    def test__init_input_validation_for_failure_6(self, mock_is_file, mock_time):
         # mock_is_file.return_value = False
 
         pcap_directory = 'directory'
@@ -545,7 +546,7 @@ class TestDpdkPacketGenOthers(unittest.TestCase):
                           lua_script, pcap_directory, lua_directory, variables)
 
     @mock.patch('os.path.isfile', side_effect=[True, False])
-    def test__init_input_validation_for_failure_7(self, mock_is_file):
+    def test__init_input_validation_for_failure_7(self, mock_is_file, mock_time):
         pcap_directory = 'directory'
         pcap_file_0 = 'pcap_file_0'
         pcap_file_1 = 'pcap_file_1'
@@ -564,7 +565,7 @@ class TestDpdkPacketGenOthers(unittest.TestCase):
                           lua_script, pcap_directory, lua_directory, variables)
 
     @mock.patch('os.path.isfile', side_effect=[True, True, False])
-    def test__init_input_validation_for_failure_8(self, mock_is_file):
+    def test__init_input_validation_for_failure_8(self, mock_is_file, mock_time):
         pcap_directory = 'directory'
         pcap_file_0 = 'pcap_file_0'
         pcap_file_1 = 'pcap_file_1'
@@ -583,13 +584,13 @@ class TestDpdkPacketGenOthers(unittest.TestCase):
                           lua_script, pcap_directory, lua_directory, variables)
 
     @mock.patch('os.chdir')
-    def test__chdir_for_success(self, mock_os_chdir):
+    def test__chdir_for_success(self, mock_os_chdir, mock_time):
         mut.DpdkPacketGenerator._chdir('directory')
         mock_os_chdir.assert_called_once_with('directory')
 
     @mock.patch('experimental_framework.common.run_command',
                 side_effect=MockRunCommand.mock_run_command)
-    def test__init_physical_nics_for_success(self, mock_run_command):
+    def test__init_physical_nics_for_success(self, mock_run_command, mock_time):
         dpdk_interfaces = 1
         dpdk_vars = dict()
 
@@ -608,7 +609,7 @@ class TestDpdkPacketGenOthers(unittest.TestCase):
 
     @mock.patch('experimental_framework.common.run_command',
                 side_effect=MockRunCommand.mock_run_command)
-    def test__init_physical_nics_for_success_2(self, mock_run_command):
+    def test__init_physical_nics_for_success_2(self, mock_run_command, mock_time):
         dpdk_interfaces = 2
         dpdk_vars = dict()
 
@@ -626,7 +627,7 @@ class TestDpdkPacketGenOthers(unittest.TestCase):
                          [True, True, True, True, True, True])
 
     @mock.patch('experimental_framework.common.run_command')
-    def test__init_physical_nics_for_failure(self, mock_run_command):
+    def test__init_physical_nics_for_failure(self, mock_run_command, mock_time):
         dpdk_interfaces = 3
         dpdk_vars = dict()
         self.assertRaises(ValueError, self.mut._init_physical_nics,
@@ -634,7 +635,7 @@ class TestDpdkPacketGenOthers(unittest.TestCase):
 
     @mock.patch('experimental_framework.common.run_command',
                 side_effect=MockRunCommand.mock_run_command_finalization)
-    def test__finalize_physical_nics_for_success(self, mock_run_command):
+    def test__finalize_physical_nics_for_success(self, mock_run_command, mock_time):
         dpdk_interfaces = 1
         dpdk_vars = dict()
         dpdk_vars[conf_file.CFSP_DPDK_DPDK_DIRECTORY] = 'dpdk_directory/'
@@ -652,7 +653,7 @@ class TestDpdkPacketGenOthers(unittest.TestCase):
 
     @mock.patch('experimental_framework.common.run_command',
                 side_effect=MockRunCommand.mock_run_command_finalization)
-    def test__finalize_physical_nics_for_success_2(self, mock_run_command):
+    def test__finalize_physical_nics_for_success_2(self, mock_run_command, mock_time):
         dpdk_interfaces = 2
         dpdk_vars = dict()
         dpdk_vars[conf_file.CFSP_DPDK_DPDK_DIRECTORY] = 'dpdk_directory/'
@@ -668,34 +669,34 @@ class TestDpdkPacketGenOthers(unittest.TestCase):
         self.assertEqual(MockRunCommand.mock_run_command_finalization(),
                          [True, True, True, True, True, True])
 
-    def test__finalize_physical_nics_for_failure(self):
+    def test__finalize_physical_nics_for_failure(self, mock_time):
         dpdk_interfaces = 0
         dpdk_vars = dict()
         self.assertRaises(ValueError, self.mut._finalize_physical_nics,
                           dpdk_interfaces, dpdk_vars)
 
-    def test__cores_configuration_for_success(self):
+    def test__cores_configuration_for_success(self, mock_time):
         coremask = '1f'
         expected = '[2:1].0,[4:3].1'
         output = mut.DpdkPacketGenerator._cores_configuration(coremask,
                                                               1, 2, 2)
         self.assertEqual(expected, output)
 
-    def test__cores_configuration_for_success_2(self):
+    def test__cores_configuration_for_success_2(self, mock_time):
         coremask = '1f'
         expected = '2.0,[4:3].1'
         output = mut.DpdkPacketGenerator._cores_configuration(coremask,
                                                               1, 1, 2)
         self.assertEqual(expected, output)
 
-    def test__cores_configuration_for_success_3(self):
+    def test__cores_configuration_for_success_3(self, mock_time):
         coremask = '1f'
         expected = '[3:2].0,4.1'
         output = mut.DpdkPacketGenerator._cores_configuration(coremask,
                                                               1, 2, 1)
         self.assertEqual(expected, output)
 
-    def test__cores_configuration_for_failure(self):
+    def test__cores_configuration_for_failure(self, mock_time):
         coremask = '1'
         self.assertRaises(ValueError,
                           mut.DpdkPacketGenerator._cores_configuration,
@@ -703,7 +704,7 @@ class TestDpdkPacketGenOthers(unittest.TestCase):
 
     @mock.patch('experimental_framework.common.LOG')
     @mock.patch('experimental_framework.common.run_command')
-    def test__change_vlan_for_success(self, mock_run_command, mock_log):
+    def test__change_vlan_for_success(self, mock_run_command, mock_log, mock_time):
         mut.DpdkPacketGenerator._change_vlan('/directory/', 'pcap_file', '10')
         expected_param = '/directory/vlan_tag.sh /directory/pcap_file 10'
         mock_run_command.assert_called_with(expected_param)
index 2bd8b7b..69c5d74 100644 (file)
@@ -257,6 +257,7 @@ class InstantiationValidationInitTest(unittest.TestCase):
         self.assertEqual(dummy_os_kill('', '', True), [1, 1])
         self.assertEqual(dummy_run_command('', True), [1, 1, 0, 0, 0])
 
+    @mock.patch('experimental_framework.benchmarks.instantiation_validation_benchmark.time')
     @mock.patch('os.chdir')
     @mock.patch('experimental_framework.common.run_command',
                 side_effect=dummy_run_command_2)
@@ -265,7 +266,7 @@ class InstantiationValidationInitTest(unittest.TestCase):
                 'InstantiationValidationBenchmark._get_pids')
     @mock.patch('os.kill', side_effect=dummy_os_kill)
     def test__init_packet_checker_for_success(self, mock_kill, mock_pids,
-                                              mock_run_command, mock_chdir):
+                                              mock_run_command, mock_chdir, mock_time):
         global command_counter
         command_counter = [0, 0, 0, 0, 0]
         mock_pids.return_value = [1234, 4321]
@@ -314,13 +315,14 @@ class InstantiationValidationInitTest(unittest.TestCase):
         self.assertEqual(dummy_replace_in_file('', '', '', True),
                          [0, 0, 0, 1, 1, 1])
 
+    @mock.patch('experimental_framework.benchmarks.instantiation_validation_benchmark.time')
     @mock.patch('experimental_framework.common.LOG')
     @mock.patch('experimental_framework.packet_generators.'
                 'dpdk_packet_generator.DpdkPacketGenerator',
                 side_effect=DummyDpdkPacketGenerator)
     @mock.patch('experimental_framework.common.get_dpdk_pktgen_vars')
     def test_run_for_success(self, mock_common_get_vars, mock_pktgen,
-                             mock_log):
+                             mock_log, mock_time):
         rval = dict()
         rval[cfs.CFSP_DPDK_BUS_SLOT_NIC_2] = 'bus_2'
         rval[cfs.CFSP_DPDK_NAME_IF_2] = 'if_2'