Merge "base: remove redudant ip_to_convert"
author Ross Brattain <ross.b.brattain@intel.com>
Mon, 24 Jul 2017 05:39:21 +0000 (05:39 +0000)
committer Gerrit Code Review <gerrit@opnfv.org>
Mon, 24 Jul 2017 05:39:21 +0000 (05:39 +0000)
27 files changed:
api/__init__.py
api/database/v2/handlers.py
api/resources/v1/testcases.py
api/resources/v2/containers.py [new file with mode: 0644]
api/resources/v2/images.py [new file with mode: 0644]
api/resources/v2/pods.py
api/resources/v2/projects.py [new file with mode: 0644]
api/resources/v2/tasks.py [new file with mode: 0644]
api/resources/v2/testcases.py [new file with mode: 0644]
api/resources/v2/testsuites.py [new file with mode: 0644]
api/urls.py
api/utils/thread.py
tests/unit/benchmark/contexts/test_model.py
yardstick/benchmark/contexts/heat.py
yardstick/benchmark/contexts/model.py
yardstick/benchmark/core/testsuite.py [new file with mode: 0644]
yardstick/benchmark/scenarios/availability/attacker_conf.yaml
yardstick/benchmark/scenarios/availability/ha_tools/check_lxc_process_python.bash [new file with mode: 0755]
yardstick/benchmark/scenarios/availability/ha_tools/fault_lxc_process_kill.bash [new file with mode: 0755]
yardstick/benchmark/scenarios/availability/ha_tools/nova/create_flavor.bash
yardstick/benchmark/scenarios/availability/ha_tools/nova/delete_flavor.bash
yardstick/benchmark/scenarios/availability/ha_tools/nova/show_flavors.bash
yardstick/benchmark/scenarios/availability/ha_tools/start_lxc_service.bash [new file with mode: 0755]
yardstick/benchmark/scenarios/availability/monitor_conf.yaml
yardstick/benchmark/scenarios/storage/fio.py
yardstick/common/utils.py
yardstick/orchestrator/heat.py

diff --git a/api/__init__.py b/api/__init__.py
index 3195c97..c5aefff 100644 (file)
@@ -45,8 +45,9 @@ class ApiResource(Resource):
 
         return args
 
-    def _dispatch_post(self):
+    def _dispatch_post(self, **kwargs):
         action, args = self._post_args()
+        args.update(kwargs)
         return self._dispatch(args, action)
 
     def _dispatch(self, args, action):
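
The new **kwargs path lets URL parameters flow into the action arguments, which V2Task.put relies on later in this change to inject task_id. A minimal, self-contained sketch of the intended flow (MiniResource and the hard-coded body are stand-ins, not the real ApiResource; the action/args body shape is inferred from how _post_args is used):

    # Hypothetical mini version of the dispatch flow, runnable standalone.
    class MiniResource(object):
        def _post_args(self):
            body = {'action': 'add_case', 'args': {'case_name': 'ping'}}
            return body['action'], body['args']

        def _dispatch_post(self, **kwargs):
            action, args = self._post_args()
            args.update(kwargs)  # URL parameters are merged over body args
            return getattr(self, action)(args)

        def add_case(self, args):
            # sees both the body args and the URL parameter
            return args['task_id'], args['case_name']

    print(MiniResource()._dispatch_post(task_id='abc'))  # ('abc', 'ping')
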
diff --git a/api/database/v2/handlers.py b/api/database/v2/handlers.py
index 095ad72..1bc32bf 100644 (file)
@@ -122,6 +122,12 @@ class V2ContainerHandler(object):
             raise ValueError
         return container
 
+    def update_attr(self, uuid, attr):
+        container = self.get_by_uuid(uuid)
+        for k, v in attr.items():
+            setattr(container, k, v)
+        db_session.commit()
+
     def delete_by_uuid(self, uuid):
         container = self.get_by_uuid(uuid)
         db_session.delete(container)
@@ -130,6 +136,9 @@ class V2ContainerHandler(object):
 
 class V2ProjectHandler(object):
 
+    def list_all(self):
+        return V2Project.query.all()
+
     def insert(self, kwargs):
         project = V2Project(**kwargs)
         db_session.add(project)
@@ -164,6 +173,9 @@ class V2ProjectHandler(object):
 
 class V2TaskHandler(object):
 
+    def list_all(self):
+        return V2Task.query.all()
+
     def insert(self, kwargs):
         task = V2Task(**kwargs)
         db_session.add(task)
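
All of the new V2 handler methods follow one fetch-by-uuid pattern; update_attr in particular is what the container and task code below uses to move rows through their status lifecycle. A standalone sketch of the pattern (FakeRow and FAKE_DB are stand-ins for the SQLAlchemy model and session):

    # Stand-in sketch of the update_attr pattern added above.
    class FakeRow(object):
        def __init__(self, uuid):
            self.uuid = uuid
            self.status = 0

    FAKE_DB = {'abc': FakeRow('abc')}

    class Handler(object):
        def get_by_uuid(self, uuid):
            try:
                return FAKE_DB[uuid]
            except KeyError:
                raise ValueError

        def update_attr(self, uuid, attr):
            row = self.get_by_uuid(uuid)
            for k, v in attr.items():
                setattr(row, k, v)
            # the real handler commits the SQLAlchemy session here

    Handler().update_attr('abc', {'status': 1})
    print(FAKE_DB['abc'].status)  # 1
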
diff --git a/api/resources/v1/testcases.py b/api/resources/v1/testcases.py
index fbeb36f..f159472 100644 (file)
@@ -22,6 +22,7 @@ from yardstick.common.utils import result_handler
 from api.utils.thread import TaskThread
 from api import ApiResource
 from api.swagger import models
+from api.database.v1.handlers import TasksHandler
 
 LOG = logging.getLogger(__name__)
 LOG.setLevel(logging.DEBUG)
@@ -80,7 +81,7 @@ class V1ReleaseCase(ApiResource):
         task_args.update(args.get('opts', {}))
 
         param = Param(task_args)
-        task_thread = TaskThread(Task().start, param)
+        task_thread = TaskThread(Task().start, param, TasksHandler())
         task_thread.start()
 
         return result_handler(consts.API_SUCCESS, {'task_id': task_id})
@@ -108,7 +109,7 @@ class V1SampleCase(ApiResource):
         task_args.update(args.get('opts', {}))
 
         param = Param(task_args)
-        task_thread = TaskThread(Task().start, param)
+        task_thread = TaskThread(Task().start, param, TasksHandler())
         task_thread.start()
 
         return result_handler(consts.API_SUCCESS, {'task_id': task_id})
diff --git a/api/resources/v2/containers.py b/api/resources/v2/containers.py
new file mode 100644 (file)
index 0000000..ce71303
--- /dev/null
@@ -0,0 +1,383 @@
+##############################################################################
+# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+from __future__ import absolute_import
+
+import logging
+import threading
+import time
+import uuid
+import os
+import glob
+
+from six.moves import configparser
+from oslo_serialization import jsonutils
+from docker import Client
+
+from api import ApiResource
+from api.utils import influx
+from api.database.v2.handlers import V2ContainerHandler
+from api.database.v2.handlers import V2EnvironmentHandler
+from yardstick.common import constants as consts
+from yardstick.common import utils
+from yardstick.common.utils import result_handler
+from yardstick.common.utils import get_free_port
+from yardstick.common.httpClient import HttpClient
+
+
+LOG = logging.getLogger(__name__)
+LOG.setLevel(logging.DEBUG)
+
+environment_handler = V2EnvironmentHandler()
+container_handler = V2ContainerHandler()
+
+
+class V2Containers(ApiResource):
+
+    def post(self):
+        return self._dispatch_post()
+
+    def create_influxdb(self, args):
+        try:
+            environment_id = args['environment_id']
+        except KeyError:
+            return result_handler(consts.API_ERROR, 'environment_id must be provided')
+
+        try:
+            uuid.UUID(environment_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'invalid environment id')
+
+        try:
+            environment = environment_handler.get_by_uuid(environment_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'no such environment id')
+
+        container_info = environment.container_id
+        container_info = jsonutils.loads(container_info) if container_info else {}
+
+        if container_info.get('influxdb'):
+            return result_handler(consts.API_ERROR, 'influxdb container already exists')
+
+        name = 'influxdb-{}'.format(environment_id[:8])
+        port = get_free_port(consts.SERVER_IP)
+        container_id = str(uuid.uuid4())
+        LOG.info('%s will launch on port %s', name, port)
+
+        LOG.info('launching influxdb in background')
+        args = (name, port, container_id)
+        thread = threading.Thread(target=self._create_influxdb, args=args)
+        thread.start()
+
+        LOG.info('record container in database')
+        container_init_data = {
+            'uuid': container_id,
+            'environment_id': environment_id,
+            'name': name,
+            'port': port,
+            'status': 0
+        }
+        container_handler.insert(container_init_data)
+
+        LOG.info('update container in environment')
+        container_info['influxdb'] = container_id
+        environment_info = {'container_id': jsonutils.dumps(container_info)}
+        environment_handler.update_attr(environment_id, environment_info)
+
+        return result_handler(consts.API_SUCCESS, {'uuid': container_id})
+
+    def _check_image_exist(self, client, t):
+        return any(t in a['RepoTags'][0]
+                   for a in client.images() if a['RepoTags'])
+
+    def _create_influxdb(self, name, port, container_id):
+        client = Client(base_url=consts.DOCKER_URL)
+
+        try:
+            LOG.info('Checking if influxdb image exists')
+            if not self._check_image_exist(client, '%s:%s' %
+                                           (consts.INFLUXDB_IMAGE,
+                                            consts.INFLUXDB_TAG)):
+                LOG.info('Influxdb image does not exist, pulling it')
+                client.pull(consts.INFLUXDB_IMAGE, tag=consts.INFLUXDB_TAG)
+
+            LOG.info('Creating influxdb container')
+            container = self._create_influxdb_container(client, name, port)
+            LOG.info('Influxdb container is created')
+
+            time.sleep(5)
+
+            container = client.inspect_container(container['Id'])
+            ip = container['NetworkSettings']['Networks']['bridge']['IPAddress']
+            LOG.debug('container ip is: %s', ip)
+
+            LOG.info('Changing output to influxdb')
+            self._change_output_to_influxdb(ip)
+
+            LOG.info('Config influxdb')
+            self._config_influxdb()
+
+            container_handler.update_attr(container_id, {'status': 1})
+
+            LOG.info('Finished')
+        except Exception:
+            container_handler.update_attr(container_id, {'status': 2})
+            LOG.exception('Creating influxdb failed')
+
+    def _create_influxdb_container(self, client, name, port):
+
+        ports = [port]
+        port_bindings = {8086: port}
+        restart_policy = {"MaximumRetryCount": 0, "Name": "always"}
+        host_config = client.create_host_config(port_bindings=port_bindings,
+                                                restart_policy=restart_policy)
+
+        LOG.info('Creating container')
+        container = client.create_container(image='%s:%s' %
+                                            (consts.INFLUXDB_IMAGE,
+                                             consts.INFLUXDB_TAG),
+                                            ports=ports,
+                                            name=name,
+                                            detach=True,
+                                            tty=True,
+                                            host_config=host_config)
+        LOG.info('Starting container')
+        client.start(container)
+        return container
+
+    def _config_influxdb(self):
+        try:
+            client = influx.get_data_db_client()
+            client.create_user(consts.INFLUXDB_USER,
+                               consts.INFLUXDB_PASS,
+                               consts.INFLUXDB_DB_NAME)
+            client.create_database(consts.INFLUXDB_DB_NAME)
+            LOG.info('InfluxDB configured successfully')
+        except Exception:
+            LOG.exception('Config influxdb failed')
+
+    def _change_output_to_influxdb(self, ip):
+        utils.makedirs(consts.CONF_DIR)
+
+        parser = configparser.ConfigParser()
+        LOG.info('Reading output sample configuration')
+        parser.read(consts.CONF_SAMPLE_FILE)
+
+        LOG.info('Set dispatcher to influxdb')
+        parser.set('DEFAULT', 'dispatcher', 'influxdb')
+        parser.set('dispatcher_influxdb', 'target',
+                   'http://{}:{}'.format(ip, 8086))
+
+        LOG.info('Writing to %s', consts.CONF_FILE)
+        with open(consts.CONF_FILE, 'w') as f:
+            parser.write(f)
+
+    def create_grafana(self, args):
+        try:
+            environment_id = args['environment_id']
+        except KeyError:
+            return result_handler(consts.API_ERROR, 'environment_id must be provided')
+
+        try:
+            uuid.UUID(environment_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'invalid environment id')
+
+        try:
+            environment = environment_handler.get_by_uuid(environment_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'no such environment id')
+
+        container_info = environment.container_id
+        container_info = jsonutils.loads(container_info) if container_info else {}
+
+        if not container_info.get('influxdb'):
+            return result_handler(consts.API_ERROR, 'influxdb not set')
+
+        if container_info.get('grafana'):
+            return result_handler(consts.API_ERROR, 'grafana container already exists')
+
+        name = 'grafana-{}'.format(environment_id[:8])
+        port = get_free_port(consts.SERVER_IP)
+        container_id = str(uuid.uuid4())
+
+        args = (name, port, container_id)
+        thread = threading.Thread(target=self._create_grafana, args=args)
+        thread.start()
+
+        container_init_data = {
+            'uuid': container_id,
+            'environment_id': environment_id,
+            'name': name,
+            'port': port,
+            'status': 0
+        }
+        container_handler.insert(container_init_data)
+
+        container_info['grafana'] = container_id
+        environment_info = {'container_id': jsonutils.dumps(container_info)}
+        environment_handler.update_attr(environment_id, environment_info)
+
+        return result_handler(consts.API_SUCCESS, {'uuid': container_id})
+
+    def _create_grafana(self, name, port, container_id):
+        client = Client(base_url=consts.DOCKER_URL)
+
+        try:
+            LOG.info('Checking if grafana image exists')
+            image = '{}:{}'.format(consts.GRAFANA_IMAGE, consts.GRAFANA_TAG)
+            if not self._check_image_exist(client, image):
+                LOG.info('Grafana image does not exist, pulling it')
+                client.pull(consts.GRAFANA_IMAGE, consts.GRAFANA_TAG)
+
+            LOG.info('Creating grafana container')
+            container = self._create_grafana_container(client, name, port)
+            LOG.info('Grafana container is created')
+
+            time.sleep(5)
+
+            container = client.inspect_container(container['Id'])
+            ip = container['NetworkSettings']['Networks']['bridge']['IPAddress']
+            LOG.debug('container ip is: %s', ip)
+
+            LOG.info('Creating data source for grafana')
+            self._create_data_source(ip)
+
+            LOG.info('Creating dashboard for grafana')
+            self._create_dashboard(ip)
+
+            container_handler.update_attr(container_id, {'status': 1})
+            LOG.info('Finished')
+        except Exception:
+            container_handler.update_attr(container_id, {'status': 2})
+            LOG.exception('Creating grafana failed')
+
+    def _create_dashboard(self, ip):
+        url = 'http://admin:admin@{}:{}/api/dashboards/db'.format(ip, 3000)
+        path = os.path.join(consts.REPOS_DIR, 'dashboard', '*dashboard.json')
+
+        for i in sorted(glob.iglob(path)):
+            with open(i) as f:
+                data = jsonutils.load(f)
+            try:
+                HttpClient().post(url, data)
+            except Exception:
+                LOG.exception('Create dashboard %s failed', i)
+                raise
+
+    def _create_data_source(self, ip):
+        url = 'http://admin:admin@{}:{}/api/datasources'.format(ip, 3000)
+
+        influx_conf = utils.parse_ini_file(consts.CONF_FILE)
+        try:
+            influx_url = influx_conf['dispatcher_influxdb']['target']
+        except KeyError:
+            LOG.exception('influxdb url not set in yardstick.conf')
+            raise
+
+        data = {
+            "name": "yardstick",
+            "type": "influxdb",
+            "access": "proxy",
+            "url": influx_url,
+            "password": "root",
+            "user": "root",
+            "database": "yardstick",
+            "basicAuth": True,
+            "basicAuthUser": "admin",
+            "basicAuthPassword": "admin",
+            "isDefault": False,
+        }
+        try:
+            HttpClient().post(url, data)
+        except Exception:
+            LOG.exception('Create datasources failed')
+            raise
+
+    def _create_grafana_container(self, client, name, port):
+        ports = [3000]
+        port_bindings = {3000: port}
+        restart_policy = {"MaximumRetryCount": 0, "Name": "always"}
+        host_config = client.create_host_config(port_bindings=port_bindings,
+                                                restart_policy=restart_policy)
+
+        LOG.info('Creating container')
+        container = client.create_container(image='%s:%s' %
+                                            (consts.GRAFANA_IMAGE,
+                                             consts.GRAFANA_TAG),
+                                            name=name,
+                                            ports=ports,
+                                            detach=True,
+                                            tty=True,
+                                            host_config=host_config)
+        LOG.info('Starting container')
+        client.start(container)
+        return container
+
+
+class V2Container(ApiResource):
+
+    def get(self, container_id):
+        try:
+            uuid.UUID(container_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'invalid container id')
+
+        try:
+            container = container_handler.get_by_uuid(container_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'no such container id')
+
+        name = container.name
+        client = Client(base_url=consts.DOCKER_URL)
+        info = client.inspect_container(name)
+
+        data = {
+            'name': name,
+            'status': info.get('State', {}).get('Status', 'error'),
+            'time': info.get('Created'),
+            'port': container.port
+        }
+
+        return result_handler(consts.API_SUCCESS, {'container': data})
+
+    def delete(self, container_id):
+        try:
+            uuid.UUID(container_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'invalid container id')
+
+        try:
+            container = container_handler.get_by_uuid(container_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'no such container id')
+
+        environment_id = container.environment_id
+
+        client = Client(base_url=consts.DOCKER_URL)
+        LOG.info('delete container: %s', container.name)
+        try:
+            client.remove_container(container.name, force=True)
+        except Exception:
+            LOG.exception('delete container failed')
+            return result_handler(consts.API_ERROR, 'delete container failed')
+
+        LOG.info('delete container in database')
+        container_handler.delete_by_uuid(container_id)
+
+        LOG.info('update container in environment')
+        environment = environment_handler.get_by_uuid(environment_id)
+        container_info = jsonutils.loads(environment.container_id)
+        key = next(k for k, v in container_info.items() if v == container_id)
+        container_info.pop(key)
+        environment_delete_data = {
+            'container_id': jsonutils.dumps(container_info)
+        }
+        environment_handler.update_attr(environment_id, environment_delete_data)
+
+        return result_handler(consts.API_SUCCESS, {'container': container_id})
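
Worth noting for reviewers: environment.container_id is not a single id but a JSON text column mapping a role name to a container uuid, which is why delete() has to reverse-lookup the key. A sketch of its lifecycle (stdlib json is used here so the snippet runs standalone; the code itself uses oslo_serialization.jsonutils, and the uuids are made up):

    import json

    container_info = {}                             # fresh environment
    container_info['influxdb'] = 'uuid-influx'      # set by create_influxdb
    container_info['grafana'] = 'uuid-grafana'      # set by create_grafana
    stored = json.dumps(container_info)             # saved via update_attr

    # delete() reverses the mapping: find the role owning this uuid
    loaded = json.loads(stored)
    key = next(k for k, v in loaded.items() if v == 'uuid-grafana')
    loaded.pop(key)
    print(json.dumps(loaded))                       # {"influxdb": "uuid-influx"}
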
diff --git a/api/resources/v2/images.py b/api/resources/v2/images.py
new file mode 100644 (file)
index 0000000..7018184
--- /dev/null
@@ -0,0 +1,72 @@
+import logging
+import subprocess
+import threading
+
+from api import ApiResource
+from yardstick.common.utils import result_handler
+from yardstick.common.utils import source_env
+from yardstick.common.utils import change_obj_to_dict
+from yardstick.common.openstack_utils import get_nova_client
+from yardstick.common import constants as consts
+
+LOG = logging.getLogger(__name__)
+LOG.setLevel(logging.DEBUG)
+
+
+class V2Images(ApiResource):
+
+    def get(self):
+        try:
+            source_env(consts.OPENRC)
+        except Exception:
+            return result_handler(consts.API_ERROR, 'source openrc error')
+
+        nova_client = get_nova_client()
+        try:
+            images_list = nova_client.images.list()
+        except Exception:
+            return result_handler(consts.API_ERROR, 'get images error')
+        else:
+            images = [self.get_info(change_obj_to_dict(i)) for i in images_list]
+            status = 1 if all(i['status'] == 'ACTIVE' for i in images) else 0
+
+        return result_handler(consts.API_SUCCESS, {'status': status, 'images': images})
+
+    def post(self):
+        return self._dispatch_post()
+
+    def get_info(self, data):
+        result = {
+            'name': data.get('name', ''),
+            'size': data.get('OS-EXT-IMG-SIZE:size', ''),
+            'status': data.get('status', ''),
+            'time': data.get('updated', '')
+        }
+        return result
+
+    def load_image(self, args):
+        thread = threading.Thread(target=self._load_images)
+        thread.start()
+        return result_handler(consts.API_SUCCESS, {})
+
+    def _load_images(self):
+        LOG.info('source openrc')
+        source_env(consts.OPENRC)
+
+        LOG.info('clean images')
+        cmd = [consts.CLEAN_IMAGES_SCRIPT]
+        p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+                             stderr=subprocess.PIPE, cwd=consts.REPOS_DIR)
+        _, err = p.communicate()
+        if p.returncode != 0:
+            LOG.error('clean image failed: %s', err)
+
+        LOG.info('load images')
+        cmd = [consts.LOAD_IMAGES_SCRIPT]
+        p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+                             stderr=subprocess.PIPE, cwd=consts.REPOS_DIR)
+        _, err = p.communicate()
+        if p.returncode != 0:
+            LOG.error('load image failed: %s', err)
+
+        LOG.info('Done')
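
Every endpoint in this change funnels its response through result_handler(status, data). Judging from how callers unpack it, the envelope is assumed to be a plain dict like the sketch below; the real implementation lives in yardstick/common/utils.py:

    # Assumed shape of the result_handler envelope (sketch, not the real code).
    def result_handler_sketch(status, data):
        return {
            'status': status,  # consts.API_SUCCESS or consts.API_ERROR
            'result': data,    # e.g. {'status': 1, 'images': [...]} for V2Images
        }

    print(result_handler_sketch(1, {'status': 1, 'images': []}))
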
diff --git a/api/resources/v2/pods.py b/api/resources/v2/pods.py
index ffb8a60..ebc1312 100644 (file)
@@ -77,3 +77,24 @@ class V2Pod(ApiResource):
         content = jsonutils.loads(pod.content)
 
         return result_handler(consts.API_SUCCESS, {'pod': content})
+
+    def delete(self, pod_id):
+        try:
+            uuid.UUID(pod_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'invalid pod id')
+
+        pod_handler = V2PodHandler()
+        try:
+            pod = pod_handler.get_by_uuid(pod_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'no such pod')
+
+        LOG.info('update pod in environment')
+        environment_handler = V2EnvironmentHandler()
+        environment_handler.update_attr(pod.environment_id, {'pod_id': None})
+
+        LOG.info('delete pod in database')
+        pod_handler.delete_by_uuid(pod_id)
+
+        return result_handler(consts.API_SUCCESS, {'pod': pod_id})
diff --git a/api/resources/v2/projects.py b/api/resources/v2/projects.py
new file mode 100644 (file)
index 0000000..376cf1a
--- /dev/null
@@ -0,0 +1,97 @@
+import uuid
+import logging
+
+from datetime import datetime
+
+from api import ApiResource
+from api.database.v2.handlers import V2ProjectHandler
+from api.database.v2.handlers import V2TaskHandler
+from yardstick.common.utils import result_handler
+from yardstick.common.utils import change_obj_to_dict
+from yardstick.common import constants as consts
+
+LOG = logging.getLogger(__name__)
+LOG.setLevel(logging.DEBUG)
+
+
+class V2Projects(ApiResource):
+
+    def get(self):
+        project_handler = V2ProjectHandler()
+        projects = [change_obj_to_dict(p) for p in project_handler.list_all()]
+
+        for p in projects:
+            tasks = p['tasks']
+            p['tasks'] = tasks.split(',') if tasks else []
+
+        return result_handler(consts.API_SUCCESS, {'projects': projects})
+
+    def post(self):
+        return self._dispatch_post()
+
+    def create_project(self, args):
+        try:
+            name = args['name']
+        except KeyError:
+            return result_handler(consts.API_ERROR, 'name must be provided')
+
+        project_id = str(uuid.uuid4())
+        create_time = datetime.now()
+        project_handler = V2ProjectHandler()
+
+        project_init_data = {
+            'uuid': project_id,
+            'name': name,
+            'time': create_time
+        }
+        project_handler.insert(project_init_data)
+
+        return result_handler(consts.API_SUCCESS, {'uuid': project_id})
+
+
+class V2Project(ApiResource):
+
+    def get(self, project_id):
+        try:
+            uuid.UUID(project_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'invalid project id')
+
+        project_handler = V2ProjectHandler()
+        try:
+            project = project_handler.get_by_uuid(project_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'no such project id')
+
+        project_info = change_obj_to_dict(project)
+        tasks = project_info['tasks']
+        project_info['tasks'] = tasks.split(',') if tasks else []
+
+        return result_handler(consts.API_SUCCESS, {'project': project_info})
+
+    def delete(self, project_id):
+        try:
+            uuid.UUID(project_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'invalid project id')
+
+        project_handler = V2ProjectHandler()
+        try:
+            project = project_handler.get_by_uuid(project_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'no such project id')
+
+        if project.tasks:
+            LOG.info('delete related task')
+            task_handler = V2TaskHandler()
+            for task_id in project.tasks.split(','):
+                LOG.debug('delete task: %s', task_id)
+                try:
+                    task_handler.delete_by_uuid(task_id)
+                except ValueError:
+                    LOG.exception('no such task id: %s', task_id)
+
+        LOG.info('delete project in database')
+        project_handler.delete_by_uuid(project_id)
+
+        return result_handler(consts.API_SUCCESS, {'project': project_id})
diff --git a/api/resources/v2/tasks.py b/api/resources/v2/tasks.py
new file mode 100644 (file)
index 0000000..9790d76
--- /dev/null
@@ -0,0 +1,246 @@
+import uuid
+import logging
+from datetime import datetime
+
+from oslo_serialization import jsonutils
+
+from api import ApiResource
+from api.database.v2.handlers import V2TaskHandler
+from api.database.v2.handlers import V2ProjectHandler
+from api.database.v2.handlers import V2EnvironmentHandler
+from api.utils.thread import TaskThread
+from yardstick.common.utils import result_handler
+from yardstick.common.utils import change_obj_to_dict
+from yardstick.common import constants as consts
+from yardstick.benchmark.core.task import Task
+from yardstick.benchmark.core import Param
+
+LOG = logging.getLogger(__name__)
+LOG.setLevel(logging.DEBUG)
+
+
+class V2Tasks(ApiResource):
+
+    def get(self):
+        task_handler = V2TaskHandler()
+        tasks = [change_obj_to_dict(t) for t in task_handler.list_all()]
+
+        for t in tasks:
+            result = t['result']
+            t['result'] = jsonutils.loads(result) if result else None
+
+        return result_handler(consts.API_SUCCESS, {'tasks': tasks})
+
+    def post(self):
+        return self._dispatch_post()
+
+    def create_task(self, args):
+        try:
+            name = args['name']
+        except KeyError:
+            return result_handler(consts.API_ERROR, 'name must be provided')
+
+        try:
+            project_id = args['project_id']
+        except KeyError:
+            return result_handler(consts.API_ERROR, 'project_id must be provided')
+
+        task_id = str(uuid.uuid4())
+        create_time = datetime.now()
+        task_handler = V2TaskHandler()
+
+        LOG.info('create task in database')
+        task_init_data = {
+            'uuid': task_id,
+            'project_id': project_id,
+            'name': name,
+            'time': create_time,
+            'status': -1
+        }
+        task_handler.insert(task_init_data)
+
+        LOG.info('create task in project')
+        project_handler = V2ProjectHandler()
+        project_handler.append_attr(project_id, {'tasks': task_id})
+
+        return result_handler(consts.API_SUCCESS, {'uuid': task_id})
+
+
+class V2Task(ApiResource):
+
+    def get(self, task_id):
+        try:
+            uuid.UUID(task_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'invalid task id')
+
+        task_handler = V2TaskHandler()
+        try:
+            task = task_handler.get_by_uuid(task_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'no such task id')
+
+        task_info = change_obj_to_dict(task)
+        result = task_info['result']
+        task_info['result'] = jsonutils.loads(result) if result else None
+
+        return result_handler(consts.API_SUCCESS, {'task': task_info})
+
+    def delete(self, task_id):
+        try:
+            uuid.UUID(task_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'invalid task id')
+
+        task_handler = V2TaskHandler()
+        try:
+            project_id = task_handler.get_by_uuid(task_id).project_id
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'no such task id')
+
+        LOG.info('delete task in database')
+        task_handler.delete_by_uuid(task_id)
+
+        project_handler = V2ProjectHandler()
+        project = project_handler.get_by_uuid(project_id)
+
+        if project.tasks:
+            LOG.info('update tasks in project')
+            new_task_list = project.tasks.split(',')
+            new_task_list.remove(task_id)
+            if new_task_list:
+                new_tasks = ','.join(new_task_list)
+            else:
+                new_tasks = None
+            project_handler.update_attr(project_id, {'tasks': new_tasks})
+
+        return result_handler(consts.API_SUCCESS, {'task': task_id})
+
+    def put(self, task_id):
+
+        try:
+            uuid.UUID(task_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'invalid task id')
+
+        task_handler = V2TaskHandler()
+        try:
+            task_handler.get_by_uuid(task_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'no such task id')
+
+        return self._dispatch_post(task_id=task_id)
+
+    def add_environment(self, args):
+
+        task_id = args['task_id']
+        try:
+            environment_id = args['environment_id']
+        except KeyError:
+            return result_handler(consts.API_ERROR, 'environment_id must be provided')
+
+        try:
+            uuid.UUID(environment_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'invalid environment id')
+
+        environment_handler = V2EnvironmentHandler()
+        try:
+            environment_handler.get_by_uuid(environment_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'no such environment id')
+
+        LOG.info('update environment_id in task')
+        task_handler = V2TaskHandler()
+        task_handler.update_attr(task_id, {'environment_id': environment_id})
+
+        return result_handler(consts.API_SUCCESS, {'uuid': task_id})
+
+    def add_case(self, args):
+        task_id = args['task_id']
+        try:
+            name = args['case_name']
+        except KeyError:
+            return result_handler(consts.API_ERROR, 'case_name must be provided')
+
+        try:
+            content = args['case_content']
+        except KeyError:
+            return result_handler(consts.API_ERROR, 'case_content must be provided')
+
+        LOG.info('update case info in task')
+        task_handler = V2TaskHandler()
+        task_update_data = {
+            'case_name': name,
+            'content': content,
+            'suite': False
+        }
+        task_handler.update_attr(task_id, task_update_data)
+
+        return result_handler(consts.API_SUCCESS, {'uuid': task_id})
+
+    def add_suite(self, args):
+        task_id = args['task_id']
+        try:
+            name = args['suite_name']
+        except KeyError:
+            return result_handler(consts.API_ERROR, 'suite_name must be provided')
+
+        try:
+            content = args['suite_content']
+        except KeyError:
+            return result_handler(consts.API_ERROR, 'suite_content must be provided')
+
+        LOG.info('update suite info in task')
+        task_handler = V2TaskHandler()
+        task_update_data = {
+            'case_name': name,
+            'content': content,
+            'suite': True
+        }
+        task_handler.update_attr(task_id, task_update_data)
+
+        return result_handler(consts.API_SUCCESS, {'uuid': task_id})
+
+    def run(self, args):
+        try:
+            task_id = args['task_id']
+        except KeyError:
+            return result_handler(consts.API_ERROR, 'task_id must be provided')
+
+        try:
+            uuid.UUID(task_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'invalid task id')
+
+        task_handler = V2TaskHandler()
+        try:
+            task = task_handler.get_by_uuid(task_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'no such task id')
+
+        if not task.environment_id:
+            return result_handler(consts.API_ERROR, 'environment not set')
+
+        if not task.case_name or not task.content:
+            return result_handler(consts.API_ERROR, 'case not set')
+
+        if task.status == 0:
+            return result_handler(consts.API_ERROR, 'task is already running')
+
+        with open('/tmp/{}.yaml'.format(task.case_name), 'w') as f:
+            f.write(task.content)
+
+        data = {
+            'inputfile': ['/tmp/{}.yaml'.format(task.case_name)],
+            'task_id': task_id
+        }
+        if task.suite:
+            data.update({'suite': True})
+
+        LOG.info('start task thread')
+        param = Param(data)
+        task_thread = TaskThread(Task().start, param, task_handler)
+        task_thread.start()
+
+        return result_handler(consts.API_SUCCESS, {'uuid': task_id})
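
Putting the pieces together, a task is driven entirely through POST/PUT actions dispatched by name. A hedged end-to-end sketch using HTTP calls (the action/args body shape is inferred from _dispatch_post; the host, port and placeholder ids are assumptions, not part of this change):

    import requests

    BASE = 'http://localhost:8888/api/v2/yardstick'           # assumed host/port
    project_id = '11111111-1111-1111-1111-111111111111'       # placeholder
    environment_id = '22222222-2222-2222-2222-222222222222'   # placeholder

    # create the task under a project
    r = requests.post(BASE + '/tasks', json={
        'action': 'create_task',
        'args': {'name': 'demo', 'project_id': project_id}})
    task_id = r.json()['result']['uuid']

    # attach an environment and a case, then run -- each is a PUT action on
    # the task resource; V2Task.put injects task_id into args via kwargs
    for action, args in [
            ('add_environment', {'environment_id': environment_id}),
            ('add_case', {'case_name': 'ping', 'case_content': 'schema: ...'}),
            ('run', {})]:
        requests.put('{}/tasks/{}'.format(BASE, task_id),
                     json={'action': action, 'args': args})
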
diff --git a/api/resources/v2/testcases.py b/api/resources/v2/testcases.py
new file mode 100644 (file)
index 0000000..81b4aa8
--- /dev/null
@@ -0,0 +1,64 @@
+import logging
+import errno
+import os
+
+from api import ApiResource
+from yardstick.common.utils import result_handler
+from yardstick.common import constants as consts
+from yardstick.benchmark.core import Param
+from yardstick.benchmark.core.testcase import Testcase
+
+LOG = logging.getLogger(__name__)
+LOG.setLevel(logging.DEBUG)
+
+
+class V2Testcases(ApiResource):
+
+    def get(self):
+        param = Param({})
+        testcase_list = Testcase().list_all(param)
+        return result_handler(consts.API_SUCCESS, testcase_list)
+
+    def post(self):
+        return self._dispatch_post()
+
+    def upload_case(self, args):
+        try:
+            upload_file = args['file']
+        except KeyError:
+            return result_handler(consts.API_ERROR, 'file must be provided')
+
+        case_name = os.path.join(consts.TESTCASE_DIR, upload_file.filename)
+
+        LOG.info('save case file')
+        upload_file.save(case_name)
+
+        return result_handler(consts.API_SUCCESS, {'testcase': upload_file.filename})
+
+
+class V2Testcase(ApiResource):
+
+    def get(self, case_name):
+        case_path = os.path.join(consts.TESTCASE_DIR, '{}.yaml'.format(case_name))
+
+        try:
+            with open(case_path) as f:
+                data = f.read()
+        except IOError as e:
+            if e.errno == errno.ENOENT:
+                return result_handler(consts.API_ERROR, 'case does not exist')
+            raise
+
+        return result_handler(consts.API_SUCCESS, {'testcase': data})
+
+    def delete(self, case_name):
+        case_path = os.path.join(consts.TESTCASE_DIR, '{}.yaml'.format(case_name))
+
+        try:
+            os.remove(case_path)
+        except IOError as e:
+            if e.errno == errno.ENOENT:
+                return result_handler(consts.API_ERROR, 'case does not exist')
+            raise
+
+        return result_handler(consts.API_SUCCESS, {'testcase': case_name})
diff --git a/api/resources/v2/testsuites.py b/api/resources/v2/testsuites.py
new file mode 100644 (file)
index 0000000..ee942ef
--- /dev/null
@@ -0,0 +1,83 @@
+import os
+import errno
+import logging
+
+import yaml
+
+from api import ApiResource
+from yardstick.common.utils import result_handler
+from yardstick.common import constants as consts
+from yardstick.benchmark.core.testsuite import Testsuite
+from yardstick.benchmark.core import Param
+
+LOG = logging.getLogger(__name__)
+LOG.setLevel(logging.DEBUG)
+
+
+class V2Testsuites(ApiResource):
+
+    def get(self):
+        param = Param({})
+        testsuite_list = Testsuite().list_all(param)
+
+        data = {
+            'testsuites': testsuite_list
+        }
+
+        return result_handler(consts.API_SUCCESS, data)
+
+    def post(self):
+        return self._dispatch_post()
+
+    def create_suite(self, args):
+        try:
+            suite_name = args['name']
+        except KeyError:
+            return result_handler(consts.API_ERROR, 'name must be provided')
+
+        try:
+            testcases = args['testcases']
+        except KeyError:
+            return result_handler(consts.API_ERROR, 'testcases must be provided')
+
+        testcases = [{'file_name': '{}.yaml'.format(t)} for t in testcases]
+
+        suite = os.path.join(consts.TESTSUITE_DIR, '{}.yaml'.format(suite_name))
+        suite_content = {
+            'schema': 'yardstick:suite:0.1',
+            'name': suite_name,
+            'test_cases_dir': 'tests/opnfv/test_cases/',
+            'test_cases': testcases
+        }
+
+        LOG.info('write test suite')
+        with open(suite, 'w') as f:
+            yaml.dump(suite_content, f, default_flow_style=False)
+
+        return result_handler(consts.API_SUCCESS, {'suite': suite_name})
+
+
+class V2Testsuite(ApiResource):
+
+    def get(self, suite_name):
+        suite_path = os.path.join(consts.TESTSUITE_DIR, '{}.yaml'.format(suite_name))
+        try:
+            with open(suite_path) as f:
+                data = f.read()
+        except IOError as e:
+            if e.errno == errno.ENOENT:
+                return result_handler(consts.API_ERROR, 'suite does not exist')
+            raise
+
+        return result_handler(consts.API_SUCCESS, {'testsuite': data})
+
+    def delete(self, suite_name):
+        suite_path = os.path.join(consts.TESTSUITE_DIR, '{}.yaml'.format(suite_name))
+        try:
+            os.remove(suite_path)
+        except IOError as e:
+            if e.errno == errno.ENOENT:
+                return result_handler(consts.API_ERROR, 'suite does not exist')
+            raise
+
+        return result_handler(consts.API_SUCCESS, {'testsuite': suite_name})
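
For reference, create_suite above produces a minimal suite file. Reproducing the same yaml.dump call standalone (the testcase name is just an example; yaml.dump sorts keys alphabetically by default):

    import yaml

    suite_content = {
        'schema': 'yardstick:suite:0.1',
        'name': 'demo',
        'test_cases_dir': 'tests/opnfv/test_cases/',
        'test_cases': [{'file_name': 'opnfv_yardstick_tc002.yaml'}],
    }
    print(yaml.dump(suite_content, default_flow_style=False))
    # name: demo
    # schema: yardstick:suite:0.1
    # test_cases:
    # - file_name: opnfv_yardstick_tc002.yaml
    # test_cases_dir: tests/opnfv/test_cases/
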
diff --git a/api/urls.py b/api/urls.py
index 5c7e9f7..2211348 100644 (file)
@@ -52,5 +52,5 @@ urlpatterns = [
 
     Url('/api/v2/yardstick/testsuites', 'v2_testsuites'),
     Url('/api/v2/yardstick/testsuites/action', 'v2_testsuites'),
-    Url('/api/v2/yardstick/testsuites/<suite_name>', 'v2_testsuites')
+    Url('/api/v2/yardstick/testsuites/<suite_name>', 'v2_testsuite')
 ]
diff --git a/api/utils/thread.py b/api/utils/thread.py
index 2106548..5f4ec7e 100644 (file)
@@ -1,37 +1,45 @@
 import threading
+import os
 import logging
 
 from oslo_serialization import jsonutils
 
-from api.database.v1.handlers import TasksHandler
 from yardstick.common import constants as consts
 
-logger = logging.getLogger(__name__)
-logger.setLevel(logging.DEBUG)
+LOG = logging.getLogger(__name__)
+LOG.setLevel(logging.DEBUG)
 
 
 class TaskThread(threading.Thread):
 
-    def __init__(self, target, args):
+    def __init__(self, target, args, handler):
         super(TaskThread, self).__init__(target=target, args=args)
         self.target = target
         self.args = args
+        self.handler = handler
 
     def run(self):
-        task_handler = TasksHandler()
-        data = {'task_id': self.args.task_id, 'status': consts.TASK_NOT_DONE}
-        task_handler.insert(data)
+        if self.handler.__class__.__name__.lower().startswith('v2'):
+            self.handler.update_attr(self.args.task_id, {'status': consts.TASK_NOT_DONE})
+        else:
+            update_data = {'task_id': self.args.task_id, 'status': consts.TASK_NOT_DONE}
+            self.handler.insert(update_data)
 
-        logger.info('Starting run task')
+        LOG.info('Starting run task')
         try:
             data = self.target(self.args)
         except Exception as e:
-            logger.exception('Task Failed')
+            LOG.exception('Task Failed')
             update_data = {'status': consts.TASK_FAILED, 'error': str(e)}
-            task_handler.update_attr(self.args.task_id, update_data)
+            self.handler.update_attr(self.args.task_id, update_data)
         else:
-            logger.info('Task Finished')
-            logger.debug('Result: %s', data)
-
-            data['result'] = jsonutils.dumps(data.get('result', {}))
-            task_handler.update_attr(self.args.task_id, data)
+            LOG.info('Task Finished')
+            LOG.debug('Result: %s', data)
+
+            if self.handler.__class__.__name__.lower().startswith('v2'):
+                new_data = {'status': consts.TASK_DONE, 'result': jsonutils.dumps(data.get('result', {}))}
+                self.handler.update_attr(self.args.task_id, new_data)
+                os.remove(self.args.inputfile[0])
+            else:
+                data['result'] = jsonutils.dumps(data.get('result', {}))
+                self.handler.update_attr(self.args.task_id, data)
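
The refactored TaskThread takes its database handler explicitly and branches on it: v1 handlers still insert the task row from inside the thread, while v2 rows already exist and only receive status updates. The two call patterns, as they appear elsewhere in this change (the Param args and task uuid below are placeholders):

    from api.database.v1.handlers import TasksHandler
    from api.database.v2.handlers import V2TaskHandler
    from api.utils.thread import TaskThread
    from yardstick.benchmark.core import Param
    from yardstick.benchmark.core.task import Task

    param = Param({'inputfile': ['/tmp/ping.yaml'],  # placeholder args
                   'task_id': '33333333-3333-3333-3333-333333333333'})

    # v1 call site (api/resources/v1/testcases.py): the thread inserts the row
    TaskThread(Task().start, param, TasksHandler()).start()

    # v2 call site (api/resources/v2/tasks.py): the row was created by
    # V2Tasks.create_task, so the thread only flips its status, stores the
    # JSON result on success and then removes the temporary input file
    TaskThread(Task().start, param, V2TaskHandler()).start()
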
diff --git a/tests/unit/benchmark/contexts/test_model.py b/tests/unit/benchmark/contexts/test_model.py
index 3fb186b..1ce5503 100644 (file)
@@ -161,6 +161,23 @@ class NetworkTestCase(unittest.TestCase):
 
         self.assertEqual(model.Network.find_external_network(), 'ext_net')
 
+    def test_construct_gateway_ip_is_null(self):
+
+        attrs = {'gateway_ip': 'null'}
+        test_network = model.Network('foo', self.mock_context, attrs)
+        self.assertEqual(test_network.gateway_ip, 'null')
+
+    def test_construct_gateway_ip_is_none(self):
+
+        attrs = {'gateway_ip': None}
+        test_network = model.Network('foo', self.mock_context, attrs)
+        self.assertEqual(test_network.gateway_ip, 'null')
+
+    def test_construct_gateway_ip_is_absent(self):
+
+        attrs = {}
+        test_network = model.Network('foo', self.mock_context, attrs)
+        self.assertIsNone(test_network.gateway_ip)
 
 class ServerTestCase(unittest.TestCase):
 
@@ -214,11 +231,12 @@ class ServerTestCase(unittest.TestCase):
         attrs = {'image': 'some-image', 'flavor': 'some-flavor', 'floating_ip': '192.168.1.10', 'floating_ip_assoc': 'some-vm'}
         test_server = model.Server('foo', self.mock_context, attrs)
 
-        self.mock_context.flavors =  ['flavor1', 'flavor2', 'some-flavor']
+        self.mock_context.flavors = ['flavor1', 'flavor2', 'some-flavor']
 
         mock_network = mock.Mock()
         mock_network.name = 'some-network'
         mock_network.stack_name = 'some-network-stack'
+        mock_network.allowed_address_pairs = ["1", "2"]
         mock_network.subnet_stack_name = 'some-network-stack-subnet'
         mock_network.provider = 'sriov'
         mock_network.external_network = 'ext_net'
@@ -232,7 +250,8 @@ class ServerTestCase(unittest.TestCase):
             mock_network.stack_name,
             mock_network.subnet_stack_name,
             sec_group_id=self.mock_context.secgroup_name,
-            provider=mock_network.provider)
+            provider=mock_network.provider,
+            allowed_address_pairs=mock_network.allowed_address_pairs)
 
         mock_template.add_floating_ip.assert_called_with(
             'some-server-fip',
@@ -290,11 +309,12 @@ class ServerTestCase(unittest.TestCase):
         }
         test_server = model.Server('ServerFlavor-2', self.mock_context, attrs)
 
-        self.mock_context.flavors =  ['flavor2']
+        self.mock_context.flavors = ['flavor2']
         mock_network = mock.Mock()
-        mock_network.configure_mock(name='some-network', stack_name= 'some-network-stack',
-                                    subnet_stack_name = 'some-network-stack-subnet',
-                                    provider = 'some-provider')
+        mock_network.allowed_address_pairs = ["1", "2"]
+        mock_network.configure_mock(name='some-network', stack_name='some-network-stack',
+                                    subnet_stack_name='some-network-stack-subnet',
+                                    provider='some-provider')
 
         test_server._add_instance(mock_template, 'ServerFlavor-2',
                                   [mock_network], 'hints')
@@ -304,7 +324,8 @@ class ServerTestCase(unittest.TestCase):
             mock_network.stack_name,
             mock_network.subnet_stack_name,
             provider=mock_network.provider,
-            sec_group_id=self.mock_context.secgroup_name)
+            sec_group_id=self.mock_context.secgroup_name,
+            allowed_address_pairs=mock_network.allowed_address_pairs)
 
         mock_template.add_server.assert_called_with(
             'ServerFlavor-2', 'some-image',
diff --git a/yardstick/benchmark/contexts/heat.py b/yardstick/benchmark/contexts/heat.py
index 0a94dd9..d5349ea 100644 (file)
@@ -152,9 +152,12 @@ class HeatContext(Context):
             template.add_network(network.stack_name,
                                  network.physical_network,
                                  network.provider,
-                                 network.segmentation_id)
+                                 network.segmentation_id,
+                                 network.port_security_enabled)
             template.add_subnet(network.subnet_stack_name, network.stack_name,
-                                network.subnet_cidr)
+                                network.subnet_cidr,
+                                network.enable_dhcp,
+                                network.gateway_ip)
 
             if network.router:
                 template.add_router(network.router.stack_name,
diff --git a/yardstick/benchmark/contexts/model.py b/yardstick/benchmark/contexts/model.py
index 06538d8..6601ecf 100644 (file)
@@ -104,11 +104,24 @@ class Network(Object):
         self.stack_name = context.name + "-" + self.name
         self.subnet_stack_name = self.stack_name + "-subnet"
         self.subnet_cidr = attrs.get('cidr', '10.0.1.0/24')
+        self.enable_dhcp = attrs.get('enable_dhcp', 'true')
         self.router = None
         self.physical_network = attrs.get('physical_network', 'physnet1')
         self.provider = attrs.get('provider')
         self.segmentation_id = attrs.get('segmentation_id')
         self.network_type = attrs.get('network_type')
+        self.port_security_enabled = attrs.get('port_security_enabled', True)
+        self.allowed_address_pairs = attrs.get('allowed_address_pairs', [])
+        try:
+            # we require 'null' or '' to disable setting gateway_ip
+            self.gateway_ip = attrs['gateway_ip']
+        except KeyError:
+            # default to explicit None
+            self.gateway_ip = None
+        else:
+            # null is None in YAML, so we have to convert back to string
+            if self.gateway_ip is None:
+                self.gateway_ip = "null"
 
         if "external_network" in attrs:
             self.router = Router("router", self.name,
@@ -234,10 +247,16 @@ class Server(Object):     # pragma: no cover
         for network in networks:
             port_name = server_name + "-" + network.name + "-port"
             self.ports[network.name] = {"stack_name": port_name}
-            template.add_port(port_name, network.stack_name,
-                              network.subnet_stack_name,
-                              sec_group_id=self.secgroup_name,
-                              provider=network.provider)
+            # we can't use secgroups if port_security_enabled is False
+            if network.port_security_enabled:
+                sec_group_id = self.secgroup_name
+            else:
+                sec_group_id = None
+            # don't refactor to pass in network object, that causes JSON
+            # circular ref encode errors
+            template.add_port(port_name, network.stack_name, network.subnet_stack_name,
+                              sec_group_id=sec_group_id, provider=network.provider,
+                              allowed_address_pairs=network.allowed_address_pairs)
             port_name_list.append(port_name)
 
             if self.floating_ip:
@@ -248,7 +267,7 @@ class Server(Object):     # pragma: no cover
                                              external_network,
                                              port_name,
                                              network.router.stack_if_name,
-                                             self.secgroup_name)
+                                             sec_group_id)
                     self.floating_ip_assoc["stack_name"] = \
                         server_name + "-fip-assoc"
                     template.add_floating_ip_association(
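
The gateway_ip handling in the first hunk above is deliberately tri-state: an absent key means "let heat pick the subnet default", an explicit null in the YAML (which the parser loads as Python None) is re-encoded as the string "null" to mean "disable the gateway", and any other value passes through. A standalone mirror of that logic, matching the new unit tests in test_model.py (resolve_gateway_ip is illustrative, not the actual yardstick function):

    def resolve_gateway_ip(attrs):
        # mirrors Network.__init__ above
        try:
            gateway_ip = attrs['gateway_ip']
        except KeyError:
            return None  # absent: heat picks the subnet default
        # YAML null arrives as Python None; convert back to the string 'null'
        return 'null' if gateway_ip is None else gateway_ip

    print(resolve_gateway_ip({}))                          # None
    print(resolve_gateway_ip({'gateway_ip': None}))        # null
    print(resolve_gateway_ip({'gateway_ip': '10.0.1.1'}))  # 10.0.1.1
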
diff --git a/yardstick/benchmark/core/testsuite.py b/yardstick/benchmark/core/testsuite.py
new file mode 100644 (file)
index 0000000..e3940a0
--- /dev/null
@@ -0,0 +1,42 @@
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+""" Handler for yardstick command 'testcase' """
+from __future__ import absolute_import
+from __future__ import print_function
+
+import os
+import logging
+
+from yardstick.common import constants as consts
+
+LOG = logging.getLogger(__name__)
+
+
+class Testsuite(object):
+    """Testcase commands.
+
+       Set of commands to discover and display test cases.
+    """
+
+    def list_all(self, args):
+        """List existing test cases"""
+
+        testsuite_list = self._get_testsuite_file_list()
+
+        return testsuite_list
+
+    def _get_testsuite_file_list(self):
+        try:
+            testsuite_files = sorted(os.listdir(consts.TESTSUITE_DIR))
+        except OSError:
+            LOG.exception('Failed to list dir:\n%s\n', consts.TESTSUITE_DIR)
+            raise
+
+        return testsuite_files
diff --git a/yardstick/benchmark/scenarios/availability/attacker_conf.yaml b/yardstick/benchmark/scenarios/availability/attacker_conf.yaml
index b8c34ad..aa144ab 100644 (file)
@@ -16,6 +16,11 @@ kill-process:
   inject_script: ha_tools/fault_process_kill.bash
   recovery_script: ha_tools/start_service.bash
 
+kill-lxc-process:
+  check_script: ha_tools/check_lxc_process_python.bash
+  inject_script: ha_tools/fault_lxc_process_kill.bash
+  recovery_script: ha_tools/start_lxc_service.bash
+
 bare-metal-down:
   check_script: ha_tools/check_host_ping.bash
   recovery_script: ha_tools/ipmi_power.bash
@@ -34,4 +39,4 @@ stress-cpu:
 
 block-io:
   inject_script: ha_tools/disk/block_io.bash
-  recovery_script: ha_tools/disk/recovery_disk_io.bash
\ No newline at end of file
+  recovery_script: ha_tools/disk/recovery_disk_io.bash
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/check_lxc_process_python.bash b/yardstick/benchmark/scenarios/availability/ha_tools/check_lxc_process_python.bash
new file mode 100755 (executable)
index 0000000..6d2f4dd
--- /dev/null
@@ -0,0 +1,42 @@
+#!/bin/sh
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# check the status of a service
+
+set -e
+
+NOVA_API_PROCESS_1="nova-api-os-compute"
+NOVA_API_PROCESS_2="nova-api-metadata"
+NOVA_API_LXC_FILTER_1="nova_api_os_compute"
+NOVA_API_LXC_FILTER_2="nova_api_metadata"
+
+process_name=$1
+
+lxc_filter=$(echo "${process_name}" | sed 's/-/_/g')
+
+if [ "${lxc_filter}" = "glance_api" ]; then
+    lxc_filter="glance"
+fi
+
+if [ "${process_name}" = "nova-api" ]; then
+    container_1=$(lxc-ls -1 --filter="${NOVA_API_LXC_FILTER_1}")
+    container_2=$(lxc-ls -1 --filter="${NOVA_API_LXC_FILTER_2}")
+
+    echo $(($(lxc-attach -n "${container_1}" -- ps aux | grep -e "${NOVA_API_PROCESS_1}" | grep -v grep | grep -cv /bin/sh) + $(lxc-attach -n "${container_2}" -- ps aux | grep -e "${NOVA_API_PROCESS_2}" | grep -v grep | grep -cv /bin/sh)))
+else
+    container=$(lxc-ls -1 --filter="${lxc_filter}")
+
+    if [ "${process_name}" = "haproxy" ]; then
+        ps aux | grep -e "/usr/.*/${process_name}" | grep -v grep | grep -cv /bin/sh
+    else
+        lxc-attach -n "${container}" -- ps aux | grep -e "${process_name}" | grep -v grep | grep -cv /bin/sh
+    fi
+fi
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/fault_lxc_process_kill.bash b/yardstick/benchmark/scenarios/availability/ha_tools/fault_lxc_process_kill.bash
new file mode 100755 (executable)
index 0000000..b0b86ab
--- /dev/null
@@ -0,0 +1,65 @@
+#!/bin/sh
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Stop process by process name
+
+set -e
+
+NOVA_API_PROCESS_1="nova-api-os-compute"
+NOVA_API_PROCESS_2="nova-api-metadata"
+NOVA_API_LXC_FILTER_1="nova_api_os_compute"
+NOVA_API_LXC_FILTER_2="nova_api_metadata"
+
+process_name=$1
+
+lxc_filter=$(echo "${process_name}" | sed 's/-/_/g')
+
+if [ "${lxc_filter}" = "glance_api" ]; then
+    lxc_filter="glance"
+fi
+
+if [ "${process_name}" = "nova-api" ]; then
+    container_1=$(lxc-ls -1 --filter="${NOVA_API_LXC_FILTER_1}")
+    container_2=$(lxc-ls -1 --filter="${NOVA_API_LXC_FILTER_2}")
+
+    pids_1=$(lxc-attach -n "${container_1}" -- pgrep -f "/openstack/.*/${NOVA_API_PROCESS_1}")
+    for pid in ${pids_1};
+        do
+            lxc-attach -n "${container_1}" -- kill -9 "${pid}"
+        done
+
+    pids_2=$(lxc-attach -n "${container_2}" -- pgrep -f "/openstack/.*/${NOVA_API_PROCESS_2}")
+    for pid in ${pids_2};
+        do
+            lxc-attach -n "${container_2}" -- kill -9 "${pid}"
+        done
+else
+    container=$(lxc-ls -1 --filter="${lxc_filter}")
+
+    if [ "${process_name}" = "haproxy" ]; then
+        for pid in $(pgrep -f "/usr/.*/${process_name}");
+            do
+                kill -9 "${pid}"
+            done
+    elif [ "${process_name}" = "keystone" ]; then
+        pids=$(lxc-attach -n "${container}" -- ps aux | grep "keystone" | grep -iv heartbeat | grep -iv monitor | grep -v grep | grep -v /bin/sh | awk '{print $2}')
+        for pid in ${pids};
+            do
+                lxc-attach -n "${container}" -- kill -9 "${pid}"
+            done
+    else
+        pids=$(lxc-attach -n "${container}" -- pgrep -f "/openstack/.*/${process_name}")
+        for pid in ${pids};
+            do
+                lxc-attach -n "${container}" -- kill -9 "${pid}"
+            done
+    fi
+fi
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/nova/create_flavor.bash b/yardstick/benchmark/scenarios/availability/ha_tools/nova/create_flavor.bash
index aee516e..7408409 100644 (file)
@@ -20,4 +20,4 @@ else
     SECURE=""
 fi
 
-openstack "${SECURE}" flavor create $1 --id $2 --ram $3 --disk $4 --vcpus $5
+openstack ${SECURE} flavor create $1 --id $2 --ram $3 --disk $4 --vcpus $5
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/start_lxc_service.bash b/yardstick/benchmark/scenarios/availability/ha_tools/start_lxc_service.bash
new file mode 100755 (executable)
index 0000000..36a6739
--- /dev/null
@@ -0,0 +1,70 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Start a service and check the service is started
+
+set -e
+
+NOVA_API_SERVICE_1="nova-api-os-compute"
+NOVA_API_SERVICE_2="nova-api-metadata"
+NOVA_API_LXC_FILTER_1="nova_api_os_compute"
+NOVA_API_LXC_FILTER_2="nova_api_metadata"
+
+service_name=$1
+
+if [ "${service_name}" = "haproxy" ]; then
+    if which systemctl 2>/dev/null; then
+        systemctl start $service_name
+    else
+        service $service_name start
+    fi
+else
+    lxc_filter=${service_name//-/_}
+
+    if [ "${lxc_filter}" = "glance_api" ]; then
+        lxc_filter="glance"
+    fi
+
+    if [ "${service_name}" = "nova-api" ]; then
+        container_1=$(lxc-ls -1 --filter="${NOVA_API_LXC_FILTER_1}")
+        container_2=$(lxc-ls -1 --filter="${NOVA_API_LXC_FILTER_2}")
+
+        if lxc-attach -n "${container_1}" -- which systemctl 2>/dev/null; then
+            lxc-attach -n "${container_1}" -- systemctl start "${NOVA_API_SERVICE_1}"
+        else
+            lxc-attach -n "${container_1}" -- service "${NOVA_API_SERVICE_1}" start
+        fi
+
+        if lxc-attach -n "${container_2}" -- which systemctl 2>/dev/null; then
+            lxc-attach -n "${container_2}" -- systemctl start "${NOVA_API_SERVICE_2}"
+        else
+            lxc-attach -n "${container_2}" -- service "${NOVA_API_SERVICE_2}" start
+        fi
+    else
+        container=$(lxc-ls -1 --filter="${lxc_filter}")
+
+        Distributor=$(lxc-attach -n "${container}" -- lsb_release -a | grep "Distributor ID" | awk '{print $3}')
+
+        if [ "${Distributor}" != "Ubuntu" -a "${service_name}" != "keystone" -a "${service_name}" != "neutron-server" ]; then
+            service_name="openstack-"${service_name}
+        elif [ "${Distributor}" = "Ubuntu" -a "${service_name}" = "keystone" ]; then
+            service_name="apache2"
+        elif [ "${service_name}" = "keystone" ]; then
+            service_name="httpd"
+        fi
+
+        if lxc-attach -n "${container}" -- which systemctl 2>/dev/null; then
+            lxc-attach -n "${container}" -- systemctl start "${service_name}"
+        else
+            lxc-attach -n "${container}" -- service "${service_name}" start
+        fi
+    fi
+fi
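The start script pairs with the fault script above; a minimal sketch of the
recovery step (same staging assumptions as before):

    import subprocess

    # restart the service that the fault script killed; on Ubuntu controllers
    # "keystone" is rewritten to "apache2" inside the container
    subprocess.check_call(
        ["bash", "start_lxc_service.bash", "keystone"])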
index 5114492..a08347d 100644 (file)
@@ -13,6 +13,8 @@ schema: "yardstick:task:0.1"
 
 process-status:
   monitor_script: ha_tools/check_process_python.bash
+lxc_process-status:
+  monitor_script: ha_tools/check_lxc_process_python.bash
 nova-image-list:
   monitor_script: ha_tools/nova_image_list.bash
 service-status:
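A sketch of how a monitor entry resolves to its script (the loader shown here
is assumed; only the YAML keys above come from the config):

    import yaml

    with open("monitor_conf.yaml") as f:
        conf = yaml.safe_load(f)

    script = conf["lxc_process-status"]["monitor_script"]
    # -> "ha_tools/check_lxc_process_python.bash"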
index ad34817..b99e342 100644 (file)
@@ -40,10 +40,26 @@ class Fio(base.Scenario):
         type:    string
         unit:    na
         default: write
+    rwmixwrite - percentage of a mixed workload that should be writes
+        type: int
+        unit: percentage
+        default: 50
     ramp_time - run time before logging any performance
         type:    int
         unit:    seconds
         default: 20
+    direct - whether to use non-buffered (direct) I/O
+        type:    boolean
+        unit:    na
+        default: 1
+    size - total size of I/O for this job
+        type:    string
+        unit:    na
+        default: 1g
+    numjobs - number of clones (processes/threads performing the same workload) of this job
+        type:    int
+        unit:    na
+        default: 1
 
     Read link below for more fio args description:
         http://www.bluestop.org/fio/HOWTO.txt
@@ -74,8 +90,8 @@ class Fio(base.Scenario):
 
     def run(self, result):
         """execute the benchmark"""
-        default_args = "-ioengine=libaio -direct=1 -group_reporting " \
-            "-numjobs=1 -time_based --output-format=json"
+        default_args = "-ioengine=libaio -group_reporting -time_based -time_based " \
+            "--output-format=json"
 
         if not self.setup_done:
             self.setup()
@@ -86,6 +102,10 @@ class Fio(base.Scenario):
         iodepth = options.get("iodepth", "1")
         rw = options.get("rw", "write")
         ramp_time = options.get("ramp_time", 20)
+        size = options.get("size", "1g")
+        direct = options.get("direct", "1")
+        numjobs = options.get("numjobs", "1")
+        rwmixwrite = options.get("rwmixwrite", 50)
         name = "yardstick-fio"
         # if run by a duration runner
         duration_time = self.scenario_cfg["runner"].get("duration", None) \
@@ -99,10 +119,10 @@ class Fio(base.Scenario):
         else:
             runtime = 30
 
-        cmd_args = "-filename=%s -bs=%s -iodepth=%s -rw=%s -ramp_time=%s " \
-                   "-runtime=%s -name=%s %s" \
-                   % (filename, bs, iodepth, rw, ramp_time, runtime, name,
-                      default_args)
+        cmd_args = "-filename=%s -direct=%s -bs=%s -iodepth=%s -rw=%s -rwmixwrite=%s " \
+                   "-size=%s -ramp_time=%s -numjobs=%s -runtime=%s -name=%s %s" \
+                   % (filename, direct, bs, iodepth, rw, rwmixwrite, size, ramp_time, numjobs,
+                      runtime, name, default_args)
         cmd = "sudo bash fio.sh %s %s" % (filename, cmd_args)
         LOG.debug("Executing command: %s", cmd)
         # Set timeout, so that the cmd execution does not exit incorrectly
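For reference, a minimal sketch of the command line the scenario now builds
(option values are the documented defaults; the filename is illustrative):

    default_args = ("-ioengine=libaio -group_reporting -time_based "
                    "--output-format=json")
    cmd_args = ("-filename=%s -direct=%s -bs=%s -iodepth=%s -rw=%s "
                "-rwmixwrite=%s -size=%s -ramp_time=%s -numjobs=%s "
                "-runtime=%s -name=%s %s"
                % ("/home/ubuntu/data.raw", "1", "4k", "1", "write", 50,
                   "1g", 20, "1", 30, "yardstick-fio", default_args))
    # -> sudo bash fio.sh /home/ubuntu/data.raw -filename=... -direct=1 ...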
index a4f7b30..92bb7b7 100644 (file)
@@ -24,7 +24,10 @@ import os
 import subprocess
 import sys
 import collections
+import socket
+import random
 from functools import reduce
+from contextlib import closing
 
 import yaml
 import six
@@ -263,3 +266,11 @@ def set_dict_value(dic, keys, value):
         else:
             return_dic = return_dic[key]
     return dic
+
+
+def get_free_port(ip):
+    while True:
+        port = random.randint(5000, 10000)
+        # probe with a fresh socket each time: once connect_ex() has
+        # succeeded, the same socket cannot be reused for another connect
+        with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
+            if s.connect_ex((ip, port)) != 0:
+                return port
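Note the helper treats "connection refused" as "free", so it can race with
other processes binding the same port; callers should tolerate a retry. A
hypothetical use:

    port = get_free_port("192.168.1.10")   # address is illustrative
    assert 5000 <= port <= 10000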
index 2a907d1..57b23d3 100644 (file)
@@ -231,13 +231,16 @@ name (i.e. %s).\
         }
 
     def add_network(self, name, physical_network='physnet1', provider=None,
-                    segmentation_id=None):
+                    segmentation_id=None, port_security_enabled=True):
         """add to the template a Neutron Net"""
         log.debug("adding Neutron::Net '%s'", name)
         if provider is None:
             self.resources[name] = {
                 'type': 'OS::Neutron::Net',
-                'properties': {'name': name}
+                'properties': {
+                    'name': name,
+                    'port_security_enabled': port_security_enabled,
+                }
             }
         else:
             self.resources[name] = {
@@ -245,12 +248,12 @@ name (i.e. %s).\
                 'properties': {
                     'name': name,
                     'network_type': 'vlan',
-                    'physical_network': physical_network
-                }
+                    'physical_network': physical_network,
+                    'port_security_enabled': port_security_enabled,
+                },
             }
             if segmentation_id:
-                seg_id_dit = {'segmentation_id': segmentation_id}
-                self.resources[name]["properties"].update(seg_id_dit)
+                self.resources[name]['properties']['segmentation_id'] = segmentation_id
 
     def add_server_group(self, name, policies):     # pragma: no cover
         """add to the template a ServerGroup"""
@@ -262,8 +265,9 @@ name (i.e. %s).\
                            'policies': policies}
         }
 
-    def add_subnet(self, name, network, cidr):
-        """add to the template a Neutron Subnet"""
+    def add_subnet(self, name, network, cidr, enable_dhcp='true', gateway_ip=None):
+        """add to the template a Neutron Subnet
+        """
         log.debug("adding Neutron::Subnet '%s' in network '%s', cidr '%s'",
                   name, network, cidr)
         self.resources[name] = {
@@ -272,9 +276,12 @@ name (i.e. %s).\
             'properties': {
                 'name': name,
                 'cidr': cidr,
-                'network_id': {'get_resource': network}
+                'network_id': {'get_resource': network},
+                'enable_dhcp': enable_dhcp,
             }
         }
+        if gateway_ip is not None:
+            self.resources[name]['properties']['gateway_ip'] = gateway_ip
 
         self._template['outputs'][name] = {
             'description': 'subnet %s ID' % name,
@@ -316,9 +323,10 @@ name (i.e. %s).\
             }
         }
 
-    def add_port(self, name, network_name, subnet_name, sec_group_id=None,
-                 provider=None):
-        """add to the template a named Neutron Port"""
+    def add_port(self, name, network_name, subnet_name, sec_group_id=None, provider=None,
+                 allowed_address_pairs=None):
+        """add to the template a named Neutron Port
+        """
         log.debug("adding Neutron::Port '%s', network:'%s', subnet:'%s', "
                   "secgroup:%s", name, network_name, subnet_name, sec_group_id)
         self.resources[name] = {
@@ -341,6 +349,10 @@ name (i.e. %s).\
             self.resources[name]['properties']['security_groups'] = \
                 [sec_group_id]
 
+        if allowed_address_pairs:
+            self.resources[name]['properties'][
+                'allowed_address_pairs'] = allowed_address_pairs
+
         self._template['outputs'][name] = {
             'description': 'Address for interface %s' % name,
             'value': {'get_attr': [name, 'fixed_ips', 0, 'ip_address']}
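Taken together, a minimal sketch of the new template options (the HeatTemplate
instantiation is assumed; method signatures follow the diff above):

    template = HeatTemplate('demo')  # hypothetical constructor arguments
    template.add_network('test-net', port_security_enabled=False)
    template.add_subnet('test-subnet', 'test-net', '10.0.1.0/24',
                        enable_dhcp='true', gateway_ip='10.0.1.1')
    template.add_port('test-port', 'test-net', 'test-subnet',
                      allowed_address_pairs=[{'ip_address': '10.0.1.0/24'}])
    # resulting resource, as built by add_network():
    # {'type': 'OS::Neutron::Net',
    #  'properties': {'name': 'test-net', 'port_security_enabled': False}}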