Merge "import new _put_file_shell method from upstream rally"
authorRex Lee <limingjiang@huawei.com>
Mon, 5 Dec 2016 06:37:08 +0000 (06:37 +0000)
committerGerrit Code Review <gerrit@opnfv.org>
Mon, 5 Dec 2016 06:37:08 +0000 (06:37 +0000)
65 files changed:
api/actions/env.py
api/actions/result.py
api/actions/samples.py [new file with mode: 0644]
api/actions/test.py
api/api-prepare.sh
api/conf.py
api/server.py
api/swagger/__init__.py [new file with mode: 0644]
api/swagger/docs/results.yaml [new file with mode: 0644]
api/swagger/docs/testcases.yaml [new file with mode: 0644]
api/swagger/models.py [new file with mode: 0644]
api/urls.py
api/utils/common.py
api/views.py
dashboard/ping_dashboard.json [new file with mode: 0644]
docker/Dockerfile
install.sh
requirements.txt
setup.py
tests/ci/yardstick-verify
tests/unit/api/actions/test_env.py [deleted file]
tests/unit/api/actions/test_result.py [deleted file]
tests/unit/api/actions/test_test.py [deleted file]
tests/unit/api/test_views.py [deleted file]
tests/unit/api/utils/test_common.py
tests/unit/api/utils/test_daemonthread.py [deleted file]
tests/unit/benchmark/scenarios/networking/test_vsperf.py
tests/unit/test_ssh.py
yardstick/benchmark/contexts/node.py
yardstick/benchmark/runners/arithmetic.py
yardstick/benchmark/runners/base.py
yardstick/benchmark/runners/duration.py
yardstick/benchmark/runners/iteration.py
yardstick/benchmark/runners/sequence.py
yardstick/benchmark/scenarios/availability/actionrollbackers.py
yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py
yardstick/benchmark/scenarios/availability/attacker/attacker_general.py
yardstick/benchmark/scenarios/availability/attacker/attacker_process.py
yardstick/benchmark/scenarios/availability/attacker/baseattacker.py
yardstick/benchmark/scenarios/availability/director.py
yardstick/benchmark/scenarios/availability/monitor/basemonitor.py
yardstick/benchmark/scenarios/availability/monitor/monitor_command.py
yardstick/benchmark/scenarios/availability/monitor/monitor_general.py
yardstick/benchmark/scenarios/availability/monitor/monitor_process.py
yardstick/benchmark/scenarios/availability/operation/baseoperation.py
yardstick/benchmark/scenarios/availability/operation/operation_general.py
yardstick/benchmark/scenarios/availability/result_checker/baseresultchecker.py
yardstick/benchmark/scenarios/availability/result_checker/result_checker_general.py
yardstick/benchmark/scenarios/availability/scenario_general.py
yardstick/benchmark/scenarios/availability/serviceha.py
yardstick/benchmark/scenarios/compute/cachestat.py
yardstick/benchmark/scenarios/compute/cpuload.py
yardstick/benchmark/scenarios/compute/cyclictest.py
yardstick/benchmark/scenarios/compute/memload.py
yardstick/benchmark/scenarios/networking/netutilization.py
yardstick/benchmark/scenarios/networking/vsperf.py
yardstick/benchmark/scenarios/storage/storperf.py
yardstick/cmd/commands/env.py
yardstick/cmd/commands/task.py
yardstick/common/constants.py
yardstick/common/httpClient.py
yardstick/common/utils.py
yardstick/dispatcher/http.py
yardstick/dispatcher/influxdb.py
yardstick/ssh.py

index 3216499..9e53dde 100644 (file)
 ##############################################################################
 import logging
 import threading
+import subprocess
 import time
+import json
+import os
+import errno
 
 from docker import Client
 
 from yardstick.common import constants as config
 from yardstick.common import utils as yardstick_utils
+from yardstick.common.httpClient import HttpClient
 from api import conf as api_conf
-from api.utils import common as common_utils
 from api.utils import influx
+from api.utils.common import result_handler
 
 logger = logging.getLogger(__name__)
 
 
+def createGrafanaContainer(args):
+    thread = threading.Thread(target=_create_grafana)
+    thread.start()
+    return result_handler('success', [])
+
+
+def _create_grafana():
+    client = Client(base_url=config.DOCKER_URL)
+
+    try:
+        if not _check_image_exist(client, '%s:%s' % (config.GRAFANA_IMAGE,
+                                                     config.GRAFANA_TAGS)):
+            client.pull(config.GRAFANA_IMAGE, config.GRAFANA_TAGS)
+
+        _create_grafana_container(client)
+
+        time.sleep(5)
+
+        _create_data_source()
+
+        _create_dashboard()
+    except Exception as e:
+        logger.debug('Error: %s', e)
+
+
+def _create_dashboard():
+    url = 'http://admin:admin@%s:3000/api/dashboards/db' % api_conf.GATEWAY_IP
+    data = json.load(file('../dashboard/ping_dashboard.json'))
+    HttpClient().post(url, data)
+
+
+def _create_data_source():
+    url = 'http://admin:admin@%s:3000/api/datasources' % api_conf.GATEWAY_IP
+    data = {
+        "name": "yardstick",
+        "type": "influxdb",
+        "access": "proxy",
+        "url": "http://%s:8086" % api_conf.GATEWAY_IP,
+        "password": "root",
+        "user": "root",
+        "database": "yardstick",
+        "basicAuth": True,
+        "basicAuthUser": "admin",
+        "basicAuthPassword": "admin",
+        "isDefault": False,
+    }
+    HttpClient().post(url, data)
+
+
+def _create_grafana_container(client):
+    ports = [3000]
+    port_bindings = {k: k for k in ports}
+    host_config = client.create_host_config(port_bindings=port_bindings)
+
+    container = client.create_container(image='%s:%s' % (config.GRAFANA_IMAGE,
+                                                         config.GRAFANA_TAGS),
+                                        ports=ports,
+                                        detach=True,
+                                        tty=True,
+                                        host_config=host_config)
+    client.start(container)
+
+
+def _check_image_exist(client, t):
+    return any(t in a['RepoTags'][0] for a in client.images() if a['RepoTags'])
+
+
 def createInfluxDBContainer(args):
+    thread = threading.Thread(target=_create_influxdb)
+    thread.start()
+    return result_handler('success', [])
+
+
+def _create_influxdb():
+    client = Client(base_url=config.DOCKER_URL)
+
     try:
-        container = _create_influxdb_container()
         _config_output_file()
-        thread = threading.Thread(target=_config_influxdb)
-        thread.start()
-        return common_utils.result_handler('success', container)
+
+        if not _check_image_exist(client, '%s:%s' % (config.INFLUXDB_IMAGE,
+                                                     config.INFLUXDB_TAG)):
+            client.pull(config.INFLUXDB_IMAGE, tag=config.INFLUXDB_TAG)
+
+        _create_influxdb_container(client)
+
+        time.sleep(5)
+
+        _config_influxdb()
     except Exception as e:
-        message = 'Failed to create influxdb container: %s' % e
-        return common_utils.error_handler(message)
+        logger.debug('Error: %s', e)
 
 
-def _create_influxdb_container():
-    client = Client(base_url=config.DOCKER_URL)
+def _create_influxdb_container(client):
 
     ports = [8083, 8086]
     port_bindings = {k: k for k in ports}
     host_config = client.create_host_config(port_bindings=port_bindings)
 
-    container = client.create_container(image='tutum/influxdb',
+    container = client.create_container(image='%s:%s' % (config.INFLUXDB_IMAGE,
+                                                         config.INFLUXDB_TAG),
                                         ports=ports,
                                         detach=True,
                                         tty=True,
                                         host_config=host_config)
     client.start(container)
-    return container
 
 
 def _config_influxdb():
-    time.sleep(20)
     try:
         client = influx.get_data_db_client()
         client.create_user(config.USER, config.PASSWORD, config.DATABASE)
@@ -61,11 +144,11 @@ def _config_influxdb():
 
 
 def _config_output_file():
-    yardstick_utils.makedirs('/etc/yardstick')
-    with open('/etc/yardstick/yardstick.conf', 'w') as f:
+    yardstick_utils.makedirs(config.YARDSTICK_CONFIG_DIR)
+    with open(config.YARDSTICK_CONFIG_FILE, 'w') as f:
         f.write("""\
 [DEFAULT]
-debug = True
+debug = False
 dispatcher = influxdb
 
 [dispatcher_file]
@@ -83,3 +166,90 @@ username = root
 password = root
 """
                 % api_conf.GATEWAY_IP)
+
+
+def prepareYardstickEnv(args):
+    thread = threading.Thread(target=_prepare_env_daemon)
+    thread.start()
+    return result_handler('success', [])
+
+
+def _prepare_env_daemon():
+
+    installer_ip = os.environ.get('INSTALLER_IP', 'undefined')
+    installer_type = os.environ.get('INSTALLER_TYPE', 'undefined')
+
+    _check_variables(installer_ip, installer_type)
+
+    _create_directories()
+
+    rc_file = config.OPENSTACK_RC_FILE
+
+    _get_remote_rc_file(rc_file, installer_ip, installer_type)
+
+    _source_file(rc_file)
+
+    _append_external_network(rc_file)
+
+    _load_images()
+
+
+def _check_variables(installer_ip, installer_type):
+
+    if installer_ip == 'undefined':
+        raise SystemExit('Missing INSTALLER_IP')
+
+    if installer_type == 'undefined':
+        raise SystemExit('Missing INSTALLER_TYPE')
+    elif installer_type not in config.INSTALLERS:
+        raise SystemExit('INSTALLER_TYPE is not correct')
+
+
+def _create_directories():
+    yardstick_utils.makedirs(config.YARDSTICK_CONFIG_DIR)
+
+
+def _source_file(rc_file):
+    yardstick_utils.source_env(rc_file)
+
+
+def _get_remote_rc_file(rc_file, installer_ip, installer_type):
+
+    os_fetch_script = os.path.join(config.RELENG_DIR, config.OS_FETCH_SCRIPT)
+
+    try:
+        cmd = [os_fetch_script, '-d', rc_file, '-i', installer_type,
+               '-a', installer_ip]
+        p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
+        p.communicate()[0]
+
+        if p.returncode != 0:
+            logger.debug('Failed to fetch credentials from installer')
+    except OSError as e:
+        if e.errno != errno.EEXIST:
+            raise
+
+
+def _append_external_network(rc_file):
+    neutron_client = yardstick_utils.get_neutron_client()
+    networks = neutron_client.list_networks()['networks']
+    try:
+        ext_network = next(n['name'] for n in networks if n['router:external'])
+    except StopIteration:
+        logger.warning("Can't find external network")
+    else:
+        cmd = 'export EXTERNAL_NETWORK=%s' % ext_network
+        try:
+            with open(rc_file, 'a') as f:
+                f.write(cmd + '\n')
+        except OSError as e:
+            if e.errno != errno.EEXIST:
+                raise
+
+
+def _load_images():
+    cmd = [config.LOAD_IMAGES_SCRIPT]
+    p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+                         cwd=config.YARDSTICK_REPOS_DIR)
+    output = p.communicate()[0]
+    logger.debug('The result is: %s', output)
index 10112ac..1f200fb 100644 (file)
@@ -30,8 +30,6 @@ def getResult(args):
         message = 'measurement and task_id must be provided'
         return common_utils.error_handler(message)
 
-    measurement = conf.TEST_CASE_PRE + measurement
-
     query_template = "select * from %s where task_id='%s'"
     query_sql = query_template % ('tasklist', task_id)
     data = common_utils.translate_to_str(influx_utils.query(query_sql))
@@ -40,8 +38,12 @@ def getResult(args):
         return common_utils.result_handler(0, [])
 
     def _finished():
-        query_sql = query_template % (measurement, task_id)
+        query_sql = query_template % (conf.TEST_CASE_PRE + measurement,
+                                      task_id)
         data = common_utils.translate_to_str(influx_utils.query(query_sql))
+        if not data:
+            query_sql = query_template % (measurement, task_id)
+            data = common_utils.translate_to_str(influx_utils.query(query_sql))
 
         return common_utils.result_handler(1, data)
 
diff --git a/api/actions/samples.py b/api/actions/samples.py
new file mode 100644 (file)
index 0000000..545447a
--- /dev/null
@@ -0,0 +1,37 @@
+##############################################################################
+# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import uuid
+import os
+import logging
+
+from api import conf
+from api.utils import common as common_utils
+
+logger = logging.getLogger(__name__)
+
+
+def runTestCase(args):
+    try:
+        opts = args.get('opts', {})
+        testcase = args['testcase']
+    except KeyError:
+        return common_utils.error_handler('Lack of testcase argument')
+
+    testcase = os.path.join(conf.SAMPLE_PATH, testcase + '.yaml')
+
+    task_id = str(uuid.uuid4())
+
+    command_list = ['task', 'start']
+    command_list = common_utils.get_command_list(command_list, opts, testcase)
+    logger.debug('The command_list is: %s', command_list)
+
+    logger.debug('Start to execute command list')
+    common_utils.exec_command_task(command_list, task_id)
+
+    return common_utils.result_handler('success', task_id)
index b1dc212..fda0ffd 100644 (file)
@@ -7,7 +7,6 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 import uuid
-import json
 import os
 import logging
 
@@ -22,12 +21,7 @@ def runTestCase(args):
         opts = args.get('opts', {})
         testcase = args['testcase']
     except KeyError:
-        logger.error('Lack of testcase argument')
-        result = {
-            'status': 'error',
-            'message': 'need testcase name'
-        }
-        return json.dumps(result)
+        return common_utils.error_handler('Lack of testcase argument')
 
     testcase = os.path.join(conf.TEST_CASE_PATH,
                             conf.TEST_CASE_PRE + testcase + '.yaml')
@@ -41,8 +35,4 @@ def runTestCase(args):
     logger.debug('Start to execute command list')
     common_utils.exec_command_task(command_list, task_id)
 
-    result = {
-        'status': 'success',
-        'task_id': task_id
-    }
-    return json.dumps(result)
+    return common_utils.result_handler('success', task_id)
index c05dbb5..fade8cc 100755 (executable)
@@ -24,14 +24,26 @@ server {
     }
 }
 EOF
+echo "daemon off;" >> /etc/nginx/nginx.conf
 fi
 
 # nginx service start when boot
-cat << EOF >> /root/.bashrc
+supervisor_config='/etc/supervisor/conf.d/yardstick.conf'
 
-nginx_status=\$(service nginx status | grep not)
-if [ -n "\${nginx_status}" ];then
-    service nginx restart
-    uwsgi -i /home/opnfv/repos/yardstick/api/yardstick.ini
-fi
+if [[ ! -e "${supervisor_config}" ]];then
+    cat << EOF > "${supervisor_config}"
+[supervisord]
+nodaemon = true
+
+[program:yardstick_nginx]
+user = root
+command = service nginx restart
+autorestart = true
+
+[program:yardstick_uwsgi]
+user = root
+directory = /home/opnfv/repos/yardstick/api
+command = uwsgi -i yardstick.ini
+autorestart = true
 EOF
+fi
index e1da4ab..df44042 100644 (file)
@@ -18,6 +18,8 @@ TEST_ACTION = ['runTestCase']
 
 TEST_CASE_PATH = '../tests/opnfv/test_cases/'
 
+SAMPLE_PATH = '../samples/'
+
 TEST_CASE_PRE = 'opnfv_yardstick_'
 
 TEST_SUITE_PATH = '../tests/opnfv/test_suites/'
index 3f104c6..64a2b4f 100644 (file)
@@ -10,6 +10,7 @@ import logging
 
 from flask import Flask
 from flask_restful import Api
+from flasgger import Swagger
 
 from api.urls import urlpatterns
 from yardstick import _init_logging
@@ -18,8 +19,11 @@ logger = logging.getLogger(__name__)
 
 app = Flask(__name__)
 
+Swagger(app)
+
 api = Api(app)
 
+
 reduce(lambda a, b: a.add_resource(b.resource, b.url,
                                    endpoint=b.endpoint) or a, urlpatterns, api)
 
diff --git a/api/swagger/__init__.py b/api/swagger/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/api/swagger/docs/results.yaml b/api/swagger/docs/results.yaml
new file mode 100644 (file)
index 0000000..7bdab3e
--- /dev/null
@@ -0,0 +1,41 @@
+Query task result data
+
+This api offer the interface to get the result data via task_id
+We will return a result json dict
+---
+tags:
+  - Results
+parameters:
+  -
+    in: query
+    name: action
+    type: string
+    default: getResult
+    required: true
+  -
+    in: query
+    name: measurement
+    type: string
+    description: test case name
+    required: true
+  -
+    in: query
+    name: task_id
+    type: string
+    description: the task_id you get before
+    required: true
+responses:
+  200:
+    description: a result json dict
+    schema:
+      id: ResultModel
+      properties:
+        status:
+          type: string
+          description: the status of the certain task
+          default: success
+        result:
+          schema:
+            type: array
+            items:
+              type: object
diff --git a/api/swagger/docs/testcases.yaml b/api/swagger/docs/testcases.yaml
new file mode 100644 (file)
index 0000000..7bfe5e6
--- /dev/null
@@ -0,0 +1,50 @@
+TestCases Actions\r
+\r
+This API may offer many actions, including runTestCase\r
+\r
+action: runTestCase\r
+This api offer the interface to run a test case in yardstick\r
+we will return a task_id for querying\r
+you can use the returned task_id to get the result data\r
+---\r
+tags:\r
+  - Release Action\r
+parameters:\r
+  - in: body\r
+    name: body\r
+    description: this is the input json dict\r
+    schema:\r
+      id: TestCaseActionModel\r
+      required:\r
+        - action\r
+        - args\r
+      properties:\r
+        action:\r
+          type: string\r
+          description: this is action for testcases\r
+          default: runTestCase\r
+        args:\r
+          schema:\r
+            id: TestCaseActionArgsModel\r
+            required:\r
+              - testcase\r
+            properties:\r
+              testcase:\r
+                type: string\r
+                description: this is the test case name\r
+                default: tc002\r
+              opts:\r
+                schema:\r
+                  id: TestCaseActionArgsOptsModel\r
+responses:\r
+  200:\r
+    description: A result json dict\r
+    schema:\r
+      id: result\r
+      properties:\r
+        status:\r
+          type: string\r
+          default: success\r
+        result:\r
+          type: string\r
+          description: task_id of this task\r
diff --git a/api/swagger/models.py b/api/swagger/models.py
new file mode 100644 (file)
index 0000000..7c65fbb
--- /dev/null
@@ -0,0 +1,51 @@
+##############################################################################
+# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+from flask_restful import fields
+from flask_restful_swagger import swagger
+
+
+# for testcases/action runTestCase action
+@swagger.model
+class TestCaseActionArgsOptsTaskArgModel:
+    resource_fields = {
+    }
+
+
+@swagger.model
+class TestCaseActionArgsOptsModel:
+    resource_fields = {
+        'task-args': TestCaseActionArgsOptsTaskArgModel,
+        'keep-deploy': fields.String,
+        'suite': fields.String
+    }
+
+
+@swagger.model
+class TestCaseActionArgsModel:
+    resource_fields = {
+        'testcase': fields.String,
+        'opts': TestCaseActionArgsOptsModel
+    }
+
+
+@swagger.model
+class TestCaseActionModel:
+    resource_fields = {
+        'action': fields.String,
+        'args': TestCaseActionArgsModel
+    }
+
+
+# for results
+@swagger.model
+class ResultModel:
+    resource_fields = {
+        'status': fields.String,
+        'result': fields.List
+    }
index eaaf8b6..50be91e 100644 (file)
@@ -11,7 +11,8 @@ from api.utils.common import Url
 
 
 urlpatterns = [
-    Url('/yardstick/test/action', views.Test, 'test'),
-    Url('/yardstick/result/action', views.Result, 'result'),
+    Url('/yardstick/testcases/release/action', views.Release, 'release'),
+    Url('/yardstick/testcases/samples/action', views.Samples, 'samples'),
+    Url('/yardstick/results', views.Results, 'results'),
     Url('/yardstick/env/action', views.Env, 'env')
 ]
index 09cfc04..e3e64a7 100644 (file)
@@ -8,7 +8,8 @@
 ##############################################################################
 import collections
 import logging
-import json
+
+from flask import jsonify
 
 from api.utils.daemonthread import DaemonThread
 from yardstick.cmd.cli import YardstickCLI
@@ -50,7 +51,7 @@ def error_handler(message):
         'status': 'error',
         'message': message
     }
-    return json.dumps(result)
+    return jsonify(result)
 
 
 def result_handler(status, data):
@@ -58,7 +59,7 @@ def result_handler(status, data):
         'status': status,
         'result': data
     }
-    return json.dumps(result)
+    return jsonify(result)
 
 
 class Url(object):
index 7357625..928d8e9 100644 (file)
@@ -7,19 +7,30 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 import logging
+import os
 
 from flask import request
 from flask_restful import Resource
+from flasgger.utils import swag_from
 
 from api.utils import common as common_utils
+from api.swagger import models
 from api.actions import test as test_action
+from api.actions import samples as samples_action
 from api.actions import result as result_action
 from api.actions import env as env_action
 
 logger = logging.getLogger(__name__)
 
 
-class Test(Resource):
+TestCaseActionModel = models.TestCaseActionModel
+TestCaseActionArgsModel = models.TestCaseActionArgsModel
+TestCaseActionArgsOptsModel = models.TestCaseActionArgsOptsModel
+TestCaseActionArgsOptsTaskArgModel = models.TestCaseActionArgsOptsTaskArgModel
+
+
+class Release(Resource):
+    @swag_from(os.getcwd() + '/swagger/docs/testcases.yaml')
     def post(self):
         action = common_utils.translate_to_str(request.json.get('action', ''))
         args = common_utils.translate_to_str(request.json.get('args', {}))
@@ -31,7 +42,23 @@ class Test(Resource):
             return common_utils.error_handler('Wrong action')
 
 
-class Result(Resource):
+class Samples(Resource):
+    def post(self):
+        action = common_utils.translate_to_str(request.json.get('action', ''))
+        args = common_utils.translate_to_str(request.json.get('args', {}))
+        logger.debug('Input args is: action: %s, args: %s', action, args)
+
+        try:
+            return getattr(samples_action, action)(args)
+        except AttributeError:
+            return common_utils.error_handler('Wrong action')
+
+
+ResultModel = models.ResultModel
+
+
+class Results(Resource):
+    @swag_from(os.getcwd() + '/swagger/docs/results.yaml')
     def get(self):
         args = common_utils.translate_to_str(request.args)
         action = args.get('action', '')
diff --git a/dashboard/ping_dashboard.json b/dashboard/ping_dashboard.json
new file mode 100644 (file)
index 0000000..cbc4f67
--- /dev/null
@@ -0,0 +1 @@
+{"meta":{"type":"db","canSave":true,"canEdit":true,"canStar":true,"slug":null,"expires":"0001-01-01T00:00:00Z","created":"2016-10-09T00:45:46Z","updated":"2016-10-09T03:12:01Z","updatedBy":"admin","createdBy":"admin","version":7},"dashboard":{"annotations":{"list":[]},"editable":true,"gnetId":null,"hideControls":false,"id":null,"links":[],"refresh":false,"rows":[{"collapse":false,"editable":true,"height":"250px","panels":[{"aliasColors":{},"bars":false,"datasource":"yardstick","editable":true,"error":false,"fill":1,"grid":{"threshold1":1,"threshold1Color":"rgba(216, 200, 27, 0.27)","threshold2":0.5,"threshold2Color":"rgba(234, 112, 112, 0.22)","thresholdLine":false},"id":1,"isNew":true,"legend":{"alignAsTable":false,"avg":false,"current":false,"max":true,"min":true,"rightSide":false,"show":false,"total":false,"values":true},"lines":true,"linewidth":2,"links":[],"nullPointMode":"connected","percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"span":12,"stack":false,"steppedLine":false,"targets":[{"dsType":"influxdb","groupBy":[{"params":["$interval"],"type":"time"},{"params":["null"],"type":"fill"}],"measurement":"ping","policy":"default","refId":"A","resultFormat":"time_series","select":[[{"params":["rtt.ares"],"type":"field"},{"params":[],"type":"mean"}]],"tags":[]}],"timeFrom":null,"timeShift":null,"title":"Ping","tooltip":{"msResolution":true,"shared":true,"sort":0,"value_type":"cumulative"},"type":"graph","xaxis":{"show":true},"yaxes":[{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}]}],"title":"Row"}],"schemaVersion":12,"sharedCrosshair":false,"style":"dark","tags":[],"templating":{"list":[]},"time":{"from":"now-24h","to":"now"},"timepicker":{"refresh_intervals":["5s","10s","30s","1m","5m","15m","30m","1h","2h","1d"],"time_options":["5m","15m","1h","6h","12h","24h","2d","7d","30d"]},"timezone":"browser","title":"Ping_Sample","version":7}}
index fb8625e..3dd9401 100644 (file)
@@ -48,6 +48,7 @@ RUN apt-get update && apt-get install -y \
     nginx \
     uwsgi \
     uwsgi-plugin-python \
+    supervisor \
     python-setuptools && \
     easy_install -U setuptools
 
@@ -57,8 +58,8 @@ RUN apt-get -y autoremove && \
 RUN mkdir -p ${REPOS_DIR}
 
 RUN git config --global http.sslVerify false
-RUN git clone https://gerrit.opnfv.org/gerrit/yardstick ${YARDSTICK_REPO_DIR}
-RUN git clone https://gerrit.opnfv.org/gerrit/releng ${RELENG_REPO_DIR}
+RUN git clone --depth 1 -b $BRANCH https://gerrit.opnfv.org/gerrit/yardstick ${YARDSTICK_REPO_DIR}
+RUN git clone --depth 1 https://gerrit.opnfv.org/gerrit/releng ${RELENG_REPO_DIR}
 
 # install yardstick + dependencies
 RUN cd ${YARDSTICK_REPO_DIR} && easy_install -U pip
@@ -73,3 +74,4 @@ ADD http://download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img /home/op
 ADD http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img /home/opnfv/images/
 
 COPY ./exec_tests.sh /usr/local/bin/
+CMD ["/usr/bin/supervisord"]
index afb7351..e9b6035 100755 (executable)
@@ -16,6 +16,7 @@ apt-get update && apt-get install -y \
     nginx \
     uwsgi \
     uwsgi-plugin-python \
+    supervisor \
     python-setuptools && \
     easy_install -U setuptools
 
index b47951e..e391c92 100644 (file)
@@ -82,3 +82,5 @@ flask-restful==0.3.5
 influxdb==3.0.0
 pyroute2==0.4.10
 docker-py==1.10.6
+flasgger==0.5.13
+flask-restful-swagger==0.19
index 6a89d63..54595b6 100755 (executable)
--- a/setup.py
+++ b/setup.py
@@ -28,7 +28,8 @@ setup(
             'yardstick/nodes/*/*.yaml'
         ],
         'tests': [
-            'opnfv/*/*.yaml'
+            'opnfv/*/*.yaml',
+            'ci/*.sh'
         ]
     },
     url="https://www.opnfv.org",
index 1a6682f..7644c96 100755 (executable)
@@ -162,7 +162,7 @@ run_test()
 
     cat << EOF > /etc/yardstick/yardstick.conf
 [DEFAULT]
-debug = True
+debug = False
 dispatcher = ${DISPATCHER_TYPE}
 
 [dispatcher_file]
diff --git a/tests/unit/api/actions/test_env.py b/tests/unit/api/actions/test_env.py
deleted file mode 100644 (file)
index e674d73..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-import unittest
-import mock
-
-from api.actions import env
-
-
-class CreateInfluxDBContainerTestCase(unittest.TestCase):
-
-    @mock.patch('api.actions.env._create_influxdb_container')
-    def test_create_influxdb_container(self, mock_create_container):
-        env.createInfluxDBContainer({})
-        mock_create_container.assert_called_with()
-
-
-class CreateInfluxdbContainerTestCase(unittest.TestCase):
-
-    @mock.patch('api.actions.env.Client')
-    def test_create_influxdb_container(self, mock_influx_client):
-        env._create_influxdb_container()
-        self.assertFalse(mock_influx_client()._create_container.called)
-
-
-class ConfigInfluxdbTestCase(unittest.TestCase):
-
-    @mock.patch('api.actions.env.influx.get_data_db_client')
-    def test_config_influxdb(self, mock_influx_client):
-        env._config_influxdb()
-        mock_influx_client.assert_called_with()
-
-
-class ConfigOutputFile(unittest.TestCase):
-
-    def test_config_output_file(self):
-        pass
-
-
-def main():
-    unittest.main()
-
-
-if __name__ == '__main__':
-    main()
diff --git a/tests/unit/api/actions/test_result.py b/tests/unit/api/actions/test_result.py
deleted file mode 100644 (file)
index 1686319..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-import unittest
-import json
-
-from api.actions import result
-
-
-class GetResultTestCase(unittest.TestCase):
-
-    def test_getResult_with_no_taskid_arg(self):
-        args = {}
-        output = json.loads(result.getResult(args))
-
-        self.assertEqual('error', output['status'])
-
-
-def main():
-    unittest.main()
-
-
-if __name__ == '__main__':
-    main()
diff --git a/tests/unit/api/actions/test_test.py b/tests/unit/api/actions/test_test.py
deleted file mode 100644 (file)
index 7ebe9fc..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-import unittest
-import json
-
-from api.actions import test
-
-
-class RunTestCase(unittest.TestCase):
-
-    def test_runTestCase_with_no_testcase_arg(self):
-        args = {}
-        output = json.loads(test.runTestCase(args))
-
-        self.assertEqual('error', output['status'])
-
-
-def main():
-    unittest.main()
-
-
-if __name__ == '__main__':
-    main()
diff --git a/tests/unit/api/test_views.py b/tests/unit/api/test_views.py
deleted file mode 100644 (file)
index b835567..0000000
+++ /dev/null
@@ -1,57 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-import unittest
-import mock
-import json
-
-from api.views import Test
-from api.views import Result
-from api.views import Env
-
-
-class TestTestCase(unittest.TestCase):
-
-    @mock.patch('api.views.request')
-    def test_post(self, mock_request):
-        mock_request.json.get.side_effect = ['hello', {}]
-
-        result = json.loads(Test().post())
-
-        self.assertEqual('error', result['status'])
-
-
-class ResultTestCase(unittest.TestCase):
-
-    @mock.patch('api.views.request')
-    def test_get(self, mock_request):
-        mock_request.args.get.return_value = 'hello'
-
-        print Result().get()
-        result = json.loads(Result().get())
-
-        self.assertEqual('error', result['status'])
-
-
-class EnvTestCase(unittest.TestCase):
-
-    @mock.patch('api.views.request')
-    def test_post(self, mock_request):
-        mock_request.json.get.side_effect = ['hello', {}]
-
-        result = json.loads(Env().post())
-
-        self.assertEqual('error', result['status'])
-
-
-def main():
-    unittest.main()
-
-
-if __name__ == '__main__':
-    main()
index 9e050c7..5d17740 100644 (file)
@@ -7,7 +7,6 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 import unittest
-import json
 
 from api.utils import common
 
@@ -58,35 +57,6 @@ class GetCommandListTestCase(unittest.TestCase):
         self.assertEqual(result_list, output_list)
 
 
-class ErrorHandlerTestCase(unittest.TestCase):
-
-    def test_error_handler(self):
-        message = 'hello world'
-        output_dict = json.loads(common.error_handler(message))
-
-        result = {
-            'status': 'error',
-            'message': message
-        }
-
-        self.assertEqual(result, output_dict)
-
-
-class ResultHandlerTestCase(unittest.TestCase):
-
-    def test_result_handler(self):
-        status = 1
-        data = ['hello world']
-        output_dict = json.loads(common.result_handler(status, data))
-
-        result = {
-            'status': status,
-            'result': data
-        }
-
-        self.assertEqual(result, output_dict)
-
-
 def main():
     unittest.main()
 
diff --git a/tests/unit/api/utils/test_daemonthread.py b/tests/unit/api/utils/test_daemonthread.py
deleted file mode 100644 (file)
index f07f0fe..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-import unittest
-import mock
-
-from api.utils.daemonthread import DaemonThread
-
-
-class DaemonThreadTestCase(unittest.TestCase):
-
-    @mock.patch('api.utils.daemonthread.os')
-    def test_run(self, mock_os):
-        def func(common_list, task_id):
-            return task_id
-
-        common_list = []
-        task_id = '1234'
-        thread = DaemonThread(func, (common_list, task_id))
-        thread.run()
-
-        mock_os.path.exist.return_value = True
-        pre_path = '../tests/opnfv/test_suites/'
-        mock_os.remove.assert_called_with(pre_path + '1234.yaml')
-
-
-def main():
-    unittest.main()
-
-
-if __name__ == '__main__':
-    main()
index cb5c09a..25d5221 100644 (file)
@@ -39,17 +39,17 @@ class VsperfTestCase(unittest.TestCase):
         }
         self.args = {
             'options': {
-                'testname': 'rfc2544_p2p_continuous',
+                'testname': 'p2p_rfc2544_continuous',
                 'traffic_type': 'continuous',
-                'pkt_sizes': '64',
+                'frame_size': '64',
                 'bidirectional': 'True',
                 'iload': 100,
-                'duration': 29,
                 'trafficgen_port1': 'eth1',
                 'trafficgen_port2': 'eth3',
                 'external_bridge': 'br-ex',
-                'conf-file': 'vsperf-yardstick.conf',
-                'setup-script': 'setup_yardstick.sh',
+                'conf_file': 'vsperf-yardstick.conf',
+                'setup_script': 'setup_yardstick.sh',
+                'test_params': 'TRAFFICGEN_DURATION=30;',
             },
             'sla': {
                 'metrics': 'throughput_rx_fps',
index 083ab03..88638a0 100644 (file)
@@ -19,6 +19,8 @@
 import os
 import socket
 import unittest
+from cStringIO import StringIO
+
 import mock
 
 from yardstick import ssh
@@ -275,6 +277,23 @@ class SSHRunTestCase(unittest.TestCase):
         send_calls = [call("line1"), call("line2"), call("e2")]
         self.assertEqual(send_calls, self.fake_session.send.mock_calls)
 
+    @mock.patch("yardstick.ssh.select")
+    def test_run_stdin_keep_open(self, mock_select):
+        """Test run method with stdin.
+
+        Third send call was called with "e2" because only 3 bytes was sent
+        by second call. So remainig 2 bytes of "line2" was sent by third call.
+        """
+        mock_select.select.return_value = ([], [], [])
+        self.fake_session.exit_status_ready.side_effect = [0, 0, 0, True]
+        self.fake_session.send_ready.return_value = True
+        self.fake_session.send.side_effect = len
+        fake_stdin = StringIO("line1\nline2\n")
+        self.test_client.run("cmd", stdin=fake_stdin, keep_stdin_open=True)
+        call = mock.call
+        send_calls = [call("line1\nline2\n")]
+        self.assertEqual(send_calls, self.fake_session.send.mock_calls)
+
     @mock.patch("yardstick.ssh.select")
     def test_run_select_error(self, mock_select):
         self.fake_session.exit_status_ready.return_value = False
index 67db442..78bce82 100644 (file)
@@ -83,7 +83,7 @@ class NodeContext(Context):
             return None
         elif len(nodes) > 1:
             LOG.error("Duplicate nodes!!!")
-            LOG.error("Nodes: %r" % nodes)
+            LOG.error("Nodes: %r", nodes)
             sys.exit(-1)
 
         # A clone is created in order to avoid affecting the
index 74a236f..69ea915 100755 (executable)
@@ -93,7 +93,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
         if aborted.is_set():
             break
 
-        LOG.debug("runner=%(runner)s seq=%(sequence)s START" %
+        LOG.debug("runner=%(runner)s seq=%(sequence)s START",
                   {"runner": runner_cfg["runner_id"], "sequence": sequence})
 
         for i, value in enumerate(comb_values):
@@ -109,7 +109,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
             if sla_action == "assert":
                 raise
             elif sla_action == "monitor":
-                LOG.warning("SLA validation failed: %s" % assertion.args)
+                LOG.warning("SLA validation failed: %s", assertion.args)
                 errors = assertion.args
         except Exception as e:
             errors = traceback.format_exc()
@@ -129,7 +129,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
 
         queue.put(record)
 
-        LOG.debug("runner=%(runner)s seq=%(sequence)s END" %
+        LOG.debug("runner=%(runner)s seq=%(sequence)s END",
                   {"runner": runner_cfg["runner_id"], "sequence": sequence})
 
         sequence += 1
index 2374992..8f3f75f 100755 (executable)
@@ -63,7 +63,7 @@ def _execute_shell_command(command):
     except Exception:
         exitcode = -1
         output = traceback.format_exc()
-        log.error("exec command '%s' error:\n " % command)
+        log.error("exec command '%s' error:\n ", command)
         log.error(traceback.format_exc())
 
     return exitcode, output
@@ -76,10 +76,10 @@ def _single_action(seconds, command, queue):
     log.debug("single action: executing command: '%s'", command)
     ret_code, data = _execute_shell_command(command)
     if ret_code < 0:
-        log.error("single action error! command:%s" % command)
+        log.error("single action error! command:%s", command)
         queue.put({'single-action-data': data})
         return
-    log.debug("single action data: \n%s" % data)
+    log.debug("single action data: \n%s", data)
     queue.put({'single-action-data': data})
 
 
@@ -96,7 +96,7 @@ def _periodic_action(interval, command, queue):
             log.error("periodic action error! command:%s", command)
             queue.put({'periodic-action-data': data})
             break
-        log.debug("periodic action data: \n%s" % data)
+        log.debug("periodic action data: \n%s", data)
         queue.put({'periodic-action-data': data})
 
 
@@ -127,7 +127,7 @@ class Runner(object):
         """
         # if there is no runner, start the output serializer subprocess
         if len(Runner.runners) == 0:
-            log.debug("Starting dump process file '%s'" %
+            log.debug("Starting dump process file '%s'",
                       config["output_filename"])
             Runner.queue = multiprocessing.Queue()
             Runner.dump_process = multiprocessing.Process(
@@ -196,13 +196,13 @@ class Runner(object):
         '''run a potentially configured post-stop action'''
         if "post-stop-action" in self.config:
             command = self.config["post-stop-action"]["command"]
-            log.debug("post stop action: command: '%s'" % command)
+            log.debug("post stop action: command: '%s'", command)
             ret_code, data = _execute_shell_command(command)
             if ret_code < 0:
                 log.error("post action error! command:%s", command)
                 self.result_queue.put({'post-stop-action-data': data})
                 return
-            log.debug("post-stop data: \n%s" % data)
+            log.debug("post-stop data: \n%s", data)
             self.result_queue.put({'post-stop-action-data': data})
 
     def run(self, scenario_cfg, context_cfg):
@@ -219,13 +219,13 @@ class Runner(object):
         # run a potentially configured pre-start action
         if "pre-start-action" in self.config:
             command = self.config["pre-start-action"]["command"]
-            log.debug("pre start action: command: '%s'" % command)
+            log.debug("pre start action: command: '%s'", command)
             ret_code, data = _execute_shell_command(command)
             if ret_code < 0:
                 log.error("pre-start action error! command:%s", command)
                 self.result_queue.put({'pre-start-action-data': data})
                 return
-            log.debug("pre-start data: \n%s" % data)
+            log.debug("pre-start data: \n%s", data)
             self.result_queue.put({'pre-start-action-data': data})
 
         if "single-shot-action" in self.config:
index 1f51f51..1412c0c 100644 (file)
@@ -58,7 +58,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
     start = time.time()
     while True:
 
-        LOG.debug("runner=%(runner)s seq=%(sequence)s START" %
+        LOG.debug("runner=%(runner)s seq=%(sequence)s START",
                   {"runner": runner_cfg["runner_id"], "sequence": sequence})
 
         data = {}
@@ -71,7 +71,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
             if sla_action == "assert":
                 raise
             elif sla_action == "monitor":
-                LOG.warning("SLA validation failed: %s" % assertion.args)
+                LOG.warning("SLA validation failed: %s", assertion.args)
                 errors = assertion.args
         except Exception as e:
             errors = traceback.format_exc()
@@ -91,7 +91,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
 
         queue.put(record)
 
-        LOG.debug("runner=%(runner)s seq=%(sequence)s END" %
+        LOG.debug("runner=%(runner)s seq=%(sequence)s END",
                   {"runner": runner_cfg["runner_id"], "sequence": sequence})
 
         sequence += 1
index b23b32b..3a839b6 100644 (file)
@@ -60,7 +60,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
     if "run" in run_step:
         while True:
 
-            LOG.debug("runner=%(runner)s seq=%(sequence)s START" %
+            LOG.debug("runner=%(runner)s seq=%(sequence)s START",
                       {"runner": runner_cfg["runner_id"],
                        "sequence": sequence})
 
@@ -74,7 +74,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
                 if sla_action == "assert":
                     raise
                 elif sla_action == "monitor":
-                    LOG.warning("SLA validation failed: %s" % assertion.args)
+                    LOG.warning("SLA validation failed: %s", assertion.args)
                     errors = assertion.args
             except Exception as e:
                 errors = traceback.format_exc()
@@ -94,7 +94,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
 
             queue.put(record)
 
-            LOG.debug("runner=%(runner)s seq=%(sequence)s END" %
+            LOG.debug("runner=%(runner)s seq=%(sequence)s END",
                       {"runner": runner_cfg["runner_id"],
                        "sequence": sequence})
 
index fe53412..3b06e2a 100644 (file)
@@ -67,7 +67,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
     for value in sequence_values:
         options[arg_name] = value
 
-        LOG.debug("runner=%(runner)s seq=%(sequence)s START" %
+        LOG.debug("runner=%(runner)s seq=%(sequence)s START",
                   {"runner": runner_cfg["runner_id"], "sequence": sequence})
 
         data = {}
@@ -80,7 +80,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
             if sla_action == "assert":
                 raise
             elif sla_action == "monitor":
-                LOG.warning("SLA validation failed: %s" % assertion.args)
+                LOG.warning("SLA validation failed: %s", assertion.args)
                 errors = assertion.args
         except Exception as e:
             errors = traceback.format_exc()
@@ -100,7 +100,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
 
         queue.put(record)
 
-        LOG.debug("runner=%(runner)s seq=%(sequence)s END" %
+        LOG.debug("runner=%(runner)s seq=%(sequence)s END",
                   {"runner": runner_cfg["runner_id"], "sequence": sequence})
 
         sequence += 1
index 4b732a1..38f57d4 100644 (file)
@@ -28,8 +28,8 @@ class AttackerRollbacker(ActionRollbacker):
 
     def rollback(self):
         LOG.debug(
-            "\033[93m recovering attacker %s \033[0m"
-            % (self.underlyingAttacker.key))
+            "\033[93m recovering attacker %s \033[0m",
+            self.underlyingAttacker.key)
         self.underlyingAttacker.recover()
 
 
@@ -40,6 +40,6 @@ class OperationRollbacker(ActionRollbacker):
 
     def rollback(self):
         LOG.debug(
-            "\033[93m rollback operation %s \033[0m"
-            % (self.underlyingOperation.key))
+            "\033[93m rollback operation %s \033[0m",
+            self.underlyingOperation.key)
         self.underlyingOperation.rollback()
index 6561f6b..3b1f8ef 100644 (file)
@@ -24,7 +24,7 @@ def _execute_shell_command(command, stdin=None):
     except Exception:
         exitcode = -1
         output = traceback.format_exc()
-        LOG.error("exec command '%s' error:\n " % command)
+        LOG.error("exec command '%s' error:\n ", command)
         LOG.error(traceback.format_exc())
 
     return exitcode, output
@@ -34,7 +34,7 @@ class BaremetalAttacker(BaseAttacker):
     __attacker_type__ = 'bare-metal-down'
 
     def setup(self):
-        LOG.debug("config:%s context:%s" % (self._config, self._context))
+        LOG.debug("config:%s context:%s", self._config, self._context)
         host = self._context.get(self._config['host'], None)
         ip = host.get("ip", None)
         user = host.get("user", "root")
@@ -65,10 +65,10 @@ class BaremetalAttacker(BaseAttacker):
             "/bin/sh -s {0} -W 10".format(self.host_ip),
             stdin=open(self.check_script, "r"))
 
-        LOG.debug("check ret: %s out:%s err:%s" %
-                  (exit_status, stdout, stderr))
+        LOG.debug("check ret: %s out:%s err:%s",
+                  exit_status, stdout, stderr)
         if not stdout or "running" not in stdout:
-            LOG.info("the host (ipmi_ip:%s) is not running!" % self.ipmi_ip)
+            LOG.info("the host (ipmi_ip:%s) is not running!", self.ipmi_ip)
             return False
 
         return True
@@ -76,8 +76,8 @@ class BaremetalAttacker(BaseAttacker):
     def inject_fault(self):
         exit_status, stdout, stderr = self.connection.execute(
             "shutdown -h now")
-        LOG.debug("inject fault ret: %s out:%s err:%s" %
-                  (exit_status, stdout, stderr))
+        LOG.debug("inject fault ret: %s out:%s err:%s",
+                  exit_status, stdout, stderr)
         if not exit_status:
             LOG.info("inject fault success")
 
@@ -91,7 +91,7 @@ class BaremetalAttacker(BaseAttacker):
             ssh_port = host.get("ssh_port", ssh.DEFAULT_PORT)
             pwd = host.get("pwd", None)
 
-            LOG.debug("jump_host ip:%s user:%s" % (ip, user))
+            LOG.debug("jump_host ip:%s user:%s", ip, user)
             self.jump_connection = ssh.SSH(user, ip, password=pwd,
                                            port=ssh_port)
             self.jump_connection.wait(timeout=600)
index 5e7716e..a452c37 100644 (file)
@@ -20,7 +20,7 @@ class GeneralAttacker(BaseAttacker):
     __attacker_type__ = 'general-attacker'
 
     def setup(self):
-        LOG.debug("config:%s context:%s" % (self._config, self._context))
+        LOG.debug("config:%s context:%s", self._config, self._context)
         host = self._context.get(self._config['host'], None)
         ip = host.get("ip", None)
         user = host.get("user", "root")
@@ -79,8 +79,8 @@ class GeneralAttacker(BaseAttacker):
                       .format(stdout))
         else:
             LOG.error(
-                "the inject_fault's error, stdout:%s, stderr:%s" %
-                (stdout, stderr))
+                "the inject_fault's error, stdout:%s, stderr:%s",
+                stdout, stderr)
 
     def recover(self):
         if "rollback_parameter" in self._config:
index 0a844f5..2ccc231 100644 (file)
@@ -19,7 +19,7 @@ class ProcessAttacker(BaseAttacker):
     __attacker_type__ = 'kill-process'
 
     def setup(self):
-        LOG.debug("config:%s context:%s" % (self._config, self._context))
+        LOG.debug("config:%s context:%s", self._config, self._context)
         host = self._context.get(self._config['host'], None)
         ip = host.get("ip", None)
         user = host.get("user", "root")
@@ -54,8 +54,8 @@ class ProcessAttacker(BaseAttacker):
             return True
         else:
             LOG.error(
-                "the host envrioment is error, stdout:%s, stderr:%s" %
-                (stdout, stderr))
+                "the host envrioment is error, stdout:%s, stderr:%s",
+                stdout, stderr)
         return False
 
     def inject_fault(self):
index 78276ef..f96e577 100644 (file)
@@ -26,7 +26,7 @@ class AttackerMgr(object):
         self._attacker_list = []
 
     def init_attackers(self, attacker_cfgs, context):
-        LOG.debug("attackerMgr confg: %s" % attacker_cfgs)
+        LOG.debug("attackerMgr confg: %s", attacker_cfgs)
 
         for cfg in attacker_cfgs:
             attacker_cls = BaseAttacker.get_attacker_cls(cfg)
index 267933d..104c683 100644 (file)
@@ -63,7 +63,7 @@ class Director(object):
 
     def createActionPlayer(self, type, key):
         LOG.debug(
-            "the type of current action is %s, the key is %s" % (type, key))
+            "the type of current action is %s, the key is %s", type, key)
         if type == ActionType.ATTACKER:
             return actionplayers.AttackerPlayer(self.attackerMgr[key])
         if type == ActionType.MONITOR:
@@ -77,13 +77,13 @@ class Director(object):
 
     def createActionRollbacker(self, type, key):
         LOG.debug(
-            "the type of current action is %s, the key is %s" % (type, key))
+            "the type of current action is %s, the key is %s", type, key)
         if type == ActionType.ATTACKER:
             return actionrollbackers.AttackerRollbacker(self.attackerMgr[key])
         if type == ActionType.OPERATION:
             return actionrollbackers.OperationRollbacker(
                 self.operationMgr[key])
-        LOG.debug("no rollbacker created for %s" % (key))
+        LOG.debug("no rollbacker created for %s", key)
 
     def verify(self):
         result = True
index d26c99c..38d1c4e 100644 (file)
@@ -27,7 +27,7 @@ class MonitorMgr(object):
         self._monitor_list = []
 
     def init_monitors(self, monitor_cfgs, context):
-        LOG.debug("monitorMgr config: %s" % monitor_cfgs)
+        LOG.debug("monitorMgr config: %s", monitor_cfgs)
 
         for monitor_cfg in monitor_cfgs:
             monitor_type = monitor_cfg["monitor_type"]
@@ -87,7 +87,7 @@ class BaseMonitor(multiprocessing.Process):
         return os.path.join(base_path, path)
 
     def run(self):
-        LOG.debug("config:%s context:%s" % (self._config, self._context))
+        LOG.debug("config:%s context:%s", self._config, self._context)
 
         self.setup()
         monitor_time = self._config.get("monitor_time", 0)
@@ -140,7 +140,7 @@ class BaseMonitor(multiprocessing.Process):
     def wait_monitor(self):
         self.join()
         self._result = self._queue.get()
-        LOG.debug("the monitor result:%s" % self._result)
+        LOG.debug("the monitor result:%s", self._result)
 
     def setup(self):  # pragma: no cover
         pass
index b55cc31..366d16e 100644 (file)
@@ -24,7 +24,7 @@ def _execute_shell_command(command):
     except Exception:
         exitcode = -1
         output = traceback.format_exc()
-        LOG.error("exec command '%s' error:\n " % command)
+        LOG.error("exec command '%s' error:\n ", command)
         LOG.error(traceback.format_exc())
 
     return exitcode, output
@@ -62,8 +62,8 @@ class MonitorOpenstackCmd(basemonitor.BaseMonitor):
                 "/bin/bash -s '{0}'".format(self.cmd),
                 stdin=open(self.check_script, "r"))
 
-            LOG.debug("the ret stats: %s stdout: %s stderr: %s" %
-                      (exit_status, stdout, stderr))
+            LOG.debug("the ret stats: %s stdout: %s stderr: %s",
+                      exit_status, stdout, stderr)
         else:
             exit_status, stdout = _execute_shell_command(self.cmd)
         if exit_status:
@@ -72,10 +72,10 @@ class MonitorOpenstackCmd(basemonitor.BaseMonitor):
 
     def verify_SLA(self):
         outage_time = self._result.get('outage_time', None)
-        LOG.debug("the _result:%s" % self._result)
+        LOG.debug("the _result:%s", self._result)
         max_outage_time = self._config["sla"]["max_outage_time"]
         if outage_time > max_outage_time:
-            LOG.info("SLA failure: %f > %f" % (outage_time, max_outage_time))
+            LOG.info("SLA failure: %f > %f", outage_time, max_outage_time)
             return False
         else:
             LOG.info("the sla is passed")
index f9ddb25..359cde6 100644 (file)
@@ -61,14 +61,14 @@ class GeneralMonitor(basemonitor.BaseMonitor):
         return True
 
     def verify_SLA(self):
-        LOG.debug("the _result:%s" % self._result)
+        LOG.debug("the _result:%s", self._result)
         outage_time = self._result.get('outage_time', None)
         max_outage_time = self._config["sla"]["max_outage_time"]
         if outage_time is None:
             LOG.error("There is no outage_time in monitor result.")
             return False
         if outage_time > max_outage_time:
-            LOG.error("SLA failure: %f > %f" % (outage_time, max_outage_time))
+            LOG.error("SLA failure: %f > %f", outage_time, max_outage_time)
             return False
         else:
             return True
index 403ec4d..a88b8d4 100644 (file)
@@ -39,17 +39,17 @@ class MonitorProcess(basemonitor.BaseMonitor):
             "/bin/sh -s {0}".format(self.process_name),
             stdin=open(self.check_script, "r"))
         if not stdout or int(stdout) <= 0:
-            LOG.info("the process (%s) is not running!" % self.process_name)
+            LOG.info("the process (%s) is not running!", self.process_name)
             return False
 
         return True
 
     def verify_SLA(self):
-        LOG.debug("the _result:%s" % self._result)
+        LOG.debug("the _result:%s", self._result)
         outage_time = self._result.get('outage_time', None)
         max_outage_time = self._config["sla"]["max_recover_time"]
         if outage_time > max_outage_time:
-            LOG.error("SLA failure: %f > %f" % (outage_time, max_outage_time))
+            LOG.error("SLA failure: %f > %f", outage_time, max_outage_time)
             return False
         else:
             return True
index e776e87..80efd1b 100644 (file)
@@ -26,7 +26,7 @@ class OperationMgr(object):
         self._operation_list = []
 
     def init_operations(self, operation_cfgs, context):
-        LOG.debug("operationMgr confg: %s" % operation_cfgs)
+        LOG.debug("operationMgr confg: %s", operation_cfgs)
         for cfg in operation_cfgs:
             operation_type = cfg['operation_type']
             operation_cls = BaseOperation.get_operation_cls(operation_type)
index aa28472..b3a20c3 100644 (file)
@@ -19,7 +19,7 @@ class GeneralOperaion(BaseOperation):
     __operation__type__ = "general-operation"
 
     def setup(self):
-        LOG.debug("config:%s context:%s" % (self._config, self._context))
+        LOG.debug("config:%s context:%s", self._config, self._context)
         host = self._context.get(self._config['host'], None)
         ip = host.get("ip", None)
         user = host.get("user", "root")
@@ -67,8 +67,8 @@ class GeneralOperaion(BaseOperation):
             LOG.debug("success,the operation's output is: {0}".format(stdout))
         else:
             LOG.error(
-                "the operation's error, stdout:%s, stderr:%s" %
-                (stdout, stderr))
+                "the operation's error, stdout:%s, stderr:%s",
+                stdout, stderr)
 
     def rollback(self):
         if "rollback_parameter" in self._config:
index 1bdb9f2..a24f26e 100644 (file)
@@ -26,7 +26,7 @@ class ResultCheckerMgr(object):
         self._result_checker_list = []
 
     def init_ResultChecker(self, resultchecker_cfgs, context):
-        LOG.debug("resultcheckerMgr confg: %s" % resultchecker_cfgs)
+        LOG.debug("resultcheckerMgr confg: %s", resultchecker_cfgs)
 
         for cfg in resultchecker_cfgs:
             resultchecker_type = cfg['checker_type']
index ae896c2..8c9d160 100644 (file)
@@ -20,7 +20,7 @@ class GeneralResultChecker(BaseResultChecker):
     __result_checker__type__ = "general-result-checker"
 
     def setup(self):
-        LOG.debug("config:%s context:%s" % (self._config, self._context))
+        LOG.debug("config:%s context:%s", self._config, self._context)
         host = self._context.get(self._config['host'], None)
         ip = host.get("ip", None)
         user = host.get("user", "root")
@@ -67,7 +67,7 @@ class GeneralResultChecker(BaseResultChecker):
             LOG.debug("action script of the operation is: {0}"
                       .format(self.verify_script))
 
-        LOG.debug("exit_status ,stdout : {0} ,{1}".format(exit_status, stdout))
+        LOG.debug("exit_status ,stdout : %s ,%s", exit_status, stdout)
         if exit_status == 0 and stdout:
             self.actualResult = stdout
             LOG.debug("verifying resultchecker: {0}".format(self.key))
@@ -104,6 +104,6 @@ class GeneralResultChecker(BaseResultChecker):
             LOG.error(stderr)
 
         LOG.debug(
-            "verifying resultchecker: {0},the result is : {1}"
-            .format(self.key, self.success))
+            "verifying resultchecker: %s,the result is : %s", self.key,
+            self.success)
         return self.success
index 0a128aa..b064c67 100644 (file)
@@ -22,7 +22,7 @@ class ScenarioGeneral(base.Scenario):
 
     def __init__(self, scenario_cfg, context_cfg):
         LOG.debug(
-            "scenario_cfg:%s context_cfg:%s" % (scenario_cfg, context_cfg))
+            "scenario_cfg:%s context_cfg:%s", scenario_cfg, context_cfg)
         self.scenario_cfg = scenario_cfg
         self.context_cfg = context_cfg
 
index 10f2c4f..46a197c 100755 (executable)
@@ -21,8 +21,8 @@ class ServiceHA(base.Scenario):
 
     def __init__(self, scenario_cfg, context_cfg):
         LOG.debug(
-            "scenario_cfg:%s context_cfg:%s" %
-            (scenario_cfg, context_cfg))
+            "scenario_cfg:%s context_cfg:%s",
+            scenario_cfg, context_cfg)
         self.scenario_cfg = scenario_cfg
         self.context_cfg = context_cfg
         self.setup_done = False
index 1177020..25300dd 100644 (file)
@@ -92,7 +92,7 @@ class CACHEstat(base.Scenario):
 
     def _execute_command(self, cmd):
         """Execute a command on server."""
-        LOG.info("Executing: %s" % cmd)
+        LOG.info("Executing: %s", cmd)
         status, stdout, stderr = self.client.execute(cmd)
         if status:
             raise RuntimeError("Failed executing command: ",
index a7fae44..9d71038 100644 (file)
@@ -96,7 +96,7 @@ class CPULoad(base.Scenario):
 
     def _execute_command(self, cmd):
         """Execute a command on server."""
-        LOG.info("Executing: %s" % cmd)
+        LOG.info("Executing: %s", cmd)
         status, stdout, stderr = self.client.execute(cmd)
         if status != 0:
             raise RuntimeError("Failed executing command: ",
index 6a1afe2..a6c4d95 100644 (file)
@@ -69,14 +69,14 @@ class Cyclictest(base.Scenario):
         rpm_dir = setup_options["rpm_dir"]
         script_dir = setup_options["script_dir"]
         image_dir = setup_options["image_dir"]
-        LOG.debug("Send RPMs from %s to workspace %s" %
-                  (rpm_dir, self.WORKSPACE))
+        LOG.debug("Send RPMs from %s to workspace %s",
+                  rpm_dir, self.WORKSPACE)
         client.put(rpm_dir, self.WORKSPACE, recursive=True)
-        LOG.debug("Send scripts from %s to workspace %s" %
-                  (script_dir, self.WORKSPACE))
+        LOG.debug("Send scripts from %s to workspace %s",
+                  script_dir, self.WORKSPACE)
         client.put(script_dir, self.WORKSPACE, recursive=True)
-        LOG.debug("Send guest image from %s to workspace %s" %
-                  (image_dir, self.WORKSPACE))
+        LOG.debug("Send guest image from %s to workspace %s",
+                  image_dir, self.WORKSPACE)
         client.put(image_dir, self.WORKSPACE, recursive=True)
 
     def _connect_host(self):
@@ -102,7 +102,7 @@ class Cyclictest(base.Scenario):
         self.guest.wait(timeout=600)
 
     def _run_setup_cmd(self, client, cmd):
-        LOG.debug("Run cmd: %s" % cmd)
+        LOG.debug("Run cmd: %s", cmd)
         status, stdout, stderr = client.execute(cmd)
         if status:
             if re.search(self.REBOOT_CMD_PATTERN, cmd):
index 48088f8..e1ba93d 100644 (file)
@@ -61,7 +61,7 @@ class MEMLoad(base.Scenario):
 
     def _execute_command(self, cmd):
         """Execute a command on server."""
-        LOG.info("Executing: %s" % cmd)
+        LOG.info("Executing: %s", cmd)
         status, stdout, stderr = self.client.execute(cmd)
         if status:
             raise RuntimeError("Failed executing command: ",
index ecde756..1ea92cc 100644 (file)
@@ -83,7 +83,7 @@ class NetUtilization(base.Scenario):
 
     def _execute_command(self, cmd):
         """Execute a command on target."""
-        LOG.info("Executing: %s" % cmd)
+        LOG.info("Executing: %s", cmd)
         status, stdout, stderr = self.client.execute(cmd)
         if status:
             raise RuntimeError("Failed executing command: ",
index 82db1e2..39912a9 100644 (file)
@@ -32,14 +32,11 @@ class Vsperf(base.Scenario):
     the valid values are "rfc2544", "continuous", "back2back"
         type:    string
         default: "rfc2544"
-    pkt_sizes - a packet size for which test should be executed;
-        Multiple packet sizes can be tested by modification of Sequence runner
+    frame_size - a frame size for which test should be executed;
+        Multiple frame sizes can be tested by modification of sequence runner
         section inside TC YAML definition.
         type:    string
         default: "64"
-    duration - sets duration for which traffic will be generated
-        type:    int
-        default: 30
     bidirectional - speficies if traffic will be uni (False) or bi-directional
         (True)
         type:    string
@@ -47,9 +44,6 @@ class Vsperf(base.Scenario):
     iload - specifies frame rate
         type:    string
         default: 100
-    rfc2544_trials - the number of trials performed for each packet size
-        type:    string
-        default: NA
     multistream - the number of simulated streams
         type:    string
         default: 0 (disabled)
@@ -57,11 +51,24 @@ class Vsperf(base.Scenario):
         the valid values are "L4", "L3" and "L2"
         type:    string
         default: "L4"
-    conf-file - path to the vsperf configuration file, which will be uploaded
-        to the VM
+    test_params - specifies a string with a list of vsperf configuration
+        parameters, which will be passed to the '--test-params' CLI argument;
+        Parameters should be stated in the form of 'param=value' and separated
+        by a semicolon. Please check VSPERF documentation for details about
+        available configuration parameters and their data types.
+        In case that both 'test_params' and 'conf_file' are specified,
+        then values from 'test_params' will override values defined
+        in the configuration file.
+        type:    string
+        default: NA
+    conf_file - path to the vsperf configuration file, which will be uploaded
+        to the VM;
+        In case that both 'test_params' and 'conf_file' are specified,
+        then values from 'test_params' will override values defined
+        in configuration file.
         type:   string
         default: NA
-    setup-script - path to the setup script, which will be executed during
+    setup_script - path to the setup script, which will be executed during
         setup and teardown phases
         type:   string
         default: NA
@@ -80,8 +87,6 @@ class Vsperf(base.Scenario):
     """
     __scenario_type__ = "Vsperf"
 
-    VSPERF_CONF = '~/vsperf-yardstick.conf'
-
     def __init__(self, scenario_cfg, context_cfg):
         self.scenario_cfg = scenario_cfg
         self.context_cfg = context_cfg
@@ -93,13 +98,18 @@ class Vsperf(base.Scenario):
                                                          None)
         self.br_ex = self.scenario_cfg['options'].get('external_bridge',
                                                       'br-ex')
-        self.vsperf_conf = os.path.expanduser(
-            self.scenario_cfg['options'].get('conf-file', Vsperf.VSPERF_CONF))
-        self.setup_script = self.scenario_cfg['options'].get('setup-script',
+        self.vsperf_conf = self.scenario_cfg['options'].get('conf_file', None)
+        if self.vsperf_conf:
+            self.vsperf_conf = os.path.expanduser(self.vsperf_conf)
+
+        self.setup_script = self.scenario_cfg['options'].get('setup_script',
                                                              None)
         if self.setup_script:
             self.setup_script = os.path.expanduser(self.setup_script)
 
+        self.test_params = self.scenario_cfg['options'].get('test_params',
+                                                            None)
+
     def setup(self):
         '''scenario setup'''
         vsperf = self.context_cfg['host']
@@ -123,9 +133,10 @@ class Vsperf(base.Scenario):
         # traffic generation could last long
         self.client.wait(timeout=1800)
 
-        # copy script to host
-        self.client.run("cat > ~/vsperf.conf",
-                        stdin=open(self.vsperf_conf, "rb"))
+        # copy script to host if needed
+        if self.vsperf_conf:
+            self.client.run("cat > ~/vsperf.conf",
+                            stdin=open(self.vsperf_conf, "rb"))
 
         # execute external setup script
         if self.setup_script:
@@ -166,18 +177,26 @@ class Vsperf(base.Scenario):
         options = self.scenario_cfg['options']
         test_params = []
         test_params.append(add_test_params(options, "traffic_type", "rfc2544"))
-        test_params.append(add_test_params(options, "pkt_sizes", "64"))
-        test_params.append(add_test_params(options, "duration", None))
         test_params.append(add_test_params(options, "bidirectional", "False"))
         test_params.append(add_test_params(options, "iload", 100))
-        test_params.append(add_test_params(options, "rfc2544_trials", None))
         test_params.append(add_test_params(options, "multistream", None))
         test_params.append(add_test_params(options, "stream_type", None))
+        if 'frame_size' in options:
+            test_params.append("%s=(%s,)" % ('TRAFFICGEN_PKT_SIZES',
+                                             options['frame_size']))
+        if 'test_params' in options:
+            test_params.append(options['test_params'])
+
+        # filter empty parameters and escape quotes and double quotes
+        test_params = [tp.replace('"', '\\"').replace("'", "\\'")
+                       for tp in test_params if tp]
 
         # execute vsperf
         cmd = "source ~/vsperfenv/bin/activate ; cd vswitchperf ; "
-        cmd += "./vsperf --mode trafficgen --conf-file ~/vsperf.conf "
-        cmd += "--test-params=\"%s\"" % (';'.join(filter(None, test_params)))
+        cmd += "./vsperf --mode trafficgen "
+        if self.vsperf_conf:
+            cmd += "--conf-file ~/vsperf.conf "
+        cmd += "--test-params=\"%s\"" % (';'.join(test_params))
         LOG.debug("Executing command: %s", cmd)
         status, stdout, stderr = self.client.execute(cmd)
 
index d39c23a..06c329d 100644 (file)
@@ -75,8 +75,8 @@ class StorPerf(base.Scenario):
         setup_query_content = json.loads(setup_query.content)
         if setup_query_content["stack_created"]:
             self.setup_done = True
-            LOG.debug("stack_created: %s"
-                      setup_query_content["stack_created"])
+            LOG.debug("stack_created: %s",
+                      setup_query_content["stack_created"])
 
     def setup(self):
         """Set the configuration."""
@@ -88,8 +88,8 @@ class StorPerf(base.Scenario):
             if env_argument in self.options:
                 env_args[env_argument] = self.options[env_argument]
 
-        LOG.info("Creating a stack on node %s with parameters %s" %
-                 (self.target, env_args))
+        LOG.info("Creating a stack on node %s with parameters %s",
+                 self.target, env_args)
         setup_res = requests.post('http://%s:5000/api/v1.0/configurations'
                                   % self.target, json=env_args)
 
@@ -99,7 +99,7 @@ class StorPerf(base.Scenario):
             raise RuntimeError("Failed to create a stack, error message:",
                                setup_res_content["message"])
         elif setup_res.status_code == 200:
-            LOG.info("stack_id: %s" % setup_res_content["stack_id"])
+            LOG.info("stack_id: %s", setup_res_content["stack_id"])
 
             while not self.setup_done:
                 self._query_setup_state()
@@ -148,7 +148,7 @@ class StorPerf(base.Scenario):
             if job_argument in self.options:
                 job_args[job_argument] = self.options[job_argument]
 
-        LOG.info("Starting a job with parameters %s" % job_args)
+        LOG.info("Starting a job with parameters %s", job_args)
         job_res = requests.post('http://%s:5000/api/v1.0/jobs' % self.target,
                                 json=job_args)
 
@@ -159,7 +159,7 @@ class StorPerf(base.Scenario):
                                job_res_content["message"])
         elif job_res.status_code == 200:
             job_id = job_res_content["job_id"]
-            LOG.info("Started job id: %s..." % job_id)
+            LOG.info("Started job id: %s...", job_id)
 
             time.sleep(self.timeout)
             terminate_res = requests.delete('http://%s:5000/api/v1.0/jobs' %
index d9c0c0a..098379a 100644 (file)
@@ -6,12 +6,34 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
+import logging
+
 from yardstick.common.httpClient import HttpClient
+from yardstick.common import constants
+
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.DEBUG)
 
 
 class EnvCommand(object):
+    '''
 
+        Set of commands to prepare environment
+    '''
     def do_influxdb(self, args):
-        url = 'http://localhost:5000/yardstick/env/action'
+        url = constants.YARDSTICK_ENV_ACTION_API
         data = {'action': 'createInfluxDBContainer'}
         HttpClient().post(url, data)
+        logger.debug('Now creating and configing influxdb')
+
+    def do_grafana(self, args):
+        url = constants.YARDSTICK_ENV_ACTION_API
+        data = {'action': 'createGrafanaContainer'}
+        HttpClient().post(url, data)
+        logger.debug('Now creating and configing grafana')
+
+    def do_prepare(self, args):
+        url = constants.YARDSTICK_ENV_ACTION_API
+        data = {'action': 'prepareYardstickEnv'}
+        HttpClient().post(url, data)
+        logger.debug('Now preparing environment')
index 47fb2ee..9524778 100644 (file)
@@ -17,12 +17,15 @@ import ipaddress
 import time
 import logging
 import uuid
+import errno
 from itertools import ifilter
 
 from yardstick.benchmark.contexts.base import Context
 from yardstick.benchmark.runners import base as base_runner
 from yardstick.common.task_template import TaskTemplate
 from yardstick.common.utils import cliargs
+from yardstick.common.utils import source_env
+from yardstick.common import constants
 
 output_file_default = "/tmp/yardstick.out"
 test_cases_dir_default = "tests/opnfv/test_cases/"
@@ -58,6 +61,8 @@ class TaskCommands(object):
 
         self.task_id = kwargs.get('task_id', str(uuid.uuid4()))
 
+        check_environment()
+
         total_start_time = time.time()
         parser = TaskParser(args.inputfile[0])
 
@@ -483,3 +488,14 @@ def parse_task_args(src_name, args):
               % {"src": src_name, "src_type": type(kw)})
         raise TypeError()
     return kw
+
+
+def check_environment():
+    auth_url = os.environ.get('OS_AUTH_URL', None)
+    if not auth_url:
+        try:
+            source_env(constants.OPENSTACK_RC_FILE)
+        except IOError as e:
+            if e.errno != errno.ENOENT:
+                raise
+            LOG.debug('OPENRC file not found')
index 8fbc82f..d541ead 100644 (file)
@@ -1,6 +1,4 @@
-CONFIG_SAMPLE = '/etc/yardstick/config.yaml'
-
-RELENG_DIR = 'releng.dir'
+import os
 
 DOCKER_URL = 'unix://var/run/docker.sock'
 
@@ -8,3 +6,33 @@ DOCKER_URL = 'unix://var/run/docker.sock'
 USER = 'root'
 PASSWORD = 'root'
 DATABASE = 'yardstick'
+
+INFLUXDB_IMAGE = 'tutum/influxdb'
+INFLUXDB_TAG = '0.13'
+
+GRAFANA_IMAGE = 'grafana/grafana'
+GRAFANA_TAGS = '3.1.1'
+
+dirname = os.path.dirname
+abspath = os.path.abspath
+sep = os.path.sep
+
+INSTALLERS = ['apex', 'compass', 'fuel', 'joid']
+
+YARDSTICK_ROOT_PATH = dirname(dirname(dirname(abspath(__file__)))) + sep
+
+YARDSTICK_REPOS_DIR = '/home/opnfv/repos/yardstick'
+
+YARDSTICK_CONFIG_DIR = '/etc/yardstick/'
+
+YARDSTICK_CONFIG_FILE = os.path.join(YARDSTICK_CONFIG_DIR, 'config.yaml')
+
+RELENG_DIR = '/home/opnfv/repos/releng'
+
+OS_FETCH_SCRIPT = 'utils/fetch_os_creds.sh'
+
+LOAD_IMAGES_SCRIPT = 'tests/ci/load_images.sh'
+
+OPENSTACK_RC_FILE = os.path.join(YARDSTICK_CONFIG_DIR, 'openstack.creds')
+
+YARDSTICK_ENV_ACTION_API = 'http://localhost:5000/yardstick/env/action'
index b6959b4..ab2e9a3 100644 (file)
@@ -23,5 +23,8 @@ class HttpClient(object):
             response = requests.post(url, data=data, headers=headers)
             result = response.json()
             logger.debug('The result is: %s', result)
+
+            return result
         except Exception as e:
             logger.debug('Failed: %s', e)
+            raise
index afbe4e8..3ecb0ae 100644 (file)
@@ -19,10 +19,19 @@ import os
 import sys
 import yaml
 import errno
+import subprocess
+import logging
+
 from oslo_utils import importutils
+from keystoneauth1 import identity
+from keystoneauth1 import session
+from neutronclient.v2_0 import client
 
 import yardstick
 
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.DEBUG)
+
 
 # Decorator for cli-args
 def cliargs(*args, **kwargs):
@@ -100,3 +109,35 @@ def makedirs(d):
     except OSError as e:
         if e.errno != errno.EEXIST:
             raise
+
+
+def execute_command(cmd):
+    exec_msg = "Executing command: '%s'" % cmd
+    logger.debug(exec_msg)
+
+    output = subprocess.check_output(cmd.split()).split(os.linesep)
+
+    return output
+
+
+def source_env(env_file):
+    p = subprocess.Popen(". %s; env" % env_file, stdout=subprocess.PIPE,
+                         shell=True)
+    output = p.communicate()[0]
+    env = dict((line.split('=', 1) for line in output.splitlines()))
+    os.environ.update(env)
+    return env
+
+
+def get_openstack_session():
+    auth = identity.Password(auth_url=os.environ.get('OS_AUTH_URL'),
+                             username=os.environ.get('OS_USERNAME'),
+                             password=os.environ.get('OS_PASSWORD'),
+                             tenant_name=os.environ.get('OS_TENANT_NAME'))
+    return session.Session(auth=auth)
+
+
+def get_neutron_client():
+    sess = get_openstack_session()
+    neutron_client = client.Client(session=sess)
+    return neutron_client
index 2298d00..98e772d 100644 (file)
@@ -81,14 +81,14 @@ class HttpDispatcher(DispatchBase):
                 case_name = v["scenario_cfg"]["tc"]
                 break
         if case_name == "":
-            LOG.error('Test result : %s' % json.dumps(self.result))
+            LOG.error('Test result : %s', json.dumps(self.result))
             LOG.error('The case_name cannot be found, no data will be posted.')
             return
 
         self.result["case_name"] = case_name
 
         try:
-            LOG.debug('Test result : %s' % json.dumps(self.result))
+            LOG.debug('Test result : %s', json.dumps(self.result))
             res = requests.post(self.target,
                                 data=json.dumps(self.result),
                                 headers=self.headers,
index 8673253..fc9f3e9 100644 (file)
@@ -127,7 +127,7 @@ class InfluxdbDispatcher(DispatchBase):
         return make_lines(msg).encode('utf-8')
 
     def record_result_data(self, data):
-        LOG.debug('Test result : %s' % json.dumps(data))
+        LOG.debug('Test result : %s', json.dumps(data))
         self.raw_result.append(data)
         if self.target == '':
             # if the target was not set, do not do anything
@@ -148,13 +148,13 @@ class InfluxdbDispatcher(DispatchBase):
             return 0
 
         if self.tc == "":
-            LOG.error('Test result : %s' % json.dumps(data))
+            LOG.error('Test result : %s', json.dumps(data))
             LOG.error('The case_name cannot be found, no data will be posted.')
             return -1
 
         try:
             line = self._data_to_line_protocol(data)
-            LOG.debug('Test result line format : %s' % line)
+            LOG.debug('Test result line format : %s', line)
             res = requests.post(self.influxdb_url,
                                 data=line,
                                 auth=(self.username, self.password),
@@ -171,5 +171,5 @@ class InfluxdbDispatcher(DispatchBase):
         return 0
 
     def flush_result_data(self):
-        LOG.debug('Test result all : %s' % json.dumps(self.raw_result))
+        LOG.debug('Test result all : %s', json.dumps(self.raw_result))
         return 0
index 71dce81..b9d9262 100644 (file)
@@ -151,10 +151,12 @@ class SSH(object):
         self._client = False
 
     def run(self, cmd, stdin=None, stdout=None, stderr=None,
-            raise_on_error=True, timeout=3600):
+            raise_on_error=True, timeout=3600,
+            keep_stdin_open=False):
         """Execute specified command on the server.
 
         :param cmd:             Command to be executed.
+        :type cmd:              str
         :param stdin:           Open file or string to pass to stdin.
         :param stdout:          Open file to connect to stdout.
         :param stderr:          Open file to connect to stderr.
@@ -162,6 +164,8 @@ class SSH(object):
                                 then exception will be raized if non-zero code.
         :param timeout:         Timeout in seconds for command execution.
                                 Default 1 hour. No timeout if set to 0.
+        :param keep_stdin_open: don't close stdin on empty reads
+        :type keep_stdin_open:  bool
         """
 
         client = self._get_client()
@@ -171,10 +175,12 @@ class SSH(object):
 
         return self._run(client, cmd, stdin=stdin, stdout=stdout,
                          stderr=stderr, raise_on_error=raise_on_error,
-                         timeout=timeout)
+                         timeout=timeout,
+                         keep_stdin_open=keep_stdin_open)
 
     def _run(self, client, cmd, stdin=None, stdout=None, stderr=None,
-             raise_on_error=True, timeout=3600):
+             raise_on_error=True, timeout=3600,
+             keep_stdin_open=False):
 
         transport = client.get_transport()
         session = transport.open_session()
@@ -197,14 +203,14 @@ class SSH(object):
 
             if session.recv_ready():
                 data = session.recv(4096)
-                self.log.debug("stdout: %r" % data)
+                self.log.debug("stdout: %r", data)
                 if stdout is not None:
                     stdout.write(data)
                 continue
 
             if session.recv_stderr_ready():
                 stderr_data = session.recv_stderr(4096)
-                self.log.debug("stderr: %r" % stderr_data)
+                self.log.debug("stderr: %r", stderr_data)
                 if stderr is not None:
                     stderr.write(stderr_data)
                 continue
@@ -214,13 +220,15 @@ class SSH(object):
                     if not data_to_send:
                         data_to_send = stdin.read(4096)
                         if not data_to_send:
-                            stdin.close()
-                            session.shutdown_write()
-                            writes = []
-                            continue
-                    sent_bytes = session.send(data_to_send)
-                    # LOG.debug("sent: %s" % data_to_send[:sent_bytes])
-                    data_to_send = data_to_send[sent_bytes:]
+                            # we may need to keep stdin open
+                            if not keep_stdin_open:
+                                stdin.close()
+                                session.shutdown_write()
+                                writes = []
+                    if data_to_send:
+                        sent_bytes = session.send(data_to_send)
+                        # LOG.debug("sent: %s" % data_to_send[:sent_bytes])
+                        data_to_send = data_to_send[sent_bytes:]
 
             if session.exit_status_ready():
                 break
@@ -267,10 +275,10 @@ class SSH(object):
             try:
                 return self.execute("uname")
             except (socket.error, SSHError) as e:
-                self.log.debug("Ssh is still unavailable: %r" % e)
+                self.log.debug("Ssh is still unavailable: %r", e)
                 time.sleep(interval)
             if time.time() > (start_time + timeout):
-                raise SSHTimeout("Timeout waiting for '%s'" % self.host)
+                raise SSHTimeout("Timeout waiting for '%s'" % self.host)
 
     def put(self, files, remote_path=b'.', recursive=False):
         client = self._get_client()