Merge "update the Euphrates docs"
authorYu Yang (Gabriel) <Gabriel.yuyang@huawei.com>
Tue, 17 Oct 2017 01:39:57 +0000 (01:39 +0000)
committerGerrit Code Review <gerrit@opnfv.org>
Tue, 17 Oct 2017 01:39:57 +0000 (01:39 +0000)
run_tests.sh
testsuites/posca/testcase_cfg/posca_feature_moon_resources.yaml [new file with mode: 0644]
testsuites/posca/testcase_cfg/posca_feature_testpmd_scale_up.yaml [new file with mode: 0644]
testsuites/posca/testcase_script/posca_factor_system_bandwidth.py
testsuites/posca/testcase_script/posca_feature_moon_resources.py [new file with mode: 0644]
testsuites/posca/testcase_script/posca_feature_testpmd_scale_up.py [new file with mode: 0644]
testsuites/run_testsuite.py
utils/env_prepare/stack_prepare.py
utils/infra_setup/runner/docker_env.py
utils/infra_setup/runner/yardstick.py
utils/parser.py

index 065e725..2463edb 100755 (executable)
@@ -103,7 +103,12 @@ function run_test(){
         ;;
         *)
             info "Running posca $test_level: $test_exec"
-            python ${POSCA_SUITE}/../run_testsuite.py $test_level $test_exec $REPORT
+            opts="--privileged=true -id"
+            docker_volume="-v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp"
+            docker run $opts --name bottlenecks-load-master $docker_volume opnfv/bottlenecks:latest /bin/bash
+            sleep 5
+            POSCA_SCRIPT="/home/opnfv/bottlenecks/testsuites/posca"
+            docker exec bottlenecks-load-master python ${POSCA_SCRIPT}/../run_testsuite.py ${test_level} ${test_exec} ${REPORT}
         ;;
     esac
 }
@@ -171,7 +176,8 @@ fi
 # Clean up testing dockers
 if [[ ${cleanup} == true ]]; then
     info "Cleaning up docker-compose images and dockers"
-    docker-compose -f $BASEDIR/docker/bottleneck-compose/docker-compose.yml down --rmi all
     bash ${BASEDIR}/docker/docker_cleanup.sh -d influxdb --debug
     bash ${BASEDIR}/docker/docker_cleanup.sh -d bottlenecks --debug
+    bash ${BASEDIR}/docker/docker_cleanup.sh -d yardstick --debug
+    bash ${BASEDIR}/docker/docker_cleanup.sh -d elk --debug
 fi
diff --git a/testsuites/posca/testcase_cfg/posca_feature_moon_resources.yaml b/testsuites/posca/testcase_cfg/posca_feature_moon_resources.yaml
new file mode 100644 (file)
index 0000000..d6b325f
--- /dev/null
@@ -0,0 +1,30 @@
+##############################################################################
+# Copyright (c) 2017 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+load_manager:
+  scenarios:
+    tool: https request
+    # info: the tenant counts to test, given as a comma-separated list.
+    tenants: 1,5,10,20
+
+  runners:
+    stack_create: yardstick
+    Debug: False
+    yardstick_test_dir: "samples"
+    yardstick_testcase: "bottlenecks_moon_resources"
+
+  runner_exta:
+    # info: this section is for yardstick to do some extra env preparation.
+    installation_method: yardstick
+    installation_type: testpmd
+
+contexts:
+  # info: if the dashboard has data, we will create the data dashboard.
+  dashboard: "Bottlenecks-ELK"
+  yardstick: "Bottlenecks-yardstick"
\ No newline at end of file
diff --git a/testsuites/posca/testcase_cfg/posca_feature_testpmd_scale_up.yaml b/testsuites/posca/testcase_cfg/posca_feature_testpmd_scale_up.yaml
new file mode 100644 (file)
index 0000000..a686b9d
--- /dev/null
@@ -0,0 +1,39 @@
+##############################################################################
+# Copyright (c) 2017 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+load_manager:
+  scenarios:
+    tool: testpmd
+    # TODO: the number of seconds to run when doing binary search for a throughput test.
+    search_interval: 60
+    # info: the cpus and mems lists must have the same number of entries.
+    cpus: 6
+    mems: 12
+    # this is pktsize of Moongen to generate
+    # pkt_size: 64,128,256,1024,1518
+    pkt_size: 64,128,256,1024
+    # this is the multiqueue setting for Moongen to generate
+    multistream: 1, 1000
+
+  runners:
+    stack_create: yardstick
+    Debug: False
+    yardstick_test_dir: "samples"
+    yardstick_testcase: "netperf_bottlenecks"
+
+  runner_exta:
+    # info: this section is for yardstick to do some extra env preparation.
+    installation_method: yardstick
+    installation_type: testpmd
+
+contexts:
+  # info: if the dashboard has data, we will create the data dashboard.
+  dashboard: "Bottlenecks-ELK"
+  yardstick: "yardstick_pmd"
+  yardstick_envpre: False
index 05ea61e..1a54554 100644 (file)
@@ -94,6 +94,7 @@ def do_test(test_config, Use_Dashboard, context_conf):
 
 def run(test_config):
     con_dic = test_config["load_manager"]
+    Use_Dashboard = False
     env_pre(None)
     if test_config["contexts"]["yardstick_ip"] is None:
         con_dic["contexts"]["yardstick_ip"] =\
diff --git a/testsuites/posca/testcase_script/posca_feature_moon_resources.py b/testsuites/posca/testcase_script/posca_feature_moon_resources.py
new file mode 100644 (file)
index 0000000..3c66c7b
--- /dev/null
@@ -0,0 +1,133 @@
+#!/usr/bin/env python
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+'''This file realizes the function of running the system bandwidth script.
+It contains two parts: the first is run_script,
+the second is the algorithm, which is about how to judge the bottlenecks.
+This test uses yardstick as a tool to run the test.'''
+
+import os
+import time
+import uuid
+import json
+import utils.logger as log
+from utils.parser import Parser as conf_parser
+import utils.env_prepare.stack_prepare as stack_prepare
+import utils.infra_setup.runner.docker_env as docker_env
+import utils.infra_setup.runner.yardstick as yardstick_task
+
+# --------------------------------------------------
+# logging configuration
+# --------------------------------------------------
+LOG = log.Logger(__name__).getLogger()
+
+testfile = os.path.basename(__file__)
+testcase, file_format = os.path.splitext(testfile)
+# cidr = "/home/opnfv/repos/yardstick/samples/pvp_throughput_bottlenecks.yaml"
+runner_DEBUG = True
+
+
+def env_pre(con_dic):
+    LOG.info("yardstick environment prepare!")
+    stack_prepare._prepare_env_daemon(True)
+
+
+def config_to_result(test_config, test_result):
+    final_data = []
+    print(test_result)
+    out_data = test_result["result"]["testcases"]
+    test_data = out_data["pvp_throughput_bottlenecks"]["tc_data"]
+    for result in test_data:
+        testdata = {}
+        testdata["vcpu"] = test_config["vcpu"]
+        testdata["memory"] = test_config["memory"]
+        testdata["nrFlows"] = result["data"]["nrFlows"]
+        testdata["packet_size"] = result["data"]["packet_size"]
+        testdata["throughput"] = result["data"]["throughput_rx_mbps"]
+        final_data.append(testdata)
+    return final_data
+
+
+def testcase_parser(runner_conf, out_file="yardstick.out", **parameter_info):
+    cidr = "/home/opnfv/repos/yardstick/" + \
+           runner_conf["yardstick_test_dir"] + \
+           runner_conf["yardstick_testcase"]
+    cmd = yardstick_task.yardstick_command_parser(debug=runner_DEBUG,
+                                                  cidr=cidr,
+                                                  outfile=out_file,
+                                                  parameter=parameter_info)
+    return cmd
+
+
+def do_test(runner_conf, test_config, Use_Dashboard, context_conf):
+    yardstick_container = docker_env.yardstick_info['container']
+    out_file = ("/tmp/yardstick_" + str(uuid.uuid4()) + ".out")
+    cmd = testcase_parser(runner_conf, out_file=out_file, **test_config)
+    print(cmd)
+    stdout = docker_env.docker_exec_cmd(yardstick_container, cmd)
+    LOG.info(stdout)
+    loop_value = 0
+    while loop_value < 60:
+        time.sleep(2)
+        loop_value = loop_value + 1
+        with open(out_file) as f:
+            data = json.load(f)
+            if data["status"] == 1:
+                LOG.info("yardstick run success")
+                break
+            elif data["status"] == 2:
+                LOG.error("yardstick error exit")
+                exit()
+    # data = json.load(output)
+
+    save_data = config_to_result(test_config, data)
+    if Use_Dashboard is True:
+        print("use dashboard")
+        # DashBoard.dashboard_send_data(context_conf, save_data)
+
+    # return save_data["data_body"]
+    return save_data
+
+
+def run(test_config):
+    load_config = test_config["load_manager"]
+    scenarios_conf = load_config["scenarios"]
+    runner_conf = test_config["runners"]
+    Use_Dashboard = False
+
+    env_pre(None)
+    if test_config["contexts"]["yardstick_ip"] is None:
+        load_config["contexts"]["yardstick_ip"] =\
+            conf_parser.ip_parser("yardstick_test_ip")
+
+    if "dashboard" in test_config["contexts"].keys():
+        if test_config["contexts"]["dashboard_ip"] is None:
+            test_config["contexts"]["dashboard_ip"] =\
+                conf_parser.ip_parser("dashboard")
+        LOG.info("Create Dashboard data")
+        Use_Dashboard = True
+        # DashBoard.dashboard_system_bandwidth(test_config["contexts"])
+
+    tenants_conf = conf_parser.str_to_list(scenarios_conf["tenants"])
+
+    load_config["result_file"] = os.path.dirname(
+        os.path.abspath(__file__)) + "/test_case/result"
+
+    result = []
+
+    for tenants in tenants_conf:
+        case_config = {"tenants": tenants}
+
+        data_reply = do_test(runner_conf, case_config,
+                             Use_Dashboard, test_config["contexts"])
+        result.append(data_reply)
+
+    LOG.info("Finished bottlenecks testcase")
+    LOG.info("The result data is %s", result)
+    return result
diff --git a/testsuites/posca/testcase_script/posca_feature_testpmd_scale_up.py b/testsuites/posca/testcase_script/posca_feature_testpmd_scale_up.py
new file mode 100644 (file)
index 0000000..830ff73
--- /dev/null
@@ -0,0 +1,141 @@
+#!/usr/bin/env python
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+'''This file realizes the function of running the system bandwidth script.
+It contains two parts: the first is run_script,
+the second is the algorithm, which is about how to judge the bottlenecks.
+This test uses yardstick as a tool to run the test.'''
+
+import os
+import time
+import uuid
+import json
+import utils.logger as log
+from utils.parser import Parser as conf_parser
+import utils.env_prepare.stack_prepare as stack_prepare
+import utils.infra_setup.runner.docker_env as docker_env
+import utils.infra_setup.runner.yardstick as yardstick_task
+
+# --------------------------------------------------
+# logging configuration
+# --------------------------------------------------
+LOG = log.Logger(__name__).getLogger()
+
+testfile = os.path.basename(__file__)
+testcase, file_format = os.path.splitext(testfile)
+cidr = "/home/opnfv/repos/yardstick/samples/pvp_throughput_bottlenecks.yaml"
+runner_DEBUG = True
+
+
+def env_pre(con_dic):
+    LOG.info("yardstick environment prepare!")
+    stack_prepare._prepare_env_daemon(True)
+
+
+def config_to_result(test_config, test_result):
+    final_data = []
+    print(test_result)
+    out_data = test_result["result"]["testcases"]
+    test_data = out_data["pvp_throughput_bottlenecks"]["tc_data"]
+    for result in test_data:
+        testdata = {}
+        testdata["vcpu"] = test_config["vcpu"]
+        testdata["memory"] = test_config["memory"]
+        testdata["nrFlows"] = result["data"]["nrFlows"]
+        testdata["packet_size"] = result["data"]["packet_size"]
+        testdata["throughput"] = result["data"]["throughput_rx_mbps"]
+        final_data.append(testdata)
+    return final_data
+
+
+def testcase_parser(out_file="yardstick.out", **parameter_info):
+    cmd = yardstick_task.yardstick_command_parser(debug=runner_DEBUG,
+                                                  cidr=cidr,
+                                                  outfile=out_file,
+                                                  parameter=parameter_info)
+    return cmd
+
+
+def do_test(test_config, Use_Dashboard, context_conf):
+    yardstick_container = docker_env.yardstick_info['container']
+    out_file = ("/tmp/yardstick_" + str(uuid.uuid4()) + ".out")
+    cmd = testcase_parser(out_file=out_file, **test_config)
+    print(cmd)
+    stdout = docker_env.docker_exec_cmd(yardstick_container, cmd)
+    LOG.info(stdout)
+    loop_value = 0
+    while loop_value < 60:
+        time.sleep(2)
+        loop_value = loop_value + 1
+        with open(out_file) as f:
+            data = json.load(f)
+            if data["status"] == 1:
+                LOG.info("yardstick run success")
+                break
+            elif data["status"] == 2:
+                LOG.error("yardstick error exit")
+                exit()
+    # data = json.load(output)
+
+    save_data = config_to_result(test_config, data)
+    if Use_Dashboard is True:
+        print("use dashboard")
+        # DashBoard.dashboard_send_data(context_conf, save_data)
+
+    # return save_data["data_body"]
+    return save_data
+
+
+def run(test_config):
+    load_config = test_config["load_manager"]
+    scenarios_conf = load_config["scenarios"]
+    Use_Dashboard = False
+
+    env_pre(None)
+    if test_config["contexts"]["yardstick_ip"] is None:
+        load_config["contexts"]["yardstick_ip"] =\
+            conf_parser.ip_parser("yardstick_test_ip")
+
+    if "dashboard" in test_config["contexts"].keys():
+        if test_config["contexts"]["dashboard_ip"] is None:
+            test_config["contexts"]["dashboard_ip"] =\
+                conf_parser.ip_parser("dashboard")
+        LOG.info("Create Dashboard data")
+        Use_Dashboard = True
+        # DashBoard.dashboard_system_bandwidth(test_config["contexts"])
+
+    cpus = conf_parser.str_to_list(scenarios_conf["cpus"])
+    mems = conf_parser.str_to_list(scenarios_conf["mems"])
+    pkt_size = conf_parser.str_to_list(scenarios_conf["pkt_size"])
+    multistream = conf_parser.str_to_list(scenarios_conf["multistream"])
+    search_interval = scenarios_conf["search_interval"]
+
+    load_config["result_file"] = os.path.dirname(
+        os.path.abspath(__file__)) + "/test_case/result"
+
+    if len(cpus) != len(mems):
+        LOG.error("the cpus and mems config data number is not same!")
+        os._exit()
+
+    result = []
+
+    for i in range(0, len(cpus)):
+        case_config = {"vcpu": cpus[i],
+                       "memory": int(mems[i]) * 1024,
+                       "multistreams": multistream,
+                       "pktsize": pkt_size,
+                       "search_interval": search_interval}
+
+        data_reply = do_test(case_config, Use_Dashboard,
+                             test_config["contexts"])
+        result.append(data_reply)
+
+    LOG.info("Finished bottlenecks testcase")
+    LOG.info("The result data is %s", result)
+    return result
index 8c8f699..e727668 100644 (file)
@@ -52,7 +52,7 @@ def report(testcase, start_date, stop_date, criteria, details_doc):
         "description": ("test results for " + testcase),
         "pod_name": os.environ.get('NODE_NAME', 'unknown'),
         "installer": os.environ.get('INSTALLER_TYPE', 'unknown'),
-        "version": os.environ.get('BRANCH', 'unknown'),
+        "version": os.path.basename(os.environ.get('BRANCH', 'unknown')),
         "build_tag": os.environ.get('BUILD_TAG', 'unknown'),
         "stop_date": str(stop_date),
         "start_date": str(start_date),
@@ -107,7 +107,10 @@ def testsuite_run(test_level, test_name, REPORT="False"):
             conf_parser.Parser.testcase_out_dir(testcase)
         start_date = datetime.datetime.now()
         docker_env_prepare(config[testcase])
-        posca_testcase_run(tester_parser[0], testcase, config[testcase])
+        try:
+            posca_testcase_run(tester_parser[0], testcase, config[testcase])
+        except Exception, e:
+            LOG.warning('e.message:\t', e.message)
         stop_date = datetime.datetime.now()
         LOG.info("End of %s testcase in POSCA testsuite", testcase)
         criteria = "FAIL"
index c7dae39..5de6218 100644 (file)
@@ -44,7 +44,7 @@ def _prepare_env_daemon(test_yardstick):
                             file_after)
         docker_env.docker_exec_cmd(yardstick_contain,
                                    cmd)
-        cmd = "sed -i '12s/http/file/g' /etc/yardstick/yardstick.conf"
+        cmd = "sed -i '13s/http/file/g' /etc/yardstick/yardstick.conf"
         docker_env.docker_exec_cmd(yardstick_contain,
                                    cmd)
 
index 64d049b..6e9c78a 100644 (file)
@@ -46,8 +46,7 @@ def env_yardstick(docker_name):
     yardstick_tag = os.getenv("Yardstick_TAG")
     if yardstick_tag is None:
         yardstick_tag = "danube.3.1"
-    env_docker = client.containers.run(image="opnfv/yardstick:%s"
-                                             % yardstick_tag,
+    env_docker = client.containers.run(image="yardstick_pmd",
                                        privileged=True,
                                        tty=True,
                                        detach=True,
index 35b89ae..559b9c1 100644 (file)
@@ -24,6 +24,18 @@ headers = {"Content-Type": "application/json"}
 LOG = logger.Logger(__name__).getLogger()
 
 
+def yardstick_command_parser(debug, cidr, outfile, parameter):
+    cmd = "yardstick"
+    if debug:
+        cmd += " -d"
+    cmd += " task start "
+    cmd += str(cidr)
+    cmd += " --output-file " + outfile
+    if parameter is not None:
+        cmd += " --task-args " + '"' + str(parameter) + '"'
+    return cmd
+
+
 def Get_Reply(test_config, task_id, time_test=1):
     reply_url = ("http://%s/yardstick/results?task_id=%s"
                  % (test_config['yardstick_test_ip'], task_id))
index ecd6bad..b46a3b9 100644 (file)
@@ -127,6 +127,15 @@ class Parser():
             f.write(json.dumps(data, f))
             f.write("\n")
 
+    @staticmethod
+    def str_to_list(str_org):
+        try:
+            data = str_org.split(',')
+        except AttributeError:
+            data = []
+            data.append(str_org)
+        return data
+
 
 class HeatTemplate_Parser():
     """parser a Heat template and a method to deploy template to a stack"""