Bottlenecks system bandwidth testcase.
[bottlenecks.git] / testsuites / posca / testcase_script / posca_factor_system_bandwidth.py
index f3e950b..1a54554 100644 (file)
 #!/usr/bin/env python
 ##############################################################################
-# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
 #
 # All rights reserved. This program and the accompanying materials
 # are made available under the terms of the Apache License, Version 2.0
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
+'''Run the system-bandwidth bottleneck test.
+
+This module has two parts: the first runs the test script, and the
+second is the algorithm that judges where the bottleneck lies.
+The test uses yardstick as the underlying test tool.'''
 
 import os
-import argparse
 import time
-import logging
-import ConfigParser
+import uuid
 import json
-
-# ------------------------------------------------------
-# parser for configuration files in each test case
-# ------------------------------------------------------
-parser = argparse.ArgumentParser()
-parser.add_argument("-c", "--conf",
-                    help="configuration files for the testcase,\
-                        in yaml format",
-                    default="/home/opnfv/bottlenecks/testsuites/posca/\
-                        testcase_cfg/posca_factor_tx_pkt_size.yaml")
-args = parser.parse_args()
-
-cmd = "curl -i"
-order_arg = "-H \"Content-Type: application/json\" -X POST -d \'{\"cmd\": \
-            \"start\", \"opts\":{\"output-file\": \"/tem/yardstick.out\"}, \
-            \"args\": \"../samples/netperf.yaml\"}'"
-
+import utils.logger as log
+from utils.parser import Parser as conf_parser
+import utils.env_prepare.stack_prepare as stack_prepare
+import testsuites.posca.testcase_dashboard.system_bandwidth as DashBoard
+import utils.infra_setup.runner.docker_env as docker_env
 # --------------------------------------------------
 # logging configuration
 # --------------------------------------------------
-logger = logging.getLogger(__name__)
-
-
-def posca_env_check():
-    print("========== posca system bandwidth env check ===========")
-    filepath = r"/home/opnfv/bottlenecks/testsuites/posca/test_result/"
-    if os.path.exists(filepath):
-        return True
-    else:
-        os.mkdirs(r'/home/opnfv/bottlenecks/testsuites/posca/test_result/')
-
-
-def posca_output_result(time_new, input_1, input_2, input_3,
-                        input_4, input_5, input_6):
-    save_dic = {}
-    save_dic['tx_pkt_size'] = input_1
-    save_dic['rx_cache_size'] = input_2
-    save_dic['tx_cache_size'] = input_3
-    save_dic['throughput '] = input_4
-    save_dic['latency'] = input_5
-    save_dic['cpu_load'] = input_6
-    with open("/home/opnfv/bottlenecks/testsuites/posca/test_result/\
-            factor_tx_cache_size_%s.json" % (time_new), "a") as f:
-        f.write(json.dumps(save_dic, f))
-        f.write("\n")
-
-
-def posca_config_read(config_str):
-    print("========== posca system bandwidth config read ===========")
-    con_dic = {}
-    config = ConfigParser.ConfigParser()
-    with open(config_str, "rd") as cfgfile:
-        config.readfp(cfgfile)
-        con_dic['test_ip'] = config.get("config", "test_ip")
-        con_dic['test_tool'] = config.get("config", "tool")
-        con_dic['test_time'] = config.get("config", "test_time")
-        con_dic['test_protocol'] = config.get("config", "protocol")
-        con_dic['test_tx_pkt_s'] = config.get("config", "tx pkt sizes")
-        con_dic['test_rx_pkt_s'] = config.get("config", "rx pkt sizes")
-        con_dic['test_tx_cache_s'] = config.get("config", "tx cache sizes")
-        con_dic['test_rx_cache_s'] = config.get("config", "rx cache sizes")
-        con_dic['test_cpu_load'] = config.get("config", "cpu load")
-        con_dic['test_latency'] = config.get("config", "latency")
-
-    return con_dic
-
-
-def posca_run(con_dic):
-    print("========== run posca system bandwidth ===========")
-
-    test_rx_pkt_s_e = 87380
-    test_tx_pkt_s_a = con_dic['test_tx_pkt_s'].split(',')
-    test_tx_cache_s_a = con_dic['test_tx_cache_s'].split(',')
-    test_rx_cache_s_a = con_dic['test_rx_cache_s'].split(',')
-    time_new = time.strftime('%H_%M', time.localtime(time.time()))
-    bandwidth_tmp = 1
-
-    for test_rx_cache_s_e in test_rx_cache_s_a:
-        for test_tx_cache_s_e in test_tx_cache_s_a:
-            for test_tx_pkt_s_e in test_tx_pkt_s_a:
-                print("%s,%s,%s") % (test_tx_pkt_s_e, test_rx_cache_s_e,
-                                     test_tx_cache_s_e)
-                order_excute = os.popen("%s %s http://%s/api/v3/yardstick/\
-                    tasks/task %s %s %s" % (cmd, order_arg, con_dic['test_ip'],
-                                            test_rx_pkt_s_e, test_rx_cache_s_e,
-                                            test_tx_cache_s_e))
-                order_result = order_excute.read()
-                task_id = order_result.find("task_id")
-                time.sleep(con_dic['test_time'])
-                cmd_excute = os.popen("%s http://%s/api/v3/yardstick/testre\
-                    sults?task_id=%s" % (cmd, con_dic['test_ip'], task_id))
-                bandwidth = cmd_excute.find("bandwidth")
-                cpu_load = cmd_excute.find("cpu_load")
-                latency = cmd_excute.find("latency")
-                posca_output_result(time_new, test_rx_pkt_s_e,
-                                    test_rx_cache_s_e,
-                                    bandwidth, latency, cpu_load)
-                if (cpu_load < con_dic['test_cpu_load\
-                        ']) and (latency < con_dic['test_latency']):
-                    if (abs(bandwidth_tmp-bandwidth)/bandwidth < 0.05):
-                        print("%s,%s,%s,%s,%s,%s") % (test_rx_pkt_s_e,
-                                                      test_rx_cache_s_e,
-                                                      test_tx_cache_s_e,
-                                                      bandwidth,
-                                                      latency,
-                                                      cpu_load)
-                        return True
-                    else:
-                        bandwidth_tmp = bandwidth
-                else:
-                    print("%s,%s,%s,%s,%s,%s") % (test_rx_pkt_s_e,
-                                                  test_rx_cache_s_e,
-                                                  test_tx_cache_s_e,
-                                                  bandwidth,
-                                                  latency,
-                                                  cpu_load)
-                    return True
-
-
-def main():
-    if not (args.conf):
-        logger.error("Configuration files do not exist for \
-                    the specified testcases")
-        os.exit(-1)
-    else:
-        testcase_cfg = args.conf
-
-    con_dic = posca_config_read(testcase_cfg)
-    posca_env_check()
-    posca_run(con_dic)
-
-    time.sleep(5)
-
-if __name__ == '__main__':
-    main()
+# Module-level logger for this testcase.
+LOG = log.Logger(__name__).getLogger()
+
+# Template body for a yardstick "runTestCase" API request; "task-args"
+# would be filled in per iteration.
+# NOTE(review): test_dict is not referenced anywhere in this file's new
+# code (do_test builds a CLI command instead) -- confirm whether it is
+# still needed or is dead configuration.
+test_dict = {
+    "action": "runTestCase",
+    "args": {
+        "opts": {
+            "task-args": {}
+        },
+        "testcase": "netperf_bottlenecks"
+    }
+}
+# Derive the testcase name from this file's basename (extension split off
+# into file_format); used to tag result records in config_to_result().
+testfile = os.path.basename(__file__)
+testcase, file_format = os.path.splitext(testfile)
+
+
+def env_pre(con_dic):
+    """Prepare the yardstick test environment (daemon/stack setup).
+
+    ``con_dic`` is accepted for call-signature symmetry with other
+    testcases but is not used here (callers pass None).
+    """
+    LOG.info("yardstick environment prepare!")
+    stack_prepare._prepare_env_daemon(True)
+
+
+def config_to_result(test_config, test_result):
+    """Flatten one yardstick result record and tag it with this testcase.
+
+    Mutates ``test_result`` in place: merges the per-run test
+    configuration and the nested ``benchmark.data`` payload into it,
+    then coerces the four metric fields to float for numeric use
+    downstream (comparison/dashboard).
+
+    Returns a dict with keys ``data_body`` (the merged record) and
+    ``testcase`` (this module's testcase name).
+    """
+    testdata = {}
+    parser_result = test_result["benchmark"]["data"]
+    test_result.update(test_config)
+    test_result.update(parser_result)
+    test_result["throughput"] = float(test_result["throughput"])
+    test_result["remote_cpu_util"] = float(test_result["remote_cpu_util"])
+    test_result["local_cpu_util"] = float(test_result["local_cpu_util"])
+    test_result["mean_latency"] = float(test_result["mean_latency"])
+    testdata["data_body"] = test_result
+    testdata["testcase"] = testcase
+    return testdata
+
+
+def testcase_parser(out_file="yardstick.out", **parameter_info):
+    """Build the yardstick CLI command line for the netperf bottlenecks
+    sample, writing results to ``out_file``.
+
+    ``parameter_info`` becomes the ``--task-args`` payload.
+    NOTE(review): str() on a dict produces a Python repr (single-quoted
+    keys), not JSON -- presumably yardstick's task-args parser accepts
+    that form; verify against the yardstick CLI.
+    """
+    cmd = ('yardstick task start /home/opnfv/repos/yardstick/'
+           'samples/netperf_bottlenecks.yaml --output-file ' + out_file)
+    cmd = cmd + " --task-args " + '"' + str(parameter_info) + '"'
+    LOG.info("yardstick test cmd is: %s" % cmd)
+    return cmd
+
+
+def do_test(test_config, Use_Dashboard, context_conf):
+    """Run one yardstick iteration inside the yardstick container and
+    collect its parsed result.
+
+    Executes the CLI command from testcase_parser() in the container,
+    polls the JSON output file until yardstick reports completion, and
+    optionally pushes the record to the dashboard.  Returns the merged
+    result record (the ``data_body`` from config_to_result()).
+    """
+    yardstick_container = docker_env.yardstick_info['container']
+    # Unique output file per run so successive runs don't clash.
+    out_file = ("/tmp/yardstick_" + str(uuid.uuid4()) + ".out")
+    cmd = testcase_parser(out_file=out_file, **test_config)
+    stdout = docker_env.docker_exec_cmd(yardstick_container, cmd)
+    LOG.info(stdout)
+    # Poll for up to ~120s (60 polls x 2s) until yardstick reports
+    # success (status == 1) or failure (status == 2).
+    # NOTE(review): out_file is written by the command inside the
+    # container but opened here on the local filesystem -- presumably
+    # /tmp is shared between host and container; confirm.
+    loop_value = 0
+    while loop_value < 60:
+        time.sleep(2)
+        loop_value = loop_value + 1
+        with open(out_file) as f:
+            data = json.load(f)
+            if data["status"] == 1:
+                LOG.info("yardstick run success")
+                break
+            elif data["status"] == 2:
+                LOG.error("yardstick error exit")
+                exit()
+
+    # NOTE(review): if the polling loop times out without seeing status
+    # 1 or 2, the last polled "data" is used as-is below -- confirm that
+    # is intended rather than an explicit timeout error.
+    save_data = config_to_result(test_config, data['result'][1])
+    if Use_Dashboard is True:
+        DashBoard.dashboard_send_data(context_conf, save_data)
+
+    return save_data["data_body"]
+
+
+def run(test_config):
+    """Testcase entry point: sweep tx/rx packet sizes to find the
+    system-bandwidth bottleneck.
+
+    For every tx packet size, sweeps the rx packet sizes via do_test()
+    and stops the inner sweep once throughput changes by less than 2.5%
+    (the row has plateaued).  Returns the result record with the highest
+    throughput observed across the whole sweep.
+    """
+    con_dic = test_config["load_manager"]
+    Use_Dashboard = False
+    env_pre(None)
+    # NOTE(review): the condition reads test_config["contexts"] but the
+    # assignment writes con_dic["contexts"] (i.e. under load_manager) --
+    # these look like they should target the same dict; confirm which
+    # one the rest of the framework reads.
+    if test_config["contexts"]["yardstick_ip"] is None:
+        con_dic["contexts"]["yardstick_ip"] =\
+            conf_parser.ip_parser("yardstick_test_ip")
+
+    # Optional dashboard: resolve its IP and create the dashboard layout
+    # before any data is sent.
+    if "dashboard" in test_config["contexts"].keys():
+        if test_config["contexts"]["dashboard_ip"] is None:
+            test_config["contexts"]["dashboard_ip"] =\
+                conf_parser.ip_parser("dashboard")
+        LOG.info("Create Dashboard data")
+        Use_Dashboard = True
+        DashBoard.dashboard_system_bandwidth(test_config["contexts"])
+
+    data = {}
+    # Sweep axes arrive as comma-separated strings in the config.
+    rx_pkt_a = con_dic['scenarios']['rx_pkt_sizes'].split(',')
+    tx_pkt_a = con_dic['scenarios']['tx_pkt_sizes'].split(',')
+    data["rx_pkt_sizes"] = rx_pkt_a
+    data["tx_pkt_sizes"] = tx_pkt_a
+    con_dic["result_file"] = os.path.dirname(
+        os.path.abspath(__file__)) + "/test_case/result"
+    # Seed comparison values with 1 so the first relative-change tests
+    # are well-defined (no division by zero).
+    cur_role_result = 1
+    pre_role_result = 1
+    pre_reply = {}
+    data_return = {}
+    data_max = {}
+    data_return["throughput"] = 1
+
+    for test_x in data["tx_pkt_sizes"]:
+        data_max["throughput"] = 1
+        bandwidth_tmp = 1
+        for test_y in data["rx_pkt_sizes"]:
+            case_config = {
+                "tx_msg_size": float(test_x),
+                "rx_msg_size": float(test_y),
+                "test_time": con_dic['scenarios']['test_times'],
+                "pod_info": conf_parser.bottlenecks_config["pod_info"]
+            }
+            data_reply = do_test(case_config, Use_Dashboard,
+                                 test_config["contexts"])
+
+            conf_parser.result_to_file(data_reply, test_config["out_file"])
+            bandwidth = data_reply["throughput"]
+            # Track the best record in this tx row.
+            if (data_max["throughput"] < bandwidth):
+                data_max = data_reply
+            # Plateau check: <2.5% change vs the previous rx step ends
+            # this row's sweep.
+            if (abs(bandwidth_tmp - bandwidth) / bandwidth_tmp < 0.025):
+                LOG.info("this group of data has reached top output")
+                break
+            else:
+                pre_reply = data_reply
+                bandwidth_tmp = bandwidth
+        # NOTE(review): if the very first rx step plateaus, pre_reply is
+        # still {} and this line raises KeyError -- confirm whether that
+        # can occur with real data.
+        cur_role_result = float(pre_reply["throughput"])
+        # Compare row peaks across tx sizes; <2.5% growth means overall
+        # performance is flattening out.
+        if (abs(pre_role_result - cur_role_result) / pre_role_result < 0.025):
+            LOG.info("The performance increases slowly")
+        if data_return["throughput"] < data_max["throughput"]:
+            data_return = data_max
+        pre_role_result = cur_role_result
+    LOG.info("Find bottlenecks of this config")
+    LOG.info("The max data is %d", data_return["throughput"])
+    return data_return