#!/usr/bin/env python
##############################################################################
# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
'''This file implements the system bandwidth test case.
It has two parts: the first runs the test script, and the second is the
algorithm that judges where the bottleneck is.
The test uses Yardstick as the tool that drives the measurements.'''

import os
import sys
import time
import uuid
import json
import utils.logger as log
from utils.parser import Parser as conf_parser
import utils.env_prepare.stack_prepare as stack_prepare
import testsuites.posca.testcase_dashboard.system_bandwidth as DashBoard
import utils.infra_setup.runner.docker_env as docker_env
# --------------------------------------------------
# logging configuration
# --------------------------------------------------
LOG = log.Logger(__name__).getLogger()

test_dict = {
    "action": "runTestCase",
    "args": {
        "opts": {
            "task-args": {}
        },
        "testcase": "netperf_bottlenecks"
    }
}
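# NOTE: test_dict appears to mirror the payload of Yardstick's "runTestCase"
# API action. This script drives Yardstick through its CLI instead (see
# testcase_parser below), so the dict is not referenced elsewhere in the file.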
testfile = os.path.basename(__file__)
testcase, file_format = os.path.splitext(testfile)


def env_pre(con_dic):
    LOG.info("Preparing the Yardstick environment!")
    stack_prepare._prepare_env_daemon(True)


def config_to_result(test_config, test_result):
    testdata = {}
    parser_result = test_result["benchmark"]["data"]
    test_result.update(test_config)
    test_result.update(parser_result)
    test_result["throughput"] = float(test_result["throughput"])
    test_result["remote_cpu_util"] = float(test_result["remote_cpu_util"])
    test_result["local_cpu_util"] = float(test_result["local_cpu_util"])
    test_result["mean_latency"] = float(test_result["mean_latency"])
    testdata["data_body"] = test_result
    testdata["testcase"] = testcase
    return testdata
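
# A minimal sketch (hypothetical values) of the record shape that
# config_to_result() expects from the Yardstick netperf output and of the
# record it returns:
#
#   raw = {"benchmark": {"data": {"throughput": "940.5",
#                                 "remote_cpu_util": "12.1",
#                                 "local_cpu_util": "9.8",
#                                 "mean_latency": "65.3"}}}
#   config_to_result({"tx_msg_size": 64.0}, raw)
#   # => {"testcase": "posca_factor_system_bandwidth",
#   #     "data_body": {..., "throughput": 940.5, "tx_msg_size": 64.0, ...}}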


def testcase_parser(out_file="yardstick.out", **parameter_info):
    cmd = ('yardstick task start /home/opnfv/repos/yardstick/'
           'samples/netperf_bottlenecks.yaml --output-file ' + out_file)
    cmd = cmd + " --task-args " + '"' + str(parameter_info) + '"'
    LOG.info("yardstick test cmd is: %s", cmd)
    return cmd
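
# For example (illustrative values), testcase_parser(out_file="/tmp/y.out",
# tx_msg_size=64.0) returns roughly:
#   yardstick task start \
#       /home/opnfv/repos/yardstick/samples/netperf_bottlenecks.yaml \
#       --output-file /tmp/y.out --task-args "{'tx_msg_size': 64.0}"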


def do_test(test_config, Use_Dashboard, context_conf):
    yardstick_container = docker_env.yardstick_info['container']
    out_file = ("/tmp/yardstick_" + str(uuid.uuid4()) + ".out")
    cmd = testcase_parser(out_file=out_file, **test_config)
    stdout = docker_env.docker_exec_cmd(yardstick_container, cmd)
    LOG.info(stdout)
    loop_value = 0
    data = {"status": 0}
    # Poll the Yardstick output file for up to 60 * 2 seconds until the
    # task reports success (status 1) or failure (status 2).
    while loop_value < 60:
        time.sleep(2)
        loop_value = loop_value + 1
        try:
            with open(out_file) as f:
                data = json.load(f)
        except (IOError, ValueError):
            # The output file may not exist or be fully written yet.
            continue
        if data["status"] == 1:
            LOG.info("yardstick run success")
            break
        elif data["status"] == 2:
            LOG.error("yardstick error exit")
            sys.exit(1)

    save_data = config_to_result(test_config, data['result'][1])
    if Use_Dashboard is True:
        DashBoard.dashboard_send_data(context_conf, save_data)

    return save_data["data_body"]
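
# The polling loop in do_test() assumes the Yardstick output file looks
# roughly like the following (illustrative; only "status" and "result"
# are read above, with status 1 meaning success and 2 meaning failure):
#   {"status": 1,
#    "result": [<task metadata>, {"benchmark": {"data": {...}}}]}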


def run(test_config):
    con_dic = test_config["load_manager"]
    env_pre(None)
    if test_config["contexts"]["yardstick_ip"] is None:
        test_config["contexts"]["yardstick_ip"] =\
            conf_parser.ip_parser("yardstick_test_ip")

    # Default to no dashboard; enable it only when the config asks for one.
    Use_Dashboard = False
    if "dashboard" in test_config["contexts"].keys():
        if test_config["contexts"]["dashboard_ip"] is None:
            test_config["contexts"]["dashboard_ip"] =\
                conf_parser.ip_parser("dashboard")
        LOG.info("Create Dashboard data")
        Use_Dashboard = True
        DashBoard.dashboard_system_bandwidth(test_config["contexts"])

    data = {}
    rx_pkt_a = con_dic['scenarios']['rx_pkt_sizes'].split(',')
    tx_pkt_a = con_dic['scenarios']['tx_pkt_sizes'].split(',')
    data["rx_pkt_sizes"] = rx_pkt_a
    data["tx_pkt_sizes"] = tx_pkt_a
    con_dic["result_file"] = os.path.dirname(
        os.path.abspath(__file__)) + "/test_case/result"
    cur_role_result = 1
    pre_role_result = 1
    pre_reply = {}
    data_return = {}
    data_max = {}
    data_return["throughput"] = 1
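
    # Search strategy: for each tx packet size, sweep rx packet sizes until
    # throughput plateaus (changes by less than 2.5% between consecutive
    # runs), then compare that plateau against the previous tx size's result
    # to decide whether overall system bandwidth has stopped scaling.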
    for test_x in data["tx_pkt_sizes"]:
        data_max["throughput"] = 1
        bandwidth_tmp = 1
        for test_y in data["rx_pkt_sizes"]:
            case_config = {
                "tx_msg_size": float(test_x),
                "rx_msg_size": float(test_y),
                "test_time": con_dic['scenarios']['test_times'],
                "pod_info": conf_parser.bottlenecks_config["pod_info"]
            }
            data_reply = do_test(case_config, Use_Dashboard,
                                 test_config["contexts"])

            conf_parser.result_to_file(data_reply, test_config["out_file"])
            bandwidth = data_reply["throughput"]
            if data_max["throughput"] < bandwidth:
                data_max = data_reply
            # Stop increasing the rx size once throughput changes by less
            # than 2.5% between consecutive runs: output has plateaued.
            if abs(bandwidth_tmp - bandwidth) / bandwidth_tmp < 0.025:
                LOG.info("this group of data has reached top output")
                break
            else:
                pre_reply = data_reply
                bandwidth_tmp = bandwidth
        cur_role_result = float(pre_reply["throughput"])
        # If this tx size's plateau is within 2.5% of the previous one,
        # the system bandwidth is no longer scaling with packet size.
        if abs(pre_role_result - cur_role_result) / pre_role_result < 0.025:
            LOG.info("The performance increases slowly")
        if data_return["throughput"] < data_max["throughput"]:
            data_return = data_max
        pre_role_result = cur_role_result
    LOG.info("Found the bottleneck for this config")
    LOG.info("The max throughput is %f", data_return["throughput"])
    return data_return
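
# A minimal, hypothetical invocation sketch. In a real deployment the
# Bottlenecks runner builds test_config from the testcase YAML file, so the
# keys and values below are illustrative only:
#
#   sample_config = {
#       "load_manager": {
#           "scenarios": {"tx_pkt_sizes": "64,256,1024",
#                         "rx_pkt_sizes": "64,256,1024",
#                         "test_times": "30"}
#       },
#       "contexts": {"yardstick_ip": "192.0.2.10"},
#       "out_file": "/tmp/bottlenecks_result.out"
#   }
#   result = run(sample_config)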