2 ##############################################################################
3 # Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
5 # All rights reserved. This program and the accompanying materials
6 # are made available under the terms of the Apache License, Version 2.0
7 # which accompanies this distribution, and is available at
8 # http://www.apache.org/licenses/LICENSE-2.0
9 ##############################################################################
"""This file implements the running of the system bandwidth script.
It contains two parts: the first is run_script, and the second is the
algorithm, which is about how to judge the bottlenecks.
This test uses yardstick as a tool to run the test."""
import json
import os
import time
import uuid

import utils.logger as log
from utils.parser import Parser as conf_parser
import utils.env_prepare.quota_prepare as quota_prepare
import utils.env_prepare.stack_prepare as stack_prepare
import utils.infra_setup.runner.docker_env as docker_env
import utils.infra_setup.runner.yardstick as yardstick_task
26 # --------------------------------------------------
27 # logging configuration
28 # --------------------------------------------------
29 LOG = log.Logger(__name__).getLogger()
32 testcase_name = ("tc_heat_rfc2544_ipv4_1rule_"
33 "1flow_64B_trex_correlated_traffic_scale_out")
34 testfile = os.path.basename(__file__)
35 testcase, file_format = os.path.splitext(testfile)
36 cidr = ("/home/opnfv/repos/yardstick/samples/vnf_samples/nsut/acl/"
37 "tc_heat_rfc2544_ipv4_1rule_1flow_64B_trex_correlated_"
38 "traffic_scale_out.yaml")
def env_pre(test_config):
    """Prepare the test environment (stacks, quotas, yardstick image).

    Args:
        test_config: parsed test configuration dict; the "contexts"
            section selects whether yardstick is used and whether its
            image must be prepared.
    """
    # Original code never flipped this flag, so _prepare_env_daemon was
    # always called with False even when a yardstick context exists.
    test_yardstick = "yardstick" in test_config["contexts"].keys()
    stack_prepare._prepare_env_daemon(test_yardstick)
    quota_prepare.quota_env_prepare()
    LOG.info("yardstick environment prepare!")
    if test_config["contexts"]['yardstick_envpre']:
        stdout = yardstick_task.yardstick_image_prepare()
        LOG.debug(stdout)
def config_to_result(test_config, test_result):
    """Extract per-iteration traffic data from a yardstick result dict.

    Args:
        test_config: test case configuration (currently unused, kept for
            interface compatibility with the other posca test scripts).
        test_result: parsed yardstick output; expects
            result["result"]["testcases"][testcase_name]["tc_data"].

    Returns:
        list of dicts, one per iteration, each with "sequence",
        "RxThroughput", "TxThroughput" and "DropPercentage".
    """
    # Original code used final_data/testdata without initialising them
    # and never returned the collected data.
    final_data = []
    out_data = test_result["result"]["testcases"]
    test_data = out_data[testcase_name]["tc_data"]
    for result in test_data:
        testdata = {}
        testdata["sequence"] = result["sequence"]
        # "tg__0" is the traffic-generator node in the yardstick topology.
        traffic_result = result["data"]["tg__0"]
        testdata["RxThroughput"] = traffic_result["RxThroughput"]
        testdata["TxThroughput"] = traffic_result["TxThroughput"]
        testdata["DropPercentage"] = traffic_result["DropPercentage"]
        final_data.append(testdata)
    return final_data
def testcase_parser(out_file="yardstick.out", **parameter_info):
    """Build the yardstick CLI command line for this test case.

    Args:
        out_file: path (inside the yardstick container) for the result file.
        **parameter_info: task arguments forwarded to yardstick as
            --task-args.

    Returns:
        The command string to execute in the yardstick container.
    """
    # Original code built the command but never returned it, and passed
    # neither the task file (cidr) nor the output file to the parser.
    cmd = yardstick_task.yardstick_command_parser(debug=runner_DEBUG,
                                                  cidr=cidr,
                                                  outfile=out_file,
                                                  parameter=parameter_info)
    return cmd
def do_test(test_config, Use_Dashboard, context_conf):
    """Run one yardstick test case in the yardstick container and
    collect its result.

    Args:
        test_config: task arguments for this run (num_vnfs, iterations, ...).
        Use_Dashboard: True when results should also go to the dashboard.
        context_conf: the "contexts" section of the test configuration.

    Returns:
        The per-iteration result list produced by config_to_result.
    """
    yardstick_container = docker_env.yardstick_info['container']
    # Unique output file so concurrent/repeated runs do not collide.
    out_file = ("/tmp/yardstick_" + str(uuid.uuid4()) + ".out")
    cmd = testcase_parser(out_file=out_file, **test_config)
    stdout = docker_env.docker_exec_cmd(yardstick_container, cmd)
    LOG.info(stdout)
    # Poll the yardstick output file until it reports success (status 1)
    # or failure (status 2), for at most 60 attempts.
    # Original code never initialised loop_value, slept, parsed the file
    # or terminated the loop.
    data = {}
    loop_value = 0
    while loop_value < 60:
        time.sleep(2)
        loop_value = loop_value + 1
        with open(out_file) as f:
            data = json.load(f)
            if data["status"] == 1:
                LOG.info("yardstick run success")
                break
            elif data["status"] == 2:
                LOG.error("yardstick error exit")
                raise RuntimeError("yardstick error exit")

    save_data = config_to_result(test_config, data)
    if Use_Dashboard is True:
        print("use dashboard")
        # NOTE(review): dashboard upload call appears to be missing from
        # this chunk — confirm against the original file.
    return save_data
def run(test_config):
    """Entry point: prepare the environment and run the scale-out test
    once per configured VNF count.

    Args:
        test_config: full parsed test configuration with "load_manager"
            and "contexts" sections.

    Returns:
        list of per-run result data (one entry per num_vnfs value).
    """
    load_config = test_config["load_manager"]
    scenarios_conf = load_config["scenarios"]
    # Original code read Use_Dashboard and result without initialising
    # them; default to "no dashboard" and an empty result list.
    Use_Dashboard = False

    env_pre(test_config)
    if test_config["contexts"]["yardstick_ip"] is None:
        load_config["contexts"]["yardstick_ip"] = \
            conf_parser.ip_parser("yardstick_test_ip")

    if "dashboard" in test_config["contexts"].keys():
        if test_config["contexts"]["dashboard_ip"] is None:
            test_config["contexts"]["dashboard_ip"] = \
                conf_parser.ip_parser("dashboard")
        LOG.info("Create Dashboard data")
        Use_Dashboard = True

    num_vnfs = conf_parser.str_to_list(scenarios_conf["number_vnfs"])
    iterations = scenarios_conf["iterations"]
    interval = scenarios_conf["interval"]
    load_config["result_file"] = os.path.dirname(
        os.path.abspath(__file__)) + "/test_case/result"

    result = []
    for i in range(0, len(num_vnfs)):
        case_config = {"num_vnfs": int(num_vnfs[i]),
                       "iterations": iterations,
                       "interval": interval}
        data_reply = do_test(case_config, Use_Dashboard,
                             test_config["contexts"])
        result.append(data_reply)

    LOG.info("Finished bottlenecks testcase")
    LOG.info("The result data is %s", result)
    return result