Code Review
/
bottlenecks.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
review
|
tree
raw
|
inline
| side by side
Documentation on storage testcases in bottlenecks
[bottlenecks.git]
/
testsuites
/
posca
/
testcase_script
/
posca_factor_ping.py
diff --git a/testsuites/posca/testcase_script/posca_factor_ping.py b/testsuites/posca/testcase_script/posca_factor_ping.py
index b212971..3a2277c 100644 (file)
--- a/testsuites/posca/testcase_script/posca_factor_ping.py
+++ b/testsuites/posca/testcase_script/posca_factor_ping.py
@@ -53,7 +53,7 @@
def env_pre(test_config):
stack_prepare._prepare_env_daemon(test_yardstick)
quota_prepare.quota_env_prepare()
cmd = ('yardstick env prepare')
stack_prepare._prepare_env_daemon(test_yardstick)
quota_prepare.quota_env_prepare()
cmd = ('yardstick env prepare')
-    LOG.info("yardstick envrionment prepare!")
+    LOG.info("yardstick environment prepare!")
if(test_config["contexts"]['yardstick_envpre']):
yardstick_container = docker_env.yardstick_info['container']
stdout = docker_env.docker_exec_cmd(yardstick_container, cmd)
if(test_config["contexts"]['yardstick_envpre']):
yardstick_container = docker_env.yardstick_info['container']
stdout = docker_env.docker_exec_cmd(yardstick_container, cmd)
@@ -69,33 +69,32 @@
def do_test():
stdout = docker_env.docker_exec_cmd(yardstick_container, cmd)
LOG.info(stdout)
out_value = 0
stdout = docker_env.docker_exec_cmd(yardstick_container, cmd)
LOG.info(stdout)
out_value = 0
-    loop_walue = 0
-    while loop_walue < 150:
+    loop_value = 0
+    while loop_value < 60:
time.sleep(2)
time.sleep(2)
-        loop_walue = loop_walue + 1
+        loop_value = loop_value + 1
with open(out_file) as f:
data = json.load(f)
if data["status"] == 1:
with open(out_file) as f:
data = json.load(f)
if data["status"] == 1:
- if data["result"]["criteria"] == "PASS":
- LOG.info("yardstick run success")
- out_value = 1
- else:
- LOG.error("task error exit")
- out_value = 0
+ LOG.info("yardstick run success")
+ out_value = 1
break
elif data["status"] == 2:
LOG.error("yardstick error exit")
break
elif data["status"] == 2:
LOG.error("yardstick error exit")
+ out_value = 0
+ break
q.put((out_value, func_name))
return out_value
q.put((out_value, func_name))
return out_value
-def config_to_result(num, out_num, during_date):
+def config_to_result(num, out_num, during_date, result):
testdata = {}
test_result = {}
testdata = {}
test_result = {}
-    test_result["number_of_users"] = float(num)
+    test_result["number_of_stacks"] = float(num)
test_result["success_times"] = out_num
test_result["success_rate"] = out_num / num
test_result["duration_time"] = during_date
test_result["success_times"] = out_num
test_result["success_rate"] = out_num / num
test_result["duration_time"] = during_date
+ test_result["result"] = result
testdata["data_body"] = test_result
testdata["testcase"] = testcase
return testdata
testdata["data_body"] = test_result
testdata["testcase"] = testcase
return testdata
@@ -120,9 +119,8 @@
def run(test_config):
LOG.info("Create Dashboard data")
DashBoard.posca_stress_ping(test_config["contexts"])
LOG.info("Create Dashboard data")
DashBoard.posca_stress_ping(test_config["contexts"])
- LOG.info("bottlenecks envrionment prepare!")
env_pre(test_config)
env_pre(test_config)
-    LOG.info("yardstick envrionment prepare done!")
+    LOG.info("yardstick environment prepare done!")
for value in test_num:
result = []
for value in test_num:
result = []
@@ -149,15 +147,18 @@
def run(test_config):
LOG.info("%s thread success %d times" % (num, out_num))
during_date = (endtime - starttime).seconds
LOG.info("%s thread success %d times" % (num, out_num))
during_date = (endtime - starttime).seconds
- data_reply = config_to_result(num, out_num, during_date)
+ if out_num >= con_dic["scenarios"]['threshhold']:
+ criteria_result = "PASS"
+ else:
+ criteria_result = "FAIL"
+
+ data_reply = config_to_result(num, out_num, during_date,
+ criteria_result)
if "dashboard" in test_config["contexts"].keys():
DashBoard.dashboard_send_data(test_config['contexts'], data_reply)
conf_parser.result_to_file(data_reply, test_config["out_file"])
if "dashboard" in test_config["contexts"].keys():
DashBoard.dashboard_send_data(test_config['contexts'], data_reply)
conf_parser.result_to_file(data_reply, test_config["out_file"])
- if out_num < num:
- success_rate = ('%d/%d' % (out_num, num))
- LOG.error('error thread: %d '
- 'the successful rate is %s'
- % (num - out_num, success_rate))
+ if criteria_result is "FAIL":
break
LOG.info('END POSCA stress ping test')
break
LOG.info('END POSCA stress ping test')
+ return criteria_result