Moon testcase bug fix
[bottlenecks.git] / testsuites / posca / testcase_script / posca_feature_moon_resources.py
1 #!/usr/bin/env python
2 ##############################################################################
3 # Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
4 #
5 # All rights reserved. This program and the accompanying materials
6 # are made available under the terms of the Apache License, Version 2.0
7 # which accompanies this distribution, and is available at
8 # http://www.apache.org/licenses/LICENSE-2.0
9 ##############################################################################
10 '''This file realize the function of run systembandwidth script.
11 for example this contain two part first run_script,
12 second is algorithm, this part is about how to judge the bottlenecks.
13 This test is using yardstick as a tool to begin test.'''
14
15 import os
16 import time
17 import uuid
18 import json
19 import utils.logger as log
20 from utils.parser import Parser as conf_parser
21 import utils.env_prepare.moon_prepare as moon_env
22 import utils.infra_setup.runner.docker_env as docker_env
23 import testsuites.posca.testcase_dashboard.posca_feature_moon as DashBoard
24 import utils.infra_setup.runner.yardstick as yardstick_task
25
# --------------------------------------------------
# logging configuration
# --------------------------------------------------
LOG = log.Logger(__name__).getLogger()

# Derive the testcase name from this file's own name (extension stripped),
# e.g. "posca_feature_moon_resources".
testfile = os.path.basename(__file__)
testcase, file_format = os.path.splitext(testfile)
# Run yardstick with --debug so its console output is verbose.
runner_DEBUG = True
34
35
def env_pre(test_config):
    """Prepare the Moon environment described in the test configuration.

    Runs the Moon environment-prepare helper only when the config both
    declares a "moon_monitoring" context and sets 'moon_envpre' to True.
    NOTE(review): the guard tests for the "moon_monitoring" key but then
    reads 'moon_envpre'/'moon_environment' directly — presumably those keys
    always accompany it; verify against the testcase config files.
    """
    contexts = test_config["contexts"]
    if "moon_monitoring" in contexts:
        if contexts['moon_envpre'] is True:
            moon_env.moon_envprepare(contexts['moon_environment'])
    LOG.info("moon environment prepare!")
42
43
def config_to_result(test_config, test_result):
    """Reshape raw yardstick output into the dashboard result layout.

    Args:
        test_config: per-run config dict; only "tenant_number" is read.
        test_result: parsed yardstick JSON; the data of interest lives at
            result.testcases.moon_resource.tc_data.

    Returns:
        A dict with a fixed "testcase" label and a "test_body" list holding
        one {"tenant_number", "max_user"} entry per tc_data record.
    """
    tc_data = test_result["result"]["testcases"]["moon_resource"]["tc_data"]
    tenant_count = int(test_config["tenant_number"])
    body = [
        {"tenant_number": tenant_count,
         "max_user": record["data"]["max_user"]}
        for record in tc_data
    ]
    return {"testcase": "posca_factor_moon_resources",
            "test_body": body}
56
57
def testcase_parser(runner_conf, out_file="yardstick.out", **parameter_info):
    """Assemble the yardstick CLI command for this testcase.

    Args:
        runner_conf: runner section of the config; supplies the testcase
            directory and file name inside the yardstick repo.
        out_file: path (inside the container) for yardstick's JSON output.
        **parameter_info: extra task parameters forwarded to yardstick.

    Returns:
        The command string produced by the yardstick command parser.
    """
    yaml_path = ("/home/opnfv/repos/yardstick/"
                 + "/".join([runner_conf["yardstick_test_dir"],
                             runner_conf["yardstick_testcase"]])
                 + ".yaml")
    return yardstick_task.yardstick_command_parser(debug=runner_DEBUG,
                                                   cidr=yaml_path,
                                                   outfile=out_file,
                                                   parameter=parameter_info)
67
68
def do_test(runner_conf, test_config, Use_Dashboard, context_conf):
    """Execute one yardstick run inside the yardstick container and
    collect its result.

    Builds the yardstick command, runs it via docker exec, then polls the
    shared output file until yardstick reports success or failure, and
    finally reshapes the data (optionally pushing it to the dashboard).

    Args:
        runner_conf: runner section of the config (testcase location).
        test_config: per-tenant case parameters passed to yardstick.
        Use_Dashboard: when True, push the reshaped result to ELK.
        context_conf: contexts section of the config (dashboard target).

    Returns:
        The reshaped result dict from config_to_result().
    """
    yardstick_container = docker_env.yardstick_info['container']
    # Unique output file per run so parallel/successive runs don't collide.
    out_file = ("/tmp/yardstick_" + str(uuid.uuid4()) + ".out")
    cmd = testcase_parser(runner_conf, out_file=out_file, **test_config)
    print(cmd)
    stdout = docker_env.docker_exec_cmd(yardstick_container, cmd)
    LOG.info(stdout)
    # Poll the output file for up to ~120 s (60 iterations x 2 s sleep).
    # NOTE(review): assumes out_file exists and holds valid JSON by the
    # first poll — presumably docker_exec_cmd blocks until yardstick has
    # written it; confirm, otherwise open()/json.load can raise here.
    loop_value = 0
    while loop_value < 60:
        time.sleep(2)
        loop_value = loop_value + 1
        with open(out_file) as f:
            data = json.load(f)
            # yardstick status convention: 1 = success, 2 = error.
            if data["status"] == 1:
                LOG.info("yardstick run success")
                break
            elif data["status"] == 2:
                LOG.error("yardstick error exit")
                # Aborts the whole process on yardstick failure.
                exit()

    save_data = config_to_result(test_config, data)
    if Use_Dashboard is True:
        print("use dashboard")
        DashBoard.dashboard_send_data(context_conf, save_data)
    return save_data
94
95
def run(test_config):
    """Entry point for the Moon resources testcase.

    Prepares the Moon environment, resolves the yardstick/dashboard IPs,
    then runs one yardstick pass per configured tenant count and collects
    the results.

    Args:
        test_config: full parsed testcase config with "load_manager" and
            "contexts" sections.

    Returns:
        List of reshaped result dicts, one per tenant count.
    """
    load_config = test_config["load_manager"]
    scenarios_conf = load_config["scenarios"]
    runner_conf = load_config["runners"]
    contexts_conf = test_config["contexts"]
    Use_Dashboard = False
    env_pre(test_config)
    # BUG FIX: the resolved IP must be written back into
    # test_config["contexts"]; the original assigned into
    # load_config["contexts"], but load_config is the "load_manager"
    # section (scenarios/runners only), so that raised KeyError whenever
    # yardstick_ip was unset.
    if test_config["contexts"]["yardstick_ip"] is None:
        test_config["contexts"]["yardstick_ip"] = \
            conf_parser.ip_parser("yardstick_test_ip")

    if "dashboard" in test_config["contexts"].keys():
        if test_config["contexts"]["dashboard_ip"] is None:
            test_config["contexts"]["dashboard_ip"] = \
                conf_parser.ip_parser("dashboard")
        LOG.info("Create Dashboard data")
        Use_Dashboard = True
        DashBoard.posca_moon_init(test_config["contexts"])

    tenants_conf = conf_parser.str_to_list(scenarios_conf["tenants"])
    subject_number = int(scenarios_conf["subject_number"])
    object_number = int(scenarios_conf["object_number"])
    timeout = scenarios_conf["timeout"]
    consul_host = contexts_conf["moon_environment"]["ip"]
    consul_port = contexts_conf["moon_environment"]["consul_port"]

    load_config["result_file"] = os.path.dirname(
        os.path.abspath(__file__)) + "/test_case/result"

    result = []

    for tenants in tenants_conf:
        # Parenthesized print: identical behavior on Python 2, valid on
        # Python 3, and consistent with the other print calls in this file.
        print(tenants)
        case_config = {"tenant_number": tenants,
                       "subject_number": subject_number,
                       "object_number": object_number,
                       "timeout": timeout,
                       "consul_host": consul_host,
                       "consul_port": consul_port}

        data_reply = do_test(runner_conf, case_config,
                             Use_Dashboard, test_config["contexts"])
        result.append(data_reply)

    LOG.info("Finished bottlenecks testcase")
    LOG.info("The result data is %s", result)
    return result