Moon testcase bug fix
[bottlenecks.git] / testsuites / posca / testcase_script / posca_feature_moon_tenants.py
1 #!/usr/bin/env python
2 ##############################################################################
3 # Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
4 #
5 # All rights reserved. This program and the accompanying materials
6 # are made available under the terms of the Apache License, Version 2.0
7 # which accompanies this distribution, and is available at
8 # http://www.apache.org/licenses/LICENSE-2.0
9 ##############################################################################
'''Moon multi-tenants POSCA feature testcase.

Repeatedly spawns yardstick runs (one worker process per simulated
tenant) via run() until one of the spawned runs completes and flips the
shared stop switch, then reports the tenant count reached.  Yardstick is
used as the load-generation tool; results may be pushed to a dashboard.
'''

import os
import time
import uuid
import Queue
import multiprocessing
import utils.logger as log
from utils.parser import Parser as conf_parser
import utils.env_prepare.moon_prepare as moon_env
import utils.infra_setup.runner.docker_env as docker_env

import utils.infra_setup.runner.yardstick as yardstick_task
import testsuites.posca.testcase_dashboard.posca_feature_moon as DashBoard

# --------------------------------------------------
# logging configuration
# --------------------------------------------------
LOG = log.Logger(__name__).getLogger()

# Testcase name derived from this file's basename (extension split off).
testfile = os.path.basename(__file__)
testcase, file_format = os.path.splitext(testfile)
# Run yardstick in debug mode (verbose output).
runner_DEBUG = True
# Cross-process counter shared with the do_test() workers: a worker
# increments it when its yardstick run finishes, which tells the
# spawning loop in run() to stop launching new tenant processes.
manager = multiprocessing.Manager()
switch = manager.Value('tmp', 0)
38
39
def env_pre(test_config):
    """Prepare the Moon environment when the config asks for it.

    Runs moon_envprepare() only if the "contexts" section declares
    moon_monitoring with moon_envpre set to True.
    """
    contexts = test_config["contexts"]
    if "moon_monitoring" in contexts and contexts['moon_envpre'] is True:
        moon_env.moon_envprepare(contexts['moon_environment'])
    LOG.info("yardstick environment prepare!")
46
47
def config_to_result(test_result):
    """Wrap a raw result dict in the report structure sent downstream.

    Returns {"testcase": <name>, "test_body": [test_result]}.
    NOTE(review): the reported name says "factor" although this file is
    the "feature" testcase -- presumably a deliberate dashboard index
    name; verify before changing.
    """
    return {
        "testcase": "posca_factor_moon_tenants",
        "test_body": [test_result],
    }
54
55
def testcase_parser(runner_conf, out_file="yardstick.out", **parameter_info):
    """Build the yardstick CLI command line for the configured testcase.

    The testcase YAML path is assembled from the runner configuration;
    extra keyword arguments are forwarded as task parameters.
    """
    testcase_path = "/home/opnfv/repos/yardstick/{0}/{1}.yaml".format(
        runner_conf["yardstick_test_dir"],
        runner_conf["yardstick_testcase"])
    return yardstick_task.yardstick_command_parser(debug=runner_DEBUG,
                                                   cidr=testcase_path,
                                                   outfile=out_file,
                                                   parameter=parameter_info)
65
66
def do_test(runner_conf, test_config, Use_Dashboard, context_conf):
    """Execute one yardstick run inside the container, then flip the switch.

    Incrementing the shared ``switch`` counter signals run() to stop
    spawning further tenant processes.  Always returns an empty list.
    """
    container = docker_env.yardstick_info['container']
    outfile = "/tmp/yardstick_%s.out" % uuid.uuid4()
    command = testcase_parser(runner_conf, out_file=outfile, **test_config)
    print(command)
    output = docker_env.docker_exec_cmd(container, command)
    LOG.info(output)
    switch.value += 1
    return []
77
78
def run(test_config):
    """Main entry: ramp up tenant worker processes until the switch flips.

    Each round spawns ``tenant_number`` do_test() processes and sleeps
    ``tolerate_time``.  A worker increments the shared ``switch`` when
    its yardstick run completes, ending the ramp-up; the final
    (uncompleted) step is subtracted from the total, and the result is
    wrapped by config_to_result() and optionally sent to the dashboard.
    """
    load_config = test_config["load_manager"]
    scenarios_conf = load_config["scenarios"]
    contexts_conf = test_config["contexts"]
    runner_conf = load_config["runners"]
    Use_Dashboard = False

    env_pre(test_config)
    # BUG FIX: the original assigned into load_config["contexts"], but
    # load_config is the "load_manager" section which has no "contexts"
    # key -- the fallback raised KeyError and never updated the value
    # actually checked here.  Check and assignment must target the same
    # dict.
    if test_config["contexts"]["yardstick_ip"] is None:
        test_config["contexts"]["yardstick_ip"] = \
            conf_parser.ip_parser("yardstick_test_ip")

    if "dashboard" in test_config["contexts"]:
        if test_config["contexts"]["dashboard_ip"] is None:
            test_config["contexts"]["dashboard_ip"] = \
                conf_parser.ip_parser("dashboard")
        LOG.info("Create Dashboard data")
        Use_Dashboard = True
        DashBoard.posca_moon_init(test_config["contexts"])

    subject_number = int(scenarios_conf["subject_number"])
    object_number = int(scenarios_conf["object_number"])
    timeout = scenarios_conf["timeout"]
    consul_host = contexts_conf["moon_environment"]["ip"]
    consul_port = contexts_conf["moon_environment"]["consul_port"]

    initial = scenarios_conf["initial_tenants"]
    threshhold = scenarios_conf["steps_tenants"]
    tolerate_time = scenarios_conf["tolerate_time"]
    case_config = {"subject_number": subject_number,
                   "object_number": object_number,
                   "timeout": timeout,
                   "consul_host": consul_host,
                   "consul_port": consul_port}

    process_queue = Queue.Queue()

    load_config["result_file"] = os.path.dirname(
        os.path.abspath(__file__)) + "/test_case/result"

    result = 0

    # BUG FIX: "initial is 0" compared identity, which only works by the
    # CPython small-int caching accident; equality is what is meant.
    if initial == 0:
        tenant_number = threshhold
    else:
        tenant_number = initial
    while switch.value == 0:
        LOG.info("Start %d process", tenant_number)
        for tenant in range(0, tenant_number):
            process = multiprocessing.Process(target=do_test,
                                              args=(runner_conf,
                                                    case_config,
                                                    Use_Dashboard,
                                                    test_config["contexts"],
                                                    ))
            process.start()
            process_queue.put(process)

        result = result + tenant_number
        tenant_number = threshhold
        time.sleep(tolerate_time)

    # Tear down every worker that is still running.
    while process_queue.qsize():
        process = process_queue.get()
        process.terminate()

    # Same "is" -> "==" fix.  Discard the last step, which did not
    # complete before the switch flipped.
    if result == initial:
        result = 0
    else:
        result = result - threshhold

    testdate = {"tenant_max": result}
    testresult = config_to_result(testdate)
    LOG.info("Finished bottlenecks testcase")
    LOG.info("The result data is %d", result)
    if Use_Dashboard is True:
        print("Use Dashboard")
        DashBoard.dashboard_send_data(test_config["contexts"], testresult)

    return testresult
158     return testresult