3 # Copyright (c) 2017 All rights reserved
4 # This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
8 # http://www.apache.org/licenses/LICENSE-2.0
import argparse
import logging
import re
from multiprocessing import Process, Manager, Lock

from functest.utils import openstack_utils as os_utils
from sdnvpn.lib import config as sdnvpn_config
from sdnvpn.lib import utils as test_utils
from sdnvpn.lib.results import Results
# Command-line interface: "-r/--report" toggles creation of a json result
# file (boolean flag, hence store_true).
parser = argparse.ArgumentParser()
parser.add_argument("-r", "--report",
                    help="Create json result file",
                    action="store_true")
args = parser.parse_args()

# Module-level logger.  NOTE: __name__ must NOT be quoted — the original
# 'logging.getLogger('__name__')' registered a logger literally named
# "__name__" instead of this module's dotted path, breaking hierarchical
# logging configuration.
logger = logging.getLogger(__name__)

# Shared framework configuration plus the per-testcase section for this test.
COMMON_CONFIG = sdnvpn_config.CommonConfig()
TESTCASE_CONFIG = sdnvpn_config.TestcaseConfig(
    'sdnvpn.test.functest.testcase_10')
def monitor(in_data, out_data, vm):
    """Watch a VM's console output for ping OK/KO result lines.

    Intended to run in a child ``multiprocessing.Process``.  Polls
    ``vm.get_console_output()`` until the parent sets
    ``in_data["stop_thread"]`` to True, scanning each batch of new console
    lines for ping results and recording the first failure seen.

    :param in_data: shared dict; key "stop_thread" signals shutdown
    :param out_data: shared dict; key "error_msg" receives the first
                     ping-failure message (left untouched afterwards)
    :param vm: instance object exposing ``get_console_output()`` and ``name``
    """
    # At the beginning of ping we might have some failures, so we ignore
    # the first 10 pings by skipping the initial console lines.
    # (assumes ~2 console lines per ping — TODO confirm offset)
    lines_offset = 20
    while in_data["stop_thread"] is False:
        try:
            vm_console_out_lines = vm.get_console_output().split('\n')
            if lines_offset < len(vm_console_out_lines):
                # Scan only lines not seen yet; the last element is skipped
                # because split('\n') may leave a partial line there.
                for console_line in vm_console_out_lines[lines_offset:-1]:
                    is_ping_error = re.match(r'ping.*KO', console_line)
                    if is_ping_error and out_data["error_msg"] == "":
                        # Record only the first failure for the summary.
                        out_data["error_msg"] += ("Ping failure from "
                                                  "instance {}".
                                                  format(vm.name))
                        logging.error("Failure during ping from "
                                      "instance {}: {}".
                                      format(vm.name, console_line))
                    elif re.match(r'ping.*OK', console_line):
                        logging.info("Ping from instance {}: {}".
                                     format(vm.name, console_line))
                lines_offset = len(vm_console_out_lines)
        except Exception:
            # Console may be transiently unavailable; log and keep polling.
            logging.error("Failure in monitor_thread of instance {}".
                          format(vm.name))
    # Return to main process
    return
# NOTE(review): the lines below are the interior of this testcase's main
# driver function — its "def" line, the surrounding try/finally scaffolding,
# and several statements (closing argument lists, "m = Manager()",
# "thread.start()"/"thread.join()", "monitor_err_msg = """ initialisers,
# "if not instances_up:" guards, "else:" branches) are missing from this
# extraction.  Code is kept byte-for-byte; only comments are added and the
# truncation points are flagged, not reconstructed.

# Result table header for the subtest summary.
results = Results(COMMON_CONFIG.line_length)
results.add_to_summary(0, "=")
results.add_to_summary(2, "STATUS", "SUBTEST")
results.add_to_summary(0, "=")
# OpenStack API clients used throughout the test.
nova_client = os_utils.get_nova_client()
neutron_client = os_utils.get_neutron_client()
glance_client = os_utils.get_glance_client()
# Bookkeeping lists of every created resource so cleanup can delete them.
(floatingip_ids, instance_ids, router_ids, network_ids, image_ids,
 subnet_ids, interfaces, bgpvpn_ids) = ([] for i in range(8))
# Upload the test image.
# NOTE(review): call truncated — trailing arguments / closing paren missing.
image_id = os_utils.create_glance_image(glance_client,
                                        TESTCASE_CONFIG.image_name,
                                        COMMON_CONFIG.image_path,
                                        disk=COMMON_CONFIG.image_format,
image_ids.append(image_id)
# Network and subnet the instances attach to.
network_1_id = test_utils.create_net(neutron_client,
                                     TESTCASE_CONFIG.net_1_name)
# NOTE(review): create_subnet call truncated — closing paren missing.
subnet_1_id = test_utils.create_subnet(neutron_client,
                                       TESTCASE_CONFIG.subnet_1_name,
                                       TESTCASE_CONFIG.subnet_1_cidr,
network_ids.append(network_1_id)
subnet_ids.append(subnet_1_id)
# Security group that permits the ping traffic between instances.
sg_id = os_utils.create_security_group_full(neutron_client,
                                            TESTCASE_CONFIG.secgroup_name,
                                            TESTCASE_CONFIG.secgroup_descr)
# Two compute nodes required: vm_1/vm_2 (and later vm_4) land on the
# first, vm_3 on the second.
compute_nodes = test_utils.assert_and_get_compute_nodes(nova_client)
av_zone_1 = "nova:" + compute_nodes[0]
av_zone_2 = "nova:" + compute_nodes[1]
# Boot vm_2 first so its IP can be fed into the other VMs' ping userdata.
# NOTE(review): create_instance calls below are truncated (image/network
# arguments missing from this extraction).
vm_2 = test_utils.create_instance(
TESTCASE_CONFIG.instance_2_name,
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_1)
vm2_ip = test_utils.get_instance_ip(vm_2)
# vm_1 pings vm_2 via cloud-init userdata.
u1 = test_utils.generate_ping_userdata([vm2_ip], 1)
vm_1 = test_utils.create_instance(
TESTCASE_CONFIG.instance_1_name,
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_1,
vm1_ip = test_utils.get_instance_ip(vm_1)
# vm_3 (on the second compute node) pings both vm_1 and vm_2.
u3 = test_utils.generate_ping_userdata([vm1_ip, vm2_ip], 1)
vm_3 = test_utils.create_instance(
TESTCASE_CONFIG.instance_3_name,
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_2,
vm3_ip = test_utils.get_instance_ip(vm_3)
# We do not put vm_2 id in instance_ids table because we will
# delete the current instance during the testing process
instance_ids.extend([vm_1.id, vm_3.id])
# Wait for VMs to get ips.
# NOTE(review): call truncated; the "if not instances_up:" guard before the
# error log below is missing from this extraction.
instances_up = test_utils.wait_for_instances_up(vm_1, vm_2,
logger.error("One or more instances is down")
# TODO: Handle this appropriately
# Create monitor threads to monitor traffic between vm_1, vm_2 and vm_3
# NOTE(review): "m" is presumably a multiprocessing.Manager() created on a
# missing line — verify against the full source.
monitor_input1 = m.dict()
monitor_output1 = m.dict()
monitor_input1["stop_thread"] = False
monitor_output1["error_msg"] = ""
monitor_thread1 = Process(target=monitor, args=(monitor_input1,
monitor_output1, vm_1,))
monitor_input2 = m.dict()
monitor_output2 = m.dict()
monitor_input2["stop_thread"] = False
monitor_output2["error_msg"] = ""
monitor_thread2 = Process(target=monitor, args=(monitor_input2,
monitor_output2, vm_2,))
monitor_input3 = m.dict()
monitor_output3 = m.dict()
monitor_input3["stop_thread"] = False
monitor_output3["error_msg"] = ""
monitor_thread3 = Process(target=monitor, args=(monitor_input3,
monitor_output3, vm_3,))
# Lists of all monitor threads and their inputs and outputs.
threads = [monitor_thread1, monitor_thread2, monitor_thread3]
thread_inputs = [monitor_input1, monitor_input2, monitor_input3]
thread_outputs = [monitor_output1, monitor_output2, monitor_output3]
logging.info("Starting all monitor threads")
# Start all monitor threads
# NOTE(review): the loop body ("thread.start()") is missing here.
for thread in threads:
logging.info("Wait before subtest")
test_utils.wait_before_subtest()
# Subtest 1: collect any ping failures reported by the monitors.
# NOTE(review): the "monitor_err_msg = """ initialiser is missing here.
for thread_output in thread_outputs:
if thread_output["error_msg"] != "":
monitor_err_msg += " ,{}".format(thread_output["error_msg"])
thread_output["error_msg"] = ""
results.record_action("Check ping status of vm_1, vm_2, and vm_3")
results.add_to_summary(0, "-")
# NOTE(review): the "else:" between success and failure is missing here.
if len(monitor_err_msg) == 0:
results.add_success("Ping succeeds")
results.add_failure(monitor_err_msg)
# Stop monitor thread 2 and delete instance vm_2
thread_inputs[1]["stop_thread"] = True
if not os_utils.delete_instance(nova_client, vm_2.id):
logging.error("Fail to delete vm_2 instance during "
raise Exception("Fail to delete instance vm_2.")
# Create a new vm (vm_4) on compute 1 node
u4 = test_utils.generate_ping_userdata([vm1_ip, vm3_ip], 1)
vm_4 = test_utils.create_instance(
TESTCASE_CONFIG.instance_4_name,
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_1,
instance_ids.append(vm_4.id)
# Wait for VMs to get ips.
# NOTE(review): the "if not instances_up:" guard is missing here.
instances_up = test_utils.wait_for_instances_up(vm_4)
logger.error("Instance vm_4 failed to start.")
# TODO: Handle this appropriately
# Create and start a new monitor thread for vm_4
monitor_input4 = m.dict()
monitor_output4 = m.dict()
monitor_input4["stop_thread"] = False
monitor_output4["error_msg"] = ""
monitor_thread4 = Process(target=monitor, args=(monitor_input4,
threads.append(monitor_thread4)
thread_inputs.append(monitor_input4)
thread_outputs.append(monitor_output4)
logging.info("Starting monitor thread of vm_4")
test_utils.wait_before_subtest()
# Subtest 2: same check with vm_2 gone and vm_4 in its place.
for thread_output in thread_outputs:
if thread_output["error_msg"] != "":
monitor_err_msg += " ,{}".format(thread_output["error_msg"])
thread_output["error_msg"] = ""
results.record_action("Check ping status of vm_1, vm_3 and vm_4. "
"Instance vm_2 is deleted")
results.add_to_summary(0, "-")
if len(monitor_err_msg) == 0:
results.add_success("Ping succeeds")
results.add_failure(monitor_err_msg)
# NOTE(review): the enclosing "try:" for this handler is missing here.
except Exception as e:
logger.error("exception occurred while executing testcase_10: %s", e)
# Teardown (presumably in a missing "finally:" block — verify).
# Give a stop signal to all threads
logging.info("Sending stop signal to monitor thread")
for thread_input in thread_inputs:
thread_input["stop_thread"] = True
# Wait for all threads to stop and return to the main process
# NOTE(review): the loop body ("thread.join()") is missing here.
for thread in threads:
test_utils.cleanup_nova(nova_client, instance_ids, image_ids)
test_utils.cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids,
interfaces, subnet_ids, router_ids,
return results.compile_summary()
# Script entry point: configure root logging for standalone runs.
# NOTE(review): entry point appears truncated — the call into the test
# driver (e.g. sys.exit(main())) is missing from this extraction.
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)