3 # Copyright (c) 2017 All rights reserved
4 # This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
8 # http://www.apache.org/licenses/LICENSE-2.0
16 from multiprocessing import Process, Manager, Lock
17 from sdnvpn.lib import config as sdnvpn_config
18 from sdnvpn.lib import openstack_utils as os_utils
19 from sdnvpn.lib import utils as test_utils
20 from sdnvpn.lib.results import Results
# Module logger.  Bug fix: the original passed the literal string
# '__name__' to getLogger(), which creates a logger named "__name__"
# rather than one named after this module; pass the dunder unquoted so
# log records are attributed to sdnvpn.test.functest.testcase_10.
logger = logging.getLogger(__name__)

# Shared functest configuration plus the per-testcase section for
# testcase_10 (instance names, networks, security group, etc.).
COMMON_CONFIG = sdnvpn_config.CommonConfig()
TESTCASE_CONFIG = sdnvpn_config.TestcaseConfig(
    'sdnvpn.test.functest.testcase_10')
def monitor(conn, in_data, out_data, vm):
    """Watch *vm*'s console output for ping results (runs in a child process).

    Polls the instance console through the compute API until the parent
    sets ``in_data["stop_thread"]`` to True.  Each new console line is
    matched against the ``ping ... OK`` / ``ping ... KO`` markers printed
    by the ping userdata script; the first failure observed is reported
    back through ``out_data["error_msg"]``.

    :param conn: openstack connection (only ``conn.compute`` is used)
    :param in_data: managed dict read by this process;
        ``in_data["stop_thread"]`` is the parent's stop signal
    :param out_data: managed dict written by this process;
        ``out_data["error_msg"]`` carries the first ping failure, if any
    :param vm: server object whose console output is monitored
    """
    import time  # local import keeps the child-process code self-contained

    # At the beginning of ping we might have some
    # failures, so we ignore the first 10 pings
    # (fix: lines_offset was read below without ever being initialized).
    # NOTE(review): exact initial skip count to use is a judgement call —
    # confirm against the userdata script's console output.
    lines_offset = 10
    while in_data["stop_thread"] is False:
        # Fix: wrap the poll in try/except so one transient API failure
        # does not kill the monitor; the failure is logged and we retry.
        try:
            time.sleep(1)  # fix: avoid hammering the compute API in a hot loop
            vm_console_out_lines = conn.compute.\
                get_server_console_output(vm)['output'].split('\n')
            if lines_offset < len(vm_console_out_lines):
                # Scan only the lines added since the previous poll.
                for console_line in vm_console_out_lines[lines_offset:-1]:
                    is_ping_error = re.match(r'ping.*KO', console_line)
                    if is_ping_error and out_data["error_msg"] == "":
                        # Record only the first failure; later ones are noise.
                        out_data["error_msg"] += ("Ping failure from "
                                                  "instance {}".
                                                  format(vm.name))
                        # Atomic write to std out
                        logger.error("Failure during ping from "
                                     "instance {}: {}".
                                     format(vm.name, console_line))
                    elif re.match(r'ping.*OK', console_line):
                        # Atomic write to std out
                        logger.info("Ping from instance {}: {}".
                                    format(vm.name, console_line))
                lines_offset = len(vm_console_out_lines)
        except Exception:
            # Atomic write to std out
            logger.error("Failure in monitor_thread of instance {}".
                         format(vm.name))
    # Return to main process
    # Interior of the test-case entry point (its "def" line is above this
    # excerpt).  Flow: build image/network/secgroup, boot vm_1/vm_2 on one
    # compute node and vm_3 on another, start per-VM monitor processes that
    # scan console output for ping OK/KO, delete vm_2 mid-test, add vm_4,
    # and compile a pass/fail summary.
    conn = os_utils.get_os_connection()
    results = Results(COMMON_CONFIG.line_length, conn)

    results.add_to_summary(0, "=")
    results.add_to_summary(2, "STATUS", "SUBTEST")
    results.add_to_summary(0, "=")

    neutron_client = os_utils.get_neutron_client()
    # NOTE(review): the connection is created a second time here; the
    # earlier `conn` above looks redundant — confirm and deduplicate.
    conn = os_utils.get_os_connection()

    # Bookkeeping lists for every resource created, consumed by the
    # cleanup_* helpers at the end of the function.
    (floatingip_ids, instance_ids, router_ids, network_ids, image_ids,
     subnet_ids, interfaces, bgpvpn_ids) = ([] for i in range(8))

    # Glance image shared by all instances.
    # NOTE(review): this call appears truncated in the excerpt (no closing
    # arguments/paren visible) — remaining kwargs live on lines not shown.
    image_id = os_utils.create_glance_image(conn,
                                            TESTCASE_CONFIG.image_name,
                                            COMMON_CONFIG.image_path,
                                            disk=COMMON_CONFIG.image_format,
    image_ids.append(image_id)

    # Single tenant network + subnet used by every VM in this test.
    network_1_id = test_utils.create_net(neutron_client,
                                         TESTCASE_CONFIG.net_1_name)
    # NOTE(review): call truncated in the excerpt — trailing args not shown.
    subnet_1_id = test_utils.create_subnet(neutron_client,
                                           TESTCASE_CONFIG.subnet_1_name,
                                           TESTCASE_CONFIG.subnet_1_cidr,
    network_ids.append(network_1_id)
    subnet_ids.append(subnet_1_id)

    # Security group allowing the ping traffic between the instances.
    sg_id = os_utils.create_security_group_full(neutron_client,
                                                TESTCASE_CONFIG.secgroup_name,
                                                TESTCASE_CONFIG.secgroup_descr)

    # Two compute nodes are required: vm_1/vm_2/vm_4 on the first,
    # vm_3 on the second.
    compute_nodes = test_utils.assert_and_get_compute_nodes(conn)
    av_zone_1 = "nova:" + compute_nodes[0]
    av_zone_2 = "nova:" + compute_nodes[1]

    # vm_2 is booted first so its IP can go into vm_1's ping userdata.
    # NOTE(review): create_instance calls in this excerpt are truncated —
    # network/image/userdata arguments live on lines not shown.
    vm_2 = test_utils.create_instance(
        TESTCASE_CONFIG.instance_2_name,
        secgroup_name=TESTCASE_CONFIG.secgroup_name,
        compute_node=av_zone_1)
    vm2_ip = test_utils.get_instance_ip(conn, vm_2)

    # Userdata makes each instance ping its peers and print the
    # 'ping ... OK' / 'ping ... KO' lines that monitor() scans for.
    u1 = test_utils.generate_ping_userdata([vm2_ip])
    vm_1 = test_utils.create_instance(
        TESTCASE_CONFIG.instance_1_name,
        secgroup_name=TESTCASE_CONFIG.secgroup_name,
        compute_node=av_zone_1,
    vm1_ip = test_utils.get_instance_ip(conn, vm_1)

    u3 = test_utils.generate_ping_userdata([vm1_ip, vm2_ip])
    vm_3 = test_utils.create_instance(
        TESTCASE_CONFIG.instance_3_name,
        secgroup_name=TESTCASE_CONFIG.secgroup_name,
        compute_node=av_zone_2,
    vm3_ip = test_utils.get_instance_ip(conn, vm_3)
    # We do not put vm_2 id in instance_ids table because we will
    # delete the current instance during the testing process
    instance_ids.extend([vm_1.id, vm_3.id])

    # Wait for VMs to get ips.
    instances_up = test_utils.wait_for_instances_up(vm_2)
    instances_dhcp_up = test_utils.wait_for_instances_get_dhcp(vm_1, vm_3)

    if (not instances_up or not instances_dhcp_up):
        logger.error("One or more instances are down")
        # TODO: Handle this appropriately

    # Create monitor threads to monitor traffic between vm_1, vm_2 and vm_3
    # One monitor *process* per VM; a Manager dict pair per process carries
    # the stop flag in and the first error message out.
    # NOTE(review): `m` is the multiprocessing Manager — its creation is on
    # a line not shown in this excerpt; confirm it exists above.
    monitor_input1 = m.dict()
    monitor_output1 = m.dict()
    monitor_input1["stop_thread"] = False
    monitor_output1["error_msg"] = ""
    monitor_thread1 = Process(target=monitor, args=(conn, monitor_input1,
                                                    monitor_output1, vm_1,))
    monitor_input2 = m.dict()
    monitor_output2 = m.dict()
    monitor_input2["stop_thread"] = False
    monitor_output2["error_msg"] = ""
    monitor_thread2 = Process(target=monitor, args=(conn, monitor_input2,
                                                    monitor_output2, vm_2,))
    monitor_input3 = m.dict()
    monitor_output3 = m.dict()
    monitor_input3["stop_thread"] = False
    monitor_output3["error_msg"] = ""
    monitor_thread3 = Process(target=monitor, args=(conn, monitor_input3,
                                                    monitor_output3, vm_3,))
    # Lists of all monitor threads and their inputs and outputs.
    threads = [monitor_thread1, monitor_thread2, monitor_thread3]
    thread_inputs = [monitor_input1, monitor_input2, monitor_input3]
    thread_outputs = [monitor_output1, monitor_output2, monitor_output3]

    logger.info("Starting all monitor threads")
    # Start all monitor threads
    # NOTE(review): loop body (thread.start()) is on a line not shown here.
    for thread in threads:
    logger.info("Wait before subtest")
    test_utils.wait_before_subtest()

    # Collect any ping failures reported by the monitors so far.
    # NOTE(review): monitor_err_msg initialization ("") is on a line not
    # shown in this excerpt.
    for thread_output in thread_outputs:
        if thread_output["error_msg"] != "":
            monitor_err_msg += " ,{}".format(thread_output["error_msg"])
            thread_output["error_msg"] = ""
    results.record_action("Check ping status of vm_1, vm_2, and vm_3")
    results.add_to_summary(0, "-")
    if len(monitor_err_msg) == 0:
        results.add_success("Ping succeeds")
    # NOTE(review): the `else:` pairing this branch is on a line not shown.
        results.add_failure(monitor_err_msg)

    # Stop monitor thread 2 and delete instance vm_2
    thread_inputs[1]["stop_thread"] = True
    if not os_utils.delete_instance(conn, vm_2.id):
        # NOTE(review): message continuation line not shown in excerpt.
        logger.error("Fail to delete vm_2 instance during "
        raise Exception("Fail to delete instance vm_2.")
    for thread_input in thread_inputs:
        thread_input["stop_thread"] = True
    # NOTE(review): loop body (thread.join()) is on a line not shown here.
    for thread in threads:

    # Create a new vm (vm_4) on compute 1 node
    u4 = test_utils.generate_ping_userdata([vm1_ip, vm3_ip])
    vm_4 = test_utils.create_instance(
        TESTCASE_CONFIG.instance_4_name,
        secgroup_name=TESTCASE_CONFIG.secgroup_name,
        compute_node=av_zone_1,
    instance_ids.append(vm_4.id)

    # Wait for VMs to get ips.
    instances_dhcp_up = test_utils.wait_for_instances_get_dhcp(vm_4)
    if not instances_dhcp_up:
        logger.error("Instance vm_4 failed to start.")
        # TODO: Handle this appropriately

    # Create and start a new monitor thread for vm_4
    monitor_input4 = m.dict()
    monitor_output4 = m.dict()
    monitor_input4["stop_thread"] = False
    monitor_output4["error_msg"] = ""
    # NOTE(review): args continuation (monitor_output4, vm_4) is on lines
    # not shown in this excerpt.
    monitor_thread4 = Process(target=monitor, args=(conn, monitor_input4,
    threads.append(monitor_thread4)
    thread_inputs.append(monitor_input4)
    thread_outputs.append(monitor_output4)
    logger.info("Starting monitor thread of vm_4")
    test_utils.wait_before_subtest()

    # Second round of checks: vm_2 is gone, vm_4 has joined.
    for thread_output in thread_outputs:
        if thread_output["error_msg"] != "":
            monitor_err_msg += " ,{}".format(thread_output["error_msg"])
            thread_output["error_msg"] = ""
    results.record_action("Check ping status of vm_1, vm_3 and vm_4. "
                          "Instance vm_2 is deleted")
    results.add_to_summary(0, "-")
    if len(monitor_err_msg) == 0:
        results.add_success("Ping succeeds")
    # NOTE(review): the `else:` pairing this branch is on a line not shown.
        results.add_failure(monitor_err_msg)

    # NOTE(review): the matching `try:` for this handler is above this
    # excerpt / on a line not shown.
    except Exception as e:
        logger.error("exception occurred while executing testcase_10: %s", e)

    # Cleanup section (the `finally:` line itself is not shown here).
    # Give a stop signal to all threads
    logger.info("Sending stop signal to monitor thread")
    for thread_input in thread_inputs:
        thread_input["stop_thread"] = True
    # Wait for all threads to stop and return to the main process
    # NOTE(review): loop body (thread.join()) is on a line not shown here.
    for thread in threads:

    # Tear down every resource recorded in the bookkeeping lists.
    test_utils.cleanup_nova(conn, instance_ids)
    test_utils.cleanup_glance(conn, image_ids)
    # NOTE(review): call truncated — trailing args on lines not shown.
    test_utils.cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids,
                               interfaces, subnet_ids, router_ids,

    return results.compile_summary()
272 if __name__ == '__main__':