# Copyright (c) 2017 All rights reserved
# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
#
# http://www.apache.org/licenses/LICENSE-2.0
import logging
import re
import time

from multiprocessing import Process, Manager, Lock

from sdnvpn.lib import config as sdnvpn_config
from sdnvpn.lib import openstack_utils as os_utils
from sdnvpn.lib import utils as test_utils
from sdnvpn.lib.results import Results
# Module logger. NOTE: the original passed the string '__name__' (quoted),
# which names the logger literally "__name__" instead of the module path;
# the unquoted dunder is the intended idiom.
logger = logging.getLogger(__name__)

# Common functest configuration plus the per-testcase section for this test.
COMMON_CONFIG = sdnvpn_config.CommonConfig()
TESTCASE_CONFIG = sdnvpn_config.TestcaseConfig(
    'sdnvpn.test.functest.testcase_10')
def monitor(in_data, out_data, vm):
    """Poll an instance's console log and record ping failures.

    Runs in a child ``Process`` until ``in_data["stop_thread"]`` is set to
    True by the parent. Once a second it fetches the instance console
    output, scans only the lines that appeared since the previous poll for
    the markers emitted by the ping userdata (``ping ... OK`` /
    ``ping ... KO``), and stores the first failure seen in
    ``out_data["error_msg"]``.

    :param in_data: shared (Manager) dict; key "stop_thread" signals us to
                    stop when set True by the main process
    :param out_data: shared (Manager) dict; key "error_msg" receives the
                     first ping-failure description (empty string if none)
    :param vm: instance object providing get_console_output() and .name
    """
    # At the beginning of ping we might have some failures, so we skip the
    # first console lines (assumes ~2 console lines per ping, i.e. the
    # first 10 pings -- TODO confirm against the ping userdata format).
    lines_offset = 20
    while in_data["stop_thread"] is False:
        try:
            time.sleep(1)
            vm_console_out_lines = vm.get_console_output().split('\n')
            if lines_offset < len(vm_console_out_lines):
                # Only scan lines added since the last poll; the final
                # element is excluded because it may be a partial line.
                for console_line in vm_console_out_lines[lines_offset:-1]:
                    is_ping_error = re.match(r'ping.*KO', console_line)
                    if is_ping_error and out_data["error_msg"] == "":
                        # Record only the first failure for the report.
                        out_data["error_msg"] += ("Ping failure from "
                                                  "instance {}".
                                                  format(vm.name))
                        # NOTE(review): upstream appears to have serialized
                        # these writes with a shared lock ("atomic write to
                        # std out"); plain logging calls may interleave
                        # across monitor processes -- confirm if that
                        # matters for log parsing.
                        logger.error("Failure during ping from "
                                     "instance {}: {}".
                                     format(vm.name, console_line))
                    elif re.match(r'ping.*OK', console_line):
                        logger.info("Ping from instance {}: {}".
                                    format(vm.name, console_line))
                lines_offset = len(vm_console_out_lines)
        except Exception:
            # Narrowed from a bare except: keep polling on transient
            # console-fetch errors, but never swallow SystemExit/
            # KeyboardInterrupt.
            logger.error("Failure in monitor_thread of instance {}".
                         format(vm.name))
    # Return to main process
    return
def _new_monitor(manager, vm):
    """Build (but do not start) a monitor Process for *vm*.

    Returns a (process, input_dict, output_dict) tuple; the shared dicts
    carry the stop signal ("stop_thread") and the collected error message
    ("error_msg").
    """
    in_data = manager.dict()
    out_data = manager.dict()
    in_data["stop_thread"] = False
    out_data["error_msg"] = ""
    proc = Process(target=monitor, args=(in_data, out_data, vm,))
    return proc, in_data, out_data


def _collect_monitor_errors(thread_outputs):
    """Drain and concatenate the error messages reported by the monitors.

    Resets each monitor's "error_msg" so the next subtest starts clean.
    Returns the concatenated message ("" when no monitor reported errors).
    """
    err_msg = ""
    for thread_output in thread_outputs:
        if thread_output["error_msg"] != "":
            err_msg += " ,{}".format(thread_output["error_msg"])
            thread_output["error_msg"] = ""
    return err_msg


def main():
    """Check that dataplane connectivity survives instance churn.

    Boots three instances that continuously ping each other, monitors the
    ping results from their console logs, then deletes one instance and
    boots a replacement (vm_4) that pings the survivors, verifying
    connectivity in both phases.

    Returns:
        The compiled Results summary for the testcase.
    """
    results = Results(COMMON_CONFIG.line_length)

    results.add_to_summary(0, "=")
    results.add_to_summary(2, "STATUS", "SUBTEST")
    results.add_to_summary(0, "=")

    nova_client = os_utils.get_nova_client()
    neutron_client = os_utils.get_neutron_client()
    conn = os_utils.get_os_connection()

    # Resource bookkeeping for the cleanup calls in the finally block.
    (floatingip_ids, instance_ids, router_ids, network_ids, image_ids,
     subnet_ids, interfaces, bgpvpn_ids) = ([] for _ in range(8))

    # Created before the try block so the finally clause can iterate them
    # safely even when an exception fires before any monitor exists
    # (the original flow would hit a NameError during cleanup).
    threads = []
    thread_inputs = []
    thread_outputs = []

    try:
        image_id = os_utils.create_glance_image(
            conn,
            TESTCASE_CONFIG.image_name,
            COMMON_CONFIG.image_path,
            disk=COMMON_CONFIG.image_format,
            container="bare",
            public='public')
        image_ids.append(image_id)

        network_1_id = test_utils.create_net(neutron_client,
                                             TESTCASE_CONFIG.net_1_name)
        subnet_1_id = test_utils.create_subnet(neutron_client,
                                               TESTCASE_CONFIG.subnet_1_name,
                                               TESTCASE_CONFIG.subnet_1_cidr,
                                               network_1_id)
        network_ids.append(network_1_id)
        subnet_ids.append(subnet_1_id)

        sg_id = os_utils.create_security_group_full(
            neutron_client,
            TESTCASE_CONFIG.secgroup_name,
            TESTCASE_CONFIG.secgroup_descr)

        compute_nodes = test_utils.assert_and_get_compute_nodes(nova_client)
        av_zone_1 = "nova:" + compute_nodes[0]
        av_zone_2 = "nova:" + compute_nodes[1]

        # Boot vm_2 first so its IP can be fed into vm_1's ping userdata.
        vm_2 = test_utils.create_instance(
            nova_client,
            TESTCASE_CONFIG.instance_2_name,
            image_id,
            network_1_id,
            sg_id,
            secgroup_name=TESTCASE_CONFIG.secgroup_name,
            compute_node=av_zone_1)
        vm2_ip = test_utils.get_instance_ip(vm_2)

        # vm_1 pings vm_2.
        u1 = test_utils.generate_ping_userdata([vm2_ip])
        vm_1 = test_utils.create_instance(
            nova_client,
            TESTCASE_CONFIG.instance_1_name,
            image_id,
            network_1_id,
            sg_id,
            secgroup_name=TESTCASE_CONFIG.secgroup_name,
            compute_node=av_zone_1,
            userdata=u1)
        vm1_ip = test_utils.get_instance_ip(vm_1)

        # vm_3 (on the second compute node) pings both vm_1 and vm_2.
        u3 = test_utils.generate_ping_userdata([vm1_ip, vm2_ip])
        vm_3 = test_utils.create_instance(
            nova_client,
            TESTCASE_CONFIG.instance_3_name,
            image_id,
            network_1_id,
            sg_id,
            secgroup_name=TESTCASE_CONFIG.secgroup_name,
            compute_node=av_zone_2,
            userdata=u3)
        vm3_ip = test_utils.get_instance_ip(vm_3)
        # We do not put vm_2 id in instance_ids table because we will
        # delete the current instance during the testing process
        instance_ids.extend([vm_1.id, vm_3.id])

        # Wait for VMs to get ips.
        instances_up = test_utils.wait_for_instances_up(vm_2)
        instances_dhcp_up = test_utils.wait_for_instances_get_dhcp(vm_1, vm_3)

        if not instances_up or not instances_dhcp_up:
            logger.error("One or more instances are down")
            # TODO: Handle this appropriately

        # Create monitor processes for vm_1, vm_2 and vm_3; the Manager
        # dicts are the shared channel between main and the monitors.
        m = Manager()
        for vm in (vm_1, vm_2, vm_3):
            proc, in_data, out_data = _new_monitor(m, vm)
            threads.append(proc)
            thread_inputs.append(in_data)
            thread_outputs.append(out_data)

        logger.info("Starting all monitor threads")
        for thread in threads:
            thread.start()
        logger.info("Wait before subtest")
        test_utils.wait_before_subtest()

        # Subtest 1: all three instances ping successfully.
        monitor_err_msg = _collect_monitor_errors(thread_outputs)
        results.record_action("Check ping status of vm_1, vm_2, and vm_3")
        results.add_to_summary(0, "-")
        if len(monitor_err_msg) == 0:
            results.add_success("Ping succeeds")
        else:
            results.add_failure(monitor_err_msg)

        # Stop monitor thread 2 and delete instance vm_2
        thread_inputs[1]["stop_thread"] = True
        if not os_utils.delete_instance(nova_client, vm_2.id):
            logger.error("Fail to delete vm_2 instance during "
                         "testing process")
            raise Exception("Fail to delete instance vm_2.")
        # Stop the remaining monitors of phase 1; vm_4's monitor (which
        # pings vm_1 and vm_3) covers the second subtest on its own.
        for thread_input in thread_inputs:
            thread_input["stop_thread"] = True
        for thread in threads:
            thread.join()

        # Create a new vm (vm_4) on compute 1 node
        u4 = test_utils.generate_ping_userdata([vm1_ip, vm3_ip])
        vm_4 = test_utils.create_instance(
            nova_client,
            TESTCASE_CONFIG.instance_4_name,
            image_id,
            network_1_id,
            sg_id,
            secgroup_name=TESTCASE_CONFIG.secgroup_name,
            compute_node=av_zone_1,
            userdata=u4)
        instance_ids.append(vm_4.id)

        # Wait for VMs to get ips.
        instances_dhcp_up = test_utils.wait_for_instances_get_dhcp(vm_4)
        if not instances_dhcp_up:
            logger.error("Instance vm_4 failed to start.")
            # TODO: Handle this appropriately

        # Create and start a new monitor thread for vm_4
        proc4, in_data4, out_data4 = _new_monitor(m, vm_4)
        threads.append(proc4)
        thread_inputs.append(in_data4)
        thread_outputs.append(out_data4)
        logger.info("Starting monitor thread of vm_4")
        proc4.start()
        test_utils.wait_before_subtest()

        # Subtest 2: vm_4 pings vm_1 and vm_3 after vm_2's deletion.
        monitor_err_msg = _collect_monitor_errors(thread_outputs)
        results.record_action("Check ping status of vm_1, vm_3 and vm_4. "
                              "Instance vm_2 is deleted")
        results.add_to_summary(0, "-")
        if len(monitor_err_msg) == 0:
            results.add_success("Ping succeeds")
        else:
            results.add_failure(monitor_err_msg)

    except Exception as e:
        logger.error("exception occurred while executing testcase_10: %s", e)
        raise
    finally:
        # Give a stop signal to all threads
        logger.info("Sending stop signal to monitor thread")
        for thread_input in thread_inputs:
            thread_input["stop_thread"] = True
        # Wait for all threads to stop and return to the main process
        for thread in threads:
            thread.join()

        test_utils.cleanup_nova(nova_client, instance_ids)
        test_utils.cleanup_glance(conn, image_ids)
        test_utils.cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids,
                                   interfaces, subnet_ids, router_ids,
                                   network_ids)

    return results.compile_summary()
271 if __name__ == '__main__':