3 # Copyright (c) 2017 All rights reserved
4 # This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
8 # http://www.apache.org/licenses/LICENSE-2.0
16 from functest.utils import openstack_utils as os_utils
17 from multiprocessing import Process, Manager, Lock
18 from sdnvpn.lib import config as sdnvpn_config
19 from sdnvpn.lib import utils as test_utils
20 from sdnvpn.lib.results import Results
# Module-level logger. Use the __name__ variable (the module's dotted import
# path), NOT the literal string '__name__' — the original created a logger
# literally named "__name__", so records were misattributed and logger-level
# configuration keyed on the module path never matched.
logger = logging.getLogger(__name__)

# Shared configuration for all sdnvpn functest cases, plus the settings
# specific to this test case (instance/network/security-group names, etc.).
COMMON_CONFIG = sdnvpn_config.CommonConfig()
TESTCASE_CONFIG = sdnvpn_config.TestcaseConfig(
    'sdnvpn.test.functest.testcase_10')
def monitor(in_data, out_data, vm):
    """Poll *vm*'s console log and record ping failures into *out_data*.

    Runs in a separate multiprocessing.Process; loops until the parent sets
    in_data["stop_thread"] to True. Console lines matching ``ping.*KO`` are
    treated as ping failures; the first one is stored in
    out_data["error_msg"] for the parent to report.

    NOTE(review): several original lines are missing from this chunk (the
    initialisation of ``lines_offset``, the enclosing try/except, the
    continuations of the error-message and log-format expressions, and the
    sleep between polls) — confirm against the full file; the fragment below
    is NOT syntactically complete as shown.
    """
    # At the beginning of ping we might have some
    # failures, so we ignore the first 10 pings
    while in_data["stop_thread"] is False:
        vm_console_out_lines = vm.get_console_output().split('\n')
        # Only examine console lines added since the previous poll;
        # lines_offset is presumably initialised to 0 before the loop —
        # TODO confirm (the assignment is outside this chunk).
        if lines_offset < len(vm_console_out_lines):
            for console_line in vm_console_out_lines[lines_offset:-1]:
                is_ping_error = re.match(r'ping.*KO', console_line)
                # Only the FIRST failure is recorded, so the summary
                # message stays short.
                if is_ping_error and out_data["error_msg"] == "":
                    out_data["error_msg"] += ("Ping failure from "
                    # Atomic write to std out
                        logging.error("Failure during ping from "
                                      format(vm.name, console_line))
                elif re.match(r'ping.*OK', console_line):
                    # Atomic write to std out
                    logging.info("Ping from instance {}: {}".
                                 format(vm.name, console_line))
            # Remember how far we have read so the next poll skips it.
            lines_offset = len(vm_console_out_lines)
        # NOTE(review): the lines below appear to belong to an except
        # clause whose 'except' line is outside this chunk — confirm.
        # Atomic write to std out
        logging.error("Failure in monitor_thread of instance {}".
        # Return to main process
# ---------------------------------------------------------------------------
# Body of the main routine for testcase_10: create three pinging VMs spread
# over two compute nodes, monitor their ping output, delete vm_2 mid-test,
# create vm_4, and verify connectivity survives the churn.
# NOTE(review): the enclosing "def main(...):" header and a number of other
# original lines (try/except scaffolding, several trailing call arguments,
# the thread .start()/.join() calls) are outside this chunk — the fragment
# below is NOT syntactically complete as shown; confirm against the full
# file before restructuring.
# ---------------------------------------------------------------------------
# Result table used to build the functest summary report.
results = Results(COMMON_CONFIG.line_length)

results.add_to_summary(0, "=")
results.add_to_summary(2, "STATUS", "SUBTEST")
results.add_to_summary(0, "=")

# OpenStack API clients used throughout the test.
nova_client = os_utils.get_nova_client()
neutron_client = os_utils.get_neutron_client()
glance_client = os_utils.get_glance_client()

# Bookkeeping lists for every created resource, consumed by the cleanup
# helpers at the end so nothing leaks between test runs.
(floatingip_ids, instance_ids, router_ids, network_ids, image_ids,
 subnet_ids, interfaces, bgpvpn_ids) = ([] for i in range(8))

image_id = os_utils.create_glance_image(glance_client,
                                        TESTCASE_CONFIG.image_name,
                                        COMMON_CONFIG.image_path,
                                        disk=COMMON_CONFIG.image_format,
image_ids.append(image_id)

network_1_id = test_utils.create_net(neutron_client,
                                     TESTCASE_CONFIG.net_1_name)
subnet_1_id = test_utils.create_subnet(neutron_client,
                                       TESTCASE_CONFIG.subnet_1_name,
                                       TESTCASE_CONFIG.subnet_1_cidr,
network_ids.append(network_1_id)
subnet_ids.append(subnet_1_id)

sg_id = os_utils.create_security_group_full(neutron_client,
                                            TESTCASE_CONFIG.secgroup_name,
                                            TESTCASE_CONFIG.secgroup_descr)

# The scenario needs at least two compute nodes: vm_1/vm_2/vm_4 are pinned
# to the first, vm_3 to the second.
compute_nodes = test_utils.assert_and_get_compute_nodes(nova_client)
av_zone_1 = "nova:" + compute_nodes[0]
av_zone_2 = "nova:" + compute_nodes[1]

vm_2 = test_utils.create_instance(
    TESTCASE_CONFIG.instance_2_name,
    secgroup_name=TESTCASE_CONFIG.secgroup_name,
    compute_node=av_zone_1)
vm2_ip = test_utils.get_instance_ip(vm_2)

# Userdata scripts make each instance continuously ping its peers; the
# monitor processes scrape the results from the console log.
u1 = test_utils.generate_ping_userdata([vm2_ip], 1)
vm_1 = test_utils.create_instance(
    TESTCASE_CONFIG.instance_1_name,
    secgroup_name=TESTCASE_CONFIG.secgroup_name,
    compute_node=av_zone_1,
vm1_ip = test_utils.get_instance_ip(vm_1)

u3 = test_utils.generate_ping_userdata([vm1_ip, vm2_ip], 1)
vm_3 = test_utils.create_instance(
    TESTCASE_CONFIG.instance_3_name,
    secgroup_name=TESTCASE_CONFIG.secgroup_name,
    compute_node=av_zone_2,
vm3_ip = test_utils.get_instance_ip(vm_3)

# We do not put vm_2 id in instance_ids table because we will
# delete the current instance during the testing process
instance_ids.extend([vm_1.id, vm_3.id])

# Wait for VMs to get ips.
instances_up = test_utils.wait_for_instances_up(vm_2)
instances_dhcp_up = test_utils.wait_for_instances_get_dhcp(vm_1, vm_3)

if (not instances_up or not instances_dhcp_up):
    logger.error("One or more instances are down")
    # TODO: Handle this appropriately

# Create monitor threads to monitor traffic between vm_1, vm_2 and vm_3
# ('m' is presumably a multiprocessing.Manager created outside this
# chunk — TODO confirm; the shared dicts let the child processes report
# back to this parent process.)
monitor_input1 = m.dict()
monitor_output1 = m.dict()
monitor_input1["stop_thread"] = False
monitor_output1["error_msg"] = ""
monitor_thread1 = Process(target=monitor, args=(monitor_input1,
                                                monitor_output1, vm_1,))
monitor_input2 = m.dict()
monitor_output2 = m.dict()
monitor_input2["stop_thread"] = False
monitor_output2["error_msg"] = ""
monitor_thread2 = Process(target=monitor, args=(monitor_input2,
                                                monitor_output2, vm_2,))
monitor_input3 = m.dict()
monitor_output3 = m.dict()
monitor_input3["stop_thread"] = False
monitor_output3["error_msg"] = ""
monitor_thread3 = Process(target=monitor, args=(monitor_input3,
                                                monitor_output3, vm_3,))
# Lists of all monitor threads and their inputs and outputs.
threads = [monitor_thread1, monitor_thread2, monitor_thread3]
thread_inputs = [monitor_input1, monitor_input2, monitor_input3]
thread_outputs = [monitor_output1, monitor_output2, monitor_output3]

logging.info("Starting all monitor threads")
# Start all monitor threads
# NOTE(review): the loop body (presumably thread.start()) is outside
# this chunk.
for thread in threads:

logging.info("Wait before subtest")
test_utils.wait_before_subtest()

# Drain any failure messages reported by the monitors so far; clearing
# error_msg afterwards re-arms each monitor for the next subtest.
for thread_output in thread_outputs:
    if thread_output["error_msg"] != "":
        monitor_err_msg += " ,{}".format(thread_output["error_msg"])
        thread_output["error_msg"] = ""
results.record_action("Check ping status of vm_1, vm_2, and vm_3")
results.add_to_summary(0, "-")
if len(monitor_err_msg) == 0:
    results.add_success("Ping succeeds")
results.add_failure(monitor_err_msg)

# Stop monitor thread 2 and delete instance vm_2
thread_inputs[1]["stop_thread"] = True
if not os_utils.delete_instance(nova_client, vm_2.id):
    logging.error("Fail to delete vm_2 instance during "
    raise Exception("Fail to delete instance vm_2.")

# Create a new vm (vm_4) on compute 1 node
u4 = test_utils.generate_ping_userdata([vm1_ip, vm3_ip], 1)
vm_4 = test_utils.create_instance(
    TESTCASE_CONFIG.instance_4_name,
    secgroup_name=TESTCASE_CONFIG.secgroup_name,
    compute_node=av_zone_1,
instance_ids.append(vm_4.id)

# Wait for VMs to get ips.
instances_dhcp_up = test_utils.wait_for_instances_get_dhcp(vm_4)
if not instances_dhcp_up:
    logger.error("Instance vm_4 failed to start.")
    # TODO: Handle this appropriately

# Create and start a new monitor thread for vm_4
monitor_input4 = m.dict()
monitor_output4 = m.dict()
monitor_input4["stop_thread"] = False
monitor_output4["error_msg"] = ""
monitor_thread4 = Process(target=monitor, args=(monitor_input4,
threads.append(monitor_thread4)
thread_inputs.append(monitor_input4)
thread_outputs.append(monitor_output4)
logging.info("Starting monitor thread of vm_4")

test_utils.wait_before_subtest()

# Second check: vm_2 is gone, vm_4 has replaced it on compute 1.
for thread_output in thread_outputs:
    if thread_output["error_msg"] != "":
        monitor_err_msg += " ,{}".format(thread_output["error_msg"])
        thread_output["error_msg"] = ""
results.record_action("Check ping status of vm_1, vm_3 and vm_4. "
                      "Instance vm_2 is deleted")
results.add_to_summary(0, "-")
if len(monitor_err_msg) == 0:
    results.add_success("Ping succeeds")
results.add_failure(monitor_err_msg)

# NOTE(review): the matching 'try:' for this except clause is outside
# this chunk.
except Exception as e:
    logger.error("exception occurred while executing testcase_10: %s", e)

# Give a stop signal to all threads
logging.info("Sending stop signal to monitor thread")
for thread_input in thread_inputs:
    thread_input["stop_thread"] = True
# Wait for all threads to stop and return to the main process
# NOTE(review): the loop body (presumably thread.join()) is outside
# this chunk.
for thread in threads:

# Tear down every resource recorded above so repeated runs start clean.
test_utils.cleanup_nova(nova_client, instance_ids)
test_utils.cleanup_glance(glance_client, image_ids)
test_utils.cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids,
                           interfaces, subnet_ids, router_ids,

return results.compile_summary()
if __name__ == '__main__':
    # Configure root logging before running the test case stand-alone.
    logging.basicConfig(level=logging.INFO)
    # NOTE(review): the remainder of this guard (presumably the call into
    # the test entry point) continues past the end of this chunk.