# Make sdnvpn logging proper
# [sdnvpn.git] / sdnvpn / test / functest / testcase_10.py
1 #!/usr/bin/env python
2 #
3 # Copyright (c) 2017 All rights reserved
4 # This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10
11 import re
12 import sys
13 import time
14
15 from multiprocessing import Process, Manager, Lock
16 from sdnvpn.lib import config as sdnvpn_config
17 from sdnvpn.lib import openstack_utils as os_utils
18 from sdnvpn.lib import utils as test_utils
19 from sdnvpn.lib.results import Results
20 from sdnvpn.lib import logutil
21
22
# Use the module's __name__ (not the literal string '__name__') so the
# logger is named after this module, per standard logging convention.
logger = logutil.getLogger(__name__)

# Serializes log writes across the monitor child processes so their
# output lines do not interleave on stdout.
std_out_lock = Lock()

COMMON_CONFIG = sdnvpn_config.CommonConfig()
TESTCASE_CONFIG = sdnvpn_config.TestcaseConfig(
    'sdnvpn.test.functest.testcase_10')
30
31
def monitor(in_data, out_data, vm):
    """Watch the console of *vm* and report ping successes/failures.

    Runs in a child process.  Polls the instance console once per second
    until ``in_data["stop_thread"]`` becomes True.  Console lines
    matching ``ping.*KO`` record a single failure message in
    ``out_data["error_msg"]`` (first failure only) and are logged as
    errors; lines matching ``ping.*OK`` are logged as info.

    :param in_data: shared dict; key "stop_thread" signals termination
    :param out_data: shared dict; key "error_msg" collects the failure
    :param vm: nova server object exposing get_console_output() and name
    """
    # At the beginning of ping we might have some failures, so the
    # first 20 console lines are skipped.
    lines_offset = 20
    while in_data["stop_thread"] is False:
        try:
            time.sleep(1)
            vm_console_out_lines = vm.get_console_output().split('\n')
            if lines_offset < len(vm_console_out_lines):
                # Only scan lines that appeared since the last poll; the
                # final element is dropped as a possibly partial line.
                for console_line in vm_console_out_lines[lines_offset:-1]:
                    is_ping_error = re.match(r'ping.*KO', console_line)
                    if is_ping_error and out_data["error_msg"] == "":
                        out_data["error_msg"] += ("Ping failure from "
                                                  "instance {}".
                                                  format(vm.name))
                        # Atomic write to std out
                        with std_out_lock:
                            logger.error("Failure during ping from "
                                         "instance %s: %s",
                                         vm.name, console_line)
                    elif re.match(r'ping.*OK', console_line):
                        # Atomic write to std out
                        with std_out_lock:
                            logger.info("Ping from instance %s: %s",
                                        vm.name, console_line)
                lines_offset = len(vm_console_out_lines)
        except Exception:
            # A bare 'except:' would also swallow SystemExit and
            # KeyboardInterrupt and could keep this child process alive;
            # catch Exception only, and log the traceback.
            # Atomic write to std out
            with std_out_lock:
                logger.exception("Failure in monitor_thread of instance %s",
                                 vm.name)
    # Return to main process
    return
65
66
def main():
    """Check ICMP connectivity between instances across an instance swap.

    Boots vm_2, vm_1 (same availability zone) and vm_3 (a second zone)
    on one network; vm_1 and vm_3 ping their peers from userdata
    scripts.  A monitor process per instance scans console output for
    ping OK/KO lines.  After the first connectivity check, vm_2 is
    deleted and replaced by vm_4 in the first zone, then connectivity is
    checked again.

    Returns the compiled Results summary.  All created OpenStack
    resources are cleaned up in the ``finally`` block.
    """
    results = Results(COMMON_CONFIG.line_length)

    results.add_to_summary(0, "=")
    results.add_to_summary(2, "STATUS", "SUBTEST")
    results.add_to_summary(0, "=")

    nova_client = os_utils.get_nova_client()
    neutron_client = os_utils.get_neutron_client()
    glance_client = os_utils.get_glance_client()

    # Track every created resource id so the finally block can clean up.
    (floatingip_ids, instance_ids, router_ids, network_ids, image_ids,
     subnet_ids, interfaces, bgpvpn_ids) = ([] for i in range(8))
    image_id = os_utils.create_glance_image(glance_client,
                                            TESTCASE_CONFIG.image_name,
                                            COMMON_CONFIG.image_path,
                                            disk=COMMON_CONFIG.image_format,
                                            container="bare",
                                            public='public')
    image_ids.append(image_id)

    network_1_id = test_utils.create_net(neutron_client,
                                         TESTCASE_CONFIG.net_1_name)
    subnet_1_id = test_utils.create_subnet(neutron_client,
                                           TESTCASE_CONFIG.subnet_1_name,
                                           TESTCASE_CONFIG.subnet_1_cidr,
                                           network_1_id)

    network_ids.append(network_1_id)
    subnet_ids.append(subnet_1_id)

    sg_id = os_utils.create_security_group_full(neutron_client,
                                                TESTCASE_CONFIG.secgroup_name,
                                                TESTCASE_CONFIG.secgroup_descr)

    # Two availability zones pin instances to two distinct compute nodes.
    compute_nodes = test_utils.assert_and_get_compute_nodes(nova_client)
    av_zone_1 = "nova:" + compute_nodes[0]
    av_zone_2 = "nova:" + compute_nodes[1]

    # boot INSTANCES
    # vm_2 boots first so its IP can be fed to vm_1's ping userdata.
    vm_2 = test_utils.create_instance(
        nova_client,
        TESTCASE_CONFIG.instance_2_name,
        image_id,
        network_1_id,
        sg_id,
        secgroup_name=TESTCASE_CONFIG.secgroup_name,
        compute_node=av_zone_1)
    vm2_ip = test_utils.get_instance_ip(vm_2)

    # vm_1 pings vm_2 from its userdata script.
    u1 = test_utils.generate_ping_userdata([vm2_ip])
    vm_1 = test_utils.create_instance(
        nova_client,
        TESTCASE_CONFIG.instance_1_name,
        image_id,
        network_1_id,
        sg_id,
        secgroup_name=TESTCASE_CONFIG.secgroup_name,
        compute_node=av_zone_1,
        userdata=u1)
    vm1_ip = test_utils.get_instance_ip(vm_1)

    # vm_3 runs in the second zone and pings both vm_1 and vm_2.
    u3 = test_utils.generate_ping_userdata([vm1_ip, vm2_ip])
    vm_3 = test_utils.create_instance(
        nova_client,
        TESTCASE_CONFIG.instance_3_name,
        image_id,
        network_1_id,
        sg_id,
        secgroup_name=TESTCASE_CONFIG.secgroup_name,
        compute_node=av_zone_2,
        userdata=u3)
    vm3_ip = test_utils.get_instance_ip(vm_3)
    # We do not put vm_2 id in instance_ids table because we will
    # delete the current instance during the testing process
    instance_ids.extend([vm_1.id, vm_3.id])

    # Wait for VMs to get ips.
    instances_up = test_utils.wait_for_instances_up(vm_2)
    instances_dhcp_up = test_utils.wait_for_instances_get_dhcp(vm_1, vm_3)

    if (not instances_up or not instances_dhcp_up):
        logger.error("One or more instances are down")
        # TODO: Handle this appropriately
    # Create monitor threads to monitor traffic between vm_1, vm_2 and vm_3
    # Manager dicts are shared between this process and the monitors:
    # "stop_thread" is the stop signal, "error_msg" returns failures.
    m = Manager()
    monitor_input1 = m.dict()
    monitor_output1 = m.dict()
    monitor_input1["stop_thread"] = False
    monitor_output1["error_msg"] = ""
    monitor_thread1 = Process(target=monitor, args=(monitor_input1,
                                                    monitor_output1, vm_1,))
    monitor_input2 = m.dict()
    monitor_output2 = m.dict()
    monitor_input2["stop_thread"] = False
    monitor_output2["error_msg"] = ""
    monitor_thread2 = Process(target=monitor, args=(monitor_input2,
                                                    monitor_output2, vm_2,))
    monitor_input3 = m.dict()
    monitor_output3 = m.dict()
    monitor_input3["stop_thread"] = False
    monitor_output3["error_msg"] = ""
    monitor_thread3 = Process(target=monitor, args=(monitor_input3,
                                                    monitor_output3, vm_3,))
    # Lists of all monitor threads and their inputs and outputs.
    threads = [monitor_thread1, monitor_thread2, monitor_thread3]
    thread_inputs = [monitor_input1, monitor_input2, monitor_input3]
    thread_outputs = [monitor_output1, monitor_output2, monitor_output3]
    try:
        logger.info("Starting all monitor threads")
        # Start all monitor threads
        for thread in threads:
            thread.start()
        logger.info("Wait before subtest")
        test_utils.wait_before_subtest()
        # First subtest: collect any ping failures seen so far, then
        # reset the error slots for the next round.
        monitor_err_msg = ""
        for thread_output in thread_outputs:
            if thread_output["error_msg"] != "":
                monitor_err_msg += " ,{}".format(thread_output["error_msg"])
                thread_output["error_msg"] = ""
        results.record_action("Check ping status of vm_1, vm_2, and vm_3")
        results.add_to_summary(0, "-")
        if len(monitor_err_msg) == 0:
            results.add_success("Ping succeeds")
        else:
            results.add_failure(monitor_err_msg)
        # Stop monitor thread 2 and delete instance vm_2
        thread_inputs[1]["stop_thread"] = True
        if not os_utils.delete_instance(nova_client, vm_2.id):
            logger.error("Fail to delete vm_2 instance during "
                         "testing process")
            raise Exception("Fail to delete instance vm_2.")
        # Stop and join the remaining monitors before the second round.
        for thread_input in thread_inputs:
            thread_input["stop_thread"] = True
        for thread in threads:
            thread.join()
        threads = []
        thread_inputs = []
        thread_outputs = []
        # Create a new vm (vm_4) on compute 1 node
        u4 = test_utils.generate_ping_userdata([vm1_ip, vm3_ip])
        vm_4 = test_utils.create_instance(
            nova_client,
            TESTCASE_CONFIG.instance_4_name,
            image_id,
            network_1_id,
            sg_id,
            secgroup_name=TESTCASE_CONFIG.secgroup_name,
            compute_node=av_zone_1,
            userdata=u4)
        instance_ids.append(vm_4.id)

        # Wait for VMs to get ips.
        instances_dhcp_up = test_utils.wait_for_instances_get_dhcp(vm_4)
        if not instances_dhcp_up:
            logger.error("Instance vm_4 failed to start.")
            # TODO: Handle this appropriately
        # Create and start a new monitor thread for vm_4
        monitor_input4 = m.dict()
        monitor_output4 = m.dict()
        monitor_input4["stop_thread"] = False
        monitor_output4["error_msg"] = ""
        monitor_thread4 = Process(target=monitor, args=(monitor_input4,
                                                        monitor_output4,
                                                        vm_4,))
        threads.append(monitor_thread4)
        thread_inputs.append(monitor_input4)
        thread_outputs.append(monitor_output4)
        logger.info("Starting monitor thread of vm_4")
        # The thread lists were emptied above, so index 0 is monitor_thread4.
        threads[0].start()
        test_utils.wait_before_subtest()
        monitor_err_msg = ""
        for thread_output in thread_outputs:
            if thread_output["error_msg"] != "":
                monitor_err_msg += " ,{}".format(thread_output["error_msg"])
                thread_output["error_msg"] = ""
        results.record_action("Check ping status of vm_1, vm_3 and vm_4. "
                              "Instance vm_2 is deleted")
        results.add_to_summary(0, "-")
        if len(monitor_err_msg) == 0:
            results.add_success("Ping succeeds")
        else:
            results.add_failure(monitor_err_msg)

    except Exception as e:
        logger.error("exception occurred while executing testcase_10: %s", e)
        raise
    finally:
        # Give a stop signal to all threads
        logger.info("Sending stop signal to monitor thread")
        for thread_input in thread_inputs:
            thread_input["stop_thread"] = True
        # Wait for all threads to stop and return to the main process
        for thread in threads:
            thread.join()

        test_utils.cleanup_nova(nova_client, instance_ids)
        test_utils.cleanup_glance(glance_client, image_ids)
        test_utils.cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids,
                                   interfaces, subnet_ids, router_ids,
                                   network_ids)

    return results.compile_summary()
270
271
# Run the test case when invoked directly; the summary from main() is
# passed to sys.exit as the process result.
if __name__ == '__main__':
    sys.exit(main())