Merge "Replace glance client calls with openstack sdk"
[sdnvpn.git] / sdnvpn / test / functest / testcase_10.py
1 #!/usr/bin/env python
2 #
3 # Copyright (c) 2017 All rights reserved
4 # This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10
11 import logging
12 import re
13 import sys
14 import time
15
16 from multiprocessing import Process, Manager, Lock
17 from sdnvpn.lib import config as sdnvpn_config
18 from sdnvpn.lib import openstack_utils as os_utils
19 from sdnvpn.lib import utils as test_utils
20 from sdnvpn.lib.results import Results
21
# Module logger.  Fix: the original passed the literal string '__name__',
# which named the logger "__name__" instead of this module's dotted path.
logger = logging.getLogger(__name__)

# Serializes log writes from the monitor child processes so their
# output does not interleave on stdout.
std_out_lock = Lock()

# Common framework settings and the per-testcase configuration section.
COMMON_CONFIG = sdnvpn_config.CommonConfig()
TESTCASE_CONFIG = sdnvpn_config.TestcaseConfig(
    'sdnvpn.test.functest.testcase_10')
29
30
def monitor(in_data, out_data, vm):
    """Poll a VM's console output and log ping successes/failures.

    Runs in a child process until the parent sets
    ``in_data["stop_thread"]`` to True.  Once per second it fetches the
    VM console log and scans any newly appended lines for the
    ``ping ... OK`` / ``ping ... KO`` markers emitted by the ping
    userdata script.

    :param in_data: shared dict; key ``"stop_thread"`` tells the loop
        when to exit
    :param out_data: shared dict; the first failure message is stored
        under ``"error_msg"`` and kept (later failures are only logged)
    :param vm: instance object exposing ``get_console_output()`` and
        ``name``
    """
    # Pings right after boot may fail while the network is still
    # converging, so skip the first 20 console lines.
    lines_offset = 20
    while in_data["stop_thread"] is False:
        try:
            time.sleep(1)
            vm_console_out_lines = vm.get_console_output().split('\n')
            if lines_offset < len(vm_console_out_lines):
                # Inspect only lines added since the previous poll; the
                # final element is excluded (it may be a partial line).
                for console_line in vm_console_out_lines[lines_offset:-1]:
                    is_ping_error = re.match(r'ping.*KO', console_line)
                    if is_ping_error and out_data["error_msg"] == "":
                        out_data["error_msg"] += ("Ping failure from "
                                                  "instance {}".
                                                  format(vm.name))
                        # Atomic write to std out
                        with std_out_lock:
                            logger.error("Failure during ping from "
                                         "instance {}: {}".
                                         format(vm.name, console_line))
                    elif re.match(r'ping.*OK', console_line):
                        # Atomic write to std out
                        with std_out_lock:
                            logger.info("Ping from instance {}: {}".
                                        format(vm.name, console_line))
                lines_offset = len(vm_console_out_lines)
        except Exception:
            # Narrowed from a bare 'except:' so SystemExit and
            # KeyboardInterrupt can still terminate the process.
            # Atomic write to std out
            with std_out_lock:
                logger.error("Failure in monitor_thread of instance {}".
                             format(vm.name))
    # Return to main process
    return
64
65
def main():
    """Check that ping connectivity survives deleting/adding instances.

    Scenario (all VMs share one network):
      * vm_2 (compute 1) is a ping target; vm_1 (compute 1) pings vm_2;
        vm_3 (compute 2) pings vm_1 and vm_2.
      * One monitor child process per VM scans its console output for
        'ping ... OK' / 'ping ... KO' markers and records failures.
      * vm_2 is deleted, then vm_4 is booted on compute 1 pinging vm_1
        and vm_3, and monitoring is repeated.

    Returns the compiled Results summary.  All created OpenStack
    resources are removed in the ``finally`` block.
    """
    results = Results(COMMON_CONFIG.line_length)

    results.add_to_summary(0, "=")
    results.add_to_summary(2, "STATUS", "SUBTEST")
    results.add_to_summary(0, "=")

    nova_client = os_utils.get_nova_client()
    neutron_client = os_utils.get_neutron_client()
    conn = os_utils.get_os_connection()

    # Track every created resource id so the cleanup helpers in the
    # finally block can delete them.
    (floatingip_ids, instance_ids, router_ids, network_ids, image_ids,
     subnet_ids, interfaces, bgpvpn_ids) = ([] for i in range(8))
    image_id = os_utils.create_glance_image(conn,
                                            TESTCASE_CONFIG.image_name,
                                            COMMON_CONFIG.image_path,
                                            disk=COMMON_CONFIG.image_format,
                                            container="bare",
                                            public='public')
    image_ids.append(image_id)

    network_1_id = test_utils.create_net(neutron_client,
                                         TESTCASE_CONFIG.net_1_name)
    subnet_1_id = test_utils.create_subnet(neutron_client,
                                           TESTCASE_CONFIG.subnet_1_name,
                                           TESTCASE_CONFIG.subnet_1_cidr,
                                           network_1_id)

    network_ids.append(network_1_id)
    subnet_ids.append(subnet_1_id)

    sg_id = os_utils.create_security_group_full(neutron_client,
                                                TESTCASE_CONFIG.secgroup_name,
                                                TESTCASE_CONFIG.secgroup_descr)

    # Requires at least two compute nodes; instances are pinned to
    # availability zones so their placement is deterministic.
    compute_nodes = test_utils.assert_and_get_compute_nodes(nova_client)
    av_zone_1 = "nova:" + compute_nodes[0]
    av_zone_2 = "nova:" + compute_nodes[1]

    # boot INSTANCES
    # vm_2 boots first so its IP can be embedded in the ping userdata
    # of the instances created afterwards.
    vm_2 = test_utils.create_instance(
        nova_client,
        TESTCASE_CONFIG.instance_2_name,
        image_id,
        network_1_id,
        sg_id,
        secgroup_name=TESTCASE_CONFIG.secgroup_name,
        compute_node=av_zone_1)
    vm2_ip = test_utils.get_instance_ip(vm_2)

    u1 = test_utils.generate_ping_userdata([vm2_ip])
    vm_1 = test_utils.create_instance(
        nova_client,
        TESTCASE_CONFIG.instance_1_name,
        image_id,
        network_1_id,
        sg_id,
        secgroup_name=TESTCASE_CONFIG.secgroup_name,
        compute_node=av_zone_1,
        userdata=u1)
    vm1_ip = test_utils.get_instance_ip(vm_1)

    u3 = test_utils.generate_ping_userdata([vm1_ip, vm2_ip])
    vm_3 = test_utils.create_instance(
        nova_client,
        TESTCASE_CONFIG.instance_3_name,
        image_id,
        network_1_id,
        sg_id,
        secgroup_name=TESTCASE_CONFIG.secgroup_name,
        compute_node=av_zone_2,
        userdata=u3)
    vm3_ip = test_utils.get_instance_ip(vm_3)
    # We do not put vm_2 id in instance_ids table because we will
    # delete the current instance during the testing process
    instance_ids.extend([vm_1.id, vm_3.id])

    # Wait for VMs to get ips.
    instances_up = test_utils.wait_for_instances_up(vm_2)
    instances_dhcp_up = test_utils.wait_for_instances_get_dhcp(vm_1, vm_3)

    if (not instances_up or not instances_dhcp_up):
        logger.error("One or more instances are down")
        # TODO: Handle this appropriately
    # Create monitor threads to monitor traffic between vm_1, vm_2 and vm_3
    # Manager dicts are shared between this process and the monitor
    # children: "stop_thread" signals shutdown, "error_msg" reports back.
    m = Manager()
    monitor_input1 = m.dict()
    monitor_output1 = m.dict()
    monitor_input1["stop_thread"] = False
    monitor_output1["error_msg"] = ""
    monitor_thread1 = Process(target=monitor, args=(monitor_input1,
                                                    monitor_output1, vm_1,))
    monitor_input2 = m.dict()
    monitor_output2 = m.dict()
    monitor_input2["stop_thread"] = False
    monitor_output2["error_msg"] = ""
    monitor_thread2 = Process(target=monitor, args=(monitor_input2,
                                                    monitor_output2, vm_2,))
    monitor_input3 = m.dict()
    monitor_output3 = m.dict()
    monitor_input3["stop_thread"] = False
    monitor_output3["error_msg"] = ""
    monitor_thread3 = Process(target=monitor, args=(monitor_input3,
                                                    monitor_output3, vm_3,))
    # Lists of all monitor threads and their inputs and outputs.
    threads = [monitor_thread1, monitor_thread2, monitor_thread3]
    thread_inputs = [monitor_input1, monitor_input2, monitor_input3]
    thread_outputs = [monitor_output1, monitor_output2, monitor_output3]
    try:
        logger.info("Starting all monitor threads")
        # Start all monitor threads
        for thread in threads:
            thread.start()
        logger.info("Wait before subtest")
        test_utils.wait_before_subtest()
        # Collect (and reset) any ping failures reported so far.
        monitor_err_msg = ""
        for thread_output in thread_outputs:
            if thread_output["error_msg"] != "":
                monitor_err_msg += " ,{}".format(thread_output["error_msg"])
                thread_output["error_msg"] = ""
        results.record_action("Check ping status of vm_1, vm_2, and vm_3")
        results.add_to_summary(0, "-")
        if len(monitor_err_msg) == 0:
            results.add_success("Ping succeeds")
        else:
            results.add_failure(monitor_err_msg)
        # Stop monitor thread 2 and delete instance vm_2
        thread_inputs[1]["stop_thread"] = True
        if not os_utils.delete_instance(nova_client, vm_2.id):
            logger.error("Fail to delete vm_2 instance during "
                         "testing process")
            raise Exception("Fail to delete instance vm_2.")
        # Stop and join all remaining monitors before the second phase;
        # the lists are reset so only vm_4's monitor runs next.
        for thread_input in thread_inputs:
            thread_input["stop_thread"] = True
        for thread in threads:
            thread.join()
        threads = []
        thread_inputs = []
        thread_outputs = []
        # Create a new vm (vm_4) on compute 1 node
        u4 = test_utils.generate_ping_userdata([vm1_ip, vm3_ip])
        vm_4 = test_utils.create_instance(
            nova_client,
            TESTCASE_CONFIG.instance_4_name,
            image_id,
            network_1_id,
            sg_id,
            secgroup_name=TESTCASE_CONFIG.secgroup_name,
            compute_node=av_zone_1,
            userdata=u4)
        instance_ids.append(vm_4.id)

        # Wait for VMs to get ips.
        instances_dhcp_up = test_utils.wait_for_instances_get_dhcp(vm_4)
        if not instances_dhcp_up:
            logger.error("Instance vm_4 failed to start.")
            # TODO: Handle this appropriately
        # Create and start a new monitor thread for vm_4
        monitor_input4 = m.dict()
        monitor_output4 = m.dict()
        monitor_input4["stop_thread"] = False
        monitor_output4["error_msg"] = ""
        monitor_thread4 = Process(target=monitor, args=(monitor_input4,
                                                        monitor_output4,
                                                        vm_4,))
        threads.append(monitor_thread4)
        thread_inputs.append(monitor_input4)
        thread_outputs.append(monitor_output4)
        logger.info("Starting monitor thread of vm_4")
        # threads was reset above, so index 0 is vm_4's monitor.
        threads[0].start()
        test_utils.wait_before_subtest()
        monitor_err_msg = ""
        for thread_output in thread_outputs:
            if thread_output["error_msg"] != "":
                monitor_err_msg += " ,{}".format(thread_output["error_msg"])
                thread_output["error_msg"] = ""
        results.record_action("Check ping status of vm_1, vm_3 and vm_4. "
                              "Instance vm_2 is deleted")
        results.add_to_summary(0, "-")
        if len(monitor_err_msg) == 0:
            results.add_success("Ping succeeds")
        else:
            results.add_failure(monitor_err_msg)

    except Exception as e:
        logger.error("exception occurred while executing testcase_10: %s", e)
        raise
    finally:
        # Give a stop signal to all threads
        logger.info("Sending stop signal to monitor thread")
        for thread_input in thread_inputs:
            thread_input["stop_thread"] = True
        # Wait for all threads to stop and return to the main process
        for thread in threads:
            thread.join()

        # Tear down everything that was created, even on failure.
        test_utils.cleanup_nova(nova_client, instance_ids)
        test_utils.cleanup_glance(conn, image_ids)
        test_utils.cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids,
                                   interfaces, subnet_ids, router_ids,
                                   network_ids)

    return results.compile_summary()
269
270
if __name__ == '__main__':
    # Standalone entry point; the summary from main() becomes the
    # process exit status (presumably exit-code compatible — verify).
    sys.exit(main())