Merge "Make tight the way an instance is considered as UP"
[sdnvpn.git] / sdnvpn / test / functest / testcase_10.py
1 #!/usr/bin/python
2 #
3 # Copyright (c) 2017 All rights reserved
4 # This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10
11 import argparse
12 import logging
13 import re
14 import sys
15 import time
16 import traceback
17
18 from functest.utils import openstack_utils as os_utils
19 from multiprocessing import Process, Manager, Lock
20 from sdnvpn.lib import config as sdnvpn_config
21 from sdnvpn.lib import utils as test_utils
22 from sdnvpn.lib.results import Results
23
# Command-line interface shared by all sdnvpn functest testcases:
# "-r/--report" switches on JSON result reporting.
parser = argparse.ArgumentParser()

parser.add_argument("-r", "--report",
                    help="Create json result file",
                    action="store_true")

# NOTE: arguments are parsed at import time, matching the sibling
# testcase modules in this package.
args = parser.parse_args()

logger = logging.getLogger('sdnvpn-testcase-10')

# Serializes log writes to stdout across the monitor child processes.
std_out_lock = Lock()

# Shared framework configuration and the per-testcase settings
# (image/network/instance names, CIDRs, ...) for testcase 10.
COMMON_CONFIG = sdnvpn_config.CommonConfig()
TESTCASE_CONFIG = sdnvpn_config.TestcaseConfig('testcase_10')
38
39
def monitor(in_data, out_data, vm, poll_interval=1):
    """Watch a VM's console output for the ping OK/KO markers.

    Runs (typically in a child process) until ``in_data["stop_thread"]``
    is set to True by the parent. The first ping failure found in the
    console output is reported through ``out_data["error_msg"]``; later
    KO lines are only logged, not accumulated.

    :param in_data: shared dict read by this process; only the
        "stop_thread" flag is consulted.
    :param out_data: shared dict written by this process; "error_msg"
        receives the first ping-failure description.
    :param vm: nova server object providing ``get_console_output()``
        and ``name``.
    :param poll_interval: seconds to sleep between console polls
        (default 1, the historical hard-coded value).
    """
    # At the beginning of ping we might have some
    # failures, so we ignore the first 10 pings
    lines_offset = 10
    while in_data["stop_thread"] is False:
        try:
            time.sleep(poll_interval)
            vm_console_out_lines = vm.get_console_output().split('\n')
            if lines_offset < len(vm_console_out_lines):
                # The last element may be a partially written line, so
                # stop one short of the end; the offset then skips past
                # everything already examined on the next poll.
                for console_line in vm_console_out_lines[lines_offset:-1]:
                    is_ping_error = re.match(r'ping.*KO', console_line)
                    if is_ping_error and out_data["error_msg"] == "":
                        out_data["error_msg"] += ("Ping failure from "
                                                  "instance {}".
                                                  format(vm.name))
                        # Atomic write to std out
                        with std_out_lock:
                            logging.error("Failure during ping from "
                                          "instance {}: {}".
                                          format(vm.name, console_line))
                    elif re.match(r'ping.*OK', console_line):
                        # Atomic write to std out
                        with std_out_lock:
                            logging.info("Ping from instance {}: {}".
                                         format(vm.name, console_line))
                lines_offset = len(vm_console_out_lines)
        except Exception:
            # Was a bare "except:", which would also swallow
            # SystemExit/KeyboardInterrupt; keep the best-effort loop
            # but only for ordinary errors.
            # Atomic write to std out
            with std_out_lock:
                logging.error("Failure in monitor_thread of instance {}".
                              format(vm.name))
    # Return to main process
    return
73
74
def _create_monitor(manager, vm):
    """Build (but do not start) a console-monitor process for *vm*.

    :param manager: multiprocessing.Manager used to allocate the shared
        dicts handed to the child process.
    :param vm: nova server object passed to monitor().
    :return: (process, input_dict, output_dict); input_dict carries the
        "stop_thread" flag, output_dict collects "error_msg".
    """
    monitor_input = manager.dict()
    monitor_output = manager.dict()
    monitor_input["stop_thread"] = False
    monitor_output["error_msg"] = ""
    monitor_process = Process(target=monitor,
                              args=(monitor_input, monitor_output, vm,))
    return monitor_process, monitor_input, monitor_output


def _drain_monitor_errors(thread_outputs):
    """Concatenate and reset the error message of every monitor output."""
    monitor_err_msg = ""
    for thread_output in thread_outputs:
        if thread_output["error_msg"] != "":
            monitor_err_msg += " ,{}".format(thread_output["error_msg"])
            thread_output["error_msg"] = ""
    return monitor_err_msg


def main():
    """Run testcase 10: ping monitoring across an instance replacement.

    Boots three instances that continuously ping each other, checks the
    pings succeed, then deletes vm_2, boots a replacement (vm_4) on the
    same compute node and checks connectivity again. Console output of
    every instance is watched by a dedicated monitor child process.

    :return: the compiled Results summary expected by the functest runner.
    """
    results = Results(COMMON_CONFIG.line_length)

    results.add_to_summary(0, "=")
    results.add_to_summary(2, "STATUS", "SUBTEST")
    results.add_to_summary(0, "=")

    nova_client = os_utils.get_nova_client()
    neutron_client = os_utils.get_neutron_client()
    glance_client = os_utils.get_glance_client()

    # Resource-id registries consumed by the cleanup helpers at the end.
    (floatingip_ids, instance_ids, router_ids, network_ids, image_ids,
     subnet_ids, interfaces, bgpvpn_ids) = ([] for i in range(8))
    image_id = os_utils.create_glance_image(glance_client,
                                            TESTCASE_CONFIG.image_name,
                                            COMMON_CONFIG.image_path,
                                            disk=COMMON_CONFIG.image_format,
                                            container="bare",
                                            public='public')
    image_ids.append(image_id)

    network_1_id = test_utils.create_net(neutron_client,
                                         TESTCASE_CONFIG.net_1_name)
    subnet_1_id = test_utils.create_subnet(neutron_client,
                                           TESTCASE_CONFIG.subnet_1_name,
                                           TESTCASE_CONFIG.subnet_1_cidr,
                                           network_1_id)

    network_ids.append(network_1_id)
    subnet_ids.append(subnet_1_id)

    sg_id = os_utils.create_security_group_full(neutron_client,
                                                TESTCASE_CONFIG.secgroup_name,
                                                TESTCASE_CONFIG.secgroup_descr)

    compute_nodes = test_utils.assert_and_get_compute_nodes(nova_client)
    av_zone_1 = "nova:" + compute_nodes[0]
    av_zone_2 = "nova:" + compute_nodes[1]

    # boot INSTANCES: vm_1 and vm_2 on compute 1, vm_3 on compute 2
    vm_2 = test_utils.create_instance(
        nova_client,
        TESTCASE_CONFIG.instance_2_name,
        image_id,
        network_1_id,
        sg_id,
        secgroup_name=TESTCASE_CONFIG.secgroup_name,
        compute_node=av_zone_1)
    vm2_ip = test_utils.get_instance_ip(vm_2)

    # vm_1 pings vm_2 from its cloud-init userdata script.
    u1 = test_utils.generate_ping_userdata([vm2_ip], 1)
    vm_1 = test_utils.create_instance(
        nova_client,
        TESTCASE_CONFIG.instance_1_name,
        image_id,
        network_1_id,
        sg_id,
        secgroup_name=TESTCASE_CONFIG.secgroup_name,
        compute_node=av_zone_1,
        userdata=u1)
    vm1_ip = test_utils.get_instance_ip(vm_1)

    # vm_3 pings both vm_1 and vm_2.
    u3 = test_utils.generate_ping_userdata([vm1_ip, vm2_ip], 1)
    vm_3 = test_utils.create_instance(
        nova_client,
        TESTCASE_CONFIG.instance_3_name,
        image_id,
        network_1_id,
        sg_id,
        secgroup_name=TESTCASE_CONFIG.secgroup_name,
        compute_node=av_zone_2,
        userdata=u3)
    vm3_ip = test_utils.get_instance_ip(vm_3)
    # We do not put vm_2 id in instance_ids table because we will
    # delete the current instance during the testing process
    instance_ids.extend([vm_1.id, vm_3.id])

    # Wait for VMs to get ips.
    instances_up = test_utils.wait_for_instances_up(vm_1, vm_2,
                                                    vm_3)

    if not instances_up:
        logger.error("One or more instances is down")
        # TODO: Handle this appropriately
    # Create monitor threads to monitor traffic between vm_1, vm_2 and vm_3
    m = Manager()
    # Lists of all monitor threads and their inputs and outputs.
    threads = []
    thread_inputs = []
    thread_outputs = []
    for vm in (vm_1, vm_2, vm_3):
        monitor_process, monitor_input, monitor_output = \
            _create_monitor(m, vm)
        threads.append(monitor_process)
        thread_inputs.append(monitor_input)
        thread_outputs.append(monitor_output)
    try:
        logger.info("Starting all monitor threads")
        # Start all monitor threads
        for thread in threads:
            thread.start()
        logger.info("Wait before subtest")
        test_utils.wait_before_subtest()
        monitor_err_msg = _drain_monitor_errors(thread_outputs)
        results.record_action("Check ping status of vm_1, vm_2, and vm_3")
        results.add_to_summary(0, "-")
        if len(monitor_err_msg) == 0:
            results.add_success("Ping succeeds")
        else:
            results.add_failure(monitor_err_msg)
        # Stop monitor thread 2 and delete instance vm_2
        thread_inputs[1]["stop_thread"] = True
        if not os_utils.delete_instance(nova_client, vm_2.id):
            logger.error("Fail to delete vm_2 instance during "
                         "testing process")
            raise Exception("Fail to delete instance vm_2.")
        # Create a new vm (vm_4) on compute 1 node
        u4 = test_utils.generate_ping_userdata([vm1_ip, vm3_ip], 1)
        vm_4 = test_utils.create_instance(
            nova_client,
            TESTCASE_CONFIG.instance_4_name,
            image_id,
            network_1_id,
            sg_id,
            secgroup_name=TESTCASE_CONFIG.secgroup_name,
            compute_node=av_zone_1,
            userdata=u4)
        instance_ids.append(vm_4.id)
        # Wait for VMs to get ips.
        instances_up = test_utils.wait_for_instances_up(vm_4)
        if not instances_up:
            logger.error("Instance vm_4 failed to start.")
            # TODO: Handle this appropriately
        # Create and start a new monitor thread for vm_4
        monitor_thread4, monitor_input4, monitor_output4 = \
            _create_monitor(m, vm_4)
        threads.append(monitor_thread4)
        thread_inputs.append(monitor_input4)
        thread_outputs.append(monitor_output4)
        logger.info("Starting monitor thread of vm_4")
        monitor_thread4.start()
        test_utils.wait_before_subtest()
        monitor_err_msg = _drain_monitor_errors(thread_outputs)
        results.record_action("Check ping status of vm_1, vm_3 and vm_4. "
                              "Instance vm_2 is deleted")
        results.add_to_summary(0, "-")
        if len(monitor_err_msg) == 0:
            results.add_success("Ping succeeds")
        else:
            results.add_failure(monitor_err_msg)

    except Exception:
        # Was a bare "except:"; narrowed so SystemExit/KeyboardInterrupt
        # still propagate while any test failure is logged.
        logger.exception("======== EXCEPTION =========")
        exc_type, exc_value, exc_tb = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_tb)
    finally:
        # Give a stop signal to all threads
        logger.info("Sending stop signal to monitor thread")
        for thread_input in thread_inputs:
            thread_input["stop_thread"] = True
        # Wait for all threads to stop and return to the main process
        for thread in threads:
            thread.join()

    test_utils.cleanup_nova(nova_client, instance_ids, image_ids)
    test_utils.cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids,
                               interfaces, subnet_ids, router_ids,
                               network_ids)

    return results.compile_summary()
270
271
if __name__ == '__main__':
    # Default to INFO so the per-instance ping traces emitted by the
    # monitor processes are visible on stdout.
    logging.basicConfig(level=logging.INFO)
    sys.exit(main())