Fix testcase3 (peering subcase) for Apex
[sdnvpn.git] / sdnvpn / lib / utils.py
#!/usr/bin/python
#
# Copyright (c) 2017 All rights reserved
# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
import os
import sys
import time
import requests
import re
import subprocess

import functest.utils.functest_logger as ft_logger
import functest.utils.openstack_utils as os_utils
from opnfv.deployment.factory import Factory as DeploymentFactory

from sdnvpn.lib import config as sdnvpn_config

logger = ft_logger.Logger("sdnvpn_test_utils").getLogger()

common_config = sdnvpn_config.CommonConfig()

ODL_USER = 'admin'
ODL_PASS = 'admin'


def create_custom_flavor():
    return os_utils.get_or_create_flavor(common_config.custom_flavor_name,
                                          common_config.custom_flavor_ram,
                                          common_config.custom_flavor_disk,
                                          common_config.custom_flavor_vcpus)


def create_net(neutron_client, name):
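    """Create a neutron network and return its id.

    Exit the test if the network cannot be created."""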
    logger.debug("Creating network %s", name)
    net_id = os_utils.create_neutron_net(neutron_client, name)
    if not net_id:
        logger.error(
            "There has been a problem when creating the neutron network")
        sys.exit(-1)
    return net_id


def create_subnet(neutron_client, name, cidr, net_id):
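    """Create a subnet with the given cidr in the given network.

    Exit the test if the subnet cannot be created."""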
    logger.debug("Creating subnet %s in network %s with cidr %s",
                 name, net_id, cidr)
    subnet_id = os_utils.create_neutron_subnet(neutron_client,
                                               name,
                                               cidr,
                                               net_id)
    if not subnet_id:
        logger.error(
            "There has been a problem when creating the neutron subnet")
        sys.exit(-1)
    return subnet_id


def create_network(neutron_client, net, subnet1, cidr1,
                   router, subnet2=None, cidr2=None):
    """Network assoc won't work for networks/subnets created by this function.

    It is an ODL limitation due to it handling routers as vpns.
    See https://bugs.opendaylight.org/show_bug.cgi?id=6962"""
    network_dic = os_utils.create_network_full(neutron_client,
                                               net,
                                               subnet1,
                                               router,
                                               cidr1)
    if not network_dic:
        logger.error(
            "There has been a problem when creating the neutron network")
        sys.exit(-1)
    net_id = network_dic["net_id"]
    subnet_id = network_dic["subnet_id"]
    router_id = network_dic["router_id"]

    if subnet2 is not None:
        logger.debug("Creating and attaching a second subnet...")
        subnet_id = os_utils.create_neutron_subnet(
            neutron_client, subnet2, cidr2, net_id)
        if not subnet_id:
            logger.error(
                "There has been a problem when creating the second subnet")
            sys.exit(-1)
        logger.debug("Subnet '%s' created successfully" % subnet_id)
    return net_id, subnet_id, router_id


def create_instance(nova_client,
                    name,
                    image_id,
                    network_id,
                    sg_id,
                    secgroup_name=None,
                    fixed_ip=None,
                    compute_node='',
                    userdata=None,
                    files=None,
                    **kwargs
                    ):
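    """Boot an instance and wait for it to become ACTIVE.

    The flavor defaults to the one in the common SDNVPN config and can be
    overridden via the 'flavor' keyword argument; `compute_node` is passed
    as the availability zone. The instance is added to the security group
    `sg_id` before it is returned. Exit the test if booting fails."""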
    if 'flavor' not in kwargs:
        kwargs['flavor'] = common_config.default_flavor

    logger.info("Creating instance '%s'..." % name)
    logger.debug(
        "Configuration:\n name=%s \n flavor=%s \n image=%s \n"
        " network=%s\n secgroup=%s \n hypervisor=%s \n"
        " fixed_ip=%s\n files=%s\n userdata=\n%s\n"
        % (name, kwargs['flavor'], image_id, network_id, sg_id,
           compute_node, fixed_ip, files, userdata))
    instance = os_utils.create_instance_and_wait_for_active(
        kwargs['flavor'],
        image_id,
        network_id,
        name,
        config_drive=True,
        userdata=userdata,
        av_zone=compute_node,
        fixed_ip=fixed_ip,
        files=files)

    if instance is None:
        logger.error("Error while booting instance.")
        sys.exit(-1)
    else:
        logger.debug("Instance '%s' booted successfully. IP='%s'." %
                     (name, instance.networks.itervalues().next()[0]))
    # Retrieve IP of INSTANCE
    # instance_ip = instance.networks.get(network_id)[0]

    if secgroup_name:
        logger.debug("Adding '%s' to security group '%s'..."
                     % (name, secgroup_name))
    else:
        logger.debug("Adding '%s' to security group '%s'..."
                     % (name, sg_id))
    os_utils.add_secgroup_to_instance(nova_client, instance.id, sg_id)

    return instance


def generate_ping_userdata(ips_array):
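    """Return a cloud-init userdata script that pings the given IPs.

    The generated shell script loops forever, pinging every address in
    `ips_array` once per second and printing 'ping <ip> OK' or
    'ping <ip> KO' to the console."""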
    ips = ""
    for ip in ips_array:
        ips = ("%s %s" % (ips, ip))

    ips = ips.replace('  ', ' ')
    return ("#!/bin/sh\n"
            "set%s\n"
            "while true; do\n"
            " for i do\n"
            "  ip=$i\n"
            "  ping -c 1 $ip 2>&1 >/dev/null\n"
            "  RES=$?\n"
            "  if [ \"Z$RES\" = \"Z0\" ] ; then\n"
            "   echo ping $ip OK\n"
            "  else echo ping $ip KO\n"
            "  fi\n"
            " done\n"
            " sleep 1\n"
            "done\n"
            % ips)


def generate_userdata_common():
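    """Return a cloud-init userdata script that sets up SSH for cirros.

    It moves the injected private key into /home/cirros/.ssh/, installs a
    hard-coded public key in authorized_keys and fixes ownership and
    permissions so the instances can SSH into each other."""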
    return ("#!/bin/sh\n"
            "sudo mkdir -p /home/cirros/.ssh/\n"
            "sudo chown cirros:cirros /home/cirros/.ssh/\n"
            "sudo chown cirros:cirros /home/cirros/id_rsa\n"
            "mv /home/cirros/id_rsa /home/cirros/.ssh/\n"
            "sudo echo ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgnWtSS98Am516e"
            "stBsq0jbyOB4eLMUYDdgzsUHsnxFQCtACwwAg9/2uq3FoGUBUWeHZNsT6jcK9"
            "sCMEYiS479CUCzbrxcd8XaIlK38HECcDVglgBNwNzX/WDfMejXpKzZG61s98rU"
            "ElNvZ0YDqhaqZGqxIV4ejalqLjYrQkoly3R+2k= "
            "cirros@test1>/home/cirros/.ssh/authorized_keys\n"
            "sudo chown cirros:cirros /home/cirros/.ssh/authorized_keys\n"
            "chmod 700 /home/cirros/.ssh\n"
            "chmod 644 /home/cirros/.ssh/authorized_keys\n"
            "chmod 600 /home/cirros/.ssh/id_rsa\n"
            )


def generate_userdata_with_ssh(ips_array):
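    """Return userdata that sets up SSH and then polls the given IPs.

    On top of generate_userdata_common(), the generated script loops
    forever, trying to SSH to every address in `ips_array` and printing
    either the remote hostname or '<ip> not reachable'."""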
    u1 = generate_userdata_common()

    ips = ""
    for ip in ips_array:
        ips = ("%s %s" % (ips, ip))

    ips = ips.replace('  ', ' ')
    u2 = ("#!/bin/sh\n"
          "set%s\n"
          "while true; do\n"
          " for i do\n"
          "  ip=$i\n"
          "  hostname=$(ssh -y -i /home/cirros/.ssh/id_rsa "
          "cirros@$ip 'hostname' </dev/zero 2>/dev/null)\n"
          "  RES=$?\n"
          "  if [ \"Z$RES\" = \"Z0\" ]; then echo $ip $hostname;\n"
          "  else echo $ip 'not reachable';fi;\n"
          " done\n"
          " sleep 1\n"
          "done\n"
          % ips)
    return (u1 + u2)


def get_installerHandler():
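    """Return a deployment handler for the current installer.

    The installer type and address are read from the INSTALLER_TYPE and
    INSTALLER_IP environment variables; only 'fuel' and 'apex' are
    supported."""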
    installer_type = str(os.environ['INSTALLER_TYPE'].lower())
    installer_ip = get_installer_ip()

    if installer_type not in ["fuel", "apex"]:
        raise ValueError("%s is not supported" % installer_type)
    else:
        if installer_type in ["apex"]:
            developHandler = DeploymentFactory.get_handler(
                installer_type,
                installer_ip,
                'root',
                pkey_file="/root/.ssh/id_rsa")

        if installer_type in ["fuel"]:
            developHandler = DeploymentFactory.get_handler(
                installer_type,
                installer_ip,
                'root',
                'r00tme')
        return developHandler


def get_nodes():
    developHandler = get_installerHandler()
    return developHandler.get_nodes()


def get_installer_ip():
    return str(os.environ['INSTALLER_IP'])


def get_instance_ip(instance):
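    """Return the first IP address of the instance's first network."""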
    instance_ip = instance.networks.itervalues().next()[0]
    return instance_ip


def wait_for_instance(instance):
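    """Wait until the instance console shows a DHCP lease.

    Polls the console log for up to 80 seconds and returns True once a
    'Lease of ... obtained' message is found, False otherwise."""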
    logger.info("Waiting for instance %s to get a DHCP lease..." % instance.id)
    # The sleep this function replaced waited for 80s
    tries = 40
    sleep_time = 2
    pattern = "Lease of .* obtained, lease time"
    expected_regex = re.compile(pattern)
    console_log = ""
    while tries > 0 and not expected_regex.search(console_log):
        console_log = instance.get_console_output()
        time.sleep(sleep_time)
        tries -= 1

    if not expected_regex.search(console_log):
        logger.error("Instance %s seems to have failed leasing an IP."
                     % instance.id)
        return False
    return True


def wait_for_instances_up(*args):
    check = [wait_for_instance(instance) for instance in args]
    return all(check)


def wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, net_id):
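    """Wait until the network shows up in the BGPVPN's network list.

    Polls for up to 30 seconds and returns True on success, False if the
    association does not appear in time."""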
    tries = 30
    sleep_time = 1
    nets = []
    logger.debug("Waiting for network %s to associate with BGPVPN %s "
                 % (net_id, bgpvpn_id))

    while tries > 0 and net_id not in nets:
        nets = os_utils.get_bgpvpn_networks(neutron_client, bgpvpn_id)
        time.sleep(sleep_time)
        tries -= 1
    if net_id not in nets:
        logger.error("Association of network %s with BGPVPN %s failed" %
                     (net_id, bgpvpn_id))
        return False
    return True


def wait_for_bgp_net_assocs(neutron_client, bgpvpn_id, *args):
    check = [wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, net_id)
             for net_id in args]
    # Return True if all associations succeeded
    return all(check)


def wait_for_bgp_router_assoc(neutron_client, bgpvpn_id, router_id):
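    """Wait until the router shows up in the BGPVPN's router list.

    Polls for up to 30 seconds and returns True on success, False if the
    association does not appear in time."""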
    tries = 30
    sleep_time = 1
    routers = []
    logger.debug("Waiting for router %s to associate with BGPVPN %s "
                 % (router_id, bgpvpn_id))
    while tries > 0 and router_id not in routers:
        routers = os_utils.get_bgpvpn_routers(neutron_client, bgpvpn_id)
        time.sleep(sleep_time)
        tries -= 1
    if router_id not in routers:
        logger.error("Association of router %s with BGPVPN %s failed" %
                     (router_id, bgpvpn_id))
        return False
    return True


def wait_for_bgp_router_assocs(neutron_client, bgpvpn_id, *args):
    check = [wait_for_bgp_router_assoc(neutron_client, bgpvpn_id, router_id)
             for router_id in args]
    # Return True if all associations succeeded
    return all(check)


def wait_before_subtest(*args, **kwargs):
    ''' This is a placeholder.
        TODO: Replace delay with polling logic. '''
    time.sleep(30)


def assert_and_get_compute_nodes(nova_client, required_node_number=2):
    """Get the compute nodes in the deployment

    Exit if the deployment doesn't have enough compute nodes"""
    compute_nodes = os_utils.get_hypervisors(nova_client)

    num_compute_nodes = len(compute_nodes)
    if num_compute_nodes < required_node_number:
        logger.error("There are %s compute nodes in the deployment. "
                     "Minimum number of nodes to complete the test is %s."
                     % (num_compute_nodes, required_node_number))
        sys.exit(-1)

    logger.debug("Compute nodes: %s" % compute_nodes)
    return compute_nodes


def open_icmp_ssh(neutron_client, security_group_id):
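    """Allow ingress ICMP and TCP port 80 in the given security group.

    Note: despite the name, the TCP rule opens port 80 (HTTP), not 22."""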
    os_utils.create_secgroup_rule(neutron_client,
                                  security_group_id,
                                  'ingress',
                                  'icmp')
    os_utils.create_secgroup_rule(neutron_client,
                                  security_group_id,
                                  'ingress',
                                  'tcp',
                                  80, 80)


def open_bgp_port(neutron_client, security_group_id):
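    """Allow ingress TCP port 179 (BGP) in the given security group."""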
    os_utils.create_secgroup_rule(neutron_client,
                                  security_group_id,
                                  'ingress',
                                  'tcp',
                                  179, 179)


def exec_cmd(cmd, verbose):
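    """Run a shell command locally and collect its output.

    Returns a (output, success) tuple; success is False when the command
    exits with a non-zero return code. If `verbose` is set, the output is
    also logged at debug level."""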
    success = True
    logger.debug("Executing '%s'" % cmd)
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT)
    output = ""
    for line in iter(p.stdout.readline, b''):
        output += line

    if verbose:
        logger.debug(output)

    p.stdout.close()
    returncode = p.wait()
    if returncode != 0:
        logger.error("Command %s failed to execute." % cmd)
        success = False

    return output, success


def check_odl_fib(ip, controller_ip):
    """Check that there is an entry in the ODL Fib for `ip`"""
    url = "http://" + controller_ip + \
          ":8181/restconf/config/odl-fib:fibEntries/"
    logger.debug("Querying '%s' for FIB entries", url)
    res = requests.get(url, auth=(ODL_USER, ODL_PASS))
    if res.status_code != 200:
        logger.error("OpenDaylight response status code: %s", res.status_code)
        return False
    logger.debug("Checking whether '%s' is in the OpenDaylight FIB"
                 % ip)
    logger.debug("OpenDaylight FIB: \n%s" % res.text)
    return ip in res.text


def run_odl_cmd(odl_node, cmd):
    '''Run a command in the OpenDaylight Karaf shell

    This is a bit flimsy because of shell quote escaping; make sure that
    the cmd passed does not have any top-level double quotes or this
    function will break.

    The /dev/null is used because client works, but outputs something
    that contains "ERROR" and run_cmd doesn't like that.

    '''
    karaf_cmd = ('/opt/opendaylight/bin/client -h 127.0.0.1 "%s"'
                 ' 2>/dev/null' % cmd)
    return odl_node.run_cmd(karaf_cmd)


def wait_for_cloud_init(instance):
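    """Wait until cloud-init has finished inside the instance.

    Polls the console log for up to 10 minutes and returns True when
    cloud-init reports completion, False on failure or timeout."""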
    success = True
    # ubuntu images take a long time to start
    tries = 20
    sleep_time = 30
    while tries > 0:
        instance_log = instance.get_console_output()
        if "Failed to run module" in instance_log:
            success = False
            logger.error("Cloud init failed to run. Reason: %s",
                         instance_log)
            break
        if re.search(r"Cloud-init v. .+ finished at", instance_log):
            success = True
            break
        time.sleep(sleep_time)
        tries = tries - 1

    if tries == 0:
        logger.error("Cloud init timed out. Reason: %s",
                     instance_log)
        success = False

    return success


def attach_instance_to_ext_br(instance, compute_node):
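    """Attach the instance to the external bridge on its compute node.

    Assumes a Fuel or Apex deployment; on Apex a dedicated linux bridge
    (br-quagga) is created and patched into br-ex with a veth pair before
    the domain is attached to it with virsh."""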
    libvirt_instance_name = getattr(instance, "OS-EXT-SRV-ATTR:instance_name")
    installer_type = str(os.environ['INSTALLER_TYPE'].lower())
    if installer_type == "fuel":
        bridge = "br-ex"
    elif installer_type == "apex":
        # In Apex, br-ex is an ovs bridge and virsh attach-interface
        # won't just work. We work around it by creating a linux
        # bridge, attaching that to br-ex with a veth pair
        # and virsh-attaching the instance to the linux-bridge
        bridge = "br-quagga"
        cmd = """
        set -xe
        sudo brctl addbr {bridge} &&
        sudo ip link set {bridge} up &&
        sudo ip link add quagga-tap type veth peer name ovs-quagga-tap &&
        sudo ip link set dev ovs-quagga-tap up &&
        sudo ip link set dev quagga-tap up &&
        sudo ovs-vsctl add-port br-ex ovs-quagga-tap &&
        sudo brctl addif {bridge} quagga-tap
        """
        compute_node.run_cmd(cmd.format(bridge=bridge))

    compute_node.run_cmd("sudo virsh attach-interface %s"
                         " bridge %s" % (libvirt_instance_name, bridge))