# Copyright (c) 2017 All rights reserved
# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
#
# http://www.apache.org/licenses/LICENSE-2.0
import os
import re
import subprocess
import time

import requests

from concurrent.futures import ThreadPoolExecutor

from opnfv.deployment.factory import Factory as DeploymentFactory

from sdnvpn.lib import config as sdnvpn_config
import sdnvpn.lib.openstack_utils as os_utils
from sdnvpn.lib import logutil
# Module-level logger shared by every helper in this file.
logger = logutil.getLogger('sdnvpn_test_utils')

# Shared test configuration (default/custom flavors, timeouts, ...).
common_config = sdnvpn_config.CommonConfig()

# Credentials for the OpenDaylight REST API; referenced by check_odl_fib().
# NOTE(review): these constants were missing from this chunk and were
# restored — verify the values against the deployment.
ODL_USER = 'admin'
ODL_PASS = 'admin'

# Small worker pool used to wait for several instances concurrently.
executor = ThreadPoolExecutor(5)
class ExtraRoute(object):
    """Represent one extra-route entry of a neutron router."""

    def __init__(self, destination, nexthop):
        # Destination CIDR and the next-hop address used to reach it.
        self.destination = destination
        self.nexthop = nexthop
class AllowedAddressPair(object):
    """Represent one allowed-address-pair entry of a neutron port."""

    def __init__(self, ipaddress, macaddress):
        # IP/MAC pair that the port is allowed to send traffic from.
        self.ipaddress = ipaddress
        self.macaddress = macaddress
def create_default_flavor():
    """Ensure the default test flavor exists; return os_utils' result."""
    cfg = common_config
    return os_utils.get_or_create_flavor(cfg.default_flavor,
                                         cfg.default_flavor_ram,
                                         cfg.default_flavor_disk,
                                         cfg.default_flavor_vcpus)
def create_custom_flavor():
    """Ensure the custom test flavor exists; return os_utils' result."""
    cfg = common_config
    return os_utils.get_or_create_flavor(cfg.custom_flavor_name,
                                         cfg.custom_flavor_ram,
                                         cfg.custom_flavor_disk,
                                         cfg.custom_flavor_vcpus)
def create_net(neutron_client, name):
    """Create a neutron network and return its id.

    Raises:
        Exception: when the network could not be created.
    """
    logger.debug("Creating network %s", name)
    net_id = os_utils.create_neutron_net(neutron_client, name)
    # Missing guard restored: os_utils returns a falsy id on failure.
    if not net_id:
        logger.error(
            "There has been a problem when creating the neutron network")
        raise Exception("There has been a problem when creating"
                        " the neutron network {}".format(name))
    return net_id
def create_subnet(neutron_client, name, cidr, net_id):
    """Create a neutron subnet inside net_id and return its id.

    Raises:
        Exception: when the subnet could not be created.
    """
    logger.debug("Creating subnet %s in network %s with cidr %s",
                 name, net_id, cidr)
    subnet_id = os_utils.create_neutron_subnet(neutron_client,
                                               name,
                                               cidr,
                                               net_id)
    # Missing guard restored: falsy id signals failure.
    if not subnet_id:
        logger.error(
            "There has been a problem when creating the neutron subnet")
        raise Exception("There has been a problem when creating"
                        " the neutron subnet {}".format(name))
    return subnet_id
def create_network(neutron_client, net, subnet1, cidr1,
                   router, subnet2=None, cidr2=None):
    """Network assoc won't work for networks/subnets created by this function.
    It is an ODL limitation due to it handling routers as vpns.
    See https://bugs.opendaylight.org/show_bug.cgi?id=6962

    Returns (net_id, subnet_id, router_id); when subnet2 is given the
    returned subnet_id is that of the second subnet.
    """
    # NOTE(review): argument order follows os_utils.create_network_full —
    # confirm against that helper's signature.
    network_dic = os_utils.create_network_full(neutron_client,
                                               net,
                                               subnet1,
                                               router,
                                               cidr1)
    if not network_dic:
        logger.error(
            "There has been a problem when creating the neutron network")
        raise Exception("There has been a problem when creating"
                        " the neutron network {}".format(net))
    net_id = network_dic["net_id"]
    subnet_id = network_dic["subnet_id"]
    router_id = network_dic["router_id"]

    if subnet2 is not None:
        logger.debug("Creating and attaching a second subnet...")
        subnet_id = os_utils.create_neutron_subnet(
            neutron_client, subnet2, cidr2, net_id)
        if not subnet_id:
            logger.error(
                "There has been a problem when creating the second subnet")
            raise Exception("There has been a problem when creating"
                            " the second subnet {}".format(subnet2))
        logger.debug("Subnet '%s' created successfully" % subnet_id)
    return net_id, subnet_id, router_id
def get_port(neutron_client, instance_id):
    """Return the neutron port whose device_id equals instance_id.

    Returns None when no port matches or the port list is unavailable.
    """
    ports = os_utils.get_port_list(neutron_client)
    if ports is not None:
        for port in ports:
            if port['device_id'] == instance_id:
                return port
    return None
def update_port_allowed_address_pairs(neutron_client, port_id, address_pairs):
    """Replace the allowed-address-pairs of a port.

    address_pairs is a list of AllowedAddressPair objects.  Returns the
    port id on success, None on failure or when address_pairs is empty.
    """
    if len(address_pairs) <= 0:
        return
    allowed_address_pairs = []
    for address_pair in address_pairs:
        address_pair_dict = {'ip_address': address_pair.ipaddress,
                             'mac_address': address_pair.macaddress}
        allowed_address_pairs.append(address_pair_dict)
    json_body = {'port': {
        "allowed_address_pairs": allowed_address_pairs
    }}

    try:
        port = neutron_client.update_port(port=port_id,
                                          body=json_body)
        return port['port']['id']
    except Exception as e:
        logger.error("Error [update_neutron_port(neutron_client, '%s', '%s')]:"
                     " %s" % (port_id, address_pairs, e))
        return None
def create_instance(nova_client,
                    name,
                    image_id,
                    network_id,
                    sg_id,
                    secgroup_name=None,
                    fixed_ip=None,
                    compute_node='',
                    userdata=None,
                    files=None,
                    **kwargs
                    ):
    """Boot an instance, wait for ACTIVE, attach it to security group sg_id.

    NOTE(review): the parameter list was missing from this chunk and was
    reconstructed from the logging statements below — verify against callers.
    Raises Exception when the instance fails to boot.
    """
    if 'flavor' not in kwargs:
        kwargs['flavor'] = common_config.default_flavor

    logger.info("Creating instance '%s'..." % name)
    logger.debug(
        "Configuration:\n name=%s \n flavor=%s \n image=%s \n"
        " network=%s\n secgroup=%s \n hypervisor=%s \n"
        " fixed_ip=%s\n files=%s\n userdata=\n%s\n"
        % (name, kwargs['flavor'], image_id, network_id, sg_id,
           compute_node, fixed_ip, files, userdata))
    instance = os_utils.create_instance_and_wait_for_active(
        kwargs['flavor'],
        image_id,
        network_id,
        name,
        config_drive=True,
        userdata=userdata,
        av_zone=compute_node,
        fixed_ip=fixed_ip,
        files=files)

    if instance is None:
        logger.error("Error while booting instance.")
        raise Exception("Error while booting instance {}".format(name))
    else:
        # Fix: dict.itervalues().next() is Python-2-only; next(iter(...))
        # is equivalent and works on both Python 2 and 3.
        logger.debug("Instance '%s' booted successfully. IP='%s'." %
                     (name, next(iter(instance.networks.values()))[0]))
    # Retrieve IP of INSTANCE
    # instance_ip = instance.networks.get(network_id)[0]

    if secgroup_name:
        logger.debug("Adding '%s' to security group '%s'..."
                     % (name, secgroup_name))
    else:
        logger.debug("Adding '%s' to security group '%s'..."
                     % (name, sg_id))
    os_utils.add_secgroup_to_instance(nova_client, instance.id, sg_id)

    return instance
def generate_ping_userdata(ips_array, ping_count=10):
    """Return a cloud-init shell script that pings each IP in a loop,
    printing 'ping <ip> OK' or 'ping <ip> KO' per attempt.

    NOTE(review): shell template partially reconstructed from fragments.
    """
    ips = ""
    for ip in ips_array:
        ips = ("%s %s" % (ips, ip))

    ips = ips.replace('  ', ' ')
    return ("#!/bin/sh\n"
            "set%s\n"
            "while true; do\n"
            " for ip in $set\n"
            " do\n"
            "  ping -c %s $ip 2>&1 >/dev/null\n"
            "  RES=$?\n"
            "  if [ \"Z$RES\" = \"Z0\" ] ; then\n"
            "   echo ping $ip OK\n"
            "  else echo ping $ip KO\n"
            "  fi\n"
            " done\n"
            " sleep 1\n"
            "done\n"
            % (ips, ping_count))
def generate_userdata_common():
    """Return a cloud-init shell script that installs an ssh key pair for
    the cirros user (truncated closing parenthesis restored)."""
    return ("#!/bin/sh\n"
            "sudo mkdir -p /home/cirros/.ssh/\n"
            "sudo chown cirros:cirros /home/cirros/.ssh/\n"
            "sudo chown cirros:cirros /home/cirros/id_rsa\n"
            "mv /home/cirros/id_rsa /home/cirros/.ssh/\n"
            "sudo echo ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgnWtSS98Am516e"
            "stBsq0jbyOB4eLMUYDdgzsUHsnxFQCtACwwAg9/2uq3FoGUBUWeHZNsT6jcK9"
            "sCMEYiS479CUCzbrxcd8XaIlK38HECcDVglgBNwNzX/WDfMejXpKzZG61s98rU"
            "ElNvZ0YDqhaqZGqxIV4ejalqLjYrQkoly3R+2k= "
            "cirros@test1>/home/cirros/.ssh/authorized_keys\n"
            "sudo chown cirros:cirros /home/cirros/.ssh/authorized_keys\n"
            "chmod 700 /home/cirros/.ssh\n"
            "chmod 644 /home/cirros/.ssh/authorized_keys\n"
            "chmod 600 /home/cirros/.ssh/id_rsa\n"
            )
def generate_userdata_with_ssh(ips_array):
    """Return userdata: common ssh-key setup plus a loop that ssh'es to
    each IP and echoes its hostname (or 'not reachable').

    NOTE(review): shell template partially reconstructed from fragments.
    """
    u1 = generate_userdata_common()

    ips = ""
    for ip in ips_array:
        ips = ("%s %s" % (ips, ip))

    ips = ips.replace('  ', ' ')
    u2 = ("#!/bin/sh\n"
          "set%s\n"
          "while true; do\n"
          " for ip in $set\n"
          " do\n"
          "  hostname=$(ssh -y -i /home/cirros/.ssh/id_rsa "
          "cirros@$ip 'hostname' </dev/zero 2>/dev/null)\n"
          "  RES=$?\n"
          "  if [ \"Z$RES\" = \"Z0\" ]; then echo $ip $hostname;\n"
          "  else echo $ip 'not reachable';fi;\n"
          " done\n"
          " sleep 1\n"
          "done\n"
          % ips)
    return (u1 + u2)
def generate_userdata_interface_create(interface_name, interface_number,
                                       ip_Address, net_mask):
    """Return userdata that creates user 'sdnvpn' and brings up an alias
    interface <name>:<number> with the given address/netmask.

    NOTE(review): two missing lines reconstructed — verify upstream.
    """
    return ("#!/bin/sh\n"
            "set -xv\n"
            "sudo useradd -m sdnvpn\n"
            "sudo adduser sdnvpn sudo\n"
            "sudo echo sdnvpn:opnfv | chpasswd\n"
            "sleep 20\n"
            "sudo ifconfig %s:%s %s netmask %s up\n"
            % (interface_name, interface_number,
               ip_Address, net_mask))
def get_installerHandler():
    """Return a deployment handler for the current installer.

    Reads INSTALLER_TYPE from the environment; returns None for anything
    other than fuel or apex.
    NOTE(review): handler credentials reconstructed — verify upstream.
    """
    installer_type = str(os.environ['INSTALLER_TYPE'].lower())
    installer_ip = get_installer_ip()

    if installer_type not in ["fuel", "apex"]:
        logger.warn("installer type %s is neither fuel nor apex."
                    "returning None for installer handler" % installer_type)
        return None
    else:
        if installer_type in ["apex"]:
            developHandler = DeploymentFactory.get_handler(
                installer_type,
                installer_ip,
                'root',
                pkey_file="/root/.ssh/id_rsa")

        if installer_type in ["fuel"]:
            developHandler = DeploymentFactory.get_handler(
                installer_type,
                installer_ip,
                'root',
                'r00tme')
    return developHandler
def get_nodes():
    """Return the deployment's node list via the installer handler.

    NOTE(review): the def line was missing from this chunk; restored.
    """
    developHandler = get_installerHandler()
    return developHandler.get_nodes()
def get_installer_ip():
    """Return the installer IP read from the INSTALLER_IP env variable."""
    installer_ip = os.environ['INSTALLER_IP']
    return str(installer_ip)
def get_instance_ip(instance):
    """Return the first IP of the instance's first network.

    Fix: dict.itervalues().next() is Python-2-only; next(iter(...)) is
    equivalent on Python 2 and 3.
    """
    instance_ip = next(iter(instance.networks.values()))[0]
    return instance_ip
def wait_for_instance(instance, pattern=".* login:", tries=40):
    """Poll the instance console until `pattern` appears.

    Returns True when matched within `tries` polls, else False.
    NOTE(review): sleep interval and counters reconstructed.
    """
    logger.info("Waiting for instance %s to boot up" % instance.id)
    sleep_time = 2
    expected_regex = re.compile(pattern)
    console_log = ""
    while tries > 0 and not expected_regex.search(console_log):
        console_log = instance.get_console_output()
        time.sleep(sleep_time)
        tries -= 1

    if not expected_regex.search(console_log):
        logger.error("Instance %s does not boot up properly."
                     % instance.id)
        return False
    return True
def wait_for_instances_up(*instances):
    """Wait for every instance to reach its login prompt; True if all did."""
    check = [wait_for_instance(instance) for instance in instances]
    # Missing return restored.
    return all(check)
def wait_for_instances_get_dhcp(*instances):
    """Wait until every instance's console shows a DHCP lease; True if all."""
    check = [wait_for_instance(instance, "Lease of .* obtained")
             for instance in instances]
    # Missing return restored.
    return all(check)
def async_Wait_for_instances(instances, tries=40):
    """Wait for all instances to boot, polling them in parallel via the
    module-level executor.  Logs an error if any instance did not boot."""
    if len(instances) <= 0:
        return
    futures = []
    for instance in instances:
        future = executor.submit(wait_for_instance,
                                 instance,
                                 ".* login:",
                                 tries)
        futures.append(future)
    results = []
    for future in futures:
        results.append(future.result())
    if False in results:
        logger.error("one or more instances is not yet booted up")
def wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, net_id):
    """Poll until net_id appears in the BGPVPN's network list.

    Returns True on success, False on timeout.
    NOTE(review): tries/sleep constants reconstructed.
    """
    tries = 30
    sleep_time = 1
    nets = []
    logger.debug("Waiting for network %s to associate with BGPVPN %s "
                 % (bgpvpn_id, net_id))

    while tries > 0 and net_id not in nets:
        nets = get_bgpvpn_networks(neutron_client, bgpvpn_id)
        time.sleep(sleep_time)
        tries -= 1
    if net_id not in nets:
        logger.error("Association of network %s with BGPVPN %s failed" %
                     (net_id, bgpvpn_id))
        return False
    return True
def wait_for_bgp_net_assocs(neutron_client, bgpvpn_id, *args):
    """Wait for every network id in args to associate with the BGPVPN."""
    check = [wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, id)
             for id in args]
    # Return True if all associations succeeded
    return all(check)
def wait_for_bgp_router_assoc(neutron_client, bgpvpn_id, router_id):
    """Poll until router_id appears in the BGPVPN's router list.

    Returns True on success, False on timeout.
    NOTE(review): tries/sleep constants reconstructed.
    """
    tries = 30
    sleep_time = 1
    routers = []
    logger.debug("Waiting for router %s to associate with BGPVPN %s "
                 % (bgpvpn_id, router_id))
    while tries > 0 and router_id not in routers:
        routers = get_bgpvpn_routers(neutron_client, bgpvpn_id)
        time.sleep(sleep_time)
        tries -= 1
    if router_id not in routers:
        logger.error("Association of router %s with BGPVPN %s failed" %
                     (router_id, bgpvpn_id))
        return False
    return True
def wait_for_bgp_router_assocs(neutron_client, bgpvpn_id, *args):
    """Wait for every router id in args to associate with the BGPVPN."""
    check = [wait_for_bgp_router_assoc(neutron_client, bgpvpn_id, id)
             for id in args]
    # Return True if all associations succeeded
    return all(check)
def wait_before_subtest(*args, **kwargs):
    ''' This is a placeholder.
        TODO: Replace delay with polling logic. '''
    # Fixed 30 s settle time between subtests (restored missing sleep).
    time.sleep(30)
def assert_and_get_compute_nodes(nova_client, required_node_number=2):
    """Get the compute nodes in the deployment
    Exit if the deployment doesn't have enough compute nodes"""
    compute_nodes = os_utils.get_hypervisors(nova_client)

    num_compute_nodes = len(compute_nodes)
    # Fix: honour required_node_number — the original hard-coded 2 and
    # silently ignored this parameter.
    if num_compute_nodes < required_node_number:
        logger.error("There are %s compute nodes in the deployment. "
                     "Minimum number of nodes to complete the test is %s."
                     % (num_compute_nodes, required_node_number))
        raise Exception("There are {} compute nodes in the deployment. "
                        "Minimum number of nodes to complete the test"
                        " is {}.".format(num_compute_nodes,
                                         required_node_number))

    logger.debug("Compute nodes: %s" % compute_nodes)
    return compute_nodes
def open_icmp(neutron_client, security_group_id):
    """Add an ingress ICMP rule to the security group if missing.

    NOTE(review): os_utils argument tails reconstructed — verify against
    check_security_group_rules / create_secgroup_rule signatures.
    """
    if os_utils.check_security_group_rules(neutron_client,
                                           security_group_id,
                                           'ingress',
                                           'icmp'):
        if not os_utils.create_secgroup_rule(neutron_client,
                                             security_group_id,
                                             'ingress',
                                             'icmp'):
            logger.error("Failed to create icmp security group rule...")
    else:
        logger.info("This rule exists for security group: %s"
                    % security_group_id)
def open_http_port(neutron_client, security_group_id):
    """Add an ingress TCP/80 rule to the security group if missing.

    NOTE(review): os_utils argument tails reconstructed.
    """
    if os_utils.check_security_group_rules(neutron_client,
                                           security_group_id,
                                           'ingress',
                                           'tcp',
                                           80, 80):
        if not os_utils.create_secgroup_rule(neutron_client,
                                             security_group_id,
                                             'ingress',
                                             'tcp',
                                             80, 80):
            logger.error("Failed to create http security group rule...")
    else:
        logger.info("This rule exists for security group: %s"
                    % security_group_id)
def open_bgp_port(neutron_client, security_group_id):
    """Add an ingress TCP/179 (BGP) rule to the security group if missing.

    NOTE(review): os_utils argument tails reconstructed.
    """
    if os_utils.check_security_group_rules(neutron_client,
                                           security_group_id,
                                           'ingress',
                                           'tcp',
                                           179, 179):
        if not os_utils.create_secgroup_rule(neutron_client,
                                             security_group_id,
                                             'ingress',
                                             'tcp',
                                             179, 179):
            logger.error("Failed to create bgp security group rule...")
    else:
        logger.info("This rule exists for security group: %s"
                    % security_group_id)
def exec_cmd(cmd, verbose):
    """Run a shell command, capturing combined stdout/stderr.

    Returns (output, success) where success is False on non-zero exit.
    NOTE(review): middle of the body reconstructed; on Python 3 the
    readline() chunks are bytes — confirm intended str/bytes handling.
    """
    success = True
    logger.debug("Executing '%s'" % cmd)
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT)
    output = ""
    for line in iter(p.stdout.readline, b''):
        output += line
        if verbose:
            logger.debug(line)
    p.stdout.close()
    returncode = p.wait()
    if returncode != 0:
        logger.error("Command %s failed to execute." % cmd)
        success = False

    return output, success
def check_odl_fib(ip, controller_ip):
    """Check that there is an entry in the ODL Fib for `ip`"""
    url = "http://" + controller_ip + \
          ":8181/restconf/config/odl-fib:fibEntries/"
    logger.debug("Querring '%s' for FIB entries", url)
    res = requests.get(url, auth=(ODL_USER, ODL_PASS))
    if res.status_code != 200:
        logger.error("OpenDaylight response status code: %s", res.status_code)
        # Missing early return restored: no FIB check on HTTP failure.
        return False
    logger.debug("Checking whether '%s' is in the OpenDaylight FIB"
                 % ip)
    logger.debug("OpenDaylight FIB: \n%s" % res.text)
    return ip in res.text
def run_odl_cmd(odl_node, cmd):
    '''Run a command in the OpenDaylight Karaf shell
    This is a bit flimsy because of shell quote escaping, make sure that
    the cmd passed does not have any top level double quotes or this
    function will break.
    The /dev/null is used because client works, but outputs something
    that contains "ERROR" and run_cmd doesn't like that.
    '''
    karaf_cmd = ('/opt/opendaylight/bin/client -h 127.0.0.1 "%s"'
                 ' 2>/dev/null' % cmd)
    return odl_node.run_cmd(karaf_cmd)
def wait_for_cloud_init(instance):
    """Poll the console until cloud-init finishes or fails.

    Returns True on a clean finish, False on module failure or timeout.
    NOTE(review): loop counters reconstructed from fragments.
    """
    success = True
    # ubuntu images take a long time to start
    tries = 20
    sleep_time = 30
    logger.info("Waiting for cloud init of instance: {}"
                "".format(instance.name))
    while tries > 0:
        instance_log = instance.get_console_output()
        if "Failed to run module" in instance_log:
            success = False
            logger.error("Cloud init failed to run. Reason: %s",
                         instance_log)
            break
        if re.search(r"Cloud-init v. .+ finished at", instance_log):
            success = True
            break
        time.sleep(sleep_time)
        tries = tries - 1

    if tries == 0:
        logger.error("Cloud init timed out"
                     ". Reason: %s",
                     instance_log)
        success = False
    logger.info("Finished waiting for cloud init of instance {} result was {}"
                "".format(instance.name, success))
    return success
def attach_instance_to_ext_br(instance, compute_node):
    """virsh-attach the instance's domain to the external bridge.

    For fuel the ovs br-ex is used directly; for apex a linux bridge is
    created and veth-plugged into br-ex first (virsh cannot attach to an
    ovs bridge directly).
    """
    libvirt_instance_name = getattr(instance, "OS-EXT-SRV-ATTR:instance_name")
    installer_type = str(os.environ['INSTALLER_TYPE'].lower())
    if installer_type == "fuel":
        bridge = "br-ex"
    elif installer_type == "apex":
        # In Apex, br-ex is an ovs bridge and virsh attach-interface
        # won't just work. We work around it by creating a linux
        # bridge, attaching that to br-ex with a veth pair
        # and virsh-attaching the instance to the linux-bridge
        bridge = "br-quagga"
        cmd = """
        set -e
        if ! sudo brctl show |grep -q ^{bridge};then
          sudo brctl addbr {bridge}
          sudo ip link set {bridge} up
          sudo ip link add quagga-tap type veth peer name ovs-quagga-tap
          sudo ip link set dev ovs-quagga-tap up
          sudo ip link set dev quagga-tap up
          sudo ovs-vsctl add-port br-ex ovs-quagga-tap
          sudo brctl addif {bridge} quagga-tap
        fi
        """
        compute_node.run_cmd(cmd.format(bridge=bridge))

    compute_node.run_cmd("sudo virsh attach-interface %s"
                         " bridge %s" % (libvirt_instance_name, bridge))
def detach_instance_from_ext_br(instance, compute_node):
    """virsh-detach the instance from the external bridge and, on apex,
    tear down the helper linux bridge created by attach_instance_to_ext_br."""
    libvirt_instance_name = getattr(instance, "OS-EXT-SRV-ATTR:instance_name")
    # Raw string: keeps the identical '\d' bytes for grep while avoiding
    # Python 3's invalid-escape-sequence warning.
    mac = compute_node.run_cmd("for vm in $(sudo virsh list | "
                               "grep running | awk '{print $2}'); "
                               "do echo -n ; sudo virsh dumpxml $vm| "
                               r"grep -oP '52:54:[\da-f:]+' ;done")
    compute_node.run_cmd("sudo virsh detach-interface --domain %s"
                         " --type bridge --mac %s"
                         % (libvirt_instance_name, mac))

    installer_type = str(os.environ['INSTALLER_TYPE'].lower())
    if installer_type == "fuel":
        bridge = "br-ex"
    elif installer_type == "apex":
        # In Apex, br-ex is an ovs bridge and virsh attach-interface
        # won't just work. We work around it by creating a linux
        # bridge, attaching that to br-ex with a veth pair
        # and virsh-attaching the instance to the linux-bridge
        bridge = "br-quagga"
        cmd = """
        sudo brctl delif {bridge} quagga-tap &&
        sudo ovs-vsctl del-port br-ex ovs-quagga-tap &&
        sudo ip link set dev quagga-tap down &&
        sudo ip link set dev ovs-quagga-tap down &&
        sudo ip link del quagga-tap type veth peer name ovs-quagga-tap &&
        sudo ip link set {bridge} down &&
        sudo brctl delbr {bridge}
        """
        compute_node.run_cmd(cmd.format(bridge=bridge))
def cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids, interfaces,
                    subnet_ids, router_ids, network_ids):
    """Delete neutron resources in dependency order.

    Returns False as soon as a deletion fails, True otherwise.
    NOTE(review): control flow between fragments reconstructed.
    """
    if len(floatingip_ids) != 0:
        for floatingip_id in floatingip_ids:
            if not os_utils.delete_floating_ip(neutron_client, floatingip_id):
                logger.error('Fail to delete all floating ips. '
                             'Floating ip with id {} was not deleted.'.
                             format(floatingip_id))
                return False

    if len(bgpvpn_ids) != 0:
        for bgpvpn_id in bgpvpn_ids:
            delete_bgpvpn(neutron_client, bgpvpn_id)

    if len(interfaces) != 0:
        for router_id, subnet_id in interfaces:
            if not os_utils.remove_interface_router(neutron_client,
                                                    router_id, subnet_id):
                logger.error('Fail to delete all interface routers. '
                             'Interface router with id {} was not deleted.'.
                             format(router_id))

    if len(router_ids) != 0:
        for router_id in router_ids:
            if not os_utils.remove_gateway_router(neutron_client, router_id):
                logger.error('Fail to delete all gateway routers. '
                             'Gateway router with id {} was not deleted.'.
                             format(router_id))

    if len(subnet_ids) != 0:
        for subnet_id in subnet_ids:
            if not os_utils.delete_neutron_subnet(neutron_client, subnet_id):
                logger.error('Fail to delete all subnets. '
                             'Subnet with id {} was not deleted.'.
                             format(subnet_id))
                return False

    if len(router_ids) != 0:
        for router_id in router_ids:
            if not os_utils.delete_neutron_router(neutron_client, router_id):
                logger.error('Fail to delete all routers. '
                             'Router with id {} was not deleted.'.
                             format(router_id))
                return False

    if len(network_ids) != 0:
        for network_id in network_ids:
            if not os_utils.delete_neutron_net(neutron_client, network_id):
                logger.error('Fail to delete all networks. '
                             'Network with id {} was not deleted.'.
                             format(network_id))
                return False
    return True
def cleanup_nova(nova_client, instance_ids, flavor_ids=None):
    """Delete the given flavors (best-effort) and instances.

    Returns False when an instance deletion fails, True otherwise.
    """
    if flavor_ids is not None and len(flavor_ids) != 0:
        for flavor_id in flavor_ids:
            nova_client.flavors.delete(flavor_id)
    if len(instance_ids) != 0:
        for instance_id in instance_ids:
            if not os_utils.delete_instance(nova_client, instance_id):
                logger.error('Fail to delete all instances. '
                             'Instance with id {} was not deleted.'.
                             format(instance_id))
                return False
    return True
def cleanup_glance(glance_client, image_ids):
    """Delete the given glance images; False when any deletion fails."""
    if len(image_ids) != 0:
        for image_id in image_ids:
            if not os_utils.delete_glance_image(glance_client, image_id):
                logger.error('Fail to delete all images. '
                             'Image with id {} was not deleted.'.
                             format(image_id))
                return False
    return True
def create_bgpvpn(neutron_client, **kwargs):
    """Create a BGPVPN from keyword arguments (e.g. route_distinguishers,
    route_targets) and return the server response."""
    payload = {"bgpvpn": kwargs}
    return neutron_client.create_bgpvpn(payload)
def update_bgpvpn(neutron_client, bgpvpn_id, **kwargs):
    """Update the BGPVPN identified by bgpvpn_id with the given fields."""
    payload = {"bgpvpn": kwargs}
    return neutron_client.update_bgpvpn(bgpvpn_id, payload)
def delete_bgpvpn(neutron_client, bgpvpn_id):
    """Delete the BGPVPN identified by bgpvpn_id."""
    return neutron_client.delete_bgpvpn(bgpvpn_id)
def get_bgpvpn(neutron_client, bgpvpn_id):
    """Fetch and return the full BGPVPN resource dict."""
    return neutron_client.show_bgpvpn(bgpvpn_id)
def get_bgpvpn_routers(neutron_client, bgpvpn_id):
    """Return the router ids associated with the given BGPVPN."""
    # Inlined the one-line get_bgpvpn helper; behavior identical.
    bgpvpn = neutron_client.show_bgpvpn(bgpvpn_id)
    return bgpvpn['bgpvpn']['routers']
def get_bgpvpn_networks(neutron_client, bgpvpn_id):
    """Return the network ids associated with the given BGPVPN."""
    # Inlined the one-line get_bgpvpn helper; behavior identical.
    bgpvpn = neutron_client.show_bgpvpn(bgpvpn_id)
    return bgpvpn['bgpvpn']['networks']
def create_router_association(neutron_client, bgpvpn_id, router_id):
    """Associate a router with a BGPVPN."""
    payload = {"router_association": {"router_id": router_id}}
    return neutron_client.create_router_association(bgpvpn_id, payload)
def create_network_association(neutron_client, bgpvpn_id, neutron_network_id):
    """Associate a network with a BGPVPN."""
    payload = {"network_association": {"network_id": neutron_network_id}}
    return neutron_client.create_network_association(bgpvpn_id, payload)
def is_fail_mode_secure():
    """
    Checks the value of the attribute fail_mode,
    if it is set to secure. This check is performed
    on all OVS br-int interfaces, for all OpenStack nodes.

    Returns a dict {node_name: bool}.
    NOTE(review): parts of the body reconstructed from fragments.
    """
    is_secure = {}
    openstack_nodes = get_nodes()
    # List all ovs bridge names on the node.
    get_ovs_int_cmd = ("sudo ovs-vsctl show | "
                       "grep -i bridge | "
                       "awk '{print $2}'")
    # Define OVS get fail_mode command
    get_ovs_fail_mode_cmd = ("sudo ovs-vsctl get-fail-mode br-int")
    for openstack_node in openstack_nodes:
        if not openstack_node.is_active():
            continue

        ovs_int_list = (openstack_node.run_cmd(get_ovs_int_cmd).
                        strip().split('\n'))
        if 'br-int' in ovs_int_list:
            # Execute get fail_mode command
            br_int_fail_mode = (openstack_node.
                                run_cmd(get_ovs_fail_mode_cmd).strip())
            if br_int_fail_mode == 'secure':
                # success
                is_secure[openstack_node.name] = True
            else:
                # failure
                logger.error('The fail_mode for br-int was not secure '
                             'in {} node'.format(openstack_node.name))
                is_secure[openstack_node.name] = False
    return is_secure
def update_nw_subnet_port_quota(neutron_client, tenant_id, nw_quota,
                                subnet_quota, port_quota, router_quota):
    """Set network/subnet/port/router quotas for a tenant.

    Returns True on success, False on failure.
    """
    json_body = {"quota": {
        "network": nw_quota,
        "subnet": subnet_quota,
        "port": port_quota,
        "router": router_quota
    }}

    try:
        neutron_client.update_quota(tenant_id=tenant_id,
                                    body=json_body)
        return True
    except Exception as e:
        logger.error("Error [update_nw_subnet_port_quota(neutron_client,"
                     " '%s', '%s', '%s', '%s, %s')]: %s" %
                     (tenant_id, nw_quota, subnet_quota,
                      port_quota, router_quota, e))
        return False
def update_instance_quota_class(nova_client, instances_quota):
    """Set the default quota-class instance limit.

    Returns True on success, False on failure.
    """
    try:
        nova_client.quota_classes.update("default", instances=instances_quota)
        return True
    except Exception as e:
        logger.error("Error [update_instance_quota_class(nova_client,"
                     " '%s' )]: %s" % (instances_quota, e))
        return False
def get_neutron_quota(neutron_client, tenant_id):
    """Return the tenant's neutron quota dict; None (implicit) on error."""
    try:
        return neutron_client.show_quota(tenant_id=tenant_id)['quota']
    except Exception as e:
        logger.error("Error in getting neutron quota for tenant "
                     " '%s' )]: %s" % (tenant_id, e))
def get_nova_instances_quota(nova_client):
    """Return the default quota-class instance limit; None on error."""
    try:
        return nova_client.quota_classes.get("default").instances
    except Exception as e:
        logger.error("Error in getting nova instances quota: %s" % e)
def update_router_extra_route(neutron_client, router_id, extra_routes):
    """Set extra routes (list of ExtraRoute objects) on a router.

    Returns True on success, None when extra_routes is empty, False on
    failure.
    """
    if len(extra_routes) <= 0:
        return
    routes_list = []
    for extra_route in extra_routes:
        route_dict = {'destination': extra_route.destination,
                      'nexthop': extra_route.nexthop}
        routes_list.append(route_dict)
    json_body = {'router': {
        "routes": routes_list
    }}

    try:
        neutron_client.update_router(router_id, body=json_body)
        return True
    except Exception as e:
        logger.error("Error in updating router with extra route: %s" % e)
        return False
def update_router_no_extra_route(neutron_client, router_ids):
    """Clear the extra routes on every router in router_ids.

    Fix: iterate over all routers instead of stopping after the first
    successful update.  Returns True when all updates succeeded.
    """
    json_body = {'router': {
        "routes": [
        ]}}

    success = True
    for router_id in router_ids:
        try:
            neutron_client.update_router(router_id, body=json_body)
        except Exception as e:
            logger.error("Error in clearing extra route: %s" % e)
            success = False
    return success
def get_ovs_groups(compute_node_list, ovs_br_list, of_protocol="OpenFlow13"):
    """
    Gets, as input, a list of compute nodes and a list of OVS bridges
    and returns the command console output, as a list of lines, that
    contains all the OVS groups from all bridges and nodes in lists.
    """
    cmd_out_lines = []
    for compute_node in compute_node_list:
        for ovs_br in ovs_br_list:
            # Only dump groups for bridges that actually exist on the node.
            if ovs_br in compute_node.run_cmd("sudo ovs-vsctl show"):
                ovs_groups_cmd = ("sudo ovs-ofctl dump-groups {} -O {} | "
                                  "grep group".format(ovs_br, of_protocol))
                cmd_out_lines += (compute_node.run_cmd(ovs_groups_cmd).strip().
                                  split("\n"))
    return cmd_out_lines
888 def get_ovs_flows(compute_node_list, ovs_br_list, of_protocol="OpenFlow13"):
890 Gets, as input, a list of compute nodes and a list of OVS bridges
891 and returns the command console output, as a list of lines, that
892 contains all the OVS flows from all bridges and nodes in lists.
895 for compute_node in compute_node_list:
896 for ovs_br in ovs_br_list:
897 if ovs_br in compute_node.run_cmd("sudo ovs-vsctl show"):
898 ovs_flows_cmd = ("sudo ovs-ofctl dump-flows {} -O {} | "
899 "grep table=".format(ovs_br, of_protocol))
900 cmd_out_lines += (compute_node.run_cmd(ovs_flows_cmd).strip().