3 # Copyright (c) 2017 All rights reserved
4 # This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
8 # http://www.apache.org/licenses/LICENSE-2.0
17 from concurrent.futures import ThreadPoolExecutor
19 from opnfv.deployment.factory import Factory as DeploymentFactory
21 from sdnvpn.lib import config as sdnvpn_config
22 import sdnvpn.lib.openstack_utils as os_utils
# Module-level logger; the name matches the historical module name.
logger = logging.getLogger('sdnvpn_test_utils')

# Shared SDNVPN configuration object (flavor names/sizes, etc.).
common_config = sdnvpn_config.CommonConfig()

# Thread pool used by async_Wait_for_instances; at most 5 parallel waits.
executor = ThreadPoolExecutor(5)
class ExtraRoute(object):
    """Container for an extra (static) route of a neutron router.

    Attributes:
        destination: destination CIDR of the route.
        nexthop: IP address of the next hop.
    """

    def __init__(self, destination, nexthop):
        # Plain value object; no validation is performed here.
        self.destination = destination
        self.nexthop = nexthop
class AllowedAddressPair(object):
    """Container for an allowed address pair of a neutron port.

    Attributes:
        ipaddress: IP address allowed on the port.
        macaddress: MAC address allowed on the port.
    """

    def __init__(self, ipaddress, macaddress):
        # Plain value object; no validation is performed here.
        self.ipaddress = ipaddress
        self.macaddress = macaddress
def create_default_flavor():
    """Get or create the default flavor described by the common config.

    Returns whatever os_utils.get_or_create_flavor returns for the
    default flavor name, RAM, disk and vcpu values from common_config.
    """
    cfg = common_config
    return os_utils.get_or_create_flavor(cfg.default_flavor,
                                         cfg.default_flavor_ram,
                                         cfg.default_flavor_disk,
                                         cfg.default_flavor_vcpus)
def create_custom_flavor():
    """Get or create the custom flavor described by the common config.

    Returns whatever os_utils.get_or_create_flavor returns for the
    custom flavor name, RAM, disk and vcpu values from common_config.
    """
    cfg = common_config
    return os_utils.get_or_create_flavor(cfg.custom_flavor_name,
                                         cfg.custom_flavor_ram,
                                         cfg.custom_flavor_disk,
                                         cfg.custom_flavor_vcpus)
def create_net(neutron_client, name):
    """Create a neutron network called `name` and return its id.

    NOTE(review): the error-handling lines around the message below are
    elided in this view.
    """
    logger.debug("Creating network %s", name)
    net_id = os_utils.create_neutron_net(neutron_client, name)
        "There has been a problem when creating the neutron network")
def create_subnet(neutron_client, name, cidr, net_id):
    """Create a neutron subnet `name` with `cidr` in network `net_id`.

    NOTE(review): several argument and error-handling lines are elided
    in this view.
    """
    logger.debug("Creating subnet %s in network %s with cidr %s",
    subnet_id = os_utils.create_neutron_subnet(neutron_client,
        "There has been a problem when creating the neutron subnet")
def create_network(neutron_client, net, subnet1, cidr1,
                   router, subnet2=None, cidr2=None):
    """Network assoc won't work for networks/subnets created by this function.
    It is an ODL limitation due to it handling routers as vpns.
    See https://bugs.opendaylight.org/show_bug.cgi?id=6962"""
    # Create network + first subnet + router in one helper call.
    # NOTE(review): additional arguments/error lines are elided here.
    network_dic = os_utils.create_network_full(neutron_client,
        "There has been a problem when creating the neutron network")
    net_id = network_dic["net_id"]
    subnet_id = network_dic["subnet_id"]
    router_id = network_dic["router_id"]
    # Optionally attach a second subnet to the same network.
    if subnet2 is not None:
        logger.debug("Creating and attaching a second subnet...")
        subnet_id = os_utils.create_neutron_subnet(
            neutron_client, subnet2, cidr2, net_id)
            "There has been a problem when creating the second subnet")
        logger.debug("Subnet '%s' created successfully" % subnet_id)
    # Return the network, the (last created) subnet and the router ids.
    return net_id, subnet_id, router_id
def get_port(neutron_client, instance_id):
    """Return the neutron port whose device_id matches `instance_id`.

    NOTE(review): the loop header over `ports` and the return statements
    are elided in this view.
    """
    ports = os_utils.get_port_list(neutron_client)
    if ports is not None:
        # Match the port bound to the given instance.
        if port['device_id'] == instance_id:
def update_port_allowed_address_pairs(neutron_client, port_id, address_pairs):
    """Set the allowed-address-pairs attribute of port `port_id`.

    `address_pairs` is a list of AllowedAddressPair objects.  Returns
    the updated port id on success; logs the error otherwise.
    NOTE(review): the early-return body, try: line and update_port
    keyword arguments are elided in this view.
    """
    if len(address_pairs) <= 0:
    # Convert AllowedAddressPair objects to the neutron API dict form.
    allowed_address_pairs = []
    for address_pair in address_pairs:
        address_pair_dict = {'ip_address': address_pair.ipaddress,
                             'mac_address': address_pair.macaddress}
        allowed_address_pairs.append(address_pair_dict)
    json_body = {'port': {
        "allowed_address_pairs": allowed_address_pairs
        port = neutron_client.update_port(port=port_id,
        return port['port']['id']
    except Exception as e:
        logger.error("Error [update_neutron_port(neutron_client, '%s', '%s')]:"
                     " %s" % (port_id, address_pairs, e))
def create_instance(nova_client,
    """Boot a nova instance and add it to a security group.

    NOTE(review): most of the signature, the full debug call and the
    error/return handling are elided in this view.
    """
    # Fall back to the configured default flavor when none is given.
    if 'flavor' not in kwargs:
        kwargs['flavor'] = common_config.default_flavor
    logger.info("Creating instance '%s'..." % name)
        "Configuration:\n name=%s \n flavor=%s \n image=%s \n"
        " network=%s\n secgroup=%s \n hypervisor=%s \n"
        " fixed_ip=%s\n files=%s\n userdata=\n%s\n"
        % (name, kwargs['flavor'], image_id, network_id, sg_id,
           compute_node, fixed_ip, files, userdata))
    instance = os_utils.create_instance_and_wait_for_active(
        av_zone=compute_node,
        logger.error("Error while booting instance.")
    # NOTE(review): dict.itervalues()/.next() is Python 2 only.
    logger.debug("Instance '%s' booted successfully. IP='%s'." %
                 (name, instance.networks.itervalues().next()[0]))
    # Retrieve IP of INSTANCE
    # instance_ip = instance.networks.get(network_id)[0]
    logger.debug("Adding '%s' to security group '%s'..."
                 % (name, secgroup_name))
    logger.debug("Adding '%s' to security group '%s'..."
    os_utils.add_secgroup_to_instance(nova_client, instance.id, sg_id)
def generate_ping_userdata(ips_array, ping_count=10):
    """Return a cloud-init shell script that pings every IP in `ips_array`.

    NOTE(review): the ips accumulator setup and several lines of the
    script template are elided in this view.
    """
        ips = ("%s %s" % (ips, ip))
    ips = ips.replace(' ', ' ')
    return ("#!/bin/sh\n"
            " ping -c %s $ip 2>&1 >/dev/null\n"
            " if [ \"Z$RES\" = \"Z0\" ] ; then\n"
            " echo ping $ip OK\n"
            " else echo ping $ip KO\n"
def generate_userdata_common():
    """Return the common cloud-init script: install an SSH key pair for
    the cirros user and fix ownership/permissions of ~/.ssh.

    NOTE(review): the closing parenthesis of the return expression is
    elided in this view.
    """
    return ("#!/bin/sh\n"
            "sudo mkdir -p /home/cirros/.ssh/\n"
            "sudo chown cirros:cirros /home/cirros/.ssh/\n"
            "sudo chown cirros:cirros /home/cirros/id_rsa\n"
            "mv /home/cirros/id_rsa /home/cirros/.ssh/\n"
            "sudo echo ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgnWtSS98Am516e"
            "stBsq0jbyOB4eLMUYDdgzsUHsnxFQCtACwwAg9/2uq3FoGUBUWeHZNsT6jcK9"
            "sCMEYiS479CUCzbrxcd8XaIlK38HECcDVglgBNwNzX/WDfMejXpKzZG61s98rU"
            "ElNvZ0YDqhaqZGqxIV4ejalqLjYrQkoly3R+2k= "
            "cirros@test1>/home/cirros/.ssh/authorized_keys\n"
            "sudo chown cirros:cirros /home/cirros/.ssh/authorized_keys\n"
            "chmod 700 /home/cirros/.ssh\n"
            "chmod 644 /home/cirros/.ssh/authorized_keys\n"
            "chmod 600 /home/cirros/.ssh/id_rsa\n"
def generate_userdata_with_ssh(ips_array):
    """Return a cloud-init script that SSHes to each IP in `ips_array`
    and reports its hostname; builds on generate_userdata_common().

    NOTE(review): the ips accumulator setup and several script lines are
    elided in this view.
    """
    u1 = generate_userdata_common()
        ips = ("%s %s" % (ips, ip))
    ips = ips.replace(' ', ' ')
            " hostname=$(ssh -y -i /home/cirros/.ssh/id_rsa "
            "cirros@$ip 'hostname' </dev/zero 2>/dev/null)\n"
            " if [ \"Z$RES\" = \"Z0\" ]; then echo $ip $hostname;\n"
            " else echo $ip 'not reachable';fi;\n"
def generate_userdata_interface_create(interface_name, interface_number,
                                       ip_Address, net_mask):
    """Return a cloud-init script that creates user 'sdnvpn' and brings
    up sub-interface <interface_name>:<interface_number> with the given
    IP address and netmask.

    NOTE(review): two script lines are elided in this view.
    """
    return ("#!/bin/sh\n"
            "sudo useradd -m sdnvpn\n"
            "sudo adduser sdnvpn sudo\n"
            "sudo echo sdnvpn:opnfv | chpasswd\n"
            "sudo ifconfig %s:%s %s netmask %s up\n"
            % (interface_name, interface_number,
               ip_Address, net_mask))
def get_installerHandler():
    """Return a deployment handler for the current installer.

    Reads INSTALLER_TYPE (and, via get_installer_ip, INSTALLER_IP) from
    the environment; only 'fuel' and 'apex' are supported.
    NOTE(review): several handler-construction arguments and the early
    return are elided in this view.
    """
    installer_type = str(os.environ['INSTALLER_TYPE'].lower())
    installer_ip = get_installer_ip()
    if installer_type not in ["fuel", "apex"]:
        logger.warn("installer type %s is neither fuel nor apex."
                    "returning None for installer handler" % installer_type)
    if installer_type in ["apex"]:
        developHandler = DeploymentFactory.get_handler(
            pkey_file="/root/.ssh/id_rsa")
    if installer_type in ["fuel"]:
        developHandler = DeploymentFactory.get_handler(
    return developHandler
# NOTE(review): the enclosing `def get_nodes():` header is not visible
# in this view; the two lines below read as its body (build the
# installer handler, then ask it for the deployment nodes).
    developHandler = get_installerHandler()
    return developHandler.get_nodes()
def get_installer_ip():
    """Return the installer IP read from the INSTALLER_IP env variable."""
    installer_ip = os.environ['INSTALLER_IP']
    return str(installer_ip)
def get_instance_ip(instance):
    """Return the first IP of the instance's first network.

    NOTE(review): dict.itervalues()/.next() is Python 2 only; the return
    statement is elided in this view.
    """
    instance_ip = instance.networks.itervalues().next()[0]
def wait_for_instance(instance, pattern=".* login:", tries=40):
    """Poll the instance console until `pattern` appears or tries run out.

    NOTE(review): initialization of console_log/sleep_time, the tries
    decrement and the return statement are elided in this view.
    """
    logger.info("Waiting for instance %s to boot up" % instance.id)
    expected_regex = re.compile(pattern)
    while tries > 0 and not expected_regex.search(console_log):
        console_log = instance.get_console_output()
        time.sleep(sleep_time)
    if not expected_regex.search(console_log):
        logger.error("Instance %s does not boot up properly."
def wait_for_instances_up(*instances):
    """Wait for every given instance to reach its login prompt.

    NOTE(review): the aggregated return is elided in this view.
    """
    check = [wait_for_instance(instance) for instance in instances]
def wait_for_instances_get_dhcp(*instances):
    """Wait until each instance's console reports a DHCP lease.

    NOTE(review): the aggregated return is elided in this view.
    """
    check = [wait_for_instance(instance, "Lease of .* obtained")
             for instance in instances]
def async_Wait_for_instances(instances, tries=40):
    """Wait for all instances concurrently via the module-level executor.

    NOTE(review): the early-return body, futures/results list setup,
    the remaining submit() arguments and the success check are elided
    in this view.
    """
    if len(instances) <= 0:
    for instance in instances:
        future = executor.submit(wait_for_instance,
        futures.append(future)
    # Block until every submitted wait has completed.
    for future in futures:
        results.append(future.result())
    logger.error("one or more instances is not yet booted up")
def wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, net_id):
    """Poll until `net_id` shows up in the BGPVPN's network list.

    NOTE(review): tries/nets initialization and the return statement are
    elided in this view.
    """
    # NOTE(review): the format arguments look swapped -- the message
    # names the network first but bgpvpn_id is passed first. Confirm.
    logger.debug("Waiting for network %s to associate with BGPVPN %s "
                 % (bgpvpn_id, net_id))
    while tries > 0 and net_id not in nets:
        nets = get_bgpvpn_networks(neutron_client, bgpvpn_id)
        time.sleep(sleep_time)
    if net_id not in nets:
        logger.error("Association of network %s with BGPVPN %s failed" %
def wait_for_bgp_net_assocs(neutron_client, bgpvpn_id, *args):
    """Wait for several network associations on the same BGPVPN.

    NOTE(review): the comprehension's loop clause and the return are
    elided in this view.
    """
    check = [wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, id)
    # Return True if all associations succeeded
def wait_for_bgp_router_assoc(neutron_client, bgpvpn_id, router_id):
    """Poll until `router_id` shows up in the BGPVPN's router list.

    NOTE(review): tries/routers initialization and the return statement
    are elided in this view.
    """
    # NOTE(review): the format arguments look swapped -- the message
    # names the router first but bgpvpn_id is passed first. Confirm.
    logger.debug("Waiting for router %s to associate with BGPVPN %s "
                 % (bgpvpn_id, router_id))
    while tries > 0 and router_id not in routers:
        routers = get_bgpvpn_routers(neutron_client, bgpvpn_id)
        time.sleep(sleep_time)
    if router_id not in routers:
        logger.error("Association of router %s with BGPVPN %s failed" %
                     (router_id, bgpvpn_id))
def wait_for_bgp_router_assocs(neutron_client, bgpvpn_id, *args):
    """Wait for several router associations on the same BGPVPN.

    NOTE(review): the comprehension's loop clause and the return are
    elided in this view.
    """
    check = [wait_for_bgp_router_assoc(neutron_client, bgpvpn_id, id)
    # Return True if all associations succeeded
def wait_before_subtest(*args, **kwargs):
    ''' This is a placeholder.
    TODO: Replace delay with polling logic. '''
    # NOTE(review): the actual sleep call is elided in this view.
def assert_and_get_compute_nodes(nova_client, required_node_number=2):
    """Get the compute nodes in the deployment
    Exit if the deployment doesn't have enough compute nodes"""
    compute_nodes = os_utils.get_hypervisors(nova_client)
    num_compute_nodes = len(compute_nodes)
    # NOTE(review): compares against a hard-coded 2 instead of
    # required_node_number -- looks like a bug; confirm intent upstream.
    if num_compute_nodes < 2:
        logger.error("There are %s compute nodes in the deployment. "
                     "Minimum number of nodes to complete the test is 2."
    logger.debug("Compute nodes: %s" % compute_nodes)
def open_icmp(neutron_client, security_group_id):
    """Ensure an ICMP-allowing rule exists on the given security group.

    NOTE(review): the rule-attribute arguments and the else: branch
    header are elided in this view.
    """
    if os_utils.check_security_group_rules(neutron_client,
        if not os_utils.create_secgroup_rule(neutron_client,
            logger.error("Failed to create icmp security group rule...")
        logger.info("This rule exists for security group: %s"
def open_http_port(neutron_client, security_group_id):
    """Ensure an HTTP-allowing rule exists on the given security group.

    NOTE(review): the rule-attribute arguments and the else: branch
    header are elided in this view.
    """
    if os_utils.check_security_group_rules(neutron_client,
        if not os_utils.create_secgroup_rule(neutron_client,
            logger.error("Failed to create http security group rule...")
        logger.info("This rule exists for security group: %s"
def open_bgp_port(neutron_client, security_group_id):
    """Ensure a BGP-allowing rule exists on the given security group.

    NOTE(review): the rule-attribute arguments and the else: branch
    header are elided in this view.
    """
    if os_utils.check_security_group_rules(neutron_client,
        if not os_utils.create_secgroup_rule(neutron_client,
            logger.error("Failed to create bgp security group rule...")
        logger.info("This rule exists for security group: %s"
def exec_cmd(cmd, verbose):
    """Run `cmd` through the shell and return (output, success).

    NOTE(review): output/success bookkeeping and the returncode check
    are elided in this view.  shell=True is used; `cmd` must be trusted
    test-tooling input, never user data.
    """
    logger.debug("Executing '%s'" % cmd)
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT)
    # Stream stdout line by line until EOF.
    for line in iter(p.stdout.readline, b''):
    returncode = p.wait()
        logger.error("Command %s failed to execute." % cmd)
    return output, success
def check_odl_fib(ip, controller_ip):
    """Check that there is an entry in the ODL Fib for `ip`"""
    url = "http://" + controller_ip + \
        ":8181/restconf/config/odl-fib:fibEntries/"
    logger.debug("Querring '%s' for FIB entries", url)
    # ODL_USER / ODL_PASS are module-level credentials (not visible in
    # this view).
    res = requests.get(url, auth=(ODL_USER, ODL_PASS))
    if res.status_code != 200:
        logger.error("OpenDaylight response status code: %s", res.status_code)
    logger.debug("Checking whether '%s' is in the OpenDaylight FIB"
    logger.debug("OpenDaylight FIB: \n%s" % res.text)
    # Plain substring match against the whole FIB dump.
    return ip in res.text
def run_odl_cmd(odl_node, cmd):
    """Run a command in the OpenDaylight Karaf shell on `odl_node`.

    This is a bit flimsy because of shell quote escaping: make sure the
    command passed does not contain any top-level double quotes.
    stderr is sent to /dev/null because the karaf client works but
    prints noise containing "ERROR", which run_cmd dislikes.
    """
    client_fmt = ('/opt/opendaylight/bin/client -h 127.0.0.1 "%s"'
                  ' 2>/dev/null')
    return odl_node.run_cmd(client_fmt % cmd)
def wait_for_cloud_init(instance):
    """Poll the instance console until cloud-init finishes or times out.

    NOTE(review): the retry-loop header, success flag handling and
    sleep_time setup are elided in this view.
    """
    # ubuntu images take a long time to start
        logger.info("Waiting for cloud init of instance: {}"
                    "".format(instance.name))
        instance_log = instance.get_console_output()
        if "Failed to run module" in instance_log:
            logger.error("Cloud init failed to run. Reason: %s",
        if re.search(r"Cloud-init v. .+ finished at", instance_log):
        time.sleep(sleep_time)
    logger.error("Cloud init timed out"
    logger.info("Finished waiting for cloud init of instance {} result was {}"
                "".format(instance.name, success))
def attach_instance_to_ext_br(instance, compute_node):
    """Attach the instance's libvirt domain to the external bridge.

    NOTE(review): the fuel branch body, the `bridge` assignment and the
    triple-quote delimiters of the apex shell snippet are elided in this
    view -- the indented shell lines below belong to that cmd string.
    """
    libvirt_instance_name = getattr(instance, "OS-EXT-SRV-ATTR:instance_name")
    installer_type = str(os.environ['INSTALLER_TYPE'].lower())
    if installer_type == "fuel":
    elif installer_type == "apex":
        # In Apex, br-ex is an ovs bridge and virsh attach-interface
        # won't just work. We work around it by creating a linux
        # bridge, attaching that to br-ex with a veth pair
        # and virsh-attaching the instance to the linux-bridge
        if ! sudo brctl show |grep -q ^{bridge};then
        sudo brctl addbr {bridge}
        sudo ip link set {bridge} up
        sudo ip link add quagga-tap type veth peer name ovs-quagga-tap
        sudo ip link set dev ovs-quagga-tap up
        sudo ip link set dev quagga-tap up
        sudo ovs-vsctl add-port br-ex ovs-quagga-tap
        sudo brctl addif {bridge} quagga-tap
        compute_node.run_cmd(cmd.format(bridge=bridge))
    compute_node.run_cmd("sudo virsh attach-interface %s"
                         " bridge %s" % (libvirt_instance_name, bridge))
def detach_instance_from_ext_br(instance, compute_node):
    """Detach the instance from the external bridge and tear down the
    plumbing created by attach_instance_to_ext_br.

    NOTE(review): the fuel branch body, the `bridge` assignment and the
    triple-quote delimiters of the apex shell snippet are elided in this
    view -- the indented shell lines below belong to that cmd string.
    """
    libvirt_instance_name = getattr(instance, "OS-EXT-SRV-ATTR:instance_name")
    # Find the libvirt-assigned MAC (52:54:...) of the attached interface.
    mac = compute_node.run_cmd("for vm in $(sudo virsh list | "
                               "grep running | awk '{print $2}'); "
                               "do echo -n ; sudo virsh dumpxml $vm| "
                               "grep -oP '52:54:[\da-f:]+' ;done")
    compute_node.run_cmd("sudo virsh detach-interface --domain %s"
                         " --type bridge --mac %s"
                         % (libvirt_instance_name, mac))
    installer_type = str(os.environ['INSTALLER_TYPE'].lower())
    if installer_type == "fuel":
    elif installer_type == "apex":
        # In Apex, br-ex is an ovs bridge and virsh attach-interface
        # won't just work. We work around it by creating a linux
        # bridge, attaching that to br-ex with a veth pair
        # and virsh-attaching the instance to the linux-bridge
        sudo brctl delif {bridge} quagga-tap &&
        sudo ovs-vsctl del-port br-ex ovs-quagga-tap &&
        sudo ip link set dev quagga-tap down &&
        sudo ip link set dev ovs-quagga-tap down &&
        sudo ip link del quagga-tap type veth peer name ovs-quagga-tap &&
        sudo ip link set {bridge} down &&
        sudo brctl delbr {bridge}
        compute_node.run_cmd(cmd.format(bridge=bridge))
def cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids, interfaces,
                    subnet_ids, router_ids, network_ids):
    """Best-effort teardown of neutron resources, in dependency order:
    floating IPs, BGPVPNs, router interfaces, router gateways, subnets,
    routers, then networks.  Each failure is logged, not raised.

    NOTE(review): several .format(...) continuation lines and the final
    return are elided in this view.
    """
    if len(floatingip_ids) != 0:
        for floatingip_id in floatingip_ids:
            if not os_utils.delete_floating_ip(neutron_client, floatingip_id):
                logging.error('Fail to delete all floating ips. '
                              'Floating ip with id {} was not deleted.'.
                              format(floatingip_id))
    if len(bgpvpn_ids) != 0:
        for bgpvpn_id in bgpvpn_ids:
            delete_bgpvpn(neutron_client, bgpvpn_id)
    if len(interfaces) != 0:
        for router_id, subnet_id in interfaces:
            if not os_utils.remove_interface_router(neutron_client,
                                                    router_id, subnet_id):
                logging.error('Fail to delete all interface routers. '
                              'Interface router with id {} was not deleted.'.
    # Remove router gateways before the routers themselves are deleted.
    if len(router_ids) != 0:
        for router_id in router_ids:
            if not os_utils.remove_gateway_router(neutron_client, router_id):
                logging.error('Fail to delete all gateway routers. '
                              'Gateway router with id {} was not deleted.'.
    if len(subnet_ids) != 0:
        for subnet_id in subnet_ids:
            if not os_utils.delete_neutron_subnet(neutron_client, subnet_id):
                logging.error('Fail to delete all subnets. '
                              'Subnet with id {} was not deleted.'.
    if len(router_ids) != 0:
        for router_id in router_ids:
            if not os_utils.delete_neutron_router(neutron_client, router_id):
                logging.error('Fail to delete all routers. '
                              'Router with id {} was not deleted.'.
    if len(network_ids) != 0:
        for network_id in network_ids:
            if not os_utils.delete_neutron_net(neutron_client, network_id):
                logging.error('Fail to delete all networks. '
                              'Network with id {} was not deleted.'.
def cleanup_nova(nova_client, instance_ids, flavor_ids=None):
    """Delete the given flavors (if any) and instances, logging failures.

    NOTE(review): .format(...) continuation lines and the final return
    are elided in this view.
    """
    if flavor_ids is not None and len(flavor_ids) != 0:
        for flavor_id in flavor_ids:
            if not nova_client.flavors.delete(flavor_id):
                logging.error('Fail to delete flavor. '
                              'Flavor with id {} was not deleted.'.
    if len(instance_ids) != 0:
        for instance_id in instance_ids:
            if not os_utils.delete_instance(nova_client, instance_id):
                logging.error('Fail to delete all instances. '
                              'Instance with id {} was not deleted.'.
def cleanup_glance(glance_client, image_ids):
    """Delete the given glance images, logging each failure.

    NOTE(review): the .format(...) continuation line and the final
    return are elided in this view.
    """
    if len(image_ids) != 0:
        for image_id in image_ids:
            if not os_utils.delete_glance_image(glance_client, image_id):
                logging.error('Fail to delete all images. '
                              'Image with id {} was not deleted.'.
def create_bgpvpn(neutron_client, **kwargs):
    """Create a BGPVPN from the given keyword attributes.

    Attributes such as name, route_distinguishers or route_targets are
    passed through unchanged as the request body.
    """
    body = {"bgpvpn": kwargs}
    return neutron_client.create_bgpvpn(body)
def update_bgpvpn(neutron_client, bgpvpn_id, **kwargs):
    """Update BGPVPN `bgpvpn_id` with the given keyword attributes."""
    body = {"bgpvpn": kwargs}
    return neutron_client.update_bgpvpn(bgpvpn_id, body)
def delete_bgpvpn(neutron_client, bgpvpn_id):
    """Delete the BGPVPN identified by `bgpvpn_id`."""
    result = neutron_client.delete_bgpvpn(bgpvpn_id)
    return result
def get_bgpvpn(neutron_client, bgpvpn_id):
    """Return the full show_bgpvpn response for `bgpvpn_id`."""
    response = neutron_client.show_bgpvpn(bgpvpn_id)
    return response
def get_bgpvpn_routers(neutron_client, bgpvpn_id):
    """Return the list of router ids associated with the given BGPVPN."""
    bgpvpn_data = get_bgpvpn(neutron_client, bgpvpn_id)
    return bgpvpn_data['bgpvpn']['routers']
def get_bgpvpn_networks(neutron_client, bgpvpn_id):
    """Return the list of network ids associated with the given BGPVPN."""
    bgpvpn_data = get_bgpvpn(neutron_client, bgpvpn_id)
    return bgpvpn_data['bgpvpn']['networks']
def create_router_association(neutron_client, bgpvpn_id, router_id):
    """Associate router `router_id` with the BGPVPN `bgpvpn_id`."""
    body = {"router_association": {"router_id": router_id}}
    return neutron_client.create_router_association(bgpvpn_id, body)
def create_network_association(neutron_client, bgpvpn_id, neutron_network_id):
    """Associate network `neutron_network_id` with the BGPVPN `bgpvpn_id`."""
    body = {"network_association": {"network_id": neutron_network_id}}
    return neutron_client.create_network_association(bgpvpn_id, body)
def is_fail_mode_secure():
    """
    Checks the value of the attribute fail_mode,
    if it is set to secure. This check is performed
    on all OVS br-int interfaces, for all OpenStack nodes.

    NOTE(review): is_secure initialization, the continue for inactive
    nodes, the get_ovs_int_cmd tail, the else: header and the final
    return are elided in this view.
    """
    openstack_nodes = get_nodes()
    get_ovs_int_cmd = ("sudo ovs-vsctl show | "
    # Define OVS get fail_mode command
    get_ovs_fail_mode_cmd = ("sudo ovs-vsctl get-fail-mode br-int")
    for openstack_node in openstack_nodes:
        if not openstack_node.is_active():
        ovs_int_list = (openstack_node.run_cmd(get_ovs_int_cmd).
        if 'br-int' in ovs_int_list:
            # Execute get fail_mode command
            br_int_fail_mode = (openstack_node.
                                run_cmd(get_ovs_fail_mode_cmd).strip())
            if br_int_fail_mode == 'secure':
                is_secure[openstack_node.name] = True
        logging.error('The fail_mode for br-int was not secure '
                      'in {} node'.format(openstack_node.name))
        is_secure[openstack_node.name] = False
def update_nw_subnet_port_quota(neutron_client, tenant_id, nw_quota,
                                subnet_quota, port_quota, router_quota):
    """Update the tenant's network/subnet/port/router quotas.

    NOTE(review): the 'network'/'port' entries of the body, the try:
    line, the update_quota body argument and the success return are
    elided in this view.
    """
    json_body = {"quota": {
        "subnet": subnet_quota,
        "router": router_quota
        neutron_client.update_quota(tenant_id=tenant_id,
    except Exception as e:
        logger.error("Error [update_nw_subnet_port_quota(neutron_client,"
                     " '%s', '%s', '%s', '%s, %s')]: %s" %
                     (tenant_id, nw_quota, subnet_quota,
                      port_quota, router_quota, e))
def update_instance_quota_class(nova_client, instances_quota):
    """Set the default quota class's maximum number of instances.

    NOTE(review): the try: line and the success return are elided in
    this view.
    """
        nova_client.quota_classes.update("default", instances=instances_quota)
    except Exception as e:
        logger.error("Error [update_instance_quota_class(nova_client,"
                     " '%s' )]: %s" % (instances_quota, e))
def get_neutron_quota(neutron_client, tenant_id):
    """Return the tenant's neutron quota dict, or log on failure.

    NOTE(review): the try: line is elided in this view.
    """
        return neutron_client.show_quota(tenant_id=tenant_id)['quota']
    except Exception as e:
        logger.error("Error in getting neutron quota for tenant "
                     " '%s' )]: %s" % (tenant_id, e))
def get_nova_instances_quota(nova_client):
    """Return the default quota class's instances limit, or log on failure.

    NOTE(review): the try: line is elided in this view.
    """
        return nova_client.quota_classes.get("default").instances
    except Exception as e:
        logger.error("Error in getting nova instances quota: %s" % e)
def update_router_extra_route(neutron_client, router_id, extra_routes):
    """Replace the router's extra routes with `extra_routes` (a list of
    ExtraRoute objects).

    NOTE(review): the early-return body, routes_list initialization and
    the try: line are elided in this view.
    """
    if len(extra_routes) <= 0:
    # Convert ExtraRoute objects to the neutron API dict form.
    for extra_route in extra_routes:
        route_dict = {'destination': extra_route.destination,
                      'nexthop': extra_route.nexthop}
        routes_list.append(route_dict)
    json_body = {'router': {
        "routes": routes_list
        neutron_client.update_router(router_id, body=json_body)
    except Exception as e:
        logger.error("Error in updating router with extra route: %s" % e)
def update_router_no_extra_route(neutron_client, router_ids):
    """Clear the extra routes on every router in `router_ids`.

    NOTE(review): the body of the json payload and the try: line are
    elided in this view.
    """
    json_body = {'router': {
    for router_id in router_ids:
        neutron_client.update_router(router_id, body=json_body)
    except Exception as e:
        logger.error("Error in clearing extra route: %s" % e)
def get_ovs_groups(compute_node_list, ovs_br_list, of_protocol="OpenFlow13"):
    """
    Gets, as input, a list of compute nodes and a list of OVS bridges
    and returns the command console output, as a list of lines, that
    contains all the OVS groups from all bridges and nodes in lists.

    NOTE(review): cmd_out_lines initialization, the split of the command
    output and the return are elided in this view.
    """
    for compute_node in compute_node_list:
        for ovs_br in ovs_br_list:
            # Only query bridges that actually exist on this node.
            if ovs_br in compute_node.run_cmd("sudo ovs-vsctl show"):
                ovs_groups_cmd = ("sudo ovs-ofctl dump-groups {} -O {} | "
                                  "grep group".format(ovs_br, of_protocol))
                cmd_out_lines += (compute_node.run_cmd(ovs_groups_cmd).strip().
def get_ovs_flows(compute_node_list, ovs_br_list, of_protocol="OpenFlow13"):
    """
    Gets, as input, a list of compute nodes and a list of OVS bridges
    and returns the command console output, as a list of lines, that
    contains all the OVS flows from all bridges and nodes in lists.

    NOTE(review): cmd_out_lines initialization and the function tail
    (split and return) run past the end of this view.
    """
    for compute_node in compute_node_list:
        for ovs_br in ovs_br_list:
            # Only query bridges that actually exist on this node.
            if ovs_br in compute_node.run_cmd("sudo ovs-vsctl show"):
                ovs_flows_cmd = ("sudo ovs-ofctl dump-flows {} -O {} | "
                                 "grep table=".format(ovs_br, of_protocol))
                cmd_out_lines += (compute_node.run_cmd(ovs_flows_cmd).strip().