+
+
+def wait_for_cloud_init(conn, instance):
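+    """Poll the instance console log until cloud-init reports its result.
+
+    Returns True if cloud-init finished successfully, False if a module
+    failed to run or the wait timed out.
+    """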
+ success = True
+ # ubuntu images take a long time to start
+ tries = 20
+ sleep_time = 30
+ logger.info("Waiting for cloud init of instance: {}"
+ "".format(instance.name))
+ while tries > 0:
+ instance_log = conn.compute.\
+ get_server_console_output(instance)['output']
+ if "Failed to run module" in instance_log:
+ success = False
+ logger.error("Cloud init failed to run. Reason: %s",
+ instance_log)
+ break
+ if re.search(r"Cloud-init v. .+ finished at", instance_log):
+ success = True
+ break
+ time.sleep(sleep_time)
+        tries -= 1
+
+ if tries == 0:
+ logger.error("Cloud init timed out"
+ ". Reason: %s",
+ instance_log)
+ success = False
+ logger.info("Finished waiting for cloud init of instance {} result was {}"
+ "".format(instance.name, success))
+ return success
+
+
+def attach_instance_to_ext_br(instance, compute_node):
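+    """Attach the instance's libvirt domain to the external bridge.
+
+    On Fuel the domain is attached directly to br-ex; on Apex a helper
+    linux bridge (br-quagga) is created and patched into br-ex first.
+    """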
+ libvirt_instance_name = instance.instance_name
+ installer_type = str(os.environ['INSTALLER_TYPE'].lower())
+ if installer_type == "fuel":
+ bridge = "br-ex"
+ elif installer_type == "apex":
+ # In Apex, br-ex is an ovs bridge and virsh attach-interface
+ # won't just work. We work around it by creating a linux
+ # bridge, attaching that to br-ex with a veth pair
+ # and virsh-attaching the instance to the linux-bridge
+ bridge = "br-quagga"
+ cmd = """
+ set -e
+ if ! sudo brctl show |grep -q ^{bridge};then
+ sudo brctl addbr {bridge}
+ sudo ip link set {bridge} up
+ sudo ip link add quagga-tap type veth peer name ovs-quagga-tap
+ sudo ip link set dev ovs-quagga-tap up
+ sudo ip link set dev quagga-tap up
+ sudo ovs-vsctl add-port br-ex ovs-quagga-tap
+ sudo brctl addif {bridge} quagga-tap
+ fi
+ """
+        compute_node.run_cmd(cmd.format(bridge=bridge))
+    else:
+        raise Exception("Unsupported installer type: {}"
+                        .format(installer_type))
+
+ compute_node.run_cmd("sudo virsh attach-interface %s"
+ " bridge %s" % (libvirt_instance_name, bridge))
+
+
+def detach_instance_from_ext_br(instance, compute_node):
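+    """Detach the instance's libvirt domain from the external bridge.
+
+    On Apex the helper linux bridge and veth pair created by
+    attach_instance_to_ext_br are removed as well.
+    """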
+ libvirt_instance_name = instance.instance_name
+ mac = compute_node.run_cmd("for vm in $(sudo virsh list | "
+ "grep running | awk '{print $2}'); "
+ "do echo -n ; sudo virsh dumpxml $vm| "
+ "grep -oP '52:54:[\da-f:]+' ;done")
+ compute_node.run_cmd("sudo virsh detach-interface --domain %s"
+ " --type bridge --mac %s"
+ % (libvirt_instance_name, mac))
+
+ installer_type = str(os.environ['INSTALLER_TYPE'].lower())
+ if installer_type == "fuel":
+ bridge = "br-ex"
+ elif installer_type == "apex":
+        # In Apex the interface was attached through the br-quagga linux
+        # bridge workaround (see attach_instance_to_ext_br), so tear down
+        # the veth pair and the linux bridge as well.
+ bridge = "br-quagga"
+ cmd = """
+ sudo brctl delif {bridge} quagga-tap &&
+ sudo ovs-vsctl del-port br-ex ovs-quagga-tap &&
+ sudo ip link set dev quagga-tap down &&
+ sudo ip link set dev ovs-quagga-tap down &&
+ sudo ip link del quagga-tap type veth peer name ovs-quagga-tap &&
+ sudo ip link set {bridge} down &&
+ sudo brctl delbr {bridge}
+ """
+ compute_node.run_cmd(cmd.format(bridge=bridge))
+
+
+def cleanup_neutron(conn, neutron_client, floatingip_ids, bgpvpn_ids,
+ interfaces, subnet_ids, router_ids, network_ids):
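+    """Delete the given Neutron and BGPVPN resources.
+
+    Resources are removed in dependency order: floating IPs, BGPVPNs,
+    router interfaces, router gateways, subnets, routers and networks.
+    Returns False if a floating IP, subnet, router or network cannot be
+    deleted; True otherwise.
+    """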
+ if len(floatingip_ids) != 0:
+ for floatingip_id in floatingip_ids:
+ if not os_utils.delete_floating_ip(conn, floatingip_id):
+                logger.error('Failed to delete all floating IPs. '
+                             'Floating IP with id {} was not deleted.'
+                             .format(floatingip_id))
+ return False
+
+ if len(bgpvpn_ids) != 0:
+ for bgpvpn_id in bgpvpn_ids:
+ delete_bgpvpn(neutron_client, bgpvpn_id)
+
+ if len(interfaces) != 0:
+ for router_id, subnet_id in interfaces:
+ if not os_utils.remove_interface_router(conn,
+ router_id, subnet_id):
+                logger.error('Failed to remove all router interfaces. '
+                             'Interface on router with id {} was not '
+                             'removed.'.format(router_id))
+
+ if len(router_ids) != 0:
+ for router_id in router_ids:
+ if not os_utils.remove_gateway_router(conn, router_id):
+                logger.error('Failed to remove all router gateways. '
+                             'Gateway of router with id {} was not '
+                             'removed.'.format(router_id))
+
+ if len(subnet_ids) != 0:
+ for subnet_id in subnet_ids:
+ if not os_utils.delete_neutron_subnet(conn, subnet_id):
+                logger.error('Failed to delete all subnets. '
+                             'Subnet with id {} was not deleted.'
+                             .format(subnet_id))
+ return False
+
+ if len(router_ids) != 0:
+ for router_id in router_ids:
+ if not os_utils.delete_neutron_router(conn, router_id):
+                logger.error('Failed to delete all routers. '
+                             'Router with id {} was not deleted.'
+                             .format(router_id))
+ return False
+
+ if len(network_ids) != 0:
+ for network_id in network_ids:
+ if not os_utils.delete_neutron_net(conn, network_id):
+                logger.error('Failed to delete all networks. '
+                             'Network with id {} was not deleted.'
+                             .format(network_id))
+ return False
+ return True
+
+
+def cleanup_nova(conn, instance_ids, flavor_ids=None):
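+    """Delete the given Nova flavors and instances, waiting for each
+    successfully deleted instance to disappear."""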
+ if flavor_ids is not None and len(flavor_ids) != 0:
+ for flavor_id in flavor_ids:
+ conn.compute.delete_flavor(flavor_id)
+ if len(instance_ids) != 0:
+ for instance_id in instance_ids:
+ if not os_utils.delete_instance(conn, instance_id):
+                logger.error('Failed to delete all instances. '
+                             'Instance with id {} was not deleted.'
+                             .format(instance_id))
+ else:
+ wait_for_instance_delete(conn, instance_id)
+ return True
+
+
+def cleanup_glance(conn, image_ids):
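+    """Delete the given Glance images; return False if any deletion fails."""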
+ if len(image_ids) != 0:
+ for image_id in image_ids:
+ if not os_utils.delete_glance_image(conn, image_id):
+                logger.error('Failed to delete all images. '
+                             'Image with id {} was not deleted.'
+                             .format(image_id))
+ return False
+ return True
+
+
+def create_bgpvpn(neutron_client, **kwargs):
+    """Create a BGPVPN; kwargs such as route_distinguishers and
+    route_targets are passed through as the bgpvpn body."""
+ json_body = {"bgpvpn": kwargs}
+ return neutron_client.create_bgpvpn(json_body)
+
+
+def update_bgpvpn(neutron_client, bgpvpn_id, **kwargs):
+ json_body = {"bgpvpn": kwargs}
+ return neutron_client.update_bgpvpn(bgpvpn_id, json_body)
+
+
+def delete_bgpvpn(neutron_client, bgpvpn_id):
+ return neutron_client.delete_bgpvpn(bgpvpn_id)
+
+
+def get_bgpvpn(neutron_client, bgpvpn_id):
+ return neutron_client.show_bgpvpn(bgpvpn_id)
+
+
+def get_bgpvpn_routers(neutron_client, bgpvpn_id):
+ return get_bgpvpn(neutron_client, bgpvpn_id)['bgpvpn']['routers']
+
+
+def get_bgpvpn_networks(neutron_client, bgpvpn_id):
+ return get_bgpvpn(neutron_client, bgpvpn_id)['bgpvpn']['networks']
+
+
+def create_router_association(neutron_client, bgpvpn_id, router_id):
+ json_body = {"router_association": {"router_id": router_id}}
+ return neutron_client.create_router_association(bgpvpn_id, json_body)
+
+
+def create_network_association(neutron_client, bgpvpn_id, neutron_network_id):
+ json_body = {"network_association": {"network_id": neutron_network_id}}
+ return neutron_client.create_network_association(bgpvpn_id, json_body)
+
+
+def is_fail_mode_secure():
+ """
+ Checks the value of the attribute fail_mode,
+ if it is set to secure. This check is performed
+ on all OVS br-int interfaces, for all OpenStack nodes.
+ """
+ is_secure = {}
+ openstack_nodes = get_nodes()
+ get_ovs_int_cmd = ("sudo ovs-vsctl show | "
+ "grep -i bridge | "
+ "awk '{print $2}'")
+ # Define OVS get fail_mode command
+ get_ovs_fail_mode_cmd = ("sudo ovs-vsctl get-fail-mode br-int")
+ for openstack_node in openstack_nodes:
+ if not openstack_node.is_active():
+ continue
+
+ ovs_int_list = (openstack_node.run_cmd(get_ovs_int_cmd).
+ strip().split('\n'))
+ if 'br-int' in ovs_int_list:
+ # Execute get fail_mode command
+ br_int_fail_mode = (openstack_node.
+ run_cmd(get_ovs_fail_mode_cmd).strip())
+ if br_int_fail_mode == 'secure':
+ # success
+ is_secure[openstack_node.name] = True
+ else:
+ # failure
+ logger.error('The fail_mode for br-int was not secure '
+ 'in {} node'.format(openstack_node.name))
+ is_secure[openstack_node.name] = False
+ return is_secure
+
+
+def update_nw_subnet_port_quota(conn, tenant_id, nw_quota,
+ subnet_quota, port_quota, router_quota):
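+    """Update the network, subnet, port and router quotas of a tenant;
+    return True on success, False otherwise."""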
+ try:
+ conn.network.update_quota(tenant_id, networks=nw_quota,
+ subnets=subnet_quota, ports=port_quota,
+ routers=router_quota)
+ return True
+ except Exception as e:
+ logger.error("Error [update_nw_subnet_port_quota(network,"
+ " '%s', '%s', '%s', '%s, %s')]: %s" %
+ (tenant_id, nw_quota, subnet_quota,
+ port_quota, router_quota, e))
+ return False
+
+
+def update_instance_quota_class(cloud, instances_quota):
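+    """Update the instances quota of the admin project; return True on
+    success, False otherwise."""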
+ try:
+ cloud.set_compute_quotas('admin', instances=instances_quota)
+ return True
+ except Exception as e:
+ logger.error("Error [update_instance_quota_class(compute,"
+ " '%s' )]: %s" % (instances_quota, e))
+ return False
+
+
+def get_neutron_quota(conn, tenant_id):
+ try:
+ return conn.network.get_quota(tenant_id)
+ except ResourceNotFound as e:
+ logger.error("Error in getting network quota for tenant "
+ " '%s' )]: %s" % (tenant_id, e))
+ raise
+
+
+def get_nova_instances_quota(cloud):
+ try:
+ return cloud.get_compute_quotas('admin').instances
+ except Exception as e:
+ logger.error("Error in getting nova instances quota: %s" % e)
+ raise
+
+
+def update_router_extra_route(conn, router_id, extra_routes):
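+    """Replace the extra routes of a router with the given
+    destination/nexthop pairs."""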
+    if not extra_routes:
+        return
+ routes_list = []
+ for extra_route in extra_routes:
+ route_dict = {'destination': extra_route.destination,
+ 'nexthop': extra_route.nexthop}
+ routes_list.append(route_dict)
+
+ try:
+ conn.network.update_router(router_id, routes=routes_list)
+ return True
+ except Exception as e:
+ logger.error("Error in updating router with extra route: %s" % e)
+ raise
+
+
+def update_router_no_extra_route(conn, router_ids):
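+    """Clear the extra routes of all the given routers."""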
+    for router_id in router_ids:
+        try:
+            conn.network.update_router(router_id, routes=[])
+        except Exception as e:
+            logger.error("Error in clearing extra route: %s" % e)
+    return True
+
+
+def get_ovs_groups(compute_node_list, ovs_br_list, of_protocol="OpenFlow13"):
+ """
+    Takes a list of compute nodes and a list of OVS bridges and returns
+    the console output, as a list of lines, containing all the OVS groups
+    of all the given bridges on all the given nodes.
+ """
+ cmd_out_lines = []
+ for compute_node in compute_node_list:
+ for ovs_br in ovs_br_list:
+ if ovs_br in compute_node.run_cmd("sudo ovs-vsctl show"):
+ ovs_groups_cmd = ("sudo ovs-ofctl dump-groups {} -O {} | "
+ "grep group".format(ovs_br, of_protocol))
+ cmd_out_lines += (compute_node.run_cmd(ovs_groups_cmd).strip().
+ split("\n"))
+ return cmd_out_lines
+
+
+def get_ovs_flows(compute_node_list, ovs_br_list, of_protocol="OpenFlow13"):
+ """
+    Takes a list of compute nodes and a list of OVS bridges and returns
+    the console output, as a list of lines, containing all the OVS flows
+    of all the given bridges on all the given nodes.
+ """
+ cmd_out_lines = []
+ for compute_node in compute_node_list:
+ for ovs_br in ovs_br_list:
+ if ovs_br in compute_node.run_cmd("sudo ovs-vsctl show"):
+ ovs_flows_cmd = ("sudo ovs-ofctl dump-flows {} -O {} | "
+ "grep table=".format(ovs_br, of_protocol))
+ cmd_out_lines += (compute_node.run_cmd(ovs_flows_cmd).strip().
+ split("\n"))
+ return cmd_out_lines
+
+
+def get_odl_bgp_entity_owner(controllers):
+ """ Finds the ODL owner of the BGP entity in the cluster.
+
+    When ODL runs in clustering mode, the BGP speaker related commands
+    must be executed on the ODL instance that owns the BGP entity.
+
+ :param controllers: list of OS controllers
+    :return controller: OS controller on which the ODL BGP entity owner
+        runs, or None if the owner cannot be determined
+ """
+ if len(controllers) == 1:
+ return controllers[0]
+ else:
+ url = ('http://admin:admin@{ip}:8081/restconf/'
+ 'operational/entity-owners:entity-owners/entity-type/bgp'
+ .format(ip=controllers[0].ip))
+
+ remote_odl_akka_conf = ('/opt/opendaylight/configuration/'
+ 'initial/akka.conf')
+ remote_odl_home_akka_conf = '/home/heat-admin/akka.conf'
+ local_tmp_akka_conf = '/tmp/akka.conf'
+ try:
+ json_output = requests.get(url).json()
+ except Exception:
+ logger.error('Failed to find the ODL BGP '
+ 'entity owner through REST')
+ return None
+ odl_bgp_owner = json_output['entity-type'][0]['entity'][0]['owner']
+
+ for controller in controllers:
+
+ controller.run_cmd('sudo cp {0} /home/heat-admin/'
+ .format(remote_odl_akka_conf))
+ controller.run_cmd('sudo chmod 777 {0}'
+ .format(remote_odl_home_akka_conf))
+ controller.get_file(remote_odl_home_akka_conf, local_tmp_akka_conf)
+
+            with open(local_tmp_akka_conf) as akka_conf:
+                for line in akka_conf:
+                    if re.search(odl_bgp_owner, line):
+                        return controller
+ return None
+
+
+def add_quagga_external_gre_end_point(controllers, remote_tep_ip):
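+    """Create an MPLS-over-GRE external tunnel endpoint towards the given
+    remote TEP IP through the ODL itm-rpc REST API."""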
+ json_body = {'input':
+ {'destination-ip': remote_tep_ip,
+ 'tunnel-type': "odl-interface:tunnel-type-mpls-over-gre"}
+ }
+ url = ('http://{ip}:8081/restconf/operations/'
+ 'itm-rpc:add-external-tunnel-endpoint'.format(ip=controllers[0].ip))
+ headers = {'Content-type': 'application/yang.data+json',
+ 'Accept': 'application/yang.data+json'}
+ try:
+ requests.post(url, data=json.dumps(json_body),
+ headers=headers,
+ auth=HTTPBasicAuth('admin', 'admin'))
+ except Exception as e:
+ logger.error("Failed to create external tunnel endpoint on"
+ " ODL for external tep ip %s with error %s"
+ % (remote_tep_ip, e))
+ return None
+
+
+def is_fib_entry_present_on_odl(controllers, ip_prefix, vrf_id):
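+    """Check whether the given IP prefix exists in the ODL FIB entries of
+    the given VRF."""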
+ url = ('http://admin:admin@{ip}:8081/restconf/config/odl-fib:fibEntries/'
+ 'vrfTables/{vrf}/'.format(ip=controllers[0].ip, vrf=vrf_id))
+ logger.error("url is %s" % url)
+ try:
+ vrf_table = requests.get(url).json()
+ is_ipprefix_exists = False
+ for vrf_entry in vrf_table['vrfTables'][0]['vrfEntry']:
+ if vrf_entry['destPrefix'] == ip_prefix:
+ is_ipprefix_exists = True
+ break
+ return is_ipprefix_exists
+ except Exception as e:
+ logger.error('Failed to find ip prefix %s with error %s'
+ % (ip_prefix, e))
+ return False