JIRA: SDNVPN-220
This patch replaces all calls to the nova client with the openstack sdk.
Change-Id: I9e3a0fe08ba36bfb64483238cd286acad829ae90
Signed-off-by: Stamatis Katsaounis <mokats@intracom-telecom.com>
15 files changed:
python-heatclient>=1.10.0 # Apache-2.0
python-keystoneclient>=3.8.0 # Apache-2.0
python-neutronclient>=6.7.0 # Apache-2.0
python-heatclient>=1.10.0 # Apache-2.0
python-keystoneclient>=3.8.0 # Apache-2.0
python-neutronclient>=6.7.0 # Apache-2.0
-python-novaclient>=9.1.0 # Apache-2.0
xtesting # Apache-2.0
openstacksdk>=0.11.3 # Apache-2.0
xtesting # Apache-2.0
openstacksdk>=0.11.3 # Apache-2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# http://www.apache.org/licenses/LICENSE-2.0
#
import logging
import os.path
import shutil
import logging
import os.path
import shutil
from keystoneauth1 import session
from cinderclient import client as cinderclient
from heatclient import client as heatclient
from keystoneauth1 import session
from cinderclient import client as cinderclient
from heatclient import client as heatclient
-from novaclient import client as novaclient
from keystoneclient import client as keystoneclient
from neutronclient.neutron import client as neutronclient
from openstack import connection
from keystoneclient import client as keystoneclient
from neutronclient.neutron import client as neutronclient
from openstack import connection
+from openstack import cloud as os_cloud
from functest.utils import env
from functest.utils import env
return connection.from_config()
return connection.from_config()
+def get_os_cloud():
+ return os_cloud.openstack_cloud()
+
+
def is_keystone_v3():
keystone_api_version = os.getenv('OS_IDENTITY_API_VERSION')
if (keystone_api_version is None or
def is_keystone_v3():
keystone_api_version = os.getenv('OS_IDENTITY_API_VERSION')
if (keystone_api_version is None or
interface=os.getenv('OS_INTERFACE', 'admin'))
interface=os.getenv('OS_INTERFACE', 'admin'))
-def get_nova_client_version():
- api_version = os.getenv('OS_COMPUTE_API_VERSION')
- if api_version is not None:
- logger.info("OS_COMPUTE_API_VERSION is set in env as '%s'",
- api_version)
- return api_version
- return DEFAULT_API_VERSION
-
-
-def get_nova_client(other_creds={}):
- sess = get_session(other_creds)
- return novaclient.Client(get_nova_client_version(), session=sess)
-
-
def get_cinder_client_version():
api_version = os.getenv('OS_VOLUME_API_VERSION')
if api_version is not None:
def get_cinder_client_version():
api_version = os.getenv('OS_VOLUME_API_VERSION')
if api_version is not None:
# *********************************************
# NOVA
# *********************************************
# *********************************************
# NOVA
# *********************************************
-def get_instances(nova_client):
+def get_instances(conn):
- instances = nova_client.servers.list(search_opts={'all_tenants': 1})
+ instances = conn.compute.servers(all_tenants=1)
return instances
except Exception as e:
return instances
except Exception as e:
- logger.error("Error [get_instances(nova_client)]: %s" % e)
+ logger.error("Error [get_instances(compute)]: %s" % e)
-def get_instance_status(nova_client, instance):
+def get_instance_status(conn, instance):
- instance = nova_client.servers.get(instance.id)
+ instance = conn.compute.get_server(instance.id)
return instance.status
except Exception as e:
return instance.status
except Exception as e:
- logger.error("Error [get_instance_status(nova_client)]: %s" % e)
+ logger.error("Error [get_instance_status(compute)]: %s" % e)
-def get_instance_by_name(nova_client, instance_name):
+def get_instance_by_name(conn, instance_name):
- instance = nova_client.servers.find(name=instance_name)
+ instance = conn.compute.find_server(instance_name,
+ ignore_missing=False)
return instance
except Exception as e:
return instance
except Exception as e:
- logger.error("Error [get_instance_by_name(nova_client, '%s')]: %s"
+ logger.error("Error [get_instance_by_name(compute, '%s')]: %s"
% (instance_name, e))
return None
% (instance_name, e))
return None
-def get_flavor_id(nova_client, flavor_name):
- flavors = nova_client.flavors.list(detailed=True)
+def get_flavor_id(conn, flavor_name):
+ flavors = conn.compute.flavors()
id = ''
for f in flavors:
if f.name == flavor_name:
id = ''
for f in flavors:
if f.name == flavor_name:
-def get_flavor_id_by_ram_range(nova_client, min_ram, max_ram):
- flavors = nova_client.flavors.list(detailed=True)
+def get_flavor_id_by_ram_range(conn, min_ram, max_ram):
+ flavors = conn.compute.flavors()
id = ''
for f in flavors:
if min_ram <= f.ram and f.ram <= max_ram:
id = ''
for f in flavors:
if min_ram <= f.ram and f.ram <= max_ram:
-def get_aggregates(nova_client):
+def get_aggregates(cloud):
- aggregates = nova_client.aggregates.list()
+ aggregates = cloud.list_aggregates()
return aggregates
except Exception as e:
return aggregates
except Exception as e:
- logger.error("Error [get_aggregates(nova_client)]: %s" % e)
+ logger.error("Error [get_aggregates(compute)]: %s" % e)
-def get_aggregate_id(nova_client, aggregate_name):
+def get_aggregate_id(cloud, aggregate_name):
- aggregates = get_aggregates(nova_client)
+ aggregates = get_aggregates(cloud)
_id = [ag.id for ag in aggregates if ag.name == aggregate_name][0]
return _id
except Exception as e:
_id = [ag.id for ag in aggregates if ag.name == aggregate_name][0]
return _id
except Exception as e:
- logger.error("Error [get_aggregate_id(nova_client, %s)]:"
+ logger.error("Error [get_aggregate_id(compute, %s)]:"
" %s" % (aggregate_name, e))
return None
" %s" % (aggregate_name, e))
return None
-def get_availability_zones(nova_client):
+def get_availability_zones(conn):
- availability_zones = nova_client.availability_zones.list()
+ availability_zones = conn.compute.availability_zones()
return availability_zones
except Exception as e:
return availability_zones
except Exception as e:
- logger.error("Error [get_availability_zones(nova_client)]: %s" % e)
+ logger.error("Error [get_availability_zones(compute)]: %s" % e)
-def get_availability_zone_names(nova_client):
+def get_availability_zone_names(conn):
- az_names = [az.zoneName for az in get_availability_zones(nova_client)]
+ az_names = [az.zoneName for az in get_availability_zones(conn)]
return az_names
except Exception as e:
return az_names
except Exception as e:
- logger.error("Error [get_availability_zone_names(nova_client)]:"
+ logger.error("Error [get_availability_zone_names(compute)]:"
-def create_flavor(nova_client, flavor_name, ram, disk, vcpus, public=True):
+def create_flavor(conn, flavor_name, ram, disk, vcpus, public=True):
- flavor = nova_client.flavors.create(
- flavor_name, ram, vcpus, disk, is_public=public)
+ flavor = conn.compute.create_flavor(
+ name=flavor_name, ram=ram, disk=disk, vcpus=vcpus,
+ is_public=public)
- logger.error("Error [create_flavor(nova_client, '%s', '%s', '%s', "
+ logger.error("Error [create_flavor(compute, '%s', '%s', '%s', "
"'%s')]: %s" % (flavor_name, ram, disk, vcpus, e))
return None
return flavor.id
"'%s')]: %s" % (flavor_name, ram, disk, vcpus, e))
return None
return flavor.id
def get_or_create_flavor(flavor_name, ram, disk, vcpus, public=True):
flavor_exists = False
def get_or_create_flavor(flavor_name, ram, disk, vcpus, public=True):
flavor_exists = False
- nova_client = get_nova_client()
+ conn = get_os_connection()
- flavor_id = get_flavor_id(nova_client, flavor_name)
+ flavor_id = get_flavor_id(conn, flavor_name)
if flavor_id != '':
logger.info("Using existing flavor '%s'..." % flavor_name)
flavor_exists = True
if flavor_id != '':
logger.info("Using existing flavor '%s'..." % flavor_name)
flavor_exists = True
logger.info("Creating flavor '%s' with '%s' RAM, '%s' disk size, "
"'%s' vcpus..." % (flavor_name, ram, disk, vcpus))
flavor_id = create_flavor(
logger.info("Creating flavor '%s' with '%s' RAM, '%s' disk size, "
"'%s' vcpus..." % (flavor_name, ram, disk, vcpus))
flavor_id = create_flavor(
- nova_client, flavor_name, ram, disk, vcpus, public=public)
+ conn, flavor_name, ram, disk, vcpus, public=public)
if not flavor_id:
raise Exception("Failed to create flavor '%s'..." % (flavor_name))
else:
if not flavor_id:
raise Exception("Failed to create flavor '%s'..." % (flavor_name))
else:
-def get_hypervisors(nova_client):
+def get_hypervisors(conn):
- hypervisors = nova_client.hypervisors.list()
+ hypervisors = conn.compute.hypervisors()
for hypervisor in hypervisors:
if hypervisor.state == "up":
for hypervisor in hypervisors:
if hypervisor.state == "up":
- nodes.append(hypervisor.hypervisor_hostname)
+ nodes.append(hypervisor.name)
return nodes
except Exception as e:
return nodes
except Exception as e:
- logger.error("Error [get_hypervisors(nova_client)]: %s" % e)
+ logger.error("Error [get_hypervisors(compute)]: %s" % e)
-def create_aggregate(nova_client, aggregate_name, av_zone):
+def create_aggregate(cloud, aggregate_name, av_zone):
- nova_client.aggregates.create(aggregate_name, av_zone)
+ cloud.create_aggregate(aggregate_name, av_zone)
return True
except Exception as e:
return True
except Exception as e:
- logger.error("Error [create_aggregate(nova_client, %s, %s)]: %s"
+ logger.error("Error [create_aggregate(compute, %s, %s)]: %s"
% (aggregate_name, av_zone, e))
return None
% (aggregate_name, av_zone, e))
return None
-def add_host_to_aggregate(nova_client, aggregate_name, compute_host):
+def add_host_to_aggregate(cloud, aggregate_name, compute_host):
- aggregate_id = get_aggregate_id(nova_client, aggregate_name)
- nova_client.aggregates.add_host(aggregate_id, compute_host)
+ aggregate_id = get_aggregate_id(cloud, aggregate_name)
+ cloud.add_host_to_aggregate(aggregate_id, compute_host)
return True
except Exception as e:
return True
except Exception as e:
- logger.error("Error [add_host_to_aggregate(nova_client, %s, %s)]: %s"
+ logger.error("Error [add_host_to_aggregate(compute, %s, %s)]: %s"
% (aggregate_name, compute_host, e))
return None
def create_aggregate_with_host(
% (aggregate_name, compute_host, e))
return None
def create_aggregate_with_host(
- nova_client, aggregate_name, av_zone, compute_host):
+ cloud, aggregate_name, av_zone, compute_host):
- create_aggregate(nova_client, aggregate_name, av_zone)
- add_host_to_aggregate(nova_client, aggregate_name, compute_host)
+ create_aggregate(cloud, aggregate_name, av_zone)
+ add_host_to_aggregate(cloud, aggregate_name, compute_host)
return True
except Exception as e:
logger.error("Error [create_aggregate_with_host("
return True
except Exception as e:
logger.error("Error [create_aggregate_with_host("
- "nova_client, %s, %s, %s)]: %s"
+ "compute, %s, %s, %s)]: %s"
% (aggregate_name, av_zone, compute_host, e))
return None
% (aggregate_name, av_zone, compute_host, e))
return None
instance_name="functest-vm",
confdrive=True,
userdata=None,
instance_name="functest-vm",
confdrive=True,
userdata=None,
- files=None):
- nova_client = get_nova_client()
+ files=[]):
+ conn = get_os_connection()
- flavor = nova_client.flavors.find(name=flavor_name)
+ flavor = conn.compute.find_flavor(flavor_name, ignore_missing=False)
- flavors = nova_client.flavors.list()
+ flavors = [flavor.name for flavor in conn.compute.flavors()]
logger.error("Error: Flavor '%s' not found. Available flavors are: "
"\n%s" % (flavor_name, flavors))
return None
if fixed_ip is not None:
logger.error("Error: Flavor '%s' not found. Available flavors are: "
"\n%s" % (flavor_name, flavors))
return None
if fixed_ip is not None:
- nics = {"net-id": network_id, "v4-fixed-ip": fixed_ip}
+ networks = {"uuid": network_id, "fixed_ip": fixed_ip}
- nics = {"net-id": network_id}
- if userdata is None:
- instance = nova_client.servers.create(
- name=instance_name,
- flavor=flavor,
- image=image_id,
- nics=[nics],
- availability_zone=av_zone,
- files=files
- )
- else:
- instance = nova_client.servers.create(
- name=instance_name,
- flavor=flavor,
- image=image_id,
- nics=[nics],
- config_drive=confdrive,
- userdata=userdata,
- availability_zone=av_zone,
- files=files
- )
+ networks = {"uuid": network_id}
+
+ server_attrs = {
+ 'name': instance_name,
+ 'flavor_id': flavor.id,
+ 'image_id': image_id,
+ 'networks': [networks],
+ 'personality': files
+ }
+ if userdata is not None:
+ server_attrs['config_drive'] = confdrive
+ server_attrs['user_data'] = base64.b64encode(userdata.encode())
+ if av_zone is not None:
+ server_attrs['availability_zone'] = av_zone
+
+ instance = conn.compute.create_server(**server_attrs)
instance_name="",
config_drive=False,
userdata="",
instance_name="",
config_drive=False,
userdata="",
SLEEP = 3
VM_BOOT_TIMEOUT = 180
SLEEP = 3
VM_BOOT_TIMEOUT = 180
- nova_client = get_nova_client()
+ conn = get_os_connection()
instance = create_instance(flavor_name,
image_id,
network_id,
instance = create_instance(flavor_name,
image_id,
network_id,
files=files)
count = VM_BOOT_TIMEOUT / SLEEP
for n in range(count, -1, -1):
files=files)
count = VM_BOOT_TIMEOUT / SLEEP
for n in range(count, -1, -1):
- status = get_instance_status(nova_client, instance)
+ status = get_instance_status(conn, instance)
if status is None:
time.sleep(SLEEP)
continue
if status is None:
time.sleep(SLEEP)
continue
-def add_floating_ip(nova_client, server_id, floatingip_addr):
+def add_floating_ip(conn, server_id, floatingip_addr):
- nova_client.servers.add_floating_ip(server_id, floatingip_addr)
+ conn.compute.add_floating_ip_to_server(server_id, floatingip_addr)
return True
except Exception as e:
return True
except Exception as e:
- logger.error("Error [add_floating_ip(nova_client, '%s', '%s')]: %s"
+ logger.error("Error [add_floating_ip(compute, '%s', '%s')]: %s"
% (server_id, floatingip_addr, e))
return False
% (server_id, floatingip_addr, e))
return False
-def delete_instance(nova_client, instance_id):
+def delete_instance(conn, instance_id):
- nova_client.servers.force_delete(instance_id)
+ conn.compute.delete_server(instance_id, force=True)
return True
except Exception as e:
return True
except Exception as e:
- logger.error("Error [delete_instance(nova_client, '%s')]: %s"
+ logger.error("Error [delete_instance(compute, '%s')]: %s"
% (instance_id, e))
return False
% (instance_id, e))
return False
-def remove_host_from_aggregate(nova_client, aggregate_name, compute_host):
+def remove_host_from_aggregate(cloud, aggregate_name, compute_host):
- aggregate_id = get_aggregate_id(nova_client, aggregate_name)
- nova_client.aggregates.remove_host(aggregate_id, compute_host)
+ aggregate_id = get_aggregate_id(cloud, aggregate_name)
+ cloud.remove_host_from_aggregate(aggregate_id, compute_host)
return True
except Exception as e:
return True
except Exception as e:
- logger.error("Error [remove_host_from_aggregate(nova_client, %s, %s)]:"
+ logger.error("Error [remove_host_from_aggregate(compute, %s, %s)]:"
" %s" % (aggregate_name, compute_host, e))
return False
" %s" % (aggregate_name, compute_host, e))
return False
-def remove_hosts_from_aggregate(nova_client, aggregate_name):
- aggregate_id = get_aggregate_id(nova_client, aggregate_name)
- hosts = nova_client.aggregates.get(aggregate_id).hosts
+def remove_hosts_from_aggregate(cloud, aggregate_name):
+ aggregate_id = get_aggregate_id(cloud, aggregate_name)
+ hosts = cloud.get_aggregate(aggregate_id).hosts
- all(remove_host_from_aggregate(nova_client, aggregate_name, host)
+ all(remove_host_from_aggregate(cloud, aggregate_name, host)
-def delete_aggregate(nova_client, aggregate_name):
+def delete_aggregate(cloud, aggregate_name):
- remove_hosts_from_aggregate(nova_client, aggregate_name)
- nova_client.aggregates.delete(aggregate_name)
+ remove_hosts_from_aggregate(cloud, aggregate_name)
+ cloud.delete_aggregate(aggregate_name)
return True
except Exception as e:
return True
except Exception as e:
- logger.error("Error [delete_aggregate(nova_client, %s)]: %s"
+ logger.error("Error [delete_aggregate(compute, %s)]: %s"
% (aggregate_name, e))
return False
% (aggregate_name, e))
return False
-def add_secgroup_to_instance(nova_client, instance_id, secgroup_id):
+def add_secgroup_to_instance(conn, instance_id, secgroup_id):
- nova_client.servers.add_security_group(instance_id, secgroup_id)
+ conn.compute.add_security_group_to_server(instance_id, secgroup_id)
return True
except Exception as e:
return True
except Exception as e:
- logger.error("Error [add_secgroup_to_instance(nova_client, '%s', "
+ logger.error("Error [add_secgroup_to_instance(compute, '%s', "
"'%s')]: %s" % (instance_id, secgroup_id, e))
return False
"'%s')]: %s" % (instance_id, secgroup_id, e))
return False
- def __init__(self, line_length):
+ def __init__(self, line_length, conn=None):
+ self.conn = conn
self.line_length = line_length
self.test_result = "PASS"
self.summary = ""
self.line_length = line_length
self.test_result = "PASS"
self.summary = ""
vm_source,
vm_target,
expected="PASS", timeout=30):
vm_source,
vm_target,
expected="PASS", timeout=30):
- ip_target = vm_target.networks.itervalues().next()[0]
+ ip_target = self.conn.compute.get_server(vm_target).\
+ addresses.values()[0][0]['addr']
self.get_ping_status_target_ip(vm_source, vm_target.name,
ip_target, expected, timeout)
self.get_ping_status_target_ip(vm_source, vm_target.name,
ip_target, expected, timeout)
target_name,
ip_target,
expected="PASS", timeout=30):
target_name,
ip_target,
expected="PASS", timeout=30):
- console_log = vm_source.get_console_output()
- ip_source = vm_source.networks.itervalues().next()[0]
+ console_log = self.conn.compute.\
+ get_server_console_output(vm_source)['output']
+ ip_source = self.conn.compute.get_server(vm_source).\
+ addresses.values()[0][0]['addr']
if "request failed" in console_log:
# Normally, cirros displays this message when userdata fails
logger.debug("It seems userdata is not supported in "
if "request failed" in console_log:
# Normally, cirros displays this message when userdata fails
logger.debug("It seems userdata is not supported in "
tab, target_name, ip_target,
tab, expected_result))
while True:
tab, target_name, ip_target,
tab, expected_result))
while True:
- console_log = vm_source.get_console_output()
+ console_log = self.conn.compute.\
+ get_server_console_output(vm_source)['output']
# the console_log is a long string, we want to take
# the last 4 lines (for example)
lines = console_log.split('\n')
# the console_log is a long string, we want to take
# the last 4 lines (for example)
lines = console_log.split('\n')
def check_ssh_output(self, vm_source, vm_target,
expected, timeout=30):
def check_ssh_output(self, vm_source, vm_target,
expected, timeout=30):
- console_log = vm_source.get_console_output()
- ip_source = vm_source.networks.itervalues().next()[0]
- ip_target = vm_target.networks.itervalues().next()[0]
+ console_log = self.conn.compute.\
+ get_server_console_output(vm_source)['output']
+ ip_source = self.conn.compute.get_server(vm_source).\
+ addresses.values()[0][0]['addr']
+ ip_target = self.conn.compute.get_server(vm_target).\
+ addresses.values()[0][0]['addr']
if "request failed" in console_log:
# Normally, cirros displays this message when userdata fails
if "request failed" in console_log:
# Normally, cirros displays this message when userdata fails
tab, vm_target.name, ip_target,
tab, expected))
while True:
tab, vm_target.name, ip_target,
tab, expected))
while True:
- console_log = vm_source.get_console_output()
+ console_log = self.conn.compute.\
+ get_server_console_output(vm_source)['output']
# the console_log is a long string, we want to take
# the last 4 lines (for example)
lines = console_log.split('\n')
# the console_log is a long string, we want to take
# the last 4 lines (for example)
lines = console_log.split('\n')
-def create_instance(nova_client,
+def create_instance(conn,
name,
image_id,
network_id,
sg_id,
secgroup_name=None,
fixed_ip=None,
name,
image_id,
network_id,
sg_id,
secgroup_name=None,
fixed_ip=None,
**kwargs
):
if 'flavor' not in kwargs:
**kwargs
):
if 'flavor' not in kwargs:
logger.error("Error while booting instance.")
raise Exception("Error while booting instance {}".format(name))
else:
logger.error("Error while booting instance.")
raise Exception("Error while booting instance {}".format(name))
else:
+ # Retrieve IP of INSTANCE
+ network_name = conn.network.get_network(network_id).name
+ instance_ip = conn.compute.get_server(instance).\
+ addresses.get(network_name)[0]['addr']
logger.debug("Instance '%s' booted successfully. IP='%s'." %
logger.debug("Instance '%s' booted successfully. IP='%s'." %
- (name, instance.networks.itervalues().next()[0]))
- # Retrieve IP of INSTANCE
- # instance_ip = instance.networks.get(network_id)[0]
if secgroup_name:
logger.debug("Adding '%s' to security group '%s'..."
if secgroup_name:
logger.debug("Adding '%s' to security group '%s'..."
else:
logger.debug("Adding '%s' to security group '%s'..."
% (name, sg_id))
else:
logger.debug("Adding '%s' to security group '%s'..."
% (name, sg_id))
- os_utils.add_secgroup_to_instance(nova_client, instance.id, sg_id)
+ os_utils.add_secgroup_to_instance(conn, instance.id, sg_id)
return str(os.environ['INSTALLER_IP'])
return str(os.environ['INSTALLER_IP'])
-def get_instance_ip(instance):
- instance_ip = instance.networks.itervalues().next()[0]
+def get_instance_ip(conn, instance):
+ instance_ip = conn.compute.get_server(instance).\
+ addresses.values()[0][0]['addr']
return instance_ip
def wait_for_instance(instance, pattern=".* login:", tries=40):
logger.info("Waiting for instance %s to boot up" % instance.id)
return instance_ip
def wait_for_instance(instance, pattern=".* login:", tries=40):
logger.info("Waiting for instance %s to boot up" % instance.id)
+ conn = os_utils.get_os_connection()
sleep_time = 2
expected_regex = re.compile(pattern)
console_log = ""
while tries > 0 and not expected_regex.search(console_log):
sleep_time = 2
expected_regex = re.compile(pattern)
console_log = ""
while tries > 0 and not expected_regex.search(console_log):
- console_log = instance.get_console_output()
+ console_log = conn.compute.\
+ get_server_console_output(instance)['output']
time.sleep(sleep_time)
tries -= 1
time.sleep(sleep_time)
tries -= 1
logger.error("one or more instances is not yet booted up")
logger.error("one or more instances is not yet booted up")
-def wait_for_instance_delete(nova_client, instance_id, tries=30):
+def wait_for_instance_delete(conn, instance_id, tries=30):
sleep_time = 2
instances = [instance_id]
logger.debug("Waiting for instance %s to be deleted"
% (instance_id))
while tries > 0 and instance_id in instances:
instances = [instance.id for instance in
sleep_time = 2
instances = [instance_id]
logger.debug("Waiting for instance %s to be deleted"
% (instance_id))
while tries > 0 and instance_id in instances:
instances = [instance.id for instance in
- os_utils.get_instances(nova_client)]
+ os_utils.get_instances(conn)]
time.sleep(sleep_time)
tries -= 1
if instance_id in instances:
time.sleep(sleep_time)
tries -= 1
if instance_id in instances:
-def assert_and_get_compute_nodes(nova_client, required_node_number=2):
+def assert_and_get_compute_nodes(conn, required_node_number=2):
"""Get the compute nodes in the deployment
Exit if the deployment doesn't have enough compute nodes"""
"""Get the compute nodes in the deployment
Exit if the deployment doesn't have enough compute nodes"""
- compute_nodes = os_utils.get_hypervisors(nova_client)
+ compute_nodes = os_utils.get_hypervisors(conn)
num_compute_nodes = len(compute_nodes)
if num_compute_nodes < 2:
num_compute_nodes = len(compute_nodes)
if num_compute_nodes < 2:
return odl_node.run_cmd(karaf_cmd)
return odl_node.run_cmd(karaf_cmd)
-def wait_for_cloud_init(instance):
+def wait_for_cloud_init(conn, instance):
success = True
# ubuntu images take a long time to start
tries = 20
success = True
# ubuntu images take a long time to start
tries = 20
logger.info("Waiting for cloud init of instance: {}"
"".format(instance.name))
while tries > 0:
logger.info("Waiting for cloud init of instance: {}"
"".format(instance.name))
while tries > 0:
- instance_log = instance.get_console_output()
+ instance_log = conn.compute.\
+ get_server_console_output(instance)['output']
if "Failed to run module" in instance_log:
success = False
logger.error("Cloud init failed to run. Reason: %s",
if "Failed to run module" in instance_log:
success = False
logger.error("Cloud init failed to run. Reason: %s",
def attach_instance_to_ext_br(instance, compute_node):
def attach_instance_to_ext_br(instance, compute_node):
- libvirt_instance_name = getattr(instance, "OS-EXT-SRV-ATTR:instance_name")
+ libvirt_instance_name = instance.instance_name
installer_type = str(os.environ['INSTALLER_TYPE'].lower())
if installer_type == "fuel":
bridge = "br-ex"
installer_type = str(os.environ['INSTALLER_TYPE'].lower())
if installer_type == "fuel":
bridge = "br-ex"
def detach_instance_from_ext_br(instance, compute_node):
def detach_instance_from_ext_br(instance, compute_node):
- libvirt_instance_name = getattr(instance, "OS-EXT-SRV-ATTR:instance_name")
+ libvirt_instance_name = instance.instance_name
mac = compute_node.run_cmd("for vm in $(sudo virsh list | "
"grep running | awk '{print $2}'); "
"do echo -n ; sudo virsh dumpxml $vm| "
mac = compute_node.run_cmd("for vm in $(sudo virsh list | "
"grep running | awk '{print $2}'); "
"do echo -n ; sudo virsh dumpxml $vm| "
-def cleanup_nova(nova_client, instance_ids, flavor_ids=None):
+def cleanup_nova(conn, instance_ids, flavor_ids=None):
if flavor_ids is not None and len(flavor_ids) != 0:
for flavor_id in flavor_ids:
if flavor_ids is not None and len(flavor_ids) != 0:
for flavor_id in flavor_ids:
- nova_client.flavors.delete(flavor_id)
+ conn.compute.delete_flavor(flavor_id)
if len(instance_ids) != 0:
for instance_id in instance_ids:
if len(instance_ids) != 0:
for instance_id in instance_ids:
- if not os_utils.delete_instance(nova_client, instance_id):
+ if not os_utils.delete_instance(conn, instance_id):
logger.error('Fail to delete all instances. '
'Instance with id {} was not deleted.'.
format(instance_id))
else:
logger.error('Fail to delete all instances. '
'Instance with id {} was not deleted.'.
format(instance_id))
else:
- wait_for_instance_delete(nova_client, instance_id)
+ wait_for_instance_delete(conn, instance_id)
-def update_instance_quota_class(nova_client, instances_quota):
+def update_instance_quota_class(cloud, instances_quota):
- nova_client.quota_classes.update("default", instances=instances_quota)
+ cloud.set_compute_quotas('admin', instances=instances_quota)
return True
except Exception as e:
return True
except Exception as e:
- logger.error("Error [update_instance_quota_class(nova_client,"
+ logger.error("Error [update_instance_quota_class(compute,"
" '%s' )]: %s" % (instances_quota, e))
return False
" '%s' )]: %s" % (instances_quota, e))
return False
-def get_nova_instances_quota(nova_client):
+def get_nova_instances_quota(cloud):
- return nova_client.quota_classes.get("default").instances
+ return cloud.get_compute_quotas('admin').instances
except Exception as e:
logger.error("Error in getting nova instances quota: %s" % e)
raise
except Exception as e:
logger.error("Error in getting nova instances quota: %s" % e)
raise
- nova_client = os_utils.get_nova_client()
+ cloud = os_utils.get_os_cloud()
neutron_client = os_utils.get_neutron_client()
tenant_id = os_utils.get_tenant_id(os_utils.get_keystone_client(),
neutron_client = os_utils.get_neutron_client()
tenant_id = os_utils.get_tenant_id(os_utils.get_keystone_client(),
neutron_router_quota) = (
neutron_quota['network'], neutron_quota['subnet'],
neutron_quota['port'], neutron_quota['router'])
neutron_router_quota) = (
neutron_quota['network'], neutron_quota['subnet'],
neutron_quota['port'], neutron_quota['router'])
- instances_quota = test_utils.get_nova_instances_quota(nova_client)
+ instances_quota = test_utils.get_nova_instances_quota(cloud)
logger.info("Setting net/subnet/port/router "
"quota to unlimited")
logger.info("Setting net/subnet/port/router "
"quota to unlimited")
# https://jira.opnfv.org/projects/SDNVPN/issues/SDNVPN-115
logger.info("Setting instances quota class to unlimited")
test_utils.update_instance_quota_class(
# https://jira.opnfv.org/projects/SDNVPN/issues/SDNVPN-115
logger.info("Setting instances quota class to unlimited")
test_utils.update_instance_quota_class(
- nova_client,
- COMMON_CONFIG.nova_instances_quota_class)
+ cloud, COMMON_CONFIG.nova_instances_quota_class)
# Clean up the stale floating ip's so that required
# ip addresses are available for sdnvpn testcases
# Clean up the stale floating ip's so that required
# ip addresses are available for sdnvpn testcases
neutron_router_quota)
logger.info("Resetting instances quota class")
neutron_router_quota)
logger.info("Resetting instances quota class")
- test_utils.update_instance_quota_class(nova_client, instances_quota)
+ test_utils.update_instance_quota_class(cloud, instances_quota)
try:
installer_type = str(os.environ['INSTALLER_TYPE'].lower())
try:
installer_type = str(os.environ['INSTALLER_TYPE'].lower())
- results = Results(COMMON_CONFIG.line_length)
+ conn = os_utils.get_os_connection()
+ results = Results(COMMON_CONFIG.line_length, conn)
results.add_to_summary(0, "=")
results.add_to_summary(2, "STATUS", "SUBTEST")
results.add_to_summary(0, "=")
results.add_to_summary(0, "=")
results.add_to_summary(2, "STATUS", "SUBTEST")
results.add_to_summary(0, "=")
- nova_client = os_utils.get_nova_client()
neutron_client = os_utils.get_neutron_client()
conn = os_utils.get_os_connection()
neutron_client = os_utils.get_neutron_client()
conn = os_utils.get_os_connection()
neutron_client, TESTCASE_CONFIG.secgroup_name,
TESTCASE_CONFIG.secgroup_descr)
neutron_client, TESTCASE_CONFIG.secgroup_name,
TESTCASE_CONFIG.secgroup_descr)
- compute_nodes = test_utils.assert_and_get_compute_nodes(nova_client)
+ compute_nodes = test_utils.assert_and_get_compute_nodes(conn)
av_zone_1 = "nova:" + compute_nodes[0]
av_zone_2 = "nova:" + compute_nodes[1]
# boot INSTANCES
vm_2 = test_utils.create_instance(
av_zone_1 = "nova:" + compute_nodes[0]
av_zone_2 = "nova:" + compute_nodes[1]
# boot INSTANCES
vm_2 = test_utils.create_instance(
TESTCASE_CONFIG.instance_2_name,
image_id,
network_1_id,
sg_id,
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_1)
TESTCASE_CONFIG.instance_2_name,
image_id,
network_1_id,
sg_id,
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_1)
- vm_2_ip = test_utils.get_instance_ip(vm_2)
+ vm_2_ip = test_utils.get_instance_ip(conn, vm_2)
vm_3 = test_utils.create_instance(
vm_3 = test_utils.create_instance(
TESTCASE_CONFIG.instance_3_name,
image_id,
network_1_id,
sg_id,
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_2)
TESTCASE_CONFIG.instance_3_name,
image_id,
network_1_id,
sg_id,
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_2)
- vm_3_ip = test_utils.get_instance_ip(vm_3)
+ vm_3_ip = test_utils.get_instance_ip(conn, vm_3)
vm_5 = test_utils.create_instance(
vm_5 = test_utils.create_instance(
TESTCASE_CONFIG.instance_5_name,
image_id,
network_2_id,
sg_id,
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_2)
TESTCASE_CONFIG.instance_5_name,
image_id,
network_2_id,
sg_id,
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_2)
- vm_5_ip = test_utils.get_instance_ip(vm_5)
+ vm_5_ip = test_utils.get_instance_ip(conn, vm_5)
# We boot vm5 first because we need vm5_ip for vm4 userdata
u4 = test_utils.generate_ping_userdata([vm_5_ip])
vm_4 = test_utils.create_instance(
# We boot vm5 first because we need vm5_ip for vm4 userdata
u4 = test_utils.generate_ping_userdata([vm_5_ip])
vm_4 = test_utils.create_instance(
TESTCASE_CONFIG.instance_4_name,
image_id,
network_2_id,
TESTCASE_CONFIG.instance_4_name,
image_id,
network_2_id,
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_1,
userdata=u4)
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_1,
userdata=u4)
- vm_4_ip = test_utils.get_instance_ip(vm_4)
+ vm_4_ip = test_utils.get_instance_ip(conn, vm_4)
# We boot VM1 at the end because we need to get the IPs first
# to generate the userdata
# We boot VM1 at the end because we need to get the IPs first
# to generate the userdata
vm_4_ip,
vm_5_ip])
vm_1 = test_utils.create_instance(
vm_4_ip,
vm_5_ip])
vm_1 = test_utils.create_instance(
TESTCASE_CONFIG.instance_1_name,
image_id,
network_1_id,
TESTCASE_CONFIG.instance_1_name,
image_id,
network_1_id,
logger.error("exception occurred while executing testcase_1: %s", e)
raise
finally:
logger.error("exception occurred while executing testcase_1: %s", e)
raise
finally:
- test_utils.cleanup_nova(nova_client, instance_ids)
+ test_utils.cleanup_nova(conn, instance_ids)
test_utils.cleanup_glance(conn, image_ids)
test_utils.cleanup_neutron(neutron_client, floatingip_ids,
bgpvpn_ids, interfaces, subnet_ids,
test_utils.cleanup_glance(conn, image_ids)
test_utils.cleanup_neutron(neutron_client, floatingip_ids,
bgpvpn_ids, interfaces, subnet_ids,
'sdnvpn.test.functest.testcase_10')
'sdnvpn.test.functest.testcase_10')
-def monitor(in_data, out_data, vm):
+def monitor(conn, in_data, out_data, vm):
# At the beginning of ping we might have some
# failures, so we ignore the first 10 pings
lines_offset = 20
while in_data["stop_thread"] is False:
try:
time.sleep(1)
# At the beginning of ping we might have some
# failures, so we ignore the first 10 pings
lines_offset = 20
while in_data["stop_thread"] is False:
try:
time.sleep(1)
- vm_console_out_lines = vm.get_console_output().split('\n')
+ vm_console_out_lines = conn.compute.\
+ get_server_console_output(vm)['output'].split('\n')
if lines_offset < len(vm_console_out_lines):
for console_line in vm_console_out_lines[lines_offset:-1]:
is_ping_error = re.match(r'ping.*KO', console_line)
if lines_offset < len(vm_console_out_lines):
for console_line in vm_console_out_lines[lines_offset:-1]:
is_ping_error = re.match(r'ping.*KO', console_line)
- results = Results(COMMON_CONFIG.line_length)
+ conn = os_utils.get_os_connection()
+ results = Results(COMMON_CONFIG.line_length, conn)
results.add_to_summary(0, "=")
results.add_to_summary(2, "STATUS", "SUBTEST")
results.add_to_summary(0, "=")
results.add_to_summary(0, "=")
results.add_to_summary(2, "STATUS", "SUBTEST")
results.add_to_summary(0, "=")
- nova_client = os_utils.get_nova_client()
neutron_client = os_utils.get_neutron_client()
conn = os_utils.get_os_connection()
neutron_client = os_utils.get_neutron_client()
conn = os_utils.get_os_connection()
TESTCASE_CONFIG.secgroup_name,
TESTCASE_CONFIG.secgroup_descr)
TESTCASE_CONFIG.secgroup_name,
TESTCASE_CONFIG.secgroup_descr)
- compute_nodes = test_utils.assert_and_get_compute_nodes(nova_client)
+ compute_nodes = test_utils.assert_and_get_compute_nodes(conn)
av_zone_1 = "nova:" + compute_nodes[0]
av_zone_2 = "nova:" + compute_nodes[1]
# boot INSTANCES
vm_2 = test_utils.create_instance(
av_zone_1 = "nova:" + compute_nodes[0]
av_zone_2 = "nova:" + compute_nodes[1]
# boot INSTANCES
vm_2 = test_utils.create_instance(
TESTCASE_CONFIG.instance_2_name,
image_id,
network_1_id,
sg_id,
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_1)
TESTCASE_CONFIG.instance_2_name,
image_id,
network_1_id,
sg_id,
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_1)
- vm2_ip = test_utils.get_instance_ip(vm_2)
+ vm2_ip = test_utils.get_instance_ip(conn, vm_2)
u1 = test_utils.generate_ping_userdata([vm2_ip])
vm_1 = test_utils.create_instance(
u1 = test_utils.generate_ping_userdata([vm2_ip])
vm_1 = test_utils.create_instance(
TESTCASE_CONFIG.instance_1_name,
image_id,
network_1_id,
TESTCASE_CONFIG.instance_1_name,
image_id,
network_1_id,
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_1,
userdata=u1)
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_1,
userdata=u1)
- vm1_ip = test_utils.get_instance_ip(vm_1)
+ vm1_ip = test_utils.get_instance_ip(conn, vm_1)
u3 = test_utils.generate_ping_userdata([vm1_ip, vm2_ip])
vm_3 = test_utils.create_instance(
u3 = test_utils.generate_ping_userdata([vm1_ip, vm2_ip])
vm_3 = test_utils.create_instance(
TESTCASE_CONFIG.instance_3_name,
image_id,
network_1_id,
TESTCASE_CONFIG.instance_3_name,
image_id,
network_1_id,
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_2,
userdata=u3)
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_2,
userdata=u3)
- vm3_ip = test_utils.get_instance_ip(vm_3)
+ vm3_ip = test_utils.get_instance_ip(conn, vm_3)
# We do not put vm_2 id in instance_ids table because we will
# delete the current instance during the testing process
instance_ids.extend([vm_1.id, vm_3.id])
# We do not put vm_2 id in instance_ids table because we will
# delete the current instance during the testing process
instance_ids.extend([vm_1.id, vm_3.id])
monitor_output1 = m.dict()
monitor_input1["stop_thread"] = False
monitor_output1["error_msg"] = ""
monitor_output1 = m.dict()
monitor_input1["stop_thread"] = False
monitor_output1["error_msg"] = ""
- monitor_thread1 = Process(target=monitor, args=(monitor_input1,
+ monitor_thread1 = Process(target=monitor, args=(conn, monitor_input1,
monitor_output1, vm_1,))
monitor_input2 = m.dict()
monitor_output2 = m.dict()
monitor_input2["stop_thread"] = False
monitor_output2["error_msg"] = ""
monitor_output1, vm_1,))
monitor_input2 = m.dict()
monitor_output2 = m.dict()
monitor_input2["stop_thread"] = False
monitor_output2["error_msg"] = ""
- monitor_thread2 = Process(target=monitor, args=(monitor_input2,
+ monitor_thread2 = Process(target=monitor, args=(conn, monitor_input2,
monitor_output2, vm_2,))
monitor_input3 = m.dict()
monitor_output3 = m.dict()
monitor_input3["stop_thread"] = False
monitor_output3["error_msg"] = ""
monitor_output2, vm_2,))
monitor_input3 = m.dict()
monitor_output3 = m.dict()
monitor_input3["stop_thread"] = False
monitor_output3["error_msg"] = ""
- monitor_thread3 = Process(target=monitor, args=(monitor_input3,
+ monitor_thread3 = Process(target=monitor, args=(conn, monitor_input3,
monitor_output3, vm_3,))
# Lists of all monitor threads and their inputs and outputs.
threads = [monitor_thread1, monitor_thread2, monitor_thread3]
monitor_output3, vm_3,))
# Lists of all monitor threads and their inputs and outputs.
threads = [monitor_thread1, monitor_thread2, monitor_thread3]
results.add_failure(monitor_err_msg)
# Stop monitor thread 2 and delete instance vm_2
thread_inputs[1]["stop_thread"] = True
results.add_failure(monitor_err_msg)
# Stop monitor thread 2 and delete instance vm_2
thread_inputs[1]["stop_thread"] = True
- if not os_utils.delete_instance(nova_client, vm_2.id):
+ if not os_utils.delete_instance(conn, vm_2.id):
logger.error("Fail to delete vm_2 instance during "
"testing process")
raise Exception("Fail to delete instance vm_2.")
logger.error("Fail to delete vm_2 instance during "
"testing process")
raise Exception("Fail to delete instance vm_2.")
# Create a new vm (vm_4) on compute 1 node
u4 = test_utils.generate_ping_userdata([vm1_ip, vm3_ip])
vm_4 = test_utils.create_instance(
# Create a new vm (vm_4) on compute 1 node
u4 = test_utils.generate_ping_userdata([vm1_ip, vm3_ip])
vm_4 = test_utils.create_instance(
TESTCASE_CONFIG.instance_4_name,
image_id,
network_1_id,
TESTCASE_CONFIG.instance_4_name,
image_id,
network_1_id,
monitor_output4 = m.dict()
monitor_input4["stop_thread"] = False
monitor_output4["error_msg"] = ""
monitor_output4 = m.dict()
monitor_input4["stop_thread"] = False
monitor_output4["error_msg"] = ""
- monitor_thread4 = Process(target=monitor, args=(monitor_input4,
+ monitor_thread4 = Process(target=monitor, args=(conn, monitor_input4,
monitor_output4,
vm_4,))
threads.append(monitor_thread4)
monitor_output4,
vm_4,))
threads.append(monitor_thread4)
for thread in threads:
thread.join()
for thread in threads:
thread.join()
- test_utils.cleanup_nova(nova_client, instance_ids)
+ test_utils.cleanup_nova(conn, instance_ids)
test_utils.cleanup_glance(conn, image_ids)
test_utils.cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids,
interfaces, subnet_ids, router_ids,
test_utils.cleanup_glance(conn, image_ids)
test_utils.cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids,
interfaces, subnet_ids, router_ids,
- results = Results(COMMON_CONFIG.line_length)
+ conn = os_utils.get_os_connection()
+ results = Results(COMMON_CONFIG.line_length, conn)
results.add_to_summary(0, "=")
results.add_to_summary(2, "STATUS", "SUBTEST")
results.add_to_summary(0, "=")
results.add_to_summary(0, "=")
results.add_to_summary(2, "STATUS", "SUBTEST")
results.add_to_summary(0, "=")
- nova_client = os_utils.get_nova_client()
neutron_client = os_utils.get_neutron_client()
conn = os_utils.get_os_connection()
openstack_nodes = test_utils.get_nodes()
neutron_client = os_utils.get_neutron_client()
conn = os_utils.get_os_connection()
openstack_nodes = test_utils.get_nodes()
TESTCASE_CONFIG.secgroup_descr)
# Check required number of compute nodes
TESTCASE_CONFIG.secgroup_descr)
# Check required number of compute nodes
- compute_hostname = (
- nova_client.hypervisors.list()[0].hypervisor_hostname)
+ compute_hostname = conn.compute.hypervisors().next().name
compute_nodes = [node for node in openstack_nodes
if node.is_compute()]
compute_nodes = [node for node in openstack_nodes
if node.is_compute()]
# boot INSTANCES
vm_2 = test_utils.create_instance(
# boot INSTANCES
vm_2 = test_utils.create_instance(
TESTCASE_CONFIG.instance_2_name,
image_id,
network_1_id,
TESTCASE_CONFIG.instance_2_name,
image_id,
network_1_id,
compute_node=av_zone_1)
vm_1 = test_utils.create_instance(
compute_node=av_zone_1)
vm_1 = test_utils.create_instance(
TESTCASE_CONFIG.instance_1_name,
image_id,
network_1_id,
TESTCASE_CONFIG.instance_1_name,
image_id,
network_1_id,
raise
finally:
# Cleanup topology
raise
finally:
# Cleanup topology
- test_utils.cleanup_nova(nova_client, instance_ids)
+ test_utils.cleanup_nova(conn, instance_ids)
test_utils.cleanup_glance(conn, image_ids)
test_utils.cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids,
interfaces, subnet_ids, router_ids,
test_utils.cleanup_glance(conn, image_ids)
test_utils.cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids,
interfaces, subnet_ids, router_ids,
- results = Results(COMMON_CONFIG.line_length)
+ conn = os_utils.get_os_connection()
+ results = Results(COMMON_CONFIG.line_length, conn)
results.add_to_summary(0, "=")
results.add_to_summary(2, "STATUS", "SUBTEST")
results.add_to_summary(0, "=")
results.add_to_summary(0, "=")
results.add_to_summary(2, "STATUS", "SUBTEST")
results.add_to_summary(0, "=")
- nova_client = os_utils.get_nova_client()
neutron_client = os_utils.get_neutron_client()
conn = os_utils.get_os_connection()
openstack_nodes = test_utils.get_nodes()
neutron_client = os_utils.get_neutron_client()
conn = os_utils.get_os_connection()
openstack_nodes = test_utils.get_nodes()
TESTCASE_CONFIG.secgroup_descr)
# Check required number of compute nodes
TESTCASE_CONFIG.secgroup_descr)
# Check required number of compute nodes
- compute_hostname = (
- nova_client.hypervisors.list()[0].hypervisor_hostname)
+ compute_hostname = conn.compute.hypervisors().next().name
compute_nodes = [node for node in openstack_nodes
if node.is_compute()]
compute_nodes = [node for node in openstack_nodes
if node.is_compute()]
# boot INSTANCES
vm_2 = test_utils.create_instance(
# boot INSTANCES
vm_2 = test_utils.create_instance(
TESTCASE_CONFIG.instance_2_name,
image_id,
network_1_id,
TESTCASE_CONFIG.instance_2_name,
image_id,
network_1_id,
compute_node=av_zone_1)
vm_1 = test_utils.create_instance(
compute_node=av_zone_1)
vm_1 = test_utils.create_instance(
TESTCASE_CONFIG.instance_1_name,
image_id,
network_1_id,
TESTCASE_CONFIG.instance_1_name,
image_id,
network_1_id,
raise
finally:
# Cleanup topology
raise
finally:
# Cleanup topology
- test_utils.cleanup_nova(nova_client, instance_ids)
+ test_utils.cleanup_nova(conn, instance_ids)
test_utils.cleanup_glance(conn, image_ids)
test_utils.cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids,
interfaces, subnet_ids, router_ids,
test_utils.cleanup_glance(conn, image_ids)
test_utils.cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids,
interfaces, subnet_ids, router_ids,
- results = Results(COMMON_CONFIG.line_length)
+ conn = os_utils.get_os_connection()
+ results = Results(COMMON_CONFIG.line_length, conn)
results.add_to_summary(0, "=")
results.add_to_summary(2, "STATUS", "SUBTEST")
results.add_to_summary(0, "=")
results.add_to_summary(2, "STATUS", "SUBTEST")
else:
logger.info("Using old image")
else:
logger.info("Using old image")
- nova_client = os_utils.get_nova_client()
neutron_client = os_utils.get_neutron_client()
conn = os_utils.get_os_connection()
neutron_client = os_utils.get_neutron_client()
conn = os_utils.get_os_connection()
neutron_client, TESTCASE_CONFIG.secgroup_name,
TESTCASE_CONFIG.secgroup_descr)
neutron_client, TESTCASE_CONFIG.secgroup_name,
TESTCASE_CONFIG.secgroup_descr)
- compute_nodes = test_utils.assert_and_get_compute_nodes(nova_client)
+ compute_nodes = test_utils.assert_and_get_compute_nodes(conn)
av_zone_1 = "nova:" + compute_nodes[0]
av_zone_2 = "nova:" + compute_nodes[1]
av_zone_1 = "nova:" + compute_nodes[0]
av_zone_2 = "nova:" + compute_nodes[1]
TESTCASE_CONFIG.extra_route_subnet_mask)
# boot INTANCES
vm_1 = test_utils.create_instance(
TESTCASE_CONFIG.extra_route_subnet_mask)
# boot INTANCES
vm_1 = test_utils.create_instance(
TESTCASE_CONFIG.instance_1_name,
image_id,
network_1_id,
TESTCASE_CONFIG.instance_1_name,
image_id,
network_1_id,
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_1,
userdata=u1)
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_1,
userdata=u1)
- vm_1_ip = test_utils.get_instance_ip(vm_1)
+ vm_1_ip = test_utils.get_instance_ip(conn, vm_1)
vm1_port = test_utils.get_port(neutron_client, vm_1.id)
test_utils.update_port_allowed_address_pairs(
vm1_port = test_utils.get_port(neutron_client, vm_1.id)
test_utils.update_port_allowed_address_pairs(
vm1_port['mac_address'])])
vm_2 = test_utils.create_instance(
vm1_port['mac_address'])])
vm_2 = test_utils.create_instance(
TESTCASE_CONFIG.instance_2_name,
image_id,
network_1_id,
TESTCASE_CONFIG.instance_2_name,
image_id,
network_1_id,
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_1,
userdata=u1)
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_1,
userdata=u1)
- vm_2_ip = test_utils.get_instance_ip(vm_2)
+ vm_2_ip = test_utils.get_instance_ip(conn, vm_2)
vm2_port = test_utils.get_port(neutron_client, vm_2.id)
test_utils.update_port_allowed_address_pairs(
vm2_port = test_utils.get_port(neutron_client, vm_2.id)
test_utils.update_port_allowed_address_pairs(
u3 = test_utils.generate_ping_userdata(
[TESTCASE_CONFIG.extra_route_ip])
vm_3 = test_utils.create_instance(
u3 = test_utils.generate_ping_userdata(
[TESTCASE_CONFIG.extra_route_ip])
vm_3 = test_utils.create_instance(
TESTCASE_CONFIG.instance_3_name,
image_2_id,
network_1_id,
TESTCASE_CONFIG.instance_3_name,
image_2_id,
network_1_id,
raise
finally:
test_utils.update_router_no_extra_route(neutron_client, router_ids)
raise
finally:
test_utils.update_router_no_extra_route(neutron_client, router_ids)
- test_utils.cleanup_nova(nova_client, instance_ids, flavor_ids)
+ test_utils.cleanup_nova(conn, instance_ids, flavor_ids)
test_utils.cleanup_glance(conn, image_ids)
test_utils.cleanup_neutron(neutron_client, floatingip_ids,
bgpvpn_ids, interfaces, subnet_ids,
test_utils.cleanup_glance(conn, image_ids)
test_utils.cleanup_neutron(neutron_client, floatingip_ids,
bgpvpn_ids, interfaces, subnet_ids,
# http://www.apache.org/licenses/LICENSE-2.0
#
# http://www.apache.org/licenses/LICENSE-2.0
#
import logging
import sys
import logging
import sys
- results = Results(COMMON_CONFIG.line_length)
+ conn = os_utils.get_os_connection()
+ results = Results(COMMON_CONFIG.line_length, conn)
results.add_to_summary(0, "=")
results.add_to_summary(2, "STATUS", "SUBTEST")
results.add_to_summary(0, "=")
results.add_to_summary(0, "=")
results.add_to_summary(2, "STATUS", "SUBTEST")
results.add_to_summary(0, "=")
- nova_client = os_utils.get_nova_client()
neutron_client = os_utils.get_neutron_client()
conn = os_utils.get_os_connection()
neutron_client = os_utils.get_neutron_client()
conn = os_utils.get_os_connection()
keyfile = open(COMMON_CONFIG.keyfile_path, 'r')
key = keyfile.read()
keyfile.close()
keyfile = open(COMMON_CONFIG.keyfile_path, 'r')
key = keyfile.read()
keyfile.close()
- files = {"/home/cirros/id_rsa": key}
+ files = [{'path': '/home/cirros/id_rsa',
+ 'contents': base64.b64encode(key)}]
image_id = os_utils.create_glance_image(
conn, TESTCASE_CONFIG.image_name,
image_id = os_utils.create_glance_image(
conn, TESTCASE_CONFIG.image_name,
neutron_client, TESTCASE_CONFIG.secgroup_name,
TESTCASE_CONFIG.secgroup_descr)
neutron_client, TESTCASE_CONFIG.secgroup_name,
TESTCASE_CONFIG.secgroup_descr)
- compute_nodes = test_utils.assert_and_get_compute_nodes(nova_client)
+ compute_nodes = test_utils.assert_and_get_compute_nodes(conn)
av_zone_1 = "nova:" + compute_nodes[0]
# av_zone_2 = "nova:" + compute_nodes[1]
av_zone_1 = "nova:" + compute_nodes[0]
# av_zone_2 = "nova:" + compute_nodes[1]
# boot INTANCES
userdata_common = test_utils.generate_userdata_common()
vm_2 = test_utils.create_instance(
# boot INTANCES
userdata_common = test_utils.generate_userdata_common()
vm_2 = test_utils.create_instance(
TESTCASE_CONFIG.instance_2_name,
image_id,
network_1_id,
TESTCASE_CONFIG.instance_2_name,
image_id,
network_1_id,
# vm_3 = test_utils.create_instance(
# vm_3 = test_utils.create_instance(
# TESTCASE_CONFIG.instance_3_name,
# image_id,
# network_1_id,
# TESTCASE_CONFIG.instance_3_name,
# image_id,
# network_1_id,
# userdata=userdata_common)
#
# vm_5 = test_utils.create_instance(
# userdata=userdata_common)
#
# vm_5 = test_utils.create_instance(
# TESTCASE_CONFIG.instance_5_name,
# image_id,
# network_2_id,
# TESTCASE_CONFIG.instance_5_name,
# image_id,
# network_2_id,
# TESTCASE_CONFIG.instance_5_ip
])
vm_4 = test_utils.create_instance(
# TESTCASE_CONFIG.instance_5_ip
])
vm_4 = test_utils.create_instance(
TESTCASE_CONFIG.instance_4_name,
image_id,
network_2_id,
TESTCASE_CONFIG.instance_4_name,
image_id,
network_2_id,
# TESTCASE_CONFIG.instance_5_ip
])
vm_1 = test_utils.create_instance(
# TESTCASE_CONFIG.instance_5_ip
])
vm_1 = test_utils.create_instance(
TESTCASE_CONFIG.instance_1_name,
image_id,
network_1_id,
TESTCASE_CONFIG.instance_1_name,
image_id,
network_1_id,
logger.error("exception occurred while executing testcase_2: %s", e)
raise
finally:
logger.error("exception occurred while executing testcase_2: %s", e)
raise
finally:
- test_utils.cleanup_nova(nova_client, instance_ids)
+ test_utils.cleanup_nova(conn, instance_ids)
test_utils.cleanup_glance(conn, image_ids)
test_utils.cleanup_neutron(neutron_client, floatingip_ids,
bgpvpn_ids, interfaces, subnet_ids,
test_utils.cleanup_glance(conn, image_ids)
test_utils.cleanup_neutron(neutron_client, floatingip_ids,
bgpvpn_ids, interfaces, subnet_ids,
- results = Results(COMMON_CONFIG.line_length)
+ conn = os_utils.get_os_connection()
+ results = Results(COMMON_CONFIG.line_length, conn)
results.add_to_summary(0, "=")
results.add_to_summary(2, "STATUS", "SUBTEST")
results.add_to_summary(0, "=")
results.add_to_summary(0, "=")
results.add_to_summary(2, "STATUS", "SUBTEST")
results.add_to_summary(0, "=")
logger.info("Using old image")
conn = os_utils.get_os_connection()
logger.info("Using old image")
conn = os_utils.get_os_connection()
- nova_client = os_utils.get_nova_client()
neutron_client = os_utils.get_neutron_client()
(floatingip_ids, instance_ids, router_ids, network_ids, image_ids,
neutron_client = os_utils.get_neutron_client()
(floatingip_ids, instance_ids, router_ids, network_ids, image_ids,
fake_fip = os_utils.create_floating_ip(neutron_client)
# pin quagga to some compute
floatingip_ids.append(fake_fip['fip_id'])
fake_fip = os_utils.create_floating_ip(neutron_client)
# pin quagga to some compute
floatingip_ids.append(fake_fip['fip_id'])
- compute_node = nova_client.hypervisors.list()[0]
- quagga_compute_node = "nova:" + compute_node.hypervisor_hostname
+ compute_node = conn.compute.hypervisors().next()
+ compute_node = conn.compute.get_hypervisor(compute_node)
+ quagga_compute_node = "nova:" + compute_node.name
# Map the hypervisor used above to a compute handle
# returned by releng's manager
for comp in computes:
# Map the hypervisor used above to a compute handle
# returned by releng's manager
for comp in computes:
TESTCASE_CONFIG.export_targets)
quagga_vm = test_utils.create_instance(
TESTCASE_CONFIG.export_targets)
quagga_vm = test_utils.create_instance(
TESTCASE_CONFIG.quagga_instance_name,
ubuntu_image_id,
quagga_net_id,
TESTCASE_CONFIG.quagga_instance_name,
ubuntu_image_id,
quagga_net_id,
userdata=quagga_bootstrap_script,
compute_node=quagga_compute_node)
userdata=quagga_bootstrap_script,
compute_node=quagga_compute_node)
- instance_ids.append(quagga_vm)
+ instance_ids.append(quagga_vm.id)
quagga_vm_port = test_utils.get_port(neutron_client,
quagga_vm.id)
quagga_vm_port = test_utils.get_port(neutron_client,
quagga_vm.id)
test_utils.attach_instance_to_ext_br(quagga_vm, compute)
testcase = "Bootstrap quagga inside an OpenStack instance"
test_utils.attach_instance_to_ext_br(quagga_vm, compute)
testcase = "Bootstrap quagga inside an OpenStack instance"
- cloud_init_success = test_utils.wait_for_cloud_init(quagga_vm)
+ cloud_init_success = test_utils.wait_for_cloud_init(conn, quagga_vm)
if cloud_init_success:
results.add_success(testcase)
else:
if cloud_init_success:
results.add_success(testcase)
else:
userdata_common = test_utils.generate_ping_userdata(
[TESTCASE_CONFIG.external_network_ip])
userdata_common = test_utils.generate_ping_userdata(
[TESTCASE_CONFIG.external_network_ip])
- compute_node = nova_client.hypervisors.list()[0]
- av_zone_1 = "nova:" + compute_node.hypervisor_hostname
+ compute_node = conn.compute.hypervisors().next()
+ av_zone_1 = "nova:" + compute_node.name
vm_bgpvpn = test_utils.create_instance(
vm_bgpvpn = test_utils.create_instance(
TESTCASE_CONFIG.instance_1_name,
image_id,
net_1_id,
TESTCASE_CONFIG.instance_1_name,
image_id,
net_1_id,
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_1,
userdata=userdata_common)
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_1,
userdata=userdata_common)
- instance_ids.append(vm_bgpvpn)
+ instance_ids.append(vm_bgpvpn.id)
# wait for VM to get IP
instance_up = test_utils.wait_for_instances_up(vm_bgpvpn)
# wait for VM to get IP
instance_up = test_utils.wait_for_instances_up(vm_bgpvpn)
finally:
if quagga_vm is not None:
test_utils.detach_instance_from_ext_br(quagga_vm, compute)
finally:
if quagga_vm is not None:
test_utils.detach_instance_from_ext_br(quagga_vm, compute)
- test_utils.cleanup_nova(nova_client, instance_ids, flavor_ids)
+ test_utils.cleanup_nova(conn, instance_ids, flavor_ids)
test_utils.cleanup_glance(conn, image_ids)
test_utils.cleanup_neutron(neutron_client, floatingip_ids,
bgpvpn_ids, interfaces, subnet_ids,
test_utils.cleanup_glance(conn, image_ids)
test_utils.cleanup_neutron(neutron_client, floatingip_ids,
bgpvpn_ids, interfaces, subnet_ids,
- results = Results(COMMON_CONFIG.line_length)
+ conn = os_utils.get_os_connection()
+ results = Results(COMMON_CONFIG.line_length, conn)
results.add_to_summary(0, "=")
results.add_to_summary(2, "STATUS", "SUBTEST")
results.add_to_summary(0, "=")
results.add_to_summary(0, "=")
results.add_to_summary(2, "STATUS", "SUBTEST")
results.add_to_summary(0, "=")
- nova_client = os_utils.get_nova_client()
neutron_client = os_utils.get_neutron_client()
conn = os_utils.get_os_connection()
neutron_client = os_utils.get_neutron_client()
conn = os_utils.get_os_connection()
TESTCASE_CONFIG.secgroup_name,
TESTCASE_CONFIG.secgroup_descr)
TESTCASE_CONFIG.secgroup_name,
TESTCASE_CONFIG.secgroup_descr)
- compute_nodes = test_utils.assert_and_get_compute_nodes(nova_client)
+ compute_nodes = test_utils.assert_and_get_compute_nodes(conn)
av_zone_1 = "nova:" + compute_nodes[0]
av_zone_2 = "nova:" + compute_nodes[1]
# boot INTANCES
vm_2 = test_utils.create_instance(
av_zone_1 = "nova:" + compute_nodes[0]
av_zone_2 = "nova:" + compute_nodes[1]
# boot INTANCES
vm_2 = test_utils.create_instance(
TESTCASE_CONFIG.instance_2_name,
image_id,
network_1_id,
sg_id,
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_1)
TESTCASE_CONFIG.instance_2_name,
image_id,
network_1_id,
sg_id,
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_1)
- vm_2_ip = test_utils.get_instance_ip(vm_2)
+ vm_2_ip = test_utils.get_instance_ip(conn, vm_2)
vm_3 = test_utils.create_instance(
vm_3 = test_utils.create_instance(
TESTCASE_CONFIG.instance_3_name,
image_id,
network_1_id,
sg_id,
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_2)
TESTCASE_CONFIG.instance_3_name,
image_id,
network_1_id,
sg_id,
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_2)
- vm_3_ip = test_utils.get_instance_ip(vm_3)
+ vm_3_ip = test_utils.get_instance_ip(conn, vm_3)
vm_5 = test_utils.create_instance(
vm_5 = test_utils.create_instance(
TESTCASE_CONFIG.instance_5_name,
image_id,
network_2_id,
sg_id,
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_2)
TESTCASE_CONFIG.instance_5_name,
image_id,
network_2_id,
sg_id,
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_2)
- vm_5_ip = test_utils.get_instance_ip(vm_5)
+ vm_5_ip = test_utils.get_instance_ip(conn, vm_5)
# We boot vm5 first because we need vm5_ip for vm4 userdata
u4 = test_utils.generate_ping_userdata([vm_5_ip])
vm_4 = test_utils.create_instance(
# We boot vm5 first because we need vm5_ip for vm4 userdata
u4 = test_utils.generate_ping_userdata([vm_5_ip])
vm_4 = test_utils.create_instance(
TESTCASE_CONFIG.instance_4_name,
image_id,
network_2_id,
TESTCASE_CONFIG.instance_4_name,
image_id,
network_2_id,
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_1,
userdata=u4)
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_1,
userdata=u4)
- vm_4_ip = test_utils.get_instance_ip(vm_4)
+ vm_4_ip = test_utils.get_instance_ip(conn, vm_4)
# We boot VM1 at the end because we need to get the IPs
# first to generate the userdata
# We boot VM1 at the end because we need to get the IPs
# first to generate the userdata
vm_4_ip,
vm_5_ip])
vm_1 = test_utils.create_instance(
vm_4_ip,
vm_5_ip])
vm_1 = test_utils.create_instance(
TESTCASE_CONFIG.instance_1_name,
image_id,
network_1_id,
TESTCASE_CONFIG.instance_1_name,
image_id,
network_1_id,
logger.error("exception occurred while executing testcase_4: %s", e)
raise
finally:
logger.error("exception occurred while executing testcase_4: %s", e)
raise
finally:
- test_utils.cleanup_nova(nova_client, instance_ids)
+ test_utils.cleanup_nova(conn, instance_ids)
test_utils.cleanup_glance(conn, image_ids)
test_utils.cleanup_neutron(neutron_client, floatingip_ids,
bgpvpn_ids, interfaces, subnet_ids,
test_utils.cleanup_glance(conn, image_ids)
test_utils.cleanup_neutron(neutron_client, floatingip_ids,
bgpvpn_ids, interfaces, subnet_ids,
- results = Results(COMMON_CONFIG.line_length)
+ conn = os_utils.get_os_connection()
+ results = Results(COMMON_CONFIG.line_length, conn)
results.add_to_summary(0, "=")
results.add_to_summary(2, "STATUS", "SUBTEST")
results.add_to_summary(0, "=")
results.add_to_summary(0, "=")
results.add_to_summary(2, "STATUS", "SUBTEST")
results.add_to_summary(0, "=")
- nova_client = os_utils.get_nova_client()
neutron_client = os_utils.get_neutron_client()
conn = os_utils.get_os_connection()
neutron_client = os_utils.get_neutron_client()
conn = os_utils.get_os_connection()
test_utils.open_http_port(neutron_client, sg_id)
vm_2 = test_utils.create_instance(
test_utils.open_http_port(neutron_client, sg_id)
vm_2 = test_utils.create_instance(
TESTCASE_CONFIG.instance_2_name,
image_id,
network_2_id,
sg_id,
secgroup_name=TESTCASE_CONFIG.secgroup_name)
TESTCASE_CONFIG.instance_2_name,
image_id,
network_2_id,
sg_id,
secgroup_name=TESTCASE_CONFIG.secgroup_name)
- vm_2_ip = test_utils.get_instance_ip(vm_2)
+ vm_2_ip = test_utils.get_instance_ip(conn, vm_2)
u1 = test_utils.generate_ping_userdata([vm_2_ip])
vm_1 = test_utils.create_instance(
u1 = test_utils.generate_ping_userdata([vm_2_ip])
vm_1 = test_utils.create_instance(
TESTCASE_CONFIG.instance_1_name,
image_id,
network_1_id,
TESTCASE_CONFIG.instance_1_name,
image_id,
network_1_id,
logger.error("exception occurred while executing testcase_7: %s", e)
raise
finally:
logger.error("exception occurred while executing testcase_7: %s", e)
raise
finally:
- test_utils.cleanup_nova(nova_client, instance_ids)
+ test_utils.cleanup_nova(conn, instance_ids)
test_utils.cleanup_glance(conn, image_ids)
test_utils.cleanup_neutron(neutron_client, floatingip_ids,
bgpvpn_ids, interfaces, subnet_ids,
test_utils.cleanup_glance(conn, image_ids)
test_utils.cleanup_neutron(neutron_client, floatingip_ids,
bgpvpn_ids, interfaces, subnet_ids,
- results = Results(COMMON_CONFIG.line_length)
+ conn = os_utils.get_os_connection()
+ results = Results(COMMON_CONFIG.line_length, conn)
results.add_to_summary(0, "=")
results.add_to_summary(2, "STATUS", "SUBTEST")
results.add_to_summary(0, "=")
results.add_to_summary(0, "=")
results.add_to_summary(2, "STATUS", "SUBTEST")
results.add_to_summary(0, "=")
- nova_client = os_utils.get_nova_client()
neutron_client = os_utils.get_neutron_client()
conn = os_utils.get_os_connection()
neutron_client = os_utils.get_neutron_client()
conn = os_utils.get_os_connection()
test_utils.open_icmp(neutron_client, sg_id)
test_utils.open_http_port(neutron_client, sg_id)
test_utils.open_icmp(neutron_client, sg_id)
test_utils.open_http_port(neutron_client, sg_id)
- compute_nodes = test_utils.assert_and_get_compute_nodes(nova_client)
+ compute_nodes = test_utils.assert_and_get_compute_nodes(conn)
av_zone_1 = "nova:" + compute_nodes[0]
# spawning the VMs on the same compute because fib flow (21) entries
# are not created properly if vm1 and vm2 are attached to two
# different computes
vm_2 = test_utils.create_instance(
av_zone_1 = "nova:" + compute_nodes[0]
# spawning the VMs on the same compute because fib flow (21) entries
# are not created properly if vm1 and vm2 are attached to two
# different computes
vm_2 = test_utils.create_instance(
TESTCASE_CONFIG.instance_2_name,
image_id,
network_2_id,
sg_id,
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_1)
TESTCASE_CONFIG.instance_2_name,
image_id,
network_2_id,
sg_id,
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_1)
- vm_2_ip = test_utils.get_instance_ip(vm_2)
+ vm_2_ip = test_utils.get_instance_ip(conn, vm_2)
u1 = test_utils.generate_ping_userdata([vm_2_ip])
vm_1 = test_utils.create_instance(
u1 = test_utils.generate_ping_userdata([vm_2_ip])
vm_1 = test_utils.create_instance(
TESTCASE_CONFIG.instance_1_name,
image_id,
network_1_id,
TESTCASE_CONFIG.instance_1_name,
image_id,
network_1_id,
logger.error("exception occurred while executing testcase_8: %s", e)
raise
finally:
logger.error("exception occurred while executing testcase_8: %s", e)
raise
finally:
- test_utils.cleanup_nova(nova_client, instance_ids)
+ test_utils.cleanup_nova(conn, instance_ids)
test_utils.cleanup_glance(conn, image_ids)
test_utils.cleanup_neutron(neutron_client, floatingip_ids,
bgpvpn_ids, interfaces, subnet_ids,
test_utils.cleanup_glance(conn, image_ids)
test_utils.cleanup_neutron(neutron_client, floatingip_ids,
bgpvpn_ids, interfaces, subnet_ids,