1 # Copyright 2016 Cisco Systems, Inc. All rights reserved.
3 # Licensed under the Apache License, Version 2.0 (the "License"); you may
4 # not use this file except in compliance with the License. You may obtain
5 # a copy of the License at
7 # http://www.apache.org/licenses/LICENSE-2.0
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
14 """Module for Openstack compute operations"""
19 from glanceclient import exc as glance_exception
21 from glanceclient.openstack.common.apiclient.exceptions import NotFound as GlanceImageNotFound
23 from glanceclient.v1.apiclient.exceptions import NotFound as GlanceImageNotFound
30 class Compute(object):
31 def __init__(self, nova_client, glance_client, neutron_client, config):
32 self.novaclient = nova_client
33 self.glance_client = glance_client
34 self.neutronclient = neutron_client
37 def find_image(self, image_name):
39 return next(self.glance_client.images.list(filters={'name': image_name}), None)
40 except (novaclient.exceptions.NotFound, keystoneauth1.exceptions.http.NotFound,
45 def upload_image_via_url(self, final_image_name, image_file, retry_count=60):
47 Directly uploads image to Nova via URL if image is not present
51 # check image is file/url based.
52 with open(image_file) as f_image:
53 img = self.glance_client.images.create(name=str(final_image_name),
55 container_format="bare",
57 self.glance_client.images.upload(img.id, image_data=f_image)
58 # Check for the image in glance
59 while img.status in ['queued', 'saving'] and retry < retry_count:
60 img = self.glance_client.images.get(img.id)
62 LOG.debug("Image not yet active, retrying %s of %s...", retry, retry_count)
63 time.sleep(self.config.generic_poll_sec)
64 if img.status != 'active':
65 LOG.error("Image uploaded but too long to get to active state")
66 raise Exception("Image update active state timeout")
67 except glance_exception.HTTPForbidden:
68 LOG.error("Cannot upload image without admin access. Please make "
69 "sure the image is uploaded and is either public or owned by you.")
72 # catch the exception for file based errors.
73 LOG.error("Failed while uploading the image. Please make sure the "
74 "image at the specified location %s is correct.", image_file)
76 except keystoneauth1.exceptions.http.NotFound as exc:
77 LOG.error("Authentication error while uploading the image: %s", str(exc))
80 LOG.error(traceback.format_exc())
81 LOG.error("Failed to upload image %s.", image_file)
85 def delete_image(self, img_name):
87 LOG.log("Deleting image %s...", img_name)
88 img = self.find_image(image_name=img_name)
89 self.glance_client.images.delete(img.id)
91 LOG.error("Failed to delete the image %s.", img_name)
96 # Remove keypair name from openstack if exists
97 def remove_public_key(self, name):
98 keypair_list = self.novaclient.keypairs.list()
99 for key in keypair_list:
101 self.novaclient.keypairs.delete(name)
102 LOG.info('Removed public key %s', name)
105 # Test if keypair file is present if not create it
106 def create_keypair(self, name, private_key_pair_file):
107 self.remove_public_key(name)
108 keypair = self.novaclient.keypairs.create(name)
109 # Now write the keypair to the file if requested
110 if private_key_pair_file:
111 kpf = os.open(private_key_pair_file,
112 os.O_WRONLY | os.O_CREAT, 0o600)
113 with os.fdopen(kpf, 'w') as kpf:
114 kpf.write(keypair.private_key)
117 # Add an existing public key to openstack
118 def add_public_key(self, name, public_key_file):
119 self.remove_public_key(name)
120 # extract the public key from the file
123 with open(os.path.expanduser(public_key_file)) as pkf:
124 public_key = pkf.read()
125 except IOError as exc:
126 LOG.error('Cannot open public key file %s: %s', public_key_file, exc)
128 keypair = self.novaclient.keypairs.create(name, public_key)
131 def init_key_pair(self, kp_name, ssh_access):
132 '''Initialize the key pair for all test VMs
133 if a key pair is specified in access, use that key pair else
134 create a temporary key pair
136 if ssh_access.public_key_file:
137 return self.add_public_key(kp_name, ssh_access.public_key_file)
138 keypair = self.create_keypair(kp_name, None)
139 ssh_access.private_key = keypair.private_key
142 def find_network(self, label):
143 net = self.novaclient.networks.find(label=label)
146 # Create a server instance with name vmname
147 # and check that it gets into the ACTIVE state
148 def create_server(self, vmname, image, flavor, key_name,
149 nic, sec_group, avail_zone=None, user_data=None,
150 config_drive=None, files=None):
153 security_groups = [sec_group['id']]
155 security_groups = None
157 # Also attach the created security group for the test
158 instance = self.novaclient.servers.create(name=vmname,
163 availability_zone=avail_zone,
165 config_drive=config_drive,
167 security_groups=security_groups)
170 def poll_server(self, instance):
171 return self.novaclient.servers.get(instance.id)
173 def get_server_list(self):
174 servers_list = self.novaclient.servers.list()
177 def find_floating_ips(self):
178 floating_ip = self.novaclient.floating_ips.list()
181 def create_floating_ips(self, pool):
182 return self.novaclient.floating_ips.create(pool)
184 # Return the server network for a server
185 def find_server_network(self, vmname):
186 servers_list = self.get_server_list()
187 for server in servers_list:
188 if server.name == vmname and server.status == "ACTIVE":
189 return server.networks
192 # Returns True if server is present false if not.
193 # Retry for a few seconds since after VM creation sometimes
194 # it takes a while to show up
195 def find_server(self, vmname, retry_count):
196 for retry_attempt in range(retry_count):
197 servers_list = self.get_server_list()
198 for server in servers_list:
199 if server.name == vmname and server.status == "ACTIVE":
201 # Sleep between retries
202 LOG.debug("[%s] VM not yet found, retrying %s of %s...",
203 vmname, (retry_attempt + 1), retry_count)
204 time.sleep(self.config.generic_poll_sec)
205 LOG.error("[%s] VM not found, after %s attempts", vmname, retry_count)
208 # Returns True if server is found and deleted/False if not,
209 # retry the delete if there is a delay
210 def delete_server_by_name(self, vmname):
211 servers_list = self.get_server_list()
212 for server in servers_list:
213 if server.name == vmname:
214 LOG.info('Deleting server %s', server)
215 self.novaclient.servers.delete(server)
219 def delete_server(self, server):
220 self.novaclient.servers.delete(server)
222 def find_flavor(self, flavor_type):
224 flavor = self.novaclient.flavors.find(name=flavor_type)
229 def create_flavor(self, name, ram, vcpus, disk, ephemeral=0, override=False):
231 self.delete_flavor(name)
232 return self.novaclient.flavors.create(name=name, ram=ram, vcpus=vcpus, disk=disk,
235 def delete_flavor(self, flavor=None, name=None):
238 flavor = self.find_flavor(name)
244 def normalize_az_host(self, az, host):
246 az = self.config.availability_zone
247 return az + ':' + host
249 def auto_fill_az(self, host_list, host):
251 no az provided, if there is a host list we can auto-fill the az
252 else we use the configured az if available
253 else we return an error
256 for hyp in host_list:
258 return self.normalize_az_host(hyp.zone, host)
260 LOG.error('Passed host name does not exist: %s', host)
262 if self.config.availability_zone:
263 return self.normalize_az_host(None, host)
264 LOG.error('--hypervisor passed without an az and no az configured')
267 def sanitize_az_host(self, host_list, az_host):
269 host_list: list of hosts as retrieved from openstack (can be empty)
270 az_host: either a host or a az:host string
271 if a host, will check host is in the list, find the corresponding az and
273 if az:host is passed will check the host is in the list and az matches
274 if host_list is empty, will return the configured az if there is no
278 # no host_list, return as is (no check)
281 # if there is a host_list, extract and verify the az and host
282 az_host_list = az_host.split(':')
283 zone = az_host_list[0]
284 host = az_host_list[1]
285 for hyp in host_list:
290 # else continue - another zone with same host name?
292 LOG.error('No match for availability zone and host %s', az_host)
295 return self.auto_fill_az(host_list, az_host)
298 # Return a list of 0, 1 or 2 az:host
300 # The list is computed as follows:
301 # The list of all hosts is retrieved first from openstack
302 # if this fails, checks and az auto-fill are disabled
304 # If the user provides a list of hypervisors (--hypervisor)
305 # that list is checked and returned
307 # If the user provides a configured az name (config.availability_zone)
308 # up to the first 2 hosts from the list that match the az are returned
310 # If the user did not configure an az name
311 # up to the first 2 hosts from the list are returned
312 # Possible return values:
315 # [ az1:hyp1, az2:hyp2 ]
316 # [] if an error occurred (error message printed to console)
318 def get_az_host_list(self):
323 host_list = self.novaclient.services.list()
324 except novaclient.exceptions.Forbidden:
325 LOG.warning('Operation Forbidden: could not retrieve list of hosts'
326 ' (likely no permission)')
328 for host in host_list:
329 # this host must be a compute node
330 if host.binary != 'nova-compute' or host.state != 'up':
333 if self.config.availability_zone:
334 if host.zone == self.config.availability_zone:
335 candidate = self.normalize_az_host(None, host.host)
337 candidate = self.normalize_az_host(host.zone, host.host)
339 avail_list.append(candidate)
340 # pick first 2 matches at most
341 if len(avail_list) == 2:
344 # if empty we insert the configured az
347 if not self.config.availability_zone:
348 LOG.error('Availability_zone must be configured')
350 LOG.error('No host matching the selection for availability zone: %s',
351 self.config.availability_zone)
354 avail_list = [self.config.availability_zone]
357 def get_enabled_az_host_list(self, required_count=1):
359 Check which hypervisors are enabled and on which compute nodes they are running.
360 Pick required count of hosts.
362 :param required_count: count of compute-nodes to return
363 :return: list of enabled available compute nodes
369 hypervisor_list = self.novaclient.hypervisors.list()
370 host_list = self.novaclient.services.list()
371 except novaclient.exceptions.Forbidden:
372 LOG.warning('Operation Forbidden: could not retrieve list of hypervisors'
373 ' (likely no permission)')
375 hypervisor_list = [h for h in hypervisor_list if h.status == 'enabled' and h.state == 'up']
376 if self.config.availability_zone:
377 host_list = [h for h in host_list if h.zone == self.config.availability_zone]
379 if self.config.compute_nodes:
380 host_list = [h for h in host_list if h.host in self.config.compute_nodes]
382 hosts = [h.hypervisor_hostname for h in hypervisor_list]
383 host_list = [h for h in host_list if h.host in hosts]
386 for host in host_list:
387 candidate = self.normalize_az_host(host.zone, host.host)
389 avail_list.append(candidate)
390 if len(avail_list) == required_count:
395 def get_hypervisor(self, hyper_name):
396 # can raise novaclient.exceptions.NotFound
397 # first get the id from name
398 hyper = self.novaclient.hypervisors.search(hyper_name)[0]
399 # get full hypervisor object
400 return self.novaclient.hypervisors.get(hyper.id)
402 # Given 2 VMs test if they are running on same Host or not
403 def check_vm_placement(self, vm_instance1, vm_instance2):
405 server_instance_1 = self.novaclient.servers.get(vm_instance1)
406 server_instance_2 = self.novaclient.servers.get(vm_instance2)
407 return bool(server_instance_1.hostId == server_instance_2.hostId)
408 except novaclient.exceptions:
409 LOG.warning("Exception in retrieving the hostId of servers")
411 # Create a new security group with appropriate rules
412 def security_group_create(self):
413 # check first the security group exists
414 sec_groups = self.neutronclient.list_security_groups()['security_groups']
415 group = [x for x in sec_groups if x['name'] == self.config.security_group_name]
421 'name': self.config.security_group_name,
422 'description': 'PNS Security Group'
425 group = self.neutronclient.create_security_group(body)['security_group']
426 self.security_group_add_rules(group)
430 # Delete a security group
431 def security_group_delete(self, group):
433 LOG.info("Deleting security group")
434 self.neutronclient.delete_security_group(group['id'])
436 # Add rules to the security group
437 def security_group_add_rules(self, group):
439 'security_group_rule': {
440 'direction': 'ingress',
441 'security_group_id': group['id'],
442 'remote_group_id': None
445 if self.config.ipv6_mode:
446 body['security_group_rule']['ethertype'] = 'IPv6'
447 body['security_group_rule']['remote_ip_prefix'] = '::/0'
449 body['security_group_rule']['ethertype'] = 'IPv4'
450 body['security_group_rule']['remote_ip_prefix'] = '0.0.0.0/0'
453 body['security_group_rule']['protocol'] = 'icmp'
454 body['security_group_rule']['port_range_min'] = None
455 body['security_group_rule']['port_range_max'] = None
456 self.neutronclient.create_security_group_rule(body)
459 body['security_group_rule']['protocol'] = 'tcp'
460 body['security_group_rule']['port_range_min'] = 22
461 body['security_group_rule']['port_range_max'] = 22
462 self.neutronclient.create_security_group_rule(body)
464 # Allow TCP/UDP traffic for perf tools like iperf/nuttcp
465 # 5001: Data traffic (standard iperf data port)
466 # 5002: Control traffic (non standard)
467 # note that 5000/tcp is already picked by openstack keystone
468 body['security_group_rule']['protocol'] = 'tcp'
469 body['security_group_rule']['port_range_min'] = 5001
470 body['security_group_rule']['port_range_max'] = 5002
471 self.neutronclient.create_security_group_rule(body)
472 body['security_group_rule']['protocol'] = 'udp'
473 self.neutronclient.create_security_group_rule(body)