1 # Copyright 2016 Cisco Systems, Inc. All rights reserved.
3 # Licensed under the Apache License, Version 2.0 (the "License"); you may
4 # not use this file except in compliance with the License. You may obtain
5 # a copy of the License at
7 # http://www.apache.org/licenses/LICENSE-2.0
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12 # License for the specific language governing permissions and limitations
15 '''Module for Openstack compute operations'''
16 from glanceclient import exc as glance_exception
26 from glanceclient.openstack.common.apiclient.exceptions import NotFound as GlanceImageNotFound
28 from glanceclient.v1.apiclient.exceptions import NotFound as GlanceImageNotFound
class Compute(object):
    '''Wrapper around the OpenStack clients (nova, glance, neutron).

    Provides convenience operations for images, key pairs, servers,
    flavors, availability zones and security groups.
    '''

    def __init__(self, nova_client, glance_client, neutron_client, config):
        self.novaclient = nova_client
        self.glance_client = glance_client
        self.neutronclient = neutron_client
        # store the config: many methods read self.config
        # (generic_poll_sec, availability_zone, security_group_name, ...)
        self.config = config
39 def find_image(self, image_name):
41 return next(self.glance_client.images.list(filters={'name': image_name}), None)
42 except (novaclient.exceptions.NotFound, keystoneauth1.exceptions.http.NotFound,
47 def upload_image_via_url(self, final_image_name, image_file, retry_count=60):
49 Directly uploads image to Nova via URL if image is not present
53 # check image is file/url based.
54 with open(image_file) as f_image:
55 img = self.glance_client.images.create(name=str(final_image_name),
57 container_format="bare",
59 self.glance_client.images.upload(img.id, image_data=f_image)
60 # Check for the image in glance
61 while img.status in ['queued', 'saving'] and retry < retry_count:
62 img = self.glance_client.images.get(img.id)
64 LOG.debug("Image not yet active, retrying %s of %s...", retry, retry_count)
65 time.sleep(self.config.generic_poll_sec)
66 if img.status != 'active':
67 LOG.error("Image uploaded but too long to get to active state")
68 raise Exception("Image update active state timeout")
69 except glance_exception.HTTPForbidden:
70 LOG.error("Cannot upload image without admin access. Please make "
71 "sure the image is uploaded and is either public or owned by you.")
74 # catch the exception for file based errors.
75 LOG.error("Failed while uploading the image. Please make sure the "
76 "image at the specified location %s is correct.", image_file)
78 except keystoneauth1.exceptions.http.NotFound as exc:
79 LOG.error("Authentication error while uploading the image:" + str(exc))
82 LOG.error(traceback.format_exc())
83 LOG.error("Failed to upload image %s.", image_file)
87 def delete_image(self, img_name):
89 LOG.log("Deleting image %s...", img_name)
90 img = self.find_image(image_name=img_name)
91 self.glance_client.images.delete(img.id)
93 LOG.error("Failed to delete the image %s.", img_name)
98 # Remove keypair name from openstack if exists
99 def remove_public_key(self, name):
100 keypair_list = self.novaclient.keypairs.list()
101 for key in keypair_list:
103 self.novaclient.keypairs.delete(name)
104 LOG.info('Removed public key %s', name)
107 # Test if keypair file is present if not create it
108 def create_keypair(self, name, private_key_pair_file):
109 self.remove_public_key(name)
110 keypair = self.novaclient.keypairs.create(name)
111 # Now write the keypair to the file if requested
112 if private_key_pair_file:
113 kpf = os.open(private_key_pair_file,
114 os.O_WRONLY | os.O_CREAT, 0o600)
115 with os.fdopen(kpf, 'w') as kpf:
116 kpf.write(keypair.private_key)
119 # Add an existing public key to openstack
120 def add_public_key(self, name, public_key_file):
121 self.remove_public_key(name)
122 # extract the public key from the file
125 with open(os.path.expanduser(public_key_file)) as pkf:
126 public_key = pkf.read()
127 except IOError as exc:
128 LOG.error('Cannot open public key file %s: %s', public_key_file, exc)
130 keypair = self.novaclient.keypairs.create(name, public_key)
133 def init_key_pair(self, kp_name, ssh_access):
134 '''Initialize the key pair for all test VMs
135 if a key pair is specified in access, use that key pair else
136 create a temporary key pair
138 if ssh_access.public_key_file:
139 return self.add_public_key(kp_name, ssh_access.public_key_file)
141 keypair = self.create_keypair(kp_name, None)
142 ssh_access.private_key = keypair.private_key
145 def find_network(self, label):
146 net = self.novaclient.networks.find(label=label)
149 # Create a server instance with name vmname
150 # and check that it gets into the ACTIVE state
151 def create_server(self, vmname, image, flavor, key_name,
152 nic, sec_group, avail_zone=None, user_data=None,
153 config_drive=None, files=None):
156 security_groups = [sec_group['id']]
158 security_groups = None
160 # Also attach the created security group for the test
161 instance = self.novaclient.servers.create(name=vmname,
166 availability_zone=avail_zone,
168 config_drive=config_drive,
170 security_groups=security_groups)
173 def poll_server(self, instance):
174 return self.novaclient.servers.get(instance.id)
176 def get_server_list(self):
177 servers_list = self.novaclient.servers.list()
180 def find_floating_ips(self):
181 floating_ip = self.novaclient.floating_ips.list()
184 def create_floating_ips(self, pool):
185 return self.novaclient.floating_ips.create(pool)
187 # Return the server network for a server
188 def find_server_network(self, vmname):
189 servers_list = self.get_server_list()
190 for server in servers_list:
191 if server.name == vmname and server.status == "ACTIVE":
192 return server.networks
195 # Returns True if server is present false if not.
196 # Retry for a few seconds since after VM creation sometimes
197 # it takes a while to show up
198 def find_server(self, vmname, retry_count):
199 for retry_attempt in range(retry_count):
200 servers_list = self.get_server_list()
201 for server in servers_list:
202 if server.name == vmname and server.status == "ACTIVE":
204 # Sleep between retries
205 LOG.debug("[%s] VM not yet found, retrying %s of %s...",
206 vmname, (retry_attempt + 1), retry_count)
207 time.sleep(self.config.generic_poll_sec)
208 LOG.error("[%s] VM not found, after %s attempts", vmname, retry_count)
211 # Returns True if server is found and deleted/False if not,
212 # retry the delete if there is a delay
213 def delete_server_by_name(self, vmname):
214 servers_list = self.get_server_list()
215 for server in servers_list:
216 if server.name == vmname:
217 LOG.info('Deleting server %s', server)
218 self.novaclient.servers.delete(server)
222 def delete_server(self, server):
223 self.novaclient.servers.delete(server)
225 def find_flavor(self, flavor_type):
227 flavor = self.novaclient.flavors.find(name=flavor_type)
232 def create_flavor(self, name, ram, vcpus, disk, ephemeral=0, override=False):
234 self.delete_flavor(name)
235 return self.novaclient.flavors.create(name=name, ram=ram, vcpus=vcpus, disk=disk,
238 def delete_flavor(self, flavor=None, name=None):
241 flavor = self.find_flavor(name)
247 def normalize_az_host(self, az, host):
249 az = self.config.availability_zone
250 return az + ':' + host
252 def auto_fill_az(self, host_list, host):
254 no az provided, if there is a host list we can auto-fill the az
255 else we use the configured az if available
256 else we return an error
259 for hyp in host_list:
261 return self.normalize_az_host(hyp.zone, host)
263 LOG.error('Passed host name does not exist: ' + host)
265 if self.config.availability_zone:
266 return self.normalize_az_host(None, host)
267 LOG.error('--hypervisor passed without an az and no az configured')
270 def sanitize_az_host(self, host_list, az_host):
272 host_list: list of hosts as retrieved from openstack (can be empty)
273 az_host: either a host or a az:host string
274 if a host, will check host is in the list, find the corresponding az and
276 if az:host is passed will check the host is in the list and az matches
277 if host_list is empty, will return the configured az if there is no
281 # no host_list, return as is (no check)
284 # if there is a host_list, extract and verify the az and host
285 az_host_list = az_host.split(':')
286 zone = az_host_list[0]
287 host = az_host_list[1]
288 for hyp in host_list:
293 # else continue - another zone with same host name?
295 LOG.error('No match for availability zone and host ' + az_host)
298 return self.auto_fill_az(host_list, az_host)
301 # Return a list of 0, 1 or 2 az:host
303 # The list is computed as follows:
304 # The list of all hosts is retrieved first from openstack
305 # if this fails, checks and az auto-fill are disabled
307 # If the user provides a list of hypervisors (--hypervisor)
308 # that list is checked and returned
310 # If the user provides a configured az name (config.availability_zone)
311 # up to the first 2 hosts from the list that match the az are returned
313 # If the user did not configure an az name
314 # up to the first 2 hosts from the list are returned
315 # Possible return values:
318 # [ az1:hyp1, az2:hyp2 ]
319 # [] if an error occurred (error message printed to console)
321 def get_az_host_list(self):
326 host_list = self.novaclient.services.list()
327 except novaclient.exceptions.Forbidden:
328 LOG.warning('Operation Forbidden: could not retrieve list of hosts'
329 ' (likely no permission)')
331 for host in host_list:
332 # this host must be a compute node
333 if host.binary != 'nova-compute' or host.state != 'up':
336 if self.config.availability_zone:
337 if host.zone == self.config.availability_zone:
338 candidate = self.normalize_az_host(None, host.host)
340 candidate = self.normalize_az_host(host.zone, host.host)
342 avail_list.append(candidate)
343 # pick first 2 matches at most
344 if len(avail_list) == 2:
347 # if empty we insert the configured az
350 if not self.config.availability_zone:
351 LOG.error('Availability_zone must be configured')
353 LOG.error('No host matching the selection for availability zone: ' +
354 self.config.availability_zone)
357 avail_list = [self.config.availability_zone]
360 def get_enabled_az_host_list(self, required_count=1):
362 Check which hypervisors are enabled and on which compute nodes they are running.
363 Pick required count of hosts.
365 :param required_count: count of compute-nodes to return
366 :return: list of enabled available compute nodes
372 hypervisor_list = self.novaclient.hypervisors.list()
373 host_list = self.novaclient.services.list()
374 except novaclient.exceptions.Forbidden:
375 LOG.warning('Operation Forbidden: could not retrieve list of hypervisors'
376 ' (likely no permission)')
378 hypervisor_list = filter(lambda h: h.status == 'enabled' and h.state == 'up',
380 if self.config.availability_zone:
381 host_list = filter(lambda h: h.zone == self.config.availability_zone, host_list)
383 if self.config.compute_nodes:
384 host_list = filter(lambda h: h.host in self.config.compute_nodes, host_list)
386 hosts = [h.hypervisor_hostname for h in hypervisor_list]
387 host_list = filter(lambda h: h.host in hosts, host_list)
390 for host in host_list:
391 candidate = self.normalize_az_host(host.zone, host.host)
393 avail_list.append(candidate)
394 if len(avail_list) == required_count:
399 def get_hypervisor(self, hyper_name):
400 # can raise novaclient.exceptions.NotFound
401 # first get the id from name
402 hyper = self.novaclient.hypervisors.search(hyper_name)[0]
403 # get full hypervisor object
404 return self.novaclient.hypervisors.get(hyper.id)
406 # Given 2 VMs test if they are running on same Host or not
407 def check_vm_placement(self, vm_instance1, vm_instance2):
409 server_instance_1 = self.novaclient.servers.get(vm_instance1)
410 server_instance_2 = self.novaclient.servers.get(vm_instance2)
411 if server_instance_1.hostId == server_instance_2.hostId:
415 except novaclient.exceptions:
416 LOG.warning("Exception in retrieving the hostId of servers")
418 # Create a new security group with appropriate rules
419 def security_group_create(self):
420 # check first the security group exists
421 sec_groups = self.neutronclient.list_security_groups()['security_groups']
422 group = [x for x in sec_groups if x['name'] == self.config.security_group_name]
428 'name': self.config.security_group_name,
429 'description': 'PNS Security Group'
432 group = self.neutronclient.create_security_group(body)['security_group']
433 self.security_group_add_rules(group)
437 # Delete a security group
438 def security_group_delete(self, group):
440 LOG.info("Deleting security group")
441 self.neutronclient.delete_security_group(group['id'])
443 # Add rules to the security group
444 def security_group_add_rules(self, group):
446 'security_group_rule': {
447 'direction': 'ingress',
448 'security_group_id': group['id'],
449 'remote_group_id': None
452 if self.config.ipv6_mode:
453 body['security_group_rule']['ethertype'] = 'IPv6'
454 body['security_group_rule']['remote_ip_prefix'] = '::/0'
456 body['security_group_rule']['ethertype'] = 'IPv4'
457 body['security_group_rule']['remote_ip_prefix'] = '0.0.0.0/0'
460 body['security_group_rule']['protocol'] = 'icmp'
461 body['security_group_rule']['port_range_min'] = None
462 body['security_group_rule']['port_range_max'] = None
463 self.neutronclient.create_security_group_rule(body)
466 body['security_group_rule']['protocol'] = 'tcp'
467 body['security_group_rule']['port_range_min'] = 22
468 body['security_group_rule']['port_range_max'] = 22
469 self.neutronclient.create_security_group_rule(body)
471 # Allow TCP/UDP traffic for perf tools like iperf/nuttcp
472 # 5001: Data traffic (standard iperf data port)
473 # 5002: Control traffic (non standard)
474 # note that 5000/tcp is already picked by openstack keystone
475 body['security_group_rule']['protocol'] = 'tcp'
476 body['security_group_rule']['port_range_min'] = 5001
477 body['security_group_rule']['port_range_max'] = 5002
478 self.neutronclient.create_security_group_rule(body)
479 body['security_group_rule']['protocol'] = 'udp'
480 self.neutronclient.create_security_group_rule(body)