2 # Copyright 2018 Cisco Systems, Inc. All rights reserved.
4 # Licensed under the Apache License, Version 2.0 (the "License"); you may
5 # not use this file except in compliance with the License. You may obtain
6 # a copy of the License at
8 # http://www.apache.org/licenses/LICENSE-2.0
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 # License for the specific language governing permissions and limitations
17 # This module takes care of chaining networks, ports and vms
19 """NFVBENCH CHAIN DISCOVERY/STAGING.
21 This module takes care of staging/discovering all resources that are participating in a
22 benchmarking session: flavors, networks, ports, VNF instances.
23 If a resource is discovered with the same name, it will be reused.
24 Otherwise it will be created.
26 ChainManager: manages VM image, flavor, the staging discovery of all chains
28 Chain: manages one chain, has 2 or more networks and 1 or more instances
29 ChainNetwork: manages 1 network in a chain
30 ChainVnf: manages 1 VNF instance in a chain, has 2 ports
31 ChainVnfPort: manages 1 instance port
33 ChainManager-->Chain(*)
34 Chain-->ChainNetwork(*),ChainVnf(*)
35 ChainVnf-->ChainVnfPort(2)
37 Once created/discovered, instances are checked to be in the active state (ready to pass traffic)
38 Configuration parameters that will influence how these resources are staged/related:
39 - openstack or no openstack
42 - number of VNF in each chain (PVP, PVVP)
43 - SRIOV and middle port SRIOV for port types
44 - whether networks are shared across chains or not
46 There is no traffic generation involved in this module.
53 from neutronclient.neutron import client as neutronclient
54 from novaclient.client import Client
56 from attrdict import AttrDict
57 from .chain_router import ChainRouter
60 from .specs import ChainType
# NOTE(review): this file is a partially-elided numbered listing -- the integer
# starting each line is the original file's line number, and gaps in that
# numbering mean source lines are missing here. Code is kept byte-identical;
# only comments are added/fixed.
61 # Left and right index for network and port lists
64 # L3 traffic edge networks are at the end of networks list
# NOTE(review): the LEFT/RIGHT (and edge network) index constants that the two
# comments above describe are not visible -- original lines 62-63 and 65-66
# appear to be elided from this listing.
67 # Name of the VM config file
68 NFVBENCH_CFG_FILENAME = 'nfvbenchvm.conf'
69 # full pathname of the VM config in the VM
70 NFVBENCH_CFG_VM_PATHNAME = os.path.join('/etc/', NFVBENCH_CFG_FILENAME)
71 # full path of the boot shell script template file on the server where nfvbench runs
# NOTE(review): original line 73 (a middle path component of the join,
# presumably the template directory name) is elided -- confirm upstream.
72 BOOT_SCRIPT_PATHNAME = os.path.join(os.path.dirname(os.path.abspath(__file__)),
74 NFVBENCH_CFG_FILENAME)
class ChainException(Exception):
    """Exception raised while operating the chains.

    All chain staging/discovery errors in this module raise this single
    module-specific exception type so callers can catch one class.
    """
class NetworkEncaps(object):
    """Network encapsulation.

    No members are visible in this source listing; presumably a marker/base
    class for network encapsulation handling -- confirm against the full file.
    """
# NOTE(review): partially-elided numbered listing -- the leading integer on
# each line is the original file's line number; gaps mean elided source lines.
# Code kept byte-identical; comments only are added.
84 class ChainFlavor(object):
85 """Class to manage the chain flavor."""
87 def __init__(self, flavor_name, flavor_dict, comp):
88 """Create a flavor."""
# Keep the name so delete() can log it later.
89 self.name = flavor_name
# Reuse path: look up a pre-existing flavor with the same name first.
# NOTE(review): 'self.comp' is read before any visible assignment -- the
# 'self.comp = comp' line is presumably among the elided lines; confirm.
91 self.flavor = self.comp.find_flavor(flavor_name)
95 LOG.info("Reused flavor '%s'", flavor_name)
# Create path: extra_specs are popped from the flavor dict so they can be
# applied separately with set_keys() once the flavor exists.
97 extra_specs = flavor_dict.pop('extra_specs', None)
99 self.flavor = comp.create_flavor(flavor_name,
102 LOG.info("Created flavor '%s'", flavor_name)
104 self.flavor.set_keys(extra_specs)
# NOTE(review): the 'def delete(self):' line (orig 106) is elided here.
107 """Delete this flavor."""
# Only delete a flavor this run created; a reused (pre-existing) one is kept.
108 if not self.reuse and self.flavor:
110 LOG.info("Flavor '%s' deleted", self.name)
# NOTE(review): partially-elided numbered listing -- gaps in the leading line
# numbers mean elided source lines (continue/break/return statements, try/
# except scaffolding, some 'def' lines). Code kept byte-identical.
113 class ChainVnfPort(object):
114 """A port associated to one VNF in the chain."""
116 def __init__(self, name, vnf, chain_network, vnic_type):
117 """Create or reuse a port on a given network.
119 if vnf.instance is None the VNF instance is not reused and this ChainVnfPort instance must
121 Otherwise vnf.instance is a reused VNF instance and this ChainVnfPort instance must
122 find an existing port to reuse that matches the port requirements: same attached network,
123 instance, name, vnic type
125 name: name for this port
126 vnf: ChainVnf instance that owns this port
127 chain_network: ChainNetwork instance where this port should attach
128 vnic_type: required vnic type for this port
132 self.manager = vnf.manager
135 self.floating_ip = None
137 # VNF instance is reused, we need to find an existing port that matches this instance
139 # discover ports attached to this instance
140 port_list = self.manager.get_ports_from_network(chain_network)
# A matching port must have the same name, vnic type and owning instance.
141 for port in port_list:
142 if port['name'] != name:
144 if port['binding:vnic_type'] != vnic_type:
146 if port['device_id'] == vnf.get_uuid():
148 LOG.info('Reusing existing port %s mac=%s', name, port['mac_address'])
# Raised when the loop above finds no matching port for the reused instance.
151 raise ChainException('Cannot find matching port')
153 # VNF instance is not created yet, we need to create a new port
157 'network_id': chain_network.get_uuid(),
158 'binding:vnic_type': vnic_type
161 port = self.manager.neutron_client.create_port(body)
162 self.port = port['port']
163 LOG.info('Created port %s', name)
# Best effort: disable port security so traffic is not filtered; a failure
# here is logged and ignored (see the two LOG lines below).
165 self.manager.neutron_client.update_port(self.port['id'], {
167 'security_groups': [],
168 'port_security_enabled': False,
171 LOG.info('Security disabled on port %s', name)
173 LOG.info('Failed to disable security on port %s (ignored)', name)
# NOTE(review): the 'def get_mac(self):' line (orig 175) is elided here.
176 """Get the MAC address for this port."""
177 return self.port['mac_address']
# NOTE(review): the 'def get_ip(self):' line (orig 179) is elided here.
180 """Get the IP address for this port."""
181 return self.port['fixed_ips'][0]['ip_address']
183 def set_floating_ip(self, chain_network):
184 # create and add floating ip to port
186 self.floating_ip = self.manager.neutron_client.create_floatingip({
188 'floating_network_id': chain_network.get_uuid(),
189 'port_id': self.port['id'],
190 'description': 'nfvbench floating ip for port:' + self.port['name'],
192 LOG.info('Floating IP %s created and associated on port %s',
193 self.floating_ip['floating_ip_address'], self.name)
194 return self.floating_ip['floating_ip_address']
# On failure, fall back to returning the port's fixed IP address.
# NOTE(review): grammar typo in the log message below ('Failed to created')
# is a runtime string and is left untouched in this comments-only pass.
196 LOG.info('Failed to created and associated floating ip on port %s (ignored)', self.name)
197 return self.port['fixed_ips'][0]['ip_address']
# NOTE(review): the 'def delete(self):' line (orig ~199) is elided here.
200 """Delete this port instance."""
# Never delete reused (pre-existing) ports.
201 if self.reuse or not self.port:
# Retry the deletion up to generic_retry_count times, sleeping
# generic_poll_sec between attempts.
203 for _ in range(0, self.manager.config.generic_retry_count):
205 self.manager.neutron_client.delete_port(self.port['id'])
206 LOG.info("Deleted port %s", self.name)
208 self.manager.neutron_client.delete_floatingip(self.floating_ip['id'])
209 LOG.info("Deleted floating IP %s", self.floating_ip['description'])
212 time.sleep(self.manager.config.generic_poll_sec)
213 LOG.error('Unable to delete port: %s', self.name)
# NOTE(review): partially-elided numbered listing -- gaps in the leading line
# numbers mean elided source lines (else branches, dict literal openings,
# some 'def' lines). Code kept byte-identical; comments only are added.
216 class ChainNetwork(object):
217 """Could be a shared network across all chains or a chain private network."""
219 def __init__(self, manager, network_config, chain_id=None, lookup_only=False,
221 """Create a network for given chain.
223 network_config: a dict containing the network properties
224 (name, segmentation_id and physical_network)
225 chain_id: to which chain the networks belong.
226 a None value will mean that these networks are shared by all chains
227 suffix: a suffix to add to the network name (if not None)
229 self.manager = manager
231 self.name = network_config.name
233 # the name itself can be either a string or a list of names indexed by chain ID
234 if isinstance(network_config.name, tuple):
235 self.name = network_config.name[chain_id]
237 # network_config.name is a prefix string
238 self.name = network_config.name + str(chain_id)
240 self.name = self.name + suffix
# segmentation_id auto-increments per chain when a single base value is
# configured (auto_index=True); physical_network does not.
241 self.segmentation_id = self._get_item(network_config.segmentation_id,
242 chain_id, auto_index=True)
243 self.physical_network = self._get_item(network_config.physical_network, chain_id)
# Router name only applies in L3 router mode, when the config provides one.
248 if manager.config.l3_router and hasattr(network_config, 'router_name'):
249 self.router_name = network_config.router_name
251 self._setup(network_config, lookup_only)
254 LOG.error("Cannot find network %s", self.name)
256 LOG.error("Error creating network %s", self.name)
260 def _get_item(self, item_field, index, auto_index=False):
261 """Retrieve an item from a list or a single value.
263 item_field: can be None, a tuple or a single value
264 index: if None is same as 0, else is the index for a chain
265 auto_index: if true will automatically get the final value by adding the
266 index to the base value (if full list not provided)
268 If the item_field is not a tuple, it is considered same as a tuple with same value at any
270 If a list is provided, its length must be > index
276 if isinstance(item_field, tuple):
278 return item_field[index]
# Tuple too short for the requested chain index.
280 raise ChainException("List %s is too short for chain index %d" %
281 (str(item_field), index))
282 # single value is configured
# auto_index path: derive the per-chain value from the single base value.
284 return item_field + index
287 def _setup(self, network_config, lookup_only):
288 # Lookup if there is a matching network with same name
289 networks = self.manager.neutron_client.list_networks(name=self.name)
290 if networks['networks']:
291 network = networks['networks'][0]
292 # a network of same name already exists, we need to verify it has the same
# Reuse is only allowed when segmentation id and physical network match
# the configuration; otherwise fail loudly rather than benchmark the
# wrong network.
294 if self.segmentation_id:
295 if network['provider:segmentation_id'] != self.segmentation_id:
296 raise ChainException("Mismatch of 'segmentation_id' for reused "
297 "network '{net}'. Network has id '{seg_id1}', "
298 "configuration requires '{seg_id2}'."
299 .format(net=self.name,
300 seg_id1=network['provider:segmentation_id'],
301 seg_id2=self.segmentation_id))
303 if self.physical_network:
304 if network['provider:physical_network'] != self.physical_network:
305 raise ChainException("Mismatch of 'physical_network' for reused "
306 "network '{net}'. Network has '{phys1}', "
307 "configuration requires '{phys2}'."
308 .format(net=self.name,
309 phys1=network['provider:physical_network'],
310 phys2=self.physical_network))
312 LOG.info('Reusing existing network %s', self.name)
314 self.network = network
# lookup_only mode never creates networks, so a miss is fatal here.
317 raise ChainException('Network %s not found' % self.name)
# Creation path: build the provider network body from the config.
# NOTE(review): the opening of the 'body' dict literal is elided here.
321 'admin_state_up': True
324 if network_config.network_type:
325 body['network']['provider:network_type'] = network_config.network_type
326 if self.segmentation_id:
327 body['network']['provider:segmentation_id'] = self.segmentation_id
328 if self.physical_network:
329 body['network']['provider:physical_network'] = self.physical_network
330 self.network = self.manager.neutron_client.create_network(body)['network']
331 # create associated subnet, all subnets have the same name (which is ok since
332 # we do not need to address them directly by name)
334 'subnet': {'name': network_config.subnet,
335 'cidr': network_config.cidr,
336 'network_id': self.network['id'],
# DHCP is disabled: the VM interface addresses are configured statically.
337 'enable_dhcp': False,
339 'dns_nameservers': []}
341 subnet = self.manager.neutron_client.create_subnet(body)['subnet']
342 # add subnet id to the network dict since it has just been added
343 self.network['subnets'] = [subnet['id']]
344 LOG.info('Created network: %s', self.name)
# NOTE(review): the 'def get_uuid(self):' line and docstring opener are
# elided here.
348 Extract UUID of this network.
350 :return: UUID of this network
352 return self.network['id']
# NOTE(review): the 'def get_vlan(self):' line is elided here.
356 Extract vlan for this network.
358 :return: vlan ID for this network
360 if self.network['provider:network_type'] != 'vlan':
361 raise ChainException('Trying to retrieve VLAN id for non VLAN network')
362 return self.network['provider:segmentation_id']
# NOTE(review): the 'def get_vxlan(self):' line is elided here.
366 Extract VNI for this network.
368 :return: VNI ID for this network
371 return self.network['provider:segmentation_id']
# NOTE(review): the 'def delete(self):' line is elided here.
374 """Delete this network."""
# Never delete reused (pre-existing) networks; retry deletion with a sleep
# between attempts, since ports may still be releasing.
375 if not self.reuse and self.network:
376 for retry in range(0, self.manager.config.generic_retry_count):
378 self.manager.neutron_client.delete_network(self.network['id'])
379 LOG.info("Deleted network: %s", self.name)
382 LOG.info('Error deleting network %s (retry %d/%d)...',
385 self.manager.config.generic_retry_count)
386 time.sleep(self.manager.config.generic_poll_sec)
387 LOG.error('Unable to delete network: %s', self.name)
# NOTE(review): partially-elided numbered listing -- gaps in the leading line
# numbers mean elided source lines (else branches, list initializations,
# decorators, some 'def' lines). Code kept byte-identical; comments only.
390 class ChainVnf(object):
391 """A class to represent a VNF in a chain."""
393 def __init__(self, chain, vnf_id, networks):
394 """Reuse a VNF instance with same characteristics or create a new VNF instance.
396 chain: the chain where this vnf belongs
397 vnf_id: indicates the index of this vnf in its chain (first vnf=0)
398 networks: the list of all networks (ChainNetwork) of the current chain
400 self.manager = chain.manager
# Instance name is the loop VM name + chain id, with a per-VNF suffix only
# for multi-VNF chains (more than 2 networks means more than 1 VM).
403 self.name = self.manager.config.loop_vm_name + str(chain.chain_id)
404 if len(networks) > 2:
405 # we will have more than 1 VM in each chain
406 self.name += '-' + str(vnf_id)
407 # A list of ports for this chain
408 # There are normally 2 ports carrying traffic (index 0, and index 1) and
409 # potentially multiple idle ports not carrying traffic (index 2 and up)
410 # For example if 7 idle interfaces are requested, the corresp. ports will be
413 self.management_port = None
419 self.idle_networks = []
422 # the vnf_id is conveniently also the starting index in networks
423 # for the left and right networks associated to this VNF
# L3 router mode passes 4 networks (2 internal + 2 edge) to _setup().
424 if self.manager.config.l3_router:
425 self._setup(networks[vnf_id:vnf_id + 4])
427 self._setup(networks[vnf_id:vnf_id + 2])
429 LOG.error("Error creating VNF %s", self.name)
# Build the nfvbenchvm config file content from the boot script template,
# filling in MACs/IPs/gateways for this VNF's position in the chain.
433 def _get_vm_config(self, remote_mac_pair):
434 config = self.manager.config
435 devices = self.manager.generator_config.devices
# L3 router mode: gateways/MACs come from the neutron router edge ports.
438 tg_gateway1_ip = self.routers[LEFT].ports[1]['fixed_ips'][0][
439 'ip_address'] # router edge ip left
440 tg_gateway2_ip = self.routers[RIGHT].ports[1]['fixed_ips'][0][
441 'ip_address'] # router edge ip right
442 tg_mac1 = self.routers[LEFT].ports[1]['mac_address'] # router edge mac left
443 tg_mac2 = self.routers[RIGHT].ports[1]['mac_address'] # router edge mac right
444 # edge cidr mask left
445 vnf_gateway1_cidr = \
446 self.ports[LEFT].get_ip() + self.__get_network_mask(
447 self.manager.config.edge_networks.left.cidr)
448 # edge cidr mask right
449 vnf_gateway2_cidr = \
450 self.ports[RIGHT].get_ip() + self.__get_network_mask(
451 self.manager.config.edge_networks.right.cidr)
# L3 router mode requires the VPP forwarder inside the VM.
452 if config.vm_forwarder != 'vpp':
453 raise ChainException(
454 'L3 router mode imply to set VPP as VM forwarder.'
455 'Please update your config file with: vm_forwarder: vpp')
# Non-router mode: gateways come from the traffic generator config and the
# next-hop MACs from the pre-computed remote mac pair.
457 tg_gateway1_ip = devices[LEFT].tg_gateway_ip_addrs
458 tg_gateway2_ip = devices[RIGHT].tg_gateway_ip_addrs
459 tg_mac1 = remote_mac_pair[0]
460 tg_mac2 = remote_mac_pair[1]
462 g1cidr = devices[LEFT].get_gw_ip(
463 self.chain.chain_id) + self.__get_network_mask(
464 self.manager.config.internal_networks.left.cidr)
465 g2cidr = devices[RIGHT].get_gw_ip(
466 self.chain.chain_id) + self.__get_network_mask(
467 self.manager.config.internal_networks.right.cidr)
469 vnf_gateway1_cidr = g1cidr
470 vnf_gateway2_cidr = g2cidr
472 with open(BOOT_SCRIPT_PATHNAME, 'r') as boot_script:
473 content = boot_script.read()
# NOTE(review): the opening of the 'vm_config' dict literal is elided here.
475 'forwarder': config.vm_forwarder,
476 'intf_mac1': self.ports[LEFT].get_mac(),
477 'intf_mac2': self.ports[RIGHT].get_mac(),
478 'tg_gateway1_ip': tg_gateway1_ip,
479 'tg_gateway2_ip': tg_gateway2_ip,
480 'tg_net1': devices[LEFT].ip_addrs,
481 'tg_net2': devices[RIGHT].ip_addrs,
482 'vnf_gateway1_cidr': vnf_gateway1_cidr,
483 'vnf_gateway2_cidr': vnf_gateway2_cidr,
486 'vif_mq_size': config.vif_multiqueue_size,
487 'num_mbufs': config.num_mbufs
# Optional management interface entries for the VM config file.
489 if self.manager.config.use_management_port:
490 mgmt_ip = self.management_port.port['fixed_ips'][0]['ip_address']
491 mgmt_mask = self.__get_network_mask(self.manager.config.management_network.cidr)
492 vm_config['intf_mgmt_cidr'] = mgmt_ip + mgmt_mask
493 vm_config['intf_mgmt_ip_gw'] = self.manager.config.management_network.gateway
494 vm_config['intf_mac_mgmt'] = self.management_port.port['mac_address']
496 # Interface management config left empty to avoid error in VM spawn
497 # if nfvbench config has values for management network but use_management_port=false
498 vm_config['intf_mgmt_cidr'] = ''
499 vm_config['intf_mgmt_ip_gw'] = ''
500 vm_config['intf_mac_mgmt'] = ''
501 return content.format(**vm_config)
# Return the '/<prefix-len>' suffix of a CIDR string (e.g. '1.2.3.0/24' -> '/24').
# NOTE(review): this is called as self.__get_network_mask(cidr) with a single
# argument and takes no 'self', so the elided original line 503 is presumably
# an '@staticmethod' decorator -- confirm upstream.
504 def __get_network_mask(network):
505 return '/' + network.split('/')[1]
507 def _get_vnic_type(self, port_index):
508 """Get the right vnic type for given port index.
510 If SR-IOV is specified, middle ports in multi-VNF chains
511 can use vswitch or SR-IOV based on config.use_sriov_middle_net
513 if self.manager.config.sriov:
514 chain_length = self.chain.get_length()
515 if self.manager.config.use_sriov_middle_net or chain_length == 1:
517 if self.vnf_id == 0 and port_index == 0:
518 # first VNF in chain must use sriov for left port
520 if (self.vnf_id == chain_length - 1) and (port_index == 1):
521 # last VNF in chain must use sriov for right port
# NOTE(review): the return statements of this method are elided here.
525 def _get_idle_networks_ports(self):
526 """Get the idle networks for PVP or PVVP chain (non shared net only)
528 For EXT packet path or shared net, returns empty list.
529 For PVP, PVVP these networks will be created if they do not exist.
530 chain_id: to which chain the networks belong.
531 a None value will mean that these networks are shared by all chains
535 config = self.manager.config
536 chain_id = self.chain.chain_id
537 idle_interfaces_per_vm = config.idle_interfaces_per_vm
# Nothing to do for EXT chains, shared networks or zero idle interfaces.
538 if config.service_chain == ChainType.EXT or chain_id is None or \
539 idle_interfaces_per_vm == 0:
542 # Make a copy of the idle networks dict as we may have to modify the
544 idle_network_cfg = AttrDict(config.idle_networks)
# Derive a per-chain base segmentation id so chains do not collide.
545 if idle_network_cfg.segmentation_id:
546 segmentation_id = idle_network_cfg.segmentation_id + \
547 chain_id * idle_interfaces_per_vm
549 segmentation_id = None
551 # create as many idle networks and ports as requested
552 for idle_index in range(idle_interfaces_per_vm):
553 if config.service_chain == ChainType.PVP:
554 suffix = '.%d' % (idle_index)
556 suffix = '.%d.%d' % (self.vnf_id, idle_index)
557 port_name = self.name + '-idle' + str(idle_index)
558 # update the segmentation id based on chain id and idle index
560 idle_network_cfg.segmentation_id = segmentation_id + idle_index
561 port_name = port_name + "." + str(segmentation_id)
563 networks.append(ChainNetwork(self.manager,
567 ports.append(ChainVnfPort(port_name,
569 networks[idle_index],
572 # need to cleanup all successful networks
578 self.idle_networks = networks
579 self.idle_ports = ports
581 def _setup(self, networks):
582 flavor_id = self.manager.flavor.flavor.id
583 # Check if we can reuse an instance with same name
584 for instance in self.manager.existing_instances:
585 if instance.name == self.name:
# NOTE(review): the 'instance_left = LEFT' line is elided here.
587 instance_right = RIGHT
588 # In case of L3 traffic instance use edge networks
589 if self.manager.config.l3_router:
590 instance_left = EDGE_LEFT
591 instance_right = EDGE_RIGHT
592 # Verify that other instance characteristics match
593 if instance.flavor['id'] != flavor_id:
594 self._reuse_exception('Flavor mismatch')
595 if instance.status != "ACTIVE":
596 self._reuse_exception('Matching instance is not in ACTIVE state')
597 # The 2 networks for this instance must also be reused
598 if not networks[instance_left].reuse:
599 self._reuse_exception('network %s is new' % networks[instance_left].name)
600 if not networks[instance_right].reuse:
601 self._reuse_exception('network %s is new' % networks[instance_right].name)
602 # instance.networks have the network names as keys:
603 # {'nfvbench-rnet0': ['192.168.2.10'], 'nfvbench-lnet0': ['192.168.1.8']}
604 if networks[instance_left].name not in instance.networks:
605 self._reuse_exception('Left network mismatch')
606 if networks[instance_right].name not in instance.networks:
607 self._reuse_exception('Right network mismatch')
610 self.instance = instance
611 LOG.info('Reusing existing instance %s on %s',
612 self.name, self.get_hypervisor_name())
613 # create management port if needed
614 if self.manager.config.use_management_port:
615 self.management_port = ChainVnfPort(self.name + '-mgmt', self,
616 self.manager.management_network, 'normal')
617 ip = self.management_port.port['fixed_ips'][0]['ip_address']
618 if self.manager.config.use_floating_ip:
619 ip = self.management_port.set_floating_ip(self.manager.floating_ip_network)
620 LOG.info("Management interface will be active using IP: %s, "
621 "and you can connect over SSH with login: nfvbench and password: nfvbench", ip)
622 # create or reuse/discover 2 ports per instance
623 if self.manager.config.l3_router:
# NOTE(review): the 'for index in ...' loop headers for both branches are
# elided here.
625 self.ports.append(ChainVnfPort(self.name + '-' + str(index),
628 self._get_vnic_type(index)))
631 self.ports.append(ChainVnfPort(self.name + '-' + str(index),
634 self._get_vnic_type(index)))
636 # create idle networks and ports only if instance is not reused
637 # if reused, we do not care about idle networks/ports
639 self._get_idle_networks_ports()
641 # Create neutron routers for L3 traffic use case
642 if self.manager.config.l3_router and self.manager.openstack:
643 internal_nets = networks[:2]
644 if self.manager.config.service_chain == ChainType.PVP:
645 edge_nets = networks[2:]
647 edge_nets = networks[3:]
648 subnets_left = [internal_nets[0], edge_nets[0]]
# Static routes: traffic-generator-bound traffic goes to the TG gateway,
# chain-bound traffic to the VNF's own port IP.
649 routes_left = [{'destination': self.manager.config.traffic_generator.ip_addrs[0],
650 'nexthop': self.manager.config.traffic_generator.tg_gateway_ip_addrs[
652 {'destination': self.manager.config.traffic_generator.ip_addrs[1],
653 'nexthop': self.ports[0].get_ip()}]
655 ChainRouter(self.manager, edge_nets[0].router_name, subnets_left, routes_left))
656 subnets_right = [internal_nets[1], edge_nets[1]]
657 routes_right = [{'destination': self.manager.config.traffic_generator.ip_addrs[0],
658 'nexthop': self.ports[1].get_ip()},
659 {'destination': self.manager.config.traffic_generator.ip_addrs[1],
660 'nexthop': self.manager.config.traffic_generator.tg_gateway_ip_addrs[
663 ChainRouter(self.manager, edge_nets[1].router_name, subnets_right, routes_right))
664 # Overload gateway_ips property with router ip address for ARP and traffic calls
665 self.manager.generator_config.devices[LEFT].set_gw_ip(
666 self.routers[LEFT].ports[0]['fixed_ips'][0]['ip_address']) # router edge ip left)
667 self.manager.generator_config.devices[RIGHT].set_gw_ip(
668 self.routers[RIGHT].ports[0]['fixed_ips'][0]['ip_address']) # router edge ip right)
670 # if no reuse, actual vm creation is deferred after all ports in the chain are created
671 # since we need to know the next mac in a multi-vnf chain
673 def create_vnf(self, remote_mac_pair):
674 """Create the VNF instance if it does not already exist."""
675 if self.instance is None:
# Assemble the ordered port id list: management port first (if any), then
# the traffic ports, then the idle ports.
677 if self.manager.config.use_management_port:
678 port_ids.append({'port-id': self.management_port.port['id']})
679 port_ids.extend([{'port-id': vnf_port.port['id']} for vnf_port in self.ports])
681 for idle_port in self.idle_ports:
682 port_ids.append({'port-id': idle_port.port['id']})
683 vm_config = self._get_vm_config(remote_mac_pair)
684 az = self.manager.placer.get_required_az()
685 server = self.manager.comp.create_server(self.name,
686 self.manager.image_instance,
687 self.manager.flavor.flavor,
# The VM config file is injected into the guest at boot time.
694 files={NFVBENCH_CFG_VM_PATHNAME: vm_config})
696 self.instance = server
697 if self.manager.placer.is_resolved():
698 LOG.info('Created instance %s on %s', self.name, az)
700 # the location is undetermined at this point
701 # self.get_hypervisor_name() will return None
702 LOG.info('Created instance %s - waiting for placement resolution...', self.name)
703 # here we MUST wait until this instance is resolved otherwise subsequent
704 # VNF creation can be placed in other hypervisors!
705 config = self.manager.config
# Ceiling division: total wait budget / poll period.
706 max_retries = int((config.check_traffic_time_sec +
707 config.generic_poll_sec - 1) / config.generic_poll_sec)
709 for retry in range(max_retries):
710 status = self.get_status()
711 if status == 'ACTIVE':
712 hyp_name = self.get_hypervisor_name()
713 LOG.info('Instance %s is active and has been placed on %s',
715 self.manager.placer.register_full_name(hyp_name)
717 if status == 'ERROR':
718 raise ChainException('Instance %s creation error: %s' %
720 self.instance.fault['message']))
721 LOG.info('Waiting for instance %s to become active (retry %d/%d)...',
722 self.name, retry + 1, max_retries + 1)
723 time.sleep(config.generic_poll_sec)
726 LOG.error('Instance %s creation timed out', self.name)
727 raise ChainException('Instance %s creation timed out' % self.name)
730 raise ChainException('Unable to create instance: %s' % (self.name))
# Helper to abort instance reuse with a uniform error message.
732 def _reuse_exception(self, reason):
733 raise ChainException('Instance %s cannot be reused (%s)' % (self.name, reason))
735 def get_status(self):
736 """Get the status of this instance."""
# Re-poll the server only while it is not yet ACTIVE.
737 if self.instance.status != 'ACTIVE':
738 self.instance = self.manager.comp.poll_server(self.instance)
739 return self.instance.status
741 def get_hostname(self):
742 """Get the hypervisor host name running this VNF instance."""
# Admin credentials can read it from the server; otherwise it must be
# provided explicitly in the config.
743 if self.manager.is_admin:
744 hypervisor_hostname = getattr(self.instance, 'OS-EXT-SRV-ATTR:hypervisor_hostname')
746 hypervisor_hostname = self.manager.config.hypervisor_hostname
747 if not hypervisor_hostname:
748 raise ChainException('Hypervisor hostname parameter is mandatory')
749 return hypervisor_hostname
751 def get_host_ip(self):
752 """Get the IP address of the host where this instance runs.
754 return: the IP address
# The resolved value is cached on self.host_ip.
757 self.host_ip = self.manager.comp.get_hypervisor(self.get_hostname()).host_ip
760 def get_hypervisor_name(self):
761 """Get hypervisor name (az:hostname) for this VNF instance."""
763 if self.manager.is_admin:
764 az = getattr(self.instance, 'OS-EXT-AZ:availability_zone')
766 az = self.manager.config.availability_zone
768 raise ChainException('Availability zone parameter is mandatory')
769 hostname = self.get_hostname()
771 return az + ':' + hostname
# NOTE(review): the 'def get_uuid(self):' line (orig 775) is elided here.
776 """Get the uuid for this instance."""
777 return self.instance.id
779 def delete(self, forced=False):
780 """Delete this VNF instance."""
# Reused instances are never deleted (unless forced -- the guarding
# condition line is elided here).
782 LOG.info("Instance %s not deleted (reused)", self.name)
785 self.manager.comp.delete_server(self.instance)
786 LOG.info("Deleted instance %s", self.name)
787 if self.manager.config.use_management_port:
788 self.management_port.delete()
# Also delete all ports and any idle networks owned by this VNF.
789 for port in self.ports:
791 for port in self.idle_ports:
793 for network in self.idle_networks:
# NOTE(review): partially-elided numbered listing. The 'class Chain(object):'
# statement itself (orig line 797) is elided -- this span starts at the class
# docstring. Code kept byte-identical; comments only are added.
798 """A class to manage a single chain.
800 Can handle any type of chain (EXT, PVP, PVVP)
803 def __init__(self, chain_id, manager):
804 """Create a new chain.
806 chain_id: chain index (first chain is 0)
807 manager: the chain manager that owns all chains
809 self.chain_id = chain_id
810 self.manager = manager
811 self.encaps = manager.encaps
# Networks are created/discovered first; VNFs are attached to them below.
815 self.networks = manager.get_networks(chain_id)
816 # For external chain VNFs can only be discovered from their MAC addresses
817 # either from config or from ARP
818 if manager.config.service_chain != ChainType.EXT:
819 for chain_instance_index in range(self.get_length()):
820 self.instances.append(ChainVnf(self,
821 chain_instance_index,
823 # at this point new VNFs are not created yet but
824 # verify that all discovered VNFs are on the same hypervisor
825 self._check_hypervisors()
826 # now that all VNF ports are created we need to calculate the
827 # left/right remote MAC for each VNF in the chain
828 # before actually creating the VNF itself
829 rem_mac_pairs = self._get_remote_mac_pairs()
830 for instance in self.instances:
831 rem_mac_pair = rem_mac_pairs.pop(0)
832 instance.create_vnf(rem_mac_pair)
# Verify all discovered (reused) instances share one hypervisor; traffic
# measurement assumes the whole chain sits on the same compute node.
837 def _check_hypervisors(self):
838 common_hypervisor = None
839 for instance in self.instances:
840 # get the full hypervisor name (az:compute)
841 hname = instance.get_hypervisor_name()
843 if common_hypervisor:
844 if hname != common_hypervisor:
845 raise ChainException('Discovered instances on different hypervisors:'
846 ' %s and %s' % (hname, common_hypervisor))
848 common_hypervisor = hname
849 if common_hypervisor:
850 # check that the common hypervisor name matches the requested hypervisor name
851 # and set the name to be used by all future instances (if any)
852 if not self.manager.placer.register_full_name(common_hypervisor):
853 raise ChainException('Discovered hypervisor placement %s is incompatible' %
856 def get_length(self):
857 """Get the number of VNF in the chain."""
858 # Take into account 2 edge networks for routers
859 return len(self.networks) - 3 if self.manager.config.l3_router else len(self.networks) - 1
861 def _get_remote_mac_pairs(self):
862 """Get the list of remote mac pairs for every VNF in the chain.
864 Traverse the chain from left to right and establish the
865 left/right remote MAC for each VNF in the chain.
868 mac sequence: tg_src_mac, vm0-mac0, vm0-mac1, tg_dst_mac
869 must produce [[tg_src_mac, tg_dst_mac]] or looking at index in mac sequence: [[0, 3]]
870 the mac pair is what the VNF at that position (index 0) sees as next hop mac left and right
873 tg_src_mac, vm0-mac0, vm0-mac1, vm1-mac0, vm1-mac1, tg_dst_mac
874 Must produce the following list:
875 [[tg_src_mac, vm1-mac0], [vm0-mac1, tg_dst_mac]] or index: [[0, 3], [2, 5]]
877 General case with 3 VMs in chain, the list of consecutive macs (left to right):
878 tg_src_mac, vm0-mac0, vm0-mac1, vm1-mac0, vm1-mac1, vm2-mac0, vm2-mac1, tg_dst_mac
879 Must produce the following list:
880 [[tg_src_mac, vm1-mac0], [vm0-mac1, vm2-mac0], [vm1-mac1, tg_dst_mac]]
881 or index: [[0, 3], [2, 5], [4, 7]]
883 The series pattern is pretty clear: [[n, n+3],... ] where n is multiple of 2
885 # line up all mac from left to right
886 mac_seq = [self.manager.generator_config.devices[LEFT].mac]
887 for instance in self.instances:
888 mac_seq.append(instance.ports[0].get_mac())
889 mac_seq.append(instance.ports[1].get_mac())
890 mac_seq.append(self.manager.generator_config.devices[RIGHT].mac)
# NOTE(review): the initialization of 'rem_mac_pairs'/'base' and the 'base'
# increment inside the loop are elided here.
893 for _ in self.instances:
894 rem_mac_pairs.append([mac_seq[base], mac_seq[base + 3]])
898 def get_instances(self):
899 """Return all instances for this chain."""
900 return self.instances
902 def get_vlan(self, port_index):
903 """Get the VLAN id on a given port.
905 port_index: left port is 0, right port is 1
906 return: the vlan_id or None if there is no vlan tagging
908 # for port 1 we need to return the VLAN of the last network in the chain
909 # The networks array contains 2 networks for PVP [left, right]
910 # and 3 networks in the case of PVVP [left.middle,right]
912 # this will pick the last item in array
# NOTE(review): the line remapping port_index 1 to -1 is elided here.
914 return self.networks[port_index].get_vlan()
916 def get_vxlan(self, port_index):
917 """Get the VXLAN id on a given port.
919 port_index: left port is 0, right port is 1
920 return: the vxlan_id or None if there is no vxlan
922 # for port 1 we need to return the VLAN of the last network in the chain
923 # The networks array contains 2 networks for PVP [left, right]
924 # and 3 networks in the case of PVVP [left.middle,right]
926 # this will pick the last item in array
928 return self.networks[port_index].get_vxlan()
930 def get_dest_mac(self, port_index):
931 """Get the dest MAC on a given port.
933 port_index: left port is 0, right port is 1
937 # for right port, use the right port MAC of the last (right most) VNF in chain
938 return self.instances[-1].ports[1].get_mac()
939 # for left port use the left port MAC of the first (left most) VNF in chain
940 return self.instances[0].ports[0].get_mac()
942 def get_network_uuids(self):
943 """Get UUID of networks in this chain from left to right (order is important).
945 :return: list of UUIDs of networks (2 or 3 elements)
947 return [net['id'] for net in self.networks]
949 def get_host_ips(self):
950 """Return the IP address(es) of the host compute nodes used for this chain.
952 :return: a list of 1 or 2 IP addresses
954 return [vnf.get_host_ip() for vnf in self.instances]
956 def get_compute_nodes(self):
957 """Return the name of the host compute nodes used for this chain.
959 :return: a list of 1 host name in the az:host format
961 # Since all chains go through the same compute node(s) we can just retrieve the
962 # compute node name(s) for the first chain
963 return [vnf.get_hypervisor_name() for vnf in self.instances]
# NOTE(review): the 'def delete(self):' line is elided here.
966 """Delete this chain."""
967 for instance in self.instances:
969 # only delete if these are chain private networks (not shared)
970 if not self.manager.config.service_chain_shared_net:
971 for network in self.networks:
class InstancePlacer(object):
    """A class to manage instance placement for all VNFs in all chains.

    A full az string is made of 2 parts AZ and hypervisor.
    The placement is resolved when both parts az and hypervisor names are known.
    """

    def __init__(self, req_az, req_hyp):
        """Create a new instance placer.

        req_az: requested AZ (can be None or empty if no preference)
        req_hyp: requested hypervisor name (can be None of empty if no preference)
                 can be any of 'nova:', 'comp1', 'nova:comp1'
                 if it is a list, only the first item is used (backward compatibility in config)

        req_az is ignored if req_hyp has an az part
        all other parts beyond the first 2 are ignored in req_hyp
        """
        # if passed a list just pick the first item
        if req_hyp and isinstance(req_hyp, list):
            req_hyp = req_hyp[0]
        # only pick first part of az
        if req_az and ':' in req_az:
            req_az = req_az.split(':')[0]
        if req_hyp:
            # check if requested hypervisor string has an AZ part
            split_hyp = req_hyp.split(':')
            if len(split_hyp) > 1:
                # override the AZ part and hypervisor part
                req_az = split_hyp[0]
                req_hyp = split_hyp[1]
        self.requested_az = req_az if req_az else ''
        self.requested_hyp = req_hyp if req_hyp else ''
        # Nova can accept AZ only (e.g. 'nova:', use any hypervisor in that AZ)
        # or hypervisor only (e.g. ':comp1')
        # or both (e.g. 'nova:comp1')
        if req_az:
            self.required_az = req_az + ':' + self.requested_hyp
        else:
            # need to insert a ':' so nova knows this is the hypervisor name
            self.required_az = ':' + self.requested_hyp if req_hyp else ''
        # placement is resolved when both AZ and hypervisor names are known and set
        self.resolved = self.requested_az != '' and self.requested_hyp != ''

    def get_required_az(self):
        """Return the required az (can be resolved or not)."""
        return self.required_az

    def register_full_name(self, discovered_az):
        """Verify compatibility and register a discovered hypervisor full name.

        discovered_az: a discovered AZ in az:hypervisor format
        return: True if discovered_az is compatible and set
                False if discovered_az is not compatible
        """
        if self.resolved:
            return discovered_az == self.required_az
        # must be in full az format
        split_daz = discovered_az.split(':')
        if len(split_daz) != 2:
            return False
        if self.requested_az and self.requested_az != split_daz[0]:
            return False
        if self.requested_hyp and self.requested_hyp != split_daz[1]:
            return False
        self.required_az = discovered_az
        self.resolved = True
        return True

    def is_resolved(self):
        """Check if the full AZ is resolved.

        return: True if resolved
        """
        return self.resolved
1053 class ChainManager(object):
1054 """A class for managing all chains for a given run.
1056 Supports openstack or no openstack.
1057 Supports EXT, PVP and PVVP chains.
def __init__(self, chain_runner):
    """Create a chain manager to take care of discovering or bringing up the requested chains.

    A new instance must be created every time a new config is used.
    config: the nfvbench config to use
    cred: openstack credentials to use of None if there is no openstack
    """
    self.chain_runner = chain_runner
    self.config = chain_runner.config
    self.generator_config = chain_runner.traffic_client.generator_config
    self.chains = []
    self.image_instance = None
    self.image_name = None
    # Left and right networks shared across all chains (only if shared)
    # NOTE(review): additional attribute defaults may have existed in the original
    # (e.g. encapsulation/AZ trackers) — confirm against upstream history
    self.networks = []
    self.flavor = None
    self.comp = None
    self.nova_client = None
    self.neutron_client = None
    self.glance_client = None
    self.existing_instances = []
    # existing ports keyed by the network uuid they belong to
    self._existing_ports = {}
    config = self.config
    self.openstack = (chain_runner.cred is not None) and not config.l2_loopback
    self.chain_count = config.service_chain_count
    if self.openstack:
        # openstack path
        session = chain_runner.cred.get_session()
        self.is_admin = chain_runner.cred.is_admin
        self.nova_client = Client(2, session=session)
        self.neutron_client = neutronclient.Client('2.0', session=session)
        self.glance_client = glanceclient.Client('2', session=session)
        self.comp = compute.Compute(self.nova_client,
                                    self.glance_client,
                                    config)
        try:
            if config.service_chain != ChainType.EXT:
                self.placer = InstancePlacer(config.availability_zone, config.compute_nodes)
                self._setup_image()
                self.flavor = ChainFlavor(config.flavor_type, config.flavor, self.comp)
                # Get list of all existing instances to check if some instances can be reused
                self.existing_instances = self.comp.get_server_list()
                # If management port is requested for VMs, create management network (shared)
                if self.config.use_management_port:
                    self.management_network = ChainNetwork(self, self.config.management_network,
                                                           None, False)
                    # If floating IP is used for management, create and share
                    # across chains the floating network
                    if self.config.use_floating_ip:
                        self.floating_ip_network = ChainNetwork(self,
                                                                self.config.floating_network,
                                                                None, False)
            else:
                # For EXT chains, the external_networks left and right fields in the config
                # must be either a prefix string or a list of at least chain-count strings
                self._check_extnet('left', config.external_networks.left)
                self._check_extnet('right', config.external_networks.right)

            # If networks are shared across chains, get the list of networks
            if config.service_chain_shared_net:
                self.networks = self.get_networks()
            # Reuse/create chains
            for chain_id in range(self.chain_count):
                self.chains.append(Chain(chain_id, self))
            if config.service_chain == ChainType.EXT:
                # if EXT and no ARP or VxLAN we need to read dest MACs from config
                if config.no_arp or config.vxlan:
                    self._get_dest_macs_from_config()
            else:
                # Make sure all instances are active before proceeding
                self._ensure_instances_active()
            # network API call do not show VLANS ID if not admin read from config
            if not self.is_admin and config.vlan_tagging:
                self._get_config_vlans()
        except Exception:
            # cleanup anything that was staged before bailing out
            self.delete()
            raise
    else:
        # no openstack, no need to create chains
        if not config.l2_loopback and config.no_arp:
            self._get_dest_macs_from_config()
        if config.vlan_tagging:
            # make sure there at least as many entries as chains in each left/right list
            if len(config.vlans) != 2:
                raise ChainException('The config vlans property must be a list '
                                     'with 2 lists of VLAN IDs')
            self._get_config_vlans()
        if config.vxlan:
            raise ChainException('VxLAN is only supported with OpenStack')
1153 def _check_extnet(self, side, name):
1155 raise ChainException('external_networks.%s must contain a valid network'
1156 ' name prefix or a list of network names' % side)
1157 if isinstance(name, tuple) and len(name) < self.chain_count:
1158 raise ChainException('external_networks.%s %s'
1159 ' must have at least %d names' % (side, name, self.chain_count))
1161 def _get_config_vlans(self):
1164 self.vlans = [self._check_list('vlans[0]', self.config.vlans[0], re_vlan),
1165 self._check_list('vlans[1]', self.config.vlans[1], re_vlan)]
1167 raise ChainException('vlans parameter is mandatory. Set valid value in config file')
1169 def _get_dest_macs_from_config(self):
1170 re_mac = "[0-9a-fA-F]{2}([-:])[0-9a-fA-F]{2}(\\1[0-9a-fA-F]{2}){4}$"
1171 tg_config = self.config.traffic_generator
1172 self.dest_macs = [self._check_list("mac_addrs_left",
1173 tg_config.mac_addrs_left, re_mac),
1174 self._check_list("mac_addrs_right",
1175 tg_config.mac_addrs_right, re_mac)]
1177 def _check_list(self, list_name, ll, pattern):
1178 # if it is a single int or mac, make it a list of 1 int
1179 if isinstance(ll, (int, str)):
1182 if not re.match(pattern, str(item)):
1183 raise ChainException("Invalid format '{item}' specified in {fname}"
1184 .format(item=item, fname=list_name))
1185 # must have at least 1 element
1187 raise ChainException('%s cannot be empty' % (list_name))
1188 # for shared network, if 1 element is passed, replicate it as many times
1190 if self.config.service_chain_shared_net and len(ll) == 1:
1191 ll = [ll[0]] * self.chain_count
1193 # number of elements musty be the number of chains
1194 elif len(ll) < self.chain_count:
1195 raise ChainException('%s=%s must be a list with %d elements per chain' %
1196 (list_name, ll, self.chain_count))
def _setup_image(self):
    """Find, upload and configure the VM image used for all VNF instances."""
    # To avoid reuploading image in server mode, check whether image_name is set or not
    if self.image_name:
        self.image_instance = self.comp.find_image(self.image_name)
    if self.image_instance:
        LOG.info("Reusing image %s", self.image_name)
    else:
        image_name_search_pattern = r'(nfvbenchvm-\d+(\.\d+)*).qcow2'
        if self.config.vm_image_file:
            match = re.search(image_name_search_pattern, self.config.vm_image_file)
            if match:
                self.image_name = match.group(1)
                LOG.info('Using provided VM image file %s', self.config.vm_image_file)
            else:
                raise ChainException('Provided VM image file name %s must start with '
                                     '"nfvbenchvm-<version>"' % self.config.vm_image_file)
        else:
            # no file provided: pick the built-in image shipped alongside the package
            pkg_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
            for f in os.listdir(pkg_root):
                if re.search(image_name_search_pattern, f):
                    self.config.vm_image_file = pkg_root + '/' + f
                    self.image_name = f.replace('.qcow2', '')
                    LOG.info('Found built-in VM image file %s', f)
                    break
            else:
                raise ChainException('Cannot find any built-in VM image file.')
        if self.image_name:
            self.image_instance = self.comp.find_image(self.image_name)
        if not self.image_instance:
            LOG.info('Uploading %s', self.image_name)
            res = self.comp.upload_image_via_url(self.image_name,
                                                 self.config.vm_image_file)
            if not res:
                raise ChainException('Error uploading image %s from %s. ABORTING.' %
                                     (self.image_name, self.config.vm_image_file))
            LOG.info('Image %s successfully uploaded.', self.image_name)
            self.image_instance = self.comp.find_image(self.image_name)

    # image multiqueue property must be set according to the vif_multiqueue_size
    # config value (defaults to 1 or disabled)
    self.comp.image_set_multiqueue(self.image_instance, self.config.vif_multiqueue_size > 1)
1242 def _ensure_instances_active(self):
1244 for chain in self.chains:
1245 instances.extend(chain.get_instances())
1246 initial_instance_count = len(instances)
1247 # Give additional 10 seconds per VM
1248 max_retries = (self.config.check_traffic_time_sec + (initial_instance_count - 1) * 10 +
1249 self.config.generic_poll_sec - 1) / self.config.generic_poll_sec
1252 remaining_instances = []
1253 for instance in instances:
1254 status = instance.get_status()
1255 if status == 'ACTIVE':
1256 LOG.info('Instance %s is ACTIVE on %s',
1257 instance.name, instance.get_hypervisor_name())
1259 if status == 'ERROR':
1260 raise ChainException('Instance %s creation error: %s' %
1262 instance.instance.fault['message']))
1263 remaining_instances.append(instance)
1264 if not remaining_instances:
1267 if retry >= max_retries:
1268 raise ChainException('Time-out: %d/%d instances still not active' %
1269 (len(remaining_instances), initial_instance_count))
1270 LOG.info('Waiting for %d/%d instance to become active (retry %d/%d)...',
1271 len(remaining_instances), initial_instance_count,
1273 instances = remaining_instances
1274 time.sleep(self.config.generic_poll_sec)
1275 if initial_instance_count:
1276 LOG.info('All instances are active')
def get_networks(self, chain_id=None):
    """Get the networks for given EXT, PVP or PVVP chain.

    For EXT packet path, these networks must pre-exist.
    For PVP, PVVP these networks will be created if they do not exist.
    chain_id: to which chain the networks belong.
              a None value will mean that these networks are shared by all chains
    """
    if self.networks:
        # the only case where self.networks exists is when the networks are shared
        # across all chains
        return self.networks
    if self.config.service_chain == ChainType.EXT:
        lookup_only = True
        ext_net = self.config.external_networks
        net_cfg = [AttrDict({'name': name,
                             'segmentation_id': None,
                             'physical_network': None})
                   for name in [ext_net.left, ext_net.right]]
        # segmentation id and subnet should be discovered from neutron
    else:
        lookup_only = False
        int_nets = self.config.internal_networks
        if self.config.service_chain == ChainType.PVP:
            net_cfg = [int_nets.left, int_nets.right]
        else:
            net_cfg = [int_nets.left, int_nets.middle, int_nets.right]
        if self.config.l3_router:
            edge_nets = self.config.edge_networks
            net_cfg.append(edge_nets.left)
            net_cfg.append(edge_nets.right)
    networks = []
    try:
        for cfg in net_cfg:
            networks.append(ChainNetwork(self, cfg, chain_id, lookup_only=lookup_only))
    except Exception:
        # need to cleanup all successful networks prior to bailing out
        for net in networks:
            net.delete()
        raise
    return networks
def get_existing_ports(self):
    """Get the list of existing ports.

    Lazy retrieval of ports as this can be costly if there are lots of ports and
    is only needed when VM and network are being reused.

    return: a dict of list of neutron ports indexed by the network uuid they are attached to

    Each port is a dict with fields such as below:
    {'allowed_address_pairs': [], 'extra_dhcp_opts': [],
     'updated_at': '2018-10-06T07:15:35Z', 'device_owner': 'compute:nova',
     'revision_number': 10, 'port_security_enabled': False, 'binding:profile': {},
     'fixed_ips': [{'subnet_id': '6903a3b3-49a1-4ba4-8259-4a90e7a44b21',
                    'ip_address': '192.168.1.4'}], 'id': '3dcb9cfa-d82a-4dd1-85a1-fd8284b52d72',
     'security_groups': [],
     'binding:vif_details': {'vhostuser_socket': '/tmp/3dcb9cfa-d82a-4dd1-85a1-fd8284b52d72',
                             'vhostuser_mode': 'server'},
     'binding:vif_type': 'vhostuser',
     'mac_address': 'fa:16:3e:3c:63:04',
     'project_id': '977ac76a63d7492f927fa80e86baff4c',
     'binding:host_id': 'a20-champagne-compute-1',
     'device_id': 'a98e2ad2-5371-4aa5-a356-8264a970ce4b',
     'name': 'nfvbench-loop-vm0-0', 'admin_state_up': True,
     'network_id': '3ea5fd88-278f-4d9d-b24d-1e443791a055',
     'tenant_id': '977ac76a63d7492f927fa80e86baff4c',
     'created_at': '2018-10-06T07:15:10Z',
     'binding:vnic_type': 'normal'}
    """
    if not self._existing_ports:
        LOG.info('Loading list of all ports...')
        existing_ports = self.neutron_client.list_ports()['ports']
        # place all ports in the dict keyed by the port network uuid
        for port in existing_ports:
            port_list = self._existing_ports.setdefault(port['network_id'], [])
            port_list.append(port)
        LOG.info("Loaded %d ports attached to %d networks",
                 len(existing_ports), len(self._existing_ports))
    return self._existing_ports
def get_ports_from_network(self, chain_network):
    """Get the list of existing ports that belong to a network.

    Lazy retrieval of ports as this can be costly if there are lots of ports and
    is only needed when VM and network are being reused.

    chain_network: a ChainNetwork instance for which attached ports need to be retrieved
    return: list of neutron ports attached to requested network
    """
    return self.get_existing_ports().get(chain_network.get_uuid(), None)
def get_hypervisor_from_mac(self, mac):
    """Get the hypervisor that hosts a VM MAC.

    mac: MAC address to look for
    return: the hypervisor where the matching port runs or None if not found
    """
    # _existing_ports is a dict of list of ports indexed by network id
    for port_list in list(self.get_existing_ports().values()):
        for port in port_list:
            try:
                if port['mac_address'] == mac:
                    host_id = port['binding:host_id']
                    return self.comp.get_hypervisor(host_id)
            except KeyError:
                # some ports may not carry the binding:host_id extension
                pass
    return None
def get_host_ip_from_mac(self, mac):
    """Get the host IP address matching a MAC.

    mac: MAC address to look for
    return: the IP address of the host where the matching port runs or None if not found
    """
    hypervisor = self.get_hypervisor_from_mac(mac)
    if hypervisor:
        return hypervisor.host_ip
    return None
def get_chain_vlans(self, port_index):
    """Get the list of per chain VLAN id on a given port.

    port_index: left port is 0, right port is 1
    return: a VLAN ID list indexed by the chain index or None if no vlan tagging
    """
    if self.chains and self.is_admin:
        return [self.chains[chain_index].get_vlan(port_index)
                for chain_index in range(self.chain_count)]
    # no openstack or non-admin user: fall back to the VLANs read from the config
    return self.vlans[port_index]
def get_chain_vxlans(self, port_index):
    """Get the list of per chain VNIs id on a given port.

    port_index: left port is 0, right port is 1
    return: a VNIs ID list indexed by the chain index or None if no vlan tagging
    """
    if self.chains and self.is_admin:
        return [self.chains[chain_index].get_vxlan(port_index)
                for chain_index in range(self.chain_count)]
    # VxLAN segmentation IDs can only be discovered through openstack as admin
    raise ChainException('VxLAN is only supported with OpenStack and with admin user')
def get_dest_macs(self, port_index):
    """Get the list of per chain dest MACs on a given port.

    Should not be called if EXT+ARP is used (in that case the traffic gen will
    have the ARP responses back from VNFs with the dest MAC to use).

    port_index: left port is 0, right port is 1
    return: a list of dest MACs indexed by the chain index
    """
    if self.chains and self.config.service_chain != ChainType.EXT:
        # staged chains: read the MACs from the actual VNF ports
        return [self.chains[chain_index].get_dest_mac(port_index)
                for chain_index in range(self.chain_count)]
    # no openstack or EXT+no-arp
    return self.dest_macs[port_index]
def get_host_ips(self):
    """Return the IP address(es) of the host compute nodes used for this run.

    :return: a list of 1 IP address
    """
    # Since all chains go through the same compute node(s) we can just retrieve the
    # compute node(s) for the first chain
    if self.chains:
        if self.config.service_chain != ChainType.EXT:
            return self.chains[0].get_host_ips()
        # in the case of EXT, the compute node must be retrieved from the port
        # associated to any of the dest MACs
        dst_macs = self.generator_config.get_dest_macs()
        # dest MAC on port 0, chain 0
        dst_mac = dst_macs[0][0]
        host_ip = self.get_host_ip_from_mac(dst_mac)
        if host_ip:
            LOG.info('Found compute node IP for EXT chain: %s', host_ip)
            return [host_ip]
    return []
def get_compute_nodes(self):
    """Return the name of the host compute nodes used for this run.

    :return: a list of 0 or 1 host name in the az:host format
    """
    # Since all chains go through the same compute node(s) we can just retrieve the
    # compute node name(s) for the first chain
    if self.chains:
        if self.config.service_chain != ChainType.EXT:
            return self.chains[0].get_compute_nodes()
        # in the case of EXT, the compute node must be retrieved from the port
        # associated to any of the dest MACs
        dst_macs = self.generator_config.get_dest_macs()
        # dest MAC on port 0, chain 0
        dst_mac = dst_macs[0][0]
        hypervisor = self.get_hypervisor_from_mac(dst_mac)
        if hypervisor:
            LOG.info('Found hypervisor for EXT chain: %s', hypervisor.hypervisor_hostname)
            return [':' + hypervisor.hypervisor_hostname]
    # no openstack = no chains
    return []
def delete(self):
    """Delete resources for all chains."""
    for chain in self.chains:
        chain.delete()
    for network in self.networks:
        network.delete()
    if self.config.use_management_port and hasattr(self, 'management_network'):
        self.management_network.delete()
    if self.config.use_floating_ip and hasattr(self, 'floating_ip_network'):
        self.floating_ip_network.delete()
    if self.flavor:
        self.flavor.delete()