2 # Copyright 2018 Cisco Systems, Inc. All rights reserved.
4 # Licensed under the Apache License, Version 2.0 (the "License"); you may
5 # not use this file except in compliance with the License. You may obtain
6 # a copy of the License at
8 # http://www.apache.org/licenses/LICENSE-2.0
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 # License for the specific language governing permissions and limitations
17 # This module takes care of chaining networks, ports and vms
19 """NFVBENCH CHAIN DISCOVERY/STAGING.
21 This module takes care of staging/discovering all resources that are participating in a
22 benchmarking session: flavors, networks, ports, VNF instances.
23 If a resource is discovered with the same name, it will be reused.
24 Otherwise it will be created.
26 ChainManager: manages VM image, flavor, the staging discovery of all chains
28 Chain: manages one chain, has 2 or more networks and 1 or more instances
29 ChainNetwork: manages 1 network in a chain
30 ChainVnf: manages 1 VNF instance in a chain, has 2 ports
31 ChainVnfPort: manages 1 instance port
33 ChainManager-->Chain(*)
34 Chain-->ChainNetwork(*),ChainVnf(*)
35 ChainVnf-->ChainVnfPort(2)
37 Once created/discovered, instances are checked to be in the active state (ready to pass traffic)
38 Configuration parameters that will influence how these resources are staged/related:
39 - openstack or no openstack
42 - number of VNF in each chain (PVP, PVVP)
43 - SRIOV and middle port SRIOV for port types
44 - whether networks are shared across chains or not
46 There is no traffic generation involved in this module.
53 from neutronclient.neutron import client as neutronclient
54 from novaclient.client import Client
56 from attrdict import AttrDict
57 from .chain_router import ChainRouter
60 from .specs import ChainType
61 # Left and right index for network and port lists
64 # L3 traffic edge networks are at the end of networks list
67 # Name of the VM config file
68 NFVBENCH_CFG_FILENAME = 'nfvbenchvm.conf'
69 # full pathname of the VM config file inside the VM
70 NFVBENCH_CFG_VM_PATHNAME = os.path.join('/etc/', NFVBENCH_CFG_FILENAME)
71 # full path of the boot shell script template file on the server where nfvbench runs
72 BOOT_SCRIPT_PATHNAME = os.path.join(os.path.dirname(os.path.abspath(__file__)),
74 NFVBENCH_CFG_FILENAME)
class ChainException(Exception):
    """Raised for any error encountered while operating the chains."""
class NetworkEncaps(object):
    """Represents a network encapsulation."""
# NOTE(review): this extract is line-sampled (gaps in the embedded original
# line numbers); some statements of this class are missing here -- confirm
# against the full file before editing.
84 class ChainFlavor(object):
85 """Class to manage the chain flavor.

Looks up an existing flavor by name first; only creates a new one
(and remembers it for deletion) when no match is found.
"""
87 def __init__(self, flavor_name, flavor_dict, comp):
88 """Create a flavor.

flavor_name: name of the flavor to find or create
flavor_dict: flavor properties; the 'extra_specs' key (if any) is
             popped out and applied separately via set_keys()
comp: compute helper used to find/create the flavor
"""
89 self.name = flavor_name
# Try to reuse an existing flavor with the same name first.
91 self.flavor = self.comp.find_flavor(flavor_name)
95 LOG.info("Reused flavor '%s'", flavor_name)
# No match: create a new flavor from flavor_dict.
97 extra_specs = flavor_dict.pop('extra_specs', None)
99 self.flavor = comp.create_flavor(flavor_name,
102 LOG.info("Created flavor '%s'", flavor_name)
104 self.flavor.set_keys(extra_specs)
# delete(): only remove the flavor if it was created by this run
# (reused flavors are left untouched).
107 """Delete this flavor."""
108 if not self.reuse and self.flavor:
110 LOG.info("Flavor '%s' deleted", self.name)
# NOTE(review): this extract is line-sampled (gaps in the embedded original
# line numbers); some statements are missing from this class as shown.
113 class ChainVnfPort(object):
114 """A port associated to one VNF in the chain."""
116 def __init__(self, name, vnf, chain_network, vnic_type):
117 """Create or reuse a port on a given network.

119 if vnf.instance is None the VNF instance is not reused and this ChainVnfPort instance must
121 Otherwise vnf.instance is a reused VNF instance and this ChainVnfPort instance must
122 find an existing port to reuse that matches the port requirements: same attached network,
123 instance, name, vnic type

125 name: name for this port
126 vnf: ChainVnf instance that owns this port
127 chain_network: ChainNetwork instance where this port should attach
128 vnic_type: required vnic type for this port
"""
132 self.manager = vnf.manager
135 self.floating_ip = None
137 # VNF instance is reused, we need to find an existing port that matches this instance
139 # discover ports attached to this instance
140 port_list = self.manager.get_ports_from_network(chain_network)
141 for port in port_list:
# A reusable port must match on name, vnic type and owning instance.
142 if port['name'] != name:
144 if port['binding:vnic_type'] != vnic_type:
146 if port['device_id'] == vnf.get_uuid():
148 LOG.info('Reusing existing port %s mac=%s', name, port['mac_address'])
# No candidate port matched the reused instance: fatal for reuse.
151 raise ChainException('Cannot find matching port')
153 # VNF instance is not created yet, we need to create a new port
157 'network_id': chain_network.get_uuid(),
158 'binding:vnic_type': vnic_type
161 port = self.manager.neutron_client.create_port(body)
162 self.port = port['port']
163 LOG.info('Created port %s', name)
# Best effort: disable port security so benchmark traffic is not filtered;
# failure to do so is logged and ignored.
165 self.manager.neutron_client.update_port(self.port['id'], {
167 'security_groups': [],
168 'port_security_enabled': False,
171 LOG.info('Security disabled on port %s', name)
173 LOG.info('Failed to disable security on port %s (ignored)', name)
176 """Get the MAC address for this port."""
177 return self.port['mac_address']
180 """Get the IP address for this port."""
181 return self.port['fixed_ips'][0]['ip_address']
183 def set_floating_ip(self, chain_network):
# Create a floating IP on the given external network and associate it
# with this port; on failure, fall back to the port's fixed IP.
184 # create and add floating ip to port
186 self.floating_ip = self.manager.neutron_client.create_floatingip({
188 'floating_network_id': chain_network.get_uuid(),
189 'port_id': self.port['id'],
190 'description': 'nfvbench floating ip for port:' + self.port['name'],
192 LOG.info('Floating IP %s created and associated on port %s',
193 self.floating_ip['floating_ip_address'], self.name)
194 return self.floating_ip['floating_ip_address']
196 LOG.info('Failed to created and associated floating ip on port %s (ignored)', self.name)
197 return self.port['fixed_ips'][0]['ip_address']
200 """Delete this port instance."""
# Reused ports are never deleted; retry deletion with a poll delay.
201 if self.reuse or not self.port:
203 for _ in range(0, self.manager.config.generic_retry_count):
205 self.manager.neutron_client.delete_port(self.port['id'])
206 LOG.info("Deleted port %s", self.name)
208 self.manager.neutron_client.delete_floatingip(self.floating_ip['id'])
209 LOG.info("Deleted floating IP %s", self.floating_ip['description'])
212 time.sleep(self.manager.config.generic_poll_sec)
213 LOG.error('Unable to delete port: %s', self.name)
# NOTE(review): this extract is line-sampled (gaps in the embedded original
# line numbers); some statements are missing from this class as shown.
216 class ChainNetwork(object):
217 """Could be a shared network across all chains or a chain private network."""
219 def __init__(self, manager, network_config, chain_id=None, lookup_only=False,
221 """Create a network for given chain.

223 network_config: a dict containing the network properties
224 (name, segmentation_id and physical_network)
225 chain_id: to which chain the networks belong.
226 a None value will mean that these networks are shared by all chains
227 suffix: a suffix to add to the network name (if not None)
"""
229 self.manager = manager
231 self.name = network_config.name
233 # the name itself can be either a string or a list of names indexed by chain ID
234 if isinstance(network_config.name, tuple):
235 self.name = network_config.name[chain_id]
237 # network_config.name is a prefix string
238 self.name = network_config.name + str(chain_id)
240 self.name = self.name + suffix
241 self.segmentation_id = self._get_item(network_config.segmentation_id,
242 chain_id, auto_index=True)
243 self.physical_network = self._get_item(network_config.physical_network, chain_id)
# Router name is only relevant in L3 router mode and when configured.
248 if manager.config.l3_router and hasattr(network_config, 'router_name'):
249 self.router_name = network_config.router_name
251 self._setup(network_config, lookup_only)
254 LOG.error("Cannot find network %s", self.name)
256 LOG.error("Error creating network %s", self.name)
260 def _get_item(self, item_field, index, auto_index=False):
261 """Retrieve an item from a list or a single value.

263 item_field: can be None, a tuple of a single value
264 index: if None is same as 0, else is the index for a chain
265 auto_index: if true will automatically get the final value by adding the
266 index to the base value (if full list not provided)

268 If the item_field is not a tuple, it is considered same as a tuple with same value at any
270 If a list is provided, its length must be > index
"""
276 if isinstance(item_field, tuple):
278 return item_field[index]
280 raise ChainException("List %s is too short for chain index %d" %
281 (str(item_field), index))
282 # single value is configured
# auto_index: derive the per-chain value by offsetting the base value.
284 return item_field + index
287 def _setup(self, network_config, lookup_only):
288 # Lookup if there is a matching network with same name
289 networks = self.manager.neutron_client.list_networks(name=self.name)
290 if networks['networks']:
291 network = networks['networks'][0]
292 # a network of same name already exists, we need to verify it has the same
# segmentation id and physical network as requested by the config,
# otherwise reusing it would silently change the test topology.
294 if self.segmentation_id:
295 if network['provider:segmentation_id'] != self.segmentation_id:
296 raise ChainException("Mismatch of 'segmentation_id' for reused "
297 "network '{net}'. Network has id '{seg_id1}', "
298 "configuration requires '{seg_id2}'."
299 .format(net=self.name,
300 seg_id1=network['provider:segmentation_id'],
301 seg_id2=self.segmentation_id))
303 if self.physical_network:
304 if network['provider:physical_network'] != self.physical_network:
305 raise ChainException("Mismatch of 'physical_network' for reused "
306 "network '{net}'. Network has '{phys1}', "
307 "configuration requires '{phys2}'."
308 .format(net=self.name,
309 phys1=network['provider:physical_network'],
310 phys2=self.physical_network))
312 LOG.info('Reusing existing network %s', self.name)
314 self.network = network
# lookup_only mode must not create anything: missing network is fatal.
317 raise ChainException('Network %s not found' % self.name)
# Build the neutron create_network request body from the config,
# only setting provider attributes that are actually configured.
321 'admin_state_up': True
324 if network_config.network_type:
325 body['network']['provider:network_type'] = network_config.network_type
326 if self.segmentation_id:
327 body['network']['provider:segmentation_id'] = self.segmentation_id
328 if self.physical_network:
329 body['network']['provider:physical_network'] = self.physical_network
330 self.network = self.manager.neutron_client.create_network(body)['network']
331 # create associated subnet, all subnets have the same name (which is ok since
332 # we do not need to address them directly by name)
334 'subnet': {'name': network_config.subnet,
335 'cidr': network_config.cidr,
336 'network_id': self.network['id'],
337 'enable_dhcp': False,
339 'dns_nameservers': []}
341 subnet = self.manager.neutron_client.create_subnet(body)['subnet']
342 # add subnet id to the network dict since it has just been added
343 self.network['subnets'] = [subnet['id']]
344 LOG.info('Created network: %s', self.name)
# get_uuid():
348 Extract UUID of this network.

350 :return: UUID of this network
352 return self.network['id']
# get_vlan(): only valid for vlan-type networks.
356 Extract vlan for this network.

358 :return: vlan ID for this network
360 if self.network['provider:network_type'] != 'vlan':
361 raise ChainException('Trying to retrieve VLAN id for non VLAN network')
362 return self.network['provider:segmentation_id']
# get_vxlan():
366 Extract VNI for this network.

368 :return: VNI ID for this network
371 return self.network['provider:segmentation_id']
374 """Delete this network."""
# Reused networks are never deleted; retry deletion with a poll delay
# (ports on the network may still be in the process of being removed).
375 if not self.reuse and self.network:
376 for retry in range(0, self.manager.config.generic_retry_count):
378 self.manager.neutron_client.delete_network(self.network['id'])
379 LOG.info("Deleted network: %s", self.name)
382 LOG.info('Error deleting network %s (retry %d/%d)...',
385 self.manager.config.generic_retry_count)
386 time.sleep(self.manager.config.generic_poll_sec)
387 LOG.error('Unable to delete network: %s', self.name)
# NOTE(review): this extract is line-sampled (gaps in the embedded original
# line numbers); several statements are missing from this class as shown.
390 class ChainVnf(object):
391 """A class to represent a VNF in a chain."""
393 def __init__(self, chain, vnf_id, networks):
394 """Reuse a VNF instance with same characteristics or create a new VNF instance.

396 chain: the chain where this vnf belongs
397 vnf_id: indicates the index of this vnf in its chain (first vnf=0)
398 networks: the list of all networks (ChainNetwork) of the current chain
"""
400 self.manager = chain.manager
403 self.name = self.manager.config.loop_vm_name + str(chain.chain_id)
404 if len(networks) > 2:
405 # we will have more than 1 VM in each chain
406 self.name += '-' + str(vnf_id)
407 # A list of ports for this chain
408 # There are normally 2 ports carrying traffic (index 0, and index 1) and
409 # potentially multiple idle ports not carrying traffic (index 2 and up)
410 # For example if 7 idle interfaces are requested, the corresp. ports will be
413 self.management_port = None
419 self.idle_networks = []
422 # the vnf_id is conveniently also the starting index in networks
423 # for the left and right networks associated to this VNF
# In L3 router mode a VNF spans 4 networks (2 internal + 2 edge).
424 if self.manager.config.l3_router:
425 self._setup(networks[vnf_id:vnf_id + 4])
427 self._setup(networks[vnf_id:vnf_id + 2])
429 LOG.error("Error creating VNF %s", self.name)
433 def _get_vm_config(self, remote_mac_pair):
# Build the nfvbenchvm boot config by filling the boot script template
# with per-VNF addressing/forwarding parameters.
434 config = self.manager.config
435 devices = self.manager.generator_config.devices
# L3 router mode: gateways/macs are taken from the neutron router ports.
438 tg_gateway1_ip = self.routers[LEFT].ports[1]['fixed_ips'][0][
439 'ip_address'] # router edge ip left
440 tg_gateway2_ip = self.routers[RIGHT].ports[1]['fixed_ips'][0][
441 'ip_address'] # router edge ip right
442 tg_mac1 = self.routers[LEFT].ports[1]['mac_address'] # router edge mac left
443 tg_mac2 = self.routers[RIGHT].ports[1]['mac_address'] # router edge mac right
444 # edge cidr mask left
445 vnf_gateway1_cidr = \
446 self.ports[LEFT].get_ip() + self.__get_network_mask(
447 self.manager.config.edge_networks.left.cidr)
448 # edge cidr mask right
449 vnf_gateway2_cidr = \
450 self.ports[RIGHT].get_ip() + self.__get_network_mask(
451 self.manager.config.edge_networks.right.cidr)
452 if config.vm_forwarder != 'vpp':
453 raise ChainException(
454 'L3 router mode imply to set VPP as VM forwarder.'
455 'Please update your config file with: vm_forwarder: vpp')
# Non-router mode: gateways come from the traffic generator devices.
457 tg_gateway1_ip = devices[LEFT].tg_gateway_ip_addrs
458 tg_gateway2_ip = devices[RIGHT].tg_gateway_ip_addrs
459 if not config.loop_vm_arp:
460 tg_mac1 = remote_mac_pair[0]
461 tg_mac2 = remote_mac_pair[1]
466 g1cidr = devices[LEFT].get_gw_ip(
467 self.chain.chain_id) + self.__get_network_mask(
468 self.manager.config.internal_networks.left.cidr)
469 g2cidr = devices[RIGHT].get_gw_ip(
470 self.chain.chain_id) + self.__get_network_mask(
471 self.manager.config.internal_networks.right.cidr)
473 vnf_gateway1_cidr = g1cidr
474 vnf_gateway2_cidr = g2cidr
476 with open(BOOT_SCRIPT_PATHNAME, 'r') as boot_script:
477 content = boot_script.read()
479 'forwarder': config.vm_forwarder,
480 'intf_mac1': self.ports[LEFT].get_mac(),
481 'intf_mac2': self.ports[RIGHT].get_mac(),
482 'tg_gateway1_ip': tg_gateway1_ip,
483 'tg_gateway2_ip': tg_gateway2_ip,
484 'tg_net1': devices[LEFT].ip_addrs,
485 'tg_net2': devices[RIGHT].ip_addrs,
486 'vnf_gateway1_cidr': vnf_gateway1_cidr,
487 'vnf_gateway2_cidr': vnf_gateway2_cidr,
490 'vif_mq_size': config.vif_multiqueue_size,
491 'num_mbufs': config.num_mbufs
493 if self.manager.config.use_management_port:
494 mgmt_ip = self.management_port.port['fixed_ips'][0]['ip_address']
495 mgmt_mask = self.__get_network_mask(self.manager.config.management_network.cidr)
496 vm_config['intf_mgmt_cidr'] = mgmt_ip + mgmt_mask
497 vm_config['intf_mgmt_ip_gw'] = self.manager.config.management_network.gateway
498 vm_config['intf_mac_mgmt'] = self.management_port.port['mac_address']
500 # Interface management config left empty to avoid error in VM spawn
501 # if nfvbench config has values for management network but use_management_port=false
502 vm_config['intf_mgmt_cidr'] = ''
503 vm_config['intf_mgmt_ip_gw'] = ''
504 vm_config['intf_mac_mgmt'] = ''
505 return content.format(**vm_config)
# Return the '/<prefix-len>' part of a CIDR string such as '10.0.0.0/24'.
508 def __get_network_mask(network):
509 return '/' + network.split('/')[1]
511 def _get_vnic_type(self, port_index):
512 """Get the right vnic type for given port index.

514 If SR-IOV is specified, middle ports in multi-VNF chains
515 can use vswitch or SR-IOV based on config.use_sriov_middle_net
"""
517 if self.manager.config.sriov:
518 chain_length = self.chain.get_length()
519 if self.manager.config.use_sriov_middle_net or chain_length == 1:
521 if self.vnf_id == 0 and port_index == 0:
522 # first VNF in chain must use sriov for left port
524 if (self.vnf_id == chain_length - 1) and (port_index == 1):
525 # last VNF in chain must use sriov for right port
529 def _get_idle_networks_ports(self):
530 """Get the idle networks for PVP or PVVP chain (non shared net only)

532 For EXT packet path or shared net, returns empty list.
533 For PVP, PVVP these networks will be created if they do not exist.
534 chain_id: to which chain the networks belong.
535 a None value will mean that these networks are shared by all chains
"""
539 config = self.manager.config
540 chain_id = self.chain.chain_id
541 idle_interfaces_per_vm = config.idle_interfaces_per_vm
542 if config.service_chain == ChainType.EXT or chain_id is None or \
543 idle_interfaces_per_vm == 0:
546 # Make a copy of the idle networks dict as we may have to modify the
548 idle_network_cfg = AttrDict(config.idle_networks)
# Offset the configured base segmentation id per chain so each chain's
# idle networks get distinct segment ids.
549 if idle_network_cfg.segmentation_id:
550 segmentation_id = idle_network_cfg.segmentation_id + \
551 chain_id * idle_interfaces_per_vm
553 segmentation_id = None
555 # create as many idle networks and ports as requested
556 for idle_index in range(idle_interfaces_per_vm):
557 if config.service_chain == ChainType.PVP:
558 suffix = '.%d' % (idle_index)
560 suffix = '.%d.%d' % (self.vnf_id, idle_index)
561 port_name = self.name + '-idle' + str(idle_index)
562 # update the segmentation id based on chain id and idle index
564 idle_network_cfg.segmentation_id = segmentation_id + idle_index
565 port_name = port_name + "." + str(segmentation_id)
567 networks.append(ChainNetwork(self.manager,
571 ports.append(ChainVnfPort(port_name,
573 networks[idle_index],
576 # need to cleanup all successful networks
582 self.idle_networks = networks
583 self.idle_ports = ports
585 def _setup(self, networks):
586 flavor_id = self.manager.flavor.flavor.id
587 # Check if we can reuse an instance with same name
588 for instance in self.manager.existing_instances:
589 if instance.name == self.name:
591 instance_right = RIGHT
592 # In case of L3 traffic instance use edge networks
593 if self.manager.config.l3_router:
594 instance_left = EDGE_LEFT
595 instance_right = EDGE_RIGHT
596 # Verify that other instance characteristics match
597 if instance.flavor['id'] != flavor_id:
598 self._reuse_exception('Flavor mismatch')
599 if instance.status != "ACTIVE":
600 self._reuse_exception('Matching instance is not in ACTIVE state')
601 # The 2 networks for this instance must also be reused
602 if not networks[instance_left].reuse:
603 self._reuse_exception('network %s is new' % networks[instance_left].name)
604 if not networks[instance_right].reuse:
605 self._reuse_exception('network %s is new' % networks[instance_right].name)
606 # instance.networks have the network names as keys:
607 # {'nfvbench-rnet0': ['192.168.2.10'], 'nfvbench-lnet0': ['192.168.1.8']}
608 if networks[instance_left].name not in instance.networks:
609 self._reuse_exception('Left network mismatch')
610 if networks[instance_right].name not in instance.networks:
611 self._reuse_exception('Right network mismatch')
614 self.instance = instance
615 LOG.info('Reusing existing instance %s on %s',
616 self.name, self.get_hypervisor_name())
617 # create management port if needed
618 if self.manager.config.use_management_port:
619 self.management_port = ChainVnfPort(self.name + '-mgmt', self,
620 self.manager.management_network, 'normal')
621 ip = self.management_port.port['fixed_ips'][0]['ip_address']
622 if self.manager.config.use_floating_ip:
623 ip = self.management_port.set_floating_ip(self.manager.floating_ip_network)
624 LOG.info("Management interface will be active using IP: %s, "
625 "and you can connect over SSH with login: nfvbench and password: nfvbench", ip)
626 # create or reuse/discover 2 ports per instance
627 if self.manager.config.l3_router:
629 self.ports.append(ChainVnfPort(self.name + '-' + str(index),
632 self._get_vnic_type(index)))
635 self.ports.append(ChainVnfPort(self.name + '-' + str(index),
638 self._get_vnic_type(index)))
640 # create idle networks and ports only if instance is not reused
641 # if reused, we do not care about idle networks/ports
643 self._get_idle_networks_ports()
645 # Create neutron routers for L3 traffic use case
646 if self.manager.config.l3_router and self.manager.openstack:
647 internal_nets = networks[:2]
648 if self.manager.config.service_chain == ChainType.PVP:
649 edge_nets = networks[2:]
651 edge_nets = networks[3:]
# Left router: static routes towards the TG source range and back
# through the left VNF port.
652 subnets_left = [internal_nets[0], edge_nets[0]]
653 routes_left = [{'destination': self.manager.config.traffic_generator.ip_addrs[0],
654 'nexthop': self.manager.config.traffic_generator.tg_gateway_ip_addrs[
656 {'destination': self.manager.config.traffic_generator.ip_addrs[1],
657 'nexthop': self.ports[0].get_ip()}]
659 ChainRouter(self.manager, edge_nets[0].router_name, subnets_left, routes_left))
# Right router: mirror image of the left router routes.
660 subnets_right = [internal_nets[1], edge_nets[1]]
661 routes_right = [{'destination': self.manager.config.traffic_generator.ip_addrs[0],
662 'nexthop': self.ports[1].get_ip()},
663 {'destination': self.manager.config.traffic_generator.ip_addrs[1],
664 'nexthop': self.manager.config.traffic_generator.tg_gateway_ip_addrs[
667 ChainRouter(self.manager, edge_nets[1].router_name, subnets_right, routes_right))
668 # Overload gateway_ips property with router ip address for ARP and traffic calls
669 self.manager.generator_config.devices[LEFT].set_gw_ip(
670 self.routers[LEFT].ports[0]['fixed_ips'][0]['ip_address']) # router edge ip left)
671 self.manager.generator_config.devices[RIGHT].set_gw_ip(
672 self.routers[RIGHT].ports[0]['fixed_ips'][0]['ip_address']) # router edge ip right)
674 # if no reuse, actual vm creation is deferred after all ports in the chain are created
675 # since we need to know the next mac in a multi-vnf chain
677 def create_vnf(self, remote_mac_pair):
678 """Create the VNF instance if it does not already exist."""
679 if self.instance is None:
# Collect all port ids to attach: optional mgmt port, the 2 traffic
# ports, then any idle ports.
681 if self.manager.config.use_management_port:
682 port_ids.append({'port-id': self.management_port.port['id']})
683 port_ids.extend([{'port-id': vnf_port.port['id']} for vnf_port in self.ports])
685 for idle_port in self.idle_ports:
686 port_ids.append({'port-id': idle_port.port['id']})
687 vm_config = self._get_vm_config(remote_mac_pair)
688 az = self.manager.placer.get_required_az()
689 server = self.manager.comp.create_server(self.name,
690 self.manager.image_instance,
691 self.manager.flavor.flavor,
698 files={NFVBENCH_CFG_VM_PATHNAME: vm_config})
700 self.instance = server
701 if self.manager.placer.is_resolved():
702 LOG.info('Created instance %s on %s', self.name, az)
704 # the location is undetermined at this point
705 # self.get_hypervisor_name() will return None
706 LOG.info('Created instance %s - waiting for placement resolution...', self.name)
707 # here we MUST wait until this instance is resolved otherwise subsequent
708 # VNF creation can be placed in other hypervisors!
709 config = self.manager.config
# Round up the retry count so the total wait covers check_traffic_time_sec.
710 max_retries = int((config.check_traffic_time_sec +
711 config.generic_poll_sec - 1) / config.generic_poll_sec)
713 for retry in range(max_retries):
714 status = self.get_status()
715 if status == 'ACTIVE':
716 hyp_name = self.get_hypervisor_name()
717 LOG.info('Instance %s is active and has been placed on %s',
719 self.manager.placer.register_full_name(hyp_name)
721 if status == 'ERROR':
722 raise ChainException('Instance %s creation error: %s' %
724 self.instance.fault['message']))
725 LOG.info('Waiting for instance %s to become active (retry %d/%d)...',
726 self.name, retry + 1, max_retries + 1)
727 time.sleep(config.generic_poll_sec)
730 LOG.error('Instance %s creation timed out', self.name)
731 raise ChainException('Instance %s creation timed out' % self.name)
734 raise ChainException('Unable to create instance: %s' % (self.name))
736 def _reuse_exception(self, reason):
# Helper that turns any reuse mismatch into a fatal ChainException.
737 raise ChainException('Instance %s cannot be reused (%s)' % (self.name, reason))
739 def get_status(self):
740 """Get the status of this instance, refreshing it while not ACTIVE."""
741 if self.instance.status != 'ACTIVE':
742 self.instance = self.manager.comp.poll_server(self.instance)
743 return self.instance.status
745 def get_hostname(self):
746 """Get the hypervisor host name running this VNF instance."""
# Non-admin users cannot read the hypervisor attribute and must provide
# the hostname in the nfvbench config.
747 if self.manager.is_admin:
748 hypervisor_hostname = getattr(self.instance, 'OS-EXT-SRV-ATTR:hypervisor_hostname')
750 hypervisor_hostname = self.manager.config.hypervisor_hostname
751 if not hypervisor_hostname:
752 raise ChainException('Hypervisor hostname parameter is mandatory')
753 return hypervisor_hostname
755 def get_host_ip(self):
756 """Get the IP address of the host where this instance runs.

758 return: the IP address
"""
761 self.host_ip = self.manager.comp.get_hypervisor(self.get_hostname()).host_ip
764 def get_hypervisor_name(self):
765 """Get hypervisor name (az:hostname) for this VNF instance."""
767 if self.manager.is_admin:
768 az = getattr(self.instance, 'OS-EXT-AZ:availability_zone')
770 az = self.manager.config.availability_zone
772 raise ChainException('Availability zone parameter is mandatory')
773 hostname = self.get_hostname()
775 return az + ':' + hostname
# get_uuid():
780 """Get the uuid for this instance."""
781 return self.instance.id
783 def delete(self, forced=False):
784 """Delete this VNF instance."""
# Reused instances are left in place unless deletion is forced.
786 LOG.info("Instance %s not deleted (reused)", self.name)
789 self.manager.comp.delete_server(self.instance)
790 LOG.info("Deleted instance %s", self.name)
791 if self.manager.config.use_management_port:
792 self.management_port.delete()
793 for port in self.ports:
795 for port in self.idle_ports:
797 for network in self.idle_networks:
# NOTE(review): this extract is line-sampled; the 'class Chain' header line
# itself is among the missing lines -- the span below is the class body.
802 """A class to manage a single chain.

804 Can handle any type of chain (EXT, PVP, PVVP)
"""
807 def __init__(self, chain_id, manager):
808 """Create a new chain.

810 chain_id: chain index (first chain is 0)
811 manager: the chain manager that owns all chains
"""
813 self.chain_id = chain_id
814 self.manager = manager
815 self.encaps = manager.encaps
819 self.networks = manager.get_networks(chain_id)
820 # For external chain VNFs can only be discovered from their MAC addresses
821 # either from config or from ARP
822 if manager.config.service_chain != ChainType.EXT:
823 for chain_instance_index in range(self.get_length()):
824 self.instances.append(ChainVnf(self,
825 chain_instance_index,
827 # at this point new VNFs are not created yet but
828 # verify that all discovered VNFs are on the same hypervisor
829 self._check_hypervisors()
830 # now that all VNF ports are created we need to calculate the
831 # left/right remote MAC for each VNF in the chain
832 # before actually creating the VNF itself
833 rem_mac_pairs = self._get_remote_mac_pairs()
834 for instance in self.instances:
835 rem_mac_pair = rem_mac_pairs.pop(0)
836 instance.create_vnf(rem_mac_pair)
841 def _check_hypervisors(self):
# All discovered VNFs of one chain must sit on the same hypervisor,
# and that hypervisor must be compatible with the requested placement.
842 common_hypervisor = None
843 for instance in self.instances:
844 # get the full hypervizor name (az:compute)
845 hname = instance.get_hypervisor_name()
847 if common_hypervisor:
848 if hname != common_hypervisor:
849 raise ChainException('Discovered instances on different hypervisors:'
850 ' %s and %s' % (hname, common_hypervisor))
852 common_hypervisor = hname
853 if common_hypervisor:
854 # check that the common hypervisor name matchs the requested hypervisor name
855 # and set the name to be used by all future instances (if any)
856 if not self.manager.placer.register_full_name(common_hypervisor):
857 raise ChainException('Discovered hypervisor placement %s is incompatible' %
860 def get_length(self):
861 """Get the number of VNF in the chain."""
862 # Take into account 2 edge networks for routers
863 return len(self.networks) - 3 if self.manager.config.l3_router else len(self.networks) - 1
865 def _get_remote_mac_pairs(self):
866 """Get the list of remote mac pairs for every VNF in the chain.

868 Traverse the chain from left to right and establish the
869 left/right remote MAC for each VNF in the chain.

872 mac sequence: tg_src_mac, vm0-mac0, vm0-mac1, tg_dst_mac
873 must produce [[tg_src_mac, tg_dst_mac]] or looking at index in mac sequence: [[0, 3]]
874 the mac pair is what the VNF at that position (index 0) sees as next hop mac left and right

877 tg_src_mac, vm0-mac0, vm0-mac1, vm1-mac0, vm1-mac1, tg_dst_mac
878 Must produce the following list:
879 [[tg_src_mac, vm1-mac0], [vm0-mac1, tg_dst_mac]] or index: [[0, 3], [2, 5]]

881 General case with 3 VMs in chain, the list of consecutive macs (left to right):
882 tg_src_mac, vm0-mac0, vm0-mac1, vm1-mac0, vm1-mac1, vm2-mac0, vm2-mac1, tg_dst_mac
883 Must produce the following list:
884 [[tg_src_mac, vm1-mac0], [vm0-mac1, vm2-mac0], [vm1-mac1, tg_dst_mac]]
885 or index: [[0, 3], [2, 5], [4, 7]]

887 The series pattern is pretty clear: [[n, n+3],... ] where n is multiple of 2
"""
889 # line up all mac from left to right
890 mac_seq = [self.manager.generator_config.devices[LEFT].mac]
891 for instance in self.instances:
892 mac_seq.append(instance.ports[0].get_mac())
893 mac_seq.append(instance.ports[1].get_mac())
894 mac_seq.append(self.manager.generator_config.devices[RIGHT].mac)
897 for _ in self.instances:
898 rem_mac_pairs.append([mac_seq[base], mac_seq[base + 3]])
902 def get_instances(self):
903 """Return all instances for this chain."""
904 return self.instances
906 def get_vlan(self, port_index):
907 """Get the VLAN id on a given port.

909 port_index: left port is 0, right port is 1
910 return: the vlan_id or None if there is no vlan tagging
"""
912 # for port 1 we need to return the VLAN of the last network in the chain
913 # The networks array contains 2 networks for PVP [left, right]
914 # and 3 networks in the case of PVVP [left.middle,right]
916 # this will pick the last item in array
918 return self.networks[port_index].get_vlan()
920 def get_vxlan(self, port_index):
921 """Get the VXLAN id on a given port.

923 port_index: left port is 0, right port is 1
924 return: the vxlan_id or None if there is no vxlan
"""
926 # for port 1 we need to return the VLAN of the last network in the chain
927 # The networks array contains 2 networks for PVP [left, right]
928 # and 3 networks in the case of PVVP [left.middle,right]
930 # this will pick the last item in array
932 return self.networks[port_index].get_vxlan()
934 def get_dest_mac(self, port_index):
935 """Get the dest MAC on a given port.

937 port_index: left port is 0, right port is 1
"""
941 # for right port, use the right port MAC of the last (right most) VNF In chain
942 return self.instances[-1].ports[1].get_mac()
943 # for left port use the left port MAC of the first (left most) VNF in chain
944 return self.instances[0].ports[0].get_mac()
946 def get_network_uuids(self):
947 """Get UUID of networks in this chain from left to right (order is important).

949 :return: list of UUIDs of networks (2 or 3 elements)
"""
951 return [net['id'] for net in self.networks]
953 def get_host_ips(self):
954 """Return the IP adresss(es) of the host compute nodes used for this chain.

956 :return: a list of 1 or 2 IP addresses
"""
958 return [vnf.get_host_ip() for vnf in self.instances]
960 def get_compute_nodes(self):
961 """Return the name of the host compute nodes used for this chain.

963 :return: a list of 1 host name in the az:host format
"""
965 # Since all chains go through the same compute node(s) we can just retrieve the
966 # compute node name(s) for the first chain
967 return [vnf.get_hypervisor_name() for vnf in self.instances]
# delete():
970 """Delete this chain."""
971 for instance in self.instances:
973 # only delete if these are chain private networks (not shared)
974 if not self.manager.config.service_chain_shared_net:
975 for network in self.networks:
# NOTE(review): this extract is line-sampled (gaps in the embedded original
# line numbers); some statements of __init__ are missing as shown
# (e.g. the assignment picking the first list item appears to be dropped).
979 class InstancePlacer(object):
980 """A class to manage instance placement for all VNFs in all chains.

982 A full az string is made of 2 parts AZ and hypervisor.
983 The placement is resolved when both parts az and hypervisor names are known.
"""
986 def __init__(self, req_az, req_hyp):
987 """Create a new instance placer.

989 req_az: requested AZ (can be None or empty if no preference)
990 req_hyp: requested hypervisor name (can be None or empty if no preference)
991 can be any of 'nova:', 'comp1', 'nova:comp1'
992 if it is a list, only the first item is used (backward compatibility in config)

994 req_az is ignored if req_hyp has an az part
995 all other parts beyond the first 2 are ignored in req_hyp
"""
997 # if passed a list just pick the first item
998 if req_hyp and isinstance(req_hyp, list):
1000 # only pick first part of az
1001 if req_az and ':' in req_az:
1002 req_az = req_az.split(':')[0]
1004 # check if requested hypervisor string has an AZ part
1005 split_hyp = req_hyp.split(':')
1006 if len(split_hyp) > 1:
1007 # override the AZ part and hypervisor part
1008 req_az = split_hyp[0]
1009 req_hyp = split_hyp[1]
1010 self.requested_az = req_az if req_az else ''
1011 self.requested_hyp = req_hyp if req_hyp else ''
1012 # Nova can accept AZ only (e.g. 'nova:', use any hypervisor in that AZ)
1013 # or hypervisor only (e.g. ':comp1')
1014 # or both (e.g. 'nova:comp1')
1016 self.required_az = req_az + ':' + self.requested_hyp
1018 # need to insert a ':' so nova knows this is the hypervisor name
1019 self.required_az = ':' + self.requested_hyp if req_hyp else ''
1020 # placement is resolved when both AZ and hypervisor names are known and set
1021 self.resolved = self.requested_az != '' and self.requested_hyp != ''
1023 def get_required_az(self):
1024 """Return the required az (can be resolved or not)."""
1025 return self.required_az
1027 def register_full_name(self, discovered_az):
1028 """Verify compatibility and register a discovered hypervisor full name.
1030 discovered_az: a discovered AZ in az:hypervisor format
1031 return: True if discovered_az is compatible and set
1032 False if discovered_az is not compatible
1035 return discovered_az == self.required_az
1037 # must be in full az format
1038 split_daz = discovered_az.split(':')
1039 if len(split_daz) != 2:
1041 if self.requested_az and self.requested_az != split_daz[0]:
1043 if self.requested_hyp and self.requested_hyp != split_daz[1]:
1045 self.required_az = discovered_az
1046 self.resolved = True
1049 def is_resolved(self):
1050 """Check if the full AZ is resolved.
1052 return: True if resolved
1054 return self.resolved
1057 class ChainManager(object):
1058 """A class for managing all chains for a given run.
1060 Supports openstack or no openstack.
1061 Supports EXT, PVP and PVVP chains.
1064 def __init__(self, chain_runner):
1065 """Create a chain manager to take care of discovering or bringing up the requested chains.
1067 A new instance must be created every time a new config is used.
1068 config: the nfvbench config to use
1069 cred: openstack credentials to use of None if there is no openstack
1071 self.chain_runner = chain_runner
1072 self.config = chain_runner.config
1073 self.generator_config = chain_runner.traffic_client.generator_config
1075 self.image_instance = None
1076 self.image_name = None
1077 # Left and right networks shared across all chains (only if shared)
1082 self.nova_client = None
1083 self.neutron_client = None
1084 self.glance_client = None
1085 self.existing_instances = []
1086 # existing ports keyed by the network uuid they belong to
1087 self._existing_ports = {}
1088 config = self.config
1089 self.openstack = (chain_runner.cred is not None) and not config.l2_loopback
1090 self.chain_count = config.service_chain_count
1094 session = chain_runner.cred.get_session()
1095 self.is_admin = chain_runner.cred.is_admin
1096 self.nova_client = Client(2, session=session)
1097 self.neutron_client = neutronclient.Client('2.0', session=session)
1098 self.glance_client = glanceclient.Client('2', session=session)
1099 self.comp = compute.Compute(self.nova_client,
1103 if config.service_chain != ChainType.EXT:
1104 self.placer = InstancePlacer(config.availability_zone, config.compute_nodes)
1106 self.flavor = ChainFlavor(config.flavor_type, config.flavor, self.comp)
1107 # Get list of all existing instances to check if some instances can be reused
1108 self.existing_instances = self.comp.get_server_list()
1109 # If management port is requested for VMs, create management network (shared)
1110 if self.config.use_management_port:
1111 self.management_network = ChainNetwork(self, self.config.management_network,
1113 # If floating IP is used for management, create and share
1114 # across chains the floating network
1115 if self.config.use_floating_ip:
1116 self.floating_ip_network = ChainNetwork(self,
1117 self.config.floating_network,
1120 # For EXT chains, the external_networks left and right fields in the config
1121 # must be either a prefix string or a list of at least chain-count strings
1122 self._check_extnet('left', config.external_networks.left)
1123 self._check_extnet('right', config.external_networks.right)
1125 # If networks are shared across chains, get the list of networks
1126 if config.service_chain_shared_net:
1127 self.networks = self.get_networks()
1128 # Reuse/create chains
1129 for chain_id in range(self.chain_count):
1130 self.chains.append(Chain(chain_id, self))
1131 if config.service_chain == ChainType.EXT:
1132 # if EXT and no ARP or VxLAN we need to read dest MACs from config
1133 if config.no_arp or config.vxlan:
1134 self._get_dest_macs_from_config()
1136 # Make sure all instances are active before proceeding
1137 self._ensure_instances_active()
1138 # network API call do not show VLANS ID if not admin read from config
1139 if not self.is_admin and config.vlan_tagging:
1140 self._get_config_vlans()
1145 # no openstack, no need to create chains
1146 if not config.l2_loopback and config.no_arp:
1147 self._get_dest_macs_from_config()
1148 if config.vlan_tagging:
1149 # make sure there at least as many entries as chains in each left/right list
1150 if len(config.vlans) != 2:
1151 raise ChainException('The config vlans property must be a list '
1152 'with 2 lists of VLAN IDs')
1153 self._get_config_vlans()
1155 raise ChainException('VxLAN is only supported with OpenStack')
1157 def _check_extnet(self, side, name):
1159 raise ChainException('external_networks.%s must contain a valid network'
1160 ' name prefix or a list of network names' % side)
1161 if isinstance(name, tuple) and len(name) < self.chain_count:
1162 raise ChainException('external_networks.%s %s'
1163 ' must have at least %d names' % (side, name, self.chain_count))
1165 def _get_config_vlans(self):
1168 self.vlans = [self._check_list('vlans[0]', self.config.vlans[0], re_vlan),
1169 self._check_list('vlans[1]', self.config.vlans[1], re_vlan)]
1171 raise ChainException('vlans parameter is mandatory. Set valid value in config file')
1173 def _get_dest_macs_from_config(self):
1174 re_mac = "[0-9a-fA-F]{2}([-:])[0-9a-fA-F]{2}(\\1[0-9a-fA-F]{2}){4}$"
1175 tg_config = self.config.traffic_generator
1176 self.dest_macs = [self._check_list("mac_addrs_left",
1177 tg_config.mac_addrs_left, re_mac),
1178 self._check_list("mac_addrs_right",
1179 tg_config.mac_addrs_right, re_mac)]
1181 def _check_list(self, list_name, ll, pattern):
1182 # if it is a single int or mac, make it a list of 1 int
1183 if isinstance(ll, (int, str)):
1186 if not re.match(pattern, str(item)):
1187 raise ChainException("Invalid format '{item}' specified in {fname}"
1188 .format(item=item, fname=list_name))
1189 # must have at least 1 element
1191 raise ChainException('%s cannot be empty' % (list_name))
1192 # for shared network, if 1 element is passed, replicate it as many times
1194 if self.config.service_chain_shared_net and len(ll) == 1:
1195 ll = [ll[0]] * self.chain_count
1197 # number of elements musty be the number of chains
1198 elif len(ll) < self.chain_count:
1199 raise ChainException('%s=%s must be a list with %d elements per chain' %
1200 (list_name, ll, self.chain_count))
1203 def _setup_image(self):
1204 # To avoid reuploading image in server mode, check whether image_name is set or not
1206 self.image_instance = self.comp.find_image(self.image_name)
1207 if self.image_instance:
1208 LOG.info("Reusing image %s", self.image_name)
1210 image_name_search_pattern = r'(nfvbenchvm-\d+(\.\d+)*).qcow2'
1211 if self.config.vm_image_file:
1212 match = re.search(image_name_search_pattern, self.config.vm_image_file)
1214 self.image_name = match.group(1)
1215 LOG.info('Using provided VM image file %s', self.config.vm_image_file)
1217 raise ChainException('Provided VM image file name %s must start with '
1218 '"nfvbenchvm-<version>"' % self.config.vm_image_file)
1220 pkg_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
1221 for f in os.listdir(pkg_root):
1222 if re.search(image_name_search_pattern, f):
1223 self.config.vm_image_file = pkg_root + '/' + f
1224 self.image_name = f.replace('.qcow2', '')
1225 LOG.info('Found built-in VM image file %s', f)
1228 raise ChainException('Cannot find any built-in VM image file.')
1230 self.image_instance = self.comp.find_image(self.image_name)
1231 if not self.image_instance:
1232 LOG.info('Uploading %s', self.image_name)
1233 res = self.comp.upload_image_via_url(self.image_name,
1234 self.config.vm_image_file)
1237 raise ChainException('Error uploading image %s from %s. ABORTING.' %
1238 (self.image_name, self.config.vm_image_file))
1239 LOG.info('Image %s successfully uploaded.', self.image_name)
1240 self.image_instance = self.comp.find_image(self.image_name)
1242 # image multiqueue property must be set according to the vif_multiqueue_size
1243 # config value (defaults to 1 or disabled)
1244 self.comp.image_set_multiqueue(self.image_instance, self.config.vif_multiqueue_size > 1)
1246 def _ensure_instances_active(self):
1248 for chain in self.chains:
1249 instances.extend(chain.get_instances())
1250 initial_instance_count = len(instances)
1251 # Give additional 10 seconds per VM
1252 max_retries = (self.config.check_traffic_time_sec + (initial_instance_count - 1) * 10 +
1253 self.config.generic_poll_sec - 1) / self.config.generic_poll_sec
1256 remaining_instances = []
1257 for instance in instances:
1258 status = instance.get_status()
1259 if status == 'ACTIVE':
1260 LOG.info('Instance %s is ACTIVE on %s',
1261 instance.name, instance.get_hypervisor_name())
1263 if status == 'ERROR':
1264 raise ChainException('Instance %s creation error: %s' %
1266 instance.instance.fault['message']))
1267 remaining_instances.append(instance)
1268 if not remaining_instances:
1271 if retry >= max_retries:
1272 raise ChainException('Time-out: %d/%d instances still not active' %
1273 (len(remaining_instances), initial_instance_count))
1274 LOG.info('Waiting for %d/%d instance to become active (retry %d/%d)...',
1275 len(remaining_instances), initial_instance_count,
1277 instances = remaining_instances
1278 time.sleep(self.config.generic_poll_sec)
1279 if initial_instance_count:
1280 LOG.info('All instances are active')
1282 def get_networks(self, chain_id=None):
1283 """Get the networks for given EXT, PVP or PVVP chain.
1285 For EXT packet path, these networks must pre-exist.
1286 For PVP, PVVP these networks will be created if they do not exist.
1287 chain_id: to which chain the networks belong.
1288 a None value will mean that these networks are shared by all chains
1291 # the only case where self.networks exists is when the networks are shared
1293 return self.networks
1294 if self.config.service_chain == ChainType.EXT:
1296 ext_net = self.config.external_networks
1297 net_cfg = [AttrDict({'name': name,
1298 'segmentation_id': None,
1299 'physical_network': None})
1300 for name in [ext_net.left, ext_net.right]]
1301 # segmentation id and subnet should be discovered from neutron
1304 int_nets = self.config.internal_networks
1306 if self.config.service_chain == ChainType.PVP:
1307 net_cfg = [int_nets.left, int_nets.right]
1309 net_cfg = [int_nets.left, int_nets.middle, int_nets.right]
1310 if self.config.l3_router:
1311 edge_nets = self.config.edge_networks
1312 net_cfg.append(edge_nets.left)
1313 net_cfg.append(edge_nets.right)
1317 networks.append(ChainNetwork(self, cfg, chain_id, lookup_only=lookup_only))
1319 # need to cleanup all successful networks prior to bailing out
1320 for net in networks:
1325 def get_existing_ports(self):
1326 """Get the list of existing ports.
1328 Lazy retrieval of ports as this can be costly if there are lots of ports and
1329 is only needed when VM and network are being reused.
1331 return: a dict of list of neutron ports indexed by the network uuid they are attached to
1333 Each port is a dict with fields such as below:
1334 {'allowed_address_pairs': [], 'extra_dhcp_opts': [],
1335 'updated_at': '2018-10-06T07:15:35Z', 'device_owner': 'compute:nova',
1336 'revision_number': 10, 'port_security_enabled': False, 'binding:profile': {},
1337 'fixed_ips': [{'subnet_id': '6903a3b3-49a1-4ba4-8259-4a90e7a44b21',
1338 'ip_address': '192.168.1.4'}], 'id': '3dcb9cfa-d82a-4dd1-85a1-fd8284b52d72',
1339 'security_groups': [],
1340 'binding:vif_details': {'vhostuser_socket': '/tmp/3dcb9cfa-d82a-4dd1-85a1-fd8284b52d72',
1341 'vhostuser_mode': 'server'},
1342 'binding:vif_type': 'vhostuser',
1343 'mac_address': 'fa:16:3e:3c:63:04',
1344 'project_id': '977ac76a63d7492f927fa80e86baff4c',
1346 'binding:host_id': 'a20-champagne-compute-1',
1348 'device_id': 'a98e2ad2-5371-4aa5-a356-8264a970ce4b',
1349 'name': 'nfvbench-loop-vm0-0', 'admin_state_up': True,
1350 'network_id': '3ea5fd88-278f-4d9d-b24d-1e443791a055',
1351 'tenant_id': '977ac76a63d7492f927fa80e86baff4c',
1352 'created_at': '2018-10-06T07:15:10Z',
1353 'binding:vnic_type': 'normal'}
1355 if not self._existing_ports:
1356 LOG.info('Loading list of all ports...')
1357 existing_ports = self.neutron_client.list_ports()['ports']
1358 # place all ports in the dict keyed by the port network uuid
1359 for port in existing_ports:
1360 port_list = self._existing_ports.setdefault(port['network_id'], [])
1361 port_list.append(port)
1362 LOG.info("Loaded %d ports attached to %d networks",
1363 len(existing_ports), len(self._existing_ports))
1364 return self._existing_ports
1366 def get_ports_from_network(self, chain_network):
1367 """Get the list of existing ports that belong to a network.
1369 Lazy retrieval of ports as this can be costly if there are lots of ports and
1370 is only needed when VM and network are being reused.
1372 chain_network: a ChainNetwork instance for which attached ports neeed to be retrieved
1373 return: list of neutron ports attached to requested network
1375 return self.get_existing_ports().get(chain_network.get_uuid(), None)
1377 def get_hypervisor_from_mac(self, mac):
1378 """Get the hypervisor that hosts a VM MAC.
1380 mac: MAC address to look for
1381 return: the hypervisor where the matching port runs or None if not found
1383 # _existing_ports is a dict of list of ports indexed by network id
1384 for port_list in list(self.get_existing_ports().values()):
1385 for port in port_list:
1387 if port['mac_address'] == mac:
1388 host_id = port['binding:host_id']
1389 return self.comp.get_hypervisor(host_id)
1394 def get_host_ip_from_mac(self, mac):
1395 """Get the host IP address matching a MAC.
1397 mac: MAC address to look for
1398 return: the IP address of the host where the matching port runs or None if not found
1400 hypervisor = self.get_hypervisor_from_mac(mac)
1402 return hypervisor.host_ip
1405 def get_chain_vlans(self, port_index):
1406 """Get the list of per chain VLAN id on a given port.
1408 port_index: left port is 0, right port is 1
1409 return: a VLAN ID list indexed by the chain index or None if no vlan tagging
1411 if self.chains and self.is_admin:
1412 return [self.chains[chain_index].get_vlan(port_index)
1413 for chain_index in range(self.chain_count)]
1415 return self.vlans[port_index]
1417 def get_chain_vxlans(self, port_index):
1418 """Get the list of per chain VNIs id on a given port.
1420 port_index: left port is 0, right port is 1
1421 return: a VNIs ID list indexed by the chain index or None if no vlan tagging
1423 if self.chains and self.is_admin:
1424 return [self.chains[chain_index].get_vxlan(port_index)
1425 for chain_index in range(self.chain_count)]
1427 raise ChainException('VxLAN is only supported with OpenStack and with admin user')
1429 def get_dest_macs(self, port_index):
1430 """Get the list of per chain dest MACs on a given port.
1432 Should not be called if EXT+ARP is used (in that case the traffic gen will
1433 have the ARP responses back from VNFs with the dest MAC to use).
1435 port_index: left port is 0, right port is 1
1436 return: a list of dest MACs indexed by the chain index
1438 if self.chains and self.config.service_chain != ChainType.EXT:
1439 return [self.chains[chain_index].get_dest_mac(port_index)
1440 for chain_index in range(self.chain_count)]
1441 # no openstack or EXT+no-arp
1442 return self.dest_macs[port_index]
1444 def get_host_ips(self):
1445 """Return the IP adresss(es) of the host compute nodes used for this run.
1447 :return: a list of 1 IP address
1449 # Since all chains go through the same compute node(s) we can just retrieve the
1450 # compute node(s) for the first chain
1452 if self.config.service_chain != ChainType.EXT:
1453 return self.chains[0].get_host_ips()
1454 # in the case of EXT, the compute node must be retrieved from the port
1455 # associated to any of the dest MACs
1456 dst_macs = self.generator_config.get_dest_macs()
1457 # dest MAC on port 0, chain 0
1458 dst_mac = dst_macs[0][0]
1459 host_ip = self.get_host_ip_from_mac(dst_mac)
1461 LOG.info('Found compute node IP for EXT chain: %s', host_ip)
1465 def get_compute_nodes(self):
1466 """Return the name of the host compute nodes used for this run.
1468 :return: a list of 0 or 1 host name in the az:host format
1470 # Since all chains go through the same compute node(s) we can just retrieve the
1471 # compute node name(s) for the first chain
1473 # in the case of EXT, the compute node must be retrieved from the port
1474 # associated to any of the dest MACs
1475 if self.config.service_chain != ChainType.EXT:
1476 return self.chains[0].get_compute_nodes()
1477 # in the case of EXT, the compute node must be retrieved from the port
1478 # associated to any of the dest MACs
1479 dst_macs = self.generator_config.get_dest_macs()
1480 # dest MAC on port 0, chain 0
1481 dst_mac = dst_macs[0][0]
1482 hypervisor = self.get_hypervisor_from_mac(dst_mac)
1484 LOG.info('Found hypervisor for EXT chain: %s', hypervisor.hypervisor_hostname)
1485 return[':' + hypervisor.hypervisor_hostname]
1486 # no openstack = no chains
1490 """Delete resources for all chains."""
1491 for chain in self.chains:
1493 for network in self.networks:
1495 if self.config.use_management_port and hasattr(self, 'management_network'):
1496 self.management_network.delete()
1497 if self.config.use_floating_ip and hasattr(self, 'floating_ip_network'):
1498 self.floating_ip_network.delete()
1500 self.flavor.delete()