2 # Copyright 2018 Cisco Systems, Inc. All rights reserved.
4 # Licensed under the Apache License, Version 2.0 (the "License"); you may
5 # not use this file except in compliance with the License. You may obtain
6 # a copy of the License at
8 # http://www.apache.org/licenses/LICENSE-2.0
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 # License for the specific language governing permissions and limitations
17 # This module takes care of chaining networks, ports and vms
19 """NFVBENCH CHAIN DISCOVERY/STAGING.
21 This module takes care of staging/discovering all resources that are participating in a
22 benchmarking session: flavors, networks, ports, VNF instances.
23 If a resource is discovered with the same name, it will be reused.
24 Otherwise it will be created.
26 ChainManager: manages VM image, flavor, the staging discovery of all chains
28 Chain: manages one chain, has 2 or more networks and 1 or more instances
29 ChainNetwork: manages 1 network in a chain
30 ChainVnf: manages 1 VNF instance in a chain, has 2 ports
31 ChainVnfPort: manages 1 instance port
33 ChainManager-->Chain(*)
34 Chain-->ChainNetwork(*),ChainVnf(*)
35 ChainVnf-->ChainVnfPort(2)
37 Once created/discovered, instances are checked to be in the active state (ready to pass traffic)
38 Configuration parameters that will influence how these resources are staged/related:
39 - openstack or no openstack
42 - number of VNF in each chain (PVP, PVVP)
43 - SRIOV and middle port SRIOV for port types
44 - whether networks are shared across chains or not
46 There is no traffic generation involved in this module.
53 from neutronclient.neutron import client as neutronclient
54 from novaclient.client import Client
56 from attrdict import AttrDict
57 from chain_router import ChainRouter
60 from specs import ChainType
61 # Left and right index for network and port lists
64 # L3 traffic edge networks are at the end of networks list
67 # Name of the VM config file
68 NFVBENCH_CFG_FILENAME = 'nfvbenchvm.conf'
69 # full pathname of the VM config in the VM
70 NFVBENCH_CFG_VM_PATHNAME = os.path.join('/etc/', NFVBENCH_CFG_FILENAME)
71 # full path of the boot shell script template file on the server where nfvbench runs
72 BOOT_SCRIPT_PATHNAME = os.path.join(os.path.dirname(os.path.abspath(__file__)),
74 NFVBENCH_CFG_FILENAME)
77 class ChainException(Exception):
78 """Exception while operating the chains."""
83 class NetworkEncaps(object):
84 """Network encapsulation."""
87 class ChainFlavor(object):
88 """Class to manage the chain flavor."""
90 def __init__(self, flavor_name, flavor_dict, comp):
91 """Create a flavor."""
92 self.name = flavor_name
94 self.flavor = self.comp.find_flavor(flavor_name)
98 LOG.info("Reused flavor '%s'", flavor_name)
100 extra_specs = flavor_dict.pop('extra_specs', None)
102 self.flavor = comp.create_flavor(flavor_name,
105 LOG.info("Created flavor '%s'", flavor_name)
107 self.flavor.set_keys(extra_specs)
110 """Delete this flavor."""
111 if not self.reuse and self.flavor:
113 LOG.info("Flavor '%s' deleted", self.name)
116 class ChainVnfPort(object):
117 """A port associated to one VNF in the chain."""
119 def __init__(self, name, vnf, chain_network, vnic_type):
120 """Create or reuse a port on a given network.
122 if vnf.instance is None the VNF instance is not reused and this ChainVnfPort instance must
124 Otherwise vnf.instance is a reused VNF instance and this ChainVnfPort instance must
125 find an existing port to reuse that matches the port requirements: same attached network,
126 instance, name, vnic type
128 name: name for this port
129 vnf: ChainVNf instance that owns this port
130 chain_network: ChainNetwork instance where this port should attach
131 vnic_type: required vnic type for this port
135 self.manager = vnf.manager
138 self.floating_ip = None
140 # VNF instance is reused, we need to find an existing port that matches this instance
142 # discover ports attached to this instance
143 port_list = self.manager.get_ports_from_network(chain_network)
144 for port in port_list:
145 if port['name'] != name:
147 if port['binding:vnic_type'] != vnic_type:
149 if port['device_id'] == vnf.get_uuid():
151 LOG.info('Reusing existing port %s mac=%s', name, port['mac_address'])
154 raise ChainException('Cannot find matching port')
156 # VNF instance is not created yet, we need to create a new port
160 'network_id': chain_network.get_uuid(),
161 'binding:vnic_type': vnic_type
164 port = self.manager.neutron_client.create_port(body)
165 self.port = port['port']
166 LOG.info('Created port %s', name)
168 self.manager.neutron_client.update_port(self.port['id'], {
170 'security_groups': [],
171 'port_security_enabled': False,
174 LOG.info('Security disabled on port %s', name)
176 LOG.info('Failed to disable security on port %s (ignored)', name)
179 """Get the MAC address for this port."""
180 return self.port['mac_address']
183 """Get the IP address for this port."""
184 return self.port['fixed_ips'][0]['ip_address']
186 def set_floating_ip(self, chain_network):
187 # create and add floating ip to port
189 self.floating_ip = self.manager.neutron_client.create_floatingip({
191 'floating_network_id': chain_network.get_uuid(),
192 'port_id': self.port['id'],
193 'description': 'nfvbench floating ip for port:' + self.port['name'],
195 LOG.info('Floating IP %s created and associated on port %s',
196 self.floating_ip['floating_ip_address'], self.name)
197 return self.floating_ip['floating_ip_address']
199 LOG.info('Failed to created and associated floating ip on port %s (ignored)', self.name)
200 return self.port['fixed_ips'][0]['ip_address']
203 """Delete this port instance."""
204 if self.reuse or not self.port:
206 for _ in range(0, self.manager.config.generic_retry_count):
208 self.manager.neutron_client.delete_port(self.port['id'])
209 LOG.info("Deleted port %s", self.name)
211 self.manager.neutron_client.delete_floatingip(self.floating_ip['id'])
212 LOG.info("Deleted floating IP %s", self.floating_ip['description'])
215 time.sleep(self.manager.config.generic_poll_sec)
216 LOG.error('Unable to delete port: %s', self.name)
219 class ChainNetwork(object):
220 """Could be a shared network across all chains or a chain private network."""
222 def __init__(self, manager, network_config, chain_id=None, lookup_only=False,
224 """Create a network for given chain.
226 network_config: a dict containing the network properties
227 (name, segmentation_id and physical_network)
228 chain_id: to which chain the networks belong.
229 a None value will mean that these networks are shared by all chains
230 suffix: a suffix to add to the network name (if not None)
232 self.manager = manager
234 self.name = network_config.name
236 # the name itself can be either a string or a list of names indexed by chain ID
237 if isinstance(network_config.name, tuple):
238 self.name = network_config.name[chain_id]
240 # network_config.name is a prefix string
241 self.name = network_config.name + str(chain_id)
243 self.name = self.name + suffix
244 self.segmentation_id = self._get_item(network_config.segmentation_id,
245 chain_id, auto_index=True)
246 self.physical_network = self._get_item(network_config.physical_network, chain_id)
251 if manager.config.l3_router and hasattr(network_config, 'router_name'):
252 self.router_name = network_config.router_name
254 self._setup(network_config, lookup_only)
257 LOG.error("Cannot find network %s", self.name)
259 LOG.error("Error creating network %s", self.name)
263 def _get_item(self, item_field, index, auto_index=False):
264 """Retrieve an item from a list or a single value.
266 item_field: can be None, a tuple of a single value
267 index: if None is same as 0, else is the index for a chain
268 auto_index: if true will automatically get the final value by adding the
269 index to the base value (if full list not provided)
271 If the item_field is not a tuple, it is considered same as a tuple with same value at any
273 If a list is provided, its length must be > index
279 if isinstance(item_field, tuple):
281 return item_field[index]
283 raise ChainException("List %s is too short for chain index %d" %
284 (str(item_field), index))
285 # single value is configured
287 return item_field + index
290 def _setup(self, network_config, lookup_only):
291 # Lookup if there is a matching network with same name
292 networks = self.manager.neutron_client.list_networks(name=self.name)
293 if networks['networks']:
294 network = networks['networks'][0]
295 # a network of same name already exists, we need to verify it has the same
297 if self.segmentation_id:
298 if network['provider:segmentation_id'] != self.segmentation_id:
299 raise ChainException("Mismatch of 'segmentation_id' for reused "
300 "network '{net}'. Network has id '{seg_id1}', "
301 "configuration requires '{seg_id2}'."
302 .format(net=self.name,
303 seg_id1=network['provider:segmentation_id'],
304 seg_id2=self.segmentation_id))
306 if self.physical_network:
307 if network['provider:physical_network'] != self.physical_network:
308 raise ChainException("Mismatch of 'physical_network' for reused "
309 "network '{net}'. Network has '{phys1}', "
310 "configuration requires '{phys2}'."
311 .format(net=self.name,
312 phys1=network['provider:physical_network'],
313 phys2=self.physical_network))
315 LOG.info('Reusing existing network %s', self.name)
317 self.network = network
320 raise ChainException('Network %s not found' % self.name)
324 'admin_state_up': True
327 if network_config.network_type:
328 body['network']['provider:network_type'] = network_config.network_type
329 if self.segmentation_id:
330 body['network']['provider:segmentation_id'] = self.segmentation_id
331 if self.physical_network:
332 body['network']['provider:physical_network'] = self.physical_network
333 self.network = self.manager.neutron_client.create_network(body)['network']
334 # create associated subnet, all subnets have the same name (which is ok since
335 # we do not need to address them directly by name)
337 'subnet': {'name': network_config.subnet,
338 'cidr': network_config.cidr,
339 'network_id': self.network['id'],
340 'enable_dhcp': False,
342 'dns_nameservers': []}
344 subnet = self.manager.neutron_client.create_subnet(body)['subnet']
345 # add subnet id to the network dict since it has just been added
346 self.network['subnets'] = [subnet['id']]
347 LOG.info('Created network: %s', self.name)
351 Extract UUID of this network.
353 :return: UUID of this network
355 return self.network['id']
359 Extract vlan for this network.
361 :return: vlan ID for this network
363 if self.network['provider:network_type'] != 'vlan':
364 raise ChainException('Trying to retrieve VLAN id for non VLAN network')
365 return self.network['provider:segmentation_id']
369 Extract VNI for this network.
371 :return: VNI ID for this network
374 return self.network['provider:segmentation_id']
377 """Delete this network."""
378 if not self.reuse and self.network:
379 for retry in range(0, self.manager.config.generic_retry_count):
381 self.manager.neutron_client.delete_network(self.network['id'])
382 LOG.info("Deleted network: %s", self.name)
385 LOG.info('Error deleting network %s (retry %d/%d)...',
388 self.manager.config.generic_retry_count)
389 time.sleep(self.manager.config.generic_poll_sec)
390 LOG.error('Unable to delete network: %s', self.name)
393 class ChainVnf(object):
394 """A class to represent a VNF in a chain."""
396 def __init__(self, chain, vnf_id, networks):
397 """Reuse a VNF instance with same characteristics or create a new VNF instance.
399 chain: the chain where this vnf belongs
400 vnf_id: indicates the index of this vnf in its chain (first vnf=0)
401 networks: the list of all networks (ChainNetwork) of the current chain
403 self.manager = chain.manager
406 self.name = self.manager.config.loop_vm_name + str(chain.chain_id)
407 if len(networks) > 2:
408 # we will have more than 1 VM in each chain
409 self.name += '-' + str(vnf_id)
410 # A list of ports for this chain
411 # There are normally 2 ports carrying traffic (index 0, and index 1) and
412 # potentially multiple idle ports not carrying traffic (index 2 and up)
413 # For example if 7 idle interfaces are requested, the corresp. ports will be
416 self.management_port = None
422 self.idle_networks = []
425 # the vnf_id is conveniently also the starting index in networks
426 # for the left and right networks associated to this VNF
427 if self.manager.config.l3_router:
428 self._setup(networks[vnf_id:vnf_id + 4])
430 self._setup(networks[vnf_id:vnf_id + 2])
432 LOG.error("Error creating VNF %s", self.name)
436 def _get_vm_config(self, remote_mac_pair):
437 config = self.manager.config
438 devices = self.manager.generator_config.devices
441 tg_gateway1_ip = self.routers[LEFT].ports[1]['fixed_ips'][0][
442 'ip_address'] # router edge ip left
443 tg_gateway2_ip = self.routers[RIGHT].ports[1]['fixed_ips'][0][
444 'ip_address'] # router edge ip right
445 tg_mac1 = self.routers[LEFT].ports[1]['mac_address'] # router edge mac left
446 tg_mac2 = self.routers[RIGHT].ports[1]['mac_address'] # router edge mac right
447 # edge cidr mask left
448 vnf_gateway1_cidr = \
449 self.ports[LEFT].get_ip() + self.__get_network_mask(
450 self.manager.config.edge_networks.left.cidr)
451 # edge cidr mask right
452 vnf_gateway2_cidr = \
453 self.ports[RIGHT].get_ip() + self.__get_network_mask(
454 self.manager.config.edge_networks.right.cidr)
455 if config.vm_forwarder != 'vpp':
456 raise ChainException(
457 'L3 router mode imply to set VPP as VM forwarder.'
458 'Please update your config file with: vm_forwarder: vpp')
460 tg_gateway1_ip = devices[LEFT].tg_gateway_ip_addrs
461 tg_gateway2_ip = devices[RIGHT].tg_gateway_ip_addrs
462 tg_mac1 = remote_mac_pair[0]
463 tg_mac2 = remote_mac_pair[1]
465 g1cidr = devices[LEFT].get_gw_ip(
466 self.chain.chain_id) + self.__get_network_mask(
467 self.manager.config.internal_networks.left.cidr)
468 g2cidr = devices[RIGHT].get_gw_ip(
469 self.chain.chain_id) + self.__get_network_mask(
470 self.manager.config.internal_networks.right.cidr)
472 vnf_gateway1_cidr = g1cidr
473 vnf_gateway2_cidr = g2cidr
475 with open(BOOT_SCRIPT_PATHNAME, 'r') as boot_script:
476 content = boot_script.read()
478 'forwarder': config.vm_forwarder,
479 'intf_mac1': self.ports[LEFT].get_mac(),
480 'intf_mac2': self.ports[RIGHT].get_mac(),
481 'tg_gateway1_ip': tg_gateway1_ip,
482 'tg_gateway2_ip': tg_gateway2_ip,
483 'tg_net1': devices[LEFT].ip_addrs,
484 'tg_net2': devices[RIGHT].ip_addrs,
485 'vnf_gateway1_cidr': vnf_gateway1_cidr,
486 'vnf_gateway2_cidr': vnf_gateway2_cidr,
489 'vif_mq_size': config.vif_multiqueue_size,
490 'num_mbufs': config.num_mbufs
492 if self.manager.config.use_management_port:
493 mgmt_ip = self.management_port.port['fixed_ips'][0]['ip_address']
494 mgmt_mask = self.__get_network_mask(self.manager.config.management_network.cidr)
495 vm_config['intf_mgmt_cidr'] = mgmt_ip + mgmt_mask
496 vm_config['intf_mgmt_ip_gw'] = self.manager.config.management_network.gateway
497 vm_config['intf_mac_mgmt'] = self.management_port.port['mac_address']
499 # Interface management config left empty to avoid error in VM spawn
500 # if nfvbench config has values for management network but use_management_port=false
501 vm_config['intf_mgmt_cidr'] = ''
502 vm_config['intf_mgmt_ip_gw'] = ''
503 vm_config['intf_mac_mgmt'] = ''
504 return content.format(**vm_config)
507 def __get_network_mask(network):
508 return '/' + network.split('/')[1]
510 def _get_vnic_type(self, port_index):
511 """Get the right vnic type for given port indexself.
513 If SR-IOV is specified, middle ports in multi-VNF chains
514 can use vswitch or SR-IOV based on config.use_sriov_middle_net
516 if self.manager.config.sriov:
517 chain_length = self.chain.get_length()
518 if self.manager.config.use_sriov_middle_net or chain_length == 1:
520 if self.vnf_id == 0 and port_index == 0:
521 # first VNF in chain must use sriov for left port
523 if (self.vnf_id == chain_length - 1) and (port_index == 1):
524 # last VNF in chain must use sriov for right port
528 def _get_idle_networks_ports(self):
529 """Get the idle networks for PVP or PVVP chain (non shared net only)
531 For EXT packet path or shared net, returns empty list.
532 For PVP, PVVP these networks will be created if they do not exist.
533 chain_id: to which chain the networks belong.
534 a None value will mean that these networks are shared by all chains
538 config = self.manager.config
539 chain_id = self.chain.chain_id
540 idle_interfaces_per_vm = config.idle_interfaces_per_vm
541 if config.service_chain == ChainType.EXT or chain_id is None or \
542 idle_interfaces_per_vm == 0:
545 # Make a copy of the idle networks dict as we may have to modify the
547 idle_network_cfg = AttrDict(config.idle_networks)
548 if idle_network_cfg.segmentation_id:
549 segmentation_id = idle_network_cfg.segmentation_id + \
550 chain_id * idle_interfaces_per_vm
552 segmentation_id = None
554 # create as many idle networks and ports as requested
555 for idle_index in range(idle_interfaces_per_vm):
556 if config.service_chain == ChainType.PVP:
557 suffix = '.%d' % (idle_index)
559 suffix = '.%d.%d' % (self.vnf_id, idle_index)
560 port_name = self.name + '-idle' + str(idle_index)
561 # update the segmentation id based on chain id and idle index
563 idle_network_cfg.segmentation_id = segmentation_id + idle_index
564 port_name = port_name + "." + str(segmentation_id)
566 networks.append(ChainNetwork(self.manager,
570 ports.append(ChainVnfPort(port_name,
572 networks[idle_index],
575 # need to cleanup all successful networks
581 self.idle_networks = networks
582 self.idle_ports = ports
584 def _setup(self, networks):
585 flavor_id = self.manager.flavor.flavor.id
586 # Check if we can reuse an instance with same name
587 for instance in self.manager.existing_instances:
588 if instance.name == self.name:
590 instance_right = RIGHT
591 # In case of L3 traffic instance use edge networks
592 if self.manager.config.l3_router:
593 instance_left = EDGE_LEFT
594 instance_right = EDGE_RIGHT
595 # Verify that other instance characteristics match
596 if instance.flavor['id'] != flavor_id:
597 self._reuse_exception('Flavor mismatch')
598 if instance.status != "ACTIVE":
599 self._reuse_exception('Matching instance is not in ACTIVE state')
600 # The 2 networks for this instance must also be reused
601 if not networks[instance_left].reuse:
602 self._reuse_exception('network %s is new' % networks[instance_left].name)
603 if not networks[instance_right].reuse:
604 self._reuse_exception('network %s is new' % networks[instance_right].name)
605 # instance.networks have the network names as keys:
606 # {'nfvbench-rnet0': ['192.168.2.10'], 'nfvbench-lnet0': ['192.168.1.8']}
607 if networks[instance_left].name not in instance.networks:
608 self._reuse_exception('Left network mismatch')
609 if networks[instance_right].name not in instance.networks:
610 self._reuse_exception('Right network mismatch')
613 self.instance = instance
614 LOG.info('Reusing existing instance %s on %s',
615 self.name, self.get_hypervisor_name())
616 # create management port if needed
617 if self.manager.config.use_management_port:
618 self.management_port = ChainVnfPort(self.name + '-mgmt', self,
619 self.manager.management_network, 'normal')
620 ip = self.management_port.port['fixed_ips'][0]['ip_address']
621 if self.manager.config.use_floating_ip:
622 ip = self.management_port.set_floating_ip(self.manager.floating_ip_network)
623 LOG.info("Management interface will be active using IP: %s, "
624 "and you can connect over SSH with login: nfvbench and password: nfvbench", ip)
625 # create or reuse/discover 2 ports per instance
626 if self.manager.config.l3_router:
628 self.ports.append(ChainVnfPort(self.name + '-' + str(index),
631 self._get_vnic_type(index)))
634 self.ports.append(ChainVnfPort(self.name + '-' + str(index),
637 self._get_vnic_type(index)))
639 # create idle networks and ports only if instance is not reused
640 # if reused, we do not care about idle networks/ports
642 self._get_idle_networks_ports()
644 # Create neutron routers for L3 traffic use case
645 if self.manager.config.l3_router and self.manager.openstack:
646 internal_nets = networks[:2]
647 if self.manager.config.service_chain == ChainType.PVP:
648 edge_nets = networks[2:]
650 edge_nets = networks[3:]
651 subnets_left = [internal_nets[0], edge_nets[0]]
652 routes_left = [{'destination': self.manager.config.traffic_generator.ip_addrs[0],
653 'nexthop': self.manager.config.traffic_generator.tg_gateway_ip_addrs[
655 {'destination': self.manager.config.traffic_generator.ip_addrs[1],
656 'nexthop': self.ports[0].get_ip()}]
658 ChainRouter(self.manager, edge_nets[0].router_name, subnets_left, routes_left))
659 subnets_right = [internal_nets[1], edge_nets[1]]
660 routes_right = [{'destination': self.manager.config.traffic_generator.ip_addrs[0],
661 'nexthop': self.ports[1].get_ip()},
662 {'destination': self.manager.config.traffic_generator.ip_addrs[1],
663 'nexthop': self.manager.config.traffic_generator.tg_gateway_ip_addrs[
666 ChainRouter(self.manager, edge_nets[1].router_name, subnets_right, routes_right))
667 # Overload gateway_ips property with router ip address for ARP and traffic calls
668 self.manager.generator_config.devices[LEFT].set_gw_ip(
669 self.routers[LEFT].ports[0]['fixed_ips'][0]['ip_address']) # router edge ip left)
670 self.manager.generator_config.devices[RIGHT].set_gw_ip(
671 self.routers[RIGHT].ports[0]['fixed_ips'][0]['ip_address']) # router edge ip right)
673 # if no reuse, actual vm creation is deferred after all ports in the chain are created
674 # since we need to know the next mac in a multi-vnf chain
676 def create_vnf(self, remote_mac_pair):
677 """Create the VNF instance if it does not already exist."""
678 if self.instance is None:
680 if self.manager.config.use_management_port:
681 port_ids.append({'port-id': self.management_port.port['id']})
682 port_ids.extend([{'port-id': vnf_port.port['id']} for vnf_port in self.ports])
684 for idle_port in self.idle_ports:
685 port_ids.append({'port-id': idle_port.port['id']})
686 vm_config = self._get_vm_config(remote_mac_pair)
687 az = self.manager.placer.get_required_az()
688 server = self.manager.comp.create_server(self.name,
689 self.manager.image_instance,
690 self.manager.flavor.flavor,
697 files={NFVBENCH_CFG_VM_PATHNAME: vm_config})
699 self.instance = server
700 if self.manager.placer.is_resolved():
701 LOG.info('Created instance %s on %s', self.name, az)
703 # the location is undetermined at this point
704 # self.get_hypervisor_name() will return None
705 LOG.info('Created instance %s - waiting for placement resolution...', self.name)
706 # here we MUST wait until this instance is resolved otherwise subsequent
707 # VNF creation can be placed in other hypervisors!
708 config = self.manager.config
709 max_retries = (config.check_traffic_time_sec +
710 config.generic_poll_sec - 1) / config.generic_poll_sec
712 for retry in range(max_retries):
713 status = self.get_status()
714 if status == 'ACTIVE':
715 hyp_name = self.get_hypervisor_name()
716 LOG.info('Instance %s is active and has been placed on %s',
718 self.manager.placer.register_full_name(hyp_name)
720 if status == 'ERROR':
721 raise ChainException('Instance %s creation error: %s' %
723 self.instance.fault['message']))
724 LOG.info('Waiting for instance %s to become active (retry %d/%d)...',
725 self.name, retry + 1, max_retries + 1)
726 time.sleep(config.generic_poll_sec)
729 LOG.error('Instance %s creation timed out', self.name)
730 raise ChainException('Instance %s creation timed out' % self.name)
733 raise ChainException('Unable to create instance: %s' % (self.name))
735 def _reuse_exception(self, reason):
736 raise ChainException('Instance %s cannot be reused (%s)' % (self.name, reason))
738 def get_status(self):
739 """Get the statis of this instance."""
740 if self.instance.status != 'ACTIVE':
741 self.instance = self.manager.comp.poll_server(self.instance)
742 return self.instance.status
744 def get_hostname(self):
745 """Get the hypervisor host name running this VNF instance."""
746 if self.manager.is_admin:
747 hypervisor_hostname = getattr(self.instance, 'OS-EXT-SRV-ATTR:hypervisor_hostname')
749 hypervisor_hostname = self.manager.config.hypervisor_hostname
750 if not hypervisor_hostname:
751 raise ChainException('Hypervisor hostname parameter is mandatory')
752 return hypervisor_hostname
754 def get_host_ip(self):
755 """Get the IP address of the host where this instance runs.
757 return: the IP address
760 self.host_ip = self.manager.comp.get_hypervisor(self.get_hostname()).host_ip
763 def get_hypervisor_name(self):
764 """Get hypervisor name (az:hostname) for this VNF instance."""
766 if self.manager.is_admin:
767 az = getattr(self.instance, 'OS-EXT-AZ:availability_zone')
769 az = self.manager.config.availability_zone
771 raise ChainException('Availability zone parameter is mandatory')
772 hostname = self.get_hostname()
774 return az + ':' + hostname
779 """Get the uuid for this instance."""
780 return self.instance.id
782 def delete(self, forced=False):
783 """Delete this VNF instance."""
785 LOG.info("Instance %s not deleted (reused)", self.name)
788 self.manager.comp.delete_server(self.instance)
789 LOG.info("Deleted instance %s", self.name)
790 if self.manager.config.use_management_port:
791 self.management_port.delete()
792 for port in self.ports:
794 for port in self.idle_ports:
796 for network in self.idle_networks:
801 """A class to manage a single chain.
803 Can handle any type of chain (EXT, PVP, PVVP)
806 def __init__(self, chain_id, manager):
807 """Create a new chain.
809 chain_id: chain index (first chain is 0)
810 manager: the chain manager that owns all chains
812 self.chain_id = chain_id
813 self.manager = manager
814 self.encaps = manager.encaps
818 self.networks = manager.get_networks(chain_id)
819 # For external chain VNFs can only be discovered from their MAC addresses
820 # either from config or from ARP
821 if manager.config.service_chain != ChainType.EXT:
822 for chain_instance_index in range(self.get_length()):
823 self.instances.append(ChainVnf(self,
824 chain_instance_index,
826 # at this point new VNFs are not created yet but
827 # verify that all discovered VNFs are on the same hypervisor
828 self._check_hypervisors()
829 # now that all VNF ports are created we need to calculate the
830 # left/right remote MAC for each VNF in the chain
831 # before actually creating the VNF itself
832 rem_mac_pairs = self._get_remote_mac_pairs()
833 for instance in self.instances:
834 rem_mac_pair = rem_mac_pairs.pop(0)
835 instance.create_vnf(rem_mac_pair)
840 def _check_hypervisors(self):
841 common_hypervisor = None
842 for instance in self.instances:
843 # get the full hypervizor name (az:compute)
844 hname = instance.get_hypervisor_name()
846 if common_hypervisor:
847 if hname != common_hypervisor:
848 raise ChainException('Discovered instances on different hypervisors:'
849 ' %s and %s' % (hname, common_hypervisor))
851 common_hypervisor = hname
852 if common_hypervisor:
853 # check that the common hypervisor name matchs the requested hypervisor name
854 # and set the name to be used by all future instances (if any)
855 if not self.manager.placer.register_full_name(common_hypervisor):
856 raise ChainException('Discovered hypervisor placement %s is incompatible' %
859 def get_length(self):
860 """Get the number of VNF in the chain."""
861 # Take into account 2 edge networks for routers
862 return len(self.networks) - 3 if self.manager.config.l3_router else len(self.networks) - 1
864 def _get_remote_mac_pairs(self):
865 """Get the list of remote mac pairs for every VNF in the chain.
867 Traverse the chain from left to right and establish the
868 left/right remote MAC for each VNF in the chainself.
871 mac sequence: tg_src_mac, vm0-mac0, vm0-mac1, tg_dst_mac
872 must produce [[tg_src_mac, tg_dst_mac]] or looking at index in mac sequence: [[0, 3]]
873 the mac pair is what the VNF at that position (index 0) sees as next hop mac left and right
876 tg_src_mac, vm0-mac0, vm0-mac1, vm1-mac0, vm1-mac1, tg_dst_mac
877 Must produce the following list:
878 [[tg_src_mac, vm1-mac0], [vm0-mac1, tg_dst_mac]] or index: [[0, 3], [2, 5]]
880 General case with 3 VMs in chain, the list of consecutive macs (left to right):
881 tg_src_mac, vm0-mac0, vm0-mac1, vm1-mac0, vm1-mac1, vm2-mac0, vm2-mac1, tg_dst_mac
882 Must produce the following list:
883 [[tg_src_mac, vm1-mac0], [vm0-mac1, vm2-mac0], [vm1-mac1, tg_dst_mac]]
884 or index: [[0, 3], [2, 5], [4, 7]]
886 The series pattern is pretty clear: [[n, n+3],... ] where n is multiple of 2
888 # line up all mac from left to right
889 mac_seq = [self.manager.generator_config.devices[LEFT].mac]
890 for instance in self.instances:
891 mac_seq.append(instance.ports[0].get_mac())
892 mac_seq.append(instance.ports[1].get_mac())
893 mac_seq.append(self.manager.generator_config.devices[RIGHT].mac)
896 for _ in self.instances:
897 rem_mac_pairs.append([mac_seq[base], mac_seq[base + 3]])
901 def get_instances(self):
902 """Return all instances for this chain."""
903 return self.instances
905 def get_vlan(self, port_index):
906 """Get the VLAN id on a given port.
908 port_index: left port is 0, right port is 1
909 return: the vlan_id or None if there is no vlan tagging
911 # for port 1 we need to return the VLAN of the last network in the chain
912 # The networks array contains 2 networks for PVP [left, right]
913 # and 3 networks in the case of PVVP [left.middle,right]
915 # this will pick the last item in array
917 return self.networks[port_index].get_vlan()
919 def get_vxlan(self, port_index):
920 """Get the VXLAN id on a given port.
922 port_index: left port is 0, right port is 1
923 return: the vxlan_id or None if there is no vxlan
925 # for port 1 we need to return the VLAN of the last network in the chain
926 # The networks array contains 2 networks for PVP [left, right]
927 # and 3 networks in the case of PVVP [left.middle,right]
929 # this will pick the last item in array
931 return self.networks[port_index].get_vxlan()
933 def get_dest_mac(self, port_index):
934 """Get the dest MAC on a given port.
936 port_index: left port is 0, right port is 1
940 # for right port, use the right port MAC of the last (right most) VNF In chain
941 return self.instances[-1].ports[1].get_mac()
942 # for left port use the left port MAC of the first (left most) VNF in chain
943 return self.instances[0].ports[0].get_mac()
945 def get_network_uuids(self):
946 """Get UUID of networks in this chain from left to right (order is important).
948 :return: list of UUIDs of networks (2 or 3 elements)
950 return [net['id'] for net in self.networks]
952 def get_host_ips(self):
953 """Return the IP adresss(es) of the host compute nodes used for this chain.
955 :return: a list of 1 or 2 IP addresses
957 return [vnf.get_host_ip() for vnf in self.instances]
959 def get_compute_nodes(self):
960 """Return the name of the host compute nodes used for this chain.
962 :return: a list of 1 host name in the az:host format
964 # Since all chains go through the same compute node(s) we can just retrieve the
965 # compute node name(s) for the first chain
966 return [vnf.get_hypervisor_name() for vnf in self.instances]
969 """Delete this chain."""
970 for instance in self.instances:
972 # only delete if these are chain private networks (not shared)
973 if not self.manager.config.service_chain_shared_net:
974 for network in self.networks:
class InstancePlacer(object):
    """A class to manage instance placement for all VNFs in all chains.

    A full az string is made of 2 parts AZ and hypervisor.
    The placement is resolved when both parts az and hypervisor names are known.
    """

    def __init__(self, req_az, req_hyp):
        """Create a new instance placer.

        req_az: requested AZ (can be None or empty if no preference)
        req_hyp: requested hypervisor name (can be None or empty if no preference)
                 can be any of 'nova:', 'comp1', 'nova:comp1'
                 if it is a list, only the first item is used (backward compatibility in config)

        req_az is ignored if req_hyp has an az part
        all other parts beyond the first 2 are ignored in req_hyp
        """
        # if passed a list just pick the first item
        if req_hyp and isinstance(req_hyp, list):
            req_hyp = req_hyp[0]
        # only pick first part of az
        if req_az and ':' in req_az:
            req_az = req_az.split(':')[0]
        if req_hyp:
            # check if requested hypervisor string has an AZ part
            split_hyp = req_hyp.split(':')
            if len(split_hyp) > 1:
                # override the AZ part and hypervisor part
                req_az = split_hyp[0]
                req_hyp = split_hyp[1]
        self.requested_az = req_az if req_az else ''
        self.requested_hyp = req_hyp if req_hyp else ''
        # Nova can accept AZ only (e.g. 'nova:', use any hypervisor in that AZ)
        # or hypervisor only (e.g. ':comp1')
        # or both (e.g. 'nova:comp1')
        if req_az:
            self.required_az = req_az + ':' + self.requested_hyp
        else:
            # need to insert a ':' so nova knows this is the hypervisor name
            self.required_az = ':' + self.requested_hyp if req_hyp else ''
        # placement is resolved when both AZ and hypervisor names are known and set
        self.resolved = self.requested_az != '' and self.requested_hyp != ''

    def get_required_az(self):
        """Return the required az (can be resolved or not)."""
        return self.required_az

    def register_full_name(self, discovered_az):
        """Verify compatibility and register a discovered hypervisor full name.

        discovered_az: a discovered AZ in az:hypervisor format
        return: True if discovered_az is compatible and set
                False if discovered_az is not compatible
        """
        if self.resolved:
            # placement is already pinned: only an exact match is compatible
            return discovered_az == self.required_az
        # must be in full az format
        split_daz = discovered_az.split(':')
        if len(split_daz) != 2:
            return False
        if self.requested_az and self.requested_az != split_daz[0]:
            return False
        if self.requested_hyp and self.requested_hyp != split_daz[1]:
            return False
        self.required_az = discovered_az
        self.resolved = True
        return True

    def is_resolved(self):
        """Check if the full AZ is resolved.

        return: True if resolved
        """
        return self.resolved
class ChainManager(object):
    """A class for managing all chains for a given run.

    Supports openstack or no openstack.
    Supports EXT, PVP and PVVP chains.
    """

    def __init__(self, chain_runner):
        """Create a chain manager to take care of discovering or bringing up the requested chains.

        A new instance must be created every time a new config is used.
        config: the nfvbench config to use
        cred: openstack credentials to use of None if there is no openstack

        NOTE(review): reconstructed from an elided source dump - verify against
        upstream nfvbench chaining.py before merging.
        """
        self.chain_runner = chain_runner
        self.config = chain_runner.config
        self.generator_config = chain_runner.traffic_client.generator_config
        self.chains = []
        self.image_instance = None
        self.image_name = None
        # Left and right networks shared across all chains (only if shared)
        self.networks = []
        self.flavor = None
        self.comp = None
        self.nova_client = None
        self.neutron_client = None
        self.glance_client = None
        self.existing_instances = []
        # existing ports keyed by the network uuid they belong to
        self._existing_ports = {}
        config = self.config
        self.openstack = (chain_runner.cred is not None) and not config.l2_loopback
        self.chain_count = config.service_chain_count
        if self.openstack:
            # openstack path: build all API clients from the given credentials
            session = chain_runner.cred.get_session()
            self.is_admin = chain_runner.cred.is_admin
            self.nova_client = Client(2, session=session)
            self.neutron_client = neutronclient.Client('2.0', session=session)
            self.glance_client = glanceclient.Client('2', session=session)
            self.comp = compute.Compute(self.nova_client,
                                        self.glance_client,
                                        config)
            if config.service_chain != ChainType.EXT:
                # VNFs are instantiated by nfvbench: prepare placement, image and flavor
                self.placer = InstancePlacer(config.availability_zone, config.compute_nodes)
                self._setup_image()
                self.flavor = ChainFlavor(config.flavor_type, config.flavor, self.comp)
                # Get list of all existing instances to check if some instances can be reused
                self.existing_instances = self.comp.get_server_list()
                # If management port is requested for VMs, create management network (shared)
                if self.config.use_management_port:
                    self.management_network = ChainNetwork(self, self.config.management_network,
                                                           None, False)
                # If floating IP is used for management, create and share
                # across chains the floating network
                if self.config.use_floating_ip:
                    self.floating_ip_network = ChainNetwork(self,
                                                            self.config.floating_network,
                                                            None, False)
            else:
                # For EXT chains, the external_networks left and right fields in the config
                # must be either a prefix string or a list of at least chain-count strings
                self._check_extnet('left', config.external_networks.left)
                self._check_extnet('right', config.external_networks.right)
            # If networks are shared across chains, get the list of networks
            if config.service_chain_shared_net:
                self.networks = self.get_networks()
            # Reuse/create chains
            for chain_id in range(self.chain_count):
                self.chains.append(Chain(chain_id, self))
            if config.service_chain == ChainType.EXT:
                # if EXT and no ARP or VxLAN we need to read dest MACs from config
                if config.no_arp or config.vxlan:
                    self._get_dest_macs_from_config()
            # Make sure all instances are active before proceeding
            self._ensure_instances_active()
            # network API call do not show VLANS ID if not admin read from config
            if not self.is_admin and config.vlan_tagging:
                self._get_config_vlans()
        else:
            # no openstack, no need to create chains
            if not config.l2_loopback and config.no_arp:
                self._get_dest_macs_from_config()
            if config.vlan_tagging:
                # make sure there at least as many entries as chains in each left/right list
                if len(config.vlans) != 2:
                    raise ChainException('The config vlans property must be a list '
                                         'with 2 lists of VLAN IDs')
                self._get_config_vlans()
            if config.vxlan:
                raise ChainException('VxLAN is only supported with OpenStack')
1156 def _check_extnet(self, side, name):
1158 raise ChainException('external_networks.%s must contain a valid network'
1159 ' name prefix or a list of network names' % side)
1160 if isinstance(name, tuple) and len(name) < self.chain_count:
1161 raise ChainException('external_networks.%s %s'
1162 ' must have at least %d names' % (side, name, self.chain_count))
1164 def _get_config_vlans(self):
1167 self.vlans = [self._check_list('vlans[0]', self.config.vlans[0], re_vlan),
1168 self._check_list('vlans[1]', self.config.vlans[1], re_vlan)]
1170 raise ChainException('vlans parameter is mandatory. Set valid value in config file')
1172 def _get_dest_macs_from_config(self):
1173 re_mac = "[0-9a-fA-F]{2}([-:])[0-9a-fA-F]{2}(\\1[0-9a-fA-F]{2}){4}$"
1174 tg_config = self.config.traffic_generator
1175 self.dest_macs = [self._check_list("mac_addrs_left",
1176 tg_config.mac_addrs_left, re_mac),
1177 self._check_list("mac_addrs_right",
1178 tg_config.mac_addrs_right, re_mac)]
1180 def _check_list(self, list_name, ll, pattern):
1181 # if it is a single int or mac, make it a list of 1 int
1182 if isinstance(ll, (int, str)):
1185 if not re.match(pattern, str(item)):
1186 raise ChainException("Invalid format '{item}' specified in {fname}"
1187 .format(item=item, fname=list_name))
1188 # must have at least 1 element
1190 raise ChainException('%s cannot be empty' % (list_name))
1191 # for shared network, if 1 element is passed, replicate it as many times
1193 if self.config.service_chain_shared_net and len(ll) == 1:
1194 ll = [ll[0]] * self.chain_count
1196 # number of elements musty be the number of chains
1197 elif len(ll) < self.chain_count:
1198 raise ChainException('%s=%s must be a list with %d elements per chain' %
1199 (list_name, ll, self.chain_count))
1202 def _setup_image(self):
1203 # To avoid reuploading image in server mode, check whether image_name is set or not
1205 self.image_instance = self.comp.find_image(self.image_name)
1206 if self.image_instance:
1207 LOG.info("Reusing image %s", self.image_name)
1209 image_name_search_pattern = r'(nfvbenchvm-\d+(\.\d+)*).qcow2'
1210 if self.config.vm_image_file:
1211 match = re.search(image_name_search_pattern, self.config.vm_image_file)
1213 self.image_name = match.group(1)
1214 LOG.info('Using provided VM image file %s', self.config.vm_image_file)
1216 raise ChainException('Provided VM image file name %s must start with '
1217 '"nfvbenchvm-<version>"' % self.config.vm_image_file)
1219 pkg_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
1220 for f in os.listdir(pkg_root):
1221 if re.search(image_name_search_pattern, f):
1222 self.config.vm_image_file = pkg_root + '/' + f
1223 self.image_name = f.replace('.qcow2', '')
1224 LOG.info('Found built-in VM image file %s', f)
1227 raise ChainException('Cannot find any built-in VM image file.')
1229 self.image_instance = self.comp.find_image(self.image_name)
1230 if not self.image_instance:
1231 LOG.info('Uploading %s', self.image_name)
1232 res = self.comp.upload_image_via_url(self.image_name,
1233 self.config.vm_image_file)
1236 raise ChainException('Error uploading image %s from %s. ABORTING.' %
1237 (self.image_name, self.config.vm_image_file))
1238 LOG.info('Image %s successfully uploaded.', self.image_name)
1239 self.image_instance = self.comp.find_image(self.image_name)
1241 # image multiqueue property must be set according to the vif_multiqueue_size
1242 # config value (defaults to 1 or disabled)
1243 self.comp.image_set_multiqueue(self.image_instance, self.config.vif_multiqueue_size > 1)
1245 def _ensure_instances_active(self):
1247 for chain in self.chains:
1248 instances.extend(chain.get_instances())
1249 initial_instance_count = len(instances)
1250 # Give additional 10 seconds per VM
1251 max_retries = (self.config.check_traffic_time_sec + (initial_instance_count - 1) * 10 +
1252 self.config.generic_poll_sec - 1) / self.config.generic_poll_sec
1255 remaining_instances = []
1256 for instance in instances:
1257 status = instance.get_status()
1258 if status == 'ACTIVE':
1259 LOG.info('Instance %s is ACTIVE on %s',
1260 instance.name, instance.get_hypervisor_name())
1262 if status == 'ERROR':
1263 raise ChainException('Instance %s creation error: %s' %
1265 instance.instance.fault['message']))
1266 remaining_instances.append(instance)
1267 if not remaining_instances:
1270 if retry >= max_retries:
1271 raise ChainException('Time-out: %d/%d instances still not active' %
1272 (len(remaining_instances), initial_instance_count))
1273 LOG.info('Waiting for %d/%d instance to become active (retry %d/%d)...',
1274 len(remaining_instances), initial_instance_count,
1276 instances = remaining_instances
1277 time.sleep(self.config.generic_poll_sec)
1278 if initial_instance_count:
1279 LOG.info('All instances are active')
1281 def get_networks(self, chain_id=None):
1282 """Get the networks for given EXT, PVP or PVVP chain.
1284 For EXT packet path, these networks must pre-exist.
1285 For PVP, PVVP these networks will be created if they do not exist.
1286 chain_id: to which chain the networks belong.
1287 a None value will mean that these networks are shared by all chains
1290 # the only case where self.networks exists is when the networks are shared
1292 return self.networks
1293 if self.config.service_chain == ChainType.EXT:
1295 ext_net = self.config.external_networks
1296 net_cfg = [AttrDict({'name': name,
1297 'segmentation_id': None,
1298 'physical_network': None})
1299 for name in [ext_net.left, ext_net.right]]
1300 # segmentation id and subnet should be discovered from neutron
1303 int_nets = self.config.internal_networks
1305 if self.config.service_chain == ChainType.PVP:
1306 net_cfg = [int_nets.left, int_nets.right]
1308 net_cfg = [int_nets.left, int_nets.middle, int_nets.right]
1309 if self.config.l3_router:
1310 edge_nets = self.config.edge_networks
1311 net_cfg.append(edge_nets.left)
1312 net_cfg.append(edge_nets.right)
1316 networks.append(ChainNetwork(self, cfg, chain_id, lookup_only=lookup_only))
1318 # need to cleanup all successful networks prior to bailing out
1319 for net in networks:
1324 def get_existing_ports(self):
1325 """Get the list of existing ports.
1327 Lazy retrieval of ports as this can be costly if there are lots of ports and
1328 is only needed when VM and network are being reused.
1330 return: a dict of list of neutron ports indexed by the network uuid they are attached to
1332 Each port is a dict with fields such as below:
1333 {'allowed_address_pairs': [], 'extra_dhcp_opts': [],
1334 'updated_at': '2018-10-06T07:15:35Z', 'device_owner': 'compute:nova',
1335 'revision_number': 10, 'port_security_enabled': False, 'binding:profile': {},
1336 'fixed_ips': [{'subnet_id': '6903a3b3-49a1-4ba4-8259-4a90e7a44b21',
1337 'ip_address': '192.168.1.4'}], 'id': '3dcb9cfa-d82a-4dd1-85a1-fd8284b52d72',
1338 'security_groups': [],
1339 'binding:vif_details': {'vhostuser_socket': '/tmp/3dcb9cfa-d82a-4dd1-85a1-fd8284b52d72',
1340 'vhostuser_mode': 'server'},
1341 'binding:vif_type': 'vhostuser',
1342 'mac_address': 'fa:16:3e:3c:63:04',
1343 'project_id': '977ac76a63d7492f927fa80e86baff4c',
1345 'binding:host_id': 'a20-champagne-compute-1',
1347 'device_id': 'a98e2ad2-5371-4aa5-a356-8264a970ce4b',
1348 'name': 'nfvbench-loop-vm0-0', 'admin_state_up': True,
1349 'network_id': '3ea5fd88-278f-4d9d-b24d-1e443791a055',
1350 'tenant_id': '977ac76a63d7492f927fa80e86baff4c',
1351 'created_at': '2018-10-06T07:15:10Z',
1352 'binding:vnic_type': 'normal'}
1354 if not self._existing_ports:
1355 LOG.info('Loading list of all ports...')
1356 existing_ports = self.neutron_client.list_ports()['ports']
1357 # place all ports in the dict keyed by the port network uuid
1358 for port in existing_ports:
1359 port_list = self._existing_ports.setdefault(port['network_id'], [])
1360 port_list.append(port)
1361 LOG.info("Loaded %d ports attached to %d networks",
1362 len(existing_ports), len(self._existing_ports))
1363 return self._existing_ports
1365 def get_ports_from_network(self, chain_network):
1366 """Get the list of existing ports that belong to a network.
1368 Lazy retrieval of ports as this can be costly if there are lots of ports and
1369 is only needed when VM and network are being reused.
1371 chain_network: a ChainNetwork instance for which attached ports neeed to be retrieved
1372 return: list of neutron ports attached to requested network
1374 return self.get_existing_ports().get(chain_network.get_uuid(), None)
1376 def get_hypervisor_from_mac(self, mac):
1377 """Get the hypervisor that hosts a VM MAC.
1379 mac: MAC address to look for
1380 return: the hypervisor where the matching port runs or None if not found
1382 # _existing_ports is a dict of list of ports indexed by network id
1383 for port_list in self.get_existing_ports().values():
1384 for port in port_list:
1386 if port['mac_address'] == mac:
1387 host_id = port['binding:host_id']
1388 return self.comp.get_hypervisor(host_id)
1393 def get_host_ip_from_mac(self, mac):
1394 """Get the host IP address matching a MAC.
1396 mac: MAC address to look for
1397 return: the IP address of the host where the matching port runs or None if not found
1399 hypervisor = self.get_hypervisor_from_mac(mac)
1401 return hypervisor.host_ip
1404 def get_chain_vlans(self, port_index):
1405 """Get the list of per chain VLAN id on a given port.
1407 port_index: left port is 0, right port is 1
1408 return: a VLAN ID list indexed by the chain index or None if no vlan tagging
1410 if self.chains and self.is_admin:
1411 return [self.chains[chain_index].get_vlan(port_index)
1412 for chain_index in range(self.chain_count)]
1414 return self.vlans[port_index]
1416 def get_chain_vxlans(self, port_index):
1417 """Get the list of per chain VNIs id on a given port.
1419 port_index: left port is 0, right port is 1
1420 return: a VNIs ID list indexed by the chain index or None if no vlan tagging
1422 if self.chains and self.is_admin:
1423 return [self.chains[chain_index].get_vxlan(port_index)
1424 for chain_index in range(self.chain_count)]
1426 raise ChainException('VxLAN is only supported with OpenStack and with admin user')
1428 def get_dest_macs(self, port_index):
1429 """Get the list of per chain dest MACs on a given port.
1431 Should not be called if EXT+ARP is used (in that case the traffic gen will
1432 have the ARP responses back from VNFs with the dest MAC to use).
1434 port_index: left port is 0, right port is 1
1435 return: a list of dest MACs indexed by the chain index
1437 if self.chains and self.config.service_chain != ChainType.EXT:
1438 return [self.chains[chain_index].get_dest_mac(port_index)
1439 for chain_index in range(self.chain_count)]
1440 # no openstack or EXT+no-arp
1441 return self.dest_macs[port_index]
1443 def get_host_ips(self):
1444 """Return the IP adresss(es) of the host compute nodes used for this run.
1446 :return: a list of 1 IP address
1448 # Since all chains go through the same compute node(s) we can just retrieve the
1449 # compute node(s) for the first chain
1451 if self.config.service_chain != ChainType.EXT:
1452 return self.chains[0].get_host_ips()
1453 # in the case of EXT, the compute node must be retrieved from the port
1454 # associated to any of the dest MACs
1455 dst_macs = self.generator_config.get_dest_macs()
1456 # dest MAC on port 0, chain 0
1457 dst_mac = dst_macs[0][0]
1458 host_ip = self.get_host_ip_from_mac(dst_mac)
1460 LOG.info('Found compute node IP for EXT chain: %s', host_ip)
1464 def get_compute_nodes(self):
1465 """Return the name of the host compute nodes used for this run.
1467 :return: a list of 0 or 1 host name in the az:host format
1469 # Since all chains go through the same compute node(s) we can just retrieve the
1470 # compute node name(s) for the first chain
1472 # in the case of EXT, the compute node must be retrieved from the port
1473 # associated to any of the dest MACs
1474 if self.config.service_chain != ChainType.EXT:
1475 return self.chains[0].get_compute_nodes()
1476 # in the case of EXT, the compute node must be retrieved from the port
1477 # associated to any of the dest MACs
1478 dst_macs = self.generator_config.get_dest_macs()
1479 # dest MAC on port 0, chain 0
1480 dst_mac = dst_macs[0][0]
1481 hypervisor = self.get_hypervisor_from_mac(dst_mac)
1483 LOG.info('Found hypervisor for EXT chain: %s', hypervisor.hypervisor_hostname)
1484 return[':' + hypervisor.hypervisor_hostname]
1485 # no openstack = no chains
1489 """Delete resources for all chains."""
1490 for chain in self.chains:
1492 for network in self.networks:
1494 if self.config.use_management_port and hasattr(self, 'management_network'):
1495 self.management_network.delete()
1496 if self.config.use_floating_ip and hasattr(self, 'floating_ip_network'):
1497 self.floating_ip_network.delete()
1499 self.flavor.delete()