2 # Copyright 2018 Cisco Systems, Inc. All rights reserved.
4 # Licensed under the Apache License, Version 2.0 (the "License"); you may
5 # not use this file except in compliance with the License. You may obtain
6 # a copy of the License at
8 # http://www.apache.org/licenses/LICENSE-2.0
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 # License for the specific language governing permissions and limitations
17 # This module takes care of chaining networks, ports and vms
19 """NFVBENCH CHAIN DISCOVERY/STAGING.
21 This module takes care of staging/discovering all resources that are participating in a
22 benchmarking session: flavors, networks, ports, VNF instances.
23 If a resource is discovered with the same name, it will be reused.
24 Otherwise it will be created.
26 ChainManager: manages VM image, flavor, the staging discovery of all chains
28 Chain: manages one chain, has 2 or more networks and 1 or more instances
29 ChainNetwork: manages 1 network in a chain
30 ChainVnf: manages 1 VNF instance in a chain, has 2 ports
31 ChainVnfPort: manages 1 instance port
33 ChainManager-->Chain(*)
34 Chain-->ChainNetwork(*),ChainVnf(*)
35 ChainVnf-->ChainVnfPort(2)
37 Once created/discovered, instances are checked to be in the active state (ready to pass traffic)
38 Configuration parameters that will influence how these resources are staged/related:
39 - openstack or no openstack
42 - number of VNF in each chain (PVP, PVVP)
43 - SRIOV and middle port SRIOV for port types
44 - whether networks are shared across chains or not
46 There is no traffic generation involved in this module.
53 from neutronclient.neutron import client as neutronclient
54 from novaclient.client import Client
56 from attrdict import AttrDict
59 from specs import ChainType
61 # Left and right index for network and port lists
64 # Name of the VM config file
65 NFVBENCH_CFG_FILENAME = 'nfvbenchvm.conf'
66 # full pathname of the VM config in the VM
67 NFVBENCH_CFG_VM_PATHNAME = os.path.join('/etc/', NFVBENCH_CFG_FILENAME)
68 # full path of the boot shell script template file on the server where nfvbench runs
69 BOOT_SCRIPT_PATHNAME = os.path.join(os.path.dirname(os.path.abspath(__file__)),
71 NFVBENCH_CFG_FILENAME)
class ChainException(Exception):
    """Raised when a chain staging/discovery/cleanup operation fails."""
# NOTE(review): this listing is a partial excerpt — the embedded numbers are the
# original file's line numbers and the class body (the encapsulation fields) is
# not visible here, so no behavior can be documented beyond the class purpose.
79 class NetworkEncaps(object):
80 """Network encapsulation."""
# NOTE(review): partial excerpt — several original lines are missing between the
# numbered lines below (e.g. the reuse flag assignments and the if/else frame),
# so the code is left byte-identical; comments describe only the visible logic.
83 class ChainFlavor(object):
84 """Class to manage the chain flavor."""
86 def __init__(self, flavor_name, flavor_dict, comp):
87 """Create a flavor."""
88 self.name = flavor_name
# Look up an existing flavor by name first; the log below shows the reuse path.
90 self.flavor = self.comp.find_flavor(flavor_name)
94 LOG.info("Reused flavor '%s'", flavor_name)
# Create path: extra_specs are popped from the flavor dict and applied
# separately via set_keys() after the flavor is created.
96 extra_specs = flavor_dict.pop('extra_specs', None)
98 self.flavor = comp.create_flavor(flavor_name,
101 LOG.info("Created flavor '%s'", flavor_name)
103 self.flavor.set_keys(extra_specs)
# delete() fragment: only remove the flavor when it was created by this run
# (self.reuse false) — presumably set in lines not visible here; TODO confirm.
106 """Delete this flavor."""
107 if not self.reuse and self.flavor:
109 LOG.info("Flavor '%s' deleted", self.name)
# NOTE(review): partial excerpt — lines are missing (continue statements, the
# if/else frame around the reuse vs. create paths, the try/except around port
# deletion); code left byte-identical, comments describe visible logic only.
112 class ChainVnfPort(object):
113 """A port associated to one VNF in the chain."""
115 def __init__(self, name, vnf, chain_network, vnic_type):
116 """Create or reuse a port on a given network.
118 if vnf.instance is None the VNF instance is not reused and this ChainVnfPort instance must
120 Otherwise vnf.instance is a reused VNF instance and this ChainVnfPort instance must
121 find an existing port to reuse that matches the port requirements: same attached network,
122 instance, name, vnic type
124 name: name for this port
125 vnf: ChainVNf instance that owns this port
126 chain_network: ChainNetwork instance where this port should attach
127 vnic_type: required vnic type for this port
131 self.manager = vnf.manager
# Reuse path: scan ports on the target network for one matching name,
# vnic type and owning instance uuid; raise if no match is found.
135 # VNF instance is reused, we need to find an existing port that matches this instance
137 # discover ports attached to this instance
138 port_list = self.manager.get_ports_from_network(chain_network)
139 for port in port_list:
140 if port['name'] != name:
142 if port['binding:vnic_type'] != vnic_type:
144 if port['device_id'] == vnf.get_uuid():
146 LOG.info('Reusing existing port %s mac=%s', name, port['mac_address'])
149 raise ChainException('Cannot find matching port')
# Create path: build a neutron port on the chain network, then best-effort
# disable port security (failure is logged and ignored per the message below).
151 # VNF instance is not created yet, we need to create a new port
155 'network_id': chain_network.get_uuid(),
156 'binding:vnic_type': vnic_type
159 port = self.manager.neutron_client.create_port(body)
160 self.port = port['port']
161 LOG.info('Created port %s', name)
163 self.manager.neutron_client.update_port(self.port['id'], {
165 'security_groups': [],
166 'port_security_enabled': False,
169 LOG.info('Security disabled on port %s', name)
171 LOG.info('Failed to disable security on port %s (ignored)', name)
# get_mac() fragment.
174 """Get the MAC address for this port."""
175 return self.port['mac_address']
# delete() fragment: skip reused/absent ports; otherwise retry deletion up to
# config.generic_retry_count times, sleeping generic_poll_sec between tries.
178 """Delete this port instance."""
179 if self.reuse or not self.port:
182 while retry < self.manager.config.generic_retry_count:
184 self.manager.neutron_client.delete_port(self.port['id'])
185 LOG.info("Deleted port %s", self.name)
189 time.sleep(self.manager.config.generic_poll_sec)
190 LOG.error('Unable to delete port: %s', self.name)
# NOTE(review): partial excerpt — if/else frames, try/except blocks and several
# statements are missing between the numbered lines; code left byte-identical,
# comments describe only what the visible lines establish.
193 class ChainNetwork(object):
194 """Could be a shared network across all chains or a chain private network."""
196 def __init__(self, manager, network_config, chain_id=None, lookup_only=False,
198 """Create a network for given chain.
200 network_config: a dict containing the network properties
201 (name, segmentation_id and physical_network)
202 chain_id: to which chain the networks belong.
203 a None value will mean that these networks are shared by all chains
204 suffix: a suffix to add to the network name (if not None)
206 self.manager = manager
208 self.name = network_config.name
210 # the name itself can be either a string or a list of names indexed by chain ID
211 if isinstance(network_config.name, tuple):
212 self.name = network_config.name[chain_id]
214 # network_config.name is a prefix string
215 self.name = network_config.name + str(chain_id)
217 self.name = self.name + suffix
218 self.segmentation_id = self._get_item(network_config.segmentation_id,
219 chain_id, auto_index=True)
220 self.physical_network = self._get_item(network_config.physical_network, chain_id)
# Errors from _setup() are logged with a different message depending on
# whether we were only looking the network up or creating it.
226 self._setup(network_config, lookup_only)
229 LOG.error("Cannot find network %s", self.name)
231 LOG.error("Error creating network %s", self.name)
235 def _get_item(self, item_field, index, auto_index=False):
236 """Retrieve an item from a list or a single value.
238 item_field: can be None, a tuple of a single value
239 index: if None is same as 0, else is the index for a chain
240 auto_index: if true will automatically get the final value by adding the
241 index to the base value (if full list not provided)
243 If the item_field is not a tuple, it is considered same as a tuple with same value at any
245 If a list is provided, its length must be > index
251 if isinstance(item_field, tuple):
253 return item_field[index]
255 raise ChainException("List %s is too short for chain index %d" %
256 (str(item_field), index))
257 # single value is configured
259 return item_field + index
262 def _setup(self, network_config, lookup_only):
263 # Lookup if there is a matching network with same name
264 networks = self.manager.neutron_client.list_networks(name=self.name)
265 if networks['networks']:
266 network = networks['networks'][0]
# Reuse path: a same-name network must match the configured segmentation id
# and physical network, otherwise reuse is rejected with an explicit error.
267 # a network of same name already exists, we need to verify it has the same
269 if self.segmentation_id:
270 if network['provider:segmentation_id'] != self.segmentation_id:
271 raise ChainException("Mismatch of 'segmentation_id' for reused "
272 "network '{net}'. Network has id '{seg_id1}', "
273 "configuration requires '{seg_id2}'."
274 .format(net=self.name,
275 seg_id1=network['provider:segmentation_id'],
276 seg_id2=self.segmentation_id))
278 if self.physical_network:
279 if network['provider:physical_network'] != self.physical_network:
280 raise ChainException("Mismatch of 'physical_network' for reused "
281 "network '{net}'. Network has '{phys1}', "
282 "configuration requires '{phys2}'."
283 .format(net=self.name,
284 phys1=network['provider:physical_network'],
285 phys2=self.physical_network))
287 LOG.info('Reusing existing network %s', self.name)
289 self.network = network
292 raise ChainException('Network %s not found' % self.name)
# Create path: build the network with optional provider attributes, then a
# subnet with DHCP disabled (ports get static config from the VM boot script).
296 'admin_state_up': True
299 if network_config.network_type:
300 body['network']['provider:network_type'] = network_config.network_type
301 if self.segmentation_id:
302 body['network']['provider:segmentation_id'] = self.segmentation_id
303 if self.physical_network:
304 body['network']['provider:physical_network'] = self.physical_network
305 self.network = self.manager.neutron_client.create_network(body)['network']
306 # create associated subnet, all subnets have the same name (which is ok since
307 # we do not need to address them directly by name)
309 'subnet': {'name': network_config.subnet,
310 'cidr': network_config.cidr,
311 'network_id': self.network['id'],
312 'enable_dhcp': False,
314 'dns_nameservers': []}
316 subnet = self.manager.neutron_client.create_subnet(body)['subnet']
317 # add subnet id to the network dict since it has just been added
318 self.network['subnets'] = [subnet['id']]
319 LOG.info('Created network: %s', self.name)
# get_uuid() fragment.
323 Extract UUID of this network.
325 :return: UUID of this network
327 return self.network['id']
# get_vlan() fragment: only valid for provider networks of type 'vlan'.
331 Extract vlan for this network.
333 :return: vlan ID for this network
335 if self.network['provider:network_type'] != 'vlan':
336 raise ChainException('Trying to retrieve VLAN id for non VLAN network')
337 return self.network['provider:segmentation_id']
# get_vxlan() fragment: only valid for vxlan-based network types.
341 Extract VNI for this network.
343 :return: VNI ID for this network
345 if 'vxlan' not in self.network['provider:network_type']:
346 raise ChainException('Trying to retrieve VNI for non VXLAN network')
347 return self.network['provider:segmentation_id']
# delete() fragment: only deletes networks created by this run, with retries.
350 """Delete this network."""
351 if not self.reuse and self.network:
353 while retry < self.manager.config.generic_retry_count:
355 self.manager.neutron_client.delete_network(self.network['id'])
356 LOG.info("Deleted network: %s", self.name)
360 LOG.info('Error deleting network %s (retry %d/%d)...',
363 self.manager.config.generic_retry_count)
364 time.sleep(self.manager.config.generic_poll_sec)
365 LOG.error('Unable to delete network: %s', self.name)
# NOTE(review): partial excerpt — try/except frames, if/else branches, return
# statements and several assignments are missing between the numbered lines;
# code left byte-identical, comments describe only the visible logic.
368 class ChainVnf(object):
369 """A class to represent a VNF in a chain."""
371 def __init__(self, chain, vnf_id, networks):
372 """Reuse a VNF instance with same characteristics or create a new VNF instance.
374 chain: the chain where this vnf belongs
375 vnf_id: indicates the index of this vnf in its chain (first vnf=0)
376 networks: the list of all networks (ChainNetwork) of the current chain
378 self.manager = chain.manager
# Instance name is derived from loop_vm_name + chain id, with a per-VNF
# suffix only when the chain has more than one VM (more than 2 networks).
381 self.name = self.manager.config.loop_vm_name + str(chain.chain_id)
382 if len(networks) > 2:
383 # we will have more than 1 VM in each chain
384 self.name += '-' + str(vnf_id)
385 # A list of ports for this chain
386 # There are normally 2 ports carrying traffic (index 0, and index 1) and
387 # potentially multiple idle ports not carrying traffic (index 2 and up)
388 # For example if 7 idle interfaces are requested, the corresp. ports will be
395 self.idle_networks = []
398 # the vnf_id is conveniently also the starting index in networks
399 # for the left and right networks associated to this VNF
400 self._setup(networks[vnf_id:vnf_id + 2])
402 LOG.error("Error creating VNF %s", self.name)
# Renders the boot script template with per-VNF substitutions (MACs, gateway
# CIDRs, traffic generator addresses) to produce the VM config file content.
406 def _get_vm_config(self, remote_mac_pair):
407 config = self.manager.config
408 devices = self.manager.generator_config.devices
409 with open(BOOT_SCRIPT_PATHNAME, 'r') as boot_script:
410 content = boot_script.read()
411 g1cidr = devices[LEFT].get_gw_ip(self.chain.chain_id) + '/8'
412 g2cidr = devices[RIGHT].get_gw_ip(self.chain.chain_id) + '/8'
414 'forwarder': config.vm_forwarder,
415 'intf_mac1': self.ports[LEFT].get_mac(),
416 'intf_mac2': self.ports[RIGHT].get_mac(),
417 'tg_gateway1_ip': devices[LEFT].tg_gateway_ip_addrs,
418 'tg_gateway2_ip': devices[RIGHT].tg_gateway_ip_addrs,
419 'tg_net1': devices[LEFT].ip_addrs,
420 'tg_net2': devices[RIGHT].ip_addrs,
421 'vnf_gateway1_cidr': g1cidr,
422 'vnf_gateway2_cidr': g2cidr,
423 'tg_mac1': remote_mac_pair[0],
424 'tg_mac2': remote_mac_pair[1],
425 'vif_mq_size': config.vif_multiqueue_size
427 return content.format(**vm_config)
429 def _get_vnic_type(self, port_index):
430 """Get the right vnic type for given port index.
432 If SR-IOV is specified, middle ports in multi-VNF chains
433 can use vswitch or SR-IOV based on config.use_sriov_middle_net
435 if self.manager.config.sriov:
436 chain_length = self.chain.get_length()
437 if self.manager.config.use_sriov_middle_net or chain_length == 1:
439 if self.vnf_id == 0 and port_index == 0:
440 # first VNF in chain must use sriov for left port
442 if (self.vnf_id == chain_length - 1) and (port_index == 1):
443 # last VNF in chain must use sriov for right port
447 def _get_idle_networks_ports(self):
448 """Get the idle networks for PVP or PVVP chain (non shared net only)
450 For EXT packet path or shared net, returns empty list.
451 For PVP, PVVP these networks will be created if they do not exist.
452 chain_id: to which chain the networks belong.
453 a None value will mean that these networks are shared by all chains
457 config = self.manager.config
458 chain_id = self.chain.chain_id
459 idle_interfaces_per_vm = config.idle_interfaces_per_vm
460 if config.service_chain == ChainType.EXT or chain_id is None or \
461 idle_interfaces_per_vm == 0:
464 # Make a copy of the idle networks dict as we may have to modify the
466 idle_network_cfg = AttrDict(config.idle_networks)
# Idle segmentation ids are offset per chain so chains do not collide.
467 if idle_network_cfg.segmentation_id:
468 segmentation_id = idle_network_cfg.segmentation_id + \
469 chain_id * idle_interfaces_per_vm
471 segmentation_id = None
473 # create as many idle networks and ports as requested
474 for idle_index in range(idle_interfaces_per_vm):
475 if config.service_chain == ChainType.PVP:
476 suffix = '.%d' % (idle_index)
478 suffix = '.%d.%d' % (self.vnf_id, idle_index)
479 port_name = self.name + '-idle' + str(idle_index)
480 # update the segmentation id based on chain id and idle index
482 idle_network_cfg.segmentation_id = segmentation_id + idle_index
483 port_name = port_name + "." + str(segmentation_id)
485 networks.append(ChainNetwork(self.manager,
489 ports.append(ChainVnfPort(port_name,
491 networks[idle_index],
494 # need to cleanup all successful networks
500 self.idle_networks = networks
501 self.idle_ports = ports
503 def _setup(self, networks):
504 flavor_id = self.manager.flavor.flavor.id
505 # Check if we can reuse an instance with same name
506 for instance in self.manager.existing_instances:
507 if instance.name == self.name:
# A candidate instance is only reusable when its flavor, state, attached
# networks (which must themselves be reused) all match; any mismatch
# raises via _reuse_exception().
508 # Verify that other instance characteristics match
509 if instance.flavor['id'] != flavor_id:
510 self._reuse_exception('Flavor mismatch')
511 if instance.status != "ACTIVE":
512 self._reuse_exception('Matching instance is not in ACTIVE state')
513 # The 2 networks for this instance must also be reused
514 if not networks[LEFT].reuse:
515 self._reuse_exception('network %s is new' % networks[LEFT].name)
516 if not networks[RIGHT].reuse:
517 self._reuse_exception('network %s is new' % networks[RIGHT].name)
518 # instance.networks have the network names as keys:
519 # {'nfvbench-rnet0': ['192.168.2.10'], 'nfvbench-lnet0': ['192.168.1.8']}
520 if networks[LEFT].name not in instance.networks:
521 self._reuse_exception('Left network mismatch')
522 if networks[RIGHT].name not in instance.networks:
523 self._reuse_exception('Right network mismatch')
526 self.instance = instance
527 LOG.info('Reusing existing instance %s on %s',
528 self.name, self.get_hypervisor_name())
529 # create or reuse/discover 2 ports per instance
530 self.ports = [ChainVnfPort(self.name + '-' + str(index),
533 self._get_vnic_type(index)) for index in [0, 1]]
535 # create idle networks and ports only if instance is not reused
536 # if reused, we do not care about idle networks/ports
538 self._get_idle_networks_ports()
540 # if no reuse, actual vm creation is deferred after all ports in the chain are created
541 # since we need to know the next mac in a multi-vnf chain
543 def create_vnf(self, remote_mac_pair):
544 """Create the VNF instance if it does not already exist."""
545 if self.instance is None:
546 port_ids = [{'port-id': vnf_port.port['id']}
547 for vnf_port in self.ports]
549 for idle_port in self.idle_ports:
550 port_ids.append({'port-id': idle_port.port['id']})
551 vm_config = self._get_vm_config(remote_mac_pair)
552 az = self.manager.placer.get_required_az()
553 server = self.manager.comp.create_server(self.name,
554 self.manager.image_instance,
555 self.manager.flavor.flavor,
562 files={NFVBENCH_CFG_VM_PATHNAME: vm_config})
564 self.instance = server
565 if self.manager.placer.is_resolved():
566 LOG.info('Created instance %s on %s', self.name, az)
568 # the location is undetermined at this point
569 # self.get_hypervisor_name() will return None
570 LOG.info('Created instance %s - waiting for placement resolution...', self.name)
571 # here we MUST wait until this instance is resolved otherwise subsequent
572 # VNF creation can be placed in other hypervisors!
573 config = self.manager.config
# NOTE(review): '/' here is float division on Python 3, which would make
# range(max_retries) fail — presumably this file targets Python 2 or is
# fixed on a line not visible here; verify before porting.
574 max_retries = (config.check_traffic_time_sec +
575 config.generic_poll_sec - 1) / config.generic_poll_sec
577 for retry in range(max_retries):
578 status = self.get_status()
579 if status == 'ACTIVE':
580 hyp_name = self.get_hypervisor_name()
581 LOG.info('Instance %s is active and has been placed on %s',
583 self.manager.placer.register_full_name(hyp_name)
585 if status == 'ERROR':
586 raise ChainException('Instance %s creation error: %s' %
588 self.instance.fault['message']))
589 LOG.info('Waiting for instance %s to become active (retry %d/%d)...',
590 self.name, retry + 1, max_retries + 1)
591 time.sleep(config.generic_poll_sec)
594 LOG.error('Instance %s creation timed out', self.name)
595 raise ChainException('Instance %s creation timed out' % self.name)
598 raise ChainException('Unable to create instance: %s' % (self.name))
600 def _reuse_exception(self, reason):
601 raise ChainException('Instance %s cannot be reused (%s)' % (self.name, reason))
603 def get_status(self):
604 """Get the status of this instance."""
# Poll nova only while the cached status is not yet ACTIVE.
605 if self.instance.status != 'ACTIVE':
606 self.instance = self.manager.comp.poll_server(self.instance)
607 return self.instance.status
609 def get_hostname(self):
610 """Get the hypervisor host name running this VNF instance."""
# Non-admin users cannot read the nova extended attribute, so the host
# name must then come from the config (mandatory in that case).
611 if self.manager.is_admin:
612 hypervisor_hostname = getattr(self.instance, 'OS-EXT-SRV-ATTR:hypervisor_hostname')
614 hypervisor_hostname = self.manager.config.hypervisor_hostname
615 if not hypervisor_hostname:
616 raise ChainException('Hypervisor hostname parameter is mandatory')
617 return hypervisor_hostname
619 def get_host_ip(self):
620 """Get the IP address of the host where this instance runs.
622 return: the IP address
625 self.host_ip = self.manager.comp.get_hypervisor(self.get_hostname()).host_ip
628 def get_hypervisor_name(self):
629 """Get hypervisor name (az:hostname) for this VNF instance."""
631 if self.manager.is_admin:
632 az = getattr(self.instance, 'OS-EXT-AZ:availability_zone')
634 az = self.manager.config.availability_zone
636 raise ChainException('Availability zone parameter is mandatory')
637 hostname = self.get_hostname()
639 return az + ':' + hostname
# get_uuid() fragment (the def line is not visible in this excerpt).
644 """Get the uuid for this instance."""
645 return self.instance.id
647 def delete(self, forced=False):
648 """Delete this VNF instance."""
650 LOG.info("Instance %s not deleted (reused)", self.name)
653 self.manager.comp.delete_server(self.instance)
654 LOG.info("Deleted instance %s", self.name)
# Also delete the associated traffic ports, idle ports and idle networks.
655 for port in self.ports:
657 for port in self.idle_ports:
659 for network in self.idle_networks:
663 """A class to manage a single chain.
665 Can handle any type of chain (EXT, PVP, PVVP)
# NOTE(review): partial excerpt — some initializations (e.g. of self.instances)
# and the error-handling frame around creation are on lines not visible here.
668 def __init__(self, chain_id, manager):
669 """Create a new chain.
671 chain_id: chain index (first chain is 0)
672 manager: the chain manager that owns all chains
674 self.chain_id = chain_id
675 self.manager = manager
676 self.encaps = manager.encaps
680 self.networks = manager.get_networks(chain_id)
681 # For external chain VNFs can only be discovered from their MAC addresses
682 # either from config or from ARP
683 if manager.config.service_chain != ChainType.EXT:
684 for chain_instance_index in range(self.get_length()):
685 self.instances.append(ChainVnf(self,
686 chain_instance_index,
688 # at this point new VNFs are not created yet but
689 # verify that all discovered VNFs are on the same hypervisor
690 self._check_hypervisors()
691 # now that all VNF ports are created we need to calculate the
692 # left/right remote MAC for each VNF in the chain
693 # before actually creating the VNF itself
694 rem_mac_pairs = self._get_remote_mac_pairs()
695 for instance in self.instances:
696 rem_mac_pair = rem_mac_pairs.pop(0)
697 instance.create_vnf(rem_mac_pair)
# Verify all discovered VNF instances of this chain share one hypervisor, then
# register that hypervisor with the placer so future instances land on it too.
# NOTE(review): partial excerpt — the else frame and the tail of the final
# raise are on lines not visible here; code left byte-identical.
702 def _check_hypervisors(self):
703 common_hypervisor = None
704 for instance in self.instances:
705 # get the full hypervisor name (az:compute)
706 hname = instance.get_hypervisor_name()
708 if common_hypervisor:
709 if hname != common_hypervisor:
710 raise ChainException('Discovered instances on different hypervisors:'
711 ' %s and %s' % (hname, common_hypervisor))
713 common_hypervisor = hname
714 if common_hypervisor:
715 # check that the common hypervisor name matches the requested hypervisor name
716 # and set the name to be used by all future instances (if any)
717 if not self.manager.placer.register_full_name(common_hypervisor):
718 raise ChainException('Discovered hypervisor placement %s is incompatible' %
def get_length(self):
    """Return the number of VNFs in this chain.

    A chain always has one more network than it has VNFs
    (2 networks for PVP, 3 for PVVP), hence the -1.
    """
    network_count = len(self.networks)
    return network_count - 1
# NOTE(review): partial excerpt — the initialization and increment of 'base'
# and the final return of rem_mac_pairs are on lines not visible here.
725 def _get_remote_mac_pairs(self):
726 """Get the list of remote mac pairs for every VNF in the chain.
728 Traverse the chain from left to right and establish the
729 left/right remote MAC for each VNF in the chain.
732 mac sequence: tg_src_mac, vm0-mac0, vm0-mac1, tg_dst_mac
733 must produce [[tg_src_mac, tg_dst_mac]] or looking at index in mac sequence: [[0, 3]]
734 the mac pair is what the VNF at that position (index 0) sees as next hop mac left and right
737 tg_src_mac, vm0-mac0, vm0-mac1, vm1-mac0, vm1-mac1, tg_dst_mac
738 Must produce the following list:
739 [[tg_src_mac, vm1-mac0], [vm0-mac1, tg_dst_mac]] or index: [[0, 3], [2, 5]]
741 General case with 3 VMs in chain, the list of consecutive macs (left to right):
742 tg_src_mac, vm0-mac0, vm0-mac1, vm1-mac0, vm1-mac1, vm2-mac0, vm2-mac1, tg_dst_mac
743 Must produce the following list:
744 [[tg_src_mac, vm1-mac0], [vm0-mac1, vm2-mac0], [vm1-mac1, tg_dst_mac]]
745 or index: [[0, 3], [2, 5], [4, 7]]
747 The series pattern is pretty clear: [[n, n+3],... ] where n is multiple of 2
749 # line up all mac from left to right
750 mac_seq = [self.manager.generator_config.devices[LEFT].mac]
751 for instance in self.instances:
752 mac_seq.append(instance.ports[0].get_mac())
753 mac_seq.append(instance.ports[1].get_mac())
754 mac_seq.append(self.manager.generator_config.devices[RIGHT].mac)
# For each VNF, pair mac_seq[base] with mac_seq[base + 3] per the docstring
# pattern above (base presumably advances by 2 on a line not visible here).
757 for _ in self.instances:
758 rem_mac_pairs.append([mac_seq[base], mac_seq[base + 3]])
def get_instances(self):
    """Return the list of ChainVnf instances that make up this chain."""
    chain_instances = self.instances
    return chain_instances
# NOTE(review): partial excerpt — the index adjustment hinted at by the
# comments below (mapping port 1 to the last network) is not visible here.
766 def get_vlan(self, port_index):
767 """Get the VLAN id on a given port.
769 port_index: left port is 0, right port is 1
770 return: the vlan_id or None if there is no vlan tagging
772 # for port 1 we need to return the VLAN of the last network in the chain
773 # The networks array contains 2 networks for PVP [left, right]
774 # and 3 networks in the case of PVVP [left.middle,right]
776 # this will pick the last item in array
778 return self.networks[port_index].get_vlan()
# NOTE(review): partial excerpt — same structure as get_vlan(); the index
# adjustment mentioned in the comments is not visible here.
780 def get_vxlan(self, port_index):
781 """Get the VXLAN id on a given port.
783 port_index: left port is 0, right port is 1
784 return: the vxlan_id or None if there is no vxlan
786 # for port 1 we need to return the VLAN of the last network in the chain
787 # The networks array contains 2 networks for PVP [left, right]
788 # and 3 networks in the case of PVVP [left.middle,right]
790 # this will pick the last item in array
792 return self.networks[port_index].get_vxlan()
# NOTE(review): partial excerpt — the branch selecting between the two returns
# (presumably on port_index) is on lines not visible here.
794 def get_dest_mac(self, port_index):
795 """Get the dest MAC on a given port.
797 port_index: left port is 0, right port is 1
801 # for right port, use the right port MAC of the last (right most) VNF In chain
802 return self.instances[-1].ports[1].get_mac()
803 # for left port use the left port MAC of the first (left most) VNF in chain
804 return self.instances[0].ports[0].get_mac()
# NOTE(review): partial excerpt — the docstring terminator is on a line not
# visible here; code left byte-identical.
806 def get_network_uuids(self):
807 """Get UUID of networks in this chain from left to right (order is important).
809 :return: list of UUIDs of networks (2 or 3 elements)
811 return [net['id'] for net in self.networks]
# NOTE(review): partial excerpt — the docstring terminator is on a line not
# visible here; code left byte-identical.
813 def get_host_ips(self):
814 """Return the IP adresss(es) of the host compute nodes used for this chain.
816 :return: a list of 1 or 2 IP addresses
818 return [vnf.get_host_ip() for vnf in self.instances]
# NOTE(review): partial excerpt — the docstring terminator is on a line not
# visible here; code left byte-identical.
820 def get_compute_nodes(self):
821 """Return the name of the host compute nodes used for this chain.
823 :return: a list of 1 host name in the az:host format
825 # Since all chains go through the same compute node(s) we can just retrieve the
826 # compute node name(s) for the first chain
827 return [vnf.get_hypervisor_name() for vnf in self.instances]
830 """Delete this chain."""
831 for instance in self.instances:
833 # only delete if these are chain private networks (not shared)
834 if not self.manager.config.service_chain_shared_net:
835 for network in self.networks:
# NOTE(review): partial excerpt — if/else frames and several return statements
# are on lines not visible here; code left byte-identical, comments describe
# only the visible logic.
839 class InstancePlacer(object):
840 """A class to manage instance placement for all VNFs in all chains.
842 A full az string is made of 2 parts AZ and hypervisor.
843 The placement is resolved when both parts az and hypervisor names are known.
846 def __init__(self, req_az, req_hyp):
847 """Create a new instance placer.
849 req_az: requested AZ (can be None or empty if no preference)
850 req_hyp: requested hypervisor name (can be None or empty if no preference)
851 can be any of 'nova:', 'comp1', 'nova:comp1'
852 if it is a list, only the first item is used (backward compatibility in config)
854 req_az is ignored if req_hyp has an az part
855 all other parts beyond the first 2 are ignored in req_hyp
857 # if passed a list just pick the first item
858 if req_hyp and isinstance(req_hyp, list):
860 # only pick first part of az
861 if req_az and ':' in req_az:
862 req_az = req_az.split(':')[0]
864 # check if requested hypervisor string has an AZ part
865 split_hyp = req_hyp.split(':')
866 if len(split_hyp) > 1:
867 # override the AZ part and hypervisor part
868 req_az = split_hyp[0]
869 req_hyp = split_hyp[1]
# Normalize to empty strings so string concatenation below is always safe.
870 self.requested_az = req_az if req_az else ''
871 self.requested_hyp = req_hyp if req_hyp else ''
872 # Nova can accept AZ only (e.g. 'nova:', use any hypervisor in that AZ)
873 # or hypervisor only (e.g. ':comp1')
874 # or both (e.g. 'nova:comp1')
876 self.required_az = req_az + ':' + self.requested_hyp
878 # need to insert a ':' so nova knows this is the hypervisor name
879 self.required_az = ':' + self.requested_hyp if req_hyp else ''
880 # placement is resolved when both AZ and hypervisor names are known and set
881 self.resolved = self.requested_az != '' and self.requested_hyp != ''
883 def get_required_az(self):
884 """Return the required az (can be resolved or not)."""
885 return self.required_az
887 def register_full_name(self, discovered_az):
888 """Verify compatibility and register a discovered hypervisor full name.
890 discovered_az: a discovered AZ in az:hypervisor format
891 return: True if discovered_az is compatible and set
892 False if discovered_az is not compatible
# If already resolved, only an exact match of the required az is compatible.
895 return discovered_az == self.required_az
897 # must be in full az format
898 split_daz = discovered_az.split(':')
899 if len(split_daz) != 2:
# Each requested part (AZ, hypervisor), when set, must match the discovered
# counterpart; on success the discovered full name becomes the required az.
901 if self.requested_az and self.requested_az != split_daz[0]:
903 if self.requested_hyp and self.requested_hyp != split_daz[1]:
905 self.required_az = discovered_az
909 def is_resolved(self):
910 """Check if the full AZ is resolved.
912 return: True if resolved
917 class ChainManager(object):
918 """A class for managing all chains for a given run.
920 Supports openstack or no openstack.
921 Supports EXT, PVP and PVVP chains.
924 def __init__(self, chain_runner):
925 """Create a chain manager to take care of discovering or bringing up the requested chains.
927 A new instance must be created every time a new config is used.
928 config: the nfvbench config to use
929 cred: openstack credentials to use of None if there is no openstack
931 self.chain_runner = chain_runner
932 self.config = chain_runner.config
933 self.generator_config = chain_runner.traffic_client.generator_config
935 self.image_instance = None
936 self.image_name = None
937 # Left and right networks shared across all chains (only if shared)
942 self.nova_client = None
943 self.neutron_client = None
944 self.glance_client = None
945 self.existing_instances = []
946 # existing ports keyed by the network uuid they belong to
947 self._existing_ports = {}
949 self.openstack = (chain_runner.cred is not None) and not config.l2_loopback
950 self.chain_count = config.service_chain_count
954 session = chain_runner.cred.get_session()
955 self.is_admin = chain_runner.cred.is_admin
956 self.nova_client = Client(2, session=session)
957 self.neutron_client = neutronclient.Client('2.0', session=session)
958 self.glance_client = glanceclient.Client('2', session=session)
959 self.comp = compute.Compute(self.nova_client,
963 if config.service_chain != ChainType.EXT:
964 self.placer = InstancePlacer(config.availability_zone, config.compute_nodes)
966 self.flavor = ChainFlavor(config.flavor_type, config.flavor, self.comp)
967 # Get list of all existing instances to check if some instances can be reused
968 self.existing_instances = self.comp.get_server_list()
970 # For EXT chains, the external_networks left and right fields in the config
971 # must be either a prefix string or a list of at least chain-count strings
972 self._check_extnet('left', config.external_networks.left)
973 self._check_extnet('right', config.external_networks.right)
975 # If networks are shared across chains, get the list of networks
976 if config.service_chain_shared_net:
977 self.networks = self.get_networks()
978 # Reuse/create chains
979 for chain_id in range(self.chain_count):
980 self.chains.append(Chain(chain_id, self))
981 if config.service_chain == ChainType.EXT:
982 # if EXT and no ARP or VxLAN we need to read dest MACs from config
983 if config.no_arp or config.vxlan:
984 self._get_dest_macs_from_config()
986 # Make sure all instances are active before proceeding
987 self._ensure_instances_active()
988 # network API call do not show VLANS ID if not admin read from config
989 if not self.is_admin and config.vlan_tagging:
990 self._get_config_vlans()
995 # no openstack, no need to create chains
996 if not config.l2_loopback and config.no_arp:
997 self._get_dest_macs_from_config()
998 if config.vlan_tagging:
999 # make sure there at least as many entries as chains in each left/right list
1000 if len(config.vlans) != 2:
1001 raise ChainException('The config vlans property must be a list '
1002 'with 2 lists of VLAN IDs')
1003 self._get_config_vlans()
1005 raise ChainException('VxLAN is only supported with OpenStack')
def _check_extnet(self, side, name):
    """Validate the external network name(s) configured for one side of EXT chains.

    side: 'left' or 'right' (used only to build the error message)
    name: a network name prefix string or a tuple/list of network names
    raises ChainException: if the value is invalid or the list is shorter
        than the number of chains
    """
    # NOTE(review): the guard condition that should precede this raise
    # (likely an emptiness/type check on `name`) is missing from this
    # extract -- as written the method raises unconditionally; confirm
    # against the full source.
    raise ChainException('external_networks.%s must contain a valid network'
                         ' name prefix or a list of network names' % side)
    # a tuple form must provide at least one network name per chain
    if isinstance(name, tuple) and len(name) < self.chain_count:
        raise ChainException('external_networks.%s %s'
                             ' must have at least %d names' % (side, name, self.chain_count))
def _get_config_vlans(self):
    """Load the per-port VLAN ID lists from the config into self.vlans.

    self.vlans[0] / self.vlans[1] are the left / right VLAN ID lists,
    validated by _check_list (one entry per chain).
    raises ChainException: if the vlans config entry is absent or invalid
    """
    # NOTE(review): the `re_vlan` pattern definition and the enclosing
    # `try:` block are missing from this extract -- `re_vlan` is
    # undefined as shown; confirm against the full source.
    self.vlans = [self._check_list('vlans[0]', self.config.vlans[0], re_vlan),
                  self._check_list('vlans[1]', self.config.vlans[1], re_vlan)]
    # NOTE(review): the except clause (likely AttributeError when the
    # config has no vlans attribute) that should guard this raise is
    # missing; as written it raises unconditionally.
    raise ChainException('vlans parameter is mandatory. Set valid value in config file')
def _get_dest_macs_from_config(self):
    """Read the left/right destination MAC lists from the traffic generator config.

    Each entry is validated against a MAC address regex by _check_list and
    the result is stored in self.dest_macs as [left_macs, right_macs].
    raises ChainException: if any MAC is malformed or a list is too short
    """
    mac_regex = "[0-9a-fA-F]{2}([-:])[0-9a-fA-F]{2}(\\1[0-9a-fA-F]{2}){4}$"
    gen_cfg = self.config.traffic_generator
    left_macs = self._check_list("mac_addrs_left", gen_cfg.mac_addrs_left, mac_regex)
    right_macs = self._check_list("mac_addrs_right", gen_cfg.mac_addrs_right, mac_regex)
    self.dest_macs = [left_macs, right_macs]
def _check_list(self, list_name, ll, pattern):
    """Validate a scalar-or-list config value and normalize it to a per-chain list.

    list_name: config entry name, used in error messages
    ll: a single int/str value or a list of values
    pattern: regex each entry (as str) must match
    raises ChainException: on format error, empty value, or too few entries
    """
    # if it is a single int or mac, make it a list of 1 int
    if isinstance(ll, (int, str)):
        # NOTE(review): the scalar-to-list conversion (likely `ll = [ll]`)
        # and the validation loop header (likely `for item in ll:`) are
        # missing from this extract; `item` is undefined as shown.
        if not re.match(pattern, str(item)):
            raise ChainException("Invalid format '{item}' specified in {fname}"
                                 .format(item=item, fname=list_name))
    # must have at least 1 element
    # NOTE(review): the emptiness guard (likely `if not ll:`) that should
    # precede this raise is missing from this extract.
        raise ChainException('%s cannot be empty' % (list_name))
    # for shared network, if 1 element is passed, replicate it as many times
    if self.config.service_chain_shared_net and len(ll) == 1:
        ll = [ll[0]] * self.chain_count
    # number of elements must be the number of chains
    elif len(ll) < self.chain_count:
        raise ChainException('%s=%s must be a list with %d elements per chain' %
                             (list_name, ll, self.chain_count))
    # NOTE(review): the final `return ll` appears to be missing from this
    # extract -- callers (e.g. _get_dest_macs_from_config) use the result.
def _setup_image(self):
    """Find or upload the VM image to use and set self.image_instance.

    Lookup order: reuse an image glance already knows under self.image_name,
    else derive the image name from the configured vm_image_file, else scan
    the package root for a built-in nfvbenchvm-<version>.qcow2 file, then
    upload it if not present in glance.
    raises ChainException: if no image file can be found or the upload fails
    """
    # To avoid reuploading image in server mode, check whether image_name is set or not
    # NOTE(review): a guard (likely `if self.image_name:`) appears to be
    # missing before this lookup -- confirm against the full source.
    self.image_instance = self.comp.find_image(self.image_name)
    if self.image_instance:
        LOG.info("Reusing image %s", self.image_name)
    # NOTE(review): an early return and/or `else:` branch between the
    # reuse case and the search below appears to be missing.
    image_name_search_pattern = r'(nfvbenchvm-\d+(\.\d+)*).qcow2'
    if self.config.vm_image_file:
        match = re.search(image_name_search_pattern, self.config.vm_image_file)
        # NOTE(review): an `if match:` guard is missing -- group(1) is the
        # image name without the .qcow2 extension.
        self.image_name = match.group(1)
        LOG.info('Using provided VM image file %s', self.config.vm_image_file)
        # NOTE(review): an `else:` is missing -- this raise should be the
        # no-match branch; as extracted it raises unconditionally.
        raise ChainException('Provided VM image file name %s must start with '
                             '"nfvbenchvm-<version>"' % self.config.vm_image_file)
    # no vm_image_file configured: scan the package root for a built-in image
    pkg_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    for f in os.listdir(pkg_root):
        if re.search(image_name_search_pattern, f):
            self.config.vm_image_file = pkg_root + '/' + f
            self.image_name = f.replace('.qcow2', '')
            LOG.info('Found built-in VM image file %s', f)
            # NOTE(review): a `break` (and likely a for-else around the
            # raise below) is missing from this extract.
    raise ChainException('Cannot find any built-in VM image file.')
    # upload the image if glance does not already have it
    self.image_instance = self.comp.find_image(self.image_name)
    if not self.image_instance:
        LOG.info('Uploading %s', self.image_name)
        res = self.comp.upload_image_via_url(self.image_name,
                                             self.config.vm_image_file)
        # NOTE(review): the failure check (likely `if not res:`) that
        # should guard this raise is missing from this extract.
        raise ChainException('Error uploading image %s from %s. ABORTING.' %
                             (self.image_name, self.config.vm_image_file))
        LOG.info('Image %s successfully uploaded.', self.image_name)
        self.image_instance = self.comp.find_image(self.image_name)
    # image multiqueue property must be set according to the vif_multiqueue_size
    # config value (defaults to 1 or disabled)
    self.comp.image_set_multiqueue(self.image_instance, self.config.vif_multiqueue_size > 1)
def _ensure_instances_active(self):
    """Poll all chain instances until every one is ACTIVE, or time out.

    raises ChainException: if any instance goes to ERROR state, or if not
        all instances are ACTIVE within check_traffic_time_sec
    """
    # NOTE(review): the accumulator init (likely `instances = []`) is
    # missing from this extract.
    for chain in self.chains:
        instances.extend(chain.get_instances())
    initial_instance_count = len(instances)
    # ceiling division: number of poll iterations within the time budget
    # NOTE(review): plain `/` yields a float under Python 3; `//` would be
    # the safe ceil-division idiom -- confirm the intended Python version.
    max_retries = (self.config.check_traffic_time_sec +
                   self.config.generic_poll_sec - 1) / self.config.generic_poll_sec
    # NOTE(review): the retry counter init and poll loop header (likely
    # `retry = 0` / `while True:`) are missing from this extract.
    remaining_instances = []
    for instance in instances:
        status = instance.get_status()
        if status == 'ACTIVE':
            LOG.info('Instance %s is ACTIVE on %s',
                     instance.name, instance.get_hypervisor_name())
            # NOTE(review): a `continue` is likely missing here, otherwise
            # ACTIVE instances would be re-appended below.
        if status == 'ERROR':
            raise ChainException('Instance %s creation error: %s' %
                                 # NOTE(review): the first tuple element
                                 # (likely `(instance.name,`) is missing
                                 instance.instance.fault['message']))
        remaining_instances.append(instance)
    if not remaining_instances:
        # NOTE(review): the loop exit (likely `break`) is missing here.
    if retry >= max_retries:
        raise ChainException('Time-out: %d/%d instances still not active' %
                             (len(remaining_instances), initial_instance_count))
    LOG.info('Waiting for %d/%d instance to become active (retry %d/%d)...',
             len(remaining_instances), initial_instance_count,
             # NOTE(review): the trailing args (likely `retry, max_retries)`)
             # are missing from this extract; `retry` is undefined as shown.
    instances = remaining_instances
    time.sleep(self.config.generic_poll_sec)
    if initial_instance_count:
        LOG.info('All instances are active')
def get_networks(self, chain_id=None):
    """Get the networks for given EXT, PVP or PVVP chain.

    For EXT packet path, these networks must pre-exist.
    For PVP, PVVP these networks will be created if they do not exist.
    chain_id: to which chain the networks belong.
        a None value will mean that these networks are shared by all chains
    """
    # the only case where self.networks exists is when the networks are shared
    # NOTE(review): the guard (likely `if self.networks:`) before this
    # return is missing from this extract.
    return self.networks
    if self.config.service_chain == ChainType.EXT:
        # EXT chains: only the name is known up front; segmentation id and
        # physical network are filled in later
        ext_net = self.config.external_networks
        net_cfg = [AttrDict({'name': name,
                             'segmentation_id': None,
                             'physical_network': None})
                   for name in [ext_net.left, ext_net.right]]
        # segmentation id and subnet should be discovered from neutron
    # NOTE(review): the `else:` header (and likely a `lookup_only`
    # assignment) for the internal-network branch is missing.
    int_nets = self.config.internal_networks
    if self.config.service_chain == ChainType.PVP:
        # PVP uses 2 networks (left, right)
        net_cfg = [int_nets.left, int_nets.right]
    # NOTE(review): an `else:` is missing -- PVVP uses 3 networks
    net_cfg = [int_nets.left, int_nets.middle, int_nets.right]
    # NOTE(review): the `networks = []` init, the `try:` and the per-config
    # loop header (likely `for cfg in net_cfg:`) are missing from this
    # extract; `networks`, `cfg` and `lookup_only` are undefined as shown.
    networks.append(ChainNetwork(self, cfg, chain_id, lookup_only=lookup_only))
    # need to cleanup all successful networks prior to bailing out
    for net in networks:
        # NOTE(review): the loop body (likely net.delete()), the re-raise
        # and the final `return networks` are missing from this extract.
def get_existing_ports(self):
    """Get the list of existing ports.

    Retrieved lazily and cached in self._existing_ports because listing
    all ports can be costly when many exist; the list is only needed when
    VMs and networks are being reused.

    return: a dict mapping each network uuid to the list of neutron port
            dicts (as returned by neutron list_ports) attached to it,
            e.g. ports carrying 'mac_address', 'binding:host_id',
            'network_id', 'fixed_ips', 'binding:vif_type', ...
    """
    if not self._existing_ports:
        LOG.info('Loading list of all ports...')
        all_ports = self.neutron_client.list_ports()['ports']
        # bucket every port under the uuid of the network it is attached to
        for port in all_ports:
            self._existing_ports.setdefault(port['network_id'], []).append(port)
        LOG.info("Loaded %d ports attached to %d networks",
                 len(all_ports), len(self._existing_ports))
    return self._existing_ports
def get_ports_from_network(self, chain_network):
    """Get the list of existing ports that belong to a network.

    Backed by get_existing_ports(), which lazily loads and caches the full
    port list (costly when there are many ports; only needed when VM and
    network are being reused).

    chain_network: a ChainNetwork instance whose attached ports are wanted
    return: the list of neutron ports attached to the requested network,
            or None if the network uuid is unknown
    """
    ports_by_network = self.get_existing_ports()
    return ports_by_network.get(chain_network.get_uuid(), None)
def get_hypervisor_from_mac(self, mac):
    """Get the hypervisor that hosts a VM MAC.

    mac: MAC address to look for
    return: the hypervisor where the matching port runs or None if not found
    """
    # _existing_ports is a dict of list of ports indexed by network id
    for port_list in self.get_existing_ports().values():
        for port in port_list:
            # NOTE(review): a line is missing just before this check in
            # this extract (possibly a `try:` guarding the key lookups);
            # confirm against the full source.
            if port['mac_address'] == mac:
                host_id = port['binding:host_id']
                return self.comp.get_hypervisor(host_id)
    # NOTE(review): an explicit trailing `return None` appears to be
    # missing from this extract; falling off the end returns None anyway.
def get_host_ip_from_mac(self, mac):
    """Get the host IP address matching a MAC.

    mac: MAC address to look for
    return: the IP address of the host where the matching port runs or None if not found
    """
    hypervisor = self.get_hypervisor_from_mac(mac)
    # NOTE(review): a guard (likely `if hypervisor:`) and the fallback
    # `return None` appear to be missing from this extract -- as shown
    # this would raise AttributeError when no hypervisor matches.
    return hypervisor.host_ip
def get_chain_vlans(self, port_index):
    """Get the list of per chain VLAN id on a given port.

    port_index: left port is 0, right port is 1
    return: a VLAN ID list indexed by the chain index or None if no vlan tagging
    """
    if not (self.chains and self.is_admin):
        # no discovered chains or no admin API access: VLANs come from config
        return self.vlans[port_index]
    # one VLAN per chain, as discovered/created on the chain networks
    vlan_ids = []
    for chain_index in range(self.chain_count):
        vlan_ids.append(self.chains[chain_index].get_vlan(port_index))
    return vlan_ids
def get_chain_vxlans(self, port_index):
    """Get the list of per chain VNIs id on a given port.

    port_index: left port is 0, right port is 1
    return: a VNIs ID list indexed by the chain index or None if no vlan tagging
    """
    if not (self.chains and self.is_admin):
        # VNIs can only be discovered through the admin OpenStack APIs
        raise ChainException('VxLAN is only supported with OpenStack and with admin user')
    # one VNI per chain, as discovered/created on the chain networks
    vni_ids = []
    for chain_index in range(self.chain_count):
        vni_ids.append(self.chains[chain_index].get_vxlan(port_index))
    return vni_ids
def get_dest_macs(self, port_index):
    """Get the list of per chain dest MACs on a given port.

    Should not be called if EXT+ARP is used (in that case the traffic gen will
    have the ARP responses back from VNFs with the dest MAC to use).

    port_index: left port is 0, right port is 1
    return: a list of dest MACs indexed by the chain index
    """
    if self.chains and self.config.service_chain != ChainType.EXT:
        # staged chains: ask each chain for the dest MAC of its VNF port
        macs = []
        for chain_index in range(self.chain_count):
            macs.append(self.chains[chain_index].get_dest_mac(port_index))
        return macs
    # no openstack or EXT+no-arp
    return self.dest_macs[port_index]
def get_host_ips(self):
    """Return the IP address(es) of the host compute nodes used for this run.

    :return: a list of 1 IP address
    """
    # Since all chains go through the same compute node(s) we can just retrieve the
    # compute node(s) for the first chain
    # NOTE(review): a guard on self.chains appears to be missing here --
    # confirm against the full source.
    if self.config.service_chain != ChainType.EXT:
        return self.chains[0].get_host_ips()
    # in the case of EXT, the compute node must be retrieved from the port
    # associated to any of the dest MACs
    dst_macs = self.generator_config.get_dest_macs()
    # dest MAC on port 0, chain 0
    dst_mac = dst_macs[0][0]
    host_ip = self.get_host_ip_from_mac(dst_mac)
    # NOTE(review): a guard (likely `if host_ip:`) and the final
    # `return [host_ip]` appear to be missing from this extract.
    LOG.info('Found compute node IP for EXT chain: %s', host_ip)
def get_compute_nodes(self):
    """Return the name of the host compute nodes used for this run.

    :return: a list of 0 or 1 host name in the az:host format
    """
    # Since all chains go through the same compute node(s) we can just retrieve the
    # compute node name(s) for the first chain
    # in the case of EXT, the compute node must be retrieved from the port
    # associated to any of the dest MACs
    # NOTE(review): a guard on self.chains appears to be missing here --
    # confirm against the full source.
    if self.config.service_chain != ChainType.EXT:
        return self.chains[0].get_compute_nodes()
    # in the case of EXT, the compute node must be retrieved from the port
    # associated to any of the dest MACs
    dst_macs = self.generator_config.get_dest_macs()
    # dest MAC on port 0, chain 0
    dst_mac = dst_macs[0][0]
    hypervisor = self.get_hypervisor_from_mac(dst_mac)
    # NOTE(review): a guard (likely `if hypervisor:`) appears to be
    # missing here -- as shown this would raise AttributeError when no
    # hypervisor matches.
    LOG.info('Found hypervisor for EXT chain: %s', hypervisor.hypervisor_hostname)
    # hostname is returned with an empty az prefix (az:host format)
    return[':' + hypervisor.hypervisor_hostname]
    # no openstack = no chains
    # NOTE(review): the fallback (likely `return []`) appears to be
    # missing from this extract.
# NOTE(review): the `def delete(self):` header is missing from this
# extract -- confirm against the full source.
"""Delete resources for all chains."""
for chain in self.chains:
    # NOTE(review): loop body missing (presumably chain.delete())
for network in self.networks:
    # NOTE(review): loop body missing (presumably network.delete());
    # a guard on self.flavor may also be missing before the call below
self.flavor.delete()