2 # Copyright 2018 Cisco Systems, Inc. All rights reserved.
4 # Licensed under the Apache License, Version 2.0 (the "License"); you may
5 # not use this file except in compliance with the License. You may obtain
6 # a copy of the License at
8 # http://www.apache.org/licenses/LICENSE-2.0
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 # License for the specific language governing permissions and limitations
17 # This module takes care of chaining networks, ports and vms
19 """NFVBENCH CHAIN DISCOVERY/STAGING.
21 This module takes care of staging/discovering all resources that are participating in a
22 benchmarking session: flavors, networks, ports, VNF instances.
23 If a resource is discovered with the same name, it will be reused.
24 Otherwise it will be created.
26 ChainManager: manages VM image, flavor, the staging discovery of all chains
28 Chain: manages one chain, has 2 or more networks and 1 or more instances
29 ChainNetwork: manages 1 network in a chain
30 ChainVnf: manages 1 VNF instance in a chain, has 2 ports
31 ChainVnfPort: manages 1 instance port
33 ChainManager-->Chain(*)
34 Chain-->ChainNetwork(*),ChainVnf(*)
35 ChainVnf-->ChainVnfPort(2)
37 Once created/discovered, instances are checked to be in the active state (ready to pass traffic)
38 Configuration parameters that will influence how these resources are staged/related:
39 - openstack or no openstack
42 - number of VNF in each chain (PVP, PVVP)
43 - SRIOV and middle port SRIOV for port types
44 - whether networks are shared across chains or not
There is no traffic generation involved in this module.
53 from neutronclient.neutron import client as neutronclient
54 from novaclient.client import Client
56 from attrdict import AttrDict
57 from chain_router import ChainRouter
60 from specs import ChainType
61 # Left and right index for network and port lists
64 # L3 traffic edge networks are at the end of networks list
67 # Name of the VM config file
68 NFVBENCH_CFG_FILENAME = 'nfvbenchvm.conf'
# full pathname of the VM config in the VM
70 NFVBENCH_CFG_VM_PATHNAME = os.path.join('/etc/', NFVBENCH_CFG_FILENAME)
71 # full path of the boot shell script template file on the server where nfvbench runs
72 BOOT_SCRIPT_PATHNAME = os.path.join(os.path.dirname(os.path.abspath(__file__)),
74 NFVBENCH_CFG_FILENAME)
class ChainException(Exception):
    """Raised when an error occurs while staging or operating a chain."""
class NetworkEncaps(object):
    """Network encapsulation."""
    # NOTE(review): no attributes or methods are visible for this class in
    # this capture; it may be an empty placeholder/base class -- confirm
    # against the full source.
class ChainFlavor(object):
    """Class to manage the chain flavor."""

    def __init__(self, flavor_name, flavor_dict, comp):
        """Create a flavor.

        flavor_name: name of the flavor to look up or create
        flavor_dict: flavor properties used when the flavor must be created
        comp: compute helper used to find/create the flavor
        """
        self.name = flavor_name
        # NOTE(review): lines are elided from this capture here (e.g. the
        # assignment of self.comp and the reuse/create branching around the
        # statements below) -- verify against the full source.
        self.flavor = self.comp.find_flavor(flavor_name)
        LOG.info("Reused flavor '%s'", flavor_name)
        # extra_specs (if present) are removed from the creation properties
        # and applied separately after creation via set_keys()
        extra_specs = flavor_dict.pop('extra_specs', None)
        self.flavor = comp.create_flavor(flavor_name,
        LOG.info("Created flavor '%s'", flavor_name)
        self.flavor.set_keys(extra_specs)

    # NOTE(review): the 'def delete(self):' header appears elided here.
        """Delete this flavor."""
        # only delete flavors this run created; reused flavors are kept
        if not self.reuse and self.flavor:
            LOG.info("Flavor '%s' deleted", self.name)
class ChainVnfPort(object):
    """A port associated to one VNF in the chain."""

    def __init__(self, name, vnf, chain_network, vnic_type):
        """Create or reuse a port on a given network.

        if vnf.instance is None the VNF instance is not reused and this ChainVnfPort instance must
        create a new port.
        Otherwise vnf.instance is a reused VNF instance and this ChainVnfPort instance must
        find an existing port to reuse that matches the port requirements: same attached network,
        instance, name, vnic type

        name: name for this port
        vnf: ChainVNf instance that owns this port
        chain_network: ChainNetwork instance where this port should attach
        vnic_type: required vnic type for this port
        """
        self.manager = vnf.manager
        # NOTE(review): lines are elided throughout this class in this capture
        # (attribute initialization, if/else and try/except scaffolding, and
        # some method 'def' headers) -- verify against the full source.
        # VNF instance is reused, we need to find an existing port that matches this instance
        # discover ports attached to this instance
        port_list = self.manager.get_ports_from_network(chain_network)
        for port in port_list:
            # a reusable port must match on name, vnic type and owning instance
            if port['name'] != name:
            if port['binding:vnic_type'] != vnic_type:
            if port['device_id'] == vnf.get_uuid():
                LOG.info('Reusing existing port %s mac=%s', name, port['mac_address'])
        raise ChainException('Cannot find matching port')
        # VNF instance is not created yet, we need to create a new port
        'network_id': chain_network.get_uuid(),
        'binding:vnic_type': vnic_type
        port = self.manager.neutron_client.create_port(body)
        self.port = port['port']
        LOG.info('Created port %s', name)
        # best effort: clear security groups / port security on the new port
        # (failure is logged and ignored, per the two LOG calls below)
        self.manager.neutron_client.update_port(self.port['id'], {
            'security_groups': [],
            'port_security_enabled': False,
        LOG.info('Security disabled on port %s', name)
        LOG.info('Failed to disable security on port %s (ignored)', name)

    # NOTE(review): the 'def get_mac(self):' header appears elided here.
        """Get the MAC address for this port."""
        return self.port['mac_address']

    # NOTE(review): the 'def get_ip(self):' header appears elided here.
        """Get the IP address for this port."""
        return self.port['fixed_ips'][0]['ip_address']

    # NOTE(review): the 'def delete(self):' header appears elided here.
        """Delete this port instance."""
        # nothing to delete for reused or never-created ports
        if self.reuse or not self.port:
        # retry the delete up to generic_retry_count times, sleeping
        # generic_poll_sec between attempts
        while retry < self.manager.config.generic_retry_count:
            self.manager.neutron_client.delete_port(self.port['id'])
            LOG.info("Deleted port %s", self.name)
            time.sleep(self.manager.config.generic_poll_sec)
        LOG.error('Unable to delete port: %s', self.name)
class ChainNetwork(object):
    """Could be a shared network across all chains or a chain private network."""

    def __init__(self, manager, network_config, chain_id=None, lookup_only=False,
        """Create a network for given chain.

        network_config: a dict containing the network properties
        (name, segmentation_id and physical_network)
        chain_id: to which chain the networks belong.
        a None value will mean that these networks are shared by all chains
        suffix: a suffix to add to the network name (if not None)
        """
        self.manager = manager
        # NOTE(review): lines are elided throughout this class in this capture
        # (if/else and try/except scaffolding, request body dicts and some
        # method 'def' headers) -- verify against the full source.
        self.name = network_config.name
        # the name itself can be either a string or a list of names indexed by chain ID
        if isinstance(network_config.name, tuple):
            self.name = network_config.name[chain_id]
        # network_config.name is a prefix string
        self.name = network_config.name + str(chain_id)
        self.name = self.name + suffix
        self.segmentation_id = self._get_item(network_config.segmentation_id,
                                              chain_id, auto_index=True)
        self.physical_network = self._get_item(network_config.physical_network, chain_id)
        # router name only applies in L3 router mode
        if manager.config.l3_router and hasattr(network_config, 'router_name'):
            self.router_name = network_config.router_name
        self._setup(network_config, lookup_only)
        LOG.error("Cannot find network %s", self.name)
        LOG.error("Error creating network %s", self.name)

    def _get_item(self, item_field, index, auto_index=False):
        """Retrieve an item from a list or a single value.

        item_field: can be None, a tuple of a single value
        index: if None is same as 0, else is the index for a chain
        auto_index: if true will automatically get the final value by adding the
        index to the base value (if full list not provided)

        If the item_field is not a tuple, it is considered same as a tuple with same value at any
        index.
        If a list is provided, its length must be > index
        """
        if isinstance(item_field, tuple):
            return item_field[index]
            raise ChainException("List %s is too short for chain index %d" %
                                 (str(item_field), index))
        # single value is configured
        return item_field + index

    def _setup(self, network_config, lookup_only):
        """Reuse a network with the same name or create a new network+subnet."""
        # Lookup if there is a matching network with same name
        networks = self.manager.neutron_client.list_networks(name=self.name)
        if networks['networks']:
            network = networks['networks'][0]
            # a network of same name already exists, we need to verify it has the same
            if self.segmentation_id:
                if network['provider:segmentation_id'] != self.segmentation_id:
                    raise ChainException("Mismatch of 'segmentation_id' for reused "
                                         "network '{net}'. Network has id '{seg_id1}', "
                                         "configuration requires '{seg_id2}'."
                                         .format(net=self.name,
                                                 seg_id1=network['provider:segmentation_id'],
                                                 seg_id2=self.segmentation_id))
            if self.physical_network:
                if network['provider:physical_network'] != self.physical_network:
                    raise ChainException("Mismatch of 'physical_network' for reused "
                                         "network '{net}'. Network has '{phys1}', "
                                         "configuration requires '{phys2}'."
                                         .format(net=self.name,
                                                 phys1=network['provider:physical_network'],
                                                 phys2=self.physical_network))
            LOG.info('Reusing existing network %s', self.name)
            self.network = network
            raise ChainException('Network %s not found' % self.name)
            'admin_state_up': True
        # only set provider attributes that are explicitly configured
        if network_config.network_type:
            body['network']['provider:network_type'] = network_config.network_type
        if self.segmentation_id:
            body['network']['provider:segmentation_id'] = self.segmentation_id
        if self.physical_network:
            body['network']['provider:physical_network'] = self.physical_network
        self.network = self.manager.neutron_client.create_network(body)['network']
        # create associated subnet, all subnets have the same name (which is ok since
        # we do not need to address them directly by name)
        'subnet': {'name': network_config.subnet,
                   'cidr': network_config.cidr,
                   'network_id': self.network['id'],
                   'enable_dhcp': False,
                   'dns_nameservers': []}
        subnet = self.manager.neutron_client.create_subnet(body)['subnet']
        # add subnet id to the network dict since it has just been added
        self.network['subnets'] = [subnet['id']]
        LOG.info('Created network: %s', self.name)

    # NOTE(review): the 'def get_uuid(self):' header and the docstring quote
    # characters appear elided here.
        Extract UUID of this network.

        :return: UUID of this network
        return self.network['id']

    # NOTE(review): the 'def get_vlan(self):' header appears elided here.
        Extract vlan for this network.

        :return: vlan ID for this network
        if self.network['provider:network_type'] != 'vlan':
            raise ChainException('Trying to retrieve VLAN id for non VLAN network')
        return self.network['provider:segmentation_id']

    # NOTE(review): the 'def get_vxlan(self):' header appears elided here.
        Extract VNI for this network.

        :return: VNI ID for this network
        return self.network['provider:segmentation_id']

    # NOTE(review): the 'def delete(self):' header appears elided here.
        """Delete this network."""
        # only delete networks this run created; reused networks are kept
        if not self.reuse and self.network:
            # retry, sleeping generic_poll_sec between attempts
            while retry < self.manager.config.generic_retry_count:
                self.manager.neutron_client.delete_network(self.network['id'])
                LOG.info("Deleted network: %s", self.name)
                LOG.info('Error deleting network %s (retry %d/%d)...',
                         self.manager.config.generic_retry_count)
                time.sleep(self.manager.config.generic_poll_sec)
        LOG.error('Unable to delete network: %s', self.name)
class ChainVnf(object):
    """A class to represent a VNF in a chain."""

    def __init__(self, chain, vnf_id, networks):
        """Reuse a VNF instance with same characteristics or create a new VNF instance.

        chain: the chain where this vnf belongs
        vnf_id: indicates the index of this vnf in its chain (first vnf=0)
        networks: the list of all networks (ChainNetwork) of the current chain
        """
        self.manager = chain.manager
        # NOTE(review): lines are elided throughout this class in this capture
        # (attribute initializations, if/else and try/except scaffolding,
        # some call-argument lists); verify each method against the full source.
        self.name = self.manager.config.loop_vm_name + str(chain.chain_id)
        if len(networks) > 2:
            # we will have more than 1 VM in each chain
            self.name += '-' + str(vnf_id)
        # A list of ports for this chain
        # There are normally 2 ports carrying traffic (index 0, and index 1) and
        # potentially multiple idle ports not carrying traffic (index 2 and up)
        # For example if 7 idle interfaces are requested, the corresp. ports will be
        self.idle_networks = []
        # the vnf_id is conveniently also the starting index in networks
        # for the left and right networks associated to this VNF
        if self.manager.config.l3_router:
            # L3 mode: the slice also covers the 2 extra edge networks
            self._setup(networks[vnf_id:vnf_id + 4])
        self._setup(networks[vnf_id:vnf_id + 2])
        LOG.error("Error creating VNF %s", self.name)

    def _get_vm_config(self, remote_mac_pair):
        """Build the nfvbenchvm boot config content for this VNF.

        remote_mac_pair: the (left, right) next-hop MACs seen by this VNF
        """
        config = self.manager.config
        devices = self.manager.generator_config.devices
        tg_gateway1_ip = self.routers[LEFT].ports[1]['fixed_ips'][0][
            'ip_address']  # router edge ip left
        tg_gateway2_ip = self.routers[RIGHT].ports[1]['fixed_ips'][0][
            'ip_address']  # router edge ip right
        tg_mac1 = self.routers[LEFT].ports[1]['mac_address']  # router edge mac left
        tg_mac2 = self.routers[RIGHT].ports[1]['mac_address']  # router edge mac right
        # edge cidr mask left
        vnf_gateway1_cidr = \
            self.ports[LEFT].get_ip() + self.manager.config.edge_networks.left.cidr[-3:]
        # edge cidr mask right
        vnf_gateway2_cidr = \
            self.ports[RIGHT].get_ip() + self.manager.config.edge_networks.right.cidr[-3:]
        # L3 router mode only works with the VPP forwarder
        if config.vm_forwarder != 'vpp':
            raise ChainException(
                'L3 router mode imply to set VPP as VM forwarder.'
                'Please update your config file with: vm_forwarder: vpp')
        tg_gateway1_ip = devices[LEFT].tg_gateway_ip_addrs
        tg_gateway2_ip = devices[RIGHT].tg_gateway_ip_addrs
        tg_mac1 = remote_mac_pair[0]
        tg_mac2 = remote_mac_pair[1]
        g1cidr = devices[LEFT].get_gw_ip(
            self.chain.chain_id) + self.manager.config.internal_networks.left.cidr[-3:]
        g2cidr = devices[RIGHT].get_gw_ip(
            self.chain.chain_id) + self.manager.config.internal_networks.right.cidr[-3:]
        vnf_gateway1_cidr = g1cidr
        vnf_gateway2_cidr = g2cidr
        # fill the boot script template with this VNF's addressing details
        with open(BOOT_SCRIPT_PATHNAME, 'r') as boot_script:
            content = boot_script.read()
        'forwarder': config.vm_forwarder,
        'intf_mac1': self.ports[LEFT].get_mac(),
        'intf_mac2': self.ports[RIGHT].get_mac(),
        'tg_gateway1_ip': tg_gateway1_ip,
        'tg_gateway2_ip': tg_gateway2_ip,
        'tg_net1': devices[LEFT].ip_addrs,
        'tg_net2': devices[RIGHT].ip_addrs,
        'vnf_gateway1_cidr': vnf_gateway1_cidr,
        'vnf_gateway2_cidr': vnf_gateway2_cidr,
        'vif_mq_size': config.vif_multiqueue_size
        return content.format(**vm_config)

    def _get_vnic_type(self, port_index):
        """Get the right vnic type for given port index.

        If SR-IOV is specified, middle ports in multi-VNF chains
        can use vswitch or SR-IOV based on config.use_sriov_middle_net
        """
        if self.manager.config.sriov:
            chain_length = self.chain.get_length()
            if self.manager.config.use_sriov_middle_net or chain_length == 1:
            if self.vnf_id == 0 and port_index == 0:
                # first VNF in chain must use sriov for left port
            if (self.vnf_id == chain_length - 1) and (port_index == 1):
                # last VNF in chain must use sriov for right port

    def _get_idle_networks_ports(self):
        """Get the idle networks for PVP or PVVP chain (non shared net only)

        For EXT packet path or shared net, returns empty list.
        For PVP, PVVP these networks will be created if they do not exist.
        chain_id: to which chain the networks belong.
        a None value will mean that these networks are shared by all chains
        """
        config = self.manager.config
        chain_id = self.chain.chain_id
        idle_interfaces_per_vm = config.idle_interfaces_per_vm
        if config.service_chain == ChainType.EXT or chain_id is None or \
                idle_interfaces_per_vm == 0:
        # Make a copy of the idle networks dict as we may have to modify the
        idle_network_cfg = AttrDict(config.idle_networks)
        if idle_network_cfg.segmentation_id:
            # give each chain its own contiguous range of segmentation ids
            segmentation_id = idle_network_cfg.segmentation_id + \
                chain_id * idle_interfaces_per_vm
        segmentation_id = None
        # create as many idle networks and ports as requested
        for idle_index in range(idle_interfaces_per_vm):
            if config.service_chain == ChainType.PVP:
                suffix = '.%d' % (idle_index)
            suffix = '.%d.%d' % (self.vnf_id, idle_index)
            port_name = self.name + '-idle' + str(idle_index)
            # update the segmentation id based on chain id and idle index
            idle_network_cfg.segmentation_id = segmentation_id + idle_index
            port_name = port_name + "." + str(segmentation_id)
            networks.append(ChainNetwork(self.manager,
            ports.append(ChainVnfPort(port_name,
                                      networks[idle_index],
            # need to cleanup all successful networks
        self.idle_networks = networks
        self.idle_ports = ports

    def _setup(self, networks):
        """Discover a reusable instance, and create/reuse ports and routers."""
        flavor_id = self.manager.flavor.flavor.id
        # Check if we can reuse an instance with same name
        for instance in self.manager.existing_instances:
            if instance.name == self.name:
                instance_right = RIGHT
                # In case of L3 traffic instance use edge networks
                if self.manager.config.l3_router:
                    instance_left = EDGE_LEFT
                    instance_right = EDGE_RIGHT
                # Verify that other instance characteristics match
                if instance.flavor['id'] != flavor_id:
                    self._reuse_exception('Flavor mismatch')
                if instance.status != "ACTIVE":
                    self._reuse_exception('Matching instance is not in ACTIVE state')
                # The 2 networks for this instance must also be reused
                if not networks[instance_left].reuse:
                    self._reuse_exception('network %s is new' % networks[instance_left].name)
                if not networks[instance_right].reuse:
                    self._reuse_exception('network %s is new' % networks[instance_right].name)
                # instance.networks have the network names as keys:
                # {'nfvbench-rnet0': ['192.168.2.10'], 'nfvbench-lnet0': ['192.168.1.8']}
                if networks[instance_left].name not in instance.networks:
                    self._reuse_exception('Left network mismatch')
                if networks[instance_right].name not in instance.networks:
                    self._reuse_exception('Right network mismatch')
                self.instance = instance
                LOG.info('Reusing existing instance %s on %s',
                         self.name, self.get_hypervisor_name())
        # create or reuse/discover 2 ports per instance
        if self.manager.config.l3_router:
            self.ports = [ChainVnfPort(self.name + '-' + str(index),
                                       self._get_vnic_type(index)) for index in [0, 1]]
        self.ports = [ChainVnfPort(self.name + '-' + str(index),
                                   self._get_vnic_type(index)) for index in [0, 1]]
        # create idle networks and ports only if instance is not reused
        # if reused, we do not care about idle networks/ports
        self._get_idle_networks_ports()
        # Create neutron routers for L3 traffic use case
        if self.manager.config.l3_router and self.manager.openstack:
            internal_nets = networks[:2]
            if self.manager.config.service_chain == ChainType.PVP:
                edge_nets = networks[2:]
            edge_nets = networks[3:]
            subnets_left = [internal_nets[0], edge_nets[0]]
            routes_left = [{'destination': self.manager.config.traffic_generator.ip_addrs[0],
                            'nexthop': self.manager.config.traffic_generator.tg_gateway_ip_addrs[
                           {'destination': self.manager.config.traffic_generator.ip_addrs[1],
                            'nexthop': self.ports[0].get_ip()}]
                ChainRouter(self.manager, edge_nets[0].router_name, subnets_left, routes_left))
            subnets_right = [internal_nets[1], edge_nets[1]]
            routes_right = [{'destination': self.manager.config.traffic_generator.ip_addrs[0],
                             'nexthop': self.ports[1].get_ip()},
                            {'destination': self.manager.config.traffic_generator.ip_addrs[1],
                             'nexthop': self.manager.config.traffic_generator.tg_gateway_ip_addrs[
                ChainRouter(self.manager, edge_nets[1].router_name, subnets_right, routes_right))
            # Overload gateway_ips property with router ip address for ARP and traffic calls
            self.manager.generator_config.devices[LEFT].set_gw_ip(
                self.routers[LEFT].ports[0]['fixed_ips'][0]['ip_address'])  # router edge ip left)
            self.manager.generator_config.devices[RIGHT].set_gw_ip(
                self.routers[RIGHT].ports[0]['fixed_ips'][0]['ip_address'])  # router edge ip right)
        # if no reuse, actual vm creation is deferred after all ports in the chain are created
        # since we need to know the next mac in a multi-vnf chain

    def create_vnf(self, remote_mac_pair):
        """Create the VNF instance if it does not already exist."""
        if self.instance is None:
            port_ids = [{'port-id': vnf_port.port['id']}
                        for vnf_port in self.ports]
            # add idle ports after the 2 traffic-carrying ports
            for idle_port in self.idle_ports:
                port_ids.append({'port-id': idle_port.port['id']})
            vm_config = self._get_vm_config(remote_mac_pair)
            az = self.manager.placer.get_required_az()
            server = self.manager.comp.create_server(self.name,
                                                     self.manager.image_instance,
                                                     self.manager.flavor.flavor,
                                                     files={NFVBENCH_CFG_VM_PATHNAME: vm_config})
            self.instance = server
            if self.manager.placer.is_resolved():
                LOG.info('Created instance %s on %s', self.name, az)
            # the location is undetermined at this point
            # self.get_hypervisor_name() will return None
            LOG.info('Created instance %s - waiting for placement resolution...', self.name)
            # here we MUST wait until this instance is resolved otherwise subsequent
            # VNF creation can be placed in other hypervisors!
            config = self.manager.config
            # NOTE(review): '/' is float division in Python 3, which would make
            # range(max_retries) below raise TypeError -- this should be '//'.
            max_retries = (config.check_traffic_time_sec +
                           config.generic_poll_sec - 1) / config.generic_poll_sec
            for retry in range(max_retries):
                status = self.get_status()
                if status == 'ACTIVE':
                    hyp_name = self.get_hypervisor_name()
                    LOG.info('Instance %s is active and has been placed on %s',
                    self.manager.placer.register_full_name(hyp_name)
                if status == 'ERROR':
                    raise ChainException('Instance %s creation error: %s' %
                                         self.instance.fault['message']))
                LOG.info('Waiting for instance %s to become active (retry %d/%d)...',
                         self.name, retry + 1, max_retries + 1)
                time.sleep(config.generic_poll_sec)
            LOG.error('Instance %s creation timed out', self.name)
            raise ChainException('Instance %s creation timed out' % self.name)
        raise ChainException('Unable to create instance: %s' % (self.name))

    def _reuse_exception(self, reason):
        """Raise a ChainException explaining why this instance cannot be reused."""
        raise ChainException('Instance %s cannot be reused (%s)' % (self.name, reason))

    def get_status(self):
        """Get the status of this instance."""
        # refresh the cached instance while it is not yet ACTIVE
        if self.instance.status != 'ACTIVE':
            self.instance = self.manager.comp.poll_server(self.instance)
        return self.instance.status

    def get_hostname(self):
        """Get the hypervisor host name running this VNF instance."""
        # admin credentials expose the hypervisor attribute directly;
        # otherwise the name must come from the config
        if self.manager.is_admin:
            hypervisor_hostname = getattr(self.instance, 'OS-EXT-SRV-ATTR:hypervisor_hostname')
        hypervisor_hostname = self.manager.config.hypervisor_hostname
        if not hypervisor_hostname:
            raise ChainException('Hypervisor hostname parameter is mandatory')
        return hypervisor_hostname

    def get_host_ip(self):
        """Get the IP address of the host where this instance runs.

        return: the IP address
        """
        self.host_ip = self.manager.comp.get_hypervisor(self.get_hostname()).host_ip

    def get_hypervisor_name(self):
        """Get hypervisor name (az:hostname) for this VNF instance."""
        # AZ comes from nova when admin, from config otherwise
        if self.manager.is_admin:
            az = getattr(self.instance, 'OS-EXT-AZ:availability_zone')
        az = self.manager.config.availability_zone
        raise ChainException('Availability zone parameter is mandatory')
        hostname = self.get_hostname()
        return az + ':' + hostname

    # NOTE(review): the 'def get_uuid(self):' header appears elided here.
        """Get the uuid for this instance."""
        return self.instance.id

    def delete(self, forced=False):
        """Delete this VNF instance."""
        LOG.info("Instance %s not deleted (reused)", self.name)
        self.manager.comp.delete_server(self.instance)
        LOG.info("Deleted instance %s", self.name)
        # also release this VNF's ports and private idle networks
        for port in self.ports:
        for port in self.idle_ports:
        for network in self.idle_networks:
748 """A class to manage a single chain.
750 Can handle any type of chain (EXT, PVP, PVVP)
    def __init__(self, chain_id, manager):
        """Create a new chain.

        chain_id: chain index (first chain is 0)
        manager: the chain manager that owns all chains
        """
        self.chain_id = chain_id
        self.manager = manager
        self.encaps = manager.encaps
        # NOTE(review): lines are elided in this capture (e.g. initialization
        # of self.instances, the closing arguments of the ChainVnf() call and
        # any try/except around staging) -- verify against the full source.
        self.networks = manager.get_networks(chain_id)
        # For external chain VNFs can only be discovered from their MAC addresses
        # either from config or from ARP
        if manager.config.service_chain != ChainType.EXT:
            for chain_instance_index in range(self.get_length()):
                self.instances.append(ChainVnf(self,
                                               chain_instance_index,
            # at this point new VNFs are not created yet but
            # verify that all discovered VNFs are on the same hypervisor
            self._check_hypervisors()
            # now that all VNF ports are created we need to calculate the
            # left/right remote MAC for each VNF in the chain
            # before actually creating the VNF itself
            rem_mac_pairs = self._get_remote_mac_pairs()
            for instance in self.instances:
                rem_mac_pair = rem_mac_pairs.pop(0)
                instance.create_vnf(rem_mac_pair)
    def _check_hypervisors(self):
        """Verify all discovered VNF instances run on a single hypervisor.

        NOTE(review): a few lines are elided in this capture (the else branch
        indentation and the closing arguments of the final raise) -- verify
        against the full source.
        """
        common_hypervisor = None
        for instance in self.instances:
            # get the full hypervizor name (az:compute)
            hname = instance.get_hypervisor_name()
            if common_hypervisor:
                if hname != common_hypervisor:
                    raise ChainException('Discovered instances on different hypervisors:'
                                         ' %s and %s' % (hname, common_hypervisor))
            common_hypervisor = hname
        if common_hypervisor:
            # check that the common hypervisor name matches the requested hypervisor name
            # and set the name to be used by all future instances (if any)
            if not self.manager.placer.register_full_name(common_hypervisor):
                raise ChainException('Discovered hypervisor placement %s is incompatible' %
806 def get_length(self):
807 """Get the number of VNF in the chain."""
808 # Take into account 2 edge networks for routers
809 return len(self.networks) - 3 if self.manager.config.l3_router else len(self.networks) - 1
    def _get_remote_mac_pairs(self):
        """Get the list of remote mac pairs for every VNF in the chain.

        Traverse the chain from left to right and establish the
        left/right remote MAC for each VNF in the chain.

        mac sequence: tg_src_mac, vm0-mac0, vm0-mac1, tg_dst_mac
        must produce [[tg_src_mac, tg_dst_mac]] or looking at index in mac sequence: [[0, 3]]
        the mac pair is what the VNF at that position (index 0) sees as next hop mac left and right

        tg_src_mac, vm0-mac0, vm0-mac1, vm1-mac0, vm1-mac1, tg_dst_mac
        Must produce the following list:
        [[tg_src_mac, vm1-mac0], [vm0-mac1, tg_dst_mac]] or index: [[0, 3], [2, 5]]

        General case with 3 VMs in chain, the list of consecutive macs (left to right):
        tg_src_mac, vm0-mac0, vm0-mac1, vm1-mac0, vm1-mac1, vm2-mac0, vm2-mac1, tg_dst_mac
        Must produce the following list:
        [[tg_src_mac, vm1-mac0], [vm0-mac1, vm2-mac0], [vm1-mac1, tg_dst_mac]]
        or index: [[0, 3], [2, 5], [4, 7]]

        The series pattern is pretty clear: [[n, n+3],... ] where n is multiple of 2
        """
        # line up all mac from left to right
        mac_seq = [self.manager.generator_config.devices[LEFT].mac]
        for instance in self.instances:
            mac_seq.append(instance.ports[0].get_mac())
            mac_seq.append(instance.ports[1].get_mac())
        mac_seq.append(self.manager.generator_config.devices[RIGHT].mac)
        # NOTE(review): the initialization of the base index and result list,
        # the per-iteration base increment and the final return are elided in
        # this capture -- verify against the full source.
        for _ in self.instances:
            rem_mac_pairs.append([mac_seq[base], mac_seq[base + 3]])
848 def get_instances(self):
849 """Return all instances for this chain."""
850 return self.instances
    def get_vlan(self, port_index):
        """Get the VLAN id on a given port.

        port_index: left port is 0, right port is 1
        return: the vlan_id or None if there is no vlan tagging
        """
        # for port 1 we need to return the VLAN of the last network in the chain
        # The networks array contains 2 networks for PVP [left, right]
        # and 3 networks in the case of PVVP [left.middle,right]
        # NOTE(review): the right-port remapping of port_index appears elided
        # in this capture (only its comment remains below).
        # this will pick the last item in array
        return self.networks[port_index].get_vlan()
    def get_vxlan(self, port_index):
        """Get the VXLAN id on a given port.

        port_index: left port is 0, right port is 1
        return: the vxlan_id or None if there is no vxlan
        """
        # for port 1 we need to return the VLAN of the last network in the chain
        # The networks array contains 2 networks for PVP [left, right]
        # and 3 networks in the case of PVVP [left.middle,right]
        # NOTE(review): the right-port remapping of port_index appears elided
        # in this capture (only its comment remains below).
        # this will pick the last item in array
        return self.networks[port_index].get_vxlan()
    def get_dest_mac(self, port_index):
        """Get the dest MAC on a given port.

        port_index: left port is 0, right port is 1
        """
        # NOTE(review): the 'if port_index:' guard selecting the right-port
        # branch appears elided in this capture.
        # for right port, use the right port MAC of the last (right most) VNF In chain
        return self.instances[-1].ports[1].get_mac()
        # for left port use the left port MAC of the first (left most) VNF in chain
        return self.instances[0].ports[0].get_mac()
892 def get_network_uuids(self):
893 """Get UUID of networks in this chain from left to right (order is important).
895 :return: list of UUIDs of networks (2 or 3 elements)
897 return [net['id'] for net in self.networks]
899 def get_host_ips(self):
900 """Return the IP adresss(es) of the host compute nodes used for this chain.
902 :return: a list of 1 or 2 IP addresses
904 return [vnf.get_host_ip() for vnf in self.instances]
906 def get_compute_nodes(self):
907 """Return the name of the host compute nodes used for this chain.
909 :return: a list of 1 host name in the az:host format
911 # Since all chains go through the same compute node(s) we can just retrieve the
912 # compute node name(s) for the first chain
913 return [vnf.get_hypervisor_name() for vnf in self.instances]
    # NOTE(review): the 'def delete(self):' header and the per-item delete
    # calls appear elided in this capture -- verify against the full source.
        """Delete this chain."""
        for instance in self.instances:
        # only delete if these are chain private networks (not shared)
        if not self.manager.config.service_chain_shared_net:
            for network in self.networks:
class InstancePlacer(object):
    """A class to manage instance placement for all VNFs in all chains.

    A full az string is made of 2 parts AZ and hypervisor.
    The placement is resolved when both parts az and hypervisor names are known.
    """

    def __init__(self, req_az, req_hyp):
        """Create a new instance placer.

        req_az: requested AZ (can be None or empty if no preference)
        req_hyp: requested hypervisor name (can be None of empty if no preference)
        can be any of 'nova:', 'comp1', 'nova:comp1'
        if it is a list, only the first item is used (backward compatibility in config)

        req_az is ignored if req_hyp has an az part
        all other parts beyond the first 2 are ignored in req_hyp
        """
        # NOTE(review): lines are elided in this class in this capture (guard
        # bodies and the if/else around the two required_az assignments) --
        # verify against the full source.
        # if passed a list just pick the first item
        if req_hyp and isinstance(req_hyp, list):
        # only pick first part of az
        if req_az and ':' in req_az:
            req_az = req_az.split(':')[0]
        # check if requested hypervisor string has an AZ part
        split_hyp = req_hyp.split(':')
        if len(split_hyp) > 1:
            # override the AZ part and hypervisor part
            req_az = split_hyp[0]
            req_hyp = split_hyp[1]
        self.requested_az = req_az if req_az else ''
        self.requested_hyp = req_hyp if req_hyp else ''
        # Nova can accept AZ only (e.g. 'nova:', use any hypervisor in that AZ)
        # or hypervisor only (e.g. ':comp1')
        # or both (e.g. 'nova:comp1')
        self.required_az = req_az + ':' + self.requested_hyp
        # need to insert a ':' so nova knows this is the hypervisor name
        self.required_az = ':' + self.requested_hyp if req_hyp else ''
        # placement is resolved when both AZ and hypervisor names are known and set
        self.resolved = self.requested_az != '' and self.requested_hyp != ''

    def get_required_az(self):
        """Return the required az (can be resolved or not)."""
        return self.required_az

    def register_full_name(self, discovered_az):
        """Verify compatibility and register a discovered hypervisor full name.

        discovered_az: a discovered AZ in az:hypervisor format
        return: True if discovered_az is compatible and set
        False if discovered_az is not compatible

        NOTE(review): the guard before the early return below and the guard
        bodies appear elided in this capture.
        """
        return discovered_az == self.required_az
        # must be in full az format
        split_daz = discovered_az.split(':')
        if len(split_daz) != 2:
        if self.requested_az and self.requested_az != split_daz[0]:
        if self.requested_hyp and self.requested_hyp != split_daz[1]:
        self.required_az = discovered_az

    def is_resolved(self):
        """Check if the full AZ is resolved.

        return: True if resolved
        """
        return self.resolved
1003 class ChainManager(object):
1004 """A class for managing all chains for a given run.
1006 Supports openstack or no openstack.
1007 Supports EXT, PVP and PVVP chains.
    def __init__(self, chain_runner):
        """Create a chain manager to take care of discovering or bringing up the requested chains.

        A new instance must be created every time a new config is used.
        config: the nfvbench config to use
        cred: openstack credentials to use or None if there is no openstack
        """
        self.chain_runner = chain_runner
        self.config = chain_runner.config
        self.generator_config = chain_runner.traffic_client.generator_config
        # VM image is resolved lazily (see _setup_image)
        self.image_instance = None
        self.image_name = None
        # Left and right networks shared across all chains (only if shared)
        self.nova_client = None
        self.neutron_client = None
        self.glance_client = None
        # existing nova servers that can potentially be reused
        self.existing_instances = []
        # existing ports keyed by the network uuid they belong to
        self._existing_ports = {}
        config = self.config
        # openstack mode requires credentials and excludes the L2 loopback mode
        self.openstack = (chain_runner.cred is not None) and not config.l2_loopback
        self.chain_count = config.service_chain_count
        # NOTE(review): the openstack client setup below presumably only runs when
        # self.openstack is True -- an "if self.openstack:" guard appears to have
        # been lost here, verify against the original source
        session = chain_runner.cred.get_session()
        self.is_admin = chain_runner.cred.is_admin
        self.nova_client = Client(2, session=session)
        self.neutron_client = neutronclient.Client('2.0', session=session)
        self.glance_client = glanceclient.Client('2', session=session)
        # NOTE(review): this compute.Compute() call is truncated (missing remaining
        # arguments and closing parenthesis) -- restore from the original source
        self.comp = compute.Compute(self.nova_client,
        if config.service_chain != ChainType.EXT:
            # non-EXT chains: nfvbench stages the VNFs (placement, flavor, reuse)
            self.placer = InstancePlacer(config.availability_zone, config.compute_nodes)
            self.flavor = ChainFlavor(config.flavor_type, config.flavor, self.comp)
            # Get list of all existing instances to check if some instances can be reused
            self.existing_instances = self.comp.get_server_list()
        # For EXT chains, the external_networks left and right fields in the config
        # must be either a prefix string or a list of at least chain-count strings
        self._check_extnet('left', config.external_networks.left)
        self._check_extnet('right', config.external_networks.right)
        # If networks are shared across chains, get the list of networks
        if config.service_chain_shared_net:
            self.networks = self.get_networks()
        # Reuse/create chains
        for chain_id in range(self.chain_count):
            self.chains.append(Chain(chain_id, self))
        if config.service_chain == ChainType.EXT:
            # if EXT and no ARP or VxLAN we need to read dest MACs from config
            if config.no_arp or config.vxlan:
                self._get_dest_macs_from_config()
        # Make sure all instances are active before proceeding
        self._ensure_instances_active()
        # network API call do not show VLANS ID if not admin read from config
        if not self.is_admin and config.vlan_tagging:
            self._get_config_vlans()
        # no openstack, no need to create chains
        # NOTE(review): the statements below presumably belong to a non-openstack
        # "else:" branch -- verify against the original source
        if not config.l2_loopback and config.no_arp:
            self._get_dest_macs_from_config()
        if config.vlan_tagging:
            # make sure there at least as many entries as chains in each left/right list
            if len(config.vlans) != 2:
                raise ChainException('The config vlans property must be a list '
                                     'with 2 lists of VLAN IDs')
            self._get_config_vlans()
        # NOTE(review): presumably guarded by "if config.vxlan:" -- verify upstream
        raise ChainException('VxLAN is only supported with OpenStack')
1093 def _check_extnet(self, side, name):
1095 raise ChainException('external_networks.%s must contain a valid network'
1096 ' name prefix or a list of network names' % side)
1097 if isinstance(name, tuple) and len(name) < self.chain_count:
1098 raise ChainException('external_networks.%s %s'
1099 ' must have at least %d names' % (side, name, self.chain_count))
1101 def _get_config_vlans(self):
1104 self.vlans = [self._check_list('vlans[0]', self.config.vlans[0], re_vlan),
1105 self._check_list('vlans[1]', self.config.vlans[1], re_vlan)]
1107 raise ChainException('vlans parameter is mandatory. Set valid value in config file')
1109 def _get_dest_macs_from_config(self):
1110 re_mac = "[0-9a-fA-F]{2}([-:])[0-9a-fA-F]{2}(\\1[0-9a-fA-F]{2}){4}$"
1111 tg_config = self.config.traffic_generator
1112 self.dest_macs = [self._check_list("mac_addrs_left",
1113 tg_config.mac_addrs_left, re_mac),
1114 self._check_list("mac_addrs_right",
1115 tg_config.mac_addrs_right, re_mac)]
1117 def _check_list(self, list_name, ll, pattern):
1118 # if it is a single int or mac, make it a list of 1 int
1119 if isinstance(ll, (int, str)):
1122 if not re.match(pattern, str(item)):
1123 raise ChainException("Invalid format '{item}' specified in {fname}"
1124 .format(item=item, fname=list_name))
1125 # must have at least 1 element
1127 raise ChainException('%s cannot be empty' % (list_name))
1128 # for shared network, if 1 element is passed, replicate it as many times
1130 if self.config.service_chain_shared_net and len(ll) == 1:
1131 ll = [ll[0]] * self.chain_count
1133 # number of elements musty be the number of chains
1134 elif len(ll) < self.chain_count:
1135 raise ChainException('%s=%s must be a list with %d elements per chain' %
1136 (list_name, ll, self.chain_count))
    def _setup_image(self):
        """Find or upload the nfvbench VM image to use for this run."""
        # To avoid reuploading image in server mode, check whether image_name is set or not
        # NOTE(review): presumably guarded by "if self.image_name:" -- verify upstream
        self.image_instance = self.comp.find_image(self.image_name)
        if self.image_instance:
            LOG.info("Reusing image %s", self.image_name)
        # image name is derived from the qcow2 file name, e.g. nfvbenchvm-<version>.qcow2
        image_name_search_pattern = r'(nfvbenchvm-\d+(\.\d+)*).qcow2'
        if self.config.vm_image_file:
            match = re.search(image_name_search_pattern, self.config.vm_image_file)
            # NOTE(review): an "if match:" / "else:" split appears to have been lost
            # here; as written the raise below is unconditional -- verify upstream
            self.image_name = match.group(1)
            LOG.info('Using provided VM image file %s', self.config.vm_image_file)
            raise ChainException('Provided VM image file name %s must start with '
                                 '"nfvbenchvm-<version>"' % self.config.vm_image_file)
        # no image file in config: look for a built-in image in the package root
        pkg_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        for f in os.listdir(pkg_root):
            if re.search(image_name_search_pattern, f):
                self.config.vm_image_file = pkg_root + '/' + f
                self.image_name = f.replace('.qcow2', '')
                LOG.info('Found built-in VM image file %s', f)
        # NOTE(review): presumably only reached when no built-in image was found
        # (a loop "break"/"else" appears to have been lost) -- verify upstream
        raise ChainException('Cannot find any built-in VM image file.')
        self.image_instance = self.comp.find_image(self.image_name)
        if not self.image_instance:
            LOG.info('Uploading %s', self.image_name)
            res = self.comp.upload_image_via_url(self.image_name,
                                                 self.config.vm_image_file)
            # NOTE(review): presumably guarded by "if not res:" -- verify upstream
            raise ChainException('Error uploading image %s from %s. ABORTING.' %
                                 (self.image_name, self.config.vm_image_file))
            LOG.info('Image %s successfully uploaded.', self.image_name)
            self.image_instance = self.comp.find_image(self.image_name)
        # image multiqueue property must be set according to the vif_multiqueue_size
        # config value (defaults to 1 or disabled)
        self.comp.image_set_multiqueue(self.image_instance, self.config.vif_multiqueue_size > 1)
    def _ensure_instances_active(self):
        """Poll all chain instances until they are ACTIVE, ERROR or time out."""
        # NOTE(review): "instances = []" appears to be missing before this loop
        for chain in self.chains:
            instances.extend(chain.get_instances())
        initial_instance_count = len(instances)
        # Give additional 10 seconds per VM
        max_retries = (self.config.check_traffic_time_sec + (initial_instance_count - 1) * 10 +
                       self.config.generic_poll_sec - 1) / self.config.generic_poll_sec
        # NOTE(review): the retry loop header (e.g. "retry = 0" and a "while"/"for"
        # statement) appears to have been lost here -- verify against upstream;
        # "retry" is read below but never assigned in the visible code
        remaining_instances = []
        for instance in instances:
            status = instance.get_status()
            if status == 'ACTIVE':
                LOG.info('Instance %s is ACTIVE on %s',
                         instance.name, instance.get_hypervisor_name())
            if status == 'ERROR':
                # NOTE(review): this raise is missing an "(instance.name," argument
                # line; the %-format tuple and parentheses are unbalanced as written
                raise ChainException('Instance %s creation error: %s' %
                                     instance.instance.fault['message']))
            remaining_instances.append(instance)
        # NOTE(review): body of this "if" (likely "break") appears to be missing
        if not remaining_instances:
        if retry >= max_retries:
            raise ChainException('Time-out: %d/%d instances still not active' %
                                 (len(remaining_instances), initial_instance_count))
        # NOTE(review): this LOG call is missing its trailing "retry, max_retries)"
        # argument line -- verify against upstream
        LOG.info('Waiting for %d/%d instance to become active (retry %d/%d)...',
                 len(remaining_instances), initial_instance_count,
        instances = remaining_instances
        time.sleep(self.config.generic_poll_sec)
        if initial_instance_count:
            LOG.info('All instances are active')
    def get_networks(self, chain_id=None):
        """Get the networks for given EXT, PVP or PVVP chain.

        For EXT packet path, these networks must pre-exist.
        For PVP, PVVP these networks will be created if they do not exist.
        chain_id: to which chain the networks belong.
                  a None value will mean that these networks are shared by all chains
        """
        # the only case where self.networks exists is when the networks are shared
        # across all chains
        # NOTE(review): presumably guarded by "if self.networks:" -- as written
        # everything after this return is unreachable, verify upstream
        return self.networks
        if self.config.service_chain == ChainType.EXT:
            # EXT chains: networks are looked up by name, never created
            ext_net = self.config.external_networks
            net_cfg = [AttrDict({'name': name,
                                 'segmentation_id': None,
                                 'physical_network': None})
                       for name in [ext_net.left, ext_net.right]]
            # segmentation id and subnet should be discovered from neutron
        # PVP/PVVP chains: networks come from the internal_networks config
        int_nets = self.config.internal_networks
        if self.config.service_chain == ChainType.PVP:
            net_cfg = [int_nets.left, int_nets.right]
        # NOTE(review): presumably an "else:" branch for PVVP (3 networks,
        # including a middle network) -- verify upstream
        net_cfg = [int_nets.left, int_nets.middle, int_nets.right]
        if self.config.l3_router:
            # L3 router mode adds an edge network on each side
            edge_nets = self.config.edge_networks
            net_cfg.append(edge_nets.left)
            net_cfg.append(edge_nets.right)
        # NOTE(review): "networks = []", a "try:" and the "for cfg in net_cfg:"
        # loop header around this append appear to have been lost -- verify upstream
        networks.append(ChainNetwork(self, cfg, chain_id, lookup_only=lookup_only))
        # need to cleanup all successful networks prior to bailing out
        for net in networks:
1261 def get_existing_ports(self):
1262 """Get the list of existing ports.
1264 Lazy retrieval of ports as this can be costly if there are lots of ports and
1265 is only needed when VM and network are being reused.
1267 return: a dict of list of neutron ports indexed by the network uuid they are attached to
1269 Each port is a dict with fields such as below:
1270 {'allowed_address_pairs': [], 'extra_dhcp_opts': [],
1271 'updated_at': '2018-10-06T07:15:35Z', 'device_owner': 'compute:nova',
1272 'revision_number': 10, 'port_security_enabled': False, 'binding:profile': {},
1273 'fixed_ips': [{'subnet_id': '6903a3b3-49a1-4ba4-8259-4a90e7a44b21',
1274 'ip_address': '192.168.1.4'}], 'id': '3dcb9cfa-d82a-4dd1-85a1-fd8284b52d72',
1275 'security_groups': [],
1276 'binding:vif_details': {'vhostuser_socket': '/tmp/3dcb9cfa-d82a-4dd1-85a1-fd8284b52d72',
1277 'vhostuser_mode': 'server'},
1278 'binding:vif_type': 'vhostuser',
1279 'mac_address': 'fa:16:3e:3c:63:04',
1280 'project_id': '977ac76a63d7492f927fa80e86baff4c',
1282 'binding:host_id': 'a20-champagne-compute-1',
1284 'device_id': 'a98e2ad2-5371-4aa5-a356-8264a970ce4b',
1285 'name': 'nfvbench-loop-vm0-0', 'admin_state_up': True,
1286 'network_id': '3ea5fd88-278f-4d9d-b24d-1e443791a055',
1287 'tenant_id': '977ac76a63d7492f927fa80e86baff4c',
1288 'created_at': '2018-10-06T07:15:10Z',
1289 'binding:vnic_type': 'normal'}
1291 if not self._existing_ports:
1292 LOG.info('Loading list of all ports...')
1293 existing_ports = self.neutron_client.list_ports()['ports']
1294 # place all ports in the dict keyed by the port network uuid
1295 for port in existing_ports:
1296 port_list = self._existing_ports.setdefault(port['network_id'], [])
1297 port_list.append(port)
1298 LOG.info("Loaded %d ports attached to %d networks",
1299 len(existing_ports), len(self._existing_ports))
1300 return self._existing_ports
1302 def get_ports_from_network(self, chain_network):
1303 """Get the list of existing ports that belong to a network.
1305 Lazy retrieval of ports as this can be costly if there are lots of ports and
1306 is only needed when VM and network are being reused.
1308 chain_network: a ChainNetwork instance for which attached ports neeed to be retrieved
1309 return: list of neutron ports attached to requested network
1311 return self.get_existing_ports().get(chain_network.get_uuid(), None)
1313 def get_hypervisor_from_mac(self, mac):
1314 """Get the hypervisor that hosts a VM MAC.
1316 mac: MAC address to look for
1317 return: the hypervisor where the matching port runs or None if not found
1319 # _existing_ports is a dict of list of ports indexed by network id
1320 for port_list in self.get_existing_ports().values():
1321 for port in port_list:
1323 if port['mac_address'] == mac:
1324 host_id = port['binding:host_id']
1325 return self.comp.get_hypervisor(host_id)
1330 def get_host_ip_from_mac(self, mac):
1331 """Get the host IP address matching a MAC.
1333 mac: MAC address to look for
1334 return: the IP address of the host where the matching port runs or None if not found
1336 hypervisor = self.get_hypervisor_from_mac(mac)
1338 return hypervisor.host_ip
1341 def get_chain_vlans(self, port_index):
1342 """Get the list of per chain VLAN id on a given port.
1344 port_index: left port is 0, right port is 1
1345 return: a VLAN ID list indexed by the chain index or None if no vlan tagging
1347 if self.chains and self.is_admin:
1348 return [self.chains[chain_index].get_vlan(port_index)
1349 for chain_index in range(self.chain_count)]
1351 return self.vlans[port_index]
1353 def get_chain_vxlans(self, port_index):
1354 """Get the list of per chain VNIs id on a given port.
1356 port_index: left port is 0, right port is 1
1357 return: a VNIs ID list indexed by the chain index or None if no vlan tagging
1359 if self.chains and self.is_admin:
1360 return [self.chains[chain_index].get_vxlan(port_index)
1361 for chain_index in range(self.chain_count)]
1363 raise ChainException('VxLAN is only supported with OpenStack and with admin user')
1365 def get_dest_macs(self, port_index):
1366 """Get the list of per chain dest MACs on a given port.
1368 Should not be called if EXT+ARP is used (in that case the traffic gen will
1369 have the ARP responses back from VNFs with the dest MAC to use).
1371 port_index: left port is 0, right port is 1
1372 return: a list of dest MACs indexed by the chain index
1374 if self.chains and self.config.service_chain != ChainType.EXT:
1375 return [self.chains[chain_index].get_dest_mac(port_index)
1376 for chain_index in range(self.chain_count)]
1377 # no openstack or EXT+no-arp
1378 return self.dest_macs[port_index]
    def get_host_ips(self):
        """Return the IP address(es) of the host compute nodes used for this run.

        :return: a list of 1 IP address
        """
        # Since all chains go through the same compute node(s) we can just retrieve the
        # compute node(s) for the first chain
        # NOTE(review): presumably guarded by "if self.chains:" -- verify upstream
        if self.config.service_chain != ChainType.EXT:
            return self.chains[0].get_host_ips()
        # in the case of EXT, the compute node must be retrieved from the port
        # associated to any of the dest MACs
        dst_macs = self.generator_config.get_dest_macs()
        # dest MAC on port 0, chain 0
        dst_mac = dst_macs[0][0]
        host_ip = self.get_host_ip_from_mac(dst_mac)
        # NOTE(review): a "return [host_ip]" (likely guarded by "if host_ip:")
        # appears to be missing after this log -- verify against upstream
        LOG.info('Found compute node IP for EXT chain: %s', host_ip)
    def get_compute_nodes(self):
        """Return the name of the host compute nodes used for this run.

        :return: a list of 0 or 1 host name in the az:host format
        """
        # Since all chains go through the same compute node(s) we can just retrieve the
        # compute node name(s) for the first chain
        # NOTE(review): presumably guarded by "if self.chains:" -- verify upstream
        if self.config.service_chain != ChainType.EXT:
            return self.chains[0].get_compute_nodes()
        # in the case of EXT, the compute node must be retrieved from the port
        # associated to any of the dest MACs
        dst_macs = self.generator_config.get_dest_macs()
        # dest MAC on port 0, chain 0
        dst_mac = dst_macs[0][0]
        hypervisor = self.get_hypervisor_from_mac(dst_mac)
        # NOTE(review): presumably guarded by "if hypervisor:" -- as written this
        # dereferences hypervisor even when the MAC lookup returned None
        LOG.info('Found hypervisor for EXT chain: %s', hypervisor.hypervisor_hostname)
        # leading ':' tells nova this is a hypervisor name with no AZ part
        return[':' + hypervisor.hypervisor_hostname]
        # no openstack = no chains
1427 """Delete resources for all chains."""
1428 for chain in self.chains:
1430 for network in self.networks:
1433 self.flavor.delete()