2 # Copyright 2018 Cisco Systems, Inc. All rights reserved.
4 # Licensed under the Apache License, Version 2.0 (the "License"); you may
5 # not use this file except in compliance with the License. You may obtain
6 # a copy of the License at
8 # http://www.apache.org/licenses/LICENSE-2.0
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 # License for the specific language governing permissions and limitations
17 # This module takes care of chaining networks, ports and vms
19 """NFVBENCH CHAIN DISCOVERY/STAGING.
21 This module takes care of staging/discovering all resources that are participating in a
22 benchmarking session: flavors, networks, ports, VNF instances.
23 If a resource is discovered with the same name, it will be reused.
24 Otherwise it will be created.
26 ChainManager: manages VM image, flavor, the staging discovery of all chains
28 Chain: manages one chain, has 2 or more networks and 1 or more instances
29 ChainNetwork: manages 1 network in a chain
30 ChainVnf: manages 1 VNF instance in a chain, has 2 ports
31 ChainVnfPort: manages 1 instance port
33 ChainManager-->Chain(*)
34 Chain-->ChainNetwork(*),ChainVnf(*)
35 ChainVnf-->ChainVnfPort(2)
37 Once created/discovered, instances are checked to be in the active state (ready to pass traffic)
38 Configuration parameters that will influence how these resources are staged/related:
39 - openstack or no openstack
42 - number of VNF in each chain (PVP, PVVP)
43 - SRIOV and middle port SRIOV for port types
44 - whether networks are shared across chains or not
46 There is no traffic generation involved in this module.
53 from neutronclient.neutron import client as neutronclient
54 from novaclient.client import Client
56 from attrdict import AttrDict
57 from chain_router import ChainRouter
60 from specs import ChainType
61 # Left and right index for network and port lists
64 # L3 traffic edge networks are at the end of networks list
67 # Name of the VM config file
68 NFVBENCH_CFG_FILENAME = 'nfvbenchvm.conf'
69 # full pathname of the VM config in the VM
70 NFVBENCH_CFG_VM_PATHNAME = os.path.join('/etc/', NFVBENCH_CFG_FILENAME)
71 # full path of the boot shell script template file on the server where nfvbench runs
72 BOOT_SCRIPT_PATHNAME = os.path.join(os.path.dirname(os.path.abspath(__file__)),
74 NFVBENCH_CFG_FILENAME)
class ChainException(Exception):
    """Exception while operating the chains."""
class NetworkEncaps(object):
    """Network encapsulation."""
class ChainFlavor(object):
    """Class to manage the chain flavor (reuse an existing flavor or create one)."""

    def __init__(self, flavor_name, flavor_dict, comp):
        """Create or reuse a flavor.

        flavor_name: the name of the flavor to look up or create
        flavor_dict: flavor properties (vcpus, ram, disk...) used only on creation;
                     may contain an optional 'extra_specs' sub-dict
        comp: a Compute instance wrapping the nova client
        """
        self.name = flavor_name
        self.comp = comp
        # try to find an existing flavor with the same name first
        self.flavor = self.comp.find_flavor(flavor_name)
        self.reuse = False
        if self.flavor:
            # a matching flavor already exists: reuse it and never delete it
            self.reuse = True
            LOG.info("Reused flavor '%s'", flavor_name)
        else:
            # extra_specs must be popped out as create_flavor does not accept it inline
            extra_specs = flavor_dict.pop('extra_specs', None)
            self.flavor = comp.create_flavor(flavor_name,
                                             **flavor_dict)
            LOG.info("Created flavor '%s'", flavor_name)
            if extra_specs:
                self.flavor.set_keys(extra_specs)

    def delete(self):
        """Delete this flavor (only if it was created by us, never if reused)."""
        if not self.reuse and self.flavor:
            self.flavor.delete()
            LOG.info("Flavor '%s' deleted", self.name)
class ChainVnfPort(object):
    """A port associated to one VNF in the chain."""

    def __init__(self, name, vnf, chain_network, vnic_type):
        """Create or reuse a port on a given network.

        if vnf.instance is None the VNF instance is not reused and this ChainVnfPort instance must
        create a new port.
        Otherwise vnf.instance is a reused VNF instance and this ChainVnfPort instance must
        find an existing port to reuse that matches the port requirements: same attached network,
        instance, name, vnic type

        name: name for this port
        vnf: ChainVNf instance that owns this port
        chain_network: ChainNetwork instance where this port should attach
        vnic_type: required vnic type for this port
        """
        self.name = name
        self.vnf = vnf
        self.manager = vnf.manager
        self.reuse = False
        self.port = None
        if vnf.instance:
            # VNF instance is reused, we need to find an existing port that matches this instance
            # discover ports attached to this instance
            port_list = self.manager.get_ports_from_network(chain_network)
            for port in port_list:
                if port['name'] != name:
                    continue
                if port['binding:vnic_type'] != vnic_type:
                    continue
                if port['device_id'] == vnf.get_uuid():
                    self.port = port
                    LOG.info('Reusing existing port %s mac=%s', name, port['mac_address'])
                    break
            else:
                # no matching port found: the instance cannot be reused safely
                raise ChainException('Cannot find matching port')
            self.reuse = True
            return
        # VNF instance is not created yet, we need to create a new port
        body = {
            "port": {
                'name': name,
                'network_id': chain_network.get_uuid(),
                'binding:vnic_type': vnic_type
            }
        }
        port = self.manager.neutron_client.create_port(body)
        self.port = port['port']
        LOG.info('Created port %s', name)
        # best effort: disable port security so the VM can forward traffic with
        # arbitrary src/dst MAC/IP; failure is tolerated (e.g. extension missing)
        try:
            self.manager.neutron_client.update_port(self.port['id'], {
                'port': {
                    'security_groups': [],
                    'port_security_enabled': False,
                }
            })
            LOG.info('Security disabled on port %s', name)
        except Exception:
            LOG.info('Failed to disable security on port %s (ignored)', name)

    def get_mac(self):
        """Get the MAC address for this port."""
        return self.port['mac_address']

    def get_ip(self):
        """Get the IP address for this port."""
        return self.port['fixed_ips'][0]['ip_address']

    def delete(self):
        """Delete this port instance (skipped for reused or never-created ports)."""
        if self.reuse or not self.port:
            return
        retry = 0
        # retry a few times as neutron may transiently refuse the delete
        while retry < self.manager.config.generic_retry_count:
            try:
                self.manager.neutron_client.delete_port(self.port['id'])
                LOG.info("Deleted port %s", self.name)
                self.port = None
                return
            except Exception:
                retry += 1
                time.sleep(self.manager.config.generic_poll_sec)
        LOG.error('Unable to delete port: %s', self.name)
class ChainNetwork(object):
    """Could be a shared network across all chains or a chain private network."""

    def __init__(self, manager, network_config, chain_id=None, lookup_only=False,
                 suffix=None):
        """Create a network for given chain.

        network_config: a dict containing the network properties
                        (name, segmentation_id and physical_network)
        chain_id: to which chain the networks belong.
                  a None value will mean that these networks are shared by all chains
        lookup_only: if True, only discover an existing network, never create one
        suffix: a suffix to add to the network name (if not None)
        """
        self.manager = manager
        if chain_id is None:
            self.name = network_config.name
        else:
            # the name itself can be either a string or a list of names indexed by chain ID
            if isinstance(network_config.name, tuple):
                self.name = network_config.name[chain_id]
            else:
                # network_config.name is a prefix string
                self.name = network_config.name + str(chain_id)
        if suffix:
            self.name = self.name + suffix
        self.segmentation_id = self._get_item(network_config.segmentation_id,
                                              chain_id, auto_index=True)
        self.physical_network = self._get_item(network_config.physical_network, chain_id)
        self.reuse = False
        self.network = None
        self.router_name = None
        if manager.config.l3_router and hasattr(network_config, 'router_name'):
            self.router_name = network_config.router_name
        try:
            self._setup(network_config, lookup_only)
        except Exception:
            if lookup_only:
                LOG.error("Cannot find network %s", self.name)
            else:
                LOG.error("Error creating network %s", self.name)
            # rethrow, caller is responsible for overall cleanup
            raise

    def _get_item(self, item_field, index, auto_index=False):
        """Retrieve an item from a list or a single value.

        item_field: can be None, a tuple or a single value
        index: if None is same as 0, else is the index for a chain
        auto_index: if true will automatically get the final value by adding the
                    index to the base value (if full list not provided)

        If the item_field is not a tuple, it is considered same as a tuple with same value at any
        index.
        If a list is provided, its length must be > index
        """
        if not item_field:
            return None
        if index is None:
            index = 0
        if isinstance(item_field, tuple):
            try:
                return item_field[index]
            except IndexError:
                raise ChainException("List %s is too short for chain index %d" %
                                     (str(item_field), index))
        # single value is configured
        if auto_index:
            return item_field + index
        return item_field

    def _setup(self, network_config, lookup_only):
        # Lookup if there is a matching network with same name
        networks = self.manager.neutron_client.list_networks(name=self.name)
        if networks['networks']:
            network = networks['networks'][0]
            # a network of same name already exists, we need to verify it has the same
            # characteristics before we can reuse it
            if self.segmentation_id:
                if network['provider:segmentation_id'] != self.segmentation_id:
                    raise ChainException("Mismatch of 'segmentation_id' for reused "
                                         "network '{net}'. Network has id '{seg_id1}', "
                                         "configuration requires '{seg_id2}'."
                                         .format(net=self.name,
                                                 seg_id1=network['provider:segmentation_id'],
                                                 seg_id2=self.segmentation_id))
            if self.physical_network:
                if network['provider:physical_network'] != self.physical_network:
                    raise ChainException("Mismatch of 'physical_network' for reused "
                                         "network '{net}'. Network has '{phys1}', "
                                         "configuration requires '{phys2}'."
                                         .format(net=self.name,
                                                 phys1=network['provider:physical_network'],
                                                 phys2=self.physical_network))
            LOG.info('Reusing existing network %s', self.name)
            self.reuse = True
            self.network = network
        else:
            if lookup_only:
                raise ChainException('Network %s not found' % self.name)
            body = {
                'network': {
                    'name': self.name,
                    'admin_state_up': True
                }
            }
            if network_config.network_type:
                body['network']['provider:network_type'] = network_config.network_type
            if self.segmentation_id:
                body['network']['provider:segmentation_id'] = self.segmentation_id
            if self.physical_network:
                body['network']['provider:physical_network'] = self.physical_network
            self.network = self.manager.neutron_client.create_network(body)['network']
            # create associated subnet, all subnets have the same name (which is ok since
            # we do not need to address them directly by name)
            body = {
                'subnet': {'name': network_config.subnet,
                           'cidr': network_config.cidr,
                           'network_id': self.network['id'],
                           'enable_dhcp': False,
                           'ip_version': 4,
                           'dns_nameservers': []}
            }
            subnet = self.manager.neutron_client.create_subnet(body)['subnet']
            # add subnet id to the network dict since it has just been added
            self.network['subnets'] = [subnet['id']]
            LOG.info('Created network: %s', self.name)

    def get_uuid(self):
        """
        Extract UUID of this network.

        :return: UUID of this network
        """
        return self.network['id']

    def get_vlan(self):
        """
        Extract vlan for this network.

        :return: vlan ID for this network
        """
        if self.network['provider:network_type'] != 'vlan':
            raise ChainException('Trying to retrieve VLAN id for non VLAN network')
        return self.network['provider:segmentation_id']

    def get_vxlan(self):
        """
        Extract VNI for this network.

        :return: VNI ID for this network
        """
        if 'vxlan' not in self.network['provider:network_type']:
            raise ChainException('Trying to retrieve VNI for non VXLAN network')
        return self.network['provider:segmentation_id']

    def delete(self):
        """Delete this network (skipped if reused or never created)."""
        if not self.reuse and self.network:
            retry = 0
            # retry as neutron may refuse to delete while ports are still attached
            while retry < self.manager.config.generic_retry_count:
                try:
                    self.manager.neutron_client.delete_network(self.network['id'])
                    LOG.info("Deleted network: %s", self.name)
                    self.network = None
                    return
                except Exception:
                    retry += 1
                    LOG.info('Error deleting network %s (retry %d/%d)...',
                             self.name,
                             retry,
                             self.manager.config.generic_retry_count)
                    time.sleep(self.manager.config.generic_poll_sec)
            LOG.error('Unable to delete network: %s', self.name)
378 class ChainVnf(object):
379 """A class to represent a VNF in a chain."""
# NOTE(review): this listing is a numbered, partially-elided paste — original line
# numbers are embedded in each line and many lines are missing. Code is left
# byte-identical; comments below annotate the visible intent only.
381 def __init__(self, chain, vnf_id, networks):
382 """Reuse a VNF instance with same characteristics or create a new VNF instance.
384 chain: the chain where this vnf belongs
385 vnf_id: indicates the index of this vnf in its chain (first vnf=0)
386 networks: the list of all networks (ChainNetwork) of the current chain
388 self.manager = chain.manager
391 self.name = self.manager.config.loop_vm_name + str(chain.chain_id)
392 if len(networks) > 2:
393 # we will have more than 1 VM in each chain
394 self.name += '-' + str(vnf_id)
395 # A list of ports for this chain
396 # There are normally 2 ports carrying traffic (index 0, and index 1) and
397 # potentially multiple idle ports not carrying traffic (index 2 and up)
398 # For example if 7 idle interfaces are requested, the corresp. ports will be
406 self.idle_networks = []
409 # the vnf_id is conveniently also the starting index in networks
410 # for the left and right networks associated to this VNF
411 if self.manager.config.l3_router:
# L3 router mode: pass 4 networks (2 internal + 2 edge) to _setup
412 self._setup(networks[vnf_id:vnf_id + 4])
414 self._setup(networks[vnf_id:vnf_id + 2])
416 LOG.error("Error creating VNF %s", self.name)
# Build the nfvbenchvm config file content from the boot script template;
# in L3 router mode gateway/mac values come from the routers, otherwise from
# the traffic generator devices and the remote mac pair.
420 def _get_vm_config(self, remote_mac_pair):
421 config = self.manager.config
422 devices = self.manager.generator_config.devices
425 tg_gateway1_ip = self.routers[LEFT].ports[1]['fixed_ips'][0][
426 'ip_address'] # router edge ip left
427 tg_gateway2_ip = self.routers[RIGHT].ports[1]['fixed_ips'][0][
428 'ip_address'] # router edge ip right
429 tg_mac1 = self.routers[LEFT].ports[1]['mac_address'] # router edge mac left
430 tg_mac2 = self.routers[RIGHT].ports[1]['mac_address'] # router edge mac right
431 # edge cidr mask left
432 vnf_gateway1_cidr = \
433 self.ports[LEFT].get_ip() + self.manager.config.edge_networks.left.cidr[-3:]
434 # edge cidr mask right
435 vnf_gateway2_cidr = \
436 self.ports[RIGHT].get_ip() + self.manager.config.edge_networks.right.cidr[-3:]
437 if config.vm_forwarder != 'vpp':
438 raise ChainException(
439 'L3 router mode imply to set VPP as VM forwarder.'
440 'Please update your config file with: vm_forwarder: vpp')
442 tg_gateway1_ip = devices[LEFT].tg_gateway_ip_addrs
443 tg_gateway2_ip = devices[RIGHT].tg_gateway_ip_addrs
444 tg_mac1 = remote_mac_pair[0]
445 tg_mac2 = remote_mac_pair[1]
447 g1cidr = devices[LEFT].get_gw_ip(
448 self.chain.chain_id) + self.manager.config.internal_networks.left.cidr[-3:]
449 g2cidr = devices[RIGHT].get_gw_ip(
450 self.chain.chain_id) + self.manager.config.internal_networks.right.cidr[-3:]
452 vnf_gateway1_cidr = g1cidr
453 vnf_gateway2_cidr = g2cidr
455 with open(BOOT_SCRIPT_PATHNAME, 'r') as boot_script:
456 content = boot_script.read()
# vm_config dict is substituted into the boot script template below
458 'forwarder': config.vm_forwarder,
459 'intf_mac1': self.ports[LEFT].get_mac(),
460 'intf_mac2': self.ports[RIGHT].get_mac(),
461 'tg_gateway1_ip': tg_gateway1_ip,
462 'tg_gateway2_ip': tg_gateway2_ip,
463 'tg_net1': devices[LEFT].ip_addrs,
464 'tg_net2': devices[RIGHT].ip_addrs,
465 'vnf_gateway1_cidr': vnf_gateway1_cidr,
466 'vnf_gateway2_cidr': vnf_gateway2_cidr,
469 'vif_mq_size': config.vif_multiqueue_size
471 return content.format(**vm_config)
473 def _get_vnic_type(self, port_index):
474 """Get the right vnic type for given port index.
476 If SR-IOV is specified, middle ports in multi-VNF chains
477 can use vswitch or SR-IOV based on config.use_sriov_middle_net
479 if self.manager.config.sriov:
480 chain_length = self.chain.get_length()
481 if self.manager.config.use_sriov_middle_net or chain_length == 1:
483 if self.vnf_id == 0 and port_index == 0:
484 # first VNF in chain must use sriov for left port
486 if (self.vnf_id == chain_length - 1) and (port_index == 1):
487 # last VNF in chain must use sriov for right port
491 def _get_idle_networks_ports(self):
492 """Get the idle networks for PVP or PVVP chain (non shared net only)
494 For EXT packet path or shared net, returns empty list.
495 For PVP, PVVP these networks will be created if they do not exist.
496 chain_id: to which chain the networks belong.
497 a None value will mean that these networks are shared by all chains
501 config = self.manager.config
502 chain_id = self.chain.chain_id
503 idle_interfaces_per_vm = config.idle_interfaces_per_vm
504 if config.service_chain == ChainType.EXT or chain_id is None or \
505 idle_interfaces_per_vm == 0:
508 # Make a copy of the idle networks dict as we may have to modify the
510 idle_network_cfg = AttrDict(config.idle_networks)
511 if idle_network_cfg.segmentation_id:
# reserve a contiguous block of segmentation ids per chain
512 segmentation_id = idle_network_cfg.segmentation_id + \
513 chain_id * idle_interfaces_per_vm
515 segmentation_id = None
517 # create as many idle networks and ports as requested
518 for idle_index in range(idle_interfaces_per_vm):
519 if config.service_chain == ChainType.PVP:
520 suffix = '.%d' % (idle_index)
522 suffix = '.%d.%d' % (self.vnf_id, idle_index)
523 port_name = self.name + '-idle' + str(idle_index)
524 # update the segmentation id based on chain id and idle index
526 idle_network_cfg.segmentation_id = segmentation_id + idle_index
527 port_name = port_name + "." + str(segmentation_id)
529 networks.append(ChainNetwork(self.manager,
533 ports.append(ChainVnfPort(port_name,
535 networks[idle_index],
538 # need to cleanup all successful networks
544 self.idle_networks = networks
545 self.idle_ports = ports
# Discover a reusable instance of the same name, or stage ports/networks for a
# new instance (actual VM creation is deferred to create_vnf()).
547 def _setup(self, networks):
548 flavor_id = self.manager.flavor.flavor.id
549 # Check if we can reuse an instance with same name
550 for instance in self.manager.existing_instances:
551 if instance.name == self.name:
553 instance_right = RIGHT
554 # In case of L3 traffic instance use edge networks
555 if self.manager.config.l3_router:
556 instance_left = EDGE_LEFT
557 instance_right = EDGE_RIGHT
558 # Verify that other instance characteristics match
559 if instance.flavor['id'] != flavor_id:
560 self._reuse_exception('Flavor mismatch')
561 if instance.status != "ACTIVE":
562 self._reuse_exception('Matching instance is not in ACTIVE state')
563 # The 2 networks for this instance must also be reused
564 if not networks[instance_left].reuse:
565 self._reuse_exception('network %s is new' % networks[instance_left].name)
566 if not networks[instance_right].reuse:
567 self._reuse_exception('network %s is new' % networks[instance_right].name)
568 # instance.networks have the network names as keys:
569 # {'nfvbench-rnet0': ['192.168.2.10'], 'nfvbench-lnet0': ['192.168.1.8']}
570 if networks[instance_left].name not in instance.networks:
571 self._reuse_exception('Left network mismatch')
572 if networks[instance_right].name not in instance.networks:
573 self._reuse_exception('Right network mismatch')
576 self.instance = instance
577 LOG.info('Reusing existing instance %s on %s',
578 self.name, self.get_hypervisor_name())
579 # create or reuse/discover 2 ports per instance
580 if self.manager.config.l3_router:
581 self.ports = [ChainVnfPort(self.name + '-' + str(index),
584 self._get_vnic_type(index)) for index in [0, 1]]
586 self.ports = [ChainVnfPort(self.name + '-' + str(index),
589 self._get_vnic_type(index)) for index in [0, 1]]
591 # create idle networks and ports only if instance is not reused
592 # if reused, we do not care about idle networks/ports
594 self._get_idle_networks_ports()
596 # Create neutron routers for L3 traffic use case
597 if self.manager.config.l3_router and self.manager.openstack:
598 internal_nets = networks[:2]
599 if self.manager.config.service_chain == ChainType.PVP:
600 edge_nets = networks[2:]
602 edge_nets = networks[3:]
603 subnets_left = [internal_nets[0], edge_nets[0]]
604 routes_left = [{'destination': self.manager.config.traffic_generator.ip_addrs[0],
605 'nexthop': self.manager.config.traffic_generator.tg_gateway_ip_addrs[
607 {'destination': self.manager.config.traffic_generator.ip_addrs[1],
608 'nexthop': self.ports[0].get_ip()}]
610 ChainRouter(self.manager, edge_nets[0].router_name, subnets_left, routes_left))
611 subnets_right = [internal_nets[1], edge_nets[1]]
612 routes_right = [{'destination': self.manager.config.traffic_generator.ip_addrs[0],
613 'nexthop': self.ports[1].get_ip()},
614 {'destination': self.manager.config.traffic_generator.ip_addrs[1],
615 'nexthop': self.manager.config.traffic_generator.tg_gateway_ip_addrs[
618 ChainRouter(self.manager, edge_nets[1].router_name, subnets_right, routes_right))
619 # Overload gateway_ips property with router ip address for ARP and traffic calls
620 self.manager.generator_config.devices[LEFT].set_gw_ip(
621 self.routers[LEFT].ports[0]['fixed_ips'][0]['ip_address']) # router edge ip left)
622 self.manager.generator_config.devices[RIGHT].set_gw_ip(
623 self.routers[RIGHT].ports[0]['fixed_ips'][0]['ip_address']) # router edge ip right)
625 # if no reuse, actual vm creation is deferred after all ports in the chain are created
626 # since we need to know the next mac in a multi-vnf chain
628 def create_vnf(self, remote_mac_pair):
629 """Create the VNF instance if it does not already exist."""
630 if self.instance is None:
631 port_ids = [{'port-id': vnf_port.port['id']}
632 for vnf_port in self.ports]
634 for idle_port in self.idle_ports:
635 port_ids.append({'port-id': idle_port.port['id']})
636 vm_config = self._get_vm_config(remote_mac_pair)
637 az = self.manager.placer.get_required_az()
638 server = self.manager.comp.create_server(self.name,
639 self.manager.image_instance,
640 self.manager.flavor.flavor,
647 files={NFVBENCH_CFG_VM_PATHNAME: vm_config})
649 self.instance = server
650 if self.manager.placer.is_resolved():
651 LOG.info('Created instance %s on %s', self.name, az)
653 # the location is undetermined at this point
654 # self.get_hypervisor_name() will return None
655 LOG.info('Created instance %s - waiting for placement resolution...', self.name)
656 # here we MUST wait until this instance is resolved otherwise subsequent
657 # VNF creation can be placed in other hypervisors!
658 config = self.manager.config
659 max_retries = (config.check_traffic_time_sec +
660 config.generic_poll_sec - 1) / config.generic_poll_sec
662 for retry in range(max_retries):
663 status = self.get_status()
664 if status == 'ACTIVE':
665 hyp_name = self.get_hypervisor_name()
666 LOG.info('Instance %s is active and has been placed on %s',
668 self.manager.placer.register_full_name(hyp_name)
670 if status == 'ERROR':
671 raise ChainException('Instance %s creation error: %s' %
673 self.instance.fault['message']))
674 LOG.info('Waiting for instance %s to become active (retry %d/%d)...',
675 self.name, retry + 1, max_retries + 1)
676 time.sleep(config.generic_poll_sec)
679 LOG.error('Instance %s creation timed out', self.name)
680 raise ChainException('Instance %s creation timed out' % self.name)
683 raise ChainException('Unable to create instance: %s' % (self.name))
685 def _reuse_exception(self, reason):
686 raise ChainException('Instance %s cannot be reused (%s)' % (self.name, reason))
688 def get_status(self):
689 """Get the status of this instance."""
690 if self.instance.status != 'ACTIVE':
# refresh the cached server object until it leaves the transient state
691 self.instance = self.manager.comp.poll_server(self.instance)
692 return self.instance.status
694 def get_hostname(self):
695 """Get the hypervisor host name running this VNF instance."""
696 if self.manager.is_admin:
697 hypervisor_hostname = getattr(self.instance, 'OS-EXT-SRV-ATTR:hypervisor_hostname')
699 hypervisor_hostname = self.manager.config.hypervisor_hostname
700 if not hypervisor_hostname:
701 raise ChainException('Hypervisor hostname parameter is mandatory')
702 return hypervisor_hostname
704 def get_host_ip(self):
705 """Get the IP address of the host where this instance runs.
707 return: the IP address
710 self.host_ip = self.manager.comp.get_hypervisor(self.get_hostname()).host_ip
713 def get_hypervisor_name(self):
714 """Get hypervisor name (az:hostname) for this VNF instance."""
716 if self.manager.is_admin:
717 az = getattr(self.instance, 'OS-EXT-AZ:availability_zone')
719 az = self.manager.config.availability_zone
721 raise ChainException('Availability zone parameter is mandatory')
722 hostname = self.get_hostname()
724 return az + ':' + hostname
729 """Get the uuid for this instance."""
730 return self.instance.id
732 def delete(self, forced=False):
733 """Delete this VNF instance."""
735 LOG.info("Instance %s not deleted (reused)", self.name)
738 self.manager.comp.delete_server(self.instance)
739 LOG.info("Deleted instance %s", self.name)
740 for port in self.ports:
742 for port in self.idle_ports:
744 for network in self.idle_networks:
class Chain(object):
    """A class to manage a single chain.

    Can handle any type of chain (EXT, PVP, PVVP)
    """

    def __init__(self, chain_id, manager):
        """Create a new chain.

        chain_id: chain index (first chain is 0)
        manager: the chain manager that owns all chains
        """
        self.chain_id = chain_id
        self.manager = manager
        self.encaps = manager.encaps
        self.networks = None
        self.instances = []
        try:
            self.networks = manager.get_networks(chain_id)
            # For external chain VNFs can only be discovered from their MAC addresses
            # either from config or from ARP
            if manager.config.service_chain != ChainType.EXT:
                for chain_instance_index in range(self.get_length()):
                    self.instances.append(ChainVnf(self,
                                                   chain_instance_index,
                                                   self.networks))
                # at this point new VNFs are not created yet but
                # verify that all discovered VNFs are on the same hypervisor
                self._check_hypervisors()
                # now that all VNF ports are created we need to calculate the
                # left/right remote MAC for each VNF in the chain
                # before actually creating the VNF itself
                rem_mac_pairs = self._get_remote_mac_pairs()
                for instance in self.instances:
                    rem_mac_pair = rem_mac_pairs.pop(0)
                    instance.create_vnf(rem_mac_pair)
        except Exception:
            # clean up any partially staged resources before rethrowing
            self.delete()
            raise

    def _check_hypervisors(self):
        """Verify all discovered VNF instances run on the same hypervisor."""
        common_hypervisor = None
        for instance in self.instances:
            # get the full hypervizor name (az:compute)
            hname = instance.get_hypervisor_name()
            if hname:
                if common_hypervisor:
                    if hname != common_hypervisor:
                        raise ChainException('Discovered instances on different hypervisors:'
                                             ' %s and %s' % (hname, common_hypervisor))
                else:
                    common_hypervisor = hname
        if common_hypervisor:
            # check that the common hypervisor name matchs the requested hypervisor name
            # and set the name to be used by all future instances (if any)
            if not self.manager.placer.register_full_name(common_hypervisor):
                raise ChainException('Discovered hypervisor placement %s is incompatible' %
                                     common_hypervisor)

    def get_length(self):
        """Get the number of VNF in the chain."""
        # Take into account 2 edge networks for routers
        return len(self.networks) - 3 if self.manager.config.l3_router else len(self.networks) - 1

    def _get_remote_mac_pairs(self):
        """Get the list of remote mac pairs for every VNF in the chain.

        Traverse the chain from left to right and establish the
        left/right remote MAC for each VNF in the chain.

        PVP example, mac sequence: tg_src_mac, vm0-mac0, vm0-mac1, tg_dst_mac
        must produce [[tg_src_mac, tg_dst_mac]] or looking at index in mac sequence: [[0, 3]]
        the mac pair is what the VNF at that position (index 0) sees as next hop mac left and right

        PVVP:
        tg_src_mac, vm0-mac0, vm0-mac1, vm1-mac0, vm1-mac1, tg_dst_mac
        Must produce the following list:
        [[tg_src_mac, vm1-mac0], [vm0-mac1, tg_dst_mac]] or index: [[0, 3], [2, 5]]

        General case with 3 VMs in chain, the list of consecutive macs (left to right):
        tg_src_mac, vm0-mac0, vm0-mac1, vm1-mac0, vm1-mac1, vm2-mac0, vm2-mac1, tg_dst_mac
        Must produce the following list:
        [[tg_src_mac, vm1-mac0], [vm0-mac1, vm2-mac0], [vm1-mac1, tg_dst_mac]]
        or index: [[0, 3], [2, 5], [4, 7]]

        The series pattern is pretty clear: [[n, n+3],... ] where n is multiple of 2
        """
        # line up all mac from left to right
        mac_seq = [self.manager.generator_config.devices[LEFT].mac]
        for instance in self.instances:
            mac_seq.append(instance.ports[0].get_mac())
            mac_seq.append(instance.ports[1].get_mac())
        mac_seq.append(self.manager.generator_config.devices[RIGHT].mac)
        base = 0
        rem_mac_pairs = []
        for _ in self.instances:
            rem_mac_pairs.append([mac_seq[base], mac_seq[base + 3]])
            base += 2
        return rem_mac_pairs

    def get_instances(self):
        """Return all instances for this chain."""
        return self.instances

    def get_vlan(self, port_index):
        """Get the VLAN id on a given port.

        port_index: left port is 0, right port is 1
        return: the vlan_id or None if there is no vlan tagging
        """
        # for port 1 we need to return the VLAN of the last network in the chain
        # The networks array contains 2 networks for PVP [left, right]
        # and 3 networks in the case of PVVP [left.middle,right]
        if port_index:
            # this will pick the last item in array
            port_index = -1
        return self.networks[port_index].get_vlan()

    def get_vxlan(self, port_index):
        """Get the VXLAN id on a given port.

        port_index: left port is 0, right port is 1
        return: the vxlan_id or None if there is no vxlan
        """
        # for port 1 we need to return the VLAN of the last network in the chain
        # The networks array contains 2 networks for PVP [left, right]
        # and 3 networks in the case of PVVP [left.middle,right]
        if port_index:
            # this will pick the last item in array
            port_index = -1
        return self.networks[port_index].get_vxlan()

    def get_dest_mac(self, port_index):
        """Get the dest MAC on a given port.

        port_index: left port is 0, right port is 1
        return: the dest MAC
        """
        if port_index:
            # for right port, use the right port MAC of the last (right most) VNF In chain
            return self.instances[-1].ports[1].get_mac()
        # for left port use the left port MAC of the first (left most) VNF in chain
        return self.instances[0].ports[0].get_mac()

    def get_network_uuids(self):
        """Get UUID of networks in this chain from left to right (order is important).

        :return: list of UUIDs of networks (2 or 3 elements)
        """
        return [net['id'] for net in self.networks]

    def get_host_ips(self):
        """Return the IP adresss(es) of the host compute nodes used for this chain.

        :return: a list of 1 or 2 IP addresses
        """
        return [vnf.get_host_ip() for vnf in self.instances]

    def get_compute_nodes(self):
        """Return the name of the host compute nodes used for this chain.

        :return: a list of 1 host name in the az:host format
        """
        # Since all chains go through the same compute node(s) we can just retrieve the
        # compute node name(s) for the first chain
        return [vnf.get_hypervisor_name() for vnf in self.instances]

    def delete(self):
        """Delete this chain."""
        for instance in self.instances:
            instance.delete()
        # only delete if these are chain private networks (not shared)
        if not self.manager.config.service_chain_shared_net:
            for network in self.networks:
                network.delete()
class InstancePlacer(object):
    """A class to manage instance placement for all VNFs in all chains.

    A full az string is made of 2 parts AZ and hypervisor.
    The placement is resolved when both parts az and hypervisor names are known.
    """

    def __init__(self, req_az, req_hyp):
        """Create a new instance placer.

        req_az: requested AZ (can be None or empty if no preference)
        req_hyp: requested hypervisor name (can be None of empty if no preference)
                 can be any of 'nova:', 'comp1', 'nova:comp1'
                 if it is a list, only the first item is used (backward compatibility in config)

        req_az is ignored if req_hyp has an az part
        all other parts beyond the first 2 are ignored in req_hyp
        """
        # if passed a list just pick the first item
        if req_hyp and isinstance(req_hyp, list):
            req_hyp = req_hyp[0]
        # only pick first part of az
        if req_az and ':' in req_az:
            req_az = req_az.split(':')[0]
        if req_hyp:
            # check if requested hypervisor string has an AZ part
            split_hyp = req_hyp.split(':')
            if len(split_hyp) > 1:
                # override the AZ part and hypervisor part
                req_az = split_hyp[0]
                req_hyp = split_hyp[1]
        self.requested_az = req_az if req_az else ''
        self.requested_hyp = req_hyp if req_hyp else ''
        # Nova can accept AZ only (e.g. 'nova:', use any hypervisor in that AZ)
        # or hypervisor only (e.g. ':comp1')
        # or both (e.g. 'nova:comp1')
        if req_az:
            self.required_az = req_az + ':' + self.requested_hyp
        else:
            # need to insert a ':' so nova knows this is the hypervisor name
            self.required_az = ':' + self.requested_hyp if req_hyp else ''
        # placement is resolved when both AZ and hypervisor names are known and set
        self.resolved = self.requested_az != '' and self.requested_hyp != ''

    def get_required_az(self):
        """Return the required az (can be resolved or not)."""
        return self.required_az

    def register_full_name(self, discovered_az):
        """Verify compatibility and register a discovered hypervisor full name.

        discovered_az: a discovered AZ in az:hypervisor format
        return: True if discovered_az is compatible and set
                False if discovered_az is not compatible
        """
        if self.resolved:
            return discovered_az == self.required_az

        # must be in full az format
        split_daz = discovered_az.split(':')
        if len(split_daz) != 2:
            return False
        if self.requested_az and self.requested_az != split_daz[0]:
            return False
        if self.requested_hyp and self.requested_hyp != split_daz[1]:
            return False
        self.required_az = discovered_az
        self.resolved = True
        return True

    def is_resolved(self):
        """Check if the full AZ is resolved.

        return: True if resolved
        """
        return self.resolved
class ChainManager(object):
    """A class for managing all chains for a given run.

    Supports openstack or no openstack.
    Supports EXT, PVP and PVVP chains.
    """
def __init__(self, chain_runner):
    """Create a chain manager to take care of discovering or bringing up the requested chains.

    A new instance must be created every time a new config is used.
    config: the nfvbench config to use
    cred: openstack credentials to use of None if there is no openstack
    """
    self.chain_runner = chain_runner
    self.config = chain_runner.config
    self.generator_config = chain_runner.traffic_client.generator_config
    self.chains = []
    self.image_instance = None
    self.image_name = None
    # Left and right networks shared across all chains (only if shared)
    self.networks = []
    self.flavor = None
    self.comp = None
    self.placer = None
    self.nova_client = None
    self.neutron_client = None
    self.glance_client = None
    self.existing_instances = []
    # existing ports keyed by the network uuid they belong to
    self._existing_ports = {}
    # default to non-admin, overridden below when openstack credentials are present
    self.is_admin = False
    config = self.config
    self.openstack = (chain_runner.cred is not None) and not config.l2_loopback
    self.chain_count = config.service_chain_count
    if self.openstack:
        # openstack only
        session = chain_runner.cred.get_session()
        self.is_admin = chain_runner.cred.is_admin
        self.nova_client = Client(2, session=session)
        self.neutron_client = neutronclient.Client('2.0', session=session)
        self.glance_client = glanceclient.Client('2', session=session)
        self.comp = compute.Compute(self.nova_client,
                                    self.glance_client,
                                    config)
        try:
            if config.service_chain != ChainType.EXT:
                self.placer = InstancePlacer(config.availability_zone, config.compute_nodes)
                self._setup_image()
                self.flavor = ChainFlavor(config.flavor_type, config.flavor, self.comp)
                # Get list of all existing instances to check if some instances can be reused
                self.existing_instances = self.comp.get_server_list()
            else:
                # For EXT chains, the external_networks left and right fields in the config
                # must be either a prefix string or a list of at least chain-count strings
                self._check_extnet('left', config.external_networks.left)
                self._check_extnet('right', config.external_networks.right)

            # If networks are shared across chains, get the list of networks
            if config.service_chain_shared_net:
                self.networks = self.get_networks()
            # Reuse/create chains
            for chain_id in range(self.chain_count):
                self.chains.append(Chain(chain_id, self))
            if config.service_chain == ChainType.EXT:
                # if EXT and no ARP or VxLAN we need to read dest MACs from config
                if config.no_arp or config.vxlan:
                    self._get_dest_macs_from_config()

            # Make sure all instances are active before proceeding
            self._ensure_instances_active()
            # network API call do not show VLANS ID if not admin read from config
            if not self.is_admin and config.vlan_tagging:
                self._get_config_vlans()
        except Exception:
            # clean up any resource staged so far before bailing out
            self.delete()
            raise
    else:
        # no openstack, no need to create chains
        if not config.l2_loopback and config.no_arp:
            self._get_dest_macs_from_config()
        if config.vlan_tagging:
            # make sure there at least as many entries as chains in each left/right list
            if len(config.vlans) != 2:
                raise ChainException('The config vlans property must be a list '
                                     'with 2 lists of VLAN IDs')
            self._get_config_vlans()
        if config.vxlan:
            raise ChainException('VxLAN is only supported with OpenStack')
1094 def _check_extnet(self, side, name):
1096 raise ChainException('external_networks.%s must contain a valid network'
1097 ' name prefix or a list of network names' % side)
1098 if isinstance(name, tuple) and len(name) < self.chain_count:
1099 raise ChainException('external_networks.%s %s'
1100 ' must have at least %d names' % (side, name, self.chain_count))
1102 def _get_config_vlans(self):
1105 self.vlans = [self._check_list('vlans[0]', self.config.vlans[0], re_vlan),
1106 self._check_list('vlans[1]', self.config.vlans[1], re_vlan)]
1108 raise ChainException('vlans parameter is mandatory. Set valid value in config file')
1110 def _get_dest_macs_from_config(self):
1111 re_mac = "[0-9a-fA-F]{2}([-:])[0-9a-fA-F]{2}(\\1[0-9a-fA-F]{2}){4}$"
1112 tg_config = self.config.traffic_generator
1113 self.dest_macs = [self._check_list("mac_addrs_left",
1114 tg_config.mac_addrs_left, re_mac),
1115 self._check_list("mac_addrs_right",
1116 tg_config.mac_addrs_right, re_mac)]
1118 def _check_list(self, list_name, ll, pattern):
1119 # if it is a single int or mac, make it a list of 1 int
1120 if isinstance(ll, (int, str)):
1123 if not re.match(pattern, str(item)):
1124 raise ChainException("Invalid format '{item}' specified in {fname}"
1125 .format(item=item, fname=list_name))
1126 # must have at least 1 element
1128 raise ChainException('%s cannot be empty' % (list_name))
1129 # for shared network, if 1 element is passed, replicate it as many times
1131 if self.config.service_chain_shared_net and len(ll) == 1:
1132 ll = [ll[0]] * self.chain_count
1134 # number of elements musty be the number of chains
1135 elif len(ll) < self.chain_count:
1136 raise ChainException('%s=%s must be a list with %d elements per chain' %
1137 (list_name, ll, self.chain_count))
def _setup_image(self):
    """Find or upload the VM image to use and set its multiqueue property.

    Sets self.image_name and self.image_instance.
    raises ChainException if no suitable image file can be found or uploaded.
    """
    # To avoid reuploading image in server mode, check whether image_name is set or not
    if self.image_name:
        self.image_instance = self.comp.find_image(self.image_name)
    if self.image_instance:
        LOG.info("Reusing image %s", self.image_name)
    else:
        image_name_search_pattern = r'(nfvbenchvm-\d+(\.\d+)*).qcow2'
        if self.config.vm_image_file:
            match = re.search(image_name_search_pattern, self.config.vm_image_file)
            if match:
                self.image_name = match.group(1)
                LOG.info('Using provided VM image file %s', self.config.vm_image_file)
            else:
                raise ChainException('Provided VM image file name %s must start with '
                                     '"nfvbenchvm-<version>"' % self.config.vm_image_file)
        else:
            # no file provided in config: look for a bundled image in the package root
            pkg_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
            for f in os.listdir(pkg_root):
                if re.search(image_name_search_pattern, f):
                    self.config.vm_image_file = pkg_root + '/' + f
                    self.image_name = f.replace('.qcow2', '')
                    LOG.info('Found built-in VM image file %s', f)
                    break
            else:
                raise ChainException('Cannot find any built-in VM image file.')
        self.image_instance = self.comp.find_image(self.image_name)
        if not self.image_instance:
            LOG.info('Uploading %s', self.image_name)
            res = self.comp.upload_image_via_url(self.image_name,
                                                 self.config.vm_image_file)
            if not res:
                raise ChainException('Error uploading image %s from %s. ABORTING.' %
                                     (self.image_name, self.config.vm_image_file))
            LOG.info('Image %s successfully uploaded.', self.image_name)
            self.image_instance = self.comp.find_image(self.image_name)

    # image multiqueue property must be set according to the vif_multiqueue_size
    # config value (defaults to 1 or disabled)
    self.comp.image_set_multiqueue(self.image_instance, self.config.vif_multiqueue_size > 1)
1183 def _ensure_instances_active(self):
1185 for chain in self.chains:
1186 instances.extend(chain.get_instances())
1187 initial_instance_count = len(instances)
1188 max_retries = (self.config.check_traffic_time_sec +
1189 self.config.generic_poll_sec - 1) / self.config.generic_poll_sec
1192 remaining_instances = []
1193 for instance in instances:
1194 status = instance.get_status()
1195 if status == 'ACTIVE':
1196 LOG.info('Instance %s is ACTIVE on %s',
1197 instance.name, instance.get_hypervisor_name())
1199 if status == 'ERROR':
1200 raise ChainException('Instance %s creation error: %s' %
1202 instance.instance.fault['message']))
1203 remaining_instances.append(instance)
1204 if not remaining_instances:
1207 if retry >= max_retries:
1208 raise ChainException('Time-out: %d/%d instances still not active' %
1209 (len(remaining_instances), initial_instance_count))
1210 LOG.info('Waiting for %d/%d instance to become active (retry %d/%d)...',
1211 len(remaining_instances), initial_instance_count,
1213 instances = remaining_instances
1214 time.sleep(self.config.generic_poll_sec)
1215 if initial_instance_count:
1216 LOG.info('All instances are active')
def get_networks(self, chain_id=None):
    """Get the networks for given EXT, PVP or PVVP chain.

    For EXT packet path, these networks must pre-exist.
    For PVP, PVVP these networks will be created if they do not exist.
    chain_id: to which chain the networks belong.
              a None value will mean that these networks are shared by all chains
    return: a list of ChainNetwork instances
    """
    if self.networks:
        # the only case where self.networks exists is when the networks are shared
        # across all chains
        return self.networks
    if self.config.service_chain == ChainType.EXT:
        # external networks are only discovered, never created
        lookup_only = True
        ext_net = self.config.external_networks
        net_cfg = [AttrDict({'name': name,
                             'segmentation_id': None,
                             'physical_network': None})
                   for name in [ext_net.left, ext_net.right]]
        # segmentation id and subnet should be discovered from neutron
    else:
        lookup_only = False
        int_nets = self.config.internal_networks
        if self.config.service_chain == ChainType.PVP:
            net_cfg = [int_nets.left, int_nets.right]
        else:
            net_cfg = [int_nets.left, int_nets.middle, int_nets.right]
        if self.config.l3_router:
            edge_nets = self.config.edge_networks
            net_cfg.append(edge_nets.left)
            net_cfg.append(edge_nets.right)
    networks = []
    try:
        for cfg in net_cfg:
            networks.append(ChainNetwork(self, cfg, chain_id, lookup_only=lookup_only))
    except Exception:
        # need to cleanup all successful networks prior to bailing out
        for net in networks:
            net.delete()
        raise
    return networks
def get_existing_ports(self):
    """Get the list of existing ports.

    Lazy retrieval of ports as this can be costly if there are lots of ports and
    is only needed when VM and network are being reused.

    return: a dict of list of neutron ports indexed by the network uuid they are attached to

    Each port is a dict with fields such as below:
    {'allowed_address_pairs': [], 'extra_dhcp_opts': [],
     'updated_at': '2018-10-06T07:15:35Z', 'device_owner': 'compute:nova',
     'revision_number': 10, 'port_security_enabled': False, 'binding:profile': {},
     'fixed_ips': [{'subnet_id': '6903a3b3-49a1-4ba4-8259-4a90e7a44b21',
                    'ip_address': '192.168.1.4'}], 'id': '3dcb9cfa-d82a-4dd1-85a1-fd8284b52d72',
     'security_groups': [],
     'binding:vif_details': {'vhostuser_socket': '/tmp/3dcb9cfa-d82a-4dd1-85a1-fd8284b52d72',
                             'vhostuser_mode': 'server'},
     'binding:vif_type': 'vhostuser',
     'mac_address': 'fa:16:3e:3c:63:04',
     'project_id': '977ac76a63d7492f927fa80e86baff4c',
     'binding:host_id': 'a20-champagne-compute-1',
     'device_id': 'a98e2ad2-5371-4aa5-a356-8264a970ce4b',
     'name': 'nfvbench-loop-vm0-0', 'admin_state_up': True,
     'network_id': '3ea5fd88-278f-4d9d-b24d-1e443791a055',
     'tenant_id': '977ac76a63d7492f927fa80e86baff4c',
     'created_at': '2018-10-06T07:15:10Z',
     'binding:vnic_type': 'normal'}
    """
    if not self._existing_ports:
        LOG.info('Loading list of all ports...')
        existing_ports = self.neutron_client.list_ports()['ports']
        # place all ports in the dict keyed by the port network uuid
        for port in existing_ports:
            port_list = self._existing_ports.setdefault(port['network_id'], [])
            port_list.append(port)
        LOG.info("Loaded %d ports attached to %d networks",
                 len(existing_ports), len(self._existing_ports))
    return self._existing_ports
def get_ports_from_network(self, chain_network):
    """Get the list of existing ports that belong to a network.

    Lazy retrieval of ports as this can be costly if there are lots of ports and
    is only needed when VM and network are being reused.

    chain_network: a ChainNetwork instance for which attached ports neeed to be retrieved
    return: list of neutron ports attached to requested network
    """
    return self.get_existing_ports().get(chain_network.get_uuid(), None)
def get_hypervisor_from_mac(self, mac):
    """Get the hypervisor that hosts a VM MAC.

    mac: MAC address to look for
    return: the hypervisor where the matching port runs or None if not found
    """
    # _existing_ports is a dict of list of ports indexed by network id
    for port_list in self.get_existing_ports().values():
        for port in port_list:
            try:
                if port['mac_address'] == mac:
                    host_id = port['binding:host_id']
                    return self.comp.get_hypervisor(host_id)
            except KeyError:
                # some ports may not carry a binding:host_id field; skip them
                pass
    return None
def get_host_ip_from_mac(self, mac):
    """Get the host IP address matching a MAC.

    mac: MAC address to look for
    return: the IP address of the host where the matching port runs or None if not found
    """
    hypervisor = self.get_hypervisor_from_mac(mac)
    if hypervisor:
        return hypervisor.host_ip
    return None
def get_chain_vlans(self, port_index):
    """Get the list of per chain VLAN id on a given port.

    port_index: left port is 0, right port is 1
    return: a VLAN ID list indexed by the chain index or None if no vlan tagging
    """
    if self.chains and self.is_admin:
        return [self.chains[chain_index].get_vlan(port_index)
                for chain_index in range(self.chain_count)]
    # no openstack or no admin access: use the vlans read from the config
    return self.vlans[port_index]
def get_chain_vxlans(self, port_index):
    """Get the list of per chain VNIs id on a given port.

    port_index: left port is 0, right port is 1
    return: a VNIs ID list indexed by the chain index or None if no vlan tagging
    """
    if self.chains and self.is_admin:
        return [self.chains[chain_index].get_vxlan(port_index)
                for chain_index in range(self.chain_count)]
    # no openstack or no admin access: VNIs cannot be discovered
    raise ChainException('VxLAN is only supported with OpenStack and with admin user')
def get_dest_macs(self, port_index):
    """Get the list of per chain dest MACs on a given port.

    Should not be called if EXT+ARP is used (in that case the traffic gen will
    have the ARP responses back from VNFs with the dest MAC to use).

    port_index: left port is 0, right port is 1
    return: a list of dest MACs indexed by the chain index
    """
    if self.chains and self.config.service_chain != ChainType.EXT:
        return [self.chains[chain_index].get_dest_mac(port_index)
                for chain_index in range(self.chain_count)]
    # no openstack or EXT+no-arp
    return self.dest_macs[port_index]
def get_host_ips(self):
    """Return the IP address(es) of the host compute nodes used for this run.

    :return: a list of 1 IP address
    """
    # Since all chains go through the same compute node(s) we can just retrieve the
    # compute node(s) for the first chain
    if self.chains:
        if self.config.service_chain != ChainType.EXT:
            return self.chains[0].get_host_ips()
        # in the case of EXT, the compute node must be retrieved from the port
        # associated to any of the dest MACs
        dst_macs = self.generator_config.get_dest_macs()
        # dest MAC on port 0, chain 0
        dst_mac = dst_macs[0][0]
        host_ip = self.get_host_ip_from_mac(dst_mac)
        if host_ip:
            LOG.info('Found compute node IP for EXT chain: %s', host_ip)
            return [host_ip]
    # no openstack = no chains
    return []
def get_compute_nodes(self):
    """Return the name of the host compute nodes used for this run.

    :return: a list of 0 or 1 host name in the az:host format
    """
    # Since all chains go through the same compute node(s) we can just retrieve the
    # compute node name(s) for the first chain
    if self.chains:
        if self.config.service_chain != ChainType.EXT:
            return self.chains[0].get_compute_nodes()
        # in the case of EXT, the compute node must be retrieved from the port
        # associated to any of the dest MACs
        dst_macs = self.generator_config.get_dest_macs()
        # dest MAC on port 0, chain 0
        dst_mac = dst_macs[0][0]
        hypervisor = self.get_hypervisor_from_mac(dst_mac)
        if hypervisor:
            LOG.info('Found hypervisor for EXT chain: %s', hypervisor.hypervisor_hostname)
            # leading ':' so nova reads it as a hypervisor name with no AZ part
            return [':' + hypervisor.hypervisor_hostname]
    # no openstack = no chains
    return []
def delete(self):
    """Delete resources for all chains."""
    for chain in self.chains:
        chain.delete()
    for network in self.networks:
        network.delete()
    if self.flavor:
        self.flavor.delete()