2 # Copyright 2018 Cisco Systems, Inc. All rights reserved.
4 # Licensed under the Apache License, Version 2.0 (the "License"); you may
5 # not use this file except in compliance with the License. You may obtain
6 # a copy of the License at
8 # http://www.apache.org/licenses/LICENSE-2.0
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 # License for the specific language governing permissions and limitations
17 # This module takes care of chaining networks, ports and vms
19 """NFVBENCH CHAIN DISCOVERY/STAGING.
21 This module takes care of staging/discovering all resources that are participating in a
22 benchmarking session: flavors, networks, ports, VNF instances.
23 If a resource is discovered with the same name, it will be reused.
24 Otherwise it will be created.
26 ChainManager: manages VM image, flavor, the staging discovery of all chains
28 Chain: manages one chain, has 2 or more networks and 1 or more instances
29 ChainNetwork: manages 1 network in a chain
30 ChainVnf: manages 1 VNF instance in a chain, has 2 ports
31 ChainVnfPort: manages 1 instance port
33 ChainManager-->Chain(*)
34 Chain-->ChainNetwork(*),ChainVnf(*)
35 ChainVnf-->ChainVnfPort(2)
37 Once created/discovered, instances are checked to be in the active state (ready to pass traffic)
38 Configuration parameters that will influence how these resources are staged/related:
39 - openstack or no openstack
42 - number of VNF in each chain (PVP, PVVP)
43 - SRIOV and middle port SRIOV for port types
44 - whether networks are shared across chains or not
46 There is no traffic generation involved in this module.
52 from glanceclient.v2 import client as glanceclient
53 from neutronclient.neutron import client as neutronclient
54 from novaclient.client import Client
56 from attrdict import AttrDict
59 from specs import ChainType
61 # Left and right index for network and port lists
64 # Name of the VM config file
65 NFVBENCH_CFG_FILENAME = 'nfvbenchvm.conf'
66 # full pathname of the VM config in the VM
67 NFVBENCH_CFG_VM_PATHNAME = os.path.join('/etc/', NFVBENCH_CFG_FILENAME)
68 # full path of the boot shell script template file on the server where nfvbench runs
69 BOOT_SCRIPT_PATHNAME = os.path.join(os.path.dirname(os.path.abspath(__file__)),
71 NFVBENCH_CFG_FILENAME)
# Module-wide exception type: any unrecoverable error while staging or
# discovering chain resources (mismatched reused resource, creation failure,
# timeout) is reported as a ChainException.
74 class ChainException(Exception):
75 """Exception while operating the chains."""
# Network encapsulation marker class.
# NOTE(review): the class body is not visible in this view (embedded line
# numbers jump from 80 to 83) - it may be empty or define methods upstream;
# verify against the original file.
79 class NetworkEncaps(object):
80 """Network encapsulation."""
83 class ChainFlavor(object):
84 """Class to manage the chain flavor."""
# Reuse-or-create semantics: find_flavor() is tried first; when not found,
# the flavor is created from flavor_dict and the optional 'extra_specs'
# entry is applied via set_keys().
# NOTE(review): several original lines are missing from this view (embedded
# line numbers jump 88->90->94->96->98...), including the 'self.comp' and
# 'self.reuse' assignments and the reuse/create branch structure - verify
# against the upstream file.
86 def __init__(self, flavor_name, flavor_dict, comp):
87 """Create a flavor."""
# name under which the flavor is looked up / created
88 self.name = flavor_name
# try to reuse an existing flavor of the same name first
90 self.flavor = self.comp.find_flavor(flavor_name)
94 LOG.info("Reused flavor '%s'", flavor_name)
# 'extra_specs' is removed from the creation properties and applied
# separately after creation (set_keys below)
96 extra_specs = flavor_dict.pop('extra_specs', None)
98 self.flavor = comp.create_flavor(flavor_name,
101 LOG.info("Created flavor '%s'", flavor_name)
103 self.flavor.set_keys(extra_specs)
# delete(): only flavors created by this run are removed; reused flavors
# are left untouched ('def delete' header itself is not visible here)
106 """Delete this flavor."""
107 if not self.reuse and self.flavor:
109 LOG.info("Flavor '%s' deleted", self.name)
112 class ChainVnfPort(object):
113 """A port associated to one VNF in the chain."""
# NOTE(review): many original source lines are missing from this view
# (embedded line numbers are not contiguous); 'continue' statements,
# assignments such as self.name/self.port/self.reuse and several 'def'
# headers are not visible - verify control flow against upstream.
115 def __init__(self, name, vnf, chain_network, vnic_type):
116 """Create or reuse a port on a given network.
118 if vnf.instance is None the VNF instance is not reused and this ChainVnfPort instance must
120 Otherwise vnf.instance is a reused VNF instance and this ChainVnfPort instance must
121 find an existing port to reuse that matches the port requirements: same attached network,
122 instance, name, vnic type
124 name: name for this port
125 vnf: ChainVnf instance that owns this port
126 chain_network: ChainNetwork instance where this port should attach
127 vnic_type: required vnic type for this port
131 self.manager = vnf.manager
135 # VNF instance is reused, we need to find an existing port that matches this instance
137 # discover ports attached to this instance
138 port_list = self.manager.get_ports_from_network(chain_network)
139 for port in port_list:
# skip candidates that do not match on name, vnic type and owning instance
140 if port['name'] != name:
142 if port['binding:vnic_type'] != vnic_type:
144 if port['device_id'] == vnf.get_uuid():
146 LOG.info('Reusing existing port %s mac=%s', name, port['mac_address'])
# falling through the loop means no existing port matched the reused VNF
149 raise ChainException('Cannot find matching port')
151 # VNF instance is not created yet, we need to create a new port
# Neutron port create request body; the dict opening lines ('port'/'name'
# keys) are not visible in this view
155 'network_id': chain_network.get_uuid(),
156 'binding:vnic_type': vnic_type
159 port = self.manager.neutron_client.create_port(body)
160 self.port = port['port']
161 LOG.info('Created port %s', name)
# best-effort: clear security groups and disable port security so traffic
# is not filtered; a failure is logged and ignored (second LOG below)
163 self.manager.neutron_client.update_port(self.port['id'], {
165 'security_groups': [],
166 'port_security_enabled': False,
169 LOG.info('Security disabled on port %s', name)
171 LOG.info('Failed to disable security on port %s (ignored)', name)
# get_mac() accessor for the Neutron-assigned MAC ('def' header not visible)
174 """Get the MAC address for this port."""
175 return self.port['mac_address']
# delete(): skip when the port was reused or never created; otherwise retry
# the Neutron delete up to config.generic_retry_count times
178 """Delete this port instance."""
179 if self.reuse or not self.port:
182 while retry < self.manager.config.generic_retry_count:
184 self.manager.neutron_client.delete_port(self.port['id'])
185 LOG.info("Deleted port %s", self.name)
189 time.sleep(self.manager.config.generic_poll_sec)
190 LOG.error('Unable to delete port: %s', self.name)
193 class ChainNetwork(object):
194 """Could be a shared network across all chains or a chain private network."""
# NOTE(review): this view is missing many original source lines (embedded
# line numbers are not contiguous); 'def' headers, else branches, the retry
# counter and some dict opening/closing lines are not visible - verify
# against the upstream file.
196 def __init__(self, manager, network_config, chain_id=None, lookup_only=False):
197 """Create a network for given chain.
199 network_config: a dict containing the network properties
200 (name, segmentation_id and physical_network)
201 chain_id: to which chain the networks belong.
202 a None value will mean that these networks are shared by all chains
204 self.manager = manager
206 self.name = network_config.name
208 # the name itself can be either a string or a list of names indexed by chain ID
209 if isinstance(network_config.name, tuple):
210 self.name = network_config.name[chain_id]
212 # network_config.name is a prefix string
213 self.name = network_config.name + str(chain_id)
# segmentation_id may be auto-indexed per chain (base value + chain_id)
214 self.segmentation_id = self._get_item(network_config.segmentation_id,
215 chain_id, auto_index=True)
216 self.physical_network = self._get_item(network_config.physical_network, chain_id)
222 self._setup(network_config, lookup_only)
# error paths - the surrounding try/except lines are not visible here
225 LOG.error("Cannot find network %s", self.name)
227 LOG.error("Error creating network %s", self.name)
231 def _get_item(self, item_field, index, auto_index=False):
232 """Retrieve an item from a list or a single value.
234 item_field: can be None, a tuple, or a single value
235 index: if None is same as 0, else is the index for a chain
236 auto_index: if true will automatically get the final value by adding the
237 index to the base value (if full list not provided)
239 If the item_field is not a tuple, it is considered same as a tuple with same value at any
241 If a list is provided, its length must be > index
247 if isinstance(item_field, tuple):
249 return item_field[index]
251 raise ChainException("List %s is too short for chain index %d" %
252 (str(item_field), index))
253 # single value is configured
255 return item_field + index
258 def _setup(self, network_config, lookup_only):
259 # Lookup if there is a matching network with same name
260 networks = self.manager.neutron_client.list_networks(name=self.name)
261 if networks['networks']:
262 network = networks['networks'][0]
263 # a network of same name already exists, we need to verify it has the same
265 if self.segmentation_id:
266 if network['provider:segmentation_id'] != self.segmentation_id:
267 raise ChainException("Mismatch of 'segmentation_id' for reused "
268 "network '{net}'. Network has id '{seg_id1}', "
269 "configuration requires '{seg_id2}'."
270 .format(net=self.name,
271 seg_id1=network['provider:segmentation_id'],
272 seg_id2=self.segmentation_id))
274 if self.physical_network:
275 if network['provider:physical_network'] != self.physical_network:
276 raise ChainException("Mismatch of 'physical_network' for reused "
277 "network '{net}'. Network has '{phys1}', "
278 "configuration requires '{phys2}'."
279 .format(net=self.name,
280 phys1=network['provider:physical_network'],
281 phys2=self.physical_network))
283 LOG.info('Reusing existing network %s', self.name)
285 self.network = network
# lookup_only mode must not create anything - fail when not found
288 raise ChainException('Network %s not found' % self.name)
# create a new network + subnet; the 'body' dict opening lines are not
# visible in this view
292 'admin_state_up': True
295 if network_config.network_type:
296 body['network']['provider:network_type'] = network_config.network_type
# provider attributes are only set when explicitly configured
297 if self.segmentation_id:
298 body['network']['provider:segmentation_id'] = self.segmentation_id
299 if self.physical_network:
300 body['network']['provider:physical_network'] = self.physical_network
301 self.network = self.manager.neutron_client.create_network(body)['network']
303 'subnet': {'name': network_config.subnet,
304 'cidr': network_config.cidr,
305 'network_id': self.network['id'],
306 'enable_dhcp': False,
308 'dns_nameservers': []}
310 subnet = self.manager.neutron_client.create_subnet(body)['subnet']
311 # add subnet id to the network dict since it has just been added
312 self.network['subnets'] = [subnet['id']]
313 LOG.info('Created network: %s', self.name)
# get_uuid() ('def' header not visible)
317 Extract UUID of this network.
319 :return: UUID of this network
321 return self.network['id']
# get_vlan(): only valid for 'vlan' provider networks
325 Extract vlan for this network.
327 :return: vlan ID for this network
329 if self.network['provider:network_type'] != 'vlan':
330 raise ChainException('Trying to retrieve VLAN id for non VLAN network')
331 return self.network['provider:segmentation_id']
# get_vxlan(): only valid for vxlan-based provider network types
335 Extract VNI for this network.
337 :return: VNI ID for this network
339 if 'vxlan' not in self.network['provider:network_type']:
340 raise ChainException('Trying to retrieve VNI for non VXLAN network')
341 return self.network['provider:segmentation_id']
# delete(): reused networks are left untouched; created ones are deleted
# with retries separated by generic_poll_sec
344 """Delete this network."""
345 if not self.reuse and self.network:
347 while retry < self.manager.config.generic_retry_count:
349 self.manager.neutron_client.delete_network(self.network['id'])
350 LOG.info("Deleted network: %s", self.name)
354 LOG.info('Error deleting network %s (retry %d/%d)...',
357 self.manager.config.generic_retry_count)
358 time.sleep(self.manager.config.generic_poll_sec)
359 LOG.error('Unable to delete network: %s', self.name)
362 class ChainVnf(object):
363 """A class to represent a VNF in a chain."""
# NOTE(review): many original lines are missing from this view (embedded
# line numbers are not contiguous); return statements, else branches and
# several 'def' headers are not visible - verify against upstream.
365 def __init__(self, chain, vnf_id, networks):
366 """Reuse a VNF instance with same characteristics or create a new VNF instance.
368 chain: the chain where this vnf belongs
369 vnf_id: indicates the index of this vnf in its chain (first vnf=0)
370 networks: the list of all networks (ChainNetwork) of the current chain
372 self.manager = chain.manager
# instance name is chain-scoped; a VNF index suffix is only added for
# multi-VNF chains (more than 2 networks)
375 self.name = self.manager.config.loop_vm_name + str(chain.chain_id)
376 if len(networks) > 2:
377 # we will have more than 1 VM in each chain
378 self.name += '-' + str(vnf_id)
385 # the vnf_id is conveniently also the starting index in networks
386 # for the left and right networks associated to this VNF
387 self._setup(networks[vnf_id:vnf_id + 2])
389 LOG.error("Error creating VNF %s", self.name)
# Render the nfvbenchvm config from its template, filling in forwarder,
# MAC and addressing information for this VNF.
393 def _get_vm_config(self, remote_mac_pair):
394 config = self.manager.config
395 devices = self.manager.generator_config.devices
396 with open(BOOT_SCRIPT_PATHNAME, 'r') as boot_script:
397 content = boot_script.read()
# NOTE(review): the /8 prefix length for the VNF gateways is hard-coded
398 g1cidr = devices[LEFT].get_gw_ip(self.chain.chain_id) + '/8'
399 g2cidr = devices[RIGHT].get_gw_ip(self.chain.chain_id) + '/8'
401 'forwarder': config.vm_forwarder,
402 'intf_mac1': self.ports[LEFT].get_mac(),
403 'intf_mac2': self.ports[RIGHT].get_mac(),
404 'tg_gateway1_ip': devices[LEFT].tg_gateway_ip_addrs,
405 'tg_gateway2_ip': devices[RIGHT].tg_gateway_ip_addrs,
406 'tg_net1': devices[LEFT].ip_addrs,
407 'tg_net2': devices[RIGHT].ip_addrs,
408 'vnf_gateway1_cidr': g1cidr,
409 'vnf_gateway2_cidr': g2cidr,
410 'tg_mac1': remote_mac_pair[0],
411 'tg_mac2': remote_mac_pair[1]
413 return content.format(**vm_config)
415 def _get_vnic_type(self, port_index):
416 """Get the right vnic type for given port index.
418 If SR-IOV is specified, middle ports in multi-VNF chains
419 can use vswitch or SR-IOV based on config.use_sriov_middle_net
421 if self.manager.config.sriov:
422 chain_length = self.chain.get_length()
423 if self.manager.config.use_sriov_middle_net or chain_length == 1:
425 if self.vnf_id == 0 and port_index == 0:
426 # first VNF in chain must use sriov for left port
428 if (self.vnf_id == chain_length - 1) and (port_index == 1):
429 # last VNF in chain must use sriov for right port
# Reuse a matching existing instance when possible; otherwise create the
# 2 ports now and defer actual VM creation (see create_vnf below).
433 def _setup(self, networks):
434 flavor_id = self.manager.flavor.flavor.id
435 # Check if we can reuse an instance with same name
436 for instance in self.manager.existing_instances:
437 if instance.name == self.name:
438 # Verify that other instance characteristics match
439 if instance.flavor['id'] != flavor_id:
440 self._reuse_exception('Flavor mismatch')
441 if instance.status != "ACTIVE":
442 self._reuse_exception('Matching instance is not in ACTIVE state')
443 # The 2 networks for this instance must also be reused
444 if not networks[LEFT].reuse:
445 self._reuse_exception('network %s is new' % networks[LEFT].name)
446 if not networks[RIGHT].reuse:
447 self._reuse_exception('network %s is new' % networks[RIGHT].name)
448 # instance.networks have the network names as keys:
449 # {'nfvbench-rnet0': ['192.168.2.10'], 'nfvbench-lnet0': ['192.168.1.8']}
450 if networks[LEFT].name not in instance.networks:
451 self._reuse_exception('Left network mismatch')
452 if networks[RIGHT].name not in instance.networks:
453 self._reuse_exception('Right network mismatch')
456 self.instance = instance
457 LOG.info('Reusing existing instance %s on %s',
458 self.name, self.get_hypervisor_name())
459 # create or reuse/discover 2 ports per instance
460 self.ports = [ChainVnfPort(self.name + '-' + str(index),
463 self._get_vnic_type(index)) for index in [0, 1]]
464 # if no reuse, actual vm creation is deferred after all ports in the chain are created
465 # since we need to know the next mac in a multi-vnf chain
467 def create_vnf(self, remote_mac_pair):
468 """Create the VNF instance if it does not already exist."""
469 if self.instance is None:
470 port_ids = [{'port-id': vnf_port.port['id']}
471 for vnf_port in self.ports]
472 vm_config = self._get_vm_config(remote_mac_pair)
473 az = self.manager.placer.get_required_az()
474 server = self.manager.comp.create_server(self.name,
475 self.manager.image_instance,
476 self.manager.flavor.flavor,
483 files={NFVBENCH_CFG_VM_PATHNAME: vm_config})
485 self.instance = server
486 if self.manager.placer.is_resolved():
487 LOG.info('Created instance %s on %s', self.name, az)
489 # the location is undetermined at this point
490 # self.get_hypervisor_name() will return None
491 LOG.info('Created instance %s - waiting for placement resolution...', self.name)
492 # here we MUST wait until this instance is resolved otherwise subsequent
493 # VNF creation can be placed in other hypervisors!
494 config = self.manager.config
# NOTE(review): '/' is integer division only on Python 2; under Python 3
# this yields a float and range() below would raise - should be '//'
495 max_retries = (config.check_traffic_time_sec +
496 config.generic_poll_sec - 1) / config.generic_poll_sec
498 for retry in range(max_retries):
499 status = self.get_status()
500 if status == 'ACTIVE':
501 hyp_name = self.get_hypervisor_name()
502 LOG.info('Instance %s is active and has been placed on %s',
504 self.manager.placer.register_full_name(hyp_name)
506 if status == 'ERROR':
507 raise ChainException('Instance %s creation error: %s' %
509 self.instance.fault['message']))
510 LOG.info('Waiting for instance %s to become active (retry %d/%d)...',
511 self.name, retry + 1, max_retries + 1)
512 time.sleep(config.generic_poll_sec)
515 LOG.error('Instance %s creation timed out', self.name)
516 raise ChainException('Instance %s creation timed out' % self.name)
519 raise ChainException('Unable to create instance: %s' % (self.name))
521 def _reuse_exception(self, reason):
522 raise ChainException('Instance %s cannot be reused (%s)' % (self.name, reason))
524 def get_status(self):
525 """Get the status of this instance."""
# only poll nova when the cached status is not already ACTIVE
526 if self.instance.status != 'ACTIVE':
527 self.instance = self.manager.comp.poll_server(self.instance)
528 return self.instance.status
530 def get_hostname(self):
531 """Get the hypervisor host name running this VNF instance."""
# the hypervisor attribute is admin-only; otherwise fall back to config
532 if self.manager.is_admin:
533 hypervisor_hostname = getattr(self.instance, 'OS-EXT-SRV-ATTR:hypervisor_hostname')
535 hypervisor_hostname = self.manager.config.hypervisor_hostname
536 if not hypervisor_hostname:
537 raise ChainException('Hypervisor hostname parameter is mandatory')
538 return hypervisor_hostname
540 def get_host_ip(self):
541 """Get the IP address of the host where this instance runs.
543 return: the IP address
# looked up via the compute helper and presumably cached in self.host_ip
546 self.host_ip = self.manager.comp.get_hypervisor(self.get_hostname()).host_ip
549 def get_hypervisor_name(self):
550 """Get hypervisor name (az:hostname) for this VNF instance."""
552 if self.manager.is_admin:
553 az = getattr(self.instance, 'OS-EXT-AZ:availability_zone')
555 az = self.manager.config.availability_zone
557 raise ChainException('Availability zone parameter is mandatory')
558 hostname = self.get_hostname()
560 return az + ':' + hostname
# get_uuid() ('def' header not visible)
565 """Get the uuid for this instance."""
566 return self.instance.id
568 def delete(self, forced=False):
569 """Delete this VNF instance."""
# reused instances are preserved unless deletion is forced
571 LOG.info("Instance %s not deleted (reused)", self.name)
574 self.manager.comp.delete_server(self.instance)
575 LOG.info("Deleted instance %s", self.name)
576 for port in self.ports:
# NOTE(review): the 'class Chain' header itself (original line 579) is not
# visible in this view; the lines below are its docstring and methods.
580 """A class to manage a single chain.
582 Can handle any type of chain (EXT, PVP, PVVP)
585 def __init__(self, chain_id, manager):
586 """Create a new chain.
588 chain_id: chain index (first chain is 0)
589 manager: the chain manager that owns all chains
591 self.chain_id = chain_id
592 self.manager = manager
593 self.encaps = manager.encaps
# networks are created/discovered first; chain length derives from them
597 self.networks = manager.get_networks(chain_id)
598 # For external chain VNFs can only be discovered from their MAC addresses
599 # either from config or from ARP
600 if manager.config.service_chain != ChainType.EXT:
601 for chain_instance_index in range(self.get_length()):
602 self.instances.append(ChainVnf(self,
603 chain_instance_index,
605 # at this point new VNFs are not created yet but
606 # verify that all discovered VNFs are on the same hypervisor
607 self._check_hypervisors()
608 # now that all VNF ports are created we need to calculate the
609 # left/right remote MAC for each VNF in the chain
610 # before actually creating the VNF itself
611 rem_mac_pairs = self._get_remote_mac_pairs()
612 for instance in self.instances:
613 rem_mac_pair = rem_mac_pairs.pop(0)
614 instance.create_vnf(rem_mac_pair)
# Verify all discovered instances share one hypervisor and register it
# with the placer so future instances land on the same host.
619 def _check_hypervisors(self):
620 common_hypervisor = None
621 for instance in self.instances:
622 # get the full hypervisor name (az:compute)
623 hname = instance.get_hypervisor_name()
625 if common_hypervisor:
626 if hname != common_hypervisor:
627 raise ChainException('Discovered instances on different hypervisors:'
628 ' %s and %s' % (hname, common_hypervisor))
630 common_hypervisor = hname
631 if common_hypervisor:
632 # check that the common hypervisor name matches the requested hypervisor name
633 # and set the name to be used by all future instances (if any)
634 if not self.manager.placer.register_full_name(common_hypervisor):
635 raise ChainException('Discovered hypervisor placement %s is incompatible' %
638 def get_length(self):
639 """Get the number of VNF in the chain."""
# N networks bracket N-1 VNFs (e.g. PVP: 2 networks, 1 VNF)
640 return len(self.networks) - 1
642 def _get_remote_mac_pairs(self):
643 """Get the list of remote mac pairs for every VNF in the chain.
645 Traverse the chain from left to right and establish the
646 left/right remote MAC for each VNF in the chain.
649 mac sequence: tg_src_mac, vm0-mac0, vm0-mac1, tg_dst_mac
650 must produce [[tg_src_mac, tg_dst_mac]] or looking at index in mac sequence: [[0, 3]]
651 the mac pair is what the VNF at that position (index 0) sees as next hop mac left and right
654 tg_src_mac, vm0-mac0, vm0-mac1, vm1-mac0, vm1-mac1, tg_dst_mac
655 Must produce the following list:
656 [[tg_src_mac, vm1-mac0], [vm0-mac1, tg_dst_mac]] or index: [[0, 3], [2, 5]]
658 General case with 3 VMs in chain, the list of consecutive macs (left to right):
659 tg_src_mac, vm0-mac0, vm0-mac1, vm1-mac0, vm1-mac1, vm2-mac0, vm2-mac1, tg_dst_mac
660 Must produce the following list:
661 [[tg_src_mac, vm1-mac0], [vm0-mac1, vm2-mac0], [vm1-mac1, tg_dst_mac]]
662 or index: [[0, 3], [2, 5], [4, 7]]
664 The series pattern is pretty clear: [[n, n+3],... ] where n is multiple of 2
666 # line up all mac from left to right
667 mac_seq = [self.manager.generator_config.devices[LEFT].mac]
668 for instance in self.instances:
669 mac_seq.append(instance.ports[0].get_mac())
670 mac_seq.append(instance.ports[1].get_mac())
671 mac_seq.append(self.manager.generator_config.devices[RIGHT].mac)
# build [[n, n+3], ...] pairs; 'base' init/increment lines not visible here
674 for _ in self.instances:
675 rem_mac_pairs.append([mac_seq[base], mac_seq[base + 3]])
679 def get_instances(self):
680 """Return all instances for this chain."""
681 return self.instances
683 def get_vlan(self, port_index):
684 """Get the VLAN id on a given port.
686 port_index: left port is 0, right port is 1
687 return: the vlan_id or None if there is no vlan tagging
689 # for port 1 we need to return the VLAN of the last network in the chain
690 # The networks array contains 2 networks for PVP [left, right]
691 # and 3 networks in the case of PVVP [left.middle,right]
693 # this will pick the last item in array
695 return self.networks[port_index].get_vlan()
697 def get_vxlan(self, port_index):
698 """Get the VXLAN id on a given port.
700 port_index: left port is 0, right port is 1
701 return: the vxlan_id or None if there is no vxlan
703 # for port 1 we need to return the VLAN of the last network in the chain
704 # The networks array contains 2 networks for PVP [left, right]
705 # and 3 networks in the case of PVVP [left.middle,right]
707 # this will pick the last item in array
709 return self.networks[port_index].get_vxlan()
711 def get_dest_mac(self, port_index):
712 """Get the dest MAC on a given port.
714 port_index: left port is 0, right port is 1
718 # for right port, use the right port MAC of the last (right most) VNF In chain
719 return self.instances[-1].ports[1].get_mac()
720 # for left port use the left port MAC of the first (left most) VNF in chain
721 return self.instances[0].ports[0].get_mac()
723 def get_network_uuids(self):
724 """Get UUID of networks in this chain from left to right (order is important).
726 :return: list of UUIDs of networks (2 or 3 elements)
728 return [net['id'] for net in self.networks]
730 def get_host_ips(self):
731 """Return the IP address(es) of the host compute nodes used for this chain.
733 :return: a list of 1 or 2 IP addresses
735 return [vnf.get_host_ip() for vnf in self.instances]
737 def get_compute_nodes(self):
738 """Return the name of the host compute nodes used for this chain.
740 :return: a list of 1 host name in the az:host format
742 # Since all chains go through the same compute node(s) we can just retrieve the
743 # compute node name(s) for the first chain
744 return [vnf.get_hypervisor_name() for vnf in self.instances]
# delete(): tear down instances, then private (non-shared) networks
747 """Delete this chain."""
748 for instance in self.instances:
750 # only delete if these are chain private networks (not shared)
751 if not self.manager.config.service_chain_shared_net:
752 for network in self.networks:
756 class InstancePlacer(object):
757 """A class to manage instance placement for all VNFs in all chains.
759 A full az string is made of 2 parts AZ and hypervisor.
760 The placement is resolved when both parts az and hypervisor names are known.
# NOTE(review): a few original lines are missing from this view (embedded
# numbering is not contiguous), e.g. the branch headers around lines
# 776/792/794 - verify against upstream.
763 def __init__(self, req_az, req_hyp):
764 """Create a new instance placer.
766 req_az: requested AZ (can be None or empty if no preference)
767 req_hyp: requested hypervisor name (can be None or empty if no preference)
768 can be any of 'nova:', 'comp1', 'nova:comp1'
769 if it is a list, only the first item is used (backward compatibility in config)
771 req_az is ignored if req_hyp has an az part
772 all other parts beyond the first 2 are ignored in req_hyp
774 # if passed a list just pick the first item
775 if req_hyp and isinstance(req_hyp, list):
777 # only pick first part of az
778 if req_az and ':' in req_az:
779 req_az = req_az.split(':')[0]
781 # check if requested hypervisor string has an AZ part
782 split_hyp = req_hyp.split(':')
783 if len(split_hyp) > 1:
784 # override the AZ part and hypervisor part
785 req_az = split_hyp[0]
786 req_hyp = split_hyp[1]
787 self.requested_az = req_az if req_az else ''
788 self.requested_hyp = req_hyp if req_hyp else ''
789 # Nova can accept AZ only (e.g. 'nova:', use any hypervisor in that AZ)
790 # or hypervisor only (e.g. ':comp1')
791 # or both (e.g. 'nova:comp1')
793 self.required_az = req_az + ':' + self.requested_hyp
795 # need to insert a ':' so nova knows this is the hypervisor name
796 self.required_az = ':' + self.requested_hyp if req_hyp else ''
797 # placement is resolved when both AZ and hypervisor names are known and set
798 self.resolved = self.requested_az != '' and self.requested_hyp != ''
800 def get_required_az(self):
801 """Return the required az (can be resolved or not)."""
802 return self.required_az
804 def register_full_name(self, discovered_az):
805 """Verify compatibility and register a discovered hypervisor full name.
807 discovered_az: a discovered AZ in az:hypervisor format
808 return: True if discovered_az is compatible and set
809 False if discovered_az is not compatible
# when already resolved, only an exact match is compatible
812 return discovered_az == self.required_az
814 # must be in full az format
815 split_daz = discovered_az.split(':')
816 if len(split_daz) != 2:
# partial requests (AZ only, or hypervisor only) must agree with the
# corresponding part of the discovered name
818 if self.requested_az and self.requested_az != split_daz[0]:
820 if self.requested_hyp and self.requested_hyp != split_daz[1]:
822 self.required_az = discovered_az
826 def is_resolved(self):
827 """Check if the full AZ is resolved.
829 return: True if resolved
834 class ChainManager(object):
835 """A class for managing all chains for a given run.
837 Supports openstack or no openstack.
838 Supports EXT, PVP and PVVP chains.
# NOTE(review): many original lines are missing from this view (embedded
# numbering is not contiguous); try/except structure, several assignments
# (e.g. self.chains/self.networks init) and 'def' headers are not visible.
# The class also continues past the end of this view - verify upstream.
841 def __init__(self, chain_runner):
842 """Create a chain manager to take care of discovering or bringing up the requested chains.
844 A new instance must be created every time a new config is used.
845 config: the nfvbench config to use
846 cred: openstack credentials to use or None if there is no openstack
848 self.chain_runner = chain_runner
849 self.config = chain_runner.config
850 self.generator_config = chain_runner.traffic_client.generator_config
852 self.image_instance = None
853 self.image_name = None
854 # Left and right networks shared across all chains (only if shared)
859 self.nova_client = None
860 self.neutron_client = None
861 self.glance_client = None
862 self.existing_instances = []
863 # existing ports keyed by the network uuid they belong to
864 self._existing_ports = {}
# openstack mode requires credentials and excludes l2 loopback
866 self.openstack = (chain_runner.cred is not None) and not config.l2_loopback
867 self.chain_count = config.service_chain_count
# openstack path: build clients, stage flavor/image/instances
871 session = chain_runner.cred.get_session()
872 self.is_admin = chain_runner.cred.is_admin
873 self.nova_client = Client(2, session=session)
874 self.neutron_client = neutronclient.Client('2.0', session=session)
875 self.glance_client = glanceclient.Client('2', session=session)
876 self.comp = compute.Compute(self.nova_client,
880 if config.service_chain != ChainType.EXT:
881 self.placer = InstancePlacer(config.availability_zone, config.compute_nodes)
883 self.flavor = ChainFlavor(config.flavor_type, config.flavor, self.comp)
884 # Get list of all existing instances to check if some instances can be reused
885 self.existing_instances = self.comp.get_server_list()
887 # For EXT chains, the external_networks left and right fields in the config
888 # must be either a prefix string or a list of at least chain-count strings
889 self._check_extnet('left', config.external_networks.left)
890 self._check_extnet('right', config.external_networks.right)
892 # If networks are shared across chains, get the list of networks
893 if config.service_chain_shared_net:
894 self.networks = self.get_networks()
895 # Reuse/create chains
896 for chain_id in range(self.chain_count):
897 self.chains.append(Chain(chain_id, self))
898 if config.service_chain == ChainType.EXT:
899 # if EXT and no ARP or VxLAN we need to read dest MACs from config
900 if config.no_arp or config.vxlan:
901 self._get_dest_macs_from_config()
903 # Make sure all instances are active before proceeding
904 self._ensure_instances_active()
905 # network API calls do not show VLAN IDs if not admin; read from config
906 if not self.is_admin and config.vlan_tagging:
907 self._get_config_vlans()
# non-openstack path: only MAC/VLAN config is needed
912 # no openstack, no need to create chains
913 if not config.l2_loopback and config.no_arp:
914 self._get_dest_macs_from_config()
915 if config.vlan_tagging:
916 # make sure there are at least as many entries as chains in each left/right list
917 if len(config.vlans) != 2:
918 raise ChainException('The config vlans property must be a list '
919 'with 2 lists of VLAN IDs')
920 self._get_config_vlans()
922 raise ChainException('VxLAN is only supported with OpenStack')
# Validate the external network name (prefix string or per-chain list).
924 def _check_extnet(self, side, name):
926 raise ChainException('external_networks.%s must contain a valid network'
927 ' name prefix or a list of network names' % side)
928 if isinstance(name, tuple) and len(name) < self.chain_count:
929 raise ChainException('external_networks.%s %s'
930 ' must have at least %d names' % (side, name, self.chain_count))
# Read and validate left/right VLAN id lists from the config.
932 def _get_config_vlans(self):
935 self.vlans = [self._check_list('vlans[0]', self.config.vlans[0], re_vlan),
936 self._check_list('vlans[1]', self.config.vlans[1], re_vlan)]
938 raise ChainException('vlans parameter is mandatory. Set valid value in config file')
# Read and validate left/right destination MAC lists from the config.
940 def _get_dest_macs_from_config(self):
941 re_mac = "[0-9a-fA-F]{2}([-:])[0-9a-fA-F]{2}(\\1[0-9a-fA-F]{2}){4}$"
942 tg_config = self.config.traffic_generator
943 self.dest_macs = [self._check_list("mac_addrs_left",
944 tg_config.mac_addrs_left, re_mac),
945 self._check_list("mac_addrs_right",
946 tg_config.mac_addrs_right, re_mac)]
# Normalize a config value into a per-chain list validated against pattern.
948 def _check_list(self, list_name, ll, pattern):
949 # if it is a single int or mac, make it a list of 1 int
950 if isinstance(ll, (int, str)):
953 if not re.match(pattern, str(item)):
954 raise ChainException("Invalid format '{item}' specified in {fname}"
955 .format(item=item, fname=list_name))
956 # must have at least 1 element
958 raise ChainException('%s cannot be empty' % (list_name))
959 # for shared network, if 1 element is passed, replicate it as many times
961 if self.config.service_chain_shared_net and len(ll) == 1:
962 ll = [ll[0]] * self.chain_count
964 # number of elements must be the number of chains
965 elif len(ll) < self.chain_count:
966 raise ChainException('%s=%s must be a list with %d elements per chain' %
967 (list_name, ll, self.chain_count))
# Find or upload the nfvbenchvm glance image to be used for all VNFs.
970 def _setup_image(self):
971 # To avoid reuploading image in server mode, check whether image_name is set or not
973 self.image_instance = self.comp.find_image(self.image_name)
974 if self.image_instance:
975 LOG.info("Reusing image %s", self.image_name)
977 image_name_search_pattern = r'(nfvbenchvm-\d+(\.\d+)*).qcow2'
978 if self.config.vm_image_file:
979 match = re.search(image_name_search_pattern, self.config.vm_image_file)
981 self.image_name = match.group(1)
982 LOG.info('Using provided VM image file %s', self.config.vm_image_file)
984 raise ChainException('Provided VM image file name %s must start with '
985 '"nfvbenchvm-<version>"' % self.config.vm_image_file)
# no explicit image file: search the package root for a built-in image
987 pkg_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
988 for f in os.listdir(pkg_root):
989 if re.search(image_name_search_pattern, f):
990 self.config.vm_image_file = pkg_root + '/' + f
991 self.image_name = f.replace('.qcow2', '')
992 LOG.info('Found built-in VM image file %s', f)
995 raise ChainException('Cannot find any built-in VM image file.')
997 self.image_instance = self.comp.find_image(self.image_name)
998 if not self.image_instance:
999 LOG.info('Uploading %s', self.image_name)
1000 res = self.comp.upload_image_via_url(self.image_name,
1001 self.config.vm_image_file)
1004 raise ChainException('Error uploading image %s from %s. ABORTING.' %
1005 (self.image_name, self.config.vm_image_file))
1006 LOG.info('Image %s successfully uploaded.', self.image_name)
1007 self.image_instance = self.comp.find_image(self.image_name)
# Poll all chain instances until every one is ACTIVE, or time out.
1009 def _ensure_instances_active(self):
1011 for chain in self.chains:
1012 instances.extend(chain.get_instances())
1013 initial_instance_count = len(instances)
# NOTE(review): '/' is integer division only on Python 2; under Python 3
# this yields a float - should be '//'
1014 max_retries = (self.config.check_traffic_time_sec +
1015 self.config.generic_poll_sec - 1) / self.config.generic_poll_sec
1018 remaining_instances = []
1019 for instance in instances:
1020 status = instance.get_status()
1021 if status == 'ACTIVE':
1022 LOG.info('Instance %s is ACTIVE on %s',
1023 instance.name, instance.get_hypervisor_name())
1025 if status == 'ERROR':
1026 raise ChainException('Instance %s creation error: %s' %
1028 instance.instance.fault['message']))
1029 remaining_instances.append(instance)
1030 if not remaining_instances:
1033 if retry >= max_retries:
1034 raise ChainException('Time-out: %d/%d instances still not active' %
1035 (len(remaining_instances), initial_instance_count))
1036 LOG.info('Waiting for %d/%d instance to become active (retry %d/%d)...',
1037 len(remaining_instances), initial_instance_count,
1039 instances = remaining_instances
1040 time.sleep(self.config.generic_poll_sec)
1041 if initial_instance_count:
1042 LOG.info('All instances are active')
1044 def get_networks(self, chain_id=None):
1045 """Get the networks for given EXT, PVP or PVVP chain.
1047 For EXT packet path, these networks must pre-exist.
1048 For PVP, PVVP these networks will be created if they do not exist.
1049 chain_id: to which chain the networks belong.
1050 a None value will mean that these networks are shared by all chains
1053 # the only case where self.networks exists is when the networks are shared
1055 return self.networks
1056 if self.config.service_chain == ChainType.EXT:
1058 ext_net = self.config.external_networks
1059 net_cfg = [AttrDict({'name': name,
1060 'segmentation_id': None,
1061 'physical_network': None})
1062 for name in [ext_net.left, ext_net.right]]
1063 # segmentation id and subnet should be discovered from neutron
1066 int_nets = self.config.internal_networks
1068 if self.config.service_chain == ChainType.PVP:
1069 net_cfg = [int_nets.left, int_nets.right]
1071 net_cfg = [int_nets.left, int_nets.middle, int_nets.right]
1075 networks.append(ChainNetwork(self, cfg, chain_id, lookup_only=lookup_only))
1077 # need to cleanup all successful networks prior to bailing out
1078 for net in networks:
1083 def get_existing_ports(self):
1084 """Get the list of existing ports.
1086 Lazy retrieval of ports as this can be costly if there are lots of ports and
1087 is only needed when VM and network are being reused.
1089 return: a dict of list of neutron ports indexed by the network uuid they are attached to
1091 Each port is a dict with fields such as below:
1092 {'allowed_address_pairs': [], 'extra_dhcp_opts': [],
1093 'updated_at': '2018-10-06T07:15:35Z', 'device_owner': 'compute:nova',
1094 'revision_number': 10, 'port_security_enabled': False, 'binding:profile': {},
1095 'fixed_ips': [{'subnet_id': '6903a3b3-49a1-4ba4-8259-4a90e7a44b21',
1096 'ip_address': '192.168.1.4'}], 'id': '3dcb9cfa-d82a-4dd1-85a1-fd8284b52d72',
1097 'security_groups': [],
1098 'binding:vif_details': {'vhostuser_socket': '/tmp/3dcb9cfa-d82a-4dd1-85a1-fd8284b52d72',
1099 'vhostuser_mode': 'server'},
1100 'binding:vif_type': 'vhostuser',
1101 'mac_address': 'fa:16:3e:3c:63:04',
1102 'project_id': '977ac76a63d7492f927fa80e86baff4c',
1104 'binding:host_id': 'a20-champagne-compute-1',
1106 'device_id': 'a98e2ad2-5371-4aa5-a356-8264a970ce4b',
1107 'name': 'nfvbench-loop-vm0-0', 'admin_state_up': True,
1108 'network_id': '3ea5fd88-278f-4d9d-b24d-1e443791a055',
1109 'tenant_id': '977ac76a63d7492f927fa80e86baff4c',
1110 'created_at': '2018-10-06T07:15:10Z',
1111 'binding:vnic_type': 'normal'}
1113 if not self._existing_ports:
1114 LOG.info('Loading list of all ports...')
1115 existing_ports = self.neutron_client.list_ports()['ports']
1116 # place all ports in the dict keyed by the port network uuid
1117 for port in existing_ports:
1118 port_list = self._existing_ports.setdefault(port['network_id'], [])
1119 port_list.append(port)
1120 LOG.info("Loaded %d ports attached to %d networks",
1121 len(existing_ports), len(self._existing_ports))
1122 return self._existing_ports
1124 def get_ports_from_network(self, chain_network):
1125 """Get the list of existing ports that belong to a network.
1127 Lazy retrieval of ports as this can be costly if there are lots of ports and
1128 is only needed when VM and network are being reused.
1130 chain_network: a ChainNetwork instance for which attached ports neeed to be retrieved
1131 return: list of neutron ports attached to requested network
1133 return self.get_existing_ports().get(chain_network.get_uuid(), None)
1135 def get_hypervisor_from_mac(self, mac):
1136 """Get the hypervisor that hosts a VM MAC.
1138 mac: MAC address to look for
1139 return: the hypervisor where the matching port runs or None if not found
1141 # _existing_ports is a dict of list of ports indexed by network id
1142 for port_list in self.get_existing_ports().values():
1143 for port in port_list:
1145 if port['mac_address'] == mac:
1146 host_id = port['binding:host_id']
1147 return self.comp.get_hypervisor(host_id)
1152 def get_host_ip_from_mac(self, mac):
1153 """Get the host IP address matching a MAC.
1155 mac: MAC address to look for
1156 return: the IP address of the host where the matching port runs or None if not found
1158 hypervisor = self.get_hypervisor_from_mac(mac)
1160 return hypervisor.host_ip
1163 def get_chain_vlans(self, port_index):
1164 """Get the list of per chain VLAN id on a given port.
1166 port_index: left port is 0, right port is 1
1167 return: a VLAN ID list indexed by the chain index or None if no vlan tagging
1169 if self.chains and self.is_admin:
1170 return [self.chains[chain_index].get_vlan(port_index)
1171 for chain_index in range(self.chain_count)]
1173 return self.vlans[port_index]
1175 def get_chain_vxlans(self, port_index):
1176 """Get the list of per chain VNIs id on a given port.
1178 port_index: left port is 0, right port is 1
1179 return: a VNIs ID list indexed by the chain index or None if no vlan tagging
1181 if self.chains and self.is_admin:
1182 return [self.chains[chain_index].get_vxlan(port_index)
1183 for chain_index in range(self.chain_count)]
1185 raise ChainException('VxLAN is only supported with OpenStack and with admin user')
1187 def get_dest_macs(self, port_index):
1188 """Get the list of per chain dest MACs on a given port.
1190 Should not be called if EXT+ARP is used (in that case the traffic gen will
1191 have the ARP responses back from VNFs with the dest MAC to use).
1193 port_index: left port is 0, right port is 1
1194 return: a list of dest MACs indexed by the chain index
1196 if self.chains and self.config.service_chain != ChainType.EXT:
1197 return [self.chains[chain_index].get_dest_mac(port_index)
1198 for chain_index in range(self.chain_count)]
1199 # no openstack or EXT+no-arp
1200 return self.dest_macs[port_index]
1202 def get_host_ips(self):
1203 """Return the IP adresss(es) of the host compute nodes used for this run.
1205 :return: a list of 1 IP address
1207 # Since all chains go through the same compute node(s) we can just retrieve the
1208 # compute node(s) for the first chain
1210 if self.config.service_chain != ChainType.EXT:
1211 return self.chains[0].get_host_ips()
1212 # in the case of EXT, the compute node must be retrieved from the port
1213 # associated to any of the dest MACs
1214 dst_macs = self.generator_config.get_dest_macs()
1215 # dest MAC on port 0, chain 0
1216 dst_mac = dst_macs[0][0]
1217 host_ip = self.get_host_ip_from_mac(dst_mac)
1219 LOG.info('Found compute node IP for EXT chain: %s', host_ip)
1223 def get_compute_nodes(self):
1224 """Return the name of the host compute nodes used for this run.
1226 :return: a list of 0 or 1 host name in the az:host format
1228 # Since all chains go through the same compute node(s) we can just retrieve the
1229 # compute node name(s) for the first chain
1231 # in the case of EXT, the compute node must be retrieved from the port
1232 # associated to any of the dest MACs
1233 if self.config.service_chain != ChainType.EXT:
1234 return self.chains[0].get_compute_nodes()
1235 # in the case of EXT, the compute node must be retrieved from the port
1236 # associated to any of the dest MACs
1237 dst_macs = self.generator_config.get_dest_macs()
1238 # dest MAC on port 0, chain 0
1239 dst_mac = dst_macs[0][0]
1240 hypervisor = self.get_hypervisor_from_mac(dst_mac)
1242 LOG.info('Found hypervisor for EXT chain: %s', hypervisor.hypervisor_hostname)
1243 return[':' + hypervisor.hypervisor_hostname]
1245 # no openstack = no chains
1249 """Delete resources for all chains."""
1250 for chain in self.chains:
1252 for network in self.networks:
1255 self.flavor.delete()