2 # Copyright 2018 Cisco Systems, Inc. All rights reserved.
4 # Licensed under the Apache License, Version 2.0 (the "License"); you may
5 # not use this file except in compliance with the License. You may obtain
6 # a copy of the License at
8 # http://www.apache.org/licenses/LICENSE-2.0
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 # License for the specific language governing permissions and limitations
17 # This module takes care of chaining networks, ports and vms
19 """NFVBENCH CHAIN DISCOVERY/STAGING.
21 This module takes care of staging/discovering all resources that are participating in a
22 benchmarking session: flavors, networks, ports, VNF instances.
23 If a resource is discovered with the same name, it will be reused.
24 Otherwise it will be created.
26 ChainManager: manages VM image, flavor, the staging discovery of all chains
28 Chain: manages one chain, has 2 or more networks and 1 or more instances
29 ChainNetwork: manages 1 network in a chain
30 ChainVnf: manages 1 VNF instance in a chain, has 2 ports
31 ChainVnfPort: manages 1 instance port
33 ChainManager-->Chain(*)
34 Chain-->ChainNetwork(*),ChainVnf(*)
35 ChainVnf-->ChainVnfPort(2)
37 Once created/discovered, instances are checked to be in the active state (ready to pass traffic)
38 Configuration parameters that will influence how these resources are staged/related:
39 - openstack or no openstack
42 - number of VNF in each chain (PVP, PVVP)
43 - SRIOV and middle port SRIOV for port types
44 - whether networks are shared across chains or not
46 There is no traffic generation involved in this module.
52 from glanceclient.v2 import client as glanceclient
53 from neutronclient.neutron import client as neutronclient
54 from novaclient.client import Client
56 from attrdict import AttrDict
59 from specs import ChainType
61 # Left and right index for network and port lists
64 # Name of the VM config file
65 NFVBENCH_CFG_FILENAME = 'nfvbenchvm.conf'
66 # full pathname of the VM config in the VM
67 NFVBENCH_CFG_VM_PATHNAME = os.path.join('/etc/', NFVBENCH_CFG_FILENAME)
68 # full path of the boot shell script template file on the server where nfvbench runs
69 BOOT_SCRIPT_PATHNAME = os.path.join(os.path.dirname(os.path.abspath(__file__)),
71 NFVBENCH_CFG_FILENAME)
74 class ChainException(Exception):
75 """Exception while operating the chains."""
79 class NetworkEncaps(object):
80 """Network encapsulation."""
83 class ChainFlavor(object):
84 """Class to manage the chain flavor."""
86 def __init__(self, flavor_name, flavor_dict, comp):
87 """Create a flavor."""
88 self.name = flavor_name
90 self.flavor = self.comp.find_flavor(flavor_name)
94 LOG.info("Reused flavor '%s'", flavor_name)
96 extra_specs = flavor_dict.pop('extra_specs', None)
98 self.flavor = comp.create_flavor(flavor_name,
101 LOG.info("Created flavor '%s'", flavor_name)
103 self.flavor.set_keys(extra_specs)
106 """Delete this flavor."""
107 if not self.reuse and self.flavor:
109 LOG.info("Flavor '%s' deleted", self.name)
112 class ChainVnfPort(object):
113 """A port associated to one VNF in the chain."""
115 def __init__(self, name, vnf, chain_network, vnic_type):
116 """Create or reuse a port on a given network.
118 if vnf.instance is None the VNF instance is not reused and this ChainVnfPort instance must
120 Otherwise vnf.instance is a reused VNF instance and this ChainVnfPort instance must
121 find an existing port to reuse that matches the port requirements: same attached network,
122 instance, name, vnic type
124 name: name for this port
125 vnf: ChainVNf instance that owns this port
126 chain_network: ChainNetwork instance where this port should attach
127 vnic_type: required vnic type for this port
131 self.manager = vnf.manager
135 # VNF instance is reused, we need to find an existing port that matches this instance
137 # discover ports attached to this instance
138 port_list = self.manager.get_ports_from_network(chain_network)
139 for port in port_list:
140 if port['name'] != name:
142 if port['binding:vnic_type'] != vnic_type:
144 if port['device_id'] == vnf.get_uuid():
146 LOG.info('Reusing existing port %s mac=%s', name, port['mac_address'])
149 raise ChainException('Cannot find matching port')
151 # VNF instance is not created yet, we need to create a new port
155 'network_id': chain_network.get_uuid(),
156 'binding:vnic_type': vnic_type
159 port = self.manager.neutron_client.create_port(body)
160 self.port = port['port']
161 LOG.info('Created port %s', name)
163 self.manager.neutron_client.update_port(self.port['id'], {
165 'security_groups': [],
166 'port_security_enabled': False,
169 LOG.info('Security disabled on port %s', name)
171 LOG.info('Failed to disable security on port %s (ignored)', name)
174 """Get the MAC address for this port."""
175 return self.port['mac_address']
178 """Delete this port instance."""
179 if self.reuse or not self.port:
182 while retry < self.manager.config.generic_retry_count:
184 self.manager.neutron_client.delete_port(self.port['id'])
185 LOG.info("Deleted port %s", self.name)
189 time.sleep(self.manager.config.generic_poll_sec)
190 LOG.error('Unable to delete port: %s', self.name)
193 class ChainNetwork(object):
194 """Could be a shared network across all chains or a chain private network."""
196 def __init__(self, manager, network_config, chain_id=None, lookup_only=False):
197 """Create a network for given chain.
199 network_config: a dict containing the network properties
200 (segmentation_id and physical_network)
201 chain_id: to which chain the networks belong.
202 a None value will mean that these networks are shared by all chains
204 self.manager = manager
205 self.name = network_config.name
206 self.segmentation_id = self._get_item(network_config.segmentation_id,
207 chain_id, auto_index=True)
208 self.physical_network = self._get_item(network_config.physical_network, chain_id)
209 if chain_id is not None:
210 self.name += str(chain_id)
215 self._setup(network_config, lookup_only)
218 LOG.error("Cannot find network %s", self.name)
220 LOG.error("Error creating network %s", self.name)
224 def _get_item(self, item_field, index, auto_index=False):
225 """Retrieve an item from a list or a single value.
227 item_field: can be None, a tuple of a single value
228 index: if None is same as 0, else is the index for a chain
229 auto_index: if true will automatically get the final value by adding the
230 index to the base value (if full list not provided)
232 If the item_field is not a tuple, it is considered same as a tuple with same value at any
234 If a list is provided, its length must be > index
240 if isinstance(item_field, tuple):
242 return item_field[index]
244 raise ChainException("List %s is too short for chain index %d" %
245 (str(item_field), index))
246 # single value is configured
248 return item_field + index
251 def _setup(self, network_config, lookup_only):
252 # Lookup if there is a matching network with same name
253 networks = self.manager.neutron_client.list_networks(name=self.name)
254 if networks['networks']:
255 network = networks['networks'][0]
256 # a network of same name already exists, we need to verify it has the same
258 if self.segmentation_id:
259 if network['provider:segmentation_id'] != self.segmentation_id:
260 raise ChainException("Mismatch of 'segmentation_id' for reused "
261 "network '{net}'. Network has id '{seg_id1}', "
262 "configuration requires '{seg_id2}'."
263 .format(net=self.name,
264 seg_id1=network['provider:segmentation_id'],
265 seg_id2=self.segmentation_id))
267 if self.physical_network:
268 if network['provider:physical_network'] != self.physical_network:
269 raise ChainException("Mismatch of 'physical_network' for reused "
270 "network '{net}'. Network has '{phys1}', "
271 "configuration requires '{phys2}'."
272 .format(net=self.name,
273 phys1=network['provider:physical_network'],
274 phys2=self.physical_network))
276 LOG.info('Reusing existing network %s', self.name)
278 self.network = network
281 raise ChainException('Network %s not found' % self.name)
285 'admin_state_up': True
288 if network_config.network_type:
289 body['network']['provider:network_type'] = network_config.network_type
290 if self.segmentation_id:
291 body['network']['provider:segmentation_id'] = self.segmentation_id
292 if self.physical_network:
293 body['network']['provider:physical_network'] = self.physical_network
295 self.network = self.manager.neutron_client.create_network(body)['network']
297 'subnet': {'name': network_config.subnet,
298 'cidr': network_config.cidr,
299 'network_id': self.network['id'],
300 'enable_dhcp': False,
302 'dns_nameservers': []}
304 subnet = self.manager.neutron_client.create_subnet(body)['subnet']
305 # add subnet id to the network dict since it has just been added
306 self.network['subnets'] = [subnet['id']]
307 LOG.info('Created network: %s.', self.name)
311 Extract UUID of this network.
313 :return: UUID of this network
315 return self.network['id']
319 Extract vlan for this network.
321 :return: vlan ID for this network
323 if self.network['provider:network_type'] != 'vlan':
324 raise ChainException('Trying to retrieve VLAN id for non VLAN network')
325 return self.network['provider:segmentation_id']
329 Extract VNI for this network.
331 :return: VNI ID for this network
333 if self.network['provider:network_type'] != 'vxlan':
334 raise ChainException('Trying to retrieve VNI for non VXLAN network')
335 return self.network['provider:segmentation_id']
338 """Delete this network."""
339 if not self.reuse and self.network:
341 while retry < self.manager.config.generic_retry_count:
343 self.manager.neutron_client.delete_network(self.network['id'])
344 LOG.info("Deleted network: %s", self.name)
348 LOG.info('Error deleting network %s (retry %d/%d)...',
351 self.manager.config.generic_retry_count)
352 time.sleep(self.manager.config.generic_poll_sec)
353 LOG.error('Unable to delete network: %s', self.name)
356 class ChainVnf(object):
357 """A class to represent a VNF in a chain."""
359 def __init__(self, chain, vnf_id, networks):
360 """Reuse a VNF instance with same characteristics or create a new VNF instance.
362 chain: the chain where this vnf belongs
363 vnf_id: indicates the index of this vnf in its chain (first vnf=0)
364 networks: the list of all networks (ChainNetwork) of the current chain
366 self.manager = chain.manager
369 self.name = self.manager.config.loop_vm_name + str(chain.chain_id)
370 if len(networks) > 2:
371 # we will have more than 1 VM in each chain
372 self.name += '-' + str(vnf_id)
379 # the vnf_id is conveniently also the starting index in networks
380 # for the left and right networks associated to this VNF
381 self._setup(networks[vnf_id:vnf_id + 2])
383 LOG.error("Error creating VNF %s", self.name)
387 def _get_vm_config(self, remote_mac_pair):
388 config = self.manager.config
389 devices = self.manager.generator_config.devices
390 with open(BOOT_SCRIPT_PATHNAME, 'r') as boot_script:
391 content = boot_script.read()
392 g1cidr = devices[LEFT].get_gw_ip(self.chain.chain_id) + '/8'
393 g2cidr = devices[RIGHT].get_gw_ip(self.chain.chain_id) + '/8'
395 'forwarder': config.vm_forwarder,
396 'intf_mac1': self.ports[LEFT].get_mac(),
397 'intf_mac2': self.ports[RIGHT].get_mac(),
398 'tg_gateway1_ip': devices[LEFT].tg_gateway_ip_addrs,
399 'tg_gateway2_ip': devices[RIGHT].tg_gateway_ip_addrs,
400 'tg_net1': devices[LEFT].ip_addrs,
401 'tg_net2': devices[RIGHT].ip_addrs,
402 'vnf_gateway1_cidr': g1cidr,
403 'vnf_gateway2_cidr': g2cidr,
404 'tg_mac1': remote_mac_pair[0],
405 'tg_mac2': remote_mac_pair[1]
407 return content.format(**vm_config)
409 def _get_vnic_type(self, port_index):
410 """Get the right vnic type for given port indexself.
412 If SR-IOV is speficied, middle ports in multi-VNF chains
413 can use vswitch or SR-IOV based on config.use_sriov_middle_net
415 if self.manager.config.sriov:
416 chain_length = self.chain.get_length()
417 if self.manager.config.use_sriov_middle_net or chain_length == 1:
419 if self.vnf_id == 0 and port_index == 0:
420 # first VNF in chain must use sriov for left port
422 if (self.vnf_id == chain_length - 1) and (port_index == 1):
423 # last VNF in chain must use sriov for right port
427 def _setup(self, networks):
428 flavor_id = self.manager.flavor.flavor.id
429 # Check if we can reuse an instance with same name
430 for instance in self.manager.existing_instances:
431 if instance.name == self.name:
432 # Verify that other instance characteristics match
433 if instance.flavor['id'] != flavor_id:
434 self._reuse_exception('Flavor mismatch')
435 if instance.status != "ACTIVE":
436 self._reuse_exception('Matching instance is not in ACTIVE state')
437 # The 2 networks for this instance must also be reused
438 if not networks[LEFT].reuse:
439 self._reuse_exception('network %s is new' % networks[LEFT].name)
440 if not networks[RIGHT].reuse:
441 self._reuse_exception('network %s is new' % networks[RIGHT].name)
442 # instance.networks have the network names as keys:
443 # {'nfvbench-rnet0': ['192.168.2.10'], 'nfvbench-lnet0': ['192.168.1.8']}
444 if networks[LEFT].name not in instance.networks:
445 self._reuse_exception('Left network mismatch')
446 if networks[RIGHT].name not in instance.networks:
447 self._reuse_exception('Right network mismatch')
450 self.instance = instance
451 LOG.info('Reusing existing instance %s on %s',
452 self.name, self.get_hypervisor_name())
453 # create or reuse/discover 2 ports per instance
454 self.ports = [ChainVnfPort(self.name + '-' + str(index),
457 self._get_vnic_type(index)) for index in [0, 1]]
458 # if no reuse, actual vm creation is deferred after all ports in the chain are created
459 # since we need to know the next mac in a multi-vnf chain
461 def create_vnf(self, remote_mac_pair):
462 """Create the VNF instance if it does not already exist."""
463 if self.instance is None:
464 port_ids = [{'port-id': vnf_port.port['id']}
465 for vnf_port in self.ports]
466 vm_config = self._get_vm_config(remote_mac_pair)
467 az = self.manager.placer.get_required_az()
468 server = self.manager.comp.create_server(self.name,
469 self.manager.image_instance,
470 self.manager.flavor.flavor,
477 files={NFVBENCH_CFG_VM_PATHNAME: vm_config})
479 self.instance = server
480 if self.manager.placer.is_resolved():
481 LOG.info('Created instance %s on %s', self.name, az)
483 # the location is undetermined at this point
484 # self.get_hypervisor_name() will return None
485 LOG.info('Created instance %s - waiting for placement resolution...', self.name)
486 # here we MUST wait until this instance is resolved otherwise subsequent
487 # VNF creation can be placed in other hypervisors!
488 config = self.manager.config
489 max_retries = (config.check_traffic_time_sec +
490 config.generic_poll_sec - 1) / config.generic_poll_sec
492 for retry in range(max_retries):
493 status = self.get_status()
494 if status == 'ACTIVE':
495 hyp_name = self.get_hypervisor_name()
496 LOG.info('Instance %s is active and has been placed on %s',
498 self.manager.placer.register_full_name(hyp_name)
500 if status == 'ERROR':
501 raise ChainException('Instance %s creation error: %s' %
503 self.instance.fault['message']))
504 LOG.info('Waiting for instance %s to become active (retry %d/%d)...',
505 self.name, retry + 1, max_retries + 1)
506 time.sleep(config.generic_poll_sec)
509 LOG.error('Instance %s creation timed out', self.name)
510 raise ChainException('Instance %s creation timed out' % self.name)
513 raise ChainException('Unable to create instance: %s' % (self.name))
def _reuse_exception(self, reason):
    """Abort the reuse of this instance with an explanatory ChainException.

    reason: short human-readable explanation of why reuse is impossible
    """
    message = 'Instance %s cannot be reused (%s)' % (self.name, reason)
    raise ChainException(message)
def get_status(self):
    """Return the status of this instance, polling Nova if not yet ACTIVE.

    return: the instance status string (e.g. 'ACTIVE', 'BUILD', 'ERROR')
    """
    # Fast path: once ACTIVE, no need to re-poll the server.
    if self.instance.status == 'ACTIVE':
        return self.instance.status
    # Refresh the cached server object to pick up the latest status.
    self.instance = self.manager.comp.poll_server(self.instance)
    return self.instance.status
def get_hostname(self):
    """Return the hypervisor host name on which this VNF instance runs."""
    # The Nova extended attribute name contains ':' and '-', so it is not a
    # valid Python identifier and must be read with getattr().
    attr_name = 'OS-EXT-SRV-ATTR:hypervisor_hostname'
    return getattr(self.instance, attr_name)
528 def get_host_ip(self):
529 """Get the IP address of the host where this instance runs.
531 return: the IP address
534 self.host_ip = self.manager.comp.get_hypervisor(self.get_hostname()).host_ip
537 def get_hypervisor_name(self):
538 """Get hypervisor name (az:hostname) for this VNF instance."""
540 az = getattr(self.instance, 'OS-EXT-AZ:availability_zone')
541 hostname = self.get_hostname()
543 return az + ':' + hostname
548 """Get the uuid for this instance."""
549 return self.instance.id
551 def delete(self, forced=False):
552 """Delete this VNF instance."""
554 LOG.info("Instance %s not deleted (reused)", self.name)
557 self.manager.comp.delete_server(self.instance)
558 LOG.info("Deleted instance %s", self.name)
559 for port in self.ports:
563 """A class to manage a single chain.
565 Can handle any type of chain (EXT, PVP, PVVP)
568 def __init__(self, chain_id, manager):
569 """Create a new chain.
571 chain_id: chain index (first chain is 0)
572 manager: the chain manager that owns all chains
574 self.chain_id = chain_id
575 self.manager = manager
576 self.encaps = manager.encaps
580 self.networks = manager.get_networks(chain_id)
581 # For external chain VNFs can only be discovered from their MAC addresses
582 # either from config or from ARP
583 if manager.config.service_chain != ChainType.EXT:
584 for chain_instance_index in range(self.get_length()):
585 self.instances.append(ChainVnf(self,
586 chain_instance_index,
588 # at this point new VNFs are not created yet but
589 # verify that all discovered VNFs are on the same hypervisor
590 self._check_hypervisors()
591 # now that all VNF ports are created we need to calculate the
592 # left/right remote MAC for each VNF in the chain
593 # before actually creating the VNF itself
594 rem_mac_pairs = self._get_remote_mac_pairs()
595 for instance in self.instances:
596 rem_mac_pair = rem_mac_pairs.pop(0)
597 instance.create_vnf(rem_mac_pair)
602 def _check_hypervisors(self):
603 common_hypervisor = None
604 for instance in self.instances:
605 # get the full hypervisor name (az:compute)
606 hname = instance.get_hypervisor_name()
608 if common_hypervisor:
609 if hname != common_hypervisor:
610 raise ChainException('Discovered instances on different hypervisors:'
611 ' %s and %s' % (hname, common_hypervisor))
613 common_hypervisor = hname
614 if common_hypervisor:
615 # check that the common hypervisor name matches the requested hypervisor name
616 # and set the name to be used by all future instances (if any)
617 if not self.manager.placer.register_full_name(common_hypervisor):
618 raise ChainException('Discovered hypervisor placement %s is incompatible' %
def get_length(self):
    """Return the number of VNFs in this chain.

    A chain with N VNFs traverses N+1 networks, hence the -1.
    """
    network_count = len(self.networks)
    return network_count - 1
625 def _get_remote_mac_pairs(self):
626 """Get the list of remote mac pairs for every VNF in the chain.
628 Traverse the chain from left to right and establish the
629 left/right remote MAC for each VNF in the chainself.
632 mac sequence: tg_src_mac, vm0-mac0, vm0-mac1, tg_dst_mac
633 must produce [[tg_src_mac, tg_dst_mac]] or looking at index in mac sequence: [[0, 3]]
634 the mac pair is what the VNF at that position (index 0) sees as next hop mac left and right
637 tg_src_mac, vm0-mac0, vm0-mac1, vm1-mac0, vm1-mac1, tg_dst_mac
638 Must produce the following list:
639 [[tg_src_mac, vm1-mac0], [vm0-mac1, tg_dst_mac]] or index: [[0, 3], [2, 5]]
641 General case with 3 VMs in chain, the list of consecutive macs (left to right):
642 tg_src_mac, vm0-mac0, vm0-mac1, vm1-mac0, vm1-mac1, vm2-mac0, vm2-mac1, tg_dst_mac
643 Must produce the following list:
644 [[tg_src_mac, vm1-mac0], [vm0-mac1, vm2-mac0], [vm1-mac1, tg_dst_mac]]
645 or index: [[0, 3], [2, 5], [4, 7]]
647 The series pattern is pretty clear: [[n, n+3],... ] where n is multiple of 2
649 # line up all mac from left to right
650 mac_seq = [self.manager.generator_config.devices[LEFT].mac]
651 for instance in self.instances:
652 mac_seq.append(instance.ports[0].get_mac())
653 mac_seq.append(instance.ports[1].get_mac())
654 mac_seq.append(self.manager.generator_config.devices[RIGHT].mac)
657 for _ in self.instances:
658 rem_mac_pairs.append([mac_seq[base], mac_seq[base + 3]])
def get_instances(self):
    """Return the list of ChainVnf instances belonging to this chain.

    NOTE: this is the internal list itself (not a copy); callers must
    treat it as read-only.
    """
    return self.instances
666 def get_vlan(self, port_index):
667 """Get the VLAN id on a given port.
669 port_index: left port is 0, right port is 1
670 return: the vlan_id or None if there is no vlan tagging
672 # for port 1 we need to return the VLAN of the last network in the chain
673 # The networks array contains 2 networks for PVP [left, right]
674 # and 3 networks in the case of PVVP [left.middle,right]
676 # this will pick the last item in array
678 return self.networks[port_index].get_vlan()
680 def get_vxlan(self, port_index):
681 """Get the VXLAN id on a given port.
683 port_index: left port is 0, right port is 1
684 return: the vxlan_id or None if there is no vxlan
686 # for port 1 we need to return the VLAN of the last network in the chain
687 # The networks array contains 2 networks for PVP [left, right]
688 # and 3 networks in the case of PVVP [left.middle,right]
690 # this will pick the last item in array
692 return self.networks[port_index].get_vxlan()
694 def get_dest_mac(self, port_index):
695 """Get the dest MAC on a given port.
697 port_index: left port is 0, right port is 1
701 # for right port, use the right port MAC of the last (right most) VNF In chain
702 return self.instances[-1].ports[1].get_mac()
703 # for left port use the left port MAC of the first (left most) VNF in chain
704 return self.instances[0].ports[0].get_mac()
706 def get_network_uuids(self):
707 """Get UUID of networks in this chain from left to right (order is important).
709 :return: list of UUIDs of networks (2 or 3 elements)
711 return [net['id'] for net in self.networks]
713 def get_host_ips(self):
714 """Return the IP adresss(es) of the host compute nodes used for this chain.
716 :return: a list of 1 or 2 IP addresses
718 return [vnf.get_host_ip() for vnf in self.instances]
720 def get_compute_nodes(self):
721 """Return the name of the host compute nodes used for this chain.
723 :return: a list of 1 host name in the az:host format
725 # Since all chains go through the same compute node(s) we can just retrieve the
726 # compute node name(s) for the first chain
727 return [vnf.get_hypervisor_name() for vnf in self.instances]
730 """Delete this chain."""
731 for instance in self.instances:
733 # only delete if these are chain private networks (not shared)
734 if not self.manager.config.service_chain_shared_net:
735 for network in self.networks:
739 class InstancePlacer(object):
740 """A class to manage instance placement for all VNFs in all chains.
742 A full az string is made of 2 parts AZ and hypervisor.
743 The placement is resolved when both parts az and hypervisor names are known.
746 def __init__(self, req_az, req_hyp):
747 """Create a new instance placer.
749 req_az: requested AZ (can be None or empty if no preference)
750 req_hyp: requested hypervisor name (can be None of empty if no preference)
751 can be any of 'nova:', 'comp1', 'nova:comp1'
752 if it is a list, only the first item is used (backward compatibility in config)
754 req_az is ignored if req_hyp has an az part
755 all other parts beyond the first 2 are ignored in req_hyp
757 # if passed a list just pick the first item
758 if req_hyp and isinstance(req_hyp, list):
760 # only pick first part of az
761 if req_az and ':' in req_az:
762 req_az = req_az.split(':')[0]
764 # check if requested hypervisor string has an AZ part
765 split_hyp = req_hyp.split(':')
766 if len(split_hyp) > 1:
767 # override the AZ part and hypervisor part
768 req_az = split_hyp[0]
769 req_hyp = split_hyp[1]
770 self.requested_az = req_az if req_az else ''
771 self.requested_hyp = req_hyp if req_hyp else ''
772 # Nova can accept AZ only (e.g. 'nova:', use any hypervisor in that AZ)
773 # or hypervisor only (e.g. ':comp1')
774 # or both (e.g. 'nova:comp1')
776 self.required_az = req_az + ':' + self.requested_hyp
778 # need to insert a ':' so nova knows this is the hypervisor name
779 self.required_az = ':' + self.requested_hyp if req_hyp else ''
780 # placement is resolved when both AZ and hypervisor names are known and set
781 self.resolved = self.requested_az != '' and self.requested_hyp != ''
def get_required_az(self):
    """Return the required availability zone string.

    The returned value may or may not be fully resolved
    (see is_resolved()).
    """
    return self.required_az
787 def register_full_name(self, discovered_az):
788 """Verify compatibility and register a discovered hypervisor full name.
790 discovered_az: a discovered AZ in az:hypervisor format
791 return: True if discovered_az is compatible and set
792 False if discovered_az is not compatible
795 return discovered_az == self.required_az
797 # must be in full az format
798 split_daz = discovered_az.split(':')
799 if len(split_daz) != 2:
801 if self.requested_az and self.requested_az != split_daz[0]:
803 if self.requested_hyp and self.requested_hyp != split_daz[1]:
805 self.required_az = discovered_az
809 def is_resolved(self):
810 """Check if the full AZ is resolved.
812 return: True if resolved
817 class ChainManager(object):
818 """A class for managing all chains for a given run.
820 Supports openstack or no openstack.
821 Supports EXT, PVP and PVVP chains.
824 def __init__(self, chain_runner):
825 """Create a chain manager to take care of discovering or bringing up the requested chains.
827 A new instance must be created every time a new config is used.
828 config: the nfvbench config to use
829 cred: openstack credentials to use of None if there is no openstack
831 self.chain_runner = chain_runner
832 self.config = chain_runner.config
833 self.generator_config = chain_runner.traffic_client.generator_config
835 self.image_instance = None
836 self.image_name = None
837 # Left and right networks shared across all chains (only if shared)
842 self.nova_client = None
843 self.neutron_client = None
844 self.glance_client = None
845 self.existing_instances = []
846 # existing ports keyed by the network uuid they belong to
847 self._existing_ports = {}
849 self.openstack = (chain_runner.cred is not None) and not config.l2_loopback
850 self.chain_count = config.service_chain_count
854 session = chain_runner.cred.get_session()
855 self.nova_client = Client(2, session=session)
856 self.neutron_client = neutronclient.Client('2.0', session=session)
857 self.glance_client = glanceclient.Client('2', session=session)
858 self.comp = compute.Compute(self.nova_client,
862 if config.service_chain != ChainType.EXT:
863 self.placer = InstancePlacer(config.availability_zone, config.compute_nodes)
865 self.flavor = ChainFlavor(config.flavor_type, config.flavor, self.comp)
866 # Get list of all existing instances to check if some instances can be reused
867 self.existing_instances = self.comp.get_server_list()
868 # If networks are shared across chains, get the list of networks
869 if config.service_chain_shared_net:
870 self.networks = self.get_networks()
871 # Reuse/create chains
872 for chain_id in range(self.chain_count):
873 self.chains.append(Chain(chain_id, self))
874 if config.service_chain == ChainType.EXT:
875 # if EXT and no ARP we need to read dest MACs from config
877 self._get_dest_macs_from_config()
879 # Make sure all instances are active before proceeding
880 self._ensure_instances_active()
885 # no openstack, no need to create chains
887 if not config.l2_loopback and config.no_arp:
888 self._get_dest_macs_from_config()
889 if config.vlan_tagging:
890 # make sure there at least as many entries as chains in each left/right list
891 if len(config.vlans) != 2:
892 raise ChainException('The config vlans property must be a list '
893 'with 2 lists of VLAN IDs')
895 self.vlans = [self._check_list('vlans[0]', config.vlans[0], re_vlan),
896 self._check_list('vlans[1]', config.vlans[1], re_vlan)]
898 # make sure there are 2 entries
899 if len(config.vnis) != 2:
900 raise ChainException('The config vnis property must be a list with 2 VNIs')
901 self.vnis = [self._check_list('vnis[0]', config.vnis[0], re_vlan),
902 self._check_list('vnis[1]', config.vnis[1], re_vlan)]
def _get_dest_macs_from_config(self):
    """Read and validate the left/right dest MAC lists from the config.

    Sets self.dest_macs to [left_mac_list, right_mac_list] after
    verifying each entry against a MAC address pattern.
    """
    # MAC format: 6 hex pairs joined by a consistent ':' or '-' separator.
    re_mac = "[0-9a-fA-F]{2}([-:])[0-9a-fA-F]{2}(\\1[0-9a-fA-F]{2}){4}$"
    tg_config = self.config.traffic_generator
    left_macs = self._check_list("mac_addrs_left", tg_config.mac_addrs_left, re_mac)
    right_macs = self._check_list("mac_addrs_right", tg_config.mac_addrs_right, re_mac)
    self.dest_macs = [left_macs, right_macs]
912 def _check_list(self, list_name, ll, pattern):
913 # if it is a single int or mac, make it a list of 1 int
914 if isinstance(ll, (int, str)):
916 if not ll or len(ll) < self.chain_count:
917 raise ChainException('%s=%s must be a list with %d elements per chain' %
918 (list_name, ll, self.chain_count))
920 if not re.match(pattern, str(item)):
921 raise ChainException("Invalid format '{item}' specified in {fname}"
922 .format(item=item, fname=list_name))
925 def _setup_image(self):
926 # To avoid reuploading image in server mode, check whether image_name is set or not
928 self.image_instance = self.comp.find_image(self.image_name)
929 if self.image_instance:
930 LOG.info("Reusing image %s", self.image_name)
932 image_name_search_pattern = r'(nfvbenchvm-\d+(\.\d+)*).qcow2'
933 if self.config.vm_image_file:
934 match = re.search(image_name_search_pattern, self.config.vm_image_file)
936 self.image_name = match.group(1)
937 LOG.info('Using provided VM image file %s', self.config.vm_image_file)
939 raise ChainException('Provided VM image file name %s must start with '
940 '"nfvbenchvm-<version>"' % self.config.vm_image_file)
942 pkg_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
943 for f in os.listdir(pkg_root):
944 if re.search(image_name_search_pattern, f):
945 self.config.vm_image_file = pkg_root + '/' + f
946 self.image_name = f.replace('.qcow2', '')
947 LOG.info('Found built-in VM image file %s', f)
950 raise ChainException('Cannot find any built-in VM image file.')
952 self.image_instance = self.comp.find_image(self.image_name)
953 if not self.image_instance:
954 LOG.info('Uploading %s', self.image_name)
955 res = self.comp.upload_image_via_url(self.image_name,
956 self.config.vm_image_file)
959 raise ChainException('Error uploading image %s from %s. ABORTING.' %
960 (self.image_name, self.config.vm_image_file))
961 LOG.info('Image %s successfully uploaded.', self.image_name)
962 self.image_instance = self.comp.find_image(self.image_name)
964 def _ensure_instances_active(self):
966 for chain in self.chains:
967 instances.extend(chain.get_instances())
968 initial_instance_count = len(instances)
969 max_retries = (self.config.check_traffic_time_sec +
970 self.config.generic_poll_sec - 1) / self.config.generic_poll_sec
973 remaining_instances = []
974 for instance in instances:
975 status = instance.get_status()
976 if status == 'ACTIVE':
977 LOG.info('Instance %s is ACTIVE on %s',
978 instance.name, instance.get_hypervisor_name())
980 if status == 'ERROR':
981 raise ChainException('Instance %s creation error: %s' %
983 instance.instance.fault['message']))
984 remaining_instances.append(instance)
985 if not remaining_instances:
988 if retry >= max_retries:
989 raise ChainException('Time-out: %d/%d instances still not active' %
990 (len(remaining_instances), initial_instance_count))
991 LOG.info('Waiting for %d/%d instance to become active (retry %d/%d)...',
992 len(remaining_instances), initial_instance_count,
994 instances = remaining_instances
995 time.sleep(self.config.generic_poll_sec)
996 if initial_instance_count:
997 LOG.info('All instances are active')
def _get_vxlan_net_cfg(self, chain_id):
    """Compute the internal network configs with VxLAN segmentation IDs.

    chain_id: 1-based chain index, or None when the networks are shared by
              all chains (treated as the base of the VNI range).
    return: list of network configs [left, right] for PVP or
            [left, middle, right] otherwise, with 'segmentation_id' set.

    Raises:
        Exception: if a computed VNI exceeds the allowed upper bound
            (vnis[1] from the generator config).
    """
    int_nets = self.config.internal_networks
    net_left = int_nets.left
    net_right = int_nets.right
    # vnis is a [base, max] VNI range from the generator config
    vnis = self.generator_config.vnis
    # NOTE(review): missing lines reconstructed — shared networks
    # (chain_id None) use the base of the VNI range; confirm upstream.
    chain_id = 0 if chain_id is None else chain_id
    seg_id_left = vnis[0]
    if self.config.service_chain == ChainType.PVP:
        # PVP: 2 networks per chain, consecutive VNIs from the base
        if chain_id > 0:
            seg_id_left = ((chain_id - 1) * 2) + seg_id_left
        seg_id_right = seg_id_left + 1
        # Bug fix: the original '(seg_id_left and seg_id_right) > vnis[1]'
        # compared only the last truthy operand of the 'and' chain;
        # compare the largest computed VNI against the range upper bound.
        if max(seg_id_left, seg_id_right) > vnis[1]:
            raise Exception('Segmentation ID is more than allowed '
                            'value: {}'.format(vnis[1]))
        net_left['segmentation_id'] = seg_id_left
        net_right['segmentation_id'] = seg_id_right
        net_cfg = [net_left, net_right]
    else:
        # PVVP: 3 networks per chain
        net_middle = int_nets.middle
        if chain_id > 0:
            seg_id_left = ((chain_id - 1) * 3) + seg_id_left
        seg_id_middle = seg_id_left + 1
        seg_id_right = seg_id_left + 2
        # Same fix as above for the 3-operand 'and' chain.
        if max(seg_id_left, seg_id_middle, seg_id_right) > vnis[1]:
            raise Exception('Segmentation ID is more than allowed '
                            'value: {}'.format(vnis[1]))
        net_left['segmentation_id'] = seg_id_left
        net_middle['segmentation_id'] = seg_id_middle
        net_right['segmentation_id'] = seg_id_right
        net_cfg = [net_left, net_middle, net_right]
    return net_cfg
def get_networks(self, chain_id=None):
    """Get the networks for given EXT, PVP or PVVP chain.

    For EXT packet path, these networks must pre-exist.
    For PVP, PVVP these networks will be created if they do not exist.
    chain_id: to which chain the networks belong.
              a None value will mean that these networks are shared by all chains
    return: list of ChainNetwork instances for this chain.
    """
    # NOTE(review): this block was truncated in extraction; the lookup_only
    # flags and the try/except cleanup tail are reconstructed — confirm
    # against upstream.
    # the only case where self.networks exists is when the networks are
    # shared by all chains
    if self.networks:
        return self.networks
    if self.config.service_chain == ChainType.EXT:
        # EXT networks are never created, only discovered
        lookup_only = True
        ext_net = self.config.external_networks
        net_cfg = [AttrDict({'name': name,
                             'segmentation_id': None,
                             'physical_network': None})
                   for name in [ext_net.left, ext_net.right]]
        # segmentation id and subnet should be discovered from neutron
    else:
        lookup_only = False
        int_nets = self.config.internal_networks
        network_type = set(int_nets[net].get('network_type') for net in int_nets)
        if self.config.vxlan and 'vxlan' in network_type:
            net_cfg = self._get_vxlan_net_cfg(chain_id)
        elif self.config.service_chain == ChainType.PVP:
            net_cfg = [int_nets.left, int_nets.right]
        else:
            net_cfg = [int_nets.left, int_nets.middle, int_nets.right]
    networks = []
    try:
        for cfg in net_cfg:
            networks.append(ChainNetwork(self, cfg, chain_id, lookup_only=lookup_only))
    except Exception:
        # need to cleanup all successful networks prior to bailing out
        for net in networks:
            net.delete()
        raise
    return networks
def get_existing_ports(self):
    """Get the list of existing ports.

    Lazy retrieval of ports as this can be costly if there are lots of ports and
    is only needed when VM and network are being reused.

    return: a dict of list of neutron ports indexed by the network uuid they
            are attached to. Each port is the raw neutron port dict, with
            fields such as 'mac_address', 'network_id', 'binding:host_id',
            'binding:vif_type', 'fixed_ips', 'device_id', 'name'...
    """
    # cached after the first neutron call; subsequent calls are free
    if not self._existing_ports:
        LOG.info('Loading list of all ports...')
        existing_ports = self.neutron_client.list_ports()['ports']
        # place all ports in the dict keyed by the port network uuid
        for port in existing_ports:
            self._existing_ports.setdefault(port['network_id'], []).append(port)
        LOG.info("Loaded %d ports attached to %d networks",
                 len(existing_ports), len(self._existing_ports))
    return self._existing_ports
def get_ports_from_network(self, chain_network):
    """Get the list of existing ports that belong to a network.

    Lazy retrieval of ports as this can be costly if there are lots of ports and
    is only needed when VM and network are being reused.

    chain_network: a ChainNetwork instance for which attached ports need to
                   be retrieved
    return: list of neutron ports attached to requested network, or None if
            no port is attached to that network
    """
    return self.get_existing_ports().get(chain_network.get_uuid(), None)
def get_host_ip_from_mac(self, mac):
    """Get the host IP address matching a MAC.

    mac: MAC address to look for
    return: the IP address of the host where the matching port runs or None
            if not found
    """
    # NOTE(review): truncated block — the try/except guard and the final
    # 'return None' are reconstructed from the original line gaps; confirm
    # against upstream.
    # _existing_ports is a dict of list of ports indexed by network id
    for port_list in self.get_existing_ports().values():
        for port in port_list:
            try:
                if port['mac_address'] == mac:
                    host_id = port['binding:host_id']
                    return self.comp.get_hypervisor(host_id).host_ip
            except KeyError:
                # skip ports missing a MAC or a host binding
                pass
    return None
def get_chain_vlans(self, port_index):
    """Get the list of per chain VLAN id on a given port.

    port_index: left port is 0, right port is 1
    return: a VLAN ID list indexed by the chain index or None if no vlan tagging
    """
    # NOTE(review): the guard line was lost in extraction; reconstructed as
    # 'if self.chains:' (openstack case) — confirm against upstream.
    if self.chains:
        return [self.chains[chain_index].get_vlan(port_index)
                for chain_index in range(self.chain_count)]
    # no openstack: use the configured VLANs for that port
    return self.vlans[port_index]
def get_chain_vxlans(self, port_index):
    """Get the list of per chain VNIs id on a given port.

    port_index: left port is 0, right port is 1
    return: a VNIs ID list indexed by the chain index or None if no vxlan
    """
    # NOTE(review): the guard line was lost in extraction; reconstructed as
    # 'if self.chains:' (openstack case) — confirm against upstream.
    if self.chains:
        return [self.chains[chain_index].get_vxlan(port_index)
                for chain_index in range(self.chain_count)]
    # no openstack: use the configured VNIs for that port
    return self.vnis[port_index]
def get_dest_macs(self, port_index):
    """Get the list of per chain dest MACs on a given port.

    Should not be called if EXT+ARP is used (in that case the traffic gen will
    have the ARP responses back from VNFs with the dest MAC to use).

    port_index: left port is 0, right port is 1
    return: a list of dest MACs indexed by the chain index
    """
    if self.chains and self.config.service_chain != ChainType.EXT:
        # openstack + internal chains: ask each chain for its VNF port MAC
        return [self.chains[chain_index].get_dest_mac(port_index)
                for chain_index in range(self.chain_count)]
    # no openstack or EXT+no-arp: use the configured dest MACs
    return self.dest_macs[port_index]
def get_host_ips(self):
    """Return the IP address(es) of the host compute nodes used for this run.

    :return: a list of 1 IP address
    """
    # NOTE(review): truncated block — the 'if host_ip:' guard and the final
    # return are reconstructed from the original line gaps; confirm against
    # upstream.
    # Since all chains go through the same compute node(s) we can just retrieve the
    # compute node(s) for the first chain
    if self.config.service_chain != ChainType.EXT:
        return self.chains[0].get_host_ips()
    # in the case of EXT, the compute node must be retrieved from the port
    # associated to any of the dest MACs
    dst_macs = self.generator_config.get_dest_macs()
    # dest MAC on port 0, chain 0
    dst_mac = dst_macs[0][0]
    host_ip = self.get_host_ip_from_mac(dst_mac)
    if host_ip:
        LOG.info('Found compute node IP for EXT chain: %s', host_ip)
    return [host_ip]
def get_compute_nodes(self):
    """Return the name of the host compute nodes used for this run.

    :return: a list of 0 or 1 host name in the az:host format
    """
    # NOTE(review): the 'if self.chains:' guard and the final 'return []'
    # were lost in extraction and are reconstructed; confirm upstream.
    # Since all chains go through the same compute node(s) we can just retrieve the
    # compute node name(s) for the first chain
    if self.chains:
        # in the case of EXT, the compute node must be retrieved from the port
        # associated to any of the dest MACs
        return self.chains[0].get_compute_nodes()
    # no openstack = no chains
    return []
1219 """Delete resources for all chains."""
1220 for chain in self.chains:
1222 for network in self.networks:
1225 self.flavor.delete()