# Copyright 2018 Cisco Systems, Inc.  All rights reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.
#
# This module takes care of chaining networks, ports and vms
#
"""NFVBENCH CHAIN DISCOVERY/STAGING.

This module takes care of staging/discovering all resources that are participating in a
benchmarking session: flavors, networks, ports, VNF instances.
If a resource is discovered with the same name, it will be reused.
Otherwise it will be created.

ChainManager: manages VM image, flavor, the staging discovery of all chains
Chain: manages one chain, has 2 or more networks and 1 or more instances
ChainNetwork: manages 1 network in a chain
ChainVnf: manages 1 VNF instance in a chain, has 2 ports
ChainVnfPort: manages 1 instance port

ChainManager-->Chain(*)
Chain-->ChainNetwork(*),ChainVnf(*)
ChainVnf-->ChainVnfPort(2)

Once created/discovered, instances are checked to be in the active state (ready to pass traffic)
Configuration parameters that will influence how these resources are staged/related:
- openstack or no openstack
- number of VNF in each chain (PVP, PVVP)
- SRIOV and middle port SRIOV for port types
- whether networks are shared across chains or not

There is no traffic generation involved in this module.
"""
52 from glanceclient.v2 import client as glanceclient
53 from neutronclient.neutron import client as neutronclient
54 from novaclient.client import Client
56 from attrdict import AttrDict
59 from specs import ChainType
# Left and right index for network and port lists
LEFT = 0
RIGHT = 1
# Name of the VM config file
NFVBENCH_CFG_FILENAME = 'nfvbenchvm.conf'
# full pathname of the VM config in the VM
NFVBENCH_CFG_VM_PATHNAME = os.path.join('/etc/', NFVBENCH_CFG_FILENAME)
# full path of the boot shell script template file on the server where nfvbench runs
# NOTE(review): the extraction dropped one os.path.join() component here; the template
# is expected to live in the 'nfvbenchvm' subdirectory next to this module -- confirm
BOOT_SCRIPT_PATHNAME = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                    'nfvbenchvm',
                                    NFVBENCH_CFG_FILENAME)
class ChainException(Exception):
    """Raised when a chain staging/discovery operation fails."""
class NetworkEncaps(object):
    """Represents a network encapsulation."""
83 class ChainFlavor(object):
84 """Class to manage the chain flavor."""
86 def __init__(self, flavor_name, flavor_dict, comp):
87 """Create a flavor."""
88 self.name = flavor_name
90 self.flavor = self.comp.find_flavor(flavor_name)
94 LOG.info("Reused flavor '%s'", flavor_name)
96 extra_specs = flavor_dict.pop('extra_specs', None)
98 self.flavor = comp.create_flavor(flavor_name,
101 LOG.info("Created flavor '%s'", flavor_name)
103 self.flavor.set_keys(extra_specs)
106 """Delete this flavor."""
107 if not self.reuse and self.flavor:
109 LOG.info("Flavor '%s' deleted", self.name)
112 class ChainVnfPort(object):
113 """A port associated to one VNF in the chain."""
115 def __init__(self, name, vnf, chain_network, vnic_type):
116 """Create or reuse a port on a given network.
118 if vnf.instance is None the VNF instance is not reused and this ChainVnfPort instance must
120 Otherwise vnf.instance is a reused VNF instance and this ChainVnfPort instance must
121 find an existing port to reuse that matches the port requirements: same attached network,
122 instance, name, vnic type
124 name: name for this port
125 vnf: ChainVNf instance that owns this port
126 chain_network: ChainNetwork instance where this port should attach
127 vnic_type: required vnic type for this port
131 self.manager = vnf.manager
135 # VNF instance is reused, we need to find an existing port that matches this instance
137 # discover ports attached to this instance
138 port_list = self.manager.get_ports_from_network(chain_network)
139 for port in port_list:
140 if port['name'] != name:
142 if port['binding:vnic_type'] != vnic_type:
144 if port['device_id'] == vnf.get_uuid():
146 LOG.info('Reusing existing port %s mac=%s', name, port['mac_address'])
149 raise ChainException('Cannot find matching port')
151 # VNF instance is not created yet, we need to create a new port
155 'network_id': chain_network.get_uuid(),
156 'binding:vnic_type': vnic_type
159 port = self.manager.neutron_client.create_port(body)
160 self.port = port['port']
161 LOG.info('Created port %s', name)
163 self.manager.neutron_client.update_port(self.port['id'], {
165 'security_groups': [],
166 'port_security_enabled': False,
169 LOG.info('Security disabled on port %s', name)
171 LOG.info('Failed to disable security on port %s (ignored)', name)
174 """Get the MAC address for this port."""
175 return self.port['mac_address']
178 """Delete this port instance."""
179 if self.reuse or not self.port:
182 while retry < self.manager.config.generic_retry_count:
184 self.manager.neutron_client.delete_port(self.port['id'])
185 LOG.info("Deleted port %s", self.name)
189 time.sleep(self.manager.config.generic_poll_sec)
190 LOG.error('Unable to delete port: %s', self.name)
193 class ChainNetwork(object):
194 """Could be a shared network across all chains or a chain private network."""
196 def __init__(self, manager, network_config, chain_id=None, lookup_only=False):
197 """Create a network for given chain.
199 network_config: a dict containing the network properties
200 (segmentation_id and physical_network)
201 chain_id: to which chain the networks belong.
202 a None value will mean that these networks are shared by all chains
204 self.manager = manager
205 self.name = network_config.name
206 self.segmentation_id = self._get_item(network_config.segmentation_id,
207 chain_id, auto_index=True)
208 self.physical_network = self._get_item(network_config.physical_network, chain_id)
209 if chain_id is not None:
210 self.name += str(chain_id)
215 self._setup(network_config, lookup_only)
218 LOG.error("Cannot find network %s", self.name)
220 LOG.error("Error creating network %s", self.name)
224 def _get_item(self, item_field, index, auto_index=False):
225 """Retrieve an item from a list or a single value.
227 item_field: can be None, a tuple of a single value
228 index: if None is same as 0, else is the index for a chain
229 auto_index: if true will automatically get the final value by adding the
230 index to the base value (if full list not provided)
232 If the item_field is not a tuple, it is considered same as a tuple with same value at any
234 If a list is provided, its length must be > index
240 if isinstance(item_field, tuple):
242 return item_field[index]
244 raise ChainException("List %s is too short for chain index %d" %
245 (str(item_field), index))
246 # single value is configured
248 return item_field + index
251 def _setup(self, network_config, lookup_only):
252 # Lookup if there is a matching network with same name
253 networks = self.manager.neutron_client.list_networks(name=self.name)
254 if networks['networks']:
255 network = networks['networks'][0]
256 # a network of same name already exists, we need to verify it has the same
258 if self.segmentation_id:
259 if network['provider:segmentation_id'] != self.segmentation_id:
260 raise ChainException("Mismatch of 'segmentation_id' for reused "
261 "network '{net}'. Network has id '{seg_id1}', "
262 "configuration requires '{seg_id2}'."
263 .format(net=self.name,
264 seg_id1=network['provider:segmentation_id'],
265 seg_id2=self.segmentation_id))
267 if self.physical_network:
268 if network['provider:physical_network'] != self.physical_network:
269 raise ChainException("Mismatch of 'physical_network' for reused "
270 "network '{net}'. Network has '{phys1}', "
271 "configuration requires '{phys2}'."
272 .format(net=self.name,
273 phys1=network['provider:physical_network'],
274 phys2=self.physical_network))
276 LOG.info('Reusing existing network %s', self.name)
278 self.network = network
281 raise ChainException('Network %s not found' % self.name)
285 'admin_state_up': True
288 if network_config.network_type:
289 body['network']['provider:network_type'] = network_config.network_type
290 if self.segmentation_id:
291 body['network']['provider:segmentation_id'] = self.segmentation_id
292 if self.physical_network:
293 body['network']['provider:physical_network'] = self.physical_network
294 self.network = self.manager.neutron_client.create_network(body)['network']
296 'subnet': {'name': network_config.subnet,
297 'cidr': network_config.cidr,
298 'network_id': self.network['id'],
299 'enable_dhcp': False,
301 'dns_nameservers': []}
303 subnet = self.manager.neutron_client.create_subnet(body)['subnet']
304 # add subnet id to the network dict since it has just been added
305 self.network['subnets'] = [subnet['id']]
306 LOG.info('Created network: %s', self.name)
310 Extract UUID of this network.
312 :return: UUID of this network
314 return self.network['id']
318 Extract vlan for this network.
320 :return: vlan ID for this network
322 if self.network['provider:network_type'] != 'vlan':
323 raise ChainException('Trying to retrieve VLAN id for non VLAN network')
324 return self.network['provider:segmentation_id']
328 Extract VNI for this network.
330 :return: VNI ID for this network
332 if 'vxlan' not in self.network['provider:network_type']:
333 raise ChainException('Trying to retrieve VNI for non VXLAN network')
334 return self.network['provider:segmentation_id']
337 """Delete this network."""
338 if not self.reuse and self.network:
340 while retry < self.manager.config.generic_retry_count:
342 self.manager.neutron_client.delete_network(self.network['id'])
343 LOG.info("Deleted network: %s", self.name)
347 LOG.info('Error deleting network %s (retry %d/%d)...',
350 self.manager.config.generic_retry_count)
351 time.sleep(self.manager.config.generic_poll_sec)
352 LOG.error('Unable to delete network: %s', self.name)
355 class ChainVnf(object):
356 """A class to represent a VNF in a chain."""
358 def __init__(self, chain, vnf_id, networks):
359 """Reuse a VNF instance with same characteristics or create a new VNF instance.
361 chain: the chain where this vnf belongs
362 vnf_id: indicates the index of this vnf in its chain (first vnf=0)
363 networks: the list of all networks (ChainNetwork) of the current chain
365 self.manager = chain.manager
368 self.name = self.manager.config.loop_vm_name + str(chain.chain_id)
369 if len(networks) > 2:
370 # we will have more than 1 VM in each chain
371 self.name += '-' + str(vnf_id)
378 # the vnf_id is conveniently also the starting index in networks
379 # for the left and right networks associated to this VNF
380 self._setup(networks[vnf_id:vnf_id + 2])
382 LOG.error("Error creating VNF %s", self.name)
386 def _get_vm_config(self, remote_mac_pair):
387 config = self.manager.config
388 devices = self.manager.generator_config.devices
389 with open(BOOT_SCRIPT_PATHNAME, 'r') as boot_script:
390 content = boot_script.read()
391 g1cidr = devices[LEFT].get_gw_ip(self.chain.chain_id) + '/8'
392 g2cidr = devices[RIGHT].get_gw_ip(self.chain.chain_id) + '/8'
394 'forwarder': config.vm_forwarder,
395 'intf_mac1': self.ports[LEFT].get_mac(),
396 'intf_mac2': self.ports[RIGHT].get_mac(),
397 'tg_gateway1_ip': devices[LEFT].tg_gateway_ip_addrs,
398 'tg_gateway2_ip': devices[RIGHT].tg_gateway_ip_addrs,
399 'tg_net1': devices[LEFT].ip_addrs,
400 'tg_net2': devices[RIGHT].ip_addrs,
401 'vnf_gateway1_cidr': g1cidr,
402 'vnf_gateway2_cidr': g2cidr,
403 'tg_mac1': remote_mac_pair[0],
404 'tg_mac2': remote_mac_pair[1]
406 return content.format(**vm_config)
408 def _get_vnic_type(self, port_index):
409 """Get the right vnic type for given port indexself.
411 If SR-IOV is speficied, middle ports in multi-VNF chains
412 can use vswitch or SR-IOV based on config.use_sriov_middle_net
414 if self.manager.config.sriov:
415 chain_length = self.chain.get_length()
416 if self.manager.config.use_sriov_middle_net or chain_length == 1:
418 if self.vnf_id == 0 and port_index == 0:
419 # first VNF in chain must use sriov for left port
421 if (self.vnf_id == chain_length - 1) and (port_index == 1):
422 # last VNF in chain must use sriov for right port
426 def _setup(self, networks):
427 flavor_id = self.manager.flavor.flavor.id
428 # Check if we can reuse an instance with same name
429 for instance in self.manager.existing_instances:
430 if instance.name == self.name:
431 # Verify that other instance characteristics match
432 if instance.flavor['id'] != flavor_id:
433 self._reuse_exception('Flavor mismatch')
434 if instance.status != "ACTIVE":
435 self._reuse_exception('Matching instance is not in ACTIVE state')
436 # The 2 networks for this instance must also be reused
437 if not networks[LEFT].reuse:
438 self._reuse_exception('network %s is new' % networks[LEFT].name)
439 if not networks[RIGHT].reuse:
440 self._reuse_exception('network %s is new' % networks[RIGHT].name)
441 # instance.networks have the network names as keys:
442 # {'nfvbench-rnet0': ['192.168.2.10'], 'nfvbench-lnet0': ['192.168.1.8']}
443 if networks[LEFT].name not in instance.networks:
444 self._reuse_exception('Left network mismatch')
445 if networks[RIGHT].name not in instance.networks:
446 self._reuse_exception('Right network mismatch')
449 self.instance = instance
450 LOG.info('Reusing existing instance %s on %s',
451 self.name, self.get_hypervisor_name())
452 # create or reuse/discover 2 ports per instance
453 self.ports = [ChainVnfPort(self.name + '-' + str(index),
456 self._get_vnic_type(index)) for index in [0, 1]]
457 # if no reuse, actual vm creation is deferred after all ports in the chain are created
458 # since we need to know the next mac in a multi-vnf chain
460 def create_vnf(self, remote_mac_pair):
461 """Create the VNF instance if it does not already exist."""
462 if self.instance is None:
463 port_ids = [{'port-id': vnf_port.port['id']}
464 for vnf_port in self.ports]
465 vm_config = self._get_vm_config(remote_mac_pair)
466 az = self.manager.placer.get_required_az()
467 server = self.manager.comp.create_server(self.name,
468 self.manager.image_instance,
469 self.manager.flavor.flavor,
476 files={NFVBENCH_CFG_VM_PATHNAME: vm_config})
478 self.instance = server
479 if self.manager.placer.is_resolved():
480 LOG.info('Created instance %s on %s', self.name, az)
482 # the location is undetermined at this point
483 # self.get_hypervisor_name() will return None
484 LOG.info('Created instance %s - waiting for placement resolution...', self.name)
485 # here we MUST wait until this instance is resolved otherwise subsequent
486 # VNF creation can be placed in other hypervisors!
487 config = self.manager.config
488 max_retries = (config.check_traffic_time_sec +
489 config.generic_poll_sec - 1) / config.generic_poll_sec
491 for retry in range(max_retries):
492 status = self.get_status()
493 if status == 'ACTIVE':
494 hyp_name = self.get_hypervisor_name()
495 LOG.info('Instance %s is active and has been placed on %s',
497 self.manager.placer.register_full_name(hyp_name)
499 if status == 'ERROR':
500 raise ChainException('Instance %s creation error: %s' %
502 self.instance.fault['message']))
503 LOG.info('Waiting for instance %s to become active (retry %d/%d)...',
504 self.name, retry + 1, max_retries + 1)
505 time.sleep(config.generic_poll_sec)
508 LOG.error('Instance %s creation timed out', self.name)
509 raise ChainException('Instance %s creation timed out' % self.name)
512 raise ChainException('Unable to create instance: %s' % (self.name))
514 def _reuse_exception(self, reason):
515 raise ChainException('Instance %s cannot be reused (%s)' % (self.name, reason))
517 def get_status(self):
518 """Get the statis of this instance."""
519 if self.instance.status != 'ACTIVE':
520 self.instance = self.manager.comp.poll_server(self.instance)
521 return self.instance.status
523 def get_hostname(self):
524 """Get the hypervisor host name running this VNF instance."""
525 if self.manager.is_admin:
526 hypervisor_hostname = getattr(self.instance, 'OS-EXT-SRV-ATTR:hypervisor_hostname')
528 hypervisor_hostname = self.manager.config.hypervisor_hostname
529 if not hypervisor_hostname:
530 raise ChainException('Hypervisor hostname parameter is mandatory')
531 return hypervisor_hostname
533 def get_host_ip(self):
534 """Get the IP address of the host where this instance runs.
536 return: the IP address
539 self.host_ip = self.manager.comp.get_hypervisor(self.get_hostname()).host_ip
542 def get_hypervisor_name(self):
543 """Get hypervisor name (az:hostname) for this VNF instance."""
545 if self.manager.is_admin:
546 az = getattr(self.instance, 'OS-EXT-AZ:availability_zone')
548 az = self.manager.config.availability_zone
550 raise ChainException('Availability zone parameter is mandatory')
551 hostname = self.get_hostname()
553 return az + ':' + hostname
558 """Get the uuid for this instance."""
559 return self.instance.id
561 def delete(self, forced=False):
562 """Delete this VNF instance."""
564 LOG.info("Instance %s not deleted (reused)", self.name)
567 self.manager.comp.delete_server(self.instance)
568 LOG.info("Deleted instance %s", self.name)
569 for port in self.ports:
573 """A class to manage a single chain.
575 Can handle any type of chain (EXT, PVP, PVVP)
578 def __init__(self, chain_id, manager):
579 """Create a new chain.
581 chain_id: chain index (first chain is 0)
582 manager: the chain manager that owns all chains
584 self.chain_id = chain_id
585 self.manager = manager
586 self.encaps = manager.encaps
590 self.networks = manager.get_networks(chain_id)
591 # For external chain VNFs can only be discovered from their MAC addresses
592 # either from config or from ARP
593 if manager.config.service_chain != ChainType.EXT:
594 for chain_instance_index in range(self.get_length()):
595 self.instances.append(ChainVnf(self,
596 chain_instance_index,
598 # at this point new VNFs are not created yet but
599 # verify that all discovered VNFs are on the same hypervisor
600 self._check_hypervisors()
601 # now that all VNF ports are created we need to calculate the
602 # left/right remote MAC for each VNF in the chain
603 # before actually creating the VNF itself
604 rem_mac_pairs = self._get_remote_mac_pairs()
605 for instance in self.instances:
606 rem_mac_pair = rem_mac_pairs.pop(0)
607 instance.create_vnf(rem_mac_pair)
612 def _check_hypervisors(self):
613 common_hypervisor = None
614 for instance in self.instances:
615 # get the full hypervizor name (az:compute)
616 hname = instance.get_hypervisor_name()
618 if common_hypervisor:
619 if hname != common_hypervisor:
620 raise ChainException('Discovered instances on different hypervisors:'
621 ' %s and %s' % (hname, common_hypervisor))
623 common_hypervisor = hname
624 if common_hypervisor:
625 # check that the common hypervisor name matchs the requested hypervisor name
626 # and set the name to be used by all future instances (if any)
627 if not self.manager.placer.register_full_name(common_hypervisor):
628 raise ChainException('Discovered hypervisor placement %s is incompatible' %
631 def get_length(self):
632 """Get the number of VNF in the chain."""
633 return len(self.networks) - 1
635 def _get_remote_mac_pairs(self):
636 """Get the list of remote mac pairs for every VNF in the chain.
638 Traverse the chain from left to right and establish the
639 left/right remote MAC for each VNF in the chainself.
642 mac sequence: tg_src_mac, vm0-mac0, vm0-mac1, tg_dst_mac
643 must produce [[tg_src_mac, tg_dst_mac]] or looking at index in mac sequence: [[0, 3]]
644 the mac pair is what the VNF at that position (index 0) sees as next hop mac left and right
647 tg_src_mac, vm0-mac0, vm0-mac1, vm1-mac0, vm1-mac1, tg_dst_mac
648 Must produce the following list:
649 [[tg_src_mac, vm1-mac0], [vm0-mac1, tg_dst_mac]] or index: [[0, 3], [2, 5]]
651 General case with 3 VMs in chain, the list of consecutive macs (left to right):
652 tg_src_mac, vm0-mac0, vm0-mac1, vm1-mac0, vm1-mac1, vm2-mac0, vm2-mac1, tg_dst_mac
653 Must produce the following list:
654 [[tg_src_mac, vm1-mac0], [vm0-mac1, vm2-mac0], [vm1-mac1, tg_dst_mac]]
655 or index: [[0, 3], [2, 5], [4, 7]]
657 The series pattern is pretty clear: [[n, n+3],... ] where n is multiple of 2
659 # line up all mac from left to right
660 mac_seq = [self.manager.generator_config.devices[LEFT].mac]
661 for instance in self.instances:
662 mac_seq.append(instance.ports[0].get_mac())
663 mac_seq.append(instance.ports[1].get_mac())
664 mac_seq.append(self.manager.generator_config.devices[RIGHT].mac)
667 for _ in self.instances:
668 rem_mac_pairs.append([mac_seq[base], mac_seq[base + 3]])
672 def get_instances(self):
673 """Return all instances for this chain."""
674 return self.instances
676 def get_vlan(self, port_index):
677 """Get the VLAN id on a given port.
679 port_index: left port is 0, right port is 1
680 return: the vlan_id or None if there is no vlan tagging
682 # for port 1 we need to return the VLAN of the last network in the chain
683 # The networks array contains 2 networks for PVP [left, right]
684 # and 3 networks in the case of PVVP [left.middle,right]
686 # this will pick the last item in array
688 return self.networks[port_index].get_vlan()
690 def get_vxlan(self, port_index):
691 """Get the VXLAN id on a given port.
693 port_index: left port is 0, right port is 1
694 return: the vxlan_id or None if there is no vxlan
696 # for port 1 we need to return the VLAN of the last network in the chain
697 # The networks array contains 2 networks for PVP [left, right]
698 # and 3 networks in the case of PVVP [left.middle,right]
700 # this will pick the last item in array
702 return self.networks[port_index].get_vxlan()
704 def get_dest_mac(self, port_index):
705 """Get the dest MAC on a given port.
707 port_index: left port is 0, right port is 1
711 # for right port, use the right port MAC of the last (right most) VNF In chain
712 return self.instances[-1].ports[1].get_mac()
713 # for left port use the left port MAC of the first (left most) VNF in chain
714 return self.instances[0].ports[0].get_mac()
716 def get_network_uuids(self):
717 """Get UUID of networks in this chain from left to right (order is important).
719 :return: list of UUIDs of networks (2 or 3 elements)
721 return [net['id'] for net in self.networks]
723 def get_host_ips(self):
724 """Return the IP adresss(es) of the host compute nodes used for this chain.
726 :return: a list of 1 or 2 IP addresses
728 return [vnf.get_host_ip() for vnf in self.instances]
730 def get_compute_nodes(self):
731 """Return the name of the host compute nodes used for this chain.
733 :return: a list of 1 host name in the az:host format
735 # Since all chains go through the same compute node(s) we can just retrieve the
736 # compute node name(s) for the first chain
737 return [vnf.get_hypervisor_name() for vnf in self.instances]
740 """Delete this chain."""
741 for instance in self.instances:
743 # only delete if these are chain private networks (not shared)
744 if not self.manager.config.service_chain_shared_net:
745 for network in self.networks:
749 class InstancePlacer(object):
750 """A class to manage instance placement for all VNFs in all chains.
752 A full az string is made of 2 parts AZ and hypervisor.
753 The placement is resolved when both parts az and hypervisor names are known.
756 def __init__(self, req_az, req_hyp):
757 """Create a new instance placer.
759 req_az: requested AZ (can be None or empty if no preference)
760 req_hyp: requested hypervisor name (can be None of empty if no preference)
761 can be any of 'nova:', 'comp1', 'nova:comp1'
762 if it is a list, only the first item is used (backward compatibility in config)
764 req_az is ignored if req_hyp has an az part
765 all other parts beyond the first 2 are ignored in req_hyp
767 # if passed a list just pick the first item
768 if req_hyp and isinstance(req_hyp, list):
770 # only pick first part of az
771 if req_az and ':' in req_az:
772 req_az = req_az.split(':')[0]
774 # check if requested hypervisor string has an AZ part
775 split_hyp = req_hyp.split(':')
776 if len(split_hyp) > 1:
777 # override the AZ part and hypervisor part
778 req_az = split_hyp[0]
779 req_hyp = split_hyp[1]
780 self.requested_az = req_az if req_az else ''
781 self.requested_hyp = req_hyp if req_hyp else ''
782 # Nova can accept AZ only (e.g. 'nova:', use any hypervisor in that AZ)
783 # or hypervisor only (e.g. ':comp1')
784 # or both (e.g. 'nova:comp1')
786 self.required_az = req_az + ':' + self.requested_hyp
788 # need to insert a ':' so nova knows this is the hypervisor name
789 self.required_az = ':' + self.requested_hyp if req_hyp else ''
790 # placement is resolved when both AZ and hypervisor names are known and set
791 self.resolved = self.requested_az != '' and self.requested_hyp != ''
793 def get_required_az(self):
794 """Return the required az (can be resolved or not)."""
795 return self.required_az
797 def register_full_name(self, discovered_az):
798 """Verify compatibility and register a discovered hypervisor full name.
800 discovered_az: a discovered AZ in az:hypervisor format
801 return: True if discovered_az is compatible and set
802 False if discovered_az is not compatible
805 return discovered_az == self.required_az
807 # must be in full az format
808 split_daz = discovered_az.split(':')
809 if len(split_daz) != 2:
811 if self.requested_az and self.requested_az != split_daz[0]:
813 if self.requested_hyp and self.requested_hyp != split_daz[1]:
815 self.required_az = discovered_az
819 def is_resolved(self):
820 """Check if the full AZ is resolved.
822 return: True if resolved
827 class ChainManager(object):
828 """A class for managing all chains for a given run.
830 Supports openstack or no openstack.
831 Supports EXT, PVP and PVVP chains.
834 def __init__(self, chain_runner):
835 """Create a chain manager to take care of discovering or bringing up the requested chains.
837 A new instance must be created every time a new config is used.
838 config: the nfvbench config to use
839 cred: openstack credentials to use of None if there is no openstack
841 self.chain_runner = chain_runner
842 self.config = chain_runner.config
843 self.generator_config = chain_runner.traffic_client.generator_config
845 self.image_instance = None
846 self.image_name = None
847 # Left and right networks shared across all chains (only if shared)
852 self.nova_client = None
853 self.neutron_client = None
854 self.glance_client = None
855 self.existing_instances = []
856 # existing ports keyed by the network uuid they belong to
857 self._existing_ports = {}
859 self.openstack = (chain_runner.cred is not None) and not config.l2_loopback
860 self.chain_count = config.service_chain_count
864 session = chain_runner.cred.get_session()
865 self.is_admin = chain_runner.cred.is_admin
866 self.nova_client = Client(2, session=session)
867 self.neutron_client = neutronclient.Client('2.0', session=session)
868 self.glance_client = glanceclient.Client('2', session=session)
869 self.comp = compute.Compute(self.nova_client,
873 if config.service_chain != ChainType.EXT:
874 self.placer = InstancePlacer(config.availability_zone, config.compute_nodes)
876 self.flavor = ChainFlavor(config.flavor_type, config.flavor, self.comp)
877 # Get list of all existing instances to check if some instances can be reused
878 self.existing_instances = self.comp.get_server_list()
879 # If networks are shared across chains, get the list of networks
880 if config.service_chain_shared_net:
881 self.networks = self.get_networks()
882 # Reuse/create chains
883 for chain_id in range(self.chain_count):
884 self.chains.append(Chain(chain_id, self))
885 if config.service_chain == ChainType.EXT:
886 # if EXT and no ARP we need to read dest MACs from config
888 self._get_dest_macs_from_config()
890 # Make sure all instances are active before proceeding
891 self._ensure_instances_active()
892 # network API call do not show VLANS ID if not admin read from config
893 if not self.is_admin:
894 self._get_config_vlans()
899 # no openstack, no need to create chains
900 if not config.l2_loopback and config.no_arp:
901 self._get_dest_macs_from_config()
902 if config.vlan_tagging:
903 # make sure there at least as many entries as chains in each left/right list
904 if len(config.vlans) != 2:
905 raise ChainException('The config vlans property must be a list '
906 'with 2 lists of VLAN IDs')
907 self._get_config_vlans()
909 raise ChainException('VxLAN is only supported with OpenStack')
911 def _get_config_vlans(self):
914 self.vlans = [self._check_list('vlans[0]', self.config.vlans[0], re_vlan),
915 self._check_list('vlans[1]', self.config.vlans[1], re_vlan)]
917 raise ChainException('vlans parameter is mandatory. Set valid value in config file')
919 def _get_dest_macs_from_config(self):
920 re_mac = "[0-9a-fA-F]{2}([-:])[0-9a-fA-F]{2}(\\1[0-9a-fA-F]{2}){4}$"
921 tg_config = self.config.traffic_generator
922 self.dest_macs = [self._check_list("mac_addrs_left",
923 tg_config.mac_addrs_left, re_mac),
924 self._check_list("mac_addrs_right",
925 tg_config.mac_addrs_right, re_mac)]
927 def _check_list(self, list_name, ll, pattern):
928 # if it is a single int or mac, make it a list of 1 int
929 if isinstance(ll, (int, str)):
932 if not re.match(pattern, str(item)):
933 raise ChainException("Invalid format '{item}' specified in {fname}"
934 .format(item=item, fname=list_name))
935 # must have at least 1 element
937 raise ChainException('%s cannot be empty' % (list_name))
938 # for shared network, if 1 element is passed, replicate it as many times
940 if self.config.service_chain_shared_net and len(ll) == 1:
941 ll = [ll[0]] * self.chain_count
943 # number of elements musty be the number of chains
944 elif len(ll) < self.chain_count:
945 raise ChainException('%s=%s must be a list with %d elements per chain' %
946 (list_name, ll, self.chain_count))
949 def _setup_image(self):
950 # To avoid reuploading image in server mode, check whether image_name is set or not
952 self.image_instance = self.comp.find_image(self.image_name)
953 if self.image_instance:
954 LOG.info("Reusing image %s", self.image_name)
956 image_name_search_pattern = r'(nfvbenchvm-\d+(\.\d+)*).qcow2'
957 if self.config.vm_image_file:
958 match = re.search(image_name_search_pattern, self.config.vm_image_file)
960 self.image_name = match.group(1)
961 LOG.info('Using provided VM image file %s', self.config.vm_image_file)
963 raise ChainException('Provided VM image file name %s must start with '
964 '"nfvbenchvm-<version>"' % self.config.vm_image_file)
966 pkg_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
967 for f in os.listdir(pkg_root):
968 if re.search(image_name_search_pattern, f):
969 self.config.vm_image_file = pkg_root + '/' + f
970 self.image_name = f.replace('.qcow2', '')
971 LOG.info('Found built-in VM image file %s', f)
974 raise ChainException('Cannot find any built-in VM image file.')
976 self.image_instance = self.comp.find_image(self.image_name)
977 if not self.image_instance:
978 LOG.info('Uploading %s', self.image_name)
979 res = self.comp.upload_image_via_url(self.image_name,
980 self.config.vm_image_file)
983 raise ChainException('Error uploading image %s from %s. ABORTING.' %
984 (self.image_name, self.config.vm_image_file))
985 LOG.info('Image %s successfully uploaded.', self.image_name)
986 self.image_instance = self.comp.find_image(self.image_name)
988 def _ensure_instances_active(self):
990 for chain in self.chains:
991 instances.extend(chain.get_instances())
992 initial_instance_count = len(instances)
993 max_retries = (self.config.check_traffic_time_sec +
994 self.config.generic_poll_sec - 1) / self.config.generic_poll_sec
997 remaining_instances = []
998 for instance in instances:
999 status = instance.get_status()
1000 if status == 'ACTIVE':
1001 LOG.info('Instance %s is ACTIVE on %s',
1002 instance.name, instance.get_hypervisor_name())
1004 if status == 'ERROR':
1005 raise ChainException('Instance %s creation error: %s' %
1007 instance.instance.fault['message']))
1008 remaining_instances.append(instance)
1009 if not remaining_instances:
1012 if retry >= max_retries:
1013 raise ChainException('Time-out: %d/%d instances still not active' %
1014 (len(remaining_instances), initial_instance_count))
1015 LOG.info('Waiting for %d/%d instance to become active (retry %d/%d)...',
1016 len(remaining_instances), initial_instance_count,
1018 instances = remaining_instances
1019 time.sleep(self.config.generic_poll_sec)
1020 if initial_instance_count:
1021 LOG.info('All instances are active')
1023 def get_networks(self, chain_id=None):
1024 """Get the networks for given EXT, PVP or PVVP chain.
1026 For EXT packet path, these networks must pre-exist.
1027 For PVP, PVVP these networks will be created if they do not exist.
1028 chain_id: to which chain the networks belong.
1029 a None value will mean that these networks are shared by all chains
1032 # the only case where self.networks exists is when the networks are shared
1034 return self.networks
1035 if self.config.service_chain == ChainType.EXT:
1037 ext_net = self.config.external_networks
1038 net_cfg = [AttrDict({'name': name,
1039 'segmentation_id': None,
1040 'physical_network': None})
1041 for name in [ext_net.left, ext_net.right]]
1042 # segmentation id and subnet should be discovered from neutron
1045 int_nets = self.config.internal_networks
1047 if self.config.service_chain == ChainType.PVP:
1048 net_cfg = [int_nets.left, int_nets.right]
1050 net_cfg = [int_nets.left, int_nets.middle, int_nets.right]
1054 networks.append(ChainNetwork(self, cfg, chain_id, lookup_only=lookup_only))
1056 # need to cleanup all successful networks prior to bailing out
1057 for net in networks:
1062 def get_existing_ports(self):
1063 """Get the list of existing ports.
1065 Lazy retrieval of ports as this can be costly if there are lots of ports and
1066 is only needed when VM and network are being reused.
1068 return: a dict of list of neutron ports indexed by the network uuid they are attached to
1070 Each port is a dict with fields such as below:
1071 {'allowed_address_pairs': [], 'extra_dhcp_opts': [],
1072 'updated_at': '2018-10-06T07:15:35Z', 'device_owner': 'compute:nova',
1073 'revision_number': 10, 'port_security_enabled': False, 'binding:profile': {},
1074 'fixed_ips': [{'subnet_id': '6903a3b3-49a1-4ba4-8259-4a90e7a44b21',
1075 'ip_address': '192.168.1.4'}], 'id': '3dcb9cfa-d82a-4dd1-85a1-fd8284b52d72',
1076 'security_groups': [],
1077 'binding:vif_details': {'vhostuser_socket': '/tmp/3dcb9cfa-d82a-4dd1-85a1-fd8284b52d72',
1078 'vhostuser_mode': 'server'},
1079 'binding:vif_type': 'vhostuser',
1080 'mac_address': 'fa:16:3e:3c:63:04',
1081 'project_id': '977ac76a63d7492f927fa80e86baff4c',
1083 'binding:host_id': 'a20-champagne-compute-1',
1085 'device_id': 'a98e2ad2-5371-4aa5-a356-8264a970ce4b',
1086 'name': 'nfvbench-loop-vm0-0', 'admin_state_up': True,
1087 'network_id': '3ea5fd88-278f-4d9d-b24d-1e443791a055',
1088 'tenant_id': '977ac76a63d7492f927fa80e86baff4c',
1089 'created_at': '2018-10-06T07:15:10Z',
1090 'binding:vnic_type': 'normal'}
1092 if not self._existing_ports:
1093 LOG.info('Loading list of all ports...')
1094 existing_ports = self.neutron_client.list_ports()['ports']
1095 # place all ports in the dict keyed by the port network uuid
1096 for port in existing_ports:
1097 port_list = self._existing_ports.setdefault(port['network_id'], [])
1098 port_list.append(port)
1099 LOG.info("Loaded %d ports attached to %d networks",
1100 len(existing_ports), len(self._existing_ports))
1101 return self._existing_ports
1103 def get_ports_from_network(self, chain_network):
1104 """Get the list of existing ports that belong to a network.
1106 Lazy retrieval of ports as this can be costly if there are lots of ports and
1107 is only needed when VM and network are being reused.
1109 chain_network: a ChainNetwork instance for which attached ports neeed to be retrieved
1110 return: list of neutron ports attached to requested network
1112 return self.get_existing_ports().get(chain_network.get_uuid(), None)
1114 def get_host_ip_from_mac(self, mac):
1115 """Get the host IP address matching a MAC.
1117 mac: MAC address to look for
1118 return: the IP address of the host where the matching port runs or None if not found
1120 # _existing_ports is a dict of list of ports indexed by network id
1121 for port_list in self.get_existing_ports().values():
1122 for port in port_list:
1124 if port['mac_address'] == mac:
1125 host_id = port['binding:host_id']
1126 return self.comp.get_hypervisor(host_id).host_ip
1131 def get_chain_vlans(self, port_index):
1132 """Get the list of per chain VLAN id on a given port.
1134 port_index: left port is 0, right port is 1
1135 return: a VLAN ID list indexed by the chain index or None if no vlan tagging
1137 if self.chains and self.is_admin:
1138 return [self.chains[chain_index].get_vlan(port_index)
1139 for chain_index in range(self.chain_count)]
1141 return self.vlans[port_index]
1143 def get_chain_vxlans(self, port_index):
1144 """Get the list of per chain VNIs id on a given port.
1146 port_index: left port is 0, right port is 1
1147 return: a VNIs ID list indexed by the chain index or None if no vlan tagging
1150 return [self.chains[chain_index].get_vxlan(port_index)
1151 for chain_index in range(self.chain_count)]
1153 raise ChainException('VxLAN is only supported with OpenStack')
1155 def get_dest_macs(self, port_index):
1156 """Get the list of per chain dest MACs on a given port.
1158 Should not be called if EXT+ARP is used (in that case the traffic gen will
1159 have the ARP responses back from VNFs with the dest MAC to use).
1161 port_index: left port is 0, right port is 1
1162 return: a list of dest MACs indexed by the chain index
1164 if self.chains and self.config.service_chain != ChainType.EXT:
1165 return [self.chains[chain_index].get_dest_mac(port_index)
1166 for chain_index in range(self.chain_count)]
1167 # no openstack or EXT+no-arp
1168 return self.dest_macs[port_index]
1170 def get_host_ips(self):
1171 """Return the IP adresss(es) of the host compute nodes used for this run.
1173 :return: a list of 1 IP address
1175 # Since all chains go through the same compute node(s) we can just retrieve the
1176 # compute node(s) for the first chain
1178 if self.config.service_chain != ChainType.EXT:
1179 return self.chains[0].get_host_ips()
1180 # in the case of EXT, the compute node must be retrieved from the port
1181 # associated to any of the dest MACs
1182 dst_macs = self.generator_config.get_dest_macs()
1183 # dest MAC on port 0, chain 0
1184 dst_mac = dst_macs[0][0]
1185 host_ip = self.get_host_ip_from_mac(dst_mac)
1187 LOG.info('Found compute node IP for EXT chain: %s', host_ip)
1191 def get_compute_nodes(self):
1192 """Return the name of the host compute nodes used for this run.
1194 :return: a list of 0 or 1 host name in the az:host format
1196 # Since all chains go through the same compute node(s) we can just retrieve the
1197 # compute node name(s) for the first chain
1199 # in the case of EXT, the compute node must be retrieved from the port
1200 # associated to any of the dest MACs
1201 return self.chains[0].get_compute_nodes()
1202 # no openstack = no chains
1206 """Delete resources for all chains."""
1207 for chain in self.chains:
1209 for network in self.networks:
1212 self.flavor.delete()