2 # Copyright 2018 Cisco Systems, Inc. All rights reserved.
4 # Licensed under the Apache License, Version 2.0 (the "License"); you may
5 # not use this file except in compliance with the License. You may obtain
6 # a copy of the License at
8 # http://www.apache.org/licenses/LICENSE-2.0
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 # License for the specific language governing permissions and limitations
17 # This module takes care of chaining networks, ports and vms
19 """NFVBENCH CHAIN DISCOVERY/STAGING.
21 This module takes care of staging/discovering all resources that are participating in a
22 benchmarking session: flavors, networks, ports, VNF instances.
23 If a resource is discovered with the same name, it will be reused.
24 Otherwise it will be created.
26 ChainManager: manages VM image, flavor, the staging/discovery of all chains
28 Chain: manages one chain, has 2 or more networks and 1 or more instances
29 ChainNetwork: manages 1 network in a chain
30 ChainVnf: manages 1 VNF instance in a chain, has 2 ports
31 ChainVnfPort: manages 1 instance port
33 ChainManager-->Chain(*)
34 Chain-->ChainNetwork(*),ChainVnf(*)
35 ChainVnf-->ChainVnfPort(2)
37 Once created/discovered, instances are checked to be in the active state (ready to pass traffic)
38 Configuration parameters that will influence how these resources are staged/related:
39 - openstack or no openstack
42 - number of VNF in each chain (PVP, PVVP)
43 - SRIOV and middle port SRIOV for port types
44 - whether networks are shared across chains or not
46 There is no traffic generation involved in this module.
52 from glanceclient.v2 import client as glanceclient
53 from neutronclient.neutron import client as neutronclient
54 from novaclient.client import Client
56 from attrdict import AttrDict
59 from specs import ChainType
61 # Left and right index for network and port lists
# NOTE(review): this listing is a sampled excerpt -- the number starting each
# line is the original source line number and several lines are elided.
# The LEFT/RIGHT assignments themselves (orig lines 62-63) are not visible
# here, although later code indexes networks/ports/devices with LEFT and RIGHT.
64 # Name of the VM config file
65 NFVBENCH_CFG_FILENAME = 'nfvbenchvm.conf'
66 # full pathname of the VM config in the VM
67 NFVBENCH_CFG_VM_PATHNAME = os.path.join('/etc/', NFVBENCH_CFG_FILENAME)
68 # full path of the boot shell script template file on the server where nfvbench runs
# NOTE(review): orig line 70 (a middle argument of this os.path.join call) is
# elided; as shown the path ends with NFVBENCH_CFG_FILENAME, which does not
# match the "boot shell script" description above -- confirm against the full source.
69 BOOT_SCRIPT_PATHNAME = os.path.join(os.path.dirname(os.path.abspath(__file__)),
71 NFVBENCH_CFG_FILENAME)
# Root exception type for this module: raised on any error while creating,
# discovering, reusing or deleting chain resources (networks, ports, VNFs).
74 class ChainException(Exception):
75 """Exception while operating the chains."""
# NOTE(review): only the class header and docstring are visible in this
# sampled excerpt; any class body (orig line 81 onward) is elided -- confirm
# against the full source before assuming this class is empty.
79 class NetworkEncaps(object):
80 """Network encapsulation."""
# NOTE(review): sampled excerpt -- original line numbers prefix each line and
# several lines are elided (e.g. 89, 91-93, 95, 97, 99-100, 102, 104-105, 108),
# including the assignment of self.comp and self.reuse and the delete() def line.
83 class ChainFlavor(object):
84 """Class to manage the chain flavor."""
86 def __init__(self, flavor_name, flavor_dict, comp):
87 """Create a flavor."""
88 self.name = flavor_name
# Try to reuse an existing flavor with the same name first
# (the self.comp = comp assignment, orig line 89, is elided).
90 self.flavor = self.comp.find_flavor(flavor_name)
94 LOG.info("Reused flavor '%s'", flavor_name)
# Otherwise create the flavor; 'extra_specs' is removed from flavor_dict and
# applied separately via set_keys() after creation.
96 extra_specs = flavor_dict.pop('extra_specs', None)
98 self.flavor = comp.create_flavor(flavor_name,
101 LOG.info("Created flavor '%s'", flavor_name)
103 self.flavor.set_keys(extra_specs)
# delete() (def line, orig 105, elided): only removes the flavor when it was
# created by this run, never when it was reused.
106 """Delete this flavor."""
107 if not self.reuse and self.flavor:
109 LOG.info("Flavor '%s' deleted", self.name)
# NOTE(review): sampled excerpt -- original line numbers prefix each line and
# several lines are elided (e.g. the 'continue' lines 141/143, the early
# 'return' around 147, the port body dict start 152-154, and the def lines
# for get_mac() and delete()).
112 class ChainVnfPort(object):
113 """A port associated to one VNF in the chain."""
115 def __init__(self, name, vnf, chain_network, vnic_type):
116 """Create or reuse a port on a given network.
118 if vnf.instance is None the VNF instance is not reused and this ChainVnfPort instance must
120 Otherwise vnf.instance is a reused VNF instance and this ChainVnfPort instance must
121 find an existing port to reuse that matches the port requirements: same attached network,
122 instance, name, vnic type
124 name: name for this port
125 vnf: ChainVnf instance that owns this port
126 chain_network: ChainNetwork instance where this port should attach
127 vnic_type: required vnic type for this port
131 self.manager = vnf.manager
135 # VNF instance is reused, we need to find an existing port that matches this instance
137 # discover ports attached to this instance
138 port_list = self.manager.get_ports_from_network(chain_network)
# A reusable port must match on name, vnic type and owning instance
# (the skip/continue statements for the two mismatch cases are elided).
139 for port in port_list:
140 if port['name'] != name:
142 if port['binding:vnic_type'] != vnic_type:
144 if port['device_id'] == vnf.get_uuid():
146 LOG.info('Reusing existing port %s mac=%s', name, port['mac_address'])
# No existing port matched: reuse is impossible, abort.
149 raise ChainException('Cannot find matching port')
151 # VNF instance is not created yet, we need to create a new port
# 'body' is the Neutron create_port request payload (its opening lines,
# orig 152-154, are elided; visible keys below).
155 'network_id': chain_network.get_uuid(),
156 'binding:vnic_type': vnic_type
159 port = self.manager.neutron_client.create_port(body)
160 self.port = port['port']
161 LOG.info('Created port %s', name)
# Best effort: disable port security/security groups on the new port.
# NOTE(review): the paired success/failure log lines (169/171) suggest this
# update is wrapped in a try/except whose lines are elided -- confirm.
163 self.manager.neutron_client.update_port(self.port['id'], {
165 'security_groups': [],
166 'port_security_enabled': False,
169 LOG.info('Security disabled on port %s', name)
171 LOG.info('Failed to disable security on port %s (ignored)', name)
174 """Get the MAC address for this port."""
175 return self.port['mac_address']
# delete() (def line elided): no-op for reused ports; otherwise retries the
# Neutron delete up to config.generic_retry_count times, sleeping
# config.generic_poll_sec between attempts.
178 """Delete this port instance."""
179 if self.reuse or not self.port:
182 while retry < self.manager.config.generic_retry_count:
184 self.manager.neutron_client.delete_port(self.port['id'])
185 LOG.info("Deleted port %s", self.name)
189 time.sleep(self.manager.config.generic_poll_sec)
190 LOG.error('Unable to delete port: %s', self.name)
# NOTE(review): sampled excerpt -- original line numbers prefix each line and
# several lines are elided (e.g. the try/except around _setup, the 'network'
# create body opening lines 272-274, and the def lines for get_uuid/get_vlan/
# get_vxlan/delete).
193 class ChainNetwork(object):
194 """Could be a shared network across all chains or a chain private network."""
196 def __init__(self, manager, network_config, chain_id=None, lookup_only=False):
197 """Create a network for given chain.
199 network_config: a dict containing the network properties
200 (segmentation_id and physical_network)
201 chain_id: to which chain the networks belong.
202 a None value will mean that these networks are shared by all chains
204 self.manager = manager
205 self.name = network_config.name
# segmentation_id/physical_network may be per-chain lists; _get_item picks
# the right element for this chain_id.
206 self.segmentation_id = self._get_item(network_config.segmentation_id, chain_id)
207 self.physical_network = self._get_item(network_config.physical_network, chain_id)
# Per-chain (non-shared) networks get the chain id appended to their name.
208 if chain_id is not None:
209 self.name += str(chain_id)
# NOTE(review): the paired error logs at 217/219 suggest _setup is wrapped in
# a try/except whose lines are elided -- confirm against the full source.
214 self._setup(network_config, lookup_only)
217 LOG.error("Cannot find network %s", self.name)
219 LOG.error("Error creating network %s", self.name)
223 def _get_item(self, item_field, index):
224 """Retrieve an item from a list or a single value.
226 item_field: can be None, a tuple of a single value
227 index: if None is same as 0, else is the index in the list
229 If the item_field is not a tuple, it is considered same as a tuple with same value at any
231 If a list is provided, its length must be > index
237 if isinstance(item_field, tuple):
238 return item_field[index]
241 def _setup(self, network_config, lookup_only):
242 # Lookup if there is a matching network with same name
243 networks = self.manager.neutron_client.list_networks(name=self.name)
244 if networks['networks']:
245 network = networks['networks'][0]
246 # a network of same name already exists, we need to verify it has the same
# Reusing a network with a different segmentation id or physical network
# than requested would silently benchmark the wrong data path: fail fast.
248 if self.segmentation_id:
249 if network['provider:segmentation_id'] != self.segmentation_id:
250 raise ChainException("Mismatch of 'segmentation_id' for reused "
251 "network '{net}'. Network has id '{seg_id1}', "
252 "configuration requires '{seg_id2}'."
253 .format(net=self.name,
254 seg_id1=network['provider:segmentation_id'],
255 seg_id2=self.segmentation_id))
257 if self.physical_network:
258 if network['provider:physical_network'] != self.physical_network:
259 raise ChainException("Mismatch of 'physical_network' for reused "
260 "network '{net}'. Network has '{phys1}', "
261 "configuration requires '{phys2}'."
262 .format(net=self.name,
263 phys1=network['provider:physical_network'],
264 phys2=self.physical_network))
266 LOG.info('Reusing existing network %s', self.name)
268 self.network = network
# lookup_only mode (used for EXT chains) must never create networks.
271 raise ChainException('Network %s not found' % self.name)
# Create the network; 'body' opening lines (orig 272-274) are elided.
# Provider attributes are only set when configured.
275 'admin_state_up': True
278 if network_config.network_type:
279 body['network']['provider:network_type'] = network_config.network_type
280 if self.segmentation_id:
281 body['network']['provider:segmentation_id'] = self.segmentation_id
282 if self.physical_network:
283 body['network']['provider:physical_network'] = self.physical_network
285 self.network = self.manager.neutron_client.create_network(body)['network']
# Create a DHCP-less subnet on the new network (nfvbench VMs use static
# configuration injected via the VM config file).
287 'subnet': {'name': network_config.subnet,
288 'cidr': network_config.cidr,
289 'network_id': self.network['id'],
290 'enable_dhcp': False,
292 'dns_nameservers': []}
294 subnet = self.manager.neutron_client.create_subnet(body)['subnet']
295 # add subnet id to the network dict since it has just been added
296 self.network['subnets'] = [subnet['id']]
297 LOG.info('Created network: %s.', self.name)
# get_uuid() (def line elided):
301 Extract UUID of this network.
303 :return: UUID of this network
305 return self.network['id']
# get_vlan() (def line elided): only valid for vlan-type networks.
309 Extract vlan for this network.
311 :return: vlan ID for this network
313 if self.network['provider:network_type'] != 'vlan':
314 raise ChainException('Trying to retrieve VLAN id for non VLAN network')
315 return self.network['provider:segmentation_id']
# get_vxlan() (def line elided): only valid for vxlan-type networks.
319 Extract VNI for this network.
321 :return: VNI ID for this network
323 if self.network['provider:network_type'] != 'vxlan':
324 raise ChainException('Trying to retrieve VNI for non VXLAN network')
325 return self.network['provider:segmentation_id']
# delete() (def line elided): no-op for reused networks; otherwise retries
# the Neutron delete, sleeping config.generic_poll_sec between attempts.
328 """Delete this network."""
329 if not self.reuse and self.network:
331 while retry < self.manager.config.generic_retry_count:
333 self.manager.neutron_client.delete_network(self.network['id'])
334 LOG.info("Deleted network: %s", self.name)
338 LOG.info('Error deleting network %s (retry %d/%d)...',
341 self.manager.config.generic_retry_count)
342 time.sleep(self.manager.config.generic_poll_sec)
343 LOG.error('Unable to delete network: %s', self.name)
# NOTE(review): sampled excerpt -- original line numbers prefix each line and
# several lines are elided (e.g. the vm_config dict opening around 384, the
# reuse 'break' around 438-439, extra create_server arguments 461-466, and the
# def lines for get_uuid() around 537).
346 class ChainVnf(object):
347 """A class to represent a VNF in a chain."""
349 def __init__(self, chain, vnf_id, networks):
350 """Reuse a VNF instance with same characteristics or create a new VNF instance.
352 chain: the chain where this vnf belongs
353 vnf_id: indicates the index of this vnf in its chain (first vnf=0)
354 networks: the list of all networks (ChainNetwork) of the current chain
356 self.manager = chain.manager
# Instance names are derived from loop_vm_name + chain id, with a per-VNF
# suffix only in multi-VNF chains.
359 self.name = self.manager.config.loop_vm_name + str(chain.chain_id)
360 if len(networks) > 2:
361 # we will have more than 1 VM in each chain
362 self.name += '-' + str(vnf_id)
369 # the vnf_id is conveniently also the starting index in networks
370 # for the left and right networks associated to this VNF
371 self._setup(networks[vnf_id:vnf_id + 2])
373 LOG.error("Error creating VNF %s", self.name)
377 def _get_vm_config(self, remote_mac_pair):
# Render the boot-script template with this VNF's network parameters;
# the result becomes the guest's /etc config file.
378 config = self.manager.config
379 devices = self.manager.generator_config.devices
380 with open(BOOT_SCRIPT_PATHNAME, 'r') as boot_script:
381 content = boot_script.read()
382 g1cidr = devices[LEFT].get_gw_ip(self.chain.chain_id) + '/8'
383 g2cidr = devices[RIGHT].get_gw_ip(self.chain.chain_id) + '/8'
# vm_config dict opening line (orig 384) is elided.
385 'forwarder': config.vm_forwarder,
386 'intf_mac1': self.ports[LEFT].get_mac(),
387 'intf_mac2': self.ports[RIGHT].get_mac(),
388 'tg_gateway1_ip': devices[LEFT].tg_gateway_ip_addrs,
389 'tg_gateway2_ip': devices[RIGHT].tg_gateway_ip_addrs,
390 'tg_net1': devices[LEFT].ip_addrs,
391 'tg_net2': devices[RIGHT].ip_addrs,
392 'vnf_gateway1_cidr': g1cidr,
393 'vnf_gateway2_cidr': g2cidr,
394 'tg_mac1': remote_mac_pair[0],
395 'tg_mac2': remote_mac_pair[1]
397 return content.format(**vm_config)
399 def _get_vnic_type(self, port_index):
400 """Get the right vnic type for a given port index.
402 If SR-IOV is specified, middle ports in multi-VNF chains
403 can use vswitch or SR-IOV based on config.use_sriov_middle_net
405 if self.manager.config.sriov:
406 chain_length = self.chain.get_length()
# The return statements (e.g. 'direct' vs default vnic type) are elided;
# only the selection conditions are visible.
407 if self.manager.config.use_sriov_middle_net or chain_length == 1:
409 if self.vnf_id == 0 and port_index == 0:
410 # first VNF in chain must use sriov for left port
412 if (self.vnf_id == chain_length - 1) and (port_index == 1):
413 # last VNF in chain must use sriov for right port
417 def _setup(self, networks):
418 flavor_id = self.manager.flavor.flavor.id
419 # Check if we can reuse an instance with same name
420 for instance in self.manager.existing_instances:
421 if instance.name == self.name:
422 # Verify that other instance characteristics match
423 if instance.flavor['id'] != flavor_id:
424 self._reuse_exception('Flavor mismatch')
425 if instance.status != "ACTIVE":
426 self._reuse_exception('Matching instance is not in ACTIVE state')
427 # The 2 networks for this instance must also be reused
428 if not networks[LEFT].reuse:
429 self._reuse_exception('network %s is new' % networks[LEFT].name)
430 if not networks[RIGHT].reuse:
431 self._reuse_exception('network %s is new' % networks[RIGHT].name)
432 # instance.networks have the network names as keys:
433 # {'nfvbench-rnet0': ['192.168.2.10'], 'nfvbench-lnet0': ['192.168.1.8']}
434 if networks[LEFT].name not in instance.networks:
435 self._reuse_exception('Left network mismatch')
436 if networks[RIGHT].name not in instance.networks:
437 self._reuse_exception('Right network mismatch')
440 self.instance = instance
441 LOG.info('Reusing existing instance %s on %s',
442 self.name, self.get_hypervisor_name())
443 # create or reuse/discover 2 ports per instance
444 self.ports = [ChainVnfPort(self.name + '-' + str(index),
447 self._get_vnic_type(index)) for index in [0, 1]]
448 # if no reuse, actual vm creation is deferred after all ports in the chain are created
449 # since we need to know the next mac in a multi-vnf chain
451 def create_vnf(self, remote_mac_pair):
452 """Create the VNF instance if it does not already exist."""
453 if self.instance is None:
454 port_ids = [{'port-id': vnf_port.port['id']}
455 for vnf_port in self.ports]
456 vm_config = self._get_vm_config(remote_mac_pair)
457 az = self.manager.placer.get_required_az()
458 server = self.manager.comp.create_server(self.name,
459 self.manager.image_instance,
460 self.manager.flavor.flavor,
467 files={NFVBENCH_CFG_VM_PATHNAME: vm_config})
469 self.instance = server
470 if self.manager.placer.is_resolved():
471 LOG.info('Created instance %s on %s', self.name, az)
473 # the location is undetermined at this point
474 # self.get_hypervisor_name() will return None
475 LOG.info('Created instance %s - waiting for placement resolution...', self.name)
476 # here we MUST wait until this instance is resolved otherwise subsequent
477 # VNF creation can be placed in other hypervisors!
478 config = self.manager.config
# NOTE(review): '/' yields a float under Python 3, which would make
# range(max_retries) below raise TypeError -- fine under Python 2 (which the
# 'from specs import ChainType' import style suggests); use '//' if porting.
479 max_retries = (config.check_traffic_time_sec +
480 config.generic_poll_sec - 1) / config.generic_poll_sec
482 for retry in range(max_retries):
483 status = self.get_status()
484 if status == 'ACTIVE':
485 hyp_name = self.get_hypervisor_name()
486 LOG.info('Instance %s is active and has been placed on %s',
# Register the resolved placement so all subsequent VNFs land on the
# same hypervisor.
488 self.manager.placer.register_full_name(hyp_name)
490 if status == 'ERROR':
491 raise ChainException('Instance %s creation error: %s' %
493 self.instance.fault['message']))
494 LOG.info('Waiting for instance %s to become active (retry %d/%d)...',
495 self.name, retry + 1, max_retries + 1)
496 time.sleep(config.generic_poll_sec)
499 LOG.error('Instance %s creation timed out', self.name)
500 raise ChainException('Instance %s creation timed out' % self.name)
503 raise ChainException('Unable to create instance: %s' % (self.name))
505 def _reuse_exception(self, reason):
# Helper: abort with a uniform "cannot be reused" error message.
506 raise ChainException('Instance %s cannot be reused (%s)' % (self.name, reason))
508 def get_status(self):
509 """Get the status of this instance."""
# Only poll Nova when the cached status is not yet ACTIVE.
510 if self.instance.status != 'ACTIVE':
511 self.instance = self.manager.comp.poll_server(self.instance)
512 return self.instance.status
514 def get_hostname(self):
515 """Get the hypervisor host name running this VNF instance."""
516 return getattr(self.instance, 'OS-EXT-SRV-ATTR:hypervisor_hostname')
518 def get_host_ip(self):
519 """Get the IP address of the host where this instance runs.
521 return: the IP address
# NOTE(review): lines around 522-525 are elided; visibly this caches the
# result in self.host_ip -- the guard/return lines are not shown.
524 self.host_ip = self.manager.comp.get_hypervisor(self.get_hostname()).host_ip
527 def get_hypervisor_name(self):
528 """Get hypervisor name (az:hostname) for this VNF instance."""
530 az = getattr(self.instance, 'OS-EXT-AZ:availability_zone')
531 hostname = self.get_hostname()
533 return az + ':' + hostname
# get_uuid() (def line, orig ~537, elided):
538 """Get the uuid for this instance."""
539 return self.instance.id
541 def delete(self, forced=False):
542 """Delete this VNF instance."""
# Reused instances are left untouched unless deletion is forced
# (the condition line around 543 is elided).
544 LOG.info("Instance %s not deleted (reused)", self.name)
547 self.manager.comp.delete_server(self.instance)
548 LOG.info("Deleted instance %s", self.name)
# Also delete this VNF's ports (loop body elided).
549 for port in self.ports:
# NOTE(review): sampled excerpt -- the 'class Chain...' statement itself
# (orig ~552) is elided; this span is the class docstring and body. Original
# line numbers prefix each line and several lines are elided.
553 """A class to manage a single chain.
555 Can handle any type of chain (EXT, PVP, PVVP)
558 def __init__(self, chain_id, manager):
559 """Create a new chain.
561 chain_id: chain index (first chain is 0)
562 manager: the chain manager that owns all chains
564 self.chain_id = chain_id
565 self.manager = manager
566 self.encaps = manager.encaps
570 self.networks = manager.get_networks(chain_id)
571 # For external chain VNFs can only be discovered from their MAC addresses
572 # either from config or from ARP
573 if manager.config.service_chain != ChainType.EXT:
574 for chain_instance_index in range(self.get_length()):
575 self.instances.append(ChainVnf(self,
576 chain_instance_index,
578 # at this point new VNFs are not created yet but
579 # verify that all discovered VNFs are on the same hypervisor
580 self._check_hypervisors()
581 # now that all VNF ports are created we need to calculate the
582 # left/right remote MAC for each VNF in the chain
583 # before actually creating the VNF itself
584 rem_mac_pairs = self._get_remote_mac_pairs()
585 for instance in self.instances:
586 rem_mac_pair = rem_mac_pairs.pop(0)
587 instance.create_vnf(rem_mac_pair)
592 def _check_hypervisors(self):
# All VNFs of one chain must land on the same compute node; fail fast if
# discovered instances disagree.
593 common_hypervisor = None
594 for instance in self.instances:
595 # get the full hypervisor name (az:compute)
596 hname = instance.get_hypervisor_name()
598 if common_hypervisor:
599 if hname != common_hypervisor:
600 raise ChainException('Discovered instances on different hypervisors:'
601 ' %s and %s' % (hname, common_hypervisor))
603 common_hypervisor = hname
604 if common_hypervisor:
605 # check that the common hypervisor name matches the requested hypervisor name
606 # and set the name to be used by all future instances (if any)
607 if not self.manager.placer.register_full_name(common_hypervisor):
608 raise ChainException('Discovered hypervisor placement %s is incompatible' %
611 def get_length(self):
612 """Get the number of VNF in the chain."""
# A chain with N networks hosts N-1 VNFs (each VNF bridges 2 networks).
613 return len(self.networks) - 1
615 def _get_remote_mac_pairs(self):
616 """Get the list of remote mac pairs for every VNF in the chain.
618 Traverse the chain from left to right and establish the
619 left/right remote MAC for each VNF in the chain.
622 mac sequence: tg_src_mac, vm0-mac0, vm0-mac1, tg_dst_mac
623 must produce [[tg_src_mac, tg_dst_mac]] or looking at index in mac sequence: [[0, 3]]
624 the mac pair is what the VNF at that position (index 0) sees as next hop mac left and right
627 tg_src_mac, vm0-mac0, vm0-mac1, vm1-mac0, vm1-mac1, tg_dst_mac
628 Must produce the following list:
629 [[tg_src_mac, vm1-mac0], [vm0-mac1, tg_dst_mac]] or index: [[0, 3], [2, 5]]
631 General case with 3 VMs in chain, the list of consecutive macs (left to right):
632 tg_src_mac, vm0-mac0, vm0-mac1, vm1-mac0, vm1-mac1, vm2-mac0, vm2-mac1, tg_dst_mac
633 Must produce the following list:
634 [[tg_src_mac, vm1-mac0], [vm0-mac1, vm2-mac0], [vm1-mac1, tg_dst_mac]]
635 or index: [[0, 3], [2, 5], [4, 7]]
637 The series pattern is pretty clear: [[n, n+3],... ] where n is multiple of 2
639 # line up all mac from left to right
640 mac_seq = [self.manager.generator_config.devices[LEFT].mac]
641 for instance in self.instances:
642 mac_seq.append(instance.ports[0].get_mac())
643 mac_seq.append(instance.ports[1].get_mac())
644 mac_seq.append(self.manager.generator_config.devices[RIGHT].mac)
# Build the [[n, n+3], ...] pairs; the 'base' init/increment and return
# lines (orig 645-646, 649-650) are elided.
647 for _ in self.instances:
648 rem_mac_pairs.append([mac_seq[base], mac_seq[base + 3]])
652 def get_instances(self):
653 """Return all instances for this chain."""
654 return self.instances
656 def get_vlan(self, port_index):
657 """Get the VLAN id on a given port.
659 port_index: left port is 0, right port is 1
660 return: the vlan_id or None if there is no vlan tagging
662 # for port 1 we need to return the VLAN of the last network in the chain
663 # The networks array contains 2 networks for PVP [left, right]
664 # and 3 networks in the case of PVVP [left.middle,right]
666 # this will pick the last item in array
# NOTE(review): the port_index remap (e.g. 1 -> -1, orig ~665-667) is elided.
668 return self.networks[port_index].get_vlan()
670 def get_vxlan(self, port_index):
671 """Get the VXLAN id on a given port.
673 port_index: left port is 0, right port is 1
674 return: the vxlan_id or None if there is no vxlan
676 # for port 1 we need to return the VLAN of the last network in the chain
677 # The networks array contains 2 networks for PVP [left, right]
678 # and 3 networks in the case of PVVP [left.middle,right]
680 # this will pick the last item in array
682 return self.networks[port_index].get_vxlan()
684 def get_dest_mac(self, port_index):
685 """Get the dest MAC on a given port.
687 port_index: left port is 0, right port is 1
# The branch condition (orig ~690) selecting right vs left is elided.
691 # for right port, use the right port MAC of the last (right most) VNF In chain
692 return self.instances[-1].ports[1].get_mac()
693 # for left port use the left port MAC of the first (left most) VNF in chain
694 return self.instances[0].ports[0].get_mac()
696 def get_network_uuids(self):
697 """Get UUID of networks in this chain from left to right (order is important).
699 :return: list of UUIDs of networks (2 or 3 elements)
# NOTE(review): self.networks holds ChainNetwork objects (see __init__),
# which do not visibly support ['id'] indexing; ChainNetwork exposes
# get_uuid() instead -- likely should be net.get_uuid(); confirm.
701 return [net['id'] for net in self.networks]
703 def get_host_ips(self):
704 """Return the IP address(es) of the host compute nodes used for this chain.
706 :return: a list of 1 or 2 IP addresses
708 return [vnf.get_host_ip() for vnf in self.instances]
710 def get_compute_nodes(self):
711 """Return the name of the host compute nodes used for this chain.
713 :return: a list of 1 host name in the az:host format
715 # Since all chains go through the same compute node(s) we can just retrieve the
716 # compute node name(s) for the first chain
717 return [vnf.get_hypervisor_name() for vnf in self.instances]
# delete() (def line elided): delete all VNF instances, then the chain's
# private networks when networking is not shared across chains.
720 """Delete this chain."""
721 for instance in self.instances:
723 # only delete if these are chain private networks (not shared)
724 if not self.manager.config.service_chain_shared_net:
725 for network in self.networks:
# NOTE(review): sampled excerpt -- original line numbers prefix each line and
# several lines are elided (e.g. the list-unwrap of req_hyp around 749, the
# 'if req_hyp:' guards around 753/765, the early returns in
# register_full_name around 784/790/792/794/796-798, and is_resolved's return).
729 class InstancePlacer(object):
730 """A class to manage instance placement for all VNFs in all chains.
732 A full az string is made of 2 parts AZ and hypervisor.
733 The placement is resolved when both parts az and hypervisor names are known.
736 def __init__(self, req_az, req_hyp):
737 """Create a new instance placer.
739 req_az: requested AZ (can be None or empty if no preference)
740 req_hyp: requested hypervisor name (can be None or empty if no preference)
741 can be any of 'nova:', 'comp1', 'nova:comp1'
742 if it is a list, only the first item is used (backward compatibility in config)
744 req_az is ignored if req_hyp has an az part
745 all other parts beyond the first 2 are ignored in req_hyp
747 # if passed a list just pick the first item
748 if req_hyp and isinstance(req_hyp, list):
750 # only pick first part of az
751 if req_az and ':' in req_az:
752 req_az = req_az.split(':')[0]
754 # check if requested hypervisor string has an AZ part
755 split_hyp = req_hyp.split(':')
756 if len(split_hyp) > 1:
757 # override the AZ part and hypervisor part
758 req_az = split_hyp[0]
759 req_hyp = split_hyp[1]
# Normalize None values to empty strings for comparisons below.
760 self.requested_az = req_az if req_az else ''
761 self.requested_hyp = req_hyp if req_hyp else ''
762 # Nova can accept AZ only (e.g. 'nova:', use any hypervisor in that AZ)
763 # or hypervisor only (e.g. ':comp1')
764 # or both (e.g. 'nova:comp1')
766 self.required_az = req_az + ':' + self.requested_hyp
768 # need to insert a ':' so nova knows this is the hypervisor name
769 self.required_az = ':' + self.requested_hyp if req_hyp else ''
770 # placement is resolved when both AZ and hypervisor names are known and set
771 self.resolved = self.requested_az != '' and self.requested_hyp != ''
773 def get_required_az(self):
774 """Return the required az (can be resolved or not)."""
775 return self.required_az
777 def register_full_name(self, discovered_az):
778 """Verify compatibility and register a discovered hypervisor full name.
780 discovered_az: a discovered AZ in az:hypervisor format
781 return: True if discovered_az is compatible and set
782 False if discovered_az is not compatible
# When already resolved, only an exact match is acceptable
# (the guarding 'if self.resolved:' line is elided).
785 return discovered_az == self.required_az
787 # must be in full az format
788 split_daz = discovered_az.split(':')
789 if len(split_daz) != 2:
# Reject if either part conflicts with an explicit request
# (the 'return False' lines are elided).
791 if self.requested_az and self.requested_az != split_daz[0]:
793 if self.requested_hyp and self.requested_hyp != split_daz[1]:
# Compatible: adopt the discovered placement as the required AZ.
795 self.required_az = discovered_az
799 def is_resolved(self):
800 """Check if the full AZ is resolved.
802 return: True if resolved
807 class ChainManager(object):
808 """A class for managing all chains for a given run.
810 Supports openstack or no openstack.
811 Supports EXT, PVP and PVVP chains.
814 def __init__(self, chain_runner):
815 """Create a chain manager to take care of discovering or bringing up the requested chains.
817 A new instance must be created every time a new config is used.
818 config: the nfvbench config to use
819 cred: openstack credentials to use of None if there is no openstack
821 self.chain_runner = chain_runner
822 self.config = chain_runner.config
823 self.generator_config = chain_runner.traffic_client.generator_config
825 self.image_instance = None
826 self.image_name = None
827 # Left and right networks shared across all chains (only if shared)
832 self.nova_client = None
833 self.neutron_client = None
834 self.glance_client = None
835 self.existing_instances = []
836 # existing ports keyed by the network uuid they belong to
837 self._existing_ports = {}
839 self.openstack = (chain_runner.cred is not None) and not config.l2_loopback
840 self.chain_count = config.service_chain_count
844 session = chain_runner.cred.get_session()
845 self.nova_client = Client(2, session=session)
846 self.neutron_client = neutronclient.Client('2.0', session=session)
847 self.glance_client = glanceclient.Client('2', session=session)
848 self.comp = compute.Compute(self.nova_client,
852 if config.service_chain != ChainType.EXT:
853 self.placer = InstancePlacer(config.availability_zone, config.compute_nodes)
855 self.flavor = ChainFlavor(config.flavor_type, config.flavor, self.comp)
856 # Get list of all existing instances to check if some instances can be reused
857 self.existing_instances = self.comp.get_server_list()
858 # If networks are shared across chains, get the list of networks
859 if config.service_chain_shared_net:
860 self.networks = self.get_networks()
861 # Reuse/create chains
862 for chain_id in range(self.chain_count):
863 self.chains.append(Chain(chain_id, self))
864 if config.service_chain == ChainType.EXT:
865 # if EXT and no ARP we need to read dest MACs from config
867 self._get_dest_macs_from_config()
869 # Make sure all instances are active before proceeding
870 self._ensure_instances_active()
875 # no openstack, no need to create chains
877 if not config.l2_loopback and config.no_arp:
878 self._get_dest_macs_from_config()
879 if config.vlan_tagging:
880 # make sure there at least as many entries as chains in each left/right list
881 if len(config.vlans) != 2:
882 raise ChainException('The config vlans property must be a list '
883 'with 2 lists of VLAN IDs')
885 self.vlans = [self._check_list('vlans[0]', config.vlans[0], re_vlan),
886 self._check_list('vlans[1]', config.vlans[1], re_vlan)]
888 # make sure there are 2 entries
889 if len(config.vnis) != 2:
890 raise ChainException('The config vnis property must be a list with 2 VNIs')
891 self.vnis = [self._check_list('vnis[0]', config.vnis[0], re_vlan),
892 self._check_list('vnis[1]', config.vnis[1], re_vlan)]
894 def _get_dest_macs_from_config(self):
895 re_mac = "[0-9a-fA-F]{2}([-:])[0-9a-fA-F]{2}(\\1[0-9a-fA-F]{2}){4}$"
896 tg_config = self.config.traffic_generator
897 self.dest_macs = [self._check_list("mac_addrs_left",
898 tg_config.mac_addrs_left, re_mac),
899 self._check_list("mac_addrs_right",
900 tg_config.mac_addrs_right, re_mac)]
902 def _check_list(self, list_name, ll, pattern):
903 # if it is a single int or mac, make it a list of 1 int
904 if isinstance(ll, (int, str)):
906 if not ll or len(ll) < self.chain_count:
907 raise ChainException('%s=%s must be a list with %d elements per chain' %
908 (list_name, ll, self.chain_count))
910 if not re.match(pattern, str(item)):
911 raise ChainException("Invalid format '{item}' specified in {fname}"
912 .format(item=item, fname=list_name))
915 def _setup_image(self):
916 # To avoid reuploading image in server mode, check whether image_name is set or not
918 self.image_instance = self.comp.find_image(self.image_name)
919 if self.image_instance:
920 LOG.info("Reusing image %s", self.image_name)
922 image_name_search_pattern = r'(nfvbenchvm-\d+(\.\d+)*).qcow2'
923 if self.config.vm_image_file:
924 match = re.search(image_name_search_pattern, self.config.vm_image_file)
926 self.image_name = match.group(1)
927 LOG.info('Using provided VM image file %s', self.config.vm_image_file)
929 raise ChainException('Provided VM image file name %s must start with '
930 '"nfvbenchvm-<version>"' % self.config.vm_image_file)
932 pkg_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
933 for f in os.listdir(pkg_root):
934 if re.search(image_name_search_pattern, f):
935 self.config.vm_image_file = pkg_root + '/' + f
936 self.image_name = f.replace('.qcow2', '')
937 LOG.info('Found built-in VM image file %s', f)
940 raise ChainException('Cannot find any built-in VM image file.')
942 self.image_instance = self.comp.find_image(self.image_name)
943 if not self.image_instance:
944 LOG.info('Uploading %s', self.image_name)
945 res = self.comp.upload_image_via_url(self.image_name,
946 self.config.vm_image_file)
949 raise ChainException('Error uploading image %s from %s. ABORTING.' %
950 (self.image_name, self.config.vm_image_file))
951 LOG.info('Image %s successfully uploaded.', self.image_name)
952 self.image_instance = self.comp.find_image(self.image_name)
954 def _ensure_instances_active(self):
956 for chain in self.chains:
957 instances.extend(chain.get_instances())
958 initial_instance_count = len(instances)
959 max_retries = (self.config.check_traffic_time_sec +
960 self.config.generic_poll_sec - 1) / self.config.generic_poll_sec
963 remaining_instances = []
964 for instance in instances:
965 status = instance.get_status()
966 if status == 'ACTIVE':
967 LOG.info('Instance %s is ACTIVE on %s',
968 instance.name, instance.get_hypervisor_name())
970 if status == 'ERROR':
971 raise ChainException('Instance %s creation error: %s' %
973 instance.instance.fault['message']))
974 remaining_instances.append(instance)
975 if not remaining_instances:
978 if retry >= max_retries:
979 raise ChainException('Time-out: %d/%d instances still not active' %
980 (len(remaining_instances), initial_instance_count))
981 LOG.info('Waiting for %d/%d instance to become active (retry %d/%d)...',
982 len(remaining_instances), initial_instance_count,
984 instances = remaining_instances
985 time.sleep(self.config.generic_poll_sec)
986 if initial_instance_count:
987 LOG.info('All instances are active')
    def _get_vxlan_net_cfg(self, chain_id):
        """Build the VxLAN network configs for one chain with per-chain VNIs.

        chain_id: index of the chain, used to derive unique segmentation IDs.
        return: list of internal network configs (left/[middle]/right) with
                'segmentation_id' filled in from the allowed VNI range.
        """
        # NOTE(review): source line numbers skip in this extract; the PVVP
        # branch header and the final return are not visible.
        int_nets = self.config.internal_networks
        net_left = int_nets.left
        net_right = int_nets.right
        # vnis is the allowed VNI range [first, last] from the generator config
        vnis = self.generator_config.vnis
        seg_id_left = vnis[0]
        if self.config.service_chain == ChainType.PVP:
            # PVP uses 2 networks per chain -> stride of 2 VNIs per chain
            seg_id_left = ((chain_id - 1) * 2) + seg_id_left
            seg_id_right = seg_id_left + 1
            # NOTE(review): "(a and b) > x" evaluates as "b > x" when a is
            # truthy (Python `and` returns an operand, not a bool) -- this
            # likely should be "seg_id_left > vnis[1] or seg_id_right > vnis[1]".
            if (seg_id_left and seg_id_right) > vnis[1]:
                raise Exception('Segmentation ID is more than allowed '
                                'value: {}'.format(vnis[1]))
            net_left['segmentation_id'] = seg_id_left
            net_right['segmentation_id'] = seg_id_right
            net_cfg = [net_left, net_right]
        # (presumably the PVVP branch -- header line elided)
        net_middle = int_nets.middle
        # PVVP uses 3 networks per chain -> stride of 3 VNIs per chain
        seg_id_left = ((chain_id - 1) * 3) + seg_id_left
        seg_id_middle = seg_id_left + 1
        seg_id_right = seg_id_left + 2
        # NOTE(review): same short-circuit issue as the PVP branch above.
        if (seg_id_left and seg_id_right and seg_id_middle) > vnis[1]:
            raise Exception('Segmentation ID is more than allowed '
                            'value: {}'.format(vnis[1]))
        net_left['segmentation_id'] = seg_id_left
        net_middle['segmentation_id'] = seg_id_middle
        net_right['segmentation_id'] = seg_id_right
        net_cfg = [net_left, net_middle, net_right]
    def get_networks(self, chain_id=None):
        """Get the networks for given EXT, PVP or PVVP chain.

        For EXT packet path, these networks must pre-exist.
        For PVP, PVVP these networks will be created if they do not exist.
        chain_id: to which chain the networks belong.
                  a None value will mean that these networks are shared by all chains
        """
        # NOTE(review): source line numbers skip in this extract; the guards
        # around the early return, the ChainNetwork creation loop and the
        # error-cleanup/return paths are not fully visible.
        # the only case where self.networks exists is when the networks are shared
        return self.networks
        if self.config.service_chain == ChainType.EXT:
            # EXT networks are pre-existing: only their names are known,
            # segmentation/physnet are discovered, not configured
            ext_net = self.config.external_networks
            net_cfg = [AttrDict({'name': name,
                                 'segmentation_id': None,
                                 'physical_network': None})
                       for name in [ext_net.left, ext_net.right]]
        int_nets = self.config.internal_networks
        # collect the set of network types declared across all internal networks
        network_type = set([int_nets[net].get('network_type') for net in int_nets])
        if self.config.vxlan and 'vxlan' in network_type:
            # VxLAN: per-chain VNIs must be derived
            net_cfg = self._get_vxlan_net_cfg(chain_id)
        if self.config.service_chain == ChainType.PVP:
            net_cfg = [int_nets.left, int_nets.right]
        net_cfg = [int_nets.left, int_nets.middle, int_nets.right]
        networks.append(ChainNetwork(self, cfg, chain_id, lookup_only=lookup_only))
        # need to cleanup all successful networks prior to bailing out
        for net in networks:
1064 def get_existing_ports(self):
1065 """Get the list of existing ports.
1067 Lazy retrieval of ports as this can be costly if there are lots of ports and
1068 is only needed when VM and network are being reused.
1070 return: a dict of list of neutron ports indexed by the network uuid they are attached to
1072 Each port is a dict with fields such as below:
1073 {'allowed_address_pairs': [], 'extra_dhcp_opts': [],
1074 'updated_at': '2018-10-06T07:15:35Z', 'device_owner': 'compute:nova',
1075 'revision_number': 10, 'port_security_enabled': False, 'binding:profile': {},
1076 'fixed_ips': [{'subnet_id': '6903a3b3-49a1-4ba4-8259-4a90e7a44b21',
1077 'ip_address': '192.168.1.4'}], 'id': '3dcb9cfa-d82a-4dd1-85a1-fd8284b52d72',
1078 'security_groups': [],
1079 'binding:vif_details': {'vhostuser_socket': '/tmp/3dcb9cfa-d82a-4dd1-85a1-fd8284b52d72',
1080 'vhostuser_mode': 'server'},
1081 'binding:vif_type': 'vhostuser',
1082 'mac_address': 'fa:16:3e:3c:63:04',
1083 'project_id': '977ac76a63d7492f927fa80e86baff4c',
1085 'binding:host_id': 'a20-champagne-compute-1',
1087 'device_id': 'a98e2ad2-5371-4aa5-a356-8264a970ce4b',
1088 'name': 'nfvbench-loop-vm0-0', 'admin_state_up': True,
1089 'network_id': '3ea5fd88-278f-4d9d-b24d-1e443791a055',
1090 'tenant_id': '977ac76a63d7492f927fa80e86baff4c',
1091 'created_at': '2018-10-06T07:15:10Z',
1092 'binding:vnic_type': 'normal'}
1094 if not self._existing_ports:
1095 LOG.info('Loading list of all ports...')
1096 existing_ports = self.neutron_client.list_ports()['ports']
1097 # place all ports in the dict keyed by the port network uuid
1098 for port in existing_ports:
1099 port_list = self._existing_ports.setdefault(port['network_id'], [])
1100 port_list.append(port)
1101 LOG.info("Loaded %d ports attached to %d networks",
1102 len(existing_ports), len(self._existing_ports))
1103 return self._existing_ports
1105 def get_ports_from_network(self, chain_network):
1106 """Get the list of existing ports that belong to a network.
1108 Lazy retrieval of ports as this can be costly if there are lots of ports and
1109 is only needed when VM and network are being reused.
1111 chain_network: a ChainNetwork instance for which attached ports neeed to be retrieved
1112 return: list of neutron ports attached to requested network
1114 return self.get_existing_ports().get(chain_network.get_uuid(), None)
    def get_host_ip_from_mac(self, mac):
        """Get the host IP address matching a MAC.

        mac: MAC address to look for
        return: the IP address of the host where the matching port runs or None if not found
        """
        # NOTE(review): source line numbers skip in this extract; a line just
        # above the mac comparison (presumably a try: guard) and the final
        # not-found return path are not visible.
        # _existing_ports is a dict of list of ports indexed by network id
        for port_list in self.get_existing_ports().values():
            for port in port_list:
                if port['mac_address'] == mac:
                    # resolve the hosting hypervisor name to its IP address
                    host_id = port['binding:host_id']
                    return self.comp.get_hypervisor(host_id).host_ip
    def get_chain_vlans(self, port_index):
        """Get the list of per chain VLAN id on a given port.

        port_index: left port is 0, right port is 1
        return: a VLAN ID list indexed by the chain index or None if no vlan tagging
        """
        # NOTE(review): the guard line selecting between the two returns is
        # elided in this extract (source line numbers skip).
        return [self.chains[chain_index].get_vlan(port_index)
                for chain_index in range(self.chain_count)]
        # no-openstack path: use the vlans from the generator config
        return self.vlans[port_index]
    def get_chain_vxlans(self, port_index):
        """Get the list of per chain VNIs id on a given port.

        port_index: left port is 0, right port is 1
        return: a VNIs ID list indexed by the chain index or None if no vlan tagging
        """
        # NOTE(review): the guard line selecting between the two returns is
        # elided in this extract (source line numbers skip).
        return [self.chains[chain_index].get_vxlan(port_index)
                for chain_index in range(self.chain_count)]
        # no-openstack path: use the VNIs from the generator config
        return self.vnis[port_index]
1157 def get_dest_macs(self, port_index):
1158 """Get the list of per chain dest MACs on a given port.
1160 Should not be called if EXT+ARP is used (in that case the traffic gen will
1161 have the ARP responses back from VNFs with the dest MAC to use).
1163 port_index: left port is 0, right port is 1
1164 return: a list of dest MACs indexed by the chain index
1166 if self.chains and self.config.service_chain != ChainType.EXT:
1167 return [self.chains[chain_index].get_dest_mac(port_index)
1168 for chain_index in range(self.chain_count)]
1169 # no openstack or EXT+no-arp
1170 return self.dest_macs[port_index]
    def get_host_ips(self):
        """Return the IP adresss(es) of the host compute nodes used for this run.

        :return: a list of 1 IP address
        """
        # NOTE(review): source line numbers skip in this extract; the final
        # lines after the LOG call (presumably returning [host_ip] / handling
        # a None lookup) are not visible.
        # Since all chains go through the same compute node(s) we can just retrieve the
        # compute node(s) for the first chain
        if self.config.service_chain != ChainType.EXT:
            return self.chains[0].get_host_ips()
        # in the case of EXT, the compute node must be retrieved from the port
        # associated to any of the dest MACs
        dst_macs = self.generator_config.get_dest_macs()
        # dest MAC on port 0, chain 0
        dst_mac = dst_macs[0][0]
        host_ip = self.get_host_ip_from_mac(dst_mac)
        LOG.info('Found compute node IP for EXT chain: %s', host_ip)
    def get_compute_nodes(self):
        """Return the name of the host compute nodes used for this run.

        :return: a list of 0 or 1 host name in the az:host format
        """
        # NOTE(review): source line numbers skip in this extract; the guard
        # before the return (presumably "if self.chains:") and the empty-list
        # fallback return are not visible.
        # Since all chains go through the same compute node(s) we can just retrieve the
        # compute node name(s) for the first chain
        # in the case of EXT, the compute node must be retrieved from the port
        # associated to any of the dest MACs
        return self.chains[0].get_compute_nodes()
        # no openstack = no chains
        """Delete resources for all chains."""
        # NOTE(review): the "def" line of this method and the bodies of both
        # loops (presumably chain.delete() / network.delete()) are elided in
        # this extract (source line numbers skip).
        for chain in self.chains:
        for network in self.networks:
        # flavor is deleted last, after all instances using it are gone
        self.flavor.delete()