# Copyright 2018 Cisco Systems, Inc.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# This module takes care of chaining networks, ports and vms
"""NFVBENCH CHAIN DISCOVERY/STAGING.

This module takes care of staging/discovering all resources that are participating in a
benchmarking session: flavors, networks, ports, VNF instances.
If a resource is discovered with the same name, it will be reused.
Otherwise it will be created.

ChainManager: manages VM image, flavor, the staging discovery of all chains
              has 1 or more chains
Chain: manages one chain, has 2 or more networks and 1 or more instances
ChainNetwork: manages 1 network in a chain
ChainVnf: manages 1 VNF instance in a chain, has 2 ports
ChainVnfPort: manages 1 instance port

ChainManager-->Chain(*)
Chain-->ChainNetwork(*),ChainVnf(*)
ChainVnf-->ChainVnfPort(2)

Once created/discovered, instances are checked to be in the active state (ready to pass traffic)
Configuration parameters that will influence how these resources are staged/related:
- openstack or no openstack
- chain type
- number of chains
- number of VNF in each chain (PVP, PVVP)
- SRIOV and middle port SRIOV for port types
- whether networks are shared across chains or not

There is not traffic generation involved in this module.
"""
import os
import re
import time

from glanceclient.v2 import client as glanceclient
from neutronclient.neutron import client as neutronclient
from novaclient.client import Client

from attrdict import AttrDict

from .chain_router import ChainRouter
from . import compute
from .log import LOG
from .specs import ChainType
61 # Left and right index for network and port lists
64 # L3 traffic edge networks are at the end of networks list
67 # Name of the VM config file
68 NFVBENCH_CFG_FILENAME = 'nfvbenchvm.conf'
69 # full pathame of the VM config in the VM
70 NFVBENCH_CFG_VM_PATHNAME = os.path.join('/etc/', NFVBENCH_CFG_FILENAME)
71 # full path of the boot shell script template file on the server where nfvbench runs
72 BOOT_SCRIPT_PATHNAME = os.path.join(os.path.dirname(os.path.abspath(__file__)),
74 NFVBENCH_CFG_FILENAME)
77 class ChainException(Exception):
78 """Exception while operating the chains."""
80 class NetworkEncaps(object):
81 """Network encapsulation."""
84 class ChainFlavor(object):
85 """Class to manage the chain flavor."""
87 def __init__(self, flavor_name, flavor_dict, comp):
88 """Create a flavor."""
89 self.name = flavor_name
91 self.flavor = self.comp.find_flavor(flavor_name)
95 LOG.info("Reused flavor '%s'", flavor_name)
97 extra_specs = flavor_dict.pop('extra_specs', None)
99 self.flavor = comp.create_flavor(flavor_name,
102 LOG.info("Created flavor '%s'", flavor_name)
104 self.flavor.set_keys(extra_specs)
107 """Delete this flavor."""
108 if not self.reuse and self.flavor:
110 LOG.info("Flavor '%s' deleted", self.name)
113 class ChainVnfPort(object):
114 """A port associated to one VNF in the chain."""
116 def __init__(self, name, vnf, chain_network, vnic_type):
117 """Create or reuse a port on a given network.
119 if vnf.instance is None the VNF instance is not reused and this ChainVnfPort instance must
121 Otherwise vnf.instance is a reused VNF instance and this ChainVnfPort instance must
122 find an existing port to reuse that matches the port requirements: same attached network,
123 instance, name, vnic type
125 name: name for this port
126 vnf: ChainVNf instance that owns this port
127 chain_network: ChainNetwork instance where this port should attach
128 vnic_type: required vnic type for this port
132 self.manager = vnf.manager
135 self.floating_ip = None
137 # VNF instance is reused, we need to find an existing port that matches this instance
139 # discover ports attached to this instance
140 port_list = self.manager.get_ports_from_network(chain_network)
141 for port in port_list:
142 if port['name'] != name:
144 if port['binding:vnic_type'] != vnic_type:
146 if port['device_id'] == vnf.get_uuid():
148 LOG.info('Reusing existing port %s mac=%s', name, port['mac_address'])
151 raise ChainException('Cannot find matching port')
153 # VNF instance is not created yet, we need to create a new port
157 'network_id': chain_network.get_uuid(),
158 'binding:vnic_type': vnic_type
161 port = self.manager.neutron_client.create_port(body)
162 self.port = port['port']
163 LOG.info('Created port %s', name)
165 self.manager.neutron_client.update_port(self.port['id'], {
167 'security_groups': [],
168 'port_security_enabled': False,
171 LOG.info('Security disabled on port %s', name)
173 LOG.info('Failed to disable security on port %s (ignored)', name)
176 """Get the MAC address for this port."""
177 return self.port['mac_address']
180 """Get the IP address for this port."""
181 return self.port['fixed_ips'][0]['ip_address']
183 def set_floating_ip(self, chain_network):
184 # create and add floating ip to port
186 self.floating_ip = self.manager.neutron_client.create_floatingip({
188 'floating_network_id': chain_network.get_uuid(),
189 'port_id': self.port['id'],
190 'description': 'nfvbench floating ip for port:' + self.port['name'],
192 LOG.info('Floating IP %s created and associated on port %s',
193 self.floating_ip['floating_ip_address'], self.name)
194 return self.floating_ip['floating_ip_address']
196 LOG.info('Failed to created and associated floating ip on port %s (ignored)', self.name)
197 return self.port['fixed_ips'][0]['ip_address']
200 """Delete this port instance."""
201 if self.reuse or not self.port:
203 for _ in range(0, self.manager.config.generic_retry_count):
205 self.manager.neutron_client.delete_port(self.port['id'])
206 LOG.info("Deleted port %s", self.name)
208 self.manager.neutron_client.delete_floatingip(self.floating_ip['id'])
209 LOG.info("Deleted floating IP %s", self.floating_ip['description'])
212 time.sleep(self.manager.config.generic_poll_sec)
213 LOG.error('Unable to delete port: %s', self.name)
216 class ChainNetwork(object):
217 """Could be a shared network across all chains or a chain private network."""
219 def __init__(self, manager, network_config, chain_id=None, lookup_only=False,
221 """Create a network for given chain.
223 network_config: a dict containing the network properties
224 (name, segmentation_id and physical_network)
225 chain_id: to which chain the networks belong.
226 a None value will mean that these networks are shared by all chains
227 suffix: a suffix to add to the network name (if not None)
229 self.manager = manager
231 self.name = network_config.name
233 # the name itself can be either a string or a list of names indexed by chain ID
234 if isinstance(network_config.name, tuple):
235 self.name = network_config.name[chain_id]
237 # network_config.name is a prefix string
238 self.name = network_config.name + str(chain_id)
240 self.name = self.name + suffix
241 self.segmentation_id = self._get_item(network_config.segmentation_id,
242 chain_id, auto_index=True)
243 self.physical_network = self._get_item(network_config.physical_network, chain_id)
248 self.router_name = None
249 if manager.config.l3_router and hasattr(network_config, 'router_name'):
250 self.router_name = network_config.router_name
252 self._setup(network_config, lookup_only)
255 LOG.error("Cannot find network %s", self.name)
257 LOG.error("Error creating network %s", self.name)
261 def _get_item(self, item_field, index, auto_index=False):
262 """Retrieve an item from a list or a single value.
264 item_field: can be None, a tuple of a single value
265 index: if None is same as 0, else is the index for a chain
266 auto_index: if true will automatically get the final value by adding the
267 index to the base value (if full list not provided)
269 If the item_field is not a tuple, it is considered same as a tuple with same value at any
271 If a list is provided, its length must be > index
277 if isinstance(item_field, tuple):
279 return item_field[index]
281 raise ChainException("List %s is too short for chain index %d" %
282 (str(item_field), index))
283 # single value is configured
285 return item_field + index
288 def _setup(self, network_config, lookup_only):
289 # Lookup if there is a matching network with same name
290 networks = self.manager.neutron_client.list_networks(name=self.name)
291 if networks['networks']:
292 network = networks['networks'][0]
293 # a network of same name already exists, we need to verify it has the same
295 if self.segmentation_id:
296 if network['provider:segmentation_id'] != self.segmentation_id:
297 raise ChainException("Mismatch of 'segmentation_id' for reused "
298 "network '{net}'. Network has id '{seg_id1}', "
299 "configuration requires '{seg_id2}'."
300 .format(net=self.name,
301 seg_id1=network['provider:segmentation_id'],
302 seg_id2=self.segmentation_id))
304 if self.physical_network:
305 if network['provider:physical_network'] != self.physical_network:
306 raise ChainException("Mismatch of 'physical_network' for reused "
307 "network '{net}'. Network has '{phys1}', "
308 "configuration requires '{phys2}'."
309 .format(net=self.name,
310 phys1=network['provider:physical_network'],
311 phys2=self.physical_network))
313 LOG.info('Reusing existing network %s', self.name)
315 self.network = network
318 raise ChainException('Network %s not found' % self.name)
322 'admin_state_up': True
325 if network_config.network_type:
326 body['network']['provider:network_type'] = network_config.network_type
327 if self.segmentation_id:
328 body['network']['provider:segmentation_id'] = self.segmentation_id
329 if self.physical_network:
330 body['network']['provider:physical_network'] = self.physical_network
331 self.network = self.manager.neutron_client.create_network(body)['network']
332 # create associated subnet, all subnets have the same name (which is ok since
333 # we do not need to address them directly by name)
335 'subnet': {'name': network_config.subnet,
336 'cidr': network_config.cidr,
337 'network_id': self.network['id'],
338 'enable_dhcp': False,
340 'dns_nameservers': []}
342 subnet = self.manager.neutron_client.create_subnet(body)['subnet']
343 # add subnet id to the network dict since it has just been added
344 self.network['subnets'] = [subnet['id']]
345 LOG.info('Created network: %s', self.name)
349 Extract UUID of this network.
351 :return: UUID of this network
353 return self.network['id']
357 Extract vlan for this network.
359 :return: vlan ID for this network
361 if self.network['provider:network_type'] != 'vlan':
362 raise ChainException('Trying to retrieve VLAN id for non VLAN network')
363 return self.network['provider:segmentation_id']
367 Extract VNI for this network.
369 :return: VNI ID for this network
372 return self.network['provider:segmentation_id']
374 def get_mpls_inner_label(self):
376 Extract MPLS VPN Label for this network.
378 :return: MPLS VPN Label for this network
381 return self.network['provider:segmentation_id']
384 """Delete this network."""
385 if not self.reuse and self.network:
386 for retry in range(0, self.manager.config.generic_retry_count):
388 self.manager.neutron_client.delete_network(self.network['id'])
389 LOG.info("Deleted network: %s", self.name)
392 LOG.info('Error deleting network %s (retry %d/%d)...',
395 self.manager.config.generic_retry_count)
396 time.sleep(self.manager.config.generic_poll_sec)
397 LOG.error('Unable to delete network: %s', self.name)
400 class ChainVnf(object):
401 """A class to represent a VNF in a chain."""
403 def __init__(self, chain, vnf_id, networks):
404 """Reuse a VNF instance with same characteristics or create a new VNF instance.
406 chain: the chain where this vnf belongs
407 vnf_id: indicates the index of this vnf in its chain (first vnf=0)
408 networks: the list of all networks (ChainNetwork) of the current chain
410 self.manager = chain.manager
413 self.name = self.manager.config.loop_vm_name + str(chain.chain_id)
414 if len(networks) > 2:
415 # we will have more than 1 VM in each chain
416 self.name += '-' + str(vnf_id)
417 # A list of ports for this chain
418 # There are normally 2 ports carrying traffic (index 0, and index 1) and
419 # potentially multiple idle ports not carrying traffic (index 2 and up)
420 # For example if 7 idle interfaces are requested, the corresp. ports will be
423 self.management_port = None
429 self.idle_networks = []
432 # the vnf_id is conveniently also the starting index in networks
433 # for the left and right networks associated to this VNF
434 if self.manager.config.l3_router:
435 self._setup(networks[vnf_id:vnf_id + 4])
437 self._setup(networks[vnf_id:vnf_id + 2])
439 LOG.error("Error creating VNF %s", self.name)
443 def _get_vm_config(self, remote_mac_pair):
444 config = self.manager.config
445 devices = self.manager.generator_config.devices
448 tg_gateway1_ip = self.routers[LEFT].ports[1]['fixed_ips'][0][
449 'ip_address'] # router edge ip left
450 tg_gateway2_ip = self.routers[RIGHT].ports[1]['fixed_ips'][0][
451 'ip_address'] # router edge ip right
452 tg_mac1 = self.routers[LEFT].ports[1]['mac_address'] # router edge mac left
453 tg_mac2 = self.routers[RIGHT].ports[1]['mac_address'] # router edge mac right
454 # edge cidr mask left
455 vnf_gateway1_cidr = \
456 self.ports[LEFT].get_ip() + self.__get_network_mask(
457 self.manager.config.edge_networks.left.cidr)
458 # edge cidr mask right
459 vnf_gateway2_cidr = \
460 self.ports[RIGHT].get_ip() + self.__get_network_mask(
461 self.manager.config.edge_networks.right.cidr)
462 if config.vm_forwarder != 'vpp':
463 raise ChainException(
464 'L3 router mode imply to set VPP as VM forwarder.'
465 'Please update your config file with: vm_forwarder: vpp')
467 tg_gateway1_ip = devices[LEFT].tg_gateway_ip_addrs
468 tg_gateway2_ip = devices[RIGHT].tg_gateway_ip_addrs
469 if not config.loop_vm_arp:
470 tg_mac1 = remote_mac_pair[0]
471 tg_mac2 = remote_mac_pair[1]
476 g1cidr = devices[LEFT].get_gw_ip(
477 self.chain.chain_id) + self.__get_network_mask(
478 self.manager.config.internal_networks.left.cidr)
479 g2cidr = devices[RIGHT].get_gw_ip(
480 self.chain.chain_id) + self.__get_network_mask(
481 self.manager.config.internal_networks.right.cidr)
483 vnf_gateway1_cidr = g1cidr
484 vnf_gateway2_cidr = g2cidr
486 with open(BOOT_SCRIPT_PATHNAME, 'r') as boot_script:
487 content = boot_script.read()
489 'forwarder': config.vm_forwarder,
490 'intf_mac1': self.ports[LEFT].get_mac(),
491 'intf_mac2': self.ports[RIGHT].get_mac(),
492 'tg_gateway1_ip': tg_gateway1_ip,
493 'tg_gateway2_ip': tg_gateway2_ip,
494 'tg_net1': devices[LEFT].ip_addrs,
495 'tg_net2': devices[RIGHT].ip_addrs,
496 'vnf_gateway1_cidr': vnf_gateway1_cidr,
497 'vnf_gateway2_cidr': vnf_gateway2_cidr,
500 'vif_mq_size': config.vif_multiqueue_size,
501 'num_mbufs': config.num_mbufs
503 if self.manager.config.use_management_port:
504 mgmt_ip = self.management_port.port['fixed_ips'][0]['ip_address']
505 mgmt_mask = self.__get_network_mask(self.manager.config.management_network.cidr)
506 vm_config['intf_mgmt_cidr'] = mgmt_ip + mgmt_mask
507 vm_config['intf_mgmt_ip_gw'] = self.manager.config.management_network.gateway
508 vm_config['intf_mac_mgmt'] = self.management_port.port['mac_address']
510 # Interface management config left empty to avoid error in VM spawn
511 # if nfvbench config has values for management network but use_management_port=false
512 vm_config['intf_mgmt_cidr'] = ''
513 vm_config['intf_mgmt_ip_gw'] = ''
514 vm_config['intf_mac_mgmt'] = ''
515 return content.format(**vm_config)
518 def __get_network_mask(network):
519 return '/' + network.split('/')[1]
521 def _get_vnic_type(self, port_index):
522 """Get the right vnic type for given port indexself.
524 If SR-IOV is specified, middle ports in multi-VNF chains
525 can use vswitch or SR-IOV based on config.use_sriov_middle_net
527 if self.manager.config.sriov:
528 chain_length = self.chain.get_length()
529 if self.manager.config.use_sriov_middle_net or chain_length == 1:
531 if self.vnf_id == 0 and port_index == 0:
532 # first VNF in chain must use sriov for left port
534 if (self.vnf_id == chain_length - 1) and (port_index == 1):
535 # last VNF in chain must use sriov for right port
539 def _get_idle_networks_ports(self):
540 """Get the idle networks for PVP or PVVP chain (non shared net only)
542 For EXT packet path or shared net, returns empty list.
543 For PVP, PVVP these networks will be created if they do not exist.
544 chain_id: to which chain the networks belong.
545 a None value will mean that these networks are shared by all chains
549 config = self.manager.config
550 chain_id = self.chain.chain_id
551 idle_interfaces_per_vm = config.idle_interfaces_per_vm
552 if config.service_chain == ChainType.EXT or chain_id is None or \
553 idle_interfaces_per_vm == 0:
556 # Make a copy of the idle networks dict as we may have to modify the
558 idle_network_cfg = AttrDict(config.idle_networks)
559 if idle_network_cfg.segmentation_id:
560 segmentation_id = idle_network_cfg.segmentation_id + \
561 chain_id * idle_interfaces_per_vm
563 segmentation_id = None
565 # create as many idle networks and ports as requested
566 for idle_index in range(idle_interfaces_per_vm):
567 if config.service_chain == ChainType.PVP:
568 suffix = '.%d' % (idle_index)
570 suffix = '.%d.%d' % (self.vnf_id, idle_index)
571 port_name = self.name + '-idle' + str(idle_index)
572 # update the segmentation id based on chain id and idle index
574 idle_network_cfg.segmentation_id = segmentation_id + idle_index
575 port_name = port_name + "." + str(segmentation_id)
577 networks.append(ChainNetwork(self.manager,
581 ports.append(ChainVnfPort(port_name,
583 networks[idle_index],
586 # need to cleanup all successful networks
592 self.idle_networks = networks
593 self.idle_ports = ports
595 def _setup(self, networks):
596 flavor_id = self.manager.flavor.flavor.id
597 # Check if we can reuse an instance with same name
598 for instance in self.manager.existing_instances:
599 if instance.name == self.name:
601 instance_right = RIGHT
602 # In case of L3 traffic instance use edge networks
603 if self.manager.config.l3_router:
604 instance_left = EDGE_LEFT
605 instance_right = EDGE_RIGHT
606 # Verify that other instance characteristics match
607 if instance.flavor['id'] != flavor_id:
608 self._reuse_exception('Flavor mismatch')
609 if instance.status != "ACTIVE":
610 self._reuse_exception('Matching instance is not in ACTIVE state')
611 # The 2 networks for this instance must also be reused
612 if not networks[instance_left].reuse:
613 self._reuse_exception('network %s is new' % networks[instance_left].name)
614 if not networks[instance_right].reuse:
615 self._reuse_exception('network %s is new' % networks[instance_right].name)
616 # instance.networks have the network names as keys:
617 # {'nfvbench-rnet0': ['192.168.2.10'], 'nfvbench-lnet0': ['192.168.1.8']}
618 if networks[instance_left].name not in instance.networks:
619 self._reuse_exception('Left network mismatch')
620 if networks[instance_right].name not in instance.networks:
621 self._reuse_exception('Right network mismatch')
624 self.instance = instance
625 LOG.info('Reusing existing instance %s on %s',
626 self.name, self.get_hypervisor_name())
627 # create management port if needed
628 if self.manager.config.use_management_port:
629 self.management_port = ChainVnfPort(self.name + '-mgmt', self,
630 self.manager.management_network, 'normal')
631 ip = self.management_port.port['fixed_ips'][0]['ip_address']
632 if self.manager.config.use_floating_ip:
633 ip = self.management_port.set_floating_ip(self.manager.floating_ip_network)
634 LOG.info("Management interface will be active using IP: %s, "
635 "and you can connect over SSH with login: nfvbench and password: nfvbench", ip)
636 # create or reuse/discover 2 ports per instance
637 if self.manager.config.l3_router:
639 self.ports.append(ChainVnfPort(self.name + '-' + str(index),
642 self._get_vnic_type(index)))
645 self.ports.append(ChainVnfPort(self.name + '-' + str(index),
648 self._get_vnic_type(index)))
650 # create idle networks and ports only if instance is not reused
651 # if reused, we do not care about idle networks/ports
653 self._get_idle_networks_ports()
655 # Create neutron routers for L3 traffic use case
656 if self.manager.config.l3_router and self.manager.openstack:
657 internal_nets = networks[:2]
658 if self.manager.config.service_chain == ChainType.PVP:
659 edge_nets = networks[2:]
661 edge_nets = networks[3:]
662 subnets_left = [internal_nets[0], edge_nets[0]]
663 routes_left = [{'destination': self.manager.config.traffic_generator.ip_addrs[0],
664 'nexthop': self.manager.config.traffic_generator.tg_gateway_ip_addrs[
666 {'destination': self.manager.config.traffic_generator.ip_addrs[1],
667 'nexthop': self.ports[0].get_ip()}]
669 ChainRouter(self.manager, edge_nets[0].router_name, subnets_left, routes_left))
670 subnets_right = [internal_nets[1], edge_nets[1]]
671 routes_right = [{'destination': self.manager.config.traffic_generator.ip_addrs[0],
672 'nexthop': self.ports[1].get_ip()},
673 {'destination': self.manager.config.traffic_generator.ip_addrs[1],
674 'nexthop': self.manager.config.traffic_generator.tg_gateway_ip_addrs[
677 ChainRouter(self.manager, edge_nets[1].router_name, subnets_right, routes_right))
678 # Overload gateway_ips property with router ip address for ARP and traffic calls
679 self.manager.generator_config.devices[LEFT].set_gw_ip(
680 self.routers[LEFT].ports[0]['fixed_ips'][0]['ip_address']) # router edge ip left)
681 self.manager.generator_config.devices[RIGHT].set_gw_ip(
682 self.routers[RIGHT].ports[0]['fixed_ips'][0]['ip_address']) # router edge ip right)
684 # if no reuse, actual vm creation is deferred after all ports in the chain are created
685 # since we need to know the next mac in a multi-vnf chain
687 def create_vnf(self, remote_mac_pair):
688 """Create the VNF instance if it does not already exist."""
689 if self.instance is None:
691 if self.manager.config.use_management_port:
692 port_ids.append({'port-id': self.management_port.port['id']})
693 port_ids.extend([{'port-id': vnf_port.port['id']} for vnf_port in self.ports])
695 for idle_port in self.idle_ports:
696 port_ids.append({'port-id': idle_port.port['id']})
697 vm_config = self._get_vm_config(remote_mac_pair)
698 az = self.manager.placer.get_required_az()
699 server = self.manager.comp.create_server(self.name,
700 self.manager.image_instance,
701 self.manager.flavor.flavor,
708 files={NFVBENCH_CFG_VM_PATHNAME: vm_config})
710 self.instance = server
711 if self.manager.placer.is_resolved():
712 LOG.info('Created instance %s on %s', self.name, az)
714 # the location is undetermined at this point
715 # self.get_hypervisor_name() will return None
716 LOG.info('Created instance %s - waiting for placement resolution...', self.name)
717 # here we MUST wait until this instance is resolved otherwise subsequent
718 # VNF creation can be placed in other hypervisors!
719 config = self.manager.config
720 max_retries = int((config.check_traffic_time_sec +
721 config.generic_poll_sec - 1) / config.generic_poll_sec)
723 for retry in range(max_retries):
724 status = self.get_status()
725 if status == 'ACTIVE':
726 hyp_name = self.get_hypervisor_name()
727 LOG.info('Instance %s is active and has been placed on %s',
729 self.manager.placer.register_full_name(hyp_name)
731 if status == 'ERROR':
732 raise ChainException('Instance %s creation error: %s' %
734 self.instance.fault['message']))
735 LOG.info('Waiting for instance %s to become active (retry %d/%d)...',
736 self.name, retry + 1, max_retries + 1)
737 time.sleep(config.generic_poll_sec)
740 LOG.error('Instance %s creation timed out', self.name)
741 raise ChainException('Instance %s creation timed out' % self.name)
744 raise ChainException('Unable to create instance: %s' % (self.name))
746 def _reuse_exception(self, reason):
747 raise ChainException('Instance %s cannot be reused (%s)' % (self.name, reason))
749 def get_status(self):
750 """Get the statis of this instance."""
751 if self.instance.status != 'ACTIVE':
752 self.instance = self.manager.comp.poll_server(self.instance)
753 return self.instance.status
755 def get_hostname(self):
756 """Get the hypervisor host name running this VNF instance."""
757 if self.manager.is_admin:
758 hypervisor_hostname = getattr(self.instance, 'OS-EXT-SRV-ATTR:hypervisor_hostname')
760 hypervisor_hostname = self.manager.config.hypervisor_hostname
761 if not hypervisor_hostname:
762 raise ChainException('Hypervisor hostname parameter is mandatory')
763 return hypervisor_hostname
765 def get_host_ip(self):
766 """Get the IP address of the host where this instance runs.
768 return: the IP address
771 self.host_ip = self.manager.comp.get_hypervisor(self.get_hostname()).host_ip
774 def get_hypervisor_name(self):
775 """Get hypervisor name (az:hostname) for this VNF instance."""
777 if self.manager.is_admin:
778 az = getattr(self.instance, 'OS-EXT-AZ:availability_zone')
780 az = self.manager.config.availability_zone
782 raise ChainException('Availability zone parameter is mandatory')
783 hostname = self.get_hostname()
785 return az + ':' + hostname
790 """Get the uuid for this instance."""
791 return self.instance.id
793 def delete(self, forced=False):
794 """Delete this VNF instance."""
796 LOG.info("Instance %s not deleted (reused)", self.name)
799 self.manager.comp.delete_server(self.instance)
800 LOG.info("Deleted instance %s", self.name)
801 if self.manager.config.use_management_port:
802 self.management_port.delete()
803 for port in self.ports:
805 for port in self.idle_ports:
807 for network in self.idle_networks:
812 """A class to manage a single chain.
814 Can handle any type of chain (EXT, PVP, PVVP)
817 def __init__(self, chain_id, manager):
818 """Create a new chain.
820 chain_id: chain index (first chain is 0)
821 manager: the chain manager that owns all chains
823 self.chain_id = chain_id
824 self.manager = manager
825 self.encaps = manager.encaps
829 self.networks = manager.get_networks(chain_id)
830 # For external chain VNFs can only be discovered from their MAC addresses
831 # either from config or from ARP
832 if manager.config.service_chain != ChainType.EXT:
833 for chain_instance_index in range(self.get_length()):
834 self.instances.append(ChainVnf(self,
835 chain_instance_index,
837 # at this point new VNFs are not created yet but
838 # verify that all discovered VNFs are on the same hypervisor
839 self._check_hypervisors()
840 # now that all VNF ports are created we need to calculate the
841 # left/right remote MAC for each VNF in the chain
842 # before actually creating the VNF itself
843 rem_mac_pairs = self._get_remote_mac_pairs()
844 for instance in self.instances:
845 rem_mac_pair = rem_mac_pairs.pop(0)
846 instance.create_vnf(rem_mac_pair)
851 def _check_hypervisors(self):
852 common_hypervisor = None
853 for instance in self.instances:
854 # get the full hypervizor name (az:compute)
855 hname = instance.get_hypervisor_name()
857 if common_hypervisor:
858 if hname != common_hypervisor:
859 raise ChainException('Discovered instances on different hypervisors:'
860 ' %s and %s' % (hname, common_hypervisor))
862 common_hypervisor = hname
863 if common_hypervisor:
864 # check that the common hypervisor name matchs the requested hypervisor name
865 # and set the name to be used by all future instances (if any)
866 if not self.manager.placer.register_full_name(common_hypervisor):
867 raise ChainException('Discovered hypervisor placement %s is incompatible' %
870 def get_length(self):
871 """Get the number of VNF in the chain."""
872 # Take into account 2 edge networks for routers
873 return len(self.networks) - 3 if self.manager.config.l3_router else len(self.networks) - 1
875 def _get_remote_mac_pairs(self):
876 """Get the list of remote mac pairs for every VNF in the chain.
878 Traverse the chain from left to right and establish the
879 left/right remote MAC for each VNF in the chainself.
882 mac sequence: tg_src_mac, vm0-mac0, vm0-mac1, tg_dst_mac
883 must produce [[tg_src_mac, tg_dst_mac]] or looking at index in mac sequence: [[0, 3]]
884 the mac pair is what the VNF at that position (index 0) sees as next hop mac left and right
887 tg_src_mac, vm0-mac0, vm0-mac1, vm1-mac0, vm1-mac1, tg_dst_mac
888 Must produce the following list:
889 [[tg_src_mac, vm1-mac0], [vm0-mac1, tg_dst_mac]] or index: [[0, 3], [2, 5]]
891 General case with 3 VMs in chain, the list of consecutive macs (left to right):
892 tg_src_mac, vm0-mac0, vm0-mac1, vm1-mac0, vm1-mac1, vm2-mac0, vm2-mac1, tg_dst_mac
893 Must produce the following list:
894 [[tg_src_mac, vm1-mac0], [vm0-mac1, vm2-mac0], [vm1-mac1, tg_dst_mac]]
895 or index: [[0, 3], [2, 5], [4, 7]]
897 The series pattern is pretty clear: [[n, n+3],... ] where n is multiple of 2
899 # line up all mac from left to right
900 mac_seq = [self.manager.generator_config.devices[LEFT].mac]
901 for instance in self.instances:
902 mac_seq.append(instance.ports[0].get_mac())
903 mac_seq.append(instance.ports[1].get_mac())
904 mac_seq.append(self.manager.generator_config.devices[RIGHT].mac)
907 for _ in self.instances:
908 rem_mac_pairs.append([mac_seq[base], mac_seq[base + 3]])
912 def get_instances(self):
913 """Return all instances for this chain."""
914 return self.instances
916 def get_vlan(self, port_index):
917 """Get the VLAN id on a given port.
919 port_index: left port is 0, right port is 1
920 return: the vlan_id or None if there is no vlan tagging
922 # for port 1 we need to return the VLAN of the last network in the chain
923 # The networks array contains 2 networks for PVP [left, right]
924 # and 3 networks in the case of PVVP [left.middle,right]
926 # this will pick the last item in array
928 # This string filters networks connected to TG, in case of
929 # l3-router feature we have 4 networks instead of 2
930 networks = [x for x in self.networks if not x.router_name]
931 return networks[port_index].get_vlan()
933 def get_vxlan(self, port_index):
934 """Get the VXLAN id on a given port.
936 port_index: left port is 0, right port is 1
937 return: the vxlan_id or None if there is no vxlan
939 # for port 1 we need to return the VLAN of the last network in the chain
940 # The networks array contains 2 networks for PVP [left, right]
941 # and 3 networks in the case of PVVP [left.middle,right]
943 # this will pick the last item in array
945 return self.networks[port_index].get_vxlan()
947 def get_mpls_inner_label(self, port_index):
948 """Get the MPLS VPN Label on a given port.
950 port_index: left port is 0, right port is 1
951 return: the mpls_label_id or None if there is no mpls
953 # for port 1 we need to return the MPLS Label of the last network in the chain
954 # The networks array contains 2 networks for PVP [left, right]
955 # and 3 networks in the case of PVVP [left.middle,right]
957 # this will pick the last item in array
959 return self.networks[port_index].get_mpls_inner_label()
961 def get_dest_mac(self, port_index):
962 """Get the dest MAC on a given port.
964 port_index: left port is 0, right port is 1
968 # for right port, use the right port MAC of the last (right most) VNF In chain
969 return self.instances[-1].ports[1].get_mac()
970 # for left port use the left port MAC of the first (left most) VNF in chain
971 return self.instances[0].ports[0].get_mac()
973 def get_network_uuids(self):
974 """Get UUID of networks in this chain from left to right (order is important).
976 :return: list of UUIDs of networks (2 or 3 elements)
978 return [net['id'] for net in self.networks]
def get_host_ips(self):
    """Return the IP address(es) of the host compute nodes used for this chain.

    :return: a list of 1 or 2 IP addresses
    """
    return [instance.get_host_ip() for instance in self.instances]
def get_compute_nodes(self):
    """Return the name of the host compute nodes used for this chain.

    :return: a list of 1 host name in the az:host format
    """
    # Since all chains go through the same compute node(s) we can just retrieve the
    # compute node name(s) for the first chain
    node_names = []
    for vnf in self.instances:
        node_names.append(vnf.get_hypervisor_name())
    return node_names
997 """Delete this chain."""
998 for instance in self.instances:
1000 # only delete if these are chain private networks (not shared)
1001 if not self.manager.config.service_chain_shared_net:
1002 for network in self.networks:
class InstancePlacer(object):
    """A class to manage instance placement for all VNFs in all chains.

    A full az string is made of 2 parts AZ and hypervisor.
    The placement is resolved when both parts az and hypervisor names are known.
    """

    def __init__(self, req_az, req_hyp):
        """Create a new instance placer.

        req_az: requested AZ (can be None or empty if no preference)
        req_hyp: requested hypervisor name (can be None or empty if no preference)
                 can be any of 'nova:', 'comp1', 'nova:comp1'
                 if it is a list, only the first item is used (backward
                 compatibility in config)

        req_az is ignored if req_hyp has an az part
        all other parts beyond the first 2 are ignored in req_hyp
        """
        # if passed a list just pick the first item
        if req_hyp and isinstance(req_hyp, list):
            req_hyp = req_hyp[0]
        # only pick first part of az
        if req_az and ':' in req_az:
            req_az = req_az.split(':')[0]
        if req_hyp:
            # check if requested hypervisor string has an AZ part
            split_hyp = req_hyp.split(':')
            if len(split_hyp) > 1:
                # override the AZ part and hypervisor part
                req_az = split_hyp[0]
                req_hyp = split_hyp[1]
        self.requested_az = req_az if req_az else ''
        self.requested_hyp = req_hyp if req_hyp else ''
        # Nova can accept AZ only (e.g. 'nova:', use any hypervisor in that AZ)
        # or hypervisor only (e.g. ':comp1')
        # or both (e.g. 'nova:comp1')
        if req_az:
            self.required_az = req_az + ':' + self.requested_hyp
        else:
            # need to insert a ':' so nova knows this is the hypervisor name
            self.required_az = ':' + self.requested_hyp if req_hyp else ''
        # placement is resolved when both AZ and hypervisor names are known and set
        self.resolved = self.requested_az != '' and self.requested_hyp != ''

    def get_required_az(self):
        """Return the required az (can be resolved or not)."""
        return self.required_az

    def register_full_name(self, discovered_az):
        """Verify compatibility and register a discovered hypervisor full name.

        discovered_az: a discovered AZ in az:hypervisor format
        return: True if discovered_az is compatible and set
                False if discovered_az is not compatible
        """
        if self.resolved:
            # placement already pinned: only an exact match is compatible
            return discovered_az == self.required_az
        # must be in full az format
        split_daz = discovered_az.split(':')
        if len(split_daz) != 2:
            return False
        if self.requested_az and self.requested_az != split_daz[0]:
            return False
        if self.requested_hyp and self.requested_hyp != split_daz[1]:
            return False
        # compatible: pin the placement to the discovered location
        self.required_az = discovered_az
        self.resolved = True
        return True

    def is_resolved(self):
        """Check if the full AZ is resolved.

        return: True if resolved
        """
        return self.resolved
1084 class ChainManager(object):
1085 """A class for managing all chains for a given run.
1087 Supports openstack or no openstack.
1088 Supports EXT, PVP and PVVP chains.
1091 def __init__(self, chain_runner):
# Entry point: stages or discovers every resource (image, flavor, networks,
# instances) that one benchmarking run needs, in openstack or config-only mode.
1092 """Create a chain manager to take care of discovering or bringing up the requested chains.
1094 A new instance must be created every time a new config is used.
1095 config: the nfvbench config to use
1096 cred: openstack credentials to use of None if there is no openstack
1098 self.chain_runner = chain_runner
1099 self.config = chain_runner.config
1100 self.generator_config = chain_runner.traffic_client.generator_config
# image_instance/image_name cache the glance image so server mode does not re-upload it
1102 self.image_instance = None
1103 self.image_name = None
1104 # Left and right networks shared across all chains (only if shared)
1109 self.nova_client = None
1110 self.neutron_client = None
1111 self.glance_client = None
1112 self.existing_instances = []
1113 # existing ports keyed by the network uuid they belong to
1114 self._existing_ports = {}
1115 config = self.config
# openstack mode requires credentials and is disabled by l2 loopback
1116 self.openstack = (chain_runner.cred is not None) and not config.l2_loopback
1117 self.chain_count = config.service_chain_count
# NOTE(review): this listing has gaps (embedded line numbers jump). The client
# setup below is presumably inside an 'if self.openstack:' branch and the
# config-only code further down in its 'else:' — confirm against upstream file.
1121 session = chain_runner.cred.get_session()
1122 self.is_admin = chain_runner.cred.is_admin
1123 self.nova_client = Client(2, session=session)
1124 self.neutron_client = neutronclient.Client('2.0', session=session)
1125 self.glance_client = glanceclient.Client('2', session=session)
1126 self.comp = compute.Compute(self.nova_client,
1130 if config.service_chain != ChainType.EXT:
# PVP/PVVP chains need a placer, a flavor and the list of reusable instances
1131 self.placer = InstancePlacer(config.availability_zone, config.compute_nodes)
1133 self.flavor = ChainFlavor(config.flavor_type, config.flavor, self.comp)
1134 # Get list of all existing instances to check if some instances can be reused
1135 self.existing_instances = self.comp.get_server_list()
1136 # If management port is requested for VMs, create management network (shared)
1137 if self.config.use_management_port:
1138 self.management_network = ChainNetwork(self, self.config.management_network,
1140 # If floating IP is used for management, create and share
1141 # across chains the floating network
1142 if self.config.use_floating_ip:
1143 self.floating_ip_network = ChainNetwork(self,
1144 self.config.floating_network,
1147 # For EXT chains, the external_networks left and right fields in the config
1148 # must be either a prefix string or a list of at least chain-count strings
1149 self._check_extnet('left', config.external_networks.left)
1150 self._check_extnet('right', config.external_networks.right)
1152 # If networks are shared across chains, get the list of networks
1153 if config.service_chain_shared_net:
1154 self.networks = self.get_networks()
1155 # Reuse/create chains
1156 for chain_id in range(self.chain_count):
1157 self.chains.append(Chain(chain_id, self))
1158 if config.service_chain == ChainType.EXT:
1159 # if EXT and no ARP or VxLAN we need to read dest MACs from config
1160 if config.no_arp or config.vxlan:
1161 self._get_dest_macs_from_config()
1163 # Make sure all instances are active before proceeding
1164 self._ensure_instances_active()
1165 # network API call do not show VLANS ID if not admin read from config
1166 if not self.is_admin and config.vlan_tagging:
1167 self._get_config_vlans()
# Below: config-only mode (no openstack credentials or l2 loopback enabled)
1172 # no openstack, no need to create chains
1173 if not config.l2_loopback and config.no_arp:
1174 self._get_dest_macs_from_config()
1175 if config.vlan_tagging:
1176 # make sure there at least as many entries as chains in each left/right list
1177 if len(config.vlans) != 2:
1178 raise ChainException('The config vlans property must be a list '
1179 'with 2 lists of VLAN IDs')
1180 self._get_config_vlans()
# VxLAN requires neutron to allocate VNIs, hence the hard failure without openstack
1182 raise ChainException('VxLAN is only supported with OpenStack')
1184 def _check_extnet(self, side, name):
1186 raise ChainException('external_networks.%s must contain a valid network'
1187 ' name prefix or a list of network names' % side)
1188 if isinstance(name, tuple) and len(name) < self.chain_count:
1189 raise ChainException('external_networks.%s %s'
1190 ' must have at least %d names' % (side, name, self.chain_count))
1192 def _get_config_vlans(self):
1195 self.vlans = [self._check_list('vlans[0]', self.config.vlans[0], re_vlan),
1196 self._check_list('vlans[1]', self.config.vlans[1], re_vlan)]
1198 raise ChainException('vlans parameter is mandatory. Set valid value in config file')
1200 def _get_dest_macs_from_config(self):
1201 re_mac = "[0-9a-fA-F]{2}([-:])[0-9a-fA-F]{2}(\\1[0-9a-fA-F]{2}){4}$"
1202 tg_config = self.config.traffic_generator
1203 self.dest_macs = [self._check_list("mac_addrs_left",
1204 tg_config.mac_addrs_left, re_mac),
1205 self._check_list("mac_addrs_right",
1206 tg_config.mac_addrs_right, re_mac)]
1208 def _check_list(self, list_name, ll, pattern):
1209 # if it is a single int or mac, make it a list of 1 int
1210 if isinstance(ll, (int, str)):
1213 if not re.match(pattern, str(item)):
1214 raise ChainException("Invalid format '{item}' specified in {fname}"
1215 .format(item=item, fname=list_name))
1216 # must have at least 1 element
1218 raise ChainException('%s cannot be empty' % (list_name))
1219 # for shared network, if 1 element is passed, replicate it as many times
1221 if self.config.service_chain_shared_net and len(ll) == 1:
1222 ll = [ll[0]] * self.chain_count
1224 # number of elements musty be the number of chains
1225 elif len(ll) < self.chain_count:
1226 raise ChainException('%s=%s must be a list with %d elements per chain' %
1227 (list_name, ll, self.chain_count))
1230 def _setup_image(self):
# Find or upload the nfvbench VM image in glance, then set its multiqueue property.
1231 # To avoid reuploading image in server mode, check whether image_name is set or not
# NOTE(review): this listing has gaps — the lookup below is presumably guarded by
# 'if self.image_name:' and the name-derivation path by an 'else:'; the match and
# loop branches are also missing their if/else/break lines — confirm upstream.
1233 self.image_instance = self.comp.find_image(self.image_name)
1234 if self.image_instance:
1235 LOG.info("Reusing image %s", self.image_name)
# the image name is the qcow2 file base name without its extension
1237 image_name_search_pattern = r'(nfvbenchvm-\d+(\.\d+)*).qcow2'
1238 if self.config.vm_image_file:
# derive the image name from the user-provided qcow2 file name
1239 match = re.search(image_name_search_pattern, self.config.vm_image_file)
1241 self.image_name = match.group(1)
1242 LOG.info('Using provided VM image file %s', self.config.vm_image_file)
1244 raise ChainException('Provided VM image file name %s must start with '
1245 '"nfvbenchvm-<version>"' % self.config.vm_image_file)
# no image file provided: look for a built-in qcow2 shipped in the package root
1247 pkg_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
1248 for f in os.listdir(pkg_root):
1249 if re.search(image_name_search_pattern, f):
1250 self.config.vm_image_file = pkg_root + '/' + f
1251 self.image_name = f.replace('.qcow2', '')
1252 LOG.info('Found built-in VM image file %s', f)
1255 raise ChainException('Cannot find any built-in VM image file.')
# upload to glance only if the named image is not already there
1257 self.image_instance = self.comp.find_image(self.image_name)
1258 if not self.image_instance:
1259 LOG.info('Uploading %s', self.image_name)
1260 res = self.comp.upload_image_via_url(self.image_name,
1261 self.config.vm_image_file)
# a failed upload aborts the whole run
1264 raise ChainException('Error uploading image %s from %s. ABORTING.' %
1265 (self.image_name, self.config.vm_image_file))
1266 LOG.info('Image %s successfully uploaded.', self.image_name)
1267 self.image_instance = self.comp.find_image(self.image_name)
1269 # image multiqueue property must be set according to the vif_multiqueue_size
1270 # config value (defaults to 1 or disabled)
1271 self.comp.image_set_multiqueue(self.image_instance, self.config.vif_multiqueue_size > 1)
1273 def _ensure_instances_active(self):
1275 for chain in self.chains:
1276 instances.extend(chain.get_instances())
1277 initial_instance_count = len(instances)
1278 max_retries = (self.config.check_traffic_time_sec + (initial_instance_count - 1) * 10 +
1279 self.config.generic_poll_sec - 1) / self.config.generic_poll_sec
1282 remaining_instances = []
1283 for instance in instances:
1284 status = instance.get_status()
1285 if status == 'ACTIVE':
1286 LOG.info('Instance %s is ACTIVE on %s',
1287 instance.name, instance.get_hypervisor_name())
1289 if status == 'ERROR':
1290 raise ChainException('Instance %s creation error: %s' %
1292 instance.instance.fault['message']))
1293 remaining_instances.append(instance)
1294 if not remaining_instances:
1297 if retry >= max_retries:
1298 raise ChainException('Time-out: %d/%d instances still not active' %
1299 (len(remaining_instances), initial_instance_count))
1300 LOG.info('Waiting for %d/%d instance to become active (retry %d/%d)...',
1301 len(remaining_instances), initial_instance_count,
1303 instances = remaining_instances
1304 time.sleep(self.config.generic_poll_sec)
1305 if initial_instance_count:
1306 LOG.info('All instances are active')
def get_networks(self, chain_id=None):
    """Get the networks for given EXT, PVP or PVVP chain.

    For EXT packet path, these networks must pre-exist.
    For PVP, PVVP these networks will be created if they do not exist.
    chain_id: to which chain the networks belong.
              a None value will mean that these networks are shared by all chains
    return: the list of networks for the chain, from left to right
    """
    if self.networks:
        # the only case where self.networks exists is when the networks
        # are shared across all chains
        return self.networks
    if self.config.service_chain == ChainType.EXT:
        # external networks must pre-exist: look them up, never create them
        lookup_only = True
        ext_net = self.config.external_networks
        net_cfg = [AttrDict({'name': name,
                             'segmentation_id': None,
                             'physical_network': None})
                   for name in [ext_net.left, ext_net.right]]
        # segmentation id and subnet should be discovered from neutron
    else:
        lookup_only = False
        int_nets = self.config.internal_networks
        if self.config.service_chain == ChainType.PVP:
            net_cfg = [int_nets.left, int_nets.right]
        else:
            net_cfg = [int_nets.left, int_nets.middle, int_nets.right]
        if self.config.l3_router:
            # the l3-router feature adds 2 edge networks (TG facing)
            edge_nets = self.config.edge_networks
            net_cfg.append(edge_nets.left)
            net_cfg.append(edge_nets.right)
    networks = []
    try:
        for cfg in net_cfg:
            networks.append(ChainNetwork(self, cfg, chain_id, lookup_only=lookup_only))
    except Exception:
        # need to cleanup all successful networks prior to bailing out
        for net in networks:
            net.delete()
        raise
    return networks
def get_existing_ports(self):
    """Return all existing neutron ports, grouped by network UUID.

    Ports are fetched lazily on the first call because listing them can be
    costly when there are many; the result is cached in self._existing_ports.

    return: a dict mapping each network uuid to the list of neutron port
            dicts attached to that network. Each port is a dict with fields
            such as 'id', 'mac_address', 'network_id', 'binding:host_id',
            'fixed_ips', 'binding:vif_type', ...
    """
    if not self._existing_ports:
        LOG.info('Loading list of all ports...')
        all_ports = self.neutron_client.list_ports()['ports']
        # group the ports by the uuid of the network they are attached to
        for neutron_port in all_ports:
            network_ports = self._existing_ports.setdefault(neutron_port['network_id'], [])
            network_ports.append(neutron_port)
        LOG.info("Loaded %d ports attached to %d networks",
                 len(all_ports), len(self._existing_ports))
    return self._existing_ports
def get_ports_from_network(self, chain_network):
    """Return the existing ports attached to one network.

    Ports are retrieved lazily (see get_existing_ports) since listing them
    is costly and only needed when VM and network are being reused.

    chain_network: a ChainNetwork instance whose attached ports are wanted
    return: the list of neutron ports attached to that network, or None
    """
    ports_by_network = self.get_existing_ports()
    return ports_by_network.get(chain_network.get_uuid(), None)
def get_hypervisor_from_mac(self, mac):
    """Get the hypervisor that hosts a VM MAC.

    mac: MAC address to look for
    return: the hypervisor where the matching port runs or None if not found
    """
    # _existing_ports is a dict of list of ports indexed by network id
    for ports in self.get_existing_ports().values():
        for candidate in ports:
            if candidate['mac_address'] == mac:
                return self.comp.get_hypervisor(candidate['binding:host_id'])
    return None
def get_host_ip_from_mac(self, mac):
    """Get the host IP address matching a MAC.

    mac: MAC address to look for
    return: the IP address of the host where the matching port runs
            or None if not found
    """
    hypervisor = self.get_hypervisor_from_mac(mac)
    # guard against an unknown MAC: no hypervisor found means no host IP
    if hypervisor:
        return hypervisor.host_ip
    return None
def get_chain_vlans(self, port_index):
    """Get the list of per chain VLAN id on a given port.

    port_index: left port is 0, right port is 1
    return: a VLAN ID list indexed by the chain index or None if no vlan tagging
    """
    if self.chains and self.is_admin:
        # VLANs are discovered from openstack, one per chain
        vlan_ids = []
        for chain_index in range(self.chain_count):
            vlan_ids.append(self.chains[chain_index].get_vlan(port_index))
        return vlan_ids
    # no openstack or not admin: use the VLANs read from the config
    return self.vlans[port_index]
def get_chain_vxlans(self, port_index):
    """Get the list of per chain VNIs id on a given port.

    port_index: left port is 0, right port is 1
    return: a VNIs ID list indexed by the chain index or None if no vlan tagging
    """
    if self.chains and self.is_admin:
        # VNIs are discovered from openstack, one per chain
        vni_ids = []
        for chain_index in range(self.chain_count):
            vni_ids.append(self.chains[chain_index].get_vxlan(port_index))
        return vni_ids
    raise ChainException('VxLAN is only supported with OpenStack and with admin user')
def get_chain_mpls_inner_labels(self, port_index):
    """Get the list of per chain MPLS VPN Labels on a given port.

    port_index: left port is 0, right port is 1
    return: a MPLSs ID list indexed by the chain index or None if no mpls
    """
    if self.chains and self.is_admin:
        # labels are discovered from openstack, one per chain
        labels = []
        for chain_index in range(self.chain_count):
            labels.append(self.chains[chain_index].get_mpls_inner_label(port_index))
        return labels
    raise ChainException('MPLS is only supported with OpenStack and with admin user')
def get_dest_macs(self, port_index):
    """Get the list of per chain dest MACs on a given port.

    Should not be called if EXT+ARP is used (in that case the traffic gen will
    have the ARP responses back from VNFs with the dest MAC to use).

    port_index: left port is 0, right port is 1
    return: a list of dest MACs indexed by the chain index
    """
    if self.chains and self.config.service_chain != ChainType.EXT:
        # discovered from the VNF ports, one MAC per chain
        macs = []
        for chain_index in range(self.chain_count):
            macs.append(self.chains[chain_index].get_dest_mac(port_index))
        return macs
    # no openstack or EXT+no-arp: use the MACs read from the config
    return self.dest_macs[port_index]
def get_host_ips(self):
    """Return the IP address(es) of the host compute nodes used for this run.

    :return: a list of 1 IP address (empty if not found / no openstack)
    """
    # Since all chains go through the same compute node(s) we can just retrieve the
    # compute node(s) for the first chain
    if self.chains:
        if self.config.service_chain != ChainType.EXT:
            return self.chains[0].get_host_ips()
        # in the case of EXT, the compute node must be retrieved from the port
        # associated to any of the dest MACs
        dst_macs = self.generator_config.get_dest_macs()
        # dest MAC on port 0, chain 0
        dst_mac = dst_macs[0][0]
        host_ip = self.get_host_ip_from_mac(dst_mac)
        if host_ip:
            LOG.info('Found compute node IP for EXT chain: %s', host_ip)
            return [host_ip]
    # no openstack = no chains
    return []
def get_compute_nodes(self):
    """Return the name of the host compute nodes used for this run.

    :return: a list of 0 or 1 host name in the az:host format
    """
    # Since all chains go through the same compute node(s) we can just retrieve the
    # compute node name(s) for the first chain
    if self.chains:
        if self.config.service_chain != ChainType.EXT:
            return self.chains[0].get_compute_nodes()
        # in the case of EXT, the compute node must be retrieved from the port
        # associated to any of the dest MACs
        dst_macs = self.generator_config.get_dest_macs()
        # dest MAC on port 0, chain 0
        dst_mac = dst_macs[0][0]
        hypervisor = self.get_hypervisor_from_mac(dst_mac)
        if hypervisor:
            LOG.info('Found hypervisor for EXT chain: %s', hypervisor.hypervisor_hostname)
            return [':' + hypervisor.hypervisor_hostname]
    # no openstack = no chains
    return []
1528 """Delete resources for all chains."""
1529 for chain in self.chains:
1531 for network in self.networks:
1533 if self.config.use_management_port and hasattr(self, 'management_network'):
1534 self.management_network.delete()
1535 if self.config.use_floating_ip and hasattr(self, 'floating_ip_network'):
1536 self.floating_ip_network.delete()
1538 self.flavor.delete()