2 # Copyright 2018 Cisco Systems, Inc. All rights reserved.
4 # Licensed under the Apache License, Version 2.0 (the "License"); you may
5 # not use this file except in compliance with the License. You may obtain
6 # a copy of the License at
8 # http://www.apache.org/licenses/LICENSE-2.0
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 # License for the specific language governing permissions and limitations
17 # This module takes care of chaining networks, ports and vms
19 """NFVBENCH CHAIN DISCOVERY/STAGING.
21 This module takes care of staging/discovering all resources that are participating in a
22 benchmarking session: flavors, networks, ports, VNF instances.
23 If a resource is discovered with the same name, it will be reused.
24 Otherwise it will be created.
26 ChainManager: manages VM image, flavor, the staging discovery of all chains
28 Chain: manages one chain, has 2 or more networks and 1 or more instances
29 ChainNetwork: manages 1 network in a chain
30 ChainVnf: manages 1 VNF instance in a chain, has 2 ports
31 ChainVnfPort: manages 1 instance port
33 ChainManager-->Chain(*)
34 Chain-->ChainNetwork(*),ChainVnf(*)
35 ChainVnf-->ChainVnfPort(2)
37 Once created/discovered, instances are checked to be in the active state (ready to pass traffic)
38 Configuration parameters that will influence how these resources are staged/related:
39 - openstack or no openstack
42 - number of VNF in each chain (PVP, PVVP)
43 - SRIOV and middle port SRIOV for port types
44 - whether networks are shared across chains or not
46 There is no traffic generation involved in this module.
53 from neutronclient.neutron import client as neutronclient
54 from novaclient.client import Client
56 from attrdict import AttrDict
57 from .chain_router import ChainRouter
60 from .specs import ChainType
# NOTE(review): this chunk appears to be an incomplete extraction — the embedded
# original line numbers are non-contiguous (62-63, 65-66 and 73 are missing), so
# the LEFT/RIGHT and edge-network index constant assignments referenced later in
# this module are not visible here; confirm against the full file.
61 # Left and right index for network and port lists
64 # L3 traffic edge networks are at the end of networks list
67 # Name of the VM config file
68 NFVBENCH_CFG_FILENAME = 'nfvbenchvm.conf'
# full pathname of the VM config in the VM
70 NFVBENCH_CFG_VM_PATHNAME = os.path.join('/etc/', NFVBENCH_CFG_FILENAME)
71 # full path of the boot shell script template file on the server where nfvbench runs
# NOTE(review): missing original line 73 likely held a middle path component for
# this join (e.g. a template directory name) — TODO confirm against the full file.
72 BOOT_SCRIPT_PATHNAME = os.path.join(os.path.dirname(os.path.abspath(__file__)),
74 NFVBENCH_CFG_FILENAME)
class ChainException(Exception):
    """Raised when an operation on a benchmarking chain fails."""
80 class NetworkEncaps(object):
81 """Network encapsulation."""
# NOTE(review): only the class header and docstring are visible here; the
# embedded original numbering jumps from 81 to 84, so any class body is in the
# missing lines — confirm against the full file.
# NOTE(review): interior lines of this class are missing from this chunk
# (embedded original numbering is non-contiguous), so the if/else and try
# structure around reuse-vs-create is not visible — confirm against the full file.
84 class ChainFlavor(object):
85 """Class to manage the chain flavor."""
87 def __init__(self, flavor_name, flavor_dict, comp):
88 """Create a flavor.
89
# flavor_name: name of the flavor to look up or create
# flavor_dict: flavor properties; 'extra_specs' is popped out and applied
#              separately via set_keys (see below)
# comp: compute helper used for find_flavor/create_flavor
"""
89 self.name = flavor_name
# try to reuse an existing flavor of the same name first
91 self.flavor = self.comp.find_flavor(flavor_name)
95 LOG.info("Reused flavor '%s'", flavor_name)
97 extra_specs = flavor_dict.pop('extra_specs', None)
99 self.flavor = comp.create_flavor(flavor_name,
102 LOG.info("Created flavor '%s'", flavor_name)
# extra specs are applied after creation (presumably only when provided
# and only on a newly created flavor — TODO confirm against the full file)
104 self.flavor.set_keys(extra_specs)
107 """Delete this flavor."""
# only delete flavors this class created, never reused ones
108 if not self.reuse and self.flavor:
110 LOG.info("Flavor '%s' deleted", self.name)
# NOTE(review): interior lines of this class are missing from this chunk
# (embedded original numbering is non-contiguous); method headers for get_mac,
# get_ip and delete, and the if/else/try structure, fall in the gaps — confirm
# against the full file.
113 class ChainVnfPort(object):
114 """A port associated to one VNF in the chain."""
116 def __init__(self, name, vnf, chain_network, vnic_type):
117 """Create or reuse a port on a given network.
119 if vnf.instance is None the VNF instance is not reused and this ChainVnfPort instance must
121 Otherwise vnf.instance is a reused VNF instance and this ChainVnfPort instance must
122 find an existing port to reuse that matches the port requirements: same attached network,
123 instance, name, vnic type
125 name: name for this port
126 vnf: ChainVnf instance that owns this port
127 chain_network: ChainNetwork instance where this port should attach
128 vnic_type: required vnic type for this port
"""
132 self.manager = vnf.manager
135 self.floating_ip = None
137 # VNF instance is reused, we need to find an existing port that matches this instance
139 # discover ports attached to this instance
140 port_list = self.manager.get_ports_from_network(chain_network)
# a matching port must agree on name, vnic type and owning instance
141 for port in port_list:
142 if port['name'] != name:
144 if port['binding:vnic_type'] != vnic_type:
146 if port['device_id'] == vnf.get_uuid():
148 LOG.info('Reusing existing port %s mac=%s', name, port['mac_address'])
# no port matched all criteria for the reused instance
151 raise ChainException('Cannot find matching port')
153 # VNF instance is not created yet, we need to create a new port
157 'network_id': chain_network.get_uuid(),
158 'binding:vnic_type': vnic_type
161 port = self.manager.neutron_client.create_port(body)
162 self.port = port['port']
163 LOG.info('Created port %s', name)
# disable port security so traffic with arbitrary MACs/IPs can pass;
# failure is logged and ignored (best effort) — see the two LOG lines below
165 self.manager.neutron_client.update_port(self.port['id'], {
167 'security_groups': [],
168 'port_security_enabled': False,
171 LOG.info('Security disabled on port %s', name)
173 LOG.info('Failed to disable security on port %s (ignored)', name)
176 """Get the MAC address for this port."""
177 return self.port['mac_address']
180 """Get the IP address for this port."""
181 return self.port['fixed_ips'][0]['ip_address']
183 def set_floating_ip(self, chain_network):
184 # create and add floating ip to port
# returns the floating IP address on success, otherwise falls back to the
# port's fixed IP (failure is logged and ignored)
186 self.floating_ip = self.manager.neutron_client.create_floatingip({
188 'floating_network_id': chain_network.get_uuid(),
189 'port_id': self.port['id'],
190 'description': 'nfvbench floating ip for port:' + self.port['name'],
192 LOG.info('Floating IP %s created and associated on port %s',
193 self.floating_ip['floating_ip_address'], self.name)
194 return self.floating_ip['floating_ip_address']
196 LOG.info('Failed to created and associated floating ip on port %s (ignored)', self.name)
197 return self.port['fixed_ips'][0]['ip_address']
200 """Delete this port instance."""
# never delete reused ports; retry deletion with a poll delay between attempts
201 if self.reuse or not self.port:
203 for _ in range(0, self.manager.config.generic_retry_count):
205 self.manager.neutron_client.delete_port(self.port['id'])
206 LOG.info("Deleted port %s", self.name)
208 self.manager.neutron_client.delete_floatingip(self.floating_ip['id'])
209 LOG.info("Deleted floating IP %s", self.floating_ip['description'])
212 time.sleep(self.manager.config.generic_poll_sec)
213 LOG.error('Unable to delete port: %s', self.name)
# NOTE(review): interior lines of this class are missing from this chunk
# (embedded original numbering is non-contiguous); several method headers
# (get_uuid, get_vlan, get_vxlan, delete) and if/else/try lines fall in the
# gaps — confirm against the full file.
216 class ChainNetwork(object):
217 """Could be a shared network across all chains or a chain private network."""
219 def __init__(self, manager, network_config, chain_id=None, lookup_only=False,
221 """Create a network for given chain.
223 network_config: a dict containing the network properties
224 (name, segmentation_id and physical_network)
225 chain_id: to which chain the networks belong.
226 a None value will mean that these networks are shared by all chains
227 suffix: a suffix to add to the network name (if not None)
"""
229 self.manager = manager
231 self.name = network_config.name
233 # the name itself can be either a string or a list of names indexed by chain ID
234 if isinstance(network_config.name, tuple):
235 self.name = network_config.name[chain_id]
237 # network_config.name is a prefix string
238 self.name = network_config.name + str(chain_id)
240 self.name = self.name + suffix
# segmentation id auto-increments per chain when a single base value is given
241 self.segmentation_id = self._get_item(network_config.segmentation_id,
242 chain_id, auto_index=True)
243 self.physical_network = self._get_item(network_config.physical_network, chain_id)
248 if manager.config.l3_router and hasattr(network_config, 'router_name'):
249 self.router_name = network_config.router_name
251 self._setup(network_config, lookup_only)
# error messages differ depending on lookup_only vs create mode
254 LOG.error("Cannot find network %s", self.name)
256 LOG.error("Error creating network %s", self.name)
260 def _get_item(self, item_field, index, auto_index=False):
261 """Retrieve an item from a list or a single value.
263 item_field: can be None, a tuple of a single value
264 index: if None is same as 0, else is the index for a chain
265 auto_index: if true will automatically get the final value by adding the
266 index to the base value (if full list not provided)
268 If the item_field is not a tuple, it is considered same as a tuple with same value at any
270 If a list is provided, its length must be > index
"""
276 if isinstance(item_field, tuple):
278 return item_field[index]
280 raise ChainException("List %s is too short for chain index %d" %
281 (str(item_field), index))
282 # single value is configured
284 return item_field + index
287 def _setup(self, network_config, lookup_only):
288 # Lookup if there is a matching network with same name
289 networks = self.manager.neutron_client.list_networks(name=self.name)
290 if networks['networks']:
291 network = networks['networks'][0]
292 # a network of same name already exists, we need to verify it has the same
# provider attributes as requested before reusing it
294 if self.segmentation_id:
295 if network['provider:segmentation_id'] != self.segmentation_id:
296 raise ChainException("Mismatch of 'segmentation_id' for reused "
297 "network '{net}'. Network has id '{seg_id1}', "
298 "configuration requires '{seg_id2}'."
299 .format(net=self.name,
300 seg_id1=network['provider:segmentation_id'],
301 seg_id2=self.segmentation_id))
303 if self.physical_network:
304 if network['provider:physical_network'] != self.physical_network:
305 raise ChainException("Mismatch of 'physical_network' for reused "
306 "network '{net}'. Network has '{phys1}', "
307 "configuration requires '{phys2}'."
308 .format(net=self.name,
309 phys1=network['provider:physical_network'],
310 phys2=self.physical_network))
312 LOG.info('Reusing existing network %s', self.name)
314 self.network = network
# in lookup_only mode a missing network is an error, never created
317 raise ChainException('Network %s not found' % self.name)
# build the create_network request body; provider attributes are optional
321 'admin_state_up': True
324 if network_config.network_type:
325 body['network']['provider:network_type'] = network_config.network_type
326 if self.segmentation_id:
327 body['network']['provider:segmentation_id'] = self.segmentation_id
328 if self.physical_network:
329 body['network']['provider:physical_network'] = self.physical_network
330 self.network = self.manager.neutron_client.create_network(body)['network']
331 # create associated subnet, all subnets have the same name (which is ok since
332 # we do not need to address them directly by name)
334 'subnet': {'name': network_config.subnet,
335 'cidr': network_config.cidr,
336 'network_id': self.network['id'],
337 'enable_dhcp': False,
339 'dns_nameservers': []}
341 subnet = self.manager.neutron_client.create_subnet(body)['subnet']
342 # add subnet id to the network dict since it has just been added
343 self.network['subnets'] = [subnet['id']]
344 LOG.info('Created network: %s', self.name)
348 Extract UUID of this network.
350 :return: UUID of this network
352 return self.network['id']
356 Extract vlan for this network.
358 :return: vlan ID for this network
# only valid for vlan provider networks
360 if self.network['provider:network_type'] != 'vlan':
361 raise ChainException('Trying to retrieve VLAN id for non VLAN network')
362 return self.network['provider:segmentation_id']
366 Extract VNI for this network.
368 :return: VNI ID for this network
371 return self.network['provider:segmentation_id']
373 def get_mpls_inner_label(self):
375 Extract MPLS VPN Label for this network.
377 :return: MPLS VPN Label for this network
380 return self.network['provider:segmentation_id']
383 """Delete this network."""
# never delete reused networks; retry with a poll delay between attempts
384 if not self.reuse and self.network:
385 for retry in range(0, self.manager.config.generic_retry_count):
387 self.manager.neutron_client.delete_network(self.network['id'])
388 LOG.info("Deleted network: %s", self.name)
391 LOG.info('Error deleting network %s (retry %d/%d)...',
394 self.manager.config.generic_retry_count)
395 time.sleep(self.manager.config.generic_poll_sec)
396 LOG.error('Unable to delete network: %s', self.name)
# NOTE(review): interior lines of this class are missing from this chunk
# (embedded original numbering is non-contiguous); many if/else/try lines,
# list initializations (e.g. self.ports, self.routers, port_ids) and some
# method headers fall in the gaps — confirm against the full file.
399 class ChainVnf(object):
400 """A class to represent a VNF in a chain."""
402 def __init__(self, chain, vnf_id, networks):
403 """Reuse a VNF instance with same characteristics or create a new VNF instance.
405 chain: the chain where this vnf belongs
406 vnf_id: indicates the index of this vnf in its chain (first vnf=0)
407 networks: the list of all networks (ChainNetwork) of the current chain
"""
409 self.manager = chain.manager
412 self.name = self.manager.config.loop_vm_name + str(chain.chain_id)
413 if len(networks) > 2:
414 # we will have more than 1 VM in each chain
415 self.name += '-' + str(vnf_id)
416 # A list of ports for this chain
417 # There are normally 2 ports carrying traffic (index 0, and index 1) and
418 # potentially multiple idle ports not carrying traffic (index 2 and up)
419 # For example if 7 idle interfaces are requested, the corresp. ports will be
422 self.management_port = None
428 self.idle_networks = []
431 # the vnf_id is conveniently also the starting index in networks
432 # for the left and right networks associated to this VNF
# l3_router mode needs 4 networks (2 internal + 2 edge) per VNF
433 if self.manager.config.l3_router:
434 self._setup(networks[vnf_id:vnf_id + 4])
436 self._setup(networks[vnf_id:vnf_id + 2])
438 LOG.error("Error creating VNF %s", self.name)
442 def _get_vm_config(self, remote_mac_pair):
# Build the VM boot-time config file content from the boot script template.
# remote_mac_pair: (left_mac, right_mac) of the next hops, used when ARP
# is disabled in the loop VM.
443 config = self.manager.config
444 devices = self.manager.generator_config.devices
# l3_router branch: gateways/macs come from the neutron router edge ports
447 tg_gateway1_ip = self.routers[LEFT].ports[1]['fixed_ips'][0][
448 'ip_address'] # router edge ip left
449 tg_gateway2_ip = self.routers[RIGHT].ports[1]['fixed_ips'][0][
450 'ip_address'] # router edge ip right
451 tg_mac1 = self.routers[LEFT].ports[1]['mac_address'] # router edge mac left
452 tg_mac2 = self.routers[RIGHT].ports[1]['mac_address'] # router edge mac right
453 # edge cidr mask left
454 vnf_gateway1_cidr = \
455 self.ports[LEFT].get_ip() + self.__get_network_mask(
456 self.manager.config.edge_networks.left.cidr)
457 # edge cidr mask right
458 vnf_gateway2_cidr = \
459 self.ports[RIGHT].get_ip() + self.__get_network_mask(
460 self.manager.config.edge_networks.right.cidr)
# l3_router mode requires the VPP forwarder inside the VM
461 if config.vm_forwarder != 'vpp':
462 raise ChainException(
463 'L3 router mode imply to set VPP as VM forwarder.'
464 'Please update your config file with: vm_forwarder: vpp')
# non-l3 branch: gateways come from the traffic generator device config
466 tg_gateway1_ip = devices[LEFT].tg_gateway_ip_addrs
467 tg_gateway2_ip = devices[RIGHT].tg_gateway_ip_addrs
468 if not config.loop_vm_arp:
469 tg_mac1 = remote_mac_pair[0]
470 tg_mac2 = remote_mac_pair[1]
475 g1cidr = devices[LEFT].get_gw_ip(
476 self.chain.chain_id) + self.__get_network_mask(
477 self.manager.config.internal_networks.left.cidr)
478 g2cidr = devices[RIGHT].get_gw_ip(
479 self.chain.chain_id) + self.__get_network_mask(
480 self.manager.config.internal_networks.right.cidr)
482 vnf_gateway1_cidr = g1cidr
483 vnf_gateway2_cidr = g2cidr
485 with open(BOOT_SCRIPT_PATHNAME, 'r') as boot_script:
486 content = boot_script.read()
# substitution values for the boot script template (str.format at the end)
488 'forwarder': config.vm_forwarder,
489 'intf_mac1': self.ports[LEFT].get_mac(),
490 'intf_mac2': self.ports[RIGHT].get_mac(),
491 'tg_gateway1_ip': tg_gateway1_ip,
492 'tg_gateway2_ip': tg_gateway2_ip,
493 'tg_net1': devices[LEFT].ip_addrs,
494 'tg_net2': devices[RIGHT].ip_addrs,
495 'vnf_gateway1_cidr': vnf_gateway1_cidr,
496 'vnf_gateway2_cidr': vnf_gateway2_cidr,
499 'vif_mq_size': config.vif_multiqueue_size,
500 'num_mbufs': config.num_mbufs
502 if self.manager.config.use_management_port:
503 mgmt_ip = self.management_port.port['fixed_ips'][0]['ip_address']
504 mgmt_mask = self.__get_network_mask(self.manager.config.management_network.cidr)
505 vm_config['intf_mgmt_cidr'] = mgmt_ip + mgmt_mask
506 vm_config['intf_mgmt_ip_gw'] = self.manager.config.management_network.gateway
507 vm_config['intf_mac_mgmt'] = self.management_port.port['mac_address']
509 # Interface management config left empty to avoid error in VM spawn
510 # if nfvbench config has values for management network but use_management_port=false
511 vm_config['intf_mgmt_cidr'] = ''
512 vm_config['intf_mgmt_ip_gw'] = ''
513 vm_config['intf_mac_mgmt'] = ''
514 return content.format(**vm_config)
# helper: return the '/NN' prefix-length suffix of a CIDR string
517 def __get_network_mask(network):
518 return '/' + network.split('/')[1]
520 def _get_vnic_type(self, port_index):
521 """Get the right vnic type for given port index.
523 If SR-IOV is specified, middle ports in multi-VNF chains
524 can use vswitch or SR-IOV based on config.use_sriov_middle_net
"""
526 if self.manager.config.sriov:
527 chain_length = self.chain.get_length()
528 if self.manager.config.use_sriov_middle_net or chain_length == 1:
530 if self.vnf_id == 0 and port_index == 0:
531 # first VNF in chain must use sriov for left port
533 if (self.vnf_id == chain_length - 1) and (port_index == 1):
534 # last VNF in chain must use sriov for right port
538 def _get_idle_networks_ports(self):
539 """Get the idle networks for PVP or PVVP chain (non shared net only)
541 For EXT packet path or shared net, returns empty list.
542 For PVP, PVVP these networks will be created if they do not exist.
543 chain_id: to which chain the networks belong.
544 a None value will mean that these networks are shared by all chains
"""
548 config = self.manager.config
549 chain_id = self.chain.chain_id
550 idle_interfaces_per_vm = config.idle_interfaces_per_vm
551 if config.service_chain == ChainType.EXT or chain_id is None or \
552 idle_interfaces_per_vm == 0:
555 # Make a copy of the idle networks dict as we may have to modify the
# segmentation id per idle network without touching the shared config
557 idle_network_cfg = AttrDict(config.idle_networks)
558 if idle_network_cfg.segmentation_id:
559 segmentation_id = idle_network_cfg.segmentation_id + \
560 chain_id * idle_interfaces_per_vm
562 segmentation_id = None
564 # create as many idle networks and ports as requested
565 for idle_index in range(idle_interfaces_per_vm):
566 if config.service_chain == ChainType.PVP:
567 suffix = '.%d' % (idle_index)
569 suffix = '.%d.%d' % (self.vnf_id, idle_index)
570 port_name = self.name + '-idle' + str(idle_index)
571 # update the segmentation id based on chain id and idle index
573 idle_network_cfg.segmentation_id = segmentation_id + idle_index
574 port_name = port_name + "." + str(segmentation_id)
576 networks.append(ChainNetwork(self.manager,
580 ports.append(ChainVnfPort(port_name,
582 networks[idle_index],
585 # need to cleanup all successful networks
# (cleanup loop is in the missing lines — TODO confirm against the full file)
591 self.idle_networks = networks
592 self.idle_ports = ports
594 def _setup(self, networks):
# Reuse a matching existing instance if possible, otherwise prepare ports
# (and optionally routers) so the VM can be created later by create_vnf().
595 flavor_id = self.manager.flavor.flavor.id
596 # Check if we can reuse an instance with same name
597 for instance in self.manager.existing_instances:
598 if instance.name == self.name:
600 instance_right = RIGHT
601 # In case of L3 traffic instance use edge networks
602 if self.manager.config.l3_router:
603 instance_left = EDGE_LEFT
604 instance_right = EDGE_RIGHT
605 # Verify that other instance characteristics match
606 if instance.flavor['id'] != flavor_id:
607 self._reuse_exception('Flavor mismatch')
608 if instance.status != "ACTIVE":
609 self._reuse_exception('Matching instance is not in ACTIVE state')
610 # The 2 networks for this instance must also be reused
611 if not networks[instance_left].reuse:
612 self._reuse_exception('network %s is new' % networks[instance_left].name)
613 if not networks[instance_right].reuse:
614 self._reuse_exception('network %s is new' % networks[instance_right].name)
615 # instance.networks have the network names as keys:
616 # {'nfvbench-rnet0': ['192.168.2.10'], 'nfvbench-lnet0': ['192.168.1.8']}
617 if networks[instance_left].name not in instance.networks:
618 self._reuse_exception('Left network mismatch')
619 if networks[instance_right].name not in instance.networks:
620 self._reuse_exception('Right network mismatch')
623 self.instance = instance
624 LOG.info('Reusing existing instance %s on %s',
625 self.name, self.get_hypervisor_name())
626 # create management port if needed
627 if self.manager.config.use_management_port:
628 self.management_port = ChainVnfPort(self.name + '-mgmt', self,
629 self.manager.management_network, 'normal')
630 ip = self.management_port.port['fixed_ips'][0]['ip_address']
631 if self.manager.config.use_floating_ip:
632 ip = self.management_port.set_floating_ip(self.manager.floating_ip_network)
633 LOG.info("Management interface will be active using IP: %s, "
634 "and you can connect over SSH with login: nfvbench and password: nfvbench", ip)
635 # create or reuse/discover 2 ports per instance
636 if self.manager.config.l3_router:
638 self.ports.append(ChainVnfPort(self.name + '-' + str(index),
641 self._get_vnic_type(index)))
644 self.ports.append(ChainVnfPort(self.name + '-' + str(index),
647 self._get_vnic_type(index)))
649 # create idle networks and ports only if instance is not reused
650 # if reused, we do not care about idle networks/ports
652 self._get_idle_networks_ports()
654 # Create neutron routers for L3 traffic use case
655 if self.manager.config.l3_router and self.manager.openstack:
656 internal_nets = networks[:2]
657 if self.manager.config.service_chain == ChainType.PVP:
658 edge_nets = networks[2:]
660 edge_nets = networks[3:]
661 subnets_left = [internal_nets[0], edge_nets[0]]
662 routes_left = [{'destination': self.manager.config.traffic_generator.ip_addrs[0],
663 'nexthop': self.manager.config.traffic_generator.tg_gateway_ip_addrs[
665 {'destination': self.manager.config.traffic_generator.ip_addrs[1],
666 'nexthop': self.ports[0].get_ip()}]
668 ChainRouter(self.manager, edge_nets[0].router_name, subnets_left, routes_left))
669 subnets_right = [internal_nets[1], edge_nets[1]]
670 routes_right = [{'destination': self.manager.config.traffic_generator.ip_addrs[0],
671 'nexthop': self.ports[1].get_ip()},
672 {'destination': self.manager.config.traffic_generator.ip_addrs[1],
673 'nexthop': self.manager.config.traffic_generator.tg_gateway_ip_addrs[
676 ChainRouter(self.manager, edge_nets[1].router_name, subnets_right, routes_right))
677 # Overload gateway_ips property with router ip address for ARP and traffic calls
678 self.manager.generator_config.devices[LEFT].set_gw_ip(
679 self.routers[LEFT].ports[0]['fixed_ips'][0]['ip_address']) # router edge ip left)
680 self.manager.generator_config.devices[RIGHT].set_gw_ip(
681 self.routers[RIGHT].ports[0]['fixed_ips'][0]['ip_address']) # router edge ip right)
683 # if no reuse, actual vm creation is deferred after all ports in the chain are created
684 # since we need to know the next mac in a multi-vnf chain
686 def create_vnf(self, remote_mac_pair):
687 """Create the VNF instance if it does not already exist."""
688 if self.instance is None:
# build the ordered list of port ids to attach (mgmt first, then traffic,
# then idle ports)
690 if self.manager.config.use_management_port:
691 port_ids.append({'port-id': self.management_port.port['id']})
692 port_ids.extend([{'port-id': vnf_port.port['id']} for vnf_port in self.ports])
694 for idle_port in self.idle_ports:
695 port_ids.append({'port-id': idle_port.port['id']})
696 vm_config = self._get_vm_config(remote_mac_pair)
697 az = self.manager.placer.get_required_az()
698 server = self.manager.comp.create_server(self.name,
699 self.manager.image_instance,
700 self.manager.flavor.flavor,
707 files={NFVBENCH_CFG_VM_PATHNAME: vm_config})
709 self.instance = server
710 if self.manager.placer.is_resolved():
711 LOG.info('Created instance %s on %s', self.name, az)
713 # the location is undetermined at this point
714 # self.get_hypervisor_name() will return None
715 LOG.info('Created instance %s - waiting for placement resolution...', self.name)
716 # here we MUST wait until this instance is resolved otherwise subsequent
717 # VNF creation can be placed in other hypervisors!
718 config = self.manager.config
719 max_retries = int((config.check_traffic_time_sec +
720 config.generic_poll_sec - 1) / config.generic_poll_sec)
722 for retry in range(max_retries):
723 status = self.get_status()
724 if status == 'ACTIVE':
725 hyp_name = self.get_hypervisor_name()
726 LOG.info('Instance %s is active and has been placed on %s',
728 self.manager.placer.register_full_name(hyp_name)
730 if status == 'ERROR':
731 raise ChainException('Instance %s creation error: %s' %
733 self.instance.fault['message']))
734 LOG.info('Waiting for instance %s to become active (retry %d/%d)...',
735 self.name, retry + 1, max_retries + 1)
736 time.sleep(config.generic_poll_sec)
739 LOG.error('Instance %s creation timed out', self.name)
740 raise ChainException('Instance %s creation timed out' % self.name)
743 raise ChainException('Unable to create instance: %s' % (self.name))
745 def _reuse_exception(self, reason):
# always raises; used by _setup to abort the reuse attempt with a reason
746 raise ChainException('Instance %s cannot be reused (%s)' % (self.name, reason))
748 def get_status(self):
749 """Get the status of this instance."""
750 if self.instance.status != 'ACTIVE':
751 self.instance = self.manager.comp.poll_server(self.instance)
752 return self.instance.status
754 def get_hostname(self):
755 """Get the hypervisor host name running this VNF instance."""
# non-admin users cannot query placement; fall back to configured hostname
756 if self.manager.is_admin:
757 hypervisor_hostname = getattr(self.instance, 'OS-EXT-SRV-ATTR:hypervisor_hostname')
759 hypervisor_hostname = self.manager.config.hypervisor_hostname
760 if not hypervisor_hostname:
761 raise ChainException('Hypervisor hostname parameter is mandatory')
762 return hypervisor_hostname
764 def get_host_ip(self):
765 """Get the IP address of the host where this instance runs.
767 return: the IP address
"""
770 self.host_ip = self.manager.comp.get_hypervisor(self.get_hostname()).host_ip
773 def get_hypervisor_name(self):
774 """Get hypervisor name (az:hostname) for this VNF instance."""
776 if self.manager.is_admin:
777 az = getattr(self.instance, 'OS-EXT-AZ:availability_zone')
779 az = self.manager.config.availability_zone
781 raise ChainException('Availability zone parameter is mandatory')
782 hostname = self.get_hostname()
784 return az + ':' + hostname
789 """Get the uuid for this instance."""
790 return self.instance.id
792 def delete(self, forced=False):
793 """Delete this VNF instance."""
# reused instances are left untouched unless deletion is forced
795 LOG.info("Instance %s not deleted (reused)", self.name)
798 self.manager.comp.delete_server(self.instance)
799 LOG.info("Deleted instance %s", self.name)
800 if self.manager.config.use_management_port:
801 self.management_port.delete()
802 for port in self.ports:
804 for port in self.idle_ports:
806 for network in self.idle_networks:
811 """A class to manage a single chain.
813 Can handle any type of chain (EXT, PVP, PVVP)
816 def __init__(self, chain_id, manager):
817 """Create a new chain.
819 chain_id: chain index (first chain is 0)
820 manager: the chain manager that owns all chains
822 self.chain_id = chain_id
823 self.manager = manager
824 self.encaps = manager.encaps
828 self.networks = manager.get_networks(chain_id)
829 # For external chain VNFs can only be discovered from their MAC addresses
830 # either from config or from ARP
831 if manager.config.service_chain != ChainType.EXT:
832 for chain_instance_index in range(self.get_length()):
833 self.instances.append(ChainVnf(self,
834 chain_instance_index,
836 # at this point new VNFs are not created yet but
837 # verify that all discovered VNFs are on the same hypervisor
838 self._check_hypervisors()
839 # now that all VNF ports are created we need to calculate the
840 # left/right remote MAC for each VNF in the chain
841 # before actually creating the VNF itself
842 rem_mac_pairs = self._get_remote_mac_pairs()
843 for instance in self.instances:
844 rem_mac_pair = rem_mac_pairs.pop(0)
845 instance.create_vnf(rem_mac_pair)
850 def _check_hypervisors(self):
851 common_hypervisor = None
852 for instance in self.instances:
853 # get the full hypervizor name (az:compute)
854 hname = instance.get_hypervisor_name()
856 if common_hypervisor:
857 if hname != common_hypervisor:
858 raise ChainException('Discovered instances on different hypervisors:'
859 ' %s and %s' % (hname, common_hypervisor))
861 common_hypervisor = hname
862 if common_hypervisor:
863 # check that the common hypervisor name matchs the requested hypervisor name
864 # and set the name to be used by all future instances (if any)
865 if not self.manager.placer.register_full_name(common_hypervisor):
866 raise ChainException('Discovered hypervisor placement %s is incompatible' %
869 def get_length(self):
870 """Get the number of VNF in the chain."""
871 # Take into account 2 edge networks for routers
872 return len(self.networks) - 3 if self.manager.config.l3_router else len(self.networks) - 1
874 def _get_remote_mac_pairs(self):
875 """Get the list of remote mac pairs for every VNF in the chain.
877 Traverse the chain from left to right and establish the
878 left/right remote MAC for each VNF in the chainself.
881 mac sequence: tg_src_mac, vm0-mac0, vm0-mac1, tg_dst_mac
882 must produce [[tg_src_mac, tg_dst_mac]] or looking at index in mac sequence: [[0, 3]]
883 the mac pair is what the VNF at that position (index 0) sees as next hop mac left and right
886 tg_src_mac, vm0-mac0, vm0-mac1, vm1-mac0, vm1-mac1, tg_dst_mac
887 Must produce the following list:
888 [[tg_src_mac, vm1-mac0], [vm0-mac1, tg_dst_mac]] or index: [[0, 3], [2, 5]]
890 General case with 3 VMs in chain, the list of consecutive macs (left to right):
891 tg_src_mac, vm0-mac0, vm0-mac1, vm1-mac0, vm1-mac1, vm2-mac0, vm2-mac1, tg_dst_mac
892 Must produce the following list:
893 [[tg_src_mac, vm1-mac0], [vm0-mac1, vm2-mac0], [vm1-mac1, tg_dst_mac]]
894 or index: [[0, 3], [2, 5], [4, 7]]
896 The series pattern is pretty clear: [[n, n+3],... ] where n is multiple of 2
898 # line up all mac from left to right
899 mac_seq = [self.manager.generator_config.devices[LEFT].mac]
900 for instance in self.instances:
901 mac_seq.append(instance.ports[0].get_mac())
902 mac_seq.append(instance.ports[1].get_mac())
903 mac_seq.append(self.manager.generator_config.devices[RIGHT].mac)
906 for _ in self.instances:
907 rem_mac_pairs.append([mac_seq[base], mac_seq[base + 3]])
911 def get_instances(self):
912 """Return all instances for this chain."""
913 return self.instances
915 def get_vlan(self, port_index):
916 """Get the VLAN id on a given port.
918 port_index: left port is 0, right port is 1
919 return: the vlan_id or None if there is no vlan tagging
921 # for port 1 we need to return the VLAN of the last network in the chain
922 # The networks array contains 2 networks for PVP [left, right]
923 # and 3 networks in the case of PVVP [left.middle,right]
925 # this will pick the last item in array
927 return self.networks[port_index].get_vlan()
929 def get_vxlan(self, port_index):
930 """Get the VXLAN id on a given port.
932 port_index: left port is 0, right port is 1
933 return: the vxlan_id or None if there is no vxlan
935 # for port 1 we need to return the VLAN of the last network in the chain
936 # The networks array contains 2 networks for PVP [left, right]
937 # and 3 networks in the case of PVVP [left.middle,right]
939 # this will pick the last item in array
941 return self.networks[port_index].get_vxlan()
943 def get_mpls_inner_label(self, port_index):
944 """Get the MPLS VPN Label on a given port.
946 port_index: left port is 0, right port is 1
947 return: the mpls_label_id or None if there is no mpls
949 # for port 1 we need to return the MPLS Label of the last network in the chain
950 # The networks array contains 2 networks for PVP [left, right]
951 # and 3 networks in the case of PVVP [left.middle,right]
953 # this will pick the last item in array
955 return self.networks[port_index].get_mpls_inner_label()
957 def get_dest_mac(self, port_index):
958 """Get the dest MAC on a given port.
960 port_index: left port is 0, right port is 1
964 # for right port, use the right port MAC of the last (right most) VNF In chain
965 return self.instances[-1].ports[1].get_mac()
966 # for left port use the left port MAC of the first (left most) VNF in chain
967 return self.instances[0].ports[0].get_mac()
969 def get_network_uuids(self):
970 """Get UUID of networks in this chain from left to right (order is important).
972 :return: list of UUIDs of networks (2 or 3 elements)
974 return [net['id'] for net in self.networks]
976 def get_host_ips(self):
977 """Return the IP adresss(es) of the host compute nodes used for this chain.
979 :return: a list of 1 or 2 IP addresses
981 return [vnf.get_host_ip() for vnf in self.instances]
def get_compute_nodes(self):
    """Return the name of the host compute nodes used for this chain.

    :return: a list of 1 host name in the az:host format
    """
    # Since all chains go through the same compute node(s) we can just retrieve the
    # compute node name(s) for the first chain
    return [vnf.get_hypervisor_name() for vnf in self.instances]
def delete(self):
    """Delete this chain (all instances and, if not shared, all networks)."""
    for instance in self.instances:
        instance.delete()
    # only delete if these are chain private networks (not shared)
    if not self.manager.config.service_chain_shared_net:
        for network in self.networks:
            network.delete()
class InstancePlacer(object):
    """A class to manage instance placement for all VNFs in all chains.

    A full az string is made of 2 parts AZ and hypervisor.
    The placement is resolved when both parts az and hypervisor names are known.
    """

    def __init__(self, req_az, req_hyp):
        """Create a new instance placer.

        req_az: requested AZ (can be None or empty if no preference)
        req_hyp: requested hypervisor name (can be None of empty if no preference)
                 can be any of 'nova:', 'comp1', 'nova:comp1'
                 if it is a list, only the first item is used (backward compatibility in config)

        req_az is ignored if req_hyp has an az part
        all other parts beyond the first 2 are ignored in req_hyp
        """
        # if passed a list just pick the first item
        if req_hyp and isinstance(req_hyp, list):
            req_hyp = req_hyp[0]
        # only pick first part of az
        if req_az and ':' in req_az:
            req_az = req_az.split(':')[0]
        if req_hyp:
            # check if requested hypervisor string has an AZ part
            split_hyp = req_hyp.split(':')
            if len(split_hyp) > 1:
                # override the AZ part and hypervisor part
                req_az = split_hyp[0]
                req_hyp = split_hyp[1]
        self.requested_az = req_az if req_az else ''
        self.requested_hyp = req_hyp if req_hyp else ''
        # Nova can accept AZ only (e.g. 'nova:', use any hypervisor in that AZ)
        # or hypervisor only (e.g. ':comp1')
        # or both (e.g. 'nova:comp1')
        if req_az:
            self.required_az = req_az + ':' + self.requested_hyp
        else:
            # need to insert a ':' so nova knows this is the hypervisor name
            self.required_az = ':' + self.requested_hyp if req_hyp else ''
        # placement is resolved when both AZ and hypervisor names are known and set
        self.resolved = self.requested_az != '' and self.requested_hyp != ''

    def get_required_az(self):
        """Return the required az (can be resolved or not)."""
        return self.required_az

    def register_full_name(self, discovered_az):
        """Verify compatibility and register a discovered hypervisor full name.

        discovered_az: a discovered AZ in az:hypervisor format
        return: True if discovered_az is compatible and set
                False if discovered_az is not compatible
        """
        if self.resolved:
            # placement is already pinned: only an exact match is compatible
            return discovered_az == self.required_az

        # must be in full az format
        split_daz = discovered_az.split(':')
        if len(split_daz) != 2:
            return False
        if self.requested_az and self.requested_az != split_daz[0]:
            return False
        if self.requested_hyp and self.requested_hyp != split_daz[1]:
            return False
        self.required_az = discovered_az
        self.resolved = True
        return True

    def is_resolved(self):
        """Check if the full AZ is resolved.

        return: True if resolved
        """
        return self.resolved
1080 class ChainManager(object):
1081 """A class for managing all chains for a given run.
1083 Supports openstack or no openstack.
1084 Supports EXT, PVP and PVVP chains.
def __init__(self, chain_runner):
    """Create a chain manager to take care of discovering or bringing up the requested chains.

    A new instance must be created every time a new config is used.
    config: the nfvbench config to use
    cred: openstack credentials to use of None if there is no openstack
    """
    self.chain_runner = chain_runner
    self.config = chain_runner.config
    self.generator_config = chain_runner.traffic_client.generator_config
    self.chains = []
    self.image_instance = None
    self.image_name = None
    # Left and right networks shared across all chains (only if shared)
    self.networks = []
    self.flavor = None
    self.comp = None
    self.nova_client = None
    self.neutron_client = None
    self.glance_client = None
    self.existing_instances = []
    # existing ports keyed by the network uuid they belong to
    self._existing_ports = {}
    config = self.config
    # openstack mode requires credentials and is disabled in l2 loopback mode
    self.openstack = (chain_runner.cred is not None) and not config.l2_loopback
    self.chain_count = config.service_chain_count
    if self.openstack:
        # openstack only
        session = chain_runner.cred.get_session()
        self.is_admin = chain_runner.cred.is_admin
        self.nova_client = Client(2, session=session)
        self.neutron_client = neutronclient.Client('2.0', session=session)
        self.glance_client = glanceclient.Client('2', session=session)
        self.comp = compute.Compute(self.nova_client,
                                    self.glance_client,
                                    config)
        try:
            if config.service_chain != ChainType.EXT:
                self.placer = InstancePlacer(config.availability_zone, config.compute_nodes)
                self._setup_image()
                self.flavor = ChainFlavor(config.flavor_type, config.flavor, self.comp)
                # Get list of all existing instances to check if some instances can be reused
                self.existing_instances = self.comp.get_server_list()
                # If management port is requested for VMs, create management network (shared)
                if self.config.use_management_port:
                    self.management_network = ChainNetwork(self, self.config.management_network,
                                                           None, False)
                    # If floating IP is used for management, create and share
                    # across chains the floating network
                    if self.config.use_floating_ip:
                        self.floating_ip_network = ChainNetwork(self,
                                                                self.config.floating_network,
                                                                None, False)
            else:
                # For EXT chains, the external_networks left and right fields in the config
                # must be either a prefix string or a list of at least chain-count strings
                self._check_extnet('left', config.external_networks.left)
                self._check_extnet('right', config.external_networks.right)

            # If networks are shared across chains, get the list of networks
            if config.service_chain_shared_net:
                self.networks = self.get_networks()
            # Reuse/create chains
            for chain_id in range(self.chain_count):
                self.chains.append(Chain(chain_id, self))
            if config.service_chain == ChainType.EXT:
                # if EXT and no ARP or VxLAN we need to read dest MACs from config
                if config.no_arp or config.vxlan:
                    self._get_dest_macs_from_config()

            # Make sure all instances are active before proceeding
            self._ensure_instances_active()
            # network API call do not show VLANS ID if not admin read from config
            if not self.is_admin and config.vlan_tagging:
                self._get_config_vlans()
        except Exception:
            # cleanup anything partially staged before bailing out
            self.delete()
            raise
    else:
        # no openstack, no need to create chains
        if not config.l2_loopback and config.no_arp:
            self._get_dest_macs_from_config()
        if config.vlan_tagging:
            # make sure there at least as many entries as chains in each left/right list
            if len(config.vlans) != 2:
                raise ChainException('The config vlans property must be a list '
                                     'with 2 lists of VLAN IDs')
            self._get_config_vlans()
        if config.vxlan:
            raise ChainException('VxLAN is only supported with OpenStack')
def _check_extnet(self, side, name):
    """Validate the external network name(s) configured for one side (left/right).

    side: 'left' or 'right' (used in error messages only)
    name: a network name prefix string or a tuple of network names
    raise ChainException on invalid config
    """
    # NOTE(review): the guard before the first raise was lost in this copy;
    # a falsy name is presumed invalid — confirm against upstream
    if not name:
        raise ChainException('external_networks.%s must contain a valid network'
                             ' name prefix or a list of network names' % side)
    # a tuple of names must provide at least one name per chain
    if isinstance(name, tuple) and len(name) < self.chain_count:
        raise ChainException('external_networks.%s %s'
                             ' must have at least %d names' % (side, name, self.chain_count))
def _get_config_vlans(self):
    """Load the per-port VLAN ID lists from the config (non-admin or no-openstack case).

    Sets self.vlans to [left VLAN list, right VLAN list].
    raise ChainException if the vlans config entry is missing or malformed.
    """
    re_vlan = "[0-9]*$"
    try:
        self.vlans = [self._check_list('vlans[0]', self.config.vlans[0], re_vlan),
                      self._check_list('vlans[1]', self.config.vlans[1], re_vlan)]
    except IndexError as exc:
        raise ChainException('vlans parameter is mandatory. Set valid value in config file') \
            from exc
def _get_dest_macs_from_config(self):
    """Read and validate the per-port dest MAC lists from the traffic generator config.

    Sets self.dest_macs to [left MAC list, right MAC list].
    """
    re_mac = "[0-9a-fA-F]{2}([-:])[0-9a-fA-F]{2}(\\1[0-9a-fA-F]{2}){4}$"
    tg_config = self.config.traffic_generator
    # validate left then right, preserving the [left, right] ordering
    self.dest_macs = [
        self._check_list(list_name, mac_list, re_mac)
        for list_name, mac_list in (("mac_addrs_left", tg_config.mac_addrs_left),
                                    ("mac_addrs_right", tg_config.mac_addrs_right))
    ]
def _check_list(self, list_name, ll, pattern):
    """Validate a config list against a regex pattern and normalize it.

    list_name: name of the config entry (for error messages)
    ll: a single int/str or a list of int/str to validate
    pattern: regex that every element must match (str() applied first)
    return: the validated list, replicated chain_count times if a single
            element was given with shared networking
    raise ChainException on format or cardinality errors
    """
    # if it is a single int or mac, make it a list of 1 int
    if isinstance(ll, (int, str)):
        ll = [ll]
    for item in ll:
        if not re.match(pattern, str(item)):
            raise ChainException("Invalid format '{item}' specified in {fname}"
                                 .format(item=item, fname=list_name))
    # must have at least 1 element
    if not ll:
        raise ChainException('%s cannot be empty' % (list_name))
    # for shared network, if 1 element is passed, replicate it as many times
    # as there are chains
    if self.config.service_chain_shared_net and len(ll) == 1:
        ll = [ll[0]] * self.chain_count
    # number of elements must be the number of chains
    elif len(ll) < self.chain_count:
        raise ChainException('%s=%s must be a list with %d elements per chain' %
                             (list_name, ll, self.chain_count))
    return ll
def _setup_image(self):
    """Find, discover or upload the VM image to use and set its multiqueue property.

    Sets self.image_name and self.image_instance.
    raise ChainException if no usable image file can be located or uploaded.
    """
    # To avoid reuploading image in server mode, check whether image_name is set or not
    if self.image_name:
        self.image_instance = self.comp.find_image(self.image_name)
    if self.image_instance:
        LOG.info("Reusing image %s", self.image_name)
    else:
        image_name_search_pattern = r'(nfvbenchvm-\d+(\.\d+)*).qcow2'
        if self.config.vm_image_file:
            match = re.search(image_name_search_pattern, self.config.vm_image_file)
            if match:
                self.image_name = match.group(1)
                LOG.info('Using provided VM image file %s', self.config.vm_image_file)
            else:
                raise ChainException('Provided VM image file name %s must start with '
                                     '"nfvbenchvm-<version>"' % self.config.vm_image_file)
        else:
            # no image file provided: look for a built-in one in the package root
            pkg_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
            for f in os.listdir(pkg_root):
                if re.search(image_name_search_pattern, f):
                    self.config.vm_image_file = pkg_root + '/' + f
                    self.image_name = f.replace('.qcow2', '')
                    LOG.info('Found built-in VM image file %s', f)
                    break
            else:
                raise ChainException('Cannot find any built-in VM image file.')
        if self.image_name:
            self.image_instance = self.comp.find_image(self.image_name)
        if not self.image_instance:
            LOG.info('Uploading %s', self.image_name)
            res = self.comp.upload_image_via_url(self.image_name,
                                                 self.config.vm_image_file)
            if not res:
                raise ChainException('Error uploading image %s from %s. ABORTING.' %
                                     (self.image_name, self.config.vm_image_file))
            LOG.info('Image %s successfully uploaded.', self.image_name)
            self.image_instance = self.comp.find_image(self.image_name)

    # image multiqueue property must be set according to the vif_multiqueue_size
    # config value (defaults to 1 or disabled)
    self.comp.image_set_multiqueue(self.image_instance, self.config.vif_multiqueue_size > 1)
def _ensure_instances_active(self):
    """Wait for all instances in all chains to become ACTIVE, polling periodically.

    raise ChainException if any instance goes to ERROR state or on time-out.
    """
    instances = []
    for chain in self.chains:
        instances.extend(chain.get_instances())
    initial_instance_count = len(instances)
    # retry budget: base traffic-check time plus 10 extra seconds per extra instance,
    # rounded up to a whole number of poll intervals (// keeps it an int - a plain /
    # would yield a float retry count under python3)
    max_retries = (self.config.check_traffic_time_sec + (initial_instance_count - 1) * 10 +
                   self.config.generic_poll_sec - 1) // self.config.generic_poll_sec
    retry = 0
    while instances:
        remaining_instances = []
        for instance in instances:
            status = instance.get_status()
            if status == 'ACTIVE':
                LOG.info('Instance %s is ACTIVE on %s',
                         instance.name, instance.get_hypervisor_name())
                continue
            if status == 'ERROR':
                raise ChainException('Instance %s creation error: %s' %
                                     (instance.name,
                                      instance.instance.fault['message']))
            remaining_instances.append(instance)
        if not remaining_instances:
            break
        retry += 1
        if retry >= max_retries:
            raise ChainException('Time-out: %d/%d instances still not active' %
                                 (len(remaining_instances), initial_instance_count))
        LOG.info('Waiting for %d/%d instance to become active (retry %d/%d)...',
                 len(remaining_instances), initial_instance_count,
                 retry, max_retries)
        instances = remaining_instances
        time.sleep(self.config.generic_poll_sec)
    if initial_instance_count:
        LOG.info('All instances are active')
def get_networks(self, chain_id=None):
    """Get the networks for given EXT, PVP or PVVP chain.

    For EXT packet path, these networks must pre-exist.
    For PVP, PVVP these networks will be created if they do not exist.
    chain_id: to which chain the networks belong.
              a None value will mean that these networks are shared by all chains
    return: list of ChainNetwork instances (2 or more, left to right)
    """
    if self.networks:
        # the only case where self.networks exists is when the networks are shared
        # across all chains
        return self.networks
    if self.config.service_chain == ChainType.EXT:
        # EXT networks are never created, only discovered
        lookup_only = True
        ext_net = self.config.external_networks
        net_cfg = [AttrDict({'name': name,
                             'segmentation_id': None,
                             'physical_network': None})
                   for name in [ext_net.left, ext_net.right]]
        # segmentation id and subnet should be discovered from neutron
    else:
        lookup_only = False
        int_nets = self.config.internal_networks
        if self.config.service_chain == ChainType.PVP:
            net_cfg = [int_nets.left, int_nets.right]
        else:
            net_cfg = [int_nets.left, int_nets.middle, int_nets.right]
        if self.config.l3_router:
            edge_nets = self.config.edge_networks
            net_cfg.append(edge_nets.left)
            net_cfg.append(edge_nets.right)
    networks = []
    try:
        for cfg in net_cfg:
            networks.append(ChainNetwork(self, cfg, chain_id, lookup_only=lookup_only))
    except Exception:
        # need to cleanup all successful networks prior to bailing out
        for net in networks:
            net.delete()
        raise
    return networks
def get_existing_ports(self):
    """Get the list of existing ports.

    Lazy retrieval of ports as this can be costly if there are lots of ports and
    is only needed when VM and network are being reused.

    return: a dict of list of neutron ports indexed by the network uuid they are attached to

    Each port is a neutron port dict, e.g. (abbreviated):
    {'id': '3dcb9cfa-d82a-4dd1-85a1-fd8284b52d72',
     'name': 'nfvbench-loop-vm0-0', 'admin_state_up': True,
     'mac_address': 'fa:16:3e:3c:63:04',
     'network_id': '3ea5fd88-278f-4d9d-b24d-1e443791a055',
     'fixed_ips': [{'subnet_id': '6903a3b3-...', 'ip_address': '192.168.1.4'}],
     'binding:host_id': 'a20-champagne-compute-1',
     'binding:vif_type': 'vhostuser',
     'binding:vnic_type': 'normal',
     'device_id': 'a98e2ad2-5371-4aa5-a356-8264a970ce4b',
     'tenant_id': '977ac76a63d7492f927fa80e86baff4c', ...}
    """
    if not self._existing_ports:
        LOG.info('Loading list of all ports...')
        all_ports = self.neutron_client.list_ports()['ports']
        # group all ports in the dict keyed by the port network uuid
        for port in all_ports:
            self._existing_ports.setdefault(port['network_id'], []).append(port)
        LOG.info("Loaded %d ports attached to %d networks",
                 len(all_ports), len(self._existing_ports))
    return self._existing_ports
def get_ports_from_network(self, chain_network):
    """Get the list of existing ports that belong to a network.

    Lazy retrieval of ports as this can be costly if there are lots of ports and
    is only needed when VM and network are being reused.

    chain_network: a ChainNetwork instance for which attached ports neeed to be retrieved
    return: list of neutron ports attached to requested network
    """
    ports_by_network = self.get_existing_ports()
    return ports_by_network.get(chain_network.get_uuid(), None)
def get_hypervisor_from_mac(self, mac):
    """Get the hypervisor that hosts a VM MAC.

    mac: MAC address to look for
    return: the hypervisor where the matching port runs or None if not found
    """
    # scan every port of every network until the MAC is found
    for port_list in self.get_existing_ports().values():
        for port in port_list:
            if port['mac_address'] == mac:
                return self.comp.get_hypervisor(port['binding:host_id'])
    return None
def get_host_ip_from_mac(self, mac):
    """Get the host IP address matching a MAC.

    mac: MAC address to look for
    return: the IP address of the host where the matching port runs or None if not found
    """
    hypervisor = self.get_hypervisor_from_mac(mac)
    # guard against unknown MAC: get_hypervisor_from_mac can return None
    if hypervisor:
        return hypervisor.host_ip
    return None
def get_chain_vlans(self, port_index):
    """Get the list of per chain VLAN id on a given port.

    port_index: left port is 0, right port is 1
    return: a VLAN ID list indexed by the chain index or None if no vlan tagging
    """
    if self.chains and self.is_admin:
        # discovered from neutron: one VLAN per chain, in chain index order
        return [self.chains[idx].get_vlan(port_index)
                for idx in range(self.chain_count)]
    # no openstack or non-admin user: fall back to the VLAN ids from the config
    return self.vlans[port_index]
def get_chain_vxlans(self, port_index):
    """Get the list of per chain VNIs id on a given port.

    port_index: left port is 0, right port is 1
    return: a VNIs ID list indexed by the chain index or None if no vlan tagging
    """
    if not (self.chains and self.is_admin):
        raise ChainException('VxLAN is only supported with OpenStack and with admin user')
    # one VNI per chain, in chain index order
    vnis = []
    for idx in range(self.chain_count):
        vnis.append(self.chains[idx].get_vxlan(port_index))
    return vnis
def get_chain_mpls_inner_labels(self, port_index):
    """Get the list of per chain MPLS VPN Labels on a given port.

    port_index: left port is 0, right port is 1
    return: a MPLSs ID list indexed by the chain index or None if no mpls
    """
    if not (self.chains and self.is_admin):
        raise ChainException('MPLS is only supported with OpenStack and with admin user')
    # one label per chain, in chain index order
    return [self.chains[idx].get_mpls_inner_label(port_index)
            for idx in range(self.chain_count)]
def get_dest_macs(self, port_index):
    """Get the list of per chain dest MACs on a given port.

    Should not be called if EXT+ARP is used (in that case the traffic gen will
    have the ARP responses back from VNFs with the dest MAC to use).

    port_index: left port is 0, right port is 1
    return: a list of dest MACs indexed by the chain index
    """
    discovered = self.chains and self.config.service_chain != ChainType.EXT
    if discovered:
        # MACs come from the staged VNF instances, in chain index order
        return [self.chains[idx].get_dest_mac(port_index)
                for idx in range(self.chain_count)]
    # no openstack or EXT+no-arp
    return self.dest_macs[port_index]
def get_host_ips(self):
    """Return the IP adresss(es) of the host compute nodes used for this run.

    :return: a list of 1 IP address
    """
    # Since all chains go through the same compute node(s) we can just retrieve the
    # compute node(s) for the first chain
    if self.chains:
        if self.config.service_chain != ChainType.EXT:
            return self.chains[0].get_host_ips()
        # in the case of EXT, the compute node must be retrieved from the port
        # associated to any of the dest MACs
        dst_macs = self.generator_config.get_dest_macs()
        # dest MAC on port 0, chain 0
        dst_mac = dst_macs[0][0]
        host_ip = self.get_host_ip_from_mac(dst_mac)
        if host_ip:
            LOG.info('Found compute node IP for EXT chain: %s', host_ip)
            return [host_ip]
    # no openstack = no chains
    return []
def get_compute_nodes(self):
    """Return the name of the host compute nodes used for this run.

    :return: a list of 0 or 1 host name in the az:host format
    """
    # Since all chains go through the same compute node(s) we can just retrieve the
    # compute node name(s) for the first chain
    if self.chains:
        if self.config.service_chain != ChainType.EXT:
            return self.chains[0].get_compute_nodes()
        # in the case of EXT, the compute node must be retrieved from the port
        # associated to any of the dest MACs
        dst_macs = self.generator_config.get_dest_macs()
        # dest MAC on port 0, chain 0
        dst_mac = dst_macs[0][0]
        hypervisor = self.get_hypervisor_from_mac(dst_mac)
        if hypervisor:
            LOG.info('Found hypervisor for EXT chain: %s', hypervisor.hypervisor_hostname)
            return [':' + hypervisor.hypervisor_hostname]
    # no openstack = no chains
    return []
def delete(self):
    """Delete resources for all chains."""
    for chain in self.chains:
        chain.delete()
    for network in self.networks:
        network.delete()
    if self.config.use_management_port and hasattr(self, 'management_network'):
        self.management_network.delete()
    if self.config.use_floating_ip and hasattr(self, 'floating_ip_network'):
        self.floating_ip_network.delete()
    # flavor may not have been created (EXT chains or early failure)
    if self.flavor:
        self.flavor.delete()