2 # Copyright 2018 Cisco Systems, Inc. All rights reserved.
4 # Licensed under the Apache License, Version 2.0 (the "License"); you may
5 # not use this file except in compliance with the License. You may obtain
6 # a copy of the License at
8 # http://www.apache.org/licenses/LICENSE-2.0
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 # License for the specific language governing permissions and limitations
17 # This module takes care of chaining networks, ports and vms
19 """NFVBENCH CHAIN DISCOVERY/STAGING.
21 This module takes care of staging/discovering all resources that are participating in a
22 benchmarking session: flavors, networks, ports, VNF instances.
23 If a resource is discovered with the same name, it will be reused.
24 Otherwise it will be created.
26 ChainManager: manages VM image, flavor, the staging discovery of all chains
28 Chain: manages one chain, has 2 or more networks and 1 or more instances
29 ChainNetwork: manages 1 network in a chain
30 ChainVnf: manages 1 VNF instance in a chain, has 2 ports
31 ChainVnfPort: manages 1 instance port
33 ChainManager-->Chain(*)
34 Chain-->ChainNetwork(*),ChainVnf(*)
35 ChainVnf-->ChainVnfPort(2)
37 Once created/discovered, instances are checked to be in the active state (ready to pass traffic)
38 Configuration parameters that will influence how these resources are staged/related:
39 - openstack or no openstack
42 - number of VNF in each chain (PVP, PVVP)
43 - SRIOV and middle port SRIOV for port types
44 - whether networks are shared across chains or not
# There is no traffic generation involved in this module.
53 from neutronclient.neutron import client as neutronclient
54 from novaclient.client import Client
56 from attrdict import AttrDict
57 from .chain_router import ChainRouter
60 from .specs import ChainType
61 # Left and right index for network and port lists
64 # L3 traffic edge networks are at the end of networks list
67 # Name of the VM config file
68 NFVBENCH_CFG_FILENAME = 'nfvbenchvm.conf'
# full pathname of the VM config in the VM
70 NFVBENCH_CFG_VM_PATHNAME = os.path.join('/etc/', NFVBENCH_CFG_FILENAME)
71 # full path of the boot shell script template file on the server where nfvbench runs
72 BOOT_SCRIPT_PATHNAME = os.path.join(os.path.dirname(os.path.abspath(__file__)),
74 NFVBENCH_CFG_FILENAME)
class ChainException(Exception):
    """Raised when an error occurs while operating on the chains."""
class NetworkEncaps(object):
    """Holder class for network encapsulation information."""
84 class ChainFlavor(object):
85 """Class to manage the chain flavor."""
87 def __init__(self, flavor_name, flavor_dict, comp):
88 """Create a flavor."""
89 self.name = flavor_name
91 self.flavor = self.comp.find_flavor(flavor_name)
95 LOG.info("Reused flavor '%s'", flavor_name)
97 extra_specs = flavor_dict.pop('extra_specs', None)
99 self.flavor = comp.create_flavor(flavor_name,
102 LOG.info("Created flavor '%s'", flavor_name)
104 self.flavor.set_keys(extra_specs)
107 """Delete this flavor."""
108 if not self.reuse and self.flavor:
110 LOG.info("Flavor '%s' deleted", self.name)
113 class ChainVnfPort(object):
114 """A port associated to one VNF in the chain."""
116 def __init__(self, name, vnf, chain_network, vnic_type):
117 """Create or reuse a port on a given network.
119 if vnf.instance is None the VNF instance is not reused and this ChainVnfPort instance must
121 Otherwise vnf.instance is a reused VNF instance and this ChainVnfPort instance must
122 find an existing port to reuse that matches the port requirements: same attached network,
123 instance, name, vnic type
125 name: name for this port
126 vnf: ChainVNf instance that owns this port
127 chain_network: ChainNetwork instance where this port should attach
128 vnic_type: required vnic type for this port
132 self.manager = vnf.manager
135 self.floating_ip = None
137 # VNF instance is reused, we need to find an existing port that matches this instance
139 # discover ports attached to this instance
140 port_list = self.manager.get_ports_from_network(chain_network)
141 for port in port_list:
142 if port['name'] != name:
144 if port['binding:vnic_type'] != vnic_type:
146 if port['device_id'] == vnf.get_uuid():
148 LOG.info('Reusing existing port %s mac=%s', name, port['mac_address'])
151 raise ChainException('Cannot find matching port')
153 # VNF instance is not created yet, we need to create a new port
157 'network_id': chain_network.get_uuid(),
158 'binding:vnic_type': vnic_type
161 subnet_id = chain_network.get_subnet_uuid()
163 body['port']['fixed_ips'] = [{'subnet_id': subnet_id}]
165 port = self.manager.neutron_client.create_port(body)
166 self.port = port['port']
167 LOG.info('Created port %s', name)
169 self.manager.neutron_client.update_port(self.port['id'], {
171 'security_groups': [],
172 'port_security_enabled': False,
175 LOG.info('Security disabled on port %s', name)
177 LOG.info('Failed to disable security on port %s (ignored)', name)
180 """Get the MAC address for this port."""
181 return self.port['mac_address']
184 """Get the IP address for this port."""
185 return self.port['fixed_ips'][0]['ip_address']
187 def set_floating_ip(self, chain_network):
188 # create and add floating ip to port
190 self.floating_ip = self.manager.neutron_client.create_floatingip({
192 'floating_network_id': chain_network.get_uuid(),
193 'port_id': self.port['id'],
194 'description': 'nfvbench floating ip for port:' + self.port['name'],
196 LOG.info('Floating IP %s created and associated on port %s',
197 self.floating_ip['floating_ip_address'], self.name)
198 return self.floating_ip['floating_ip_address']
200 LOG.info('Failed to created and associated floating ip on port %s (ignored)', self.name)
201 return self.port['fixed_ips'][0]['ip_address']
204 """Delete this port instance."""
205 if self.reuse or not self.port:
207 for _ in range(0, self.manager.config.generic_retry_count):
209 self.manager.neutron_client.delete_port(self.port['id'])
210 LOG.info("Deleted port %s", self.name)
212 self.manager.neutron_client.delete_floatingip(self.floating_ip['id'])
213 LOG.info("Deleted floating IP %s", self.floating_ip['description'])
216 time.sleep(self.manager.config.generic_poll_sec)
217 LOG.error('Unable to delete port: %s', self.name)
220 class ChainNetwork(object):
221 """Could be a shared network across all chains or a chain private network."""
223 def __init__(self, manager, network_config, chain_id=None, lookup_only=False,
225 """Create a network for given chain.
227 network_config: a dict containing the network properties
228 (name, segmentation_id and physical_network)
229 chain_id: to which chain the networks belong.
230 a None value will mean that these networks are shared by all chains
231 suffix: a suffix to add to the network name (if not None)
233 self.manager = manager
235 self.name = network_config.name
237 # the name itself can be either a string or a list of names indexed by chain ID
238 if isinstance(network_config.name, tuple):
239 self.name = network_config.name[chain_id]
241 # network_config.name is a prefix string
242 self.name = network_config.name + str(chain_id)
244 self.name = self.name + suffix
245 self.segmentation_id = self._get_item(network_config.segmentation_id,
246 chain_id, auto_index=True)
247 self.subnet_name = self._get_item(network_config.subnet, chain_id)
248 self.physical_network = self._get_item(network_config.physical_network, chain_id)
253 self.router_name = None
254 if manager.config.l3_router and hasattr(network_config, 'router_name'):
255 self.router_name = network_config.router_name
257 self._setup(network_config, lookup_only)
260 LOG.error("Cannot find network %s", self.name)
262 LOG.error("Error creating network %s", self.name)
266 def _get_item(self, item_field, index, auto_index=False):
267 """Retrieve an item from a list or a single value.
269 item_field: can be None, a tuple of a single value
270 index: if None is same as 0, else is the index for a chain
271 auto_index: if true will automatically get the final value by adding the
272 index to the base value (if full list not provided)
274 If the item_field is not a tuple, it is considered same as a tuple with same value at any
276 If a list is provided, its length must be > index
282 if isinstance(item_field, tuple):
284 return item_field[index]
286 raise ChainException("List %s is too short for chain index %d" %
287 (str(item_field), index)) from IndexError
288 # single value is configured
290 return item_field + index
293 def _setup(self, network_config, lookup_only):
294 # Lookup if there is a matching network with same name
295 networks = self.manager.neutron_client.list_networks(name=self.name)
296 if networks['networks']:
297 network = networks['networks'][0]
298 # a network of same name already exists, we need to verify it has the same
300 if self.segmentation_id:
301 if network['provider:segmentation_id'] != self.segmentation_id:
302 raise ChainException("Mismatch of 'segmentation_id' for reused "
303 "network '{net}'. Network has id '{seg_id1}', "
304 "configuration requires '{seg_id2}'."
305 .format(net=self.name,
306 seg_id1=network['provider:segmentation_id'],
307 seg_id2=self.segmentation_id))
309 if self.physical_network:
310 if network['provider:physical_network'] != self.physical_network:
311 raise ChainException("Mismatch of 'physical_network' for reused "
312 "network '{net}'. Network has '{phys1}', "
313 "configuration requires '{phys2}'."
314 .format(net=self.name,
315 phys1=network['provider:physical_network'],
316 phys2=self.physical_network))
318 LOG.info('Reusing existing network %s', self.name)
320 self.network = network
323 raise ChainException('Network %s not found' % self.name)
327 'admin_state_up': True
330 if network_config.network_type:
331 body['network']['provider:network_type'] = network_config.network_type
332 if self.segmentation_id:
333 body['network']['provider:segmentation_id'] = self.segmentation_id
334 if self.physical_network:
335 body['network']['provider:physical_network'] = self.physical_network
336 self.network = self.manager.neutron_client.create_network(body)['network']
337 # create associated subnet, all subnets have the same name (which is ok since
338 # we do not need to address them directly by name)
340 'subnet': {'name': network_config.subnet,
341 'cidr': network_config.cidr,
342 'network_id': self.network['id'],
343 'enable_dhcp': False,
345 'dns_nameservers': []}
347 subnet = self.manager.neutron_client.create_subnet(body)['subnet']
348 # add subnet id to the network dict since it has just been added
349 self.network['subnets'] = [subnet['id']]
350 LOG.info('Created network: %s', self.name)
354 Extract UUID of this network.
356 :return: UUID of this network
358 return self.network['id']
360 def get_subnet_uuid(self):
362 Extract UUID of this subnet network.
364 :return: UUID of this subnet network
366 for subnet in self.network['subnets']:
367 if self.subnet_name == self.manager.neutron_client \
368 .show_subnet(subnet)['subnet']['name']:
374 Extract vlan for this network.
376 :return: vlan ID for this network
378 if self.network['provider:network_type'] != 'vlan':
379 raise ChainException('Trying to retrieve VLAN id for non VLAN network')
380 return self.network['provider:segmentation_id']
384 Extract VNI for this network.
386 :return: VNI ID for this network
389 return self.network['provider:segmentation_id']
391 def get_mpls_inner_label(self):
393 Extract MPLS VPN Label for this network.
395 :return: MPLS VPN Label for this network
398 return self.network['provider:segmentation_id']
401 """Delete this network."""
402 if not self.reuse and self.network:
403 for retry in range(0, self.manager.config.generic_retry_count):
405 self.manager.neutron_client.delete_network(self.network['id'])
406 LOG.info("Deleted network: %s", self.name)
409 LOG.info('Error deleting network %s (retry %d/%d)...',
412 self.manager.config.generic_retry_count)
413 time.sleep(self.manager.config.generic_poll_sec)
414 LOG.error('Unable to delete network: %s', self.name)
417 class ChainVnf(object):
418 """A class to represent a VNF in a chain."""
420 def __init__(self, chain, vnf_id, networks):
421 """Reuse a VNF instance with same characteristics or create a new VNF instance.
423 chain: the chain where this vnf belongs
424 vnf_id: indicates the index of this vnf in its chain (first vnf=0)
425 networks: the list of all networks (ChainNetwork) of the current chain
427 self.manager = chain.manager
430 self.name = self.manager.config.loop_vm_name + str(chain.chain_id)
431 if len(networks) > 2:
432 # we will have more than 1 VM in each chain
433 self.name += '-' + str(vnf_id)
434 # A list of ports for this chain
435 # There are normally 2 ports carrying traffic (index 0, and index 1) and
436 # potentially multiple idle ports not carrying traffic (index 2 and up)
437 # For example if 7 idle interfaces are requested, the corresp. ports will be
440 self.management_port = None
446 self.idle_networks = []
449 # the vnf_id is conveniently also the starting index in networks
450 # for the left and right networks associated to this VNF
451 if self.manager.config.l3_router:
452 self._setup(networks[vnf_id:vnf_id + 4])
454 self._setup(networks[vnf_id:vnf_id + 2])
456 LOG.error("Error creating VNF %s", self.name)
460 def _get_vm_config(self, remote_mac_pair):
461 config = self.manager.config
462 devices = self.manager.generator_config.devices
465 tg_gateway1_ip = self.routers[LEFT].ports[1]['fixed_ips'][0][
466 'ip_address'] # router edge ip left
467 tg_gateway2_ip = self.routers[RIGHT].ports[1]['fixed_ips'][0][
468 'ip_address'] # router edge ip right
469 tg_mac1 = self.routers[LEFT].ports[1]['mac_address'] # router edge mac left
470 tg_mac2 = self.routers[RIGHT].ports[1]['mac_address'] # router edge mac right
471 # edge cidr mask left
472 vnf_gateway1_cidr = \
473 self.ports[LEFT].get_ip() + self.__get_network_mask(
474 self.manager.config.edge_networks.left.cidr)
475 # edge cidr mask right
476 vnf_gateway2_cidr = \
477 self.ports[RIGHT].get_ip() + self.__get_network_mask(
478 self.manager.config.edge_networks.right.cidr)
479 if config.vm_forwarder != 'vpp':
480 raise ChainException(
481 'L3 router mode imply to set VPP as VM forwarder.'
482 'Please update your config file with: vm_forwarder: vpp')
484 tg_gateway1_ip = devices[LEFT].tg_gateway_ip_addrs
485 tg_gateway2_ip = devices[RIGHT].tg_gateway_ip_addrs
486 if not config.loop_vm_arp:
487 tg_mac1 = remote_mac_pair[0]
488 tg_mac2 = remote_mac_pair[1]
493 g1cidr = devices[LEFT].get_gw_ip(
494 self.chain.chain_id) + self.__get_network_mask(
495 self.manager.config.internal_networks.left.cidr)
496 g2cidr = devices[RIGHT].get_gw_ip(
497 self.chain.chain_id) + self.__get_network_mask(
498 self.manager.config.internal_networks.right.cidr)
500 vnf_gateway1_cidr = g1cidr
501 vnf_gateway2_cidr = g2cidr
503 with open(BOOT_SCRIPT_PATHNAME, 'r', encoding="utf-8") as boot_script:
504 content = boot_script.read()
506 'forwarder': config.vm_forwarder,
507 'intf_mac1': self.ports[LEFT].get_mac(),
508 'intf_mac2': self.ports[RIGHT].get_mac(),
509 'tg_gateway1_ip': tg_gateway1_ip,
510 'tg_gateway2_ip': tg_gateway2_ip,
511 'tg_net1': devices[LEFT].ip_addrs,
512 'tg_net2': devices[RIGHT].ip_addrs,
513 'vnf_gateway1_cidr': vnf_gateway1_cidr,
514 'vnf_gateway2_cidr': vnf_gateway2_cidr,
517 'vif_mq_size': config.vif_multiqueue_size,
518 'num_mbufs': config.num_mbufs
520 if self.manager.config.use_management_port:
521 mgmt_ip = self.management_port.port['fixed_ips'][0]['ip_address']
522 mgmt_mask = self.__get_network_mask(self.manager.config.management_network.cidr)
523 vm_config['intf_mgmt_cidr'] = mgmt_ip + mgmt_mask
524 vm_config['intf_mgmt_ip_gw'] = self.manager.config.management_network.gateway
525 vm_config['intf_mac_mgmt'] = self.management_port.port['mac_address']
527 # Interface management config left empty to avoid error in VM spawn
528 # if nfvbench config has values for management network but use_management_port=false
529 vm_config['intf_mgmt_cidr'] = ''
530 vm_config['intf_mgmt_ip_gw'] = ''
531 vm_config['intf_mac_mgmt'] = ''
532 return content.format(**vm_config)
535 def __get_network_mask(network):
536 return '/' + network.split('/')[1]
538 def _get_vnic_type(self, port_index):
539 """Get the right vnic type for given port indexself.
541 If SR-IOV is specified, middle ports in multi-VNF chains
542 can use vswitch or SR-IOV based on config.use_sriov_middle_net
544 if self.manager.config.sriov:
545 chain_length = self.chain.get_length()
546 if self.manager.config.use_sriov_middle_net or chain_length == 1:
548 if self.vnf_id == 0 and port_index == 0:
549 # first VNF in chain must use sriov for left port
551 if (self.vnf_id == chain_length - 1) and (port_index == 1):
552 # last VNF in chain must use sriov for right port
556 def _get_idle_networks_ports(self):
557 """Get the idle networks for PVP or PVVP chain (non shared net only)
559 For EXT packet path or shared net, returns empty list.
560 For PVP, PVVP these networks will be created if they do not exist.
561 chain_id: to which chain the networks belong.
562 a None value will mean that these networks are shared by all chains
566 config = self.manager.config
567 chain_id = self.chain.chain_id
568 idle_interfaces_per_vm = config.idle_interfaces_per_vm
569 if config.service_chain == ChainType.EXT or chain_id is None or \
570 idle_interfaces_per_vm == 0:
573 # Make a copy of the idle networks dict as we may have to modify the
575 idle_network_cfg = AttrDict(config.idle_networks)
576 if idle_network_cfg.segmentation_id:
577 segmentation_id = idle_network_cfg.segmentation_id + \
578 chain_id * idle_interfaces_per_vm
580 segmentation_id = None
582 # create as many idle networks and ports as requested
583 for idle_index in range(idle_interfaces_per_vm):
584 if config.service_chain == ChainType.PVP:
585 suffix = '.%d' % (idle_index)
587 suffix = '.%d.%d' % (self.vnf_id, idle_index)
588 port_name = self.name + '-idle' + str(idle_index)
589 # update the segmentation id based on chain id and idle index
591 idle_network_cfg.segmentation_id = segmentation_id + idle_index
592 port_name = port_name + "." + str(segmentation_id)
594 networks.append(ChainNetwork(self.manager,
598 ports.append(ChainVnfPort(port_name,
600 networks[idle_index],
603 # need to cleanup all successful networks
609 self.idle_networks = networks
610 self.idle_ports = ports
612 def _setup(self, networks):
613 flavor_id = self.manager.flavor.flavor.id
614 # Check if we can reuse an instance with same name
615 for instance in self.manager.existing_instances:
616 if instance.name == self.name:
618 instance_right = RIGHT
619 # In case of L3 traffic instance use edge networks
620 if self.manager.config.l3_router:
621 instance_left = EDGE_LEFT
622 instance_right = EDGE_RIGHT
623 # Verify that other instance characteristics match
624 if instance.flavor['id'] != flavor_id:
625 self._reuse_exception('Flavor mismatch')
626 if instance.status != "ACTIVE":
627 self._reuse_exception('Matching instance is not in ACTIVE state')
628 # The 2 networks for this instance must also be reused
629 if not networks[instance_left].reuse:
630 self._reuse_exception('network %s is new' % networks[instance_left].name)
631 if not networks[instance_right].reuse:
632 self._reuse_exception('network %s is new' % networks[instance_right].name)
633 # instance.networks have the network names as keys:
634 # {'nfvbench-rnet0': ['192.168.2.10'], 'nfvbench-lnet0': ['192.168.1.8']}
635 if networks[instance_left].name not in instance.networks:
636 self._reuse_exception('Left network mismatch')
637 if networks[instance_right].name not in instance.networks:
638 self._reuse_exception('Right network mismatch')
641 self.instance = instance
642 LOG.info('Reusing existing instance %s on %s',
643 self.name, self.get_hypervisor_name())
644 # create management port if needed
645 if self.manager.config.use_management_port:
646 self.management_port = ChainVnfPort(self.name + '-mgmt', self,
647 self.manager.management_network, 'normal')
648 ip = self.management_port.port['fixed_ips'][0]['ip_address']
649 if self.manager.config.use_floating_ip:
650 ip = self.management_port.set_floating_ip(self.manager.floating_ip_network)
651 LOG.info("Management interface will be active using IP: %s, "
652 "and you can connect over SSH with login: nfvbench and password: nfvbench", ip)
653 # create or reuse/discover 2 ports per instance
654 if self.manager.config.l3_router:
656 self.ports.append(ChainVnfPort(self.name + '-' + str(index),
659 self._get_vnic_type(index)))
662 self.ports.append(ChainVnfPort(self.name + '-' + str(index),
665 self._get_vnic_type(index)))
667 # create idle networks and ports only if instance is not reused
668 # if reused, we do not care about idle networks/ports
670 self._get_idle_networks_ports()
672 # Create neutron routers for L3 traffic use case
673 if self.manager.config.l3_router and self.manager.openstack:
674 internal_nets = networks[:2]
675 if self.manager.config.service_chain == ChainType.PVP:
676 edge_nets = networks[2:]
678 edge_nets = networks[3:]
679 subnets_left = [internal_nets[0], edge_nets[0]]
680 routes_left = [{'destination': self.manager.config.traffic_generator.ip_addrs[0],
681 'nexthop': self.manager.config.traffic_generator.tg_gateway_ip_addrs[
683 {'destination': self.manager.config.traffic_generator.ip_addrs[1],
684 'nexthop': self.ports[0].get_ip()}]
686 ChainRouter(self.manager, edge_nets[0].router_name, subnets_left, routes_left))
687 subnets_right = [internal_nets[1], edge_nets[1]]
688 routes_right = [{'destination': self.manager.config.traffic_generator.ip_addrs[0],
689 'nexthop': self.ports[1].get_ip()},
690 {'destination': self.manager.config.traffic_generator.ip_addrs[1],
691 'nexthop': self.manager.config.traffic_generator.tg_gateway_ip_addrs[
694 ChainRouter(self.manager, edge_nets[1].router_name, subnets_right, routes_right))
695 # Overload gateway_ips property with router ip address for ARP and traffic calls
696 self.manager.generator_config.devices[LEFT].set_gw_ip(
697 self.routers[LEFT].ports[0]['fixed_ips'][0]['ip_address']) # router edge ip left)
698 self.manager.generator_config.devices[RIGHT].set_gw_ip(
699 self.routers[RIGHT].ports[0]['fixed_ips'][0]['ip_address']) # router edge ip right)
701 # if no reuse, actual vm creation is deferred after all ports in the chain are created
702 # since we need to know the next mac in a multi-vnf chain
704 def create_vnf(self, remote_mac_pair):
705 """Create the VNF instance if it does not already exist."""
706 if self.instance is None:
708 if self.manager.config.use_management_port:
709 port_ids.append({'port-id': self.management_port.port['id']})
710 port_ids.extend([{'port-id': vnf_port.port['id']} for vnf_port in self.ports])
712 for idle_port in self.idle_ports:
713 port_ids.append({'port-id': idle_port.port['id']})
714 vm_config = self._get_vm_config(remote_mac_pair)
715 az = self.manager.placer.get_required_az()
716 server = self.manager.comp.create_server(self.name,
717 self.manager.image_instance,
718 self.manager.flavor.flavor,
725 files={NFVBENCH_CFG_VM_PATHNAME: vm_config})
727 self.instance = server
728 if self.manager.placer.is_resolved():
729 LOG.info('Created instance %s on %s', self.name, az)
731 # the location is undetermined at this point
732 # self.get_hypervisor_name() will return None
733 LOG.info('Created instance %s - waiting for placement resolution...', self.name)
734 # here we MUST wait until this instance is resolved otherwise subsequent
735 # VNF creation can be placed in other hypervisors!
736 config = self.manager.config
737 max_retries = int((config.check_traffic_time_sec +
738 config.generic_poll_sec - 1) / config.generic_poll_sec)
740 for retry in range(max_retries):
741 status = self.get_status()
742 if status == 'ACTIVE':
743 hyp_name = self.get_hypervisor_name()
744 LOG.info('Instance %s is active and has been placed on %s',
746 self.manager.placer.register_full_name(hyp_name)
748 if status == 'ERROR':
749 raise ChainException('Instance %s creation error: %s' %
751 self.instance.fault['message']))
752 LOG.info('Waiting for instance %s to become active (retry %d/%d)...',
753 self.name, retry + 1, max_retries + 1)
754 time.sleep(config.generic_poll_sec)
757 LOG.error('Instance %s creation timed out', self.name)
758 raise ChainException('Instance %s creation timed out' % self.name)
761 raise ChainException('Unable to create instance: %s' % (self.name))
def _reuse_exception(self, reason):
    """Raise a ChainException explaining why this instance cannot be reused.

    reason: short human readable explanation inserted into the message
    """
    raise ChainException('Instance %s cannot be reused (%s)' % (self.name, reason))
766 def get_status(self):
767 """Get the statis of this instance."""
768 if self.instance.status != 'ACTIVE':
769 self.instance = self.manager.comp.poll_server(self.instance)
770 return self.instance.status
772 def get_hostname(self):
773 """Get the hypervisor host name running this VNF instance."""
774 if self.manager.is_admin:
775 hypervisor_hostname = getattr(self.instance, 'OS-EXT-SRV-ATTR:hypervisor_hostname')
777 hypervisor_hostname = self.manager.config.hypervisor_hostname
778 if not hypervisor_hostname:
779 raise ChainException('Hypervisor hostname parameter is mandatory')
780 return hypervisor_hostname
782 def get_host_ip(self):
783 """Get the IP address of the host where this instance runs.
785 return: the IP address
788 self.host_ip = self.manager.comp.get_hypervisor(self.get_hostname()).host_ip
791 def get_hypervisor_name(self):
792 """Get hypervisor name (az:hostname) for this VNF instance."""
794 if self.manager.is_admin:
795 az = getattr(self.instance, 'OS-EXT-AZ:availability_zone')
797 az = self.manager.config.availability_zone
799 raise ChainException('Availability zone parameter is mandatory')
800 hostname = self.get_hostname()
802 return az + ':' + hostname
807 """Get the uuid for this instance."""
808 return self.instance.id
810 def delete(self, forced=False):
811 """Delete this VNF instance."""
813 LOG.info("Instance %s not deleted (reused)", self.name)
816 self.manager.comp.delete_server(self.instance)
817 LOG.info("Deleted instance %s", self.name)
818 if self.manager.config.use_management_port:
819 self.management_port.delete()
820 for port in self.ports:
822 for port in self.idle_ports:
824 for network in self.idle_networks:
829 """A class to manage a single chain.
831 Can handle any type of chain (EXT, PVP, PVVP)
834 def __init__(self, chain_id, manager):
835 """Create a new chain.
837 chain_id: chain index (first chain is 0)
838 manager: the chain manager that owns all chains
840 self.chain_id = chain_id
841 self.manager = manager
842 self.encaps = manager.encaps
846 self.networks = manager.get_networks(chain_id)
847 # For external chain VNFs can only be discovered from their MAC addresses
848 # either from config or from ARP
849 if manager.config.service_chain != ChainType.EXT:
850 for chain_instance_index in range(self.get_length()):
851 self.instances.append(ChainVnf(self,
852 chain_instance_index,
854 # at this point new VNFs are not created yet but
855 # verify that all discovered VNFs are on the same hypervisor
856 self._check_hypervisors()
857 # now that all VNF ports are created we need to calculate the
858 # left/right remote MAC for each VNF in the chain
859 # before actually creating the VNF itself
860 rem_mac_pairs = self._get_remote_mac_pairs()
861 for instance in self.instances:
862 rem_mac_pair = rem_mac_pairs.pop(0)
863 instance.create_vnf(rem_mac_pair)
868 def _check_hypervisors(self):
869 common_hypervisor = None
870 for instance in self.instances:
871 # get the full hypervizor name (az:compute)
872 hname = instance.get_hypervisor_name()
874 if common_hypervisor:
875 if hname != common_hypervisor:
876 raise ChainException('Discovered instances on different hypervisors:'
877 ' %s and %s' % (hname, common_hypervisor))
879 common_hypervisor = hname
880 if common_hypervisor:
881 # check that the common hypervisor name matchs the requested hypervisor name
882 # and set the name to be used by all future instances (if any)
883 if not self.manager.placer.register_full_name(common_hypervisor):
884 raise ChainException('Discovered hypervisor placement %s is incompatible' %
def get_length(self):
    """Get the number of VNF in the chain."""
    # With an L3 router, 2 extra edge networks are present in the list and
    # do not correspond to a VNF hop; otherwise a chain of N VNFs has N+1 nets.
    if self.manager.config.l3_router:
        return len(self.networks) - 3
    return len(self.networks) - 1
892 def _get_remote_mac_pairs(self):
893 """Get the list of remote mac pairs for every VNF in the chain.
895 Traverse the chain from left to right and establish the
896 left/right remote MAC for each VNF in the chainself.
899 mac sequence: tg_src_mac, vm0-mac0, vm0-mac1, tg_dst_mac
900 must produce [[tg_src_mac, tg_dst_mac]] or looking at index in mac sequence: [[0, 3]]
901 the mac pair is what the VNF at that position (index 0) sees as next hop mac left and right
904 tg_src_mac, vm0-mac0, vm0-mac1, vm1-mac0, vm1-mac1, tg_dst_mac
905 Must produce the following list:
906 [[tg_src_mac, vm1-mac0], [vm0-mac1, tg_dst_mac]] or index: [[0, 3], [2, 5]]
908 General case with 3 VMs in chain, the list of consecutive macs (left to right):
909 tg_src_mac, vm0-mac0, vm0-mac1, vm1-mac0, vm1-mac1, vm2-mac0, vm2-mac1, tg_dst_mac
910 Must produce the following list:
911 [[tg_src_mac, vm1-mac0], [vm0-mac1, vm2-mac0], [vm1-mac1, tg_dst_mac]]
912 or index: [[0, 3], [2, 5], [4, 7]]
914 The series pattern is pretty clear: [[n, n+3],... ] where n is multiple of 2
916 # line up all mac from left to right
917 mac_seq = [self.manager.generator_config.devices[LEFT].mac]
918 for instance in self.instances:
919 mac_seq.append(instance.ports[0].get_mac())
920 mac_seq.append(instance.ports[1].get_mac())
921 mac_seq.append(self.manager.generator_config.devices[RIGHT].mac)
924 for _ in self.instances:
925 rem_mac_pairs.append([mac_seq[base], mac_seq[base + 3]])
def get_instances(self):
    """Return the list of all VNF instances (ChainVnf) of this chain."""
    return self.instances
933 def get_vlan(self, port_index):
934 """Get the VLAN id on a given port.
936 port_index: left port is 0, right port is 1
937 return: the vlan_id or None if there is no vlan tagging
939 # for port 1 we need to return the VLAN of the last network in the chain
940 # The networks array contains 2 networks for PVP [left, right]
941 # and 3 networks in the case of PVVP [left.middle,right]
943 # this will pick the last item in array
945 # This string filters networks connected to TG, in case of
946 # l3-router feature we have 4 networks instead of 2
947 networks = [x for x in self.networks if not x.router_name]
948 return networks[port_index].get_vlan()
950 def get_vxlan(self, port_index):
951 """Get the VXLAN id on a given port.
953 port_index: left port is 0, right port is 1
954 return: the vxlan_id or None if there is no vxlan
956 # for port 1 we need to return the VLAN of the last network in the chain
957 # The networks array contains 2 networks for PVP [left, right]
958 # and 3 networks in the case of PVVP [left.middle,right]
960 # this will pick the last item in array
962 return self.networks[port_index].get_vxlan()
964 def get_mpls_inner_label(self, port_index):
965 """Get the MPLS VPN Label on a given port.
967 port_index: left port is 0, right port is 1
968 return: the mpls_label_id or None if there is no mpls
970 # for port 1 we need to return the MPLS Label of the last network in the chain
971 # The networks array contains 2 networks for PVP [left, right]
972 # and 3 networks in the case of PVVP [left.middle,right]
974 # this will pick the last item in array
976 return self.networks[port_index].get_mpls_inner_label()
def get_dest_mac(self, port_index):
    """Get the dest MAC on a given port.

    port_index: left port is 0, right port is 1
    return: the dest MAC
    """
    if port_index:
        # for right port, use the right port MAC of the last (right most) VNF In chain
        return self.instances[-1].ports[1].get_mac()
    # for left port use the left port MAC of the first (left most) VNF in chain
    return self.instances[0].ports[0].get_mac()
def get_network_uuids(self):
    """Get UUID of networks in this chain from left to right (order is important).

    :return: list of UUIDs of networks (2 or 3 elements)
    """
    return [network['id'] for network in self.networks]
def get_host_ips(self):
    """Return the IP address(es) of the host compute nodes used for this chain.

    :return: a list of 1 or 2 IP addresses
    """
    return [instance.get_host_ip() for instance in self.instances]
def get_compute_nodes(self):
    """Return the name of the host compute nodes used for this chain.

    :return: a list of 1 host name in the az:host format
    """
    # Since all chains go through the same compute node(s) we can just retrieve the
    # compute node name(s) for the first chain
    return [instance.get_hypervisor_name() for instance in self.instances]
1014 """Delete this chain."""
1015 for instance in self.instances:
1017 # only delete if these are chain private networks (not shared)
1018 if not self.manager.config.service_chain_shared_net:
1019 for network in self.networks:
class InstancePlacer(object):
    """A class to manage instance placement for all VNFs in all chains.

    A full az string is made of 2 parts AZ and hypervisor.
    The placement is resolved when both parts az and hypervisor names are known.
    """

    def __init__(self, req_az, req_hyp):
        """Create a new instance placer.

        req_az: requested AZ (can be None or empty if no preference)
        req_hyp: requested hypervisor name (can be None of empty if no preference)
                 can be any of 'nova:', 'comp1', 'nova:comp1'
                 if it is a list, only the first item is used (backward compatibility in config)

        req_az is ignored if req_hyp has an az part
        all other parts beyond the first 2 are ignored in req_hyp
        """
        # if passed a list just pick the first item
        if req_hyp and isinstance(req_hyp, list):
            req_hyp = req_hyp[0]
        # only pick first part of az
        if req_az and ':' in req_az:
            req_az = req_az.split(':')[0]
        if req_hyp:
            # check if requested hypervisor string has an AZ part
            split_hyp = req_hyp.split(':')
            if len(split_hyp) > 1:
                # override the AZ part and hypervisor part
                req_az = split_hyp[0]
                req_hyp = split_hyp[1]
        self.requested_az = req_az if req_az else ''
        self.requested_hyp = req_hyp if req_hyp else ''
        # Nova can accept AZ only (e.g. 'nova:', use any hypervisor in that AZ)
        # or hypervisor only (e.g. ':comp1')
        # or both (e.g. 'nova:comp1')
        if req_az:
            self.required_az = req_az + ':' + self.requested_hyp
        else:
            # need to insert a ':' so nova knows this is the hypervisor name
            self.required_az = ':' + self.requested_hyp if req_hyp else ''
        # placement is resolved when both AZ and hypervisor names are known and set
        self.resolved = self.requested_az != '' and self.requested_hyp != ''

    def get_required_az(self):
        """Return the required az (can be resolved or not)."""
        return self.required_az

    def register_full_name(self, discovered_az):
        """Verify compatibility and register a discovered hypervisor full name.

        discovered_az: a discovered AZ in az:hypervisor format
        return: True if discovered_az is compatible and set
                False if discovered_az is not compatible
        """
        if self.resolved:
            return discovered_az == self.required_az
        # must be in full az format
        split_daz = discovered_az.split(':')
        if len(split_daz) != 2:
            return False
        if self.requested_az and self.requested_az != split_daz[0]:
            return False
        if self.requested_hyp and self.requested_hyp != split_daz[1]:
            return False
        self.required_az = discovered_az
        self.resolved = True
        return True

    def is_resolved(self):
        """Check if the full AZ is resolved.

        return: True if resolved
        """
        return self.resolved
class ChainManager(object):
    """A class for managing all chains for a given run.

    Supports openstack or no openstack.
    Supports EXT, PVP and PVVP chains.
    """

    def __init__(self, chain_runner):
        """Create a chain manager to take care of discovering or bringing up the requested chains.

        A new instance must be created every time a new config is used.
        config: the nfvbench config to use
        cred: openstack credentials to use of None if there is no openstack
        """
        self.chain_runner = chain_runner
        self.config = chain_runner.config
        self.generator_config = chain_runner.traffic_client.generator_config
        self.chains = []
        self.image_instance = None
        self.image_name = None
        # Left and right networks shared across all chains (only if shared)
        self.networks = []
        self.flavor = None
        self.comp = None
        self.nova_client = None
        self.neutron_client = None
        self.glance_client = None
        self.existing_instances = []
        # existing ports keyed by the network uuid they belong to
        self._existing_ports = {}
        config = self.config
        # OpenStack mode requires credentials and excludes L2 loopback
        self.openstack = (chain_runner.cred is not None) and not config.l2_loopback
        self.chain_count = config.service_chain_count
        if self.openstack:
            # openstack only
            session = chain_runner.cred.get_session()
            self.is_admin = chain_runner.cred.is_admin
            self.nova_client = Client(2, session=session)
            self.neutron_client = neutronclient.Client('2.0', session=session)
            self.glance_client = glanceclient.Client('2', session=session)
            self.comp = compute.Compute(self.nova_client,
                                        self.glance_client,
                                        config)
            try:
                if config.service_chain != ChainType.EXT:
                    self.placer = InstancePlacer(config.availability_zone, config.compute_nodes)
                    self._setup_image()
                    self.flavor = ChainFlavor(config.flavor_type, config.flavor, self.comp)
                    # Get list of all existing instances to check if some instances can be reused
                    self.existing_instances = self.comp.get_server_list()
                    # If management port is requested for VMs, create management network (shared)
                    if self.config.use_management_port:
                        self.management_network = ChainNetwork(self, self.config.management_network,
                                                               None, False)
                    # If floating IP is used for management, create and share
                    # across chains the floating network
                    if self.config.use_floating_ip:
                        self.floating_ip_network = ChainNetwork(self,
                                                                self.config.floating_network,
                                                                None, False)
                else:
                    # For EXT chains, the external_networks left and right fields in the config
                    # must be either a prefix string or a list of at least chain-count strings
                    self._check_extnet('left', config.external_networks.left)
                    self._check_extnet('right', config.external_networks.right)
                # If networks are shared across chains, get the list of networks
                if config.service_chain_shared_net:
                    self.networks = self.get_networks()
                # Reuse/create chains
                for chain_id in range(self.chain_count):
                    self.chains.append(Chain(chain_id, self))
                if config.service_chain == ChainType.EXT:
                    # if EXT and no ARP or VxLAN we need to read dest MACs from config
                    if config.no_arp or config.vxlan:
                        self._get_dest_macs_from_config()
                else:
                    # Make sure all instances are active before proceeding
                    self._ensure_instances_active()
                # network API call do not show VLANS ID if not admin read from config
                if not self.is_admin and config.vlan_tagging:
                    self._get_config_vlans()
            except Exception:
                # cleanup anything that was successfully staged before bailing out
                self.delete()
                raise
        else:
            # no openstack, no need to create chains
            if not config.l2_loopback and config.no_arp:
                self._get_dest_macs_from_config()
            if config.vlan_tagging:
                # make sure there at least as many entries as chains in each left/right list
                if len(config.vlans) != 2:
                    raise ChainException('The config vlans property must be a list '
                                         'with 2 lists of VLAN IDs')
                self._get_config_vlans()
            if config.vxlan:
                raise ChainException('VxLAN is only supported with OpenStack')
1201 def _check_extnet(self, side, name):
1203 raise ChainException('external_networks.%s must contain a valid network'
1204 ' name prefix or a list of network names' % side)
1205 if isinstance(name, tuple) and len(name) < self.chain_count:
1206 raise ChainException('external_networks.%s %s'
1207 ' must have at least %d names' % (side, name, self.chain_count))
1209 def _get_config_vlans(self):
1212 self.vlans = [self._check_list('vlans[0]', self.config.vlans[0], re_vlan),
1213 self._check_list('vlans[1]', self.config.vlans[1], re_vlan)]
1215 raise ChainException(
1216 'vlans parameter is mandatory. Set valid value in config file') from IndexError
1218 def _get_dest_macs_from_config(self):
1219 re_mac = "[0-9a-fA-F]{2}([-:])[0-9a-fA-F]{2}(\\1[0-9a-fA-F]{2}){4}$"
1220 tg_config = self.config.traffic_generator
1221 self.dest_macs = [self._check_list("mac_addrs_left",
1222 tg_config.mac_addrs_left, re_mac),
1223 self._check_list("mac_addrs_right",
1224 tg_config.mac_addrs_right, re_mac)]
1226 def _check_list(self, list_name, ll, pattern):
1227 # if it is a single int or mac, make it a list of 1 int
1228 if isinstance(ll, (int, str)):
1233 if not re.match(pattern, str(item)):
1234 raise ChainException("Invalid format '{item}' specified in {fname}"
1235 .format(item=item, fname=list_name))
1236 # must have at least 1 element
1238 raise ChainException('%s cannot be empty' % (list_name))
1239 # for shared network, if 1 element is passed, replicate it as many times
1241 if self.config.service_chain_shared_net and len(ll) == 1:
1242 ll = [ll[0]] * self.chain_count
1244 # number of elements musty be the number of chains
1245 elif len(ll) < self.chain_count:
1246 raise ChainException('%s=%s must be a list with %d elements per chain' %
1247 (list_name, ll, self.chain_count))
def _setup_image(self):
    """Resolve (and upload if needed) the VM image to boot all VNF instances.

    Lookup order: previously resolved image name (server mode), explicit
    vm_image_file from config, then a built-in nfvbenchvm qcow2 file found
    in the package root.
    raises ChainException if no image can be found or the upload fails
    """
    # To avoid reuploading image in server mode, check whether image_name is set or not
    if self.image_name:
        self.image_instance = self.comp.find_image(self.image_name)
    if self.image_instance:
        LOG.info("Reusing image %s", self.image_name)
    else:
        image_name_search_pattern = r'(nfvbenchvm-\d+(\.\d+)*).qcow2'
        if self.config.vm_image_file:
            match = re.search(image_name_search_pattern, self.config.vm_image_file)
            if match:
                self.image_name = match.group(1)
                LOG.info('Using provided VM image file %s', self.config.vm_image_file)
            else:
                raise ChainException('Provided VM image file name %s must start with '
                                     '"nfvbenchvm-<version>"' % self.config.vm_image_file)
        else:
            # no file provided: look for a built-in image in the package root
            pkg_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
            for f in os.listdir(pkg_root):
                if re.search(image_name_search_pattern, f):
                    self.config.vm_image_file = pkg_root + '/' + f
                    self.image_name = f.replace('.qcow2', '')
                    LOG.info('Found built-in VM image file %s', f)
                    break
            else:
                raise ChainException('Cannot find any built-in VM image file.')
        self.image_instance = self.comp.find_image(self.image_name)
        if not self.image_instance:
            LOG.info('Uploading %s', self.image_name)
            res = self.comp.upload_image_via_url(self.image_name,
                                                 self.config.vm_image_file)
            if not res:
                raise ChainException('Error uploading image %s from %s. ABORTING.' %
                                     (self.image_name, self.config.vm_image_file))
            LOG.info('Image %s successfully uploaded.', self.image_name)
            self.image_instance = self.comp.find_image(self.image_name)
    # image multiqueue property must be set according to the vif_multiqueue_size
    # config value (defaults to 1 or disabled)
    self.comp.image_set_multiqueue(self.image_instance, self.config.vif_multiqueue_size > 1)
1293 def _ensure_instances_active(self):
1295 for chain in self.chains:
1296 instances.extend(chain.get_instances())
1297 initial_instance_count = len(instances)
1298 max_retries = (self.config.check_traffic_time_sec + (initial_instance_count - 1) * 10 +
1299 self.config.generic_poll_sec - 1) / self.config.generic_poll_sec
1302 remaining_instances = []
1303 for instance in instances:
1304 status = instance.get_status()
1305 if status == 'ACTIVE':
1306 LOG.info('Instance %s is ACTIVE on %s',
1307 instance.name, instance.get_hypervisor_name())
1309 if status == 'ERROR':
1310 raise ChainException('Instance %s creation error: %s' %
1312 instance.instance.fault['message']))
1313 remaining_instances.append(instance)
1314 if not remaining_instances:
1317 if retry >= max_retries:
1318 raise ChainException('Time-out: %d/%d instances still not active' %
1319 (len(remaining_instances), initial_instance_count))
1320 LOG.info('Waiting for %d/%d instance to become active (retry %d/%d)...',
1321 len(remaining_instances), initial_instance_count,
1323 instances = remaining_instances
1324 time.sleep(self.config.generic_poll_sec)
1325 if initial_instance_count:
1326 LOG.info('All instances are active')
def get_networks(self, chain_id=None):
    """Get the networks for given EXT, PVP or PVVP chain.

    For EXT packet path, these networks must pre-exist.
    For PVP, PVVP these networks will be created if they do not exist.
    chain_id: to which chain the networks belong.
              a None value will mean that these networks are shared by all chains
    return: a list of ChainNetwork instances
    raises ChainException (after cleaning up partially created networks) on error
    """
    if self.networks:
        # the only case where self.networks exists is when the networks are shared
        # across all chains
        return self.networks
    if self.config.service_chain == ChainType.EXT:
        lookup_only = True
        ext_net = self.config.external_networks
        net_cfg = [AttrDict({'name': name,
                             'segmentation_id': None,
                             'physical_network': None})
                   for name in [ext_net.left, ext_net.right]]
        # segmentation id and subnet should be discovered from neutron
    else:
        lookup_only = False
        int_nets = self.config.internal_networks
        if self.config.service_chain == ChainType.PVP:
            net_cfg = [int_nets.left, int_nets.right]
        else:
            net_cfg = [int_nets.left, int_nets.middle, int_nets.right]
        if self.config.l3_router:
            edge_nets = self.config.edge_networks
            net_cfg.append(edge_nets.left)
            net_cfg.append(edge_nets.right)
    networks = []
    try:
        for cfg in net_cfg:
            networks.append(ChainNetwork(self, cfg, chain_id, lookup_only=lookup_only))
    except Exception:
        # need to cleanup all successful networks prior to bailing out
        for net in networks:
            net.delete()
        raise
    return networks
def get_existing_ports(self):
    """Return all existing neutron ports, grouped by the network they belong to.

    The neutron port list is fetched only once and cached, as the query can
    be costly when there are lots of ports and is only needed when VM and
    network are being reused.

    return: a dict of lists of neutron port dicts, keyed by the uuid of the
            network each port is attached to. Each port dict carries the
            usual neutron fields ('mac_address', 'network_id',
            'binding:host_id', 'fixed_ips', ...).
    """
    if not self._existing_ports:
        LOG.info('Loading list of all ports...')
        existing_ports = self.neutron_client.list_ports()['ports']
        # place all ports in the dict keyed by the port network uuid
        for port in existing_ports:
            self._existing_ports.setdefault(port['network_id'], []).append(port)
        LOG.info("Loaded %d ports attached to %d networks",
                 len(existing_ports), len(self._existing_ports))
    return self._existing_ports
def get_ports_from_network(self, chain_network):
    """Return the list of existing ports attached to one chain network.

    Ports are retrieved lazily (and cached) because listing them can be
    costly and is only needed when VM and network are being reused.

    chain_network: a ChainNetwork instance for which attached ports need
                   to be retrieved
    return: the list of neutron ports attached to the requested network,
            or None if there are none
    """
    ports_by_network = self.get_existing_ports()
    return ports_by_network.get(chain_network.get_uuid())
def get_hypervisor_from_mac(self, mac):
    """Get the hypervisor that hosts a VM MAC.

    mac: MAC address to look for
    return: the hypervisor where the matching port runs or None if not found
    """
    # _existing_ports is a dict of list of ports indexed by network id
    for port_list in list(self.get_existing_ports().values()):
        for port in port_list:
            try:
                if port['mac_address'] == mac:
                    host_id = port['binding:host_id']
                    return self.comp.get_hypervisor(host_id)
            except KeyError:
                # skip ports that do not carry the expected fields
                pass
    return None
def get_host_ip_from_mac(self, mac):
    """Get the host IP address matching a MAC.

    mac: MAC address to look for
    return: the IP address of the host where the matching port runs or None if not found
    """
    hypervisor = self.get_hypervisor_from_mac(mac)
    if hypervisor:
        return hypervisor.host_ip
    return None
def get_chain_vlans(self, port_index):
    """Get the list of per chain VLAN id on a given port.

    port_index: left port is 0, right port is 1
    return: a VLAN ID list indexed by the chain index or None if no vlan tagging
    """
    if self.chains and self.is_admin:
        vlans = []
        for chain_index in range(self.chain_count):
            vlans.append(self.chains[chain_index].get_vlan(port_index))
        return vlans
    # no discovered chains or no admin rights: fall back to the config values
    return self.vlans[port_index]
def get_chain_vxlans(self, port_index):
    """Get the list of per chain VNIs id on a given port.

    port_index: left port is 0, right port is 1
    return: a VNIs ID list indexed by the chain index or None if no vlan tagging
    raises ChainException when not running with OpenStack admin rights
    """
    if self.chains and self.is_admin:
        vnis = []
        for chain_index in range(self.chain_count):
            vnis.append(self.chains[chain_index].get_vxlan(port_index))
        return vnis
    raise ChainException('VxLAN is only supported with OpenStack and with admin user')
def get_chain_mpls_inner_labels(self, port_index):
    """Get the list of per chain MPLS VPN Labels on a given port.

    port_index: left port is 0, right port is 1
    return: a MPLSs ID list indexed by the chain index or None if no mpls
    raises ChainException when not running with OpenStack admin rights
    """
    if self.chains and self.is_admin:
        labels = []
        for chain_index in range(self.chain_count):
            labels.append(self.chains[chain_index].get_mpls_inner_label(port_index))
        return labels
    raise ChainException('MPLS is only supported with OpenStack and with admin user')
def get_dest_macs(self, port_index):
    """Get the list of per chain dest MACs on a given port.

    Should not be called if EXT+ARP is used (in that case the traffic gen will
    have the ARP responses back from VNFs with the dest MAC to use).

    port_index: left port is 0, right port is 1
    return: a list of dest MACs indexed by the chain index
    """
    # short-circuit keeps ChainType untouched when there are no chains
    discovered = self.chains and self.config.service_chain != ChainType.EXT
    if discovered:
        macs = []
        for chain_index in range(self.chain_count):
            macs.append(self.chains[chain_index].get_dest_mac(port_index))
        return macs
    # no openstack or EXT+no-arp
    return self.dest_macs[port_index]
def get_host_ips(self):
    """Return the IP address(es) of the host compute nodes used for this run.

    :return: a list of 1 IP address (or an empty list if none can be found)
    """
    # Since all chains go through the same compute node(s) we can just retrieve the
    # compute node(s) for the first chain
    if self.chains:
        if self.config.service_chain != ChainType.EXT:
            return self.chains[0].get_host_ips()
        # in the case of EXT, the compute node must be retrieved from the port
        # associated to any of the dest MACs
        dst_macs = self.generator_config.get_dest_macs()
        # dest MAC on port 0, chain 0
        dst_mac = dst_macs[0][0]
        host_ip = self.get_host_ip_from_mac(dst_mac)
        if host_ip:
            LOG.info('Found compute node IP for EXT chain: %s', host_ip)
            return [host_ip]
    return []
def get_compute_nodes(self):
    """Return the name of the host compute nodes used for this run.

    :return: a list of 0 or 1 host name in the az:host format
    """
    # Since all chains go through the same compute node(s) we can just retrieve the
    # compute node name(s) for the first chain
    if self.chains:
        if self.config.service_chain != ChainType.EXT:
            return self.chains[0].get_compute_nodes()
        # in the case of EXT, the compute node must be retrieved from the port
        # associated to any of the dest MACs
        dst_macs = self.generator_config.get_dest_macs()
        # dest MAC on port 0, chain 0
        dst_mac = dst_macs[0][0]
        hypervisor = self.get_hypervisor_from_mac(dst_mac)
        if hypervisor:
            LOG.info('Found hypervisor for EXT chain: %s', hypervisor.hypervisor_hostname)
            return [':' + hypervisor.hypervisor_hostname]
    # no openstack = no chains
    return []
1549 """Delete resources for all chains."""
1550 for chain in self.chains:
1552 for network in self.networks:
1554 if self.config.use_management_port and hasattr(self, 'management_network'):
1555 self.management_network.delete()
1556 if self.config.use_floating_ip and hasattr(self, 'floating_ip_network'):
1557 self.floating_ip_network.delete()
1559 self.flavor.delete()