"""Could be a shared network across all chains or a chain private network."""
def __init__(self, manager, network_config, chain_id=None, lookup_only=False):
- """Create a network for given chain."""
+ """Create a network for given chain.
+
+ network_config: a dict containing the network properties
+ (segmentation_id and physical_network)
+ chain_id: to which chain the networks belong.
+ a None value will mean that these networks are shared by all chains
+ """
self.manager = manager
self.name = network_config.name
+ self.segmentation_id = self._get_item(network_config.segmentation_id,
+ chain_id, auto_index=True)
+ self.physical_network = self._get_item(network_config.physical_network, chain_id)
if chain_id is not None:
self.name += str(chain_id)
self.reuse = False
self.delete()
raise
+ def _get_item(self, item_field, index, auto_index=False):
+ """Retrieve an item from a list or a single value.
+
+ item_field: can be None, a tuple of a single value
+ index: if None is same as 0, else is the index for a chain
+ auto_index: if true will automatically get the final value by adding the
+ index to the base value (if full list not provided)
+
+ If the item_field is not a tuple, it is considered same as a tuple with same value at any
+ index.
+ If a list is provided, its length must be > index
+ """
+ if not item_field:
+ return None
+ if index is None:
+ index = 0
+ if isinstance(item_field, tuple):
+ try:
+ return item_field[index]
+ except IndexError:
+ raise ChainException("List %s is too short for chain index %d" %
+ (str(item_field), index))
+ # single value is configured
+ if auto_index:
+ return item_field + index
+ return item_field
+
def _setup(self, network_config, lookup_only):
# Lookup if there is a matching network with same name
networks = self.manager.neutron_client.list_networks(name=self.name)
network = networks['networks'][0]
# a network of same name already exists, we need to verify it has the same
# characteristics
- if network_config.segmentation_id:
- if network['provider:segmentation_id'] != network_config.segmentation_id:
+ if self.segmentation_id:
+ if network['provider:segmentation_id'] != self.segmentation_id:
raise ChainException("Mismatch of 'segmentation_id' for reused "
"network '{net}'. Network has id '{seg_id1}', "
"configuration requires '{seg_id2}'."
.format(net=self.name,
seg_id1=network['provider:segmentation_id'],
- seg_id2=network_config.segmentation_id))
+ seg_id2=self.segmentation_id))
- if network_config.physical_network:
- if network['provider:physical_network'] != network_config.physical_network:
+ if self.physical_network:
+ if network['provider:physical_network'] != self.physical_network:
raise ChainException("Mismatch of 'physical_network' for reused "
"network '{net}'. Network has '{phys1}', "
"configuration requires '{phys2}'."
.format(net=self.name,
phys1=network['provider:physical_network'],
- phys2=network_config.physical_network))
+ phys2=self.physical_network))
LOG.info('Reusing existing network %s', self.name)
self.reuse = True
}
if network_config.network_type:
body['network']['provider:network_type'] = network_config.network_type
- if network_config.segmentation_id:
- body['network']['provider:segmentation_id'] = network_config.segmentation_id
- if network_config.physical_network:
- body['network']['provider:physical_network'] = network_config.physical_network
-
+ if self.segmentation_id:
+ body['network']['provider:segmentation_id'] = self.segmentation_id
+ if self.physical_network:
+ body['network']['provider:physical_network'] = self.physical_network
self.network = self.manager.neutron_client.create_network(body)['network']
body = {
'subnet': {'name': network_config.subnet,
subnet = self.manager.neutron_client.create_subnet(body)['subnet']
# add subnet id to the network dict since it has just been added
self.network['subnets'] = [subnet['id']]
- LOG.info('Created network: %s.', self.name)
+ LOG.info('Created network: %s', self.name)
def get_uuid(self):
"""
raise ChainException('Trying to retrieve VLAN id for non VLAN network')
return self.network['provider:segmentation_id']
def get_vxlan(self):
    """Return the VNI (VXLAN network identifier) of this network.

    :return: VNI ID for this network
    """
    net = self.network
    if net['provider:network_type'] == 'vxlan':
        return net['provider:segmentation_id']
    raise ChainException('Trying to retrieve VNI for non VXLAN network')
def delete(self):
"""Delete this network."""
if not self.reuse and self.network:
can use vswitch or SR-IOV based on config.use_sriov_middle_net
"""
if self.manager.config.sriov:
- if self.manager.config.use_sriov_middle_net:
+ chain_length = self.chain.get_length()
+ if self.manager.config.use_sriov_middle_net or chain_length == 1:
return 'direct'
- if self.vnf_id == 0:
+ if self.vnf_id == 0 and port_index == 0:
# first VNF in chain must use sriov for left port
- if port_index == 0:
- return 'direct'
- elif (self.vnf_id == self.chain.get_length() - 1) and (port_index == 1):
+ return 'direct'
+ if (self.vnf_id == chain_length - 1) and (port_index == 1):
# last VNF in chain must use sriov for right port
return 'direct'
return 'normal'
self._reuse_exception('Left network mismatch')
if networks[RIGHT].name not in instance.networks:
self._reuse_exception('Right network mismatch')
- # Other checks not performed (yet)
- # check if az and compute node match
+
self.reuse = True
self.instance = instance
LOG.info('Reusing existing instance %s on %s',
# if no reuse, actual vm creation is deferred after all ports in the chain are created
# since we need to know the next mac in a multi-vnf chain
- def get_az(self):
- """Get the AZ associated to this VNF."""
- return self.manager.az[0]
-
def create_vnf(self, remote_mac_pair):
"""Create the VNF instance if it does not already exist."""
if self.instance is None:
port_ids = [{'port-id': vnf_port.port['id']}
for vnf_port in self.ports]
vm_config = self._get_vm_config(remote_mac_pair)
- az = self.get_az()
+ az = self.manager.placer.get_required_az()
server = self.manager.comp.create_server(self.name,
self.manager.image_instance,
self.manager.flavor.flavor,
config_drive=True,
files={NFVBENCH_CFG_VM_PATHNAME: vm_config})
if server:
- LOG.info('Created instance %s on %s', self.name, az)
self.instance = server
+ if self.manager.placer.is_resolved():
+ LOG.info('Created instance %s on %s', self.name, az)
+ else:
+ # the location is undetermined at this point
+ # self.get_hypervisor_name() will return None
+ LOG.info('Created instance %s - waiting for placement resolution...', self.name)
+ # here we MUST wait until this instance is resolved otherwise subsequent
+ # VNF creation can be placed in other hypervisors!
+ config = self.manager.config
+ max_retries = (config.check_traffic_time_sec +
+ config.generic_poll_sec - 1) / config.generic_poll_sec
+ retry = 0
+ for retry in range(max_retries):
+ status = self.get_status()
+ if status == 'ACTIVE':
+ hyp_name = self.get_hypervisor_name()
+ LOG.info('Instance %s is active and has been placed on %s',
+ self.name, hyp_name)
+ self.manager.placer.register_full_name(hyp_name)
+ break
+ if status == 'ERROR':
+ raise ChainException('Instance %s creation error: %s' %
+ (self.name,
+ self.instance.fault['message']))
+ LOG.info('Waiting for instance %s to become active (retry %d/%d)...',
+ self.name, retry + 1, max_retries + 1)
+ time.sleep(config.generic_poll_sec)
+ else:
+ # timing out
+ LOG.error('Instance %s creation timed out', self.name)
+ raise ChainException('Instance %s creation timed out' % self.name)
self.reuse = False
else:
raise ChainException('Unable to create instance: %s' % (self.name))
self.instances.append(ChainVnf(self,
chain_instance_index,
self.networks))
+ # at this point new VNFs are not created yet but
+ # verify that all discovered VNFs are on the same hypervisor
+ self._check_hypervisors()
# now that all VNF ports are created we need to calculate the
# left/right remote MAC for each VNF in the chain
# before actually creating the VNF itself
self.delete()
raise
+ def _check_hypervisors(self):
+ common_hypervisor = None
+ for instance in self.instances:
+ # get the full hypervizor name (az:compute)
+ hname = instance.get_hypervisor_name()
+ if hname:
+ if common_hypervisor:
+ if hname != common_hypervisor:
+ raise ChainException('Discovered instances on different hypervisors:'
+ ' %s and %s' % (hname, common_hypervisor))
+ else:
+ common_hypervisor = hname
+ if common_hypervisor:
+ # check that the common hypervisor name matchs the requested hypervisor name
+ # and set the name to be used by all future instances (if any)
+ if not self.manager.placer.register_full_name(common_hypervisor):
+ raise ChainException('Discovered hypervisor placement %s is incompatible' %
+ common_hypervisor)
+
def get_length(self):
    """Get the number of VNF in the chain."""
    # a chain of N VNFs is bracketed by N+1 networks
    network_count = len(self.networks)
    return network_count - 1
port_index = -1
return self.networks[port_index].get_vlan()
def get_vxlan(self, port_index):
    """Get the VXLAN id on a given port.

    port_index: left port is 0, right port is 1
    return: the vxlan_id or None if there is no vxlan
    """
    # The networks array contains 2 networks for PVP [left, right]
    # and 3 networks in the case of PVVP [left, middle, right];
    # port 1 therefore maps to the last network of the chain
    network = self.networks[-1] if port_index else self.networks[0]
    return network.get_vxlan()
def get_dest_mac(self, port_index):
"""Get the dest MAC on a given port.
network.delete()
class InstancePlacer(object):
    """Manage instance placement for all VNFs in all chains.

    A full az string is made of 2 parts AZ and hypervisor.
    The placement is resolved when both parts az and hypervisor names are known.
    """

    def __init__(self, req_az, req_hyp):
        """Create a new instance placer.

        req_az: requested AZ (can be None or empty if no preference)
        req_hyp: requested hypervisor name (can be None or empty if no
                 preference), can be any of 'nova:', 'comp1', 'nova:comp1';
                 if it is a list, only the first item is used (backward
                 compatibility in config)

        req_az is ignored if req_hyp has an az part
        all other parts beyond the first 2 are ignored in req_hyp
        """
        # backward compatibility: a list may be passed, keep only the head
        if req_hyp and isinstance(req_hyp, list):
            req_hyp = req_hyp[0]
        # keep only the AZ part if a full 'az:host' string was given as AZ
        if req_az and ':' in req_az:
            req_az = req_az.split(':')[0]
        if req_hyp:
            # the hypervisor string may carry an AZ prefix which overrides req_az
            parts = req_hyp.split(':')
            if len(parts) > 1:
                req_az = parts[0]
                req_hyp = parts[1]
        self.requested_az = req_az or ''
        self.requested_hyp = req_hyp or ''
        # Nova can accept AZ only (e.g. 'nova:', use any hypervisor in that AZ)
        # or hypervisor only (e.g. ':comp1')
        # or both (e.g. 'nova:comp1')
        if req_az:
            self.required_az = req_az + ':' + self.requested_hyp
        elif req_hyp:
            # a leading ':' tells nova this is a hypervisor name
            self.required_az = ':' + self.requested_hyp
        else:
            self.required_az = ''
        # placement is resolved once both AZ and hypervisor names are known
        self.resolved = bool(self.requested_az) and bool(self.requested_hyp)

    def get_required_az(self):
        """Return the required az (can be resolved or not)."""
        return self.required_az

    def register_full_name(self, discovered_az):
        """Verify compatibility and register a discovered hypervisor full name.

        discovered_az: a discovered AZ in az:hypervisor format
        return: True if discovered_az is compatible and set
                False if discovered_az is not compatible
        """
        if self.resolved:
            # once pinned, only the exact same location is acceptable
            return discovered_az == self.required_az
        # must be in full az:hypervisor format
        parts = discovered_az.split(':')
        if len(parts) != 2:
            return False
        discovered_azname, discovered_hyp = parts
        if self.requested_az and self.requested_az != discovered_azname:
            return False
        if self.requested_hyp and self.requested_hyp != discovered_hyp:
            return False
        self.required_az = discovered_az
        self.resolved = True
        return True

    def is_resolved(self):
        """Check if the full AZ is resolved.

        return: True if resolved
        """
        return self.resolved
+
class ChainManager(object):
"""A class for managing all chains for a given run.
config = self.config
self.openstack = (chain_runner.cred is not None) and not config.l2_loopback
self.chain_count = config.service_chain_count
+ self.az = None
if self.openstack:
# openstack only
session = chain_runner.cred.get_session()
self.comp = compute.Compute(self.nova_client,
self.glance_client,
config)
- self.az = None
try:
if config.service_chain != ChainType.EXT:
- # we need to find 1 hypervisor
- az_list = self.comp.get_enabled_az_host_list(1)
- if not az_list:
- raise ChainException('No matching hypervisor found')
- self.az = az_list
+ self.placer = InstancePlacer(config.availability_zone, config.compute_nodes)
self._setup_image()
self.flavor = ChainFlavor(config.flavor_type, config.flavor, self.comp)
# Get list of all existing instances to check if some instances can be reused
raise
else:
# no openstack, no need to create chains
- # make sure there at least as many entries as chains in each left/right list
- if len(config.vlans) != 2:
- raise ChainException('The config vlans property must be a list '
- 'with 2 lists of VLAN IDs')
- if not config.l2_loopback:
- self._get_dest_macs_from_config()
- re_vlan = "[0-9]*$"
- self.vlans = [self._check_list('vlans[0]', config.vlans[0], re_vlan),
- self._check_list('vlans[1]', config.vlans[1], re_vlan)]
+ if not config.l2_loopback and config.no_arp:
+ self._get_dest_macs_from_config()
+ if config.vlan_tagging:
+ # make sure there at least as many entries as chains in each left/right list
+ if len(config.vlans) != 2:
+ raise ChainException('The config vlans property must be a list '
+ 'with 2 lists of VLAN IDs')
+ re_vlan = "[0-9]*$"
+ self.vlans = [self._check_list('vlans[0]', config.vlans[0], re_vlan),
+ self._check_list('vlans[1]', config.vlans[1], re_vlan)]
+ if config.vxlan:
+ raise ChainException('VxLAN is only supported with OpenStack')
def _get_dest_macs_from_config(self):
re_mac = "[0-9a-fA-F]{2}([-:])[0-9a-fA-F]{2}(\\1[0-9a-fA-F]{2}){4}$"
if isinstance(ll, (int, str)):
ll = [ll]
if not ll or len(ll) < self.chain_count:
- raise ChainException('%s=%s must be a list with 1 element per chain' % (list_name, ll))
+ raise ChainException('%s=%s must be a list with %d elements per chain' %
+ (list_name, ll, self.chain_count))
for item in ll:
if not re.match(pattern, str(item)):
raise ChainException("Invalid format '{item}' specified in {fname}"
for instance in instances:
status = instance.get_status()
if status == 'ACTIVE':
+ LOG.info('Instance %s is ACTIVE on %s',
+ instance.name, instance.get_hypervisor_name())
continue
if status == 'ERROR':
raise ChainException('Instance %s creation error: %s' %
'segmentation_id': None,
'physical_network': None})
for name in [ext_net.left, ext_net.right]]
+ # segmentation id and subnet should be discovered from neutron
else:
lookup_only = False
int_nets = self.config.internal_networks
+ # VLAN and VxLAN
if self.config.service_chain == ChainType.PVP:
net_cfg = [int_nets.left, int_nets.right]
else:
# no openstack
return self.vlans[port_index]
def get_chain_vxlans(self, port_index):
    """Get the list of per chain VNIs id on a given port.

    port_index: left port is 0, right port is 1
    return: a VNIs ID list indexed by the chain index or None if no vlan tagging
    """
    if not self.chains:
        # no openstack
        raise ChainException('VxLAN is only supported with OpenStack')
    vnis = []
    for chain_index in range(self.chain_count):
        vnis.append(self.chains[chain_index].get_vxlan(port_index))
    return vnis
def get_dest_macs(self, port_index):
"""Get the list of per chain dest MACs on a given port.
return self.chains[0].get_host_ips()
# in the case of EXT, the compute node must be retrieved from the port
# associated to any of the dest MACs
- dst_macs = self.chain_runner.traffic_client.gen.get_dest_macs()
+ dst_macs = self.generator_config.get_dest_macs()
# dest MAC on port 0, chain 0
dst_mac = dst_macs[0][0]
host_ip = self.get_host_ip_from_mac(dst_mac)
return []
def delete(self):
- """Delete resources for all chains.
-
- Will not delete any resource if no-cleanup has been requested.
- """
- if self.config.no_cleanup:
- return
+ """Delete resources for all chains."""
for chain in self.chains:
chain.delete()
for network in self.networks: