segmentation_id:
         physical_network:
 
+# IDLE INTERFACES: PVP, PVVP and non shared net only.
+# By default each test VM will have 2 virtual interfaces for looping traffic.
+# If service_chain_shared_net is false, additional virtual interfaces can be
+# added at VM creation time; these interfaces will not carry any traffic and
+# can be used to measure the impact of idle interfaces on the overall performance.
+# All these idle interfaces will use normal ports (not direct).
+# Number of idle interfaces per VM (none by default)
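+# For example (illustration only), a value of 4 would give each test VM its 2 traffic
+# interfaces plus 4 idle interfaces, i.e. 6 virtual interfaces in total.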
+idle_interfaces_per_vm: 0
+
+# A new network is created for each idle interface.
+# If service_chain_shared_net is true, the options below will be ignored
+# and no idle interfaces will be added.
+idle_networks:
+    # Prefix for all idle networks
+    name: 'nfvbench-idle-net'
+    # Prefix for all idle subnetworks
+    subnet: 'nfvbench-idle-subnet'
+    # CIDR to use for all idle networks (value should not matter)
+    cidr: '192.169.1.0/24'
+    # Type of network associated with the idle virtual interfaces (vlan or vxlan)
+    network_type: 'vlan'
+    # segmentation ID to use for the network attached to the idle virtual interfaces
+    # vlan: leave empty to let neutron pick the segmentation ID
+    # vxlan: must specify the VNI value to be used (cannot be empty)
+    # Note that NFVbench will use as many consecutive segmentation IDs as needed.
+    # For example, for 4 PVP chains and 8 idle
+    # interfaces per VM, NFVbench will use 32 consecutive values of segmentation ID
+    # starting from the value provided.
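+    # Illustration (values are arbitrary): with segmentation_id 2000, 4 PVP chains and
+    # 8 idle interfaces per VM, chain 0 would use IDs 2000-2007, chain 1 2008-2015,
+    # and so on up to 2031.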
+    segmentation_id:
+    # physnet name to use for all idle interfaces
+    physical_network:
+
 # In the scenario of PVVP + SRIOV, there is a choice of how the traffic will be
 # handled in the middle network. The default (false) will use the vswitch, while
 # SRIOV can be used by toggling the setting below.
 
 class ChainNetwork(object):
     """Could be a shared network across all chains or a chain private network."""
 
-    def __init__(self, manager, network_config, chain_id=None, lookup_only=False):
+    def __init__(self, manager, network_config, chain_id=None, lookup_only=False,
+                 suffix=None):
         """Create a network for given chain.
 
         network_config: a dict containing the network properties
                         (name, segmentation_id and physical_network)
         chain_id: to which chain the networks belong.
                   a None value will mean that these networks are shared by all chains
+        suffix: a suffix to add to the network name (if not None)
         """
         self.manager = manager
         if chain_id is None:
             else:
                 # network_config.name is a prefix string
                 self.name = network_config.name + str(chain_id)
+        if suffix:
+            self.name = self.name + suffix
         self.segmentation_id = self._get_item(network_config.segmentation_id,
                                               chain_id, auto_index=True)
         self.physical_network = self._get_item(network_config.physical_network, chain_id)
             if self.physical_network:
                 body['network']['provider:physical_network'] = self.physical_network
             self.network = self.manager.neutron_client.create_network(body)['network']
+            # create the associated subnet; all subnets share the same name, which is
+            # fine since we never need to look them up by name
             body = {
                 'subnet': {'name': network_config.subnet,
                            'cidr': network_config.cidr,
         if len(networks) > 2:
             # we will have more than 1 VM in each chain
             self.name += '-' + str(vnf_id)
+        # A list of ports for this chain.
+        # There are normally 2 ports carrying traffic (indexes 0 and 1) and
+        # potentially multiple idle ports not carrying traffic (index 2 and up).
+        # For example, if 7 idle interfaces are requested, the corresponding ports
+        # will be at indexes 2 to 8.
         self.ports = []
         self.status = None
         self.instance = None
         self.reuse = False
         self.host_ip = None
+        self.idle_networks = []
+        self.idle_ports = []
         try:
             # the vnf_id is conveniently also the starting index in networks
             # for the left and right networks associated to this VNF
     def _get_vnic_type(self, port_index):
         """Get the right vnic type for given port indexself.
 
-        If SR-IOV is speficied, middle ports in multi-VNF chains
+        If SR-IOV is specified, middle ports in multi-VNF chains
         can use vswitch or SR-IOV based on config.use_sriov_middle_net
         """
         if self.manager.config.sriov:
                 return 'direct'
         return 'normal'
 
+    def _get_idle_networks_ports(self):
+        """Get the idle networks for PVP or PVVP chain (non shared net only)
+
+        For EXT packet path or shared net, returns empty list.
+        For PVP, PVVP these networks will be created if they do not exist.
+        chain_id: to which chain the networks belong.
+                a None value will mean that these networks are shared by all chains
+        """
+        networks = []
+        ports = []
+        config = self.manager.config
+        chain_id = self.chain.chain_id
+        idle_interfaces_per_vm = config.idle_interfaces_per_vm
+        if config.service_chain == ChainType.EXT or chain_id is None or \
+           idle_interfaces_per_vm == 0:
+            return
+
+        # Make a copy of the idle networks dict as we may have to modify the
+        # segmentation ID
+        idle_network_cfg = AttrDict(config.idle_networks)
+        if idle_network_cfg.segmentation_id:
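+            # e.g. (illustrative values) with a base segmentation ID of 2000 and
+            # 8 idle interfaces per VM, chain 0 starts at 2000 and chain 1 at 2008;
+            # each idle interface then adds its own index (see below)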
+            segmentation_id = idle_network_cfg.segmentation_id + \
+                chain_id * idle_interfaces_per_vm
+        else:
+            segmentation_id = None
+        try:
+            # create as many idle networks and ports as requested
+            for idle_index in range(idle_interfaces_per_vm):
+                if config.service_chain == ChainType.PVP:
+                    suffix = '.%d' % (idle_index)
+                else:
+                    suffix = '.%d.%d' % (self.vnf_id, idle_index)
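+                # e.g. (illustration) for idle_index 2, a PVP chain uses suffix '.2'
+                # while a PVVP chain includes the VNF id, e.g. '.0.2' for the first VNF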
+                port_name = self.name + '-idle' + str(idle_index)
+                # update the segmentation id based on chain id and idle index
+                if segmentation_id:
+                    idle_network_cfg.segmentation_id = segmentation_id + idle_index
+                    port_name = port_name + "." + str(segmentation_id)
+
+                networks.append(ChainNetwork(self.manager,
+                                             idle_network_cfg,
+                                             chain_id,
+                                             suffix=suffix))
+                ports.append(ChainVnfPort(port_name,
+                                          self,
+                                          networks[idle_index],
+                                          'normal'))
+        except Exception:
+            # clean up any networks and ports that were successfully created
+            for net in networks:
+                net.delete()
+            for port in ports:
+                port.delete()
+            raise
+        self.idle_networks = networks
+        self.idle_ports = ports
+
     def _setup(self, networks):
         flavor_id = self.manager.flavor.flavor.id
         # Check if we can reuse an instance with same name
                                    self,
                                    networks[index],
                                    self._get_vnic_type(index)) for index in [0, 1]]
+
+        # create idle networks and ports only if instance is not reused
+        # if reused, we do not care about idle networks/ports
+        if not self.reuse:
+            self._get_idle_networks_ports()
+
         # if no reuse, actual vm creation is deferred after all ports in the chain are created
         # since we need to know the next mac in a multi-vnf chain
 
         if self.instance is None:
             port_ids = [{'port-id': vnf_port.port['id']}
                         for vnf_port in self.ports]
+            # add idle ports
+            for idle_port in self.idle_ports:
+                port_ids.append({'port-id': idle_port.port['id']})
             vm_config = self._get_vm_config(remote_mac_pair)
             az = self.manager.placer.get_required_az()
             server = self.manager.comp.create_server(self.name,
                 LOG.info("Deleted instance %s", self.name)
             for port in self.ports:
                 port.delete()
+            for port in self.idle_ports:
+                port.delete()
+            for network in self.idle_networks:
+                network.delete()
 
 class Chain(object):
     """A class to manage a single chain.
 
             except Exception:
                 LOG.exception("Port deletion failed")
 
+        # associated subnets are automatically deleted by neutron
         for net in self.networks:
             LOG.info("Deleting network %s...", net['name'])
             try:
         self.neutron_client = nclient.Client('2.0', session=session)
         self.nova_client = Client(2, session=session)
         network_names = [inet['name'] for inet in config.internal_networks.values()]
+        # add idle networks as well
+        if config.idle_networks.name:
+            network_names.append(config.idle_networks.name)
         self.cleaners = [ComputeCleaner(self.nova_client, config.loop_vm_name),
                          FlavorCleaner(self.nova_client, config.flavor_type),
                          NetworkCleaner(self.neutron_client, network_names)]
 
 gs_url=artifacts.opnfv.org/nfvbench/images
 
 # image version number
-__version__=0.6
+__version__=0.7
 image_name=nfvbenchvm_centos-$__version__
 
 # if image exists skip building
 
 echo "Generating configurations for forwarder..."
 eval $(cat $NFVBENCH_CONF)
 touch /nfvbench_configured.flag
-NICS=`lspci -D | grep Ethernet | cut -d' ' -f1 | xargs`
-PCI_ADDRESS_1=`echo $NICS | awk '{ print $1 }'`
-PCI_ADDRESS_2=`echo $NICS | awk '{ print $2 }'`
+
 CPU_CORES=`grep -c ^processor /proc/cpuinfo`
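+# CPU_MASKS covers all cores, e.g. (illustration) 4 cores -> 2^4 - 1 = 15 -> 0xF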
 CPU_MASKS=0x`echo "obase=16; 2 ^ $CPU_CORES - 1" | bc`
 WORKER_CORES=`expr $CPU_CORES - 1`
 echo 1 > /sys/devices/virtual/workqueue/cpumask
 echo 1 > /proc/irq/default_smp_affinity
 for irq in `ls /proc/irq/`; do
-    echo 1 > /proc/irq/$irq/smp_affinity
+    if [ -f /proc/irq/$irq/smp_affinity ]; then
+        echo 1 > /proc/irq/$irq/smp_affinity
+    fi
 done
 tuna -c $(seq -s, 1 1 $WORKER_CORES) --isolate
 
+NET_PATH=/sys/class/net
+
+get_pci_address() {
+    # device mapping for CentOS Linux 7:
+    # lspci:
+    #   00:03.0 Ethernet controller: Red Hat, Inc. Virtio network device
+    #   00:04.0 Ethernet controller: Red Hat, Inc. Virtio network device
+    # /sys/class/net:
+    # /sys/class/net/eth0 -> ../../devices/pci0000:00/0000:00:03.0/virtio0/net/eth0
+    # /sys/class/net/eth1 -> ../../devices/pci0000:00/0000:00:04.0/virtio1/net/eth1
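+    # Field 5 of the symlink target (cut -d "/" -f5) is therefore the PCI address,
+    # e.g. '0000:00:03.0' for eth0 in the mapping above.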
+
+    mac=$1
+    for f in $(ls $NET_PATH/); do
+        if grep -q "$mac" $NET_PATH/$f/address; then
+            pci_addr=$(readlink $NET_PATH/$f | cut -d "/" -f5)
+            # some virtual interfaces match on MAC and do not have a PCI address
+            if [ "$pci_addr" -a "$pci_addr" != "N/A" ]; then
+                break
+            else
+                pci_addr=""
+            fi
+        fi;
+    done
+    if [ -z "$pci_addr" ]; then
+        echo "ERROR: Cannot find pci address for MAC $mac" >&2
+        logger "NFVBENCHVM ERROR: Cannot find pci address for MAC $mac"
+        return 1
+    fi
+    echo $pci_addr
+    return 0
+}
+
 # Sometimes the interfaces on the loopback VM will use different drivers, e.g.
 # one from the vswitch (virtio based) and one from an SRIOV VF. In this case,
 # we have to make sure the forwarder uses them in the right order, which is
 # especially important if the VM is in a PVVP chain.
-SWAP_FLAG=0
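+# Since get_pci_address resolves each PCI address from its MAC (INTF_MAC1/INTF_MAC2),
+# the two ports are always bound to the forwarder in the expected order.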
 if [ $INTF_MAC1 ] && [ $INTF_MAC2 ]; then
-    NET_PATH=/sys/class/net
-    EXP_INTF_1=$(for f in $(ls $NET_PATH/); do if grep -q "$INTF_MAC1" $NET_PATH/$f/address; then echo $f; break; fi; done)
-    EXP_PCI_ADDRESS_1=$(ethtool -i $EXP_INTF_1 | grep "bus-info" | awk -F' ' '{ print $2 }')
-    EXP_INTF_2=$(for f in $(ls $NET_PATH/); do if grep -q "$INTF_MAC2" $NET_PATH/$f/address; then echo $f; break; fi; done)
-    EXP_PCI_ADDRESS_2=$(ethtool -i $EXP_INTF_2 | grep "bus-info" | awk -F' ' '{ print $2 }')
-    if [ "$PCI_ADDRESS_1" == "$EXP_PCI_ADDRESS_2" ] && [ "$PCI_ADDRESS_2" == "$EXP_PCI_ADDRESS_1" ]; then
-        # Interfaces are not coming in the expected order:
-        #     (1) Swap the traffic generator MAC in the case of testpmd;
-        #     (2) Swap the interface configs in the case of VPP;
-        SWAP_FLAG=1
-    fi
+    PCI_ADDRESS_1=$(get_pci_address $INTF_MAC1)
+    PCI_ADDRESS_2=$(get_pci_address $INTF_MAC2)
+else
+    echo "ERROR: VM MAC Addresses missing in $NFVBENCH_CONF"
+    logger "NFVBENCHVM ERROR: VM MAC Addresses missing in $NFVBENCH_CONF"
 fi
 
-# Configure the forwarder
-if [ -z "`lsmod | grep igb_uio`" ]; then
-    modprobe uio
-    insmod /dpdk/igb_uio.ko
-fi
-if [ "$FORWARDER" == "testpmd" ]; then
-    echo "Configuring testpmd..."
-    if [ $SWAP_FLAG -eq 1 ]; then
-        TEMP=$TG_MAC1; TG_MAC1=$TG_MAC2; TG_MAC2=$TEMP
+if [ $PCI_ADDRESS_1 ] && [ $PCI_ADDRESS_2 ]; then
+    logger "NFVBENCHVM: Using pci $PCI_ADDRESS_1 ($INTF_MAC1)"
+    logger "NFVBENCHVM: Using pci $PCI_ADDRESS_2 ($INTF_MAC2)"
+    # Configure the forwarder
+    if [ -z "`lsmod | grep igb_uio`" ]; then
+        modprobe uio
+        insmod /dpdk/igb_uio.ko
     fi
-    # Binding ports to DPDK
-    /dpdk/dpdk-devbind.py -b igb_uio $PCI_ADDRESS_1
-    /dpdk/dpdk-devbind.py -b igb_uio $PCI_ADDRESS_2
-    screen -dmSL testpmd /dpdk/testpmd \
-                         -c $CPU_MASKS \
-                         -n 4 \
-                         -- \
-                             --burst=32 \
-                             --txd=256 \
-                             --rxd=1024 \
-                             --eth-peer=0,$TG_MAC1 \
-                             --eth-peer=1,$TG_MAC2 \
-                             --forward-mode=mac \
-                             --nb-cores=$WORKER_CORES \
-                             --max-pkt-len=9000 \
-                             --cmdline-file=/dpdk/testpmd_cmd.txt
-else
-    echo "Configuring vpp..."
-    cp /vpp/startup.conf /etc/vpp/startup.conf
-    cp /vpp/vm.conf /etc/vpp/vm.conf
+    if [ "$FORWARDER" == "testpmd" ]; then
+        echo "Configuring testpmd..."
+        # Binding ports to DPDK
+        /dpdk/dpdk-devbind.py -b igb_uio $PCI_ADDRESS_1
+        /dpdk/dpdk-devbind.py -b igb_uio $PCI_ADDRESS_2
+        screen -dmSL testpmd /dpdk/testpmd \
+                            -c $CPU_MASKS \
+                            -n 4 \
+                            -- \
+                                --burst=32 \
+                                --txd=256 \
+                                --rxd=1024 \
+                                --eth-peer=0,$TG_MAC1 \
+                                --eth-peer=1,$TG_MAC2 \
+                                --forward-mode=mac \
+                                --nb-cores=$WORKER_CORES \
+                                --max-pkt-len=9000 \
+                                --cmdline-file=/dpdk/testpmd_cmd.txt
+        echo "testpmd running in screen 'testpmd'"
+        logger "NFVBENCHVM: testpmd running in screen 'testpmd'"
+    else
+        echo "Configuring vpp..."
+        cp /vpp/startup.conf /etc/vpp/startup.conf
+        cp /vpp/vm.conf /etc/vpp/vm.conf
 
-    sed -i "s/{{PCI_ADDRESS_1}}/$PCI_ADDRESS_1/g" /etc/vpp/startup.conf
-    sed -i "s/{{PCI_ADDRESS_2}}/$PCI_ADDRESS_2/g" /etc/vpp/startup.conf
-    sed -i "s/{{WORKER_CORES}}/$WORKER_CORES/g" /etc/vpp/startup.conf
-    service vpp start
-    sleep 10
+        sed -i "s/{{PCI_ADDRESS_1}}/$PCI_ADDRESS_1/g" /etc/vpp/startup.conf
+        sed -i "s/{{PCI_ADDRESS_2}}/$PCI_ADDRESS_2/g" /etc/vpp/startup.conf
+        sed -i "s/{{WORKER_CORES}}/$WORKER_CORES/g" /etc/vpp/startup.conf
+        service vpp start
+        sleep 10
 
-    INTFS=`vppctl show int | grep Ethernet | xargs`
-    INTF_1=`echo $INTFS | awk '{ print $1 }'`
-    INTF_2=`echo $INTFS | awk '{ print $4 }'`
-    if [ $SWAP_FLAG -eq 1 ]; then
-        TEMP=$INTF_1; INTF_1=$INTF_2; INTF_2=$TEMP
+        INTFS=`vppctl show int | grep Ethernet | xargs`
+        INTF_1=`echo $INTFS | awk '{ print $1 }'`
+        INTF_2=`echo $INTFS | awk '{ print $4 }'`
+        sed -i "s/{{INTF_1}}/${INTF_1//\//\/}/g" /etc/vpp/vm.conf
+        sed -i "s/{{INTF_2}}/${INTF_2//\//\/}/g" /etc/vpp/vm.conf
+        sed -i "s/{{VNF_GATEWAY1_CIDR}}/${VNF_GATEWAY1_CIDR//\//\/}/g" /etc/vpp/vm.conf
+        sed -i "s/{{VNF_GATEWAY2_CIDR}}/${VNF_GATEWAY2_CIDR//\//\/}/g" /etc/vpp/vm.conf
+        sed -i "s/{{TG_MAC1}}/${TG_MAC1}/g" /etc/vpp/vm.conf
+        sed -i "s/{{TG_MAC2}}/${TG_MAC2}/g" /etc/vpp/vm.conf
+        sed -i "s/{{TG_NET1}}/${TG_NET1//\//\/}/g" /etc/vpp/vm.conf
+        sed -i "s/{{TG_NET2}}/${TG_NET2//\//\/}/g" /etc/vpp/vm.conf
+        sed -i "s/{{TG_GATEWAY1_IP}}/${TG_GATEWAY1_IP}/g" /etc/vpp/vm.conf
+        sed -i "s/{{TG_GATEWAY2_IP}}/${TG_GATEWAY2_IP}/g" /etc/vpp/vm.conf
+        service vpp restart
+        logger "NFVBENCHVM: vpp service restarted"
     fi
-    sed -i "s/{{INTF_1}}/${INTF_1//\//\/}/g" /etc/vpp/vm.conf
-    sed -i "s/{{INTF_2}}/${INTF_2//\//\/}/g" /etc/vpp/vm.conf
-    sed -i "s/{{VNF_GATEWAY1_CIDR}}/${VNF_GATEWAY1_CIDR//\//\/}/g" /etc/vpp/vm.conf
-    sed -i "s/{{VNF_GATEWAY2_CIDR}}/${VNF_GATEWAY2_CIDR//\//\/}/g" /etc/vpp/vm.conf
-    sed -i "s/{{TG_MAC1}}/${TG_MAC1}/g" /etc/vpp/vm.conf
-    sed -i "s/{{TG_MAC2}}/${TG_MAC2}/g" /etc/vpp/vm.conf
-    sed -i "s/{{TG_NET1}}/${TG_NET1//\//\/}/g" /etc/vpp/vm.conf
-    sed -i "s/{{TG_NET2}}/${TG_NET2//\//\/}/g" /etc/vpp/vm.conf
-    sed -i "s/{{TG_GATEWAY1_IP}}/${TG_GATEWAY1_IP}/g" /etc/vpp/vm.conf
-    sed -i "s/{{TG_GATEWAY2_IP}}/${TG_GATEWAY2_IP}/g" /etc/vpp/vm.conf
-    service vpp restart
+else
+    echo "ERROR: Cannot find PCI Address from MAC"
+    echo "$INTF_MAC1: $PCI_ADDRESS_1"
+    echo "$INTF_MAC2: $PCI_ADDRESS_2"
+    logger "NFVBENCHVM ERROR: Cannot find PCI Address from MAC"
 fi