NFVBENCH-134 Support multiple idle interfaces per test VM
[nfvbench.git] / nfvbenchvm / dib / elements / nfvbenchvm / static / etc / rc.d / rc.local
index caf3142..94fbd74 100644 (file)
@@ -18,9 +18,8 @@ fi
 echo "Generating configurations for forwarder..."
 eval $(cat $NFVBENCH_CONF)
 touch /nfvbench_configured.flag
-NICS=`lspci -D | grep Ethernet | cut -d' ' -f1 | xargs`
-PCI_ADDRESS_1=`echo $NICS | awk '{ print $1 }'`
-PCI_ADDRESS_2=`echo $NICS | awk '{ print $2 }'`
+
+
 CPU_CORES=`grep -c ^processor /proc/cpuinfo`
 CPU_MASKS=0x`echo "obase=16; 2 ^ $CPU_CORES - 1" | bc`
 WORKER_CORES=`expr $CPU_CORES - 1`
@@ -30,81 +29,114 @@ echo 1 > /sys/bus/workqueue/devices/writeback/cpumask
 echo 1 > /sys/devices/virtual/workqueue/cpumask
 echo 1 > /proc/irq/default_smp_affinity
 for irq in `ls /proc/irq/`; do
-    echo 1 > /proc/irq/$irq/smp_affinity
+    # /proc/irq/ also contains non-IRQ entries (e.g. default_smp_affinity,
+    # already handled above), so only write where the smp_affinity file exists.
+    if [ -f /proc/irq/$irq/smp_affinity ]; then
+        echo 1 > /proc/irq/$irq/smp_affinity
+    fi
 done
 tuna -c $(seq -s, 1 1 $WORKER_CORES) --isolate
 
+NET_PATH=/sys/class/net
+
+get_pci_address() {
+    # Look up the PCI address of the interface whose MAC address matches $1.
+    # Prints the PCI address (e.g. 0000:00:03.0) on stdout and returns 0;
+    # logs an error and returns 1 if no PCI-backed interface matches.
+    #
+    # device mapping for CentOS Linux 7:
+    # lspci:
+    #   00.03.0 Ethernet controller: Red Hat, Inc. Virtio network device
+    #   00.04.0 Ethernet controller: Red Hat, Inc. Virtio network device
+    # /sys/class/net:
+    # /sys/class/net/eth0 -> ../../devices/pci0000:00/0000:00:03.0/virtio0/net/eth0
+    # /sys/class/net/eth1 -> ../../devices/pci0000:00/0000:00:04.0/virtio1/net/eth1
+
+    mac=$1
+    for f in $(ls $NET_PATH/); do
+        if grep -q "$mac" $NET_PATH/$f/address; then
+            # The 5th "/"-separated component of the symlink target is the
+            # PCI address (see mapping above).
+            pci_addr=$(readlink $NET_PATH/$f | cut -d "/" -f5)
+            # some virtual interfaces match on MAC and do not have a PCI address
+            if [ "$pci_addr" -a "$pci_addr" != "N/A" ]; then
+                break
+            else
+                pci_addr=""
+            fi
+        fi;
+    done
+    if [ -z "$pci_addr" ]; then
+        echo "ERROR: Cannot find pci address for MAC $mac" >&2
+        logger "NFVBENCHVM ERROR: Cannot find pci address for MAC $mac"
+        return 1
+    fi
+    echo $pci_addr
+    return 0
+}
+
 # Sometimes the interfaces on the loopback VM will use different drivers, e.g.
 # one from vswitch which is virtio based, one is from SRIOV VF. In this case,
 # we have to make sure the forwarder uses them in the right order, which is
 # especially important if the VM is in a PVVP chain.
-SWAP_FLAG=0
+# Resolving each configured MAC to its PCI address, then binding in that fixed
+# order below, replaces the old SWAP_FLAG detection of mis-ordered interfaces.
 if [ $INTF_MAC1 ] && [ $INTF_MAC2 ]; then
-    NET_PATH=/sys/class/net
-    EXP_INTF_1=$(for f in $(ls $NET_PATH/); do if grep -q "$INTF_MAC1" $NET_PATH/$f/address; then echo $f; break; fi; done)
-    EXP_PCI_ADDRESS_1=$(ethtool -i $EXP_INTF_1 | grep "bus-info" | awk -F' ' '{ print $2 }')
-    EXP_INTF_2=$(for f in $(ls $NET_PATH/); do if grep -q "$INTF_MAC2" $NET_PATH/$f/address; then echo $f; break; fi; done)
-    EXP_PCI_ADDRESS_2=$(ethtool -i $EXP_INTF_2 | grep "bus-info" | awk -F' ' '{ print $2 }')
-    if [ "$PCI_ADDRESS_1" == "$EXP_PCI_ADDRESS_2" ] && [ "$PCI_ADDRESS_2" == "$EXP_PCI_ADDRESS_1" ]; then
-        # Interfaces are not coming in the expected order:
-        #     (1) Swap the traffic generator MAC in the case of testpmd;
-        #     (2) Swap the interface configs in the case of VPP;
-        SWAP_FLAG=1
-    fi
+    PCI_ADDRESS_1=$(get_pci_address $INTF_MAC1)
+    PCI_ADDRESS_2=$(get_pci_address $INTF_MAC2)
 else
+    # No exit here: PCI_ADDRESS_1/2 stay empty and the "Cannot find PCI
+    # Address" branch below reports the failure.
     echo "ERROR: VM MAC Addresses missing in $NFVBENCH_CONF"
     logger "NFVBENCHVM ERROR: VM MAC Addresses missing in $NFVBENCH_CONF"
 fi
 
-# Configure the forwarder
-if [ -z "`lsmod | grep igb_uio`" ]; then
-    modprobe uio
-    insmod /dpdk/igb_uio.ko
-fi
-if [ "$FORWARDER" == "testpmd" ]; then
-    echo "Configuring testpmd..."
-    if [ $SWAP_FLAG -eq 1 ]; then
-        TEMP=$TG_MAC1; TG_MAC1=$TG_MAC2; TG_MAC2=$TEMP
+# Configure and start the forwarder only if both MAC->PCI lookups succeeded.
+if [ $PCI_ADDRESS_1 ] && [ $PCI_ADDRESS_2 ]; then
+    logger "NFVBENCHVM: Using pci $PCI_ADDRESS_1 ($INTF_MAC1)"
+    logger "NFVBENCHVM: Using pci $PCI_ADDRESS_2 ($INTF_MAC2)"
+    # Configure the forwarder
+    if [ -z "`lsmod | grep igb_uio`" ]; then
+        modprobe uio
+        insmod /dpdk/igb_uio.ko
     fi
-    # Binding ports to DPDK
-    /dpdk/dpdk-devbind.py -b igb_uio $PCI_ADDRESS_1
-    /dpdk/dpdk-devbind.py -b igb_uio $PCI_ADDRESS_2
-    screen -dmSL testpmd /dpdk/testpmd \
-                         -c $CPU_MASKS \
-                         -n 4 \
-                         -- \
-                             --burst=32 \
-                             --txd=256 \
-                             --rxd=1024 \
-                             --eth-peer=0,$TG_MAC1 \
-                             --eth-peer=1,$TG_MAC2 \
-                             --forward-mode=mac \
-                             --nb-cores=$WORKER_CORES \
-                             --max-pkt-len=9000 \
-                             --cmdline-file=/dpdk/testpmd_cmd.txt
-else
-    echo "Configuring vpp..."
-    cp /vpp/startup.conf /etc/vpp/startup.conf
-    cp /vpp/vm.conf /etc/vpp/vm.conf
+    if [ "$FORWARDER" == "testpmd" ]; then
+        echo "Configuring testpmd..."
+        # Binding ports to DPDK
+        /dpdk/dpdk-devbind.py -b igb_uio $PCI_ADDRESS_1
+        /dpdk/dpdk-devbind.py -b igb_uio $PCI_ADDRESS_2
+        screen -dmSL testpmd /dpdk/testpmd \
+                            -c $CPU_MASKS \
+                            -n 4 \
+                            -- \
+                                --burst=32 \
+                                --txd=256 \
+                                --rxd=1024 \
+                                --eth-peer=0,$TG_MAC1 \
+                                --eth-peer=1,$TG_MAC2 \
+                                --forward-mode=mac \
+                                --nb-cores=$WORKER_CORES \
+                                --max-pkt-len=9000 \
+                                --cmdline-file=/dpdk/testpmd_cmd.txt
+        echo "testpmd running in screen 'testpmd'"
+        logger "NFVBENCHVM: testpmd running in screen 'testpmd'"
+    else
+        echo "Configuring vpp..."
+        cp /vpp/startup.conf /etc/vpp/startup.conf
+        cp /vpp/vm.conf /etc/vpp/vm.conf
 
-    sed -i "s/{{PCI_ADDRESS_1}}/$PCI_ADDRESS_1/g" /etc/vpp/startup.conf
-    sed -i "s/{{PCI_ADDRESS_2}}/$PCI_ADDRESS_2/g" /etc/vpp/startup.conf
-    sed -i "s/{{WORKER_CORES}}/$WORKER_CORES/g" /etc/vpp/startup.conf
-    service vpp start
-    sleep 10
+        sed -i "s/{{PCI_ADDRESS_1}}/$PCI_ADDRESS_1/g" /etc/vpp/startup.conf
+        sed -i "s/{{PCI_ADDRESS_2}}/$PCI_ADDRESS_2/g" /etc/vpp/startup.conf
+        sed -i "s/{{WORKER_CORES}}/$WORKER_CORES/g" /etc/vpp/startup.conf
+        service vpp start
+        sleep 10
 
-    INTFS=`vppctl show int | grep Ethernet | xargs`
-    INTF_1=`echo $INTFS | awk '{ print $1 }'`
-    INTF_2=`echo $INTFS | awk '{ print $4 }'`
-    if [ $SWAP_FLAG -eq 1 ]; then
-        TEMP=$INTF_1; INTF_1=$INTF_2; INTF_2=$TEMP
+        INTFS=`vppctl show int | grep Ethernet | xargs`
+        # NOTE(review): after xargs flattens the output, the second interface
+        # name is assumed to land in awk field $4 — verify against the column
+        # layout of "vppctl show int".
+        INTF_1=`echo $INTFS | awk '{ print $1 }'`
+        INTF_2=`echo $INTFS | awk '{ print $4 }'`
+        # ${VAR//\//\/} escapes each "/" as "\/" so values containing slashes
+        # (interface names, CIDRs) survive sed's "/" delimiter.
+        sed -i "s/{{INTF_1}}/${INTF_1//\//\/}/g" /etc/vpp/vm.conf
+        sed -i "s/{{INTF_2}}/${INTF_2//\//\/}/g" /etc/vpp/vm.conf
+        sed -i "s/{{VNF_GATEWAY1_CIDR}}/${VNF_GATEWAY1_CIDR//\//\/}/g" /etc/vpp/vm.conf
+        sed -i "s/{{VNF_GATEWAY2_CIDR}}/${VNF_GATEWAY2_CIDR//\//\/}/g" /etc/vpp/vm.conf
+        sed -i "s/{{TG_MAC1}}/${TG_MAC1}/g" /etc/vpp/vm.conf
+        sed -i "s/{{TG_MAC2}}/${TG_MAC2}/g" /etc/vpp/vm.conf
+        sed -i "s/{{TG_NET1}}/${TG_NET1//\//\/}/g" /etc/vpp/vm.conf
+        sed -i "s/{{TG_NET2}}/${TG_NET2//\//\/}/g" /etc/vpp/vm.conf
+        sed -i "s/{{TG_GATEWAY1_IP}}/${TG_GATEWAY1_IP}/g" /etc/vpp/vm.conf
+        sed -i "s/{{TG_GATEWAY2_IP}}/${TG_GATEWAY2_IP}/g" /etc/vpp/vm.conf
+        service vpp restart
+        logger "NFVBENCHVM: vpp service restarted"
     fi
-    sed -i "s/{{INTF_1}}/${INTF_1//\//\/}/g" /etc/vpp/vm.conf
-    sed -i "s/{{INTF_2}}/${INTF_2//\//\/}/g" /etc/vpp/vm.conf
-    sed -i "s/{{VNF_GATEWAY1_CIDR}}/${VNF_GATEWAY1_CIDR//\//\/}/g" /etc/vpp/vm.conf
-    sed -i "s/{{VNF_GATEWAY2_CIDR}}/${VNF_GATEWAY2_CIDR//\//\/}/g" /etc/vpp/vm.conf
-    sed -i "s/{{TG_MAC1}}/${TG_MAC1}/g" /etc/vpp/vm.conf
-    sed -i "s/{{TG_MAC2}}/${TG_MAC2}/g" /etc/vpp/vm.conf
-    sed -i "s/{{TG_NET1}}/${TG_NET1//\//\/}/g" /etc/vpp/vm.conf
-    sed -i "s/{{TG_NET2}}/${TG_NET2//\//\/}/g" /etc/vpp/vm.conf
-    sed -i "s/{{TG_GATEWAY1_IP}}/${TG_GATEWAY1_IP}/g" /etc/vpp/vm.conf
-    sed -i "s/{{TG_GATEWAY2_IP}}/${TG_GATEWAY2_IP}/g" /etc/vpp/vm.conf
-    service vpp restart
+else
+    echo "ERROR: Cannot find PCI Address from MAC"
+    echo "$INTF_MAC1: $PCI_ADDRESS_1"
+    echo "$INTF_MAC2: $PCI_ADDRESS_2"
+    logger "NFVBENCHVM ERROR: Cannot find PCI Address from MAC"
 fi