# SysV rc.local convention: the lock file marks that rc.local ran.
3 touch /var/lock/subsys/local
5 # Waiting for cloud-init to generate $TESTPMD_CONF, retry 60 seconds
6 NFVBENCH_CONF=/etc/nfvbenchvm.conf
# Poll until cloud-init has written the config file.
# NOTE(review): the initialization of $retry and the sleep/decrement inside
# the loop are not visible in this chunk -- confirm retry is set (e.g.
# retry=60) before this point.
8 until [ $retry -eq 0 ]; do
9 if [ -f $NFVBENCH_CONF ]; then break; fi
# Error/exit path when the config never appeared (body not visible here).
13 if [ ! -f $NFVBENCH_CONF ]; then
17 # Parse and obtain all configurations
18 echo "Generating configurations for forwarder..."
# Import KEY=VALUE pairs from the config file into this shell's variables.
# NOTE(review): eval of file content executes arbitrary shell; acceptable
# only because the file is generated by trusted cloud-init.
19 eval $(cat $NFVBENCH_CONF)
# Flag file signalling that configuration has been picked up.
20 touch /nfvbench_configured.flag
22 # WE assume there are at least 2 cores available for the VM
# Count vCPUs by counting "processor" entries in /proc/cpuinfo.
23 CPU_CORES=$(grep -c ^processor /proc/cpuinfo)
25 # We need at least 1 admin core.
26 if [ $CPU_CORES -le 2 ]; then
29 # If the number of cores is even we
30 # reserve 2 cores for admin (second being idle) so the number of
31 # workers is either 1 (if CPU_CORES is 2) or always even
# NOTE(review): the ADMIN_CORES assignments selected by these branches are
# not visible in this chunk (presumably 1 for odd, 2 for even core counts --
# confirm against the full file).
32 if (( $CPU_CORES % 2 )); then
38 # 2 vcpus: AW (core 0: Admin, core 1: Worker)
39 # 3 vcpus: AWW (core 0: Admin, core 1,2: Worker)
40 # 4 vcpus: AWWU (core 0: Admin, core 1,2: Worker, core 3: Unused)
# Workers get whatever cores remain after the admin core(s).
43 WORKER_CORES=$(expr $CPU_CORES - $ADMIN_CORES)
44 # worker cores are all cores except the admin core (core 0) and the eventual unused core
# NOTE(review): seq yields core ids ADMIN_CORES..WORKER_CORES. For 4 vcpus
# (ADMIN_CORES=2, WORKER_CORES=2) that is just "2", while the diagram above
# says cores 1,2 are workers -- confirm the intended core numbering.
48 WORKER_CORE_LIST=$(seq -s, $ADMIN_CORES $WORKER_CORES)
49 # always use all cores
# Hex bitmask covering all CPU_CORES cores (2^n - 1), computed with bc.
50 CORE_MASK=0x$(echo "obase=16; 2 ^ $CPU_CORES - 1" | bc)
52 logger "NFVBENCHVM: CPU_CORES=$CPU_CORES, ADMIN_CORES=$ADMIN_CORES, WORKER_CORES=$WORKER_CORES ($WORKER_CORE_LIST)"
54 # CPU isolation optimizations
# Pin writeback workqueues and IRQ handling to CPU 0 (mask 0x1) so that the
# worker cores are left undisturbed for packet forwarding.
55 echo 1 > /sys/bus/workqueue/devices/writeback/cpumask
56 echo 1 > /sys/devices/virtual/workqueue/cpumask
57 echo 1 > /proc/irq/default_smp_affinity
# Retarget every existing IRQ to CPU 0. Some IRQs reject affinity writes;
# error handling (and the loop's closing fi/done) is not visible in this
# chunk. NOTE(review): `for irq in \`ls ...\`` parses ls output -- fragile
# in general, acceptable here since /proc/irq entries are plain numbers.
58 for irq in `ls /proc/irq/`; do
59 if [ -f /proc/irq/$irq/smp_affinity ]; then
60 echo 1 > /proc/irq/$irq/smp_affinity
64 # Isolate all cores that are reserved for workers
# tuna moves existing threads off the worker cores (runtime isolation
# without requiring the isolcpus= kernel parameter).
65 tuna -c $WORKER_CORE_LIST --isolate
67 NET_PATH=/sys/class/net
70 # device mapping for CentOS Linux 7:
72 # 00.03.0 Ethernet controller: Red Hat, Inc. Virtio network device
73 # 00.04.0 Ethernet controller: Red Hat, Inc. Virtio network device
75 # /sys/class/net/eth0 -> ../../devices/pci0000:00/0000:00:03.0/virtio0/net/eth0
76 # /sys/class/net/eth1 -> ../../devices/pci0000:00/0000:00:04.0/virtio1/net/eth1
# Scan all net devices for one whose MAC matches $mac and extract the PCI
# address as the 5th path component of the sysfs symlink (see examples
# above). NOTE(review): this loop appears to be the body of the
# get_pci_address() helper invoked later; the function definition line is
# not visible in this chunk and $mac is presumably its argument -- confirm.
79 for f in $(ls $NET_PATH/); do
80 if grep -q "$mac" $NET_PATH/$f/address; then
81 pci_addr=$(readlink $NET_PATH/$f | cut -d "/" -f5)
82 # some virtual interfaces match on MAC and do not have a PCI address
# NOTE(review): '-a' inside [ ] is deprecated/ambiguous per POSIX; prefer
# [ -n "$pci_addr" ] && [ "$pci_addr" != "N/A" ].
83 if [ "$pci_addr" -a "$pci_addr" != "N/A" ]; then
84 # Found matching interface
85 logger "NFVBENCHVM: found interface $f ($pci_addr) matching $mac"
# Error path when no interface carried the requested MAC.
92 if [ -z "$pci_addr" ]; then
93 echo "ERROR: Cannot find pci address for MAC $mac" >&2
94 logger "NFVBENCHVM ERROR: Cannot find pci address for MAC $mac"
101 # Sometimes the interfaces on the loopback VM will use different drivers, e.g.
102 # one from vswitch which is virtio based, one is from SRIOV VF. In this case,
103 # we have to make sure the forwarder uses them in the right order, which is
104 # especially important if the VM is in a PVVP chain.
# Resolve each configured MAC to its PCI address via the helper above.
# NOTE(review): unquoted [ $INTF_MAC1 ] is true only when the variable
# expands to a single non-empty word; [ -n "$INTF_MAC1" ] is the safer form.
105 if [ $INTF_MAC1 ] && [ $INTF_MAC2 ]; then
106 PCI_ADDRESS_1=$(get_pci_address $INTF_MAC1)
107 PCI_ADDRESS_2=$(get_pci_address $INTF_MAC2)
# Error path of the MAC-presence check (the 'else' line itself is not
# visible in this chunk).
109 echo "ERROR: VM MAC Addresses missing in $NFVBENCH_CONF"
110 logger "NFVBENCHVM ERROR: VM MAC Addresses missing in $NFVBENCH_CONF"
# Proceed only when both PCI addresses were resolved successfully.
113 if [ $PCI_ADDRESS_1 ] && [ $PCI_ADDRESS_2 ]; then
114 logger "NFVBENCHVM: Using pci $PCI_ADDRESS_1 ($INTF_MAC1)"
115 logger "NFVBENCHVM: Using pci $PCI_ADDRESS_2 ($INTF_MAC2)"
116 # Configure the forwarder
# Load the DPDK UIO kernel module once, only if not already loaded.
117 if [ -z "`lsmod | grep igb_uio`" ]; then
119 insmod /dpdk/igb_uio.ko
121 if [ "$FORWARDER" == "testpmd" ]; then
122 echo "Configuring testpmd..."
123 # Binding ports to DPDK
124 /dpdk/dpdk-devbind.py -b igb_uio $PCI_ADDRESS_1
125 /dpdk/dpdk-devbind.py -b igb_uio $PCI_ADDRESS_2
# Launch testpmd detached in a logged screen session (-dmSL).
# --eth-peer pins each port's destination MAC to the traffic generator;
# --nb-cores assigns the forwarding cores computed earlier.
# NOTE(review): several option lines of this command (EAL core options,
# queue/descriptor settings, ...) are not visible in this chunk.
126 screen -dmSL testpmd /dpdk/testpmd \
134 --eth-peer=0,$TG_MAC1 \
135 --eth-peer=1,$TG_MAC2 \
137 --nb-cores=$WORKER_CORES \
141 --cmdline-file=/dpdk/testpmd_cmd.txt
142 echo "testpmd running in screen 'testpmd'"
143 logger "NFVBENCHVM: testpmd running in screen 'testpmd'"
# VPP branch of the forwarder selection (the 'else' line is not visible in
# this chunk). Template files are copied then specialized in place.
145 echo "Configuring vpp..."
146 cp /vpp/startup.conf /etc/vpp/startup.conf
147 cp /vpp/vm.conf /etc/vpp/vm.conf
# Substitute {{PLACEHOLDER}} markers with the runtime values via sed -i.
149 sed -i "s/{{PCI_ADDRESS_1}}/$PCI_ADDRESS_1/g" /etc/vpp/startup.conf
150 sed -i "s/{{PCI_ADDRESS_2}}/$PCI_ADDRESS_2/g" /etc/vpp/startup.conf
151 sed -i "s/{{WORKER_CORES}}/$WORKER_CORES/g" /etc/vpp/startup.conf
152 sed -i "s/{{VIF_MQ_SIZE}}/${VIF_MQ_SIZE}/g" /etc/vpp/startup.conf
# Discover the VPP interface names; requires vpp to be running at this
# point (the service start is not visible in this chunk).
156 INTFS=`vppctl show int | grep Ethernet | xargs`
157 INTF_1=`echo $INTFS | awk '{ print $1 }'`
# The 2nd interface name is the 4th whitespace-separated word of the
# flattened output. NOTE(review): fragile against 'vppctl show int' output
# format changes -- confirm on the deployed VPP version.
158 INTF_2=`echo $INTFS | awk '{ print $4 }'`
# ${VAR//\//\/} escapes '/' as '\/' so values containing slashes (interface
# names like .../0, CIDRs like 10.0.0.1/24) do not break the sed s/// form.
159 sed -i "s/{{INTF_1}}/${INTF_1//\//\/}/g" /etc/vpp/vm.conf
160 sed -i "s/{{INTF_2}}/${INTF_2//\//\/}/g" /etc/vpp/vm.conf
161 sed -i "s/{{VNF_GATEWAY1_CIDR}}/${VNF_GATEWAY1_CIDR//\//\/}/g" /etc/vpp/vm.conf
162 sed -i "s/{{VNF_GATEWAY2_CIDR}}/${VNF_GATEWAY2_CIDR//\//\/}/g" /etc/vpp/vm.conf
163 sed -i "s/{{TG_MAC1}}/${TG_MAC1}/g" /etc/vpp/vm.conf
164 sed -i "s/{{TG_MAC2}}/${TG_MAC2}/g" /etc/vpp/vm.conf
165 sed -i "s/{{TG_NET1}}/${TG_NET1//\//\/}/g" /etc/vpp/vm.conf
166 sed -i "s/{{TG_NET2}}/${TG_NET2//\//\/}/g" /etc/vpp/vm.conf
167 sed -i "s/{{TG_GATEWAY1_IP}}/${TG_GATEWAY1_IP}/g" /etc/vpp/vm.conf
168 sed -i "s/{{TG_GATEWAY2_IP}}/${TG_GATEWAY2_IP}/g" /etc/vpp/vm.conf
# The actual 'service vpp restart' (or equivalent) command is not visible
# in this chunk; only its log line remains.
170 logger "NFVBENCHVM: vpp service restarted"
# Fallback when either PCI address could not be resolved from its MAC (the
# matching 'else' line is not visible in this chunk). Prints both values so
# the operator can see which lookup failed.
173 echo "ERROR: Cannot find PCI Address from MAC"
174 echo "$INTF_MAC1: $PCI_ADDRESS_1"
175 echo "$INTF_MAC2: $PCI_ADDRESS_2"
176 logger "NFVBENCHVM ERROR: Cannot find PCI Address from MAC"