NFVBENCH-134 Support multiple idle interfaces per test VM
[nfvbench.git] / nfvbenchvm / dib / elements / nfvbenchvm / static / etc / rc.d / rc.local
#!/bin/bash

# rc.local for the NFVbench test VM: wait for cloud-init to deliver the
# nfvbenchvm configuration, then configure and start the packet forwarder.

touch /var/lock/subsys/local

# Wait for cloud-init to generate $NFVBENCH_CONF, retry for up to 60 seconds
# (30 attempts x 2 second sleep).
NFVBENCH_CONF=/etc/nfvbenchvm.conf
retry=30
until [ $retry -eq 0 ]; do
    if [ -f $NFVBENCH_CONF ]; then break; fi
    retry=$((retry - 1))
    sleep 2
done
# No configuration file: nothing to do, exit quietly so the VM still boots
# to a usable state.
if [ ! -f $NFVBENCH_CONF ]; then
    exit 0
fi
16
# Parse and obtain all configurations
echo "Generating configurations for forwarder..."
# NOTE: the config file is generated by cloud-init from trusted instance
# metadata; eval-ing it imports its KEY=VALUE pairs into this shell.
eval "$(cat $NFVBENCH_CONF)"
# Marker file consumed by tooling to detect that configuration has started.
touch /nfvbench_configured.flag


# Derive CPU topology values: a hex mask covering every core, and the number
# of worker cores (all cores except core 0, which is kept for housekeeping).
CPU_CORES=$(grep -c ^processor /proc/cpuinfo)
CPU_MASKS=0x$(echo "obase=16; 2 ^ $CPU_CORES - 1" | bc)
WORKER_CORES=$((CPU_CORES - 1))
26
# CPU isolation optimizations: steer writeback workqueues and every IRQ to
# CPU 0 so the worker cores are left undisturbed for packet forwarding.
echo 1 > /sys/bus/workqueue/devices/writeback/cpumask
echo 1 > /sys/devices/virtual/workqueue/cpumask
echo 1 > /proc/irq/default_smp_affinity
# Glob instead of parsing `ls`; the -f test skips IRQs without a writable
# smp_affinity entry.
for smp_affinity in /proc/irq/*/smp_affinity; do
    if [ -f "$smp_affinity" ]; then
        echo 1 > "$smp_affinity"
    fi
done
# Isolate worker cores 1..N-1 from the scheduler.
tuna -c $(seq -s, 1 1 $WORKER_CORES) --isolate
37
NET_PATH=/sys/class/net

#######################################
# Look up the PCI address of the network interface owning a given MAC.
#
# Device mapping example for CentOS Linux 7:
# lspci:
#   00.03.0 Ethernet controller: Red Hat, Inc. Virtio network device
#   00.04.0 Ethernet controller: Red Hat, Inc. Virtio network device
# /sys/class/net:
# /sys/class/net/eth0 -> ../../devices/pci0000:00/0000:00:03.0/virtio0/net/eth0
# /sys/class/net/eth1 -> ../../devices/pci0000:00/0000:00:04.0/virtio1/net/eth1
#
# Globals:   NET_PATH (read)
# Arguments: $1 - MAC address to look up
# Outputs:   PCI address (e.g. 0000:00:03.0) on stdout
# Returns:   0 on success, 1 if no interface with that MAC has a PCI address
#######################################
get_pci_address() {
    local mac=$1
    # Reset on every call: the variable must not leak a stale value from a
    # previous lookup when the MAC is not found this time.
    local pci_addr=""
    local dev
    for dev in "$NET_PATH"/*; do
        # 2>/dev/null: some entries (e.g. bonding masters) have no address file
        if grep -q "$mac" "$dev/address" 2>/dev/null; then
            pci_addr=$(readlink "$dev" | cut -d "/" -f5)
            # some virtual interfaces match on MAC and do not have a PCI address
            if [ -n "$pci_addr" ] && [ "$pci_addr" != "N/A" ]; then
                break
            fi
            pci_addr=""
        fi
    done
    if [ -z "$pci_addr" ]; then
        echo "ERROR: Cannot find pci address for MAC $mac" >&2
        logger "NFVBENCHVM ERROR: Cannot find pci address for MAC $mac"
        return 1
    fi
    echo "$pci_addr"
    return 0
}
69
# Sometimes the interfaces on the loopback VM will use different drivers, e.g.
# one from vswitch which is virtio based, one is from SRIOV VF. In this case,
# we have to make sure the forwarder uses them in the right order, which is
# especially important if the VM is in a PVVP chain.
if [ -n "$INTF_MAC1" ] && [ -n "$INTF_MAC2" ]; then
    PCI_ADDRESS_1=$(get_pci_address "$INTF_MAC1")
    PCI_ADDRESS_2=$(get_pci_address "$INTF_MAC2")
else
    # Keep going: the empty PCI_ADDRESS_* variables are handled by the
    # forwarder-setup check below.
    echo "ERROR: VM MAC Addresses missing in $NFVBENCH_CONF"
    logger "NFVBENCHVM ERROR: VM MAC Addresses missing in $NFVBENCH_CONF"
fi
81
# Start the forwarder only when both MACs were resolved to PCI addresses.
if [ -n "$PCI_ADDRESS_1" ] && [ -n "$PCI_ADDRESS_2" ]; then
    logger "NFVBENCHVM: Using pci $PCI_ADDRESS_1 ($INTF_MAC1)"
    logger "NFVBENCHVM: Using pci $PCI_ADDRESS_2 ($INTF_MAC2)"
    # Configure the forwarder: load the DPDK igb_uio module once if missing.
    if ! lsmod | grep -q igb_uio; then
        modprobe uio
        insmod /dpdk/igb_uio.ko
    fi
    if [ "$FORWARDER" == "testpmd" ]; then
        echo "Configuring testpmd..."
        # Binding ports to DPDK
        /dpdk/dpdk-devbind.py -b igb_uio "$PCI_ADDRESS_1"
        /dpdk/dpdk-devbind.py -b igb_uio "$PCI_ADDRESS_2"
        # Run testpmd detached in a screen session; MAC forward mode rewrites
        # destination MACs to the traffic generator peers TG_MAC1/TG_MAC2.
        screen -dmSL testpmd /dpdk/testpmd \
                            -c $CPU_MASKS \
                            -n 4 \
                            -- \
                                --burst=32 \
                                --txd=256 \
                                --rxd=1024 \
                                --eth-peer=0,$TG_MAC1 \
                                --eth-peer=1,$TG_MAC2 \
                                --forward-mode=mac \
                                --nb-cores=$WORKER_CORES \
                                --max-pkt-len=9000 \
                                --cmdline-file=/dpdk/testpmd_cmd.txt
        echo "testpmd running in screen 'testpmd'"
        logger "NFVBENCHVM: testpmd running in screen 'testpmd'"
    else
        echo "Configuring vpp..."
        cp /vpp/startup.conf /etc/vpp/startup.conf
        cp /vpp/vm.conf /etc/vpp/vm.conf

        # Fill the PCI addresses and worker core count into the VPP startup
        # configuration template.
        sed -i "s/{{PCI_ADDRESS_1}}/$PCI_ADDRESS_1/g" /etc/vpp/startup.conf
        sed -i "s/{{PCI_ADDRESS_2}}/$PCI_ADDRESS_2/g" /etc/vpp/startup.conf
        sed -i "s/{{WORKER_CORES}}/$WORKER_CORES/g" /etc/vpp/startup.conf
        # First start creates the DPDK interfaces; give VPP time to come up.
        service vpp start
        sleep 10

        # After xargs flattening, 'vppctl show int' yields 3 fields per
        # interface, so field 1 is the first interface name and field 4 the
        # second one.
        INTFS=$(vppctl show int | grep Ethernet | xargs)
        INTF_1=$(echo $INTFS | awk '{ print $1 }')
        INTF_2=$(echo $INTFS | awk '{ print $4 }')
        # Substitute runtime values into the interface configuration; the
        # ${var//\//\/} expansions escape '/' for use inside sed's s///.
        sed -i "s/{{INTF_1}}/${INTF_1//\//\/}/g" /etc/vpp/vm.conf
        sed -i "s/{{INTF_2}}/${INTF_2//\//\/}/g" /etc/vpp/vm.conf
        sed -i "s/{{VNF_GATEWAY1_CIDR}}/${VNF_GATEWAY1_CIDR//\//\/}/g" /etc/vpp/vm.conf
        sed -i "s/{{VNF_GATEWAY2_CIDR}}/${VNF_GATEWAY2_CIDR//\//\/}/g" /etc/vpp/vm.conf
        sed -i "s/{{TG_MAC1}}/${TG_MAC1}/g" /etc/vpp/vm.conf
        sed -i "s/{{TG_MAC2}}/${TG_MAC2}/g" /etc/vpp/vm.conf
        sed -i "s/{{TG_NET1}}/${TG_NET1//\//\/}/g" /etc/vpp/vm.conf
        sed -i "s/{{TG_NET2}}/${TG_NET2//\//\/}/g" /etc/vpp/vm.conf
        sed -i "s/{{TG_GATEWAY1_IP}}/${TG_GATEWAY1_IP}/g" /etc/vpp/vm.conf
        sed -i "s/{{TG_GATEWAY2_IP}}/${TG_GATEWAY2_IP}/g" /etc/vpp/vm.conf
        # Restart so VPP picks up the completed vm.conf.
        service vpp restart
        logger "NFVBENCHVM: vpp service restarted"
    fi
else
    echo "ERROR: Cannot find PCI Address from MAC"
    echo "$INTF_MAC1: $PCI_ADDRESS_1"
    echo "$INTF_MAC2: $PCI_ADDRESS_2"
    logger "NFVBENCHVM ERROR: Cannot find PCI Address from MAC"
fi