NFVBENCH-136 Add support for multiqueue for PVP/PVVP chains
[nfvbench.git] / nfvbenchvm / dib / elements / nfvbenchvm / static / etc / rc.d / rc.local
#!/bin/bash

touch /var/lock/subsys/local

# Wait for cloud-init to generate $NFVBENCH_CONF: poll every 2 seconds,
# up to 30 attempts (~60 seconds total).
NFVBENCH_CONF=/etc/nfvbenchvm.conf
retry=30
until [ $retry -eq 0 ]; do
    if [ -f "$NFVBENCH_CONF" ]; then break; fi
    retry=$((retry - 1))
    sleep 2
done
# No configuration was delivered in time: nothing to configure, leave the
# VM idle rather than failing boot.
if [ ! -f "$NFVBENCH_CONF" ]; then
    exit 0
fi
16
17 # Parse and obtain all configurations
18 echo "Generating configurations for forwarder..."
19 eval $(cat $NFVBENCH_CONF)
20 touch /nfvbench_configured.flag
21
# We assume there are at least 2 cores available for the VM
CPU_CORES=$(grep -c ^processor /proc/cpuinfo)

# We need at least 1 admin core.
if [ "$CPU_CORES" -le 2 ]; then
    ADMIN_CORES=1
else
    # If the number of cores is even we reserve 2 cores for admin (second
    # being idle) so the number of workers is either 1 (if CPU_CORES is 2)
    # or always even
    if (( CPU_CORES % 2 )); then
        ADMIN_CORES=1
    else
        ADMIN_CORES=2
    fi
fi
# 2 vcpus: AW (core 0: Admin, core 1: Worker)
# 3 vcpus: AWW (core 0: Admin, core 1,2: Worker)
# 4 vcpus: AWWU (core 0: Admin, core 1,2: Worker, core 3: Unused)
# 5 vcpus: AWWWW
# 6 vcpus: AWWWWU
WORKER_CORES=$((CPU_CORES - ADMIN_CORES))
# Worker cores are all cores except the admin core (core 0) and the
# eventual unused core, i.e. always cores 1..WORKER_CORES:
# AW -> 1
# AWW -> 1,2
# AWWU -> 1,2
# (starting the seq at ADMIN_CORES instead of 1 would skip core 1 and
# produce too few workers whenever 2 admin cores are reserved)
WORKER_CORE_LIST=$(seq -s, 1 $WORKER_CORES)
# always use all cores
CORE_MASK=0x$(echo "obase=16; 2 ^ $CPU_CORES - 1" | bc)

logger "NFVBENCHVM: CPU_CORES=$CPU_CORES, ADMIN_CORES=$ADMIN_CORES, WORKER_CORES=$WORKER_CORES ($WORKER_CORE_LIST)"
53
# CPU isolation optimizations: pin writeback workqueues and all IRQs to
# CPU0 (the admin core) so the worker cores are left undisturbed.
echo 1 > /sys/bus/workqueue/devices/writeback/cpumask
echo 1 > /sys/devices/virtual/workqueue/cpumask
echo 1 > /proc/irq/default_smp_affinity
# Iterate with a glob instead of parsing `ls` output; the -f guard skips
# the non-matching literal pattern and any IRQ dir without smp_affinity.
for smp_affinity in /proc/irq/*/smp_affinity; do
    if [ -f "$smp_affinity" ]; then
        echo 1 > "$smp_affinity"
    fi
done

# Isolate all cores that are reserved for workers
tuna -c $WORKER_CORE_LIST --isolate
66
NET_PATH=/sys/class/net

# Print the PCI address of the network interface whose MAC address matches
# $1, or print an error (stderr + syslog) and return 1 if none matches.
#
# device mapping for CentOS Linux 7:
# lspci:
#   00.03.0 Ethernet controller: Red Hat, Inc. Virtio network device
#   00.04.0 Ethernet controller: Red Hat, Inc. Virtio network device
# /sys/class/net:
# /sys/class/net/eth0 -> ../../devices/pci0000:00/0000:00:03.0/virtio0/net/eth0
# /sys/class/net/eth1 -> ../../devices/pci0000:00/0000:00:04.0/virtio1/net/eth1
get_pci_address() {
    local mac=$1
    local pci_addr=""
    local dev
    # Glob over sysfs entries instead of parsing `ls` output.
    for dev in "$NET_PATH"/*; do
        # -s: stay silent if the entry has no address file
        if grep -qs "$mac" "$dev/address"; then
            # 5th component of the symlink target is the PCI address
            pci_addr=$(readlink "$dev" | cut -d "/" -f5)
            # some virtual interfaces match on MAC and do not have a PCI address
            if [ -n "$pci_addr" ] && [ "$pci_addr" != "N/A" ]; then
                # Found matching interface
                logger "NFVBENCHVM: found interface ${dev##*/} ($pci_addr) matching $mac"
                break
            else
                pci_addr=""
            fi
        fi
    done
    if [ -z "$pci_addr" ]; then
        echo "ERROR: Cannot find pci address for MAC $mac" >&2
        logger "NFVBENCHVM ERROR: Cannot find pci address for MAC $mac"
        return 1
    fi
    echo "$pci_addr"
    return 0
}
100
# Sometimes the interfaces on the loopback VM will use different drivers, e.g.
# one from vswitch which is virtio based, one is from SRIOV VF. In this case,
# we have to make sure the forwarder uses them in the right order, which is
# especially important if the VM is in a PVVP chain.
if [ -n "$INTF_MAC1" ] && [ -n "$INTF_MAC2" ]; then
    PCI_ADDRESS_1=$(get_pci_address "$INTF_MAC1")
    PCI_ADDRESS_2=$(get_pci_address "$INTF_MAC2")
else
    # Diagnostics go to stderr; the PCI_ADDRESS check below reports the
    # final failure.
    echo "ERROR: VM MAC Addresses missing in $NFVBENCH_CONF" >&2
    logger "NFVBENCHVM ERROR: VM MAC Addresses missing in $NFVBENCH_CONF"
fi
112
113 if [ $PCI_ADDRESS_1 ] && [ $PCI_ADDRESS_2 ]; then
114     logger "NFVBENCHVM: Using pci $PCI_ADDRESS_1 ($INTF_MAC1)"
115     logger "NFVBENCHVM: Using pci $PCI_ADDRESS_2 ($INTF_MAC2)"
116     # Configure the forwarder
117     if [ -z "`lsmod | grep igb_uio`" ]; then
118         modprobe uio
119         insmod /dpdk/igb_uio.ko
120     fi
121     if [ "$FORWARDER" == "testpmd" ]; then
122         echo "Configuring testpmd..."
123         # Binding ports to DPDK
124         /dpdk/dpdk-devbind.py -b igb_uio $PCI_ADDRESS_1
125         /dpdk/dpdk-devbind.py -b igb_uio $PCI_ADDRESS_2
126         screen -dmSL testpmd /dpdk/testpmd \
127                             -c $CORE_MASK \
128                             -n 4 \
129                             -- \
130                                 --nb-ports=2 \
131                                 --burst=32 \
132                                 --txd=256 \
133                                 --rxd=1024 \
134                                 --eth-peer=0,$TG_MAC1 \
135                                 --eth-peer=1,$TG_MAC2 \
136                                 --forward-mode=mac \
137                                 --nb-cores=$WORKER_CORES \
138                                 --txq=$VIF_MQ_SIZE \
139                                 --rxq=$VIF_MQ_SIZE \
140                                 --max-pkt-len=9000 \
141                                 --cmdline-file=/dpdk/testpmd_cmd.txt
142         echo "testpmd running in screen 'testpmd'"
143         logger "NFVBENCHVM: testpmd running in screen 'testpmd'"
144     else
145         echo "Configuring vpp..."
146         cp /vpp/startup.conf /etc/vpp/startup.conf
147         cp /vpp/vm.conf /etc/vpp/vm.conf
148
149         sed -i "s/{{PCI_ADDRESS_1}}/$PCI_ADDRESS_1/g" /etc/vpp/startup.conf
150         sed -i "s/{{PCI_ADDRESS_2}}/$PCI_ADDRESS_2/g" /etc/vpp/startup.conf
151         sed -i "s/{{WORKER_CORES}}/$WORKER_CORES/g" /etc/vpp/startup.conf
152         sed -i "s/{{VIF_MQ_SIZE}}/${VIF_MQ_SIZE}/g" /etc/vpp/startup.conf
153         service vpp start
154         sleep 10
155
156         INTFS=`vppctl show int | grep Ethernet | xargs`
157         INTF_1=`echo $INTFS | awk '{ print $1 }'`
158         INTF_2=`echo $INTFS | awk '{ print $4 }'`
159         sed -i "s/{{INTF_1}}/${INTF_1//\//\/}/g" /etc/vpp/vm.conf
160         sed -i "s/{{INTF_2}}/${INTF_2//\//\/}/g" /etc/vpp/vm.conf
161         sed -i "s/{{VNF_GATEWAY1_CIDR}}/${VNF_GATEWAY1_CIDR//\//\/}/g" /etc/vpp/vm.conf
162         sed -i "s/{{VNF_GATEWAY2_CIDR}}/${VNF_GATEWAY2_CIDR//\//\/}/g" /etc/vpp/vm.conf
163         sed -i "s/{{TG_MAC1}}/${TG_MAC1}/g" /etc/vpp/vm.conf
164         sed -i "s/{{TG_MAC2}}/${TG_MAC2}/g" /etc/vpp/vm.conf
165         sed -i "s/{{TG_NET1}}/${TG_NET1//\//\/}/g" /etc/vpp/vm.conf
166         sed -i "s/{{TG_NET2}}/${TG_NET2//\//\/}/g" /etc/vpp/vm.conf
167         sed -i "s/{{TG_GATEWAY1_IP}}/${TG_GATEWAY1_IP}/g" /etc/vpp/vm.conf
168         sed -i "s/{{TG_GATEWAY2_IP}}/${TG_GATEWAY2_IP}/g" /etc/vpp/vm.conf
169         service vpp restart
170         logger "NFVBENCHVM: vpp service restarted"
171     fi
172 else
173     echo "ERROR: Cannot find PCI Address from MAC"
174     echo "$INTF_MAC1: $PCI_ADDRESS_1"
175     echo "$INTF_MAC2: $PCI_ADDRESS_2"
176     logger "NFVBENCHVM ERROR: Cannot find PCI Address from MAC"
177 fi