Merge "[storperf] Create stable/gambia branch"
[releng.git] / jjb / cperf / cperf-robot-netvirt-csit.sh
#!/usr/bin/env bash

# Fail fast: abort on any command error (-e), on expansion of unset
# variables (-u), and propagate failures through pipelines (pipefail).
set -eu -o pipefail
# Map the OpenStack version to its upstream branch name:
# 'master' stays as-is, anything else becomes 'stable/<version>'.
case "$OS_VERSION" in
  master) FULL_OS_VER='master' ;;
  *)      FULL_OS_VER="stable/${OS_VERSION}" ;;
esac
12
# Derive the ODL stream name from the branch: master maps to the
# current development stream ('neon'); release branches simply drop
# the 'stable/' prefix (e.g. stable/fluorine -> fluorine).
case "$ODL_BRANCH" in
  master) ODL_STREAM='neon' ;;
  *)      ODL_STREAM=${ODL_BRANCH#"stable/"} ;;
esac

echo "ODL Stream set: ${ODL_STREAM} and OS Version is ${FULL_OS_VER}"
20
# Fetch a fresh copy of releng so the latest helper scripts
# (parse-node-yaml.py, ansible playbooks) are always used.
sudo rm -rf releng
git clone https://gerrit.opnfv.org/gerrit/releng.git
REL_PATH='releng/jjb/cperf'

# NOTE: sourcing overcloudrc unsets any variable with OS_ prefix
# (path quoted so a WORKSPACE containing spaces cannot word-split).
source "${WORKSPACE}/overcloudrc"
# note SDN_CONTROLLER_IP is set in overcloudrc, which is the VIP
# for admin/public network (since we are running single network deployment)
# Count deployed overcloud nodes from the deployment's node YAML file.
# NOTE(review): the first call passes no --node-type filter — presumably
# num_nodes defaults to control nodes; confirm against parse-node-yaml.py.
NUM_CONTROL_NODES=$(python "${REL_PATH}/parse-node-yaml.py" num_nodes --file "$NODE_FILE_PATH")
NUM_COMPUTE_NODES=$(python "${REL_PATH}/parse-node-yaml.py" num_nodes --node-type compute --file "$NODE_FILE_PATH")

echo "Number of Control nodes found: ${NUM_CONTROL_NODES}"
echo "Number of Compute nodes found: ${NUM_COMPUTE_NODES}"
35
# Only 1 combo or ctrl node is specified, even for OS HA deployments
# Currently supported combinations are:
# 0cmb-1ctl-2cmp
# 1cmb-0ctl-0cmp
# 1cmb-0ctl-1cmp
# Default to the multi-node layout and collapse to all-in-one only
# when no dedicated compute nodes were found.
OPENSTACK_TOPO="0cmb-1ctl-2cmp"
if [ "$NUM_COMPUTE_NODES" -eq 0 ]; then
  OPENSTACK_TOPO="1cmb-0ctl-0cmp"
fi
46
# Pass every controller IP to robot. ODL runs co-located on the control
# nodes, so the same address serves ODL_SYSTEM_* and OS_CONTROL_NODE_*,
# while HA_PROXY_* always points at the admin/public VIP from overcloudrc.
# Fixes: the original appended -v ODL_SYSTEM_${idx}_IP twice per node
# (redundant duplicate), seeded idx=1 needlessly, and used backticks.
EXTRA_ROBOT_ARGS=""
for idx in $(seq 1 "$NUM_CONTROL_NODES"); do
  CONTROLLER_IP=$(python "${REL_PATH}/parse-node-yaml.py" get_value -k address --node-number "${idx}" --file "$NODE_FILE_PATH")
  EXTRA_ROBOT_ARGS+=" -v ODL_SYSTEM_${idx}_IP:${CONTROLLER_IP} \
                      -v OS_CONTROL_NODE_${idx}_IP:${CONTROLLER_IP} \
                      -v HA_PROXY_${idx}_IP:${SDN_CONTROLLER_IP}"
done
56
# In all-in-one these Compute IPs still need to be passed to robot
if [ "$NUM_COMPUTE_NODES" -eq 0 ]; then
  # No dedicated computes: CSIT still expects both variables to exist.
  EXTRA_ROBOT_ARGS+=" -v OS_COMPUTE_1_IP:'' -v OS_COMPUTE_2_IP:''"
else
  # One OS_COMPUTE_<n>_IP per compute node (backticks -> $(), dead
  # idx=1 seed dropped, expansions quoted).
  for idx in $(seq 1 "$NUM_COMPUTE_NODES"); do
    COMPUTE_IP=$(python "${REL_PATH}/parse-node-yaml.py" get_value -k address --node-type compute --node-number "${idx}" --file "$NODE_FILE_PATH")
    EXTRA_ROBOT_ARGS+=" -v OS_COMPUTE_${idx}_IP:${COMPUTE_IP}"
  done
fi
67
# Address of the first control node; reused below as the external
# gateway / PNF endpoint and as the ansible target host.
CONTROLLER_1_IP=$(python ${REL_PATH}/parse-node-yaml.py get_value -k address --node-number 1 --file $NODE_FILE_PATH)

# Robot must know how to count/start/stop the ODL process on each node;
# the commands differ between a systemd service and a docker container.
case "$ODL_CONTAINERIZED" in
  false)
    EXTRA_ROBOT_ARGS+=" -v NODE_KARAF_COUNT_COMMAND:'ps axf | grep org.apache.karaf | grep -v grep | wc -l || echo 0' \
                        -v NODE_START_COMMAND:'sudo systemctl start opendaylight_api' \
                        -v NODE_KILL_COMMAND:'sudo systemctl stop opendaylight_api' \
                        -v NODE_STOP_COMMAND:'sudo systemctl stop opendaylight_api' \
                        -v NODE_FREEZE_COMMAND:'sudo systemctl stop opendaylight_api' "
    ;;
  *)
    EXTRA_ROBOT_ARGS+=" -v NODE_KARAF_COUNT_COMMAND:'sudo docker ps | grep opendaylight_api | wc -l || echo 0' \
                        -v NODE_START_COMMAND:'sudo docker start opendaylight_api' \
                        -v NODE_KILL_COMMAND:'sudo docker stop opendaylight_api' \
                        -v NODE_STOP_COMMAND:'sudo docker stop opendaylight_api' \
                        -v NODE_FREEZE_COMMAND:'sudo docker stop opendaylight_api' "
    ;;
esac
83
84 # FIXME(trozet) remove this once it is fixed in csit
85 # Upload glance image into openstack
86 wget -O ${WORKSPACE}/cirros-0.3.5-x86_64-disk.img http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img
87 export ANSIBLE_HOST_KEY_CHECKING=False
88 ansible-playbook -i ${CONTROLLER_1_IP}, -u heat-admin --key-file ${WORKSPACE}/id_rsa ${REL_PATH}/cirros-upload.yaml.ansible -vvv
89
90 LOGS_LOCATION=/tmp/robot_results
91
92 robot_cmd="pybot \
93   --removekeywords wuks \
94   --xunit robotxunit.xml \
95   --name 'CSIT' \
96   -e exclude \
97   -d $LOGS_LOCATION \
98   -v BUNDLEFOLDER:/opt/opendaylight \
99   -v CONTROLLER_USER:heat-admin \
100   -v DEFAULT_LINUX_PROMPT:\$ \
101   -v DEFAULT_LINUX_PROMPT_STRICT:]\$ \
102   -v DEFAULT_USER:heat-admin \
103   -v DEVSTACK_DEPLOY_PATH:/tmp \
104   -v EXTERNAL_GATEWAY:$CONTROLLER_1_IP \
105   -v EXTERNAL_PNF:$CONTROLLER_1_IP \
106   -v EXTERNAL_SUBNET:192.0.2.0/24 \
107   -v EXTERNAL_SUBNET_ALLOCATION_POOL:start=192.0.2.100,end=192.0.2.200 \
108   -v EXTERNAL_INTERNET_ADDR:$CONTROLLER_1_IP  \
109   -v HA_PROXY_IP:$SDN_CONTROLLER_IP \
110   -v NUM_ODL_SYSTEM:$NUM_CONTROL_NODES \
111   -v NUM_OS_SYSTEM:$(($NUM_CONTROL_NODES + $NUM_COMPUTE_NODES)) \
112   -v NUM_TOOLS_SYSTEM:0 \
113   -v ODL_SNAT_MODE:conntrack \
114   -v ODL_STREAM:$ODL_STREAM \
115   -v ODL_SYSTEM_IP:$CONTROLLER_1_IP \
116   -v OS_CONTROL_NODE_IP:$CONTROLLER_1_IP \
117   -v OPENSTACK_BRANCH:$FULL_OS_VER \
118   -v OPENSTACK_TOPO:$OPENSTACK_TOPO \
119   -v OS_USER:heat-admin \
120   -v ODL_ENABLE_L3_FWD:yes \
121   -v ODL_SYSTEM_USER:heat-admin \
122   -v ODL_SYSTEM_PROMPT:\$ \
123   -v PRE_CLEAN_OPENSTACK_ALL:True \
124   -v PUBLIC_PHYSICAL_NETWORK:datacentre \
125   -v RESTCONFPORT:8081 \
126   -v ODL_RESTCONF_USER:admin \
127   -v ODL_RESTCONF_PASSWORD:$SDN_CONTROLLER_PASSWORD \
128   -v KARAF_PROMPT_LOGIN:'opendaylight-user' \
129   -v KARAF_PROMPT:'opendaylight-user.*root.*>' \
130   -v SECURITY_GROUP_MODE:stateful \
131   -v USER:heat-admin \
132   -v USER_HOME:\$HOME \
133   -v TOOLS_SYSTEM_IP:'' \
134   -v NODE_ROLE_INDEX_START:0 \
135   -v WORKSPACE:/tmp  \
136   $EXTRA_ROBOT_ARGS \
137   -v of_port:6653 "
138
139 SUITE_HOME='/home/opnfv/repos/odl_test/csit/suites'
140
141 # Disabled suites
142 #
143 # ${SUITE_HOME}/openstack/connectivity/live_migration.robot
144 # Live migration will not work unless we use a shared storage backend like
145 # Ceph which we do not currently use with CSIT images
146 #
147 # ${SUITE_HOME}/netvirt/vpnservice/vpn_basic_ipv6.robot
148 # This suite fails with an error indicating the connection was closed
149 # to the overcloud control node:
150 # https://build.opnfv.org/ci/job/cperf-apex-csit-master/104/consoleFull
151 #
152 # Minimize HA CSIT as it does not pass all suites
153 if [ "$NUM_CONTROL_NODES" -eq 3 ]; then
154   suites="${SUITE_HOME}/openstack/connectivity/l2.robot \
155           ${SUITE_HOME}/openstack/connectivity/l3.robot"
156 else
157   suites="${SUITE_HOME}/openstack/connectivity/l2.robot \
158           ${SUITE_HOME}/openstack/connectivity/l3.robot \
159           ${SUITE_HOME}/openstack/connectivity/external_network.robot \
160           ${SUITE_HOME}/openstack/connectivity/security_group.robot \
161           ${SUITE_HOME}/openstack/securitygroup/neutron_security_group.robot \
162           ${SUITE_HOME}/openstack/securitygroup/security_group_l3bcast.robot \
163           ${SUITE_HOME}/netvirt/vpnservice/vpn_basic.robot \
164           ${SUITE_HOME}/netvirt/elan/elan.robot \
165           ${SUITE_HOME}/netvirt/vpnservice/arp_learning.robot \
166           ${SUITE_HOME}/netvirt/l2l3_gatewaymac_arp.robot \
167           ${SUITE_HOME}/integration/Create_JVM_Plots.robot"
168 fi
169
170 echo "Robot command set: ${robot_cmd}"
171 echo "Running robot..."
172 docker run -i --net=host \
173   -v ${LOGS_LOCATION}:${LOGS_LOCATION} \
174   -v ${WORKSPACE}/id_rsa:/tmp/id_rsa \
175   -v ${WORKSPACE}/overcloudrc:/tmp/overcloudrc \
176   opnfv/cperf:$DOCKER_TAG \
177   /bin/bash -c "source /tmp/overcloudrc; mkdir -p \$HOME/.ssh; cp /tmp/id_rsa \$HOME/.ssh; \
178   cd /home/opnfv/repos/odl_test/ && git pull origin master; \
179   pip install odltools; \
180   ${robot_cmd} ${suites};"
181
182 echo "Running post CSIT clean"
183 ansible-playbook -i ${CONTROLLER_1_IP}, -u heat-admin --key-file ${WORKSPACE}/id_rsa ${REL_PATH}/csit-clean.yaml.ansible -vvv