Support running dovetail CI jobs on fuel euphrates and master
jjb/dovetail/dovetail-run.sh
#!/bin/bash
##############################################################################
# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

# The term INSTALLER follows community naming; this is just an example of how to run the job.
# Multiple installer platforms are supported.

set -e
[[ $CI_DEBUG == true ]] && redirect="/dev/stdout" || redirect="/dev/null"

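# DOVETAIL_HOME is recreated from scratch for every run; the inputs Dovetail needs and
# the results it produces live underneath it, and it is mounted into the container later on.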
DOVETAIL_HOME=${WORKSPACE}/cvp
[ -d ${DOVETAIL_HOME} ] && sudo rm -rf ${DOVETAIL_HOME}

mkdir -p ${DOVETAIL_HOME}

DOVETAIL_CONFIG=${DOVETAIL_HOME}/pre_config
mkdir -p ${DOVETAIL_CONFIG}

sshkey=""
# The path of openrc.sh is defined in fetch_os_creds.sh
OPENRC=${DOVETAIL_CONFIG}/env_config.sh
CACERT=${DOVETAIL_CONFIG}/os_cacert
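# For Apex the undercloud IP is not passed in directly: it is looked up via the undercloud
# VM's MAC address in the ARP table, and the jump host's ssh key is mounted into the container.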
if [[ ${INSTALLER_TYPE} == 'apex' ]]; then
    instack_mac=$(sudo virsh domiflist undercloud | grep default | \
                  grep -Eo "[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+")
    INSTALLER_IP=$(/usr/sbin/arp -e | grep ${instack_mac} | awk '{print $1}')
    sshkey="-v /root/.ssh/id_rsa:/root/.ssh/id_rsa"
    if [[ -n $(sudo iptables -L FORWARD | grep "REJECT" | grep "reject-with icmp-port-unreachable") ]]; then
        # Note: this happens only on opnfv-lf-pod1
        sudo iptables -D FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable
        sudo iptables -D FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable
    fi
elif [[ ${INSTALLER_TYPE} == 'joid' ]]; then
    # On a production lab the creds may be retrieved dynamically;
    # they are on the jump host, always in the same folder.
    sudo cp $LAB_CONFIG/admin-openrc $OPENRC
    # On a dev lab the credentials may not be the default ones; just provide a path so the
    # customized ones from the Jenkins config can be put into docker in place of the default.
fi

# Set iptables rule to allow forwarding return traffic for container
if ! sudo iptables -C FORWARD -j RETURN 2> ${redirect} || ! sudo iptables -L FORWARD | awk 'NR==3' | grep RETURN 2> ${redirect}; then
    sudo iptables -I FORWARD -j RETURN
fi

releng_repo=${WORKSPACE}/releng
[ -d ${releng_repo} ] && sudo rm -rf ${releng_repo}
git clone https://gerrit.opnfv.org/gerrit/releng ${releng_repo} >/dev/null

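# fetch_os_creds.sh keys its behaviour off BRANCH, so temporarily point BRANCH at the
# SUT branch while fetching the OpenStack credentials, then restore it afterwards.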
if [[ ${INSTALLER_TYPE} != 'joid' ]]; then
    echo "SUT branch is $SUT_BRANCH"
    echo "dovetail branch is $BRANCH"
    BRANCH_BACKUP=$BRANCH
    export BRANCH=$SUT_BRANCH
    ${releng_repo}/utils/fetch_os_creds.sh -d ${OPENRC} -i ${INSTALLER_TYPE} -a ${INSTALLER_IP} -o ${CACERT} >${redirect}
    export BRANCH=$BRANCH_BACKUP
fi

if [[ -f $OPENRC ]]; then
    echo "INFO: openstack credentials path is $OPENRC"
    if [[ ! "${SUT_BRANCH}" =~ "danube" && ${INSTALLER_TYPE} == "compass" ]]; then
        if [[ -f ${CACERT} ]]; then
            echo "INFO: ${INSTALLER_TYPE} openstack cacert file is ${CACERT}"
            echo "export OS_CACERT=${CACERT}" >> ${OPENRC}
        else
            echo "ERROR: Cannot find the ${INSTALLER_TYPE} openstack cacert file. Please check whether it exists."
            sudo ls -al ${DOVETAIL_CONFIG}
            exit 1
        fi
    fi
else
    echo "ERROR: Cannot find file $OPENRC. Please check whether it exists."
    sudo ls -al ${DOVETAIL_CONFIG}
    exit 1
fi

if [[ ! "${SUT_BRANCH}" =~ "danube" && ${INSTALLER_TYPE} == "fuel" ]]; then
    sed -i "s#/etc/ssl/certs/mcp_os_cacert#${CACERT}#g" ${OPENRC}
fi
cat $OPENRC

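# For Compass (non-Danube) the node IPs and credentials are hard-coded here;
# for other installers pod.yaml is generated further below.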
if [[ ! "${SUT_BRANCH}" =~ "danube" && ${INSTALLER_TYPE} == "compass" ]]; then
    cat << EOF >${DOVETAIL_CONFIG}/pod.yaml
nodes:
- {ip: 10.1.0.52, name: node1, password: root, role: controller, user: root}
- {ip: 10.1.0.51, name: node2, password: root, role: controller, user: root}
- {ip: 10.1.0.50, name: node3, password: root, role: controller, user: root}
- {ip: 10.1.0.54, name: node4, password: root, role: compute, user: root}
- {ip: 10.1.0.53, name: node5, password: root, role: compute, user: root}

EOF
fi

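# If pod.yaml was not provided above, try to generate it with releng's create_pod_file.py
# inside a throwaway virtualenv. Failures here are non-fatal (set +e): only the HA test
# cases depend on pod.yaml.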
if [[ ! -f ${DOVETAIL_CONFIG}/pod.yaml ]]; then
    set +e

    sudo pip install virtualenv

    cd ${releng_repo}/modules
    sudo virtualenv venv
    source venv/bin/activate
    sudo pip install -e ./ >/dev/null
    sudo pip install netaddr

    if [[ ${INSTALLER_TYPE} == compass ]]; then
        options="-u root -p root"
    elif [[ ${INSTALLER_TYPE} == fuel ]]; then
        options="-u root -p r00tme"
    elif [[ ${INSTALLER_TYPE} == apex ]]; then
        options="-u stack -k /root/.ssh/id_rsa"
    else
        echo "Generating pod.yaml is not supported for ${INSTALLER_TYPE} yet."
        echo "HA test cases may not run properly."
    fi

    cmd="sudo python ${releng_repo}/utils/create_pod_file.py -t ${INSTALLER_TYPE} \
         -i ${INSTALLER_IP} ${options} -f ${DOVETAIL_CONFIG}/pod.yaml"
    echo ${cmd}
    ${cmd}

    deactivate

    set -e

    cd ${WORKSPACE}
fi

if [ -f ${DOVETAIL_CONFIG}/pod.yaml ]; then
    echo "file ${DOVETAIL_CONFIG}/pod.yaml:"
    cat ${DOVETAIL_CONFIG}/pod.yaml
else
    echo "Error: cannot find file ${DOVETAIL_CONFIG}/pod.yaml. Please check whether it exists."
    sudo ls -al ${DOVETAIL_CONFIG}
    echo "HA test cases may not run properly."
fi

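# Copy the jump host's private key into pre_config so the Dovetail container can ssh
# into the SUT nodes (needed, presumably, by the HA test cases).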
ssh_options="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"

if [ "$INSTALLER_TYPE" == "fuel" ]; then
    if [[ "${SUT_BRANCH}" =~ "danube" ]]; then
        echo "Fetching id_rsa file from jump_server $INSTALLER_IP..."
        sshpass -p r00tme sudo scp $ssh_options root@${INSTALLER_IP}:~/.ssh/id_rsa ${DOVETAIL_CONFIG}/id_rsa
    else
        cp ${SSH_KEY} ${DOVETAIL_CONFIG}/id_rsa
    fi
fi

if [ "$INSTALLER_TYPE" == "apex" ]; then
    echo "Fetching id_rsa file from jump_server $INSTALLER_IP..."
    sudo scp $ssh_options stack@${INSTALLER_IP}:~/.ssh/id_rsa ${DOVETAIL_CONFIG}/id_rsa
fi

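# Cache the test images under ${HOME} so they are only downloaded once per build slave,
# then copy them into pre_config for this run.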
image_path=${HOME}/opnfv/dovetail/images
if [[ ! -d ${image_path} ]]; then
    mkdir -p ${image_path}
fi
# The sdnvpn test cases need this image to be downloaded before running
ubuntu_image=${image_path}/ubuntu-16.04-server-cloudimg-amd64-disk1.img
if [[ ! -f ${ubuntu_image} ]]; then
    echo "Download image ubuntu-16.04-server-cloudimg-amd64-disk1.img ..."
    wget -q -nc http://artifacts.opnfv.org/sdnvpn/ubuntu-16.04-server-cloudimg-amd64-disk1.img -P ${image_path}
fi
sudo cp ${ubuntu_image} ${DOVETAIL_CONFIG}

# functest needs this image to be downloaded before running
cirros_image=${image_path}/cirros-0.3.5-x86_64-disk.img
if [[ ! -f ${cirros_image} ]]; then
    echo "Download image cirros-0.3.5-x86_64-disk.img ..."
    wget -q -nc http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img -P ${image_path}
fi
sudo cp ${cirros_image} ${DOVETAIL_CONFIG}


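# Start a long-running, privileged Dovetail container in detached mode, with the docker
# socket and DOVETAIL_HOME mounted into it.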
opts="--privileged=true -id"

docker_volume="-v /var/run/docker.sock:/var/run/docker.sock"
dovetail_home_volume="-v ${DOVETAIL_HOME}:${DOVETAIL_HOME}"

# Pull the image with the correct tag
DOCKER_REPO='opnfv/dovetail'
if [ "$(uname -m)" = 'aarch64' ]; then
    DOCKER_REPO="${DOCKER_REPO}_$(uname -m)"
fi

echo "Dovetail: Pulling image ${DOCKER_REPO}:${DOCKER_TAG}"
docker pull ${DOCKER_REPO}:$DOCKER_TAG >$redirect

env4bgpvpn="-e INSTALLER_TYPE=${INSTALLER_TYPE} -e INSTALLER_IP=${INSTALLER_IP}"

cmd="docker run ${opts} -e DOVETAIL_HOME=${DOVETAIL_HOME} ${docker_volume} ${dovetail_home_volume} \
     ${sshkey} ${env4bgpvpn} ${DOCKER_REPO}:${DOCKER_TAG} /bin/bash"
echo "Dovetail: running docker run command: ${cmd}"
${cmd} >${redirect}
sleep 5
container_id=$(docker ps | grep "${DOCKER_REPO}:${DOCKER_TAG}" | awk '{print $1}' | head -1)
echo "Container ID=${container_id}"
if [ -z "${container_id}" ]; then
    echo "Cannot find the ${DOCKER_REPO} container ID. Please check whether the container exists."
    docker ps -a
    exit 1
fi
echo "Container Start: docker start ${container_id}"
docker start ${container_id}
sleep 5
docker ps >${redirect}
if [ $(docker ps | grep "${DOCKER_REPO}:${DOCKER_TAG}" | wc -l) == 0 ]; then
    echo "The container ${DOCKER_REPO} with ID=${container_id} has not been properly started. Exiting..."
    exit 1
fi

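# The volume_device_name differs between installers (vdb on compass/apex, vdc otherwise),
# so tempest_conf.yaml is generated per run rather than shipped statically.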
# Generate the tempest_conf.yaml file and copy it into the container's userconfig
tempest_conf_file=${DOVETAIL_CONFIG}/tempest_conf.yaml
if [[ ${INSTALLER_TYPE} == 'compass' || ${INSTALLER_TYPE} == 'apex' ]]; then
    volume_device='vdb'
else
    volume_device='vdc'
fi

cat << EOF >$tempest_conf_file

compute:
    min_compute_nodes: 2
    volume_device_name: ${volume_device}

EOF

echo "${tempest_conf_file}..."
cat ${tempest_conf_file}

cp_tempest_cmd="docker cp ${DOVETAIL_CONFIG}/tempest_conf.yaml $container_id:/home/opnfv/dovetail/dovetail/userconfig"
echo "exec command: ${cp_tempest_cmd}"
$cp_tempest_cmd

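# List the test cases in the chosen testsuite, then run it in debug mode (-d).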
list_cmd="dovetail list ${TESTSUITE}"
run_cmd="dovetail run --testsuite ${TESTSUITE} -d"
echo "Container exec command: ${list_cmd}"
docker exec $container_id ${list_cmd}
echo "Container exec command: ${run_cmd}"
docker exec $container_id ${run_cmd}

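# Copy the results out of DOVETAIL_HOME into the Jenkins workspace (presumably so the
# job can archive them).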
sudo cp -r ${DOVETAIL_HOME}/results ./
# Make sure the results files copied above are owned by the current user;
# otherwise there will be an error the next time the workspace is wiped.
# CURRENT_USER=${SUDO_USER:-$USER}
# PRIMARY_GROUP=$(id -gn $CURRENT_USER)
# sudo chown -R ${CURRENT_USER}:${PRIMARY_GROUP} ${WORKSPACE}/results

# Remove the unneeded yardstick workspace from the results to save disk space
sudo rm -rf ./results/workspace

echo "Dovetail: done!"
