Merge "change rabbitmq-server to rabbitmq for all installers"
[releng.git] / jjb / dovetail / dovetail-run.sh
1 #!/bin/bash
2 ##############################################################################
3 # Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
9
#the noun INSTALLER is used in community, here is just the example to run.
#multi-platforms are supported.

set -e
# Send debug output to the console only when CI_DEBUG is enabled.
[[ $CI_DEBUG == true ]] && redirect="/dev/stdout" || redirect="/dev/null"

# Jenkins encodes the deploy scenario in BUILD_TAG; "virtual" marks a
# virtual deployment, everything else is treated as baremetal.
DEPLOY_TYPE=baremetal
[[ $BUILD_TAG =~ "virtual" ]] && DEPLOY_TYPE=virt

# Recreate a clean Dovetail workspace. ${WORKSPACE:?} aborts the script when
# WORKSPACE is unset or empty, so the recursive sudo rm can never expand to
# the root-level path "/cvp".
DOVETAIL_HOME="${WORKSPACE:?WORKSPACE must be set}/cvp"
[ -d "${DOVETAIL_HOME}" ] && sudo rm -rf "${DOVETAIL_HOME}"

mkdir -p "${DOVETAIL_HOME}"

# Pre-run configuration consumed by the container (openrc, pod.yaml, ...).
DOVETAIL_CONFIG=${DOVETAIL_HOME}/pre_config
mkdir -p "${DOVETAIL_CONFIG}"

# Guest images staged for the test cases.
DOVETAIL_IMAGES=${DOVETAIL_HOME}/images
mkdir -p "${DOVETAIL_IMAGES}"
29
30 ssh_options="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
31
32 sshkey=""
33 # The path of openrc.sh is defined in fetch_os_creds.sh
34 OPENRC=${DOVETAIL_CONFIG}/env_config.sh
35 CACERT=${DOVETAIL_CONFIG}/os_cacert
36 if [[ ${INSTALLER_TYPE} == 'apex' ]]; then
37     instack_mac=$(sudo virsh domiflist undercloud | grep default | \
38                   grep -Eo "[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+")
39     INSTALLER_IP=$(/usr/sbin/arp -e | grep ${instack_mac} | awk {'print $1'})
40     sshkey="-v /root/.ssh/id_rsa:/root/.ssh/id_rsa"
41     if [[ -n $(sudo iptables -L FORWARD |grep "REJECT"|grep "reject-with icmp-port-unreachable") ]]; then
42         #note: this happens only in opnfv-lf-pod1
43         sudo iptables -D FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable
44         sudo iptables -D FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable
45     fi
46 elif [[ ${INSTALLER_TYPE} == 'joid' ]]; then
47     # If production lab then creds may be retrieved dynamically
48     # creds are on the jumphost, always in the same folder
49     sudo cp $LAB_CONFIG/admin-openrc $OPENRC
50     # If dev lab, credentials may not be the default ones, just provide a path to put them into docker
51     # replace the default one by the customized one provided by jenkins config
52 fi
53
# Set iptables rule to allow forwarding return traffic for container.
# Insert the RETURN rule unless it already exists (-C) and sits at the
# expected position (third line of `iptables -L FORWARD`).
# Fix: use grep -q — the original only redirected stderr, so the matched
# rule line leaked to the console on every run.
if ! sudo iptables -C FORWARD -j RETURN 2> ${redirect} || \
   ! sudo iptables -L FORWARD | awk 'NR==3' | grep -q RETURN 2> ${redirect}; then
    sudo iptables -I FORWARD -j RETURN
fi
58
59 releng_repo=${WORKSPACE}/releng
60 [ -d ${releng_repo} ] && sudo rm -rf ${releng_repo}
61 git clone https://gerrit.opnfv.org/gerrit/releng ${releng_repo} >/dev/null
62
63 if [[ ${INSTALLER_TYPE} != 'joid' ]]; then
64     echo "SUT branch is $SUT_BRANCH"
65     echo "dovetail branch is $BRANCH"
66     BRANCH_BACKUP=$BRANCH
67     export BRANCH=$SUT_BRANCH
68     ${releng_repo}/utils/fetch_os_creds.sh -d ${OPENRC} -i ${INSTALLER_TYPE} -a ${INSTALLER_IP} -o ${CACERT} >${redirect}
69     export BRANCH=$BRANCH_BACKUP
70 fi
71
# Validate the fetched credentials. Missing openrc is fatal; on post-danube
# compass the cacert file is also required and gets recorded in the openrc.
if [[ ! -f $OPENRC ]]; then
    echo "ERROR: cannot find file $OPENRC. Please check if it is existing."
    sudo ls -al ${DOVETAIL_CONFIG}
    exit 1
fi
echo "INFO: openstack credentials path is $OPENRC"
if [[ ! "${SUT_BRANCH}" =~ "danube" && ${INSTALLER_TYPE} == "compass" ]]; then
    if [[ -f ${CACERT} ]]; then
        echo "INFO: ${INSTALLER_TYPE} openstack cacert file is ${CACERT}"
        echo "export OS_CACERT=${CACERT}" >> ${OPENRC}
    else
        echo "ERROR: Can't find ${INSTALLER_TYPE} openstack cacert file. Please check if it is existing."
        sudo ls -al ${DOVETAIL_CONFIG}
        exit 1
    fi
fi
89
90 if [[ ! "${SUT_BRANCH}" =~ "danube" && ${INSTALLER_TYPE} == "fuel" ]]; then
91     sed -i "s#/etc/ssl/certs/mcp_os_cacert#${CACERT}#g" ${OPENRC}
92 fi
93 cat $OPENRC
94
95 if [[ ! "${SUT_BRANCH}" =~ "danube" && ${INSTALLER_TYPE} == "compass" ]]; then
96     cat << EOF >${DOVETAIL_CONFIG}/pod.yaml
97 nodes:
98 - {ip: 10.1.0.52, name: node1, password: root, role: controller, user: root}
99 - {ip: 10.1.0.51, name: node2, password: root, role: controller, user: root}
100 - {ip: 10.1.0.50, name: node3, password: root, role: controller, user: root}
101 - {ip: 10.1.0.54, name: node4, password: root, role: compute, user: root}
102 - {ip: 10.1.0.53, name: node5, password: root, role: compute, user: root}
103
104 EOF
105 fi
106
107 if [[ ! "${SUT_BRANCH}" =~ "danube" && ${INSTALLER_TYPE} == 'fuel' && ${DEPLOY_TYPE} == 'baremetal' ]]; then
108     fuel_ctl_ssh_options="${ssh_options} -i ${SSH_KEY}"
109     ssh_user="ubuntu"
110     fuel_ctl_ip=$(ssh 2>/dev/null ${fuel_ctl_ssh_options} "${ssh_user}@${INSTALLER_IP}" \
111             "sudo salt --out yaml 'ctl*' pillar.get _param:openstack_control_address | \
112                 awk '{print \$2; exit}'") &> /dev/null
113     cat << EOF >${DOVETAIL_CONFIG}/pod.yaml
114 nodes:
115 - {ip: ${fuel_ctl_ip}, name: node1, key_filename: /root/.ssh/id_rsa, role: controller, user: ${ssh_user}}
116
117 EOF
118 fi
119
120 if [[ ! -f ${DOVETAIL_CONFIG}/pod.yaml ]]; then
121     set +e
122
123     sudo pip install virtualenv
124
125     cd ${releng_repo}/modules
126     sudo virtualenv venv
127     source venv/bin/activate
128     sudo pip install -e ./ >/dev/null
129     sudo pip install netaddr
130
131     if [[ ${INSTALLER_TYPE} == compass ]]; then
132         options="-u root -p root"
133     elif [[ ${INSTALLER_TYPE} == fuel ]]; then
134         options="-u root -p r00tme"
135     elif [[ ${INSTALLER_TYPE} == apex ]]; then
136         options="-u stack -k /root/.ssh/id_rsa"
137     elif [[ ${INSTALLER_TYPE} == daisy ]]; then
138         options="-u root -p r00tme"
139     else
140         echo "Don't support to generate pod.yaml on ${INSTALLER_TYPE} currently."
141         echo "HA test cases may not run properly."
142     fi
143
144     cmd="sudo python ${releng_repo}/utils/create_pod_file.py -t ${INSTALLER_TYPE} \
145          -i ${INSTALLER_IP} ${options} -f ${DOVETAIL_CONFIG}/pod.yaml"
146     echo ${cmd}
147     ${cmd}
148
149     deactivate
150
151     set -e
152
153     cd ${WORKSPACE}
154 fi
155
# Append the HA attack-process mapping to pod.yaml so dovetail.ha.tc010
# knows which service process to kill; warn (but continue) when pod.yaml
# could not be produced by any of the steps above.
pod_file=${DOVETAIL_CONFIG}/pod.yaml
if [[ -f ${pod_file} ]]; then
    echo "Adapt process info for $INSTALLER_TYPE ..."
    ha_attack_process='rabbitmq'
    cat << EOF >> ${pod_file}
process_info:
- {testcase_name: dovetail.ha.tc010, attack_process: ${ha_attack_process}}

EOF
    echo "file ${pod_file}:"
    cat ${pod_file}
else
    echo "Error: cannot find file ${pod_file}. Please check if it is existing."
    sudo ls -al ${DOVETAIL_CONFIG}
    echo "HA test cases may not run properly."
fi
171
# Stage the SSH private key the container needs to reach the SUT nodes.
# The three installer branches are mutually exclusive, so dispatch on
# INSTALLER_TYPE in one place.
case "$INSTALLER_TYPE" in
fuel)
    if [[ "${SUT_BRANCH}" =~ "danube" ]]; then
        echo "Fetching id_rsa file from jump_server $INSTALLER_IP..."
        sshpass -p r00tme sudo scp $ssh_options root@${INSTALLER_IP}:~/.ssh/id_rsa ${DOVETAIL_CONFIG}/id_rsa
    else
        cp ${SSH_KEY} ${DOVETAIL_CONFIG}/id_rsa
    fi
    ;;
apex)
    echo "Fetching id_rsa file from jump_server $INSTALLER_IP..."
    sudo scp $ssh_options stack@${INSTALLER_IP}:~/.ssh/id_rsa ${DOVETAIL_CONFIG}/id_rsa
    ;;
daisy)
    echo "Fetching id_dsa file from jump_server $INSTALLER_IP..."
    sshpass -p r00tme sudo scp $ssh_options root@${INSTALLER_IP}:~/.ssh/id_dsa ${DOVETAIL_CONFIG}/id_rsa
    ;;
esac
190
191
# ---------------------------------------------------------------------------
# Pre-fetch the guest images required by the test cases into a persistent
# per-host cache, then copy them into the Dovetail workspace mounted into
# the container. The five copy-pasted stanzas are deduplicated via a helper.
# ---------------------------------------------------------------------------
image_path=${HOME}/opnfv/dovetail/images
mkdir -p ${image_path}

# fetch_image <url>
# Download <url> into ${image_path} unless the file (basename of the URL) is
# already cached, then copy it into ${DOVETAIL_IMAGES}.
fetch_image() {
    local url=$1
    local name=${url##*/}
    if [[ ! -f ${image_path}/${name} ]]; then
        echo "Download image ${name} ..."
        wget -q -nc ${url} -P ${image_path}
    fi
    sudo cp ${image_path}/${name} ${DOVETAIL_IMAGES}
}

# sdnvpn test case needs to download this image first before running
fetch_image http://artifacts.opnfv.org/sdnvpn/ubuntu-16.04-server-cloudimg-amd64-disk1.img

# functest needs to download this image first before running
fetch_image http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img

# snaps_smoke test case needs to download this image first before running
fetch_image https://cloud-images.ubuntu.com/releases/14.04/release/ubuntu-14.04-server-cloudimg-amd64-disk1.img

# cloudify_ims test case needs to download these 2 images first before running
fetch_image http://repository.cloudifysource.org/cloudify/4.0.1/sp-release/cloudify-manager-premium-4.0.1.qcow2
fetch_image http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img
233
# Container run options: privileged, interactive, detached.
opts="--privileged=true -id"

# Mount the host docker socket and the Dovetail workspace into the container.
docker_volume="-v /var/run/docker.sock:/var/run/docker.sock"
dovetail_home_volume="-v ${DOVETAIL_HOME}:${DOVETAIL_HOME}"

# Pick the image repository: aarch64 hosts use a dedicated repo suffix and
# always pull the latest tag.
DOCKER_REPO='opnfv/dovetail'
host_arch=$(uname -m)
if [[ ${host_arch} == 'aarch64' ]]; then
    DOCKER_REPO="${DOCKER_REPO}_${host_arch}"
    DOCKER_TAG="latest"
fi
245
echo "Dovetail: Pulling image ${DOCKER_REPO}:${DOCKER_TAG}"
docker pull ${DOCKER_REPO}:$DOCKER_TAG >$redirect

# Start a detached dovetail container. ${cmd} is intentionally unquoted so
# the option strings word-split into separate arguments.
cmd="docker run ${opts} -e DOVETAIL_HOME=${DOVETAIL_HOME} ${docker_volume} ${dovetail_home_volume} \
     ${sshkey} ${DOCKER_REPO}:${DOCKER_TAG} /bin/bash"
echo "Dovetail: running docker run command: ${cmd}"
${cmd} >${redirect}
sleep 5
container_id=$(docker ps | grep "${DOCKER_REPO}:${DOCKER_TAG}" | awk '{print $1}' | head -1)
echo "Container ID=${container_id}"
# Fix: quote the expansion — the original unquoted `[ -z ${container_id} ]`
# degenerated to the one-argument test `[ -z ]` when the id was empty and
# only evaluated true by accident.
if [[ -z "${container_id}" ]]; then
    echo "Cannot find ${DOCKER_REPO} container ID ${container_id}. Please check if it is existing."
    docker ps -a
    exit 1
fi
echo "Container Start: docker start ${container_id}"
docker start ${container_id}
sleep 5
docker ps >${redirect}
# grep -q replaces the `grep | wc -l` count: we only care whether the
# container is still listed after the start.
if ! docker ps | grep -q "${DOCKER_REPO}:${DOCKER_TAG}"; then
    echo "The container ${DOCKER_REPO} with ID=${container_id} has not been properly started. Exiting..."
    exit 1
fi
269
# Generate the tempest_conf.yaml override and copy it into the container.
# compass and apex guests expose the attached volume as vdb; every other
# installer exposes it as vdc.
tempest_conf_file=${DOVETAIL_CONFIG}/tempest_conf.yaml
case "${INSTALLER_TYPE}" in
compass|apex)
    volume_device='vdb'
    ;;
*)
    volume_device='vdc'
    ;;
esac

cat << EOF >$tempest_conf_file

compute:
    min_compute_nodes: 2
    volume_device_name: ${volume_device}

EOF

echo "${tempest_conf_file}..."
cat ${tempest_conf_file}

cp_tempest_cmd="docker cp ${DOVETAIL_CONFIG}/tempest_conf.yaml $container_id:/home/opnfv/dovetail/dovetail/userconfig"
echo "exec command: ${cp_tempest_cmd}"
$cp_tempest_cmd
292
# Build the dovetail CLI invocation: the 'default' suite needs no explicit
# --testsuite flag; always run with debug output (-d) inside the container.
testsuite="--testsuite ${TESTSUITE}"
if [[ ${TESTSUITE} == 'default' ]]; then
    testsuite=''
fi

run_cmd="dovetail run ${testsuite} -d"
echo "Container exec command: ${run_cmd}"
docker exec $container_id ${run_cmd}
302
303 sudo cp -r ${DOVETAIL_HOME}/results ./
304 # To make sure the file owner is the current user, for the copied results files in the above line
305 # if not, there will be error when next time to wipe workspace
306 # CURRENT_USER=${SUDO_USER:-$USER}
307 # PRIMARY_GROUP=$(id -gn $CURRENT_USER)
308 # sudo chown -R ${CURRENT_USER}:${PRIMARY_GROUP} ${WORKSPACE}/results
309
310 #remove useless workspace from yardstick to save disk space
311 sudo rm -rf ./results/workspace
312
313 echo "Dovetail: done!"
314