Remove all compass jobs
[releng.git] / jjb / dovetail / dovetail-run.sh
1 #!/bin/bash
2 ##############################################################################
3 # Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
9
10 #the noun INSTALLER is used in community, here is just the example to run.
11 #multi-platforms are supported.
12
13 set -e
14 [[ $CI_DEBUG == true ]] && redirect="/dev/stdout" || redirect="/dev/null"
15
16 DEPLOY_TYPE=baremetal
17 [[ $BUILD_TAG =~ "virtual" ]] && DEPLOY_TYPE=virt
18
19 DOVETAIL_HOME=${WORKSPACE}/ovp
20 [ -d ${DOVETAIL_HOME} ] && sudo rm -rf ${DOVETAIL_HOME}
21
22 mkdir -p ${DOVETAIL_HOME}
23
24 DOVETAIL_CONFIG=${DOVETAIL_HOME}/pre_config
25 mkdir -p ${DOVETAIL_CONFIG}
26
27 DOVETAIL_IMAGES=${DOVETAIL_HOME}/images
28 mkdir -p ${DOVETAIL_IMAGES}
29
30 OPENRC=${DOVETAIL_CONFIG}/env_config.sh
31 CACERT=${DOVETAIL_CONFIG}/os_cacert
32 POD=${DOVETAIL_CONFIG}/pod.yaml
33
34 ssh_options="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
35
36 sshkey=""
37
38 TEST_DB_URL=http://testresults.opnfv.org/test/api/v1/results
39
40 check_file_exists() {
41     if [[ -f $1 ]]; then
42         echo 0
43     else
44         echo 1
45     fi
46 }
47
48 get_cred_file_with_scripts() {
49     echo "INFO: clone releng repo..."
50     releng_repo=${WORKSPACE}/releng
51     [ -d ${releng_repo} ] && sudo rm -rf ${releng_repo}
52     git clone https://gerrit.opnfv.org/gerrit/releng ${releng_repo} >/dev/null
53
54     echo "INFO: clone pharos repo..."
55     pharos_repo=${WORKSPACE}/pharos
56     [ -d ${pharos_repo} ] && sudo rm -rf ${pharos_repo}
57     git clone https://git.opnfv.org/pharos ${pharos_repo} >/dev/null
58
59     echo "INFO: SUT branch is $SUT_BRANCH"
60     echo "INFO: dovetail branch is $BRANCH"
61     BRANCH_BACKUP=$BRANCH
62     export BRANCH=$SUT_BRANCH
63     cmd="${releng_repo}/utils/fetch_os_creds.sh -d ${OPENRC} -i ${INSTALLER_TYPE} -a ${INSTALLER_IP} -o ${CACERT} >${redirect}"
64     echo "INFO: cmd is ${cmd}"
65     ${cmd}
66     export BRANCH=$BRANCH_BACKUP
67 }
68
69 get_apex_cred_file() {
70     instack_mac=$(sudo virsh domiflist undercloud | grep default | \
71                   grep -Eo "[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+")
72     INSTALLER_IP=$(/usr/sbin/arp -e | grep ${instack_mac} | awk {'print $1'})
73     sshkey="-v /root/.ssh/id_rsa:/root/.ssh/id_rsa"
74     if [[ -n $(sudo iptables -L FORWARD |grep "REJECT"|grep "reject-with icmp-port-unreachable") ]]; then
75         #note: this happens only in opnfv-lf-pod1
76         sudo iptables -D FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable
77         sudo iptables -D FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable
78     fi
79     get_cred_file_with_scripts
80 }
81
82 get_fuel_cred_file() {
83     get_cred_file_with_scripts
84 }
85
86 get_joid_cred_file() {
87     # If production lab then creds may be retrieved dynamically
88     # creds are on the jumphost, always in the same folder
89     sudo cp $LAB_CONFIG/admin-openrc $OPENRC
90 }
91
92 change_cred_file_cacert_path() {
93     if [[ ${INSTALLER_TYPE} == "apex" ]]; then
94         echo "INFO: apex doesn't need to set OS_CACERT."
95         return 0
96     fi
97     exists=`check_file_exists ${CACERT}`
98     if [[ $exists == 0 ]]; then
99         echo "INFO: set ${INSTALLER_TYPE} openstack cacert file to be ${CACERT}"
100         if [[ ${INSTALLER_TYPE} == "fuel" ]]; then
101             sed -i "s#/etc/ssl/certs/mcp_os_cacert#${CACERT}#g" ${OPENRC}
102         fi
103     else
104         echo "ERROR: cannot find file ${CACERT}. Please check if it exists."
105         sudo ls -al ${DOVETAIL_CONFIG}
106         exit 1
107     fi
108 }
109
110 change_cred_file_ext_net() {
111     exists=`check_file_exists ${OPENRC}`
112     if [[ $exists == 0 ]]; then
113         echo "export EXTERNAL_NETWORK=${EXTERNAL_NETWORK}" >> ${OPENRC}
114     else
115         echo "ERROR: cannot find file $OPENRC. Please check if it exists."
116         sudo ls -al ${DOVETAIL_CONFIG}
117         exit 1
118     fi
119 }
120
121 get_cred_file() {
122     if [[ ${INSTALLER_TYPE} == 'apex' ]]; then
123         get_apex_cred_file
124     elif [[ ${INSTALLER_TYPE} == 'fuel' ]]; then
125         get_fuel_cred_file
126     elif [[ ${INSTALLER_TYPE} == 'joid' ]]; then
127         get_joid_cred_file
128     fi
129
130     exists=`check_file_exists ${OPENRC}`
131     if [[ $exists == 0 ]]; then
132         echo "INFO: original openstack credentials file is:"
133         cat $OPENRC
134         echo "INFO: change cacert file path in credentials file"
135         change_cred_file_cacert_path
136         echo "INFO: set external network in credentials file"
137         change_cred_file_ext_net
138         echo "INFO: final openstack credentials file is:"
139         cat $OPENRC
140     else
141         echo "ERROR: cannot find file $OPENRC. Please check if it exists."
142         sudo ls -al ${DOVETAIL_CONFIG}
143         exit 1
144     fi
145 }
146
147 get_fuel_baremetal_pod_file() {
148     fuel_ctl_ssh_options="${ssh_options} -i ${SSH_KEY}"
149     ssh_user="ubuntu"
150     fuel_ctl_ip=$(ssh 2>/dev/null ${fuel_ctl_ssh_options} "${ssh_user}@${INSTALLER_IP}" \
151             "sudo salt 'cfg*' pillar.get _param:openstack_control_address --out text| \
152                 cut -f2 -d' '")
153     fuel_cmp_ip=$(ssh 2>/dev/null ${fuel_ctl_ssh_options} "${ssh_user}@${INSTALLER_IP}" \
154             "sudo salt 'cmp001*' pillar.get _param:openstack_control_address --out text| \
155                 cut -f2 -d' '")
156     fuel_dbs_ip=$(ssh 2>/dev/null ${fuel_ctl_ssh_options} "${ssh_user}@${INSTALLER_IP}" \
157             "sudo salt 'dbs01*' pillar.get _param:openstack_database_node01_address --out text| \
158                 cut -f2 -d' '")
159     fuel_msg_ip=$(ssh 2>/dev/null ${fuel_ctl_ssh_options} "${ssh_user}@${INSTALLER_IP}" \
160             "sudo salt 'msg01*' pillar.get _param:openstack_message_queue_node01_address --out text| \
161                 cut -f2 -d' '")
162     ipmi_index=$(ssh 2>/dev/null ${fuel_ctl_ssh_options} "${ssh_user}@${INSTALLER_IP}" \
163             "sudo salt 'ctl*' network.ip_addrs cidr=${fuel_ctl_ip} --out text | grep ${fuel_ctl_ip} | cut -c 5")
164
165     organization="$(cut -d'-' -f1 <<< "${NODE_NAME}")"
166     pod_name="$(cut -d'-' -f2 <<< "${NODE_NAME}")"
167     pdf_file=${pharos_repo}/labs/${organization}/${pod_name}.yaml
168     ipmiIp=$(cat ${pdf_file} | shyaml get-value nodes.$[ipmi_index-1].remote_management.address)
169     ipmiIp="$(cut -d'/' -f1 <<< "${ipmiIp}")"
170     ipmiPass=$(cat ${pdf_file} | shyaml get-value nodes.$[ipmi_index-1].remote_management.pass)
171     ipmiUser=$(cat ${pdf_file} | shyaml get-value nodes.$[ipmi_index-1].remote_management.user)
172     [[ $ipmiUser == ENC* ]] && ipmiUser=$(eyaml decrypt -s ${ipmiUser//[[:blank:]]/})
173     [[ $ipmiPass == ENC* ]] && ipmiPass=$(eyaml decrypt -s ${ipmiPass//[[:blank:]]/})
174
175     cat << EOF >${POD}
176 nodes:
177 - {ip: ${INSTALLER_IP}, name: node0, key_filename: ${DOVETAIL_CONFIG}/id_rsa,
178    role: Jumpserver, user: ${ssh_user}}
179 - {ip: ${fuel_ctl_ip}, name: node1, key_filename: ${DOVETAIL_CONFIG}/id_rsa,
180    role: controller, user: ${ssh_user}, ipmi_ip: ${ipmiIp}, ipmi_user: ${ipmiUser}, ipmi_password: ${ipmiPass}}
181 - {ip: ${fuel_msg_ip}, name: msg01, key_filename: ${DOVETAIL_CONFIG}/id_rsa, role: controller, user: ${ssh_user}}
182 - {ip: ${fuel_cmp_ip}, name: cmp01, key_filename: ${DOVETAIL_CONFIG}/id_rsa, role: controller, user: ${ssh_user}}
183 - {ip: ${fuel_dbs_ip}, name: dbs01, key_filename: ${DOVETAIL_CONFIG}/id_rsa, role: controller, user: ${ssh_user}}
184 EOF
185 }
186
187 get_pod_file_with_scripts() {
188     set +e
189     sudo pip install virtualenv
190
191     cd ${releng_repo}/modules
192     sudo virtualenv venv
193     source venv/bin/activate
194     sudo pip install -e ./ >/dev/null
195     sudo pip install netaddr
196
197     if [[ ${INSTALLER_TYPE} == fuel ]]; then
198         options="-u ubuntu -k /root/.ssh/id_rsa"
199     elif [[ ${INSTALLER_TYPE} == apex ]]; then
200         options="-u stack -k /root/.ssh/id_rsa"
201     elif [[ ${INSTALLER_TYPE} == daisy ]]; then
202         options="-u root -p r00tme"
203     else
204         echo "WARNING: Don't support to generate ${POD} on ${INSTALLER_TYPE} currently."
205         echo "WARNING: HA test cases may not run properly."
206     fi
207
208     cmd="sudo python ${releng_repo}/utils/create_pod_file.py -t ${INSTALLER_TYPE} \
209          -i ${INSTALLER_IP} ${options} -f ${POD} \
210          -s ${DOVETAIL_CONFIG}/id_rsa"
211     echo "INFO: cmd is ${cmd}"
212     ${cmd}
213
214     deactivate
215     set -e
216     cd ${WORKSPACE}
217 }
218
219 change_apex_pod_file_process_info() {
220     cat << EOF >> ${POD}
221 process_info:
222 - {testcase_name: yardstick.ha.rabbitmq, attack_process: rabbitmq_server}
223 - {testcase_name: yardstick.ha.cinder_api, attack_process: cinder_wsgi}
224 EOF
225 }
226
227 change_fuel_pod_file_process_info() {
228     cat << EOF >> ${POD}
229 process_info:
230 - {testcase_name: yardstick.ha.cinder_api, attack_process: cinder-wsgi}
231 - {testcase_name: yardstick.ha.rabbitmq, attack_process: rabbitmq_server, attack_host: msg01}
232 - {testcase_name: yardstick.ha.neutron_l3_agent, attack_process: neutron-l3-agent, attack_host: cmp01}
233 - {testcase_name: yardstick.ha.database, attack_process: mysqld, attack_host: dbs01}
234 EOF
235 }
236
237 change_pod_file_process_info() {
238     sudo chmod 666 ${POD}
239     echo "INFO: adapt process info for $INSTALLER_TYPE ..."
240     if [ "$INSTALLER_TYPE" == "apex" ]; then
241         change_apex_pod_file_process_info
242     elif [ "$INSTALLER_TYPE" == "fuel" ]; then
243         change_fuel_pod_file_process_info
244     fi
245 }
246
247 get_pod_file() {
248     # These packages are used for parsing yaml files and decrypting ipmi user and password.
249     sudo pip install shyaml
250     sudo yum install -y rubygems || sudo apt-get install -y ruby
251     sudo gem install hiera-eyaml
252     if [[ ${INSTALLER_TYPE} == 'fuel' && ${DEPLOY_TYPE} == 'baremetal' ]]; then
253         get_fuel_baremetal_pod_file
254     fi
255
256     exists=`check_file_exists ${POD}`
257     if [[ $exists == 1 ]]; then
258         get_pod_file_with_scripts
259     fi
260
261     exists=`check_file_exists ${POD}`
262     if [[ $exists == 0 ]]; then
263         change_pod_file_process_info
264     else
265         echo "ERROR: cannot find file ${POD}. Please check if it exists."
266         sudo ls -al ${DOVETAIL_CONFIG}
267         exit 1
268     fi
269
270     echo "INFO: file ${POD} is:"
271     cat ${POD}
272 }
273
274 get_cred_file
275 get_pod_file
276
277 if [ "$INSTALLER_TYPE" == "fuel" ]; then
278     if [[ "${SUT_BRANCH}" =~ "danube" ]]; then
279         echo "Fetching id_rsa file from jump_server $INSTALLER_IP..."
280         sshpass -p r00tme sudo scp $ssh_options root@${INSTALLER_IP}:~/.ssh/id_rsa ${DOVETAIL_CONFIG}/id_rsa
281     else
282         cp ${SSH_KEY} ${DOVETAIL_CONFIG}/id_rsa
283     fi
284 fi
285
286 if [ "$INSTALLER_TYPE" == "apex" ]; then
287     echo "Fetching id_rsa file from jump_server $INSTALLER_IP..."
288     sudo scp $ssh_options stack@${INSTALLER_IP}:~/.ssh/id_rsa ${DOVETAIL_CONFIG}/id_rsa
289 fi
290
291 if [ "$INSTALLER_TYPE" == "daisy" ]; then
292     echo "Fetching id_dsa file from jump_server $INSTALLER_IP..."
293     sshpass -p r00tme sudo scp $ssh_options root@${INSTALLER_IP}:~/.ssh/id_dsa ${DOVETAIL_CONFIG}/id_rsa
294 fi
295
296
297 image_path=${HOME}/opnfv/dovetail/images
298 if [[ ! -d ${image_path} ]]; then
299     mkdir -p ${image_path}
300 fi
301 # sdnvpn test case needs to download this image first before running
302 ubuntu_image=${image_path}/ubuntu-16.04-server-cloudimg-amd64-disk1.img
303 if [[ ! -f ${ubuntu_image} ]]; then
304     echo "Download image ubuntu-16.04-server-cloudimg-amd64-disk1.img ..."
305     wget -q -nc https://artifacts.opnfv.org/sdnvpn/ubuntu-16.04-server-cloudimg-amd64-disk1.img -P ${image_path}
306 fi
307 sudo cp ${ubuntu_image} ${DOVETAIL_IMAGES}
308
309 # yardstick and bottlenecks need to download this image first before running
310 cirros_image=${image_path}/cirros-0.3.5-x86_64-disk.img
311 if [[ ! -f ${cirros_image} ]]; then
312     echo "Download image cirros-0.3.5-x86_64-disk.img ..."
313     wget -q -nc http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img -P ${image_path}
314 fi
315 sudo cp ${cirros_image} ${DOVETAIL_IMAGES}
316
317 # functest needs to download this image first before running
318 cirros_image=${image_path}/cirros-0.4.0-x86_64-disk.img
319 if [[ ! -f ${cirros_image} ]]; then
320     echo "Download image cirros-0.4.0-x86_64-disk.img ..."
321     wget -q -nc http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img -P ${image_path}
322 fi
323 sudo cp ${cirros_image} ${DOVETAIL_IMAGES}
324
325 # snaps_smoke test case needs to download this image first before running
326 ubuntu14_image=${image_path}/ubuntu-14.04-server-cloudimg-amd64-disk1.img
327 if [[ ! -f ${ubuntu14_image} ]]; then
328     echo "Download image ubuntu-14.04-server-cloudimg-amd64-disk1.img ..."
329     wget -q -nc https://cloud-images.ubuntu.com/releases/14.04/release/ubuntu-14.04-server-cloudimg-amd64-disk1.img -P ${image_path}
330 fi
331 sudo cp ${ubuntu14_image} ${DOVETAIL_IMAGES}
332
333 # cloudify_ims test case needs to download these 2 images first before running
334 cloudify_image=${image_path}/cloudify-manager-premium-4.0.1.qcow2
335 if [[ ! -f ${cloudify_image} ]]; then
336     echo "Download image cloudify-manager-premium-4.0.1.qcow2 ..."
337     wget -q -nc http://repository.cloudifysource.org/cloudify/4.0.1/sp-release/cloudify-manager-premium-4.0.1.qcow2 -P ${image_path}
338 fi
339 sudo cp ${cloudify_image} ${DOVETAIL_IMAGES}
340 trusty_image=${image_path}/trusty-server-cloudimg-amd64-disk1.img
341 if [[ ! -f ${trusty_image} ]]; then
342     echo "Download image trusty-server-cloudimg-amd64-disk1.img ..."
343     wget -q -nc http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img -P ${image_path}
344 fi
345 sudo cp ${trusty_image} ${DOVETAIL_IMAGES}
346
347 opts="--privileged=true -id"
348
349 docker_volume="-v /var/run/docker.sock:/var/run/docker.sock"
350 dovetail_home_volume="-v ${DOVETAIL_HOME}:${DOVETAIL_HOME}"
351
352 # Pull the image with correct tag
353 DOCKER_REPO='opnfv/dovetail'
354 if [ "$(uname -m)" = 'aarch64' ]; then
355     DOCKER_REPO="${DOCKER_REPO}_$(uname -m)"
356     DOCKER_TAG="latest"
357 fi
358
359 echo "Dovetail: Pulling image ${DOCKER_REPO}:${DOCKER_TAG}"
360 docker pull ${DOCKER_REPO}:$DOCKER_TAG >$redirect
361
362 cmd="docker run ${opts} -e DOVETAIL_HOME=${DOVETAIL_HOME} -e INSTALLER_TYPE=${INSTALLER_TYPE} \
363      -e DEPLOY_SCENARIO=${DEPLOY_SCENARIO} -e NODE_NAME=${NODE_NAME} -e BUILD_TAG=${BUILD_TAG} \
364      -e TEST_DB_URL=${TEST_DB_URL} -e VERSION=${SUT_BRANCH} \
365      ${docker_volume} ${dovetail_home_volume} \
366      ${sshkey} ${DOCKER_REPO}:${DOCKER_TAG} /bin/bash"
367 echo "Dovetail: running docker run command: ${cmd}"
368 ${cmd} >${redirect}
369 sleep 5
370 container_id=$(docker ps | grep "${DOCKER_REPO}:${DOCKER_TAG}" | awk '{print $1}' | head -1)
371 echo "Container ID=${container_id}"
372 if [ -z ${container_id} ]; then
373     echo "Cannot find ${DOCKER_REPO} container ID ${container_id}. Please check if it exists."
374     docker ps -a
375     exit 1
376 fi
377 echo "Container Start: docker start ${container_id}"
378 docker start ${container_id}
379 sleep 5
380 docker ps >${redirect}
381 if [ $(docker ps | grep "${DOCKER_REPO}:${DOCKER_TAG}" | wc -l) == 0 ]; then
382     echo "The container ${DOCKER_REPO} with ID=${container_id} has not been properly started. Exiting..."
383     exit 1
384 fi
385
386 # Modify tempest_conf.yaml file
387 tempest_conf_file=${DOVETAIL_CONFIG}/tempest_conf.yaml
388 if [[ ${INSTALLER_TYPE} == 'apex' ]]; then
389     volume_device='vdb'
390 else
391     volume_device='vdc'
392 fi
393
394 cat << EOF >$tempest_conf_file
395
396 compute:
397     min_compute_nodes: 2
398     volume_device_name: ${volume_device}
399
400 EOF
401
402 echo "${tempest_conf_file}..."
403 cat ${tempest_conf_file}
404
405 cp_tempest_cmd="docker cp ${DOVETAIL_CONFIG}/tempest_conf.yaml $container_id:/home/opnfv/dovetail/dovetail/userconfig"
406 echo "exec command: ${cp_tempest_cmd}"
407 $cp_tempest_cmd
408
409 if [[ ${TESTSUITE} == 'default' ]]; then
410     testsuite=''
411 else
412     testsuite="--testsuite ${TESTSUITE}"
413 fi
414
415 if [[ ${TESTAREA} == 'mandatory' ]]; then
416     testarea='--mandatory'
417 elif [[ ${TESTAREA} == 'optional' ]]; then
418     testarea="--optional"
419 elif [[ ${TESTAREA} == 'all' ]]; then
420     testarea=""
421 else
422     testarea="--testarea ${TESTAREA}"
423 fi
424
425 run_cmd="dovetail run ${testsuite} ${testarea} --deploy-scenario ${DEPLOY_SCENARIO} -d -r --opnfv-ci"
426 echo "Container exec command: ${run_cmd}"
427 docker exec $container_id ${run_cmd}
428
429 sudo cp -r ${DOVETAIL_HOME}/results ./
430 result_package=$(find ${DOVETAIL_HOME} -name 'logs_*')
431 echo "Results package is ${result_package}"
432 for item in ${result_package};
433 do
434   sudo mv ${item} ./results
435 done
436
437 # To make sure the file owner is the current user, for the copied results files in the above line
438 echo "Change owner of result files ..."
439 CURRENT_USER=${SUDO_USER:-$USER}
440 PRIMARY_GROUP=$(id -gn $CURRENT_USER)
441 echo "Current user is ${CURRENT_USER}, group is ${PRIMARY_GROUP}"
442 sudo chown -R ${CURRENT_USER}:${PRIMARY_GROUP} ./results
443
444 #remove useless files to save disk space
445 sudo rm -rf ./results/workspace
446 sudo rm -f ./results/yardstick.img
447 sudo rm -f ./results/bottlenecks/tmp*
448
449 echo "Dovetail: done!"
450