Merge "[docs] Limit git submodule recurse to depth 1"
[releng.git] / jjb / dovetail / dovetail-run.sh
1 #!/bin/bash
2 ##############################################################################
3 # Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
9
#INSTALLER refers to the OPNFV installer in use; this script is an example runner.
#Multiple installer platforms are supported.
12
# Exit on first error; CI_DEBUG=true routes verbose output to the console.
set -e
[[ "${CI_DEBUG}" == true ]] && redirect="/dev/stdout" || redirect="/dev/null"

# Deduce the deploy type from the Jenkins build tag (virtual vs. baremetal).
DEPLOY_TYPE=baremetal
[[ "${BUILD_TAG}" =~ "virtual" ]] && DEPLOY_TYPE=virt

# Start from a clean Dovetail workspace. ':?' aborts the script instead of
# letting an empty/unset WORKSPACE turn this into 'sudo rm -rf /cvp'.
DOVETAIL_HOME=${WORKSPACE:?WORKSPACE must be set}/cvp
[ -d "${DOVETAIL_HOME}" ] && sudo rm -rf "${DOVETAIL_HOME}"

mkdir -p "${DOVETAIL_HOME}"

# Pre-run configuration consumed by the Dovetail container.
DOVETAIL_CONFIG=${DOVETAIL_HOME}/pre_config
mkdir -p "${DOVETAIL_CONFIG}"

# Guest images staged for the Dovetail container.
DOVETAIL_IMAGES=${DOVETAIL_HOME}/images
mkdir -p "${DOVETAIL_IMAGES}"
30 ssh_options="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
31
32 sshkey=""
33 # The path of openrc.sh is defined in fetch_os_creds.sh
34 OPENRC=${DOVETAIL_CONFIG}/env_config.sh
35 CACERT=${DOVETAIL_CONFIG}/os_cacert
36 if [[ ${INSTALLER_TYPE} == 'apex' ]]; then
37     instack_mac=$(sudo virsh domiflist undercloud | grep default | \
38                   grep -Eo "[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+")
39     INSTALLER_IP=$(/usr/sbin/arp -e | grep ${instack_mac} | awk {'print $1'})
40     sshkey="-v /root/.ssh/id_rsa:/root/.ssh/id_rsa"
41     if [[ -n $(sudo iptables -L FORWARD |grep "REJECT"|grep "reject-with icmp-port-unreachable") ]]; then
42         #note: this happens only in opnfv-lf-pod1
43         sudo iptables -D FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable
44         sudo iptables -D FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable
45     fi
46 elif [[ ${INSTALLER_TYPE} == 'joid' ]]; then
47     # If production lab then creds may be retrieved dynamically
48     # creds are on the jumphost, always in the same folder
49     sudo cp $LAB_CONFIG/admin-openrc $OPENRC
50     # If dev lab, credentials may not be the default ones, just provide a path to put them into docker
51     # replace the default one by the customized one provided by jenkins config
52 fi
53
# Fresh clone of releng: provides fetch_os_creds.sh and create_pod_file.py.
# Quote all path expansions — they feed a 'sudo rm -rf'.
releng_repo=${WORKSPACE}/releng
[ -d "${releng_repo}" ] && sudo rm -rf "${releng_repo}"
git clone https://gerrit.opnfv.org/gerrit/releng "${releng_repo}" >/dev/null

# Fresh clone of pharos: provides the lab PDF descriptors used by the fuel
# baremetal path below.
pharos_repo=${WORKSPACE}/pharos
[ -d "${pharos_repo}" ] && sudo rm -rf "${pharos_repo}"
git clone https://git.opnfv.org/pharos "${pharos_repo}" >/dev/null
61
62 if [[ ${INSTALLER_TYPE} != 'joid' ]]; then
63     echo "SUT branch is $SUT_BRANCH"
64     echo "dovetail branch is $BRANCH"
65     BRANCH_BACKUP=$BRANCH
66     export BRANCH=$SUT_BRANCH
67     ${releng_repo}/utils/fetch_os_creds.sh -d ${OPENRC} -i ${INSTALLER_TYPE} -a ${INSTALLER_IP} -o ${CACERT} >${redirect}
68     export BRANCH=$BRANCH_BACKUP
69 fi
70
# Validate the fetched openrc before handing it to the container; bail out
# early when it is missing. Non-danube compass additionally requires the
# openstack CA certificate to be present and exported.
if [[ ! -f $OPENRC ]]; then
    echo "ERROR: cannot find file $OPENRC. Please check if it is existing."
    sudo ls -al ${DOVETAIL_CONFIG}
    exit 1
fi
echo "INFO: openstack credentials path is $OPENRC"

if [[ ! "${SUT_BRANCH}" =~ "danube" && ${INSTALLER_TYPE} == "compass" ]]; then
    if [[ ! -f ${CACERT} ]]; then
        echo "ERROR: Can't find ${INSTALLER_TYPE} openstack cacert file. Please check if it is existing."
        sudo ls -al ${DOVETAIL_CONFIG}
        exit 1
    fi
    echo "INFO: ${INSTALLER_TYPE} openstack cacert file is ${CACERT}"
    echo "export OS_CACERT=${CACERT}" >> ${OPENRC}
fi

echo "export EXTERNAL_NETWORK=${EXTERNAL_NETWORK}" >> ${OPENRC}
89
90 if [[ ! "${SUT_BRANCH}" =~ "danube" && ${INSTALLER_TYPE} == "fuel" ]]; then
91     sed -i "s#/etc/ssl/certs/mcp_os_cacert#${CACERT}#g" ${OPENRC}
92 fi
93 cat $OPENRC
94
95 # These packages are used for parsing yaml files and decrypting ipmi user and password.
96 sudo pip install shyaml
97 sudo yum install -y rubygems || sudo apt-get install -y ruby
98 sudo gem install hiera-eyaml
99
# Build pod.yaml for non-danube compass deployments: IPMI details come from
# the pod's hardware scenario file, node IPs are the fixed compass defaults.
if [[ ! "${SUT_BRANCH}" =~ "danube" && ${INSTALLER_TYPE} == "compass" ]]; then
    compass_repo=${WORKSPACE}/compass4nfv/
    git clone https://github.com/opnfv/compass4nfv.git "${compass_repo}" >/dev/null
    scenario_file=${compass_repo}/deploy/conf/hardware_environment/$NODE_NAME/os-nosdn-nofeature-ha.yml
    # Feed shyaml via stdin instead of a useless 'cat |' pipeline.
    ipmiIp=$(shyaml get-value hosts.0.ipmiIp < "${scenario_file}")
    ipmiPass=$(shyaml get-value hosts.0.ipmiPass < "${scenario_file}")
    ipmiUser=root
    # Jumpserver address = this jumphost's docker0 bridge IP.
    jumpserver_ip=$(ifconfig | grep -A 5 docker0 | grep "inet addr" | cut -d ':' -f 2 | cut -d ' ' -f 1)

    cat << EOF >${DOVETAIL_CONFIG}/pod.yaml
nodes:
- {ip: ${jumpserver_ip}, name: node0, password: root, role: Jumpserver, user: root}
- {ip: 10.1.0.50, name: node1, password: root, role: controller, user: root,
   ipmi_ip: ${ipmiIp}, ipmi_user: ${ipmiUser}, ipmi_password: ${ipmiPass}}
- {ip: 10.1.0.51, name: node2, password: root, role: controller, user: root}
- {ip: 10.1.0.52, name: node3, password: root, role: controller, user: root}
- {ip: 10.1.0.53, name: node4, password: root, role: compute, user: root}
- {ip: 10.1.0.54, name: node5, password: root, role: compute, user: root}

EOF
fi
121
# Build pod.yaml for non-danube fuel baremetal deployments. Node addresses
# are read from Salt pillar data on the MCP master; IPMI credentials for the
# first controller come from the pharos PDF for this pod.
if [[ ! "${SUT_BRANCH}" =~ "danube" && ${INSTALLER_TYPE} == 'fuel' && ${DEPLOY_TYPE} == 'baremetal' ]]; then
    fuel_ctl_ssh_options="${ssh_options} -i ${SSH_KEY}"
    ssh_user="ubuntu"
    fuel_ctl_ip=$(ssh 2>/dev/null ${fuel_ctl_ssh_options} "${ssh_user}@${INSTALLER_IP}" \
            "sudo salt 'cfg*' pillar.get _param:openstack_control_address --out text| \
                cut -f2 -d' '")
    fuel_cmp_ip=$(ssh 2>/dev/null ${fuel_ctl_ssh_options} "${ssh_user}@${INSTALLER_IP}" \
            "sudo salt 'cmp001*' pillar.get _param:openstack_control_address --out text| \
                cut -f2 -d' '")
    fuel_dbs_ip=$(ssh 2>/dev/null ${fuel_ctl_ssh_options} "${ssh_user}@${INSTALLER_IP}" \
            "sudo salt 'dbs01*' pillar.get _param:openstack_database_node01_address --out text| \
                cut -f2 -d' '")
    fuel_msg_ip=$(ssh 2>/dev/null ${fuel_ctl_ssh_options} "${ssh_user}@${INSTALLER_IP}" \
            "sudo salt 'msg01*' pillar.get _param:openstack_message_queue_node01_address --out text| \
                cut -f2 -d' '")
    # 1-based index of the controller that owns fuel_ctl_ip; used to locate
    # the matching node entry in the PDF below.
    ipmi_index=$(ssh 2>/dev/null ${fuel_ctl_ssh_options} "${ssh_user}@${INSTALLER_IP}" \
            "sudo salt 'ctl*' network.ip_addrs cidr=${fuel_ctl_ip} --out text | grep ${fuel_ctl_ip} | cut -c 5")

    # NODE_NAME is '<organization>-<pod>'; the PDF lives in the pharos repo.
    organization="$(cut -d'-' -f1 <<< "${NODE_NAME}")"
    pod_name="$(cut -d'-' -f2 <<< "${NODE_NAME}")"
    pdf_file=${pharos_repo}/labs/${organization}/${pod_name}.yaml
    # POSIX $((...)) arithmetic replaces the deprecated $[...] form, and
    # shyaml reads from stdin instead of a useless 'cat |' pipeline.
    ipmiIp=$(shyaml get-value nodes.$((ipmi_index-1)).remote_management.address < "${pdf_file}")
    ipmiIp="$(cut -d'/' -f1 <<< "${ipmiIp}")"
    ipmiPass=$(shyaml get-value nodes.$((ipmi_index-1)).remote_management.pass < "${pdf_file}")
    ipmiUser=$(shyaml get-value nodes.$((ipmi_index-1)).remote_management.user < "${pdf_file}")
    # IPMI credentials may be stored hiera-eyaml encrypted ('ENC[...]').
    [[ $ipmiUser == ENC* ]] && ipmiUser=$(eyaml decrypt -s ${ipmiUser//[[:blank:]]/})
    [[ $ipmiPass == ENC* ]] && ipmiPass=$(eyaml decrypt -s ${ipmiPass//[[:blank:]]/})

    cat << EOF >${DOVETAIL_CONFIG}/pod.yaml
nodes:
- {ip: ${INSTALLER_IP}, name: node0, key_filename: /home/opnfv/userconfig/pre_config/id_rsa,
   role: Jumpserver, user: ${ssh_user}}
- {ip: ${fuel_ctl_ip}, name: node1, key_filename: /home/opnfv/userconfig/pre_config/id_rsa,
   role: controller, user: ${ssh_user}, ipmi_ip: ${ipmiIp}, ipmi_user: ${ipmiUser}, ipmi_password: ${ipmiPass}}
- {ip: ${fuel_msg_ip}, name: msg01, key_filename: /home/opnfv/userconfig/pre_config/id_rsa, role: controller, user: ${ssh_user}}
- {ip: ${fuel_cmp_ip}, name: cmp01, key_filename: /home/opnfv/userconfig/pre_config/id_rsa, role: controller, user: ${ssh_user}}
- {ip: ${fuel_dbs_ip}, name: dbs01, key_filename: /home/opnfv/userconfig/pre_config/id_rsa, role: controller, user: ${ssh_user}}
EOF
fi
161
# Fallback: no pod.yaml was produced by the installer-specific paths above,
# so generate one with releng's create_pod_file.py. Failures here are
# tolerated (set +e): a missing pod.yaml only degrades the HA test cases,
# it should not abort the whole run.
if [[ ! -f ${DOVETAIL_CONFIG}/pod.yaml ]]; then
    set +e

    sudo pip install virtualenv

    cd ${releng_repo}/modules
    sudo virtualenv venv
    source venv/bin/activate
    sudo pip install -e ./ >/dev/null
    sudo pip install netaddr

    # Per-installer credentials for reaching the deployment nodes. Initialize
    # to empty so an unsupported installer never expands an unset variable.
    options=""
    case "${INSTALLER_TYPE}" in
        compass)
            options="-u root -p root"
            ;;
        fuel|daisy)
            options="-u root -p r00tme"
            ;;
        apex)
            options="-u stack -k /root/.ssh/id_rsa"
            ;;
        *)
            echo "Don't support to generate pod.yaml on ${INSTALLER_TYPE} currently."
            echo "HA test cases may not run properly."
            ;;
    esac

    cmd="sudo python ${releng_repo}/utils/create_pod_file.py -t ${INSTALLER_TYPE} \
         -i ${INSTALLER_IP} ${options} -f ${DOVETAIL_CONFIG}/pod.yaml \
         -s /home/opnfv/userconfig/pre_config/id_rsa"
    echo ${cmd}
    ${cmd}

    deactivate

    set -e

    cd ${WORKSPACE}
fi
198
# Append installer-specific process_info entries to pod.yaml so the HA test
# cases know which process to attack on which host; warn when pod.yaml is
# still missing at this point.
if [ -f ${DOVETAIL_CONFIG}/pod.yaml ]; then
    sudo chmod 666 ${DOVETAIL_CONFIG}/pod.yaml
    echo "Adapt process info for $INSTALLER_TYPE ..."
    case "$INSTALLER_TYPE" in
    apex)
        cat << EOF >> ${DOVETAIL_CONFIG}/pod.yaml
process_info:
- {testcase_name: yardstick.ha.rabbitmq, attack_process: rabbitmq_server}
- {testcase_name: yardstick.ha.cinder_api, attack_process: cinder_wsgi}
EOF
        ;;
    fuel)
        cat << EOF >> ${DOVETAIL_CONFIG}/pod.yaml
process_info:
- {testcase_name: yardstick.ha.cinder_api, attack_process: cinder-wsgi}
- {testcase_name: yardstick.ha.rabbitmq, attack_process: rabbitmq_server, attack_host: msg01}
- {testcase_name: yardstick.ha.neutron_l3_agent, attack_process: neutron-l3-agent, attack_host: cmp01}
- {testcase_name: yardstick.ha.database, attack_process: mysqld, attack_host: dbs01}
EOF
        ;;
    compass)
        cat << EOF >> ${DOVETAIL_CONFIG}/pod.yaml
process_info:
- {testcase_name: yardstick.ha.rabbitmq, attack_process: rabbitmq}
EOF
        ;;
    esac

    echo "file ${DOVETAIL_CONFIG}/pod.yaml:"
    cat ${DOVETAIL_CONFIG}/pod.yaml
else
    echo "Error: cannot find file ${DOVETAIL_CONFIG}/pod.yaml. Please check if it is existing."
    sudo ls -al ${DOVETAIL_CONFIG}
    echo "HA test cases may not run properly."
fi
230
# Stage the SSH private key the Dovetail container will use to reach the
# deployment; the source of the key depends on the installer.
case "$INSTALLER_TYPE" in
fuel)
    if [[ "${SUT_BRANCH}" =~ "danube" ]]; then
        echo "Fetching id_rsa file from jump_server $INSTALLER_IP..."
        sshpass -p r00tme sudo scp $ssh_options root@${INSTALLER_IP}:~/.ssh/id_rsa ${DOVETAIL_CONFIG}/id_rsa
    else
        cp ${SSH_KEY} ${DOVETAIL_CONFIG}/id_rsa
    fi
    ;;
apex)
    echo "Fetching id_rsa file from jump_server $INSTALLER_IP..."
    sudo scp $ssh_options stack@${INSTALLER_IP}:~/.ssh/id_rsa ${DOVETAIL_CONFIG}/id_rsa
    ;;
daisy)
    echo "Fetching id_dsa file from jump_server $INSTALLER_IP..."
    sshpass -p r00tme sudo scp $ssh_options root@${INSTALLER_IP}:~/.ssh/id_dsa ${DOVETAIL_CONFIG}/id_rsa
    ;;
esac
249
250
# Local cache for guest images shared across runs.
image_path=${HOME}/opnfv/dovetail/images
mkdir -p "${image_path}"

#######################################
# Download an image into the local cache (skipped when already cached) and
# copy it into the directory mounted by the Dovetail container.
# Globals:   image_path (read), DOVETAIL_IMAGES (read)
# Arguments: $1 - image URL; the basename of the URL is the cached file name
#######################################
download_and_stage() {
    local url=$1
    local image_name=${url##*/}
    local image_file=${image_path}/${image_name}
    if [[ ! -f ${image_file} ]]; then
        echo "Download image ${image_name} ..."
        wget -q -nc "${url}" -P "${image_path}"
    fi
    sudo cp "${image_file}" "${DOVETAIL_IMAGES}"
}

# sdnvpn test case needs to download this image first before running
download_and_stage https://artifacts.opnfv.org/sdnvpn/ubuntu-16.04-server-cloudimg-amd64-disk1.img

# yardstick and bottlenecks need to download this image first before running
download_and_stage http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img

# functest needs to download this image first before running
download_and_stage http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img

# snaps_smoke test case needs to download this image first before running
download_and_stage https://cloud-images.ubuntu.com/releases/14.04/release/ubuntu-14.04-server-cloudimg-amd64-disk1.img

# cloudify_ims test case needs to download these 2 images first before running
download_and_stage http://repository.cloudifysource.org/cloudify/4.0.1/sp-release/cloudify-manager-premium-4.0.1.qcow2
download_and_stage http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img
300
# Run the Dovetail container detached with the workspace and the host docker
# socket mounted in.
opts="--privileged=true -id"

docker_volume="-v /var/run/docker.sock:/var/run/docker.sock"
dovetail_home_volume="-v ${DOVETAIL_HOME}:${DOVETAIL_HOME}"

# Pull the image with correct tag; ARM nodes use a dedicated repo and tag.
DOCKER_REPO='opnfv/dovetail'
if [ "$(uname -m)" = 'aarch64' ]; then
    DOCKER_REPO="${DOCKER_REPO}_$(uname -m)"
    DOCKER_TAG="latest"
fi

echo "Dovetail: Pulling image ${DOCKER_REPO}:${DOCKER_TAG}"
docker pull ${DOCKER_REPO}:$DOCKER_TAG >$redirect

cmd="docker run ${opts} -e DOVETAIL_HOME=${DOVETAIL_HOME} ${docker_volume} ${dovetail_home_volume} \
     ${sshkey} ${DOCKER_REPO}:${DOCKER_TAG} /bin/bash"
echo "Dovetail: running docker run command: ${cmd}"
${cmd} >${redirect}
sleep 5
container_id=$(docker ps | grep "${DOCKER_REPO}:${DOCKER_TAG}" | awk '{print $1}' | head -1)
echo "Container ID=${container_id}"
# Quote the expansion: an empty id would otherwise collapse the test to
# the always-true '[ -z ]'.
if [ -z "${container_id}" ]; then
    echo "Cannot find ${DOCKER_REPO} container ID ${container_id}. Please check if it is existing."
    docker ps -a
    exit 1
fi
echo "Container Start: docker start ${container_id}"
docker start ${container_id}
sleep 5
docker ps >${redirect}
# grep -c counts matching lines directly (no 'wc -l' pipe needed).
if [ "$(docker ps | grep -c "${DOCKER_REPO}:${DOCKER_TAG}")" -eq 0 ]; then
    echo "The container ${DOCKER_REPO} with ID=${container_id} has not been properly started. Exiting..."
    exit 1
fi
336
# Write the tempest_conf.yaml consumed by Dovetail's tempest cases and copy
# it into the container's userconfig directory. The attached volume device
# name differs per installer.
tempest_conf_file=${DOVETAIL_CONFIG}/tempest_conf.yaml
case "${INSTALLER_TYPE}" in
compass|apex) volume_device='vdb' ;;
*)            volume_device='vdc' ;;
esac

cat << EOF >$tempest_conf_file

compute:
    min_compute_nodes: 2
    volume_device_name: ${volume_device}

EOF

echo "${tempest_conf_file}..."
cat ${tempest_conf_file}

cp_tempest_cmd="docker cp ${DOVETAIL_CONFIG}/tempest_conf.yaml $container_id:/home/opnfv/dovetail/dovetail/userconfig"
echo "exec command: ${cp_tempest_cmd}"
$cp_tempest_cmd
359
# Translate the TESTSUITE / TESTAREA job parameters into dovetail CLI flags
# and launch the run inside the container.
case "${TESTSUITE}" in
    default) testsuite='' ;;
    *)       testsuite="--testsuite ${TESTSUITE}" ;;
esac

case "${TESTAREA}" in
    mandatory) testarea='--mandatory' ;;
    optional)  testarea="--optional" ;;
    all)       testarea="" ;;
    *)         testarea="--testarea ${TESTAREA}" ;;
esac

run_cmd="dovetail run ${testsuite} ${testarea} --deploy-scenario ${DEPLOY_SCENARIO} -d -r"
echo "Container exec command: ${run_cmd}"
docker exec $container_id ${run_cmd}
379
# Collect the results produced inside DOVETAIL_HOME into ./results.
sudo cp -r "${DOVETAIL_HOME}/results" ./
result_package=$(find "${DOVETAIL_HOME}" -name 'logs_*')
echo "Results package is ${result_package}"
# Iterate line-by-line (not via word splitting) so paths containing spaces
# survive; skip the single empty line produced when nothing was found.
while IFS= read -r item; do
    if [ -n "${item}" ]; then
        sudo mv "${item}" ./results
    fi
done <<< "${result_package}"

# To make sure the file owner is the current user, for the copied results files in the above line
echo "Change owner of result files ..."
CURRENT_USER=${SUDO_USER:-$USER}
PRIMARY_GROUP=$(id -gn "${CURRENT_USER}")
echo "Current user is ${CURRENT_USER}, group is ${PRIMARY_GROUP}"
sudo chown -R "${CURRENT_USER}:${PRIMARY_GROUP}" ./results

# Remove bulky intermediate artifacts to save disk space.
sudo rm -rf ./results/workspace
sudo rm -f ./results/yardstick.img
sudo rm -f ./results/bottlenecks/tmp*

echo "Dovetail: done!"
401