Merge "Remove References to Securedlab"
[releng.git] / jjb / dovetail / dovetail-run.sh
#!/bin/bash
##############################################################################
# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

# INSTALLER refers to the OPNFV installer used by the community;
# this job script is an example runner and supports multiple installer platforms.

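# The variables below are expected to be injected by the Jenkins job
# (values shown are only illustrative, not defaults set by this script):
#   INSTALLER_TYPE=fuel        # apex, compass, fuel, joid or daisy
#   INSTALLER_IP=10.20.0.2
#   DEPLOY_SCENARIO=os-nosdn-nofeature-ha
#   BUILD_TAG=jenkins-dovetail-fuel-baremetal-...
#   CI_DEBUG=true
#   DOCKER_TAG=latest
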
set -e
[[ $CI_DEBUG == true ]] && redirect="/dev/stdout" || redirect="/dev/null"

DEPLOY_TYPE=baremetal
[[ $BUILD_TAG =~ "virtual" ]] && DEPLOY_TYPE=virt

DOVETAIL_HOME=${WORKSPACE}/ovp
[ -d ${DOVETAIL_HOME} ] && sudo rm -rf ${DOVETAIL_HOME}

mkdir -p ${DOVETAIL_HOME}

DOVETAIL_CONFIG=${DOVETAIL_HOME}/pre_config
mkdir -p ${DOVETAIL_CONFIG}

DOVETAIL_IMAGES=${DOVETAIL_HOME}/images
mkdir -p ${DOVETAIL_IMAGES}

OPENRC=${DOVETAIL_CONFIG}/env_config.sh
CACERT=${DOVETAIL_CONFIG}/os_cacert
POD=${DOVETAIL_CONFIG}/pod.yaml

ssh_options="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"

sshkey=""

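# check_file_exists echoes 0 when the given file exists and 1 otherwise;
# callers capture the output instead of using the return code, e.g.
# (illustrative, matching the usage further below):
#   exists=$(check_file_exists ${OPENRC})
#   [[ $exists == 0 ]] && echo "file is there"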
check_file_exists() {
    if [[ -f $1 ]]; then
        echo 0
    else
        echo 1
    fi
}

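# get_cred_file_with_scripts clones releng and pharos, then fetches the
# OpenStack credentials with releng's fetch_os_creds.sh. With illustrative
# values (INSTALLER_TYPE=fuel, INSTALLER_IP=10.20.0.2) the resolved command
# would look roughly like:
#   releng/utils/fetch_os_creds.sh -d ${DOVETAIL_HOME}/pre_config/env_config.sh \
#       -i fuel -a 10.20.0.2 -o ${DOVETAIL_HOME}/pre_config/os_cacert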
get_cred_file_with_scripts() {
    echo "INFO: clone releng repo..."
    releng_repo=${WORKSPACE}/releng
    [ -d ${releng_repo} ] && sudo rm -rf ${releng_repo}
    git clone https://gerrit.opnfv.org/gerrit/releng ${releng_repo} >/dev/null

    echo "INFO: clone pharos repo..."
    pharos_repo=${WORKSPACE}/pharos
    [ -d ${pharos_repo} ] && sudo rm -rf ${pharos_repo}
    git clone https://git.opnfv.org/pharos ${pharos_repo} >/dev/null

    echo "INFO: SUT branch is $SUT_BRANCH"
    echo "INFO: dovetail branch is $BRANCH"
    BRANCH_BACKUP=$BRANCH
    export BRANCH=$SUT_BRANCH
    cmd="${releng_repo}/utils/fetch_os_creds.sh -d ${OPENRC} -i ${INSTALLER_TYPE} -a ${INSTALLER_IP} -o ${CACERT} >${redirect}"
    echo "INFO: cmd is ${cmd}"
    ${cmd}
    export BRANCH=$BRANCH_BACKUP
}

get_apex_cred_file() {
    instack_mac=$(sudo virsh domiflist undercloud | grep default | \
                  grep -Eo "[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+")
    INSTALLER_IP=$(/usr/sbin/arp -e | grep ${instack_mac} | awk '{print $1}')
    sshkey="-v /root/.ssh/id_rsa:/root/.ssh/id_rsa"
    if [[ -n $(sudo iptables -L FORWARD | grep "REJECT" | grep "reject-with icmp-port-unreachable") ]]; then
        # Note: this happens only on opnfv-lf-pod1
        sudo iptables -D FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable
        sudo iptables -D FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable
    fi
    get_cred_file_with_scripts
}

get_compass_cred_file() {
    get_cred_file_with_scripts
}

get_fuel_cred_file() {
    get_cred_file_with_scripts
}

get_joid_cred_file() {
    # In a production lab the credentials may be retrieved dynamically;
    # they are always in the same folder on the jumphost.
    sudo cp $LAB_CONFIG/admin-openrc $OPENRC
}

change_cred_file_cacert_path() {
    if [[ ${INSTALLER_TYPE} == "apex" ]]; then
        echo "INFO: apex doesn't need to set OS_CACERT."
        return 0
    fi
    exists=$(check_file_exists ${CACERT})
    if [[ $exists == 0 ]]; then
        echo "INFO: set ${INSTALLER_TYPE} openstack cacert file to be ${CACERT}"
        if [[ ${INSTALLER_TYPE} == "compass" ]]; then
            echo "export OS_CACERT=${CACERT}" >> ${OPENRC}
        elif [[ ${INSTALLER_TYPE} == "fuel" ]]; then
            sed -i "s#/etc/ssl/certs/mcp_os_cacert#${CACERT}#g" ${OPENRC}
        fi
    else
        echo "ERROR: cannot find file ${CACERT}. Please check if it exists."
        sudo ls -al ${DOVETAIL_CONFIG}
        exit 1
    fi
}

change_cred_file_ext_net() {
    exists=$(check_file_exists ${OPENRC})
    if [[ $exists == 0 ]]; then
        echo "export EXTERNAL_NETWORK=${EXTERNAL_NETWORK}" >> ${OPENRC}
    else
        echo "ERROR: cannot find file $OPENRC. Please check if it exists."
        sudo ls -al ${DOVETAIL_CONFIG}
        exit 1
    fi
}

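# After the adjustments above, env_config.sh is expected to end with lines
# similar to the following (illustrative only; the rest of the file is
# whatever fetch_os_creds.sh produced for the SUT):
#   export OS_CACERT=${DOVETAIL_HOME}/pre_config/os_cacert
#   export EXTERNAL_NETWORK=ext-net
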
get_cred_file() {
    if [[ ${INSTALLER_TYPE} == 'apex' ]]; then
        get_apex_cred_file
    elif [[ ${INSTALLER_TYPE} == 'compass' ]]; then
        get_compass_cred_file
    elif [[ ${INSTALLER_TYPE} == 'fuel' ]]; then
        get_fuel_cred_file
    elif [[ ${INSTALLER_TYPE} == 'joid' ]]; then
        get_joid_cred_file
    fi

    exists=$(check_file_exists ${OPENRC})
    if [[ $exists == 0 ]]; then
        echo "INFO: original openstack credentials file is:"
        cat $OPENRC
        echo "INFO: change cacert file path in credentials file"
        change_cred_file_cacert_path
        echo "INFO: set external network in credentials file"
        change_cred_file_ext_net
        echo "INFO: final openstack credentials file is:"
        cat $OPENRC
    else
        echo "ERROR: cannot find file $OPENRC. Please check if it exists."
        sudo ls -al ${DOVETAIL_CONFIG}
        exit 1
    fi
}

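# The functions below generate ${POD} (pod.yaml), which describes the
# jumpserver, controller and compute nodes of the SUT. Dovetail needs it for
# the yardstick HA test cases (see the process_info sections further down);
# without it those cases may not run properly. A node entry follows the
# format used in the heredocs below, e.g. (illustrative):
#   - {ip: 10.1.0.50, name: node1, user: root, password: root, role: controller}
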
get_compass_pod_file() {
    compass_repo=${WORKSPACE}/compass4nfv/
    echo "INFO: clone compass repo..."
    git clone https://github.com/opnfv/compass4nfv.git ${compass_repo} >/dev/null
    scenario_file=${compass_repo}/deploy/conf/hardware_environment/$NODE_NAME/os-nosdn-nofeature-ha.yml
    ipmiIp=$(cat ${scenario_file} | shyaml get-value hosts.0.ipmiIp)
    ipmiPass=$(cat ${scenario_file} | shyaml get-value hosts.0.ipmiPass)
    ipmiUser=root
    jumpserver_ip=$(ifconfig | grep -A 5 docker0 | grep "inet addr" | cut -d ':' -f 2 | cut -d ' ' -f 1)

    cat << EOF >${POD}
nodes:
- {ip: ${jumpserver_ip}, name: node0, password: root, role: Jumpserver, user: root}
- {ip: 10.1.0.50, name: node1, password: root, role: controller, user: root,
   ipmi_ip: ${ipmiIp}, ipmi_user: ${ipmiUser}, ipmi_password: ${ipmiPass}}
- {ip: 10.1.0.51, name: node2, password: root, role: controller, user: root}
- {ip: 10.1.0.52, name: node3, password: root, role: controller, user: root}
- {ip: 10.1.0.53, name: node4, password: root, role: compute, user: root}
- {ip: 10.1.0.54, name: node5, password: root, role: compute, user: root}

EOF

}

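# For fuel baremetal deployments the node IPs are resolved via salt pillar
# queries on the Salt master (cfg*, cmp001*, dbs01*, msg01*), and the IPMI
# address/user/password of the attacked controller are read from the pharos
# pod descriptor file labs/<organization>/<pod>.yaml; eyaml-encrypted values
# (ENC[...]) are decrypted with hiera-eyaml.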
get_fuel_baremetal_pod_file() {
    fuel_ctl_ssh_options="${ssh_options} -i ${SSH_KEY}"
    ssh_user="ubuntu"
    fuel_ctl_ip=$(ssh 2>/dev/null ${fuel_ctl_ssh_options} "${ssh_user}@${INSTALLER_IP}" \
            "sudo salt 'cfg*' pillar.get _param:openstack_control_address --out text| \
                cut -f2 -d' '")
    fuel_cmp_ip=$(ssh 2>/dev/null ${fuel_ctl_ssh_options} "${ssh_user}@${INSTALLER_IP}" \
            "sudo salt 'cmp001*' pillar.get _param:openstack_control_address --out text| \
                cut -f2 -d' '")
    fuel_dbs_ip=$(ssh 2>/dev/null ${fuel_ctl_ssh_options} "${ssh_user}@${INSTALLER_IP}" \
            "sudo salt 'dbs01*' pillar.get _param:openstack_database_node01_address --out text| \
                cut -f2 -d' '")
    fuel_msg_ip=$(ssh 2>/dev/null ${fuel_ctl_ssh_options} "${ssh_user}@${INSTALLER_IP}" \
            "sudo salt 'msg01*' pillar.get _param:openstack_message_queue_node01_address --out text| \
                cut -f2 -d' '")
    ipmi_index=$(ssh 2>/dev/null ${fuel_ctl_ssh_options} "${ssh_user}@${INSTALLER_IP}" \
            "sudo salt 'ctl*' network.ip_addrs cidr=${fuel_ctl_ip} --out text | grep ${fuel_ctl_ip} | cut -c 5")

    organization="$(cut -d'-' -f1 <<< "${NODE_NAME}")"
    pod_name="$(cut -d'-' -f2 <<< "${NODE_NAME}")"
    pdf_file=${pharos_repo}/labs/${organization}/${pod_name}.yaml
    ipmiIp=$(cat ${pdf_file} | shyaml get-value nodes.$((ipmi_index-1)).remote_management.address)
    ipmiIp="$(cut -d'/' -f1 <<< "${ipmiIp}")"
    ipmiPass=$(cat ${pdf_file} | shyaml get-value nodes.$((ipmi_index-1)).remote_management.pass)
    ipmiUser=$(cat ${pdf_file} | shyaml get-value nodes.$((ipmi_index-1)).remote_management.user)
    [[ $ipmiUser == ENC* ]] && ipmiUser=$(eyaml decrypt -s ${ipmiUser//[[:blank:]]/})
    [[ $ipmiPass == ENC* ]] && ipmiPass=$(eyaml decrypt -s ${ipmiPass//[[:blank:]]/})

    cat << EOF >${POD}
nodes:
- {ip: ${INSTALLER_IP}, name: node0, key_filename: /home/opnfv/userconfig/pre_config/id_rsa,
   role: Jumpserver, user: ${ssh_user}}
- {ip: ${fuel_ctl_ip}, name: node1, key_filename: /home/opnfv/userconfig/pre_config/id_rsa,
   role: controller, user: ${ssh_user}, ipmi_ip: ${ipmiIp}, ipmi_user: ${ipmiUser}, ipmi_password: ${ipmiPass}}
- {ip: ${fuel_msg_ip}, name: msg01, key_filename: /home/opnfv/userconfig/pre_config/id_rsa, role: controller, user: ${ssh_user}}
- {ip: ${fuel_cmp_ip}, name: cmp01, key_filename: /home/opnfv/userconfig/pre_config/id_rsa, role: controller, user: ${ssh_user}}
- {ip: ${fuel_dbs_ip}, name: dbs01, key_filename: /home/opnfv/userconfig/pre_config/id_rsa, role: controller, user: ${ssh_user}}
EOF
}

get_pod_file_with_scripts() {
    set +e
    sudo pip install virtualenv

    cd ${releng_repo}/modules
    sudo virtualenv venv
    source venv/bin/activate
    sudo pip install -e ./ >/dev/null
    sudo pip install netaddr

    if [[ ${INSTALLER_TYPE} == compass ]]; then
        options="-u root -p root"
    elif [[ ${INSTALLER_TYPE} == fuel ]]; then
        options="-u ubuntu -k /root/.ssh/id_rsa"
    elif [[ ${INSTALLER_TYPE} == apex ]]; then
        options="-u stack -k /root/.ssh/id_rsa"
    elif [[ ${INSTALLER_TYPE} == daisy ]]; then
        options="-u root -p r00tme"
    else
        echo "WARNING: Generating ${POD} is not supported for ${INSTALLER_TYPE} currently."
        echo "WARNING: HA test cases may not run properly."
    fi

    cmd="sudo python ${releng_repo}/utils/create_pod_file.py -t ${INSTALLER_TYPE} \
         -i ${INSTALLER_IP} ${options} -f ${POD} \
         -s /home/opnfv/userconfig/pre_config/id_rsa"
    echo "INFO: cmd is ${cmd}"
    ${cmd}

    deactivate
    set -e
    cd ${WORKSPACE}
}

change_apex_pod_file_process_info() {
    cat << EOF >> ${POD}
process_info:
- {testcase_name: yardstick.ha.rabbitmq, attack_process: rabbitmq_server}
- {testcase_name: yardstick.ha.cinder_api, attack_process: cinder_wsgi}
EOF
}

change_fuel_pod_file_process_info() {
    cat << EOF >> ${POD}
process_info:
- {testcase_name: yardstick.ha.cinder_api, attack_process: cinder-wsgi}
- {testcase_name: yardstick.ha.rabbitmq, attack_process: rabbitmq_server, attack_host: msg01}
- {testcase_name: yardstick.ha.neutron_l3_agent, attack_process: neutron-l3-agent, attack_host: cmp01}
- {testcase_name: yardstick.ha.database, attack_process: mysqld, attack_host: dbs01}
EOF
}

change_compass_pod_file_process_info() {
    cat << EOF >> ${POD}
process_info:
- {testcase_name: yardstick.ha.rabbitmq, attack_process: rabbitmq}
EOF
}

change_pod_file_process_info() {
    sudo chmod 666 ${POD}
    echo "INFO: adapt process info for $INSTALLER_TYPE ..."
    if [ "$INSTALLER_TYPE" == "apex" ]; then
        change_apex_pod_file_process_info
    elif [ "$INSTALLER_TYPE" == "fuel" ]; then
        change_fuel_pod_file_process_info
    elif [ "$INSTALLER_TYPE" == "compass" ]; then
        change_compass_pod_file_process_info
    fi
}

get_pod_file() {
    # These packages are used for parsing yaml files and decrypting ipmi user and password.
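    # Illustrative usage, matching the calls in the pod-file helpers above
    # (the ENC[...] string is a placeholder for a hiera-eyaml encrypted value):
    #   cat ${scenario_file} | shyaml get-value hosts.0.ipmiIp
    #   eyaml decrypt -s 'ENC[PKCS7,...]'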
    sudo pip install shyaml
    sudo yum install -y rubygems || sudo apt-get install -y ruby
    sudo gem install hiera-eyaml
    if [[ ${INSTALLER_TYPE} == 'compass' ]]; then
        get_compass_pod_file
    elif [[ ${INSTALLER_TYPE} == 'fuel' && ${DEPLOY_TYPE} == 'baremetal' ]]; then
        get_fuel_baremetal_pod_file
    fi

    exists=$(check_file_exists ${POD})
    if [[ $exists == 1 ]]; then
        get_pod_file_with_scripts
    fi

    exists=$(check_file_exists ${POD})
    if [[ $exists == 0 ]]; then
        change_pod_file_process_info
    else
        echo "ERROR: cannot find file ${POD}. Please check if it exists."
        sudo ls -al ${DOVETAIL_CONFIG}
        exit 1
    fi

    echo "INFO: file ${POD} is:"
    cat ${POD}
}

get_cred_file
get_pod_file

if [ "$INSTALLER_TYPE" == "fuel" ]; then
    if [[ "${SUT_BRANCH}" =~ "danube" ]]; then
        echo "Fetching id_rsa file from jump_server $INSTALLER_IP..."
        sshpass -p r00tme sudo scp $ssh_options root@${INSTALLER_IP}:~/.ssh/id_rsa ${DOVETAIL_CONFIG}/id_rsa
    else
        cp ${SSH_KEY} ${DOVETAIL_CONFIG}/id_rsa
    fi
fi

if [ "$INSTALLER_TYPE" == "apex" ]; then
    echo "Fetching id_rsa file from jump_server $INSTALLER_IP..."
    sudo scp $ssh_options stack@${INSTALLER_IP}:~/.ssh/id_rsa ${DOVETAIL_CONFIG}/id_rsa
fi

if [ "$INSTALLER_TYPE" == "daisy" ]; then
    echo "Fetching id_dsa file from jump_server $INSTALLER_IP..."
    sshpass -p r00tme sudo scp $ssh_options root@${INSTALLER_IP}:~/.ssh/id_dsa ${DOVETAIL_CONFIG}/id_rsa
fi

image_path=${HOME}/opnfv/dovetail/images
if [[ ! -d ${image_path} ]]; then
    mkdir -p ${image_path}
fi
# The sdnvpn test case needs this image to be downloaded before running.
ubuntu_image=${image_path}/ubuntu-16.04-server-cloudimg-amd64-disk1.img
if [[ ! -f ${ubuntu_image} ]]; then
    echo "Download image ubuntu-16.04-server-cloudimg-amd64-disk1.img ..."
    wget -q -nc https://artifacts.opnfv.org/sdnvpn/ubuntu-16.04-server-cloudimg-amd64-disk1.img -P ${image_path}
fi
sudo cp ${ubuntu_image} ${DOVETAIL_IMAGES}

# The yardstick and bottlenecks test cases need this image to be downloaded before running.
cirros_image=${image_path}/cirros-0.3.5-x86_64-disk.img
if [[ ! -f ${cirros_image} ]]; then
    echo "Download image cirros-0.3.5-x86_64-disk.img ..."
    wget -q -nc http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img -P ${image_path}
fi
sudo cp ${cirros_image} ${DOVETAIL_IMAGES}

# functest needs this image to be downloaded before running.
cirros_image=${image_path}/cirros-0.4.0-x86_64-disk.img
if [[ ! -f ${cirros_image} ]]; then
    echo "Download image cirros-0.4.0-x86_64-disk.img ..."
    wget -q -nc http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img -P ${image_path}
fi
sudo cp ${cirros_image} ${DOVETAIL_IMAGES}

# The snaps_smoke test case needs this image to be downloaded before running.
ubuntu14_image=${image_path}/ubuntu-14.04-server-cloudimg-amd64-disk1.img
if [[ ! -f ${ubuntu14_image} ]]; then
    echo "Download image ubuntu-14.04-server-cloudimg-amd64-disk1.img ..."
    wget -q -nc https://cloud-images.ubuntu.com/releases/14.04/release/ubuntu-14.04-server-cloudimg-amd64-disk1.img -P ${image_path}
fi
sudo cp ${ubuntu14_image} ${DOVETAIL_IMAGES}

# The cloudify_ims test case needs these two images to be downloaded before running.
cloudify_image=${image_path}/cloudify-manager-premium-4.0.1.qcow2
if [[ ! -f ${cloudify_image} ]]; then
    echo "Download image cloudify-manager-premium-4.0.1.qcow2 ..."
    wget -q -nc http://repository.cloudifysource.org/cloudify/4.0.1/sp-release/cloudify-manager-premium-4.0.1.qcow2 -P ${image_path}
fi
sudo cp ${cloudify_image} ${DOVETAIL_IMAGES}
trusty_image=${image_path}/trusty-server-cloudimg-amd64-disk1.img
if [[ ! -f ${trusty_image} ]]; then
    echo "Download image trusty-server-cloudimg-amd64-disk1.img ..."
    wget -q -nc http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img -P ${image_path}
fi
sudo cp ${trusty_image} ${DOVETAIL_IMAGES}

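# The download-and-copy blocks above all follow the same pattern; a minimal
# helper that could replace them is sketched here for reference only
# (hypothetical, kept commented out so the job's behaviour is unchanged):
#   fetch_image() {
#       local url=$1
#       local image=${image_path}/$(basename ${url})
#       if [[ ! -f ${image} ]]; then
#           echo "Download image $(basename ${url}) ..."
#           wget -q -nc ${url} -P ${image_path}
#       fi
#       sudo cp ${image} ${DOVETAIL_IMAGES}
#   }
#   # e.g. fetch_image http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img
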
opts="--privileged=true -id"

docker_volume="-v /var/run/docker.sock:/var/run/docker.sock"
dovetail_home_volume="-v ${DOVETAIL_HOME}:${DOVETAIL_HOME}"

# Pull the image with the correct tag
DOCKER_REPO='opnfv/dovetail'
if [ "$(uname -m)" = 'aarch64' ]; then
    DOCKER_REPO="${DOCKER_REPO}_$(uname -m)"
    DOCKER_TAG="latest"
fi

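# With illustrative values the docker run command assembled below expands to
# something like (not a literal default):
#   docker run --privileged=true -id -e DOVETAIL_HOME=/home/jenkins/workspace/<job>/ovp \
#       -v /var/run/docker.sock:/var/run/docker.sock \
#       -v /home/jenkins/workspace/<job>/ovp:/home/jenkins/workspace/<job>/ovp \
#       opnfv/dovetail:latest /bin/bash
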
echo "Dovetail: Pulling image ${DOCKER_REPO}:${DOCKER_TAG}"
docker pull ${DOCKER_REPO}:$DOCKER_TAG >$redirect

cmd="docker run ${opts} -e DOVETAIL_HOME=${DOVETAIL_HOME} ${docker_volume} ${dovetail_home_volume} \
     ${sshkey} ${DOCKER_REPO}:${DOCKER_TAG} /bin/bash"
echo "Dovetail: running docker run command: ${cmd}"
${cmd} >${redirect}
sleep 5
container_id=$(docker ps | grep "${DOCKER_REPO}:${DOCKER_TAG}" | awk '{print $1}' | head -1)
echo "Container ID=${container_id}"
if [ -z "${container_id}" ]; then
    echo "Cannot find a running ${DOCKER_REPO}:${DOCKER_TAG} container. Please check if it exists."
    docker ps -a
    exit 1
fi
echo "Container Start: docker start ${container_id}"
docker start ${container_id}
sleep 5
docker ps >${redirect}
if [ $(docker ps | grep "${DOCKER_REPO}:${DOCKER_TAG}" | wc -l) -eq 0 ]; then
    echo "The container ${DOCKER_REPO} with ID=${container_id} has not been properly started. Exiting..."
    exit 1
fi

# Modify tempest_conf.yaml file
tempest_conf_file=${DOVETAIL_CONFIG}/tempest_conf.yaml
if [[ ${INSTALLER_TYPE} == 'compass' || ${INSTALLER_TYPE} == 'apex' ]]; then
    volume_device='vdb'
else
    volume_device='vdc'
fi

cat << EOF >$tempest_conf_file

compute:
    min_compute_nodes: 2
    volume_device_name: ${volume_device}

EOF

echo "${tempest_conf_file}..."
cat ${tempest_conf_file}

cp_tempest_cmd="docker cp ${DOVETAIL_CONFIG}/tempest_conf.yaml $container_id:/home/opnfv/dovetail/dovetail/userconfig"
echo "exec command: ${cp_tempest_cmd}"
$cp_tempest_cmd

if [[ ${TESTSUITE} == 'default' ]]; then
    testsuite=''
else
    testsuite="--testsuite ${TESTSUITE}"
fi

if [[ ${TESTAREA} == 'mandatory' ]]; then
    testarea='--mandatory'
elif [[ ${TESTAREA} == 'optional' ]]; then
    testarea="--optional"
elif [[ ${TESTAREA} == 'all' ]]; then
    testarea=""
else
    testarea="--testarea ${TESTAREA}"
fi

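# With TESTSUITE=default and TESTAREA=mandatory the command built below would
# be, for example (scenario value is illustrative):
#   dovetail run --mandatory --deploy-scenario os-nosdn-nofeature-ha -d -r
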
run_cmd="dovetail run ${testsuite} ${testarea} --deploy-scenario ${DEPLOY_SCENARIO} -d -r"
echo "Container exec command: ${run_cmd}"
docker exec $container_id ${run_cmd}

sudo cp -r ${DOVETAIL_HOME}/results ./
result_package=$(find ${DOVETAIL_HOME} -name 'logs_*')
echo "Results package is ${result_package}"
for item in ${result_package}; do
    sudo mv ${item} ./results
done

# Make sure the current user owns the result files copied above.
echo "Change owner of result files ..."
CURRENT_USER=${SUDO_USER:-$USER}
PRIMARY_GROUP=$(id -gn $CURRENT_USER)
echo "Current user is ${CURRENT_USER}, group is ${PRIMARY_GROUP}"
sudo chown -R ${CURRENT_USER}:${PRIMARY_GROUP} ./results

# Remove unneeded files to save disk space.
sudo rm -rf ./results/workspace
sudo rm -f ./results/yardstick.img
sudo rm -f ./results/bottlenecks/tmp*

echo "Dovetail: done!"