fix some bugs for python verify jobs:
[doctor.git] / tests / run.sh
#!/bin/bash -e
##############################################################################
# Copyright (c) 2016 NEC Corporation and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

# Configuration

[[ "${CI_DEBUG:-true}" == [Tt]rue ]] && set -x

IMAGE_URL=https://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img
# if an existing image name is provided in the environment, use that one
IMAGE_NAME=${IMAGE_NAME:-cirros}
IMAGE_FILE="${IMAGE_NAME}.img"
IMAGE_FORMAT=qcow2
VM_BASENAME=doctor_vm
VM_FLAVOR=m1.tiny
# if VM_COUNT is set, use that instead
VM_COUNT=${VM_COUNT:-1}
NET_NAME=doctor_net
NET_CIDR=192.168.168.0/24
ALARM_BASENAME=doctor_alarm
CONSUMER_PORT=12346
DOCTOR_USER=doctor
DOCTOR_PW=doctor
DOCTOR_PROJECT=doctor
DOCTOR_ROLE=_member_
PROFILER_TYPE=${PROFILER_TYPE:-none}
PYTHON_ENABLE=${PYTHON_ENABLE:-false}

TOP_DIR=$(cd $(dirname "$0") && pwd)

as_doctor_user="--os-username $DOCTOR_USER --os-password $DOCTOR_PW
                --os-project-name $DOCTOR_PROJECT --os-tenant-name $DOCTOR_PROJECT"
# NOTE: the ceilometer command still requires '--os-tenant-name'.
#ceilometer="ceilometer ${as_doctor_user/--os-project-name/--os-tenant-name}"
ceilometer="ceilometer $as_doctor_user"
as_admin_user="--os-username admin --os-project-name $DOCTOR_PROJECT
               --os-tenant-name $DOCTOR_PROJECT"


# Functions

get_compute_host_info() {
    # get the compute host the first VM was booted on, as admin user
    COMPUTE_HOST=$(openstack $as_admin_user server show ${VM_BASENAME}1 |
                   grep "OS-EXT-SRV-ATTR:host" | awk '{ print $4 }')
    die_if_not_set $LINENO COMPUTE_HOST "Failed to get compute hostname"
    compute_host_in_undercloud=${COMPUTE_HOST%%.*}

    get_compute_ip_from_hostname $COMPUTE_HOST

    echo "COMPUTE_HOST=$COMPUTE_HOST"
    echo "COMPUTE_IP=$COMPUTE_IP"

    # verify connectivity to target compute host
    # (the script runs with 'set -e', so test the command directly
    # instead of checking $? afterwards)
    if ! ping -c 1 "$COMPUTE_IP"; then
        die $LINENO "Cannot ping compute host"
    fi

    # verify ssh to target compute host
    if ! ssh $ssh_opts_cpu "$COMPUTE_USER@$COMPUTE_IP" 'exit'; then
        die $LINENO "Cannot ssh to compute host"
    fi
}

# TODO(r-mibu): update this function to support consumer instance
#               and migrate this function into installer lib
get_consumer_ip___to_be_removed() {
    local get_consumer_command="ip route get $COMPUTE_IP | awk '/ src /{print \$NF}'"
    if is_installer apex; then
        CONSUMER_IP=$(sudo ssh $ssh_opts root@$INSTALLER_IP \
                      "$get_consumer_command")
    elif is_installer fuel; then
        CONSUMER_IP=$(sudo sshpass -p r00tme ssh $ssh_opts root@${INSTALLER_IP} \
                      "$get_consumer_command")
    elif is_installer local; then
        # the command string contains a pipe, so it must go through eval
        CONSUMER_IP=$(eval "$get_consumer_command")
    fi
    echo "CONSUMER_IP=$CONSUMER_IP"

    die_if_not_set $LINENO CONSUMER_IP "Could not get CONSUMER_IP."
}

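# Fetch the test image unless it is already registered in Glance or
# already present on disk.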
download_image() {
    # if a different image name was provided in the environment,
    # there is no need to download the image
    use_existing_image=false
    openstack image list | grep -q " $IMAGE_NAME " && use_existing_image=true

    if [[ "$use_existing_image" == false ]] ; then
        [ -e "$IMAGE_FILE" ] && return 0
        # NOTE: wget needs capital -O to write the download to the file
        # ('-o' only redirects the log output)
        wget "$IMAGE_URL" -O "$IMAGE_FILE"
    fi
}

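# Upload the image to Glance if it is not registered yet.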
register_image() {
    openstack image list | grep -q " $IMAGE_NAME " && return 0
    openstack image create "$IMAGE_NAME" \
                           --public \
                           --disk-format "$IMAGE_FORMAT" \
                           --container-format bare \
                           --file "$IMAGE_FILE"
}

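# Ensure the doctor project, user and role exist, and that the project
# quota allows VM_COUNT instances and cores.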
create_test_user() {
    openstack project list | grep -q " $DOCTOR_PROJECT " || {
        openstack project create --description "Doctor Project" \
                                 "$DOCTOR_PROJECT"
    }
    openstack user list | grep -q " $DOCTOR_USER " || {
        openstack user create "$DOCTOR_USER" --password "$DOCTOR_PW" \
                              --project "$DOCTOR_PROJECT"
    }
    openstack role list | grep -q " $DOCTOR_ROLE " || {
        openstack role create "$DOCTOR_ROLE"
    }
    openstack role assignment list --user "$DOCTOR_USER" \
    --project "$DOCTOR_PROJECT" --names | grep -q " $DOCTOR_ROLE " || {
        openstack role add "$DOCTOR_ROLE" --user "$DOCTOR_USER" \
                           --project "$DOCTOR_PROJECT"
    }
    openstack role assignment list --user admin --project "$DOCTOR_PROJECT" \
    --names | grep -q " admin " || {
        openstack role add admin --user admin --project "$DOCTOR_PROJECT"
    }
    # tojuvone: 'openstack quota show' is broken, so we have to use nova
    # https://bugs.launchpad.net/manila/+bug/1652118
    # Note: while it is encouraged to use the openstack client, it has
    # proven quite buggy.
    # QUOTA=$(openstack quota show $DOCTOR_PROJECT)
    DOCTOR_QUOTA=$(nova quota-show --tenant $DOCTOR_PROJECT)
    # make sure the quota allows the requested number of instances and cores
    OLD_INSTANCE_QUOTA=$(echo "${DOCTOR_QUOTA}" | grep " instances " | \
                         awk '{print $4}')
    if [ "$OLD_INSTANCE_QUOTA" -lt "$VM_COUNT" ]; then
        openstack quota set --instances "$VM_COUNT" \
                  "$DOCTOR_PROJECT"
    fi
    OLD_CORES_QUOTA=$(echo "${DOCTOR_QUOTA}" | grep " cores " | \
                      awk '{print $4}')
    if [ "$OLD_CORES_QUOTA" -lt "$VM_COUNT" ]; then
        openstack quota set --cores "$VM_COUNT" \
                  "$DOCTOR_PROJECT"
    fi
}

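# Reverse of create_test_user: drop the role assignments, user and
# project if they are present.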
remove_test_user() {
    openstack project list | grep -q " $DOCTOR_PROJECT " && {
        openstack role assignment list --user admin \
        --project "$DOCTOR_PROJECT" --names | grep -q " admin " && {
            openstack role remove admin --user admin --project "$DOCTOR_PROJECT"
        }
        openstack user list | grep -q " $DOCTOR_USER " && {
            openstack role assignment list --user "$DOCTOR_USER" \
            --project "$DOCTOR_PROJECT" --names | grep -q " $DOCTOR_ROLE " && {
                openstack role remove "$DOCTOR_ROLE" --user "$DOCTOR_USER" \
                --project "$DOCTOR_PROJECT"
            }
            openstack user delete "$DOCTOR_USER"
        }
        openstack project delete "$DOCTOR_PROJECT"
    }
}

boot_vm() {
    # test VMs are booted as the test user, so non-admin operation is covered

    if ! openstack $as_doctor_user network show $NET_NAME; then
        openstack $as_doctor_user network create $NET_NAME
    fi
    if ! openstack $as_doctor_user subnet show $NET_NAME; then
        openstack $as_doctor_user subnet create $NET_NAME \
            --network $NET_NAME --subnet-range $NET_CIDR --no-dhcp
    fi
    net_id=$(openstack $as_doctor_user network show $NET_NAME -f value -c id)

    servers=$(openstack $as_doctor_user server list)
    for i in $(seq $VM_COUNT); do
        echo "${servers}" | grep -q " $VM_BASENAME$i " && continue
        openstack $as_doctor_user server create --flavor "$VM_FLAVOR" \
            --image "$IMAGE_NAME" --nic net-id=$net_id "$VM_BASENAME$i"
    done
    sleep 1
}

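# Create one event alarm per VM that notifies the consumer endpoint
# when the VM goes into error state.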
create_alarm() {
    # get vm_id as test user
    alarm_list=$($ceilometer alarm-list)
    vms=$(openstack $as_doctor_user server list)
    for i in $(seq $VM_COUNT); do
        echo "${alarm_list}" | grep -q " $ALARM_BASENAME$i " || {
            vm_id=$(echo "${vms}" | grep " $VM_BASENAME$i " | awk '{print $2}')
            # TODO(r-mibu): change notification endpoint from localhost to
            # the consumer IP address (functest container)
            $ceilometer alarm-event-create \
                        --name "$ALARM_BASENAME$i" \
                        --alarm-action "http://localhost:$CONSUMER_PORT/failure" \
                        --description "VM failure" \
                        --enabled True \
                        --repeat-actions False \
                        --severity "moderate" \
                        --event-type compute.instance.update \
                        -q "traits.state=string::error; \
                            traits.instance_id=string::$vm_id"
        }
    done
}

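# Start the sample monitor against the target compute host unless one
# is already running; output goes to monitor.log.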
start_monitor() {
    pgrep -f "python monitor.py" && return 0
    sudo -E python monitor.py "$COMPUTE_HOST" "$COMPUTE_IP" "$INSPECTOR_TYPE" \
        > monitor.log 2>&1 &
}

stop_monitor() {
    pgrep -f "python monitor.py" || return 0
    sudo kill $(pgrep -f "python monitor.py")
}

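# Start the sample consumer and reverse-forward its port from the
# controller nodes over ssh.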
start_consumer() {
    pgrep -f "python consumer.py" && return 0
    python consumer.py "$CONSUMER_PORT" > consumer.log 2>&1 &

    # NOTE(r-mibu): create a tunnel to the controller nodes, so that we can
    # avoid network problems that depend on infra and installers.
    # This tunnel will be terminated by stop_consumer() or after 10 minutes.
    if ! is_installer local; then
        for ip in $CONTROLLER_IPS
        do
            forward_rule="-R $CONSUMER_PORT:localhost:$CONSUMER_PORT"
            tunnel_command="sudo ssh $ssh_opts_cpu $COMPUTE_USER@$ip $forward_rule sleep 600"
            $tunnel_command > "ssh_tunnel.${ip}.log" 2>&1 < /dev/null &
        done
    fi
}

stop_consumer() {
    pgrep -f "python consumer.py" || return 0
    kill $(pgrep -f "python consumer.py")

    # NOTE(r-mibu): terminate tunnels to the controller nodes
    if ! is_installer local; then
        for ip in $CONTROLLER_IPS
        do
            forward_rule="-R $CONSUMER_PORT:localhost:$CONSUMER_PORT"
            tunnel_command="sudo ssh $ssh_opts_cpu $COMPUTE_USER@$ip $forward_rule sleep 600"
            kill $(pgrep -f "$tunnel_command")
        done
    fi
}

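# Poll until all test VMs are ACTIVE, then record the compute host
# under test and how many VMs are running on it.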
wait_for_vm_launch() {
    echo "waiting for vm launch..."

    count=0
    while [[ ${count} -lt 60 ]]
    do
        active_count=0
        vms=$(openstack $as_doctor_user server list)
        for i in $(seq $VM_COUNT); do
            state=$(echo "${vms}" | grep " $VM_BASENAME$i " | awk '{print $6}')
            if [[ "$state" == "ACTIVE" ]]; then
                active_count=$(($active_count+1))
            elif [[ "$state" == "ERROR" ]]; then
                die $LINENO "VM $VM_BASENAME$i is in ERROR state"
            else
                # this VM is not yet active
                count=$(($count+1))
                sleep 5
                continue
            fi
        done
        [[ $active_count -eq $VM_COUNT ]] && {
            echo "get compute host info..."
            get_compute_host_info
            VMS_ON_FAILED_HOST=$(openstack $as_doctor_user server list --host \
                         $COMPUTE_HOST | grep " ${VM_BASENAME}" | wc -l)
            return 0
        }
        # not all VMs are active yet
        count=$(($count+1))
        sleep 5
    done
    die $LINENO "Time out while waiting for VM launch"
}

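# Bring the compute host's data interface down for 3 minutes via a
# script pushed to the host over ssh.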
inject_failure() {
    echo "disabling network of compute host [$COMPUTE_HOST] for 3 mins..."
    cat > disable_network.sh << 'END_TXT'
#!/bin/bash -x
dev=$(sudo ip a | awk '/ @COMPUTE_IP@\//{print $NF}')
sleep 1
sudo ip link set $dev down
echo "doctor set link down at" $(date "+%s.%N")
sleep 180
sudo ip link set $dev up
sleep 1
END_TXT
    sed -i -e "s/@COMPUTE_IP@/$COMPUTE_IP/" disable_network.sh
    chmod +x disable_network.sh
    scp $ssh_opts_cpu disable_network.sh "$COMPUTE_USER@$COMPUTE_IP:"
    ssh $ssh_opts_cpu "$COMPUTE_USER@$COMPUTE_IP" 'nohup ./disable_network.sh > disable_network.log 2>&1 &'
    # use host time to get rid of potential time sync deviation between nodes
    triggered=$(date "+%s.%N")
}

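# Wait up to $1 seconds for the consumer to log one notification per VM
# on the failed host.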
wait_consumer() {
    local interval=1
    local rounds=$(($1 / $interval))
    for i in $(seq $rounds); do
        notified_count=$(grep "doctor consumer notified at" consumer.log | wc -l)
        if [[ $notified_count -eq $VMS_ON_FAILED_HOST ]]; then
            return 0
        fi
        sleep $interval
    done
    die $LINENO "Consumer hasn't received fault notification."
}

calculate_notification_time() {
    wait_consumer 60
    # keep 'at' as the last keyword just before the value, and
    # use a regex to get the value instead of a fixed column
    detected=$(grep "doctor monitor detected at" monitor.log |\
               sed -e "s/^.* at //")
    notified=$(grep "doctor consumer notified at" consumer.log |\
               sed -e "s/^.* at //" | tail -1)

    echo "$notified $detected" | \
        awk '{
            d = $1 - $2;
            if (d < 1 && d > 0) { print d " OK"; exit 0 }
            else { print d " NG"; exit 1 }
        }'
}

check_host_status() {
    # Check that the host of the first doctor VM is in the expected state
    # $1    Expected state(s), e.g. "(DOWN|UNKNOWN)"
    # $2    Seconds to wait for the expected state
    expected_state=$1
    local interval=5
    local rounds=$(($2 / $interval))
    for i in $(seq $rounds); do
        host_status_line=$(openstack $as_doctor_user --os-compute-api-version \
                           2.16 server show ${VM_BASENAME}1 | grep "host_status")
        host_status=$(echo $host_status_line | awk '{print $4}')
        die_if_not_set $LINENO host_status "host_status not reported by: nova show ${VM_BASENAME}1"
        if [[ "$expected_state" =~ "$host_status" ]] ; then
            echo "${VM_BASENAME}1 showing host_status: $host_status"
            return 0
        else
            sleep $interval
        fi
    done
    die $LINENO "host_status: $host_status not in expected_state: $expected_state"
}

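# Clear the forced-down flag from any down nova-compute services and
# wait until the services and their hosts are reachable again.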
unset_forced_down_hosts() {
    # for debug
    openstack compute service list --service nova-compute

    downed_computes=$(openstack compute service list --service nova-compute \
                      -f value -c Host -c State | grep ' down$' \
                      | sed -e 's/ *down$//')
    echo "downed_computes: $downed_computes"
    for host in $downed_computes
    do
        # TODO(r-mibu): use openstack client
        #openstack compute service set --up $host nova-compute
        nova service-force-down --unset $host nova-compute
    done

    echo "waiting for downed compute hosts to come back up..."
    wait_until 'openstack compute service list --service nova-compute
                -f value -c State | grep -q down' 240 5

    for host in $downed_computes
    do
        # TODO(r-mibu): improve 'get_compute_ip_from_hostname'
        get_compute_ip_from_hostname $host
        wait_until "! ping -c 1 $COMPUTE_IP" 120 5
    done
}

collect_logs() {
    if [[ -n "$COMPUTE_IP" ]]; then
        scp $ssh_opts_cpu "$COMPUTE_USER@$COMPUTE_IP:disable_network.log" .
    fi

    # TODO(yujunz) collect other logs, e.g. nova, aodh
}

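# Export the measured timestamps (as millisecond offsets from the
# link-down event) and feed them to the PoC profiler.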
run_profiler() {
    if [[ "$PROFILER_TYPE" == "poc" ]]; then
        linkdown=$(grep "doctor set link down at " disable_network.log |\
                   sed -e "s/^.* at //")
        vmdown=$(grep "doctor mark vm.* error at" inspector.log |tail -n 1 |\
                 sed -e "s/^.* at //")
        hostdown=$(grep "doctor mark host.* down at" inspector.log |\
                   sed -e "s/^.* at //")

        # TODO(yujunz) check the actual delay to verify time sync status
        # expected ~1s delay from $triggered to $linkdown
        relative_start=${linkdown}
        export DOCTOR_PROFILER_T00=$(python -c \
          "print(int(($linkdown-$relative_start)*1000))")
        export DOCTOR_PROFILER_T01=$(python -c \
          "print(int(($detected-$relative_start)*1000))")
        export DOCTOR_PROFILER_T03=$(python -c \
          "print(int(($vmdown-$relative_start)*1000))")
        export DOCTOR_PROFILER_T04=$(python -c \
          "print(int(($hostdown-$relative_start)*1000))")
        export DOCTOR_PROFILER_T09=$(python -c \
          "print(int(($notified-$relative_start)*1000))")

        python profiler-poc.py >doctor_profiler.log 2>&1
    fi
}

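# Best-effort teardown of everything the test created; invoked via the
# EXIT trap, so it must not abort on errors (hence 'set +e').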
cleanup() {
    set +e
    echo "cleanup..."
    stop_monitor
    stop_inspector
    stop_consumer

    unset_forced_down_hosts
    collect_logs

    vms=$(openstack $as_doctor_user server list)
    vmstodel=""
    for i in $(seq $VM_COUNT); do
        echo "${vms}" | grep -q " $VM_BASENAME$i " &&
            vmstodel+=" $VM_BASENAME$i"
    done
    [[ $vmstodel ]] && openstack $as_doctor_user server delete $vmstodel
    alarm_list=$($ceilometer alarm-list)
    for i in $(seq $VM_COUNT); do
        alarm_id=$(echo "${alarm_list}" | grep " $ALARM_BASENAME$i " |
                   awk '{print $2}')
        [ -n "$alarm_id" ] && $ceilometer alarm-delete "$alarm_id"
    done
    openstack $as_doctor_user subnet delete $NET_NAME
    sleep 1
    openstack $as_doctor_user network delete $NET_NAME
    sleep 1

    image_id=$(openstack image list | grep " $IMAGE_NAME " | awk '{print $2}')
    sleep 1
    # if an existing image was used, there is no need to remove it here
    if [[ "$use_existing_image" == false ]] ; then
        [ -n "$image_id" ] && openstack image delete "$image_id"
    fi

    remove_test_user

    cleanup_installer
    cleanup_inspector

    # NOTE: temporary log printer
    for f in $(find . -name '*.log')
    do
        echo
        echo "[$f]"
        sed -e 's/^/ | /' $f
        echo
    done
}

# Main process

if [[ "$PYTHON_ENABLE" == [Tt]rue ]]; then
    which tox || sudo pip install tox
    if [ -f /usr/bin/apt-get ]; then
        sudo apt-get install -y python3-dev
    elif [ -f /usr/bin/yum ] ; then
        sudo yum install -y python3-devel
    fi

    cd $TOP_DIR
    echo "executing tox..."
    tox
    exit $?
fi

497 echo "Note: doctor/tests/run.sh has been executed."
498 git log --oneline -1 || true   # ignore even you don't have git installed
499
500 trap cleanup EXIT
501
502 source $TOP_DIR/functions-common
503 source $TOP_DIR/lib/installer
504 source $TOP_DIR/lib/inspector
505
setup_installer

echo "preparing VM image..."
download_image
register_image

echo "creating test user..."
create_test_user

echo "creating VM..."
boot_vm
wait_for_vm_launch

echo "creating alarm..."
# TODO: change back to using get_consumer_ip; network problems depend on
#       infra and installers
#get_consumer_ip
create_alarm

524 echo "starting doctor sample components..."
525 start_inspector
526 start_monitor
527 start_consumer
528
529 sleep 60
530 echo "injecting host failure..."
531 inject_failure
532
533 check_host_status "(DOWN|UNKNOWN)" 60
534 calculate_notification_time
535 unset_forced_down_hosts
536 collect_logs
537 run_profiler
538
539 echo "done"