#!/bin/bash -e
##############################################################################
# Copyright (c) 2016 NEC Corporation and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

# Configuration

[[ "${CI_DEBUG:-true}" == [Tt]rue ]] && set -x

IMAGE_URL=https://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img
#if an existing image name is provided in the environment, use that one
IMAGE_NAME=${IMAGE_NAME:-cirros}
IMAGE_FILE="${IMAGE_NAME}.img"
IMAGE_FORMAT=qcow2
VM_BASENAME=doctor_vm
VM_FLAVOR=m1.tiny
#if VM_COUNT is set, use that value instead
VM_COUNT=${VM_COUNT:-1}
NET_NAME=doctor_net
NET_CIDR=192.168.168.0/24
ALARM_BASENAME=doctor_alarm
CONSUMER_PORT=12346
DOCTOR_USER=doctor
DOCTOR_PW=doctor
DOCTOR_PROJECT=doctor
DOCTOR_ROLE=_member_
PROFILER_TYPE=${PROFILER_TYPE:-poc}
PYTHON_ENABLE=${PYTHON_ENABLE:-false}

TOP_DIR=$(cd $(dirname "$0") && pwd)

as_doctor_user="--os-username $DOCTOR_USER --os-password $DOCTOR_PW
                --os-project-name $DOCTOR_PROJECT --os-tenant-name $DOCTOR_PROJECT"
# NOTE: ceilometer command still requires '--os-tenant-name'.
#ceilometer="ceilometer ${as_doctor_user/--os-project-name/--os-tenant-name}"
ceilometer="ceilometer $as_doctor_user"
as_admin_user="--os-username admin --os-project-name $DOCTOR_PROJECT
               --os-tenant-name $DOCTOR_PROJECT"
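# NOTE: these option strings are expanded unquoted into the openstack/ceilometer
# commands below, so each call runs either as the doctor test user or as admin.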

upper_constraints="https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt?h=stable/ocata"
pip_install="pip install -c${upper_constraints}"
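# 'pip install -c<file>' caps every installed package at the version listed in
# the stable/ocata upper-constraints file. Illustrative expansion (not run here):
#   ${pip_install} python-openstackclient
#   == pip install -c${upper_constraints} python-openstackclient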

# Functions

get_compute_host_info() {
    # get the compute host that the first VM booted on, as admin user
    COMPUTE_HOST=$(openstack $as_admin_user server show ${VM_BASENAME}1 |
                   grep "OS-EXT-SRV-ATTR:host " | awk '{ print $4 }')
    compute_host_in_undercloud=${COMPUTE_HOST%%.*}
    die_if_not_set $LINENO COMPUTE_HOST "Failed to get compute hostname"

    get_compute_ip_from_hostname $COMPUTE_HOST

    echo "COMPUTE_HOST=$COMPUTE_HOST"
    echo "COMPUTE_IP=$COMPUTE_IP"

    # verify connectivity to target compute host
    if ! ping -c 1 "$COMPUTE_IP" ; then
        die $LINENO "Cannot ping compute host"
    fi

    # verify ssh to target compute host
    if ! ssh $ssh_opts_cpu "$COMPUTE_USER@$COMPUTE_IP" 'exit' ; then
        die $LINENO "Cannot ssh to compute host"
    fi
}

# TODO(r-mibu): update this function to support consumer instance
#               and migrate this function into installer lib
get_consumer_ip___to_be_removed() {
    local get_consumer_command="ip route get $COMPUTE_IP | awk '/ src /{print \$NF}'"
    if is_installer apex; then
        CONSUMER_IP=$(sudo ssh $ssh_opts root@$INSTALLER_IP \
                      "$get_consumer_command")
    elif is_installer fuel; then
        CONSUMER_IP=$(sudo sshpass -p r00tme ssh $ssh_opts root@${INSTALLER_IP} \
                      "$get_consumer_command")
    elif is_installer local; then
        CONSUMER_IP=`$get_consumer_command`
    fi
    echo "CONSUMER_IP=$CONSUMER_IP"

    die_if_not_set $LINENO CONSUMER_IP "Could not get CONSUMER_IP."
}

download_image() {
    #if a different image name was provided in the environment, there is no need to download it
    use_existing_image=false
    openstack image list | grep -q " $IMAGE_NAME " && use_existing_image=true

    if [[ "$use_existing_image" == false ]] ; then
        [ -e "$IMAGE_FILE" ] && return 0
        wget "$IMAGE_URL" -O "$IMAGE_FILE"
    fi
}

register_image() {
    openstack image list | grep -q " $IMAGE_NAME " && return 0
    openstack image create "$IMAGE_NAME" \
                           --public \
                           --disk-format "$IMAGE_FORMAT" \
                           --container-format bare \
                           --file "$IMAGE_FILE"
}

create_test_user() {
    openstack project list | grep -q " $DOCTOR_PROJECT " || {
        openstack project create --description "Doctor Project" \
                                 "$DOCTOR_PROJECT"
    }
    openstack user list | grep -q " $DOCTOR_USER " || {
        openstack user create "$DOCTOR_USER" --password "$DOCTOR_PW" \
                              --project "$DOCTOR_PROJECT"
    }
    openstack role show "$DOCTOR_ROLE" | grep -q " $DOCTOR_ROLE " || {
        openstack role create "$DOCTOR_ROLE"
    }
    openstack role assignment list --user "$DOCTOR_USER" \
    --project "$DOCTOR_PROJECT" --names | grep -q " $DOCTOR_ROLE " || {
        openstack role add "$DOCTOR_ROLE" --user "$DOCTOR_USER" \
                           --project "$DOCTOR_PROJECT"
    }
    openstack role assignment list --user admin --project "$DOCTOR_PROJECT" \
    --names | grep -q " admin " || {
        openstack role add admin --user admin --project "$DOCTOR_PROJECT"
    }
    # tojuvone: 'openstack quota show' is broken, so nova has to be used instead
    # https://bugs.launchpad.net/manila/+bug/1652118
    # Note: while using the openstack client is encouraged, it has proven
    # quite buggy.
    # QUOTA=$(openstack quota show $DOCTOR_PROJECT)
    DOCTOR_QUOTA=$(nova quota-show --tenant $DOCTOR_PROJECT)
    # Make sure the quota allows the required number of instances and cores
    OLD_INSTANCE_QUOTA=$(echo "${DOCTOR_QUOTA}" | grep " instances " | \
                         awk '{print $4}')
    if [ $OLD_INSTANCE_QUOTA -lt $VM_COUNT ]; then
        openstack quota set --instances $VM_COUNT \
                  $DOCTOR_USER
    fi
    OLD_CORES_QUOTA=$(echo "${DOCTOR_QUOTA}" | grep " cores " | \
                      awk '{print $4}')
    if [ $OLD_CORES_QUOTA -lt $VM_COUNT ]; then
        openstack quota set --cores $VM_COUNT \
                  $DOCTOR_USER
    fi
}

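# remove_test_user undoes create_test_user: it drops the role assignments,
# the doctor user and the doctor project, in that order.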
remove_test_user() {
    openstack project list | grep -q " $DOCTOR_PROJECT " && {
        openstack role assignment list --user admin \
        --project "$DOCTOR_PROJECT" --names | grep -q " admin " && {
            openstack role remove admin --user admin --project "$DOCTOR_PROJECT"
        }
        openstack user list | grep -q " $DOCTOR_USER " && {
            openstack role assignment list --user "$DOCTOR_USER" \
            --project "$DOCTOR_PROJECT" --names | grep -q " $DOCTOR_ROLE " && {
                openstack role remove "$DOCTOR_ROLE" --user "$DOCTOR_USER" \
                --project "$DOCTOR_PROJECT"
            }
            openstack user delete "$DOCTOR_USER"
        }
        openstack project delete "$DOCTOR_PROJECT"
    }
}

boot_vm() {
    # test VMs are created as the test (non-admin) user, so non-admin operation is covered

    if ! openstack $as_doctor_user network show $NET_NAME; then
        openstack $as_doctor_user network create $NET_NAME
    fi
    if ! openstack $as_doctor_user subnet show $NET_NAME; then
        openstack $as_doctor_user subnet create $NET_NAME \
            --network $NET_NAME --subnet-range $NET_CIDR --no-dhcp
    fi
    net_id=$(openstack $as_doctor_user network show $NET_NAME -f value -c id)

    servers=$(openstack $as_doctor_user server list)
    for i in `seq $VM_COUNT`; do
        echo "${servers}" | grep -q " $VM_BASENAME$i " && continue
        openstack $as_doctor_user server create --flavor "$VM_FLAVOR" \
            --image "$IMAGE_NAME" --nic net-id=$net_id "$VM_BASENAME$i"
    done
    sleep 1
}

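# create_alarm registers one Ceilometer/Aodh event alarm per test VM: when a
# compute.instance.update notification with traits.state=error arrives for
# that VM, the alarm POSTs to the consumer's /failure endpoint given in
# --alarm-action.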
create_alarm() {
    # get vm_id as test user
    alarm_list=$($ceilometer alarm-list)
    vms=$(openstack $as_doctor_user server list)
    for i in `seq $VM_COUNT`; do
        echo "${alarm_list}" | grep -q " $ALARM_BASENAME$i " || {
            vm_id=$(echo "${vms}" | grep " $VM_BASENAME$i " | awk '{print $2}')
            # TODO(r-mibu): change notification endpoint from localhost to the
            # consumer IP address (functest container)
            $ceilometer alarm-event-create \
                       --name "$ALARM_BASENAME$i" \
                       --alarm-action "http://localhost:$CONSUMER_PORT/failure" \
                       --description "VM failure" \
                       --enabled True \
                       --repeat-actions False \
                       --severity "moderate" \
                       --event-type compute.instance.update \
                       -q "traits.state=string::error; \
                       traits.instance_id=string::$vm_id"
        }
    done
}

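# start_consumer launches the sample consumer locally and, unless the 'local'
# installer is used, opens reverse SSH tunnels (-R) from every controller so
# that an alarm POST to localhost:$CONSUMER_PORT on a controller reaches the
# consumer running here.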
start_consumer() {
    pgrep -f "python consumer.py" && return 0
    python consumer.py "$CONSUMER_PORT" > consumer.log 2>&1 &

    # NOTE(r-mibu): create tunnels to the controller nodes, so that we can
    # avoid network problems that depend on infra and installers.
    # These tunnels will be terminated by stop_consumer() or after 10 mins.
    if ! is_installer local; then
        for ip in $CONTROLLER_IPS
        do
            forward_rule="-R $CONSUMER_PORT:localhost:$CONSUMER_PORT"
            tunnel_command="sudo ssh $ssh_opts_cpu $COMPUTE_USER@$ip $forward_rule sleep 600"
            $tunnel_command > "ssh_tunnel.${ip}.log" 2>&1 < /dev/null &
        done
    fi
}

stop_consumer() {
    pgrep -f "python consumer.py" || return 0
    kill $(pgrep -f "python consumer.py")

    # NOTE(r-mibu): terminate tunnels to the controller nodes
    if ! is_installer local; then
        for ip in $CONTROLLER_IPS
        do
            forward_rule="-R $CONSUMER_PORT:localhost:$CONSUMER_PORT"
            tunnel_command="sudo ssh $ssh_opts_cpu $COMPUTE_USER@$ip $forward_rule sleep 600"
            kill $(pgrep -f "$tunnel_command")
        done
    fi
}

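# wait_for_vm_launch polls the server list until all $VM_COUNT VMs are ACTIVE
# (or dies on ERROR / timeout), then records in VMS_ON_FAILED_HOST how many of
# them landed on the compute host that will be taken down.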
wait_for_vm_launch() {
    echo "waiting for vm launch..."

    count=0
    while [[ ${count} -lt 60 ]]
    do
        active_count=0
        vms=$(openstack $as_doctor_user server list)
        for i in `seq $VM_COUNT`; do
            state=$(echo "${vms}" | grep " $VM_BASENAME$i " | awk '{print $6}')
            if [[ "$state" == "ACTIVE" ]]; then
                active_count=$(($active_count+1))
            elif [[ "$state" == "ERROR" ]]; then
                die $LINENO "vm state of $VM_BASENAME$i is ERROR"
            else
                #This VM is not yet active
                count=$(($count+1))
                sleep 5
                continue
            fi
        done
        [[ $active_count -eq $VM_COUNT ]] && {
            echo "get compute host info..."
            get_compute_host_info
            VMS_ON_FAILED_HOST=$(openstack $as_doctor_user server list --host \
                         $COMPUTE_HOST | grep " ${VM_BASENAME}" | wc -l)
            return 0
        }
        #Not all VMs are active yet
        count=$(($count+1))
        sleep 5
    done
    die $LINENO "Time out while waiting for VM launch"
}

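# inject_failure simulates a sudden compute host failure: it copies a small
# script to the target compute host that brings down the interface carrying
# $COMPUTE_IP (or $INTERFACE_NAME if set) for 3 minutes, so the doctor monitor
# can detect the host becoming unreachable.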
inject_failure() {
    echo "disabling network of compute host [$COMPUTE_HOST] for 3 mins..."
    cat > disable_network.sh << 'END_TXT'
#!/bin/bash -x
sleep 1
if [ -n "@INTERFACE_NAME@" ]; then
    dev=@INTERFACE_NAME@
else
    dev=$(sudo ip a | awk '/ @COMPUTE_IP@\//{print $NF}')
fi
sudo ip link set $dev down
echo "doctor set link down at" $(date "+%s.%N")
sleep 180
sudo ip link set $dev up
sleep 1
END_TXT
    sed -i -e "s/@COMPUTE_IP@/$COMPUTE_IP/" disable_network.sh
    sed -i -e "s/@INTERFACE_NAME@/$INTERFACE_NAME/" disable_network.sh
    chmod +x disable_network.sh
    scp $ssh_opts_cpu disable_network.sh "$COMPUTE_USER@$COMPUTE_IP:"
    ssh $ssh_opts_cpu "$COMPUTE_USER@$COMPUTE_IP" 'nohup ./disable_network.sh > disable_network.log 2>&1 &'
    # use host time to get rid of potential time sync deviation between nodes
    triggered=$(date "+%s.%N")
}

wait_consumer() {
    local interval=1
    local rounds=$(($1 / $interval))
    for i in `seq $rounds`; do
        notified_count=$(grep "doctor consumer notified at" consumer.log | wc -l)
        if [[ $notified_count -eq $VMS_ON_FAILED_HOST ]]; then
            return 0
        fi
        sleep $interval
    done
    die $LINENO "Consumer hasn't received fault notification."
}

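# calculate_notification_time reads the "... at <epoch>" timestamps written by
# the monitor (detection) and the consumer (notification) and passes only if
# the notification arrived less than 1 second after detection, e.g. (purely
# illustrative values) notified=100.5, detected=100.1 -> "0.4 OK".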
calculate_notification_time() {
    wait_consumer 60
    #keep 'at' as the last keyword just before the value, and
    #use regex to get value instead of the fixed column
    if [ ! -f monitor.log ]; then
        scp $ssh_opts_cpu "$COMPUTE_USER@$COMPUTE_IP:monitor.log" .
    fi
    detected=$(grep "doctor monitor detected at" monitor.log |\
               sed -e "s/^.* at //" | tail -1)
    notified=$(grep "doctor consumer notified at" consumer.log |\
               sed -e "s/^.* at //" | tail -1)

    echo "$notified $detected" | \
        awk '{
            d = $1 - $2;
            if (d < 1 && d > 0) { print d " OK"; exit 0 }
            else { print d " NG"; exit 1 }
        }'
}

check_host_status() {
    # Check that the host of the first Doctor VM is in the wanted state
    # $1    Expected state
    # $2    Seconds to wait for the wanted state
    expected_state=$1
    local interval=5
    local rounds=$(($2 / $interval))
    for i in `seq $rounds`; do
        host_status_line=$(openstack $as_doctor_user --os-compute-api-version \
                           2.16 server show ${VM_BASENAME}1 | grep "host_status")
        host_status=$(echo $host_status_line | awk '{print $4}')
        die_if_not_set $LINENO host_status "host_status not reported by: nova show ${VM_BASENAME}1"
        if [[ "$expected_state" =~ "$host_status" ]] ; then
            echo "${VM_BASENAME}1 showing host_status: $host_status"
            return 0
        else
            sleep $interval
        fi
    done
    if [[ "$expected_state" =~ "$host_status" ]] ; then
        echo "${VM_BASENAME}1 showing host_status: $host_status"
    else
        die $LINENO "host_status: $host_status is not in expected_state: $expected_state"
    fi
}

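# unset_forced_down_hosts clears the "forced down" flag placed on nova-compute
# services during fault handling (nova service-force-down --unset) and waits
# for the affected compute hosts to be reported as available again.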
unset_forced_down_hosts() {
    # for debug
    openstack compute service list --service nova-compute

    downed_computes=$(openstack compute service list --service nova-compute \
                      -f value -c Host -c State | grep ' down$' \
                      | sed -e 's/ *down$//')
    echo "downed_computes: $downed_computes"
    for host in $downed_computes
    do
        # TODO(r-mibu): use openstack client
        #openstack compute service set --up $host nova-compute
        nova service-force-down --unset $host nova-compute
    done

    echo "waiting for the disabled compute hosts to come back up..."
    wait_until 'openstack compute service list --service nova-compute
                -f value -c State | grep -q down' 240 5

    for host in $downed_computes
    do
        # TODO(r-mibu): improve 'get_compute_ip_from_hostname'
        get_compute_ip_from_hostname $host
        wait_until "! ping -c 1 $COMPUTE_IP" 120 5
    done
}

collect_logs() {
    if [[ -n "$COMPUTE_IP" ]]; then
        scp $ssh_opts_cpu "$COMPUTE_USER@$COMPUTE_IP:disable_network.log" .
    fi

    # TODO(yujunz) collect other logs, e.g. nova, aodh
}

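# run_profiler (PoC profiler only) converts the timestamps logged at each step
# into milliseconds relative to the moment the link went down and exports them
# as DOCTOR_PROFILER_T00..T09 for profiler_poc.py.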
run_profiler() {
    if [[ "$PROFILER_TYPE" == "poc" ]]; then
        linkdown=$(grep "doctor set link down at " disable_network.log |\
                  sed -e "s/^.* at //")
        vmdown=$(grep "doctor mark vm.* error at" inspector.log |tail -n 1 |\
                 sed -e "s/^.* at //")
        hostdown=$(grep "doctor mark host.* down at" inspector.log |\
                 sed -e "s/^.* at //")

        # TODO(yujunz) check the actual delay to verify time sync status
        # expected ~1s delay from $triggered to $linkdown
        relative_start=${linkdown}
        export DOCTOR_PROFILER_T00=$(python -c \
          "print(int(($linkdown-$relative_start)*1000))")
        export DOCTOR_PROFILER_T01=$(python -c \
          "print(int(($detected-$relative_start)*1000))")
        export DOCTOR_PROFILER_T03=$(python -c \
          "print(int(($vmdown-$relative_start)*1000))")
        export DOCTOR_PROFILER_T04=$(python -c \
          "print(int(($hostdown-$relative_start)*1000))")
        export DOCTOR_PROFILER_T09=$(python -c \
          "print(int(($notified-$relative_start)*1000))")

        python profiler_poc.py > doctor_profiler.log 2>&1
    fi
}

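# cleanup is registered with 'trap cleanup EXIT' below, so it runs even when
# the test fails: it stops the sample components, removes the test VMs, alarms,
# network, image and user, and dumps all *.log files for debugging.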
cleanup() {
    set +e
    echo "cleanup..."
    stop_inspector
    stop_consumer

    unset_forced_down_hosts
    stop_monitor
    collect_logs

    vms=$(openstack $as_doctor_user server list)
    vmstodel=""
    for i in `seq $VM_COUNT`; do
        echo "${vms}" | grep -q " $VM_BASENAME$i " &&
        vmstodel+=" $VM_BASENAME$i"
    done
    [[ $vmstodel ]] && openstack $as_doctor_user server delete $vmstodel
    alarm_list=$($ceilometer alarm-list)
    for i in `seq $VM_COUNT`; do
        alarm_id=$(echo "${alarm_list}" | grep " $ALARM_BASENAME$i " |
                   awk '{print $2}')
        [ -n "$alarm_id" ] && $ceilometer alarm-delete "$alarm_id"
    done
    openstack $as_doctor_user subnet delete $NET_NAME
    sleep 1
    openstack $as_doctor_user network delete $NET_NAME
    sleep 1

    image_id=$(openstack image list | grep " $IMAGE_NAME " | awk '{print $2}')
    sleep 1
    #if an existing image was used, there's no need to remove it here
    if [[ "$use_existing_image" == false ]] ; then
        [ -n "$image_id" ] && openstack image delete "$image_id"
    fi

    remove_test_user

    cleanup_installer
    cleanup_inspector
    cleanup_monitor

    # NOTE: temporary log printer
    for f in $(find . -name '*.log')
    do
        echo
        echo "[$f]"
        sed -e 's/^/ | /' $f
        echo
    done
}

setup_python_packages() {
    pip freeze | grep -i flask\= > /dev/null || sudo ${pip_install} flask
    command -v openstack || sudo ${pip_install} python-openstackclient
    command -v ceilometer || sudo ${pip_install} python-ceilometerclient
    command -v congress || sudo ${pip_install} python-congressclient
}

# Main process

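# When PYTHON_ENABLE is true, the whole test is delegated to the python
# implementation via tox and the rest of this script is skipped.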
if [[ $PYTHON_ENABLE == [Tt]rue ]]; then
    which tox || sudo ${pip_install} tox
    if [ -f /usr/bin/apt-get ]; then
        sudo apt-get install -y python3-dev
    elif [ -f /usr/bin/yum ] ; then
        sudo yum install -y python3-devel
    fi

    cd $TOP_DIR
    echo "executing tox..."
    tox
    exit $?
fi

echo "Note: doctor/tests/run.sh has been executed."
git log --oneline -1 || true   # ignore failure in case git is not installed

trap cleanup EXIT

setup_python_packages

source $TOP_DIR/functions-common
source $TOP_DIR/lib/installer
source $TOP_DIR/lib/inspector
source $TOP_DIR/lib/monitor

rm -f *.log

setup_installer

echo "preparing VM image..."
download_image
register_image

echo "creating test user..."
create_test_user

echo "creating VM..."
boot_vm
wait_for_vm_launch

echo "creating alarm..."
#TODO: change back to using get_consumer_ip; currently disabled because of
#      network problems that depend on infra and installers
#get_consumer_ip
create_alarm

echo "starting doctor sample components..."
start_inspector
start_monitor
start_consumer

sleep 60
echo "injecting host failure..."
inject_failure

check_host_status "(DOWN|UNKNOWN)" 60
unset_forced_down_hosts
calculate_notification_time
collect_logs
run_profiler

echo "done"