#!/bin/bash -e
##############################################################################
# Copyright (c) 2016 NEC Corporation and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

# Configuration

[[ "${CI_DEBUG:-true}" == [Tt]rue ]] && set -x

IMAGE_URL=https://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img
# if an existing image name is provided in the environment, use that one
IMAGE_NAME=${IMAGE_NAME:-cirros}
IMAGE_FILE="${IMAGE_NAME}.img"
IMAGE_FORMAT=qcow2
VM_BASENAME=doctor_vm
VM_FLAVOR=m1.tiny
# if VM_COUNT is set in the environment, use that instead
VM_COUNT=${VM_COUNT:-1}
NET_NAME=doctor_net
NET_CIDR=192.168.168.0/24
ALARM_BASENAME=doctor_alarm
CONSUMER_PORT=12346
DOCTOR_USER=doctor
DOCTOR_PW=doctor
DOCTOR_PROJECT=doctor
# TODO: change back to `_member_` when JIRA DOCTOR-55 is done
DOCTOR_ROLE=admin
PROFILER_TYPE=${PROFILER_TYPE:-none}
PYTHON_ENABLE=${PYTHON_ENABLE:-false}

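# Example invocation (a sketch; assumes admin credentials and an installer
# environment, e.g. INSTALLER_TYPE/INSTALLER_IP, are already exported):
#   CI_DEBUG=false VM_COUNT=2 PROFILER_TYPE=poc ./run.sh
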
TOP_DIR=$(cd $(dirname "$0") && pwd)

as_doctor_user="--os-username $DOCTOR_USER --os-password $DOCTOR_PW
                --os-project-name $DOCTOR_PROJECT --os-tenant-name $DOCTOR_PROJECT"
# NOTE: the ceilometer command still requires '--os-tenant-name', hence
# $as_doctor_user carries both the project and tenant options.
#ceilometer="ceilometer ${as_doctor_user/--os-project-name/--os-tenant-name}"
ceilometer="ceilometer $as_doctor_user"


# Functions

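# NOTE: helper functions and variables such as die, die_if_not_set,
# wait_until, is_installer, get_compute_ip_from_hostname, COMPUTE_USER,
# ssh_opts, ssh_opts_cpu, INSTALLER_IP and CONTROLLER_IPS are provided by
# functions-common, lib/installer and lib/inspector, sourced in the main
# process below.
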
get_compute_host_info() {
    # get info about the compute host on which the first VM is booted
    COMPUTE_HOST=$(openstack $as_doctor_user server show ${VM_BASENAME}1 |
                   grep "OS-EXT-SRV-ATTR:host" | awk '{ print $4 }')
    compute_host_in_undercloud=${COMPUTE_HOST%%.*}
    die_if_not_set $LINENO COMPUTE_HOST "Failed to get compute hostname"

    get_compute_ip_from_hostname $COMPUTE_HOST

    echo "COMPUTE_HOST=$COMPUTE_HOST"
    echo "COMPUTE_IP=$COMPUTE_IP"

    # verify connectivity to target compute host
    # ('if !' instead of checking $? afterwards: with 'bash -e' a failing
    # bare command would abort the script before the check is reached)
    if ! ping -c 1 "$COMPUTE_IP"; then
        die $LINENO "Cannot ping compute host"
    fi

    # verify ssh to target compute host
    if ! ssh $ssh_opts_cpu "$COMPUTE_USER@$COMPUTE_IP" 'exit'; then
        die $LINENO "Cannot ssh to compute host"
    fi
}

# TODO(r-mibu): update this function to support consumer instance
#               and migrate this function into installer lib
get_consumer_ip___to_be_removed() {
    local get_consumer_command="ip route get $COMPUTE_IP | awk '/ src /{print \$NF}'"
    if is_installer apex; then
        CONSUMER_IP=$(sudo ssh $ssh_opts root@$INSTALLER_IP \
                      "$get_consumer_command")
    elif is_installer fuel; then
        CONSUMER_IP=$(sudo sshpass -p r00tme ssh $ssh_opts root@${INSTALLER_IP} \
                      "$get_consumer_command")
    elif is_installer local; then
        # use eval so that the pipe inside the command string is interpreted
        # by the shell (a plain expansion would pass '|' and 'awk' to 'ip'
        # as literal arguments)
        CONSUMER_IP=$(eval "$get_consumer_command")
    fi
    echo "CONSUMER_IP=$CONSUMER_IP"

    die_if_not_set $LINENO CONSUMER_IP "Could not get CONSUMER_IP."
}

download_image() {
    # if an existing image name was provided in the environment,
    # there is no need to download the image
    use_existing_image=false
    openstack image list | grep -q " $IMAGE_NAME " && use_existing_image=true

    if [[ "$use_existing_image" == false ]] ; then
        [ -e "$IMAGE_FILE" ] && return 0
        # NOTE: '-O' (capital) saves the download to $IMAGE_FILE;
        # lowercase '-o' would only redirect wget's log output
        wget "$IMAGE_URL" -O "$IMAGE_FILE"
    fi
}

register_image() {
    openstack image list | grep -q " $IMAGE_NAME " && return 0
    openstack image create "$IMAGE_NAME" \
                           --public \
                           --disk-format "$IMAGE_FORMAT" \
                           --container-format bare \
                           --file "$IMAGE_FILE"
}
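
# NOTE: the space-padded pattern " $IMAGE_NAME " used above matches a whole
# cell of the openstack table output, e.g.
#   | 6a0a...cafe | cirros | active |
# so names merely containing "$IMAGE_NAME" as a substring do not match.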

create_test_user() {
    openstack project list | grep -q " $DOCTOR_PROJECT " || {
        openstack project create "$DOCTOR_PROJECT"
    }
    openstack user list | grep -q " $DOCTOR_USER " || {
        openstack user create "$DOCTOR_USER" --password "$DOCTOR_PW" \
                              --project "$DOCTOR_PROJECT"
    }
    openstack role show "$DOCTOR_ROLE" || {
        openstack role create "$DOCTOR_ROLE"
    }
    openstack role add "$DOCTOR_ROLE" --user "$DOCTOR_USER" \
                       --project "$DOCTOR_PROJECT"
    # tojuvone: 'openstack quota show' is broken, so we have to use nova
    # https://bugs.launchpad.net/manila/+bug/1652118
    # Note: while using the openstack client is encouraged, it has proven
    # quite buggy.
    # QUOTA=$(openstack quota show $DOCTOR_PROJECT)
    DOCTOR_QUOTA=$(nova quota-show --tenant $DOCTOR_PROJECT)
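    # Example 'nova quota-show' rows being parsed below; the value sits in
    # the 4th whitespace-separated field (format assumed from the client's
    # table output):
    #   | instances | 10 |
    #   | cores     | 20 |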
    # make sure the quota allows the requested number of instances and cores;
    # quota is a project-level setting, so target the project, not the user
    OLD_INSTANCE_QUOTA=$(echo "${DOCTOR_QUOTA}" | grep " instances " | \
                         awk '{print $4}')
    if [ $OLD_INSTANCE_QUOTA -lt $VM_COUNT ]; then
        openstack quota set --instances $VM_COUNT \
                  $DOCTOR_PROJECT
    fi
    OLD_CORES_QUOTA=$(echo "${DOCTOR_QUOTA}" | grep " cores " | \
                      awk '{print $4}')
    if [ $OLD_CORES_QUOTA -lt $VM_COUNT ]; then
        openstack quota set --cores $VM_COUNT \
                  $DOCTOR_PROJECT
    fi
}

boot_vm() {
    # the test VMs are booted as the test user, so that non-admin
    # operation is covered as well

    if ! openstack $as_doctor_user network show $NET_NAME; then
        openstack $as_doctor_user network create $NET_NAME
    fi
    if ! openstack $as_doctor_user subnet show $NET_NAME; then
        openstack $as_doctor_user subnet create $NET_NAME \
            --network $NET_NAME --subnet-range $NET_CIDR --no-dhcp
    fi
    net_id=$(openstack $as_doctor_user network show $NET_NAME -f value -c id)

    servers=$(openstack $as_doctor_user server list)
    for i in `seq $VM_COUNT`; do
        echo "${servers}" | grep -q " $VM_BASENAME$i " && continue
        openstack $as_doctor_user server create --flavor "$VM_FLAVOR" \
            --image "$IMAGE_NAME" --nic net-id=$net_id "$VM_BASENAME$i"
    done
    sleep 1
}

create_alarm() {
    # get the vm_id as the test user
    alarm_list=$($ceilometer alarm-list)
    vms=$(openstack $as_doctor_user server list)
    for i in `seq $VM_COUNT`; do
        echo "${alarm_list}" | grep -q " $ALARM_BASENAME$i " || {
            vm_id=$(echo "${vms}" | grep " $VM_BASENAME$i " | awk '{print $2}')
            # TODO(r-mibu): change the notification endpoint from localhost
            # to the consumer IP address (functest container)
            $ceilometer alarm-event-create \
                       --name "$ALARM_BASENAME$i" \
                       --alarm-action "http://localhost:$CONSUMER_PORT/failure" \
                       --description "VM failure" \
                       --enabled True \
                       --repeat-actions False \
                       --severity "moderate" \
                       --event-type compute.instance.update \
                       -q "traits.state=string::error; \
                       traits.instance_id=string::$vm_id"
        }
    done
}
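
# NOTE on the doctor sample components used below (roles as this script
# exercises them): monitor.py watches the target compute host and reports
# failure to the inspector, the inspector (started via lib/inspector) marks
# the affected VMs in error and the host down in Nova, and consumer.py
# receives the resulting alarm notification on $CONSUMER_PORT.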

start_monitor() {
    pgrep -f "python monitor.py" && return 0
    sudo -E python monitor.py "$COMPUTE_HOST" "$COMPUTE_IP" "$INSPECTOR_TYPE" \
        > monitor.log 2>&1 &
}

stop_monitor() {
    pgrep -f "python monitor.py" || return 0
    sudo kill $(pgrep -f "python monitor.py")
}

start_consumer() {
    pgrep -f "python consumer.py" && return 0
    python consumer.py "$CONSUMER_PORT" > consumer.log 2>&1 &

    # NOTE(r-mibu): create a tunnel to the controller nodes, so that we can
    # avoid network problems that depend on the infra and installers.
    # This tunnel is terminated by stop_consumer() or after 10 mins.
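    # The reverse forward makes each controller's localhost:$CONSUMER_PORT
    # reach the consumer running here, e.g. (user/IP illustrative only):
    #   sudo ssh $ssh_opts_cpu heat-admin@192.0.2.5 \
    #       -R 12346:localhost:12346 sleep 600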
    if ! is_installer local; then
        for ip in $CONTROLLER_IPS
        do
            forward_rule="-R $CONSUMER_PORT:localhost:$CONSUMER_PORT"
            tunnel_command="sudo ssh $ssh_opts_cpu $COMPUTE_USER@$ip $forward_rule sleep 600"
            $tunnel_command > "ssh_tunnel.${ip}.log" 2>&1 < /dev/null &
        done
    fi
}

stop_consumer() {
    pgrep -f "python consumer.py" || return 0
    kill $(pgrep -f "python consumer.py")

    # NOTE(r-mibu): terminate tunnels to the controller nodes
    if ! is_installer local; then
        for ip in $CONTROLLER_IPS
        do
            forward_rule="-R $CONSUMER_PORT:localhost:$CONSUMER_PORT"
            tunnel_command="sudo ssh $ssh_opts_cpu $COMPUTE_USER@$ip $forward_rule sleep 600"
            kill $(pgrep -f "$tunnel_command")
        done
    fi
}

wait_for_vm_launch() {
    echo "waiting for vm launch..."

    count=0
    while [[ ${count} -lt 60 ]]
    do
        active_count=0
        vms=$(openstack $as_doctor_user server list)
        for i in `seq $VM_COUNT`; do
            state=$(echo "${vms}" | grep " $VM_BASENAME$i " | awk '{print $6}')
            if [[ "$state" == "ACTIVE" ]]; then
                active_count=$(($active_count+1))
            elif [[ "$state" == "ERROR" ]]; then
                die $LINENO "vm state of $VM_BASENAME$i is ERROR"
            else
                # this VM is not yet active
                count=$(($count+1))
                sleep 5
                continue
            fi
        done
        [[ $active_count -eq $VM_COUNT ]] && {
            echo "get compute host info..."
            get_compute_host_info
            VMS_ON_FAILED_HOST=$(openstack $as_doctor_user server list --host \
                         $COMPUTE_HOST | grep " ${VM_BASENAME}" | wc -l)
            return 0
        }
        # not all VMs are active yet
        count=$(($count+1))
        sleep 5
    done
    die $LINENO "Time out while waiting for VM launch"
}

inject_failure() {
    echo "disabling network of compute host [$COMPUTE_HOST] for 3 mins..."
    cat > disable_network.sh << 'END_TXT'
#!/bin/bash -x
dev=$(sudo ip a | awk '/ @COMPUTE_IP@\//{print $7}')
[[ -n "$dev" ]] || dev=$(sudo ip a | awk '/ @COMPUTE_IP@\//{print $5}')
sleep 1
sudo ip link set $dev down
echo "doctor set link down at" $(date "+%s.%N")
sleep 180
sudo ip link set $dev up
sleep 1
END_TXT
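    # After the substitution below, the awk patterns select the interface
    # holding the compute IP, e.g. with COMPUTE_IP=192.0.2.7 (illustrative):
    #   dev=$(sudo ip a | awk '/ 192.0.2.7\//{print $7}')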
    sed -i -e "s/@COMPUTE_IP@/$COMPUTE_IP/" disable_network.sh
    chmod +x disable_network.sh
    scp $ssh_opts_cpu disable_network.sh "$COMPUTE_USER@$COMPUTE_IP:"
    ssh $ssh_opts_cpu "$COMPUTE_USER@$COMPUTE_IP" 'nohup ./disable_network.sh > disable_network.log 2>&1 &'
    # use host time to get rid of potential time sync deviation between nodes
    triggered=$(date "+%s.%N")
}

wait_consumer() {
    local interval=1
    local rounds=$(($1 / $interval))
    for i in `seq $rounds`; do
        notified_count=$(grep "doctor consumer notified at" consumer.log | wc -l)
        if [[ $notified_count -eq $VMS_ON_FAILED_HOST ]]; then
            return 0
        fi
        sleep $interval
    done
    die $LINENO "Consumer hasn't received fault notification."
}

calculate_notification_time() {
    wait_consumer 60
    # keep 'at' as the last keyword just before the value, and use a regex
    # to extract the value instead of relying on a fixed column
    detected=$(grep "doctor monitor detected at" monitor.log |\
               sed -e "s/^.* at //")
    notified=$(grep "doctor consumer notified at" consumer.log |\
               sed -e "s/^.* at //" | tail -1)

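    # The notification is judged OK when it arrives within 1 second of
    # detection; e.g. notified=1488245447.30, detected=1488245446.85
    # (illustrative timestamps) yields "0.45 OK".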
    echo "$notified $detected" | \
        awk '{
            d = $1 - $2;
            if (d < 1 && d > 0) { print d " OK"; exit 0 }
            else { print d " NG"; exit 1 }
        }'
}

check_host_status() {
    # check that the host behind the first Doctor VM is in the wanted state
    # $1    Expected state, e.g. "(DOWN|UNKNOWN)"
    # $2    Seconds to wait for the wanted state
    expected_state=$1
    local interval=5
    local rounds=$(($2 / $interval))
    for i in `seq $rounds`; do
        host_status_line=$(openstack $as_doctor_user --os-compute-api-version \
                           2.16 server show ${VM_BASENAME}1 | grep "host_status")
        host_status=$(echo $host_status_line | awk '{print $4}')
        die_if_not_set $LINENO host_status "host_status not reported by: nova show ${VM_BASENAME}1"
        # NOTE: with a quoted right-hand side, '=~' performs a literal
        # substring test, i.e. whether $expected_state contains $host_status
        if [[ "$expected_state" =~ "$host_status" ]] ; then
            echo "${VM_BASENAME}1 showing host_status: $host_status"
            return 0
        else
            sleep $interval
        fi
    done
    if [[ "$expected_state" =~ "$host_status" ]] ; then
        echo "${VM_BASENAME}1 showing host_status: $host_status"
    else
        die $LINENO "host_status: $host_status not equal to expected_state: $expected_state"
    fi
}

unset_forced_down_hosts() {
    # for debug
    openstack compute service list --service nova-compute

    downed_computes=$(openstack compute service list --service nova-compute \
                      -f value -c Host -c State | grep ' down$' \
                      | sed -e 's/ *down$//')
    echo "downed_computes: $downed_computes"
    for host in $downed_computes
    do
        # TODO(r-mibu): use openstack client
        #openstack compute service set --up $host nova-compute
        nova service-force-down --unset $host nova-compute
    done

    echo "waiting for the disabled compute hosts to come back up..."
    # NOTE: wait_until (functions-common) keeps polling while the given
    # condition holds, i.e. until no nova-compute service reports 'down'
    wait_until 'openstack compute service list --service nova-compute
                -f value -c State | grep -q down' 240 5

    for host in $downed_computes
    do
        # TODO(r-mibu): improve 'get_compute_ip_from_hostname'
        get_compute_ip_from_hostname $host
        wait_until "! ping -c 1 $COMPUTE_IP" 120 5
    done
}

collect_logs() {
    if [[ -n "$COMPUTE_IP" ]]; then
        scp $ssh_opts_cpu "$COMPUTE_USER@$COMPUTE_IP:disable_network.log" .
    fi

    # TODO(yujunz) collect other logs, e.g. nova, aodh
}

run_profiler() {
    if [[ "$PROFILER_TYPE" == "poc" ]]; then
        linkdown=$(grep "doctor set link down at " disable_network.log |\
                  sed -e "s/^.* at //")
        vmdown=$(grep "doctor mark vm.* error at" inspector.log |tail -n 1 |\
                 sed -e "s/^.* at //")
        hostdown=$(grep "doctor mark host.* down at" inspector.log |\
                 sed -e "s/^.* at //")

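        # Profiler checkpoints, in milliseconds relative to link-down:
        #   T00 link down, T01 monitor detected, T03 VM marked error,
        #   T04 host marked down, T09 consumer notified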
        # TODO(yujunz) check the actual delay to verify time sync status
        # expected ~1s delay from $triggered to $linkdown
        relative_start=${linkdown}
        export DOCTOR_PROFILER_T00=$(python -c \
          "print(int(($linkdown-$relative_start)*1000))")
        export DOCTOR_PROFILER_T01=$(python -c \
          "print(int(($detected-$relative_start)*1000))")
        export DOCTOR_PROFILER_T03=$(python -c \
          "print(int(($vmdown-$relative_start)*1000))")
        export DOCTOR_PROFILER_T04=$(python -c \
          "print(int(($hostdown-$relative_start)*1000))")
        export DOCTOR_PROFILER_T09=$(python -c \
          "print(int(($notified-$relative_start)*1000))")

        python profiler-poc.py >doctor_profiler.log 2>&1
    fi
}

cleanup() {
    set +e
    echo "cleanup..."
    stop_monitor
    stop_inspector
    stop_consumer

    unset_forced_down_hosts
    collect_logs

    vms=$(openstack $as_doctor_user server list)
    vmstodel=""
    for i in `seq $VM_COUNT`; do
        echo "${vms}" | grep -q " $VM_BASENAME$i " &&
        vmstodel+=" $VM_BASENAME$i"
    done
    [[ $vmstodel ]] && openstack $as_doctor_user server delete $vmstodel
    alarm_list=$($ceilometer alarm-list)
    for i in `seq $VM_COUNT`; do
        alarm_id=$(echo "${alarm_list}" | grep " $ALARM_BASENAME$i " |
                   awk '{print $2}')
        [ -n "$alarm_id" ] && $ceilometer alarm-delete "$alarm_id"
    done
    openstack $as_doctor_user subnet delete $NET_NAME
    sleep 1
    openstack $as_doctor_user network delete $NET_NAME
    sleep 1

    image_id=$(openstack image list | grep " $IMAGE_NAME " | awk '{print $2}')
    sleep 1
    # if an existing image was used, there is no need to remove it here
    if [[ "$use_existing_image" == false ]] ; then
        [ -n "$image_id" ] && openstack image delete "$image_id"
    fi
    openstack role remove "$DOCTOR_ROLE" --user "$DOCTOR_USER" \
                              --project "$DOCTOR_PROJECT"
    openstack project delete "$DOCTOR_PROJECT"
    openstack user delete "$DOCTOR_USER"
    # NOTE: remove the role only when it was created for the doctor test
    #openstack role delete "$DOCTOR_ROLE"

    cleanup_installer
    cleanup_inspector

    # NOTE: temporary log printer
    for f in $(find . -name '*.log')
    do
        echo
        echo "[$f]"
        sed -e 's/^/ | /' $f
        echo
    done
}

# Main process

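# PYTHON_ENABLE switches this test over to the python implementation:
# when true, run the test suite via tox and skip the bash flow below.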
if [[ "$PYTHON_ENABLE" == [Tt]rue ]]; then
    cd $TOP_DIR
    echo "executing tox..."
    tox
    exit $?
fi

echo "Note: doctor/tests/run.sh has been executed."
git log --oneline -1 || true   # ignore failure in case git is not installed

trap cleanup EXIT

source $TOP_DIR/functions-common
source $TOP_DIR/lib/installer
source $TOP_DIR/lib/inspector

setup_installer

echo "preparing VM image..."
download_image
register_image

echo "creating test user..."
create_test_user

echo "creating VM..."
boot_vm
wait_for_vm_launch

echo "creating alarm..."
# TODO: change back to using get_consumer_ip once the network problems
# that depend on infra and installers are solved
#get_consumer_ip
create_alarm

echo "starting doctor sample components..."
start_inspector
start_monitor
start_consumer

sleep 60
echo "injecting host failure..."
inject_failure

check_host_status "(DOWN|UNKNOWN)" 60
calculate_notification_time
unset_forced_down_hosts
collect_logs
run_profiler

echo "done"
510 echo "done"