Merge "Run the tests with existing image"
[doctor.git] / tests / run.sh
#!/bin/bash -e
##############################################################################
# Copyright (c) 2016 NEC Corporation and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

[[ "${CI_DEBUG:-true}" == [Tt]rue ]] && set -x

IMAGE_URL=https://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img
#if an existing image name is provided in the environment, use that one
IMAGE_NAME=${IMAGE_NAME:-cirros}
IMAGE_FILE="${IMAGE_NAME}.img"
IMAGE_FORMAT=qcow2
VM_NAME=doctor_vm1
VM_FLAVOR=m1.tiny
ALARM_NAME=doctor_alarm1
INSPECTOR_PORT=12345
CONSUMER_PORT=12346
DOCTOR_USER=doctor
DOCTOR_PW=doctor
DOCTOR_PROJECT=doctor
#TODO: change back to `_member_` when JIRA DOCTOR-55 is done
DOCTOR_ROLE=admin

SUPPORTED_INSTALLER_TYPES="apex fuel local"
INSTALLER_TYPE=${INSTALLER_TYPE:-local}
INSTALLER_IP=${INSTALLER_IP:-none}

SUPPORTED_INSPECTOR_TYPES="sample congress"
INSPECTOR_TYPE=${INSPECTOR_TYPE:-sample}

ssh_opts="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
as_doctor_user="--os-username $DOCTOR_USER --os-password $DOCTOR_PW
                --os-tenant-name $DOCTOR_PROJECT"

if [[ ! "$SUPPORTED_INSTALLER_TYPES" =~ "$INSTALLER_TYPE" ]] ; then
    echo "ERROR: INSTALLER_TYPE=$INSTALLER_TYPE is not supported."
    exit 1
fi

if [[ ! "$SUPPORTED_INSPECTOR_TYPES" =~ "$INSPECTOR_TYPE" ]] ; then
    echo "ERROR: INSPECTOR_TYPE=$INSPECTOR_TYPE is not supported."
    exit 1
fi

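# Discover the installer node IP when it was not passed in: for apex and fuel
# the installer runs in a libvirt VM on this jump host, so we take the VM's
# MAC address from 'virsh domiflist' and map it to an IP via the ARP cache.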
get_installer_ip() {
    if [[ "$INSTALLER_TYPE" == "apex" ]] ; then
        if [[ "$INSTALLER_IP" == "none" ]] ; then
            instack_mac=$(sudo virsh domiflist instack | awk '/default/{print $5}')
            INSTALLER_IP=$(/usr/sbin/arp -e | grep ${instack_mac} | awk '{print $1}')
        fi
    elif [[ "$INSTALLER_TYPE" == "fuel" ]] ; then
        if [[ "$INSTALLER_IP" == "none" ]] ; then
            instack_mac=$(sudo virsh domiflist fuel-opnfv | awk '/pxebr/{print $5}')
            INSTALLER_IP=$(/usr/sbin/arp -e | grep ${instack_mac} | awk '{print $1}')
        fi
    fi

    if [[ "$INSTALLER_TYPE" != "local" ]] ; then
        if [[ -z "$INSTALLER_IP" ]] ; then
            echo "ERROR: no installer ip"
            exit 1
        fi
    fi
}

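# Fetch the cloud admin's private ssh key from the installer node and extend
# $ssh_opts_cpu with it, so later ssh/scp commands can reach the overcloud
# controller and compute nodes.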
prepare_ssh_to_cloud() {
    ssh_opts_cpu="$ssh_opts"

    # get ssh key from installer node
    if [[ "$INSTALLER_TYPE" == "apex" ]] ; then
        sudo scp $ssh_opts root@"$INSTALLER_IP":/home/stack/.ssh/id_rsa instack_key
        sudo chown $(whoami):$(whoami) instack_key
        chmod 400 instack_key
        ssh_opts_cpu+=" -i instack_key"
    elif [[ "$INSTALLER_TYPE" == "fuel" ]] ; then
        sshpass -p r00tme scp $ssh_opts root@${INSTALLER_IP}:.ssh/id_rsa instack_key
        sudo chown $(whoami):$(whoami) instack_key
        chmod 400 instack_key
        ssh_opts_cpu+=" -i instack_key"
    elif [[ "$INSTALLER_TYPE" == "local" ]] ; then
        echo "INSTALLER_TYPE set to 'local'. Assuming SSH keys already exchanged with $COMPUTE_HOST"
    fi
}

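# Fuel-only workaround: push a helper script to every controller that points
# the ceilometer event pipeline at the 'alarm.all' notifier topic and enables
# nova's 'messaging' notification driver, restarting the services it touches.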
prepare_test_env() {
    #TODO: delete this once fuel supports the configuration
    if [[ "$INSTALLER_TYPE" == "fuel" ]] ; then
        echo "modifying the configuration..."
        cat > set_conf.sh << 'END_TXT'
#!/bin/bash
if [ -e /etc/ceilometer/event_pipeline.yaml ]; then
    if ! grep -q '^ *- notifier://?topic=alarm.all$' /etc/ceilometer/event_pipeline.yaml; then
        sed -i 's|- notifier://|- notifier://?topic=alarm.all|' /etc/ceilometer/event_pipeline.yaml
        echo "modify the ceilometer config"
        service ceilometer-agent-notification restart
    fi
else
    echo "ceilometer event_pipeline.yaml file does not exist"
    exit 1
fi
if [ -e /etc/nova/nova.conf ]; then
    if ! grep -q '^notification_driver=messaging$' /etc/nova/nova.conf; then
        sed -i -r 's/notification_driver=/notification_driver=messaging/g' /etc/nova/nova.conf
        echo "modify nova config"
        service nova-api restart
    fi
else
    echo "nova.conf file does not exist"
    exit 1
fi
exit 0
END_TXT
        chmod +x set_conf.sh
        CONTROLLER_IP=$(sshpass -p r00tme ssh 2>/dev/null $ssh_opts root@${INSTALLER_IP} \
             "fuel node | grep controller | cut -d '|' -f 5 | xargs")
        for node in $CONTROLLER_IP; do
            scp $ssh_opts_cpu set_conf.sh "root@$node:"
            ssh $ssh_opts_cpu "root@$node" './set_conf.sh > set_conf.log 2>&1 &'
            sleep 1
            scp $ssh_opts_cpu "root@$node:set_conf.log" set_conf_$node.log
        done

        # the markers echoed by set_conf.sh tell us what has to be restored later
        if grep -q "modify the ceilometer config" set_conf_*.log ; then
            NEED_TO_RESTORE_CEILOMETER=true
        fi
        if grep -q "modify nova config" set_conf_*.log ; then
            NEED_TO_RESTORE_NOVA=true
        fi

        echo "waiting for services to restart..."
        sleep 60
    fi
}

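# Revert the controller configuration changed by prepare_test_env(), limited
# to the settings that this run actually modified.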
restore_test_env() {
    #TODO: delete this once fuel supports the configuration
    if [[ "$INSTALLER_TYPE" == "fuel" ]] ; then
        # default to 'false' so this works even when a flag was never set;
        # use 'return' rather than 'exit' so the EXIT trap's status survives
        if ! "${NEED_TO_RESTORE_CEILOMETER:-false}" && ! "${NEED_TO_RESTORE_NOVA:-false}" ; then
            echo "no need to restore the config"
            return 0
        fi

        echo "restoring the configuration..."
        cat > restore_conf.sh << 'END_TXT'
#!/bin/bash
if @NEED_TO_RESTORE_CEILOMETER@ ; then
    if [ -e /etc/ceilometer/event_pipeline.yaml ]; then
        if grep -q '^ *- notifier://?topic=alarm.all$' /etc/ceilometer/event_pipeline.yaml; then
            sed -i 's|- notifier://?topic=alarm.all|- notifier://|' /etc/ceilometer/event_pipeline.yaml
            service ceilometer-agent-notification restart
        fi
    else
        echo "ceilometer event_pipeline.yaml file does not exist"
        exit 1
    fi
fi
if @NEED_TO_RESTORE_NOVA@ ; then
    if [ -e /etc/nova/nova.conf ]; then
        if grep -q '^notification_driver=messaging$' /etc/nova/nova.conf; then
            sed -i -r 's/notification_driver=messaging/notification_driver=/g' /etc/nova/nova.conf
            service nova-api restart
        fi
    else
        echo "nova.conf file does not exist"
        exit 1
    fi
fi
exit 0
END_TXT
        sed -i -e "s/@NEED_TO_RESTORE_CEILOMETER@/${NEED_TO_RESTORE_CEILOMETER:-false}/" restore_conf.sh
        sed -i -e "s/@NEED_TO_RESTORE_NOVA@/${NEED_TO_RESTORE_NOVA:-false}/" restore_conf.sh
        chmod +x restore_conf.sh
        for node in $CONTROLLER_IP; do
            scp $ssh_opts_cpu restore_conf.sh "root@$node:"
            ssh $ssh_opts_cpu "root@$node" './restore_conf.sh > restore_conf.log 2>&1 &'
        done

        echo "waiting for services to restart..."
        sleep 60
    fi
}

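# Resolve the hostname, management IP and ssh user of the compute host that
# runs the test VM, using the installer-specific inventory (apex: undercloud
# nova, fuel: 'fuel node', local: getent).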
get_compute_host_info() {
    # get info about the compute host the VM was booted on
    COMPUTE_HOST=$(openstack $as_doctor_user server show $VM_NAME |
                   grep "OS-EXT-SRV-ATTR:host" | awk '{ print $4 }')
    compute_host_in_undercloud=${COMPUTE_HOST%%.*}
    if [[ -z "$COMPUTE_HOST" ]] ; then
        echo "ERROR: failed to get compute hostname"
        exit 1
    fi

    if [[ "$INSTALLER_TYPE" == "apex" ]] ; then
        COMPUTE_USER=${COMPUTE_USER:-heat-admin}
        COMPUTE_IP=$(sudo ssh $ssh_opts $INSTALLER_IP \
             "source stackrc; \
             nova show $compute_host_in_undercloud \
             | awk '/ ctlplane network /{print \$5}'")
    elif [[ "$INSTALLER_TYPE" == "fuel" ]] ; then
        COMPUTE_USER=${COMPUTE_USER:-root}
        node_id=$(echo $compute_host_in_undercloud | cut -d "-" -f 2)
        COMPUTE_IP=$(sshpass -p r00tme ssh 2>/dev/null $ssh_opts root@${INSTALLER_IP} \
             "fuel node | awk -F '|' -v id=$node_id '{if (\$1 == id) print \$5}' | xargs")
    elif [[ "$INSTALLER_TYPE" == "local" ]] ; then
        COMPUTE_USER=${COMPUTE_USER:-$(whoami)}
        COMPUTE_IP=$(getent hosts "$COMPUTE_HOST" | awk '{ print $1 }')
    fi

    if [[ -z "$COMPUTE_IP" ]]; then
        echo "ERROR: Could not resolve $COMPUTE_HOST. Either manually set COMPUTE_IP or enable DNS resolution."
        exit 1
    fi
    echo "COMPUTE_HOST=$COMPUTE_HOST"
    echo "COMPUTE_IP=$COMPUTE_IP"

    # verify connectivity to target compute host
    # (with 'bash -e' in effect, test the command directly; a bare command
    # followed by a $? check would abort the script before the check runs)
    if ! ping -c 1 "$COMPUTE_IP" ; then
        echo "ERROR: cannot ping compute host"
        exit 1
    fi

    # verify ssh to target compute host
    if ! ssh $ssh_opts_cpu "$COMPUTE_USER@$COMPUTE_IP" 'exit' ; then
        echo "ERROR: cannot ssh to compute host"
        exit 1
    fi
}

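# Determine the address the consumer should be reachable on: the source IP
# that 'ip route get <COMPUTE_IP>' reports on the installer node (or on this
# host when INSTALLER_TYPE is 'local').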
get_consumer_ip() {
    local get_consumer_command="ip route get $COMPUTE_IP | awk '/ src /{print \$NF}'"
    if [[ "$INSTALLER_TYPE" == "apex" ]] ; then
        CONSUMER_IP=$(sudo ssh $ssh_opts root@$INSTALLER_IP \
                      "$get_consumer_command")
    elif [[ "$INSTALLER_TYPE" == "fuel" ]] ; then
        CONSUMER_IP=$(sudo sshpass -p r00tme ssh $ssh_opts root@${INSTALLER_IP} \
                      "$get_consumer_command")
    elif [[ "$INSTALLER_TYPE" == "local" ]] ; then
        # the command string contains a pipe, so it has to go through eval
        CONSUMER_IP=$(eval "$get_consumer_command")
    fi
    echo "CONSUMER_IP=$CONSUMER_IP"

    if [[ -z "$CONSUMER_IP" ]]; then
        echo "ERROR: Could not get CONSUMER_IP."
        exit 1
    fi
}

download_image() {
    #if an existing image name was provided via the environment, there's no need to download one
    use_existing_image=false
    openstack image list | grep -q " $IMAGE_NAME " && use_existing_image=true

    if [[ "$use_existing_image" == false ]] ; then
        [ -e "$IMAGE_FILE" ] && return 0
        # note: wget's -O (capital) writes the download to the file; -o would only redirect the log
        wget "$IMAGE_URL" -O "$IMAGE_FILE"
    fi
}

register_image() {
    openstack image list | grep -q " $IMAGE_NAME " && return 0
    openstack image create "$IMAGE_NAME" \
                           --public \
                           --disk-format "$IMAGE_FORMAT" \
                           --container-format bare \
                           --file "$IMAGE_FILE"
}

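# Idempotently create the dedicated doctor project, user and role binding;
# each step is skipped when the object already exists.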
create_test_user() {
    openstack project list | grep -q " $DOCTOR_PROJECT " || {
        openstack project create "$DOCTOR_PROJECT"
    }
    openstack user list | grep -q " $DOCTOR_USER " || {
        openstack user create "$DOCTOR_USER" --password "$DOCTOR_PW" \
                              --project "$DOCTOR_PROJECT"
    }
    openstack user role list "$DOCTOR_USER" --project "$DOCTOR_PROJECT" \
    | grep -q " $DOCTOR_ROLE " || {
        openstack role add "$DOCTOR_ROLE" --user "$DOCTOR_USER" \
                           --project "$DOCTOR_PROJECT"
    }
}

boot_vm() {
    # the test VM is booted as the test user, so non-admin operation can also be tested
    openstack $as_doctor_user server list | grep -q " $VM_NAME " && return 0
    openstack $as_doctor_user server create --flavor "$VM_FLAVOR" \
                            --image "$IMAGE_NAME" \
                            "$VM_NAME"
    sleep 1
}

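# Register a ceilometer event alarm as the test user: a
# 'compute.instance.update' event that carries state 'error' for this VM
# triggers an HTTP POST to the consumer's /failure endpoint.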
create_alarm() {
    # get vm_id as test user
    ceilometer $as_doctor_user alarm-list | grep -q " $ALARM_NAME " && return 0
    vm_id=$(openstack $as_doctor_user server list | grep " $VM_NAME " | awk '{print $2}')
    # TODO(r-mibu): change notification endpoint from localhost to the consumer
    # IP address (functest container).
    ceilometer $as_doctor_user alarm-event-create --name "$ALARM_NAME" \
        --alarm-action "http://localhost:$CONSUMER_PORT/failure" \
        --description "VM failure" \
        --enabled True \
        --repeat-actions False \
        --severity "moderate" \
        --event-type compute.instance.update \
        -q "traits.state=string::error; traits.instance_id=string::$vm_id"
}

print_log() {
    log_file=$1
    echo "$log_file:"
    sed -e 's/^/    /' "$log_file"
}

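# Start the sample monitor against the compute host; when it detects a host
# failure, it reports the event to the inspector endpoint given as the last
# argument.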
start_monitor() {
    pgrep -f "python monitor.py" && return 0
    sudo -E python monitor.py "$COMPUTE_HOST" "$COMPUTE_IP" "$INSPECTOR_TYPE" \
        "http://127.0.0.1:$INSPECTOR_PORT/events" > monitor.log 2>&1 &
}

stop_monitor() {
    pgrep -f "python monitor.py" || return 0
    sudo kill $(pgrep -f "python monitor.py")
    print_log monitor.log
}

congress_add_rule() {
    name=$1
    policy=$2
    rule=$3

    if ! openstack congress policy rule list $policy | grep -q -e "^// Name: $name$" ; then
        openstack congress policy rule create --name $name $policy "$rule"
    fi
}

congress_del_rule() {
    name=$1
    policy=$2

    if openstack congress policy rule list $policy | grep -q -e "^// Name: $name$" ; then
        openstack congress policy rule delete $policy $name
    fi
}

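# Install the Datalog rules used by the congress inspector: a doctor
# 'compute.host.down' event marks the host as down, which forces the
# nova-compute service down and resets ACTIVE servers on that host to 'error'.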
congress_setup_rules() {
    congress_add_rule host_down classification \
        'host_down(host) :-
            doctor:events(hostname=host, type="compute.host.down", status="down")'

    congress_add_rule active_instance_in_host classification \
        'active_instance_in_host(vmid, host) :-
            nova:servers(id=vmid, host_name=host, status="ACTIVE")'

    congress_add_rule host_force_down classification \
        'execute[nova:services.force_down(host, "nova-compute", "True")] :-
            host_down(host)'

    congress_add_rule error_vm_states classification \
        'execute[nova:servers.reset_state(vmid, "error")] :-
            host_down(host),
            active_instance_in_host(vmid, host)'
}

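# Start the chosen inspector. The congress variant requires a nova datasource
# with API microversion >= 2.11 (which introduced force_down) and a 'doctor'
# datasource for receiving events.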
start_inspector() {
    if [[ "$INSPECTOR_TYPE" == "sample" ]] ; then
        pgrep -f "python inspector.py" && return 0
        python inspector.py "$INSPECTOR_PORT" > inspector.log 2>&1 &
    elif [[ "$INSPECTOR_TYPE" == "congress" ]] ; then
        nova_api_min_version="2.11"
        nova_api_version=$(openstack congress datasource list | \
                           grep nova | grep -Po "(?<='api_version': ')[^']*")
        [[ -z $nova_api_version ]] && nova_api_version="2.0"
        # compare microversions numerically; a plain string compare would
        # mis-order versions such as "2.9" and "2.11"
        if [[ "$(printf '%s\n' "$nova_api_version" "$nova_api_min_version" \
                 | sort -V | head -n 1)" != "$nova_api_min_version" ]]; then
            echo "ERROR: Congress Nova datasource API version < $nova_api_min_version ($nova_api_version)"
            exit 1
        fi
        openstack congress driver list | grep -q " doctor "
        openstack congress datasource list | grep -q " doctor " || {
            openstack congress datasource create doctor doctor
        }
        congress_setup_rules
    fi
}

stop_inspector() {
    if [[ "$INSPECTOR_TYPE" == "sample" ]] ; then
        pgrep -f "python inspector.py" || return 0
        kill $(pgrep -f "python inspector.py")
        print_log inspector.log
    elif [[ "$INSPECTOR_TYPE" == "congress" ]] ; then
        congress_del_rule host_force_down classification
        congress_del_rule error_vm_states classification
        congress_del_rule active_instance_in_host classification
        congress_del_rule host_down classification
    fi
}

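# Start the sample consumer on $CONSUMER_PORT and, for remote installers,
# open reverse ssh tunnels from each controller back to this host so the
# alarm action URL (http://localhost:$CONSUMER_PORT/failure) works there.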
start_consumer() {
    pgrep -f "python consumer.py" && return 0
    python consumer.py "$CONSUMER_PORT" > consumer.log 2>&1 &

    # NOTE(r-mibu): create tunnels to the controller nodes, so that we can
    # avoid network problems that depend on the infra and installers.
    # These tunnels will be terminated by stop_consumer() or after 10 mins.
    if [[ "$INSTALLER_TYPE" != "local" ]] ; then
        if [[ "$INSTALLER_TYPE" == "apex" ]] ; then
            CONTROLLER_IPS=$(sudo ssh $ssh_opts $INSTALLER_IP \
                             "source stackrc; \
                             nova list | grep ' overcloud-controller-[0-9] ' \
                             | sed -e 's/^.*ctlplane=//' -e 's/ *|\$//'")
        elif [[ "$INSTALLER_TYPE" == "fuel" ]] ; then
            CONTROLLER_IPS=$(sshpass -p r00tme ssh 2>/dev/null $ssh_opts root@${INSTALLER_IP} \
                            "fuel node | grep controller | cut -d '|' -f 5 | xargs")
        fi

        if [[ -z "$CONTROLLER_IPS" ]]; then
            echo "ERROR: Could not get CONTROLLER_IPS."
            exit 1
        fi
        for ip in $CONTROLLER_IPS
        do
            forward_rule="-R $CONSUMER_PORT:localhost:$CONSUMER_PORT"
            tunnel_command="sudo ssh $ssh_opts_cpu $COMPUTE_USER@$ip $forward_rule sleep 600"
            $tunnel_command > "ssh_tunnel.${ip}.log" 2>&1 < /dev/null &
        done
    fi
}

stop_consumer() {
    pgrep -f "python consumer.py" || return 0
    kill $(pgrep -f "python consumer.py")
    print_log consumer.log

    # NOTE(r-mibu): terminate tunnels to the controller nodes
    if [[ "$INSTALLER_TYPE" != "local" ]] ; then
        for ip in $CONTROLLER_IPS
        do
            forward_rule="-R $CONSUMER_PORT:localhost:$CONSUMER_PORT"
            tunnel_command="sudo ssh $ssh_opts_cpu $COMPUTE_USER@$ip $forward_rule sleep 600"
            kill $(pgrep -f "$tunnel_command")
            print_log "ssh_tunnel.${ip}.log"
        done
    fi
}

wait_for_vm_launch() {
    echo "waiting for vm launch..."

    count=0
    while [[ ${count} -lt 60 ]]
    do
        state=$(openstack $as_doctor_user server list | grep " $VM_NAME " | awk '{print $6}')
        if [[ "$state" == "ACTIVE" ]]; then
            # NOTE(cgoncalves): sleeping for a bit to stabilize
            # See python-openstackclient/functional/tests/compute/v2/test_server.py:wait_for_status
            sleep 5
            return 0
        fi
        [[ "$state" == "ERROR" ]] && echo "vm state is ERROR" && exit 1
        count=$(($count+1))
        sleep 1
    done
    echo "ERROR: timed out while waiting for vm launch"
    exit 1
}

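# Simulate a sudden compute host failure: generate a script on the compute
# host that takes the interface holding $COMPUTE_IP down for 180 seconds and
# then brings it back up.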
inject_failure() {
    echo "disabling network of compute host [$COMPUTE_HOST] for 3 mins..."
    cat > disable_network.sh << 'END_TXT'
#!/bin/bash -x
dev=$(sudo ip a | awk '/ @COMPUTE_IP@\//{print $7}')
sleep 1
sudo ip link set $dev down
sleep 180
sudo ip link set $dev up
sleep 1
END_TXT
    sed -i -e "s/@COMPUTE_IP@/$COMPUTE_IP/" disable_network.sh
    chmod +x disable_network.sh
    scp $ssh_opts_cpu disable_network.sh "$COMPUTE_USER@$COMPUTE_IP:"
    ssh $ssh_opts_cpu "$COMPUTE_USER@$COMPUTE_IP" 'nohup ./disable_network.sh > disable_network.log 2>&1 &'
}

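# Compare the timestamps logged by the monitor (detection) and the consumer
# (notification); the test passes when the notification arrives less than
# one second after detection.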
calculate_notification_time() {
    if ! grep -q "doctor monitor detected at" monitor.log ; then
        echo "ERROR: monitor hasn't detected the failure."
        exit 1
    fi
    if ! grep -q "doctor consumer notified at" consumer.log ; then
        echo "ERROR: consumer hasn't received fault notification."
        exit 1
    fi
    detected=$(grep "doctor monitor detected at" monitor.log | awk '{print $5}')
    notified=$(grep "doctor consumer notified at" consumer.log | awk '{print $5}')
    echo "$notified $detected" | \
        awk '{
            d = $1 - $2;
            if (d < 1 && d > 0) { print d " OK"; exit 0 }
            else { print d " NG"; exit 1 }
        }'
}

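# Fetch the 'host_status' field of the test VM (available from compute API
# microversion 2.16) and verify it matches the expected state pattern.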
check_host_status() {
    expected_state=$1

    host_status_line=$(openstack $as_doctor_user --os-compute-api-version 2.16 \
                       server show $VM_NAME | grep "host_status")
    host_status=$(echo $host_status_line | awk '{print $4}')
    if [ -z "$host_status" ] ; then
        echo "ERROR: host_status not reported by: openstack server show $VM_NAME"
        exit 1
    elif [[ "$expected_state" =~ "$host_status" ]] ; then
        echo "$VM_NAME showing host_status: $host_status"
    else
        echo "ERROR: host_status: $host_status does not match expected_state: $expected_state"
        exit 1
    fi
}

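# Runs on EXIT: stop the doctor components, let the compute host recover
# (unset nova force-down), then delete the test VM, alarm, image, user and
# project, and finally restore any modified fuel configuration.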
cleanup() {
    set +e
    echo "cleanup..."
    stop_monitor
    stop_inspector
    stop_consumer

    echo "waiting for the disabled compute host to come back up..."
    python ./nova_force_down.py "$COMPUTE_HOST" --unset
    sleep 240
    check_host_status "UP"
    scp $ssh_opts_cpu "$COMPUTE_USER@$COMPUTE_IP:disable_network.log" .
    print_log disable_network.log

    openstack $as_doctor_user server list | grep -q " $VM_NAME " && openstack $as_doctor_user server delete "$VM_NAME"
    sleep 1
    alarm_id=$(ceilometer $as_doctor_user alarm-list | grep " $ALARM_NAME " | awk '{print $2}')
    sleep 1
    [ -n "$alarm_id" ] && ceilometer $as_doctor_user alarm-delete "$alarm_id"
    sleep 1

    image_id=$(openstack image list | grep " $IMAGE_NAME " | awk '{print $2}')
    sleep 1
    #if an existing image was used, there's no need to remove it here
    if [[ "$use_existing_image" == false ]] ; then
        [ -n "$image_id" ] && openstack image delete "$image_id"
    fi
    openstack role remove "$DOCTOR_ROLE" --user "$DOCTOR_USER" \
                          --project "$DOCTOR_PROJECT"
    openstack project delete "$DOCTOR_PROJECT"
    openstack user delete "$DOCTOR_USER"

    restore_test_env
}


echo "Note: running doctor/tests/run.sh"

trap cleanup EXIT

echo "preparing test env..."
get_installer_ip
prepare_ssh_to_cloud
prepare_test_env

echo "preparing VM image..."
download_image
register_image

echo "creating test user..."
create_test_user

echo "creating VM..."
boot_vm
wait_for_vm_launch

echo "getting compute host info..."
get_compute_host_info

echo "creating alarm..."
#TODO: switch back to get_consumer_ip once the network problems that depend
#      on the infra and installers are solved
#get_consumer_ip
create_alarm

echo "starting doctor sample components..."
start_inspector
start_monitor
start_consumer

sleep 60
echo "injecting host failure..."
inject_failure
sleep 60

check_host_status "(DOWN|UNKNOWN)"
calculate_notification_time

echo "done"