#!/bin/bash -e
##############################################################################
# Copyright (c) 2016 NEC Corporation and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
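
# Doctor sample test. Boots a test VM as a non-admin user, takes down the
# network of the compute host running it, and verifies that the monitor ->
# inspector -> consumer notification chain completes in under one second
# (see calculate_notification_time below).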

[[ "${CI_DEBUG:-true}" == "true" ]] && set -x

IMAGE_URL=https://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img
IMAGE_NAME=cirros
IMAGE_FILE="${IMAGE_NAME}.img"
IMAGE_FORMAT=qcow2
VM_NAME=doctor_vm1
VM_FLAVOR=m1.tiny
ALARM_NAME=doctor_alarm1
INSPECTOR_PORT=12345
CONSUMER_PORT=12346
DOCTOR_USER=doctor
DOCTOR_PW=doctor
DOCTOR_PROJECT=doctor
DOCTOR_ROLE=_member_

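# Installer and compute-host access settings; all of these can be overridden
# from the environment, e.g. (hypothetical values):
#   INSTALLER_TYPE=local COMPUTE_HOST=mycompute COMPUTE_IP=192.0.2.10 ./run.sh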
SUPPORTED_INSTALLER_TYPES="apex local"
INSTALLER_TYPE=${INSTALLER_TYPE:-apex}
INSTALLER_IP=${INSTALLER_IP:-none}
COMPUTE_HOST=${COMPUTE_HOST:-overcloud-novacompute-0}
COMPUTE_IP=${COMPUTE_IP:-none}
COMPUTE_USER=${COMPUTE_USER:-heat-admin}
ssh_opts="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"

# pad with spaces so the check matches a whole word, not a substring
if [[ ! " $SUPPORTED_INSTALLER_TYPES " =~ " $INSTALLER_TYPE " ]] ; then
    echo "ERROR: INSTALLER_TYPE=$INSTALLER_TYPE is not supported."
    exit 1
fi

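# Resolve the installer and compute-host IPs and prepare SSH options for
# reaching the compute host. For apex, the SSH key is fetched from the
# undercloud (instack) node; for local, keys are assumed to be already
# exchanged.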
prepare_compute_ssh() {
    ssh_opts_cpu="$ssh_opts"

    if [[ "$INSTALLER_TYPE" == "apex" ]] ; then
        if [[ "$INSTALLER_IP" == "none" ]] ; then
            instack_mac=$(sudo virsh domiflist instack | awk '/default/{print $5}')
            INSTALLER_IP=$(/usr/sbin/arp -e | grep "$instack_mac" | awk '{print $1}')
        fi

        if [[ "$COMPUTE_IP" == "none" ]] ; then
            COMPUTE_IP=$(sudo ssh $ssh_opts "$INSTALLER_IP" \
                         "source stackrc; \
                          nova show $COMPUTE_HOST \
                          | awk '/ ctlplane network /{print \$5}'")
        fi

        # get ssh key from installer node
        sudo scp $ssh_opts root@"$INSTALLER_IP":/home/stack/.ssh/id_rsa instack_key
        sudo chown $(whoami):$(whoami) instack_key
        chmod 400 instack_key
        ssh_opts_cpu+=" -i instack_key"
    elif [[ "$INSTALLER_TYPE" == "local" ]] ; then
        if [[ "$COMPUTE_IP" == "none" ]] ; then
            COMPUTE_IP=$(getent hosts "$COMPUTE_HOST" | awk '{ print $1 }')
            if [[ -z "$COMPUTE_IP" ]]; then
                echo "ERROR: Could not resolve $COMPUTE_HOST. Either manually set COMPUTE_IP or enable DNS resolution."
                exit 1
            fi
        fi

        echo "INSTALLER_TYPE set to 'local'. Assuming SSH keys already exchanged with $COMPUTE_HOST"
    fi

    # verify connectivity to target compute host
    ping -c 1 "$COMPUTE_IP"
}

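# Fetch the Cirros test image once; subsequent runs reuse the local copy.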
download_image() {
    [ -e "$IMAGE_FILE" ] && return 0
    # note: -O (capital) writes the download to the file; lowercase -o
    # would only redirect wget's log output
    wget "$IMAGE_URL" -O "$IMAGE_FILE"
}

register_image() {
    glance image-list | grep -q " $IMAGE_NAME " && return 0
    glance image-create --name "$IMAGE_NAME" \
                        --visibility public \
                        --disk-format "$IMAGE_FORMAT" \
                        --container-format bare \
                        --file "$IMAGE_FILE"
}

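# Create a dedicated non-admin user and project so the test exercises only
# what an instance owner is allowed to see.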
create_test_user() {
    openstack user list | grep -q " $DOCTOR_USER " || {
        openstack user create "$DOCTOR_USER" --password "$DOCTOR_PW"
    }
    openstack project list | grep -q " $DOCTOR_PROJECT " || {
        openstack project create "$DOCTOR_PROJECT"
    }
    openstack user role list "$DOCTOR_USER" --project "$DOCTOR_PROJECT" \
    | grep -q " $DOCTOR_ROLE " || {
        openstack role add "$DOCTOR_ROLE" --user "$DOCTOR_USER" \
                           --project "$DOCTOR_PROJECT"
    }
}

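# Switch OpenStack credentials to the test user. Callers run this inside a
# subshell so the admin credentials remain intact afterwards.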
change_to_doctor_user() {
    export OS_USERNAME="$DOCTOR_USER"
    export OS_PASSWORD="$DOCTOR_PW"
    export OS_PROJECT_NAME="$DOCTOR_PROJECT"
    export OS_TENANT_NAME="$DOCTOR_PROJECT"
}

boot_vm() {
    (
        # boot the test VM as the test user, so non-admin operation is tested
        change_to_doctor_user
        nova list | grep -q " $VM_NAME " && return 0
        nova boot --flavor "$VM_FLAVOR" \
                  --image "$IMAGE_NAME" \
                  "$VM_NAME"
        sleep 1
    )
}

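# Register a Ceilometer event alarm that calls the consumer's /failure
# endpoint when this instance reports an error state.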
create_alarm() {
    (
        # get vm_id as test user
        change_to_doctor_user
        ceilometer alarm-list | grep -q " $ALARM_NAME " && return 0
        vm_id=$(nova list | grep " $VM_NAME " | awk '{print $2}')
        ceilometer alarm-event-create --name "$ALARM_NAME" \
            --alarm-action "http://localhost:$CONSUMER_PORT/failure" \
            --description "VM failure" \
            --enabled True \
            --repeat-actions False \
            --severity "moderate" \
            --event-type compute.instance.update \
            -q "traits.state=string::error; traits.instance_id=string::$vm_id"
    )
}

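# Start/stop helpers for the three sample components. Starts are idempotent
# (skipped when the process already runs) and each component logs to a file
# that the matching stop function dumps for debugging.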
start_monitor() {
    pgrep -f "python monitor.py" && return 0
    sudo python monitor.py "$COMPUTE_HOST" "$COMPUTE_IP" \
        "http://127.0.0.1:$INSPECTOR_PORT/events" > monitor.log 2>&1 &
}

stop_monitor() {
    pgrep -f "python monitor.py" || return 0
    sudo kill $(pgrep -f "python monitor.py")
    cat monitor.log
}

start_inspector() {
    pgrep -f "python inspector.py" && return 0
    python inspector.py "$INSPECTOR_PORT" > inspector.log 2>&1 &
}

stop_inspector() {
    pgrep -f "python inspector.py" || return 0
    kill $(pgrep -f "python inspector.py")
    cat inspector.log
}

start_consumer() {
    pgrep -f "python consumer.py" && return 0
    python consumer.py "$CONSUMER_PORT" > consumer.log 2>&1 &
}

stop_consumer() {
    pgrep -f "python consumer.py" || return 0
    kill $(pgrep -f "python consumer.py")
    cat consumer.log
}

wait_for_vm_launch() {
    echo "waiting for vm launch..."

    (
        # get VM state as test user
        change_to_doctor_user

        count=0
        while [[ ${count} -lt 60 ]]
        do
            state=$(nova list | grep " $VM_NAME " | awk '{print $6}')
            [[ "$state" == "ACTIVE" ]] && return 0
            [[ "$state" == "ERROR" ]] && echo "ERROR: vm state is ERROR" && exit 1
            count=$(($count+1))
            sleep 1
        done
        echo "ERROR: timed out waiting for vm launch"
        exit 1
    )
}

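# Simulate a compute-host failure: ship a small script to the compute host
# that takes its default network interface down for 180 seconds, and run it
# with nohup so the ssh session can return immediately.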
inject_failure() {
    echo "disabling network of compute host [$COMPUTE_HOST] for 3 mins..."
    cat > disable_network.sh << 'END_TXT'
#!/bin/bash -x
dev=$(sudo ip route | awk '/^default/{print $5}')
sleep 1
sudo ip link set $dev down
sleep 180
sudo ip link set $dev up
sleep 1
END_TXT
    chmod +x disable_network.sh
    scp $ssh_opts_cpu disable_network.sh "$COMPUTE_USER@$COMPUTE_IP:"
    ssh $ssh_opts_cpu "$COMPUTE_USER@$COMPUTE_IP" 'nohup ./disable_network.sh > disable_network.log 2>&1 &'
}

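# Doctor requires notification within one second of detection; diff the
# timestamps logged by the monitor and the consumer and print OK or NG.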
calculate_notification_time() {
    detected=$(grep "doctor monitor detected at" monitor.log | awk '{print $5}')
    notified=$(grep "doctor consumer notified at" consumer.log | awk '{print $5}')
    echo "$notified $detected" | \
        awk '{d = $1 - $2; if (d < 1 && d > 0) print d " OK"; else print d " NG"}'
}

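# As the VM owner, check that Nova reports host_status DOWN for the VM.
# This works only if host_status is exposed to owners in Nova's policy.json.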
check_host_status_down() {
    (
        change_to_doctor_user

        # the "if !" form keeps "set -e" from aborting when grep finds nothing
        if ! host_status_line=$(nova show "$VM_NAME" | grep "host_status") ; then
            echo "ERROR: host_status not configured for owner in Nova policy.json"
            exit 1
        fi

        host_status=$(echo $host_status_line | awk '{print $4}')
        if [[ "$host_status" == "DOWN" ]] ; then
            echo "$VM_NAME showing host_status: $host_status"
        else
            echo "ERROR: host_status not reported by: nova show $VM_NAME"
            exit 1
        fi
    )
}

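# Tear everything down; runs on EXIT via trap, so "set +e" keeps one failed
# step from cutting the rest of the cleanup short.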
cleanup() {
    set +e
    echo "cleanup..."
    stop_monitor
    stop_inspector
    stop_consumer

    python ./nova_force_down.py "$COMPUTE_HOST" --unset
    sleep 1
    (
        change_to_doctor_user
        nova list | grep -q " $VM_NAME " && nova delete "$VM_NAME"
        sleep 1
        alarm_id=$(ceilometer alarm-list | grep " $ALARM_NAME " | awk '{print $2}')
        sleep 1
        [ -n "$alarm_id" ] && ceilometer alarm-delete "$alarm_id"
        sleep 1
    )
    image_id=$(glance image-list | grep " $IMAGE_NAME " | awk '{print $2}')
    sleep 1
    [ -n "$image_id" ] && glance image-delete "$image_id"
    openstack role remove "$DOCTOR_ROLE" --user "$DOCTOR_USER" \
                              --project "$DOCTOR_PROJECT"
    openstack project delete "$DOCTOR_PROJECT"
    openstack user delete "$DOCTOR_USER"

    #TODO: add host status check via nova admin api
    echo "waiting for the disabled compute host to come back up..."
    sleep 180
    ssh $ssh_opts_cpu "$COMPUTE_USER@$COMPUTE_IP" \
        "[ -e disable_network.log ] && cat disable_network.log"
}

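# main test sequence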
echo "Note: doctor/tests/run.sh has been executed."

prepare_compute_ssh

trap cleanup EXIT

echo "preparing VM image..."
download_image
register_image

echo "starting doctor sample components..."
start_monitor
start_inspector
start_consumer

echo "creating test user..."
create_test_user

echo "creating VM and alarm..."
boot_vm
create_alarm
wait_for_vm_launch

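# allow the VM and the alarm time to settle before injecting the failure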
sleep 60
echo "injecting host failure..."
inject_failure
sleep 10

check_host_status_down
calculate_notification_time

echo "done"