IMAGE_NAME=${IMAGE_NAME:-cirros}
IMAGE_FILE="${IMAGE_NAME}.img"
IMAGE_FORMAT=qcow2
-VM_NAME=doctor_vm1
+VM_BASENAME=doctor_vm
VM_FLAVOR=m1.tiny
-ALARM_NAME=doctor_alarm1
+# if VM_COUNT is set in the environment, use it instead of the default of 1
+VM_COUNT=${VM_COUNT:-1}
+ALARM_BASENAME=doctor_alarm
INSPECTOR_PORT=12345
CONSUMER_PORT=12346
DOCTOR_USER=doctor
TOP_DIR=$(cd $(dirname "$0") && pwd)
-ssh_opts="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
as_doctor_user="--os-username $DOCTOR_USER --os-password $DOCTOR_PW
--os-tenant-name $DOCTOR_PROJECT"
# Functions
get_compute_host_info() {
- # get computer host info which VM boot in
- COMPUTE_HOST=$(openstack $as_doctor_user server show $VM_NAME |
+ # get the compute host that the first VM booted on
+ COMPUTE_HOST=$(openstack $as_doctor_user server show ${VM_BASENAME}1 |
grep "OS-EXT-SRV-ATTR:host" | awk '{ print $4 }')
compute_host_in_undercloud=${COMPUTE_HOST%%.*}
die_if_not_set $LINENO COMPUTE_HOST "Failed to get compute hostname"
- if is_installer apex; then
- COMPUTE_USER=${COMPUTE_USER:-heat-admin}
- COMPUTE_IP=$(sudo ssh $ssh_opts $INSTALLER_IP \
- "source stackrc; \
- nova show $compute_host_in_undercloud \
- | awk '/ ctlplane network /{print \$5}'")
- elif is_installer fuel; then
- COMPUTE_USER=${COMPUTE_USER:-root}
- node_id=$(echo $compute_host_in_undercloud | cut -d "-" -f 2)
- COMPUTE_IP=$(sshpass -p r00tme ssh 2>/dev/null $ssh_opts root@${INSTALLER_IP} \
- "fuel node|awk -F '|' -v id=$node_id '{if (\$1 == id) print \$5}' |xargs")
- elif is_installer local; then
- COMPUTE_USER=${COMPUTE_USER:-$(whoami)}
- COMPUTE_IP=$(getent hosts "$COMPUTE_HOST" | awk '{ print $1 }')
- fi
+ get_compute_ip_from_hostname $COMPUTE_HOST
- die_if_not_set $LINENO COMPUTE_IP "Could not resolve $COMPUTE_HOST. Either manually set COMPUTE_IP or enable DNS resolution."
echo "COMPUTE_HOST=$COMPUTE_HOST"
echo "COMPUTE_IP=$COMPUTE_IP"
}
-get_consumer_ip() {
+# TODO(r-mibu): update this function to support consumer instance
+# and migrate this function into installer lib
+get_consumer_ip___to_be_removed() {
local get_consumer_command="ip route get $COMPUTE_IP | awk '/ src /{print \$NF}'"
if is_installer apex; then
CONSUMER_IP=$(sudo ssh $ssh_opts root@$INSTALLER_IP \
openstack role add "$DOCTOR_ROLE" --user "$DOCTOR_USER" \
--project "$DOCTOR_PROJECT"
}
+ # tojuvone: openstack quota show is broken, so nova has to be used instead
+ # https://bugs.launchpad.net/manila/+bug/1652118
+ # Note: while use of the openstack client is encouraged, it has proven
+ # quite buggy.
+ # QUOTA=$(openstack quota show $DOCTOR_PROJECT)
+ DOCTOR_QUOTA=$(nova quota-show --tenant $DOCTOR_PROJECT)
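+ # nova quota-show prints a table; field 4 of the " instances " and " cores " rows parsed below holds the current limit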
+ # We make sure that quota allows number of instances and cores
+ OLD_INSTANCE_QUOTA=$(echo "${DOCTOR_QUOTA}" | grep " instances " | \
+ awk '{print $4}')
+ if [ $OLD_INSTANCE_QUOTA -lt $VM_COUNT ]; then
+ openstack quota set --instances $VM_COUNT \
+ $DOCTOR_USER
+ fi
+ OLD_CORES_QUOTA=$(echo "${DOCTOR_QUOTA}" | grep " cores " | \
+ awk '{print $4}')
+ if [ $OLD_CORES_QUOTA -lt $VM_COUNT ]; then
+ openstack quota set --cores $VM_COUNT \
+ $DOCTOR_USER
+ fi
}
boot_vm() {
# test VMs are created as the test user, so non-admin operation is covered
- openstack $as_doctor_user server list | grep -q " $VM_NAME " && return 0
- openstack $as_doctor_user server create --flavor "$VM_FLAVOR" \
+ servers=$(openstack $as_doctor_user server list)
+ for i in `seq $VM_COUNT`; do
+ echo "${servers}" | grep -q " $VM_BASENAME$i " && continue
+ openstack $as_doctor_user server create --flavor "$VM_FLAVOR" \
--image "$IMAGE_NAME" \
- "$VM_NAME"
+ "$VM_BASENAME$i"
+ done
sleep 1
}
create_alarm() {
# get vm_id as test user
- ceilometer $as_doctor_user alarm-list | grep -q " $ALARM_NAME " && return 0
- vm_id=$(openstack $as_doctor_user server list | grep " $VM_NAME " | awk '{print $2}')
- # TODO(r-mibu): change notification endpoint from localhost to the consumer
- # IP address (functest container).
- ceilometer $as_doctor_user alarm-event-create --name "$ALARM_NAME" \
- --alarm-action "http://localhost:$CONSUMER_PORT/failure" \
- --description "VM failure" \
- --enabled True \
- --repeat-actions False \
- --severity "moderate" \
- --event-type compute.instance.update \
- -q "traits.state=string::error; traits.instance_id=string::$vm_id"
+ alarm_list=$(ceilometer $as_doctor_user alarm-list)
+ vms=$(openstack $as_doctor_user server list)
+ for i in `seq $VM_COUNT`; do
+ echo "${alarm_list}" | grep -q " $ALARM_BASENAME$i " || {
+ vm_id=$(echo "${vms}" | grep " $VM_BASENAME$i " | awk '{print $2}')
+ # TODO(r-mibu): change notification endpoint from localhost to the
+ # consumer IP address (functest container)
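+ # one event alarm per VM: it fires on a compute.instance.update event reporting this instance in error state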
+ ceilometer $as_doctor_user alarm-event-create \
+ --name "$ALARM_BASENAME$i" \
+ --alarm-action "http://localhost:$CONSUMER_PORT/failure" \
+ --description "VM failure" \
+ --enabled True \
+ --repeat-actions False \
+ --severity "moderate" \
+ --event-type compute.instance.update \
+ -q "traits.state=string::error; \
+ traits.instance_id=string::$vm_id"
+ }
+ done
}
start_monitor() {
# avoid some network problems that depend on the infra and installers.
# This tunnel will be terminated by stop_consumer() or after 10 mins passed.
if ! is_installer local; then
- if is_installer apex; then
- CONTROLLER_IPS=$(sudo ssh $ssh_opts $INSTALLER_IP \
- "source stackrc; \
- nova list | grep ' overcloud-controller-[0-9] ' \
- | sed -e 's/^.*ctlplane=//' -e 's/ *|\$//'")
- elif is_installer fuel; then
- CONTROLLER_IPS=$(sshpass -p r00tme ssh 2>/dev/null $ssh_opts root@${INSTALLER_IP} \
- "fuel node | grep controller | cut -d '|' -f 5|xargs")
- fi
-
- die_if_not_set $LINENO CONTROLLER_IPS "Could not get CONTROLLER_IPS."
for ip in $CONTROLLER_IPS
do
forward_rule="-R $CONSUMER_PORT:localhost:$CONSUMER_PORT"
count=0
while [[ ${count} -lt 60 ]]
do
- state=$(openstack $as_doctor_user server list | grep " $VM_NAME " | awk '{print $6}')
- if [[ "$state" == "ACTIVE" ]]; then
- # NOTE(cgoncalves): sleeping for a bit to stabilize
- # See python-openstackclient/functional/tests/compute/v2/test_server.py:wait_for_status
- sleep 5
+ active_count=0
+ vms=$(openstack $as_doctor_user server list)
+ for i in `seq $VM_COUNT`; do
+ state=$(echo "${vms}" | grep " $VM_BASENAME$i " | awk '{print $6}')
+ if [[ "$state" == "ACTIVE" ]]; then
+ active_count=$(($active_count+1))
+ elif [[ "$state" == "ERROR" ]]; then
+ die $LINENO "vm state $VM_BASENAME$i is ERROR"
+ else
+ # this VM is not yet active
+ count=$(($count+1))
+ sleep 5
+ continue
+ fi
+ done
+ [[ $active_count -eq $VM_COUNT ]] && {
+ echo "get computer host info..."
+ get_compute_host_info
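+ # count the Doctor VMs scheduled on that compute host; wait_consumer later expects one notification per such VM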
+ VMS_ON_FAILED_HOST=$(openstack $as_doctor_user server list --host \
+ $COMPUTE_HOST | grep " ${VM_BASENAME}" | wc -l)
return 0
- fi
- if [[ "$state" == "ERROR" ]]; then
- openstack $as_doctor_user server show $VM_NAME
- die $LINENO "vm state is ERROR"
- fi
+ }
+ # not all VMs are active yet
count=$(($count+1))
- sleep 1
+ sleep 5
done
die $LINENO "Time out while waiting for VM launch"
}
cat > disable_network.sh << 'END_TXT'
#!/bin/bash -x
dev=$(sudo ip a | awk '/ @COMPUTE_IP@\//{print $7}')
+[[ -n "$dev" ]] || dev=$(sudo ip a | awk '/ @COMPUTE_IP@\//{print $5}')
sleep 1
sudo ip link set $dev down
echo "doctor set link down at" $(date "+%s.%N")
triggered=$(date "+%s.%N")
}
-calculate_notification_time() {
- if ! grep -q "doctor consumer notified at" consumer.log ; then
- die $LINENO "Consumer hasn't received fault notification."
- fi
+wait_consumer() {
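+ # $1: maximum time in seconds to wait for the consumer to log one notification per VM on the failed host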
+ local interval=1
+ local rounds=$(($1 / $interval))
+ for i in `seq $rounds`; do
+ notified_count=$(grep "doctor consumer notified at" consumer.log | wc -l)
+ if [[ $notified_count -eq $VMS_ON_FAILED_HOST ]]; then
+ return 0
+ fi
+ sleep $interval
+ done
+ die $LINENO "Consumer hasn't received fault notification."
+}
+calculate_notification_time() {
+ wait_consumer 60
# keep 'at' as the last keyword just before the value, and
# use a regex to extract the value instead of relying on a fixed column
detected=$(grep "doctor monitor detected at" monitor.log |\
sed -e "s/^.* at //")
notified=$(grep "doctor consumer notified at" consumer.log |\
- sed -e "s/^.* at //")
+ sed -e "s/^.* at //" | tail -1)
echo "$notified $detected" | \
awk '{
    # assumed pass criterion: the notification must arrive within 1 second of detection
    d = $1 - $2;
    if (d < 1 && d > 0) { print d " OK"; exit 0 }
    else { print d " NG"; exit 1 }
}'
}
+wait_ping() {
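+ # $1: maximum time in seconds to wait for COMPUTE_IP to answer ping again (returns 0 even on timeout)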
+ local interval=5
+ local rounds=$(($1 / $interval))
+ for i in `seq $rounds`; do
+ ping -c 1 "$COMPUTE_IP"
+ if [[ $? -ne 0 ]] ; then
+ sleep $interval
+ continue
+ fi
+ return 0
+ done
+}
+
check_host_status() {
+ # Check that the host running the first Doctor VM is in the wanted state
+ # $1 expected state
+ # $2 seconds to wait for the wanted state
expected_state=$1
-
- host_status_line=$(openstack $as_doctor_user --os-compute-api-version 2.16 \
- server show $VM_NAME | grep "host_status")
- host_status=$(echo $host_status_line | awk '{print $4}')
- die_if_not_set $LINENO host_status "host_status not reported by: nova show $VM_NAME"
+ local interval=5
+ local rounds=$(($2 / $interval))
+ for i in `seq $rounds`; do
+ host_status_line=$(openstack $as_doctor_user --os-compute-api-version \
+ 2.16 server show ${VM_BASENAME}1 | grep "host_status")
+ host_status=$(echo $host_status_line | awk '{print $4}')
+ die_if_not_set $LINENO host_status "host_status not reported by: nova show ${VM_BASENAME}1"
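+ # quoted =~ does a literal substring match: it checks that host_status occurs in the expected pattern, e.g. UNKNOWN in "(DOWN|UNKNOWN)"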
+ if [[ "$expected_state" =~ "$host_status" ]] ; then
+ echo "${VM_BASENAME}1 showing host_status: $host_status"
+ return 0
+ else
+ sleep $interval
+ fi
+ done
if [[ "$expected_state" =~ "$host_status" ]] ; then
- echo "$VM_NAME showing host_status: $host_status"
+ echo "${VM_BASENAME}1 showing host_status: $host_status"
else
- die $LINENO "host_status:$host_status not equal to expected_state: $expected_state"
+ die $LINENO "host_status:$host_status not equal to expected_state: $expected_state"
fi
}
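+# find nova-compute services still reported 'down' and clear the forced-down flag on each such host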
for host in $(openstack compute service list --service nova-compute \
-f value -c Host -c State | sed -n -e '/down$/s/ *down$//p')
do
+ # TODO (r-mibu): make sample inspector use keystone v3 api
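+ # rewrite the v3 suffix of the auth URL to v2.0 for this command only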
+ OS_AUTH_URL=${OS_AUTH_URL/v3/v2.0} \
python ./nova_force_down.py $host --unset
done
stop_consumer
unset_forced_down_hosts
- # TODO: We need to make sure the target compute host is back to IP
- # reachable. wait_ping() will be added by tojuvone .
- sleep 110
- scp $ssh_opts_cpu "$COMPUTE_USER@$COMPUTE_IP:disable_network.log" .
- openstack $as_doctor_user server list | grep -q " $VM_NAME " && openstack $as_doctor_user server delete "$VM_NAME"
- sleep 1
- alarm_id=$(ceilometer $as_doctor_user alarm-list | grep " $ALARM_NAME " | awk '{print $2}')
- sleep 1
- [ -n "$alarm_id" ] && ceilometer $as_doctor_user alarm-delete "$alarm_id"
+ wait_ping 120
+
+ scp $ssh_opts_cpu "$COMPUTE_USER@$COMPUTE_IP:disable_network.log" .
+ vms=$(openstack $as_doctor_user server list)
+ vmstodel=""
+ for i in `seq $VM_COUNT`; do
+ $(echo "${vms}" | grep -q " $VM_BASENAME$i ") &&
+ vmstodel+=" $VM_BASENAME$i"
+ done
+ [[ $vmstodel ]] && openstack $as_doctor_user server delete $vmstodel
+ alarm_list=$(ceilometer $as_doctor_user alarm-list)
+ for i in `seq $VM_COUNT`; do
+ alarm_id=$(echo "${alarm_list}" | grep " $ALARM_BASENAME$i " |
+ awk '{print $2}')
+ [ -n "$alarm_id" ] && ceilometer $as_doctor_user alarm-delete "$alarm_id"
+ done
sleep 1
image_id=$(openstack image list | grep " $IMAGE_NAME " | awk '{print $2}')
cleanup_installer
cleanup_inspector
+
+ # NOTE: temporary log printer.
+ for f in $(find . -name '*.log')
+ do
+ echo
+ echo "[$f]"
+ sed -e 's/^/ | /' $f
+ echo
+ done
}
# Main process
boot_vm
wait_for_vm_launch
-echo "get computer host info..."
-get_compute_host_info
-
echo "creating alarm..."
#TODO: change back to using get_consumer_ip; network problems depend on infra and installers
#get_consumer_ip
sleep 60
echo "injecting host failure..."
inject_failure
-sleep 60
-check_host_status "(DOWN|UNKNOWN)"
+check_host_status "(DOWN|UNKNOWN)" 60
calculate_notification_time
collect_logs
run_profiler