# Credentials for the throw-away OpenStack user/project the test runs as.
DOCTOR_USER=doctor
DOCTOR_PW=doctor
DOCTOR_PROJECT=doctor
-DOCTOR_ROLE=_member_
+# TODO: change back to `_member_` when JIRA DOCTOR-55 is done
+DOCTOR_ROLE=admin
-SUPPORTED_INSTALLER_TYPES="apex local"
-INSTALLER_TYPE=${INSTALLER_TYPE:-apex}
+# Installers this script can drive; INSTALLER_TYPE/INSTALLER_IP may be
+# overridden from the environment (defaults: local deployment, unknown IP).
+SUPPORTED_INSTALLER_TYPES="apex fuel local"
+INSTALLER_TYPE=${INSTALLER_TYPE:-local}
INSTALLER_IP=${INSTALLER_IP:-none}
# Skip SSH host-key verification: lab nodes are reinstalled between runs,
# so their host keys change constantly.
ssh_opts="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
# Get the compute host that the VM was booted on.
COMPUTE_HOST=$(openstack $as_doctor_user server show $VM_NAME |
grep "OS-EXT-SRV-ATTR:host" | awk '{ print $4 }')
+ compute_host_in_undercloud=${COMPUTE_HOST%%.*}
if [[ -z "$COMPUTE_HOST" ]] ; then
echo "ERROR: failed to get compute hostname"
exit 1
fi
+
if [[ "$INSTALLER_TYPE" == "apex" ]] ; then
- compute_host_in_undercloud=${COMPUTE_HOST%%.*}
COMPUTE_USER=${COMPUTE_USER:-heat-admin}
if [[ "$INSTALLER_IP" == "none" ]] ; then
instack_mac=$(sudo virsh domiflist instack | awk '/default/{print $5}')
"source stackrc; \
nova show $compute_host_in_undercloud \
| awk '/ ctlplane network /{print \$5}'")
+ elif [[ "$INSTALLER_TYPE" == "fuel" ]] ; then
+ COMPUTE_USER=${COMPUTE_USER:-root}
+ if [[ "$INSTALLER_IP" == "none" ]] ; then
+ instack_mac=$(sudo virsh domiflist fuel-opnfv | awk '/pxebr/{print $5}')
+ INSTALLER_IP=$(/usr/sbin/arp -e | grep ${instack_mac} | awk '{print $1}')
+ fi
+ node_id=$(echo $compute_host_in_undercloud | cut -d "-" -f 2)
+ COMPUTE_IP=$(sshpass -p r00tme ssh 2>/dev/null $ssh_opts root@${INSTALLER_IP} \
+ "fuel node|awk -F '|' -v id=$node_id '{if (\$1 == id) print \$5}' |xargs")
elif [[ "$INSTALLER_TYPE" == "local" ]] ; then
COMPUTE_USER=${COMPUTE_USER:-$(whoami)}
COMPUTE_IP=$(getent hosts "$COMPUTE_HOST" | awk '{ print $1 }')
- if [[ -z "$COMPUTE_IP" ]]; then
- echo "ERROR: Could not resolve $COMPUTE_HOST. Either manually set COMPUTE_IP or enable DNS resolution."
- exit 1
- fi
+ fi
+
+ if [[ -z "$COMPUTE_IP" ]]; then
+ echo "ERROR: Could not resolve $COMPUTE_HOST. Either manually set COMPUTE_IP or enable DNS resolution."
+ exit 1
fi
echo "COMPUTE_HOST=$COMPUTE_HOST"
echo "COMPUTE_IP=$COMPUTE_IP"
sudo chown $(whoami):$(whoami) instack_key
chmod 400 instack_key
ssh_opts_cpu+=" -i instack_key"
+ elif [[ "$INSTALLER_TYPE" == "fuel" ]] ; then
+ sshpass -p r00tme scp $ssh_opts root@${INSTALLER_IP}:.ssh/id_rsa instack_key
+ sudo chown $(whoami):$(whoami) instack_key
+ chmod 400 instack_key
+ ssh_opts_cpu+=" -i instack_key"
elif [[ "$INSTALLER_TYPE" == "local" ]] ; then
echo "INSTALLER_TYPE set to 'local'. Assuming SSH keys already exchanged with $COMPUTE_HOST"
fi
}
get_consumer_ip() {
- CONSUMER_IP=$(ip route get $COMPUTE_IP | awk '/ src /{print $NF}')
+ CONSUMER_IP=$(sudo ssh $ssh_opts root@$INSTALLER_IP \
+ "ip route get $COMPUTE_IP | awk '/ src /{print \$NF}'")
echo "CONSUMER_IP=$CONSUMER_IP"
if [[ -z "$CONSUMER_IP" ]]; then
}
create_test_user() {
- openstack user list | grep -q " $DOCTOR_USER " || {
- openstack user create "$DOCTOR_USER" --password "$DOCTOR_PW"
- }
openstack project list | grep -q " $DOCTOR_PROJECT " || {
openstack project create "$DOCTOR_PROJECT"
}
+ openstack user list | grep -q " $DOCTOR_USER " || {
+ openstack user create "$DOCTOR_USER" --password "$DOCTOR_PW" \
+ --project "$DOCTOR_PROJECT"
+ }
openstack user role list "$DOCTOR_USER" --project "$DOCTOR_PROJECT" \
| grep -q " $DOCTOR_ROLE " || {
openstack role add "$DOCTOR_ROLE" --user "$DOCTOR_USER" \
# get vm_id as test user
ceilometer $as_doctor_user alarm-list | grep -q " $ALARM_NAME " && return 0
vm_id=$(openstack $as_doctor_user server list | grep " $VM_NAME " | awk '{print $2}')
+ # TODO(r-mibu): change notification endpoint from localhost to the consumer
+ # IP address (functest container).
ceilometer $as_doctor_user alarm-event-create --name "$ALARM_NAME" \
- --alarm-action "http://$CONSUMER_IP:$CONSUMER_PORT/failure" \
+ --alarm-action "http://localhost:$CONSUMER_PORT/failure" \
--description "VM failure" \
--enabled True \
--repeat-actions False \
-q "traits.state=string::error; traits.instance_id=string::$vm_id"
}
+# Print a log file prefixed by its name, indenting every line so the
+# output is visually grouped when several logs are dumped in sequence.
+# Arguments: $1 - path to the log file to print
+print_log() {
+    log_file=$1
+    echo "$log_file:"
+    sed -e 's/^/    /' "$log_file"
+}
start_monitor() {
pgrep -f "python monitor.py" && return 0
# Stop the fault monitor (if running) and dump its log for debugging.
stop_monitor() {
    pgrep -f "python monitor.py" || return 0
    # The monitor was started with sudo, so sudo is needed to kill it.
    sudo kill $(pgrep -f "python monitor.py")
-    cat monitor.log
+    print_log monitor.log
}
start_inspector() {
# Stop the inspector (if running) and dump its log for debugging.
stop_inspector() {
    pgrep -f "python inspector.py" || return 0
    kill $(pgrep -f "python inspector.py")
-    cat inspector.log
+    print_log inspector.log
}
# Start the consumer (the HTTP endpoint that receives alarm notifications)
# and, on apex, open reverse SSH tunnels so controller nodes can reach it.
start_consumer() {
    pgrep -f "python consumer.py" && return 0
    python consumer.py "$CONSUMER_PORT" > consumer.log 2>&1 &
+
+    # NOTE(r-mibu): create a tunnel to the controller nodes, so that we can
+    # avoid network problems that depend on the infra and installers.
+    # This tunnel will be terminated by stop_consumer() or after 10 mins passed.
+    if [[ "$INSTALLER_TYPE" == "apex" ]] ; then
+        # Controller ctlplane IPs are read from the undercloud's nova
+        # inventory ("overcloud-controller-N" entries).
+        CONTROLLER_IPS=$(sudo ssh $ssh_opts $INSTALLER_IP \
+                         "source stackrc; \
+                          nova list | grep ' overcloud-controller-[0-9] ' \
+                          | sed -e 's/^.*ctlplane=//' -e 's/ *|\$//'")
+    fi
+    # NOTE(review): this check also fires for non-apex installers that never
+    # set CONTROLLER_IPS — presumably only apex reaches this path; confirm.
+    if [[ -z "$CONTROLLER_IPS" ]]; then
+        echo "ERROR: Could not get CONTROLLER_IPS."
+        exit 1
+    fi
+    for ip in $CONTROLLER_IPS
+    do
+        # Reverse-forward CONSUMER_PORT from the controller back to this
+        # host; 'sleep 600' caps the tunnel's lifetime at 10 minutes in case
+        # stop_consumer() is never reached.
+        forward_rule="-R $CONSUMER_PORT:localhost:$CONSUMER_PORT"
+        tunnel_command="sudo ssh $ssh_opts $ip $forward_rule sleep 600"
+        $tunnel_command > "ssh_tunnel.${ip}.log" 2>&1 < /dev/null &
+    done
}
# Stop the consumer (if running), dump its log, and tear down the SSH
# tunnels opened by start_consumer().
stop_consumer() {
    pgrep -f "python consumer.py" || return 0
    kill $(pgrep -f "python consumer.py")
-    cat consumer.log
+    print_log consumer.log
+
+    # NOTE(r-mibu): terminate tunnels to the controller nodes
+    for ip in $CONTROLLER_IPS
+    do
+        # Rebuild the exact command line used in start_consumer() so that
+        # 'pgrep -f' matches the tunnel process for this controller.
+        forward_rule="-R $CONSUMER_PORT:localhost:$CONSUMER_PORT"
+        tunnel_command="sudo ssh $ssh_opts $ip $forward_rule sleep 600"
+        kill $(pgrep -f "$tunnel_command")
+        print_log "ssh_tunnel.${ip}.log"
+    done
}
wait_for_vm_launch() {
python ./nova_force_down.py "$COMPUTE_HOST" --unset
sleep 240
check_host_status "UP"
- ssh $ssh_opts_cpu "$COMPUTE_USER@$COMPUTE_IP" \
- "[ -e disable_network.log ] && cat disable_network.log"
+ scp $ssh_opts_cpu "$COMPUTE_USER@$COMPUTE_IP:disable_network.log" .
+ print_log disable_network.log
openstack $as_doctor_user server list | grep -q " $VM_NAME " && openstack $as_doctor_user server delete "$VM_NAME"
sleep 1