update workaround for notifications from OpenStack
[doctor.git] / tests / run.sh
index 2b88879..42600da 100755 (executable)
@@ -65,7 +65,7 @@ get_compute_host_info() {
             INSTALLER_IP=$(/usr/sbin/arp -e | grep ${instack_mac} | awk '{print $1}')
         fi
         node_id=$(echo $compute_host_in_undercloud | cut -d "-" -f 2)
-        COMPUTE_IP=$(sshpass -p r00tme ssh 2>/dev/null $ssh_options root@${INSTALLER_IP} \
+        COMPUTE_IP=$(sshpass -p r00tme ssh 2>/dev/null $ssh_opts root@${INSTALLER_IP} \
              "fuel node|awk -F '|' -v id=$node_id '{if (\$1 == id) print \$5}' |xargs")
     elif [[ "$INSTALLER_TYPE" == "local" ]] ; then
         COMPUTE_USER=${COMPUTE_USER:-$(whoami)}
@@ -97,7 +97,7 @@ prepare_compute_ssh() {
         chmod 400 instack_key
         ssh_opts_cpu+=" -i instack_key"
     elif [[ "$INSTALLER_TYPE" == "fuel" ]] ; then
-        sshpass -p r00tme scp $ssh_options root@${INSTALLER_IP}:.ssh/id_rsa instack_key
+        sshpass -p r00tme scp $ssh_opts root@${INSTALLER_IP}:.ssh/id_rsa instack_key
         sudo chown $(whoami):$(whoami) instack_key
         chmod 400 instack_key
         ssh_opts_cpu+=" -i instack_key"
@@ -114,7 +114,8 @@ prepare_compute_ssh() {
 }
 
 get_consumer_ip() {
-    CONSUMER_IP=$(ip route get $COMPUTE_IP | awk '/ src /{print $NF}')
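+    # get the source address the installer node uses to reach the compute host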
+    CONSUMER_IP=$(sudo ssh $ssh_opts root@$INSTALLER_IP \
+                  "ip route get $COMPUTE_IP | awk '/ src /{print \$NF}'")
     echo "CONSUMER_IP=$CONSUMER_IP"
 
     if [[ -z "$CONSUMER_IP" ]]; then
@@ -165,8 +166,10 @@ create_alarm() {
     # get vm_id as test user
     ceilometer $as_doctor_user alarm-list | grep -q " $ALARM_NAME " && return 0
     vm_id=$(openstack $as_doctor_user server list | grep " $VM_NAME " | awk '{print $2}')
+    # TODO(r-mibu): change notification endpoint from localhost to the consumer
+    # IP address (functest container).
     ceilometer $as_doctor_user alarm-event-create --name "$ALARM_NAME" \
-        --alarm-action "http://$CONSUMER_IP:$CONSUMER_PORT/failure" \
+        --alarm-action "http://localhost:$CONSUMER_PORT/failure" \
         --description "VM failure" \
         --enabled True \
         --repeat-actions False \
@@ -207,12 +210,39 @@ stop_inspector() {
 start_consumer() {
     pgrep -f "python consumer.py" && return 0
     python consumer.py "$CONSUMER_PORT" > consumer.log 2>&1 &
+
+    # NOTE(r-mibu): create tunnels to the controller nodes, so that we can
+    # avoid network problems that depend on the infra and installers.
+    # These tunnels will be terminated by stop_consumer() or after 10 minutes.
+    if [[ "$INSTALLER_TYPE" == "apex" ]] ; then
+        CONTROLLER_IPS=$(nova list | grep ' overcloud-controller-[0-9] ' \
+                         | sed -e 's/^.*ctlplane=//' -e 's/ *|$//')
+    fi
+    if [[ -z "$CONTROLLER_IPS" ]]; then
+        echo "ERROR: Could not get CONTROLLER_IPS."
+        exit 1
+    fi
+    for ip in $CONTROLLER_IPS
+    do
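+        # -R reverse-forwards the consumer port on the controller back to this
+        # host, so notifications sent to localhost on the controller reach consumer.py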
+        forward_rule="-R $CONSUMER_PORT:localhost:$CONSUMER_PORT"
+        tunnel_command="sudo ssh $ssh_opts $ip $forward_rule sleep 600"
+        $tunnel_command > "ssh_tunnel.${ip}.log" 2>&1 < /dev/null &
+    done
 }
 
 stop_consumer() {
     pgrep -f "python consumer.py" || return 0
     kill $(pgrep -f "python consumer.py")
     print_log consumer.log
+
+    # NOTE(r-mibu): terminate tunnels to the controller nodes
+    for ip in $CONTROLLER_IPS
+    do
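+        # reconstruct the exact tunnel command line so pgrep -f can match it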
+        forward_rule="-R $CONSUMER_PORT:localhost:$CONSUMER_PORT"
+        tunnel_command="sudo ssh $ssh_opts $ip $forward_rule sleep 600"
+        kill $(pgrep -f "$tunnel_command")
+        print_log "ssh_tunnel.${ip}.log"
+    done
 }
 
 wait_for_vm_launch() {