fi
fi
- if [[ "$INSTALLER_TYPE" -ne "local" ]] ; then
+ if [[ "$INSTALLER_TYPE" != "local" ]] ; then
if [[ -z "$INSTALLER_IP" ]] ; then
echo "ERROR: no installer ip"
exit 1
fi
}
+# prepare_test_env: pre-test configuration tweaks (fuel installer only;
+# no-op otherwise).  Generates set_conf.sh, copies it to every controller
+# node and runs it there to (1) point the ceilometer event pipeline
+# notifier at the alarm.all topic and (2) set nova's notification_driver
+# to "messaging".  The per-node logs are pulled back and grepped so that
+# NEED_TO_RESTORE_CEILOMETER / NEED_TO_RESTORE_NOVA record which services
+# were actually changed; restore_test_env() undoes exactly those changes.
+# NOTE(review): assumes INSTALLER_IP, ssh_opts and ssh_opts_cpu are set by
+# get_installer_ip / prepare_ssh_to_cloud before this runs — confirm order.
+prepare_test_env() {
+ #TODO delete it when fuel support the configuration
+ if [[ "$INSTALLER_TYPE" == "fuel" ]] ; then
+ echo "modify the configuration..."
+ # Quoted 'END_TXT' delimiter: the heredoc body is written literally (no
+ # expansion here); it runs later on each controller node as root.
+ cat > set_conf.sh << 'END_TXT'
+#!/bin/bash
+if [ -e /etc/ceilometer/event_pipeline.yaml ]; then
+ if ! grep -q '^ *- notifier://?topic=alarm.all$' /etc/ceilometer/event_pipeline.yaml; then
+ sed -i 's|- notifier://|- notifier://?topic=alarm.all|' /etc/ceilometer/event_pipeline.yaml
+ echo "modify the ceilometer config"
+ service ceilometer-agent-notification restart
+ fi
+else
+ echo "ceilometer event_pipeline.yaml file does not exist"
+ exit 1
+fi
+if [ -e /etc/nova/nova.conf ]; then
+ if ! grep -q '^notification_driver=messaging$' /etc/nova/nova.conf; then
+ sed -i -r 's/notification_driver=/notification_driver=messaging/g' /etc/nova/nova.conf
+ echo "modify nova config"
+ service nova-api restart
+ fi
+else
+ echo "nova.conf file does not exist"
+ exit 1
+fi
+exit 0
+END_TXT
+ chmod +x set_conf.sh
+ # Controller IPs come from column 5 of `fuel node`; xargs trims whitespace.
+ CONTROLLER_IP=$(sshpass -p r00tme ssh 2>/dev/null $ssh_opts root@${INSTALLER_IP} \
+ "fuel node | grep controller | cut -d '|' -f 5|xargs")
+ for node in $CONTROLLER_IP;do
+ scp $ssh_opts_cpu set_conf.sh "root@$node:"
+ ssh $ssh_opts_cpu "root@$node" './set_conf.sh > set_conf.log 2>&1 &'
+ # NOTE(review): set_conf.sh is backgrounded and given only 1s before its
+ # log is fetched — the log may be incomplete on a slow node; confirm.
+ sleep 1
+ scp $ssh_opts_cpu "root@$node:set_conf.log" set_conf_$node.log
+ done
+
+ # Record which services this run actually modified so restore_test_env
+ # reverts only those.
+ if grep -q "modify the ceilometer config" set_conf_*.log ; then
+ NEED_TO_RESTORE_CEILOMETER=true
+ fi
+ if grep -q "modify nova config" set_conf_*.log ; then
+ NEED_TO_RESTORE_NOVA=true
+ fi
+
+ echo "waiting service restart..."
+ sleep 60
+ fi
+}
+
+# restore_test_env: undo prepare_test_env's fuel-only config changes.
+# When nothing was modified it reports so and exits; otherwise a restore
+# script is generated from a template, the @NEED_TO_RESTORE_*@ placeholders
+# are substituted with the recorded true/false values, and the script is
+# run on every controller node in CONTROLLER_IP.
+# NOTE(review): `! ($NEED_TO_RESTORE_CEILOMETER || $NEED_TO_RESTORE_NOVA)`
+# executes the variable values as commands; this works when both hold the
+# literals true/false, but is a syntax error if both are unset/empty —
+# confirm they are initialized (e.g. to false) before this is called.
+# NOTE(review): the `exit 0` below terminates the whole calling script,
+# not just this function — verify it is only invoked from the cleanup path.
+restore_test_env() {
+ #TODO delete it when fuel support the configuration
+ if [[ "$INSTALLER_TYPE" == "fuel" ]] ; then
+ if ! ($NEED_TO_RESTORE_CEILOMETER || $NEED_TO_RESTORE_NOVA) ; then
+ echo "Don't need to restore config"
+ exit 0
+ fi
+
+ echo "restore the configuration..."
+ # Quoted 'END_TXT': body is written literally; the @...@ placeholders are
+ # filled in by the sed calls below before the script is shipped out.
+ cat > restore_conf.sh << 'END_TXT'
+#!/bin/bash
+if @NEED_TO_RESTORE_CEILOMETER@ ; then
+ if [ -e /etc/ceilometer/event_pipeline.yaml ]; then
+ if grep -q '^ *- notifier://?topic=alarm.all$' /etc/ceilometer/event_pipeline.yaml; then
+ sed -i 's|- notifier://?topic=alarm.all|- notifier://|' /etc/ceilometer/event_pipeline.yaml
+ service ceilometer-agent-notification restart
+ fi
+ else
+ echo "ceilometer event_pipeline.yaml file does not exist"
+ exit 1
+ fi
+fi
+if @NEED_TO_RESTORE_NOVA@ ; then
+ if [ -e /etc/nova/nova.conf ]; then
+ if grep -q '^notification_driver=messaging$' /etc/nova/nova.conf; then
+ sed -i -r 's/notification_driver=messaging/notification_driver=/g' /etc/nova/nova.conf
+ service nova-api restart
+ fi
+ else
+ echo "nova.conf file does not exist"
+ exit 1
+ fi
+fi
+exit 0
+END_TXT
+ sed -i -e "s/@NEED_TO_RESTORE_CEILOMETER@/$NEED_TO_RESTORE_CEILOMETER/" restore_conf.sh
+ sed -i -e "s/@NEED_TO_RESTORE_NOVA@/$NEED_TO_RESTORE_NOVA/" restore_conf.sh
+ chmod +x restore_conf.sh
+ for node in $CONTROLLER_IP;do
+ scp $ssh_opts_cpu restore_conf.sh "root@$node:"
+ ssh $ssh_opts_cpu "root@$node" './restore_conf.sh > restore_conf.log 2>&1 &'
+ done
+
+ echo "waiting service restart..."
+ sleep 60
+ fi
+}
+
get_compute_host_info() {
# get computer host info which VM boot in
COMPUTE_HOST=$(openstack $as_doctor_user server show $VM_NAME |
}
get_consumer_ip() {
+ # Determine CONSUMER_IP: the source address used to reach COMPUTE_IP,
+ # extracted from the " src " field of `ip route get` output.
- CONSUMER_IP=$(sudo ssh $ssh_opts root@$INSTALLER_IP \
- "ip route get $COMPUTE_IP | awk '/ src /{print \$NF}'")
+ local get_consumer_command="ip route get $COMPUTE_IP | awk '/ src /{print \$NF}'"
+ # Where the command runs depends on the installer: on the apex undercloud
+ # (ssh as root), on the fuel master (sshpass with fuel's default root
+ # password), or directly on this host for the "local" installer.
+ if [[ "$INSTALLER_TYPE" == "apex" ]] ; then
+ CONSUMER_IP=$(sudo ssh $ssh_opts root@$INSTALLER_IP \
+ "$get_consumer_command")
+ elif [[ "$INSTALLER_TYPE" == "fuel" ]] ; then
+ CONSUMER_IP=$(sudo sshpass -p r00tme ssh $ssh_opts root@${INSTALLER_IP} \
+ "$get_consumer_command")
+ elif [[ "$INSTALLER_TYPE" == "local" ]] ; then
+ CONSUMER_IP=`$get_consumer_command`
+ fi
 echo "CONSUMER_IP=$CONSUMER_IP"
 if [[ -z "$CONSUMER_IP" ]]; then
 # NOTE(r-mibu): create tunnel to the controller nodes, so that we can
 # avoid some network problems dpends on infra and installers.
 # This tunnel will be terminated by stop_consumer() or after 10 mins passed.
- if [[ "$INSTALLER_TYPE" == "apex" ]] ; then
- CONTROLLER_IPS=$(sudo ssh $ssh_opts $INSTALLER_IP \
- "source stackrc; \
- nova list | grep ' overcloud-controller-[0-9] ' \
- | sed -e 's/^.*ctlplane=//' -e 's/ *|\$//'")
- fi
- if [[ -z "$CONTROLLER_IPS" ]]; then
- echo "ERROR: Could not get CONTROLLER_IPS."
- exit 1
+ # Tunnels only make sense for remote installers; skip the fallback
+ # entirely when running against a "local" deployment.
+ if [[ "$INSTALLER_TYPE" != "local" ]] ; then
+ if [[ "$INSTALLER_TYPE" == "apex" ]] ; then
+ CONTROLLER_IPS=$(sudo ssh $ssh_opts $INSTALLER_IP \
+ "source stackrc; \
+ nova list | grep ' overcloud-controller-[0-9] ' \
+ | sed -e 's/^.*ctlplane=//' -e 's/ *|\$//'")
+ elif [[ "$INSTALLER_TYPE" == "fuel" ]] ; then
+ CONTROLLER_IPS=$(sshpass -p r00tme ssh 2>/dev/null $ssh_opts root@${INSTALLER_IP} \
+ "fuel node | grep controller | cut -d '|' -f 5|xargs")
+ fi
+
+ if [[ -z "$CONTROLLER_IPS" ]]; then
+ echo "ERROR: Could not get CONTROLLER_IPS."
+ exit 1
+ fi
+ # Reverse-forward CONSUMER_PORT from each controller back to this host;
+ # the `sleep 600` keeps each tunnel alive for at most 10 minutes.
+ for ip in $CONTROLLER_IPS
+ do
+ forward_rule="-R $CONSUMER_PORT:localhost:$CONSUMER_PORT"
+ tunnel_command="sudo ssh $ssh_opts_cpu $COMPUTE_USER@$ip $forward_rule sleep 600"
+ $tunnel_command > "ssh_tunnel.${ip}.log" 2>&1 < /dev/null &
+ done
 fi
- for ip in $CONTROLLER_IPS
- do
- forward_rule="-R $CONSUMER_PORT:localhost:$CONSUMER_PORT"
- tunnel_command="sudo ssh $ssh_opts_cpu $COMPUTE_USER@$ip $forward_rule sleep 600"
- $tunnel_command > "ssh_tunnel.${ip}.log" 2>&1 < /dev/null &
- done
}
stop_consumer() {
 print_log consumer.log
 # NOTE(r-mibu): terminate tunnels to the controller nodes
- for ip in $CONTROLLER_IPS
- do
- forward_rule="-R $CONSUMER_PORT:localhost:$CONSUMER_PORT"
- tunnel_command="sudo ssh $ssh_opts_cpu $COMPUTE_USER@$ip $forward_rule sleep 600"
- kill $(pgrep -f "$tunnel_command")
- print_log "ssh_tunnel.${ip}.log"
- done
+ # No tunnels are created for the "local" installer (see get_consumer_ip),
+ # so there is nothing to tear down in that case.
+ if [[ "$INSTALLER_TYPE" != "local" ]] ; then
+ for ip in $CONTROLLER_IPS
+ do
+ forward_rule="-R $CONSUMER_PORT:localhost:$CONSUMER_PORT"
+ # Each tunnel was launched with exactly this command line, so pgrep -f
+ # on the full string resolves its PID for the kill.
+ tunnel_command="sudo ssh $ssh_opts_cpu $COMPUTE_USER@$ip $forward_rule sleep 600"
+ kill $(pgrep -f "$tunnel_command")
+ print_log "ssh_tunnel.${ip}.log"
+ done
+ fi
}
wait_for_vm_launch() {
--project "$DOCTOR_PROJECT"
openstack project delete "$DOCTOR_PROJECT"
openstack user delete "$DOCTOR_USER"
+
+ restore_test_env
}
echo "preparing test env..."
get_installer_ip
prepare_ssh_to_cloud
+prepare_test_env
echo "preparing VM image..."
download_image
get_compute_host_info
echo "creating alarm..."
-get_consumer_ip
+#TODO: change back to use, network problems depends on infra and installers
+#get_consumer_ip
create_alarm
echo "starting doctor sample components..."