INSTALLER_TYPE=${INSTALLER_TYPE:-local}
INSTALLER_IP=${INSTALLER_IP:-none}
+SUPPORTED_INSPECTOR_TYPES="sample congress"
+INSPECTOR_TYPE=${INSPECTOR_TYPE:-sample}
+
ssh_opts="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
as_doctor_user="--os-username $DOCTOR_USER --os-password $DOCTOR_PW
                --os-tenant-name $DOCTOR_PROJECT"
    exit 1
fi
+if [[ ! "$SUPPORTED_INSPECTOR_TYPES" =~ "$INSPECTOR_TYPE" ]] ; then
+    echo "ERROR: INSPECTOR_TYPE=$INSPECTOR_TYPE is not supported."
+    exit 1
+fi
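+# Illustrative usage (the script name here is assumed for the example):
+#   INSPECTOR_TYPE=congress ./run.sh
+# Leaving the variable unset selects the default "sample" inspector.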
+
get_compute_host_info() {
    # get the compute host the VM was booted on
    COMPUTE_HOST=$(openstack $as_doctor_user server show $VM_NAME |
    # get vm_id as test user
    ceilometer $as_doctor_user alarm-list | grep -q " $ALARM_NAME " && return 0
    vm_id=$(openstack $as_doctor_user server list | grep " $VM_NAME " | awk '{print $2}')
+    # TODO(r-mibu): change notification endpoint from localhost to the consumer
+    # IP address (functest container).
    ceilometer $as_doctor_user alarm-event-create --name "$ALARM_NAME" \
-        --alarm-action "http://$CONSUMER_IP:$CONSUMER_PORT/failure" \
+        --alarm-action "http://localhost:$CONSUMER_PORT/failure" \
        --description "VM failure" \
        --enabled True \
        --repeat-actions False \
start_monitor() {
    pgrep -f "python monitor.py" && return 0
-    sudo python monitor.py "$COMPUTE_HOST" "$COMPUTE_IP" \
+    sudo -E python monitor.py "$COMPUTE_HOST" "$COMPUTE_IP" "$INSPECTOR_TYPE" \
        "http://127.0.0.1:$INSPECTOR_PORT/events" > monitor.log 2>&1 &
}
    print_log monitor.log
}
+congress_add_rule() {
+    name=$1
+    policy=$2
+    rule=$3
+
+    if ! openstack congress policy rule list $policy | grep -q -e "^// Name: $name$" ; then
+        openstack congress policy rule create --name $name $policy "$rule"
+    fi
+}
+
+congress_del_rule() {
+    name=$1
+    policy=$2
+
+    if openstack congress policy rule list $policy | grep -q -e "^// Name: $name$" ; then
+        openstack congress policy rule delete $policy $name
+    fi
+}
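+# NOTE: both helpers above key off the "// Name: <name>" header that
+# `openstack congress policy rule list` prints for each rule, which makes
+# rule creation and deletion idempotent across reruns.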
+
+congress_setup_rules() {
+    congress_add_rule host_down classification \
+        'host_down(host) :-
+            doctor:events(hostname=host, type="compute.host.down", status="down")'
+
+    congress_add_rule active_instance_in_host classification \
+        'active_instance_in_host(vmid, host) :-
+            nova:servers(id=vmid, host_name=host, status="ACTIVE")'
+
+    congress_add_rule host_force_down classification \
+        'execute[nova:services.force_down(host, "nova-compute", "True")] :-
+            host_down(host)'
+
+    congress_add_rule error_vm_states classification \
+        'execute[nova:servers.reset_state(vmid, "error")] :-
+            host_down(host),
+            active_instance_in_host(vmid, host)'
+}
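+# How the four rules above chain together: a doctor event of type
+# "compute.host.down" derives host_down(host); host_down then fires the two
+# execute rules, which force the nova-compute service down on that host and
+# reset every ACTIVE instance on it to the "error" state.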
+
start_inspector() {
-    pgrep -f "python inspector.py" && return 0
-    python inspector.py "$INSPECTOR_PORT" > inspector.log 2>&1 &
+    if [[ "$INSPECTOR_TYPE" == "sample" ]] ; then
+        pgrep -f "python inspector.py" && return 0
+        python inspector.py "$INSPECTOR_PORT" > inspector.log 2>&1 &
+    elif [[ "$INSPECTOR_TYPE" == "congress" ]] ; then
+        nova_api_min_version="2.11"
+        nova_api_version=$(openstack congress datasource list | \
+                           grep nova | grep -Po "(?<='api_version': ')[^']*")
+        [[ -z $nova_api_version ]] && nova_api_version="2.0"
+        # NOTE: compare versions with sort -V; the lexical test [[ "$a" < "$b" ]]
+        # mis-orders microversions (e.g. it puts "2.9" after "2.11")
+        if [[ "$(printf '%s\n' "$nova_api_version" "$nova_api_min_version" \
+                 | sort -V | head -n1)" != "$nova_api_min_version" ]]; then
+            echo "ERROR: Congress Nova datasource API version < $nova_api_min_version ($nova_api_version)"
+            exit 1
+        fi
+        openstack congress driver list | grep -q " doctor " || {
+            echo "ERROR: congress doctor datasource driver is not available."
+            exit 1
+        }
+        openstack congress datasource list | grep -q " doctor " || {
+            openstack congress datasource create doctor doctor
+        }
+        congress_setup_rules
+    fi
}
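+# Manual sanity check (assumed debugging session, not part of the test flow):
+#   openstack congress datasource list                     # should list "doctor"
+#   openstack congress policy rule list classification     # should show the rules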
stop_inspector() {
-    pgrep -f "python inspector.py" || return 0
-    kill $(pgrep -f "python inspector.py")
-    print_log inspector.log
+    if [[ "$INSPECTOR_TYPE" == "sample" ]] ; then
+        pgrep -f "python inspector.py" || return 0
+        kill $(pgrep -f "python inspector.py")
+        print_log inspector.log
+    elif [[ "$INSPECTOR_TYPE" == "congress" ]] ; then
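+        # drop rules in reverse dependency order: the execute rules first,
+        # then the derived tables they reference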
+        congress_del_rule host_force_down classification
+        congress_del_rule error_vm_states classification
+        congress_del_rule active_instance_in_host classification
+        congress_del_rule host_down classification
+    fi
}
start_consumer() {
    pgrep -f "python consumer.py" && return 0
    python consumer.py "$CONSUMER_PORT" > consumer.log 2>&1 &
-    # NOTE(r-mibu): create tunnel to the installer node, so that we can
+
+    # NOTE(r-mibu): create tunnels to the controller nodes, so that we can
    # avoid some network problems that depend on the infra and installers.
    # These tunnels will be terminated by stop_consumer() or after 10 minutes.
-    TUNNEL_COMMAND="sudo ssh $ssh_opts $INSTALLER_IP -R $CONSUMER_PORT:localhost:$CONSUMER_PORT sleep 600"
-    $TUNNEL_COMMAND > ssh_tunnel.log 2>&1 < /dev/null &
+    if [[ "$INSTALLER_TYPE" == "apex" ]] ; then
+        CONTROLLER_IPS=$(sudo ssh $ssh_opts $INSTALLER_IP \
+                         "source stackrc; \
+                          nova list | grep ' overcloud-controller-[0-9] ' \
+                          | sed -e 's/^.*ctlplane=//' -e 's/ *|\$//'")
+    fi
+    if [[ -z "$CONTROLLER_IPS" ]]; then
+        echo "ERROR: Could not get CONTROLLER_IPS."
+        exit 1
+    fi
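+    # NOTE: -R makes each controller forward its own localhost:$CONSUMER_PORT
+    # back to this host, so the alarm action URL
+    # (http://localhost:$CONSUMER_PORT/failure) keeps working when it is
+    # evaluated on a controller node.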
+    for ip in $CONTROLLER_IPS
+    do
+        forward_rule="-R $CONSUMER_PORT:localhost:$CONSUMER_PORT"
+        tunnel_command="sudo ssh $ssh_opts_cpu $COMPUTE_USER@$ip $forward_rule sleep 600"
+        $tunnel_command > "ssh_tunnel.${ip}.log" 2>&1 < /dev/null &
+    done
}
stop_consumer() {
    pgrep -f "python consumer.py" || return 0
    kill $(pgrep -f "python consumer.py")
    print_log consumer.log
-    kill $(pgrep -f "$TUNNEL_COMMAND")
-    print_log ssh_tunnel.log
+
+    # NOTE(r-mibu): terminate tunnels to the controller nodes
+    for ip in $CONTROLLER_IPS
+    do
+        forward_rule="-R $CONSUMER_PORT:localhost:$CONSUMER_PORT"
+        tunnel_command="sudo ssh $ssh_opts_cpu $COMPUTE_USER@$ip $forward_rule sleep 600"
+        kill $(pgrep -f "$tunnel_command")
+        print_log "ssh_tunnel.${ip}.log"
+    done
}
wait_for_vm_launch() {
create_alarm
echo "starting doctor sample components..."
-start_monitor
start_inspector
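+# start the monitor after the inspector so the event endpoint it reports to
+# (http://127.0.0.1:$INSPECTOR_PORT/events) is already listening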
+start_monitor
start_consumer
sleep 60