1 ##############################################################################
2 # Copyright (c) 2017 ZTE Corporation and others.
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
10 from os.path import isfile, join
15 from doctor_tests.alarm import Alarm
16 from doctor_tests.common.constants import Host
17 from doctor_tests.common.utils import match_rep_in_file
18 from doctor_tests import config
19 from doctor_tests.consumer import get_consumer
20 from doctor_tests.identity_auth import get_identity_auth
21 from doctor_tests.identity_auth import get_session
22 from doctor_tests.image import Image
23 from doctor_tests.instance import Instance
24 from doctor_tests.inspector import get_inspector
25 from doctor_tests.installer import get_installer
26 import doctor_tests.logger as doctor_log
27 from doctor_tests.network import Network
28 from doctor_tests.monitor import get_monitor
29 from doctor_tests.os_clients import nova_client
30 from doctor_tests.profiler_poc import main as profiler_main
31 from doctor_tests.scenario.common import calculate_notification_time
32 from doctor_tests.scenario.network_failure import NetworkFault
33 from doctor_tests.user import User
# Module-wide doctor logger. LOG is passed to every component constructed
# below; LogFile is parsed later (calculate_notification_time, run_profiler)
# to extract event timestamps from the log.
36 Logger = doctor_log.Logger('doctor')
37 LOG = Logger.getLogger()
38 LogFile = Logger.getLogFilename()
41 class DoctorTest(object):
# Top-level driver for the OPNFV Doctor test scenarios: wires together the
# cloud resources (image, user, network, instance, alarm) and the doctor
# components (installer, inspector, monitor, consumer, fault injector).
43 def __init__(self, conf):
# NOTE(review): source line 44 is elided in this listing; ``self.conf`` is
# read on every line below, so the missing line presumably assigns
# ``self.conf = conf`` — confirm against the full file.
45 self.image = Image(self.conf, LOG)
46 self.user = User(self.conf, LOG)
47 self.network = Network(self.conf, LOG)
48 self.instance = Instance(self.conf, LOG)
49 self.alarm = Alarm(self.conf, LOG)
50 self.installer = get_installer(self.conf, LOG)
51 self.inspector = get_inspector(self.conf, LOG)
# NOTE(review): line 54 — the remaining argument(s) of get_monitor(),
# presumably LOG — is elided between the next two lines.
52 self.monitor = get_monitor(self.conf,
53 self.inspector.get_inspector_url(),
55 self.consumer = get_consumer(self.conf, LOG)
56 self.fault = NetworkFault(self.conf, self.installer, LOG)
# Authenticate against the doctor project and build a nova client with the
# configured microversion.
57 auth = get_identity_auth(project=self.conf.doctor_project)
58 self.nova = nova_client(self.conf.nova_version,
59 get_session(auth=auth))
# NOTE(review): the ``def`` line of this method (around elided source
# lines 60-62, presumably ``def setup(self):``) is missing from this
# listing; these statements prepare the shared cloud environment.
63 # prepare the cloud env
64 self.installer.setup()
# NOTE(review): the elided lines after each of the two comments below
# (67, 70) presumably perform the actual calls, e.g.
# ``self.image.create()`` and ``self.user.create()`` — confirm.
66 # preparing VM image...
69 # creating test user...
72 def setup_fault_management(self):
# Prepare everything the fault-management scenario needs: quota, test
# VMs and the doctor sample components.
# NOTE(review): several source lines are elided here (73, 75-77, 80-83,
# 88, 90) — network/alarm creation and consumer start presumably happen
# in those gaps. Confirm against the full file.
74 self.user.update_quota()
78 self.instance.create()
79 self.instance.wait_for_vm_launch()
84 # starting doctor sample components...
85 # tbd tojuvone: move inspector and consumer to common setup
86 # when they support updating VMs via instance.create and
87 # instance.delete alarm
89 self.inspector.start()
# Pick one VM at random and start monitoring the compute host it runs
# on; that host becomes the fault-injection target (self.down_host).
91 self.down_host = self.get_host_info_for_random_vm()
92 self.monitor.start(self.down_host)
94 def test_fault_management(self):
# End-to-end fault-management scenario: set up, inject a host failure,
# verify nova marks the host down, and check the consumer was notified
# within one second.
# NOTE(review): the enclosing ``try:`` (around elided line 98) and the
# matching ``finally:`` before cleanup are missing from this listing,
# as are the sleep after setup (103-105), the ``else:`` branch (120),
# the profiler invocation (127-129) and the re-raise in the except
# block (132-134). Confirm against the full file.
96 LOG.info('doctor fault management test starting.......')
99 self.setup_fault_management()
101 # wait for aodh alarms are updated in caches for event evaluator,
102 # sleep time should be larger than event_alarm_cache_ttl
106 # injecting host failure...
107 # NOTE (umar) add INTERFACE_NAME logic to host injection
109 self.fault.start(self.down_host)
112 # verify the test results
113 # NOTE (umar) copy remote monitor.log file when monitor=collectd
114 self.check_host_status(self.down_host.name, 'down')
# Pass criterion: notification arrived in (0, 1) seconds.
116 notification_time = calculate_notification_time(LogFile)
117 if notification_time < 1 and notification_time > 0:
118 LOG.info('doctor fault management test successfully, '
119 'notification_time=%s' % notification_time)
121 LOG.error('doctor fault management test failed, '
122 'notification_time=%s' % notification_time)
125 if self.conf.profiler_type:
126 LOG.info('doctor fault management test begin to run '
130 except Exception as e:
131 LOG.error('doctor fault management test failed, '
135 self.cleanup_fault_management()
137 def _amount_compute_nodes(self):
138 services = self.nova.services.list(binary='nova-compute')
# NOTE(review): the return statement (elided line 139, presumably
# ``return len(services)``) is missing here; the caller in
# test_maintenance compares the result against a node count.
141 def test_maintenance(self):
# Maintenance scenario skeleton: requires at least three compute nodes
# (two for redundancy plus one spare migration target).
# NOTE(review): the guard condition (elided line 143, presumably
# ``if cnodes < 3:``), the early return after the skip message
# (146-147) and the enclosing ``try:`` (148) are missing from this
# listing — confirm against the full file.
142 cnodes = self._amount_compute_nodes()
144 # need 2 compute for redundancy and one spare to migrate
145 LOG.info('not enough compute nodes, skipping doctor '
149 LOG.info('doctor maintenance test starting.......')
150 # TODO (tojuvone) test setup and actual test
151 except Exception as e:
152 LOG.error('doctor maintenance test failed, Exception=%s' % e)
154 # TODO (tojuvone) finally: test case specific cleanup
157 """run doctor tests"""
159 LOG.info('doctor test starting.......')
160 # prepare common test env
162 if self.conf.test_case == 'all':
163 self.test_fault_management()
164 self.test_maintenance()
166 function = 'test_%s' % self.conf.test_case
167 if hasattr(self, function):
168 getattr(self, function)()
170 raise Exception('Can not find function <%s> in'
171 'DoctorTest, see config manual'
173 except Exception as e:
174 LOG.error('doctor test failed, Exception=%s' % e)
179 def get_host_info_for_random_vm(self):
# Pick a random test VM by its generated name and return a Host record
# (hostname + IP) for the hypervisor it runs on.
180 num = random.randint(0, self.conf.instance_count - 1)
181 vm_name = "%s%d" % (self.conf.instance_basename, num)
# NOTE(review): elided lines 182-183 presumably open the assignment
# ``servers = \`` that this dict comprehension belongs to — confirm.
184 {getattr(server, 'name'): server
185 for server in self.nova.servers.list()}
186 server = servers.get(vm_name)
# NOTE(review): elided lines 187-188 presumably guard this with
# ``if server is None:`` and the ``raise`` keyword — as written on the
# visible line the Exception would be created but never raised.
189 Exception('Can not find instance: vm_name(%s)' % vm_name)
# Hypervisor hostname comes from the OS-EXT-SRV-ATTR server extension.
190 host_name = server.__dict__.get('OS-EXT-SRV-ATTR:hypervisor_hostname')
191 host_ip = self.installer.get_host_ip_from_hostname(host_name)
193 LOG.info('Get host info(name:%s, ip:%s) which vm(%s) launched at'
194 % (host_name, host_ip, vm_name))
195 return Host(host_name, host_ip)
197 def check_host_status(self, hostname, state):
198 service = self.nova.services.list(host=hostname,
199 binary='nova-compute')
200 host_state = service[0].__dict__.get('state')
201 assert host_state == state
203 def unset_forced_down_hosts(self):
# Clear the forced-down flag set during the fault scenario and verify
# the compute service comes back 'up'.
# NOTE(review): elided line 204 presumably guards this with
# ``if self.down_host:`` and elided line 207 presumably sleeps briefly
# before re-checking — confirm against the full file.
205 self.nova.services.force_down(self.down_host.name,
206 'nova-compute', False)
208 self.check_host_status(self.down_host.name, 'up')
210 def collect_logs(self):
211 self.fault.get_disable_network_log()
213 def run_profiler(self):
215 net_down_log_file = self.fault.get_disable_network_log()
216 reg = '(?<=doctor set link down at )\d+.\d+'
217 linkdown = float(match_rep_in_file(reg, net_down_log_file).group(0))
219 reg = '(.* doctor mark vm.* error at )(\d+.\d+)'
220 vmdown = float(match_rep_in_file(reg, LogFile).group(2))
222 reg = '(.* doctor mark host.* down at )(\d+.\d+)'
223 hostdown = float(match_rep_in_file(reg, LogFile).group(2))
225 reg = '(?<=doctor monitor detected at )\d+.\d+'
226 detected = float(match_rep_in_file(reg, LogFile).group(0))
228 reg = '(?<=doctor consumer notified at )\d+.\d+'
229 notified = float(match_rep_in_file(reg, LogFile).group(0))
231 # TODO(yujunz) check the actual delay to verify time sync status
232 # expected ~1s delay from $trigger to $linkdown
233 relative_start = linkdown
234 os.environ['DOCTOR_PROFILER_T00'] = \
235 str(int((linkdown - relative_start) * 1000))
236 os.environ['DOCTOR_PROFILER_T01'] = \
237 str(int((detected - relative_start) * 1000))
238 os.environ['DOCTOR_PROFILER_T03'] = \
239 str(int((vmdown - relative_start) * 1000))
240 os.environ['DOCTOR_PROFILER_T04'] = \
241 str(int((hostdown - relative_start) * 1000))
242 os.environ['DOCTOR_PROFILER_T09'] = \
243 str(int((notified - relative_start) * 1000))
245 profiler_main(log=LOG)
247 def cleanup_fault_management(self):
# Tear down everything created by setup_fault_management and restore the
# cloud to its initial state.
# NOTE(review): elided lines 250-252 and 255-257 presumably stop the
# monitor/consumer, delete the alarm and clean up user/image — confirm
# against the full file.
248 self.unset_forced_down_hosts()
249 self.inspector.stop()
253 self.instance.delete()
254 self.network.delete()
258 self.installer.cleanup()
# NOTE(review): the ``def main():`` header (around elided lines 261-263)
# is missing from this listing, as is the trailing ``doctor.run()`` and
# the ``if __name__ == '__main__':`` guard after line 276 — confirm
# against the full file.
# Locate the repository root relative to this file so the etc/ config
# directory can be found regardless of the working directory.
265 test_dir = os.path.split(os.path.realpath(__file__))[0]
266 doctor_root_dir = os.path.dirname(test_dir)
268 config_file_dir = '{0}/{1}'.format(doctor_root_dir, 'etc/')
269 config_files = [join(config_file_dir, f)
270 for f in os.listdir(config_file_dir)
271 if isfile(join(config_file_dir, f))]
# Build the effective configuration from CLI args plus every file found
# in etc/, then drive the test suite.
273 conf = config.prepare_conf(args=sys.argv[1:],
274 config_files=config_files)
276 doctor = DoctorTest(conf)