8051ad6e756a6b088a218feef8cd311708105861
[doctor.git] / doctor_tests / main.py
1 ##############################################################################
2 # Copyright (c) 2017 ZTE Corporation and others.
3 #
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
9 import os
10 from os.path import isfile, join
11 import random
12 import signal
13 import sys
14 import time
15
16 from doctor_tests.alarm import Alarm
17 from doctor_tests.common.constants import Host
18 from doctor_tests.common.utils import match_rep_in_file
19 from doctor_tests import config
20 from doctor_tests.consumer import get_consumer
21 from doctor_tests.identity_auth import get_identity_auth
22 from doctor_tests.identity_auth import get_session
23 from doctor_tests.image import Image
24 from doctor_tests.instance import Instance
25 from doctor_tests.inspector import get_inspector
26 from doctor_tests.installer import get_installer
27 import doctor_tests.logger as doctor_log
28 from doctor_tests.network import Network
29 from doctor_tests.monitor import get_monitor
30 from doctor_tests.os_clients import nova_client
31 from doctor_tests.profiler_poc import main as profiler_main
32 from doctor_tests.scenario.common import calculate_notification_time
33 from doctor_tests.scenario.network_failure import NetworkFault
34 from doctor_tests.user import User
35
36
# Module-wide logger shared by all test components; LogFile is parsed later
# (calculate_notification_time, run_profiler) to extract event timestamps,
# so the same log file must be used for the whole run.
Logger = doctor_log.Logger('doctor')
LOG = Logger.getLogger()
LogFile = Logger.getLogFilename()
40
41
class DoctorTest(object):
    """Top-level driver for the OPNFV Doctor test cases.

    Wires the cloud resources (image, user, network, instance, alarm)
    together with the doctor sample components (inspector, monitor,
    consumer), then runs the scenario(s) selected by ``conf.test_case``.
    The fault-management scenario verifies that a host failure is
    detected and the consumer is notified in under one second.
    """

    def __init__(self, conf):
        self.conf = conf
        self.image = Image(self.conf, LOG)
        self.user = User(self.conf, LOG)
        self.network = Network(self.conf, LOG)
        self.instance = Instance(self.conf, LOG)
        self.alarm = Alarm(self.conf, LOG)
        self.installer = get_installer(self.conf, LOG)
        self.inspector = get_inspector(self.conf, LOG)
        self.monitor = get_monitor(self.conf,
                                   self.inspector.get_inspector_url(),
                                   LOG)
        self.consumer = get_consumer(self.conf, LOG)
        self.fault = NetworkFault(self.conf, self.installer, LOG)
        auth = get_identity_auth(project=self.conf.doctor_project)
        self.nova = nova_client(self.conf.nova_version,
                                get_session(auth=auth))
        # Host selected for fault injection; set in setup_fault_management()
        # and cleared/reset by unset_forced_down_hosts() at cleanup time.
        self.down_host = None

    def setup(self):
        """Prepare the common test environment (cloud, image, user)."""
        # prepare the cloud env
        self.installer.setup()

        # preparing VM image...
        self.image.create()

        # creating test user...
        self.user.create()

    def setup_fault_management(self):
        """Prepare resources specific to the fault-management scenario."""
        # user settings...
        self.user.update_quota()

        # creating VM...
        self.network.create()
        self.instance.create()
        self.instance.wait_for_vm_launch()

        # creating alarm...
        self.alarm.create()

        # starting doctor sample components...
        # tbd tojuvone: move inspector and consumer to common setup
        # when they support updating VMs via instance.create and
        # instance.delete alarm

        self.inspector.start()
        self.consumer.start()
        self.down_host = self.get_host_info_for_random_vm()
        self.monitor.start(self.down_host)

    def test_fault_management(self):
        """Run the fault-management scenario end to end.

        Injects a network failure on the host of a random test VM,
        verifies nova marks the host down, and checks the consumer
        notification time is within (0, 1) second.  Exits the process
        with status 1 on failure; always runs the scenario cleanup.
        """
        try:
            LOG.info('doctor fault management test starting.......')

            # prepare test env
            self.setup_fault_management()

            # wait for aodh alarms are updated in caches for event evaluator,
            # sleep time should be larger than event_alarm_cache_ttl
            # (default 60)
            # (tojuvone) Fraser currently needs 120
            time.sleep(120)

            # injecting host failure...
            # NOTE (umar) add INTERFACE_NAME logic to host injection

            self.fault.start(self.down_host)
            time.sleep(10)

            # verify the test results
            # NOTE (umar) copy remote monitor.log file when monitor=collectd
            self.check_host_status(self.down_host.name, 'down')

            notification_time = calculate_notification_time(LogFile)
            # Doctor requirement: consumer notified in under 1 second
            # (0 or negative means timestamps were missing or out of order).
            if 0 < notification_time < 1:
                LOG.info('doctor fault management test successfully, '
                         'notification_time=%s' % notification_time)
            else:
                LOG.error('doctor fault management test failed, '
                          'notification_time=%s' % notification_time)
                sys.exit(1)

            if self.conf.profiler_type:
                LOG.info('doctor fault management test begin to run '
                         'profile.......')
                self.collect_logs()
                self.run_profiler()
        except Exception as e:
            LOG.error('doctor fault management test failed, '
                      'Exception=%s' % e)
            sys.exit(1)
        finally:
            self.cleanup_fault_management()

    def _amount_compute_nodes(self):
        """Return the number of nova-compute services in the deployment."""
        services = self.nova.services.list(binary='nova-compute')
        return len(services)

    def test_maintenance(self):
        """Run the maintenance scenario (requires >= 3 compute nodes)."""
        cnodes = self._amount_compute_nodes()
        if cnodes < 3:
            # need 2 compute for redundancy and one spare to migrate
            LOG.info('not enough compute nodes, skipping doctor '
                     'maintenance test')
            return
        try:
            LOG.info('doctor maintenance test starting.......')
            # TODO (tojuvone) test setup and actual test
        except Exception as e:
            LOG.error('doctor maintenance test failed, Exception=%s' % e)
            sys.exit(1)
        # TODO (tojuvone) finally: test case specific cleanup

    def run(self):
        """run doctor tests"""
        try:
            LOG.info('doctor test starting.......')
            # prepare common test env
            self.setup()
            if self.conf.test_case == 'all':
                self.test_fault_management()
                self.test_maintenance()
            else:
                function = 'test_%s' % self.conf.test_case
                if hasattr(self, function):
                    getattr(self, function)()
                else:
                    # Fixed: the adjacent string literals previously joined
                    # without a space ("...in" + "DoctorTest...").
                    raise Exception('Can not find function <%s> in '
                                    'DoctorTest, see config manual'
                                    % function)
        except Exception as e:
            LOG.error('doctor test failed, Exception=%s' % e)
            sys.exit(1)
        finally:
            self.cleanup()

    def get_host_info_for_random_vm(self):
        """Pick a random test VM and return the Host it is running on.

        Raises:
            Exception: if the expected instance name is not found.
        """
        num = random.randint(0, self.conf.instance_count - 1)
        vm_name = "%s%d" % (self.conf.instance_basename, num)

        servers = {getattr(server, 'name'): server
                   for server in self.nova.servers.list()}
        server = servers.get(vm_name)
        if not server:
            raise Exception('Can not find instance: vm_name(%s)' % vm_name)
        # hypervisor hostname comes from the OS-EXT-SRV-ATTR API extension
        host_name = server.__dict__.get('OS-EXT-SRV-ATTR:hypervisor_hostname')
        host_ip = self.installer.get_host_ip_from_hostname(host_name)

        LOG.info('Get host info(name:%s, ip:%s) which vm(%s) launched at'
                 % (host_name, host_ip, vm_name))
        return Host(host_name, host_ip)

    def check_host_status(self, hostname, state):
        """Assert the nova-compute service on *hostname* is in *state*."""
        service = self.nova.services.list(host=hostname,
                                          binary='nova-compute')
        host_state = service[0].__dict__.get('state')
        assert host_state == state

    def unset_forced_down_hosts(self):
        """Clear the forced-down flag on the failed host, if one was set."""
        if self.down_host:
            self.nova.services.force_down(self.down_host.name,
                                          'nova-compute', False)
            time.sleep(2)
            self.check_host_status(self.down_host.name, 'up')

    def collect_logs(self):
        """Fetch the network-disable log from the failed host."""
        self.fault.get_disable_network_log()

    def run_profiler(self):
        """Extract event timestamps from the logs and run the profiler PoC.

        Fixed: the regex patterns are now raw strings with the decimal
        point escaped; previously the unescaped dot matched any character
        and the non-raw digit escapes raised DeprecationWarnings on
        Python 3.
        """
        net_down_log_file = self.fault.get_disable_network_log()
        reg = r'(?<=doctor set link down at )\d+\.\d+'
        linkdown = float(match_rep_in_file(reg, net_down_log_file).group(0))

        reg = r'(.* doctor mark vm.* error at )(\d+\.\d+)'
        vmdown = float(match_rep_in_file(reg, LogFile).group(2))

        reg = r'(.* doctor mark host.* down at )(\d+\.\d+)'
        hostdown = float(match_rep_in_file(reg, LogFile).group(2))

        reg = r'(?<=doctor monitor detected at )\d+\.\d+'
        detected = float(match_rep_in_file(reg, LogFile).group(0))

        reg = r'(?<=doctor consumer notified at )\d+\.\d+'
        notified = float(match_rep_in_file(reg, LogFile).group(0))

        # TODO(yujunz) check the actual delay to verify time sync status
        # expected ~1s delay from $trigger to $linkdown
        relative_start = linkdown
        os.environ['DOCTOR_PROFILER_T00'] = (
            str(int((linkdown - relative_start) * 1000)))
        os.environ['DOCTOR_PROFILER_T01'] = (
            str(int((detected - relative_start) * 1000)))
        os.environ['DOCTOR_PROFILER_T03'] = (
            str(int((vmdown - relative_start) * 1000)))
        os.environ['DOCTOR_PROFILER_T04'] = (
            str(int((hostdown - relative_start) * 1000)))
        os.environ['DOCTOR_PROFILER_T09'] = (
            str(int((notified - relative_start) * 1000)))

        profiler_main(log=LOG)

    def cleanup_fault_management(self):
        """Tear down everything created for the fault-management scenario."""
        self.unset_forced_down_hosts()
        self.inspector.stop()
        self.monitor.stop()
        self.consumer.stop()
        self.alarm.delete()
        self.instance.delete()
        self.network.delete()
        self.fault.cleanup()

    def cleanup(self):
        """Tear down common resources and kill any hanging subprocesses."""
        self.installer.cleanup()
        self.image.delete()
        self.user.delete()
        # Kill possible hanging subprocess
        os.killpg(0, signal.SIGKILL)
263
264
def main():
    """Entry point: build the test configuration and run DoctorTest."""
    # TODO (tojuvone): JIRA DOCTOR-123: Test cases have some issue to always
    # kill all subprocesses. To ensure they are killed this group is done so
    # all processes can be killed without knowing what they are.
    os.setpgrp()

    # Locate the repository root relative to this module, then gather every
    # regular file under etc/ as a candidate configuration file.
    tests_dir = os.path.split(os.path.realpath(__file__))[0]
    root_dir = os.path.dirname(tests_dir)
    conf_dir = '{0}/{1}'.format(root_dir, 'etc/')

    conf_files = []
    for entry in os.listdir(conf_dir):
        path = join(conf_dir, entry)
        if isfile(path):
            conf_files.append(path)

    conf = config.prepare_conf(args=sys.argv[1:],
                               config_files=conf_files)

    DoctorTest(conf).run()