Add maintenance test code
[doctor.git] / doctor_tests / admin_tool / sample.py
##############################################################################
# Copyright (c) 2018 Nokia Corporation and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
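# Sample admin tool for the Doctor maintenance test case. It runs a small
# Flask service that accepts maintenance session requests, drives each
# session's state machine in a worker thread and notifies projects and the
# admin over oslo.messaging.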
import datetime
from flask import Flask
from flask import request
import json
from novaclient.exceptions import BadRequest
import oslo_messaging as messaging
import requests
import time
from threading import Thread
from traceback import format_exc
from uuid import uuid1 as generate_uuid

from doctor_tests.admin_tool.base import BaseAdminTool
from doctor_tests.identity_auth import get_identity_auth
from doctor_tests.identity_auth import get_session
from doctor_tests.os_clients import aodh_client
from doctor_tests.os_clients import nova_client


class SampleAdminTool(BaseAdminTool):

    def __init__(self, transport_url, conf, log):
        super(SampleAdminTool, self).__init__(conf, log)
        self.transport_url = transport_url
        self.app = None

    def start(self):
        self.log.info('sample admin tool start......')
        self.app = AdminTool(self.transport_url, self.conf, self, self.log)
        self.app.start()

    def stop(self):
        self.log.info('sample admin tool stop......')
        if not self.app:
            return
        headers = {
            'Content-Type': 'application/json',
            'Accept': 'application/json',
        }
        url = 'http://%s:%d/shutdown'\
              % (self.conf.admin_tool.ip,
                 self.conf.admin_tool.port)
        requests.post(url, data='', headers=headers)


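# AdminMain runs a single maintenance session in its own thread. It walks the
# session through the maintenance states, notifies projects over the
# 'maintenance.scheduled' event and reports per-host progress to the admin
# over the 'maintenance.host' event.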
class AdminMain(Thread):

    def __init__(self, transport_url, session_id, data, parent, conf, log):
        Thread.__init__(self)
        self.session_id = session_id
        self.parent = parent
        self.log = log
        self.conf = conf
        self.url = 'http://0.0.0.0:%s' % conf.admin_tool.port
        self.projects_state = dict()  # current state for each project
        self.proj_server_actions = dict()  # actions for each project server
        self.projects_servers = dict()  # servers processed in current state
        self.maint_proj_servers = dict()  # servers under whole maintenance
        self.hosts = data['hosts']
        self.maintenance_at = data['maintenance_at']
        self.computes_disabled = list()
        self.metadata = data['metadata']
        self.auth = get_identity_auth(project=self.conf.doctor_project)
        self.state = data['state']
        self.aodh = aodh_client(self.conf.aodh_version,
                                get_session(auth=self.auth))
        self.nova = nova_client(self.conf.nova_version,
                                get_session(auth=self.auth))
        self.log.info('transport_url %s' % transport_url)
        transport = messaging.get_transport(self.conf, transport_url)
        self.notif_proj = messaging.Notifier(transport,
                                             'maintenance.planned',
                                             driver='messaging',
                                             topics=['notifications'])
        self.notif_proj = self.notif_proj.prepare(publisher_id='admin_tool')
        self.notif_admin = messaging.Notifier(transport,
                                              'maintenance.host',
                                              driver='messaging',
                                              topics=['notifications'])
        self.notif_admin = self.notif_admin.prepare(publisher_id='admin_tool')
        self.log.info('Admin tool session %s initialized' % self.session_id)

    def cleanup(self):
        for host in self.computes_disabled:
            self.log.info('enable nova-compute on %s' % host)
            self.nova.services.enable(host, 'nova-compute')

    def _projects_not_in_wanted_states(self, wanted_states):
        return any(v not in wanted_states
                   for v in self.projects_state.values())

    def projects_not_in_state(self, state):
        return any(v != state for v in self.projects_state.values())

    def wait_projects_state(self, wanted_states, wait_seconds):
        retries = wait_seconds
        while (retries > 0 and
               self._projects_not_in_wanted_states(wanted_states)):
            time.sleep(1)
            retries = retries - 1
        if self._projects_not_in_wanted_states(wanted_states):
            self.log.error('Admin tool session %s: projects in invalid states '
                           '%s' % (self.session_id, self.projects_state))
            raise Exception('Admin tool session %s: not all projects in states'
                            ' %s' % (self.session_id, wanted_states))
        else:
            self.log.info('all projects replied')

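    # Send a 'maintenance.scheduled' notification to a single project. The
    # project is expected to reply to reply_url with its state (ACK_*/NACK_*)
    # and, when actions are allowed, with 'instance_actions' for its servers.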
    def _project_notify(self, project_id, instance_ids, allowed_actions,
                        actions_at, state, metadata):
        reply_url = '%s/%s/maintenance' % (self.url, project_id)

        payload = dict(project_id=project_id,
                       instance_ids=instance_ids,
                       allowed_actions=allowed_actions,
                       state=state,
                       actions_at=actions_at,
                       session_id=self.session_id,
                       metadata=metadata,
                       reply_url=reply_url)

        self.log.debug('Sending "maintenance.scheduled" to project: %s' %
                       payload)

        self.notif_proj.info({'some': 'context'}, 'maintenance.scheduled',
                             payload)

    def _admin_notify(self, project, host, state, session_id):
        payload = dict(project_id=project, host=host, state=state,
                       session_id=session_id)

        self.log.debug('Sending "maintenance.host": %s' % payload)

        self.notif_admin.info({'some': 'context'}, 'maintenance.host', payload)

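    # Ask every project to scale down so that compute capacity can be freed
    # and wait up to two minutes for all of them to reply ACK_DOWN_SCALE.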
    def down_scale(self):
        for project in self.projects_servers:
            self.log.info('DOWN_SCALE to project %s' % project)
            self.log.debug('instance_ids %s' % self.projects_servers[project])
            instance_ids = '%s/%s/maintenance' % (self.url, project)
            allowed_actions = []
            wait_seconds = 120
            actions_at = (datetime.datetime.utcnow() +
                          datetime.timedelta(seconds=wait_seconds)
                          ).strftime('%Y-%m-%d %H:%M:%S')
            state = self.state
            metadata = self.metadata
            self._project_notify(project, instance_ids,
                                 allowed_actions, actions_at, state,
                                 metadata)
        allowed_states = ['ACK_DOWN_SCALE', 'NACK_DOWN_SCALE']
        self.wait_projects_state(allowed_states, wait_seconds)
        if self.projects_not_in_state('ACK_DOWN_SCALE'):
            raise Exception('Admin tool session %s: all states not '
                            'ACK_DOWN_SCALE %s' %
                            (self.session_id, self.projects_state))

    def maintenance(self):
        for project in self.projects_servers:
            self.log.info('\nMAINTENANCE to project %s\n' % project)
            self.log.debug('instance_ids %s' % self.projects_servers[project])
            instance_ids = '%s/%s/maintenance' % (self.url, project)
            allowed_actions = []
            actions_at = self.maintenance_at
            state = self.state
            metadata = self.metadata
            maint_at = self.str_to_datetime(self.maintenance_at)
            td = maint_at - datetime.datetime.utcnow()
            wait_seconds = int(td.total_seconds())
            if wait_seconds < 10:
                raise Exception('Admin tool session %s: No time for project to'
                                ' answer: %s' %
                                (self.session_id, wait_seconds))
            self._project_notify(project, instance_ids,
                                 allowed_actions, actions_at, state,
                                 metadata)
        allowed_states = ['ACK_MAINTENANCE', 'NACK_MAINTENANCE']
        self.wait_projects_state(allowed_states, wait_seconds)
        if self.projects_not_in_state('ACK_MAINTENANCE'):
            raise Exception('Admin tool session %s: all states not '
                            'ACK_MAINTENANCE %s' %
                            (self.session_id, self.projects_state))

    def maintenance_complete(self):
        for project in self.projects_servers:
            self.log.info('MAINTENANCE_COMPLETE to project %s' % project)
            instance_ids = '%s/%s/maintenance' % (self.url, project)
            allowed_actions = []
            wait_seconds = 120
            actions_at = (datetime.datetime.utcnow() +
                          datetime.timedelta(seconds=wait_seconds)
                          ).strftime('%Y-%m-%d %H:%M:%S')
            state = 'MAINTENANCE_COMPLETE'
            metadata = self.metadata
            self._project_notify(project, instance_ids,
                                 allowed_actions, actions_at, state,
                                 metadata)
        allowed_states = ['ACK_MAINTENANCE_COMPLETE',
                          'NACK_MAINTENANCE_COMPLETE']
        self.wait_projects_state(allowed_states, wait_seconds)
        if self.projects_not_in_state('ACK_MAINTENANCE_COMPLETE'):
            raise Exception('Admin tool session %s: all states not '
                            'ACK_MAINTENANCE_COMPLETE %s' %
                            (self.session_id, self.projects_state))

    def need_down_scale(self, host_servers):
        room_for_instances = 0
        for host in host_servers:
            instances = 0
            for project in host_servers[host]:
                for instance in host_servers[host][project]:
                    instances += 1
            room_for_instances += (2 - instances)
        self.log.info('there is room for %d instances' % room_for_instances)
        if room_for_instances > 1:
            return False
        else:
            return True

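    # Select the host to be emptied for maintenance: a host hosting no HA
    # instances (instances whose name contains 'doctor_ha_app_') and the
    # fewest non-HA instances. Returns None if every host has HA instances.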
    def find_host_to_be_empty(self, host_servers):
        host_to_be_empty = None
        host_nonha_instances = 0
        for host in host_servers:
            ha_instances = 0
            nonha_instances = 0
            for project in host_servers[host]:
                for instance in host_servers[host][project]:
                    if ('doctor_ha_app_' in
                            host_servers[host][project][instance]):
                        ha_instances += 1
                    else:
                        nonha_instances += 1
            self.log.info('host %s has %d ha and %d non ha instances' %
                          (host, ha_instances, nonha_instances))
            if ha_instances == 0:
                if host_to_be_empty:
                    if nonha_instances < host_nonha_instances:
                        host_to_be_empty = host
                        host_nonha_instances = nonha_instances
                else:
                    host_to_be_empty = host
                    host_nonha_instances = nonha_instances
        self.log.info('host %s selected to be empty' % host_to_be_empty)
        return host_to_be_empty

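    # Empty one compute host: notify the affected projects with the allowed
    # actions (MIGRATE, LIVE_MIGRATE or OWN_ACTION), wait for their ACK
    # replies and then execute the actions they requested for their servers.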
    def make_compute_host_empty(self, host, projects_servers, statebase):
        state = statebase
        state_ack = 'ACK_%s' % statebase
        state_nack = 'NACK_%s' % statebase
        for project in projects_servers:
            # self.projects_servers must have servers under action
            self.projects_servers[project] = projects_servers[project].copy()
            self.log.info('%s to project %s' % (state, project))
            self.project_servers_log_info(project, projects_servers)
            instance_ids = '%s/%s/maintenance' % (self.url, project)
            allowed_actions = ['MIGRATE', 'LIVE_MIGRATE', 'OWN_ACTION']
            wait_seconds = 120
            actions_at = (datetime.datetime.utcnow() +
                          datetime.timedelta(seconds=wait_seconds)
                          ).strftime('%Y-%m-%d %H:%M:%S')
            metadata = self.metadata
            self._project_notify(project, instance_ids,
                                 allowed_actions, actions_at, state,
                                 metadata)
        allowed_states = [state_ack, state_nack]
        self.wait_projects_state(allowed_states, wait_seconds)
        if self.projects_not_in_state(state_ack):
            raise Exception('Admin tool session %s: all states not %s %s' %
                            (self.session_id, state_ack, self.projects_state))
        self.actions_to_have_empty_host(host)

    def notify_action_done(self, project, instance_id):
        instance_ids = instance_id
        allowed_actions = []
        actions_at = None
        state = "INSTANCE_ACTION_DONE"
        metadata = None
        self._project_notify(project, instance_ids, allowed_actions,
                             actions_at, state, metadata)

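    # Wait for the projects to report per-server actions, execute them (only
    # MIGRATE and OWN_ACTION are handled here) and then wait until the host
    # no longer has vCPUs in use.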
    def actions_to_have_empty_host(self, host):
        retry = 0
        while len(self.proj_server_actions) == 0:
            time.sleep(2)
            if retry == 10:
                raise Exception('Admin tool session %s: project server actions'
                                ' not set' % self.session_id)
            retry += 1
        for project in self.proj_server_actions:
            for server, action in self.proj_server_actions[project].items():
                self.log.info('Action %s server %s: %s' % (action, server,
                              self.projects_servers[project][server]))
                if action == 'MIGRATE':
                    self.migrate_server(server)
                    self.notify_action_done(project, server)
                elif action == 'OWN_ACTION':
                    pass
                else:
                    raise Exception('Admin tool session %s: server %s action '
                                    '%s not supported' %
                                    (self.session_id, server, action))
        self.proj_server_actions = dict()
        self._wait_host_empty(host)

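    # Cold migrate a server: call migrate(), poll for up to three minutes for
    # the 'resized' vm_state and confirm the resize. A BadRequest from nova is
    # retried a few times, as the scheduler may still hold a stale instance
    # list for the host.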
    def migrate_server(self, server_id):
        server = self.nova.servers.get(server_id)
        vm_state = server.__dict__.get('OS-EXT-STS:vm_state')
        self.log.info('server %s state %s' % (server_id, vm_state))
        last_vm_state = vm_state
        retry_migrate = 5
        while True:
            try:
                server.migrate()
                time.sleep(5)
                retries = 36
                while vm_state != 'resized' and retries > 0:
                    # try to confirm within 3min
                    server = self.nova.servers.get(server_id)
                    vm_state = server.__dict__.get('OS-EXT-STS:vm_state')
                    if vm_state == 'resized':
                        server.confirm_resize()
                        self.log.info('server %s migration confirmed' %
                                      server_id)
                        return
                    if last_vm_state != vm_state:
                        self.log.info('server %s state: %s' % (server_id,
                                      vm_state))
                    if vm_state == 'error':
                        raise Exception('server %s migration failed, state: %s'
                                        % (server_id, vm_state))
                    time.sleep(5)
                    retries = retries - 1
                    last_vm_state = vm_state
                # Timeout waiting for state to change
                break

            except BadRequest:
                if retry_migrate == 0:
                    raise Exception('server %s migrate failed' % server_id)
                # Might take time for scheduler to sync inconsistent instance
                # list for host
                retry_time = 180 - (retry_migrate * 30)
                self.log.info('server %s migrate failed, retry in %s sec'
                              % (server_id, retry_time))
                time.sleep(retry_time)
            except Exception as e:
                self.log.error('server %s migration failed, Exception=%s' %
                               (server_id, e))
                self.log.error(format_exc())
                raise Exception('server %s migration failed, state: %s' %
                                (server_id, vm_state))
            finally:
                retry_migrate = retry_migrate - 1
        raise Exception('server %s migration timeout, state: %s' %
                        (server_id, vm_state))

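    # Poll the hypervisor statistics until the host reports zero vcpus_used,
    # for at most four minutes.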
    def _wait_host_empty(self, host):
        hid = self.nova.hypervisors.search(host)[0].id
        vcpus_used_last = 0
        # wait 4min to get host empty
        for j in range(48):
            hvisor = self.nova.hypervisors.get(hid)
            vcpus_used = hvisor.__getattr__('vcpus_used')
            if vcpus_used > 0:
                if vcpus_used_last == 0:
                    self.log.info('%s still has %d vcpus reserved. wait...'
                                  % (host, vcpus_used))
                elif vcpus_used != vcpus_used_last:
                    self.log.info('%s still has %d vcpus reserved. wait...'
                                  % (host, vcpus_used))
                vcpus_used_last = vcpus_used
                time.sleep(5)
            else:
                self.log.info('%s empty' % host)
                return
        raise Exception('%s host not empty' % host)

    def projects_listen_alarm(self, match_event):
        match_projects = ([str(alarm['project_id']) for alarm in
                          self.aodh.alarm.list() if
                          str(alarm['event_rule']['event_type']) ==
                          match_event])
        all_projects_match = True
        for project in list(self.projects_state):
            if project not in match_projects:
                self.log.error('Admin tool session %s: project %s not '
                               'listening to %s' %
                               (self.session_id, project, match_event))
                all_projects_match = False
        return all_projects_match

    def project_servers_log_info(self, project, host_servers):
        info = 'Project servers:\n'
        for server in host_servers[project]:
            info += ('  %s: %s\n' %
                     (server, host_servers[project][server]))
        self.log.info('%s' % info)

    def servers_log_info(self, host_servers):
        info = '\n'
        for host in self.hosts:
            info += '%s:\n' % host
            if host in host_servers:
                for project in host_servers[host]:
                    info += '  %s:\n' % project
                    for server in host_servers[host][project]:
                        info += ('    %s: %s\n' %
                                 (server, host_servers[host][project][server]))
        self.log.info('%s' % info)

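    # Refresh the server inventory: build a host -> project -> server map for
    # the maintained hosts, rebuild the per-project server map and register
    # any previously unseen project with an empty state.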
    def update_server_info(self):
        opts = {'all_tenants': True}
        servers = self.nova.servers.list(search_opts=opts)
        self.projects_servers = dict()
        host_servers = dict()
        for server in servers:
            try:
                host = str(server.__dict__.get('OS-EXT-SRV-ATTR:host'))
                project = str(server.tenant_id)
                server_name = str(server.name)
                server_id = str(server.id)
            except Exception:
                raise Exception('can not get params from server=%s' %
                                server)
            if host not in self.hosts:
                continue
            if host not in host_servers:
                host_servers[host] = dict()
            if project not in host_servers[host]:
                host_servers[host][project] = dict()
            if project not in self.projects_servers:
                self.projects_servers[project] = dict()
            if project not in self.projects_state:
                self.projects_state[project] = None
            host_servers[host][project][server_id] = server_name
            self.projects_servers[project][server_id] = server_name
        return host_servers

    def str_to_datetime(self, dt_str):
        mdate, mtime = dt_str.split()
        year, month, day = map(int, mdate.split('-'))
        hours, minutes, seconds = map(int, mtime.split(':'))
        return datetime.datetime(year, month, day, hours, minutes, seconds)

    def host_maintenance(self, host):
        self.log.info('maintaining host %s' % host)
        # no implementation to make real maintenance
        time.sleep(5)

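    # Maintenance session state machine:
    #   MAINTENANCE -> (DOWN_SCALE ->) PREPARE_MAINTENANCE ->
    #   PLANNED_MAINTENANCE -> PLANNED_MAINTENANCE_COMPLETE ->
    #   MAINTENANCE_COMPLETE
    # DOWN_SCALE and PREPARE_MAINTENANCE are skipped when an empty compute
    # host is already available.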
    def run(self):
        while self.state != 'MAINTENANCE_COMPLETE':
            self.log.info('--==session %s: processing state %s==--' %
                          (self.session_id, self.state))
            if self.state == 'MAINTENANCE':
                host_servers = self.update_server_info()
                self.servers_log_info(host_servers)

                if not self.projects_listen_alarm('maintenance.scheduled'):
                    raise Exception('not all projects listen to the '
                                    'maintenance alarm')
                self.maintenance()

                maint_at = self.str_to_datetime(self.maintenance_at)
                if maint_at > datetime.datetime.utcnow():
                    time_now = (datetime.datetime.utcnow().strftime(
                                '%Y-%m-%d %H:%M:%S'))
                    self.log.info('Time now: %s maintenance starts: %s....' %
                                  (time_now, self.maintenance_at))
                    td = maint_at - datetime.datetime.utcnow()
                    time.sleep(td.total_seconds())
                time_now = (datetime.datetime.utcnow().strftime(
                            '%Y-%m-%d %H:%M:%S'))
                self.log.info('Time to start maintenance: %s' % time_now)

                # check if we have empty compute host
                # True -> PLANNED_MAINTENANCE
                # False -> check if we can migrate VMs to get empty host
                # True -> PREPARE_MAINTENANCE
                # False -> DOWN_SCALE
                maintenance_empty_hosts = ([h for h in self.hosts if h not in
                                           host_servers])

                if len(maintenance_empty_hosts) == 0:
                    if self.need_down_scale(host_servers):
                        self.log.info('Need to down scale')
                        self.state = 'DOWN_SCALE'
                    else:
                        self.log.info('Free capacity, but need empty host')
                        self.state = 'PREPARE_MAINTENANCE'
                else:
                    self.log.info('Empty host found')
                    self.state = 'PLANNED_MAINTENANCE'
                self.log.info('--==State change from MAINTENANCE to %s==--'
                              % self.state)
            elif self.state == 'DOWN_SCALE':
                # Test case is hard coded to have all compute capacity used
                # We need to down scale to have one empty compute host
                self.down_scale()
                self.state = 'PREPARE_MAINTENANCE'
                host_servers = self.update_server_info()
                self.servers_log_info(host_servers)
                self.log.info('--==State change from DOWN_SCALE to'
                              ' %s==--' % self.state)

            elif self.state == 'PREPARE_MAINTENANCE':
                # Down scaling might not have freed capacity on a single
                # compute host, so we may still need to arrange free capacity
                # onto a single compute host
                self.maint_proj_servers = self.projects_servers.copy()
                maintenance_empty_hosts = ([h for h in self.hosts if h not in
                                           host_servers])
                if len(maintenance_empty_hosts) == 0:
                    self.log.info('no empty hosts for maintenance')
                    if self.need_down_scale(host_servers):
                        raise Exception('Admin tool session %s: Not enough '
                                        'free capacity for maintenance' %
                                        self.session_id)
                    host = self.find_host_to_be_empty(host_servers)
                    if host:
                        self.make_compute_host_empty(host, host_servers[host],
                                                     'PREPARE_MAINTENANCE')
                    else:
                        # We do not currently support another down scale if
                        # first was not enough
                        raise Exception('Admin tool session %s: No host '
                                        'candidate to be emptied' %
                                        self.session_id)
                else:
                    for host in maintenance_empty_hosts:
                        self.log.info('%s already empty '
                                      'for maintenance' % host)
                self.state = 'PLANNED_MAINTENANCE'
                host_servers = self.update_server_info()
                self.servers_log_info(host_servers)
                self.log.info('--==State change from PREPARE_MAINTENANCE to %s'
                              '==--' % self.state)
            elif self.state == 'PLANNED_MAINTENANCE':
                maintenance_hosts = list()
                maintenance_empty_hosts = list()
                # TODO: this should be the admin project; hack for now to
                # have it work
                admin_project = list(self.projects_state)[0]
                for host in self.hosts:
                    self.log.info('disable nova-compute on host %s' % host)
                    self.nova.services.disable_log_reason(host, 'nova-compute',
                                                          'maintenance')
                    self.computes_disabled.append(host)
                    if host in host_servers and len(host_servers[host]):
                        maintenance_hosts.append(host)
                    else:
                        maintenance_empty_hosts.append(host)
                self.log.info('--==Start to maintain empty hosts==--\n%s' %
                              maintenance_empty_hosts)
                for host in maintenance_empty_hosts:
                    # the scheduler may lag, so check that the just down
                    # scaled host is really empty
                    self._wait_host_empty(host)
                    self.log.info('IN_MAINTENANCE host %s' % host)
                    self._admin_notify(admin_project, host, 'IN_MAINTENANCE',
                                       self.session_id)
                    self.host_maintenance(host)
                    self._admin_notify(admin_project, host,
                                       'MAINTENANCE_COMPLETE',
                                       self.session_id)
                    self.nova.services.enable(host, 'nova-compute')
                    self.computes_disabled.remove(host)
                    self.log.info('MAINTENANCE_COMPLETE host %s' % host)
                self.log.info('--==Start to maintain occupied hosts==--\n%s' %
                              maintenance_hosts)
                for host in maintenance_hosts:
                    self.log.info('PLANNED_MAINTENANCE host %s' % host)
                    self.make_compute_host_empty(host, host_servers[host],
                                                 'PLANNED_MAINTENANCE')
                    self.log.info('IN_MAINTENANCE host %s' % host)
                    self._admin_notify(admin_project, host, 'IN_MAINTENANCE',
                                       self.session_id)
                    self.host_maintenance(host)
                    self._admin_notify(admin_project, host,
                                       'MAINTENANCE_COMPLETE',
                                       self.session_id)
                    self.nova.services.enable(host, 'nova-compute')
                    self.computes_disabled.remove(host)
                    self.log.info('MAINTENANCE_COMPLETE host %s' % host)
                self.state = 'PLANNED_MAINTENANCE_COMPLETE'
                host_servers = self.update_server_info()
                self.servers_log_info(host_servers)
            elif self.state == 'PLANNED_MAINTENANCE_COMPLETE':
                self.log.info('Projects still need to up scale back to full '
                              'capacity')
                self.maintenance_complete()
                host_servers = self.update_server_info()
                self.servers_log_info(host_servers)
                self.state = 'MAINTENANCE_COMPLETE'
            else:
                raise Exception('Admin tool session %s: session in invalid '
                                'state %s' % (self.session_id, self.state))
        self.log.info('--==Maintenance session %s: '
                      'MAINTENANCE SESSION COMPLETE==--' % self.session_id)

    def project_input(self, project_id, data):
        self.log.debug('Admin tool session %s: project %s input' %
                       (self.session_id, project_id))
        if 'instance_actions' in data:
            self.proj_server_actions[project_id] = (
                data['instance_actions'].copy())
        self.projects_state[project_id] = data['state']

    def project_get_instances(self, project_id):
        ret = list(self.projects_servers[project_id])
        self.log.debug('Admin tool session %s: project %s GET return: %s' %
                       (self.session_id, project_id, ret))
        return ret

    def stop(self):
        self.stopped = True


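# Flask front end for the admin tool. It exposes:
#   POST /maintenance               - create (or remove) a maintenance
#                                     session in a new AdminMain thread
#   GET  /maintenance               - query the state of a session
#   PUT  /<project_id>/maintenance  - project replies (state, actions)
#   GET  /<project_id>/maintenance  - project fetches its instance ids
#   POST /shutdown                  - stop all sessions and the server
#
# Illustrative session request body (field values are examples only):
#   {'state': 'MAINTENANCE',
#    'hosts': ['compute-1', 'compute-2'],
#    'maintenance_at': '2018-02-28 06:06:03',
#    'metadata': {'openstack_version': 'Queens'}}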
class AdminTool(Thread):

    def __init__(self, transport_url, conf, admin_tool, log):
        Thread.__init__(self)
        self.admin_tool = admin_tool
        self.log = log
        self.conf = conf
        self.port = self.conf.admin_tool.port
        self.maint_sessions = {}
        self.projects = {}
        self.maintenance_hosts = []
        self.transport_url = transport_url

    def run(self):
        app = Flask('admin_tool')

        @app.route('/maintenance', methods=['POST'])
        def admin_maintenance_api_post():
            data = json.loads(request.data.decode('utf8'))
            self.log.info('maintenance message: %s' % data)
            if 'session_id' in data:
                if data['state'] == 'REMOVE_MAINTENANCE_SESSION':
                    session_id = data['session_id']
                    self.log.info('remove session %s'
                                  % session_id)
                    self.maint_sessions[session_id].cleanup()
                    self.maint_sessions[session_id].stop()
                    del self.maint_sessions[session_id]
            else:
                session_id = str(generate_uuid())
                self.log.info('creating session: %s' % session_id)
                self.maint_sessions[session_id] = (
                    AdminMain(self.transport_url,
                              session_id,
                              data,
                              self,
                              self.conf,
                              self.log))
                self.maint_sessions[session_id].start()
            reply = json.dumps({'session_id': session_id,
                                'state': 'ACK_%s' % data['state']})
            self.log.debug('reply: %s' % reply)
            return reply, 200, None

        @app.route('/maintenance', methods=['GET'])
        def admin_maintenance_api_get():
            data = json.loads(request.data.decode('utf8'))
            self.log.debug('Admin get maintenance: %s' % data)
            session_id = data['session_id']
            reply = json.dumps({'state':
                               self.maint_sessions[session_id].state})
            self.log.debug('reply: %s' % reply)
            return reply, 200, None

        @app.route('/<project_id>/maintenance', methods=['PUT'])
        def project_maintenance_api_put(project_id=None):
            data = json.loads(request.data.decode('utf8'))
            self.log.debug('%s project put: %s' % (project_id, data))
            self.project_input(project_id, data)
            return 'OK'

        @app.route('/<project_id>/maintenance', methods=['GET'])
        def project_maintenance_api_get(project_id=None):
            data = json.loads(request.data.decode('utf8'))
            self.log.debug('%s project get %s' % (project_id, data))
            instances = self.project_get_instances(project_id, data)
            reply = json.dumps({'instance_ids': instances})
            self.log.debug('%s reply: %s' % (project_id, reply))
            return reply, 200, None

        @app.route('/shutdown', methods=['POST'])
        def shutdown():
            for session in self.maint_sessions:
                self.log.info('shutdown admin tool session %s thread' %
                              session)
                self.maint_sessions[session].cleanup()
                self.maint_sessions[session].stop()
            self.log.info('shutdown admin_tool server at %s' % time.time())
            func = request.environ.get('werkzeug.server.shutdown')
            if func is None:
                raise RuntimeError('Not running with the Werkzeug Server')
            func()
            return 'admin_tool app shutting down...'

        app.run(host='0.0.0.0', port=self.port)

    def project_input(self, project_id, data):
        session_id = data['session_id']
        self.maint_sessions[session_id].project_input(project_id, data)

    def project_get_instances(self, project_id, data):
        session_id = data['session_id']
        return self.maint_sessions[session_id].project_get_instances(
            project_id)