1 ##############################################################################
2 # Copyright (c) 2018 Nokia Corporation and others.
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
import datetime
import json
from threading import Thread
import time
from traceback import format_exc
from uuid import uuid1 as generate_uuid

from flask import Flask
from flask import request
from novaclient.exceptions import BadRequest
import oslo_messaging as messaging
import requests

from doctor_tests.admin_tool.base import BaseAdminTool
from doctor_tests.identity_auth import get_identity_auth
from doctor_tests.identity_auth import get_session
from doctor_tests.os_clients import aodh_client
from doctor_tests.os_clients import nova_client
class SampleAdminTool(BaseAdminTool):
    """Sample admin tool: owns the AdminTool HTTP server thread.

    NOTE(review): this excerpt lost interior lines; the start()/stop()
    method boundaries below are reconstructed from the visible bodies
    ('...start......'/'...stop......' logs, the /shutdown POST) and the
    BaseAdminTool start/stop convention -- confirm against upstream.
    """

    def __init__(self, trasport_url, conf, log):
        # 'trasport_url' (sic) spelling kept for caller compatibility.
        super(SampleAdminTool, self).__init__(conf, log)
        self.trasport_url = trasport_url
        self.app = None  # AdminTool server thread, created in start()

    def start(self):
        """Start the admin tool REST server thread."""
        self.log.info('sample admin tool start......')
        self.app = AdminTool(self.trasport_url, self.conf, self, self.log)
        self.app.start()

    def stop(self):
        """Stop the admin tool by POSTing to its own /shutdown endpoint."""
        self.log.info('sample admin tool stop......')
        if not self.app:
            return
        headers = {
            'Content-Type': 'application/json',
            'Accept': 'application/json',
        }
        url = 'http://%s:%d/shutdown'\
            % (self.conf.admin_tool.ip,
               self.conf.admin_tool.port)
        # best-effort shutdown request to the werkzeug server
        requests.post(url, data='', headers=headers)
        self.app = None
class AdminMain(Thread):
    # Thread driving one maintenance session through its state machine,
    # notifying projects and the admin over oslo.messaging notifications.
    # NOTE(review): this excerpt is missing interior lines (original line
    # numbering skips); comments below describe only what is visible.

    def __init__(self, trasport_url, session_id, data, parent, conf, log):
        # NOTE(review): Thread.__init__ and the self.conf/self.log/
        # self.parent assignments are among the lines lost in truncation --
        # self.conf and self.log are read below.
        # Session identifier generated by AdminTool (uuid1 string).
        self.session_id = session_id
        # Base URL projects use to reply to this admin tool instance.
        self.url = 'http://%s:%s' % (conf.admin_tool.ip, conf.admin_tool.port)
        self.projects_state = dict()  # current state for each project
        self.proj_server_actions = dict()  # actions for each project server
        self.projects_servers = dict()  # servers processed in current state
        self.maint_proj_servers = dict()  # servers under whole maintenance
        self.hosts = data['hosts']
        self.maintenance_at = data['maintenance_at']
        self.computes_disabled = list()  # hosts we disabled nova-compute on
        self.metadata = data['metadata']
        self.auth = get_identity_auth(project=self.conf.doctor_project)
        self.state = data['state']
        self.aodh = aodh_client(self.conf.aodh_version,
                                get_session(auth=self.auth))
        self.nova = nova_client(self.conf.nova_version,
                                get_session(auth=self.auth))
        self.log.info('transport_url %s' % trasport_url)
        transport = messaging.get_transport(self.conf, trasport_url)
        # Notifier towards projects ('maintenance.planned' driver arg).
        self.notif_proj = messaging.Notifier(transport,
                                             'maintenance.planned',
                                             topics=['notifications'])
        self.notif_proj = self.notif_proj.prepare(publisher_id='admin_tool')
        # Notifier towards the admin listener.
        # NOTE(review): positional Notifier args lost in truncation here.
        self.notif_admin = messaging.Notifier(transport,
                                              topics=['notifications'])
        self.notif_admin = self.notif_admin.prepare(publisher_id='admin_tool')
        self.log.info('Admin tool session %s initialized' % self.session_id)
93 for host in self.computes_disabled:
94 self.log.info('enable nova-compute on %s' % host)
95 self.nova.services.enable(host, 'nova-compute')
97 def _projects_not_in_wanted_states(self, wanted_states):
98 if len([v for v in self.projects_state.values()
99 if v not in wanted_states]):
104 def projects_not_in_state(self, state):
105 if len([v for v in self.projects_state.values()
111 def wait_projects_state(self, wanted_states, wait_seconds):
112 retries = wait_seconds
113 while (retries > 0 and
114 self._projects_not_in_wanted_states(wanted_states)):
116 retries = retries - 1
117 if self._projects_not_in_wanted_states(wanted_states):
118 self.log.error('Admin tool session %s: projects in invalid states '
119 '%s' % (self.session_id, self.projects_state))
122 self.log.info('all projects replied')
    def _project_notify(self, project_id, instance_ids, allowed_actions,
                        actions_at, state, metadata):
        # Send a per-project 'maintenance.scheduled' oslo.messaging event
        # carrying the session reply URL and the requested state.
        # NOTE(review): several continuation lines are missing from this
        # excerpt (reply_url tail, payload state/metadata/reply_url entries,
        # the debug-log argument and the notifier payload argument); the
        # unclosed parentheses below are truncation artifacts, kept verbatim.
        reply_url = '%s/maintenance/%s/%s' % (self.url, self.session_id,
        payload = dict(project_id=project_id,
                       instance_ids=instance_ids,
                       allowed_actions=allowed_actions,
                       actions_at=actions_at,
                       session_id=self.session_id,
        self.log.debug('Sending "maintenance.planned" to project: %s' %
        self.notif_proj.info({'some': 'context'}, 'maintenance.scheduled',
145 def _admin_notify(self, project, host, state, session_id):
146 payload = dict(project_id=project, host=host, state=state,
147 session_id=session_id)
149 self.log.debug('Sending "maintenance.host": %s' % payload)
151 self.notif_admin.info({'some': 'context'}, 'maintenance.host', payload)
        # NOTE(review): the 'def' line of this method was lost in this
        # excerpt -- presumably the SCALE_IN step asking projects to down
        # scale. Several continuation lines are also missing ('wait_seconds'
        # init, instance_ids tail, 'allowed_actions'/'state' inits, the
        # _project_notify metadata argument), leaving unbalanced
        # parentheses below; kept verbatim, comments only added.
        for project in self.projects_servers:
            self.log.info('SCALE_IN to project %s' % project)
            self.log.debug('instance_ids %s' % self.projects_servers[project])
            instance_ids = '%s/maintenance/%s/%s' % (self.url, self.session_id,
            actions_at = (datetime.datetime.utcnow() +
                          datetime.timedelta(seconds=wait_seconds)
                          ).strftime('%Y-%m-%d %H:%M:%S')
            metadata = self.metadata
            self._project_notify(project, instance_ids,
                                 allowed_actions, actions_at, state,
        # Wait for every project to ACK/NACK the scale-in request.
        allowed_states = ['ACK_SCALE_IN', 'NACK_SCALE_IN']
        if not self.wait_projects_state(allowed_states, wait_seconds):
            self.state = 'MAINTENANCE_FAILED'
        if self.projects_not_in_state('ACK_SCALE_IN'):
            self.log.error('%s: all states not ACK_SCALE_IN' %
            self.state = 'MAINTENANCE_FAILED'
    def maintenance(self):
        # Notify every project of the upcoming maintenance window and wait
        # for ACK_MAINTENANCE replies before the window starts.
        # NOTE(review): continuation lines are missing from this excerpt
        # (instance_ids tail, 'allowed_actions'/'state' inits, parts of the
        # raise message and the _project_notify metadata argument), leaving
        # unbalanced parentheses below; kept verbatim, comments only added.
        for project in self.projects_servers:
            self.log.info('\nMAINTENANCE to project %s\n' % project)
            self.log.debug('instance_ids %s' % self.projects_servers[project])
            instance_ids = '%s/maintenance/%s/%s' % (self.url, self.session_id,
            actions_at = self.maintenance_at
            metadata = self.metadata
            # Time left until the agreed maintenance start.
            maint_at = self.str_to_datetime(self.maintenance_at)
            td = maint_at - datetime.datetime.utcnow()
            wait_seconds = int(td.total_seconds())
            if wait_seconds < 10:
                # Too little time left for projects to react.
                raise Exception('Admin tool session %s: No time for project to'
                                (self.session_id, wait_seconds))
            self._project_notify(project, instance_ids,
                                 allowed_actions, actions_at, state,
        allowed_states = ['ACK_MAINTENANCE', 'NACK_MAINTENANCE']
        if not self.wait_projects_state(allowed_states, wait_seconds):
            self.state = 'MAINTENANCE_FAILED'
        if self.projects_not_in_state('ACK_MAINTENANCE'):
            self.log.error('%s: all states not ACK_MAINTENANCE' %
            self.state = 'MAINTENANCE_FAILED'
    def maintenance_complete(self):
        # Tell every project maintenance is over so they can scale back up,
        # then wait for ACK_MAINTENANCE_COMPLETE replies.
        # NOTE(review): continuation lines are missing from this excerpt
        # (instance_ids tail, 'allowed_actions'/'wait_seconds' inits, the
        # _project_notify metadata argument), leaving unbalanced
        # parentheses below; kept verbatim, comments only added.
        for project in self.projects_servers:
            self.log.info('MAINTENANCE_COMPLETE to project %s' % project)
            instance_ids = '%s/maintenance/%s/%s' % (self.url, self.session_id,
            actions_at = (datetime.datetime.utcnow() +
                          datetime.timedelta(seconds=wait_seconds)
                          ).strftime('%Y-%m-%d %H:%M:%S')
            state = 'MAINTENANCE_COMPLETE'
            metadata = self.metadata
            self._project_notify(project, instance_ids,
                                 allowed_actions, actions_at, state,
        allowed_states = ['ACK_MAINTENANCE_COMPLETE',
                          'NACK_MAINTENANCE_COMPLETE']
        if not self.wait_projects_state(allowed_states, wait_seconds):
            self.state = 'MAINTENANCE_FAILED'
        if self.projects_not_in_state('ACK_MAINTENANCE_COMPLETE'):
            self.log.error('%s: all states not ACK_MAINTENANCE_COMPLETE' %
            self.state = 'MAINTENANCE_FAILED'
    def need_in_scale(self, host_servers):
        # Decide whether projects must down scale to free a compute host.
        # NOTE(review): the per-host 'instances' counter init/increment and
        # the final return statements were lost in this excerpt -- the inner
        # loop below has no visible body and 'instances' is unbound; kept
        # verbatim, comments only added. The '2' appears to assume two
        # instance slots per host in this test setup -- TODO confirm.
        room_for_instances = 0
        for host in host_servers:
            for project in host_servers[host]:
                for instance in host_servers[host][project]:
            room_for_instances += (2 - instances)
        self.log.info('there is room for %d instances' % room_for_instances)
        if room_for_instances > 1:
    def find_host_to_be_empty(self, host_servers):
        # Pick the host with no HA instances and the fewest non-HA
        # instances as the candidate to be emptied for maintenance.
        # NOTE(review): the per-host counter inits, the ha/nonha increment
        # branch bodies and the if/else arms around the duplicated
        # assignments were lost in this excerpt; kept verbatim,
        # comments only added.
        host_to_be_empty = None
        host_nonha_instances = 0
        for host in host_servers:
            for project in host_servers[host]:
                for instance in host_servers[host][project]:
                    # HA app instances are recognized by name prefix.
                    if ('doctor_ha_app_' in
                            host_servers[host][project][instance]):
            self.log.info('host %s has %d ha and %d non ha instances' %
                          (host, ha_instances, nonha_instances))
            if ha_instances == 0:
                if nonha_instances < host_nonha_instances:
                    host_to_be_empty = host
                    host_nonha_instances = nonha_instances
                    host_to_be_empty = host
                    host_nonha_instances = nonha_instances
        self.log.info('host %s selected to be empty' % host_to_be_empty)
        return host_to_be_empty
    def make_compute_host_empty(self, host, projects_servers, statebase):
        # Ask each project to move its servers off *host*; statebase is
        # 'PREPARE_MAINTENANCE' or 'PLANNED_MAINTENANCE'.
        # NOTE(review): the 'state' and 'wait_seconds' initializations and
        # several continuation lines were lost in this excerpt, leaving
        # unclosed calls below; kept verbatim, comments only added.
        state_ack = 'ACK_%s' % statebase
        state_nack = 'NACK_%s' % statebase
        for project in projects_servers:
            # self.projects_servers must have servers under action
            self.projects_servers[project] = projects_servers[project].copy()
            self.log.info('%s to project %s' % (state, project))
            self.project_servers_log_info(project, projects_servers)
            instance_ids = '%s/maintenance/%s/%s' % (self.url, self.session_id,
            allowed_actions = ['MIGRATE', 'LIVE_MIGRATE', 'OWN_ACTION']
            actions_at = (datetime.datetime.utcnow() +
                          datetime.timedelta(seconds=wait_seconds)
                          ).strftime('%Y-%m-%d %H:%M:%S')
            metadata = self.metadata
            self._project_notify(project, instance_ids,
                                 allowed_actions, actions_at, state,
        allowed_states = [state_ack, state_nack]
        if not self.wait_projects_state(allowed_states, wait_seconds):
            self.state = 'MAINTENANCE_FAILED'
        elif self.projects_not_in_state(state_ack):
            self.log.error('%s: all states not %s' %
                           (self.session_id, state_ack))
            self.state = 'MAINTENANCE_FAILED'
        # Execute the server actions projects replied with.
        self.actions_to_have_empty_host(host)
    def notify_action_done(self, project, instance_id):
        # Tell a project that the admin-side action on one of its instances
        # has completed (state INSTANCE_ACTION_DONE).
        # NOTE(review): the 'allowed_actions'/'actions_at'/'metadata'
        # initializations were lost in this excerpt -- they are passed below
        # but never visibly assigned; kept verbatim, comments only added.
        instance_ids = instance_id
        state = "INSTANCE_ACTION_DONE"
        self._project_notify(project, instance_ids, allowed_actions,
                             actions_at, state, metadata)
    def actions_to_have_empty_host(self, host):
        # Execute the per-server actions projects replied with (MIGRATE is
        # handled by the admin tool, OWN_ACTION by the project itself) and
        # then wait for *host* to become empty.
        # NOTE(review): the wait/retry lines inside the while loop, the
        # OWN_ACTION branch body and parts of the raise message were lost in
        # this excerpt; kept verbatim, comments only added.
        while len(self.proj_server_actions) == 0:
                raise Exception('Admin tool session %s: project server actions'
                                ' not set' % self.session_id)
        for project in self.proj_server_actions:
            for server, action in self.proj_server_actions[project].items():
                self.log.info('Action %s server %s: %s' % (action, server,
                              self.projects_servers[project][server]))
                if action == 'MIGRATE':
                    self.migrate_server(server)
                    self.notify_action_done(project, server)
                elif action == 'OWN_ACTION':
                    raise Exception('Admin tool session %s: server %s action '
                                    (self.session_id, server, action))
        # All replied actions consumed; reset for the next round.
        self.proj_server_actions = dict()
        self._wait_host_empty(host)
    def migrate_server(self, server_id):
        # Cold-migrate a server, confirm the resize once the VM reaches
        # 'resized', and retry the migration on failure.
        # NOTE(review): the retry scaffolding ('retry_migrate'/'retries'
        # inits, the enclosing loop and 'try', the migrate() call and
        # sleeps) was lost in this excerpt, leaving the 'except' below
        # dangling and several log calls unclosed; kept verbatim,
        # comments only added.
        server = self.nova.servers.get(server_id)
        vm_state = server.__dict__.get('OS-EXT-STS:vm_state')
        self.log.info('server %s state %s' % (server_id, vm_state))
        last_vm_state = vm_state
        while vm_state != 'resized' and retries > 0:
            # try to confirm within 3min
            server = self.nova.servers.get(server_id)
            vm_state = server.__dict__.get('OS-EXT-STS:vm_state')
            if vm_state == 'resized':
                server.confirm_resize()
                self.log.info('server %s migration confirmed' %
            if last_vm_state != vm_state:
                self.log.info('server %s state: %s' % (server_id,
            if vm_state == 'error':
                raise Exception('server %s migration failed, state: %s'
                                % (server_id, vm_state))
            retries = retries - 1
            last_vm_state = vm_state
        # Timeout waiting for the VM state to change.
        if retry_migrate == 0:
            raise Exception('server %s migrate failed' % server_id)
        # Might take time for scheduler to sync inconsistent instance
        retry_time = 180 - (retry_migrate * 30)
        self.log.info('server %s migrate failed, retry in %s sec'
                      % (server_id, retry_time))
        time.sleep(retry_time)
        except Exception as e:
            self.log.error('server %s migration failed, Exception=%s' %
            self.log.error(format_exc())
            raise Exception('server %s migration failed, state: %s' %
                            (server_id, vm_state))
        retry_migrate = retry_migrate - 1
        raise Exception('server %s migration timeout, state: %s' %
                        (server_id, vm_state))
    def _wait_host_empty(self, host):
        # Poll the hypervisor until it reports zero reserved vcpus, i.e.
        # the host is empty; raise if it never empties.
        # NOTE(review): the polling loop, sleeps, 'vcpus_used_last' init and
        # the success 'return' were lost in this excerpt -- as written,
        # 'vcpus_used_last' is read before assignment and both branches log
        # the same message; kept verbatim, comments only added.
        hid = self.nova.hypervisors.search(host)[0].id
        # wait 4min to get host empty
        hvisor = self.nova.hypervisors.get(hid)
        vcpus_used = hvisor.__getattr__('vcpus_used')
        if vcpus_used_last == 0:
            self.log.info('%s still has %d vcpus reserved. wait...'
                          % (host, vcpus_used))
        elif vcpus_used != vcpus_used_last:
            self.log.info('%s still has %d vcpus reserved. wait...'
                          % (host, vcpus_used))
        vcpus_used_last = vcpus_used
        self.log.info('%s empty' % host)
        raise Exception('%s host not empty' % host)
    def projects_listen_alarm(self, match_event):
        # Verify every known project has an aodh event alarm subscribed to
        # *match_event*; return False (after logging) if any is missing.
        # NOTE(review): the tail of the alarm-filter comprehension (the
        # comparison against match_event and the closing brackets) and part
        # of the error-log call were lost in this excerpt; kept verbatim,
        # comments only added.
        match_projects = ([str(alarm['project_id']) for alarm in
                           self.aodh.alarm.list() if
                           str(alarm['event_rule']['event_type']) ==
        all_projects_match = True
        for project in list(self.projects_state):
            if project not in match_projects:
                self.log.error('Admin tool session %s: project %s not '
                               (self.session_id, project, match_event))
                all_projects_match = False
        return all_projects_match
419 def project_servers_log_info(self, project, host_servers):
420 info = 'Project servers:\n'
421 for server in host_servers[project]:
422 info += (' %s: %s\n' %
423 (server, host_servers[project][server]))
424 self.log.info('%s' % info)
426 def servers_log_info(self, host_servers):
428 for host in self.hosts:
429 info += '%s:\n' % host
430 if host in host_servers:
431 for project in host_servers[host]:
432 info += ' %s:\n' % project
433 for server in host_servers[host][project]:
434 info += (' %s: %s\n' %
435 (server, host_servers[host][project][server]))
436 self.log.info('%s' % info)
438 def update_server_info(self):
439 opts = {'all_tenants': True}
440 servers = self.nova.servers.list(search_opts=opts)
441 self.projects_servers = dict()
442 host_servers = dict()
443 for server in servers:
445 host = str(server.__dict__.get('OS-EXT-SRV-ATTR:host'))
446 project = str(server.tenant_id)
447 server_name = str(server.name)
448 server_id = str(server.id)
450 raise Exception('can not get params from server=%s' %
452 if host not in self.hosts:
454 if host not in host_servers:
455 host_servers[host] = dict()
456 if project not in host_servers[host]:
457 host_servers[host][project] = dict()
458 if project not in self.projects_servers:
459 self.projects_servers[project] = dict()
460 if project not in self.projects_state:
461 self.projects_state[project] = None
462 host_servers[host][project][server_id] = server_name
463 self.projects_servers[project][server_id] = server_name
466 def str_to_datetime(self, dt_str):
467 mdate, mtime = dt_str.split()
468 year, month, day = map(int, mdate.split('-'))
469 hours, minutes, seconds = map(int, mtime.split(':'))
470 return datetime.datetime(year, month, day, hours, minutes, seconds)
    def host_maintenance(self, host):
        # Placeholder for the real per-host maintenance work.
        self.log.info('maintaining host %s' % host)
        # no implementation to make real maintenance
        # NOTE(review): the trailing lines of this method were lost in this
        # excerpt -- presumably a sleep simulating the maintenance duration
        # and a completion log; confirm against upstream.
        # NOTE(review): the 'def' line of this method -- the session state
        # machine loop, presumably run() -- and many interior lines were
        # lost in this excerpt (the while-condition tail, 'continue' bodies,
        # several call-argument continuations and 'else:' lines).
        # Unbalanced parentheses and dangling 'if' branches below are
        # truncation artifacts, kept verbatim; comments only were added.
        while (self.state not in ['MAINTENANCE_DONE', 'MAINTENANCE_FAILED'] and
            self.log.info('--==session %s: processing state %s==--' %
                          (self.session_id, self.state))
            if self.state == 'MAINTENANCE':
                host_servers = self.update_server_info()
                self.servers_log_info(host_servers)
                # Every project must subscribe to maintenance.scheduled.
                if not self.projects_listen_alarm('maintenance.scheduled'):
                    raise Exception('all projects do not listen maintenance '
                if self.state == 'MAINTENANCE_FAILED':
                maint_at = self.str_to_datetime(self.maintenance_at)
                if maint_at > datetime.datetime.utcnow():
                    # Sleep until the agreed maintenance window starts.
                    time_now = (datetime.datetime.utcnow().strftime(
                        '%Y-%m-%d %H:%M:%S'))
                    self.log.info('Time now: %s maintenance starts: %s....' %
                                  (time_now, self.maintenance_at))
                    td = maint_at - datetime.datetime.utcnow()
                    time.sleep(td.total_seconds())
                    time_now = (datetime.datetime.utcnow().strftime(
                        '%Y-%m-%d %H:%M:%S'))
                    self.log.info('Time to start maintenance starts: %s' %
                # check if we have empty compute host
                # True -> PLANNED_MAINTENANCE
                # False -> check if we can migrate VMs to get empty host
                # True -> PREPARE_MAINTENANCE
                maintenance_empty_hosts = ([h for h in self.hosts if h not in
                if len(maintenance_empty_hosts) == 0:
                    if self.need_in_scale(host_servers):
                        self.log.info('Need to down scale')
                        self.state = 'SCALE_IN'
                        self.log.info('Free capacity, but need empty host')
                        self.state = 'PREPARE_MAINTENANCE'
                    self.log.info('Free capacity, but need empty host')
                    self.state = 'PLANNED_MAINTENANCE'
                self.log.info('--==State change from MAINTENANCE to %s==--'
            elif self.state == 'SCALE_IN':
                # Test case is hard coded to have all compute capacity used
                # We need to down scale to have one empty compute host
                self.update_server_info()
                if self.state == 'MAINTENANCE_FAILED':
                self.state = 'PREPARE_MAINTENANCE'
                host_servers = self.update_server_info()
                self.servers_log_info(host_servers)
                self.log.info('--==State change from SCALE_IN to'
                              ' %s==--' % self.state)
            elif self.state == 'PREPARE_MAINTENANCE':
                # It might be down scale did not free capacity on a single
                # compute host, so we need to arrange free capacity to a single
                self.maint_proj_servers = self.projects_servers.copy()
                maintenance_empty_hosts = ([h for h in self.hosts if h not in
                if len(maintenance_empty_hosts) == 0:
                    self.log.info('no empty hosts for maintenance')
                    if self.need_in_scale(host_servers):
                        raise Exception('Admin tool session %s: Not enough '
                                        'free capacity for maintenance' %
                    host = self.find_host_to_be_empty(host_servers)
                    self.make_compute_host_empty(host, host_servers[host],
                                                 'PREPARE_MAINTENANCE')
                    if self.state == 'MAINTENANCE_FAILED':
                        # We do not currently support another down scale if
                        # first was not enough
                        raise Exception('Admin tool session %s: No host '
                                        'candidate to be emptied' %
                for host in maintenance_empty_hosts:
                    self.log.info('%s already empty '
                                  'for maintenance' % host)
                self.state = 'PLANNED_MAINTENANCE'
                host_servers = self.update_server_info()
                self.servers_log_info(host_servers)
                self.log.info('--==State change from PREPARE_MAINTENANCE to %s'
            elif self.state == 'PLANNED_MAINTENANCE':
                maintenance_hosts = list()
                maintenance_empty_hosts = list()
                # TODO This should be admin. hack for now to have it work
                admin_project = list(self.projects_state)[0]
                for host in self.hosts:
                    self.log.info('disable nova-compute on host %s' % host)
                    self.nova.services.disable_log_reason(host, 'nova-compute',
                    self.computes_disabled.append(host)
                    if host in host_servers and len(host_servers[host]):
                        maintenance_hosts.append(host)
                        maintenance_empty_hosts.append(host)
                self.log.info('--==Start to maintain empty hosts==--\n%s' %
                              maintenance_empty_hosts)
                self.update_server_info()
                for host in maintenance_empty_hosts:
                    # scheduler has problems, let's see if just down scaled
                    # host is really empty
                    self._wait_host_empty(host)
                    self.log.info('IN_MAINTENANCE host %s' % host)
                    self._admin_notify(admin_project, host, 'IN_MAINTENANCE',
                    self.host_maintenance(host)
                    self._admin_notify(admin_project, host,
                                       'MAINTENANCE_COMPLETE',
                    self.nova.services.enable(host, 'nova-compute')
                    self.computes_disabled.remove(host)
                    self.log.info('MAINTENANCE_COMPLETE host %s' % host)
                self.log.info('--==Start to maintain occupied hosts==--\n%s' %
                for host in maintenance_hosts:
                    self.log.info('PLANNED_MAINTENANCE host %s' % host)
                    self.make_compute_host_empty(host, host_servers[host],
                                                 'PLANNED_MAINTENANCE')
                    if self.state == 'MAINTENANCE_FAILED':
                    self.log.info('IN_MAINTENANCE host %s' % host)
                    self._admin_notify(admin_project, host, 'IN_MAINTENANCE',
                    self.host_maintenance(host)
                    self._admin_notify(admin_project, host,
                                       'MAINTENANCE_COMPLETE',
                    self.nova.services.enable(host, 'nova-compute')
                    self.computes_disabled.remove(host)
                    self.log.info('MAINTENANCE_COMPLETE host %s' % host)
                self.state = 'PLANNED_MAINTENANCE_COMPLETE'
                host_servers = self.update_server_info()
                self.servers_log_info(host_servers)
            elif self.state == 'PLANNED_MAINTENANCE_COMPLETE':
                self.log.info('Projects still need to up scale back to full '
                self.maintenance_complete()
                if self.state == 'MAINTENANCE_FAILED':
                host_servers = self.update_server_info()
                self.servers_log_info(host_servers)
                self.state = 'MAINTENANCE_DONE'
                raise Exception('Admin tool session %s: session in invalid '
                                'state %s' % (self.session_id, self.state))
        self.log.info('--==Maintenance session %s: %s==--' %
                      (self.session_id, self.state))
639 def project_input(self, project_id, data):
640 self.log.debug('Admin tool session %s: project %s input' %
641 (self.session_id, project_id))
642 if 'instance_actions' in data:
643 self.proj_server_actions[project_id] = (
644 data['instance_actions'].copy())
645 self.projects_state[project_id] = data['state']
647 def project_get_instances(self, project_id):
648 ret = list(self.projects_servers[project_id])
649 self.log.debug('Admin tool session %s: project %s GET return: %s' %
650 (self.session_id, project_id, ret))
class AdminTool(Thread):
    # Flask front-end thread: exposes the admin-tool REST API and owns the
    # per-session AdminMain threads.
    # NOTE(review): interior lines are missing from this excerpt (e.g. the
    # self.conf/self.log assignments that run() reads); kept verbatim,
    # comments only added.

    def __init__(self, trasport_url, conf, admin_tool, log):
        Thread.__init__(self)
        self.admin_tool = admin_tool
        self.maint_sessions = {}  # session_id -> AdminMain thread
        self.maintenance_hosts = []
        # 'trasport_url' (sic) spelling kept for caller compatibility.
        self.trasport_url = trasport_url
        # NOTE(review): the enclosing 'def' line (presumably run()) was lost
        # in this excerpt, as were several interior lines (the AdminMain(...)
        # argument list, some handler returns, the /shutdown handler's 'def'
        # and the werkzeug shutdown call); kept verbatim, comments only
        # added.
        app = Flask('admin_tool')

        @app.route('/maintenance', methods=['POST'])
        def admin_maintenance_api_post():
            # Create a new maintenance session thread and ACK the request.
            data = json.loads(request.data.decode('utf8'))
            self.log.info('maintenance message: %s' % data)
            session_id = str(generate_uuid())
            self.log.info('creating session: %s' % session_id)
            self.maint_sessions[session_id] = (
                AdminMain(self.trasport_url,
            self.maint_sessions[session_id].start()
            reply = json.dumps({'session_id': session_id,
                                'state': 'ACK_%s' % data['state']})
            self.log.debug('reply: %s' % reply)
            return reply, 200, None

        @app.route('/maintenance/<session_id>', methods=['GET'])
        def admin_maintenance_api_get(session_id=None):
            # Report the session's current state.
            self.log.debug('Admin get maintenance')
            reply = json.dumps({'state':
                                self.maint_sessions[session_id].state})
            self.log.info('reply: %s' % reply)
            return reply, 200, None

        @app.route('/maintenance/<session_id>/<projet_id>', methods=['PUT'])
        def project_maintenance_api_put(session_id=None, projet_id=None):
            # Project replies its state/actions ('projet_id' sic, kept).
            data = json.loads(request.data.decode('utf8'))
            self.log.debug('%s project put: %s' % (projet_id, data))
            self.project_input(session_id, projet_id, data)

        @app.route('/maintenance/<session_id>/<projet_id>', methods=['GET'])
        def project_maintenance_api_get(session_id=None, projet_id=None):
            # Project fetches the instance ids tracked for it.
            self.log.debug('%s project get %s' % (projet_id, session_id))
            instances = self.project_get_instances(session_id, projet_id)
            reply = json.dumps({'instance_ids': instances})
            self.log.debug('%s reply: %s' % (projet_id, reply))
            return reply, 200, None

        @app.route('/maintenance/<session_id>', methods=['DELETE'])
        def remove_session(session_id=None):
            # Clean up and drop a finished session.
            self.log.info('remove session %s'
            self.maint_sessions[session_id].cleanup()
            self.maint_sessions[session_id].stop()
            del self.maint_sessions[session_id]

        @app.route('/shutdown', methods=['POST'])
            self.log.info('shutdown admin_tool server at %s' % time.time())
            func = request.environ.get('werkzeug.server.shutdown')
            raise RuntimeError('Not running with the Werkzeug Server')
            return 'admin_tool app shutting down...'

        app.run(host=self.conf.admin_tool.ip, port=self.conf.admin_tool.port)
734 def project_input(self, session_id, project_id, data):
735 self.maint_sessions[session_id].project_input(project_id, data)
    def project_get_instances(self, session_id, project_id):
        # Delegate the instance lookup to the session object.
        # NOTE(review): this definition is cut off at the end of the
        # excerpt -- the delegated call's argument and closing paren are
        # not visible; kept verbatim.
        return self.maint_sessions[session_id].project_get_instances(