3 Demonstrate writing a Ceph web interface inside a mgr module.
6 # We must share a global reference to this instance, because it is the
7 # gatekeeper to all accesses to data from the C++ side (e.g. the REST API
8 # request handlers need to see it)
9 from collections import defaultdict
# Registry holding the single active Module instance (set in
# Module.__init__); cherrypy request handlers have no other way to
# reach the plugin object that talks to the C++ side.
_global_instance = {'plugin': None}


def global_instance():
    """Return the active Module instance registered by Module.__init__.

    Raises:
        AssertionError: if no module instance has been registered yet.

    The guard is an explicit ``raise AssertionError`` rather than a bare
    ``assert`` so it still fires (instead of silently returning ``None``)
    when Python runs with optimizations (``-O``) enabled.  The exception
    class is unchanged from the original ``assert``-based check.
    """
    if _global_instance['plugin'] is None:
        raise AssertionError("no dashboard module instance registered")
    return _global_instance['plugin']
30 from mgr_module import MgrModule, MgrStandbyModule, CommandResult
32 from types import OsdMap, NotFound, Config, FsMap, MonMap, \
33 PgSummary, Health, MonStatus
38 from rbd_ls import RbdLs, RbdPoolLs
39 from cephfs_clients import CephFSClients
41 log = logging.getLogger("dashboard")
44 # How many cluster log lines shall we hold onto in our
45 # python module for the convenience of the GUI?
# cherrypy likes to sys.exit on error.  don't let it take us down too!
def os_exit_noop(*args, **kwargs):
    """No-op stand-in for os._exit: swallow cherrypy's exit attempts."""
os._exit = os_exit_noop
def recurse_refs(root, path):
    """Walk *root* depth-first and log the refcount of every node.

    Dict values are labelled ``parent->key`` and list elements
    ``parent[index]``.  Every visited object, including *root* itself,
    produces one log line with its refcount and class.
    """
    if isinstance(root, dict):
        for key in root:
            recurse_refs(root[key], path + "->%s" % key)
    elif isinstance(root, list):
        index = 0
        for element in root:
            recurse_refs(element, path + "[%d]" % index)
            index += 1

    log.info("%s %d (%s)" % (path, sys.getrefcount(root), root.__class__))
def get_prefixed_url(url):
    """Prepend the configured reverse-proxy URL prefix to *url*."""
    prefix = global_instance().url_prefix
    return prefix + url
class StandbyModule(MgrStandbyModule):
    """Runs while this mgr is a standby: serves a page that redirects
    browsers to whichever mgr daemon is currently active."""

    # serve() body (the ``def serve`` line falls outside this excerpt):
        # Bind address/port from per-daemon config; '::' listens on all
        # interfaces, 7000 is the dashboard's default port.
        server_addr = self.get_localized_config('server_addr', '::')
        server_port = self.get_localized_config('server_port', '7000')
        if server_addr is None:
            raise RuntimeError('no server_addr configured; try "ceph config-key set mgr/dashboard/server_addr <ip>"')
        log.info("server_addr: %s server_port: %s" % (server_addr, server_port))
        cherrypy.config.update({
            'server.socket_host': server_addr,
            'server.socket_port': int(server_port),
            'engine.autoreload.on': False
        # (closing of the config dict is outside this excerpt)

        # Templates load from the module's own directory.
        current_dir = os.path.dirname(os.path.abspath(__file__))
        jinja_loader = jinja2.FileSystemLoader(current_dir)
        env = jinja2.Environment(loader=jinja_loader)

        # Root controller fragment: redirect to the active mgr when its
        # URI is known, otherwise render the self-refreshing standby page.
            active_uri = module.get_active_uri()
                log.info("Redirecting to active '{0}'".format(active_uri))
                raise cherrypy.HTTPRedirect(active_uri)
                # No active URI published yet: standby.html retries after
                # a short delay.
                template = env.get_template("standby.html")
                return template.render(delay=5)

        cherrypy.tree.mount(Root(), "/", {})
        log.info("Starting engine...")
        cherrypy.engine.start()
        log.info("Waiting for engine...")
        # Block here until shutdown() stops the engine.
        cherrypy.engine.wait(state=cherrypy.engine.states.STOPPED)
        log.info("Engine done.")

    # shutdown() body (def line outside this excerpt):
        log.info("Stopping server...")
        # Wait until the engine is fully started before stopping it, so
        # a concurrent serve() is not raced mid-startup.
        cherrypy.engine.wait(state=cherrypy.engine.states.STARTED)
        cherrypy.engine.stop()
        log.info("Stopped server")
class Module(MgrModule):
    """The active dashboard module: serves the web UI via cherrypy."""

    def __init__(self, *args, **kwargs):
        super(Module, self).__init__(*args, **kwargs)
        # Register ourselves so request handlers can reach this object
        # through global_instance().
        _global_instance['plugin'] = self
        self.log.info("Constructing module {0}: instance {1}".format(
            __name__, _global_instance))

        # Cluster/audit log history is replayed once at startup; until
        # primed, live notifications are handled per notify().
        self.log_primed = False
        self.log_buffer = collections.deque(maxlen=LOG_BUFFER_SIZE)
        self.audit_buffer = collections.deque(maxlen=LOG_BUFFER_SIZE)

        # Keep a librados instance for those that need it.

        # Stateful instances of RbdLs, hold cached results. Key to dict

        # Stateful instance of RbdPoolLs, hold cached list of RBD
        self.rbd_pool_ls = RbdPoolLs(self)

        # Stateful instance of RbdISCSI
        self.rbd_iscsi = rbd_iscsi.Controller(self)

        # Stateful instance of RbdMirroring, hold cached results.
        self.rbd_mirroring = rbd_mirroring.Controller(self)

        # Stateful instances of CephFSClients, hold cached results. Key to
        self.cephfs_clients = {}

        # A short history of pool df stats
        # pool id -> stat name -> deque of samples (maxlen 10).
        self.pool_stats = defaultdict(lambda: defaultdict(
            lambda: collections.deque(maxlen=10)))

        # A prefix for all URLs to use the dashboard with a reverse http proxy

    # rados accessor fragment (its declaration lines are outside this
    # excerpt):
        """
        A librados instance to be shared by any classes within
        this mgr module that want one.
        """
        # Connect using the context capsule handed over by the C++ side
        # and cache the handle on self._rados.
        ctx_capsule = self.get_context()
        self._rados = rados.Rados(context=ctx_capsule)
        self._rados.connect()
def update_pool_stats(self):
    """Sample the cluster 'df' pool stats into self.pool_stats history."""
    df = global_instance().get("df")
    pool_stats = dict([(p['id'], p['stats']) for p in df['pools']])
    # NOTE(review): `now` is bound on a line missing from this excerpt —
    # presumably the current timestamp for the samples appended below.
    for pool_id, stats in pool_stats.items():
        for stat_name, stat_val in stats.items():
            # Newest sample first; deque maxlen bounds the history.
            self.pool_stats[pool_id][stat_name].appendleft((now, stat_val))
def notify(self, notify_type, notify_val):
    """Mgr callback: buffer cluster log lines and refresh pool stats."""
    if notify_type == "clog":
        # Only store log messages once we've done our initial load,
        # so that we don't end up duplicating.
        # (The log_primed guard itself is on a line outside this excerpt.)
        if notify_val['channel'] == "audit":
            self.audit_buffer.appendleft(notify_val)
        # Non-audit ("cluster") branch — its else line is outside this
        # excerpt:
            self.log_buffer.appendleft(notify_val)
    elif notify_type == "pg_summary":
        self.update_pool_stats()
def get_sync_object(self, object_type, path=None):
    """Fetch a cluster map by wrapper type, optionally descending `path`.

    object_type is one of the wrapper classes from `types` (OsdMap,
    Config, MonMap, FsMap, PgSummary, Health, MonStatus).  Raises
    NotFound when `path` does not resolve into the object.
    """
    if object_type == OsdMap:
        data = self.get("osd_map")
        assert data is not None
        # Augment the bare osdmap with its sibling dumps.
        data['tree'] = self.get("osd_map_tree")
        data['crush'] = self.get("osd_map_crush")
        data['crush_map_text'] = self.get("osd_map_crush_map_text")
        data['osd_metadata'] = self.get("osd_metadata")
    elif object_type == Config:
        data = self.get("config")
    elif object_type == MonMap:
        data = self.get("mon_map")
    elif object_type == FsMap:
        data = self.get("fs_map")
    elif object_type == PgSummary:
        data = self.get("pg_summary")
        self.log.debug("JSON: {0}".format(data))
        obj = PgSummary(data)
    elif object_type == Health:
        # Health/MonStatus arrive as a JSON string under the 'json' key.
        data = self.get("health")
        obj = Health(json.loads(data['json']))
    elif object_type == MonStatus:
        data = self.get("mon_status")
        obj = MonStatus(json.loads(data['json']))
    # Final else branch (its line is outside this excerpt):
        raise NotImplementedError(object_type)

    # TODO: move 'path' handling up into C++ land so that we only
    # Pythonize the part we're interested in
    # Path walk fragment: dicts are indexed, wrapper objects use getattr;
    # several lines of the loop are outside this excerpt.
    if isinstance(obj, dict):
        obj = getattr(obj, part)
    except (AttributeError, KeyError):
        raise NotFound(object_type, path)
# shutdown() body (the def line is outside this excerpt): stop cherrypy
# and release librados when the module is torn down.
    log.info("Stopping server...")
    cherrypy.engine.exit()
    log.info("Stopped server")

    log.info("Stopping librados...")
    # NOTE(review): the line above this call (outside the excerpt)
    # presumably guards against a never-opened rados handle — confirm.
    self._rados.shutdown()
    log.info("Stopped librados.")
def get_latest(self, daemon_type, daemon_name, stat):
    """Return the most recent value of one perf counter (fragment)."""
    data = self.get_counter(daemon_type, daemon_name, stat)[stat]
    # (the return statements fall outside this excerpt)

def get_rate(self, daemon_type, daemon_name, stat):
    """Return the rate of change between the two newest counter samples."""
    data = self.get_counter(daemon_type, daemon_name, stat)[stat]

    if data and len(data) > 1:
        # Samples are (timestamp, value) pairs: delta value / delta time.
        return (data[-1][1] - data[-2][1]) / float(data[-1][0] - data[-2][0])
    # (the fallback return for short series is outside this excerpt)
def format_dimless(self, n, width, colored=True):
    """
    Format a number without units, so as to fit into `width` characters, substituting
    an appropriate unit suffix.
    """
    units = [' ', 'k', 'M', 'G', 'T', 'P']
    # Pick the smallest unit whose integer rendering fits in width-1.
    # (unit initialisation/increment lines are outside this excerpt)
    while len("%s" % (int(n) // (1000**unit))) > width - 1:
        truncated_float = ("%f" % (n / (1000.0 ** unit)))[0:width - 1]
        if truncated_float[-1] == '.':
            truncated_float = " " + truncated_float[0:-1]
        # Branch for unit 0 (its condition line is outside this excerpt):
        truncated_float = "%{wid}d".format(wid=width-1) % n
    formatted = "%s%s" % (truncated_float, units[unit])

    # TODO: html equivalent
    # color = self.BLACK, False
    # color = self.YELLOW, False
    # return self.bold(self.colorize(formatted[0:-1], color[0], color[1])) \
    #     + self.bold(self.colorize(formatted[-1], self.BLACK, False))
def fs_status(self, fs_id):
    """Assemble a JSON-serializable status blob for one filesystem.

    Covers per-rank MDS rows, standby-replay daemons, pool usage rows,
    plain standbys, and the ceph versions seen across MDS daemons.
    """
    mds_versions = defaultdict(list)

    # Locate the filesystem with the requested id in the fsmap.
    fsmap = self.get("fs_map")
    for fs in fsmap['filesystems']:
        if fs['id'] == fs_id:

    mdsmap = filesystem['mdsmap']

    # One row per active rank.
    for rank in mdsmap["in"]:
        up = "mds_{0}".format(rank) in mdsmap["up"]
            gid = mdsmap['up']["mds_{0}".format(rank)]
            info = mdsmap['info']['gid_{0}'.format(gid)]
            dns = self.get_latest("mds", info['name'], "mds.inodes")
            inos = self.get_latest("mds", info['name'], "mds_mem.ino")

            # Session count read from this rank (rank-0 guard lines are
            # outside this excerpt).
            client_count = self.get_latest("mds", info['name'],
                                           "mds_sessions.session_count")
            elif client_count == 0:
                # In case rank 0 was down, look at another rank's
                # sessionmap to get an indication of clients.
                client_count = self.get_latest("mds", info['name'],
                                               "mds_sessions.session_count")

            laggy = "laggy_since" in info

            # State strings look like "up:active"; keep the part after ':'.
            state = info['state'].split(":")[1]

            # if state == "active" and not laggy:
            #     c_state = self.colorize(state, self.GREEN)
            #     c_state = self.colorize(state, self.YELLOW)

            # Populate based on context of state, e.g. client
            # ops for an active daemon, replay progress, reconnect
            if state == "active":
                activity = "Reqs: " + self.format_dimless(
                    self.get_rate("mds", info['name'], "mds_server.handle_client_request"),

            metadata = self.get_metadata('mds', info['name'])
            mds_versions[metadata.get('ceph_version', 'unknown')].append(info['name'])

            # Per-rank row fragment:
            "activity": activity,

    # Find the standby replays
    # NOTE(review): iteritems() is Python 2-only.
    for gid_str, daemon_info in mdsmap['info'].iteritems():
        if daemon_info['state'] != "up:standby-replay":

        inos = self.get_latest("mds", daemon_info['name'], "mds_mem.ino")
        dns = self.get_latest("mds", daemon_info['name'], "mds.inodes")

        activity = "Evts: " + self.format_dimless(
            self.get_rate("mds", daemon_info['name'], "mds_log.replay"),

            # Standby-replay row fragment:
            "rank": "{0}-s".format(daemon_info['rank']),
            "state": "standby-replay",
            "mds": daemon_info['name'],
            "activity": activity,

    # Pool usage rows: metadata pool plus each data pool.
    # NOTE(review): `df` is bound on a line outside this excerpt,
    # presumably via self.get("df").
    pool_stats = dict([(p['id'], p['stats']) for p in df['pools']])
    osdmap = self.get("osd_map")
    pools = dict([(p['pool'], p) for p in osdmap['pools']])
    metadata_pool_id = mdsmap['metadata_pool']
    data_pool_ids = mdsmap['data_pools']

    for pool_id in [metadata_pool_id] + data_pool_ids:
        pool_type = "metadata" if pool_id == metadata_pool_id else "data"
        stats = pool_stats[pool_id]
            # Pool row fragment:
            "pool": pools[pool_id]['pool_name'],
            "used": stats['bytes_used'],
            "avail": stats['max_avail']

    # Standby daemons not attached to any filesystem.
    for standby in fsmap['standbys']:
        metadata = self.get_metadata('mds', standby['name'])
        mds_versions[metadata.get('ceph_version', 'unknown')].append(standby['name'])

        standby_table.append({
            'name': standby['name']

        # Top-level result dict fragment:
        "name": mdsmap['fs_name'],
        "client_count": client_count,
        "clients_url": get_prefixed_url("/clients/{0}/".format(fs_id)),
        "standbys": standby_table,
        "versions": mds_versions
def _prime_log(self):
    """Seed log_buffer/audit_buffer with recent cluster log history."""
    def load_buffer(buf, channel_name):
        # Ask the mons for the last LOG_BUFFER_SIZE lines on this channel.
        result = CommandResult("")
        self.send_command(result, "mon", "", json.dumps({
            "prefix": "log last",
            "channel": channel_name,
            "num": LOG_BUFFER_SIZE
        r, outb, outs = result.wait()
        # Error branch (its condition line is outside this excerpt):
        # Oh well. We won't let this stop us though.
        self.log.error("Error fetching log history (r={0}, \"{1}\")".format(
        # Parse and buffer; tolerate malformed JSON (handler lines are
        # outside this excerpt).
        lines = json.loads(outb)
        self.log.error("Error decoding log history")

    load_buffer(self.log_buffer, "cluster")
    load_buffer(self.audit_buffer, "audit")
    # Only after priming do live notifications start being buffered.
    self.log_primed = True
# serve() preamble (the def line is outside this excerpt): build the
# jinja2 environment shared by all page handlers defined below.
current_dir = os.path.dirname(os.path.abspath(__file__))

jinja_loader = jinja2.FileSystemLoader(current_dir)
env = jinja2.Environment(loader=jinja_loader)
class EndPoint(object):
    """Shared helpers for the cherrypy controllers defined below."""

    def _health_data(self):
        # Health checks dict -> sorted list for easy JS rendering.
        health = global_instance().get_sync_object(Health).data
        # Transform the `checks` dict into a list for the convenience
        # of rendering from javascript.
        # NOTE(review): iteritems() and sorted(cmp=...) are Python 2-only.
        for k, v in health['checks'].iteritems():
        checks = sorted(checks, cmp=lambda a, b: a['severity'] > b['severity'])

        health['checks'] = checks

    def _toplevel_data(self):
        """
        Data consumed by the base.html template
        """
        status, data = global_instance().rbd_pool_ls.get()
        # Failure branch (its condition line is outside this excerpt):
        log.warning("Failed to get RBD pool list")

            # Pool entry fragment, sorted by name:
            "url": get_prefixed_url("/rbd_pool/{0}/".format(name))
        ], key=lambda k: k['name'])

        status, rbd_mirroring = global_instance().rbd_mirroring.toplevel.get()
        if rbd_mirroring is None:
            log.warning("Failed to get RBD mirroring summary")

        fsmap = global_instance().get_sync_object(FsMap)
            # Filesystem entry fragment:
            "name": f['mdsmap']['fs_name'],
            "url": get_prefixed_url("/filesystem/{0}/".format(f['id']))
            for f in fsmap.data['filesystems']

        # Assembled result fragment:
        'rbd_pools': rbd_pools,
        'rbd_mirroring': rbd_mirroring,
        'health_status': self._health_data()['status'],
        'filesystems': filesystems
class Root(EndPoint):
    """Main UI controller: filesystem, clients, rbd and health pages."""

    def filesystem(self, fs_id):
        # Render the per-filesystem status page.
        template = env.get_template("filesystem.html")

        toplevel_data = self._toplevel_data()
            # content_data fragment:
            "fs_status": global_instance().fs_status(int(fs_id))

        return template.render(
            url_prefix = global_instance().url_prefix,
            ceph_version=global_instance().version,
            path_info=cherrypy.request.path_info,
            toplevel_data=json.dumps(toplevel_data, indent=2),
            content_data=json.dumps(content_data, indent=2)

    # JSON endpoint backing the filesystem page.
    @cherrypy.tools.json_out()
    def filesystem_data(self, fs_id):
        return global_instance().fs_status(int(fs_id))

    def _clients(self, fs_id):
        # Lazily create one cached CephFSClients helper per filesystem.
        cephfs_clients = global_instance().cephfs_clients.get(fs_id, None)
        if cephfs_clients is None:
            cephfs_clients = CephFSClients(global_instance(), fs_id)
            global_instance().cephfs_clients[fs_id] = cephfs_clients

        status, clients = cephfs_clients.get()
        #TODO do something sensible with status

        # Decorate the metadata with some fields that will be
        # independent of whether it's a kernel or userspace
        # client, so that the javascript doesn't have to grok that.
        for client in clients:
            if "ceph_version" in client['client_metadata']:
                client['type'] = "userspace"
                client['version'] = client['client_metadata']['ceph_version']
                client['hostname'] = client['client_metadata']['hostname']
            elif "kernel_version" in client['client_metadata']:
                client['type'] = "kernel"
                client['version'] = client['client_metadata']['kernel_version']
                client['hostname'] = client['client_metadata']['hostname']
            # Final else branch (its line is outside this excerpt):
                client['type'] = "unknown"
                client['version'] = ""
                client['hostname'] = ""

    def clients(self, fscid_str):
        # Parse/validate the filesystem id (try/except lines are outside
        # this excerpt); bad ids produce a 400.
        fscid = int(fscid_str)
        raise cherrypy.HTTPError(400,
            "Invalid filesystem id {0}".format(fscid_str))

        fs_name = FsMap(global_instance().get(
            "fs_map")).get_filesystem(fscid)['mdsmap']['fs_name']
        # Lookup-failure branch: dump the fsmap to aid debugging, 404.
        log.warning("Missing FSCID, dumping fsmap:\n{0}".format(
            json.dumps(global_instance().get("fs_map"), indent=2)
        raise cherrypy.HTTPError(404,
            "No filesystem with id {0}".format(fscid))

        clients = self._clients(fscid)
        global_instance().log.debug(json.dumps(clients, indent=2))
            # content_data fragment:
            "fs_url": get_prefixed_url("/filesystem/" + fscid_str + "/")

        template = env.get_template("clients.html")
        return template.render(
            url_prefix = global_instance().url_prefix,
            ceph_version=global_instance().version,
            path_info=cherrypy.request.path_info,
            toplevel_data=json.dumps(self._toplevel_data(), indent=2),
            content_data=json.dumps(content_data, indent=2)

    @cherrypy.tools.json_out()
    def clients_data(self, fs_id):
        return self._clients(int(fs_id))
def _rbd_pool(self, pool_name):
    # One cached RbdLs helper per pool, created on first use.
    rbd_ls = global_instance().rbd_ls.get(pool_name, None)
        rbd_ls = RbdLs(global_instance(), pool_name)
        global_instance().rbd_ls[pool_name] = rbd_ls

    status, value = rbd_ls.get()
    # Stale-data path: schedule a background refresh once the remainder
    # of the polling interval has elapsed.
    wait = interval - rbd_ls.latency
    threading.Thread(target=wait_and_load).start()

    assert status != RbdLs.VALUE_NONE # FIXME bubble status up to UI

def rbd_pool(self, pool_name):
    # Render the per-pool RBD image listing page.
    template = env.get_template("rbd_pool.html")

    toplevel_data = self._toplevel_data()

    images = self._rbd_pool(pool_name)
        # content_data fragment:
        "pool_name": pool_name

    return template.render(
        url_prefix = global_instance().url_prefix,
        ceph_version=global_instance().version,
        path_info=cherrypy.request.path_info,
        toplevel_data=json.dumps(toplevel_data, indent=2),
        content_data=json.dumps(content_data, indent=2)

@cherrypy.tools.json_out()
def rbd_pool_data(self, pool_name):
    return self._rbd_pool(pool_name)

def _rbd_mirroring(self):
    status, data = global_instance().rbd_mirroring.content_data.get()
    # Failure branch (its condition line is outside this excerpt):
    log.warning("Failed to get RBD mirroring status")

def rbd_mirroring(self):
    template = env.get_template("rbd_mirroring.html")

    toplevel_data = self._toplevel_data()
    content_data = self._rbd_mirroring()

    return template.render(
        url_prefix = global_instance().url_prefix,
        ceph_version=global_instance().version,
        path_info=cherrypy.request.path_info,
        toplevel_data=json.dumps(toplevel_data, indent=2),
        content_data=json.dumps(content_data, indent=2)

@cherrypy.tools.json_out()
def rbd_mirroring_data(self):
    return self._rbd_mirroring()

def _rbd_iscsi(self):
    status, data = global_instance().rbd_iscsi.content_data.get()
    # Failure branch (its condition line is outside this excerpt):
    log.warning("Failed to get RBD iSCSI status")

# rbd_iscsi page handler body (def line outside this excerpt):
    template = env.get_template("rbd_iscsi.html")

    toplevel_data = self._toplevel_data()
    content_data = self._rbd_iscsi()

    return template.render(
        url_prefix = global_instance().url_prefix,
        ceph_version=global_instance().version,
        path_info=cherrypy.request.path_info,
        toplevel_data=json.dumps(toplevel_data, indent=2),
        content_data=json.dumps(content_data, indent=2)

@cherrypy.tools.json_out()
def rbd_iscsi_data(self):
    return self._rbd_iscsi()
# health page handler body (def line outside this excerpt):
    template = env.get_template("health.html")
    return template.render(
        url_prefix = global_instance().url_prefix,
        ceph_version=global_instance().version,
        path_info=cherrypy.request.path_info,
        toplevel_data=json.dumps(self._toplevel_data(), indent=2),
        content_data=json.dumps(self._health(), indent=2)

# servers page handler body (def line outside this excerpt):
    template = env.get_template("servers.html")
    return template.render(
        url_prefix = global_instance().url_prefix,
        ceph_version=global_instance().version,
        path_info=cherrypy.request.path_info,
        toplevel_data=json.dumps(self._toplevel_data(), indent=2),
        content_data=json.dumps(self._servers(), indent=2)

# _servers() result fragment:
    'servers': global_instance().list_servers()

@cherrypy.tools.json_out()
def servers_data(self):
    return self._servers()

# _health() body (def line outside this excerpt): build the JSON blob
# behind the health page.
    # Fuse osdmap with pg_summary to get description of pools
    # including their PG states
    osd_map = global_instance().get_sync_object(OsdMap).data
    pg_summary = global_instance().get_sync_object(PgSummary).data

    # Ensure at least one stats sample exists before rendering.
    if len(global_instance().pool_stats) == 0:
        global_instance().update_pool_stats()

    for pool in osd_map['pools']:
        pool['pg_status'] = pg_summary['by_pool'][pool['pool'].__str__()]
        stats = global_instance().pool_stats[pool['pool']]

        def get_rate(series):
            # Series is newest-first (timestamp, value) pairs.
            return (float(series[0][1]) - float(series[1][1])) / (float(series[0][0]) - float(series[1][0]))

        for stat_name, stat_series in stats.items():
            # Per-stat fragment:
            'latest': stat_series[0][1],
            'rate': get_rate(stat_series),
            'series': [i for i in stat_series]

    # Not needed, skip the effort of transmitting this
    del osd_map['pg_temp']

    df = global_instance().get("df")
    df['stats']['total_objects'] = sum(
        [p['stats']['objects'] for p in df['pools']])

    # Assembled result fragment:
    "health": self._health_data(),
    "mon_status": global_instance().get_sync_object(
    "fs_map": global_instance().get_sync_object(FsMap).data,
    "clog": list(global_instance().log_buffer),
    "audit_log": list(global_instance().audit_buffer),
    "mgr_map": global_instance().get("mgr_map"),

@cherrypy.tools.json_out()
def health_data(self):
    return self._health()

@cherrypy.tools.json_out()
def toplevel_data(self):
    return self._toplevel_data()
def _get_mds_names(self, filesystem_id=None):
    # Collect MDS daemon names, optionally restricted to one filesystem.
    fsmap = global_instance().get("fs_map")
    for fs in fsmap['filesystems']:
        if filesystem_id is not None and fs['id'] != filesystem_id:
        names.extend([info['name'] for _, info in fs['mdsmap']['info'].items()])

    # Unrestricted queries also include the standby daemons.
    if filesystem_id is None:
        names.extend(info['name'] for info in fsmap['standbys'])

@cherrypy.tools.json_out()
def mds_counters(self, fs_id):
    """
    Result format: map of daemon name to map of counter to list of datapoints
    """

    # Opinionated list of interesting performance counters for the GUI --
    # if you need something else just add it. See how simple life is
    # when you don't have to write general purpose APIs?
    # (counters list fragment:)
    "mds_server.handle_client_request",
    "mds_cache.num_strays",
    "mds.exported_inodes",
    "mds.imported_inodes",

    mds_names = self._get_mds_names(int(fs_id))

    for mds_name in mds_names:
        result[mds_name] = {}
        for counter in counters:
            data = global_instance().get_counter("mds", mds_name, counter)
            # Missing counters become empty series rather than an error
            # (the guard lines are outside this excerpt).
            result[mds_name][counter] = data[counter]
            result[mds_name][counter] = []

@cherrypy.tools.json_out()
def get_counter(self, type, id, path):
    # Thin passthrough to the mgr's perf counter query.
    return global_instance().get_counter(type, id, path)

@cherrypy.tools.json_out()
def get_perf_schema(self, **args):
    # Return the perf counter schema with keys sorted at every level.
    type = args.get('type', '')
    id = args.get('id', '')
    schema = global_instance().get_perf_schema(type, id)
    for k1 in schema.keys(): # 'perf_schema'
        ret[k1] = collections.OrderedDict()
        for k2 in sorted(schema[k1].keys()):
            sorted_dict = collections.OrderedDict(
                sorted(schema[k1][k2].items(), key=lambda i: i[0])
            ret[k1][k2] = sorted_dict
# Back in serve(): normalize the reverse-proxy URL prefix to the form
# "/prefix" (leading slash, no trailing slash), or empty.
url_prefix = self.get_config('url_prefix')
if url_prefix == None:
if len(url_prefix) != 0:
    if url_prefix[0] != '/':
        url_prefix = '/'+url_prefix
    if url_prefix[-1] == '/':
        url_prefix = url_prefix[:-1]
self.url_prefix = url_prefix

# Bind address/port from per-daemon config, as in StandbyModule.serve().
server_addr = self.get_localized_config('server_addr', '::')
server_port = self.get_localized_config('server_port', '7000')
if server_addr is None:
    raise RuntimeError('no server_addr configured; try "ceph config-key set mgr/dashboard/server_addr <ip>"')
log.info("server_addr: %s server_port: %s" % (server_addr, server_port))
cherrypy.config.update({
    'server.socket_host': server_addr,
    'server.socket_port': int(server_port),
    'engine.autoreload.on': False

osdmap = self.get_osdmap()
log.info("latest osdmap is %d" % osdmap.get_epoch())

# Publish the URI that others may use to access the service we're
# about to start serving
self.set_uri("http://{0}:{1}/".format(
    socket.getfqdn() if server_addr == "::" else server_addr,

# Serve the bundled static assets via cherrypy's staticdir tool.
static_dir = os.path.join(current_dir, 'static')
    "tools.staticdir.on": True,
    'tools.staticdir.dir': static_dir
log.info("Serving static from {0}".format(static_dir))
class OSDEndpoint(EndPoint):
    """Controller for the per-OSD perf page and the OSD listing."""

    def _osd(self, osd_id):
        # Assemble osdmap entry, metadata and perf histogram for one OSD.
        osd_map = global_instance().get("osd_map")

        # Find this OSD's entry in the osdmap.
        for o in osd_map['osds']:
            if o['osd'] == osd_id:

        assert osd is not None # TODO 400

        osd_spec = "{0}".format(osd_id)

        osd_metadata = global_instance().get_metadata(

        # Fetch the perf histogram straight from the daemon.
        result = CommandResult("")
        global_instance().send_command(result, "osd", osd_spec,
            "prefix": "perf histogram dump",

        r, outb, outs = result.wait()
        histogram = json.loads(outb)

            # Result dict fragment:
            "osd_metadata": osd_metadata,
            "osd_histogram": histogram

    def perf(self, osd_id):
        template = env.get_template("osd_perf.html")
        toplevel_data = self._toplevel_data()

        return template.render(
            url_prefix = global_instance().url_prefix,
            ceph_version=global_instance().version,
            path_info='/osd' + cherrypy.request.path_info,
            toplevel_data=json.dumps(toplevel_data, indent=2),
            content_data=json.dumps(self._osd(osd_id), indent=2)

    @cherrypy.tools.json_out()
    def perf_data(self, osd_id):
        return self._osd(osd_id)

    @cherrypy.tools.json_out()
    # (the def line for this endpoint is outside this excerpt)
        return self._osds_by_server()

    def _osd_summary(self, osd_id, osd_info):
        """
        The info used for displaying an OSD in a table
        """
        osd_spec = "{0}".format(osd_id)

        result['id'] = osd_id
        result['stats_history'] = {}

        # Rate-style stats (kept with history) vs latest-value stats.
        for s in ['osd.op_w', 'osd.op_in_bytes', 'osd.op_r', 'osd.op_out_bytes']:
            result['stats'][s.split(".")[1]] = global_instance().get_rate('osd', osd_spec, s)
            result['stats_history'][s.split(".")[1]] = \
                global_instance().get_counter('osd', osd_spec, s)[s]

        for s in ["osd.numpg", "osd.stat_bytes", "osd.stat_bytes_used"]:
            result['stats'][s.split(".")[1]] = global_instance().get_latest('osd', osd_spec, s)

        result['up'] = osd_info['up']
        result['in'] = osd_info['in']

        result['url'] = get_prefixed_url("/osd/perf/{0}".format(osd_id))

    def _osds_by_server(self):
        # Group OSD summaries by the hostname reported in server metadata.
        result = defaultdict(list)
        servers = global_instance().list_servers()

        osd_map = global_instance().get_sync_object(OsdMap)

        for server in servers:
            hostname = server['hostname']
            services = server['services']
                if s["type"] == "osd":
                    osd_id = int(s["id"])
                    # If metadata doesn't tally with osdmap, drop it.
                    if osd_id not in osd_map.osds_by_id:
                        global_instance().log.warn(
                            "OSD service {0} missing in OSDMap, stale metadata?".format(osd_id))

                    summary = self._osd_summary(osd_id,
                                                osd_map.osds_by_id[osd_id])

                    result[hostname].append(summary)

            result[hostname].sort(key=lambda a: a['id'])
            if len(result[hostname]):
                # Mark the first row so the template can render rowspans.
                result[hostname][0]['first'] = True

        global_instance().log.warn("result.size {0} servers.size {1}".format(
            len(result), len(servers)

        # Return list form for convenience of rendering
        return sorted(result.items(), key=lambda a: a[0])
# OSD index page handler body (def line outside this excerpt):
    """
    List of all OSDS grouped by host
    """
    template = env.get_template("osds.html")
    toplevel_data = self._toplevel_data()
        # content_data fragment:
        "osds_by_server": self._osds_by_server()

    return template.render(
        url_prefix = global_instance().url_prefix,
        ceph_version=global_instance().version,
        path_info='/osd' + cherrypy.request.path_info,
        toplevel_data=json.dumps(toplevel_data, indent=2),
        content_data=json.dumps(content_data, indent=2)

# Back in serve(): mount the controllers under the configured URL prefix
# and run cherrypy until the module is shut down.
cherrypy.tree.mount(Root(), get_prefixed_url("/"), conf)
cherrypy.tree.mount(OSDEndpoint(), get_prefixed_url("/osd"), conf)

log.info("Starting engine on {0}:{1}...".format(
    server_addr, server_port))
cherrypy.engine.start()
log.info("Waiting for engine...")
# block() keeps serve() alive until cherrypy is told to exit.
cherrypy.engine.block()
log.info("Engine done.")