1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
9 #include "common/errno.h"
10 #include "common/safe_io.h"
11 #include "common/Clock.h"
12 #include "common/Graylog.h"
13 #include "common/valgrind.h"
15 #include "include/assert.h"
16 #include "include/compat.h"
17 #include "include/on_exit.h"
20 #include "SubsystemMap.h"
// Default queue limits: at most DEFAULT_MAX_NEW unflushed entries before
// submitters block in submit_entry(), and up to DEFAULT_MAX_RECENT flushed
// entries retained on m_recent for dump_recent().
22 #define DEFAULT_MAX_NEW 100
23 #define DEFAULT_MAX_RECENT 10000
// Number of Entry objects pre-seeded into m_recent by the constructor's
// "prealloc testing" kludge below.
25 #define PREALLOC 1000000
// Process-wide registry of exit-time callbacks; set_flush_on_exit() adds
// log_on_exit() here so pending entries can be flushed during exit().
31 static OnExitManager exit_callbacks;
// exit()-time callback registered by Log::set_flush_on_exit(); `p` is the
// heap-allocated Log* indirection cell created there (nulled by ~Log()).
// NOTE(review): the lines that dereference p and flush the Log (and the
// braces) are not shown in this listing; only the cell cleanup survives.
33 static void log_on_exit(void *p)
38 delete (Log **)p;// Delete allocated pointer (not Log object, the pointer only!)
// Construct a Log bound to SubsystemMap `s`: zero the lock-holder
// bookkeeping, set default sink thresholds and queue limits, create the
// two mutexes and two condition variables, and pre-seed m_recent with
// PREALLOC Entry objects.
// NOTE(review): this listing is incomplete -- other member initializers,
// the `int ret;` declaration, the assert checks after each pthread_*_init
// call, and the braces are not shown.
41 Log::Log(SubsystemMap *s)
42 : m_indirect_this(NULL),
// Holder fields record which thread owns each mutex (see
// is_inside_log_lock()).
44 m_queue_mutex_holder(0),
45 m_flush_mutex_holder(0),
// Sink thresholds: an entry goes to a sink when threshold >= entry
// priority (see _flush); negative defaults effectively disable a sink.
51 m_syslog_log(-2), m_syslog_crash(-2),
52 m_stderr_log(1), m_stderr_crash(-1),
53 m_graylog_log(-3), m_graylog_crash(-3),
55 m_max_new(DEFAULT_MAX_NEW),
56 m_max_recent(DEFAULT_MAX_RECENT),
61 ret = pthread_mutex_init(&m_flush_mutex, NULL);
64 ret = pthread_mutex_init(&m_queue_mutex, NULL);
67 ret = pthread_cond_init(&m_cond_loggers, NULL);
70 ret = pthread_cond_init(&m_cond_flusher, NULL);
73 // kludge for prealloc testing
// Pre-fill the recycle pool consumed by create_entry()'s perf kludge.
75 for (int i=0; i < PREALLOC; i++)
76 m_recent.enqueue(new Entry)
// Body fragment of the destructor (the `Log::~Log()` signature line is
// not present in this listing): detach the exit-callback indirection
// cell, close the log fd, and destroy the pthread primitives.
81 if (m_indirect_this) {
// Null the leaked cell so a later exit()-time log_on_exit() finds no Log.
82 *m_indirect_this = NULL;
// The flusher thread must already have been stopped.
85 assert(!is_started());
// Retry-safe close of the log fd; the result is deliberately discarded.
87 VOID_TEMP_FAILURE_RETRY(::close(m_fd));
89 pthread_mutex_destroy(&m_queue_mutex);
90 pthread_mutex_destroy(&m_flush_mutex);
91 pthread_cond_destroy(&m_cond_loggers);
92 pthread_cond_destroy(&m_cond_flusher);
// Register an exit()-time flush for this Log (idempotent): allocate a
// heap cell holding `this` and hand it to the process-wide exit_callbacks
// manager, which will later invoke log_on_exit() with it.
// NOTE(review): the braces of this function are not shown in this listing.
98 void Log::set_flush_on_exit()
100 // Make sure we flush on shutdown. We do this by deliberately
101 // leaking an indirect pointer to ourselves (on_exit() can't
102 // unregister a callback). This is not racy only because we
103 // assume that exit() won't race with ~Log().
104 if (m_indirect_this == NULL) {
// Heap-allocated Log* cell; ~Log() nulls it, log_on_exit() deletes it.
105 m_indirect_this = new (Log*)(this);
106 exit_callbacks.add_callback(log_on_exit, m_indirect_this);
// NOTE(review): body not shown in this listing; presumably assigns n to
// m_max_new (the unflushed-queue cap enforced in submit_entry) -- confirm
// against upstream.
110 void Log::set_max_new(int n)
115 void Log::set_max_recent(int n)
117 pthread_mutex_lock(&m_flush_mutex);
118 m_flush_mutex_holder = pthread_self();
120 m_flush_mutex_holder = 0;
121 pthread_mutex_unlock(&m_flush_mutex);
// NOTE(review): body not shown in this listing; presumably stores fn in
// m_log_file for reopen_log_file() to open -- confirm against upstream.
124 void Log::set_log_file(string fn)
// Close and reopen m_fd on the configured m_log_file path (append,
// mode 0644), re-applying m_uid/m_gid ownership when either is set.
// Serialized against flushing via m_flush_mutex.
// NOTE(review): this listing is incomplete -- e.g. the `if (r < 0)` guard
// for the chown error message, the no-log-file else branch, and the
// closing braces are not shown.
129 void Log::reopen_log_file()
131 pthread_mutex_lock(&m_flush_mutex);
132 m_flush_mutex_holder = pthread_self();
// Retry-safe close of the previous fd; result deliberately discarded.
134 VOID_TEMP_FAILURE_RETRY(::close(m_fd));
135 if (m_log_file.length()) {
136 m_fd = ::open(m_log_file.c_str(), O_CREAT|O_WRONLY|O_APPEND, 0644);
// Only chown when a non-root uid or gid was configured.
137 if (m_fd >= 0 && (m_uid || m_gid)) {
138 int r = ::fchown(m_fd, m_uid, m_gid);
141 cerr << "failed to chown " << m_log_file << ": " << cpp_strerror(r)
148 m_flush_mutex_holder = 0;
149 pthread_mutex_unlock(&m_flush_mutex);
// Change ownership of the currently open log fd to uid:gid, reporting
// (but not failing on) errors to stderr.
// NOTE(review): this listing appears to omit the flush-mutex-holder
// bookkeeping, an fd-validity guard, the `if (r < 0)` check around the
// error message, and the braces.
152 void Log::chown_log_file(uid_t uid, gid_t gid)
154 pthread_mutex_lock(&m_flush_mutex);
156 int r = ::fchown(m_fd, uid, gid);
159 cerr << "failed to chown " << m_log_file << ": " << cpp_strerror(r)
163 pthread_mutex_unlock(&m_flush_mutex);
166 void Log::set_syslog_level(int log, int crash)
168 pthread_mutex_lock(&m_flush_mutex);
170 m_syslog_crash = crash;
171 pthread_mutex_unlock(&m_flush_mutex);
174 void Log::set_stderr_level(int log, int crash)
176 pthread_mutex_lock(&m_flush_mutex);
178 m_stderr_crash = crash;
179 pthread_mutex_unlock(&m_flush_mutex);
182 void Log::set_graylog_level(int log, int crash)
184 pthread_mutex_lock(&m_flush_mutex);
186 m_graylog_crash = crash;
187 pthread_mutex_unlock(&m_flush_mutex);
190 void Log::start_graylog()
192 pthread_mutex_lock(&m_flush_mutex);
193 if (! m_graylog.get())
194 m_graylog = std::make_shared<Graylog>(m_subs, "dlog");
195 pthread_mutex_unlock(&m_flush_mutex);
199 void Log::stop_graylog()
201 pthread_mutex_lock(&m_flush_mutex);
203 pthread_mutex_unlock(&m_flush_mutex);
// Queue entry `e` for the flusher thread: block while m_new is over the
// m_max_new cap, then wake the flusher.
206 void Log::submit_entry(Entry *e)
208 pthread_mutex_lock(&m_queue_mutex);
209 m_queue_mutex_holder = pthread_self();
// NOTE(review): upstream guards the next line with `if (m_inject_segv)`
// (the flag set by inject_segv() below); that guard is not shown in this
// listing. As written this deliberate null write would fault on every
// call -- it is meant only for crash testing.
212 *(volatile int *)(0) = 0xdead;
214 // wait for flush to catch up
215 while (m_new.m_len > m_max_new)
216 pthread_cond_wait(&m_cond_loggers, &m_queue_mutex);
// NOTE(review): the `m_new.enqueue(e)` line is not shown in this listing.
219 pthread_cond_signal(&m_cond_flusher);
220 m_queue_mutex_holder = 0;
221 pthread_mutex_unlock(&m_queue_mutex);
// Produce an Entry stamped with the current time, calling thread, level
// and subsystem.
// NOTE(review): the branch structure is incomplete in this listing --
// both the plain `new Entry(...)` path and the m_recent-recycling perf
// kludge appear, but the condition selecting between them, some recycled
// field assignments, and the braces are missing.
225 Entry *Log::create_entry(int level, int subsys)
228 return new Entry(ceph_clock_now(),
232 // kludge for perf testing
// Recycle a preallocated Entry instead of heap-allocating.
233 Entry *e = m_recent.dequeue();
234 e->m_stamp = ceph_clock_now();
235 e->m_thread = pthread_self();
237 e->m_subsys = subsys;
// Variant of create_entry that sizes the message buffer from
// *expected_size: allocate one chunk holding the Entry plus `size` bytes
// of message space and placement-construct the Entry into it.
// NOTE(review): as with the other overload, the branch selecting between
// this path and the m_recent-recycling perf kludge, some recycled field
// assignments, and the braces are not shown in this listing.
242 Entry *Log::create_entry(int level, int subsys, size_t* expected_size)
// Concurrent updates of *expected_size are tolerated: mark the race
// benign for valgrind/helgrind and read with a relaxed atomic load.
245 ANNOTATE_BENIGN_RACE_SIZED(expected_size, sizeof(*expected_size),
247 size_t size = __atomic_load_n(expected_size, __ATOMIC_RELAXED);
// Single allocation: Entry header immediately followed by `size` bytes
// of text buffer.
248 void *ptr = ::operator new(sizeof(Entry) + size);
249 return new(ptr) Entry(ceph_clock_now(),
250 pthread_self(), level, subsys,
251 reinterpret_cast<char*>(ptr) + sizeof(Entry), size, expected_size);
253 // kludge for perf testing
254 Entry *e = m_recent.dequeue();
255 e->m_stamp = ceph_clock_now();
256 e->m_thread = pthread_self();
258 e->m_subsys = subsys;
// Body fragment of Log::flush() (the signature and the step that claims
// pending m_new entries into local queue `t` are not shown in this
// listing).
265 pthread_mutex_lock(&m_flush_mutex);
266 m_flush_mutex_holder = pthread_self();
267 pthread_mutex_lock(&m_queue_mutex);
268 m_queue_mutex_holder = pthread_self();
// Wake submitters blocked in submit_entry() on the m_max_new cap.
271 pthread_cond_broadcast(&m_cond_loggers);
272 m_queue_mutex_holder = 0;
273 pthread_mutex_unlock(&m_queue_mutex);
// Write out the claimed entries; flushed entries are retained on
// m_recent for later dump_recent().
274 _flush(&t, &m_recent, false);
// Trim the retained history down to the configured cap.
277 while (m_recent.m_len > m_max_recent) {
278 delete m_recent.dequeue();
281 m_flush_mutex_holder = 0;
282 pthread_mutex_unlock(&m_flush_mutex);
// Core formatter/writer: drain queue `t`, decide per entry which sinks
// (log fd, syslog, stderr, graylog) receive it, format it into a stack
// or heap buffer, emit it, and hand the entry to `requeue`. With
// crash=true the subsystem-level check is bypassed so every entry is
// emitted (used by dump_recent()).
// NOTE(review): many lines are missing from this listing: the `Entry *e`
// declaration, `char *buf`/`buflen` setup and the dynamic-case delete[],
// the per-sink if-bodies and braces, the requeue->enqueue(e), and the
// newline termination before safe_write. Also each do_* test below uses
// only the *_crash threshold; upstream selects
// (crash ? m_*_crash : m_*_log) -- the non-crash halves appear lost.
285 void Log::_flush(EntryQueue *t, EntryQueue *requeue, bool crash)
288 while ((e = t->dequeue()) != NULL) {
289 unsigned sub = e->m_subsys;
291 bool should_log = crash || m_subs->get_log_level(sub) >= e->m_prio;
292 bool do_fd = m_fd >= 0 && should_log;
293 bool do_syslog = m_syslog_crash >= e->m_prio && should_log;
294 bool do_stderr = m_stderr_crash >= e->m_prio && should_log;
295 bool do_graylog2 = m_graylog_crash >= e->m_prio && should_log;
298 if (do_fd || do_syslog || do_stderr) {
// Use a stack buffer for typical entries; fall back to the heap above
// 64 KiB to keep large VLAs off the stack.
302 size_t buf_size = 80 + e->size();
303 bool need_dynamic = buf_size >= 0x10000; //avoids >64K buffers
304 //allocation at stack
305 char buf0[need_dynamic ? 1 : buf_size];
307 buf = new char[buf_size];
// Line prefix: negative countdown of remaining entries, timestamp,
// thread id and priority, then the entry text itself.
313 buflen += snprintf(buf, buf_size, "%6d> ", -t->m_len);
314 buflen += e->m_stamp.sprintf(buf + buflen, buf_size-buflen);
315 buflen += snprintf(buf + buflen, buf_size-buflen, " %lx %2d ",
316 (unsigned long)e->m_thread, e->m_prio);
318 buflen += e->snprintf(buf + buflen, buf_size - buflen - 1);
319 if (buflen > buf_size - 1) { //paranoid check, buf was declared
321 buflen = buf_size - 1;
326 syslog(LOG_USER|LOG_INFO, "%s", buf);
330 cerr << buf << std::endl;
// The +1 writes one byte past the text -- upstream stores '\n' at
// buf[buflen] first; that line is not shown here.
334 int r = safe_write(m_fd, buf, buflen+1);
// Apparently reports a write error only when it differs from the last
// one seen, to avoid repeating the same message.
335 if (r != m_fd_last_error) {
337 cerr << "problem writing to " << m_log_file
338 << ": " << cpp_strerror(r)
346 if (do_graylog2 && m_graylog) {
347 m_graylog->log_entry(e);
// Emit one already-formatted message `s` to the log fd, syslog and
// stderr, choosing the crash or normal thresholds per the `crash` flag.
// NOTE(review): the construction of string `b` (presumably s plus a
// newline), the m_fd guard, the r-check around the error message, and
// the braces are not shown in this listing.
354 void Log::_log_message(const char *s, bool crash)
357 size_t len = strlen(s);
362 int r = safe_write(m_fd, b.c_str(), b.size());
364 cerr << "problem writing to " << m_log_file << ": " << cpp_strerror(r) << std::endl;
// A sink is enabled when its selected threshold is >= 0.
366 if ((crash ? m_syslog_crash : m_syslog_log) >= 0) {
367 syslog(LOG_USER|LOG_INFO, "%s", s);
370 if ((crash ? m_stderr_crash : m_stderr_log) >= 0) {
371 cerr << s << std::endl;
// Dump the retained m_recent entries plus the current logging
// configuration to all sinks (the banner strings mark it as a
// recent-events crash dump).
// NOTE(review): this listing omits the local EntryQueue `t`/`old`
// declarations and their claim/swap steps, the `char buf[...]`
// declaration used below, the loop increment `++p`, and several braces.
375 void Log::dump_recent()
377 pthread_mutex_lock(&m_flush_mutex);
378 m_flush_mutex_holder = pthread_self();
380 pthread_mutex_lock(&m_queue_mutex);
381 m_queue_mutex_holder = pthread_self();
386 m_queue_mutex_holder = 0;
387 pthread_mutex_unlock(&m_queue_mutex);
// First flush anything still pending so m_recent is complete.
388 _flush(&t, &m_recent, false);
391 _log_message("--- begin dump of recent events ---", true);
// crash=true: emit every retained entry regardless of subsystem level.
392 _flush(&m_recent, &old, true);
395 _log_message("--- logging levels ---", true);
396 for (vector<Subsystem>::iterator p = m_subs->m_subsys.begin();
397 p != m_subs->m_subsys.end();
399 snprintf(buf, sizeof(buf), " %2d/%2d %s", p->log_level, p->gather_level, p->name.c_str());
400 _log_message(buf, true);
403 sprintf(buf, " %2d/%2d (syslog threshold)", m_syslog_log, m_syslog_crash);
404 _log_message(buf, true);
405 sprintf(buf, " %2d/%2d (stderr threshold)", m_stderr_log, m_stderr_crash);
406 _log_message(buf, true);
407 sprintf(buf, " max_recent %9d", m_max_recent);
408 _log_message(buf, true);
409 sprintf(buf, " max_new %9d", m_max_new);
410 _log_message(buf, true);
411 sprintf(buf, " log_file %s", m_log_file.c_str());
412 _log_message(buf, true);
414 _log_message("--- end dump of recent events ---", true);
416 m_flush_mutex_holder = 0;
417 pthread_mutex_unlock(&m_flush_mutex);
// Fragment of Log::start() (the signature line is not present in this
// listing): must not already be running.
// NOTE(review): whatever happens under the queue lock here (presumably
// clearing the stop flag) and the thread-creation call are not shown.
422 assert(!is_started());
423 pthread_mutex_lock(&m_queue_mutex);
425 pthread_mutex_unlock(&m_queue_mutex);
// Fragment of Log::stop() (the signature line is not present in this
// listing): wake both the flusher and any blocked submitters so they can
// observe shutdown.
// NOTE(review): the stop-flag assignment under the lock and the thread
// join are not shown in this listing.
431 assert(is_started());
432 pthread_mutex_lock(&m_queue_mutex);
434 pthread_cond_signal(&m_cond_flusher);
435 pthread_cond_broadcast(&m_cond_loggers);
436 pthread_mutex_unlock(&m_queue_mutex);
// Fragment of the flusher thread's main loop (the enclosing function
// signature and loop header are not present in this listing): when m_new
// has entries, drop the queue lock to flush them, otherwise sleep on
// m_cond_flusher until submit_entry() signals.
// NOTE(review): the flush() call between the unlock/relock pair, the
// else, the continue, and the loop braces are not shown.
442 pthread_mutex_lock(&m_queue_mutex);
443 m_queue_mutex_holder = pthread_self();
445 if (!m_new.empty()) {
// Release the queue lock while flushing so submitters are not blocked.
446 m_queue_mutex_holder = 0;
447 pthread_mutex_unlock(&m_queue_mutex);
449 pthread_mutex_lock(&m_queue_mutex);
450 m_queue_mutex_holder = pthread_self();
454 pthread_cond_wait(&m_cond_flusher, &m_queue_mutex);
456 m_queue_mutex_holder = 0;
457 pthread_mutex_unlock(&m_queue_mutex);
// True when the calling thread currently holds the queue or flush mutex,
// per the m_*_mutex_holder bookkeeping -- presumably used to detect
// re-entry into the logging path from under its own locks.
// NOTE(review): the `return (` line and the closing `);`/brace are not
// shown in this listing.
462 bool Log::is_inside_log_lock()
465 pthread_self() == m_queue_mutex_holder ||
466 pthread_self() == m_flush_mutex_holder;
469 void Log::inject_segv()
471 m_inject_segv = true;
474 void Log::reset_segv()
476 m_inject_segv = false;