// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net>
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation.  See file COPYING.
 *
 */
// Debug-output plumbing: route this file's logging to the "timer" subsystem
// and tag every message with the emitting SafeTimer instance.
#define dout_subsys ceph_subsys_timer
#undef dout_prefix
#define dout_prefix *_dout << "timer(" << this << ")."
25 class SafeTimerThread : public Thread {
28 explicit SafeTimerThread(SafeTimer *s) : parent(s) {}
29 void *entry() override {
30 parent->timer_thread();
37 typedef std::multimap < utime_t, Context *> scheduled_map_t;
38 typedef std::map < Context*, scheduled_map_t::iterator > event_lookup_map_t;
40 SafeTimer::SafeTimer(CephContext *cct_, Mutex &l, bool safe_callbacks)
42 safe_callbacks(safe_callbacks),
48 SafeTimer::~SafeTimer()
50 assert(thread == NULL);
53 void SafeTimer::init()
55 ldout(cct,10) << "init" << dendl;
56 thread = new SafeTimerThread(this);
57 thread->create("safe_timer");
60 void SafeTimer::shutdown()
62 ldout(cct,10) << "shutdown" << dendl;
64 assert(lock.is_locked());
76 void SafeTimer::timer_thread()
79 ldout(cct,10) << "timer_thread starting" << dendl;
81 utime_t now = ceph_clock_now();
83 while (!schedule.empty()) {
84 scheduled_map_t::iterator p = schedule.begin();
90 Context *callback = p->second;
91 events.erase(callback);
93 ldout(cct,10) << "timer_thread executing " << callback << dendl;
97 callback->complete(0);
102 // recheck stopping if we dropped the lock
103 if (!safe_callbacks && stopping)
106 ldout(cct,20) << "timer_thread going to sleep" << dendl;
107 if (schedule.empty())
110 cond.WaitUntil(lock, schedule.begin()->first);
111 ldout(cct,20) << "timer_thread awake" << dendl;
113 ldout(cct,10) << "timer_thread exiting" << dendl;
117 Context* SafeTimer::add_event_after(double seconds, Context *callback)
119 assert(lock.is_locked());
121 utime_t when = ceph_clock_now();
123 return add_event_at(when, callback);
126 Context* SafeTimer::add_event_at(utime_t when, Context *callback)
128 assert(lock.is_locked());
129 ldout(cct,10) << __func__ << " " << when << " -> " << callback << dendl;
131 ldout(cct,5) << __func__ << " already shutdown, event not added" << dendl;
135 scheduled_map_t::value_type s_val(when, callback);
136 scheduled_map_t::iterator i = schedule.insert(s_val);
138 event_lookup_map_t::value_type e_val(callback, i);
139 pair < event_lookup_map_t::iterator, bool > rval(events.insert(e_val));
141 /* If you hit this, you tried to insert the same Context* twice. */
144 /* If the event we have just inserted comes before everything else, we need to
145 * adjust our timeout. */
146 if (i == schedule.begin())
151 bool SafeTimer::cancel_event(Context *callback)
153 assert(lock.is_locked());
155 auto p = events.find(callback);
156 if (p == events.end()) {
157 ldout(cct,10) << "cancel_event " << callback << " not found" << dendl;
161 ldout(cct,10) << "cancel_event " << p->second->first << " -> " << callback << dendl;
164 schedule.erase(p->second);
169 void SafeTimer::cancel_all_events()
171 ldout(cct,10) << "cancel_all_events" << dendl;
172 assert(lock.is_locked());
174 while (!events.empty()) {
175 auto p = events.begin();
176 ldout(cct,10) << " cancelled " << p->second->first << " -> " << p->first << dendl;
178 schedule.erase(p->second);
183 void SafeTimer::dump(const char *caller) const
187 ldout(cct,10) << "dump " << caller << dendl;
189 for (scheduled_map_t::const_iterator s = schedule.begin();
192 ldout(cct,10) << " " << s->first << "->" << s->second << dendl;