#ifndef __LINUX_SEQLOCK_H
#define __LINUX_SEQLOCK_H
/*
 * Reader/writer consistent mechanism without starving writers. This type of
 * lock is for data where the reader wants a consistent set of information
 * and is willing to retry if the information changes. There are two types
 * of readers:
 * 1. Sequence readers which never block a writer but they may have to retry
 *    if a writer is in progress by detecting change in sequence number.
 *    Writers do not wait for a sequence reader.
 * 2. Locking readers which will wait if a writer or another locking reader
 *    is in progress. A locking reader in progress will also block a writer
 *    from going forward. Unlike the regular rwlock, the read lock here is
 *    exclusive so that only one locking reader can get it.
 *
 * This is not as cache friendly as brlock. Also, this may not work well
 * for data that contains pointers, because any writer could
 * invalidate a pointer that a reader was following.
 *
 * Expected non-blocking reader usage:
 *	do {
 *		seq = read_seqbegin(&foo);
 *		...
 *	} while (read_seqretry(&foo, seq));
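 *
 * The corresponding writer side is expected to look like this (a minimal
 * sketch; "foo" is the same seqlock_t as in the reader example above):
 *
 *	write_seqlock(&foo);
 *	... update the protected data ...
 *	write_sequnlock(&foo);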
 *
 * On non-SMP the spin locks disappear but the writer still needs
 * to increment the sequence variables because an interrupt routine could
 * change the state of the data.
 *
 * Based on x86_64 vsyscall gettimeofday
 * by Keith Owens and Andrea Arcangeli
 */

#include <linux/spinlock.h>
#include <linux/preempt.h>
#include <linux/lockdep.h>
#include <asm/processor.h>

/*
 * Version using sequence counter only.
 * This can be used when code has its own mutex protecting the
 * updating starting before the write_seqcount_begin() and ending
 * after the write_seqcount_end().
 */
typedef struct seqcount {
	unsigned sequence;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} seqcount_t;

static inline void __seqcount_init(seqcount_t *s, const char *name,
				   struct lock_class_key *key)
{
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	lockdep_init_map(&s->dep_map, name, key, 0);
	s->sequence = 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define SEQCOUNT_DEP_MAP_INIT(lockname)		\
		.dep_map = { .name = #lockname }

# define seqcount_init(s)				\
	do {						\
		static struct lock_class_key __key;	\
		__seqcount_init((s), #s, &__key);	\
	} while (0)

static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
{
	seqcount_t *l = (seqcount_t *)s;
	unsigned long flags;

	local_irq_save(flags);
	seqcount_acquire_read(&l->dep_map, 0, 0, _RET_IP_);
	seqcount_release(&l->dep_map, 1, _RET_IP_);
	local_irq_restore(flags);
}

#else
# define SEQCOUNT_DEP_MAP_INIT(lockname)
# define seqcount_init(s) __seqcount_init(s, NULL, NULL)
# define seqcount_lockdep_reader_access(x)
#endif

#define SEQCNT_ZERO(lockname) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(lockname)}

/**
 * __read_seqcount_begin - begin a seq-read critical section (without barrier)
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 */
static inline unsigned __read_seqcount_begin(const seqcount_t *s)
{
	unsigned ret;

repeat:
	ret = READ_ONCE(s->sequence);
	if (unlikely(ret & 1)) {
		cpu_relax();
		goto repeat;
	}
	return ret;
}

/**
 * raw_read_seqcount - Read the raw seqcount
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * raw_read_seqcount opens a read critical section of the given
 * seqcount without any lockdep checking and without checking or
 * masking the LSB. Calling code is responsible for handling that.
 */
static inline unsigned raw_read_seqcount(const seqcount_t *s)
{
	unsigned ret = READ_ONCE(s->sequence);

	smp_rmb();
	return ret;
}

/**
 * raw_read_seqcount_begin - start seq-read critical section w/o lockdep
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * raw_read_seqcount_begin opens a read critical section of the given
 * seqcount, but without any lockdep checking. Validity of the critical
 * section is tested by calling read_seqcount_retry().
 */
static inline unsigned raw_read_seqcount_begin(const seqcount_t *s)
{
	unsigned ret = __read_seqcount_begin(s);

	smp_rmb();
	return ret;
}

/**
 * read_seqcount_begin - begin a seq-read critical section
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * read_seqcount_begin opens a read critical section of the given seqcount.
 * Validity of the critical section is tested by calling
 * read_seqcount_retry().
 */
static inline unsigned read_seqcount_begin(const seqcount_t *s)
{
	seqcount_lockdep_reader_access(s);
	return raw_read_seqcount_begin(s);
}

/**
 * raw_seqcount_begin - begin a seq-read critical section
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * raw_seqcount_begin opens a read critical section of the given seqcount.
 * Validity of the critical section is tested by calling
 * read_seqcount_retry().
 *
 * Unlike read_seqcount_begin(), this function will not wait for the count
 * to stabilize. If a writer is active when we begin, we will fail the
 * read_seqcount_retry() instead of stabilizing at the beginning of the
 * critical section.
 */
static inline unsigned raw_seqcount_begin(const seqcount_t *s)
{
	unsigned ret = READ_ONCE(s->sequence);

	smp_rmb();
	return ret & ~1;
}

/**
 * __read_seqcount_retry - end a seq-read critical section (without barrier)
 * @s: pointer to seqcount_t
 * @start: count, from read_seqcount_begin
 * Returns: 1 if retry is required, else 0
 *
 * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 */
static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	return unlikely(s->sequence != start);
}

/**
 * read_seqcount_retry - end a seq-read critical section
 * @s: pointer to seqcount_t
 * @start: count, from read_seqcount_begin
 * Returns: 1 if retry is required, else 0
 *
 * read_seqcount_retry closes a read critical section of the given seqcount.
 * If the critical section was invalid, it must be ignored (and typically
 * retried).
 */
static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	smp_rmb();
	return __read_seqcount_retry(s, start);
}

static inline void __raw_write_seqcount_begin(seqcount_t *s)
{
	s->sequence++;
	smp_wmb();
}

static inline void raw_write_seqcount_begin(seqcount_t *s)
{
	preempt_disable_rt();
	__raw_write_seqcount_begin(s);
}

static inline void __raw_write_seqcount_end(seqcount_t *s)
{
	smp_wmb();
	s->sequence++;
}

static inline void raw_write_seqcount_end(seqcount_t *s)
{
	__raw_write_seqcount_end(s);
	preempt_enable_rt();
}

/*
 * raw_write_seqcount_latch - redirect readers to even/odd copy
 * @s: pointer to seqcount_t
 */
static inline void raw_write_seqcount_latch(seqcount_t *s)
{
	smp_wmb();	/* prior stores before incrementing "sequence" */
	s->sequence++;
	smp_wmb();	/* increment "sequence" before following stores */
}
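
/*
 * A minimal sketch of a latch user, assuming writers are serialized
 * externally. The latch_struct, data_struct, entry, modify() and
 * data_query() names are illustrative only, not part of this API:
 *
 *	struct latch_struct {
 *		seqcount_t		seq;
 *		struct data_struct	data[2];
 *	};
 *
 *	void latch_modify(struct latch_struct *latch, ...)
 *	{
 *		raw_write_seqcount_latch(&latch->seq);
 *		modify(latch->data[0], ...);
 *		raw_write_seqcount_latch(&latch->seq);
 *		modify(latch->data[1], ...);
 *	}
 *
 *	struct entry *latch_query(struct latch_struct *latch, ...)
 *	{
 *		struct entry *entry;
 *		unsigned seq, idx;
 *
 *		do {
 *			seq = raw_read_seqcount(&latch->seq);
 *			idx = seq & 0x01;
 *			entry = data_query(latch->data[idx], ...);
 *		} while (read_seqcount_retry(&latch->seq, seq));
 *
 *		return entry;
 *	}
 */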

/*
 * Sequence counter only version assumes that callers are using their
 * own mutexing.
 */
static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass)
{
	raw_write_seqcount_begin(s);
	seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
}

static inline void write_seqcount_begin(seqcount_t *s)
{
	write_seqcount_begin_nested(s, 0);
}

static inline void write_seqcount_end(seqcount_t *s)
{
	seqcount_release(&s->dep_map, 1, _RET_IP_);
	raw_write_seqcount_end(s);
}
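
/*
 * A minimal usage sketch for the sequence counter only version, assuming
 * writers serialize on their own lock. The names foo_lock and foo_seq and
 * the data accesses are illustrative only, not part of this API:
 *
 *	static DEFINE_SPINLOCK(foo_lock);
 *	static seqcount_t foo_seq = SEQCNT_ZERO(foo_seq);
 *
 *	Writer:
 *		spin_lock(&foo_lock);
 *		write_seqcount_begin(&foo_seq);
 *		... update the protected data ...
 *		write_seqcount_end(&foo_seq);
 *		spin_unlock(&foo_lock);
 *
 *	Reader:
 *		unsigned seq;
 *
 *		do {
 *			seq = read_seqcount_begin(&foo_seq);
 *			... read the protected data ...
 *		} while (read_seqcount_retry(&foo_seq, seq));
 */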

/**
 * write_seqcount_barrier - invalidate in-progress read-side seq operations
 * @s: pointer to seqcount_t
 *
 * After write_seqcount_barrier, no read-side seq operations will complete
 * successfully and see data older than this.
 */
static inline void write_seqcount_barrier(seqcount_t *s)
{
	smp_wmb();
	s->sequence += 2;
}

typedef struct {
	struct seqcount seqcount;
	spinlock_t lock;
} seqlock_t;

/*
 * These macros triggered gcc-3.x compile-time problems.  We think these are
 * OK now.  Be cautious.
 */
#define __SEQLOCK_UNLOCKED(lockname)			\
	{						\
		.seqcount = SEQCNT_ZERO(lockname),	\
		.lock = __SPIN_LOCK_UNLOCKED(lockname)	\
	}

#define seqlock_init(x)					\
	do {						\
		seqcount_init(&(x)->seqcount);		\
		spin_lock_init(&(x)->lock);		\
	} while (0)

#define DEFINE_SEQLOCK(x) \
		seqlock_t x = __SEQLOCK_UNLOCKED(x)
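
/*
 * For example (the name "foo" is illustrative only), a file-scope seqlock
 * can be defined statically with
 *
 *	static DEFINE_SEQLOCK(foo);
 *
 * while a seqlock_t embedded in another structure is initialized at run
 * time with seqlock_init().
 */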

/*
 * Read side functions for starting and finalizing a read side section.
 */
#ifndef CONFIG_PREEMPT_RT_FULL
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
	return read_seqcount_begin(&sl->seqcount);
}
#else
/*
 * Starvation safe read side for RT
 */
static inline unsigned read_seqbegin(seqlock_t *sl)
{
	unsigned ret;

repeat:
	ret = ACCESS_ONCE(sl->seqcount.sequence);
	if (unlikely(ret & 1)) {
		/*
		 * Take the lock and let the writer proceed (i.e. possibly
		 * boost it), otherwise we could loop here forever.
		 */
		spin_unlock_wait(&sl->lock);
		goto repeat;
	}
	return ret;
}
#endif

static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
	return read_seqcount_retry(&sl->seqcount, start);
}

/*
 * Lock out other writers and update the count.
 * Acts like a normal spin_lock/unlock.
 * Don't need preempt_disable() because that is in the spin_lock already.
 */
static inline void write_seqlock(seqlock_t *sl)
{
	spin_lock(&sl->lock);
	__raw_write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock(seqlock_t *sl)
{
	__raw_write_seqcount_end(&sl->seqcount);
	spin_unlock(&sl->lock);
}

static inline void write_seqlock_bh(seqlock_t *sl)
{
	spin_lock_bh(&sl->lock);
	__raw_write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock_bh(seqlock_t *sl)
{
	__raw_write_seqcount_end(&sl->seqcount);
	spin_unlock_bh(&sl->lock);
}

static inline void write_seqlock_irq(seqlock_t *sl)
{
	spin_lock_irq(&sl->lock);
	__raw_write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock_irq(seqlock_t *sl)
{
	__raw_write_seqcount_end(&sl->seqcount);
	spin_unlock_irq(&sl->lock);
}

static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
{
	unsigned long flags;

	spin_lock_irqsave(&sl->lock, flags);
	__raw_write_seqcount_begin(&sl->seqcount);
	return flags;
}

#define write_seqlock_irqsave(lock, flags)				\
	do { flags = __write_seqlock_irqsave(lock); } while (0)

static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
	__raw_write_seqcount_end(&sl->seqcount);
	spin_unlock_irqrestore(&sl->lock, flags);
}

/*
 * A locking reader exclusively locks out other writers and locking readers,
 * but doesn't update the sequence number. Acts like a normal spin_lock/unlock.
 * Don't need preempt_disable() because that is in the spin_lock already.
 */
static inline void read_seqlock_excl(seqlock_t *sl)
{
	spin_lock(&sl->lock);
}

static inline void read_sequnlock_excl(seqlock_t *sl)
{
	spin_unlock(&sl->lock);
}

/**
 * read_seqbegin_or_lock - begin a sequence number check or locking block
 * @lock: sequence lock
 * @seq : sequence number to be checked
 *
 * First try it once optimistically without taking the lock. If that fails,
 * take the lock. The sequence number is also used as a marker for deciding
 * whether to be a reader (even) or writer (odd).
 * N.B. seq must be initialized to an even number to begin with.
 */
static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
{
	if (!(*seq & 1))	/* Even */
		*seq = read_seqbegin(lock);
	else			/* Odd */
		read_seqlock_excl(lock);
}

static inline int need_seqretry(seqlock_t *lock, int seq)
{
	return !(seq & 1) && read_seqretry(lock, seq);
}

static inline void done_seqretry(seqlock_t *lock, int seq)
{
	if (seq & 1)
		read_sequnlock_excl(lock);
}
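
/*
 * A minimal sketch of the optimistic-then-locking reader pattern, modeled
 * on in-tree users; the seqlock "foo" and the data accesses are
 * illustrative only. The first pass runs lockless with an even sequence
 * number; if it raced with a writer, the retry switches to an odd marker
 * and takes the lock:
 *
 *	int seq, nextseq = 0;
 *
 *	do {
 *		seq = nextseq;
 *		read_seqbegin_or_lock(&foo, &seq);
 *		... read the protected data ...
 *		nextseq = 1;
 *	} while (need_seqretry(&foo, seq));
 *	done_seqretry(&foo, seq);
 */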

static inline void read_seqlock_excl_bh(seqlock_t *sl)
{
	spin_lock_bh(&sl->lock);
}

static inline void read_sequnlock_excl_bh(seqlock_t *sl)
{
	spin_unlock_bh(&sl->lock);
}

static inline void read_seqlock_excl_irq(seqlock_t *sl)
{
	spin_lock_irq(&sl->lock);
}

static inline void read_sequnlock_excl_irq(seqlock_t *sl)
{
	spin_unlock_irq(&sl->lock);
}

static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
{
	unsigned long flags;

	spin_lock_irqsave(&sl->lock, flags);
	return flags;
}

#define read_seqlock_excl_irqsave(lock, flags)				\
	do { flags = __read_seqlock_excl_irqsave(lock); } while (0)

static inline void
read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
{
	spin_unlock_irqrestore(&sl->lock, flags);
}

static inline unsigned long
read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
{
	unsigned long flags = 0;

	if (!(*seq & 1))	/* Even */
		*seq = read_seqbegin(lock);
	else			/* Odd */
		read_seqlock_excl_irqsave(lock, flags);

	return flags;
}

static inline void
done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
{
	if (seq & 1)
		read_sequnlock_excl_irqrestore(lock, flags);
}

#endif /* __LINUX_SEQLOCK_H */