/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SEQLOCK_TYPES_H
#define __LINUX_SEQLOCK_TYPES_H

#include <linux/lockdep_types.h>
#include <linux/mutex_types.h>
#include <linux/spinlock_types.h>

/*
 * Sequence counters (seqcount_t)
 *
 * This is the raw counting mechanism, without any writer protection.
 *
 * Write side critical sections must be serialized and non-preemptible.
 *
 * If readers can be invoked from hardirq or softirq contexts,
 * interrupts or bottom halves must also be respectively disabled before
 * entering the write section.
 *
 * This mechanism can't be used if the protected data contains pointers,
 * as the writer can invalidate a pointer that a reader is following.
 *
 * If the write serialization mechanism is one of the common kernel
 * locking primitives, use a sequence counter with associated lock
 * (seqcount_LOCKNAME_t) instead.
 *
 * If it's desired to automatically handle the sequence counter writer
 * serialization and non-preemptibility requirements, use a sequential
 * lock (seqlock_t) instead.
 *
 * See Documentation/locking/seqlock.rst
 */
typedef struct seqcount {
	unsigned sequence;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} seqcount_t;
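
/*
 * Illustrative usage sketch (not part of the upstream header): the
 * initializer and read/write APIs referenced below (SEQCNT_ZERO(),
 * write_seqcount_begin/end(), read_seqcount_begin/retry()) are declared
 * in <linux/seqlock.h>, not here. Writer serialization and
 * non-preemptibility are the caller's responsibility.
 *
 *	static seqcount_t foo_seqcount = SEQCNT_ZERO(foo_seqcount);
 *
 *	Writer (externally serialized, non-preemptible):
 *
 *		write_seqcount_begin(&foo_seqcount);
 *		... update the protected data ...
 *		write_seqcount_end(&foo_seqcount);
 *
 *	Lockless reader, retried if it raced with a writer:
 *
 *		do {
 *			seq = read_seqcount_begin(&foo_seqcount);
 *			... read the protected data ...
 *		} while (read_seqcount_retry(&foo_seqcount, seq));
 */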

/*
 * For PREEMPT_RT, seqcount_LOCKNAME_t write side critical sections cannot
 * disable preemption. Disabling it can lead to higher latencies, and the
 * write side sections would not be able to acquire locks which become
 * sleeping locks on PREEMPT_RT (e.g. spinlock_t).
 *
 * To remain preemptible while avoiding a possible livelock caused by the
 * reader preempting the writer, use a different technique: let the reader
 * detect if a seqcount_LOCKNAME_t writer is in progress. If that is the
 * case, acquire then release the associated LOCKNAME writer serialization
 * lock. This will allow any possibly-preempted writer to make progress
 * until the end of its writer serialization lock critical section.
 *
 * This lock-unlock technique must be implemented for all of PREEMPT_RT
 * sleeping locks. See Documentation/locking/locktypes.rst
 */
#if defined(CONFIG_LOCKDEP) || defined(CONFIG_PREEMPT_RT)
#define __SEQ_LOCK(expr)	expr
#else
#define __SEQ_LOCK(expr)
#endif
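
/*
 * Illustrative sketch (not upstream code) of the PREEMPT_RT reader-side
 * technique described above, roughly what the reader helpers generated in
 * <linux/seqlock.h> do for preemptible associated locks. "lock"/"unlock"
 * stand in for the associated LOCKNAME lock operations:
 *
 *	seq = smp_load_acquire(&s->seqcount.sequence);
 *	if (IS_ENABLED(CONFIG_PREEMPT_RT) && (seq & 1)) {
 *		Writer in progress: lock then unlock the associated lock
 *		so a possibly-preempted writer can finish, then re-read.
 *
 *		lock(s->lock);
 *		unlock(s->lock);
 *		seq = smp_load_acquire(&s->seqcount.sequence);
 *	}
 */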

#define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockbase)	\
typedef struct seqcount_##lockname {					\
	seqcount_t		seqcount;				\
	__SEQ_LOCK(locktype	*lock);					\
} seqcount_##lockname##_t;

SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t,  false,    raw_spin)
SEQCOUNT_LOCKNAME(spinlock,     spinlock_t,      __SEQ_RT, spin)
SEQCOUNT_LOCKNAME(rwlock,       rwlock_t,        __SEQ_RT, read)
SEQCOUNT_LOCKNAME(mutex,        struct mutex,    true,     mutex)
#undef SEQCOUNT_LOCKNAME
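
/*
 * Illustrative usage sketch (not part of the upstream header): a sequence
 * counter with an associated spinlock. The initializer and write-side APIs
 * shown (seqcount_spinlock_init(), write_seqcount_begin/end()) are
 * declared in <linux/seqlock.h>. With lockdep enabled, the association
 * lets the write side verify that the spinlock is actually held:
 *
 *	static spinlock_t foo_lock;
 *	static seqcount_spinlock_t foo_seqcount;
 *
 *	seqcount_spinlock_init(&foo_seqcount, &foo_lock);
 *
 *	spin_lock(&foo_lock);
 *	write_seqcount_begin(&foo_seqcount);
 *	... update the protected data ...
 *	write_seqcount_end(&foo_seqcount);
 *	spin_unlock(&foo_lock);
 */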

/*
 * Sequential locks (seqlock_t)
 *
 * Sequence counters with an embedded spinlock for writer serialization
 * and non-preemptibility.
 *
 * For more info, see:
 *    - Comments on top of seqcount_t
 *    - Documentation/locking/seqlock.rst
 */
typedef struct {
	/*
	 * Make sure that readers don't starve writers on PREEMPT_RT: use
	 * seqcount_spinlock_t instead of seqcount_t. Check __SEQ_LOCK().
	 */
	seqcount_spinlock_t	seqcount;
	spinlock_t		lock;
} seqlock_t;
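
/*
 * Illustrative usage sketch (not part of the upstream header): seqlock_t
 * usage with the APIs declared in <linux/seqlock.h> (seqlock_init(),
 * write_seqlock/sequnlock(), read_seqbegin/read_seqretry()). The embedded
 * spinlock serializes writers automatically:
 *
 *	static seqlock_t foo_seqlock;
 *
 *	seqlock_init(&foo_seqlock);
 *
 *	write_seqlock(&foo_seqlock);
 *	... update the protected data ...
 *	write_sequnlock(&foo_seqlock);
 *
 *	do {
 *		seq = read_seqbegin(&foo_seqlock);
 *		... read the protected data ...
 *	} while (read_seqretry(&foo_seqlock, seq));
 */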

#endif /* __LINUX_SEQLOCK_TYPES_H */