/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/irqflags.h
 *
 * IRQ flags tracing: follow the state of the hardirq and softirq flags and
 * provide callbacks for transitions between ON and OFF states.
 *
 * This file gets included from lowlevel asm headers too, to provide
 * wrapped versions of the local_irq_*() APIs, based on the
 * raw_local_irq_*() macros from the lowlevel headers.
 */
#ifndef _LINUX_TRACE_IRQFLAGS_H
#define _LINUX_TRACE_IRQFLAGS_H

#include <linux/irqflags_types.h>
#include <linux/typecheck.h>
#include <linux/cleanup.h>
#include <asm/irqflags.h>
#include <asm/percpu.h>

struct task_struct;

/* Currently lockdep_softirqs_on/off is used only by lockdep */
#ifdef CONFIG_PROVE_LOCKING
extern void lockdep_softirqs_on(unsigned long ip);
extern void lockdep_softirqs_off(unsigned long ip);
extern void lockdep_hardirqs_on_prepare(void);
extern void lockdep_hardirqs_on(unsigned long ip);
extern void lockdep_hardirqs_off(unsigned long ip);
extern void lockdep_cleanup_dead_cpu(unsigned int cpu,
				     struct task_struct *idle);
#else
static inline void lockdep_softirqs_on(unsigned long ip) { }
static inline void lockdep_softirqs_off(unsigned long ip) { }
static inline void lockdep_hardirqs_on_prepare(void) { }
static inline void lockdep_hardirqs_on(unsigned long ip) { }
static inline void lockdep_hardirqs_off(unsigned long ip) { }
static inline void lockdep_cleanup_dead_cpu(unsigned int cpu,
					    struct task_struct *idle) { }
#endif

#ifdef CONFIG_TRACE_IRQFLAGS

DECLARE_PER_CPU(int, hardirqs_enabled);
DECLARE_PER_CPU(int, hardirq_context);

extern void trace_hardirqs_on_prepare(void);
extern void trace_hardirqs_off_finish(void);
extern void trace_hardirqs_on(void);
extern void trace_hardirqs_off(void);

# define lockdep_hardirq_context()	(raw_cpu_read(hardirq_context))
# define lockdep_softirq_context(p)	((p)->softirq_context)
# define lockdep_hardirqs_enabled()	(this_cpu_read(hardirqs_enabled))
# define lockdep_softirqs_enabled(p)	((p)->softirqs_enabled)
# define lockdep_hardirq_enter()			\
do {							\
	if (__this_cpu_inc_return(hardirq_context) == 1)\
		current->hardirq_threaded = 0;		\
} while (0)
# define lockdep_hardirq_threaded()		\
do {						\
	current->hardirq_threaded = 1;		\
} while (0)
# define lockdep_hardirq_exit()			\
do {						\
	__this_cpu_dec(hardirq_context);	\
} while (0)
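
/*
 * Illustrative sketch (hedged, not taken verbatim from the IRQ entry code;
 * handle_my_irq() is a hypothetical handler): interrupt handling code
 * brackets the handler with the context-tracking helpers above, and marks
 * the context as threaded when the handler may be force-threaded.
 *
 *	lockdep_hardirq_enter();
 *	lockdep_hardirq_threaded();	// only if this IRQ may run threaded
 *	handle_my_irq();		// hypothetical handler invocation
 *	lockdep_hardirq_exit();
 */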

# define lockdep_hrtimer_enter(__hrtimer)	\
({						\
	bool __expires_hardirq = true;		\
						\
	if (!__hrtimer->is_hard) {		\
		current->irq_config = 1;	\
		__expires_hardirq = false;	\
	}					\
	__expires_hardirq;			\
})

# define lockdep_hrtimer_exit(__expires_hardirq)	\
do {							\
	if (!__expires_hardirq)				\
		current->irq_config = 0;		\
} while (0)
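
/*
 * Illustrative sketch (hypothetical caller, not copied from the hrtimer
 * code): the token returned by lockdep_hrtimer_enter() must be passed back
 * to lockdep_hrtimer_exit(), so current->irq_config is only cleared when it
 * was actually set for a timer that does not expire in hard IRQ context.
 *
 *	bool expires_hardirq = lockdep_hrtimer_enter(timer);
 *	// ... run the timer callback ...
 *	lockdep_hrtimer_exit(expires_hardirq);
 */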

# define lockdep_posixtimer_enter()		\
do {						\
	current->irq_config = 1;		\
} while (0)

# define lockdep_posixtimer_exit()		\
do {						\
	current->irq_config = 0;		\
} while (0)

# define lockdep_irq_work_enter(_flags)		\
do {						\
	if (!((_flags) & IRQ_WORK_HARD_IRQ))	\
		current->irq_config = 1;	\
} while (0)
# define lockdep_irq_work_exit(_flags)		\
do {						\
	if (!((_flags) & IRQ_WORK_HARD_IRQ))	\
		current->irq_config = 0;	\
} while (0)
#else
# define trace_hardirqs_on_prepare()		do { } while (0)
# define trace_hardirqs_off_finish()		do { } while (0)
# define trace_hardirqs_on()			do { } while (0)
# define trace_hardirqs_off()			do { } while (0)
# define lockdep_hardirq_context()		0
# define lockdep_softirq_context(p)		0
# define lockdep_hardirqs_enabled()		0
# define lockdep_softirqs_enabled(p)		0
# define lockdep_hardirq_enter()		do { } while (0)
# define lockdep_hardirq_threaded()		do { } while (0)
# define lockdep_hardirq_exit()			do { } while (0)
# define lockdep_hrtimer_enter(__hrtimer)	false
# define lockdep_hrtimer_exit(__context)	do { (void)(__context); } while (0)
# define lockdep_posixtimer_enter()		do { } while (0)
# define lockdep_posixtimer_exit()		do { } while (0)
# define lockdep_irq_work_enter(__work)		do { } while (0)
# define lockdep_irq_work_exit(__work)		do { } while (0)
#endif

#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PREEMPT_RT)
# define lockdep_softirq_enter()		\
do {						\
	current->softirq_context++;		\
} while (0)
# define lockdep_softirq_exit()			\
do {						\
	current->softirq_context--;		\
} while (0)

#else
# define lockdep_softirq_enter()		do { } while (0)
# define lockdep_softirq_exit()			do { } while (0)
#endif
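
/*
 * Illustrative sketch (hypothetical caller): softirq processing brackets
 * handler invocation with the counters above, in the spirit of
 * __do_softirq(), so lockdep can distinguish softirq context from plain
 * task context.
 *
 *	lockdep_softirq_enter();
 *	// ... run the pending softirq handlers ...
 *	lockdep_softirq_exit();
 */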

#if defined(CONFIG_IRQSOFF_TRACER) || \
	defined(CONFIG_PREEMPT_TRACER)
extern void stop_critical_timings(void);
extern void start_critical_timings(void);
#else
# define stop_critical_timings()	do { } while (0)
# define start_critical_timings()	do { } while (0)
#endif

#ifdef CONFIG_DEBUG_IRQFLAGS
extern void warn_bogus_irq_restore(void);
#define raw_check_bogus_irq_restore()			\
	do {						\
		if (unlikely(!arch_irqs_disabled()))	\
			warn_bogus_irq_restore();	\
	} while (0)
#else
#define raw_check_bogus_irq_restore() do { } while (0)
#endif

/*
 * Wrap the arch provided IRQ routines to provide appropriate checks.
 */
#define raw_local_irq_disable()		arch_local_irq_disable()
#define raw_local_irq_enable()		arch_local_irq_enable()
#define raw_local_irq_save(flags)			\
	do {						\
		typecheck(unsigned long, flags);	\
		flags = arch_local_irq_save();		\
	} while (0)
#define raw_local_irq_restore(flags)			\
	do {						\
		typecheck(unsigned long, flags);	\
		raw_check_bogus_irq_restore();		\
		arch_local_irq_restore(flags);		\
	} while (0)
#define raw_local_save_flags(flags)			\
	do {						\
		typecheck(unsigned long, flags);	\
		flags = arch_local_save_flags();	\
	} while (0)
#define raw_irqs_disabled_flags(flags)			\
	({						\
		typecheck(unsigned long, flags);	\
		arch_irqs_disabled_flags(flags);	\
	})
#define raw_irqs_disabled()		(arch_irqs_disabled())
#define raw_safe_halt()			arch_safe_halt()
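
/*
 * Illustrative sketch (hypothetical caller): the typecheck() in the
 * raw_local_irq_save()/raw_local_irq_restore() wrappers forces the flags
 * word to be an unsigned long, so declaring it with the wrong type is
 * flagged at compile time.
 *
 *	unsigned long flags;
 *
 *	raw_local_irq_save(flags);	// flags must be unsigned long
 *	// ... arch-level critical section ...
 *	raw_local_irq_restore(flags);
 */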

/*
 * The local_irq_*() APIs are equal to the raw_local_irq*()
 * if !TRACE_IRQFLAGS.
 */
#ifdef CONFIG_TRACE_IRQFLAGS

#define local_irq_enable()				\
	do {						\
		trace_hardirqs_on();			\
		raw_local_irq_enable();			\
	} while (0)

#define local_irq_disable()				\
	do {						\
		bool was_disabled = raw_irqs_disabled();\
		raw_local_irq_disable();		\
		if (!was_disabled)			\
			trace_hardirqs_off();		\
	} while (0)

#define local_irq_save(flags)				\
	do {						\
		raw_local_irq_save(flags);		\
		if (!raw_irqs_disabled_flags(flags))	\
			trace_hardirqs_off();		\
	} while (0)

#define local_irq_restore(flags)			\
	do {						\
		if (!raw_irqs_disabled_flags(flags))	\
			trace_hardirqs_on();		\
		raw_local_irq_restore(flags);		\
	} while (0)

#define safe_halt()				\
	do {					\
		trace_hardirqs_on();		\
		raw_safe_halt();		\
	} while (0)


#else /* !CONFIG_TRACE_IRQFLAGS */

#define local_irq_enable()	do { raw_local_irq_enable(); } while (0)
#define local_irq_disable()	do { raw_local_irq_disable(); } while (0)
#define local_irq_save(flags)	do { raw_local_irq_save(flags); } while (0)
#define local_irq_restore(flags) do { raw_local_irq_restore(flags); } while (0)
#define safe_halt()		do { raw_safe_halt(); } while (0)

#endif /* CONFIG_TRACE_IRQFLAGS */
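
/*
 * Illustrative sketch (hypothetical caller): callers only ever use the
 * local_irq_*() forms; with CONFIG_TRACE_IRQFLAGS the tracing hooks fire
 * around the state transitions, otherwise these map straight to the
 * raw_local_irq_*() variants.
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		// traces hardirqs-off if they were on
 *	// ... touch data shared with this CPU's interrupt handlers ...
 *	local_irq_restore(flags);	// traces hardirqs-on if flags re-enable IRQs
 */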

#define local_save_flags(flags)	raw_local_save_flags(flags)

/*
 * Some architectures don't define arch_irqs_disabled(), so even if either
 * definition would be fine we need to use different ones for the time being
 * to avoid build issues.
 */
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
#define irqs_disabled()					\
	({						\
		unsigned long _flags;			\
		raw_local_save_flags(_flags);		\
		raw_irqs_disabled_flags(_flags);	\
	})
#else /* !CONFIG_TRACE_IRQFLAGS_SUPPORT */
#define irqs_disabled()	raw_irqs_disabled()
#endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */

#define irqs_disabled_flags(flags) raw_irqs_disabled_flags(flags)
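
/*
 * Illustrative sketch (hypothetical caller): code that requires interrupts
 * to be disabled can assert that precondition with irqs_disabled(), or
 * check a previously saved flags word with irqs_disabled_flags().
 *
 *	WARN_ON_ONCE(!irqs_disabled());
 *	// ... code that relies on interrupts being off on this CPU ...
 */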

DEFINE_LOCK_GUARD_0(irq, local_irq_disable(), local_irq_enable())
DEFINE_LOCK_GUARD_0(irqsave,
		    local_irq_save(_T->flags),
		    local_irq_restore(_T->flags),
		    unsigned long flags)
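
/*
 * Illustrative sketch (hypothetical caller): together with <linux/cleanup.h>
 * the guards above re-enable interrupts, or restore the saved flags, when
 * the scope is left.
 *
 *	guard(irqsave)();
 *	// ... interrupts stay disabled until the enclosing scope ends ...
 *
 *	scoped_guard(irq) {
 *		// ... interrupts disabled only inside this block ...
 *	}
 */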

#endif /* _LINUX_TRACE_IRQFLAGS_H */