| 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
| 2 | #ifndef _LINUX_SCHED_MM_H |
| 3 | #define _LINUX_SCHED_MM_H |
| 4 | |
| 5 | #include <linux/kernel.h> |
| 6 | #include <linux/atomic.h> |
| 7 | #include <linux/sched.h> |
| 8 | #include <linux/mm_types.h> |
| 9 | #include <linux/gfp.h> |
| 10 | #include <linux/sync_core.h> |
| 11 | #include <linux/sched/coredump.h> |
| 12 | |
| 13 | /* |
| 14 | * Routines for handling mm_structs |
| 15 | */ |
| 16 | extern struct mm_struct *mm_alloc(void); |
| 17 | |
| 18 | /** |
| 19 | * mmgrab() - Pin a &struct mm_struct. |
| 20 | * @mm: The &struct mm_struct to pin. |
| 21 | * |
| 22 | * Make sure that @mm will not get freed even after the owning task |
| 23 | * exits. This doesn't guarantee that the associated address space |
 * will still exist later on, and mmget_not_zero() has to be used before
| 25 | * accessing it. |
| 26 | * |
 * This is the preferred way to pin @mm for a longer/unbounded amount
 * of time.
| 29 | * |
| 30 | * Use mmdrop() to release the reference acquired by mmgrab(). |
| 31 | * |
| 32 | * See also <Documentation/mm/active_mm.rst> for an in-depth explanation |
| 33 | * of &mm_struct.mm_count vs &mm_struct.mm_users. |
| 34 | */ |
| 35 | static inline void mmgrab(struct mm_struct *mm) |
| 36 | { |
	atomic_inc(&mm->mm_count);
| 38 | } |
| 39 | |
| 40 | static inline void smp_mb__after_mmgrab(void) |
| 41 | { |
| 42 | smp_mb__after_atomic(); |
| 43 | } |
| 44 | |
| 45 | extern void __mmdrop(struct mm_struct *mm); |
| 46 | |
| 47 | static inline void mmdrop(struct mm_struct *mm) |
| 48 | { |
| 49 | /* |
| 50 | * The implicit full barrier implied by atomic_dec_and_test() is |
| 51 | * required by the membarrier system call before returning to |
| 52 | * user-space, after storing to rq->curr. |
| 53 | */ |
| 54 | if (unlikely(atomic_dec_and_test(&mm->mm_count))) |
| 55 | __mmdrop(mm); |
| 56 | } |
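
/*
 * Illustrative usage sketch (not part of this header; foo_ctx and its
 * helpers are hypothetical): a subsystem that keeps a long-lived pointer
 * to an mm pins the mm_struct itself with mmgrab() and releases it with
 * mmdrop(). This only keeps the mm_struct allocated; touching the address
 * space additionally requires mmget_not_zero(), see below.
 *
 *	struct foo_ctx {
 *		struct mm_struct *mm;
 *	};
 *
 *	static void foo_ctx_init(struct foo_ctx *ctx)
 *	{
 *		ctx->mm = current->mm;
 *		mmgrab(ctx->mm);
 *	}
 *
 *	static void foo_ctx_destroy(struct foo_ctx *ctx)
 *	{
 *		mmdrop(ctx->mm);
 *	}
 */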
| 57 | |
| 58 | #ifdef CONFIG_PREEMPT_RT |
| 59 | /* |
| 60 | * RCU callback for delayed mm drop. Not strictly RCU, but call_rcu() is |
| 61 | * by far the least expensive way to do that. |
| 62 | */ |
| 63 | static inline void __mmdrop_delayed(struct rcu_head *rhp) |
| 64 | { |
| 65 | struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop); |
| 66 | |
| 67 | __mmdrop(mm); |
| 68 | } |
| 69 | |
| 70 | /* |
| 71 | * Invoked from finish_task_switch(). Delegates the heavy lifting on RT |
| 72 | * kernels via RCU. |
| 73 | */ |
| 74 | static inline void mmdrop_sched(struct mm_struct *mm) |
| 75 | { |
| 76 | /* Provides a full memory barrier. See mmdrop() */ |
| 77 | if (atomic_dec_and_test(&mm->mm_count)) |
| 78 | call_rcu(&mm->delayed_drop, __mmdrop_delayed); |
| 79 | } |
| 80 | #else |
| 81 | static inline void mmdrop_sched(struct mm_struct *mm) |
| 82 | { |
| 83 | mmdrop(mm); |
| 84 | } |
| 85 | #endif |
| 86 | |
| 87 | /* Helpers for lazy TLB mm refcounting */ |
| 88 | static inline void mmgrab_lazy_tlb(struct mm_struct *mm) |
| 89 | { |
| 90 | if (IS_ENABLED(CONFIG_MMU_LAZY_TLB_REFCOUNT)) |
| 91 | mmgrab(mm); |
| 92 | } |
| 93 | |
| 94 | static inline void mmdrop_lazy_tlb(struct mm_struct *mm) |
| 95 | { |
| 96 | if (IS_ENABLED(CONFIG_MMU_LAZY_TLB_REFCOUNT)) { |
| 97 | mmdrop(mm); |
| 98 | } else { |
| 99 | /* |
		 * mmdrop_lazy_tlb() must provide a full memory barrier; see the
		 * membarrier comment in finish_task_switch(), which relies on this.
| 102 | */ |
| 103 | smp_mb(); |
| 104 | } |
| 105 | } |
| 106 | |
| 107 | static inline void mmdrop_lazy_tlb_sched(struct mm_struct *mm) |
| 108 | { |
| 109 | if (IS_ENABLED(CONFIG_MMU_LAZY_TLB_REFCOUNT)) |
| 110 | mmdrop_sched(mm); |
| 111 | else |
| 112 | smp_mb(); /* see mmdrop_lazy_tlb() above */ |
| 113 | } |
| 114 | |
| 115 | /** |
| 116 | * mmget() - Pin the address space associated with a &struct mm_struct. |
| 117 | * @mm: The address space to pin. |
| 118 | * |
| 119 | * Make sure that the address space of the given &struct mm_struct doesn't |
| 120 | * go away. This does not protect against parts of the address space being |
| 121 | * modified or freed, however. |
| 122 | * |
| 123 | * Never use this function to pin this address space for an |
| 124 | * unbounded/indefinite amount of time. |
| 125 | * |
| 126 | * Use mmput() to release the reference acquired by mmget(). |
| 127 | * |
| 128 | * See also <Documentation/mm/active_mm.rst> for an in-depth explanation |
| 129 | * of &mm_struct.mm_count vs &mm_struct.mm_users. |
| 130 | */ |
| 131 | static inline void mmget(struct mm_struct *mm) |
| 132 | { |
	atomic_inc(&mm->mm_users);
| 134 | } |
| 135 | |
| 136 | static inline bool mmget_not_zero(struct mm_struct *mm) |
| 137 | { |
	return atomic_inc_not_zero(&mm->mm_users);
| 139 | } |
| 140 | |
| 141 | /* mmput gets rid of the mappings and all user-space */ |
| 142 | extern void mmput(struct mm_struct *); |
| 143 | #if defined(CONFIG_MMU) || defined(CONFIG_FUTEX_PRIVATE_HASH) |
/* Same as above, but performs the slow path from async context. Can
 * be called from atomic context as well.
 */
| 147 | void mmput_async(struct mm_struct *); |
| 148 | #endif |
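
/*
 * Illustrative sketch (assumes mm was kept alive with mmgrab() as in the
 * sketch above; do_short_work() is hypothetical): pin the address space
 * only for a short, bounded operation and release it with mmput().
 *
 *	if (mmget_not_zero(mm)) {
 *		do_short_work(mm);
 *		mmput(mm);
 *	}
 */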
| 149 | |
| 150 | /* Grab a reference to a task's mm, if it is not already going away */ |
| 151 | extern struct mm_struct *get_task_mm(struct task_struct *task); |
| 152 | /* |
 * Grab a reference to a task's mm, if it is not already going away
 * and ptrace_may_access() with the given mode parameter succeeds.
| 156 | */ |
| 157 | extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode); |
/* Remove the current task's stale references to the old mm_struct on exit() */
| 159 | extern void exit_mm_release(struct task_struct *, struct mm_struct *); |
/* Remove the current task's stale references to the old mm_struct on exec() */
| 161 | extern void exec_mm_release(struct task_struct *, struct mm_struct *); |
| 162 | |
| 163 | #ifdef CONFIG_MEMCG |
| 164 | extern void mm_update_next_owner(struct mm_struct *mm); |
| 165 | #else |
| 166 | static inline void mm_update_next_owner(struct mm_struct *mm) |
| 167 | { |
| 168 | } |
| 169 | #endif /* CONFIG_MEMCG */ |
| 170 | |
| 171 | #ifdef CONFIG_MMU |
| 172 | #ifndef arch_get_mmap_end |
| 173 | #define arch_get_mmap_end(addr, len, flags) (TASK_SIZE) |
| 174 | #endif |
| 175 | |
| 176 | #ifndef arch_get_mmap_base |
| 177 | #define arch_get_mmap_base(addr, base) (base) |
| 178 | #endif |
| 179 | |
| 180 | extern void arch_pick_mmap_layout(struct mm_struct *mm, |
| 181 | const struct rlimit *rlim_stack); |
| 182 | |
| 183 | unsigned long |
| 184 | arch_get_unmapped_area(struct file *filp, unsigned long addr, |
| 185 | unsigned long len, unsigned long pgoff, |
| 186 | unsigned long flags, vm_flags_t vm_flags); |
| 187 | unsigned long |
| 188 | arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, |
| 189 | unsigned long len, unsigned long pgoff, |
			       unsigned long flags, vm_flags_t vm_flags);
| 191 | |
| 192 | unsigned long mm_get_unmapped_area(struct file *filp, unsigned long addr, |
| 193 | unsigned long len, unsigned long pgoff, |
| 194 | unsigned long flags); |
| 195 | |
| 196 | unsigned long mm_get_unmapped_area_vmflags(struct file *filp, |
| 197 | unsigned long addr, |
| 198 | unsigned long len, |
| 199 | unsigned long pgoff, |
| 200 | unsigned long flags, |
| 201 | vm_flags_t vm_flags); |
| 202 | |
| 203 | unsigned long |
| 204 | generic_get_unmapped_area(struct file *filp, unsigned long addr, |
| 205 | unsigned long len, unsigned long pgoff, |
| 206 | unsigned long flags, vm_flags_t vm_flags); |
| 207 | unsigned long |
| 208 | generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr, |
| 209 | unsigned long len, unsigned long pgoff, |
| 210 | unsigned long flags, vm_flags_t vm_flags); |
| 211 | #else |
| 212 | static inline void arch_pick_mmap_layout(struct mm_struct *mm, |
| 213 | const struct rlimit *rlim_stack) {} |
| 214 | #endif |
| 215 | |
| 216 | static inline bool in_vfork(struct task_struct *tsk) |
| 217 | { |
| 218 | bool ret; |
| 219 | |
| 220 | /* |
	 * We need RCU to access ->real_parent if CLONE_VM was used along with
	 * CLONE_PARENT.
	 *
	 * We check real_parent->mm == tsk->mm because CLONE_VFORK does not
	 * imply CLONE_VM.
	 *
	 * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus
	 * ->real_parent is not necessarily the task doing vfork(), so in
	 * theory we can't rely on task_lock() if we want to dereference it.
	 *
	 * And in this case we can't trust the real_parent->mm == tsk->mm
	 * check, it can be a false negative. But we do not care, if init or
	 * another oom-unkillable task does this it should blame itself.
| 234 | */ |
| 235 | rcu_read_lock(); |
| 236 | ret = tsk->vfork_done && |
| 237 | rcu_dereference(tsk->real_parent)->mm == tsk->mm; |
| 238 | rcu_read_unlock(); |
| 239 | |
| 240 | return ret; |
| 241 | } |
| 242 | |
| 243 | /* |
| 244 | * Applies per-task gfp context to the given allocation flags. |
| 245 | * PF_MEMALLOC_NOIO implies GFP_NOIO |
| 246 | * PF_MEMALLOC_NOFS implies GFP_NOFS |
| 247 | * PF_MEMALLOC_PIN implies !GFP_MOVABLE |
| 248 | */ |
| 249 | static inline gfp_t current_gfp_context(gfp_t flags) |
| 250 | { |
| 251 | unsigned int pflags = READ_ONCE(current->flags); |
| 252 | |
| 253 | if (unlikely(pflags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS | PF_MEMALLOC_PIN))) { |
| 254 | /* |
		 * NOIO implies both NOIO and NOFS, and it is the weaker context,
		 * so always make sure it takes precedence.
| 257 | */ |
| 258 | if (pflags & PF_MEMALLOC_NOIO) |
| 259 | flags &= ~(__GFP_IO | __GFP_FS); |
| 260 | else if (pflags & PF_MEMALLOC_NOFS) |
| 261 | flags &= ~__GFP_FS; |
| 262 | |
| 263 | if (pflags & PF_MEMALLOC_PIN) |
| 264 | flags &= ~__GFP_MOVABLE; |
| 265 | } |
| 266 | return flags; |
| 267 | } |
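
/*
 * Illustrative sketch: inside a memalloc_nofs_save()/restore() section
 * (see below), PF_MEMALLOC_NOFS is set on the current task, so this helper
 * narrows a GFP_KERNEL request down to GFP_KERNEL & ~__GFP_FS, i.e. GFP_NOFS:
 *
 *	unsigned int nofs_flags = memalloc_nofs_save();
 *	gfp_t effective = current_gfp_context(GFP_KERNEL);
 *	memalloc_nofs_restore(nofs_flags);
 */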
| 268 | |
| 269 | #ifdef CONFIG_LOCKDEP |
| 270 | extern void __fs_reclaim_acquire(unsigned long ip); |
| 271 | extern void __fs_reclaim_release(unsigned long ip); |
| 272 | extern void fs_reclaim_acquire(gfp_t gfp_mask); |
| 273 | extern void fs_reclaim_release(gfp_t gfp_mask); |
| 274 | #else |
| 275 | static inline void __fs_reclaim_acquire(unsigned long ip) { } |
| 276 | static inline void __fs_reclaim_release(unsigned long ip) { } |
| 277 | static inline void fs_reclaim_acquire(gfp_t gfp_mask) { } |
| 278 | static inline void fs_reclaim_release(gfp_t gfp_mask) { } |
| 279 | #endif |
| 280 | |
| 281 | /* Any memory-allocation retry loop should use |
| 282 | * memalloc_retry_wait(), and pass the flags for the most |
| 283 | * constrained allocation attempt that might have failed. |
| 284 | * This provides useful documentation of where loops are, |
 * and a central place to fine-tune the waiting as the MM
| 286 | * implementation changes. |
| 287 | */ |
| 288 | static inline void memalloc_retry_wait(gfp_t gfp_flags) |
| 289 | { |
| 290 | /* We use io_schedule_timeout because waiting for memory |
	 * typically includes waiting for dirty pages to be
| 292 | * written out, which requires IO. |
| 293 | */ |
| 294 | __set_current_state(TASK_UNINTERRUPTIBLE); |
	gfp_flags = current_gfp_context(gfp_flags);
| 296 | if (gfpflags_allow_blocking(gfp_flags) && |
| 297 | !(gfp_flags & __GFP_NORETRY)) |
| 298 | /* Probably waited already, no need for much more */ |
		io_schedule_timeout(1);
| 300 | else |
| 301 | /* Probably didn't wait, and has now released a lock, |
| 302 | * so now is a good time to wait |
| 303 | */ |
| 304 | io_schedule_timeout(HZ/50); |
| 305 | } |
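
/*
 * Illustrative retry-loop sketch (struct foo and its allocation are
 * hypothetical); the waiting flags match the most constrained attempt:
 *
 *	struct foo *p;
 *
 *	while (!(p = kzalloc(sizeof(*p), GFP_NOFS)))
 *		memalloc_retry_wait(GFP_NOFS);
 */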
| 306 | |
| 307 | /** |
| 308 | * might_alloc - Mark possible allocation sites |
| 309 | * @gfp_mask: gfp_t flags that would be used to allocate |
| 310 | * |
| 311 | * Similar to might_sleep() and other annotations, this can be used in functions |
| 312 | * that might allocate, but often don't. Compiles to nothing without |
 * CONFIG_LOCKDEP. Includes a conditional might_sleep() if @gfp_mask allows blocking.
| 314 | */ |
| 315 | static inline void might_alloc(gfp_t gfp_mask) |
| 316 | { |
| 317 | fs_reclaim_acquire(gfp_mask); |
| 318 | fs_reclaim_release(gfp_mask); |
| 319 | |
| 320 | if (current->flags & PF_MEMALLOC) |
| 321 | return; |
| 322 | |
| 323 | might_sleep_if(gfpflags_allow_blocking(gfp_mask)); |
| 324 | } |
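
/*
 * Illustrative sketch (foo_get_buffer() and its cache are hypothetical): a
 * function that allocates only on its slow path can still annotate every
 * call, so lockdep complains about unsafe callers even when the cached
 * fast path is taken:
 *
 *	void *foo_get_buffer(struct foo *f, gfp_t gfp)
 *	{
 *		might_alloc(gfp);
 *		if (f->cached)
 *			return f->cached;
 *		return kmalloc(f->buf_size, gfp);
 *	}
 */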
| 325 | |
| 326 | /** |
| 327 | * memalloc_flags_save - Add a PF_* flag to current->flags, save old value |
| 328 | * @flags: Flags to add. |
| 329 | * |
 * This allows PF_* flags to be conveniently added, irrespective of the current
 * value, and then the old value restored with memalloc_flags_restore().
| 332 | */ |
| 333 | static inline unsigned memalloc_flags_save(unsigned flags) |
| 334 | { |
| 335 | unsigned oldflags = ~current->flags & flags; |
| 336 | current->flags |= flags; |
| 337 | return oldflags; |
| 338 | } |
| 339 | |
| 340 | static inline void memalloc_flags_restore(unsigned flags) |
| 341 | { |
| 342 | current->flags &= ~flags; |
| 343 | } |
| 344 | |
| 345 | /** |
| 346 | * memalloc_noio_save - Marks implicit GFP_NOIO allocation scope. |
| 347 | * |
 * This function marks the beginning of the GFP_NOIO allocation scope.
 * All further allocations will implicitly drop the __GFP_IO flag and so
 * they are safe for the IO critical section from the allocation recursion
 * point of view. Use memalloc_noio_restore() to end the scope with flags
 * returned by this function.
| 353 | * |
| 354 | * Context: This function is safe to be used from any context. |
| 355 | * Return: The saved flags to be passed to memalloc_noio_restore. |
| 356 | */ |
| 357 | static inline unsigned int memalloc_noio_save(void) |
| 358 | { |
| 359 | return memalloc_flags_save(PF_MEMALLOC_NOIO); |
| 360 | } |
| 361 | |
| 362 | /** |
| 363 | * memalloc_noio_restore - Ends the implicit GFP_NOIO scope. |
| 364 | * @flags: Flags to restore. |
| 365 | * |
 * Ends the implicit GFP_NOIO scope started by the memalloc_noio_save() function.
 * Always make sure that the given flags value is the return value from the
 * pairing memalloc_noio_save() call.
| 369 | */ |
| 370 | static inline void memalloc_noio_restore(unsigned int flags) |
| 371 | { |
| 372 | memalloc_flags_restore(flags); |
| 373 | } |
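
/*
 * Illustrative sketch (foo_resume_device() is a hypothetical block-device
 * resume path): every allocation made while the scope is active implicitly
 * drops __GFP_IO and therefore cannot recurse into the I/O path being
 * resumed.
 *
 *	unsigned int noio_flags = memalloc_noio_save();
 *	ret = foo_resume_device(dev);
 *	memalloc_noio_restore(noio_flags);
 */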
| 374 | |
| 375 | /** |
| 376 | * memalloc_nofs_save - Marks implicit GFP_NOFS allocation scope. |
| 377 | * |
 * This function marks the beginning of the GFP_NOFS allocation scope.
 * All further allocations will implicitly drop the __GFP_FS flag and so
 * they are safe for the FS critical section from the allocation recursion
 * point of view. Use memalloc_nofs_restore() to end the scope with flags
 * returned by this function.
| 383 | * |
| 384 | * Context: This function is safe to be used from any context. |
| 385 | * Return: The saved flags to be passed to memalloc_nofs_restore. |
| 386 | */ |
| 387 | static inline unsigned int memalloc_nofs_save(void) |
| 388 | { |
| 389 | return memalloc_flags_save(PF_MEMALLOC_NOFS); |
| 390 | } |
| 391 | |
| 392 | /** |
| 393 | * memalloc_nofs_restore - Ends the implicit GFP_NOFS scope. |
| 394 | * @flags: Flags to restore. |
| 395 | * |
 * Ends the implicit GFP_NOFS scope started by the memalloc_nofs_save() function.
 * Always make sure that the given flags value is the return value from the
 * pairing memalloc_nofs_save() call.
| 399 | */ |
| 400 | static inline void memalloc_nofs_restore(unsigned int flags) |
| 401 | { |
| 402 | memalloc_flags_restore(flags); |
| 403 | } |
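
/*
 * Illustrative sketch (foo_fs_transaction() is hypothetical): allocations
 * made while a filesystem transaction is open must not re-enter the
 * filesystem via direct reclaim, so the whole section runs in GFP_NOFS
 * scope.
 *
 *	unsigned int nofs_flags = memalloc_nofs_save();
 *	err = foo_fs_transaction(sb);
 *	memalloc_nofs_restore(nofs_flags);
 */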
| 404 | |
| 405 | /** |
| 406 | * memalloc_noreclaim_save - Marks implicit __GFP_MEMALLOC scope. |
| 407 | * |
| 408 | * This function marks the beginning of the __GFP_MEMALLOC allocation scope. |
| 409 | * All further allocations will implicitly add the __GFP_MEMALLOC flag, which |
| 410 | * prevents entering reclaim and allows access to all memory reserves. This |
| 411 | * should only be used when the caller guarantees the allocation will allow more |
| 412 | * memory to be freed very shortly, i.e. it needs to allocate some memory in |
| 413 | * the process of freeing memory, and cannot reclaim due to potential recursion. |
| 414 | * |
| 415 | * Users of this scope have to be extremely careful to not deplete the reserves |
| 416 | * completely and implement a throttling mechanism which controls the |
| 417 | * consumption of the reserve based on the amount of freed memory. Usage of a |
| 418 | * pre-allocated pool (e.g. mempool) should be always considered before using |
| 419 | * this scope. |
| 420 | * |
 * Individual allocations under the scope can opt out using __GFP_NOMEMALLOC.
| 422 | * |
 * Context: This function should not be used in an interrupt context because
 * interrupt context does not grant PF_MEMALLOC access to the reserves.
| 425 | * See __gfp_pfmemalloc_flags(). |
| 426 | * Return: The saved flags to be passed to memalloc_noreclaim_restore. |
| 427 | */ |
| 428 | static inline unsigned int memalloc_noreclaim_save(void) |
| 429 | { |
| 430 | return memalloc_flags_save(PF_MEMALLOC); |
| 431 | } |
| 432 | |
| 433 | /** |
| 434 | * memalloc_noreclaim_restore - Ends the implicit __GFP_MEMALLOC scope. |
| 435 | * @flags: Flags to restore. |
| 436 | * |
 * Ends the implicit __GFP_MEMALLOC scope started by the memalloc_noreclaim_save()
 * function. Always make sure that the given flags value is the return value from
 * the pairing memalloc_noreclaim_save() call.
| 440 | */ |
| 441 | static inline void memalloc_noreclaim_restore(unsigned int flags) |
| 442 | { |
| 443 | memalloc_flags_restore(flags); |
| 444 | } |
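
/*
 * Illustrative sketch (foo_xmit_swap_folio() is hypothetical, e.g. a
 * swap-over-network transmit path): allocations in the section may dip
 * into the memory reserves, which is acceptable only because completing
 * the work frees memory again shortly afterwards.
 *
 *	unsigned int noreclaim_flags = memalloc_noreclaim_save();
 *	ret = foo_xmit_swap_folio(folio);
 *	memalloc_noreclaim_restore(noreclaim_flags);
 */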
| 445 | |
| 446 | /** |
| 447 | * memalloc_pin_save - Marks implicit ~__GFP_MOVABLE scope. |
| 448 | * |
| 449 | * This function marks the beginning of the ~__GFP_MOVABLE allocation scope. |
| 450 | * All further allocations will implicitly remove the __GFP_MOVABLE flag, which |
 * will constrain the allocations to zones that allow long-term pinning, i.e.
| 452 | * not ZONE_MOVABLE zones. |
| 453 | * |
| 454 | * Return: The saved flags to be passed to memalloc_pin_restore. |
| 455 | */ |
| 456 | static inline unsigned int memalloc_pin_save(void) |
| 457 | { |
| 458 | return memalloc_flags_save(PF_MEMALLOC_PIN); |
| 459 | } |
| 460 | |
| 461 | /** |
| 462 | * memalloc_pin_restore - Ends the implicit ~__GFP_MOVABLE scope. |
| 463 | * @flags: Flags to restore. |
| 464 | * |
 * Ends the implicit ~__GFP_MOVABLE scope started by the memalloc_pin_save()
 * function. Always make sure that the given flags value is the return value
 * from the pairing memalloc_pin_save() call.
| 468 | */ |
| 469 | static inline void memalloc_pin_restore(unsigned int flags) |
| 470 | { |
| 471 | memalloc_flags_restore(flags); |
| 472 | } |
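
/*
 * Illustrative sketch (foo_pin_user_buffer() is hypothetical): allocations
 * and migration targets inside the scope avoid ZONE_MOVABLE, so pages
 * pinned for the long term do not block memory offlining.
 *
 *	unsigned int pin_flags = memalloc_pin_save();
 *	ret = foo_pin_user_buffer(start, nr_pages, pages);
 *	memalloc_pin_restore(pin_flags);
 */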
| 473 | |
| 474 | #ifdef CONFIG_MEMCG |
| 475 | DECLARE_PER_CPU(struct mem_cgroup *, int_active_memcg); |
| 476 | /** |
| 477 | * set_active_memcg - Starts the remote memcg charging scope. |
| 478 | * @memcg: memcg to charge. |
| 479 | * |
 * This function marks the beginning of the remote memcg charging scope. All
 * __GFP_ACCOUNT allocations until the end of the scope will be charged to the
 * given memcg.
 *
 * Please make sure that the caller holds a reference to the passed memcg
 * structure, so its lifetime is guaranteed to exceed the scope between two
 * set_active_memcg() calls.
| 487 | * |
| 488 | * NOTE: This function can nest. Users must save the return value and |
| 489 | * reset the previous value after their own charging scope is over. |
| 490 | */ |
| 491 | static inline struct mem_cgroup * |
| 492 | set_active_memcg(struct mem_cgroup *memcg) |
| 493 | { |
| 494 | struct mem_cgroup *old; |
| 495 | |
| 496 | if (!in_task()) { |
| 497 | old = this_cpu_read(int_active_memcg); |
| 498 | this_cpu_write(int_active_memcg, memcg); |
| 499 | } else { |
| 500 | old = current->active_memcg; |
| 501 | current->active_memcg = memcg; |
| 502 | } |
| 503 | |
| 504 | return old; |
| 505 | } |
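
/*
 * Illustrative sketch (the caller is assumed to hold a reference on
 * @memcg): charge a __GFP_ACCOUNT allocation made on behalf of another
 * cgroup, then restore the previous scope so that nesting keeps working.
 *
 *	struct mem_cgroup *old_memcg;
 *
 *	old_memcg = set_active_memcg(memcg);
 *	p = kmalloc(size, GFP_KERNEL | __GFP_ACCOUNT);
 *	set_active_memcg(old_memcg);
 */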
| 506 | #else |
| 507 | static inline struct mem_cgroup * |
| 508 | set_active_memcg(struct mem_cgroup *memcg) |
| 509 | { |
| 510 | return NULL; |
| 511 | } |
| 512 | #endif |
| 513 | |
| 514 | #ifdef CONFIG_MEMBARRIER |
| 515 | enum { |
| 516 | MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY = (1U << 0), |
| 517 | MEMBARRIER_STATE_PRIVATE_EXPEDITED = (1U << 1), |
| 518 | MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY = (1U << 2), |
| 519 | MEMBARRIER_STATE_GLOBAL_EXPEDITED = (1U << 3), |
| 520 | MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY = (1U << 4), |
| 521 | MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE = (1U << 5), |
| 522 | MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY = (1U << 6), |
| 523 | MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ = (1U << 7), |
| 524 | }; |
| 525 | |
| 526 | enum { |
| 527 | MEMBARRIER_FLAG_SYNC_CORE = (1U << 0), |
| 528 | MEMBARRIER_FLAG_RSEQ = (1U << 1), |
| 529 | }; |
| 530 | |
| 531 | #ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS |
| 532 | #include <asm/membarrier.h> |
| 533 | #endif |
| 534 | |
| 535 | static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm) |
| 536 | { |
| 537 | /* |
	 * The atomic_read() below prevents common subexpression elimination.
	 * The following should help the compiler generate more efficient code
	 * on architectures where sync_core_before_usermode() is a no-op.
| 541 | */ |
| 542 | if (!IS_ENABLED(CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE)) |
| 543 | return; |
| 544 | if (current->mm != mm) |
| 545 | return; |
| 546 | if (likely(!(atomic_read(&mm->membarrier_state) & |
| 547 | MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE))) |
| 548 | return; |
| 549 | sync_core_before_usermode(); |
| 550 | } |
| 551 | |
| 552 | extern void membarrier_exec_mmap(struct mm_struct *mm); |
| 553 | |
| 554 | extern void membarrier_update_current_mm(struct mm_struct *next_mm); |
| 555 | |
| 556 | #else |
| 557 | #ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS |
| 558 | static inline void membarrier_arch_switch_mm(struct mm_struct *prev, |
| 559 | struct mm_struct *next, |
| 560 | struct task_struct *tsk) |
| 561 | { |
| 562 | } |
| 563 | #endif |
| 564 | static inline void membarrier_exec_mmap(struct mm_struct *mm) |
| 565 | { |
| 566 | } |
| 567 | static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm) |
| 568 | { |
| 569 | } |
| 570 | static inline void membarrier_update_current_mm(struct mm_struct *next_mm) |
| 571 | { |
| 572 | } |
| 573 | #endif |
| 574 | |
| 575 | #endif /* _LINUX_SCHED_MM_H */ |
| 576 | |