/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MM_TYPES_TASK_H
#define _LINUX_MM_TYPES_TASK_H

/*
 * Here are the definitions of the MM data types that are embedded in 'struct task_struct'.
 *
 * (These are defined separately to decouple sched.h from mm_types.h as much as possible.)
 */

#include <linux/align.h>
#include <linux/types.h>

#include <asm/page.h>

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
#include <asm/tlbbatch.h>
#endif

#define ALLOC_SPLIT_PTLOCKS	(SPINLOCK_SIZE > BITS_PER_LONG/8)
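
/*
 * A note on the macro above: SPINLOCK_SIZE is generated from
 * sizeof(spinlock_t) (see kernel/bounds.c), so ALLOC_SPLIT_PTLOCKS is true
 * when a spinlock no longer fits in an unsigned long, e.g. with
 * CONFIG_DEBUG_SPINLOCK or lock debugging enabled. In that case the
 * page-table lock is allocated separately instead of being embedded in
 * struct page.
 */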

/*
 * When updating this, please also update struct resident_page_types[] in
 * kernel/fork.c
 */
enum {
	MM_FILEPAGES,	/* Resident file mapping pages */
	MM_ANONPAGES,	/* Resident anonymous pages */
	MM_SWAPENTS,	/* Anonymous swap entries */
	MM_SHMEMPAGES,	/* Resident shared memory pages */
	NR_MM_COUNTERS
};
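
/*
 * A sketch of typical use of these counters; the accessors live in
 * include/linux/mm.h:
 *
 *	unsigned long file = get_mm_counter(mm, MM_FILEPAGES);
 *	unsigned long anon = get_mm_counter(mm, MM_ANONPAGES);
 */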

struct page;

struct page_frag {
	struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 offset;
	__u32 size;
#else
	__u16 offset;
	__u16 size;
#endif
};
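
/*
 * A page_frag names a sub-page region: the backing page plus an offset and
 * size within it. The __u16 variant above is sufficient when BITS_PER_LONG
 * is 32 and pages are smaller than 64KiB, since both fields then fit in 16
 * bits; for example, networking carries one per socket (see sk_page_frag()).
 */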

#define PAGE_FRAG_CACHE_MAX_SIZE	__ALIGN_MASK(32768, ~PAGE_MASK)
#define PAGE_FRAG_CACHE_MAX_ORDER	get_order(PAGE_FRAG_CACHE_MAX_SIZE)
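
/*
 * Worked example, assuming 4KiB pages: ~PAGE_MASK is PAGE_SIZE - 1 = 4095,
 * so __ALIGN_MASK(32768, 4095) rounds 32768 up to a page boundary, giving
 * 32768, and get_order(32768) is 3: the cache below refills from order-3
 * (32KiB) pages. With 64KiB or larger pages the maximum degenerates to a
 * single page.
 */
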
struct page_frag_cache {
	/* encoded_page consists of the virtual address, pfmemalloc bit and
	 * order of a page.
	 */
	unsigned long encoded_page;

	/* We maintain a pagecount bias, so that we don't dirty the cache
	 * line containing page->_refcount every time we allocate a fragment.
	 */
#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) && (BITS_PER_LONG <= 32)
	__u16 offset;
	__u16 pagecnt_bias;
#else
	__u32 offset;
	__u32 pagecnt_bias;
#endif
};
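
/*
 * A sketch of the usual API built on this cache (see mm/page_frag_cache.c):
 *
 *	void *buf = page_frag_alloc(&cache, fragsz, GFP_ATOMIC);
 *	...
 *	page_frag_free(buf);
 *
 * The pagecnt_bias works by taking a large number of page references up
 * front and handing one out per fragment by decrementing the bias, so the
 * cache line holding page->_refcount stays clean on the fast path.
 */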

/* Track pages that require TLB flushes */
struct tlbflush_unmap_batch {
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
	/*
	 * The arch code makes the following promise: generic code can modify a
	 * PTE, then call arch_tlbbatch_add_pending() (which internally provides
	 * all needed barriers), then call arch_tlbbatch_flush(), and the entries
	 * will be flushed on all CPUs by the time that arch_tlbbatch_flush()
	 * returns.
	 */
	struct arch_tlbflush_unmap_batch arch;

	/* True if a flush is needed. */
	bool flush_required;

	/*
	 * If true then the PTE was dirty when unmapped. The entry must be
	 * flushed before IO is initiated or a stale TLB entry potentially
	 * allows an update without redirtying the page.
	 */
	bool writable;
#endif
};
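
/*
 * A sketch of how reclaim drives this batch, assuming
 * CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH (see mm/rmap.c): while unmapping,
 * set_tlb_ubc_flush_pending() marks the CPUs/mm that may hold stale entries;
 * try_to_unmap_flush() (or try_to_unmap_flush_dirty() before writeback of a
 * dirty page) then issues one batched flush instead of one IPI per page.
 */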

#endif /* _LINUX_MM_TYPES_TASK_H */