/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_GFP_H
#define __LINUX_GFP_H

#include <linux/gfp_types.h>

#include <linux/mmzone.h>
#include <linux/topology.h>
#include <linux/alloc_tag.h>
#include <linux/cleanup.h>
#include <linux/sched.h>

struct vm_area_struct;
struct mempolicy;

/* Convert GFP flags to their corresponding migrate type */
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
#define GFP_MOVABLE_SHIFT 3

static inline int gfp_migratetype(const gfp_t gfp_flags)
{
	VM_WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
	BUILD_BUG_ON((1UL << GFP_MOVABLE_SHIFT) != ___GFP_MOVABLE);
	BUILD_BUG_ON((___GFP_MOVABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_MOVABLE);
	BUILD_BUG_ON((___GFP_RECLAIMABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_RECLAIMABLE);
	BUILD_BUG_ON(((___GFP_MOVABLE | ___GFP_RECLAIMABLE) >>
		      GFP_MOVABLE_SHIFT) != MIGRATE_HIGHATOMIC);

	if (unlikely(page_group_by_mobility_disabled))
		return MIGRATE_UNMOVABLE;

	/* Group based on mobility */
	return (__force unsigned long)(gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
}
#undef GFP_MOVABLE_MASK
#undef GFP_MOVABLE_SHIFT
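
/*
 * Illustrative example (not part of this header), assuming grouping by
 * mobility is enabled: a movable user allocation is grouped on the movable
 * free lists, while a plain kernel allocation is treated as unmovable:
 *
 *	gfp_migratetype(GFP_HIGHUSER_MOVABLE);	// MIGRATE_MOVABLE
 *	gfp_migratetype(GFP_KERNEL);		// MIGRATE_UNMOVABLE
 */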

static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
{
	return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
}

static inline bool gfpflags_allow_spinning(const gfp_t gfp_flags)
{
	/*
	 * !__GFP_DIRECT_RECLAIM -> direct reclaim is not allowed.
	 * !__GFP_KSWAPD_RECLAIM -> it's not safe to wake up kswapd.
	 * All GFP_* flags including GFP_NOWAIT use one or both flags.
	 * alloc_pages_nolock() is the only API that doesn't specify either flag.
	 *
	 * This is stronger than GFP_NOWAIT or GFP_ATOMIC because
	 * those are guaranteed to never block on a sleeping lock.
	 * Here we are enforcing that the allocation doesn't ever spin
	 * on any locks (i.e. only trylocks). There is no high level
	 * GFP_$FOO flag for this use in alloc_pages_nolock() as the
	 * regular page allocator doesn't fully support this
	 * allocation mode.
	 */
	return !!(gfp_flags & __GFP_RECLAIM);
}
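
/*
 * Illustrative example (not part of this header): GFP_KERNEL may enter
 * direct reclaim and thus block, GFP_NOWAIT never blocks but may still
 * spin on zone locks, and only a mask with neither reclaim bit (as used by
 * alloc_pages_nolock()) rules out spinning as well:
 *
 *	gfpflags_allow_blocking(GFP_KERNEL);	// true
 *	gfpflags_allow_blocking(GFP_NOWAIT);	// false
 *	gfpflags_allow_spinning(GFP_NOWAIT);	// true, kswapd may be woken
 */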

#ifdef CONFIG_HIGHMEM
#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
#else
#define OPT_ZONE_HIGHMEM ZONE_NORMAL
#endif

#ifdef CONFIG_ZONE_DMA
#define OPT_ZONE_DMA ZONE_DMA
#else
#define OPT_ZONE_DMA ZONE_NORMAL
#endif

#ifdef CONFIG_ZONE_DMA32
#define OPT_ZONE_DMA32 ZONE_DMA32
#else
#define OPT_ZONE_DMA32 ZONE_NORMAL
#endif

/*
 * GFP_ZONE_TABLE is a word size bitstring that is used for looking up the
 * zone to use given the lowest 4 bits of gfp_t. Entries are GFP_ZONES_SHIFT
 * bits long and there are 16 of them to cover all possible combinations of
 * __GFP_DMA, __GFP_DMA32, __GFP_MOVABLE and __GFP_HIGHMEM.
 *
 * The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA.
 * But GFP_MOVABLE is not only a zone specifier but also an allocation
 * policy. Therefore __GFP_MOVABLE plus another zone selector is valid.
 * Only 1 bit of the lowest 3 bits (DMA,DMA32,HIGHMEM) can be set to "1".
 *
 *       bit       result
 *       =================
 *       0x0    => NORMAL
 *       0x1    => DMA or NORMAL
 *       0x2    => HIGHMEM or NORMAL
 *       0x3    => BAD (DMA+HIGHMEM)
 *       0x4    => DMA32 or NORMAL
 *       0x5    => BAD (DMA+DMA32)
 *       0x6    => BAD (HIGHMEM+DMA32)
 *       0x7    => BAD (HIGHMEM+DMA32+DMA)
 *       0x8    => NORMAL (MOVABLE+0)
 *       0x9    => DMA or NORMAL (MOVABLE+DMA)
 *       0xa    => MOVABLE (Movable is valid only if HIGHMEM is set too)
 *       0xb    => BAD (MOVABLE+HIGHMEM+DMA)
 *       0xc    => DMA32 or NORMAL (MOVABLE+DMA32)
 *       0xd    => BAD (MOVABLE+DMA32+DMA)
 *       0xe    => BAD (MOVABLE+DMA32+HIGHMEM)
 *       0xf    => BAD (MOVABLE+DMA32+HIGHMEM+DMA)
 *
 * GFP_ZONES_SHIFT must be <= 2 on 32 bit platforms.
 */

#if defined(CONFIG_ZONE_DEVICE) && (MAX_NR_ZONES-1) <= 4
/* ZONE_DEVICE is not a valid GFP zone specifier */
#define GFP_ZONES_SHIFT 2
#else
#define GFP_ZONES_SHIFT ZONES_SHIFT
#endif

#if 16 * GFP_ZONES_SHIFT > BITS_PER_LONG
#error GFP_ZONES_SHIFT too large to create GFP_ZONE_TABLE integer
#endif

#define GFP_ZONE_TABLE ( \
	(ZONE_NORMAL << 0 * GFP_ZONES_SHIFT) \
	| (OPT_ZONE_DMA << ___GFP_DMA * GFP_ZONES_SHIFT) \
	| (OPT_ZONE_HIGHMEM << ___GFP_HIGHMEM * GFP_ZONES_SHIFT) \
	| (OPT_ZONE_DMA32 << ___GFP_DMA32 * GFP_ZONES_SHIFT) \
	| (ZONE_NORMAL << ___GFP_MOVABLE * GFP_ZONES_SHIFT) \
	| (OPT_ZONE_DMA << (___GFP_MOVABLE | ___GFP_DMA) * GFP_ZONES_SHIFT) \
	| (ZONE_MOVABLE << (___GFP_MOVABLE | ___GFP_HIGHMEM) * GFP_ZONES_SHIFT) \
	| (OPT_ZONE_DMA32 << (___GFP_MOVABLE | ___GFP_DMA32) * GFP_ZONES_SHIFT) \
)

/*
 * GFP_ZONE_BAD is a bitmap for all combinations of __GFP_DMA, __GFP_DMA32
 * __GFP_HIGHMEM and __GFP_MOVABLE that are not permitted. One flag per
 * entry starting with bit 0. Bit is set if the combination is not
 * allowed.
 */
#define GFP_ZONE_BAD ( \
	1 << (___GFP_DMA | ___GFP_HIGHMEM) \
	| 1 << (___GFP_DMA | ___GFP_DMA32) \
	| 1 << (___GFP_DMA32 | ___GFP_HIGHMEM) \
	| 1 << (___GFP_DMA | ___GFP_DMA32 | ___GFP_HIGHMEM) \
	| 1 << (___GFP_MOVABLE | ___GFP_HIGHMEM | ___GFP_DMA) \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA) \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_HIGHMEM) \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA | ___GFP_HIGHMEM) \
)

static inline enum zone_type gfp_zone(gfp_t flags)
{
	enum zone_type z;
	int bit = (__force int) (flags & GFP_ZONEMASK);

	z = (GFP_ZONE_TABLE >> (bit * GFP_ZONES_SHIFT)) &
					 ((1 << GFP_ZONES_SHIFT) - 1);
	VM_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
	return z;
}
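
/*
 * Illustrative example (not part of this header), assuming the relevant
 * zones are configured in:
 *
 *	gfp_zone(GFP_KERNEL)			== ZONE_NORMAL
 *	gfp_zone(GFP_KERNEL | __GFP_DMA)	== OPT_ZONE_DMA
 *	gfp_zone(GFP_HIGHUSER_MOVABLE)		== ZONE_MOVABLE
 */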

/*
 * There is only one page-allocator function, and two main namespaces to
 * it. The alloc_page*() variants return 'struct page *' and as such
 * can allocate highmem pages, the *get*page*() variants return
 * virtual kernel addresses to the allocated page(s).
 */

static inline int gfp_zonelist(gfp_t flags)
{
#ifdef CONFIG_NUMA
	if (unlikely(flags & __GFP_THISNODE))
		return ZONELIST_NOFALLBACK;
#endif
	return ZONELIST_FALLBACK;
}

/*
 * gfp flag masking for nested internal allocations.
 *
 * For code that needs to do allocations inside the public allocation API (e.g.
 * memory allocation tracking code), the allocations need to obey the caller's
 * allocation context constraints so that allocation context mismatches (e.g.
 * GFP_KERNEL allocations in GFP_NOFS contexts) cannot lead to deadlocks.
 *
 * It is also assumed that these nested allocations are for internal kernel
 * object storage purposes only and are not going to be used for DMA, etc.
 * Hence we strip out all the zone information and leave just the context
 * information intact.
 *
 * Further, internal allocations must fail before the higher level allocation
 * can fail, so we must make them fail faster and fail silently. We also don't
 * want them to deplete emergency reserves. Hence callers must be prepared for
 * these nested allocations to fail.
 */
static inline gfp_t gfp_nested_mask(gfp_t flags)
{
	return ((flags & (GFP_KERNEL | GFP_ATOMIC | __GFP_NOLOCKDEP)) |
		(__GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN));
}
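
/*
 * Illustrative example (not part of this header): a nested allocation made
 * on behalf of a GFP_NOFS caller keeps the caller's context but gains the
 * fail-fast modifiers:
 *
 *	gfp_t nested = gfp_nested_mask(GFP_NOFS);
 *	// nested == GFP_NOFS | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN
 */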

/*
 * We get the zone list from the current node and the gfp_mask.
 * This zone list contains a maximum of MAX_NUMNODES*MAX_NR_ZONES zones.
 * There are two zonelists per node, one for all zones with memory and
 * one containing just zones from the node the zonelist belongs to.
 *
 * For the case of non-NUMA systems the NODE_DATA() gets optimized to
 * &contig_page_data at compile-time.
 */
static inline struct zonelist *node_zonelist(int nid, gfp_t flags)
{
	return NODE_DATA(nid)->node_zonelists + gfp_zonelist(flags);
}

#ifndef HAVE_ARCH_FREE_PAGE
static inline void arch_free_page(struct page *page, int order) { }
#endif
#ifndef HAVE_ARCH_ALLOC_PAGE
static inline void arch_alloc_page(struct page *page, int order) { }
#endif

struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order, int preferred_nid,
		nodemask_t *nodemask);
#define __alloc_pages(...) alloc_hooks(__alloc_pages_noprof(__VA_ARGS__))

struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_nid,
		nodemask_t *nodemask);
#define __folio_alloc(...) alloc_hooks(__folio_alloc_noprof(__VA_ARGS__))

unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
		nodemask_t *nodemask, int nr_pages,
		struct page **page_array);
#define __alloc_pages_bulk(...) alloc_hooks(alloc_pages_bulk_noprof(__VA_ARGS__))

unsigned long alloc_pages_bulk_mempolicy_noprof(gfp_t gfp,
		unsigned long nr_pages,
		struct page **page_array);
#define alloc_pages_bulk_mempolicy(...) \
	alloc_hooks(alloc_pages_bulk_mempolicy_noprof(__VA_ARGS__))

/* Bulk allocate order-0 pages */
#define alloc_pages_bulk(_gfp, _nr_pages, _page_array) \
	__alloc_pages_bulk(_gfp, numa_mem_id(), NULL, _nr_pages, _page_array)

static inline unsigned long
alloc_pages_bulk_node_noprof(gfp_t gfp, int nid, unsigned long nr_pages,
		struct page **page_array)
{
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();

	return alloc_pages_bulk_noprof(gfp, nid, NULL, nr_pages, page_array);
}

#define alloc_pages_bulk_node(...) \
	alloc_hooks(alloc_pages_bulk_node_noprof(__VA_ARGS__))
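
/*
 * Illustrative example (not part of this header): fill a caller-provided
 * array with order-0 pages from the local node; the return value is the
 * number of populated entries, which may be fewer than requested:
 *
 *	struct page *pages[16] = { };
 *	unsigned long filled = alloc_pages_bulk(GFP_KERNEL, 16, pages);
 */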

static inline void warn_if_node_offline(int this_node, gfp_t gfp_mask)
{
	gfp_t warn_gfp = gfp_mask & (__GFP_THISNODE|__GFP_NOWARN);

	if (warn_gfp != (__GFP_THISNODE|__GFP_NOWARN))
		return;

	if (node_online(this_node))
		return;

	pr_warn("%pGg allocation from offline node %d\n", &gfp_mask, this_node);
	dump_stack();
}

/*
 * Allocate pages, preferring the node given as nid. The node must be valid and
 * online. For a more general interface, see alloc_pages_node().
 */
static inline struct page *
__alloc_pages_node_noprof(int nid, gfp_t gfp_mask, unsigned int order)
{
	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
	warn_if_node_offline(nid, gfp_mask);

	return __alloc_pages_noprof(gfp_mask, order, nid, NULL);
}

#define __alloc_pages_node(...) alloc_hooks(__alloc_pages_node_noprof(__VA_ARGS__))

static inline
struct folio *__folio_alloc_node_noprof(gfp_t gfp, unsigned int order, int nid)
{
	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
	warn_if_node_offline(nid, gfp);

	return __folio_alloc_noprof(gfp, order, nid, NULL);
}

#define __folio_alloc_node(...) alloc_hooks(__folio_alloc_node_noprof(__VA_ARGS__))

/*
 * Allocate pages, preferring the node given as nid. When nid == NUMA_NO_NODE,
 * prefer the current CPU's closest node. Otherwise node must be valid and
 * online.
 */
static inline struct page *alloc_pages_node_noprof(int nid, gfp_t gfp_mask,
		unsigned int order)
{
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();

	return __alloc_pages_node_noprof(nid, gfp_mask, order);
}

#define alloc_pages_node(...) alloc_hooks(alloc_pages_node_noprof(__VA_ARGS__))
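
/*
 * Illustrative example (not part of this header): NUMA_NO_NODE lets the
 * allocator fall back to the current CPU's nearest memory node:
 *
 *	struct page *page = alloc_pages_node(NUMA_NO_NODE, GFP_KERNEL, 0);
 *	if (page)
 *		__free_pages(page, 0);
 */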

#ifdef CONFIG_NUMA
struct page *alloc_pages_noprof(gfp_t gfp, unsigned int order);
struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order);
struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order,
		struct mempolicy *mpol, pgoff_t ilx, int nid);
struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order, struct vm_area_struct *vma,
		unsigned long addr);
#else
static inline struct page *alloc_pages_noprof(gfp_t gfp_mask, unsigned int order)
{
	return alloc_pages_node_noprof(numa_node_id(), gfp_mask, order);
}
static inline struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order)
{
	return __folio_alloc_node_noprof(gfp, order, numa_node_id());
}
static inline struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order,
		struct mempolicy *mpol, pgoff_t ilx, int nid)
{
	return folio_alloc_noprof(gfp, order);
}
#define vma_alloc_folio_noprof(gfp, order, vma, addr) \
	folio_alloc_noprof(gfp, order)
#endif

#define alloc_pages(...) alloc_hooks(alloc_pages_noprof(__VA_ARGS__))
#define folio_alloc(...) alloc_hooks(folio_alloc_noprof(__VA_ARGS__))
#define folio_alloc_mpol(...) alloc_hooks(folio_alloc_mpol_noprof(__VA_ARGS__))
#define vma_alloc_folio(...) alloc_hooks(vma_alloc_folio_noprof(__VA_ARGS__))

#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)

static inline struct page *alloc_page_vma_noprof(gfp_t gfp,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct folio *folio = vma_alloc_folio_noprof(gfp, 0, vma, addr);

	return &folio->page;
}
#define alloc_page_vma(...) alloc_hooks(alloc_page_vma_noprof(__VA_ARGS__))

struct page *alloc_pages_nolock_noprof(gfp_t gfp_flags, int nid, unsigned int order);
#define alloc_pages_nolock(...) alloc_hooks(alloc_pages_nolock_noprof(__VA_ARGS__))

extern unsigned long get_free_pages_noprof(gfp_t gfp_mask, unsigned int order);
#define __get_free_pages(...) alloc_hooks(get_free_pages_noprof(__VA_ARGS__))

extern unsigned long get_zeroed_page_noprof(gfp_t gfp_mask);
#define get_zeroed_page(...) alloc_hooks(get_zeroed_page_noprof(__VA_ARGS__))

void *alloc_pages_exact_noprof(size_t size, gfp_t gfp_mask) __alloc_size(1);
#define alloc_pages_exact(...) alloc_hooks(alloc_pages_exact_noprof(__VA_ARGS__))

void free_pages_exact(void *virt, size_t size);

__meminit void *alloc_pages_exact_nid_noprof(int nid, size_t size, gfp_t gfp_mask) __alloc_size(2);
#define alloc_pages_exact_nid(...) \
	alloc_hooks(alloc_pages_exact_nid_noprof(__VA_ARGS__))

#define __get_free_page(gfp_mask) \
	__get_free_pages((gfp_mask), 0)

#define __get_dma_pages(gfp_mask, order) \
	__get_free_pages((gfp_mask) | GFP_DMA, (order))

extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages_nolock(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);

#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr), 0)
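
/*
 * Illustrative example (not part of this header): the *get*page* helpers
 * return a kernel virtual address and are paired with free_page()/
 * free_pages(), not with __free_pages():
 *
 *	unsigned long addr = __get_free_pages(GFP_KERNEL, 1);
 *	if (addr)
 *		free_pages(addr, 1);
 */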

void page_alloc_init_cpuhp(void);
bool decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp);
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
void drain_all_pages(struct zone *zone);
void drain_local_pages(struct zone *zone);

void page_alloc_init_late(void);
void setup_pcp_cacheinfo(unsigned int cpu);

/*
 * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what
 * GFP flags are used before interrupts are enabled. Once interrupts are
 * enabled, it is set to __GFP_BITS_MASK while the system is running. During
 * hibernation, it is used by PM to avoid I/O during memory allocation while
 * devices are suspended.
 */
extern gfp_t gfp_allowed_mask;

/* Returns true if the gfp_mask allows use of ALLOC_NO_WATERMARK */
bool gfp_pfmemalloc_allowed(gfp_t gfp_mask);

static inline bool gfp_has_io_fs(gfp_t gfp)
{
	return (gfp & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS);
}

/*
 * Check if the gfp flags allow compaction - GFP_NOIO is a really
 * tricky context because the migration might require IO.
 */
static inline bool gfp_compaction_allowed(gfp_t gfp_mask)
{
	return IS_ENABLED(CONFIG_COMPACTION) && (gfp_mask & __GFP_IO);
}

extern gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma);

#ifdef CONFIG_CONTIG_ALLOC

typedef unsigned int __bitwise acr_flags_t;
#define ACR_FLAGS_NONE ((__force acr_flags_t)0) // ordinary allocation request
#define ACR_FLAGS_CMA ((__force acr_flags_t)BIT(0)) // allocate for CMA

/* The below functions must be run on a range from a single zone. */
extern int alloc_contig_range_noprof(unsigned long start, unsigned long end,
		acr_flags_t alloc_flags, gfp_t gfp_mask);
#define alloc_contig_range(...) alloc_hooks(alloc_contig_range_noprof(__VA_ARGS__))

extern struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask,
		int nid, nodemask_t *nodemask);
#define alloc_contig_pages(...) alloc_hooks(alloc_contig_pages_noprof(__VA_ARGS__))

#endif
void free_contig_range(unsigned long pfn, unsigned long nr_pages);

#ifdef CONFIG_CONTIG_ALLOC
static inline struct folio *folio_alloc_gigantic_noprof(int order, gfp_t gfp,
		int nid, nodemask_t *node)
{
	struct page *page;

	if (WARN_ON(!order || !(gfp & __GFP_COMP)))
		return NULL;

	page = alloc_contig_pages_noprof(1 << order, gfp, nid, node);

	return page ? page_folio(page) : NULL;
}
#else
static inline struct folio *folio_alloc_gigantic_noprof(int order, gfp_t gfp,
		int nid, nodemask_t *node)
{
	return NULL;
}
#endif
/* This should be paired with folio_put() rather than free_contig_range(). */
#define folio_alloc_gigantic(...) alloc_hooks(folio_alloc_gigantic_noprof(__VA_ARGS__))
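
/*
 * Illustrative sketch (not part of this header): a gigantic folio needs a
 * non-zero order and __GFP_COMP, and is released with folio_put(). The
 * order value below is purely for illustration:
 *
 *	struct folio *folio = folio_alloc_gigantic(order, GFP_KERNEL | __GFP_COMP,
 *						   numa_mem_id(), NULL);
 *	if (folio)
 *		folio_put(folio);
 */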

DEFINE_FREE(free_page, void *, free_page((unsigned long)_T))
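
/*
 * Illustrative example (not part of this header): the cleanup class above
 * lets a page obtained as a kernel virtual address be freed automatically
 * when the variable leaves scope:
 *
 *	void *buf __free(free_page) = (void *)__get_free_page(GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 */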

#endif /* __LINUX_GFP_H */