/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAP_H
#define _LINUX_SWAP_H

#include <linux/spinlock.h>
#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <linux/list.h>
#include <linux/memcontrol.h>
#include <linux/sched.h>
#include <linux/node.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/page-flags.h>
#include <uapi/linux/mempolicy.h>
#include <asm/page.h>

struct notifier_block;

struct bio;

struct pagevec;

#define SWAP_FLAG_PREFER	0x8000	/* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK	0x7fff
#define SWAP_FLAG_DISCARD	0x10000 /* enable discard for swap */
#define SWAP_FLAG_DISCARD_ONCE	0x20000 /* discard swap area at swapon-time */
#define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */

#define SWAP_FLAGS_VALID	(SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \
				 SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_ONCE | \
				 SWAP_FLAG_DISCARD_PAGES)
#define SWAP_BATCH 64

static inline int current_is_kswapd(void)
{
	return current->flags & PF_KSWAPD;
}

/*
 * MAX_SWAPFILES defines the maximum number of swaptypes: things which can
 * be swapped to.  The swap type and the offset into that swap type are
 * encoded into pte's and into pgoff_t's in the swapcache.  Using five bits
 * for the type means that the maximum number of swapcache pages is 27 bits
 * on 32-bit-pgoff_t architectures.  And that assumes that the architecture packs
 * the type/offset into the pte as 5/27 as well.
 */
#define MAX_SWAPFILES_SHIFT	5
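
/*
 * Illustrative sketch (the helpers live in linux/swapops.h, not here): with
 * a 5-bit type field, an arch-independent swap entry packs as
 * [ type:5 | offset:27 ] on a 32-bit pgoff_t, e.g.:
 *
 *	swp_entry_t ent = swp_entry(2, 0x1234);	// type 2, offset 0x1234
 *	// swp_type(ent) == 2, swp_offset(ent) == 0x1234
 */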

/*
 * Use some of the swap file numbers for other purposes. This
 * is a convenient way to hook into the VM to trigger special
 * actions on faults.
 */

/*
 * PTE markers are used to persist information onto PTEs that otherwise
 * should be a none pte.  As its name "PTE" hints, it should only be
 * applied to the leaves of pgtables.
 */
#define SWP_PTE_MARKER_NUM 1
#define SWP_PTE_MARKER		(MAX_SWAPFILES + SWP_HWPOISON_NUM + \
				 SWP_MIGRATION_NUM + SWP_DEVICE_NUM)

/*
 * Unaddressable device memory support. See include/linux/hmm.h and
 * Documentation/mm/hmm.rst. Short description is we need struct pages for
 * device memory that is unaddressable (inaccessible) by CPU, so that we can
 * migrate part of a process memory to device memory.
 *
 * When a page is migrated from CPU to device, we set the CPU page table entry
 * to a special SWP_DEVICE_{READ|WRITE} entry.
 *
 * When a page is mapped by the device for exclusive access we set the CPU page
 * table entries to a special SWP_DEVICE_EXCLUSIVE entry.
 */
#ifdef CONFIG_DEVICE_PRIVATE
#define SWP_DEVICE_NUM 3
#define SWP_DEVICE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM)
#define SWP_DEVICE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+1)
#define SWP_DEVICE_EXCLUSIVE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+2)
#else
#define SWP_DEVICE_NUM 0
#endif

/*
 * Page migration support.
 *
 * SWP_MIGRATION_READ_EXCLUSIVE is only applicable to anonymous pages and
 * indicates that the referenced (part of an) anonymous page is exclusive to
 * a single process.  For SWP_MIGRATION_WRITE, that information is implicit:
 * (parts of) anonymous pages that are mapped writable are exclusive to a
 * single process.
 */
#ifdef CONFIG_MIGRATION
#define SWP_MIGRATION_NUM 3
#define SWP_MIGRATION_READ	(MAX_SWAPFILES + SWP_HWPOISON_NUM)
#define SWP_MIGRATION_READ_EXCLUSIVE (MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
#define SWP_MIGRATION_WRITE	(MAX_SWAPFILES + SWP_HWPOISON_NUM + 2)
#else
#define SWP_MIGRATION_NUM 0
#endif

/*
 * Handling of hardware poisoned pages with memory corruption.
 */
#ifdef CONFIG_MEMORY_FAILURE
#define SWP_HWPOISON_NUM 1
#define SWP_HWPOISON		MAX_SWAPFILES
#else
#define SWP_HWPOISON_NUM 0
#endif

#define MAX_SWAPFILES \
	((1 << MAX_SWAPFILES_SHIFT) - SWP_DEVICE_NUM - \
	SWP_MIGRATION_NUM - SWP_HWPOISON_NUM - \
	SWP_PTE_MARKER_NUM)
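
/*
 * Worked example: with CONFIG_DEVICE_PRIVATE, CONFIG_MIGRATION and
 * CONFIG_MEMORY_FAILURE all enabled, the 1 << 5 = 32 encodable types split
 * as 32 - 3 (device) - 3 (migration) - 1 (hwpoison) - 1 (pte marker) = 24
 * usable swap files; with all three options disabled, MAX_SWAPFILES is 31.
 */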

/*
 * Magic header for a swap area. The first part of the union is
 * what the swap magic looks like for the old (limited to 128MB)
 * swap area format, the second part of the union adds - in the
 * old reserved area - some extra information. Note that the first
 * kilobyte is reserved for boot loader or disk label stuff...
 *
 * Having the magic at the end of the PAGE_SIZE makes detecting swap
 * areas somewhat tricky on machines that support multiple page sizes.
 * For 2.5 we'll probably want to move the magic to just beyond the
 * bootbits...
 */
union swap_header {
	struct {
		char reserved[PAGE_SIZE - 10];
		char magic[10];			/* SWAP-SPACE or SWAPSPACE2 */
	} magic;
	struct {
		char		bootbits[1024];	/* Space for disklabel etc. */
		__u32		version;
		__u32		last_page;
		__u32		nr_badpages;
		unsigned char	sws_uuid[16];
		unsigned char	sws_volume[16];
		__u32		padding[117];
		__u32		badpages[1];
	} info;
};
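
/*
 * Illustrative sketch (not part of this header): swapon-style detection
 * reads the signature stored in the last 10 bytes of the first page:
 *
 *	union swap_header *hdr = kmap_local_page(page);
 *	bool v2 = !memcmp(hdr->magic.magic, "SWAPSPACE2", 10);
 *	kunmap_local(hdr);
 */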

/*
 * current->reclaim_state points to one of these when a task is running
 * memory reclaim
 */
struct reclaim_state {
	/* pages reclaimed outside of LRU-based reclaim */
	unsigned long reclaimed;
#ifdef CONFIG_LRU_GEN
	/* per-thread mm walk data */
	struct lru_gen_mm_walk *mm_walk;
#endif
};

/*
 * mm_account_reclaimed_pages(): account reclaimed pages outside of LRU-based
 * reclaim
 * @pages: number of pages reclaimed
 *
 * If the current process is undergoing a reclaim operation, increment the
 * number of reclaimed pages by @pages.
 */
static inline void mm_account_reclaimed_pages(unsigned long pages)
{
	if (current->reclaim_state)
		current->reclaim_state->reclaimed += pages;
}
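
/*
 * Illustrative usage (hypothetical caller): a shrinker that frees pages
 * directly, outside the LRU path, reports them so reclaim sees the progress:
 *
 *	freed = free_my_cache_pages();		// hypothetical helper
 *	mm_account_reclaimed_pages(freed);
 */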

#ifdef __KERNEL__

struct address_space;
struct sysinfo;
struct writeback_control;
struct zone;

/*
 * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
 * disk blocks.  An rbtree of swap extents maps the entire swapfile (where the
 * term `swapfile' refers to either a blockdevice or an IS_REG file).  Apart
 * from setup, they're handled identically.
 *
 * We always assume that blocks are of size PAGE_SIZE.
 */
struct swap_extent {
	struct rb_node rb_node;
	pgoff_t start_page;
	pgoff_t nr_pages;
	sector_t start_block;
};

/*
 * Max bad pages in the new format..
 */
#define MAX_SWAP_BADPAGES \
	((offsetof(union swap_header, magic.magic) - \
	  offsetof(union swap_header, info.badpages)) / sizeof(int))
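
/*
 * Worked example for 4 KiB pages: offsetof(magic.magic) is 4096 - 10 = 4086,
 * offsetof(info.badpages) is 1024 + 3*4 + 16 + 16 + 117*4 = 1536, so
 * MAX_SWAP_BADPAGES = (4086 - 1536) / sizeof(int) = 637.
 */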

enum {
	SWP_USED	= (1 << 0),	/* is slot in swap_info[] used? */
	SWP_WRITEOK	= (1 << 1),	/* ok to write to this swap? */
	SWP_DISCARDABLE = (1 << 2),	/* blkdev supports discard */
	SWP_DISCARDING	= (1 << 3),	/* now discarding a free cluster */
	SWP_SOLIDSTATE	= (1 << 4),	/* blkdev seeks are cheap */
	SWP_CONTINUED	= (1 << 5),	/* swap_map has count continuation */
	SWP_BLKDEV	= (1 << 6),	/* it's a block device */
	SWP_ACTIVATED	= (1 << 7),	/* set after swap_activate success */
	SWP_FS_OPS	= (1 << 8),	/* swapfile operations go through fs */
	SWP_AREA_DISCARD = (1 << 9),	/* single-time swap area discards */
	SWP_PAGE_DISCARD = (1 << 10),	/* freed swap page-cluster discards */
	SWP_STABLE_WRITES = (1 << 11),	/* no overwrite PG_writeback pages */
	SWP_SYNCHRONOUS_IO = (1 << 12),	/* synchronous IO is efficient */
	/* add others here before... */
};

#define SWAP_CLUSTER_MAX 32UL
#define SWAP_CLUSTER_MAX_SKIPPED (SWAP_CLUSTER_MAX << 10)
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX

/* Bit flag in swap_map */
#define SWAP_HAS_CACHE	0x40	/* Flag page is cached, in first swap_map */
#define COUNT_CONTINUED	0x80	/* Flag swap_map continuation for full count */

/* Special value in first swap_map */
#define SWAP_MAP_MAX	0x3e	/* Max count */
#define SWAP_MAP_BAD	0x3f	/* Note page is bad */
#define SWAP_MAP_SHMEM	0xbf	/* Owned by shmem/tmpfs */

/* Special value in each swap_map continuation */
#define SWAP_CONT_MAX	0x7f	/* Max count */
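
/*
 * Illustrative decoding of a swap_map byte under the values above: 0x01 is
 * a single pte reference; 0x41 (0x01 | SWAP_HAS_CACHE) is one reference
 * plus a swapcache page; 0xbe (COUNT_CONTINUED | SWAP_MAP_MAX) means the
 * count overflowed in place and continues in a page attached via
 * add_swap_count_continuation().
 */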

/*
 * The first page in the swap file is the swap header, which is always marked
 * bad to prevent it from being allocated as an entry. This also prevents the
 * cluster to which it belongs being marked free. Therefore 0 is safe to use as
 * a sentinel to indicate an entry is not valid.
 */
#define SWAP_ENTRY_INVALID	0

#ifdef CONFIG_THP_SWAP
#define SWAP_NR_ORDERS		(PMD_ORDER + 1)
#else
#define SWAP_NR_ORDERS		1
#endif
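
/*
 * Example: on x86-64 with 4 KiB pages, PMD_ORDER is 9, so CONFIG_THP_SWAP
 * makes the per-device cluster lists track orders 0..9 (SWAP_NR_ORDERS ==
 * 10); without it only order 0 is tracked.
 */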

/*
 * We keep using the same cluster for a rotational device so I/O will be
 * sequential. The purpose is to optimize swap throughput on these devices.
 */
struct swap_sequential_cluster {
	unsigned int next[SWAP_NR_ORDERS]; /* Likely next allocation offset */
};

/*
 * The in-memory structure used to track swap areas.
 */
struct swap_info_struct {
	struct percpu_ref users;	/* indicate and keep swap device valid. */
	unsigned long	flags;		/* SWP_USED etc: see above */
	signed short	prio;		/* swap priority of this type */
	struct plist_node list;		/* entry in swap_active_head */
	signed char	type;		/* strange name for an index */
	unsigned int	max;		/* extent of the swap_map */
	unsigned char *swap_map;	/* vmalloc'ed array of usage counts */
	unsigned long *zeromap;		/* kvmalloc'ed bitmap to track zero pages */
	struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
	struct list_head free_clusters; /* free clusters list */
	struct list_head full_clusters; /* full clusters list */
	struct list_head nonfull_clusters[SWAP_NR_ORDERS];
					/* list of clusters that contain at least one free slot */
	struct list_head frag_clusters[SWAP_NR_ORDERS];
					/* list of clusters that are fragmented or contended */
	unsigned int pages;		/* total of usable pages of swap */
	atomic_long_t inuse_pages;	/* number of those currently in use */
	struct swap_sequential_cluster *global_cluster; /* Use one global cluster for rotating device */
	spinlock_t global_cluster_lock;	/* Serialize usage of global cluster */
	struct rb_root swap_extent_root;/* root of the swap extent rbtree */
	struct block_device *bdev;	/* swap device or bdev of swap file */
	struct file *swap_file;		/* seldom referenced */
	struct completion comp;		/* seldom referenced */
	spinlock_t lock;		/*
					 * protect map scan related fields like
					 * swap_map, inuse_pages and all cluster
					 * lists. Other fields are only changed
					 * at swapon/swapoff, so are protected
					 * by swap_lock. Changing flags requires
					 * holding this lock and swap_lock; if
					 * both are needed, take swap_lock
					 * first.
					 */
	spinlock_t cont_lock;		/*
					 * protect swap count continuation page
					 * list.
					 */
	struct work_struct discard_work; /* discard worker */
	struct work_struct reclaim_work; /* reclaim worker */
	struct list_head discard_clusters; /* discard clusters list */
	struct plist_node avail_list;	/* entry in swap_avail_head */
};

static inline swp_entry_t page_swap_entry(struct page *page)
{
	struct folio *folio = page_folio(page);
	swp_entry_t entry = folio->swap;

	entry.val += folio_page_idx(folio, page);
	return entry;
}
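
/*
 * Illustrative behaviour: the swap offset occupies the low bits of
 * entry.val, so for the third page of a swapcache folio whose folio->swap
 * is (type 1, offset 0x100), page_swap_entry() yields (type 1, offset
 * 0x102).
 */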

/* linux/mm/workingset.c */
bool workingset_test_recent(void *shadow, bool file, bool *workingset,
				bool flush);
void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg);
void workingset_refault(struct folio *folio, void *shadow);
void workingset_activation(struct folio *folio);

/* linux/mm/page_alloc.c */
extern unsigned long totalreserve_pages;

/* Definition of global_zone_page_state not available yet */
#define nr_free_pages() global_zone_page_state(NR_FREE_PAGES)


/* linux/mm/swap.c */
void lru_note_cost_unlock_irq(struct lruvec *lruvec, bool file,
		unsigned int nr_io, unsigned int nr_rotated)
	__releases(lruvec->lru_lock);
void lru_note_cost_refault(struct folio *);
void folio_add_lru(struct folio *);
void folio_add_lru_vma(struct folio *, struct vm_area_struct *);
void mark_page_accessed(struct page *);
void folio_mark_accessed(struct folio *);

static inline bool folio_may_be_lru_cached(struct folio *folio)
{
	/*
	 * Holding PMD-sized folios in the per-CPU LRU cache unbalances
	 * accounting. Holding small numbers of low-order mTHP folios there
	 * would be sensible, but nobody has implemented and tested that yet.
	 */
	return !folio_test_large(folio);
}

extern atomic_t lru_disable_count;

static inline bool lru_cache_disabled(void)
{
	return atomic_read(&lru_disable_count);
}

static inline void lru_cache_enable(void)
{
	atomic_dec(&lru_disable_count);
}

extern void lru_cache_disable(void);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_cpu_zone(struct zone *zone);
extern void lru_add_drain_all(void);
void folio_deactivate(struct folio *folio);
void folio_mark_lazyfree(struct folio *folio);
extern void swap_setup(void);

/* linux/mm/vmscan.c */
extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
					gfp_t gfp_mask, nodemask_t *mask);

#define MEMCG_RECLAIM_MAY_SWAP (1 << 1)
#define MEMCG_RECLAIM_PROACTIVE (1 << 2)
#define MIN_SWAPPINESS 0
#define MAX_SWAPPINESS 200

/* Just reclaim from anon folios in proactive memory reclaim */
#define SWAPPINESS_ANON_ONLY (MAX_SWAPPINESS + 1)

extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
						  unsigned long nr_pages,
						  gfp_t gfp_mask,
						  unsigned int reclaim_options,
						  int *swappiness);
extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem,
						gfp_t gfp_mask, bool noswap,
						pg_data_t *pgdat,
						unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
long remove_mapping(struct address_space *mapping, struct folio *folio);

#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
extern int reclaim_register_node(struct node *node);
extern void reclaim_unregister_node(struct node *node);

#else

static inline int reclaim_register_node(struct node *node)
{
	return 0;
}

static inline void reclaim_unregister_node(struct node *node)
{
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */

#ifdef CONFIG_NUMA
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
#endif

void check_move_unevictable_folios(struct folio_batch *fbatch);

extern void __meminit kswapd_run(int nid);
extern void __meminit kswapd_stop(int nid);

#ifdef CONFIG_SWAP

int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
		unsigned long nr_pages, sector_t start_block);
int generic_swapfile_activate(struct swap_info_struct *, struct file *,
		sector_t *);

static inline unsigned long total_swapcache_pages(void)
{
	return global_node_page_state(NR_SWAPCACHE);
}

void free_swap_cache(struct folio *folio);
void free_folio_and_swap_cache(struct folio *folio);
void free_pages_and_swap_cache(struct encoded_page **, int);
/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;
extern atomic_t nr_rotate_swap;

/* Swap 50% full? Release swapcache more aggressively.. */
static inline bool vm_swap_full(void)
{
	return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
}
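
/*
 * Worked example: with 262144 pages (1 GiB) of total swap and 100000 pages
 * still free, 100000 * 2 < 262144, so vm_swap_full() is true and reclaim
 * drops swapcache copies more aggressively.
 */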

static inline long get_nr_swap_pages(void)
{
	return atomic_long_read(&nr_swap_pages);
}

extern void si_swapinfo(struct sysinfo *);
int folio_alloc_swap(struct folio *folio);
bool folio_free_swap(struct folio *folio);
void put_swap_folio(struct folio *folio, swp_entry_t entry);
extern swp_entry_t get_swap_page_of_type(int);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t, int);
extern int swap_duplicate(swp_entry_t);
extern int swapcache_prepare(swp_entry_t entry, int nr);
extern void swap_free_nr(swp_entry_t entry, int nr_pages);
extern void free_swap_and_cache_nr(swp_entry_t entry, int nr);
int swap_type_of(dev_t device, sector_t offset);
int find_first_swap(dev_t *device);
extern unsigned int count_swap_pages(int, int);
extern sector_t swapdev_block(int, pgoff_t);
extern int __swap_count(swp_entry_t entry);
extern bool swap_entry_swapped(struct swap_info_struct *si, swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
struct backing_dev_info;
extern struct swap_info_struct *get_swap_device(swp_entry_t entry);
sector_t swap_folio_sector(struct folio *folio);

static inline void put_swap_device(struct swap_info_struct *si)
{
	percpu_ref_put(&si->users);
}
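
/*
 * Illustrative usage (sketch): pin the device across any access to its
 * metadata, dropping the reference when done:
 *
 *	si = get_swap_device(entry);
 *	if (!si)
 *		return;		// raced with swapoff, entry no longer valid
 *	... use si ...
 *	put_swap_device(si);
 */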

#else /* CONFIG_SWAP */
static inline struct swap_info_struct *get_swap_device(swp_entry_t entry)
{
	return NULL;
}

static inline void put_swap_device(struct swap_info_struct *si)
{
}

#define get_nr_swap_pages()			0L
#define total_swap_pages			0L
#define total_swapcache_pages()			0UL
#define vm_swap_full()				0

#define si_swapinfo(val) \
	do { (val)->freeswap = (val)->totalswap = 0; } while (0)
#define free_folio_and_swap_cache(folio) \
	folio_put(folio)
#define free_pages_and_swap_cache(pages, nr) \
	release_pages((pages), (nr));

static inline void free_swap_and_cache_nr(swp_entry_t entry, int nr)
{
}

static inline void free_swap_cache(struct folio *folio)
{
}

static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
	return 0;
}

static inline void swap_shmem_alloc(swp_entry_t swp, int nr)
{
}

static inline int swap_duplicate(swp_entry_t swp)
{
	return 0;
}

static inline int swapcache_prepare(swp_entry_t swp, int nr)
{
	return 0;
}

static inline void swap_free_nr(swp_entry_t entry, int nr_pages)
{
}

static inline void put_swap_folio(struct folio *folio, swp_entry_t swp)
{
}

static inline int __swap_count(swp_entry_t entry)
{
	return 0;
}

static inline bool swap_entry_swapped(struct swap_info_struct *si, swp_entry_t entry)
{
	return false;
}

static inline int swp_swapcount(swp_entry_t entry)
{
	return 0;
}

static inline int folio_alloc_swap(struct folio *folio)
{
	return -EINVAL;
}

static inline bool folio_free_swap(struct folio *folio)
{
	return false;
}

static inline int add_swap_extent(struct swap_info_struct *sis,
				  unsigned long start_page,
				  unsigned long nr_pages, sector_t start_block)
{
	return -EINVAL;
}
#endif /* CONFIG_SWAP */

static inline void free_swap_and_cache(swp_entry_t entry)
{
	free_swap_and_cache_nr(entry, 1);
}

static inline void swap_free(swp_entry_t entry)
{
	swap_free_nr(entry, 1);
}

#ifdef CONFIG_MEMCG
static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
	/* Cgroup2 doesn't have per-cgroup swappiness */
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return READ_ONCE(vm_swappiness);

	/* root ? */
	if (mem_cgroup_disabled() || mem_cgroup_is_root(memcg))
		return READ_ONCE(vm_swappiness);

	return READ_ONCE(memcg->swappiness);
}
#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
	return READ_ONCE(vm_swappiness);
}
#endif
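
/*
 * Illustrative behaviour of mem_cgroup_swappiness(): on the cgroup2
 * (default) hierarchy it always returns the global vm_swappiness; on
 * cgroup1, a non-root memcg with memory.swappiness set to e.g. 10 returns
 * 10 regardless of the global value.
 */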

#if defined(CONFIG_SWAP) && defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
void __folio_throttle_swaprate(struct folio *folio, gfp_t gfp);
static inline void folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
{
	if (mem_cgroup_disabled())
		return;
	__folio_throttle_swaprate(folio, gfp);
}
#else
static inline void folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
{
}
#endif

#if defined(CONFIG_MEMCG) && defined(CONFIG_SWAP)
int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry);
static inline int mem_cgroup_try_charge_swap(struct folio *folio,
		swp_entry_t entry)
{
	if (mem_cgroup_disabled())
		return 0;
	return __mem_cgroup_try_charge_swap(folio, entry);
}

extern void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
static inline void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge_swap(entry, nr_pages);
}

extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
extern bool mem_cgroup_swap_full(struct folio *folio);
#else
static inline int mem_cgroup_try_charge_swap(struct folio *folio,
					     swp_entry_t entry)
{
	return 0;
}

static inline void mem_cgroup_uncharge_swap(swp_entry_t entry,
					    unsigned int nr_pages)
{
}

static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
	return get_nr_swap_pages();
}

static inline bool mem_cgroup_swap_full(struct folio *folio)
{
	return vm_swap_full();
}
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_SWAP_H */