/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/writeback.h
 */
#ifndef WRITEBACK_H
#define WRITEBACK_H

#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/fs.h>
#include <linux/flex_proportions.h>
#include <linux/backing-dev-defs.h>
#include <linux/blk_types.h>
#include <linux/pagevec.h>

struct bio;

DECLARE_PER_CPU(int, dirty_throttle_leaks);

/*
 * The global dirty threshold is normally equal to the global dirty limit,
 * except when the system suddenly allocates a lot of anonymous memory and
 * knocks down the global dirty threshold quickly, in which case the global
 * dirty limit will follow down slowly to prevent livelocking all dirtier tasks.
 */
#define DIRTY_SCOPE		8

struct backing_dev_info;

/*
 * fs/fs-writeback.c
 */
enum writeback_sync_modes {
	WB_SYNC_NONE,	/* Don't wait on anything */
	WB_SYNC_ALL,	/* Wait on every mapping */
};

/*
 * A control structure which tells the writeback code what to do. These are
 * always on the stack, and hence need no locking. They are always initialised
 * in a manner such that unspecified fields are set to zero.
 */
struct writeback_control {
	/* public fields that can be set and/or consumed by the caller: */
	long nr_to_write;		/* Write this many pages, and decrement
					   this for each page written */
	long pages_skipped;		/* Pages which were not written */

	/*
	 * For a_ops->writepages(): if start or end are non-zero then this is
	 * a hint that the filesystem need only write out the pages inside that
	 * byterange. The byte at `end' is included in the writeout request.
	 */
	loff_t range_start;
	loff_t range_end;

	enum writeback_sync_modes sync_mode;

	unsigned for_kupdate:1;		/* A kupdate writeback */
	unsigned for_background:1;	/* A background writeback */
	unsigned tagged_writepages:1;	/* tag-and-write to avoid livelock */
	unsigned range_cyclic:1;	/* range_start is cyclic */
	unsigned for_sync:1;		/* sync(2) WB_SYNC_ALL writeback */
	unsigned unpinned_netfs_wb:1;	/* Cleared I_PINNING_NETFS_WB */

	/*
	 * When writeback IOs are bounced through async layers, only the
	 * initial synchronous phase should be accounted towards inode
	 * cgroup ownership arbitration to avoid confusion. Later stages
	 * can set the following flag to disable the accounting.
	 */
	unsigned no_cgroup_owner:1;

	/* internal fields used by the ->writepages implementation: */
	struct folio_batch fbatch;
	pgoff_t index;
	int saved_err;

#ifdef CONFIG_CGROUP_WRITEBACK
	struct bdi_writeback *wb;	/* wb this writeback is issued under */
	struct inode *inode;		/* inode being written out */

	/* foreign inode detection, see wbc_detach_inode() */
	int wb_id;			/* current wb id */
	int wb_lcand_id;		/* last foreign candidate wb id */
	int wb_tcand_id;		/* this foreign candidate wb id */
	size_t wb_bytes;		/* bytes written by current wb */
	size_t wb_lcand_bytes;		/* bytes written by last candidate */
	size_t wb_tcand_bytes;		/* bytes written by this candidate */
#endif
};
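
/*
 * Illustrative sketch (not part of this header): because a writeback_control
 * lives on the caller's stack and unspecified fields default to zero, a
 * typical data-integrity caller, similar to __filemap_fdatawrite_range(),
 * only fills in what it needs:
 *
 *	struct writeback_control wbc = {
 *		.sync_mode	= WB_SYNC_ALL,
 *		.nr_to_write	= LONG_MAX,
 *		.range_start	= 0,
 *		.range_end	= LLONG_MAX,
 *	};
 *
 *	err = do_writepages(mapping, &wbc);
 */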

static inline blk_opf_t wbc_to_write_flags(struct writeback_control *wbc)
{
	blk_opf_t flags = 0;

	if (wbc->sync_mode == WB_SYNC_ALL)
		flags |= REQ_SYNC;
	else if (wbc->for_kupdate || wbc->for_background)
		flags |= REQ_BACKGROUND;

	return flags;
}

#ifdef CONFIG_CGROUP_WRITEBACK
#define wbc_blkcg_css(wbc) \
	((wbc)->wb ? (wbc)->wb->blkcg_css : blkcg_root_css)
#else
#define wbc_blkcg_css(wbc)	(blkcg_root_css)
#endif /* CONFIG_CGROUP_WRITEBACK */

/*
 * A wb_domain represents a domain that wb's (bdi_writeback's) belong to
 * and are measured against each other in. There always is one global
 * domain, global_wb_domain, that every wb in the system is a member of.
 * This allows measuring the relative bandwidth of each wb to distribute
 * dirtyable memory accordingly.
 */
struct wb_domain {
	spinlock_t lock;

	/*
	 * Scale the writeback cache size proportional to the relative
	 * writeout speed.
	 *
	 * We do this by keeping a floating proportion between BDIs, based
	 * on page writeback completions [end_page_writeback()]. Those
	 * devices that write out pages fastest will get the larger share,
	 * while the slower will get a smaller share.
	 *
	 * We use page writeout completions because we are interested in
	 * getting rid of dirty pages. Having them written out is the
	 * primary goal.
	 *
	 * We introduce a concept of time, a period over which we measure
	 * these events, because demand can/will vary over time. The length
	 * of this period itself is measured in page writeback completions.
	 */
	struct fprop_global completions;
	struct timer_list period_timer;	/* timer for aging of completions */
	unsigned long period_time;

	/*
	 * The dirtyable memory and dirty threshold could be suddenly
	 * knocked down by a large amount (eg. on the startup of KVM in a
	 * swapless system). This may throw the system into deep dirty
	 * exceeded state and throttle heavy/light dirtiers alike. To
	 * retain good responsiveness, maintain global_dirty_limit for
	 * tracking slowly down to the knocked down dirty threshold.
	 *
	 * Both fields are protected by ->lock.
	 */
	unsigned long dirty_limit_tstamp;
	unsigned long dirty_limit;
};

/**
 * wb_domain_size_changed - memory available to a wb_domain has changed
 * @dom: wb_domain of interest
 *
 * This function should be called when the amount of memory available to
 * @dom has changed. It resets @dom's dirty limit parameters to prevent
 * the past values which don't match the current configuration from skewing
 * dirty throttling. Without this, when memory size of a wb_domain is
 * greatly reduced, the dirty throttling logic may allow too many pages to
 * be dirtied leading to consecutive unnecessary OOMs and may get stuck in
 * that situation.
 */
static inline void wb_domain_size_changed(struct wb_domain *dom)
{
	spin_lock(&dom->lock);
	dom->dirty_limit_tstamp = jiffies;
	dom->dirty_limit = 0;
	spin_unlock(&dom->lock);
}
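
/*
 * Illustrative sketch (assumption: a private domain, such as the per-memcg
 * domains used by cgroup writeback): a wb_domain is set up with
 * wb_domain_init(), notified of sizing changes with wb_domain_size_changed()
 * and, under CONFIG_CGROUP_WRITEBACK, torn down with wb_domain_exit(), all
 * declared further down in this header:
 *
 *	struct wb_domain dom;
 *
 *	err = wb_domain_init(&dom, GFP_KERNEL);
 *	if (err)
 *		return err;
 *	...
 *	wb_domain_size_changed(&dom);	(available memory changed)
 *	...
 *	wb_domain_exit(&dom);
 */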

/*
 * fs/fs-writeback.c
 */
struct bdi_writeback;
void writeback_inodes_sb(struct super_block *, enum wb_reason reason);
void writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
			    enum wb_reason reason);
void try_to_writeback_inodes_sb(struct super_block *sb, enum wb_reason reason);
void sync_inodes_sb(struct super_block *);
void wakeup_flusher_threads(enum wb_reason reason);
void wakeup_flusher_threads_bdi(struct backing_dev_info *bdi,
				enum wb_reason reason);
void inode_wait_for_writeback(struct inode *inode);
void inode_io_list_del(struct inode *inode);

static inline xa_mark_t wbc_to_tag(struct writeback_control *wbc)
{
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		return PAGECACHE_TAG_TOWRITE;
	return PAGECACHE_TAG_DIRTY;
}
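
/*
 * Illustrative sketch: when wbc_to_tag() selects PAGECACHE_TAG_TOWRITE (i.e.
 * WB_SYNC_ALL or tagged_writepages), the usual pattern is to tag the pages
 * that are dirty now with tag_pages_for_writeback() (declared below) and then
 * walk only that tag, so pages dirtied during the walk cannot livelock it:
 *
 *	if (wbc_to_tag(wbc) == PAGECACHE_TAG_TOWRITE)
 *		tag_pages_for_writeback(mapping, start, end);
 *	...walk folios carrying wbc_to_tag(wbc) and write them out...
 */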

#ifdef CONFIG_CGROUP_WRITEBACK

#include <linux/cgroup.h>
#include <linux/bio.h>

void __inode_attach_wb(struct inode *inode, struct folio *folio);
void wbc_detach_inode(struct writeback_control *wbc);
void wbc_account_cgroup_owner(struct writeback_control *wbc, struct folio *folio,
			      size_t bytes);
int cgroup_writeback_by_id(u64 bdi_id, int memcg_id,
			   enum wb_reason reason, struct wb_completion *done);
void cgroup_writeback_umount(struct super_block *sb);
bool cleanup_offline_cgwb(struct bdi_writeback *wb);

/**
 * inode_attach_wb - associate an inode with its wb
 * @inode: inode of interest
 * @folio: folio being dirtied (may be NULL)
 *
 * If @inode doesn't have its wb, associate it with the wb matching the
 * memcg of @folio or, if @folio is NULL, %current. May be called w/ or w/o
 * @inode->i_lock.
 */
static inline void inode_attach_wb(struct inode *inode, struct folio *folio)
{
	if (!inode->i_wb)
		__inode_attach_wb(inode, folio);
}
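
/*
 * Illustrative sketch: this is normally called from the path that is about
 * to dirty a folio of @inode, so that wb selection reflects the memcg doing
 * the dirtying, e.g.:
 *
 *	inode_attach_wb(mapping->host, folio);
 *	...account the newly dirtied folio against inode_to_wb(inode)...
 */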

/**
 * inode_detach_wb - disassociate an inode from its wb
 * @inode: inode of interest
 *
 * @inode is being freed. Detach from its wb.
 */
static inline void inode_detach_wb(struct inode *inode)
{
	if (inode->i_wb) {
		WARN_ON_ONCE(!(inode_state_read_once(inode) & I_CLEAR));
		wb_put(inode->i_wb);
		inode->i_wb = NULL;
	}
}

void wbc_attach_fdatawrite_inode(struct writeback_control *wbc,
				 struct inode *inode);

/**
 * wbc_init_bio - writeback-specific initialization of bio
 * @wbc: writeback_control for the writeback in progress
 * @bio: bio to be initialized
 *
 * @bio is a part of the writeback in progress controlled by @wbc. Perform
 * writeback specific initialization. This is used to apply the cgroup
 * writeback context. Must be called after the bio has been associated with
 * a device.
 */
static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
{
	/*
	 * pageout() path doesn't attach @wbc to the inode being written
	 * out. This is intentional as we don't want the function to block
	 * behind a slow cgroup. Ultimately, we want pageout() to kick off
	 * regular writeback instead of writing things out itself.
	 */
	if (wbc->wb)
		bio_associate_blkg_from_css(bio, wbc->wb->blkcg_css);
}
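
/*
 * Illustrative sketch of how these helpers fit together in a filesystem
 * writeback path (assumes a single-folio bio; error handling is omitted and
 * bdev/folio come from the caller):
 *
 *	wbc_attach_fdatawrite_inode(wbc, inode);
 *	bio = bio_alloc(bdev, 1, REQ_OP_WRITE | wbc_to_write_flags(wbc),
 *			GFP_NOFS);
 *	wbc_init_bio(wbc, bio);			(after device association)
 *	bio_add_folio(bio, folio, folio_size(folio), 0);
 *	wbc_account_cgroup_owner(wbc, folio, folio_size(folio));
 *	submit_bio(bio);
 *	...
 *	wbc_detach_inode(wbc);
 */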

void inode_switch_wbs_work_fn(struct work_struct *work);

#else /* CONFIG_CGROUP_WRITEBACK */

static inline void inode_attach_wb(struct inode *inode, struct folio *folio)
{
}

static inline void inode_detach_wb(struct inode *inode)
{
}

static inline void wbc_attach_fdatawrite_inode(struct writeback_control *wbc,
					       struct inode *inode)
{
}

static inline void wbc_detach_inode(struct writeback_control *wbc)
{
}

static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
{
}

static inline void wbc_account_cgroup_owner(struct writeback_control *wbc,
					    struct folio *folio, size_t bytes)
{
}

static inline void cgroup_writeback_umount(struct super_block *sb)
{
}

#endif /* CONFIG_CGROUP_WRITEBACK */

/*
 * mm/page-writeback.c
 */
/* consolidated parameters for balance_dirty_pages() and its subroutines */
struct dirty_throttle_control {
#ifdef CONFIG_CGROUP_WRITEBACK
	struct wb_domain *dom;
	struct dirty_throttle_control *gdtc;	/* only set in memcg dtc's */
#endif
	struct bdi_writeback *wb;
	struct fprop_local_percpu *wb_completions;

	unsigned long avail;		/* dirtyable */
	unsigned long dirty;		/* file_dirty + write + nfs */
	unsigned long thresh;		/* dirty threshold */
	unsigned long bg_thresh;	/* dirty background threshold */
	unsigned long limit;		/* hard dirty limit */

	unsigned long wb_dirty;		/* per-wb counterparts */
	unsigned long wb_thresh;
	unsigned long wb_bg_thresh;

	unsigned long pos_ratio;
	bool freerun;
	bool dirty_exceeded;
};

void laptop_io_completion(struct backing_dev_info *info);
void laptop_sync_completion(void);
void laptop_mode_timer_fn(struct timer_list *t);
bool node_dirty_ok(struct pglist_data *pgdat);
int wb_domain_init(struct wb_domain *dom, gfp_t gfp);
#ifdef CONFIG_CGROUP_WRITEBACK
void wb_domain_exit(struct wb_domain *dom);
#endif

extern struct wb_domain global_wb_domain;

/* These are exported to sysctl. */
extern unsigned int dirty_writeback_interval;
extern unsigned int dirty_expire_interval;
extern int laptop_mode;

void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty);
unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh);
unsigned long cgwb_calc_thresh(struct bdi_writeback *wb);

void wb_update_bandwidth(struct bdi_writeback *wb);

/* Invoke balance dirty pages in async mode. */
#define BDP_ASYNC 0x0001

void balance_dirty_pages_ratelimited(struct address_space *mapping);
int balance_dirty_pages_ratelimited_flags(struct address_space *mapping,
					  unsigned int flags);
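
/*
 * Illustrative sketch: buffered-write paths call the ratelimited helper after
 * dirtying folios; with BDP_ASYNC the _flags variant is expected not to sleep
 * and to report -EAGAIN instead when throttling would be needed (assumption
 * drawn from its non-blocking, IOCB_NOWAIT-style callers):
 *
 *	balance_dirty_pages_ratelimited(mapping);
 *
 * or, for a non-blocking write:
 *
 *	ret = balance_dirty_pages_ratelimited_flags(mapping, BDP_ASYNC);
 *	if (ret == -EAGAIN)
 *		...back off and retry later...
 */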

bool wb_over_bg_thresh(struct bdi_writeback *wb);

struct folio *writeback_iter(struct address_space *mapping,
		struct writeback_control *wbc, struct folio *folio, int *error);
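
/*
 * Illustrative sketch of a ->writepages() implementation built on
 * writeback_iter(); the iteration starts from a NULL folio and the result of
 * writing each folio is fed back through @error on the next call
 * (example_write_folio() is a hypothetical per-folio helper):
 *
 *	static int example_writepages(struct address_space *mapping,
 *				      struct writeback_control *wbc)
 *	{
 *		struct folio *folio = NULL;
 *		int error = 0;
 *
 *		while ((folio = writeback_iter(mapping, wbc, folio, &error)))
 *			error = example_write_folio(folio, wbc);
 *		return error;
 *	}
 */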

int do_writepages(struct address_space *mapping, struct writeback_control *wbc);
void writeback_set_ratelimit(void);
void tag_pages_for_writeback(struct address_space *mapping,
			     pgoff_t start, pgoff_t end);

bool filemap_dirty_folio(struct address_space *mapping, struct folio *folio);
bool folio_redirty_for_writepage(struct writeback_control *, struct folio *);
bool redirty_page_for_writepage(struct writeback_control *, struct page *);

void sb_mark_inode_writeback(struct inode *inode);
void sb_clear_inode_writeback(struct inode *inode);

/*
 * 4MB minimal write chunk size
 */
#define MIN_WRITEBACK_PAGES	(4096UL >> (PAGE_SHIFT - 10))
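
/*
 * Worked out: the constant is 4096 KiB expressed in pages; with 4 KiB pages
 * (PAGE_SHIFT == 12) that is 4096 >> 2 == 1024 pages, i.e. 4 MB.
 */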

#endif /* WRITEBACK_H */