/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/cacheflush.h>
#include <linux/kmsan.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>

#include "highmem-internal.h"

/**
 * kmap - Map a page for long term usage
 * @page: Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Can only be invoked from preemptible task context because on 32bit
 * systems with CONFIG_HIGHMEM enabled this function might sleep.
 *
 * For systems with CONFIG_HIGHMEM=n and for pages in the low memory area
 * this returns the virtual address of the direct kernel mapping.
 *
 * The returned virtual address is globally visible and valid up to the
 * point where it is unmapped via kunmap(). The pointer can be handed to
 * other contexts.
 *
 * For highmem pages on 32bit systems this can be slow as the mapping space
 * is limited and protected by a global lock. In case that there is no
 * mapping slot available the function blocks until a slot is released via
 * kunmap().
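 *
 * A minimal usage sketch (illustrative only; assumes the caller already
 * holds a reference on the page)::
 *
 *	void *vaddr = kmap(page);
 *
 *	memset(vaddr, 0, PAGE_SIZE);
 *	kunmap(page);
 *
 * Note that kunmap() takes the page, not the returned address.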
 */
static inline void *kmap(struct page *page);

/**
 * kunmap - Unmap the virtual address mapped by kmap()
 * @page: Pointer to the page which was mapped by kmap()
 *
 * Counterpart to kmap(). A NOOP for CONFIG_HIGHMEM=n and for mappings of
 * pages in the low memory area.
 */
static inline void kunmap(const struct page *page);

/**
 * kmap_to_page - Get the page for a kmap'ed address
 * @addr: The address to look up
 *
 * Returns: The page which is mapped to @addr.
 */
static inline struct page *kmap_to_page(void *addr);

/**
 * kmap_flush_unused - Flush all unused kmap mappings in order to
 *		       remove stray mappings
 */
static inline void kmap_flush_unused(void);

/**
 * kmap_local_page - Map a page for temporary usage
 * @page: Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Can be invoked from any context, including interrupts.
 *
 * Requires careful handling when nesting multiple mappings because the map
 * management is stack based. The unmap has to be in the reverse order of
 * the map operation::
 *
 *	addr1 = kmap_local_page(page1);
 *	addr2 = kmap_local_page(page2);
 *	...
 *	kunmap_local(addr2);
 *	kunmap_local(addr1);
 *
 * Unmapping addr1 before addr2 is invalid and causes malfunction.
 *
 * Contrary to kmap() mappings the mapping is only valid in the context of
 * the caller and cannot be handed to other contexts.
 *
 * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
 * virtual address of the direct mapping. Only real highmem pages are
 * temporarily mapped.
 *
 * While kmap_local_page() is significantly faster than kmap() for the highmem
 * case it comes with restrictions about the pointer validity.
 *
 * On HIGHMEM enabled systems mapping a highmem page has the side effect of
 * disabling migration in order to keep the virtual address stable across
 * preemption. No caller of kmap_local_page() can rely on this side effect.
 */
static inline void *kmap_local_page(const struct page *page);

/**
 * kmap_local_folio - Map a page in this folio for temporary usage
 * @folio: The folio containing the page.
 * @offset: The byte offset within the folio which identifies the page.
 *
 * Requires careful handling when nesting multiple mappings because the map
 * management is stack based. The unmap has to be in the reverse order of
 * the map operation::
 *
 *	addr1 = kmap_local_folio(folio1, offset1);
 *	addr2 = kmap_local_folio(folio2, offset2);
 *	...
 *	kunmap_local(addr2);
 *	kunmap_local(addr1);
 *
 * Unmapping addr1 before addr2 is invalid and causes malfunction.
 *
 * Contrary to kmap() mappings the mapping is only valid in the context of
 * the caller and cannot be handed to other contexts.
 *
 * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
 * virtual address of the direct mapping. Only real highmem pages are
 * temporarily mapped.
 *
 * While it is significantly faster than kmap() for the highmem case it
 * comes with restrictions about the pointer validity.
 *
 * On HIGHMEM enabled systems mapping a highmem page has the side effect of
 * disabling migration in order to keep the virtual address stable across
 * preemption. No caller of kmap_local_folio() can rely on this side effect.
 *
 * Context: Can be invoked from any context.
 * Return: The virtual address of @offset.
 */
static inline void *kmap_local_folio(const struct folio *folio, size_t offset);

/**
 * kmap_atomic - Atomically map a page for temporary usage - Deprecated!
 * @page: Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * In fact a wrapper around kmap_local_page() which also disables pagefaults
 * and, depending on PREEMPT_RT configuration, also CPU migration and
 * preemption. Therefore users should not count on the latter two side effects.
 *
 * Mappings should always be released by kunmap_atomic().
 *
 * Do not use in new code. Use kmap_local_page() instead.
 *
 * It is used in atomic context when code wants to access the contents of a
 * page that might be allocated from high memory (see __GFP_HIGHMEM), for
 * example a page in the pagecache. The API has two functions, and they
 * can be used in a manner similar to the following::
 *
 *	// Find the page of interest.
 *	struct page *page = find_get_page(mapping, offset);
 *
 *	// Gain access to the contents of that page.
 *	void *vaddr = kmap_atomic(page);
 *
 *	// Do something to the contents of that page.
 *	memset(vaddr, 0, PAGE_SIZE);
 *
 *	// Unmap that page.
 *	kunmap_atomic(vaddr);
 *
 * Note that the kunmap_atomic() call takes the result of the kmap_atomic()
 * call, not the argument.
 *
 * If you need to map two pages because you want to copy from one page to
 * another you need to keep the kmap_atomic calls strictly nested, like::
 *
 *	vaddr1 = kmap_atomic(page1);
 *	vaddr2 = kmap_atomic(page2);
 *
 *	memcpy(vaddr1, vaddr2, PAGE_SIZE);
 *
 *	kunmap_atomic(vaddr2);
 *	kunmap_atomic(vaddr1);
 */
static inline void *kmap_atomic(const struct page *page);

/* Highmem related interfaces for management code */
static inline unsigned long nr_free_highpages(void);
static inline unsigned long totalhigh_pages(void);

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif

/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_local_page(page);
	clear_user_page(addr, vaddr, page);
	kunmap_local(addr);
}
#endif

#ifndef vma_alloc_zeroed_movable_folio
/**
 * vma_alloc_zeroed_movable_folio - Allocate a zeroed page for a VMA.
 * @vma: The VMA the page is to be allocated for.
 * @vaddr: The virtual address the page will be inserted into.
 *
 * This function will allocate a page suitable for inserting into this
 * VMA at this virtual address. It may be allocated from highmem or
 * the movable zone. An architecture may provide its own implementation.
 *
 * Return: A folio containing one allocated and zeroed page or NULL if
 * we are out of memory.
 */
static inline
struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
				   unsigned long vaddr)
{
	struct folio *folio;

	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr);
	if (folio && user_alloc_needs_zeroing())
		clear_user_highpage(&folio->page, vaddr);

	return folio;
}
#endif

static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_local_page(page);
	clear_page(kaddr);
	kunmap_local(kaddr);
}

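/*
 * Zero a page through an address with the KASAN tag reset, so the
 * accesses are not checked against the memory tags currently assigned
 * to the page (relevant for hardware tag-based KASAN).
 */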
static inline void clear_highpage_kasan_tagged(struct page *page)
{
	void *kaddr = kmap_local_page(page);

	clear_page(kasan_reset_tag(kaddr));
	kunmap_local(kaddr);
}

#ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGES

/* Return false to let people know we did not initialize the pages */
static inline bool tag_clear_highpages(struct page *page, int numpages)
{
	return false;
}

#endif

/*
 * If we pass in a base or tail page, we can zero up to PAGE_SIZE.
 * If we pass in a head page, we can zero up to the size of the compound page.
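 *
 * For example (illustrative), zeroing everything in a base page except
 * the bytes [off, off + len) takes a single call:
 *
 *	zero_user_segments(page, 0, off, off + len, PAGE_SIZE);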
 */
#ifdef CONFIG_HIGHMEM
void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
		unsigned start2, unsigned end2);
#else
static inline void zero_user_segments(struct page *page,
		unsigned start1, unsigned end1,
		unsigned start2, unsigned end2)
{
	void *kaddr = kmap_local_page(page);
	unsigned int i;

	BUG_ON(end1 > page_size(page) || end2 > page_size(page));

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);

	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_local(kaddr);
	for (i = 0; i < compound_nr(page); i++)
		flush_dcache_page(page + i);
}
#endif

static inline void zero_user_segment(struct page *page,
	unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_local_page(from);
	vto = kmap_local_page(to);
	copy_user_page(vto, vfrom, vaddr, to);
	kmsan_unpoison_memory(page_address(to), PAGE_SIZE);
	kunmap_local(vto);
	kunmap_local(vfrom);
}

#endif

#ifndef __HAVE_ARCH_COPY_HIGHPAGE

static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_local_page(from);
	vto = kmap_local_page(to);
	copy_page(vto, vfrom);
	kmsan_copy_page_meta(to, from);
	kunmap_local(vto);
	kunmap_local(vfrom);
}

#endif

#ifdef copy_mc_to_kernel
/*
 * If architecture supports machine check exception handling, define the
 * #MC versions of copy_user_highpage and copy_highpage. They copy a memory
 * page with #MC in source page (@from) handled, and return the number
 * of bytes not copied if there was a #MC, otherwise 0 for success.
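 *
 * Callers typically treat a non-zero return as a poisoned source page,
 * e.g. (sketch only, not taken from a specific call site):
 *
 *	if (copy_mc_user_highpage(dst, src, addr, vma))
 *		return -EHWPOISON;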
 */
static inline int copy_mc_user_highpage(struct page *to, struct page *from,
					unsigned long vaddr, struct vm_area_struct *vma)
{
	unsigned long ret;
	char *vfrom, *vto;

	vfrom = kmap_local_page(from);
	vto = kmap_local_page(to);
	ret = copy_mc_to_kernel(vto, vfrom, PAGE_SIZE);
	if (!ret)
		kmsan_unpoison_memory(page_address(to), PAGE_SIZE);
	kunmap_local(vto);
	kunmap_local(vfrom);

	if (ret)
		memory_failure_queue(page_to_pfn(from), 0);

	return ret;
}

static inline int copy_mc_highpage(struct page *to, struct page *from)
{
	unsigned long ret;
	char *vfrom, *vto;

	vfrom = kmap_local_page(from);
	vto = kmap_local_page(to);
	ret = copy_mc_to_kernel(vto, vfrom, PAGE_SIZE);
	if (!ret)
		kmsan_copy_page_meta(to, from);
	kunmap_local(vto);
	kunmap_local(vfrom);

	if (ret)
		memory_failure_queue(page_to_pfn(from), 0);

	return ret;
}
#else
static inline int copy_mc_user_highpage(struct page *to, struct page *from,
					unsigned long vaddr, struct vm_area_struct *vma)
{
	copy_user_highpage(to, from, vaddr, vma);
	return 0;
}

static inline int copy_mc_highpage(struct page *to, struct page *from)
{
	copy_highpage(to, from);
	return 0;
}
#endif

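/*
 * Copy @len bytes from @src_page + @src_off to @dst_page + @dst_off.
 * Both ranges must lie within a single page.
 */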
static inline void memcpy_page(struct page *dst_page, size_t dst_off,
			       struct page *src_page, size_t src_off,
			       size_t len)
{
	char *dst = kmap_local_page(dst_page);
	char *src = kmap_local_page(src_page);

	VM_BUG_ON(dst_off + len > PAGE_SIZE || src_off + len > PAGE_SIZE);
	memcpy(dst + dst_off, src + src_off, len);
	kunmap_local(src);
	kunmap_local(dst);
}

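/*
 * Copy @len bytes between two folios.  The copy proceeds in chunks so
 * that a kmap'ed window never crosses a page boundary of a highmem
 * folio.
 */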
static inline void memcpy_folio(struct folio *dst_folio, size_t dst_off,
		struct folio *src_folio, size_t src_off, size_t len)
{
	VM_BUG_ON(dst_off + len > folio_size(dst_folio));
	VM_BUG_ON(src_off + len > folio_size(src_folio));

	do {
		char *dst = kmap_local_folio(dst_folio, dst_off);
		const char *src = kmap_local_folio(src_folio, src_off);
		size_t chunk = len;

		if (folio_test_highmem(dst_folio) &&
		    chunk > PAGE_SIZE - offset_in_page(dst_off))
			chunk = PAGE_SIZE - offset_in_page(dst_off);
		if (folio_test_highmem(src_folio) &&
		    chunk > PAGE_SIZE - offset_in_page(src_off))
			chunk = PAGE_SIZE - offset_in_page(src_off);
		memcpy(dst, src, chunk);
		kunmap_local(src);
		kunmap_local(dst);

		dst_off += chunk;
		src_off += chunk;
		len -= chunk;
	} while (len > 0);
}

static inline void memset_page(struct page *page, size_t offset, int val,
			       size_t len)
{
	char *addr = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memset(addr + offset, val, len);
	kunmap_local(addr);
}

static inline void memcpy_from_page(char *to, struct page *page,
				    size_t offset, size_t len)
{
	char *from = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memcpy(to, from + offset, len);
	kunmap_local(from);
}

static inline void memcpy_to_page(struct page *page, size_t offset,
				  const char *from, size_t len)
{
	char *to = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memcpy(to + offset, from, len);
	flush_dcache_page(page);
	kunmap_local(to);
}

static inline void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memset(addr + offset, 0, len);
	flush_dcache_page(page);
	kunmap_local(addr);
}

/**
 * memcpy_from_folio - Copy a range of bytes from a folio.
 * @to: The memory to copy to.
 * @folio: The folio to read from.
 * @offset: The first byte in the folio to read.
 * @len: The number of bytes to copy.
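 *
 * A sketch of copying a small record out of the page cache ("pos" and
 * "buf" are illustrative)::
 *
 *	char buf[64];
 *
 *	memcpy_from_folio(buf, folio, offset_in_folio(folio, pos), sizeof(buf));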
 */
static inline void memcpy_from_folio(char *to, struct folio *folio,
		size_t offset, size_t len)
{
	VM_BUG_ON(offset + len > folio_size(folio));

	do {
		const char *from = kmap_local_folio(folio, offset);
		size_t chunk = len;

		if (folio_test_partial_kmap(folio) &&
		    chunk > PAGE_SIZE - offset_in_page(offset))
			chunk = PAGE_SIZE - offset_in_page(offset);
		memcpy(to, from, chunk);
		kunmap_local(from);

		to += chunk;
		offset += chunk;
		len -= chunk;
	} while (len > 0);
}

/**
 * memcpy_to_folio - Copy a range of bytes to a folio.
 * @folio: The folio to write to.
 * @offset: The first byte in the folio to store to.
 * @from: The memory to copy from.
 * @len: The number of bytes to copy.
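 *
 * The write counterpart of memcpy_from_folio(). For example, a
 * filesystem copying inline data into the page cache might do
 * (illustrative only)::
 *
 *	memcpy_to_folio(folio, 0, inline_data, inline_len);
 *	folio_mark_uptodate(folio);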
 */
static inline void memcpy_to_folio(struct folio *folio, size_t offset,
		const char *from, size_t len)
{
	VM_BUG_ON(offset + len > folio_size(folio));

	do {
		char *to = kmap_local_folio(folio, offset);
		size_t chunk = len;

		if (folio_test_partial_kmap(folio) &&
		    chunk > PAGE_SIZE - offset_in_page(offset))
			chunk = PAGE_SIZE - offset_in_page(offset);
		memcpy(to, from, chunk);
		kunmap_local(to);

		from += chunk;
		offset += chunk;
		len -= chunk;
	} while (len > 0);

	flush_dcache_folio(folio);
}

/**
 * folio_zero_tail - Zero the tail of a folio.
 * @folio: The folio to zero.
 * @offset: The byte offset in the folio to start zeroing at.
 * @kaddr: The address the folio is currently mapped to.
 *
 * If you have already used kmap_local_folio() to map a folio, written
 * some data to it and now need to zero the end of the folio (and flush
 * the dcache), you can use this function. If you do not have the
 * folio kmapped (e.g. the folio has been partially populated by DMA),
 * use folio_zero_range() or folio_zero_segment() instead.
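 *
 * A sketch (illustrative; assumes @off bytes were just written at the
 * start of the folio and do not cross a page boundary)::
 *
 *	char *kaddr = kmap_local_folio(folio, 0);
 *
 *	memcpy(kaddr, src, off);
 *	kaddr = folio_zero_tail(folio, off, kaddr + off);
 *	kunmap_local(kaddr);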
 *
 * Return: An address which can be passed to kunmap_local().
 */
static inline __must_check void *folio_zero_tail(struct folio *folio,
		size_t offset, void *kaddr)
{
	size_t len = folio_size(folio) - offset;

	if (folio_test_partial_kmap(folio)) {
		size_t max = PAGE_SIZE - offset_in_page(offset);

		while (len > max) {
			memset(kaddr, 0, max);
			kunmap_local(kaddr);
			len -= max;
			offset += max;
			max = PAGE_SIZE;
			kaddr = kmap_local_folio(folio, offset);
		}
	}

	memset(kaddr, 0, len);
	flush_dcache_folio(folio);

	return kaddr;
}

/**
 * folio_fill_tail - Copy some data to a folio and pad with zeroes.
 * @folio: The destination folio.
 * @offset: The offset into @folio at which to start copying.
 * @from: The data to copy.
 * @len: How many bytes of data to copy.
 *
 * This function is most useful for filesystems which support inline data.
 * When they want to copy data from the inode into the page cache, this
 * function does everything for them. It supports large folios even on
 * HIGHMEM configurations.
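 *
 * A sketch of an inline-data read path ("inline_data" and "inline_len"
 * are illustrative locals, not a real VFS API)::
 *
 *	folio_fill_tail(folio, 0, inline_data, inline_len);
 *	folio_mark_uptodate(folio);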
 */
static inline void folio_fill_tail(struct folio *folio, size_t offset,
		const char *from, size_t len)
{
	char *to = kmap_local_folio(folio, offset);

	VM_BUG_ON(offset + len > folio_size(folio));

	if (folio_test_partial_kmap(folio)) {
		size_t max = PAGE_SIZE - offset_in_page(offset);

		while (len > max) {
			memcpy(to, from, max);
			kunmap_local(to);
			len -= max;
			from += max;
			offset += max;
			max = PAGE_SIZE;
			to = kmap_local_folio(folio, offset);
		}
	}

	memcpy(to, from, len);
	to = folio_zero_tail(folio, offset + len, to + len);
	kunmap_local(to);
}

/**
 * memcpy_from_file_folio - Copy some bytes from a file folio.
 * @to: The destination buffer.
 * @folio: The folio to copy from.
 * @pos: The position in the file.
 * @len: The maximum number of bytes to copy.
 *
 * Copy up to @len bytes from this folio. This may be limited by PAGE_SIZE
 * if the folio comes from HIGHMEM, and by the size of the folio.
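 *
 * Because the copy may be short, callers staying within a single folio
 * usually loop (sketch only)::
 *
 *	while (len > 0) {
 *		size_t n = memcpy_from_file_folio(to, folio, pos, len);
 *
 *		to += n;
 *		pos += n;
 *		len -= n;
 *	}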
 *
 * Return: The number of bytes copied from the folio.
 */
static inline size_t memcpy_from_file_folio(char *to, struct folio *folio,
		loff_t pos, size_t len)
{
	size_t offset = offset_in_folio(folio, pos);
	char *from = kmap_local_folio(folio, offset);

	if (folio_test_partial_kmap(folio)) {
		offset = offset_in_page(offset);
		len = min_t(size_t, len, PAGE_SIZE - offset);
	} else
		len = min(len, folio_size(folio) - offset);

	memcpy(to, from, len);
	kunmap_local(from);

	return len;
}

/**
 * folio_zero_segments() - Zero two byte ranges in a folio.
 * @folio: The folio to write to.
 * @start1: The first byte to zero.
 * @xend1: One more than the last byte in the first range.
 * @start2: The first byte to zero in the second range.
 * @xend2: One more than the last byte in the second range.
 */
static inline void folio_zero_segments(struct folio *folio,
		size_t start1, size_t xend1, size_t start2, size_t xend2)
{
	zero_user_segments(&folio->page, start1, xend1, start2, xend2);
}

/**
 * folio_zero_segment() - Zero a byte range in a folio.
 * @folio: The folio to write to.
 * @start: The first byte to zero.
 * @xend: One more than the last byte to zero.
 */
static inline void folio_zero_segment(struct folio *folio,
		size_t start, size_t xend)
{
	zero_user_segments(&folio->page, start, xend, 0, 0);
}

/**
 * folio_zero_range() - Zero a byte range in a folio.
 * @folio: The folio to write to.
 * @start: The first byte to zero.
 * @length: The number of bytes to zero.
 */
static inline void folio_zero_range(struct folio *folio,
		size_t start, size_t length)
{
	zero_user_segments(&folio->page, start, start + length, 0, 0);
}

/**
 * folio_release_kmap - Unmap a folio and drop a refcount.
 * @folio: The folio to release.
 * @addr: The address previously returned by a call to kmap_local_folio().
 *
 * It is common, e.g. in directory handling, to kmap a folio. This function
 * unmaps the folio and drops the refcount that was being held to keep the
 * folio alive while we accessed it.
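 *
 * A common shape in directory code (sketch only)::
 *
 *	void *kaddr = kmap_local_folio(folio, 0);
 *
 *	// ... read or update directory entries ...
 *	folio_release_kmap(folio, kaddr);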
 */
static inline void folio_release_kmap(struct folio *folio, void *addr)
{
	kunmap_local(addr);
	folio_put(folio);
}
#endif /* _LINUX_HIGHMEM_H */