/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MEMREMAP_H_
#define _LINUX_MEMREMAP_H_

#include <linux/mmzone.h>
#include <linux/range.h>
#include <linux/ioport.h>
#include <linux/percpu-refcount.h>

struct resource;
struct device;

/**
 * struct vmem_altmap - pre-allocated storage for vmemmap_populate
 * @base_pfn: base of the entire dev_pagemap mapping
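 * @end_pfn: end of the entire dev_pagemap mapping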
 * @reserve: pages mapped, but reserved for driver use (relative to @base_pfn)
 * @free: free pages set aside in the mapping for memmap storage
 * @align: pages reserved to meet allocation alignments
 * @alloc: track pages consumed, private to vmemmap_populate()
 */
struct vmem_altmap {
        unsigned long base_pfn;
        const unsigned long end_pfn;
        const unsigned long reserve;
        unsigned long free;
        unsigned long align;
        unsigned long alloc;
};
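
/*
 * Illustrative sketch (hypothetical names and values, not part of this
 * header): a driver that wants the memmap for a device range stored in
 * the device memory itself fills in an altmap, copies it into its
 * dev_pagemap, and sets PGMAP_ALTMAP_VALID (see pgmap_altmap() below):
 *
 *      struct vmem_altmap altmap = {
 *              .base_pfn = PHYS_PFN(range.start),
 *              .end_pfn  = PHYS_PFN(range.end),
 *              .reserve  = nr_driver_pages,
 *              .free     = nr_memmap_pages,
 *      };
 *
 * vmemmap_populate() then consumes pages from @free, tracking the
 * consumption in @alloc.
 */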

/*
 * Specialize ZONE_DEVICE memory into multiple types, each of which has a
 * different usage.
 *
 * MEMORY_DEVICE_PRIVATE:
 * Device memory that is not directly addressable by the CPU: the CPU can
 * neither read nor write private memory. In this case, we do still have
 * struct pages backing the device memory. Doing so simplifies the
 * implementation, but it is important to remember that there are certain
 * points at which the struct page must be treated as an opaque object,
 * rather than a "normal" struct page.
 *
 * A more complete discussion of unaddressable memory may be found in
 * include/linux/hmm.h and Documentation/mm/hmm.rst.
 *
 * MEMORY_DEVICE_COHERENT:
 * Device memory that is cache coherent from the device's and the CPU's
 * point of view. This is used on platforms that have an advanced system bus
 * (like CAPI or CXL). A driver can hotplug the device memory using
 * ZONE_DEVICE and with that memory type. Any page of a process can be
 * migrated to such memory. However, no one should be allowed to pin such
 * memory so that it can always be evicted.
 *
 * MEMORY_DEVICE_FS_DAX:
 * Host memory that has similar access semantics to System RAM, i.e. it is
 * DMA coherent and supports page pinning. To coordinate page pinning with
 * other operations, MEMORY_DEVICE_FS_DAX arranges for a wakeup event
 * whenever a page is unpinned and becomes idle. This wakeup is used to
 * coordinate physical address space management (ex: fs truncate/hole punch)
 * vs pinned pages (ex: device dma).
 *
 * MEMORY_DEVICE_GENERIC:
 * Host memory that has similar access semantics to System RAM, i.e. it is
 * DMA coherent and supports page pinning. This is for example used by DAX
 * devices that expose memory using a character device.
 *
 * MEMORY_DEVICE_PCI_P2PDMA:
 * Device memory residing in a PCI BAR intended for use with Peer-to-Peer
 * transactions.
 */
enum memory_type {
        /* 0 is reserved to catch uninitialized type fields */
        MEMORY_DEVICE_PRIVATE = 1,
        MEMORY_DEVICE_COHERENT,
        MEMORY_DEVICE_FS_DAX,
        MEMORY_DEVICE_GENERIC,
        MEMORY_DEVICE_PCI_P2PDMA,
};

struct dev_pagemap_ops {
        /*
         * Called once the folio refcount reaches 0. The reference count will be
         * reset to one by the core code after the method is called to prepare
         * for handing out the folio again.
         */
        void (*folio_free)(struct folio *folio);

        /*
         * Used for private (un-addressable) device memory only. Must migrate
         * the page back to a CPU accessible page.
         */
        vm_fault_t (*migrate_to_ram)(struct vm_fault *vmf);

        /*
         * Handle a memory failure that happens on a range of pfns. Notify the
         * processes that are using these pfns, and try to recover the data on
         * them if necessary. mf_flags is passed all the way through the
         * notification chain to the recovery function.
         *
         * When this is not implemented, or it returns -EOPNOTSUPP, the caller
         * will fall back to a common handler called mf_generic_kill_procs().
         */
        int (*memory_failure)(struct dev_pagemap *pgmap, unsigned long pfn,
                              unsigned long nr_pages, int mf_flags);

        /*
         * Used for private (un-addressable) device memory only.
         * This callback is used when a folio is split into
         * smaller folios.
         */
        void (*folio_split)(struct folio *head, struct folio *tail);
};
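
/*
 * Illustrative sketch (hypothetical driver names, not part of this
 * header): a device-private memory driver typically supplies at least
 * the callbacks the core requires for MEMORY_DEVICE_PRIVATE:
 *
 *      static const struct dev_pagemap_ops my_pgmap_ops = {
 *              .folio_free     = my_folio_free,
 *              .migrate_to_ram = my_migrate_to_ram,
 *      };
 */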

#define PGMAP_ALTMAP_VALID (1 << 0)

/**
 * struct dev_pagemap - metadata for ZONE_DEVICE mappings
 * @altmap: pre-allocated/reserved memory for vmemmap allocations
 * @ref: reference count that pins the devm_memremap_pages() mapping
 * @done: completion for @ref
 * @type: memory type: see MEMORY_* above in memremap.h
 * @flags: PGMAP_* flags to specify detailed behavior
 * @vmemmap_shift: structural definition of how the vmemmap page metadata
 *      is populated, specifically the metadata page order.
 *      A zero value (default) uses base pages as the vmemmap metadata
 *      representation. A bigger value will set up compound struct pages
 *      of the requested order value.
 * @ops: method table
 * @owner: an opaque pointer identifying the entity that manages this
 *      instance. Used by various helpers to make sure that no
 *      foreign ZONE_DEVICE memory is accessed.
 * @nr_range: number of ranges to be mapped
 * @range: range to be mapped when nr_range == 1
 * @ranges: array of ranges to be mapped when nr_range > 1
 */
struct dev_pagemap {
        struct vmem_altmap altmap;
        struct percpu_ref ref;
        struct completion done;
        enum memory_type type;
        unsigned int flags;
        unsigned long vmemmap_shift;
        const struct dev_pagemap_ops *ops;
        void *owner;
        int nr_range;
        union {
                struct range range;
                DECLARE_FLEX_ARRAY(struct range, ranges);
        };
};
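
/*
 * Illustrative sketch (hypothetical variables such as res, my_pgmap_ops
 * and my_driver_data; not part of this header): registering a single
 * range of device-private memory with memremap_pages(), declared below:
 *
 *      pgmap->type = MEMORY_DEVICE_PRIVATE;
 *      pgmap->nr_range = 1;
 *      pgmap->range.start = res->start;
 *      pgmap->range.end = res->end;
 *      pgmap->ops = &my_pgmap_ops;
 *      pgmap->owner = my_driver_data;
 *      addr = memremap_pages(pgmap, numa_node_id());
 *      if (IS_ERR(addr))
 *              return PTR_ERR(addr);
 */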

static inline bool pgmap_has_memory_failure(struct dev_pagemap *pgmap)
{
        return pgmap->ops && pgmap->ops->memory_failure;
}

static inline struct vmem_altmap *pgmap_altmap(struct dev_pagemap *pgmap)
{
        if (pgmap->flags & PGMAP_ALTMAP_VALID)
                return &pgmap->altmap;
        return NULL;
}

static inline unsigned long pgmap_vmemmap_nr(struct dev_pagemap *pgmap)
{
        return 1 << pgmap->vmemmap_shift;
}
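
/*
 * For example, a pgmap created with a (hypothetical) vmemmap_shift of 9
 * is managed as compound pages of 1 << 9 == 512 base pages each, while
 * the default shift of 0 yields 1 << 0 == 1, i.e. individual base pages.
 */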

static inline bool folio_is_device_private(const struct folio *folio)
{
        return IS_ENABLED(CONFIG_DEVICE_PRIVATE) &&
                folio_is_zone_device(folio) &&
                folio->pgmap->type == MEMORY_DEVICE_PRIVATE;
}

static inline bool is_device_private_page(const struct page *page)
{
        return IS_ENABLED(CONFIG_DEVICE_PRIVATE) &&
                folio_is_device_private(page_folio(page));
}

static inline bool folio_is_pci_p2pdma(const struct folio *folio)
{
        return IS_ENABLED(CONFIG_PCI_P2PDMA) &&
                folio_is_zone_device(folio) &&
                folio->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA;
}

static inline void *folio_zone_device_data(const struct folio *folio)
{
        VM_WARN_ON_FOLIO(!folio_is_device_private(folio), folio);
        return folio->page.zone_device_data;
}

static inline void folio_set_zone_device_data(struct folio *folio, void *data)
{
        VM_WARN_ON_FOLIO(!folio_is_device_private(folio), folio);
        folio->page.zone_device_data = data;
}

static inline bool is_pci_p2pdma_page(const struct page *page)
{
        return IS_ENABLED(CONFIG_PCI_P2PDMA) &&
                folio_is_pci_p2pdma(page_folio(page));
}

static inline bool folio_is_device_coherent(const struct folio *folio)
{
        return folio_is_zone_device(folio) &&
                folio->pgmap->type == MEMORY_DEVICE_COHERENT;
}

static inline bool is_device_coherent_page(const struct page *page)
{
        return folio_is_device_coherent(page_folio(page));
}

static inline bool folio_is_fsdax(const struct folio *folio)
{
        return folio_is_zone_device(folio) &&
                folio->pgmap->type == MEMORY_DEVICE_FS_DAX;
}

static inline bool is_fsdax_page(const struct page *page)
{
        return folio_is_fsdax(page_folio(page));
}

#ifdef CONFIG_ZONE_DEVICE
void zone_device_page_init(struct page *page, struct dev_pagemap *pgmap,
                           unsigned int order);
void *memremap_pages(struct dev_pagemap *pgmap, int nid);
void memunmap_pages(struct dev_pagemap *pgmap);
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap);
struct dev_pagemap *get_dev_pagemap(unsigned long pfn);
bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn);

unsigned long memremap_compat_align(void);

static inline void zone_device_folio_init(struct folio *folio,
                                          struct dev_pagemap *pgmap,
                                          unsigned int order)
{
        zone_device_page_init(&folio->page, pgmap, order);
        if (order)
                folio_set_large_rmappable(folio);
}
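
/*
 * Illustrative sketch (my_pick_free_device_folio is a hypothetical
 * driver helper, not part of this header): once a driver has picked a
 * free folio from its device memory, it re-initializes the folio
 * before handing it out:
 *
 *      folio = my_pick_free_device_folio(my_pgmap, order);
 *      zone_device_folio_init(folio, my_pgmap, order);
 */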

static inline void zone_device_private_split_cb(struct folio *original_folio,
                                                struct folio *new_folio)
{
        if (folio_is_device_private(original_folio)) {
                if (!original_folio->pgmap->ops->folio_split) {
                        if (new_folio) {
                                new_folio->pgmap = original_folio->pgmap;
                                new_folio->page.mapping =
                                        original_folio->page.mapping;
                        }
                } else {
                        original_folio->pgmap->ops->folio_split(original_folio,
                                                                new_folio);
                }
        }
}

#else
static inline void *devm_memremap_pages(struct device *dev,
                                        struct dev_pagemap *pgmap)
{
        /*
         * Fail attempts to call devm_memremap_pages() without
         * ZONE_DEVICE support enabled, this requires callers to fall
         * back to plain devm_memremap() based on config
         */
        WARN_ON_ONCE(1);
        return ERR_PTR(-ENXIO);
}

static inline void devm_memunmap_pages(struct device *dev,
                                       struct dev_pagemap *pgmap)
{
}

static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn)
{
        return NULL;
}

static inline bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn)
{
        return false;
}

/* when memremap_pages() is disabled all archs can remap a single page */
static inline unsigned long memremap_compat_align(void)
{
        return PAGE_SIZE;
}

static inline void zone_device_private_split_cb(struct folio *original_folio,
                                                struct folio *new_folio)
{
}
#endif /* CONFIG_ZONE_DEVICE */
static inline void put_dev_pagemap(struct dev_pagemap *pgmap)
{
        if (pgmap)
                percpu_ref_put(&pgmap->ref);
}
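
/*
 * Illustrative sketch (not kernel code): a caller that looked up a
 * pagemap with get_dev_pagemap() must drop the reference it obtained:
 *
 *      struct dev_pagemap *pgmap = get_dev_pagemap(pfn);
 *
 *      if (pgmap) {
 *              ... use pgmap ...
 *              put_dev_pagemap(pgmap);
 *      }
 */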

#endif /* _LINUX_MEMREMAP_H_ */