/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/bug.h>

/**
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/core-api/dma-attributes.rst.
 */

/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is, reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING (1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE (1UL << 2)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING (1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer, assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC (1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS (1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES (1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN (1UL << 8)

/*
 * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED (1UL << 9)

/*
 * DMA_ATTR_MMIO - Indicates a memory-mapped I/O (MMIO) region for DMA mapping
 *
 * This attribute indicates the physical address is not normal system
 * memory. It may not be used with kmap*()/phys_to_virt()/phys_to_page()
 * functions, it may not be cacheable, and access using CPU load/store
 * instructions may not be allowed.
 *
 * Usually this will be used to describe MMIO addresses, or other non-cacheable
 * register addresses. When DMA mapping this sort of address we call
 * the operation Peer to Peer, as one device is DMA'ing to another device.
 * For PCI devices the p2pdma APIs must be used to determine if DMA_ATTR_MMIO
 * is appropriate.
 *
 * For architectures that require cache flushing for DMA coherence,
 * DMA_ATTR_MMIO will not perform any cache flushing. The address
 * provided must never be mapped cacheable into the CPU.
 */
#define DMA_ATTR_MMIO (1UL << 10)
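
/*
 * Example: attributes are passed as the last argument of the *_attrs
 * variants and may be OR'ed together.  Illustrative sketch only; "dev",
 * "buf" and "len" are placeholder driver variables:
 *
 *	dma_addr_t dma;
 *	void *cpu;
 *
 *	// Streaming mapping where the caller handles cache syncs itself:
 *	dma = dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE,
 *				   DMA_ATTR_SKIP_CPU_SYNC);
 *
 *	// Coherent allocation without a kernel virtual mapping and with
 *	// failure reports suppressed (the return value is then an opaque
 *	// cookie, not a usable virtual address):
 *	cpu = dma_alloc_attrs(dev, len, &dma, GFP_KERNEL,
 *			      DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
 */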

/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform. It can
 * be given to a device to use as a DMA source or target. It is specific to a
 * given device and there may be a translation between the CPU physical address
 * space and the bus address space.
 *
 * DMA_MAPPING_ERROR is the magic error code if a mapping failed. It should not
 * be used directly in drivers, but checked for using dma_mapping_error()
 * instead.
 */
#define DMA_MAPPING_ERROR (~(dma_addr_t)0)

#define DMA_BIT_MASK(n) GENMASK_ULL((n) - 1, 0)
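
/*
 * Example: DMA_BIT_MASK(n) evaluates to a mask with the low n bits set,
 * e.g. DMA_BIT_MASK(32) == 0x00000000ffffffffULL and DMA_BIT_MASK(64)
 * covers the whole 64-bit space.  A typical (sketch-only) use when
 * declaring a device's addressing capability, with "dev" a placeholder:
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36)))
 *		return -EIO;
 */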

struct dma_iova_state {
	dma_addr_t addr;
	u64 __size;
};

/*
 * Use the high bit to mark if we used swiotlb for one or more ranges.
 */
#define DMA_IOVA_USE_SWIOTLB (1ULL << 63)

static inline size_t dma_iova_size(struct dma_iova_state *state)
{
	/* Casting is needed for 32-bit systems */
	return (size_t)(state->__size & ~DMA_IOVA_USE_SWIOTLB);
}

#ifdef CONFIG_DMA_API_DEBUG
void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
void debug_dma_map_single(struct device *dev, const void *addr,
		unsigned long len);
#else
static inline void debug_dma_mapping_error(struct device *dev,
		dma_addr_t dma_addr)
{
}
static inline void debug_dma_map_single(struct device *dev, const void *addr,
		unsigned long len)
{
}
#endif /* CONFIG_DMA_API_DEBUG */

#ifdef CONFIG_HAS_DMA
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	if (unlikely(dma_addr == DMA_MAPPING_ERROR))
		return -ENOMEM;
	return 0;
}
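
/*
 * Example: the result of every streaming mapping must be checked with
 * dma_mapping_error() rather than compared against DMA_MAPPING_ERROR
 * directly.  Sketch only; "dev" and "page" are placeholders:
 *
 *	dma_addr_t dma = dma_map_page(dev, page, 0, PAGE_SIZE,
 *				      DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 */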

dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs);
void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
dma_addr_t dma_map_phys(struct device *dev, phys_addr_t phys, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_phys(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir,
		unsigned long attrs);
int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs);
dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs);
void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs);
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle);
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
bool dma_can_mmap(struct device *dev);
bool dma_pci_p2pdma_supported(struct device *dev);
int dma_set_mask(struct device *dev, u64 mask);
int dma_set_coherent_mask(struct device *dev, u64 mask);
u64 dma_get_required_mask(struct device *dev);
bool dma_addressing_limited(struct device *dev);
size_t dma_max_mapping_size(struct device *dev);
size_t dma_opt_mapping_size(struct device *dev);
unsigned long dma_get_merge_boundary(struct device *dev);
struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs);
void dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir);
void *dma_vmap_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt);
void dma_vunmap_noncontiguous(struct device *dev, void *vaddr);
int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct sg_table *sgt);
#else /* CONFIG_HAS_DMA */
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
		struct page *page, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline dma_addr_t dma_map_phys(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_phys(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline unsigned int dma_map_sg_attrs(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
	return 0;
}
static inline void dma_unmap_sg_attrs(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
}
static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	return -EOPNOTSUPP;
}
static inline dma_addr_t dma_map_resource(struct device *dev,
		phys_addr_t phys_addr, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return -ENOMEM;
}
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
	return NULL;
}
static inline void dma_free_attrs(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
{
}
static inline void *dmam_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	return NULL;
}
static inline void dmam_free_coherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle)
{
}
static inline int dma_get_sgtable_attrs(struct device *dev,
		struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr,
		size_t size, unsigned long attrs)
{
	return -ENXIO;
}
static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	return -ENXIO;
}
static inline bool dma_can_mmap(struct device *dev)
{
	return false;
}
static inline bool dma_pci_p2pdma_supported(struct device *dev)
{
	return false;
}
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline u64 dma_get_required_mask(struct device *dev)
{
	return 0;
}
static inline bool dma_addressing_limited(struct device *dev)
{
	return false;
}
static inline size_t dma_max_mapping_size(struct device *dev)
{
	return 0;
}
static inline size_t dma_opt_mapping_size(struct device *dev)
{
	return 0;
}
static inline unsigned long dma_get_merge_boundary(struct device *dev)
{
	return 0;
}
static inline struct sg_table *dma_alloc_noncontiguous(struct device *dev,
		size_t size, enum dma_data_direction dir, gfp_t gfp,
		unsigned long attrs)
{
	return NULL;
}
static inline void dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
}
static inline void *dma_vmap_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt)
{
	return NULL;
}
static inline void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
{
}
static inline int dma_mmap_noncontiguous(struct device *dev,
		struct vm_area_struct *vma, size_t size, struct sg_table *sgt)
{
	return -EINVAL;
}
#endif /* CONFIG_HAS_DMA */

#ifdef CONFIG_IOMMU_DMA
/**
 * dma_use_iova - check if the IOVA API is used for this state
 * @state: IOVA state
 *
 * Return %true if the DMA transfer uses the dma_iova_*() calls or %false if
 * they can't be used.
 */
static inline bool dma_use_iova(struct dma_iova_state *state)
{
	return state->__size != 0;
}

bool dma_iova_try_alloc(struct device *dev, struct dma_iova_state *state,
		phys_addr_t phys, size_t size);
void dma_iova_free(struct device *dev, struct dma_iova_state *state);
void dma_iova_destroy(struct device *dev, struct dma_iova_state *state,
		size_t mapped_len, enum dma_data_direction dir,
		unsigned long attrs);
int dma_iova_sync(struct device *dev, struct dma_iova_state *state,
		size_t offset, size_t size);
int dma_iova_link(struct device *dev, struct dma_iova_state *state,
		phys_addr_t phys, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
void dma_iova_unlink(struct device *dev, struct dma_iova_state *state,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs);
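
/*
 * Example: the usual flow for the IOVA API is to try to allocate IOVA
 * space once, link physical ranges into it at increasing offsets, sync,
 * and tear everything down with dma_iova_destroy().  Sketch only; the
 * "phys[]"/"len[]" arrays and totals are placeholders, and real code
 * needs error handling on every step:
 *
 *	struct dma_iova_state state = {};
 *	size_t mapped = 0;
 *
 *	if (dma_iova_try_alloc(dev, &state, phys[0], total_len)) {
 *		for (i = 0; i < nr_ranges; i++) {
 *			ret = dma_iova_link(dev, &state, phys[i], mapped,
 *					    len[i], DMA_TO_DEVICE, 0);
 *			if (ret)
 *				goto out_destroy;
 *			mapped += len[i];
 *		}
 *		ret = dma_iova_sync(dev, &state, 0, mapped);
 *		...
 *		// hardware DMAs to/from state.addr + offset
 *		dma_iova_destroy(dev, &state, mapped, DMA_TO_DEVICE, 0);
 *	} else {
 *		// fall back to dma_map_phys()/dma_map_page_attrs()
 *	}
 */
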
#else /* CONFIG_IOMMU_DMA */
static inline bool dma_use_iova(struct dma_iova_state *state)
{
	return false;
}
static inline bool dma_iova_try_alloc(struct device *dev,
		struct dma_iova_state *state, phys_addr_t phys, size_t size)
{
	return false;
}
static inline void dma_iova_free(struct device *dev,
		struct dma_iova_state *state)
{
}
static inline void dma_iova_destroy(struct device *dev,
		struct dma_iova_state *state, size_t mapped_len,
		enum dma_data_direction dir, unsigned long attrs)
{
}
static inline int dma_iova_sync(struct device *dev,
		struct dma_iova_state *state, size_t offset, size_t size)
{
	return -EOPNOTSUPP;
}
static inline int dma_iova_link(struct device *dev,
		struct dma_iova_state *state, phys_addr_t phys, size_t offset,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return -EOPNOTSUPP;
}
static inline void dma_iova_unlink(struct device *dev,
		struct dma_iova_state *state, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
}
#endif /* CONFIG_IOMMU_DMA */

#if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
void __dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir);
void __dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir);
void __dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir);
void __dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir);
bool __dma_need_sync(struct device *dev, dma_addr_t dma_addr);

static inline bool dma_dev_need_sync(const struct device *dev)
{
	/* Always call DMA sync operations when debugging is enabled */
	return !dev->dma_skip_sync || IS_ENABLED(CONFIG_DMA_API_DEBUG);
}

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
	if (dma_dev_need_sync(dev))
		__dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	if (dma_dev_need_sync(dev))
		__dma_sync_single_for_device(dev, addr, size, dir);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
	if (dma_dev_need_sync(dev))
		__dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
	if (dma_dev_need_sync(dev))
		__dma_sync_sg_for_device(dev, sg, nelems, dir);
}

static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return dma_dev_need_sync(dev) ? __dma_need_sync(dev, dma_addr) : false;
}
bool dma_need_unmap(struct device *dev);
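
/*
 * Example: on hot paths a driver may look up dma_need_sync() once per
 * mapping and skip the per-buffer sync calls when the platform does not
 * require them.  Sketch only; "pool" is a placeholder structure:
 *
 *	pool->dma_need_sync = dma_need_sync(dev, dma);
 *	...
 *	if (pool->dma_need_sync)
 *		dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 */
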
#else /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_SYNC */
static inline bool dma_dev_need_sync(const struct device *dev)
{
	return false;
}
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return false;
}
static inline bool dma_need_unmap(struct device *dev)
{
	return false;
}
#endif /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_SYNC */

struct page *dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir);
int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct page *page);

static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	struct page *page = dma_alloc_pages(dev, size, dma_handle, dir, gfp);

	return page ? page_address(page) : NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle, enum dma_data_direction dir)
{
	dma_free_pages(dev, size, virt_to_page(vaddr), dma_handle, dir);
}
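
/*
 * Example: non-coherent allocations return normal kernel memory, so the
 * CPU view and the device view must be kept coherent with explicit sync
 * calls.  Sketch only; "ring", "size" and the descriptor setup are
 * placeholders:
 *
 *	void *ring = dma_alloc_noncoherent(dev, size, &dma,
 *					   DMA_BIDIRECTIONAL, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	// ... CPU fills the descriptors in "ring" ...
 *	dma_sync_single_for_device(dev, dma, size, DMA_BIDIRECTIONAL);
 *	...
 *	dma_free_noncoherent(dev, size, ring, dma, DMA_BIDIRECTIONAL);
 */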

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	/* DMA must never operate on areas that might be remapped. */
	if (dev_WARN_ONCE(dev, is_vmalloc_addr(ptr),
			  "rejecting DMA map of vmalloc memory\n"))
		return DMA_MAPPING_ERROR;
	debug_dma_map_single(dev, ptr, size);
	return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
			size, dir, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return dma_unmap_page_attrs(dev, addr, size, dir, attrs);
}
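
/*
 * Example: a complete streaming cycle for a kmalloc()ed buffer (vmalloc
 * memory would be rejected by the check above).  Sketch only; "dev",
 * "buf" and "len" are placeholders:
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	// ... hand "dma" to the hardware and wait for completion ...
 *	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
 *	// "buf" now contains the data written by the device
 */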

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_device(dev, addr + offset, size, dir);
}

/**
 * dma_unmap_sgtable - Unmap the given buffer for DMA
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 * @attrs: Optional DMA attributes for the unmap operation
 *
 * Unmaps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After this function
 * the ownership of the buffer is transferred back to the CPU domain.
 */
static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
}

/**
 * dma_sync_sgtable_for_cpu - Synchronize the given buffer for CPU access
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the CPU domain, so it is safe to perform any access to it
 * by the CPU. Before doing any further DMA operations, one has to transfer
 * the ownership of the buffer back to the DMA domain by calling
 * dma_sync_sgtable_for_device().
 */
static inline void dma_sync_sgtable_for_cpu(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
}

/**
 * dma_sync_sgtable_for_device - Synchronize the given buffer for DMA
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the DMA domain, so it is safe to perform the DMA operation.
 * Once finished, one has to call dma_sync_sgtable_for_cpu() or
 * dma_unmap_sgtable().
 */
static inline void dma_sync_sgtable_for_device(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
}
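
/*
 * Example: the sg_table helpers above are normally used as a pair around
 * the device's DMA, with syncs whenever the CPU needs to look at the
 * buffer in between.  Sketch only; error handling trimmed:
 *
 *	ret = dma_map_sgtable(dev, sgt, DMA_FROM_DEVICE, 0);
 *	if (ret)
 *		return ret;
 *	// ... device writes into the buffer ...
 *	dma_sync_sgtable_for_cpu(dev, sgt, DMA_FROM_DEVICE);
 *	// ... CPU may inspect the data ...
 *	dma_sync_sgtable_for_device(dev, sgt, DMA_FROM_DEVICE);
 *	// ... more DMA ...
 *	dma_unmap_sgtable(dev, sgt, DMA_FROM_DEVICE, 0);
 */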

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)

bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size);

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}
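
/*
 * Example: coherent memory is typically allocated once at probe time for
 * long-lived, CPU- and device-visible structures such as descriptor
 * rings; no explicit sync calls are needed for it.  Sketch only; "priv"
 * and RING_SIZE are placeholders:
 *
 *	priv->desc = dma_alloc_coherent(dev, RING_SIZE, &priv->desc_dma,
 *					GFP_KERNEL);
 *	if (!priv->desc)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, RING_SIZE, priv->desc, priv->desc_dma);
 */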

static inline u64 dma_get_mask(struct device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or smaller than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);

	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}
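
/*
 * Example: a driver that prefers 64-bit addressing but can also work with
 * a 32-bit mask may fall back, as in this sketch ("dev" is a placeholder):
 *
 *	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
 *	if (ret)
 *		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
 *	if (ret)
 *		return ret;	// no usable DMA addressing mode
 */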

/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately set up.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline void dma_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (WARN_ON_ONCE(!dev->dma_parms))
		return;
	dev->dma_parms->max_segment_size = size;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return ULONG_MAX;
}

/**
 * dma_get_seg_boundary_nr_pages - return the segment boundary in "page" units
 * @dev: device to query the boundary for
 * @page_shift: ilog2() of the IOMMU page size
 *
 * Return the segment boundary in IOMMU page units (which may be different from
 * the CPU page size) for the passed in device.
 *
 * If @dev is NULL a boundary of U32_MAX is assumed; this case is just for
 * non-DMA API callers.
 */
static inline unsigned long dma_get_seg_boundary_nr_pages(struct device *dev,
		unsigned int page_shift)
{
	if (!dev)
		return (U32_MAX >> page_shift) + 1;
	return (dma_get_seg_boundary(dev) >> page_shift) + 1;
}

static inline void dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (WARN_ON_ONCE(!dev->dma_parms))
		return;
	dev->dma_parms->segment_boundary_mask = mask;
}

static inline unsigned int dma_get_min_align_mask(struct device *dev)
{
	if (dev->dma_parms)
		return dev->dma_parms->min_align_mask;
	return 0;
}

static inline void dma_set_min_align_mask(struct device *dev,
		unsigned int min_align_mask)
{
	if (WARN_ON_ONCE(!dev->dma_parms))
		return;
	dev->dma_parms->min_align_mask = min_align_mask;
}

#ifndef dma_get_cache_alignment
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_HAS_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}
#endif

static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dmam_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void *dma_alloc_wc(struct device *dev, size_t size,
		dma_addr_t *dma_addr, gfp_t gfp)
{
	unsigned long attrs = DMA_ATTR_WRITE_COMBINE;

	if (gfp & __GFP_NOWARN)
		attrs |= DMA_ATTR_NO_WARN;

	return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
}

static inline void dma_free_wc(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_addr)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
			DMA_ATTR_WRITE_COMBINE);
}

static inline int dma_mmap_wc(struct device *dev,
		struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr,
		size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			DMA_ATTR_WRITE_COMBINE);
}
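
/*
 * Example: write-combined allocations are commonly used for buffers the
 * CPU only writes sequentially (e.g. frame buffers) and that userspace
 * may map.  Sketch only; "fb" and "vma" are placeholders:
 *
 *	fb->vaddr = dma_alloc_wc(dev, fb->size, &fb->dma, GFP_KERNEL);
 *	if (!fb->vaddr)
 *		return -ENOMEM;
 *	...
 *	ret = dma_mmap_wc(dev, vma, fb->vaddr, fb->dma, fb->size);
 *	...
 *	dma_free_wc(dev, fb->size, fb->vaddr, fb->dma);
 */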

#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME) \
	({ typeof(PTR) __p __maybe_unused = PTR; 0; })
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) \
	do { typeof(PTR) __p __maybe_unused = PTR; } while (0)
#define dma_unmap_len(PTR, LEN_NAME) \
	({ typeof(PTR) __p __maybe_unused = PTR; 0; })
#define dma_unmap_len_set(PTR, LEN_NAME, VAL) \
	do { typeof(PTR) __p __maybe_unused = PTR; } while (0)
#endif
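
/*
 * Example: the dma_unmap_*() state helpers let a driver record unmap
 * information in its per-buffer bookkeeping without wasting space on
 * configurations that do not need it.  Sketch only; "struct tx_buf",
 * "buf", "mapping" and "size" are placeholders:
 *
 *	struct tx_buf {
 *		struct sk_buff *skb;
 *		DEFINE_DMA_UNMAP_ADDR(addr);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(buf, addr, mapping);
 *	dma_unmap_len_set(buf, len, size);
 *	...
 *	dma_unmap_single(dev, dma_unmap_addr(buf, addr),
 *			 dma_unmap_len(buf, len), DMA_TO_DEVICE);
 */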

#endif /* _LINUX_DMA_MAPPING_H */