/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2013-2015 Analog Devices Inc.
 * Author: Lars-Peter Clausen <lars@metafoo.de>
 */

#ifndef __INDUSTRIALIO_DMA_BUFFER_H__
#define __INDUSTRIALIO_DMA_BUFFER_H__

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/iio/buffer_impl.h>

struct iio_dma_buffer_queue;
struct iio_dma_buffer_ops;
struct device;
struct dma_buf_attachment;
struct dma_fence;
struct sg_table;

/**
 * enum iio_block_state - State of a struct iio_dma_buffer_block
 * @IIO_BLOCK_STATE_QUEUED: Block is on the incoming queue
 * @IIO_BLOCK_STATE_ACTIVE: Block is currently being processed by the DMA
 * @IIO_BLOCK_STATE_DONE: Block is on the outgoing queue
 * @IIO_BLOCK_STATE_DEAD: Block has been marked to be freed
 */
enum iio_block_state {
	IIO_BLOCK_STATE_QUEUED,
	IIO_BLOCK_STATE_ACTIVE,
	IIO_BLOCK_STATE_DONE,
	IIO_BLOCK_STATE_DEAD,
};
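
/*
 * Blocks normally cycle QUEUED -> ACTIVE -> DONE: a block waits on the
 * incoming queue, is handed to the DMA controller, and is placed on the
 * outgoing queue once the transfer completes; re-submitting a completed
 * block returns it to QUEUED. DEAD is terminal: the block is freed once
 * its last reference is dropped.
 */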

/**
 * struct iio_dma_buffer_block - IIO buffer block
 * @head: List head
 * @size: Total size of the block in bytes
 * @bytes_used: Number of bytes that contain valid data
 * @vaddr: Virtual address of the block's memory
 * @phys_addr: Physical address of the block's memory
 * @queue: Parent DMA buffer queue
 * @kref: kref used to manage the lifetime of the block
 * @state: Current state of the block
 * @cyclic: True if this is a cyclic buffer
 * @fileio: True if this buffer is used for fileio mode
 * @sg_table: Scatter-gather table for the transfer when transferring a DMABUF
 * @fence: DMA fence to be signaled when a DMABUF transfer is complete
 */
struct iio_dma_buffer_block {
	/* May only be accessed by the owner of the block */
	struct list_head head;
	size_t bytes_used;

	/*
	 * Set during allocation, constant thereafter. May be accessed
	 * read-only by anybody holding a reference to the block.
	 */
	void *vaddr;
	dma_addr_t phys_addr;
	size_t size;
	struct iio_dma_buffer_queue *queue;

	/* Must not be accessed outside the core. */
	struct kref kref;
	/*
	 * Must not be accessed outside the core. Access needs to hold
	 * queue->list_lock if the block is not owned by the core.
	 */
	enum iio_block_state state;

	bool cyclic;
	bool fileio;

	struct sg_table *sg_table;
	struct dma_fence *fence;
};

/**
 * struct iio_dma_buffer_queue_fileio - FileIO state for the DMA buffer
 * @blocks: Buffer blocks used for fileio
 * @active_block: Block being used in read()
 * @pos: Read offset in the active block
 * @block_size: Size of each block
 * @next_dequeue: Index of the next block that will be dequeued
 * @enabled: Whether the buffer is operating in fileio mode
 */
struct iio_dma_buffer_queue_fileio {
	struct iio_dma_buffer_block *blocks[2];
	struct iio_dma_buffer_block *active_block;
	size_t pos;
	size_t block_size;

	unsigned int next_dequeue;
	bool enabled;
};

/**
 * struct iio_dma_buffer_queue - DMA buffer base structure
 * @buffer: IIO buffer base structure
 * @dev: Parent device
 * @ops: DMA buffer callbacks
 * @lock: Protects the incoming list, the active flag and the fields of the
 *   fileio substruct
 * @list_lock: Protects lists that contain blocks which can be modified in
 *   atomic context, as well as the blocks on those lists. This covers the
 *   outgoing queue list and typically also a list of active blocks in the
 *   part of the driver that handles the DMA controller
 * @incoming: List of blocks on the incoming queue
 * @active: Whether the buffer is currently active
 * @num_dmabufs: Total number of DMABUFs attached to this queue
 * @fileio: FileIO state
 */
struct iio_dma_buffer_queue {
	struct iio_buffer buffer;
	struct device *dev;
	const struct iio_dma_buffer_ops *ops;

	struct mutex lock;
	spinlock_t list_lock;
	struct list_head incoming;

	bool active;
	atomic_t num_dmabufs;

	struct iio_dma_buffer_queue_fileio fileio;
};
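
/*
 * A driver typically embeds the queue in its own state structure and
 * recovers it with container_of(). A hypothetical sketch (struct
 * example_state, the DMA channel and the active list are assumptions used
 * by the examples further below, not part of this API):
 *
 *	struct example_state {
 *		struct iio_dma_buffer_queue queue;
 *		struct dma_chan *chan;
 *		struct list_head active;
 *	};
 */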

/**
 * struct iio_dma_buffer_ops - DMA buffer callback operations
 * @submit: Called to submit a block to the DMA controller
 * @abort: Should abort all pending transfers
 */
struct iio_dma_buffer_ops {
	int (*submit)(struct iio_dma_buffer_queue *queue,
		struct iio_dma_buffer_block *block);
	void (*abort)(struct iio_dma_buffer_queue *queue);
};
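
/*
 * A minimal sketch of driver-side callbacks for a dmaengine-backed
 * implementation, reusing the hypothetical struct example_state above.
 * Illustrative only, not a definitive driver; a real one also needs
 * <linux/dmaengine.h>:
 *
 *	static void example_block_done(void *data)
 *	{
 *		struct iio_dma_buffer_block *block = data;
 *		unsigned long flags;
 *
 *		// Drop the block from the driver's active list under
 *		// list_lock, then hand it back to the core.
 *		spin_lock_irqsave(&block->queue->list_lock, flags);
 *		list_del(&block->head);
 *		spin_unlock_irqrestore(&block->queue->list_lock, flags);
 *
 *		iio_dma_buffer_block_done(block);
 *	}
 *
 *	static int example_submit(struct iio_dma_buffer_queue *queue,
 *				  struct iio_dma_buffer_block *block)
 *	{
 *		struct example_state *st =
 *			container_of(queue, struct example_state, queue);
 *		struct dma_async_tx_descriptor *desc;
 *
 *		desc = dmaengine_prep_slave_single(st->chan, block->phys_addr,
 *						   block->size, DMA_DEV_TO_MEM,
 *						   DMA_PREP_INTERRUPT);
 *		if (!desc)
 *			return -ENOMEM;
 *
 *		desc->callback = example_block_done;
 *		desc->callback_param = block;
 *
 *		spin_lock_irq(&queue->list_lock);
 *		list_add_tail(&block->head, &st->active);
 *		spin_unlock_irq(&queue->list_lock);
 *
 *		dmaengine_submit(desc);
 *		dma_async_issue_pending(st->chan);
 *
 *		return 0;
 *	}
 *
 *	static void example_abort(struct iio_dma_buffer_queue *queue)
 *	{
 *		struct example_state *st =
 *			container_of(queue, struct example_state, queue);
 *
 *		dmaengine_terminate_sync(st->chan);
 *		iio_dma_buffer_block_list_abort(queue, &st->active);
 *	}
 */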

void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block);
void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
	struct list_head *list);

int iio_dma_buffer_enable(struct iio_buffer *buffer,
	struct iio_dev *indio_dev);
int iio_dma_buffer_disable(struct iio_buffer *buffer,
	struct iio_dev *indio_dev);
int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
	char __user *user_buffer);
int iio_dma_buffer_write(struct iio_buffer *buffer, size_t n,
	const char __user *user_buffer);
size_t iio_dma_buffer_usage(struct iio_buffer *buffer);
int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd);
int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length);
int iio_dma_buffer_request_update(struct iio_buffer *buffer);
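
/*
 * The fileio helpers above are designed to slot directly into a driver's
 * struct iio_buffer_access_funcs. A sketch of that wiring; the release
 * callback name is an assumption (a real driver frees its own state there
 * after calling iio_dma_buffer_release()):
 *
 *	static const struct iio_buffer_access_funcs example_buffer_ops = {
 *		.read = iio_dma_buffer_read,
 *		.write = iio_dma_buffer_write,
 *		.set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
 *		.set_length = iio_dma_buffer_set_length,
 *		.request_update = iio_dma_buffer_request_update,
 *		.enable = iio_dma_buffer_enable,
 *		.disable = iio_dma_buffer_disable,
 *		.data_available = iio_dma_buffer_usage,
 *		.space_available = iio_dma_buffer_usage,
 *		.release = example_buffer_release,
 *		.modes = INDIO_BUFFER_HARDWARE,
 *	};
 */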

int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
	struct device *dma_dev, const struct iio_dma_buffer_ops *ops);
void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue);
void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue);
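
/*
 * Setup/teardown sketch, reusing the hypothetical example_state and the
 * example callbacks from above:
 *
 *	static const struct iio_dma_buffer_ops example_dma_ops = {
 *		.submit = example_submit,
 *		.abort = example_abort,
 *	};
 *
 *	INIT_LIST_HEAD(&st->active);
 *	ret = iio_dma_buffer_init(&st->queue, dma_dev, &example_dma_ops);
 *	if (ret)
 *		return ret;
 *
 * iio_dma_buffer_exit() is called when the driver is removed;
 * iio_dma_buffer_release() performs the final cleanup and is typically
 * called from the buffer's release callback once the last reference to
 * the iio_buffer is dropped.
 */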

struct iio_dma_buffer_block *
iio_dma_buffer_attach_dmabuf(struct iio_buffer *buffer,
			     struct dma_buf_attachment *attach);
void iio_dma_buffer_detach_dmabuf(struct iio_buffer *buffer,
				  struct iio_dma_buffer_block *block);
int iio_dma_buffer_enqueue_dmabuf(struct iio_buffer *buffer,
				  struct iio_dma_buffer_block *block,
				  struct dma_fence *fence,
				  struct sg_table *sgt,
				  size_t size, bool cyclic);
void iio_dma_buffer_lock_queue(struct iio_buffer *buffer);
void iio_dma_buffer_unlock_queue(struct iio_buffer *buffer);
struct device *iio_dma_buffer_get_dma_dev(struct iio_buffer *buffer);
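
/*
 * The DMABUF helpers mirror the corresponding callbacks of struct
 * iio_buffer_access_funcs, so they can usually be wired up directly.
 * A sketch, assuming those callbacks are present in the driver's
 * access funcs:
 *
 *	.attach_dmabuf = iio_dma_buffer_attach_dmabuf,
 *	.detach_dmabuf = iio_dma_buffer_detach_dmabuf,
 *	.enqueue_dmabuf = iio_dma_buffer_enqueue_dmabuf,
 *	.lock_queue = iio_dma_buffer_lock_queue,
 *	.unlock_queue = iio_dma_buffer_unlock_queue,
 */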

#endif