/* SPDX-License-Identifier: GPL-2.0-only */

#ifndef LWQ_H
#define LWQ_H
/*
 * Light-weight single-linked queue built from llist
 *
 * Entries can be enqueued from any context with no locking.
 * Entries can be dequeued from process context with integrated locking.
 *
 * This is particularly suitable when work items are queued in
 * BH or IRQ context, and where work items are handled one at a time
 * by dedicated threads.
 */
#include <linux/container_of.h>
#include <linux/spinlock.h>
#include <linux/llist.h>

struct lwq_node {
	struct llist_node node;
};

struct lwq {
	spinlock_t lock;
	struct llist_node *ready;	/* entries to be dequeued */
	struct llist_head new;		/* entries being enqueued */
};

/**
 * lwq_init - initialise a lwq
 * @q: the lwq object
 */
static inline void lwq_init(struct lwq *q)
{
	spin_lock_init(&q->lock);
	q->ready = NULL;
	init_llist_head(&q->new);
}
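
/*
 * Example setup (illustrative sketch only; "my_item", "my_queue" and
 * "my_setup" are hypothetical names, not part of this API).  A user embeds
 * an lwq_node in each work item and initialises one lwq per queue:
 *
 *	struct my_item {
 *		struct lwq_node lnode;
 *		int data;
 *	};
 *
 *	static struct lwq my_queue;
 *
 *	static void my_setup(void)
 *	{
 *		lwq_init(&my_queue);
 *	}
 */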

/**
 * lwq_empty - test if lwq contains any entry
 * @q: the lwq object
 *
 * This empty test contains an acquire barrier so that if a wakeup
 * is sent when lwq_dequeue returns true, it is safe to go to sleep after
 * a test on lwq_empty().
 */
static inline bool lwq_empty(struct lwq *q)
{
	/* acquire ensures ordering wrt lwq_enqueue() */
	return smp_load_acquire(&q->ready) == NULL && llist_empty(&q->new);
}
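
/*
 * Example wait pattern (illustrative sketch; "my_wq" and "my_queue" are
 * hypothetical).  The acquire barrier in lwq_empty() orders against
 * lwq_enqueue(), so a consumer can test it before sleeping:
 *
 *	wait_event(my_wq, !lwq_empty(&my_queue));
 */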

struct llist_node *__lwq_dequeue(struct lwq *q);
/**
 * lwq_dequeue - dequeue first (oldest) entry from lwq
 * @q: the queue to dequeue from
 * @type: the type of object to return
 * @member: the member in the returned object which is an lwq_node.
 *
 * Remove a single object from the lwq and return it.  This will take
 * a spinlock and so must always be called in the same context, typically
 * process context.
 */
#define lwq_dequeue(q, type, member)					\
	({ struct llist_node *_n = __lwq_dequeue(q);			\
	   _n ? container_of(_n, type, member.node) : NULL; })
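
/*
 * Example dequeue loop (illustrative sketch; "my_item", "my_queue" and
 * "process_item" are hypothetical).  A dedicated thread typically drains
 * one item at a time:
 *
 *	struct my_item *item;
 *
 *	while ((item = lwq_dequeue(&my_queue, struct my_item, lnode)) != NULL)
 *		process_item(item);
 */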

struct llist_node *lwq_dequeue_all(struct lwq *q);

/**
 * lwq_for_each_safe - iterate over detached queue allowing deletion
 * @_n: iterator variable
 * @_t1: temporary struct llist_node **
 * @_t2: temporary struct llist_node *
 * @_l: address of llist_node pointer from lwq_dequeue_all()
 * @_member: member in _n where lwq_node is found.
 *
 * Iterate over members in a dequeued list.  If the iterator variable
 * is set to NULL, the iterator removes that entry from the queue.
 */
#define lwq_for_each_safe(_n, _t1, _t2, _l, _member)			\
	for (_t1 = (_l);						\
	     *(_t1) ? (_n = container_of(*(_t1), typeof(*(_n)), _member.node),\
		       _t2 = ((*_t1)->next),				\
		       true)						\
		    : false;						\
	     (_n) ? (_t1 = &(_n)->_member.node.next, 0)			\
		  : ((*(_t1) = (_t2)), 0))
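
/*
 * Example drain-and-filter (illustrative sketch; "my_item", "my_queue" and
 * "should_drop" are hypothetical).  Setting the iterator to NULL in the body
 * unlinks that entry from the detached list:
 *
 *	struct llist_node *list = lwq_dequeue_all(&my_queue);
 *	struct llist_node **t1, *t2;
 *	struct my_item *item;
 *
 *	lwq_for_each_safe(item, t1, t2, &list, lnode) {
 *		if (should_drop(item)) {
 *			kfree(item);
 *			item = NULL;
 *		}
 *	}
 */
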
/**
 * lwq_enqueue - add a new item to the end of the queue
 * @n: the lwq_node embedded in the item to be added
 * @q: the lwq to append to.
 *
 * No locking is needed to append to the queue so this can
 * be called from any context.
 * Return %true if the list may have previously been empty.
 */
static inline bool lwq_enqueue(struct lwq_node *n, struct lwq *q)
{
	/* acquire ensures ordering wrt lwq_dequeue */
	return llist_add(&n->node, &q->new) &&
		smp_load_acquire(&q->ready) == NULL;
}
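
/*
 * Example producer (illustrative sketch; "my_item", "my_queue" and "my_wq"
 * are hypothetical).  The return value hints that the queue may have been
 * empty, so a wakeup is only needed in that case:
 *
 *	static void my_irq_producer(struct my_item *item)
 *	{
 *		if (lwq_enqueue(&item->lnode, &my_queue))
 *			wake_up(&my_wq);
 *	}
 */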

/**
 * lwq_enqueue_batch - add a list of new items to the end of the queue
 * @n: the lwq_node embedded in the first item to be added
 * @q: the lwq to append to.
 *
 * No locking is needed to append to the queue so this can
 * be called from any context.
 * Return %true if the list may have previously been empty.
 */
static inline bool lwq_enqueue_batch(struct llist_node *n, struct lwq *q)
{
	struct llist_node *e = n;

	/* acquire ensures ordering wrt lwq_dequeue */
	return llist_add_batch(llist_reverse_order(n), e, &q->new) &&
		smp_load_acquire(&q->ready) == NULL;
}
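
/*
 * Example batch producer (illustrative sketch; "src", "dst" and "my_wq" are
 * hypothetical).  A chain detached with lwq_dequeue_all() can be handed to
 * another queue in a single call:
 *
 *	struct llist_node *list = lwq_dequeue_all(&src);
 *
 *	if (list && lwq_enqueue_batch(list, &dst))
 *		wake_up(&my_wq);
 */
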
#endif /* LWQ_H */