/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RSEQ_TYPES_H
#define _LINUX_RSEQ_TYPES_H

#include <linux/bits.h>
#include <linux/irq_work_types.h>
#include <linux/mutex_types.h>
#include <linux/spinlock_types.h>
#include <linux/types.h>
#include <linux/workqueue_types.h>

#ifdef CONFIG_RSEQ
struct rseq;

/**
 * struct rseq_event - Storage for rseq related event management
 * @all: Compound to initialize and clear the data efficiently
 * @events: Compound to access events with a single load/store
 * @sched_switch: True if the task was scheduled out and needs an update
 *	on exit to user space
 * @ids_changed: Indicator that IDs need to be updated
 * @user_irq: True on interrupt entry from user mode
 * @has_rseq: True if the task has a rseq pointer installed
 * @error: Compound error code for the slow path to analyze
 * @fatal: User space data corrupted or invalid
 * @slowpath: Indicator that slow path processing via TIF_NOTIFY_RESUME
 *	is required
 *
 * @sched_switch and @ids_changed must be adjacent and the combination must
 * be 16-bit aligned so that a single store can set both when they are
 * raised at the same time in the scheduler.
 */
struct rseq_event {
	union {
		u64	all;
		struct {
			union {
				u32	events;
				struct {
					u8	sched_switch;
					u8	ids_changed;
					u8	user_irq;
				};
			};

			u8	has_rseq;
			u8	__pad;
			union {
				u16	error;
				struct {
					u8	fatal;
					u8	slowpath;
				};
			};
		};
	};
};
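
/*
 * Illustrative sketch, not part of the header: because @sched_switch and
 * @ids_changed are adjacent and 16-bit aligned, the scheduler can raise
 * both with one 16-bit store, and the whole event state can be cleared
 * with one 64-bit store via @all. The helper names are hypothetical.
 *
 *	static inline void rseq_raise_switch_events(struct rseq_event *ev)
 *	{
 *		u16 *both = (u16 *)&ev->sched_switch;
 *
 *		// 0x0101 sets both bytes to 1, independent of endianness
 *		*both = 0x0101;
 *	}
 *
 *	static inline void rseq_clear_state(struct rseq_event *ev)
 *	{
 *		ev->all = 0;
 *	}
 */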

/**
 * struct rseq_ids - Cache for the IDs which need to be updated
 * @cpu_cid: Compound of @cpu_id and @mm_cid to make the compiler emit a
 *	single compare on 64-bit architectures
 * @cpu_id: The CPU ID which was written last to user space
 * @mm_cid: The MM CID which was written last to user space
 *
 * @cpu_id and @mm_cid are updated when the data is written to user space.
 */
struct rseq_ids {
	union {
		u64	cpu_cid;
		struct {
			u32	cpu_id;
			u32	mm_cid;
		};
	};
};
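
/*
 * Illustrative sketch with a hypothetical helper: @cpu_cid lets both
 * cached IDs be compared against the current values with a single 64-bit
 * comparison, as described in the kerneldoc above.
 *
 *	static inline bool rseq_ids_match(const struct rseq_ids *cached,
 *					  const struct rseq_ids *cur)
 *	{
 *		return cached->cpu_cid == cur->cpu_cid;
 *	}
 */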

/**
 * struct rseq_data - Storage for all rseq related data
 * @usrptr: Pointer to the registered user space RSEQ memory
 * @len: Length of the RSEQ region
 * @sig: Signature of critical section abort IPs
 * @event: Storage for event management
 * @ids: Storage for cached CPU ID and MM CID
 */
struct rseq_data {
	struct rseq __user	*usrptr;
	u32			len;
	u32			sig;
	struct rseq_event	event;
	struct rseq_ids		ids;
};
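
/*
 * Illustrative sketch, assumed usage with a hypothetical helper: on exit
 * to user space the fast path only has work to do when an rseq pointer is
 * installed and at least one event is pending, both of which are readable
 * from struct rseq_data with plain loads.
 *
 *	static inline bool rseq_needs_update(const struct rseq_data *r)
 *	{
 *		return r->event.has_rseq && r->event.events;
 *	}
 */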

#else /* CONFIG_RSEQ */
struct rseq_data { };
#endif /* !CONFIG_RSEQ */

#ifdef CONFIG_SCHED_MM_CID

#define MM_CID_UNSET	BIT(31)
#define MM_CID_ONCPU	BIT(30)
#define MM_CID_TRANSIT	BIT(29)
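
/*
 * A hedged sketch of extracting the plain CID value; the mask name and
 * the assumption that the three marker bits above are simply ORed into
 * the CID value are illustrative, not taken from this header.
 *
 *	#define MM_CID_FLAGS	(MM_CID_UNSET | MM_CID_ONCPU | MM_CID_TRANSIT)
 *
 *	static inline unsigned int mm_cid_value(unsigned int cid)
 *	{
 *		return cid & ~MM_CID_FLAGS;
 *	}
 */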

/**
 * struct sched_mm_cid - Storage for per task MM CID data
 * @active: MM CID is active for the task
 * @cid: The CID associated with the task, either permanently or borrowed
 *	from the CPU
 */
struct sched_mm_cid {
	unsigned int	active;
	unsigned int	cid;
};

/**
 * struct mm_cid_pcpu - Storage for per CPU MM CID data
 * @cid: The CID associated with the CPU, either permanently or while a
 *	task with a CID is running on it
 */
struct mm_cid_pcpu {
	unsigned int	cid;
} ____cacheline_aligned_in_smp;
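
/*
 * Illustrative sketch, assumed setup code: the per CPU storage lends
 * itself to allocation with the regular per CPU allocator, giving each
 * CPU its own cache line aligned instance.
 *
 *	struct mm_cid_pcpu __percpu *pcpu = alloc_percpu(struct mm_cid_pcpu);
 *
 *	if (!pcpu)
 *		return -ENOMEM;
 */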

/**
 * struct mm_mm_cid - Storage for per MM CID data
 * @pcpu: Per CPU storage for CIDs associated with a CPU
 * @percpu: Set when CIDs are in per CPU mode
 * @transit: Set to MM_CID_TRANSIT during a mode change transition phase
 * @max_cids: The exclusive maximum CID value for allocation and convergence
 * @irq_work: irq_work to handle the affinity mode change case
 * @work: Regular work to handle the affinity mode change case
 * @lock: Spinlock to protect against affinity setting, which can't take @mutex
 * @mutex: Mutex to serialize forks and exits related to this MM
 * @nr_cpus_allowed: The number of CPUs in the per MM allowed CPUs map.
 *	The map is grow-only.
 * @users: The number of tasks sharing this MM. Separate from
 *	mm_struct::mm_users as that is also modified via mmget()/mmput()
 *	by entities which do not actually share the MM.
 * @pcpu_thrs: Threshold for switching back from per CPU mode
 * @update_deferred: A deferred switch back to per task mode is pending
 */
struct mm_mm_cid {
	/* Hotpath read mostly members */
	struct mm_cid_pcpu __percpu	*pcpu;
	unsigned int			percpu;
	unsigned int			transit;
	unsigned int			max_cids;

	/* Rarely used. Moves @lock and @mutex into the second cacheline */
	struct irq_work			irq_work;
	struct work_struct		work;

	raw_spinlock_t			lock;
	struct mutex			mutex;

	/* Modified at low frequency */
	unsigned int			nr_cpus_allowed;
	unsigned int			users;
	unsigned int			pcpu_thrs;
	unsigned int			update_deferred;
} ____cacheline_aligned_in_smp;
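
/*
 * Illustrative sketch, assumed usage: @mutex serializes forks and exits,
 * which modify @users, while @lock covers the affinity setting path that
 * cannot take @mutex. This assumes struct mm_mm_cid is embedded in
 * mm_struct as a member named mm_cid, which is not defined in this header.
 *
 *	static inline void mm_cid_add_user(struct mm_struct *mm)
 *	{
 *		mutex_lock(&mm->mm_cid.mutex);
 *		mm->mm_cid.users++;
 *		mutex_unlock(&mm->mm_cid.mutex);
 *	}
 */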
#else /* CONFIG_SCHED_MM_CID */
struct mm_mm_cid { };
struct sched_mm_cid { };
#endif /* !CONFIG_SCHED_MM_CID */

#endif /* _LINUX_RSEQ_TYPES_H */