/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _RESCTRL_H
#define _RESCTRL_H

#include <linux/cacheinfo.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pid.h>
#include <linux/resctrl_types.h>

#ifdef CONFIG_ARCH_HAS_CPU_RESCTRL
#include <asm/resctrl.h>
#endif

/* CLOSID, RMID value used by the default control group */
#define RESCTRL_RESERVED_CLOSID		0
#define RESCTRL_RESERVED_RMID		0

#define RESCTRL_PICK_ANY_CPU		-1

#ifdef CONFIG_PROC_CPU_RESCTRL

int proc_resctrl_show(struct seq_file *m,
		      struct pid_namespace *ns,
		      struct pid *pid,
		      struct task_struct *tsk);

#endif

/* max value for struct rdt_ctrl_domain's mbps_val */
#define MBA_MAX_MBPS		U32_MAX

/* Walk all possible resources, with variants for only controls or monitors. */
#define for_each_rdt_resource(_r)					\
	for ((_r) = resctrl_arch_get_resource(0);			\
	     (_r) && (_r)->rid < RDT_NUM_RESOURCES;			\
	     (_r) = resctrl_arch_get_resource((_r)->rid + 1))

#define for_each_capable_rdt_resource(r)				\
	for_each_rdt_resource((r))					\
		if ((r)->alloc_capable || (r)->mon_capable)

#define for_each_alloc_capable_rdt_resource(r)				\
	for_each_rdt_resource((r))					\
		if ((r)->alloc_capable)

#define for_each_mon_capable_rdt_resource(r)				\
	for_each_rdt_resource((r))					\
		if ((r)->mon_capable)
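
/*
 * Example: a minimal sketch of walking the allocation-capable resources
 * with the iterators above. The loop body is hypothetical and only
 * illustrates the intended usage.
 *
 *	struct rdt_resource *r;
 *
 *	for_each_alloc_capable_rdt_resource(r)
 *		pr_info("%s supports allocation\n", r->name);
 */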

enum resctrl_res_level {
	RDT_RESOURCE_L3,
	RDT_RESOURCE_L2,
	RDT_RESOURCE_MBA,
	RDT_RESOURCE_SMBA,

	/* Must be the last */
	RDT_NUM_RESOURCES,
};

/**
 * enum resctrl_conf_type - The type of configuration.
 * @CDP_NONE:	No prioritisation, both code and data are controlled or monitored.
 * @CDP_CODE:	Configuration applies to instruction fetches.
 * @CDP_DATA:	Configuration applies to reads and writes.
 */
enum resctrl_conf_type {
	CDP_NONE,
	CDP_CODE,
	CDP_DATA,
};

#define CDP_NUM_TYPES		(CDP_DATA + 1)

/**
 * struct pseudo_lock_region - pseudo-lock region information
 * @s:			Resctrl schema for the resource to which this
 *			pseudo-locked region belongs
 * @closid:		The closid that this pseudo-locked region uses
 * @d:			RDT domain to which this pseudo-locked region
 *			belongs
 * @cbm:		bitmask of the pseudo-locked region
 * @lock_thread_wq:	waitqueue used to wait on the pseudo-locking thread
 *			completion
 * @thread_done:	variable used by waitqueue to test if pseudo-locking
 *			thread completed
 * @cpu:		core associated with the cache on which the setup code
 *			will be run
 * @line_size:		size of the cache lines
 * @size:		size of pseudo-locked region in bytes
 * @kmem:		the kernel memory associated with pseudo-locked region
 * @minor:		minor number of character device associated with this
 *			region
 * @debugfs_dir:	pointer to this region's directory in the debugfs
 *			filesystem
 * @pm_reqs:		Power management QoS requests related to this region
 */
struct pseudo_lock_region {
	struct resctrl_schema		*s;
	u32				closid;
	struct rdt_ctrl_domain		*d;
	u32				cbm;
	wait_queue_head_t		lock_thread_wq;
	int				thread_done;
	int				cpu;
	unsigned int			line_size;
	unsigned int			size;
	void				*kmem;
	unsigned int			minor;
	struct dentry			*debugfs_dir;
	struct list_head		pm_reqs;
};

/**
 * struct resctrl_staged_config - parsed configuration to be applied
 * @new_ctrl:		new ctrl value to be loaded
 * @have_new_ctrl:	whether the user provided new_ctrl is valid
 */
struct resctrl_staged_config {
	u32			new_ctrl;
	bool			have_new_ctrl;
};

enum resctrl_domain_type {
	RESCTRL_CTRL_DOMAIN,
	RESCTRL_MON_DOMAIN,
};

/**
 * struct rdt_domain_hdr - common header for different domain types
 * @list:		all instances of this resource
 * @id:			unique id for this instance
 * @type:		type of this instance
 * @cpu_mask:		which CPUs share this resource
 */
struct rdt_domain_hdr {
	struct list_head		list;
	int				id;
	enum resctrl_domain_type	type;
	struct cpumask			cpu_mask;
};

/**
 * struct rdt_ctrl_domain - group of CPUs sharing a resctrl control resource
 * @hdr:		common header for different domain types
 * @plr:		pseudo-locked region (if any) associated with domain
 * @staged_config:	parsed configuration to be applied
 * @mbps_val:		When mba_sc is enabled, this holds the array of user
 *			specified control values for mba_sc in MBps, indexed
 *			by closid
 */
struct rdt_ctrl_domain {
	struct rdt_domain_hdr		hdr;
	struct pseudo_lock_region	*plr;
	struct resctrl_staged_config	staged_config[CDP_NUM_TYPES];
	u32				*mbps_val;
};

/**
 * struct mbm_cntr_cfg - Assignable counter configuration.
 * @evtid:	MBM event to which the counter is assigned. Only valid
 *		if @rdtgrp is not NULL.
 * @rdtgrp:	resctrl group assigned to the counter. NULL if the
 *		counter is free.
 */
struct mbm_cntr_cfg {
	enum resctrl_event_id	evtid;
	struct rdtgroup		*rdtgrp;
};

/**
 * struct rdt_mon_domain - group of CPUs sharing a resctrl monitor resource
 * @hdr:		common header for different domain types
 * @ci_id:		cache info id for this domain
 * @rmid_busy_llc:	bitmap of which limbo RMIDs are above threshold
 * @mbm_states:		Per-event pointer to the MBM event's saved state.
 *			An MBM event's state is an array of struct mbm_state
 *			indexed by RMID on x86 or combined CLOSID, RMID on Arm.
 * @mbm_over:		worker to periodically read MBM h/w counters
 * @cqm_limbo:		worker to periodically read CQM h/w counters
 * @mbm_work_cpu:	worker CPU for MBM h/w counters
 * @cqm_work_cpu:	worker CPU for CQM h/w counters
 * @cntr_cfg:		array of assignable counters' configuration (indexed
 *			by counter ID)
 */
struct rdt_mon_domain {
	struct rdt_domain_hdr		hdr;
	unsigned int			ci_id;
	unsigned long			*rmid_busy_llc;
	struct mbm_state		*mbm_states[QOS_NUM_L3_MBM_EVENTS];
	struct delayed_work		mbm_over;
	struct delayed_work		cqm_limbo;
	int				mbm_work_cpu;
	int				cqm_work_cpu;
	struct mbm_cntr_cfg		*cntr_cfg;
};
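
/*
 * Example: a minimal sketch of finding a free assignable counter in a
 * monitor domain, relying on @rdtgrp being NULL for unassigned entries.
 * The helper shown here is hypothetical and omits locking and error
 * handling.
 *
 *	static int find_free_cntr(struct rdt_resource *r, struct rdt_mon_domain *d)
 *	{
 *		int i;
 *
 *		for (i = 0; i < r->mon.num_mbm_cntrs; i++) {
 *			if (!d->cntr_cfg[i].rdtgrp)
 *				return i;
 *		}
 *
 *		return -ENOSPC;
 *	}
 */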

/**
 * struct resctrl_cache - Cache allocation related data
 * @cbm_len:			Length of the cache bit mask
 * @min_cbm_bits:		Minimum number of consecutive bits to be set.
 *				The value 0 means the architecture can support
 *				zero CBM.
 * @shareable_bits:		Bitmask of shareable resource with other
 *				executing entities
 * @arch_has_sparse_bitmasks:	True if a bitmask like f00f is valid.
 * @arch_has_per_cpu_cfg:	True if QOS_CFG register for this cache
 *				level has CPU scope.
 * @io_alloc_capable:		True if portion of the cache can be configured
 *				for I/O traffic.
 */
struct resctrl_cache {
	unsigned int	cbm_len;
	unsigned int	min_cbm_bits;
	unsigned int	shareable_bits;
	bool		arch_has_sparse_bitmasks;
	bool		arch_has_per_cpu_cfg;
	bool		io_alloc_capable;
};

/**
 * enum membw_throttle_mode - System's memory bandwidth throttling mode
 * @THREAD_THROTTLE_UNDEFINED:	Not relevant to the system
 * @THREAD_THROTTLE_MAX:	Memory bandwidth is throttled at the core,
 *				always using the smallest bandwidth percentage
 *				assigned to its threads, aka "max throttling"
 * @THREAD_THROTTLE_PER_THREAD:	Memory bandwidth is throttled at the thread level
 */
enum membw_throttle_mode {
	THREAD_THROTTLE_UNDEFINED = 0,
	THREAD_THROTTLE_MAX,
	THREAD_THROTTLE_PER_THREAD,
};

/**
 * struct resctrl_membw - Memory bandwidth allocation related data
 * @min_bw:		Minimum memory bandwidth percentage user can request
 * @max_bw:		Maximum memory bandwidth value, used as the reset value
 * @bw_gran:		Granularity at which the memory bandwidth is allocated
 * @delay_linear:	True if memory B/W delay is in linear scale
 * @arch_needs_linear:	True if we can't configure non-linear resources
 * @throttle_mode:	Bandwidth throttling mode when threads request
 *			different memory bandwidths
 * @mba_sc:		True if MBA software controller (mba_sc) is enabled
 * @mb_map:		Mapping of memory B/W percentage to memory B/W delay
 */
struct resctrl_membw {
	u32				min_bw;
	u32				max_bw;
	u32				bw_gran;
	u32				delay_linear;
	bool				arch_needs_linear;
	enum membw_throttle_mode	throttle_mode;
	bool				mba_sc;
	u32				*mb_map;
};

struct resctrl_schema;

enum resctrl_scope {
	RESCTRL_L2_CACHE = 2,
	RESCTRL_L3_CACHE = 3,
	RESCTRL_L3_NODE,
};

/**
 * enum resctrl_schema_fmt - The format user-space provides for a schema.
 * @RESCTRL_SCHEMA_BITMAP:	The schema is a bitmap in hex.
 * @RESCTRL_SCHEMA_RANGE:	The schema is a decimal number.
 */
enum resctrl_schema_fmt {
	RESCTRL_SCHEMA_BITMAP,
	RESCTRL_SCHEMA_RANGE,
};

/**
 * struct resctrl_mon - Monitoring related data of a resctrl resource.
 * @num_rmid:		Number of RMIDs available.
 * @mbm_cfg_mask:	Memory transactions that can be tracked when bandwidth
 *			monitoring events can be configured.
 * @num_mbm_cntrs:	Number of assignable counters.
 * @mbm_cntr_assignable: Is the system capable of supporting counter assignment?
 * @mbm_assign_on_mkdir: True if counters should automatically be assigned to MBM
 *			 events of monitor groups created via mkdir.
 */
struct resctrl_mon {
	int			num_rmid;
	unsigned int		mbm_cfg_mask;
	int			num_mbm_cntrs;
	bool			mbm_cntr_assignable;
	bool			mbm_assign_on_mkdir;
};

/**
 * struct rdt_resource - attributes of a resctrl resource
 * @rid:		The index of the resource
 * @alloc_capable:	Is allocation available on this machine
 * @mon_capable:	Is monitor feature available on this machine
 * @ctrl_scope:		Scope of this resource for control functions
 * @mon_scope:		Scope of this resource for monitor functions
 * @cache:		Cache allocation related data
 * @membw:		If the component has bandwidth controls, their properties.
 * @mon:		Monitoring related data.
 * @ctrl_domains:	RCU list of all control domains for this resource
 * @mon_domains:	RCU list of all monitor domains for this resource
 * @name:		Name to use in "schemata" file.
 * @schema_fmt:		Which format string and parser is used for this schema.
 * @cdp_capable:	Is the CDP feature available on this resource
 */
struct rdt_resource {
	int			rid;
	bool			alloc_capable;
	bool			mon_capable;
	enum resctrl_scope	ctrl_scope;
	enum resctrl_scope	mon_scope;
	struct resctrl_cache	cache;
	struct resctrl_membw	membw;
	struct resctrl_mon	mon;
	struct list_head	ctrl_domains;
	struct list_head	mon_domains;
	char			*name;
	enum resctrl_schema_fmt	schema_fmt;
	bool			cdp_capable;
};

/*
 * Get the resource that exists at this level. If the level is not supported,
 * a dummy/not-capable resource can be returned. Levels >= RDT_NUM_RESOURCES
 * will return NULL.
 */
struct rdt_resource *resctrl_arch_get_resource(enum resctrl_res_level l);

/**
 * struct resctrl_schema - configuration abilities of a resource presented to
 *			   user-space
 * @list:	Member of resctrl_schema_all.
 * @name:	The name to use in the "schemata" file.
 * @fmt_str:	Format string to show domain value.
 * @conf_type:	Whether this schema is specific to code/data.
 * @res:	The resource structure exported by the architecture to describe
 *		the hardware that is configured by this schema.
 * @num_closid:	The number of CLOSIDs that can be used with this schema. When
 *		features like CDP are enabled, this will be lower than what the
 *		hardware supports for the resource.
 */
struct resctrl_schema {
	struct list_head		list;
	char				name[8];
	const char			*fmt_str;
	enum resctrl_conf_type		conf_type;
	struct rdt_resource		*res;
	u32				num_closid;
};

struct resctrl_cpu_defaults {
	u32 closid;
	u32 rmid;
};

struct resctrl_mon_config_info {
	struct rdt_resource	*r;
	struct rdt_mon_domain	*d;
	u32			evtid;
	u32			mon_config;
};

/**
 * resctrl_arch_sync_cpu_closid_rmid() - Refresh this CPU's CLOSID and RMID.
 *					 Call via IPI.
 * @info:	If non-NULL, a pointer to a struct resctrl_cpu_defaults
 *		specifying the new CLOSID and RMID for tasks in the default
 *		resctrl ctrl and mon group when running on this CPU. If NULL,
 *		this CPU is not re-assigned to a different default group.
 *
 * Propagates reassignment of CPUs and/or tasks to different resctrl groups
 * when requested by the resctrl core code.
 *
 * This function records the per-cpu defaults specified by @info (if any),
 * and then reconfigures the CPU's hardware CLOSID and RMID for subsequent
 * execution based on @current, in the same way as during a task switch.
 */
void resctrl_arch_sync_cpu_closid_rmid(void *info);

/**
 * resctrl_get_default_ctrl() - Return the default control value for this
 *				resource.
 * @r:		The resource whose default control value is queried.
 */
static inline u32 resctrl_get_default_ctrl(struct rdt_resource *r)
{
	switch (r->schema_fmt) {
	case RESCTRL_SCHEMA_BITMAP:
		return BIT_MASK(r->cache.cbm_len) - 1;
	case RESCTRL_SCHEMA_RANGE:
		return r->membw.max_bw;
	}

	return WARN_ON_ONCE(1);
}
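
/*
 * Example: a worked sketch of the defaults above, using illustrative
 * hardware values. For a bitmap schema with cache.cbm_len == 11,
 * BIT_MASK(11) - 1 == 0x7ff, i.e. all eleven CBM bits set. For a range
 * schema, the default is simply membw.max_bw, i.e. whatever reset value
 * the architecture reported for that resource.
 */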

/* The number of closid supported by this resource regardless of CDP */
u32 resctrl_arch_get_num_closid(struct rdt_resource *r);
u32 resctrl_arch_system_num_rmid_idx(void);
int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid);

void resctrl_enable_mon_event(enum resctrl_event_id eventid);

bool resctrl_is_mon_event_enabled(enum resctrl_event_id eventid);

bool resctrl_arch_is_evt_configurable(enum resctrl_event_id evt);

static inline bool resctrl_is_mbm_event(enum resctrl_event_id eventid)
{
	return (eventid >= QOS_L3_MBM_TOTAL_EVENT_ID &&
		eventid <= QOS_L3_MBM_LOCAL_EVENT_ID);
}

u32 resctrl_get_mon_evt_cfg(enum resctrl_event_id eventid);

/* Iterate over all memory bandwidth events */
#define for_each_mbm_event_id(eventid)				\
	for (eventid = QOS_L3_MBM_TOTAL_EVENT_ID;		\
	     eventid <= QOS_L3_MBM_LOCAL_EVENT_ID; eventid++)

/* Iterate over memory bandwidth arrays in domain structures */
#define for_each_mbm_idx(idx)					\
	for (idx = 0; idx < QOS_NUM_L3_MBM_EVENTS; idx++)
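
/*
 * Example: a minimal sketch of using for_each_mbm_event_id() together with
 * resctrl_is_mon_event_enabled() to act only on the MBM events the system
 * has enabled. The counting here is purely illustrative.
 *
 *	enum resctrl_event_id eventid;
 *	int nr_enabled = 0;
 *
 *	for_each_mbm_event_id(eventid) {
 *		if (resctrl_is_mon_event_enabled(eventid))
 *			nr_enabled++;
 *	}
 */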

/**
 * resctrl_arch_mon_event_config_write() - Write the config for an event.
 * @config_info: struct resctrl_mon_config_info describing the resource, domain
 *		 and event.
 *
 * Reads resource, domain and eventid from @config_info and writes the
 * event config_info->mon_config into hardware.
 *
 * Called via IPI to reach a CPU that is a member of the specified domain.
 */
void resctrl_arch_mon_event_config_write(void *config_info);

/**
 * resctrl_arch_mon_event_config_read() - Read the config for an event.
 * @config_info: struct resctrl_mon_config_info describing the resource, domain
 *		 and event.
 *
 * Reads resource, domain and eventid from @config_info and reads the
 * hardware config value into config_info->mon_config.
 *
 * Called via IPI to reach a CPU that is a member of the specified domain.
 */
void resctrl_arch_mon_event_config_read(void *config_info);

/* For use by arch code to remap resctrl's smaller CDP CLOSID range */
static inline u32 resctrl_get_config_index(u32 closid,
					   enum resctrl_conf_type type)
{
	switch (type) {
	default:
	case CDP_NONE:
		return closid;
	case CDP_CODE:
		return closid * 2 + 1;
	case CDP_DATA:
		return closid * 2;
	}
}
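
/*
 * Example: a worked sketch of the CDP remapping above. With CDP enabled,
 * resctrl's CLOSID 1 maps to hardware configuration index 2 for CDP_DATA
 * and index 3 for CDP_CODE, while with CDP_NONE it stays at index 1. This
 * is why the number of usable CLOSIDs is halved when CDP is enabled.
 */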

bool resctrl_arch_get_cdp_enabled(enum resctrl_res_level l);
int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable);

/**
 * resctrl_arch_mbm_cntr_assign_enabled() - Check if MBM counter assignment
 *					    mode is enabled.
 * @r:		Pointer to the resource structure.
 *
 * Return:
 * true if the assignment mode is enabled, false otherwise.
 */
bool resctrl_arch_mbm_cntr_assign_enabled(struct rdt_resource *r);

/**
 * resctrl_arch_mbm_cntr_assign_set() - Configure the MBM counter assignment mode.
 * @r:		Pointer to the resource structure.
 * @enable:	Set to true to enable, false to disable the assignment mode.
 *
 * Return:
 * 0 on success, < 0 on error.
 */
int resctrl_arch_mbm_cntr_assign_set(struct rdt_resource *r, bool enable);

/*
 * Update the ctrl_val and apply this config right now.
 * Must be called on one of the domain's CPUs.
 */
int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_ctrl_domain *d,
			    u32 closid, enum resctrl_conf_type t, u32 cfg_val);

u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_ctrl_domain *d,
			    u32 closid, enum resctrl_conf_type type);
int resctrl_online_ctrl_domain(struct rdt_resource *r, struct rdt_ctrl_domain *d);
int resctrl_online_mon_domain(struct rdt_resource *r, struct rdt_mon_domain *d);
void resctrl_offline_ctrl_domain(struct rdt_resource *r, struct rdt_ctrl_domain *d);
void resctrl_offline_mon_domain(struct rdt_resource *r, struct rdt_mon_domain *d);
void resctrl_online_cpu(unsigned int cpu);
void resctrl_offline_cpu(unsigned int cpu);

/**
 * resctrl_arch_rmid_read() - Read the eventid counter corresponding to rmid
 *			      for this resource and domain.
 * @r:			resource that the counter should be read from.
 * @d:			domain that the counter should be read from.
 * @closid:		closid that matches the rmid. Depending on the architecture,
 *			the counter may match traffic of both @closid and @rmid, or
 *			@rmid only.
 * @rmid:		rmid of the counter to read.
 * @eventid:		eventid to read, e.g. L3 occupancy.
 * @val:		result of the counter read in bytes.
 * @arch_mon_ctx:	An architecture specific value from
 *			resctrl_arch_mon_ctx_alloc(), for MPAM this identifies
 *			the hardware monitor allocated for this read request.
 *
 * Some architectures need to sleep when first programming some of the counters
 * (specifically: arm64's MPAM cache occupancy counters can return 'not ready'
 * for a short period of time). Call from a non-migrateable process context on
 * a CPU that belongs to domain @d, e.g. use smp_call_on_cpu() or
 * schedule_work_on(). This function can be called with interrupts masked,
 * e.g. using smp_call_function_any(), but may consistently return an error.
 *
 * Return:
 * 0 on success, or -EIO, -EINVAL etc on error.
 */
int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_mon_domain *d,
			   u32 closid, u32 rmid, enum resctrl_event_id eventid,
			   u64 *val, void *arch_mon_ctx);
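
/*
 * Example: a minimal sketch of honouring the calling contract above by
 * running the read from a process context pinned to a CPU in the domain
 * via smp_call_on_cpu(). The wrapper and its argument structure are
 * hypothetical.
 *
 *	struct rmid_read_args {
 *		struct rdt_resource *r;
 *		struct rdt_mon_domain *d;
 *		u32 closid, rmid;
 *		enum resctrl_event_id evtid;
 *		void *arch_mon_ctx;
 *		u64 val;
 *	};
 *
 *	static int __read_rmid_on_cpu(void *arg)
 *	{
 *		struct rmid_read_args *a = arg;
 *
 *		return resctrl_arch_rmid_read(a->r, a->d, a->closid, a->rmid,
 *					      a->evtid, &a->val, a->arch_mon_ctx);
 *	}
 *
 *	...
 *	err = smp_call_on_cpu(cpumask_any(&d->hdr.cpu_mask),
 *			      __read_rmid_on_cpu, &args, false);
 */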

/**
 * resctrl_arch_rmid_read_context_check() - warn about invalid contexts
 *
 * When built with CONFIG_DEBUG_ATOMIC_SLEEP generate a warning when
 * resctrl_arch_rmid_read() is called with preemption disabled.
 *
 * The contract with resctrl_arch_rmid_read() is that if interrupts
 * are unmasked, it can sleep. This allows NOHZ_FULL systems to use an
 * IPI, (and fail if the call needed to sleep), while most of the time
 * the work is scheduled, allowing the call to sleep.
 */
static inline void resctrl_arch_rmid_read_context_check(void)
{
	if (!irqs_disabled())
		might_sleep();
}

/**
 * resctrl_find_domain() - Search for a domain id in a resource domain list.
 * @h:		The domain list to search.
 * @id:		The domain id to search for.
 * @pos:	A pointer to the position in the list where a domain with @id
 *		should be inserted.
 *
 * Search the domain list to find the domain id. If the domain id is
 * found, return the domain. NULL otherwise. If the domain id is not
 * found (and NULL is returned), the list position of the first domain
 * with an id bigger than the input id is returned to the caller via @pos.
 */
struct rdt_domain_hdr *resctrl_find_domain(struct list_head *h, int id,
					   struct list_head **pos);
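
/*
 * Example: a minimal sketch of how @pos is intended to be used when a
 * domain comes online. The allocation of 'new_d' and the surrounding
 * locking are omitted and hypothetical.
 *
 *	struct list_head *add_pos = NULL;
 *	struct rdt_domain_hdr *hdr;
 *
 *	hdr = resctrl_find_domain(&r->mon_domains, id, &add_pos);
 *	if (!hdr) {
 *		// Not present yet: insert the newly allocated 'new_d' at
 *		// add_pos so the list stays sorted by id.
 *		list_add_tail_rcu(&new_d->hdr.list, add_pos);
 *	}
 */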

/**
 * resctrl_arch_reset_rmid() - Reset any private state associated with rmid
 *			       and eventid.
 * @r:		The domain's resource.
 * @d:		The rmid's domain.
 * @closid:	closid that matches the rmid. Depending on the architecture, the
 *		counter may match traffic of both @closid and @rmid, or @rmid only.
 * @rmid:	The rmid whose counter values should be reset.
 * @eventid:	The eventid whose counter values should be reset.
 *
 * This can be called from any CPU.
 */
void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_mon_domain *d,
			     u32 closid, u32 rmid,
			     enum resctrl_event_id eventid);

/**
 * resctrl_arch_reset_rmid_all() - Reset all private state associated with
 *				   all rmids and eventids.
 * @r:		The resctrl resource.
 * @d:		The domain for which all architectural counter state will
 *		be cleared.
 *
 * This can be called from any CPU.
 */
void resctrl_arch_reset_rmid_all(struct rdt_resource *r, struct rdt_mon_domain *d);

/**
 * resctrl_arch_reset_all_ctrls() - Reset the control for each CLOSID to its
 *				    default.
 * @r:		The resctrl resource to reset.
 *
 * This can be called from any CPU.
 */
void resctrl_arch_reset_all_ctrls(struct rdt_resource *r);

/**
 * resctrl_arch_config_cntr() - Configure the counter with its new RMID
 *				and event details.
 * @r:		Resource structure.
 * @d:		The domain in which counter with ID @cntr_id should be configured.
 * @evtid:	Monitoring event type (e.g., QOS_L3_MBM_TOTAL_EVENT_ID
 *		or QOS_L3_MBM_LOCAL_EVENT_ID).
 * @rmid:	RMID.
 * @closid:	CLOSID.
 * @cntr_id:	Counter ID to configure.
 * @assign:	True to assign the counter or update an existing assignment,
 *		false to unassign the counter.
 *
 * This can be called from any CPU.
 */
void resctrl_arch_config_cntr(struct rdt_resource *r, struct rdt_mon_domain *d,
			      enum resctrl_event_id evtid, u32 rmid, u32 closid,
			      u32 cntr_id, bool assign);

/**
 * resctrl_arch_cntr_read() - Read the event data corresponding to the counter ID
 *			      assigned to the RMID, event pair for this resource
 *			      and domain.
 * @r:		Resource that the counter should be read from.
 * @d:		Domain that the counter should be read from.
 * @closid:	CLOSID that matches the RMID.
 * @rmid:	The RMID to which @cntr_id is assigned.
 * @cntr_id:	The counter to read.
 * @eventid:	The MBM event to which @cntr_id is assigned.
 * @val:	Result of the counter read in bytes.
 *
 * Called on a CPU that belongs to domain @d when "mbm_event" mode is enabled.
 * Called from a non-migrateable process context via smp_call_on_cpu() unless all
 * CPUs are nohz_full, in which case it is called via IPI (smp_call_function_any()).
 *
 * Return:
 * 0 on success, or -EIO, -EINVAL etc on error.
 */
int resctrl_arch_cntr_read(struct rdt_resource *r, struct rdt_mon_domain *d,
			   u32 closid, u32 rmid, int cntr_id,
			   enum resctrl_event_id eventid, u64 *val);

/**
 * resctrl_arch_reset_cntr() - Reset any private state associated with counter ID.
 * @r:		The domain's resource.
 * @d:		The counter ID's domain.
 * @closid:	CLOSID that matches the RMID.
 * @rmid:	The RMID to which @cntr_id is assigned.
 * @cntr_id:	The counter to reset.
 * @eventid:	The MBM event to which @cntr_id is assigned.
 *
 * This can be called from any CPU.
 */
void resctrl_arch_reset_cntr(struct rdt_resource *r, struct rdt_mon_domain *d,
			     u32 closid, u32 rmid, int cntr_id,
			     enum resctrl_event_id eventid);

/**
 * resctrl_arch_io_alloc_enable() - Enable/disable io_alloc feature.
 * @r:		The resctrl resource.
 * @enable:	Enable (true) or disable (false) io_alloc on resource @r.
 *
 * This can be called from any CPU.
 *
 * Return:
 * 0 on success, <0 on error.
 */
int resctrl_arch_io_alloc_enable(struct rdt_resource *r, bool enable);

/**
 * resctrl_arch_get_io_alloc_enabled() - Get io_alloc feature state.
 * @r:		The resctrl resource.
 *
 * Return:
 * true if io_alloc is enabled or false if disabled.
 */
bool resctrl_arch_get_io_alloc_enabled(struct rdt_resource *r);

extern unsigned int resctrl_rmid_realloc_threshold;
extern unsigned int resctrl_rmid_realloc_limit;

int resctrl_init(void);
void resctrl_exit(void);

#ifdef CONFIG_RESCTRL_FS_PSEUDO_LOCK
u64 resctrl_arch_get_prefetch_disable_bits(void);
int resctrl_arch_pseudo_lock_fn(void *_plr);
int resctrl_arch_measure_cycles_lat_fn(void *_plr);
int resctrl_arch_measure_l2_residency(void *_plr);
int resctrl_arch_measure_l3_residency(void *_plr);
#else
static inline u64 resctrl_arch_get_prefetch_disable_bits(void) { return 0; }
static inline int resctrl_arch_pseudo_lock_fn(void *_plr) { return 0; }
static inline int resctrl_arch_measure_cycles_lat_fn(void *_plr) { return 0; }
static inline int resctrl_arch_measure_l2_residency(void *_plr) { return 0; }
static inline int resctrl_arch_measure_l3_residency(void *_plr) { return 0; }
#endif /* CONFIG_RESCTRL_FS_PSEUDO_LOCK */
#endif /* _RESCTRL_H */