// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#define pr_fmt(fmt) "PM: " fmt
#define dev_fmt pr_fmt

#include <linux/device.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm-trace.h>
#include <linux/pm_wakeirq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/devfreq.h>
#include <linux/timer.h>
#include <linux/nmi.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in dpm_list are in depth-first order, simply because
 * children are guaranteed to be discovered after their parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static DEFINE_MUTEX(async_wip_mtx);
static int async_error;
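
/*
 * pm_transition holds the PM message for the transition currently in
 * progress, and async_error records the first error reported by any device
 * callback (synchronous or asynchronous) during that transition.
 */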

/**
 * pm_hibernate_is_recovering - Check if recovering from a hibernation error.
 *
 * Used to query whether dev_pm_ops.thaw() is being invoked for the normal
 * hibernation case or while recovering from an error during hibernation.
 *
 * Return: %true in the error-recovery case, %false in the normal case.
 */
bool pm_hibernate_is_recovering(void)
{
	return pm_transition.event == PM_EVENT_RECOVER;
}
EXPORT_SYMBOL_GPL(pm_hibernate_is_recovering);
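
/*
 * Illustrative sketch (not part of the kernel tree; foo_thaw() and
 * foo_reinit_hw() are hypothetical): a driver's ->thaw() callback might
 * branch on the helper above, e.g.:
 *
 *	static int foo_thaw(struct device *dev)
 *	{
 *		if (pm_hibernate_is_recovering())
 *			dev_warn(dev, "recovering from failed hibernation\n");
 *		return foo_reinit_hw(dev);
 *	}
 */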

static const char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	case PM_EVENT_POWEROFF:
		return "poweroff";
	default:
		return "(unknown PM event)";
	}
}

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	dev->power.is_noirq_suspended = false;
	dev->power.is_late_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	/* Skip PM setup/initialization. */
	if (device_pm_not_required(dev))
		return;

	pr_debug("Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	device_pm_check_callbacks(dev);
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			 dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	dev->power.in_dpm_list = true;
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	if (device_pm_not_required(dev))
		return;

	pr_debug("Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	dev->power.in_dpm_list = false;
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
	device_pm_check_callbacks(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev, void *cb)
{
	if (!pm_print_times_enabled)
		return 0;

	dev_info(dev, "calling %ps @ %i, parent: %s\n", cb,
		 task_pid_nr(current),
		 dev->parent ? dev_name(dev->parent) : "none");
	return ktime_get();
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  void *cb, int error)
{
	ktime_t rettime;

	if (!pm_print_times_enabled)
		return;

	rettime = ktime_get();
	dev_info(dev, "%ps returned %d after %Ld usecs\n", cb, error,
		 (unsigned long long)ktime_us_delta(rettime, calltime));
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

static void dpm_wait_for_suppliers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * If the supplier goes away right after we've checked the link to it,
	 * we'll wait for its completion to change the state, but that's fine,
	 * because the only things that will block as a result are the SRCU
	 * callbacks freeing the link objects for the links in the list we're
	 * walking.
	 */
	dev_for_each_link_to_supplier(link, dev)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT &&
		    !device_link_flag_is_sync_state_only(link->flags))
			dpm_wait(link->supplier, async);

	device_links_read_unlock(idx);
}

static bool dpm_wait_for_superior(struct device *dev, bool async)
{
	struct device *parent;

	/*
	 * If the device is resumed asynchronously and the parent's callback
	 * deletes both the device and the parent itself, the parent object may
	 * be freed while this function is running, so avoid that by reference
	 * counting the parent once more unless the device has been deleted
	 * already (in which case return right away).
	 */
	mutex_lock(&dpm_list_mtx);

	if (!device_pm_initialized(dev)) {
		mutex_unlock(&dpm_list_mtx);
		return false;
	}

	parent = get_device(dev->parent);

	mutex_unlock(&dpm_list_mtx);

	dpm_wait(parent, async);
	put_device(parent);

	dpm_wait_for_suppliers(dev, async);

	/*
	 * If the parent's callback has deleted the device, attempting to resume
	 * it would be invalid, so avoid doing that then.
	 */
	return device_pm_initialized(dev);
}

static void dpm_wait_for_consumers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * The status of a device link can only be changed from "dormant" by a
	 * probe, but that cannot happen during system suspend/resume.  In
	 * theory it can change to "dormant" at that time, but then it is
	 * reasonable to wait for the target device anyway (e.g. if it goes
	 * away, it's better to wait for it to go away completely and then
	 * continue instead of trying to continue in parallel with its
	 * unregistration).
	 */
	dev_for_each_link_to_consumer(link, dev)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT &&
		    !device_link_flag_is_sync_state_only(link->flags))
			dpm_wait(link->consumer, async);

	device_links_read_unlock(idx);
}

static void dpm_wait_for_subordinate(struct device *dev, bool async)
{
	dpm_wait_for_children(dev, async);
	dpm_wait_for_consumers(dev, async);
}

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_POWEROFF:
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
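
/*
 * Illustrative sketch (not a literal excerpt): dispatching a system suspend
 * callback for a device through its PM domain with the helper above would
 * look like this, which is essentially what dpm_run_callback() is handed
 * in device_suspend_noirq() and friends:
 *
 *	pm_callback_t cb = pm_op(&dev->pm_domain->ops, PMSG_SUSPEND);
 *
 *	if (cb)
 *		error = cb(dev);
 */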

/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_POWEROFF:
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_POWEROFF:
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
{
	dev_dbg(dev, "%s%s%s driver flags: %x\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "", dev->power.driver_flags);
}

static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
		       int error)
{
	dev_err(dev, "failed to %s%s: error %d\n", pm_verb(state.event), info,
		error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
			  const char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;

	pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
		  info ?: "", info ? " " : "", pm_verb(state.event),
		  error ? "aborted" : "complete",
		  usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, const char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev, cb);

	pm_dev_dbg(dev, state, info);
	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(dev, cb, error);

	initcall_debug_report(dev, calltime, cb, error);

	return error;
}

#ifdef CONFIG_DPM_WATCHDOG
struct dpm_watchdog {
	struct device		*dev;
	struct task_struct	*tsk;
	struct timer_list	timer;
	bool			fatal;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
	struct dpm_watchdog wd

static bool __read_mostly dpm_watchdog_all_cpu_backtrace;
module_param(dpm_watchdog_all_cpu_backtrace, bool, 0644);
MODULE_PARM_DESC(dpm_watchdog_all_cpu_backtrace,
		 "Backtrace all CPUs on DPM watchdog timeout");

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @t: The timer that PM watchdog depends on.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover, so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(struct timer_list *t)
{
	struct dpm_watchdog *wd = timer_container_of(wd, t, timer);
	struct timer_list *timer = &wd->timer;
	unsigned int time_left;

	if (wd->fatal) {
		unsigned int this_cpu = smp_processor_id();

		dev_emerg(wd->dev, "**** DPM device timeout ****\n");
		show_stack(wd->tsk, NULL, KERN_EMERG);
		if (dpm_watchdog_all_cpu_backtrace)
			trigger_allbutcpu_cpu_backtrace(this_cpu);
		panic("%s %s: unrecoverable failure\n",
		      dev_driver_string(wd->dev), dev_name(wd->dev));
	}

	time_left = CONFIG_DPM_WATCHDOG_TIMEOUT - CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
	dev_warn(wd->dev, "**** DPM device timeout after %u seconds; %u seconds until panic ****\n",
		 CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT, time_left);
	show_stack(wd->tsk, NULL, KERN_WARNING);

	wd->fatal = true;
	mod_timer(timer, jiffies + HZ * time_left);
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
	struct timer_list *timer = &wd->timer;

	wd->dev = dev;
	wd->tsk = current;
	wd->fatal = CONFIG_DPM_WATCHDOG_TIMEOUT == CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;

	timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
	/* use same timeout value for both suspend and resume */
	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
	add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
	struct timer_list *timer = &wd->timer;

	timer_delete_sync(timer);
	timer_destroy_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif
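
/*
 * Typical usage of the watchdog helpers above, as done in device_resume()
 * below: the watchdog is armed around the device lock and callback
 * invocation so that a stuck callback is caught.
 *
 *	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
 *
 *	dpm_watchdog_set(&wd, dev);
 *	device_lock(dev);
 *	...invoke the device's PM callback...
 *	device_unlock(dev);
 *	dpm_watchdog_clear(&wd);
 */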

/*------------------------- Resume routines -------------------------*/

/**
 * dev_pm_skip_resume - System-wide device resume optimization check.
 * @dev: Target device.
 *
 * Return:
 * - %false if the transition under way is RESTORE.
 * - Return value of dev_pm_skip_suspend() if the transition under way is THAW.
 * - The logical negation of %power.must_resume otherwise (that is, when the
 *   transition under way is RESUME).
 */
bool dev_pm_skip_resume(struct device *dev)
{
	if (pm_transition.event == PM_EVENT_RESTORE)
		return false;

	if (pm_transition.event == PM_EVENT_THAW)
		return dev_pm_skip_suspend(dev);

	return !dev->power.must_resume;
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}
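
/*
 * Note that is_async() deliberately returns false whenever PM trace is
 * enabled: PM trace records one device at a time in the RTC to identify
 * the callback that crashed the machine, which only works if devices are
 * processed one by one, in a deterministic order.
 */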

static bool __dpm_async(struct device *dev, async_func_t func)
{
	if (dev->power.work_in_progress)
		return true;

	if (!is_async(dev))
		return false;

	dev->power.work_in_progress = true;

	get_device(dev);

	if (async_schedule_dev_nocall(func, dev))
		return true;

	put_device(dev);

	return false;
}

static bool dpm_async_fn(struct device *dev, async_func_t func)
{
	guard(mutex)(&async_wip_mtx);

	return __dpm_async(dev, func);
}

static int dpm_async_with_cleanup(struct device *dev, void *fn)
{
	guard(mutex)(&async_wip_mtx);

	if (!__dpm_async(dev, fn))
		dev->power.work_in_progress = false;

	return 0;
}
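
/*
 * The difference between the two wrappers above: dpm_async_fn() reports
 * whether async processing is (or already was) in flight for the device,
 * so callers fall back to synchronous handling when it returns false,
 * whereas dpm_async_with_cleanup() clears work_in_progress on failure so
 * that a later synchronous pass can still pick the device up.
 */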

static void dpm_async_resume_children(struct device *dev, async_func_t func)
{
	/*
	 * Prevent racing with dpm_clear_async_state() during initial list
	 * walks in dpm_noirq_resume_devices(), dpm_resume_early(), and
	 * dpm_resume().
	 */
	guard(mutex)(&dpm_list_mtx);

	/*
	 * Start processing "async" children of the device unless it's been
	 * started already for them.
	 */
	device_for_each_child(dev, func, dpm_async_with_cleanup);
}

static void dpm_async_resume_subordinate(struct device *dev, async_func_t func)
{
	struct device_link *link;
	int idx;

	dpm_async_resume_children(dev, func);

	idx = device_links_read_lock();

	/* Start processing the device's "async" consumers. */
	dev_for_each_link_to_consumer(link, dev)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_async_with_cleanup(link->consumer, func);

	device_links_read_unlock(idx);
}

static void dpm_clear_async_state(struct device *dev)
{
	reinit_completion(&dev->power.completion);
	dev->power.work_in_progress = false;
}

static bool dpm_root_device(struct device *dev)
{
	lockdep_assert_held(&dpm_list_mtx);

	/*
	 * Since this function is required to run under dpm_list_mtx, the
	 * list_empty() below will only return true if the device's list of
	 * suppliers is actually empty before calling it.
	 */
	return !dev->parent && list_empty(&dev->links.suppliers);
}

static void async_resume_noirq(void *data, async_cookie_t cookie);

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static void device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	bool skip_resume;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_noirq_suspended) {
		/*
		 * This means that system suspend has been aborted in the noirq
		 * phase before invoking the noirq suspend callback for the
		 * device, so if device_suspend_late() has left it in suspend,
		 * device_resume_early() should leave it in suspend as well, in
		 * case the early resume of it depends on the noirq resume that
		 * has not run.
		 */
		if (dev_pm_skip_suspend(dev))
			dev->power.must_resume = false;

		goto Out;
	}

	if (!dpm_wait_for_superior(dev, async))
		goto Out;

	skip_resume = dev_pm_skip_resume(dev);
	/*
	 * If the driver callback is skipped below or by the middle layer
	 * callback and device_resume_early() also skips the driver callback
	 * for this device later, it needs to appear as "suspended" to
	 * PM-runtime, so change its status accordingly.
	 *
	 * Otherwise, the device is going to be resumed, so set its PM-runtime
	 * status to "active" unless its power.smart_suspend flag is clear, in
	 * which case it is not necessary to update its PM-runtime status.
	 */
	if (skip_resume)
		pm_runtime_set_suspended(dev);
	else if (dev_pm_smart_suspend(dev))
		pm_runtime_set_active(dev);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}
	if (callback)
		goto Run;

	if (skip_resume)
		goto Skip;

	if (dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);

Skip:
	dev->power.is_noirq_suspended = false;

Out:
	complete_all(&dev->power.completion);
	TRACE_RESUME(error);

	if (error) {
		WRITE_ONCE(async_error, error);
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
	}

	dpm_async_resume_subordinate(dev, async_resume_noirq);
}

static void async_resume_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = data;

	device_resume_noirq(dev, pm_transition, true);
	put_device(dev);
}

static void dpm_noirq_resume_devices(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);

	async_error = 0;
	pm_transition = state;

	mutex_lock(&dpm_list_mtx);

	/*
	 * Start processing "async" root devices upfront so they don't wait for
	 * the "sync" devices they don't depend on.
	 */
	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
		dpm_clear_async_state(dev);
		if (dpm_root_device(dev))
			dpm_async_with_cleanup(dev, async_resume_noirq);
	}

	while (!list_empty(&dpm_noirq_list)) {
		dev = to_device(dpm_noirq_list.next);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);

		if (!dpm_async_fn(dev, async_resume_noirq)) {
			get_device(dev);

			mutex_unlock(&dpm_list_mtx);

			device_resume_noirq(dev, state, false);

			put_device(dev);

			mutex_lock(&dpm_list_mtx);
		}
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, "noirq");
	if (READ_ONCE(async_error))
		dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);

	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
 * allow device drivers' interrupt handlers to be called.
 */
void dpm_resume_noirq(pm_message_t state)
{
	dpm_noirq_resume_devices(state);

	resume_device_irqs();
	device_wakeup_disarm_wake_irqs();
}

static void async_resume_early(void *data, async_cookie_t cookie);

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static void device_resume_early(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_late_suspended)
		goto Out;

	if (dev->power.syscore)
		goto Skip;

	if (!dpm_wait_for_superior(dev, async))
		goto Out;

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}
	if (callback)
		goto Run;

	if (dev_pm_skip_resume(dev))
		goto Skip;

	if (dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);

Skip:
	dev->power.is_late_suspended = false;
	pm_runtime_enable(dev);

Out:
	TRACE_RESUME(error);

	complete_all(&dev->power.completion);

	if (error) {
		WRITE_ONCE(async_error, error);
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, state, async ? " async early" : " early", error);
	}

	dpm_async_resume_subordinate(dev, async_resume_early);
}

static void async_resume_early(void *data, async_cookie_t cookie)
{
	struct device *dev = data;

	device_resume_early(dev, pm_transition, true);
	put_device(dev);
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_early(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);

	async_error = 0;
	pm_transition = state;

	mutex_lock(&dpm_list_mtx);

	/*
	 * Start processing "async" root devices upfront so they don't wait for
	 * the "sync" devices they don't depend on.
	 */
	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
		dpm_clear_async_state(dev);
		if (dpm_root_device(dev))
			dpm_async_with_cleanup(dev, async_resume_early);
	}

	while (!list_empty(&dpm_late_early_list)) {
		dev = to_device(dpm_late_early_list.next);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);

		if (!dpm_async_fn(dev, async_resume_early)) {
			get_device(dev);

			mutex_unlock(&dpm_list_mtx);

			device_resume_early(dev, state, false);

			put_device(dev);

			mutex_lock(&dpm_list_mtx);
		}
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, "early");
	if (READ_ONCE(async_error))
		dpm_save_failed_step(SUSPEND_RESUME_EARLY);

	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);
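
/*
 * Note that dpm_resume_start() is the counterpart of dpm_suspend_end(),
 * undoing the "late" and "noirq" suspend phases in reverse order.
 */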

static void async_resume(void *data, async_cookie_t cookie);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static void device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	if (!dev->power.is_suspended)
		goto Complete;

	dev->power.is_suspended = false;

	if (dev->power.direct_complete) {
		/*
		 * Allow new children to be added under the device after this
		 * point if it has no PM callbacks.
		 */
		if (dev->power.no_pm_callbacks)
			dev->power.is_prepared = false;

		/* Match the pm_runtime_disable() in device_suspend(). */
		pm_runtime_enable(dev);
		goto Complete;
	}

	if (!dpm_wait_for_superior(dev, async))
		goto Complete;

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	/*
	 * This is a fib. But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class && dev->class->pm) {
		info = "class ";
		callback = pm_op(dev->class->pm, state);
		goto Driver;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

End:
	error = dpm_run_callback(callback, dev, state, info);

	device_unlock(dev);
	dpm_watchdog_clear(&wd);

Complete:
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	if (error) {
		WRITE_ONCE(async_error, error);
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, state, async ? " async" : "", error);
	}

	dpm_async_resume_subordinate(dev, async_resume);
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = data;

	device_resume(dev, pm_transition, true);
	put_device(dev);
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume"), state.event, true);

	pm_transition = state;
	async_error = 0;

	mutex_lock(&dpm_list_mtx);

	/*
	 * Start processing "async" root devices upfront so they don't wait for
	 * the "sync" devices they don't depend on.
	 */
	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		dpm_clear_async_state(dev);
		if (dpm_root_device(dev))
			dpm_async_with_cleanup(dev, async_resume);
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		list_move_tail(&dev->power.entry, &dpm_prepared_list);

		if (!dpm_async_fn(dev, async_resume)) {
			get_device(dev);

			mutex_unlock(&dpm_list_mtx);

			device_resume(dev, state, false);

			put_device(dev);

			mutex_lock(&dpm_list_mtx);
		}
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, NULL);
	if (READ_ONCE(async_error))
		dpm_save_failed_step(SUSPEND_RESUME);

	cpufreq_resume();
	devfreq_resume();
	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	const char *info = NULL;

	if (dev->power.syscore)
		goto out;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

out:
	/* If enabling runtime PM for the device is blocked, unblock it. */
	pm_runtime_unblock(dev);
	pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	trace_suspend_resume(TPS("dpm_complete"), state.event, true);

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);

		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		device_complete(dev, state);
		trace_device_pm_callback_end(dev, 0);

		put_device(dev);

		mutex_lock(&dpm_list_mtx);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);

	/* Allow device probing and trigger re-probing of deferred devices */
	device_unblock_probing();
	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	pm_restore_gfp_mask();
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

static bool dpm_leaf_device(struct device *dev)
{
	struct device *child;

	lockdep_assert_held(&dpm_list_mtx);

	child = device_find_any_child(dev);
	if (child) {
		put_device(child);

		return false;
	}

	/*
	 * Since this function is required to run under dpm_list_mtx, the
	 * list_empty() below will only return true if the device's list of
	 * consumers is actually empty before calling it.
	 */
	return list_empty(&dev->links.consumers);
}

static bool dpm_async_suspend_parent(struct device *dev, async_func_t func)
{
	guard(mutex)(&dpm_list_mtx);

	/*
	 * If the device is suspended asynchronously and the parent's callback
	 * deletes both the device and the parent itself, the parent object may
	 * be freed while this function is running, so avoid that by checking
	 * if the device has been deleted already as the parent cannot be
	 * deleted before it.
	 */
	if (!device_pm_initialized(dev))
		return false;

	/* Start processing the device's parent if it is "async". */
	if (dev->parent)
		dpm_async_with_cleanup(dev->parent, func);

	return true;
}

static void dpm_async_suspend_superior(struct device *dev, async_func_t func)
{
	struct device_link *link;
	int idx;

	if (!dpm_async_suspend_parent(dev, func))
		return;

	idx = device_links_read_lock();

	/* Start processing the device's "async" suppliers. */
	dev_for_each_link_to_supplier(link, dev)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_async_with_cleanup(link->supplier, func);

	device_links_read_unlock(idx);
}

static void dpm_async_suspend_complete_all(struct list_head *device_list)
{
	struct device *dev;

	guard(mutex)(&async_wip_mtx);

	list_for_each_entry_reverse(dev, device_list, power.entry) {
		/*
		 * In case the device is being waited for and async processing
		 * has not started for it yet, let the waiters make progress.
		 */
		if (!dev->power.work_in_progress)
			complete_all(&dev->power.completion);
	}
}

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}
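
/*
 * For example, resume_event(PMSG_SUSPEND) is PMSG_RESUME, while FREEZE and
 * QUIESCE both map to PMSG_RECOVER, so an aborted freeze is handled as
 * error recovery (PM_EVENT_RECOVER) rather than a normal thaw.
 */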

static void dpm_superior_set_must_resume(struct device *dev)
{
	struct device_link *link;
	int idx;

	if (dev->parent)
		dev->parent->power.must_resume = true;

	idx = device_links_read_lock();

	dev_for_each_link_to_supplier(link, dev)
		link->supplier->power.must_resume = true;

	device_links_read_unlock(idx);
}

static void async_suspend_noirq(void *data, async_cookie_t cookie);

/**
 * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static void device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_subordinate(dev, async);

	if (READ_ONCE(async_error))
		goto Complete;

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}
	if (callback)
		goto Run;

	if (dev_pm_skip_suspend(dev))
		goto Skip;

	if (dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);
	if (error) {
		WRITE_ONCE(async_error, error);
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
		goto Complete;
	}

Skip:
	dev->power.is_noirq_suspended = true;

	/*
	 * Devices must be resumed unless they are explicitly allowed to be left
	 * in suspend, but even in that case skipping the resume of devices that
	 * were in use right before the system suspend (as indicated by their
	 * runtime PM usage counters and child counters) would be suboptimal.
	 */
	if (!(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
	      dev->power.may_skip_resume) || !pm_runtime_need_not_resume(dev))
		dev->power.must_resume = true;

	if (dev->power.must_resume)
		dpm_superior_set_must_resume(dev);

Complete:
	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);

	if (error || READ_ONCE(async_error))
		return;

	dpm_async_suspend_superior(dev, async_suspend_noirq);
}

static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = data;

	device_suspend_noirq(dev, pm_transition, true);
	put_device(dev);
}
| 1515 | |
| 1516 | static int dpm_noirq_suspend_devices(pm_message_t state) |
| 1517 | { |
| 1518 | ktime_t starttime = ktime_get(); |
| 1519 | struct device *dev; |
| 1520 | int error; |
| 1521 | |
| 1522 | trace_suspend_resume(TPS("dpm_suspend_noirq" ), val: state.event, start: true); |
| 1523 | |
| 1524 | pm_transition = state; |
| 1525 | async_error = 0; |
| 1526 | |
| 1527 | mutex_lock(&dpm_list_mtx); |
| 1528 | |
| 1529 | /* |
| 1530 | * Start processing "async" leaf devices upfront so they don't need to |
| 1531 | * wait for the "sync" devices they don't depend on. |
| 1532 | */ |
| 1533 | list_for_each_entry_reverse(dev, &dpm_late_early_list, power.entry) { |
| 1534 | dpm_clear_async_state(dev); |
| 1535 | if (dpm_leaf_device(dev)) |
| 1536 | dpm_async_with_cleanup(dev, fn: async_suspend_noirq); |
| 1537 | } |
| 1538 | |
| 1539 | while (!list_empty(head: &dpm_late_early_list)) { |
| 1540 | dev = to_device(entry: dpm_late_early_list.prev); |
| 1541 | |
| 1542 | list_move(list: &dev->power.entry, head: &dpm_noirq_list); |
| 1543 | |
| 1544 | if (dpm_async_fn(dev, func: async_suspend_noirq)) |
| 1545 | continue; |
| 1546 | |
| 1547 | get_device(dev); |
| 1548 | |
| 1549 | mutex_unlock(lock: &dpm_list_mtx); |
| 1550 | |
| 1551 | device_suspend_noirq(dev, state, async: false); |
| 1552 | |
| 1553 | put_device(dev); |
| 1554 | |
| 1555 | mutex_lock(&dpm_list_mtx); |
| 1556 | |
| 1557 | if (READ_ONCE(async_error)) { |
| 1558 | dpm_async_suspend_complete_all(device_list: &dpm_late_early_list); |
| 1559 | /* |
| 1560 | * Move all devices to the target list to resume them |
| 1561 | * properly. |
| 1562 | */ |
| 1563 | list_splice_init(list: &dpm_late_early_list, head: &dpm_noirq_list); |
| 1564 | break; |
| 1565 | } |
| 1566 | } |
| 1567 | |
| 1568 | mutex_unlock(lock: &dpm_list_mtx); |
| 1569 | |
| 1570 | async_synchronize_full(); |
| 1571 | |
| 1572 | error = READ_ONCE(async_error); |
| 1573 | if (error) |
| 1574 | dpm_save_failed_step(step: SUSPEND_SUSPEND_NOIRQ); |
| 1575 | |
| 1576 | dpm_show_time(starttime, state, error, info: "noirq" ); |
| 1577 | trace_suspend_resume(TPS("dpm_suspend_noirq" ), val: state.event, start: false); |
| 1578 | return error; |
| 1579 | } |
| 1580 | |
| 1581 | /** |
| 1582 | * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices. |
| 1583 | * @state: PM transition of the system being carried out. |
| 1584 | * |
| 1585 | * Prevent device drivers' interrupt handlers from being called and invoke |
| 1586 | * "noirq" suspend callbacks for all non-sysdev devices. |
| 1587 | */ |
| 1588 | int dpm_suspend_noirq(pm_message_t state) |
| 1589 | { |
| 1590 | int ret; |
| 1591 | |
| 1592 | device_wakeup_arm_wake_irqs(); |
| 1593 | suspend_device_irqs(); |
| 1594 | |
| 1595 | ret = dpm_noirq_suspend_devices(state); |
| 1596 | if (ret) |
		dpm_resume_noirq(resume_event(state));
| 1598 | |
| 1599 | return ret; |
| 1600 | } |
| 1601 | |
| 1602 | static void dpm_propagate_wakeup_to_parent(struct device *dev) |
| 1603 | { |
| 1604 | struct device *parent = dev->parent; |
| 1605 | |
| 1606 | if (!parent) |
| 1607 | return; |
| 1608 | |
	spin_lock_irq(&parent->power.lock);
| 1610 | |
| 1611 | if (device_wakeup_path(dev) && !parent->power.ignore_children) |
| 1612 | parent->power.wakeup_path = true; |
| 1613 | |
	spin_unlock_irq(&parent->power.lock);
| 1615 | } |
| 1616 | |
| 1617 | static void async_suspend_late(void *data, async_cookie_t cookie); |
| 1618 | |
| 1619 | /** |
| 1620 | * device_suspend_late - Execute a "late suspend" callback for given device. |
| 1621 | * @dev: Device to handle. |
| 1622 | * @state: PM transition of the system being carried out. |
| 1623 | * @async: If true, the device is being suspended asynchronously. |
| 1624 | * |
| 1625 | * Runtime PM is disabled for @dev while this function is being executed. |
| 1626 | */ |
| 1627 | static void device_suspend_late(struct device *dev, pm_message_t state, bool async) |
| 1628 | { |
| 1629 | pm_callback_t callback = NULL; |
| 1630 | const char *info = NULL; |
| 1631 | int error = 0; |
| 1632 | |
| 1633 | TRACE_DEVICE(dev); |
| 1634 | TRACE_SUSPEND(0); |
| 1635 | |
| 1636 | dpm_wait_for_subordinate(dev, async); |
| 1637 | |
| 1638 | if (READ_ONCE(async_error)) |
| 1639 | goto Complete; |
| 1640 | |
| 1641 | if (pm_wakeup_pending()) { |
| 1642 | WRITE_ONCE(async_error, -EBUSY); |
| 1643 | goto Complete; |
| 1644 | } |
| 1645 | |
| 1646 | if (dev->power.direct_complete) |
| 1647 | goto Complete; |
| 1648 | |
| 1649 | /* |
| 1650 | * Disable runtime PM for the device without checking if there is a |
| 1651 | * pending resume request for it. |
| 1652 | */ |
	__pm_runtime_disable(dev, false);
| 1654 | |
| 1655 | if (dev->power.syscore) |
| 1656 | goto Skip; |
| 1657 | |
	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}
| 1671 | if (callback) |
| 1672 | goto Run; |
| 1673 | |
| 1674 | if (dev_pm_skip_suspend(dev)) |
| 1675 | goto Skip; |
| 1676 | |
	if (dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}
| 1681 | |
| 1682 | Run: |
	error = dpm_run_callback(callback, dev, state, info);
| 1684 | if (error) { |
| 1685 | WRITE_ONCE(async_error, error); |
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, state, async ? " async late" : " late", error);
| 1688 | pm_runtime_enable(dev); |
| 1689 | goto Complete; |
| 1690 | } |
| 1691 | dpm_propagate_wakeup_to_parent(dev); |
| 1692 | |
| 1693 | Skip: |
| 1694 | dev->power.is_late_suspended = true; |
| 1695 | |
| 1696 | Complete: |
| 1697 | TRACE_SUSPEND(error); |
| 1698 | complete_all(&dev->power.completion); |
| 1699 | |
| 1700 | if (error || READ_ONCE(async_error)) |
| 1701 | return; |
| 1702 | |
	dpm_async_suspend_superior(dev, async_suspend_late);
| 1704 | } |
| 1705 | |
| 1706 | static void async_suspend_late(void *data, async_cookie_t cookie) |
| 1707 | { |
| 1708 | struct device *dev = data; |
| 1709 | |
	device_suspend_late(dev, pm_transition, true);
| 1711 | put_device(dev); |
| 1712 | } |
| 1713 | |
| 1714 | /** |
| 1715 | * dpm_suspend_late - Execute "late suspend" callbacks for all devices. |
| 1716 | * @state: PM transition of the system being carried out. |
| 1717 | */ |
| 1718 | int dpm_suspend_late(pm_message_t state) |
| 1719 | { |
| 1720 | ktime_t starttime = ktime_get(); |
| 1721 | struct device *dev; |
| 1722 | int error; |
| 1723 | |
	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
| 1725 | |
| 1726 | pm_transition = state; |
| 1727 | async_error = 0; |
| 1728 | |
| 1729 | wake_up_all_idle_cpus(); |
| 1730 | |
| 1731 | mutex_lock(&dpm_list_mtx); |
| 1732 | |
| 1733 | /* |
| 1734 | * Start processing "async" leaf devices upfront so they don't need to |
| 1735 | * wait for the "sync" devices they don't depend on. |
| 1736 | */ |
| 1737 | list_for_each_entry_reverse(dev, &dpm_suspended_list, power.entry) { |
| 1738 | dpm_clear_async_state(dev); |
| 1739 | if (dpm_leaf_device(dev)) |
			dpm_async_with_cleanup(dev, async_suspend_late);
| 1741 | } |
| 1742 | |
	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.prev);

		list_move(&dev->power.entry, &dpm_late_early_list);

		if (dpm_async_fn(dev, async_suspend_late))
			continue;

		get_device(dev);

		mutex_unlock(&dpm_list_mtx);

		device_suspend_late(dev, state, false);

		put_device(dev);

		mutex_lock(&dpm_list_mtx);

		if (READ_ONCE(async_error)) {
			dpm_async_suspend_complete_all(&dpm_suspended_list);
			/*
			 * Move all devices to the target list to resume them
			 * properly.
			 */
			list_splice_init(&dpm_suspended_list, &dpm_late_early_list);
| 1768 | break; |
| 1769 | } |
| 1770 | } |
| 1771 | |
	mutex_unlock(&dpm_list_mtx);
| 1773 | |
| 1774 | async_synchronize_full(); |
| 1775 | |
| 1776 | error = READ_ONCE(async_error); |
| 1777 | if (error) { |
		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
		dpm_resume_early(resume_event(state));
| 1780 | } |
	dpm_show_time(starttime, state, error, "late");
	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
| 1783 | return error; |
| 1784 | } |
| 1785 | |
| 1786 | /** |
| 1787 | * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks. |
| 1788 | * @state: PM transition of the system being carried out. |
| 1789 | */ |
| 1790 | int dpm_suspend_end(pm_message_t state) |
| 1791 | { |
| 1792 | ktime_t starttime = ktime_get(); |
| 1793 | int error; |
| 1794 | |
| 1795 | error = dpm_suspend_late(state); |
| 1796 | if (error) |
| 1797 | goto out; |
| 1798 | |
| 1799 | error = dpm_suspend_noirq(state); |
| 1800 | if (error) |
		dpm_resume_early(resume_event(state));
| 1802 | |
| 1803 | out: |
	dpm_show_time(starttime, state, error, "end");
| 1805 | return error; |
| 1806 | } |
| 1807 | EXPORT_SYMBOL_GPL(dpm_suspend_end); |
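
/*
 * Illustrative sketch (not part of this file): a platform suspend sequence is
 * expected to pair dpm_suspend_start()/dpm_suspend_end() with the matching
 * resume helpers, roughly as follows (enter_sleep_state() is a hypothetical
 * platform hook):
 *
 *	error = dpm_suspend_start(PMSG_SUSPEND);
 *	if (!error) {
 *		error = dpm_suspend_end(PMSG_SUSPEND);
 *		if (!error) {
 *			enter_sleep_state();
 *			dpm_resume_start(PMSG_RESUME);
 *		}
 *		dpm_resume_end(PMSG_RESUME);
 *	}
 */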
| 1808 | |
| 1809 | /** |
| 1810 | * legacy_suspend - Execute a legacy (bus or class) suspend callback for device. |
| 1811 | * @dev: Device to suspend. |
| 1812 | * @state: PM transition of the system being carried out. |
| 1813 | * @cb: Suspend callback to execute. |
| 1814 | * @info: string description of caller. |
| 1815 | */ |
| 1816 | static int legacy_suspend(struct device *dev, pm_message_t state, |
| 1817 | int (*cb)(struct device *dev, pm_message_t state), |
| 1818 | const char *info) |
| 1819 | { |
| 1820 | int error; |
| 1821 | ktime_t calltime; |
| 1822 | |
| 1823 | calltime = initcall_debug_start(dev, cb); |
| 1824 | |
	trace_device_pm_callback_start(dev, info, state.event);
| 1826 | error = cb(dev, state); |
| 1827 | trace_device_pm_callback_end(dev, error); |
| 1828 | suspend_report_result(dev, cb, error); |
| 1829 | |
| 1830 | initcall_debug_report(dev, calltime, cb, error); |
| 1831 | |
| 1832 | return error; |
| 1833 | } |
| 1834 | |
| 1835 | static void dpm_clear_superiors_direct_complete(struct device *dev) |
| 1836 | { |
| 1837 | struct device_link *link; |
| 1838 | int idx; |
| 1839 | |
| 1840 | if (dev->parent) { |
		spin_lock_irq(&dev->parent->power.lock);
		dev->parent->power.direct_complete = false;
		spin_unlock_irq(&dev->parent->power.lock);
| 1844 | } |
| 1845 | |
| 1846 | idx = device_links_read_lock(); |
| 1847 | |
| 1848 | dev_for_each_link_to_supplier(link, dev) { |
		spin_lock_irq(&link->supplier->power.lock);
		link->supplier->power.direct_complete = false;
		spin_unlock_irq(&link->supplier->power.lock);
| 1852 | } |
| 1853 | |
| 1854 | device_links_read_unlock(idx); |
| 1855 | } |
| 1856 | |
| 1857 | static void async_suspend(void *data, async_cookie_t cookie); |
| 1858 | |
| 1859 | /** |
| 1860 | * device_suspend - Execute "suspend" callbacks for given device. |
| 1861 | * @dev: Device to handle. |
| 1862 | * @state: PM transition of the system being carried out. |
| 1863 | * @async: If true, the device is being suspended asynchronously. |
| 1864 | */ |
| 1865 | static void device_suspend(struct device *dev, pm_message_t state, bool async) |
| 1866 | { |
| 1867 | pm_callback_t callback = NULL; |
| 1868 | const char *info = NULL; |
| 1869 | int error = 0; |
| 1870 | DECLARE_DPM_WATCHDOG_ON_STACK(wd); |
| 1871 | |
| 1872 | TRACE_DEVICE(dev); |
| 1873 | TRACE_SUSPEND(0); |
| 1874 | |
| 1875 | dpm_wait_for_subordinate(dev, async); |
| 1876 | |
| 1877 | if (READ_ONCE(async_error)) { |
| 1878 | dev->power.direct_complete = false; |
| 1879 | goto Complete; |
| 1880 | } |
| 1881 | |
| 1882 | /* |
| 1883 | * Wait for possible runtime PM transitions of the device in progress |
| 1884 | * to complete and if there's a runtime resume request pending for it, |
| 1885 | * resume it before proceeding with invoking the system-wide suspend |
| 1886 | * callbacks for it. |
| 1887 | * |
| 1888 | * If the system-wide suspend callbacks below change the configuration |
| 1889 | * of the device, they must disable runtime PM for it or otherwise |
| 1890 | * ensure that its runtime-resume callbacks will not be confused by that |
| 1891 | * change in case they are invoked going forward. |
| 1892 | */ |
| 1893 | pm_runtime_barrier(dev); |
| 1894 | |
| 1895 | if (pm_wakeup_pending()) { |
| 1896 | dev->power.direct_complete = false; |
| 1897 | WRITE_ONCE(async_error, -EBUSY); |
| 1898 | goto Complete; |
| 1899 | } |
| 1900 | |
| 1901 | if (dev->power.syscore) |
| 1902 | goto Complete; |
| 1903 | |
| 1904 | /* Avoid direct_complete to let wakeup_path propagate. */ |
| 1905 | if (device_may_wakeup(dev) || device_wakeup_path(dev)) |
| 1906 | dev->power.direct_complete = false; |
| 1907 | |
| 1908 | if (dev->power.direct_complete) { |
| 1909 | if (pm_runtime_status_suspended(dev)) { |
| 1910 | pm_runtime_disable(dev); |
| 1911 | if (pm_runtime_status_suspended(dev)) { |
				pm_dev_dbg(dev, state, "direct-complete ");
| 1913 | dev->power.is_suspended = true; |
| 1914 | goto Complete; |
| 1915 | } |
| 1916 | |
| 1917 | pm_runtime_enable(dev); |
| 1918 | } |
| 1919 | dev->power.direct_complete = false; |
| 1920 | } |
| 1921 | |
| 1922 | dev->power.may_skip_resume = true; |
| 1923 | dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME); |
| 1924 | |
	dpm_watchdog_set(&wd, dev);
| 1926 | device_lock(dev); |
| 1927 | |
	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class && dev->class->pm) {
		info = "class ";
		callback = pm_op(dev->class->pm, state);
		goto Run;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend,
					       "legacy bus ");
			goto End;
		}
	}

Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}
| 1963 | |
	error = dpm_run_callback(callback, dev, state, info);
| 1965 | |
| 1966 | End: |
| 1967 | if (!error) { |
| 1968 | dev->power.is_suspended = true; |
| 1969 | if (device_may_wakeup(dev)) |
| 1970 | dev->power.wakeup_path = true; |
| 1971 | |
| 1972 | dpm_propagate_wakeup_to_parent(dev); |
| 1973 | dpm_clear_superiors_direct_complete(dev); |
| 1974 | } |
| 1975 | |
| 1976 | device_unlock(dev); |
	dpm_watchdog_clear(&wd);
| 1978 | |
| 1979 | Complete: |
| 1980 | if (error) { |
| 1981 | WRITE_ONCE(async_error, error); |
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, state, async ? " async" : "", error);
| 1984 | } |
| 1985 | |
| 1986 | complete_all(&dev->power.completion); |
| 1987 | TRACE_SUSPEND(error); |
| 1988 | |
| 1989 | if (error || READ_ONCE(async_error)) |
| 1990 | return; |
| 1991 | |
	dpm_async_suspend_superior(dev, async_suspend);
| 1993 | } |
| 1994 | |
| 1995 | static void async_suspend(void *data, async_cookie_t cookie) |
| 1996 | { |
| 1997 | struct device *dev = data; |
| 1998 | |
	device_suspend(dev, pm_transition, true);
| 2000 | put_device(dev); |
| 2001 | } |
| 2002 | |
| 2003 | /** |
| 2004 | * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices. |
| 2005 | * @state: PM transition of the system being carried out. |
| 2006 | */ |
| 2007 | int dpm_suspend(pm_message_t state) |
| 2008 | { |
| 2009 | ktime_t starttime = ktime_get(); |
| 2010 | struct device *dev; |
| 2011 | int error; |
| 2012 | |
	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
| 2014 | might_sleep(); |
| 2015 | |
| 2016 | devfreq_suspend(); |
| 2017 | cpufreq_suspend(); |
| 2018 | |
| 2019 | pm_transition = state; |
| 2020 | async_error = 0; |
| 2021 | |
| 2022 | mutex_lock(&dpm_list_mtx); |
| 2023 | |
| 2024 | /* |
| 2025 | * Start processing "async" leaf devices upfront so they don't need to |
| 2026 | * wait for the "sync" devices they don't depend on. |
| 2027 | */ |
| 2028 | list_for_each_entry_reverse(dev, &dpm_prepared_list, power.entry) { |
| 2029 | dpm_clear_async_state(dev); |
| 2030 | if (dpm_leaf_device(dev)) |
			dpm_async_with_cleanup(dev, async_suspend);
| 2032 | } |
| 2033 | |
	while (!list_empty(&dpm_prepared_list)) {
		dev = to_device(dpm_prepared_list.prev);

		list_move(&dev->power.entry, &dpm_suspended_list);

		if (dpm_async_fn(dev, async_suspend))
			continue;

		get_device(dev);

		mutex_unlock(&dpm_list_mtx);

		device_suspend(dev, state, false);

		put_device(dev);

		mutex_lock(&dpm_list_mtx);

		if (READ_ONCE(async_error)) {
			dpm_async_suspend_complete_all(&dpm_prepared_list);
			/*
			 * Move all devices to the target list to resume them
			 * properly.
			 */
			list_splice_init(&dpm_prepared_list, &dpm_suspended_list);
| 2059 | break; |
| 2060 | } |
| 2061 | } |
| 2062 | |
	mutex_unlock(&dpm_list_mtx);
| 2064 | |
| 2065 | async_synchronize_full(); |
| 2066 | |
| 2067 | error = READ_ONCE(async_error); |
| 2068 | if (error) |
		dpm_save_failed_step(SUSPEND_SUSPEND);
| 2070 | |
| 2071 | dpm_show_time(starttime, state, error, NULL); |
	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
| 2073 | return error; |
| 2074 | } |
| 2075 | |
| 2076 | static bool device_prepare_smart_suspend(struct device *dev) |
| 2077 | { |
| 2078 | struct device_link *link; |
| 2079 | bool ret = true; |
| 2080 | int idx; |
| 2081 | |
| 2082 | /* |
| 2083 | * The "smart suspend" feature is enabled for devices whose drivers ask |
| 2084 | * for it and for devices without PM callbacks. |
| 2085 | * |
| 2086 | * However, if "smart suspend" is not enabled for the device's parent |
| 2087 | * or any of its suppliers that take runtime PM into account, it cannot |
| 2088 | * be enabled for the device either. |
| 2089 | */ |
| 2090 | if (!dev->power.no_pm_callbacks && |
| 2091 | !dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND)) |
| 2092 | return false; |
| 2093 | |
	if (dev->parent && !dev_pm_smart_suspend(dev->parent) &&
	    !dev->parent->power.ignore_children && !pm_runtime_blocked(dev->parent))
| 2096 | return false; |
| 2097 | |
| 2098 | idx = device_links_read_lock(); |
| 2099 | |
| 2100 | dev_for_each_link_to_supplier(link, dev) { |
| 2101 | if (!device_link_test(link, DL_FLAG_PM_RUNTIME)) |
| 2102 | continue; |
| 2103 | |
		if (!dev_pm_smart_suspend(link->supplier) &&
		    !pm_runtime_blocked(link->supplier)) {
| 2106 | ret = false; |
| 2107 | break; |
| 2108 | } |
| 2109 | } |
| 2110 | |
| 2111 | device_links_read_unlock(idx); |
| 2112 | |
| 2113 | return ret; |
| 2114 | } |
| 2115 | |
| 2116 | /** |
| 2117 | * device_prepare - Prepare a device for system power transition. |
| 2118 | * @dev: Device to handle. |
| 2119 | * @state: PM transition of the system being carried out. |
| 2120 | * |
| 2121 | * Execute the ->prepare() callback(s) for given device. No new children of the |
| 2122 | * device may be registered after this function has returned. |
| 2123 | */ |
| 2124 | static int device_prepare(struct device *dev, pm_message_t state) |
| 2125 | { |
| 2126 | int (*callback)(struct device *) = NULL; |
| 2127 | bool smart_suspend; |
| 2128 | int ret = 0; |
| 2129 | |
| 2130 | /* |
| 2131 | * If a device's parent goes into runtime suspend at the wrong time, |
| 2132 | * it won't be possible to resume the device. To prevent this we |
| 2133 | * block runtime suspend here, during the prepare phase, and allow |
| 2134 | * it again during the complete phase. |
| 2135 | */ |
| 2136 | pm_runtime_get_noresume(dev); |
| 2137 | /* |
| 2138 | * If runtime PM is disabled for the device at this point and it has |
| 2139 | * never been enabled so far, it should not be enabled until this system |
| 2140 | * suspend-resume cycle is complete, so prepare to trigger a warning on |
| 2141 | * subsequent attempts to enable it. |
| 2142 | */ |
| 2143 | smart_suspend = !pm_runtime_block_if_disabled(dev); |
| 2144 | |
| 2145 | if (dev->power.syscore) |
| 2146 | return 0; |
| 2147 | |
| 2148 | device_lock(dev); |
| 2149 | |
| 2150 | dev->power.wakeup_path = false; |
| 2151 | dev->power.out_band_wakeup = false; |
| 2152 | |
| 2153 | if (dev->power.no_pm_callbacks) |
| 2154 | goto unlock; |
| 2155 | |
| 2156 | if (dev->pm_domain) |
| 2157 | callback = dev->pm_domain->ops.prepare; |
| 2158 | else if (dev->type && dev->type->pm) |
| 2159 | callback = dev->type->pm->prepare; |
| 2160 | else if (dev->class && dev->class->pm) |
| 2161 | callback = dev->class->pm->prepare; |
| 2162 | else if (dev->bus && dev->bus->pm) |
| 2163 | callback = dev->bus->pm->prepare; |
| 2164 | |
| 2165 | if (!callback && dev->driver && dev->driver->pm) |
| 2166 | callback = dev->driver->pm->prepare; |
| 2167 | |
| 2168 | if (callback) |
| 2169 | ret = callback(dev); |
| 2170 | |
| 2171 | unlock: |
| 2172 | device_unlock(dev); |
| 2173 | |
| 2174 | if (ret < 0) { |
| 2175 | suspend_report_result(dev, callback, ret); |
| 2176 | pm_runtime_put(dev); |
| 2177 | return ret; |
| 2178 | } |
| 2179 | /* Do not enable "smart suspend" for devices with disabled runtime PM. */ |
| 2180 | if (smart_suspend) |
| 2181 | smart_suspend = device_prepare_smart_suspend(dev); |
| 2182 | |
	spin_lock_irq(&dev->power.lock);
| 2184 | |
| 2185 | dev->power.smart_suspend = smart_suspend; |
| 2186 | /* |
| 2187 | * A positive return value from ->prepare() means "this device appears |
| 2188 | * to be runtime-suspended and its state is fine, so if it really is |
| 2189 | * runtime-suspended, you can leave it in that state provided that you |
| 2190 | * will do the same thing with all of its descendants". This only |
| 2191 | * applies to suspend transitions, however. |
| 2192 | */ |
| 2193 | dev->power.direct_complete = state.event == PM_EVENT_SUSPEND && |
| 2194 | (ret > 0 || dev->power.no_pm_callbacks) && |
| 2195 | !dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE); |
| 2196 | |
	spin_unlock_irq(&dev->power.lock);
| 2198 | |
| 2199 | return 0; |
| 2200 | } |
| 2201 | |
| 2202 | /** |
| 2203 | * dpm_prepare - Prepare all non-sysdev devices for a system PM transition. |
| 2204 | * @state: PM transition of the system being carried out. |
| 2205 | * |
| 2206 | * Execute the ->prepare() callback(s) for all devices. |
| 2207 | */ |
| 2208 | int dpm_prepare(pm_message_t state) |
| 2209 | { |
| 2210 | int error = 0; |
| 2211 | |
	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
| 2213 | |
| 2214 | /* |
| 2215 | * Give a chance for the known devices to complete their probes, before |
| 2216 | * disable probing of devices. This sync point is important at least |
| 2217 | * at boot time + hibernation restore. |
| 2218 | */ |
| 2219 | wait_for_device_probe(); |
| 2220 | /* |
| 2221 | * It is unsafe if probing of devices will happen during suspend or |
| 2222 | * hibernation and system behavior will be unpredictable in this case. |
| 2223 | * So, let's prohibit device's probing here and defer their probes |
| 2224 | * instead. The normal behavior will be restored in dpm_complete(). |
| 2225 | */ |
| 2226 | device_block_probing(); |
| 2227 | |
| 2228 | mutex_lock(&dpm_list_mtx); |
	while (!list_empty(&dpm_list) && !error) {
		struct device *dev = to_device(dpm_list.next);
| 2231 | |
| 2232 | get_device(dev); |
| 2233 | |
		mutex_unlock(&dpm_list_mtx);
| 2235 | |
		trace_device_pm_callback_start(dev, "", state.event);
| 2237 | error = device_prepare(dev, state); |
| 2238 | trace_device_pm_callback_end(dev, error); |
| 2239 | |
| 2240 | mutex_lock(&dpm_list_mtx); |
| 2241 | |
| 2242 | if (!error) { |
| 2243 | dev->power.is_prepared = true; |
			if (!list_empty(&dev->power.entry))
				list_move_tail(&dev->power.entry, &dpm_prepared_list);
| 2246 | } else if (error == -EAGAIN) { |
| 2247 | error = 0; |
| 2248 | } else { |
			dev_info(dev, "not prepared for power transition: code %d\n",
				 error);
| 2251 | } |
| 2252 | |
		mutex_unlock(&dpm_list_mtx);
| 2254 | |
| 2255 | put_device(dev); |
| 2256 | |
| 2257 | mutex_lock(&dpm_list_mtx); |
| 2258 | } |
	mutex_unlock(&dpm_list_mtx);
	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
| 2261 | return error; |
| 2262 | } |
| 2263 | |
| 2264 | /** |
| 2265 | * dpm_suspend_start - Prepare devices for PM transition and suspend them. |
| 2266 | * @state: PM transition of the system being carried out. |
| 2267 | * |
| 2268 | * Prepare all non-sysdev devices for system PM transition and execute "suspend" |
| 2269 | * callbacks for them. |
| 2270 | */ |
| 2271 | int dpm_suspend_start(pm_message_t state) |
| 2272 | { |
| 2273 | ktime_t starttime = ktime_get(); |
| 2274 | int error; |
| 2275 | |
| 2276 | error = dpm_prepare(state); |
| 2277 | if (error) |
		dpm_save_failed_step(SUSPEND_PREPARE);
| 2279 | else { |
| 2280 | pm_restrict_gfp_mask(); |
| 2281 | error = dpm_suspend(state); |
| 2282 | } |
| 2283 | |
	dpm_show_time(starttime, state, error, "start");
| 2285 | return error; |
| 2286 | } |
| 2287 | EXPORT_SYMBOL_GPL(dpm_suspend_start); |
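
/*
 * Illustrative sketch (not part of this file): callers select the transition
 * through @state, e.g. suspend-to-RAM vs. the hibernation freeze step:
 *
 *	error = dpm_suspend_start(PMSG_SUSPEND);	(system suspend)
 *	error = dpm_suspend_start(PMSG_FREEZE);		(hibernation snapshot)
 */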
| 2288 | |
| 2289 | void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret) |
| 2290 | { |
| 2291 | if (ret) |
		dev_err(dev, "%s(): %ps returns %d\n", function, fn, ret);
| 2293 | } |
| 2294 | EXPORT_SYMBOL_GPL(__suspend_report_result); |
| 2295 | |
| 2296 | /** |
| 2297 | * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete. |
| 2298 | * @subordinate: Device that needs to wait for @dev. |
| 2299 | * @dev: Device to wait for. |
| 2300 | */ |
| 2301 | int device_pm_wait_for_dev(struct device *subordinate, struct device *dev) |
| 2302 | { |
	dpm_wait(dev, subordinate->power.async_suspend);
| 2304 | return async_error; |
| 2305 | } |
| 2306 | EXPORT_SYMBOL_GPL(device_pm_wait_for_dev); |
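
/*
 * Illustrative sketch (not part of this file): a driver whose async suspend
 * must not run before another device it depends on has been handled can wait
 * for it explicitly (foo_suspend(), struct foo and its peer pointer are
 * hypothetical):
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *		int error = device_pm_wait_for_dev(dev, foo->peer);
 *
 *		if (error)
 *			return error;
 *
 *		return foo_do_suspend(foo);
 *	}
 */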
| 2307 | |
| 2308 | /** |
| 2309 | * dpm_for_each_dev - device iterator. |
| 2310 | * @data: data for the callback. |
| 2311 | * @fn: function to be called for each device. |
| 2312 | * |
| 2313 | * Iterate over devices in dpm_list, and call @fn for each device, |
| 2314 | * passing it @data. |
| 2315 | */ |
| 2316 | void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *)) |
| 2317 | { |
| 2318 | struct device *dev; |
| 2319 | |
| 2320 | if (!fn) |
| 2321 | return; |
| 2322 | |
| 2323 | device_pm_lock(); |
| 2324 | list_for_each_entry(dev, &dpm_list, power.entry) |
| 2325 | fn(dev, data); |
| 2326 | device_pm_unlock(); |
| 2327 | } |
| 2328 | EXPORT_SYMBOL_GPL(dpm_for_each_dev); |
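
/*
 * Illustrative sketch (not part of this file): a minimal dpm_for_each_dev()
 * callback that counts the devices on dpm_list (count_dev() is hypothetical):
 *
 *	static void count_dev(struct device *dev, void *data)
 *	{
 *		unsigned int *count = data;
 *
 *		(*count)++;
 *	}
 *
 *	unsigned int count = 0;
 *
 *	dpm_for_each_dev(&count, count_dev);
 */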
| 2329 | |
| 2330 | static bool pm_ops_is_empty(const struct dev_pm_ops *ops) |
| 2331 | { |
| 2332 | if (!ops) |
| 2333 | return true; |
| 2334 | |
| 2335 | return !ops->prepare && |
| 2336 | !ops->suspend && |
| 2337 | !ops->suspend_late && |
| 2338 | !ops->suspend_noirq && |
| 2339 | !ops->resume_noirq && |
| 2340 | !ops->resume_early && |
| 2341 | !ops->resume && |
| 2342 | !ops->complete; |
| 2343 | } |
| 2344 | |
| 2345 | void device_pm_check_callbacks(struct device *dev) |
| 2346 | { |
| 2347 | unsigned long flags; |
| 2348 | |
| 2349 | spin_lock_irqsave(&dev->power.lock, flags); |
| 2350 | dev->power.no_pm_callbacks = |
		(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
		 !dev->bus->suspend && !dev->bus->resume)) &&
		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
		(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
		 !dev->driver->suspend && !dev->driver->resume));
	spin_unlock_irqrestore(&dev->power.lock, flags);
| 2359 | } |
| 2360 | |
| 2361 | bool dev_pm_skip_suspend(struct device *dev) |
| 2362 | { |
| 2363 | return dev_pm_smart_suspend(dev) && pm_runtime_status_suspended(dev); |
| 2364 | } |
| 2365 | |