/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */
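
/*
 * Illustrative sketch, not part of this file: a driver normally hooks into
 * the transitions below by filling in a struct dev_pm_ops and pointing its
 * driver (or bus/class/power domain) at it, e.g.:
 *
 *	static int foo_suspend(struct device *dev) { return 0; }
 *	static int foo_resume(struct device *dev) { return 0; }
 *	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
 *
 *	static struct platform_driver foo_driver = {
 *		.driver = { .name = "foo", .pm = &foo_pm_ops },
 *	};
 *
 * The "foo" names are placeholders; the PM core then looks these callbacks
 * up via pm_op(), pm_late_early_op() and pm_noirq_op() below.
 */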

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm-trace.h>
#include <linux/pm_wakeirq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/timer.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	dev->power.is_noirq_suspended = false;
	dev->power.is_late_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	device_pm_check_callbacks(dev);
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
	device_pm_check_callbacks(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (pm_print_times_enabled) {
		pr_info("calling %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error, pm_message_t state, char *info)
{
	ktime_t rettime;
	s64 nsecs;

	rettime = ktime_get();
	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));

	if (pm_print_times_enabled) {
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)nsecs >> 10);
	}
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for the device while the returned callback is being
 * executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The device's driver will not receive interrupts while the returned callback
 * is being executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev);

	pm_dev_dbg(dev, state, info);
	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}

#ifdef CONFIG_DPM_WATCHDOG
struct dpm_watchdog {
	struct device *dev;
	struct task_struct *tsk;
	struct timer_list timer;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
	struct dpm_watchdog wd

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @data: Watchdog object address.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(unsigned long data)
{
	struct dpm_watchdog *wd = (void *)data;

	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
	show_stack(wd->tsk, NULL);
	panic("%s %s: unrecoverable failure\n",
		dev_driver_string(wd->dev), dev_name(wd->dev));
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
	struct timer_list *timer = &wd->timer;

	wd->dev = dev;
	wd->tsk = current;

	init_timer_on_stack(timer);
	/* use same timeout value for both suspend and resume */
	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
	timer->function = dpm_watchdog_handler;
	timer->data = (unsigned long)wd;
	add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
	struct timer_list *timer = &wd->timer;

	del_timer_sync(timer);
	destroy_timer_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_noirq_suspended)
		goto Out;

	dpm_wait(dev->parent, async);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_noirq_suspended = false;

Out:
	complete_all(&dev->power.completion);
	TRACE_RESUME(error);
	return error;
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}
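
/*
 * Illustrative note (an assumption about usage, not code from this file):
 * a device only takes part in the asynchronous suspend/resume paths below
 * if its power.async_suspend flag is set, which subsystems and drivers
 * typically request at probe/registration time, e.g.:
 *
 *	device_enable_async_suspend(dev);	// sets dev->power.async_suspend
 *
 * User space can also disable the mechanism globally via /sys/power/pm_async,
 * which is reflected here by pm_async_enabled.
 */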

static void async_resume_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_noirq(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront, in case the starting of async
	 * threads is delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_noirq, dev);
		}
	}

	while (!list_empty(&dpm_noirq_list)) {
		dev = to_device(dpm_noirq_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_noirq(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_noirq++;
				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " noirq", error);
			}
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, "noirq");
	resume_device_irqs();
	device_wakeup_disarm_wake_irqs();
	cpuidle_resume();
	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_late_suspended)
		goto Out;

	dpm_wait(dev->parent, async);

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_late_suspended = false;

Out:
	TRACE_RESUME(error);

	pm_runtime_enable(dev);
	complete_all(&dev->power.completion);
	return error;
}

static void async_resume_early(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_early(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_early(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront, in case the starting of async
	 * threads is delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_early, dev);
		}
	}

	while (!list_empty(&dpm_late_early_list)) {
		dev = to_device(dpm_late_early_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_early(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_early++;
				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " early", error);
			}
		}
		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, "early");
	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		/* Match the pm_runtime_disable() in __device_suspend(). */
		pm_runtime_enable(dev);
		goto Complete;
	}

	dpm_wait(dev->parent, async);
	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	/*
	 * This is a fib. But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Driver;
		} else if (dev->class->resume) {
			info = "legacy class ";
			callback = dev->class->resume;
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

Unlock:
	device_unlock(dev);
	dpm_watchdog_clear(&wd);

Complete:
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);

	cpufreq_resume();
	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	char *info = NULL;

	if (dev->power.syscore)
		return;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

	pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		device_complete(dev, state);
		trace_device_pm_callback_end(dev, 0);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);

	/* Allow device probing and trigger re-probing of deferred devices */
	device_unblock_probing();
	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);
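
/*
 * A rough sketch (an assumption about the callers, e.g. the system sleep core
 * in kernel/power/suspend.c, not code from this file) of how the entry points
 * defined here fit together, to make the phase ordering explicit:
 *
 *	error = dpm_suspend_start(PMSG_SUSPEND);	// prepare + suspend
 *	if (!error) {
 *		error = dpm_suspend_end(PMSG_SUSPEND);	// late + noirq
 *		// ...enter the sleep state and wake up again...
 *		dpm_resume_start(PMSG_RESUME);		// noirq + early
 *	}
 *	dpm_resume_end(PMSG_RESUME);			// resume + complete
 *
 * Each suspend phase is undone by the corresponding resume phase in reverse
 * order.
 */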

/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

/**
 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_children(dev, async);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_noirq_suspended = true;
	else
		async_error = error;

Complete:
	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}

static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_noirq(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend_noirq(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend_noirq, dev);
		return 0;
	}
	return __device_suspend_noirq(dev, pm_transition, false);
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
	cpuidle_pause();
	device_wakeup_arm_wake_irqs();
	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;

	if (error) {
		suspend_stats.failed_suspend_noirq++;
		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
		dpm_resume_noirq(resume_event(state));
	} else {
		dpm_show_time(starttime, state, "noirq");
	}
	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
	return error;
}

/**
 * __device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	__pm_runtime_disable(dev, false);

	dpm_wait_for_children(dev, async);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_late_suspended = true;
	else
		async_error = error;

Complete:
	TRACE_SUSPEND(error);
	complete_all(&dev->power.completion);
	return error;
}

static void async_suspend_late(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_late(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}
	put_device(dev);
}

static int device_suspend_late(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend_late, dev);
		return 0;
	}

	return __device_suspend_late(dev, pm_transition, false);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev);

		mutex_lock(&dpm_list_mtx);
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);

		if (error) {
			pm_dev_err(dev, state, " late", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend_late++;
		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
		dpm_resume_early(resume_event(state));
	} else {
		dpm_show_time(starttime, state, "late");
	}
	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
	return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
	int error = dpm_suspend_late(state);
	if (error)
		return error;

	error = dpm_suspend_noirq(state);
	if (error) {
		dpm_resume_early(resume_event(state));
		return error;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: string description of caller.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state),
			  char *info)
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev, state);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_children(dev, async);

	if (async_error)
		goto Complete;

	/*
	 * If a device configured to wake up the system from sleep states
	 * has been suspended at run time and there's a resume request pending
	 * for it, this is equivalent to the device signaling wakeup, so the
	 * system suspend operation should be aborted.
	 */
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		if (pm_runtime_status_suspended(dev)) {
			pm_runtime_disable(dev);
			if (pm_runtime_status_suspended(dev))
				goto Complete;

			pm_runtime_enable(dev);
		}
		dev->power.direct_complete = false;
	}

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Run;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend,
						"legacy class ");
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend,
						"legacy bus ");
			goto End;
		}
	}

Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

End:
	if (!error) {
		struct device *parent = dev->parent;

		dev->power.is_suspended = true;
		if (parent) {
			spin_lock_irq(&parent->power.lock);

			dev->parent->power.direct_complete = false;
			if (dev->power.wakeup_path
			    && !dev->parent->power.ignore_children)
				dev->parent->power.wakeup_path = true;

			spin_unlock_irq(&parent->power.lock);
		}
	}

	device_unlock(dev);
	dpm_watchdog_clear(&wd);

Complete:
	complete_all(&dev->power.completion);
	if (error)
		async_error = error;

	TRACE_SUSPEND(error);
	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
	might_sleep();

	cpufreq_suspend();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	} else
		dpm_show_time(starttime, state, NULL);
	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device. No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	int ret = 0;

	if (dev->power.syscore)
		return 0;

	/*
	 * If a device's parent goes into runtime suspend at the wrong time,
	 * it won't be possible to resume the device. To prevent this we
	 * block runtime suspend here, during the prepare phase, and allow
	 * it again during the complete phase.
	 */
	pm_runtime_get_noresume(dev);

	device_lock(dev);

	dev->power.wakeup_path = device_may_wakeup(dev);

	if (dev->power.no_pm_callbacks) {
		ret = 1;	/* Let device go direct_complete */
		goto unlock;
	}

	if (dev->pm_domain)
		callback = dev->pm_domain->ops.prepare;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->prepare;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->prepare;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->prepare;

	if (!callback && dev->driver && dev->driver->pm)
		callback = dev->driver->pm->prepare;

	if (callback)
		ret = callback(dev);

unlock:
	device_unlock(dev);

	if (ret < 0) {
		suspend_report_result(callback, ret);
		pm_runtime_put(dev);
		return ret;
	}
	/*
	 * A positive return value from ->prepare() means "this device appears
	 * to be runtime-suspended and its state is fine, so if it really is
	 * runtime-suspended, you can leave it in that state provided that you
	 * will do the same thing with all of its descendants". This only
	 * applies to suspend transitions, however.
	 */
	spin_lock_irq(&dev->power.lock);
	dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND;
	spin_unlock_irq(&dev->power.lock);
	return 0;
}
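
/*
 * Illustrative sketch (an assumption, not code from this file): a driver that
 * wants to opt in to the direct_complete optimization described above can
 * simply report its runtime PM status from its ->prepare() callback, e.g.:
 *
 *	static int foo_prepare(struct device *dev)
 *	{
 *		return pm_runtime_suspended(dev);	// > 0: may stay suspended
 *	}
 *
 * A zero return keeps the normal suspend path and a negative value aborts the
 * transition; the "foo" name is a placeholder.
 */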

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
	might_sleep();

	/*
	 * Give the known devices a chance to complete their probes before we
	 * disable probing of devices. This sync point is important at least
	 * at boot time and during hibernation restore.
	 */
	wait_for_device_probe();
	/*
	 * It is unsafe if probing of devices happens during suspend or
	 * hibernation; system behavior will be unpredictable in that case.
	 * So let's prohibit device probing here and defer the probes instead.
	 * The normal behavior will be restored in dpm_complete().
	 */
	device_block_probing();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		error = device_prepare(dev, state);
		trace_device_pm_callback_end(dev, error);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);

/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
	struct device *dev;

	if (!fn)
		return;

	device_pm_lock();
	list_for_each_entry(dev, &dpm_list, power.entry)
		fn(dev, data);
	device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);

static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
{
	if (!ops)
		return true;

	return !ops->prepare &&
	       !ops->suspend &&
	       !ops->suspend_late &&
	       !ops->suspend_noirq &&
	       !ops->resume_noirq &&
	       !ops->resume_early &&
	       !ops->resume &&
	       !ops->complete;
}

void device_pm_check_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_pm_callbacks =
		(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
		 !dev->bus->suspend && !dev->bus->resume)) &&
		(!dev->class || (pm_ops_is_empty(dev->class->pm) &&
		 !dev->class->suspend && !dev->class->resume)) &&
		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
		(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
		 !dev->driver->suspend && !dev->driver->resume));
	spin_unlock_irq(&dev->power.lock);
}