1 /*
2  * drivers/base/power/main.c - Where the driver meets power management.
3  *
4  * Copyright (c) 2003 Patrick Mochel
5  * Copyright (c) 2003 Open Source Development Lab
6  *
7  * This file is released under the GPLv2
8  *
9  *
10  * The driver model core calls device_pm_add() when a device is registered.
11  * This will initialize the embedded device_pm_info object in the device
12  * and add it to the list of power-controlled devices. sysfs entries for
13  * controlling device power management will also be added.
14  *
15  * A separate list is used for keeping track of power info, because the power
16  * domain dependencies may differ from the ancestral dependencies that the
17  * subsystem list maintains.
18  */
19
20 #include <linux/device.h>
21 #include <linux/kallsyms.h>
22 #include <linux/export.h>
23 #include <linux/mutex.h>
24 #include <linux/pm.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/pm-trace.h>
27 #include <linux/pm_wakeirq.h>
28 #include <linux/interrupt.h>
29 #include <linux/sched.h>
30 #include <linux/async.h>
31 #include <linux/suspend.h>
32 #include <trace/events/power.h>
33 #include <linux/cpufreq.h>
34 #include <linux/cpuidle.h>
35 #include <linux/timer.h>
36
37 #include "../base.h"
38 #include "power.h"
39
40 typedef int (*pm_callback_t)(struct device *);
41
42 /*
43  * The entries in dpm_list are in depth-first order, simply
44  * because children are guaranteed to be discovered after parents, and
45  * are inserted at the back of the list on discovery.
46  *
47  * Since device_pm_add() may be called with a device lock held,
48  * we must never try to acquire a device lock while holding
49  * dpm_list_mutex.
50  */
51
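/*
 * The suspend paths below walk these lists from the tail (children before
 * parents), while the resume paths walk them from the head (parents before
 * children), so the depth-first ordering described above is sufficient.
 */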
52 LIST_HEAD(dpm_list);
53 static LIST_HEAD(dpm_prepared_list);
54 static LIST_HEAD(dpm_suspended_list);
55 static LIST_HEAD(dpm_late_early_list);
56 static LIST_HEAD(dpm_noirq_list);
57
58 struct suspend_stats suspend_stats;
59 static DEFINE_MUTEX(dpm_list_mtx);
60 static pm_message_t pm_transition;
61
62 static int async_error;
63
64 static char *pm_verb(int event)
65 {
66         switch (event) {
67         case PM_EVENT_SUSPEND:
68                 return "suspend";
69         case PM_EVENT_RESUME:
70                 return "resume";
71         case PM_EVENT_FREEZE:
72                 return "freeze";
73         case PM_EVENT_QUIESCE:
74                 return "quiesce";
75         case PM_EVENT_HIBERNATE:
76                 return "hibernate";
77         case PM_EVENT_THAW:
78                 return "thaw";
79         case PM_EVENT_RESTORE:
80                 return "restore";
81         case PM_EVENT_RECOVER:
82                 return "recover";
83         default:
84                 return "(unknown PM event)";
85         }
86 }
87
88 /**
89  * device_pm_sleep_init - Initialize system suspend-related device fields.
90  * @dev: Device object being initialized.
91  */
92 void device_pm_sleep_init(struct device *dev)
93 {
94         dev->power.is_prepared = false;
95         dev->power.is_suspended = false;
96         dev->power.is_noirq_suspended = false;
97         dev->power.is_late_suspended = false;
98         init_completion(&dev->power.completion);
99         complete_all(&dev->power.completion);
100         dev->power.wakeup = NULL;
101         INIT_LIST_HEAD(&dev->power.entry);
102 }
103
104 /**
105  * device_pm_lock - Lock the list of active devices used by the PM core.
106  */
107 void device_pm_lock(void)
108 {
109         mutex_lock(&dpm_list_mtx);
110 }
111
112 /**
113  * device_pm_unlock - Unlock the list of active devices used by the PM core.
114  */
115 void device_pm_unlock(void)
116 {
117         mutex_unlock(&dpm_list_mtx);
118 }
119
120 /**
121  * device_pm_add - Add a device to the PM core's list of active devices.
122  * @dev: Device to add to the list.
123  */
124 void device_pm_add(struct device *dev)
125 {
126         pr_debug("PM: Adding info for %s:%s\n",
127                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
128         mutex_lock(&dpm_list_mtx);
129         if (dev->parent && dev->parent->power.is_prepared)
130                 dev_warn(dev, "parent %s should not be sleeping\n",
131                         dev_name(dev->parent));
132         list_add_tail(&dev->power.entry, &dpm_list);
133         mutex_unlock(&dpm_list_mtx);
134 }
135
136 /**
137  * device_pm_remove - Remove a device from the PM core's list of active devices.
138  * @dev: Device to be removed from the list.
139  */
140 void device_pm_remove(struct device *dev)
141 {
142         pr_debug("PM: Removing info for %s:%s\n",
143                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
144         complete_all(&dev->power.completion);
145         mutex_lock(&dpm_list_mtx);
146         list_del_init(&dev->power.entry);
147         mutex_unlock(&dpm_list_mtx);
148         device_wakeup_disable(dev);
149         pm_runtime_remove(dev);
150 }
151
152 /**
153  * device_pm_move_before - Move device in the PM core's list of active devices.
154  * @deva: Device to move in dpm_list.
155  * @devb: Device @deva should come before.
156  */
157 void device_pm_move_before(struct device *deva, struct device *devb)
158 {
159         pr_debug("PM: Moving %s:%s before %s:%s\n",
160                  deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
161                  devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
162         /* Delete deva from dpm_list and reinsert before devb. */
163         list_move_tail(&deva->power.entry, &devb->power.entry);
164 }
165
166 /**
167  * device_pm_move_after - Move device in the PM core's list of active devices.
168  * @deva: Device to move in dpm_list.
169  * @devb: Device @deva should come after.
170  */
171 void device_pm_move_after(struct device *deva, struct device *devb)
172 {
173         pr_debug("PM: Moving %s:%s after %s:%s\n",
174                  deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
175                  devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
176         /* Delete deva from dpm_list and reinsert after devb. */
177         list_move(&deva->power.entry, &devb->power.entry);
178 }
179
180 /**
181  * device_pm_move_last - Move device to end of the PM core's list of devices.
182  * @dev: Device to move in dpm_list.
183  */
184 void device_pm_move_last(struct device *dev)
185 {
186         pr_debug("PM: Moving %s:%s to end of list\n",
187                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
188         list_move_tail(&dev->power.entry, &dpm_list);
189 }
190
191 static ktime_t initcall_debug_start(struct device *dev)
192 {
193         ktime_t calltime = ktime_set(0, 0);
194
195         if (pm_print_times_enabled) {
196                 pr_info("calling  %s+ @ %i, parent: %s\n",
197                         dev_name(dev), task_pid_nr(current),
198                         dev->parent ? dev_name(dev->parent) : "none");
199                 calltime = ktime_get();
200         }
201
202         return calltime;
203 }
204
205 static void initcall_debug_report(struct device *dev, ktime_t calltime,
206                                   int error, pm_message_t state, char *info)
207 {
208         ktime_t rettime;
209         s64 nsecs;
210
211         rettime = ktime_get();
212         nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));
213
214         if (pm_print_times_enabled) {
215                 pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
216                         error, (unsigned long long)nsecs >> 10);
217         }
218 }
219
220 /**
221  * dpm_wait - Wait for a PM operation to complete.
222  * @dev: Device to wait for.
223  * @async: If unset, wait only if the device's power.async_suspend flag is set.
224  */
225 static void dpm_wait(struct device *dev, bool async)
226 {
227         if (!dev)
228                 return;
229
230         if (async || (pm_async_enabled && dev->power.async_suspend))
231                 wait_for_completion(&dev->power.completion);
232 }
233
234 static int dpm_wait_fn(struct device *dev, void *async_ptr)
235 {
236         dpm_wait(dev, *((bool *)async_ptr));
237         return 0;
238 }
239
240 static void dpm_wait_for_children(struct device *dev, bool async)
241 {
242         device_for_each_child(dev, &async, dpm_wait_fn);
243 }
244
245 /**
246  * pm_op - Return the PM operation appropriate for given PM event.
247  * @ops: PM operations to choose from.
248  * @state: PM transition of the system being carried out.
249  */
250 static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
251 {
252         switch (state.event) {
253 #ifdef CONFIG_SUSPEND
254         case PM_EVENT_SUSPEND:
255                 return ops->suspend;
256         case PM_EVENT_RESUME:
257                 return ops->resume;
258 #endif /* CONFIG_SUSPEND */
259 #ifdef CONFIG_HIBERNATE_CALLBACKS
260         case PM_EVENT_FREEZE:
261         case PM_EVENT_QUIESCE:
262                 return ops->freeze;
263         case PM_EVENT_HIBERNATE:
264                 return ops->poweroff;
265         case PM_EVENT_THAW:
266         case PM_EVENT_RECOVER:
267                 return ops->thaw;
269         case PM_EVENT_RESTORE:
270                 return ops->restore;
271 #endif /* CONFIG_HIBERNATE_CALLBACKS */
272         }
273
274         return NULL;
275 }
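
/*
 * Illustrative sketch only (hypothetical foo_* names, not taken from this
 * file): a driver that declares
 *
 *        static const struct dev_pm_ops foo_pm_ops = {
 *                SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *        };
 *
 * will have foo_suspend returned by pm_op() for PM_EVENT_SUSPEND and
 * foo_resume for PM_EVENT_RESUME; the _late/_early and _noirq phases are
 * dispatched by the two helpers below.
 */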
276
277 /**
278  * pm_late_early_op - Return the PM operation appropriate for given PM event.
279  * @ops: PM operations to choose from.
280  * @state: PM transition of the system being carried out.
281  *
282  * Runtime PM is disabled for the device while the returned callback runs.
283  */
284 static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
285                                       pm_message_t state)
286 {
287         switch (state.event) {
288 #ifdef CONFIG_SUSPEND
289         case PM_EVENT_SUSPEND:
290                 return ops->suspend_late;
291         case PM_EVENT_RESUME:
292                 return ops->resume_early;
293 #endif /* CONFIG_SUSPEND */
294 #ifdef CONFIG_HIBERNATE_CALLBACKS
295         case PM_EVENT_FREEZE:
296         case PM_EVENT_QUIESCE:
297                 return ops->freeze_late;
298         case PM_EVENT_HIBERNATE:
299                 return ops->poweroff_late;
300         case PM_EVENT_THAW:
301         case PM_EVENT_RECOVER:
302                 return ops->thaw_early;
303         case PM_EVENT_RESTORE:
304                 return ops->restore_early;
305 #endif /* CONFIG_HIBERNATE_CALLBACKS */
306         }
307
308         return NULL;
309 }
310
311 /**
312  * pm_noirq_op - Return the PM operation appropriate for given PM event.
313  * @ops: PM operations to choose from.
314  * @state: PM transition of the system being carried out.
315  *
316  * The device's driver will not receive interrupts while the returned
317  * callback is running.
318  */
319 static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
320 {
321         switch (state.event) {
322 #ifdef CONFIG_SUSPEND
323         case PM_EVENT_SUSPEND:
324                 return ops->suspend_noirq;
325         case PM_EVENT_RESUME:
326                 return ops->resume_noirq;
327 #endif /* CONFIG_SUSPEND */
328 #ifdef CONFIG_HIBERNATE_CALLBACKS
329         case PM_EVENT_FREEZE:
330         case PM_EVENT_QUIESCE:
331                 return ops->freeze_noirq;
332         case PM_EVENT_HIBERNATE:
333                 return ops->poweroff_noirq;
334         case PM_EVENT_THAW:
335         case PM_EVENT_RECOVER:
336                 return ops->thaw_noirq;
337         case PM_EVENT_RESTORE:
338                 return ops->restore_noirq;
339 #endif /* CONFIG_HIBERNATE_CALLBACKS */
340         }
341
342         return NULL;
343 }
344
345 static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
346 {
347         dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
348                 ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
349                 ", may wakeup" : "");
350 }
351
352 static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
353                         int error)
354 {
355         printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
356                 dev_name(dev), pm_verb(state.event), info, error);
357 }
358
359 static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
360 {
361         ktime_t calltime;
362         u64 usecs64;
363         int usecs;
364
365         calltime = ktime_get();
366         usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
367         do_div(usecs64, NSEC_PER_USEC);
368         usecs = usecs64;
369         if (usecs == 0)
370                 usecs = 1;
371         pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
372                 info ?: "", info ? " " : "", pm_verb(state.event),
373                 usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
374 }
375
376 static int dpm_run_callback(pm_callback_t cb, struct device *dev,
377                             pm_message_t state, char *info)
378 {
379         ktime_t calltime;
380         int error;
381
382         if (!cb)
383                 return 0;
384
385         calltime = initcall_debug_start(dev);
386
387         pm_dev_dbg(dev, state, info);
388         trace_device_pm_callback_start(dev, info, state.event);
389         error = cb(dev);
390         trace_device_pm_callback_end(dev, error);
391         suspend_report_result(cb, error);
392
393         initcall_debug_report(dev, calltime, error, state, info);
394
395         return error;
396 }
397
398 #ifdef CONFIG_DPM_WATCHDOG
399 struct dpm_watchdog {
400         struct device           *dev;
401         struct task_struct      *tsk;
402         struct timer_list       timer;
403 };
404
405 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
406         struct dpm_watchdog wd
407
408 /**
409  * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
410  * @data: Watchdog object address.
411  *
412  * Called when a driver has timed out suspending or resuming.
413  * There's not much we can do here to recover so panic() to
414  * capture a crash-dump in pstore.
415  */
416 static void dpm_watchdog_handler(unsigned long data)
417 {
418         struct dpm_watchdog *wd = (void *)data;
419
420         dev_emerg(wd->dev, "**** DPM device timeout ****\n");
421         show_stack(wd->tsk, NULL);
422         panic("%s %s: unrecoverable failure\n",
423                 dev_driver_string(wd->dev), dev_name(wd->dev));
424 }
425
426 /**
427  * dpm_watchdog_set - Enable pm watchdog for given device.
428  * @wd: Watchdog. Must be allocated on the stack.
429  * @dev: Device to handle.
430  */
431 static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
432 {
433         struct timer_list *timer = &wd->timer;
434
435         wd->dev = dev;
436         wd->tsk = current;
437
438         init_timer_on_stack(timer);
439         /* use same timeout value for both suspend and resume */
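        /* CONFIG_DPM_WATCHDOG_TIMEOUT is given in seconds, hence the HZ factor. */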
440         timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
441         timer->function = dpm_watchdog_handler;
442         timer->data = (unsigned long)wd;
443         add_timer(timer);
444 }
445
446 /**
447  * dpm_watchdog_clear - Disable suspend/resume watchdog.
448  * @wd: Watchdog to disable.
449  */
450 static void dpm_watchdog_clear(struct dpm_watchdog *wd)
451 {
452         struct timer_list *timer = &wd->timer;
453
454         del_timer_sync(timer);
455         destroy_timer_on_stack(timer);
456 }
457 #else
458 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
459 #define dpm_watchdog_set(x, y)
460 #define dpm_watchdog_clear(x)
461 #endif
462
463 /*------------------------- Resume routines -------------------------*/
464
465 /**
466  * device_resume_noirq - Execute a "noirq resume" callback for given device.
467  * @dev: Device to handle.
468  * @state: PM transition of the system being carried out.
469  * @async: If true, the device is being resumed asynchronously.
470  *
471  * The driver of @dev will not receive interrupts while this function is being
472  * executed.
473  */
474 static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
475 {
476         pm_callback_t callback = NULL;
477         char *info = NULL;
478         int error = 0;
479
480         TRACE_DEVICE(dev);
481         TRACE_RESUME(0);
482
483         if (dev->power.syscore || dev->power.direct_complete)
484                 goto Out;
485
486         if (!dev->power.is_noirq_suspended)
487                 goto Out;
488
489         dpm_wait(dev->parent, async);
490
491         if (dev->pm_domain) {
492                 info = "noirq power domain ";
493                 callback = pm_noirq_op(&dev->pm_domain->ops, state);
494         } else if (dev->type && dev->type->pm) {
495                 info = "noirq type ";
496                 callback = pm_noirq_op(dev->type->pm, state);
497         } else if (dev->class && dev->class->pm) {
498                 info = "noirq class ";
499                 callback = pm_noirq_op(dev->class->pm, state);
500         } else if (dev->bus && dev->bus->pm) {
501                 info = "noirq bus ";
502                 callback = pm_noirq_op(dev->bus->pm, state);
503         }
504
505         if (!callback && dev->driver && dev->driver->pm) {
506                 info = "noirq driver ";
507                 callback = pm_noirq_op(dev->driver->pm, state);
508         }
509
510         error = dpm_run_callback(callback, dev, state, info);
511         dev->power.is_noirq_suspended = false;
512
513  Out:
514         complete_all(&dev->power.completion);
515         TRACE_RESUME(error);
516         return error;
517 }
518
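/*
 * Devices are handled asynchronously only when async suspend is enabled
 * globally and the device has opted in; async handling is also skipped
 * while pm_trace is active, since pm_trace identifies a hanging device by
 * the strictly ordered, one-at-a-time resume sequence.
 */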
519 static bool is_async(struct device *dev)
520 {
521         return dev->power.async_suspend && pm_async_enabled
522                 && !pm_trace_is_enabled();
523 }
524
525 static void async_resume_noirq(void *data, async_cookie_t cookie)
526 {
527         struct device *dev = (struct device *)data;
528         int error;
529
530         error = device_resume_noirq(dev, pm_transition, true);
531         if (error)
532                 pm_dev_err(dev, pm_transition, " async", error);
533
534         put_device(dev);
535 }
536
537 /**
538  * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
539  * @state: PM transition of the system being carried out.
540  *
541  * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
542  * enable device drivers to receive interrupts.
543  */
544 void dpm_resume_noirq(pm_message_t state)
545 {
546         struct device *dev;
547         ktime_t starttime = ktime_get();
548
549         trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
550         mutex_lock(&dpm_list_mtx);
551         pm_transition = state;
552
553         /*
554          * Advance the async threads upfront,
555          * in case the starting of async threads is
556          * delayed by non-async resuming devices.
557          */
558         list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
559                 reinit_completion(&dev->power.completion);
560                 if (is_async(dev)) {
561                         get_device(dev);
562                         async_schedule(async_resume_noirq, dev);
563                 }
564         }
565
566         while (!list_empty(&dpm_noirq_list)) {
567                 dev = to_device(dpm_noirq_list.next);
568                 get_device(dev);
569                 list_move_tail(&dev->power.entry, &dpm_late_early_list);
570                 mutex_unlock(&dpm_list_mtx);
571
572                 if (!is_async(dev)) {
573                         int error;
574
575                         error = device_resume_noirq(dev, state, false);
576                         if (error) {
577                                 suspend_stats.failed_resume_noirq++;
578                                 dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
579                                 dpm_save_failed_dev(dev_name(dev));
580                                 pm_dev_err(dev, state, " noirq", error);
581                         }
582                 }
583
584                 mutex_lock(&dpm_list_mtx);
585                 put_device(dev);
586         }
587         mutex_unlock(&dpm_list_mtx);
588         async_synchronize_full();
589         dpm_show_time(starttime, state, "noirq");
590         resume_device_irqs();
591         device_wakeup_disarm_wake_irqs();
592         cpuidle_resume();
593         trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
594 }
595
596 /**
597  * device_resume_early - Execute an "early resume" callback for given device.
598  * @dev: Device to handle.
599  * @state: PM transition of the system being carried out.
600  * @async: If true, the device is being resumed asynchronously.
601  *
602  * Runtime PM is disabled for @dev while this function is being executed.
603  */
604 static int device_resume_early(struct device *dev, pm_message_t state, bool async)
605 {
606         pm_callback_t callback = NULL;
607         char *info = NULL;
608         int error = 0;
609
610         TRACE_DEVICE(dev);
611         TRACE_RESUME(0);
612
613         if (dev->power.syscore || dev->power.direct_complete)
614                 goto Out;
615
616         if (!dev->power.is_late_suspended)
617                 goto Out;
618
619         dpm_wait(dev->parent, async);
620
621         if (dev->pm_domain) {
622                 info = "early power domain ";
623                 callback = pm_late_early_op(&dev->pm_domain->ops, state);
624         } else if (dev->type && dev->type->pm) {
625                 info = "early type ";
626                 callback = pm_late_early_op(dev->type->pm, state);
627         } else if (dev->class && dev->class->pm) {
628                 info = "early class ";
629                 callback = pm_late_early_op(dev->class->pm, state);
630         } else if (dev->bus && dev->bus->pm) {
631                 info = "early bus ";
632                 callback = pm_late_early_op(dev->bus->pm, state);
633         }
634
635         if (!callback && dev->driver && dev->driver->pm) {
636                 info = "early driver ";
637                 callback = pm_late_early_op(dev->driver->pm, state);
638         }
639
640         error = dpm_run_callback(callback, dev, state, info);
641         dev->power.is_late_suspended = false;
642
643  Out:
644         TRACE_RESUME(error);
645
646         pm_runtime_enable(dev);
647         complete_all(&dev->power.completion);
648         return error;
649 }
650
651 static void async_resume_early(void *data, async_cookie_t cookie)
652 {
653         struct device *dev = (struct device *)data;
654         int error;
655
656         error = device_resume_early(dev, pm_transition, true);
657         if (error)
658                 pm_dev_err(dev, pm_transition, " async", error);
659
660         put_device(dev);
661 }
662
663 /**
664  * dpm_resume_early - Execute "early resume" callbacks for all devices.
665  * @state: PM transition of the system being carried out.
666  */
667 void dpm_resume_early(pm_message_t state)
668 {
669         struct device *dev;
670         ktime_t starttime = ktime_get();
671
672         trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
673         mutex_lock(&dpm_list_mtx);
674         pm_transition = state;
675
676         /*
677          * Advance the async threads upfront,
678          * in case the starting of async threads is
679          * delayed by non-async resuming devices.
680          */
681         list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
682                 reinit_completion(&dev->power.completion);
683                 if (is_async(dev)) {
684                         get_device(dev);
685                         async_schedule(async_resume_early, dev);
686                 }
687         }
688
689         while (!list_empty(&dpm_late_early_list)) {
690                 dev = to_device(dpm_late_early_list.next);
691                 get_device(dev);
692                 list_move_tail(&dev->power.entry, &dpm_suspended_list);
693                 mutex_unlock(&dpm_list_mtx);
694
695                 if (!is_async(dev)) {
696                         int error;
697
698                         error = device_resume_early(dev, state, false);
699                         if (error) {
700                                 suspend_stats.failed_resume_early++;
701                                 dpm_save_failed_step(SUSPEND_RESUME_EARLY);
702                                 dpm_save_failed_dev(dev_name(dev));
703                                 pm_dev_err(dev, state, " early", error);
704                         }
705                 }
706                 mutex_lock(&dpm_list_mtx);
707                 put_device(dev);
708         }
709         mutex_unlock(&dpm_list_mtx);
710         async_synchronize_full();
711         dpm_show_time(starttime, state, "early");
712         trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
713 }
714
715 /**
716  * dpm_resume_start - Execute "noirq" and "early" device callbacks.
717  * @state: PM transition of the system being carried out.
718  */
719 void dpm_resume_start(pm_message_t state)
720 {
721         dpm_resume_noirq(state);
722         dpm_resume_early(state);
723 }
724 EXPORT_SYMBOL_GPL(dpm_resume_start);
725
726 /**
727  * device_resume - Execute "resume" callbacks for given device.
728  * @dev: Device to handle.
729  * @state: PM transition of the system being carried out.
730  * @async: If true, the device is being resumed asynchronously.
731  */
732 static int device_resume(struct device *dev, pm_message_t state, bool async)
733 {
734         pm_callback_t callback = NULL;
735         char *info = NULL;
736         int error = 0;
737         DECLARE_DPM_WATCHDOG_ON_STACK(wd);
738
739         TRACE_DEVICE(dev);
740         TRACE_RESUME(0);
741
742         if (dev->power.syscore)
743                 goto Complete;
744
745         if (dev->power.direct_complete) {
746                 /* Match the pm_runtime_disable() in __device_suspend(). */
747                 pm_runtime_enable(dev);
748                 goto Complete;
749         }
750
751         dpm_wait(dev->parent, async);
752         dpm_watchdog_set(&wd, dev);
753         device_lock(dev);
754
755         /*
756          * This is a fib.  But we'll allow new children to be added below
757          * a resumed device, even if the device hasn't been completed yet.
758          */
759         dev->power.is_prepared = false;
760
761         if (!dev->power.is_suspended)
762                 goto Unlock;
763
764         if (dev->pm_domain) {
765                 info = "power domain ";
766                 callback = pm_op(&dev->pm_domain->ops, state);
767                 goto Driver;
768         }
769
770         if (dev->type && dev->type->pm) {
771                 info = "type ";
772                 callback = pm_op(dev->type->pm, state);
773                 goto Driver;
774         }
775
776         if (dev->class) {
777                 if (dev->class->pm) {
778                         info = "class ";
779                         callback = pm_op(dev->class->pm, state);
780                         goto Driver;
781                 } else if (dev->class->resume) {
782                         info = "legacy class ";
783                         callback = dev->class->resume;
784                         goto End;
785                 }
786         }
787
788         if (dev->bus) {
789                 if (dev->bus->pm) {
790                         info = "bus ";
791                         callback = pm_op(dev->bus->pm, state);
792                 } else if (dev->bus->resume) {
793                         info = "legacy bus ";
794                         callback = dev->bus->resume;
795                         goto End;
796                 }
797         }
798
799  Driver:
800         if (!callback && dev->driver && dev->driver->pm) {
801                 info = "driver ";
802                 callback = pm_op(dev->driver->pm, state);
803         }
804
805  End:
806         error = dpm_run_callback(callback, dev, state, info);
807         dev->power.is_suspended = false;
808
809  Unlock:
810         device_unlock(dev);
811         dpm_watchdog_clear(&wd);
812
813  Complete:
814         complete_all(&dev->power.completion);
815
816         TRACE_RESUME(error);
817
818         return error;
819 }
820
821 static void async_resume(void *data, async_cookie_t cookie)
822 {
823         struct device *dev = (struct device *)data;
824         int error;
825
826         error = device_resume(dev, pm_transition, true);
827         if (error)
828                 pm_dev_err(dev, pm_transition, " async", error);
829         put_device(dev);
830 }
831
832 /**
833  * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
834  * @state: PM transition of the system being carried out.
835  *
836  * Execute the appropriate "resume" callback for all devices whose status
837  * indicates that they are suspended.
838  */
839 void dpm_resume(pm_message_t state)
840 {
841         struct device *dev;
842         ktime_t starttime = ktime_get();
843
844         trace_suspend_resume(TPS("dpm_resume"), state.event, true);
845         might_sleep();
846
847         mutex_lock(&dpm_list_mtx);
848         pm_transition = state;
849         async_error = 0;
850
851         list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
852                 reinit_completion(&dev->power.completion);
853                 if (is_async(dev)) {
854                         get_device(dev);
855                         async_schedule(async_resume, dev);
856                 }
857         }
858
859         while (!list_empty(&dpm_suspended_list)) {
860                 dev = to_device(dpm_suspended_list.next);
861                 get_device(dev);
862                 if (!is_async(dev)) {
863                         int error;
864
865                         mutex_unlock(&dpm_list_mtx);
866
867                         error = device_resume(dev, state, false);
868                         if (error) {
869                                 suspend_stats.failed_resume++;
870                                 dpm_save_failed_step(SUSPEND_RESUME);
871                                 dpm_save_failed_dev(dev_name(dev));
872                                 pm_dev_err(dev, state, "", error);
873                         }
874
875                         mutex_lock(&dpm_list_mtx);
876                 }
877                 if (!list_empty(&dev->power.entry))
878                         list_move_tail(&dev->power.entry, &dpm_prepared_list);
879                 put_device(dev);
880         }
881         mutex_unlock(&dpm_list_mtx);
882         async_synchronize_full();
883         dpm_show_time(starttime, state, NULL);
884
885         cpufreq_resume();
886         trace_suspend_resume(TPS("dpm_resume"), state.event, false);
887 }
888
889 /**
890  * device_complete - Complete a PM transition for given device.
891  * @dev: Device to handle.
892  * @state: PM transition of the system being carried out.
893  */
894 static void device_complete(struct device *dev, pm_message_t state)
895 {
896         void (*callback)(struct device *) = NULL;
897         char *info = NULL;
898
899         if (dev->power.syscore)
900                 return;
901
902         device_lock(dev);
903
904         if (dev->pm_domain) {
905                 info = "completing power domain ";
906                 callback = dev->pm_domain->ops.complete;
907         } else if (dev->type && dev->type->pm) {
908                 info = "completing type ";
909                 callback = dev->type->pm->complete;
910         } else if (dev->class && dev->class->pm) {
911                 info = "completing class ";
912                 callback = dev->class->pm->complete;
913         } else if (dev->bus && dev->bus->pm) {
914                 info = "completing bus ";
915                 callback = dev->bus->pm->complete;
916         }
917
918         if (!callback && dev->driver && dev->driver->pm) {
919                 info = "completing driver ";
920                 callback = dev->driver->pm->complete;
921         }
922
923         if (callback) {
924                 pm_dev_dbg(dev, state, info);
925                 callback(dev);
926         }
927
928         device_unlock(dev);
929
930         pm_runtime_put(dev);
931 }
932
933 /**
934  * dpm_complete - Complete a PM transition for all non-sysdev devices.
935  * @state: PM transition of the system being carried out.
936  *
937  * Execute the ->complete() callbacks for all devices whose PM status is not
938  * DPM_ON (this allows new devices to be registered).
939  */
940 void dpm_complete(pm_message_t state)
941 {
942         struct list_head list;
943
944         trace_suspend_resume(TPS("dpm_complete"), state.event, true);
945         might_sleep();
946
947         INIT_LIST_HEAD(&list);
948         mutex_lock(&dpm_list_mtx);
949         while (!list_empty(&dpm_prepared_list)) {
950                 struct device *dev = to_device(dpm_prepared_list.prev);
951
952                 get_device(dev);
953                 dev->power.is_prepared = false;
954                 list_move(&dev->power.entry, &list);
955                 mutex_unlock(&dpm_list_mtx);
956
957                 trace_device_pm_callback_start(dev, "", state.event);
958                 device_complete(dev, state);
959                 trace_device_pm_callback_end(dev, 0);
960
961                 mutex_lock(&dpm_list_mtx);
962                 put_device(dev);
963         }
964         list_splice(&list, &dpm_list);
965         mutex_unlock(&dpm_list_mtx);
966         trace_suspend_resume(TPS("dpm_complete"), state.event, false);
967 }
968
969 /**
970  * dpm_resume_end - Execute "resume" callbacks and complete system transition.
971  * @state: PM transition of the system being carried out.
972  *
973  * Execute "resume" callbacks for all devices and complete the PM transition of
974  * the system.
975  */
976 void dpm_resume_end(pm_message_t state)
977 {
978         dpm_resume(state);
979         dpm_complete(state);
980 }
981 EXPORT_SYMBOL_GPL(dpm_resume_end);
982
983
984 /*------------------------- Suspend routines -------------------------*/
985
986 /**
987  * resume_event - Return a "resume" message for given "suspend" sleep state.
988  * @sleep_state: PM message representing a sleep state.
989  *
990  * Return a PM message representing the resume event corresponding to given
991  * sleep state.
992  */
993 static pm_message_t resume_event(pm_message_t sleep_state)
994 {
995         switch (sleep_state.event) {
996         case PM_EVENT_SUSPEND:
997                 return PMSG_RESUME;
998         case PM_EVENT_FREEZE:
999         case PM_EVENT_QUIESCE:
1000                 return PMSG_RECOVER;
1001         case PM_EVENT_HIBERNATE:
1002                 return PMSG_RESTORE;
1003         }
1004         return PMSG_ON;
1005 }
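
/*
 * Note that PMSG_RECOVER is mapped to the thaw/thaw_early/thaw_noirq
 * callbacks by the pm_*op() helpers above, so a failed freeze or quiesce
 * is undone with the same callbacks as a normal thaw.
 */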
1006
1007 /**
1008  * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1009  * @dev: Device to handle.
1010  * @state: PM transition of the system being carried out.
1011  * @async: If true, the device is being suspended asynchronously.
1012  *
1013  * The driver of @dev will not receive interrupts while this function is being
1014  * executed.
1015  */
1016 static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1017 {
1018         pm_callback_t callback = NULL;
1019         char *info = NULL;
1020         int error = 0;
1021
1022         TRACE_DEVICE(dev);
1023         TRACE_SUSPEND(0);
1024
1025         dpm_wait_for_children(dev, async);
1026
1027         if (async_error)
1028                 goto Complete;
1029
1030         if (pm_wakeup_pending()) {
1031                 async_error = -EBUSY;
1032                 goto Complete;
1033         }
1034
1035         if (dev->power.syscore || dev->power.direct_complete)
1036                 goto Complete;
1037
1038         if (dev->pm_domain) {
1039                 info = "noirq power domain ";
1040                 callback = pm_noirq_op(&dev->pm_domain->ops, state);
1041         } else if (dev->type && dev->type->pm) {
1042                 info = "noirq type ";
1043                 callback = pm_noirq_op(dev->type->pm, state);
1044         } else if (dev->class && dev->class->pm) {
1045                 info = "noirq class ";
1046                 callback = pm_noirq_op(dev->class->pm, state);
1047         } else if (dev->bus && dev->bus->pm) {
1048                 info = "noirq bus ";
1049                 callback = pm_noirq_op(dev->bus->pm, state);
1050         }
1051
1052         if (!callback && dev->driver && dev->driver->pm) {
1053                 info = "noirq driver ";
1054                 callback = pm_noirq_op(dev->driver->pm, state);
1055         }
1056
1057         error = dpm_run_callback(callback, dev, state, info);
1058         if (!error)
1059                 dev->power.is_noirq_suspended = true;
1060         else
1061                 async_error = error;
1062
1063 Complete:
1064         complete_all(&dev->power.completion);
1065         TRACE_SUSPEND(error);
1066         return error;
1067 }
1068
1069 static void async_suspend_noirq(void *data, async_cookie_t cookie)
1070 {
1071         struct device *dev = (struct device *)data;
1072         int error;
1073
1074         error = __device_suspend_noirq(dev, pm_transition, true);
1075         if (error) {
1076                 dpm_save_failed_dev(dev_name(dev));
1077                 pm_dev_err(dev, pm_transition, " async", error);
1078         }
1079
1080         put_device(dev);
1081 }
1082
1083 static int device_suspend_noirq(struct device *dev)
1084 {
1085         reinit_completion(&dev->power.completion);
1086
1087         if (is_async(dev)) {
1088                 get_device(dev);
1089                 async_schedule(async_suspend_noirq, dev);
1090                 return 0;
1091         }
1092         return __device_suspend_noirq(dev, pm_transition, false);
1093 }
1094
1095 /**
1096  * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1097  * @state: PM transition of the system being carried out.
1098  *
1099  * Prevent device drivers from receiving interrupts and call the "noirq" suspend
1100  * handlers for all non-sysdev devices.
1101  */
1102 int dpm_suspend_noirq(pm_message_t state)
1103 {
1104         ktime_t starttime = ktime_get();
1105         int error = 0;
1106
1107         trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
1108         cpuidle_pause();
1109         device_wakeup_arm_wake_irqs();
1110         suspend_device_irqs();
1111         mutex_lock(&dpm_list_mtx);
1112         pm_transition = state;
1113         async_error = 0;
1114
1115         while (!list_empty(&dpm_late_early_list)) {
1116                 struct device *dev = to_device(dpm_late_early_list.prev);
1117
1118                 get_device(dev);
1119                 mutex_unlock(&dpm_list_mtx);
1120
1121                 error = device_suspend_noirq(dev);
1122
1123                 mutex_lock(&dpm_list_mtx);
1124                 if (error) {
1125                         pm_dev_err(dev, state, " noirq", error);
1126                         dpm_save_failed_dev(dev_name(dev));
1127                         put_device(dev);
1128                         break;
1129                 }
1130                 if (!list_empty(&dev->power.entry))
1131                         list_move(&dev->power.entry, &dpm_noirq_list);
1132                 put_device(dev);
1133
1134                 if (async_error)
1135                         break;
1136         }
1137         mutex_unlock(&dpm_list_mtx);
1138         async_synchronize_full();
1139         if (!error)
1140                 error = async_error;
1141
1142         if (error) {
1143                 suspend_stats.failed_suspend_noirq++;
1144                 dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1145                 dpm_resume_noirq(resume_event(state));
1146         } else {
1147                 dpm_show_time(starttime, state, "noirq");
1148         }
1149         trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
1150         return error;
1151 }
1152
1153 /**
1154  * __device_suspend_late - Execute a "late suspend" callback for given device.
1155  * @dev: Device to handle.
1156  * @state: PM transition of the system being carried out.
1157  * @async: If true, the device is being suspended asynchronously.
1158  *
1159  * Runtime PM is disabled for @dev while this function is being executed.
1160  */
1161 static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
1162 {
1163         pm_callback_t callback = NULL;
1164         char *info = NULL;
1165         int error = 0;
1166
1167         TRACE_DEVICE(dev);
1168         TRACE_SUSPEND(0);
1169
1170         __pm_runtime_disable(dev, false);
1171
1172         dpm_wait_for_children(dev, async);
1173
1174         if (async_error)
1175                 goto Complete;
1176
1177         if (pm_wakeup_pending()) {
1178                 async_error = -EBUSY;
1179                 goto Complete;
1180         }
1181
1182         if (dev->power.syscore || dev->power.direct_complete)
1183                 goto Complete;
1184
1185         if (dev->pm_domain) {
1186                 info = "late power domain ";
1187                 callback = pm_late_early_op(&dev->pm_domain->ops, state);
1188         } else if (dev->type && dev->type->pm) {
1189                 info = "late type ";
1190                 callback = pm_late_early_op(dev->type->pm, state);
1191         } else if (dev->class && dev->class->pm) {
1192                 info = "late class ";
1193                 callback = pm_late_early_op(dev->class->pm, state);
1194         } else if (dev->bus && dev->bus->pm) {
1195                 info = "late bus ";
1196                 callback = pm_late_early_op(dev->bus->pm, state);
1197         }
1198
1199         if (!callback && dev->driver && dev->driver->pm) {
1200                 info = "late driver ";
1201                 callback = pm_late_early_op(dev->driver->pm, state);
1202         }
1203
1204         error = dpm_run_callback(callback, dev, state, info);
1205         if (!error)
1206                 dev->power.is_late_suspended = true;
1207         else
1208                 async_error = error;
1209
1210 Complete:
1211         TRACE_SUSPEND(error);
1212         complete_all(&dev->power.completion);
1213         return error;
1214 }
1215
1216 static void async_suspend_late(void *data, async_cookie_t cookie)
1217 {
1218         struct device *dev = (struct device *)data;
1219         int error;
1220
1221         error = __device_suspend_late(dev, pm_transition, true);
1222         if (error) {
1223                 dpm_save_failed_dev(dev_name(dev));
1224                 pm_dev_err(dev, pm_transition, " async", error);
1225         }
1226         put_device(dev);
1227 }
1228
1229 static int device_suspend_late(struct device *dev)
1230 {
1231         reinit_completion(&dev->power.completion);
1232
1233         if (is_async(dev)) {
1234                 get_device(dev);
1235                 async_schedule(async_suspend_late, dev);
1236                 return 0;
1237         }
1238
1239         return __device_suspend_late(dev, pm_transition, false);
1240 }
1241
1242 /**
1243  * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1244  * @state: PM transition of the system being carried out.
1245  */
1246 int dpm_suspend_late(pm_message_t state)
1247 {
1248         ktime_t starttime = ktime_get();
1249         int error = 0;
1250
1251         trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1252         mutex_lock(&dpm_list_mtx);
1253         pm_transition = state;
1254         async_error = 0;
1255
1256         while (!list_empty(&dpm_suspended_list)) {
1257                 struct device *dev = to_device(dpm_suspended_list.prev);
1258
1259                 get_device(dev);
1260                 mutex_unlock(&dpm_list_mtx);
1261
1262                 error = device_suspend_late(dev);
1263
1264                 mutex_lock(&dpm_list_mtx);
1265                 if (!list_empty(&dev->power.entry))
1266                         list_move(&dev->power.entry, &dpm_late_early_list);
1267
1268                 if (error) {
1269                         pm_dev_err(dev, state, " late", error);
1270                         dpm_save_failed_dev(dev_name(dev));
1271                         put_device(dev);
1272                         break;
1273                 }
1274                 put_device(dev);
1275
1276                 if (async_error)
1277                         break;
1278         }
1279         mutex_unlock(&dpm_list_mtx);
1280         async_synchronize_full();
1281         if (!error)
1282                 error = async_error;
1283         if (error) {
1284                 suspend_stats.failed_suspend_late++;
1285                 dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1286                 dpm_resume_early(resume_event(state));
1287         } else {
1288                 dpm_show_time(starttime, state, "late");
1289         }
1290         trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1291         return error;
1292 }
1293
1294 /**
1295  * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1296  * @state: PM transition of the system being carried out.
1297  */
1298 int dpm_suspend_end(pm_message_t state)
1299 {
1300         int error = dpm_suspend_late(state);
1301         if (error)
1302                 return error;
1303
1304         error = dpm_suspend_noirq(state);
1305         if (error) {
1306                 dpm_resume_early(resume_event(state));
1307                 return error;
1308         }
1309
1310         return 0;
1311 }
1312 EXPORT_SYMBOL_GPL(dpm_suspend_end);
1313
1314 /**
1315  * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1316  * @dev: Device to suspend.
1317  * @state: PM transition of the system being carried out.
1318  * @cb: Suspend callback to execute.
1319  * @info: string description of caller.
1320  */
1321 static int legacy_suspend(struct device *dev, pm_message_t state,
1322                           int (*cb)(struct device *dev, pm_message_t state),
1323                           char *info)
1324 {
1325         int error;
1326         ktime_t calltime;
1327
1328         calltime = initcall_debug_start(dev);
1329
1330         trace_device_pm_callback_start(dev, info, state.event);
1331         error = cb(dev, state);
1332         trace_device_pm_callback_end(dev, error);
1333         suspend_report_result(cb, error);
1334
1335         initcall_debug_report(dev, calltime, error, state, info);
1336
1337         return error;
1338 }
1339
1340 /**
1341  * __device_suspend - Execute "suspend" callbacks for given device.
1342  * @dev: Device to handle.
1343  * @state: PM transition of the system being carried out.
1344  * @async: If true, the device is being suspended asynchronously.
1345  */
1346 static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1347 {
1348         pm_callback_t callback = NULL;
1349         char *info = NULL;
1350         int error = 0;
1351         DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1352
1353         TRACE_DEVICE(dev);
1354         TRACE_SUSPEND(0);
1355
1356         dpm_wait_for_children(dev, async);
1357
1358         if (async_error)
1359                 goto Complete;
1360
1361         /*
1362          * If a device configured to wake up the system from sleep states
1363          * has been suspended at run time and there's a resume request pending
1364          * for it, this is equivalent to the device signaling wakeup, so the
1365          * system suspend operation should be aborted.
1366          */
1367         if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
1368                 pm_wakeup_event(dev, 0);
1369
1370         if (pm_wakeup_pending()) {
1371                 async_error = -EBUSY;
1372                 goto Complete;
1373         }
1374
1375         if (dev->power.syscore)
1376                 goto Complete;
1377
1378         if (dev->power.direct_complete) {
1379                 if (pm_runtime_status_suspended(dev)) {
1380                         pm_runtime_disable(dev);
1381                         if (pm_runtime_status_suspended(dev))
1382                                 goto Complete;
1383
1384                         pm_runtime_enable(dev);
1385                 }
1386                 dev->power.direct_complete = false;
1387         }
1388
1389         dpm_watchdog_set(&wd, dev);
1390         device_lock(dev);
1391
1392         if (dev->pm_domain) {
1393                 info = "power domain ";
1394                 callback = pm_op(&dev->pm_domain->ops, state);
1395                 goto Run;
1396         }
1397
1398         if (dev->type && dev->type->pm) {
1399                 info = "type ";
1400                 callback = pm_op(dev->type->pm, state);
1401                 goto Run;
1402         }
1403
1404         if (dev->class) {
1405                 if (dev->class->pm) {
1406                         info = "class ";
1407                         callback = pm_op(dev->class->pm, state);
1408                         goto Run;
1409                 } else if (dev->class->suspend) {
1410                         pm_dev_dbg(dev, state, "legacy class ");
1411                         error = legacy_suspend(dev, state, dev->class->suspend,
1412                                                 "legacy class ");
1413                         goto End;
1414                 }
1415         }
1416
1417         if (dev->bus) {
1418                 if (dev->bus->pm) {
1419                         info = "bus ";
1420                         callback = pm_op(dev->bus->pm, state);
1421                 } else if (dev->bus->suspend) {
1422                         pm_dev_dbg(dev, state, "legacy bus ");
1423                         error = legacy_suspend(dev, state, dev->bus->suspend,
1424                                                 "legacy bus ");
1425                         goto End;
1426                 }
1427         }
1428
1429  Run:
1430         if (!callback && dev->driver && dev->driver->pm) {
1431                 info = "driver ";
1432                 callback = pm_op(dev->driver->pm, state);
1433         }
1434
1435         error = dpm_run_callback(callback, dev, state, info);
1436
1437  End:
1438         if (!error) {
1439                 struct device *parent = dev->parent;
1440
1441                 dev->power.is_suspended = true;
1442                 if (parent) {
1443                         spin_lock_irq(&parent->power.lock);
1444
1445                         dev->parent->power.direct_complete = false;
1446                         if (dev->power.wakeup_path
1447                             && !dev->parent->power.ignore_children)
1448                                 dev->parent->power.wakeup_path = true;
1449
1450                         spin_unlock_irq(&parent->power.lock);
1451                 }
1452         }
1453
1454         device_unlock(dev);
1455         dpm_watchdog_clear(&wd);
1456
1457  Complete:
1458         complete_all(&dev->power.completion);
1459         if (error)
1460                 async_error = error;
1461
1462         TRACE_SUSPEND(error);
1463         return error;
1464 }
1465
1466 static void async_suspend(void *data, async_cookie_t cookie)
1467 {
1468         struct device *dev = (struct device *)data;
1469         int error;
1470
1471         error = __device_suspend(dev, pm_transition, true);
1472         if (error) {
1473                 dpm_save_failed_dev(dev_name(dev));
1474                 pm_dev_err(dev, pm_transition, " async", error);
1475         }
1476
1477         put_device(dev);
1478 }
1479
1480 static int device_suspend(struct device *dev)
1481 {
1482         reinit_completion(&dev->power.completion);
1483
1484         if (is_async(dev)) {
1485                 get_device(dev);
1486                 async_schedule(async_suspend, dev);
1487                 return 0;
1488         }
1489
1490         return __device_suspend(dev, pm_transition, false);
1491 }
1492
1493 /**
1494  * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1495  * @state: PM transition of the system being carried out.
1496  */
1497 int dpm_suspend(pm_message_t state)
1498 {
1499         ktime_t starttime = ktime_get();
1500         int error = 0;
1501
1502         trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1503         might_sleep();
1504
1505         cpufreq_suspend();
1506
1507         mutex_lock(&dpm_list_mtx);
1508         pm_transition = state;
1509         async_error = 0;
1510         while (!list_empty(&dpm_prepared_list)) {
1511                 struct device *dev = to_device(dpm_prepared_list.prev);
1512
1513                 get_device(dev);
1514                 mutex_unlock(&dpm_list_mtx);
1515
1516                 error = device_suspend(dev);
1517
1518                 mutex_lock(&dpm_list_mtx);
1519                 if (error) {
1520                         pm_dev_err(dev, state, "", error);
1521                         dpm_save_failed_dev(dev_name(dev));
1522                         put_device(dev);
1523                         break;
1524                 }
1525                 if (!list_empty(&dev->power.entry))
1526                         list_move(&dev->power.entry, &dpm_suspended_list);
1527                 put_device(dev);
1528                 if (async_error)
1529                         break;
1530         }
1531         mutex_unlock(&dpm_list_mtx);
1532         async_synchronize_full();
1533         if (!error)
1534                 error = async_error;
1535         if (error) {
1536                 suspend_stats.failed_suspend++;
1537                 dpm_save_failed_step(SUSPEND_SUSPEND);
1538         } else
1539                 dpm_show_time(starttime, state, NULL);
1540         trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
1541         return error;
1542 }
1543
1544 /**
1545  * device_prepare - Prepare a device for system power transition.
1546  * @dev: Device to handle.
1547  * @state: PM transition of the system being carried out.
1548  *
1549  * Execute the ->prepare() callback(s) for given device.  No new children of the
1550  * device may be registered after this function has returned.
1551  */
1552 static int device_prepare(struct device *dev, pm_message_t state)
1553 {
1554         int (*callback)(struct device *) = NULL;
1555         char *info = NULL;
1556         int ret = 0;
1557
1558         if (dev->power.syscore)
1559                 return 0;
1560
1561         /*
1562          * If a device's parent goes into runtime suspend at the wrong time,
1563          * it won't be possible to resume the device.  To prevent this we
1564          * block runtime suspend here, during the prepare phase, and allow
1565          * it again during the complete phase.
1566          */
1567         pm_runtime_get_noresume(dev);
1568
1569         device_lock(dev);
1570
1571         dev->power.wakeup_path = device_may_wakeup(dev);
1572
1573         if (dev->pm_domain) {
1574                 info = "preparing power domain ";
1575                 callback = dev->pm_domain->ops.prepare;
1576         } else if (dev->type && dev->type->pm) {
1577                 info = "preparing type ";
1578                 callback = dev->type->pm->prepare;
1579         } else if (dev->class && dev->class->pm) {
1580                 info = "preparing class ";
1581                 callback = dev->class->pm->prepare;
1582         } else if (dev->bus && dev->bus->pm) {
1583                 info = "preparing bus ";
1584                 callback = dev->bus->pm->prepare;
1585         }
1586
1587         if (!callback && dev->driver && dev->driver->pm) {
1588                 info = "preparing driver ";
1589                 callback = dev->driver->pm->prepare;
1590         }
1591
1592         if (callback)
1593                 ret = callback(dev);
1594
1595         device_unlock(dev);
1596
1597         if (ret < 0) {
1598                 suspend_report_result(callback, ret);
1599                 pm_runtime_put(dev);
1600                 return ret;
1601         }
1602         /*
1603          * A positive return value from ->prepare() means "this device appears
1604          * to be runtime-suspended and its state is fine, so if it really is
1605          * runtime-suspended, you can leave it in that state provided that you
1606          * will do the same thing with all of its descendants".  This only
1607          * applies to suspend transitions, however.
1608          */
1609         spin_lock_irq(&dev->power.lock);
1610         dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND;
1611         spin_unlock_irq(&dev->power.lock);
1612         return 0;
1613 }
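
/*
 * Illustrative sketch only (hypothetical foo_prepare, not taken from this
 * file): a driver whose runtime-suspended state is also adequate for the
 * target sleep state might implement
 *
 *        static int foo_prepare(struct device *dev)
 *        {
 *                return pm_runtime_suspended(dev);
 *        }
 *
 * so that a positive return lets the PM core set power.direct_complete
 * above and skip the remaining suspend/resume callbacks for the device.
 */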
1614
1615 /**
1616  * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
1617  * @state: PM transition of the system being carried out.
1618  *
1619  * Execute the ->prepare() callback(s) for all devices.
1620  */
1621 int dpm_prepare(pm_message_t state)
1622 {
1623         int error = 0;
1624
1625         trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
1626         might_sleep();
1627
1628         mutex_lock(&dpm_list_mtx);
1629         while (!list_empty(&dpm_list)) {
1630                 struct device *dev = to_device(dpm_list.next);
1631
1632                 get_device(dev);
1633                 mutex_unlock(&dpm_list_mtx);
1634
1635                 trace_device_pm_callback_start(dev, "", state.event);
1636                 error = device_prepare(dev, state);
1637                 trace_device_pm_callback_end(dev, error);
1638
1639                 mutex_lock(&dpm_list_mtx);
1640                 if (error) {
1641                         if (error == -EAGAIN) {
1642                                 put_device(dev);
1643                                 error = 0;
1644                                 continue;
1645                         }
1646                         printk(KERN_INFO "PM: Device %s not prepared "
1647                                 "for power transition: code %d\n",
1648                                 dev_name(dev), error);
1649                         put_device(dev);
1650                         break;
1651                 }
1652                 dev->power.is_prepared = true;
1653                 if (!list_empty(&dev->power.entry))
1654                         list_move_tail(&dev->power.entry, &dpm_prepared_list);
1655                 put_device(dev);
1656         }
1657         mutex_unlock(&dpm_list_mtx);
1658         trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
1659         return error;
1660 }
1661
1662 /**
1663  * dpm_suspend_start - Prepare devices for PM transition and suspend them.
1664  * @state: PM transition of the system being carried out.
1665  *
1666  * Prepare all non-sysdev devices for system PM transition and execute "suspend"
1667  * callbacks for them.
1668  */
1669 int dpm_suspend_start(pm_message_t state)
1670 {
1671         int error;
1672
1673         error = dpm_prepare(state);
1674         if (error) {
1675                 suspend_stats.failed_prepare++;
1676                 dpm_save_failed_step(SUSPEND_PREPARE);
1677         } else
1678                 error = dpm_suspend(state);
1679         return error;
1680 }
1681 EXPORT_SYMBOL_GPL(dpm_suspend_start);
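
/*
 * For orientation (a rough sketch of the callers, not part of this file):
 * the system sleep core, e.g. kernel/power/suspend.c, drives the exported
 * entry points roughly in this order:
 *
 *        dpm_suspend_start(PMSG_SUSPEND);        /* prepare + suspend */
 *        dpm_suspend_end(PMSG_SUSPEND);          /* late + noirq suspend */
 *        ...the platform enters the sleep state and wakes up...
 *        dpm_resume_start(PMSG_RESUME);          /* noirq + early resume */
 *        dpm_resume_end(PMSG_RESUME);            /* resume + complete */
 */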
1682
1683 void __suspend_report_result(const char *function, void *fn, int ret)
1684 {
1685         if (ret)
1686                 printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
1687 }
1688 EXPORT_SYMBOL_GPL(__suspend_report_result);
1689
1690 /**
1691  * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
1692  * @dev: Device to wait for.
1693  * @subordinate: Device that needs to wait for @dev.
1694  */
1695 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
1696 {
1697         dpm_wait(dev, subordinate->power.async_suspend);
1698         return async_error;
1699 }
1700 EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
1701
1702 /**
1703  * dpm_for_each_dev - device iterator.
1704  * @data: data for the callback.
1705  * @fn: function to be called for each device.
1706  *
1707  * Iterate over devices in dpm_list, and call @fn for each device,
1708  * passing it @data.
1709  */
1710 void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
1711 {
1712         struct device *dev;
1713
1714         if (!fn)
1715                 return;
1716
1717         device_pm_lock();
1718         list_for_each_entry(dev, &dpm_list, power.entry)
1719                 fn(dev, data);
1720         device_pm_unlock();
1721 }
1722 EXPORT_SYMBOL_GPL(dpm_for_each_dev);