TOMOYO Linux Cross Reference
Linux/block/blk-pm.c


// SPDX-License-Identifier: GPL-2.0

#include <linux/blk-mq.h>
#include <linux/blk-pm.h>
#include <linux/blkdev.h>
#include <linux/pm_runtime.h>
#include "blk-mq.h"
#include "blk-mq-tag.h"

/**
 * blk_pm_runtime_init - Block layer runtime PM initialization routine
 * @q: the queue of the device
 * @dev: the device the queue belongs to
 *
 * Description:
 *    Initialize runtime-PM-related fields for @q and set up autosuspend for
 *    @dev. Drivers that want to take advantage of request-based runtime PM
 *    should call this function after @dev has been initialized and its
 *    request queue @q has been allocated, but while runtime PM cannot happen
 *    yet (either because it is disabled/forbidden or because its usage_count
 *    is > 0). In most cases, drivers should call this function before any
 *    I/O has taken place.
 *
 *    This function takes care of setting up autosuspend for the device: the
 *    autosuspend delay is set to -1 to make runtime suspend impossible until
 *    an updated value is set by either the user or the driver. Drivers do
 *    not need to touch other autosuspend settings.
 *
 *    Block layer runtime PM is request based, so it only works for drivers
 *    that use requests as their I/O unit rather than submitting bios directly.
 */
void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
{
        q->dev = dev;
        q->rpm_status = RPM_ACTIVE;
        pm_runtime_set_autosuspend_delay(q->dev, -1);
        pm_runtime_use_autosuspend(q->dev);
}
EXPORT_SYMBOL(blk_pm_runtime_init);
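
/*
 * Illustrative sketch, not part of blk-pm.c: how a request-based driver
 * might hook its queue up to block layer runtime PM during probe. The
 * function name and the 5000 ms autosuspend delay are assumptions made
 * for this example; only blk_pm_runtime_init() comes from this file.
 */
static int example_driver_setup_rpm(struct request_queue *q, struct device *dev)
{
        /*
         * Tie @q to @dev. The autosuspend delay starts at -1, which keeps
         * runtime suspend impossible until a real delay is chosen.
         */
        blk_pm_runtime_init(q, dev);

        /*
         * Optionally pick a concrete autosuspend delay and allow runtime PM
         * from the driver side (user space can also do this through sysfs).
         */
        pm_runtime_set_autosuspend_delay(dev, 5000);
        pm_runtime_allow(dev);
        return 0;
}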

/**
 * blk_pre_runtime_suspend - Pre runtime suspend check
 * @q: the queue of the device
 *
 * Description:
 *    This function checks whether runtime suspend is allowed for the device
 *    by examining whether any requests are pending in the queue. If requests
 *    are pending, the device cannot be runtime suspended; otherwise the
 *    queue's status is updated to SUSPENDING and the driver can proceed to
 *    suspend the device.
 *
 *    If suspend is not allowed, the device is marked as last busy so that
 *    the runtime PM core will try to autosuspend it again some time later.
 *
 *    This function should be called near the start of the device's
 *    runtime_suspend callback.
 *
 * Return:
 *    0         - OK to runtime suspend the device
 *    -EBUSY    - Device should not be runtime suspended
 */
int blk_pre_runtime_suspend(struct request_queue *q)
{
        int ret = 0;

        if (!q->dev)
                return ret;

        WARN_ON_ONCE(q->rpm_status != RPM_ACTIVE);

        /*
         * Increase the pm_only counter before checking whether any
         * non-PM blk_queue_enter() calls are in progress, so that no new
         * non-PM blk_queue_enter() calls can succeed before the pm_only
         * counter is decreased again.
         */
        blk_set_pm_only(q);
        ret = -EBUSY;
        /* Switch q_usage_counter from per-cpu to atomic mode. */
        blk_freeze_queue_start(q);
        /*
         * Wait until atomic mode has been reached. Since that
         * involves calling call_rcu(), it is guaranteed that later
         * blk_queue_enter() calls see the pm-only state. See also
         * http://lwn.net/Articles/573497/.
         */
        percpu_ref_switch_to_atomic_sync(&q->q_usage_counter);
        if (percpu_ref_is_zero(&q->q_usage_counter))
                ret = 0;
        /* Switch q_usage_counter back to per-cpu mode. */
        blk_mq_unfreeze_queue(q);

        spin_lock_irq(&q->queue_lock);
        if (ret < 0)
                pm_runtime_mark_last_busy(q->dev);
        else
                q->rpm_status = RPM_SUSPENDING;
        spin_unlock_irq(&q->queue_lock);

        if (ret)
                blk_clear_pm_only(q);

        return ret;
}
EXPORT_SYMBOL(blk_pre_runtime_suspend);

/**
 * blk_post_runtime_suspend - Post runtime suspend processing
 * @q: the queue of the device
 * @err: return value of the device's runtime_suspend function
 *
 * Description:
 *    Update the queue's runtime status according to the return value of the
 *    device's runtime_suspend function. If the suspend failed, the device is
 *    marked as last busy so that the PM core will try to autosuspend it again
 *    at a later time.
 *
 *    This function should be called near the end of the device's
 *    runtime_suspend callback.
 */
void blk_post_runtime_suspend(struct request_queue *q, int err)
{
        if (!q->dev)
                return;

        spin_lock_irq(&q->queue_lock);
        if (!err) {
                q->rpm_status = RPM_SUSPENDED;
        } else {
                q->rpm_status = RPM_ACTIVE;
                pm_runtime_mark_last_busy(q->dev);
        }
        spin_unlock_irq(&q->queue_lock);

        if (err)
                blk_clear_pm_only(q);
}
EXPORT_SYMBOL(blk_post_runtime_suspend);
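
/*
 * Illustrative sketch, not part of blk-pm.c: a driver's runtime_suspend
 * callback pairing blk_pre_runtime_suspend() with blk_post_runtime_suspend().
 * The callback name and the dev_get_drvdata() lookup are assumptions made for
 * this example; only the blk_*_runtime_suspend() calls come from this file.
 */
static int example_driver_runtime_suspend(struct device *dev)
{
        struct request_queue *q = dev_get_drvdata(dev);
        int ret;

        /* Bail out with -EBUSY while requests are still in flight. */
        ret = blk_pre_runtime_suspend(q);
        if (ret)
                return ret;

        /* Hardware-specific power-down would go here; assume it succeeded. */
        ret = 0;

        /* Record the outcome: RPM_SUSPENDED on success, RPM_ACTIVE on error. */
        blk_post_runtime_suspend(q, ret);
        return ret;
}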

/**
 * blk_pre_runtime_resume - Pre runtime resume processing
 * @q: the queue of the device
 *
 * Description:
 *    Update the queue's runtime status to RESUMING in preparation for the
 *    runtime resume of the device.
 *
 *    This function should be called near the start of the device's
 *    runtime_resume callback.
 */
void blk_pre_runtime_resume(struct request_queue *q)
{
        if (!q->dev)
                return;

        spin_lock_irq(&q->queue_lock);
        q->rpm_status = RPM_RESUMING;
        spin_unlock_irq(&q->queue_lock);
}
EXPORT_SYMBOL(blk_pre_runtime_resume);

/**
 * blk_post_runtime_resume - Post runtime resume processing
 * @q: the queue of the device
 * @err: return value of the device's runtime_resume function
 *
 * Description:
 *    Update the queue's runtime status according to the return value of the
 *    device's runtime_resume function. If the resume succeeded, allow the
 *    requests that were queued while the device was resuming to be processed,
 *    then mark the device as last busy and request autosuspend for it.
 *
 *    This function should be called near the end of the device's
 *    runtime_resume callback.
 */
void blk_post_runtime_resume(struct request_queue *q, int err)
{
        if (!q->dev)
                return;

        spin_lock_irq(&q->queue_lock);
        if (!err) {
                q->rpm_status = RPM_ACTIVE;
                pm_runtime_mark_last_busy(q->dev);
                pm_request_autosuspend(q->dev);
        } else {
                q->rpm_status = RPM_SUSPENDED;
        }
        spin_unlock_irq(&q->queue_lock);

        if (!err)
                blk_clear_pm_only(q);
}
EXPORT_SYMBOL(blk_post_runtime_resume);
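
/*
 * Illustrative sketch, not part of blk-pm.c: the matching runtime_resume
 * callback, bracketing the hardware resume with blk_pre_runtime_resume()
 * and blk_post_runtime_resume(). The callback name and the dev_get_drvdata()
 * lookup are assumptions made for this example.
 */
static int example_driver_runtime_resume(struct device *dev)
{
        struct request_queue *q = dev_get_drvdata(dev);
        int ret;

        /* Mark the queue as RPM_RESUMING before touching the hardware. */
        blk_pre_runtime_resume(q);

        /* Hardware-specific power-up would go here; assume it succeeded. */
        ret = 0;

        /* On success this unblocks queued requests and rearms autosuspend. */
        blk_post_runtime_resume(q, ret);
        return ret;
}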

/**
 * blk_set_runtime_active - Force the runtime status of the queue to be active
 * @q: the queue of the device
 *
 * If the device is left runtime suspended during system suspend, the resume
 * hook typically resumes the device and corrects its runtime status
 * accordingly. However, that does not affect the queue's runtime PM status,
 * which is still "suspended". This prevents requests from being processed
 * from the queue.
 *
 * This function can be used in a driver's resume hook to correct the queue's
 * runtime PM status and re-enable peeking requests from the queue. It
 * should be called before the first request is added to the queue.
 */
void blk_set_runtime_active(struct request_queue *q)
{
        spin_lock_irq(&q->queue_lock);
        q->rpm_status = RPM_ACTIVE;
        pm_runtime_mark_last_busy(q->dev);
        pm_request_autosuspend(q->dev);
        spin_unlock_irq(&q->queue_lock);
}
EXPORT_SYMBOL(blk_set_runtime_active);
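
/*
 * Illustrative sketch, not part of blk-pm.c: a system resume (not runtime
 * resume) hook for a driver whose device was left runtime suspended across
 * system suspend but has now been powered up. The callback name and the
 * dev_get_drvdata() lookup are assumptions made for this example.
 */
static int example_driver_system_resume(struct device *dev)
{
        struct request_queue *q = dev_get_drvdata(dev);

        /* Tell the runtime PM core the device is active again. */
        pm_runtime_disable(dev);
        pm_runtime_set_active(dev);
        pm_runtime_enable(dev);

        /* Bring the queue's runtime status back in sync so requests flow. */
        blk_set_runtime_active(q);
        return 0;
}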
