
TOMOYO Linux Cross Reference
Linux/block/blk.h


#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/idr.h>
#include <linux/blk-mq.h>
#include "blk-mq.h"

/* Amount of time in which a process may batch requests */
#define BLK_BATCH_TIME  (HZ/50UL)

/* Number of requests a "batching" process may submit */
#define BLK_BATCH_REQ   32

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT         (5 * HZ)

struct blk_flush_queue {
        unsigned int            flush_queue_delayed:1;
        unsigned int            flush_pending_idx:1;
        unsigned int            flush_running_idx:1;
        unsigned long           flush_pending_since;
        struct list_head        flush_queue[2];
        struct list_head        flush_data_in_flight;
        struct request          *flush_rq;

        /*
         * flush_rq shares its tag with this rq, so the two can't be
         * active at the same time
         */
        struct request          *orig_rq;
        spinlock_t              mq_flush_lock;
};

extern struct kmem_cache *blk_requestq_cachep;
extern struct kmem_cache *request_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

static inline struct blk_flush_queue *blk_get_flush_queue(
                struct request_queue *q, struct blk_mq_ctx *ctx)
{
        struct blk_mq_hw_ctx *hctx;

        if (!q->mq_ops)
                return q->fq;

        hctx = q->mq_ops->map_queue(q, ctx->cpu);

        return hctx->fq;
}
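
/*
 * Illustrative sketch, not part of the original header: how a caller
 * picks up the right flush queue.  On the legacy path ctx may be NULL
 * and the per-queue fq is returned; on blk-mq the ctx selects the
 * hardware context.  example_peek_flush_rq() is a hypothetical helper.
 */
static inline struct request *example_peek_flush_rq(
                struct request_queue *q, struct blk_mq_ctx *ctx)
{
        struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);

        return fq->flush_rq;
}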

static inline void __blk_get_queue(struct request_queue *q)
{
        kobject_get(&q->kobj);
}

struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
                int node, int cmd_size);
void blk_free_flush_queue(struct blk_flush_queue *q);

int blk_init_rl(struct request_list *rl, struct request_queue *q,
                gfp_t gfp_mask);
void blk_exit_rl(struct request_list *rl);
void init_request_from_bio(struct request *req, struct bio *bio);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
                        struct bio *bio);
void blk_queue_bypass_start(struct request_queue *q);
void blk_queue_bypass_end(struct request_queue *q);
void blk_dequeue_request(struct request *rq);
void __blk_queue_free_tags(struct request_queue *q);
bool __blk_end_bidi_request(struct request *rq, int error,
                            unsigned int nr_bytes, unsigned int bidi_bytes);
void blk_freeze_queue(struct request_queue *q);

static inline void blk_queue_enter_live(struct request_queue *q)
{
        /*
         * Given that running in generic_make_request() context
         * guarantees that a live reference against q_usage_counter has
         * been established, further references under that same context
         * need not check that the queue has been frozen (marked dead).
         */
        percpu_ref_get(&q->q_usage_counter);
}
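
/*
 * Illustrative sketch, not part of the original header: a reference
 * taken with blk_queue_enter_live() must still be dropped with a
 * matching percpu_ref_put() once the caller is done with the queue.
 * example_borrow_queue_ref() is a hypothetical helper.
 */
static inline void example_borrow_queue_ref(struct request_queue *q)
{
        blk_queue_enter_live(q);        /* caller already holds a live ref */
        /* ... submit or remap I/O against q ... */
        percpu_ref_put(&q->q_usage_counter);
}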

#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
#else
static inline void blk_flush_integrity(void)
{
}
#endif

void blk_timeout_work(struct work_struct *work);
unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);
void blk_delete_timer(struct request *);


bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
                             struct bio *bio);
bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
                            struct bio *bio);
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
                            unsigned int *request_count,
                            struct request **same_queue_rq);
unsigned int blk_plug_queued_count(struct request_queue *q);

void blk_account_io_start(struct request *req, bool new_io);
void blk_account_io_completion(struct request *req, unsigned int bytes);
void blk_account_io_done(struct request *req);

/*
 * Internal atomic flags for request handling
 */
enum rq_atomic_flags {
        REQ_ATOM_COMPLETE = 0,
        REQ_ATOM_STARTED,
};

/*
 * The EH timer and IO completion will both attempt to 'grab' the
 * request; make sure that only one of them succeeds.
 */
static inline int blk_mark_rq_complete(struct request *rq)
{
        return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}

static inline void blk_clear_rq_complete(struct request *rq)
{
        clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}
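
/*
 * Illustrative sketch, not part of the original header: the timeout
 * handler and the normal completion path both call
 * blk_mark_rq_complete(); test_and_set_bit() guarantees exactly one of
 * them wins ownership of the request.  example_try_complete() is a
 * hypothetical caller.
 */
static inline bool example_try_complete(struct request *rq)
{
        if (blk_mark_rq_complete(rq))
                return false;   /* the other path already owns rq */
        /* ... finish the request, e.g. via __blk_end_bidi_request() ... */
        return true;
}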

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->cmd_flags & REQ_HASHED)

void blk_insert_flush(struct request *rq);

static inline struct request *__elv_next_request(struct request_queue *q)
{
        struct request *rq;
        struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);

        while (1) {
                if (!list_empty(&q->queue_head)) {
                        rq = list_entry_rq(q->queue_head.next);
                        return rq;
                }

                /*
                 * If a flush request is running and flushes aren't
                 * queueable in the drive, hold the queue until the
                 * flush request finishes.  Even if we didn't, the
                 * driver couldn't dispatch further requests and would
                 * have to requeue them.  Holding the queue can also
                 * improve throughput: say we have requests flush1,
                 * write1, flush2.  flush1 is dispatched and the queue
                 * is held, so write1 isn't inserted.  Once flush1
                 * finishes, flush2 is dispatched; since the disk cache
                 * is already clean, flush2 completes very quickly, so
                 * it effectively gets folded into flush1.  Because the
                 * queue is held, a flag is set to indicate that it
                 * should be restarted later.  See flush_end_io() for
                 * details.
                 */
                if (fq->flush_pending_idx != fq->flush_running_idx &&
                                !queue_flush_queueable(q)) {
                        fq->flush_queue_delayed = 1;
                        return NULL;
                }
                if (unlikely(blk_queue_bypass(q)) ||
                    !q->elevator->type->ops.elevator_dispatch_fn(q, 0))
                        return NULL;
        }
}
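
/*
 * Illustrative sketch, not part of the original header: the restart
 * side of the flush_queue_delayed handshake described above.  Roughly
 * what flush_end_io() in blk-flush.c does once the flush completes;
 * example_flush_done() is a hypothetical helper.
 */
static inline void example_flush_done(struct request_queue *q)
{
        struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);

        if (fq->flush_queue_delayed)
                blk_run_queue_async(q); /* kick the held queue */
        fq->flush_queue_delayed = 0;
}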

static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.elevator_activate_req_fn)
                e->type->ops.elevator_activate_req_fn(q, rq);
}

static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.elevator_deactivate_req_fn)
                e->type->ops.elevator_deactivate_req_fn(q, rq);
}

#ifdef CONFIG_FAIL_IO_TIMEOUT
int blk_should_fake_timeout(struct request_queue *);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
                                const char *, size_t);
#else
static inline int blk_should_fake_timeout(struct request_queue *q)
{
        return 0;
}
#endif

int ll_back_merge_fn(struct request_queue *q, struct request *req,
                     struct bio *bio);
int ll_front_merge_fn(struct request_queue *q, struct request *req,
                      struct bio *bio);
int attempt_back_merge(struct request_queue *q, struct request *rq);
int attempt_front_merge(struct request_queue *q, struct request *rq);
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
                                struct request *next);
void blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
int blk_try_merge(struct request *rq, struct bio *bio);

void blk_queue_congestion_threshold(struct request_queue *q);

int blk_dev_init(void);


/*
 * Return the threshold (number of used requests) at which the queue is
 * considered to be congested.  It includes a little hysteresis to keep
 * the context switch rate down.
 */
static inline int queue_congestion_on_threshold(struct request_queue *q)
{
        return q->nr_congestion_on;
}

/*
 * The threshold at which a queue is considered to be uncongested
 */
static inline int queue_congestion_off_threshold(struct request_queue *q)
{
        return q->nr_congestion_off;
}
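
/*
 * Illustrative sketch, not part of the original header: the hysteresis
 * mentioned above means the "on" threshold sits above the "off"
 * threshold, so the congested state doesn't flip on every request.
 * example_update_congestion() is a hypothetical helper; in_flight and
 * congested are assumed to be tracked by the caller.
 */
static inline bool example_update_congestion(struct request_queue *q,
                int in_flight, bool congested)
{
        if (!congested && in_flight >= queue_congestion_on_threshold(q))
                return true;    /* crossed the upper threshold */
        if (congested && in_flight <= queue_congestion_off_threshold(q))
                return false;   /* fell below the lower threshold */
        return congested;       /* inside the hysteresis band */
}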

extern int blk_update_nr_requests(struct request_queue *, unsigned int);

/*
 * Contribute to IO statistics IFF:
 *
 *      a) it's attached to a gendisk, and
 *      b) the queue had IO stats enabled when this request was started, and
 *      c) it's a file system request
 */
static inline int blk_do_io_stat(struct request *rq)
{
        return rq->rq_disk &&
               (rq->cmd_flags & REQ_IO_STAT) &&
               (rq->cmd_type == REQ_TYPE_FS);
}
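
/*
 * Illustrative sketch, not part of the original header: accounting
 * paths are expected to bail out early when blk_do_io_stat() says the
 * request doesn't qualify.  example_account_done() is a hypothetical
 * caller.
 */
static inline void example_account_done(struct request *rq)
{
        if (!blk_do_io_stat(rq))
                return;         /* no gendisk, stats off, or not FS I/O */
        /* ... update the per-partition and per-cpu counters ... */
}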

/*
 * Internal io_context interface
 */
void get_io_context(struct io_context *ioc);
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
                             gfp_t gfp_mask);
void ioc_clear_queue(struct request_queue *q);

int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);

/**
 * create_io_context - try to create task->io_context
 * @gfp_mask: allocation mask
 * @node: allocation node
 *
 * If %current->io_context is %NULL, allocate a new io_context and install
 * it.  Returns the current %current->io_context, which may be %NULL if
 * allocation failed.
 *
 * Note that this function can't be called with IRQs disabled because
 * task_lock, which protects %current->io_context, is IRQ-unsafe.
 */
static inline struct io_context *create_io_context(gfp_t gfp_mask, int node)
{
        WARN_ON_ONCE(irqs_disabled());
        if (unlikely(!current->io_context))
                create_task_io_context(current, gfp_mask, node);
        return current->io_context;
}
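
/*
 * Illustrative sketch, not part of the original header: typical use of
 * create_io_context() on a request submission path, tolerating
 * allocation failure.  example_prep_ioc() is a hypothetical helper.
 */
static inline void example_prep_ioc(struct request_queue *q)
{
        struct io_context *ioc;

        ioc = create_io_context(GFP_ATOMIC, q->node);
        if (!ioc)
                return;         /* allocation failed; carry on without */
        /* ... e.g. look up a matching icq via ioc_lookup_icq(ioc, q) ... */
}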

/*
 * Internal throttling interface
 */
#ifdef CONFIG_BLK_DEV_THROTTLING
extern void blk_throtl_drain(struct request_queue *q);
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
#else /* CONFIG_BLK_DEV_THROTTLING */
static inline void blk_throtl_drain(struct request_queue *q) { }
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
#endif /* CONFIG_BLK_DEV_THROTTLING */

#endif /* BLK_INTERNAL_H */
