
TOMOYO Linux Cross Reference
Linux/block/blk-mq.h


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"
#include "blk-mq-tag.h"

struct blk_mq_tag_set;

struct blk_mq_ctxs {
        struct kobject kobj;
        struct blk_mq_ctx __percpu      *queue_ctx;
};

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
        struct {
                spinlock_t              lock;
                struct list_head        rq_lists[HCTX_MAX_TYPES];
        } ____cacheline_aligned_in_smp;

        unsigned int            cpu;
        unsigned short          index_hw[HCTX_MAX_TYPES];
        struct blk_mq_hw_ctx    *hctxs[HCTX_MAX_TYPES];

        struct request_queue    *queue;
        struct blk_mq_ctxs      *ctxs;
        struct kobject          kobj;
} ____cacheline_aligned_in_smp;

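/*
 * Illustrative sketch (not part of the original header): there is one
 * blk_mq_ctx per CPU, allocated per-cpu and reachable through
 * q->queue_ctx, and its hctxs[] array resolves each hardware queue type
 * directly. A hypothetical lookup for CPU 3 would read:
 *
 *      struct blk_mq_ctx *ctx = per_cpu_ptr(q->queue_ctx, 3);
 *      struct blk_mq_hw_ctx *hctx = ctx->hctxs[HCTX_TYPE_DEFAULT];
 */
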
void blk_mq_submit_bio(struct bio *bio);
int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
                unsigned int flags);
void blk_mq_exit_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
                             unsigned int);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
                                bool kick_requeue_list);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
                                        struct blk_mq_ctx *start);
void blk_mq_put_rq_ref(struct request *rq);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
                     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
                                unsigned int hctx_idx, unsigned int depth);
void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
                             struct blk_mq_tags *tags,
                             unsigned int hctx_idx);
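
/*
 * Illustrative sketch (hypothetical caller, not from this header): the
 * allocation and free helpers pair up per hardware queue index:
 *
 *      struct blk_mq_tags *tags;
 *
 *      tags = blk_mq_alloc_map_and_rqs(set, hctx_idx, depth);
 *      if (!tags)
 *              return -ENOMEM;
 *      ...
 *      blk_mq_free_map_and_rqs(set, tags, hctx_idx);
 */
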
/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
                                bool at_head);
void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
                                  bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
                                struct list_head *list);

/* Used by blk_insert_cloned_request() to issue request directly */
blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last);
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
                                    struct list_head *list);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);

/*
 * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue
 * @q: request queue
 * @type: the hctx type index
 * @cpu: CPU
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
                                                          enum hctx_type type,
                                                          unsigned int cpu)
{
        return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]];
}

static inline enum hctx_type blk_mq_get_hctx_type(unsigned int flags)
{
        enum hctx_type type = HCTX_TYPE_DEFAULT;

        /*
         * The caller ensures that if REQ_POLLED is set, polling is
         * enabled on the queue.
         */
        if (flags & REQ_POLLED)
                type = HCTX_TYPE_POLL;
        else if ((flags & REQ_OP_MASK) == REQ_OP_READ)
                type = HCTX_TYPE_READ;
        return type;
}
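
/*
 * Illustrative examples (not part of the original header) of how the
 * command flags select a hardware queue type:
 *
 *      blk_mq_get_hctx_type(REQ_OP_READ | REQ_POLLED);  // HCTX_TYPE_POLL
 *      blk_mq_get_hctx_type(REQ_OP_READ);               // HCTX_TYPE_READ
 *      blk_mq_get_hctx_type(REQ_OP_WRITE);              // HCTX_TYPE_DEFAULT
 *
 * Queues without dedicated poll/read maps alias those types to the
 * default hardware queue when ctx->hctxs[] is set up, so the returned
 * index is always usable.
 */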

/*
 * blk_mq_map_queue() - map (cmd_flags,ctx) to hardware queue
 * @q: request queue
 * @flags: request command flags
 * @ctx: software queue cpu ctx
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
                                                     unsigned int flags,
                                                     struct blk_mq_ctx *ctx)
{
        return ctx->hctxs[blk_mq_get_hctx_type(flags)];
}

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
void blk_mq_free_plug_rqs(struct blk_plug *plug);
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_cancel_work_sync(struct request_queue *q);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
                                                  unsigned int cpu)
{
        return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queues. They could be per-node as well,
 * for instance. For now this is hardcoded as-is. Note that we don't care
 * about preemption, since we know the ctx's are persistent. This does
 * mean that we can't rely on the ctx always matching the currently
 * running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
        return __blk_mq_get_ctx(q, raw_smp_processor_id());
}
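
/*
 * Illustrative sketch (hypothetical submission-path caller, not from
 * this header): a typical pattern is to grab the current CPU's software
 * queue and then resolve the hardware queue from the command flags:
 *
 *      struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *      struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
 *
 * As the comment above notes, ctx may belong to a CPU we have since
 * migrated away from; that is harmless because the ctx is persistent.
 */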

struct blk_mq_alloc_data {
        /* input parameter */
        struct request_queue *q;
        blk_mq_req_flags_t flags;
        unsigned int shallow_depth;
        unsigned int cmd_flags;
        req_flags_t rq_flags;

        /* allocate multiple requests/tags in one go */
        unsigned int nr_tags;
        struct request **cached_rq;

        /* input & output parameter */
        struct blk_mq_ctx *ctx;
        struct blk_mq_hw_ctx *hctx;
};

static inline bool blk_mq_is_shared_tags(unsigned int flags)
{
        return flags & BLK_MQ_F_TAG_HCTX_SHARED;
}

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
        if (!(data->rq_flags & RQF_ELV))
                return data->hctx->tags;
        return data->hctx->sched_tags;
}
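
/*
 * Illustrative note (not part of the original header): RQF_ELV marks a
 * request that goes through an I/O scheduler, so its tag comes from
 * hctx->sched_tags rather than from the driver tag space. A hypothetical
 * allocation with an elevator attached would therefore see:
 *
 *      data->rq_flags |= RQF_ELV;
 *      tags = blk_mq_tags_from_data(data);     // == data->hctx->sched_tags
 */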

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
        return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
        return hctx->nr_ctx && hctx->tags;
}

unsigned int blk_mq_in_flight(struct request_queue *q,
                struct block_device *part);
void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
                unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct request_queue *q,
                                              int budget_token)
{
        if (q->mq_ops->put_budget)
                q->mq_ops->put_budget(q, budget_token);
}

static inline int blk_mq_get_dispatch_budget(struct request_queue *q)
{
        if (q->mq_ops->get_budget)
                return q->mq_ops->get_budget(q);
        return 0;
}

static inline void blk_mq_set_rq_budget_token(struct request *rq, int token)
{
        if (token < 0)
                return;

        if (rq->q->mq_ops->set_rq_budget_token)
                rq->q->mq_ops->set_rq_budget_token(rq, token);
}

static inline int blk_mq_get_rq_budget_token(struct request *rq)
{
        if (rq->q->mq_ops->get_rq_budget_token)
                return rq->q->mq_ops->get_rq_budget_token(rq);
        return -1;
}
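
/*
 * Illustrative sketch (hypothetical dispatch-path caller, not from this
 * header): drivers with a ->get_budget() hook make dispatch conditional
 * on acquiring budget first; a negative token means no budget is
 * available, and the budget is put back if the request is not issued:
 *
 *      int budget_token = blk_mq_get_dispatch_budget(q);
 *
 *      if (budget_token < 0)
 *              return false;   // back off, no budget
 *      blk_mq_set_rq_budget_token(rq, budget_token);
 *      if (!dispatch(rq))      // hypothetical failure path
 *              blk_mq_put_dispatch_budget(q, budget_token);
 */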

static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
{
        if (blk_mq_is_shared_tags(hctx->flags))
                atomic_inc(&hctx->queue->nr_active_requests_shared_tags);
        else
                atomic_inc(&hctx->nr_active);
}

static inline void __blk_mq_sub_active_requests(struct blk_mq_hw_ctx *hctx,
                int val)
{
        if (blk_mq_is_shared_tags(hctx->flags))
                atomic_sub(val, &hctx->queue->nr_active_requests_shared_tags);
        else
                atomic_sub(val, &hctx->nr_active);
}

static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
{
        __blk_mq_sub_active_requests(hctx, 1);
}

static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)
{
        if (blk_mq_is_shared_tags(hctx->flags))
                return atomic_read(&hctx->queue->nr_active_requests_shared_tags);
        return atomic_read(&hctx->nr_active);
}

static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
                                           struct request *rq)
{
        blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
        rq->tag = BLK_MQ_NO_TAG;

        if (rq->rq_flags & RQF_MQ_INFLIGHT) {
                rq->rq_flags &= ~RQF_MQ_INFLIGHT;
                __blk_mq_dec_active_requests(hctx);
        }
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
        if (rq->tag == BLK_MQ_NO_TAG || rq->internal_tag == BLK_MQ_NO_TAG)
                return;

        __blk_mq_put_driver_tag(rq->mq_hctx, rq);
}

bool __blk_mq_get_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq);

static inline bool blk_mq_get_driver_tag(struct request *rq)
{
        struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

        if (rq->tag != BLK_MQ_NO_TAG &&
            !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
                hctx->tags->rqs[rq->tag] = rq;
                return true;
        }

        return __blk_mq_get_driver_tag(hctx, rq);
}
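
/*
 * Illustrative note (not part of the original header): a request that
 * came through a scheduler holds only an internal_tag until dispatch, at
 * which point a driver tag must be acquired. A hypothetical dispatch
 * step would look like:
 *
 *      if (!blk_mq_get_driver_tag(rq))
 *              return false;   // out of driver tags, retry later
 *
 * The tag is released again via blk_mq_put_driver_tag() above when the
 * request completes or when dispatch backs off.
 */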

static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
{
        int cpu;

        for_each_possible_cpu(cpu)
                qmap->mq_map[cpu] = 0;
}

/*
 * blk_mq_plug() - Get caller context plug
 * @q: request queue
 * @bio: the bio being submitted by the caller context
 *
 * Plugging, by design, may delay the insertion of BIOs into the elevator
 * in order to increase BIO merging opportunities. However, when multiple
 * contexts concurrently issue BIOs to a device, this can change the BIO
 * insertion order from the order in which submit_bio() is executed, even
 * if those contexts are synchronized to tightly control the issuing
 * order. While this is not a problem with regular block devices, such
 * reordering can cause write BIO failures with zoned block devices, as
 * these require sequential write patterns to zones. Prevent this from
 * happening by ignoring the plug state of a BIO issuing context if the
 * target request queue is for a zoned block device and the BIO to plug
 * is a write operation.
 *
 * Return current->plug if the bio can be plugged and NULL otherwise.
 */
static inline struct blk_plug *blk_mq_plug(struct request_queue *q,
                                           struct bio *bio)
{
        /*
         * For regular block devices or read operations, use the context plug
         * which may be NULL if blk_start_plug() was not executed.
         */
        if (!blk_queue_is_zoned(q) || !op_is_write(bio_op(bio)))
                return current->plug;

        /* Zoned block device write operation case: do not plug the BIO */
        return NULL;
}
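
/*
 * Illustrative examples (hypothetical queues and bios, not from this
 * header):
 *
 *      blk_mq_plug(q, read_bio);        // current->plug (any device)
 *      blk_mq_plug(zoned_q, write_bio); // NULL: preserve zone write order
 *
 * Submitters should therefore never assume a non-NULL plug; NULL simply
 * means the bio goes straight to insertion without plug batching.
 */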

/* Free all requests on the list */
static inline void blk_mq_free_requests(struct list_head *list)
{
        while (!list_empty(list)) {
                struct request *rq = list_entry_rq(list->next);

                list_del_init(&rq->queuelist);
                blk_mq_free_request(rq);
        }
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
                                  struct sbitmap_queue *bt)
{
        unsigned int depth, users;

        if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
                return true;

        /*
         * Don't try dividing an ant
         */
        if (bt->sb.depth == 1)
                return true;

        if (blk_mq_is_shared_tags(hctx->flags)) {
                struct request_queue *q = hctx->queue;

                if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
                        return true;
        } else {
                if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                        return true;
        }

        users = atomic_read(&hctx->tags->active_queues);

        if (!users)
                return true;

        /*
         * Allow at least some tags
         */
        depth = max((bt->sb.depth + users - 1) / users, 4U);
        return __blk_mq_active_requests(hctx) < depth;
}
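
/*
 * Worked example (hypothetical numbers, not from this header): with a
 * shared tag depth of 256 and 3 active users, each user's fair share is
 *
 *      depth = max((256 + 3 - 1) / 3, 4U) = max(86, 4) = 86
 *
 * so a user is throttled once it has 86 requests in flight. The 4U floor
 * guarantees that even with very many users (say depth 32 and 16 users,
 * where the ceiling division gives 2) each still gets at least 4 tags.
 */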


#endif
