#ifndef INT_BLK_MQ_TAG_H
#define INT_BLK_MQ_TAG_H

#include "blk-mq.h"

/*
 * Tag address space map.
 */
struct blk_mq_tags {
	unsigned int nr_tags;
	unsigned int nr_reserved_tags;

	atomic_t active_queues;

	struct sbitmap_queue bitmap_tags;
	struct sbitmap_queue breserved_tags;

	struct request **rqs;
	struct request **static_rqs;
	struct list_head page_list;
};


extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int reserved_tags, int node, int alloc_policy);
extern void blk_mq_free_tags(struct blk_mq_tags *tags);

extern unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data);
extern void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags,
			   struct blk_mq_ctx *ctx, unsigned int tag);
extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags);
extern int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
				   struct blk_mq_tags **tags,
				   unsigned int depth, bool can_grow);
extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
				void *priv);

static inline struct sbq_wait_state *bt_wait_ptr(struct sbitmap_queue *bt,
						 struct blk_mq_hw_ctx *hctx)
{
	if (!hctx)
		return &bt->ws[0];
	return sbq_wait_ptr(bt, &hctx->wait_index);
}

enum {
	BLK_MQ_TAG_CACHE_MIN	= 1,
	BLK_MQ_TAG_CACHE_MAX	= 64,
};

enum {
	BLK_MQ_TAG_FAIL		= -1U,
	BLK_MQ_TAG_MIN		= BLK_MQ_TAG_CACHE_MIN,
	BLK_MQ_TAG_MAX		= BLK_MQ_TAG_FAIL - 1,
};

extern bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *);
extern void __blk_mq_tag_idle(struct blk_mq_hw_ctx *);

static inline bool blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (!(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return false;

	return __blk_mq_tag_busy(hctx);
}

static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	if (!(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return;

	__blk_mq_tag_idle(hctx);
}

/*
 * This helper should only be used by the flush request to share its tag
 * with the request it was cloned from; the two requests must not be
 * in flight at the same time. The caller has to make sure the tag
 * can't be freed.
 */
static inline void blk_mq_tag_set_rq(struct blk_mq_hw_ctx *hctx,
				     unsigned int tag, struct request *rq)
{
	hctx->tags->rqs[tag] = rq;
}

static inline bool blk_mq_tag_is_reserved(struct blk_mq_tags *tags,
					  unsigned int tag)
{
	return tag < tags->nr_reserved_tags;
}

#endif
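/*
 * Illustrative sketch (not part of the kernel source): one way a block-layer
 * caller could exercise the tag-map API declared above. NUMA_NO_NODE and
 * BLK_TAG_ALLOC_FIFO are assumed to come from other kernel headers; the
 * function name and the 64/2 depth split are made up for the example.
 */
static int blk_mq_tags_example(void)
{
	struct blk_mq_tags *tags;

	/* 64 tags in total, the first 2 reserved for internal commands */
	tags = blk_mq_init_tags(64, 2, NUMA_NO_NODE, BLK_TAG_ALLOC_FIFO);
	if (!tags)
		return -ENOMEM;

	/* blk_mq_tag_is_reserved() is simply "tag < nr_reserved_tags" */
	WARN_ON(!blk_mq_tag_is_reserved(tags, 1));
	WARN_ON(blk_mq_tag_is_reserved(tags, 2));

	blk_mq_free_tags(tags);
	return 0;
}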