
TOMOYO Linux Cross Reference
Linux/include/linux/blk_types.h


Diff markup

Differences between /include/linux/blk_types.h (Version linux-4.5.7) and /include/linux/blk_types.h (Version linux-5.0.21)


                                                   >>   1 /* SPDX-License-Identifier: GPL-2.0 */
  1 /*                                                  2 /*
  2  * Block data types and constants.  Directly i      3  * Block data types and constants.  Directly include this file only to
  3  * break include dependency loop.                   4  * break include dependency loop.
  4  */                                                 5  */
  5 #ifndef __LINUX_BLK_TYPES_H                         6 #ifndef __LINUX_BLK_TYPES_H
  6 #define __LINUX_BLK_TYPES_H                         7 #define __LINUX_BLK_TYPES_H
  7                                                     8 
  8 #include <linux/types.h>                            9 #include <linux/types.h>
                                                   >>  10 #include <linux/bvec.h>
                                                   >>  11 #include <linux/ktime.h>
  9                                                    12 
 10 struct bio_set;                                    13 struct bio_set;
 11 struct bio;                                        14 struct bio;
 12 struct bio_integrity_payload;                      15 struct bio_integrity_payload;
 13 struct page;                                       16 struct page;
 14 struct block_device;                               17 struct block_device;
 15 struct io_context;                                 18 struct io_context;
 16 struct cgroup_subsys_state;                        19 struct cgroup_subsys_state;
 17 typedef void (bio_end_io_t) (struct bio *);        20 typedef void (bio_end_io_t) (struct bio *);
 18 typedef void (bio_destructor_t) (struct bio *) << 
 19                                                    21 
 20 /*                                                 22 /*
 21  * was unsigned short, but we might as well be !!  23  * Block error status values.  See block/blk-core:blk_errors for the details.
                                                   >>  24  * Alpha cannot write a byte atomically, so we need to use 32-bit value.
 22  */                                                25  */
 23 struct bio_vec {                               !!  26 #if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__)
 24         struct page     *bv_page;              !!  27 typedef u32 __bitwise blk_status_t;
 25         unsigned int    bv_len;                !!  28 #else
 26         unsigned int    bv_offset;             !!  29 typedef u8 __bitwise blk_status_t;
 27 };                                             !!  30 #endif
                                                   >>  31 #define BLK_STS_OK 0
                                                   >>  32 #define BLK_STS_NOTSUPP         ((__force blk_status_t)1)
                                                   >>  33 #define BLK_STS_TIMEOUT         ((__force blk_status_t)2)
                                                   >>  34 #define BLK_STS_NOSPC           ((__force blk_status_t)3)
                                                   >>  35 #define BLK_STS_TRANSPORT       ((__force blk_status_t)4)
                                                   >>  36 #define BLK_STS_TARGET          ((__force blk_status_t)5)
                                                   >>  37 #define BLK_STS_NEXUS           ((__force blk_status_t)6)
                                                   >>  38 #define BLK_STS_MEDIUM          ((__force blk_status_t)7)
                                                   >>  39 #define BLK_STS_PROTECTION      ((__force blk_status_t)8)
                                                   >>  40 #define BLK_STS_RESOURCE        ((__force blk_status_t)9)
                                                   >>  41 #define BLK_STS_IOERR           ((__force blk_status_t)10)
                                                   >>  42 
                                                   >>  43 /* hack for device mapper, don't use elsewhere: */
                                                   >>  44 #define BLK_STS_DM_REQUEUE    ((__force blk_status_t)11)
 28                                                    45 
 29 #ifdef CONFIG_BLOCK                            !!  46 #define BLK_STS_AGAIN           ((__force blk_status_t)12)
                                                   >>  47 
                                                   >>  48 /*
                                                   >>  49  * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if
                                                   >>  50  * device related resources are unavailable, but the driver can guarantee
                                                   >>  51  * that the queue will be rerun in the future once resources become
                                                   >>  52  * available again. This is typically the case for device specific
                                                   >>  53  * resources that are consumed for IO. If the driver fails allocating these
                                                   >>  54  * resources, we know that inflight (or pending) IO will free these
                                                   >>  55  * resource upon completion.
                                                   >>  56  *
                                                   >>  57  * This is different from BLK_STS_RESOURCE in that it explicitly references
                                                   >>  58  * a device specific resource. For resources of wider scope, allocation
                                                   >>  59  * failure can happen without having pending IO. This means that we can't
                                                   >>  60  * rely on request completions freeing these resources, as IO may not be in
                                                   >>  61  * flight. Examples of that are kernel memory allocations, DMA mappings, or
                                                   >>  62  * any other system wide resources.
                                                   >>  63  */
                                                   >>  64 #define BLK_STS_DEV_RESOURCE    ((__force blk_status_t)13)
                                                   >>  65 
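
The BLK_STS_RESOURCE / BLK_STS_DEV_RESOURCE distinction documented above boils down to who is responsible for rerunning the queue after an allocation failure. A minimal userspace sketch of that decision, using the same numeric values as the header; map_alloc_failure() is a hypothetical helper for illustration, not kernel API, and the sparse __bitwise/__force annotations are dropped:

#include <stdbool.h>
#include <stdio.h>

typedef unsigned char blk_status_t;             /* sparse annotations dropped */
#define BLK_STS_RESOURCE        ((blk_status_t)9)
#define BLK_STS_DEV_RESOURCE    ((blk_status_t)13)

/* Hypothetical helper, not kernel API: pick a status for a failed allocation. */
static blk_status_t map_alloc_failure(bool freed_by_inflight_io)
{
        /*
         * Device-owned resources are freed when IO already queued on this
         * device completes, so the queue is guaranteed to be rerun:
         * BLK_STS_DEV_RESOURCE.  System-wide resources (kernel memory,
         * DMA mappings, ...) may be exhausted with no IO in flight, so
         * return BLK_STS_RESOURCE and let the block layer rerun the
         * queue itself later.
         */
        return freed_by_inflight_io ? BLK_STS_DEV_RESOURCE : BLK_STS_RESOURCE;
}

int main(void)
{
        printf("device-owned resource -> %u\n", map_alloc_failure(true));  /* 13 */
        printf("system-wide resource  -> %u\n", map_alloc_failure(false)); /* 9  */
        return 0;
}
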
                                                   >>  66 /**
                                                   >>  67  * blk_path_error - returns true if error may be path related
                                                   >>  68  * @error: status the request was completed with
                                                   >>  69  *
                                                   >>  70  * Description:
                                                   >>  71  *     This classifies block error status into non-retryable errors and ones
                                                   >>  72  *     that may be successful if retried on a failover path.
                                                   >>  73  *
                                                   >>  74  * Return:
                                                   >>  75  *     %false - retrying failover path will not help
                                                   >>  76  *     %true  - may succeed if retried
                                                   >>  77  */
                                                   >>  78 static inline bool blk_path_error(blk_status_t error)
                                                   >>  79 {
                                                   >>  80         switch (error) {
                                                   >>  81         case BLK_STS_NOTSUPP:
                                                   >>  82         case BLK_STS_NOSPC:
                                                   >>  83         case BLK_STS_TARGET:
                                                   >>  84         case BLK_STS_NEXUS:
                                                   >>  85         case BLK_STS_MEDIUM:
                                                   >>  86         case BLK_STS_PROTECTION:
                                                   >>  87                 return false;
                                                   >>  88         }
                                                   >>  89 
                                                   >>  90         /* Anything else could be a path failure, so should be retried */
                                                   >>  91         return true;
                                                   >>  92 }
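
For a concrete feel of the classification in blk_path_error(), here is a standalone sketch that mirrors the BLK_STS_* values and the switch from the right-hand (5.0) column; the main() harness is illustrative only and the sparse annotations are again dropped so it compiles as plain C:

#include <stdbool.h>
#include <stdio.h>

typedef unsigned char blk_status_t;

#define BLK_STS_NOTSUPP         1
#define BLK_STS_TIMEOUT         2
#define BLK_STS_NOSPC           3
#define BLK_STS_TRANSPORT       4
#define BLK_STS_TARGET          5
#define BLK_STS_NEXUS           6
#define BLK_STS_MEDIUM          7
#define BLK_STS_PROTECTION      8

static bool blk_path_error(blk_status_t error)
{
        switch (error) {
        case BLK_STS_NOTSUPP:
        case BLK_STS_NOSPC:
        case BLK_STS_TARGET:
        case BLK_STS_NEXUS:
        case BLK_STS_MEDIUM:
        case BLK_STS_PROTECTION:
                return false;
        }
        /* anything else (transport, timeout, generic I/O error) may be path related */
        return true;
}

int main(void)
{
        /* a medium error will not get better on another path ... */
        printf("MEDIUM    -> retry on failover path: %d\n", blk_path_error(BLK_STS_MEDIUM));
        /* ... but a transport error might */
        printf("TRANSPORT -> retry on failover path: %d\n", blk_path_error(BLK_STS_TRANSPORT));
        return 0;
}
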
 30                                                    93 
 31 struct bvec_iter {                             !!  94 /*
 32         sector_t                bi_sector;     !!  95  * From most significant bit:
 33                                                !!  96  * 1 bit: reserved for other usage, see below
 34         unsigned int            bi_size;       !!  97  * 12 bits: original size of bio
                                                   >>  98  * 51 bits: issue time of bio
                                                   >>  99  */
                                                   >> 100 #define BIO_ISSUE_RES_BITS      1
                                                   >> 101 #define BIO_ISSUE_SIZE_BITS     12
                                                   >> 102 #define BIO_ISSUE_RES_SHIFT     (64 - BIO_ISSUE_RES_BITS)
                                                   >> 103 #define BIO_ISSUE_SIZE_SHIFT    (BIO_ISSUE_RES_SHIFT - BIO_ISSUE_SIZE_BITS)
                                                   >> 104 #define BIO_ISSUE_TIME_MASK     ((1ULL << BIO_ISSUE_SIZE_SHIFT) - 1)
                                                   >> 105 #define BIO_ISSUE_SIZE_MASK     \
                                                   >> 106         (((1ULL << BIO_ISSUE_SIZE_BITS) - 1) << BIO_ISSUE_SIZE_SHIFT)
                                                   >> 107 #define BIO_ISSUE_RES_MASK      (~((1ULL << BIO_ISSUE_RES_SHIFT) - 1))
 35                                                   108 
 36         unsigned int            bi_idx;        !! 109 /* Reserved bit for blk-throtl */
                                                   >> 110 #define BIO_ISSUE_THROTL_SKIP_LATENCY (1ULL << 63)
 37                                                   111 
 38         unsigned int            bi_bvec_done;  !! 112 struct bio_issue {
 39                                                !! 113         u64 value;
 40 };                                                114 };
 41                                                   115 
                                                   >> 116 static inline u64 __bio_issue_time(u64 time)
                                                   >> 117 {
                                                   >> 118         return time & BIO_ISSUE_TIME_MASK;
                                                   >> 119 }
                                                   >> 120 
                                                   >> 121 static inline u64 bio_issue_time(struct bio_issue *issue)
                                                   >> 122 {
                                                   >> 123         return __bio_issue_time(issue->value);
                                                   >> 124 }
                                                   >> 125 
                                                   >> 126 static inline sector_t bio_issue_size(struct bio_issue *issue)
                                                   >> 127 {
                                                   >> 128         return ((issue->value & BIO_ISSUE_SIZE_MASK) >> BIO_ISSUE_SIZE_SHIFT);
                                                   >> 129 }
                                                   >> 130 
                                                   >> 131 static inline void bio_issue_init(struct bio_issue *issue,
                                                   >> 132                                        sector_t size)
                                                   >> 133 {
                                                   >> 134         size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1;
                                                   >> 135         issue->value = ((issue->value & BIO_ISSUE_RES_MASK) |
                                                   >> 136                         (ktime_get_ns() & BIO_ISSUE_TIME_MASK) |
                                                   >> 137                         ((u64)size << BIO_ISSUE_SIZE_SHIFT));
                                                   >> 138 }
                                                   >> 139 
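
The bit layout documented above (1 reserved bit, 12 bits of original size, 51 bits of issue time) can be checked with a standalone round-trip. The constants mirror the header; ktime_get_ns() is replaced by a caller-supplied timestamp and the struct and helpers are re-declared in plain C, so this is a sketch, not the kernel code:

#include <stdint.h>
#include <stdio.h>

#define BIO_ISSUE_RES_BITS      1
#define BIO_ISSUE_SIZE_BITS     12
#define BIO_ISSUE_RES_SHIFT     (64 - BIO_ISSUE_RES_BITS)
#define BIO_ISSUE_SIZE_SHIFT    (BIO_ISSUE_RES_SHIFT - BIO_ISSUE_SIZE_BITS)
#define BIO_ISSUE_TIME_MASK     ((1ULL << BIO_ISSUE_SIZE_SHIFT) - 1)
#define BIO_ISSUE_SIZE_MASK     \
        (((1ULL << BIO_ISSUE_SIZE_BITS) - 1) << BIO_ISSUE_SIZE_SHIFT)
#define BIO_ISSUE_RES_MASK      (~((1ULL << BIO_ISSUE_RES_SHIFT) - 1))

struct bio_issue { uint64_t value; };

static void bio_issue_init(struct bio_issue *issue, uint64_t size, uint64_t now_ns)
{
        size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1;       /* size truncated to 12 bits */
        issue->value = (issue->value & BIO_ISSUE_RES_MASK) |  /* keep the reserved bit */
                       (now_ns & BIO_ISSUE_TIME_MASK) |       /* low 51 bits: time */
                       (size << BIO_ISSUE_SIZE_SHIFT);        /* next 12 bits: size */
}

int main(void)
{
        struct bio_issue issue = { .value = 1ULL << 63 };  /* reserved (throttle) bit set */
        uint64_t now = 123456789012345ULL;                 /* stand-in for ktime_get_ns() */

        bio_issue_init(&issue, 4096 >> 9, now);            /* 8 x 512-byte sectors */

        printf("time = %llu\n",
               (unsigned long long)(issue.value & BIO_ISSUE_TIME_MASK));
        printf("size = %llu sectors\n",
               (unsigned long long)((issue.value & BIO_ISSUE_SIZE_MASK) >> BIO_ISSUE_SIZE_SHIFT));
        printf("res  = %llu\n",
               (unsigned long long)(issue.value >> BIO_ISSUE_RES_SHIFT));
        return 0;
}
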
 42 /*                                                140 /*
 43  * main unit of I/O for the block layer and lo    141  * main unit of I/O for the block layer and lower layers (ie drivers and
 44  * stacking drivers)                              142  * stacking drivers)
 45  */                                               143  */
 46 struct bio {                                      144 struct bio {
 47         struct bio              *bi_next;         145         struct bio              *bi_next;       /* request queue link */
 48         struct block_device     *bi_bdev;      !! 146         struct gendisk          *bi_disk;
 49         unsigned int            bi_flags;      !! 147         unsigned int            bi_opf;         /* bottom bits req flags,
 50         int                     bi_error;      !! 148                                                  * top bits REQ_OP. Use
 51         unsigned long           bi_rw;         !! 149                                                  * accessors.
 52                                                << 
 53                                                   150                                                  */
 54                                                !! 151         unsigned short          bi_flags;       /* status, etc and bvec pool number */
 55         struct bvec_iter        bi_iter;       !! 152         unsigned short          bi_ioprio;
                                                   >> 153         unsigned short          bi_write_hint;
                                                   >> 154         blk_status_t            bi_status;
                                                   >> 155         u8                      bi_partno;
 56                                                   156 
 57         /* Number of segments in this BIO afte    157         /* Number of segments in this BIO after
 58          * physical address coalescing is perf    158          * physical address coalescing is performed.
 59          */                                       159          */
 60         unsigned int            bi_phys_segmen    160         unsigned int            bi_phys_segments;
 61                                                   161 
 62         /*                                        162         /*
 63          * To keep track of the max segment si    163          * To keep track of the max segment size, we account for the
 64          * sizes of the first and last mergeab    164          * sizes of the first and last mergeable segments in this bio.
 65          */                                       165          */
 66         unsigned int            bi_seg_front_s    166         unsigned int            bi_seg_front_size;
 67         unsigned int            bi_seg_back_si    167         unsigned int            bi_seg_back_size;
 68                                                   168 
 69         atomic_t                __bi_remaining !! 169         struct bvec_iter        bi_iter;
 70                                                   170 
                                                   >> 171         atomic_t                __bi_remaining;
 71         bio_end_io_t            *bi_end_io;       172         bio_end_io_t            *bi_end_io;
 72                                                   173 
 73         void                    *bi_private;      174         void                    *bi_private;
 74 #ifdef CONFIG_BLK_CGROUP                          175 #ifdef CONFIG_BLK_CGROUP
 75         /*                                        176         /*
 76          * Optional ioc and css associated wit !! 177          * Represents the association of the css and request_queue for the bio.
 77          * release.  Read comment on top of bi !! 178          * If a bio goes direct to device, it will not have a blkg as it will
                                                   >> 179          * not have a request_queue associated with it.  The reference is put
                                                   >> 180          * on release of the bio.
 78          */                                       181          */
 79         struct io_context       *bi_ioc;       !! 182         struct blkcg_gq         *bi_blkg;
 80         struct cgroup_subsys_state *bi_css;    !! 183         struct bio_issue        bi_issue;
 81 #endif                                            184 #endif
 82         union {                                   185         union {
 83 #if defined(CONFIG_BLK_DEV_INTEGRITY)             186 #if defined(CONFIG_BLK_DEV_INTEGRITY)
 84                 struct bio_integrity_payload *    187                 struct bio_integrity_payload *bi_integrity; /* data integrity */
 85 #endif                                            188 #endif
 86         };                                        189         };
 87                                                   190 
 88         unsigned short          bi_vcnt;          191         unsigned short          bi_vcnt;        /* how many bio_vec's */
 89                                                   192 
 90         /*                                        193         /*
 91          * Everything starting with bi_max_vec    194          * Everything starting with bi_max_vecs will be preserved by bio_reset()
 92          */                                       195          */
 93                                                   196 
 94         unsigned short          bi_max_vecs;      197         unsigned short          bi_max_vecs;    /* max bvl_vecs we can hold */
 95                                                   198 
 96         atomic_t                __bi_cnt;         199         atomic_t                __bi_cnt;       /* pin count */
 97                                                   200 
 98         struct bio_vec          *bi_io_vec;       201         struct bio_vec          *bi_io_vec;     /* the actual vec list */
 99                                                   202 
100         struct bio_set          *bi_pool;         203         struct bio_set          *bi_pool;
101                                                   204 
102         /*                                        205         /*
103          * We can inline a number of vecs at t    206          * We can inline a number of vecs at the end of the bio, to avoid
104          * double allocations for a small numb    207          * double allocations for a small number of bio_vecs. This member
105          * MUST obviously be kept at the very     208          * MUST obviously be kept at the very end of the bio.
106          */                                       209          */
107         struct bio_vec          bi_inline_vecs    210         struct bio_vec          bi_inline_vecs[0];
108 };                                                211 };
109                                                   212 
110 #define BIO_RESET_BYTES         offsetof(struc    213 #define BIO_RESET_BYTES         offsetof(struct bio, bi_max_vecs)
111                                                   214 
112 /*                                                215 /*
113  * bio flags                                      216  * bio flags
114  */                                               217  */
115 #define BIO_SEG_VALID   1       /* bi_phys_seg    218 #define BIO_SEG_VALID   1       /* bi_phys_segments valid */
116 #define BIO_CLONED      2       /* doesn't own    219 #define BIO_CLONED      2       /* doesn't own data */
117 #define BIO_BOUNCED     3       /* bio is a bo    220 #define BIO_BOUNCED     3       /* bio is a bounce bio */
118 #define BIO_USER_MAPPED 4       /* contains us    221 #define BIO_USER_MAPPED 4       /* contains user pages */
119 #define BIO_NULL_MAPPED 5       /* contains in    222 #define BIO_NULL_MAPPED 5       /* contains invalid user pages */
120 #define BIO_QUIET       6       /* Make BIO Qu    223 #define BIO_QUIET       6       /* Make BIO Quiet */
121 #define BIO_CHAIN       7       /* chained bio    224 #define BIO_CHAIN       7       /* chained bio, ->bi_remaining in effect */
122 #define BIO_REFFED      8       /* bio has ele    225 #define BIO_REFFED      8       /* bio has elevated ->bi_cnt */
                                                   >> 226 #define BIO_THROTTLED   9       /* This bio has already been subjected to
                                                   >> 227                                  * throttling rules. Don't do it again. */
                                                   >> 228 #define BIO_TRACE_COMPLETION 10 /* bio_endio() should trace the final completion
                                                   >> 229                                  * of this bio. */
                                                   >> 230 #define BIO_QUEUE_ENTERED 11    /* can use blk_queue_enter_live() */
                                                   >> 231 #define BIO_TRACKED 12          /* set if bio goes through the rq_qos path */
                                                   >> 232 
                                                   >> 233 /* See BVEC_POOL_OFFSET below before adding new flags */
123                                                   234 
124 /*                                                235 /*
125  * Flags starting here get preserved by bio_re !! 236  * We support 6 different bvec pools, the last one is magic in that it
126  * BIO_POOL_IDX()                              !! 237  * is backed by a mempool.
127  */                                               238  */
128 #define BIO_RESET_BITS  13                     !! 239 #define BVEC_POOL_NR            6
129 #define BIO_OWNS_VEC    13      /* bio_free()  !! 240 #define BVEC_POOL_MAX           (BVEC_POOL_NR - 1)
130                                                   241 
131 /*                                                242 /*
132  * top 4 bits of bio flags indicate the pool t !! 243  * Top 3 bits of bio flags indicate the pool the bvecs came from.  We add
                                                   >> 244  * 1 to the actual index so that 0 indicates that there are no bvecs to be
                                                   >> 245  * freed.
133  */                                               246  */
134 #define BIO_POOL_BITS           (4)            !! 247 #define BVEC_POOL_BITS          (3)
135 #define BIO_POOL_NONE           ((1UL << BIO_P !! 248 #define BVEC_POOL_OFFSET        (16 - BVEC_POOL_BITS)
136 #define BIO_POOL_OFFSET         (32 - BIO_POOL !! 249 #define BVEC_POOL_IDX(bio)      ((bio)->bi_flags >> BVEC_POOL_OFFSET)
137 #define BIO_POOL_MASK           (1UL << BIO_PO !! 250 #if (1<< BVEC_POOL_BITS) < (BVEC_POOL_NR+1)
138 #define BIO_POOL_IDX(bio)       ((bio)->bi_fla !! 251 # error "BVEC_POOL_BITS is too small"
                                                   >> 252 #endif
                                                   >> 253 
                                                   >> 254 /*
                                                   >> 255  * Flags starting here get preserved by bio_reset() - this includes
                                                   >> 256  * only BVEC_POOL_IDX()
                                                   >> 257  */
                                                   >> 258 #define BIO_RESET_BITS  BVEC_POOL_OFFSET
139                                                   259 
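
The bvec-pool encoding above packs the pool number, plus one, into the top BVEC_POOL_BITS of the 16-bit bi_flags word, so a stored value of 0 means there are no bvecs to free. A small sketch with the same constants; BVEC_POOL_IDX() is adapted here to take the flags word directly instead of a struct bio, purely for illustration:

#include <stdio.h>

#define BVEC_POOL_NR            6
#define BVEC_POOL_MAX           (BVEC_POOL_NR - 1)
#define BVEC_POOL_BITS          (3)
#define BVEC_POOL_OFFSET        (16 - BVEC_POOL_BITS)
#define BVEC_POOL_IDX(flags)    ((flags) >> BVEC_POOL_OFFSET)

int main(void)
{
        unsigned short bi_flags = 0;

        /* ordinary flag bits below BVEC_POOL_OFFSET are untouched (BIO_CHAIN is bit 7) */
        bi_flags |= 1U << 7;

        /* record that the bvecs came from pool 2: store 2 + 1 in the top 3 bits */
        bi_flags |= (unsigned short)((2 + 1) << BVEC_POOL_OFFSET);

        printf("pool idx + 1 = %u\n", BVEC_POOL_IDX(bi_flags));   /* 3 -> pool 2 */
        printf("no bvecs?      %s\n", BVEC_POOL_IDX(bi_flags) ? "no" : "yes");
        return 0;
}
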
140 #endif /* CONFIG_BLOCK */                      !! 260 typedef __u32 __bitwise blk_mq_req_flags_t;
141                                                   261 
142 /*                                                262 /*
143  * Request flags.  For use in the cmd_flags fi !! 263  * Operations and flags common to the bio and request structures.
144  * bi_rw of struct bio.  Note that some flags  !! 264  * We use 8 bits for encoding the operation, and the remaining 24 for flags.
                                                   >> 265  *
                                                   >> 266  * The least significant bit of the operation number indicates the data
                                                   >> 267  * transfer direction:
                                                   >> 268  *
                                                   >> 269  *   - if the least significant bit is set transfers are TO the device
                                                   >> 270  *   - if the least significant bit is not set transfers are FROM the device
                                                   >> 271  *
                                                   >> 272  * If a operation does not transfer data the least significant bit has no
                                                   >> 273  * meaning.
145  */                                               274  */
146 enum rq_flag_bits {                            !! 275 #define REQ_OP_BITS     8
147         /* common flags */                     !! 276 #define REQ_OP_MASK     ((1 << REQ_OP_BITS) - 1)
148         __REQ_WRITE,            /* not set, re !! 277 #define REQ_FLAG_BITS   24
149         __REQ_FAILFAST_DEV,     /* no driver r !! 278 
                                                   >> 279 enum req_opf {
                                                   >> 280         /* read sectors from the device */
                                                   >> 281         REQ_OP_READ             = 0,
                                                   >> 282         /* write sectors to the device */
                                                   >> 283         REQ_OP_WRITE            = 1,
                                                   >> 284         /* flush the volatile write cache */
                                                   >> 285         REQ_OP_FLUSH            = 2,
                                                   >> 286         /* discard sectors */
                                                   >> 287         REQ_OP_DISCARD          = 3,
                                                   >> 288         /* securely erase sectors */
                                                   >> 289         REQ_OP_SECURE_ERASE     = 5,
                                                   >> 290         /* reset a zone write pointer */
                                                   >> 291         REQ_OP_ZONE_RESET       = 6,
                                                   >> 292         /* write the same sector many times */
                                                   >> 293         REQ_OP_WRITE_SAME       = 7,
                                                   >> 294         /* write the zero filled sector many times */
                                                   >> 295         REQ_OP_WRITE_ZEROES     = 9,
                                                   >> 296 
                                                   >> 297         /* SCSI passthrough using struct scsi_request */
                                                   >> 298         REQ_OP_SCSI_IN          = 32,
                                                   >> 299         REQ_OP_SCSI_OUT         = 33,
                                                   >> 300         /* Driver private requests */
                                                   >> 301         REQ_OP_DRV_IN           = 34,
                                                   >> 302         REQ_OP_DRV_OUT          = 35,
                                                   >> 303 
                                                   >> 304         REQ_OP_LAST,
                                                   >> 305 };
                                                   >> 306 
                                                   >> 307 enum req_flag_bits {
                                                   >> 308         __REQ_FAILFAST_DEV =    /* no driver retries of device errors */
                                                   >> 309                 REQ_OP_BITS,
150         __REQ_FAILFAST_TRANSPORT, /* no driver    310         __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
151         __REQ_FAILFAST_DRIVER,  /* no driver r    311         __REQ_FAILFAST_DRIVER,  /* no driver retries of driver errors */
152                                                << 
153         __REQ_SYNC,             /* request is     312         __REQ_SYNC,             /* request is sync (sync write or read) */
154         __REQ_META,             /* metadata io    313         __REQ_META,             /* metadata io request */
155         __REQ_PRIO,             /* boost prior    314         __REQ_PRIO,             /* boost priority in cfq */
156         __REQ_DISCARD,          /* request to  !! 315         __REQ_NOMERGE,          /* don't touch this for merging */
157         __REQ_SECURE,           /* secure disc !! 316         __REQ_IDLE,             /* anticipate more IO after this one */
158         __REQ_WRITE_SAME,       /* write same  << 
159                                                << 
160         __REQ_NOIDLE,           /* don't antic << 
161         __REQ_INTEGRITY,        /* I/O include    317         __REQ_INTEGRITY,        /* I/O includes block integrity payload */
162         __REQ_FUA,              /* forced unit    318         __REQ_FUA,              /* forced unit access */
163         __REQ_FLUSH,            /* request for !! 319         __REQ_PREFLUSH,         /* request for cache flush */
164                                                << 
165         /* bio only flags */                   << 
166         __REQ_RAHEAD,           /* read ahead,    320         __REQ_RAHEAD,           /* read ahead, can fail anytime */
167         __REQ_THROTTLED,        /* This bio ha !! 321         __REQ_BACKGROUND,       /* background IO */
168                                  * throttling  !! 322         __REQ_NOWAIT,           /* Don't wait if request will block */
169                                                   323 
170         /* request only flags */               !! 324         /* command specific flags for REQ_OP_WRITE_ZEROES: */
171         __REQ_SORTED,           /* elevator kn !! 325         __REQ_NOUNMAP,          /* do not free blocks when zeroing */
172         __REQ_SOFTBARRIER,      /* may not be  !! 326 
173         __REQ_NOMERGE,          /* don't touch !! 327         __REQ_HIPRI,
174         __REQ_STARTED,          /* drive alrea !! 328 
175         __REQ_DONTPREP,         /* don't call  !! 329         /* for driver use */
176         __REQ_QUEUED,           /* uses queuei !! 330         __REQ_DRV,
177         __REQ_ELVPRIV,          /* elevator pr !! 331         __REQ_SWAP,             /* swapping request. */
178         __REQ_FAILED,           /* set if the  << 
179         __REQ_QUIET,            /* don't worry << 
180         __REQ_PREEMPT,          /* set for "id << 
181                                    for request << 
182                                    state must  << 
183         __REQ_ALLOCED,          /* request cam << 
184         __REQ_COPY_USER,        /* contains co << 
185         __REQ_FLUSH_SEQ,        /* request for << 
186         __REQ_IO_STAT,          /* account I/O << 
187         __REQ_MIXED_MERGE,      /* merge of di << 
188         __REQ_PM,               /* runtime pm  << 
189         __REQ_HASHED,           /* on IO sched << 
190         __REQ_MQ_INFLIGHT,      /* track infli << 
191         __REQ_NR_BITS,          /* stops here     332         __REQ_NR_BITS,          /* stops here */
192 };                                                333 };
193                                                   334 
194 #define REQ_WRITE               (1ULL << __REQ << 
195 #define REQ_FAILFAST_DEV        (1ULL << __REQ    335 #define REQ_FAILFAST_DEV        (1ULL << __REQ_FAILFAST_DEV)
196 #define REQ_FAILFAST_TRANSPORT  (1ULL << __REQ    336 #define REQ_FAILFAST_TRANSPORT  (1ULL << __REQ_FAILFAST_TRANSPORT)
197 #define REQ_FAILFAST_DRIVER     (1ULL << __REQ    337 #define REQ_FAILFAST_DRIVER     (1ULL << __REQ_FAILFAST_DRIVER)
198 #define REQ_SYNC                (1ULL << __REQ    338 #define REQ_SYNC                (1ULL << __REQ_SYNC)
199 #define REQ_META                (1ULL << __REQ    339 #define REQ_META                (1ULL << __REQ_META)
200 #define REQ_PRIO                (1ULL << __REQ    340 #define REQ_PRIO                (1ULL << __REQ_PRIO)
201 #define REQ_DISCARD             (1ULL << __REQ !! 341 #define REQ_NOMERGE             (1ULL << __REQ_NOMERGE)
202 #define REQ_WRITE_SAME          (1ULL << __REQ !! 342 #define REQ_IDLE                (1ULL << __REQ_IDLE)
203 #define REQ_NOIDLE              (1ULL << __REQ << 
204 #define REQ_INTEGRITY           (1ULL << __REQ    343 #define REQ_INTEGRITY           (1ULL << __REQ_INTEGRITY)
                                                   >> 344 #define REQ_FUA                 (1ULL << __REQ_FUA)
                                                   >> 345 #define REQ_PREFLUSH            (1ULL << __REQ_PREFLUSH)
                                                   >> 346 #define REQ_RAHEAD              (1ULL << __REQ_RAHEAD)
                                                   >> 347 #define REQ_BACKGROUND          (1ULL << __REQ_BACKGROUND)
                                                   >> 348 #define REQ_NOWAIT              (1ULL << __REQ_NOWAIT)
                                                   >> 349 #define REQ_NOUNMAP             (1ULL << __REQ_NOUNMAP)
                                                   >> 350 #define REQ_HIPRI               (1ULL << __REQ_HIPRI)
                                                   >> 351 
                                                   >> 352 #define REQ_DRV                 (1ULL << __REQ_DRV)
                                                   >> 353 #define REQ_SWAP                (1ULL << __REQ_SWAP)
205                                                   354 
206 #define REQ_FAILFAST_MASK \                       355 #define REQ_FAILFAST_MASK \
207         (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANS    356         (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
208 #define REQ_COMMON_MASK \                      << 
209         (REQ_WRITE | REQ_FAILFAST_MASK | REQ_S << 
210          REQ_DISCARD | REQ_WRITE_SAME | REQ_NO << 
211          REQ_SECURE | REQ_INTEGRITY)           << 
212 #define REQ_CLONE_MASK          REQ_COMMON_MAS << 
213                                                   357 
214 #define BIO_NO_ADVANCE_ITER_MASK        (REQ_D << 
215                                                << 
216 /* This mask is used for both bio and request  << 
217 #define REQ_NOMERGE_FLAGS \                       358 #define REQ_NOMERGE_FLAGS \
218         (REQ_NOMERGE | REQ_STARTED | REQ_SOFTB !! 359         (REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)
219                                                   360 
220 #define REQ_RAHEAD              (1ULL << __REQ !! 361 enum stat_group {
221 #define REQ_THROTTLED           (1ULL << __REQ !! 362         STAT_READ,
                                                   >> 363         STAT_WRITE,
                                                   >> 364         STAT_DISCARD,
222                                                   365 
223 #define REQ_SORTED              (1ULL << __REQ !! 366         NR_STAT_GROUPS
224 #define REQ_SOFTBARRIER         (1ULL << __REQ !! 367 };
225 #define REQ_FUA                 (1ULL << __REQ << 
226 #define REQ_NOMERGE             (1ULL << __REQ << 
227 #define REQ_STARTED             (1ULL << __REQ << 
228 #define REQ_DONTPREP            (1ULL << __REQ << 
229 #define REQ_QUEUED              (1ULL << __REQ << 
230 #define REQ_ELVPRIV             (1ULL << __REQ << 
231 #define REQ_FAILED              (1ULL << __REQ << 
232 #define REQ_QUIET               (1ULL << __REQ << 
233 #define REQ_PREEMPT             (1ULL << __REQ << 
234 #define REQ_ALLOCED             (1ULL << __REQ << 
235 #define REQ_COPY_USER           (1ULL << __REQ << 
236 #define REQ_FLUSH               (1ULL << __REQ << 
237 #define REQ_FLUSH_SEQ           (1ULL << __REQ << 
238 #define REQ_IO_STAT             (1ULL << __REQ << 
239 #define REQ_MIXED_MERGE         (1ULL << __REQ << 
240 #define REQ_SECURE              (1ULL << __REQ << 
241 #define REQ_PM                  (1ULL << __REQ << 
242 #define REQ_HASHED              (1ULL << __REQ << 
243 #define REQ_MQ_INFLIGHT         (1ULL << __REQ << 
244                                                   368 
245 typedef unsigned int blk_qc_t;                 !! 369 #define bio_op(bio) \
246 #define BLK_QC_T_NONE   -1U                    !! 370         ((bio)->bi_opf & REQ_OP_MASK)
247 #define BLK_QC_T_SHIFT  16                     !! 371 #define req_op(req) \
                                                   >> 372         ((req)->cmd_flags & REQ_OP_MASK)
                                                   >> 373 
                                                   >> 374 /* obsolete, don't use in new code */
                                                   >> 375 static inline void bio_set_op_attrs(struct bio *bio, unsigned op,
                                                   >> 376                 unsigned op_flags)
                                                   >> 377 {
                                                   >> 378         bio->bi_opf = op | op_flags;
                                                   >> 379 }
248                                                   380 
249 static inline bool blk_qc_t_valid(blk_qc_t coo !! 381 static inline bool op_is_write(unsigned int op)
250 {                                                 382 {
251         return cookie != BLK_QC_T_NONE;        !! 383         return (op & 1);
                                                   >> 384 }
                                                   >> 385 
                                                   >> 386 /*
                                                   >> 387  * Check if the bio or request is one that needs special treatment in the
                                                   >> 388  * flush state machine.
                                                   >> 389  */
                                                   >> 390 static inline bool op_is_flush(unsigned int op)
                                                   >> 391 {
                                                   >> 392         return op & (REQ_FUA | REQ_PREFLUSH);
                                                   >> 393 }
                                                   >> 394 
                                                   >> 395 /*
                                                   >> 396  * Reads are always treated as synchronous, as are requests with the FUA or
                                                   >> 397  * PREFLUSH flag.  Other operations may be marked as synchronous using the
                                                   >> 398  * REQ_SYNC flag.
                                                   >> 399  */
                                                   >> 400 static inline bool op_is_sync(unsigned int op)
                                                   >> 401 {
                                                   >> 402         return (op & REQ_OP_MASK) == REQ_OP_READ ||
                                                   >> 403                 (op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
                                                   >> 404 }
                                                   >> 405 
                                                   >> 406 static inline bool op_is_discard(unsigned int op)
                                                   >> 407 {
                                                   >> 408         return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
                                                   >> 409 }
                                                   >> 410 
                                                   >> 411 static inline int op_stat_group(unsigned int op)
                                                   >> 412 {
                                                   >> 413         if (op_is_discard(op))
                                                   >> 414                 return STAT_DISCARD;
                                                   >> 415         return op_is_write(op);
252 }                                                 416 }
253                                                   417 
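
The op/flag split described before enum req_opf (8 operation bits, 24 flag bits, data direction in the operation's least significant bit) is easiest to see by composing a bi_opf value and pulling it apart with the accessors. A standalone sketch; only a handful of REQ_* values are reproduced, at the same bit positions as the enums above, and the main() harness is illustrative:

#include <stdbool.h>
#include <stdio.h>

#define REQ_OP_BITS     8
#define REQ_OP_MASK     ((1 << REQ_OP_BITS) - 1)

/* a few operations and flags, at the bit positions used by the enums above */
enum { REQ_OP_READ = 0, REQ_OP_WRITE = 1, REQ_OP_DISCARD = 3 };
#define REQ_SYNC        (1U << 11)      /* __REQ_SYNC */
#define REQ_FUA         (1U << 17)      /* __REQ_FUA */
#define REQ_PREFLUSH    (1U << 18)      /* __REQ_PREFLUSH */

static bool op_is_write(unsigned int op) { return op & 1; }

static bool op_is_sync(unsigned int op)
{
        return (op & REQ_OP_MASK) == REQ_OP_READ ||
               (op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}

int main(void)
{
        /* what bio_set_op_attrs() would store in bio->bi_opf */
        unsigned int opf = REQ_OP_WRITE | REQ_SYNC | REQ_FUA;

        printf("op          = %u\n", opf & REQ_OP_MASK);  /* 1 == REQ_OP_WRITE */
        printf("op_is_write = %d\n", op_is_write(opf));   /* LSB of the op is set */
        printf("op_is_sync  = %d\n", op_is_sync(opf));    /* REQ_SYNC/REQ_FUA present */
        return 0;
}
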
254 static inline blk_qc_t blk_tag_to_qc_t(unsigne !! 418 typedef unsigned int blk_qc_t;
                                                   >> 419 #define BLK_QC_T_NONE           -1U
                                                   >> 420 #define BLK_QC_T_SHIFT          16
                                                   >> 421 #define BLK_QC_T_INTERNAL       (1U << 31)
                                                   >> 422 
                                                   >> 423 static inline bool blk_qc_t_valid(blk_qc_t cookie)
255 {                                                 424 {
256         return tag | (queue_num << BLK_QC_T_SH !! 425         return cookie != BLK_QC_T_NONE;
257 }                                                 426 }
258                                                   427 
259 static inline unsigned int blk_qc_t_to_queue_n    428 static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
260 {                                                 429 {
261         return cookie >> BLK_QC_T_SHIFT;       !! 430         return (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT;
262 }                                                 431 }
263                                                   432 
264 static inline unsigned int blk_qc_t_to_tag(blk    433 static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
265 {                                                 434 {
266         return cookie & ((1u << BLK_QC_T_SHIFT    435         return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
267 }                                                 436 }
                                                   >> 437 
                                                   >> 438 static inline bool blk_qc_t_is_internal(blk_qc_t cookie)
                                                   >> 439 {
                                                   >> 440         return (cookie & BLK_QC_T_INTERNAL) != 0;
                                                   >> 441 }
                                                   >> 442 
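
The blk_qc_t helpers above assume a polling cookie with the tag in the low BLK_QC_T_SHIFT bits, the hardware queue number above that, and bit 31 flagging an internal (scheduler) tag. The left-hand (4.5) blk_tag_to_qc_t() was dropped from this header, so the sketch below assembles the cookie by hand; the constants and helpers mirror the header, the harness is illustrative:

#include <stdbool.h>
#include <stdio.h>

typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE           -1U
#define BLK_QC_T_SHIFT          16
#define BLK_QC_T_INTERNAL       (1U << 31)

static bool blk_qc_t_valid(blk_qc_t cookie) { return cookie != BLK_QC_T_NONE; }

static unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
{
        return (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT;
}

static unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
{
        return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
}

int main(void)
{
        /* hardware queue 3, internal tag 42 */
        blk_qc_t cookie = (3U << BLK_QC_T_SHIFT) | 42U | BLK_QC_T_INTERNAL;

        printf("valid    = %d\n", blk_qc_t_valid(cookie));               /* 1  */
        printf("queue    = %u\n", blk_qc_t_to_queue_num(cookie));        /* 3  */
        printf("tag      = %u\n", blk_qc_t_to_tag(cookie));              /* 42 */
        printf("internal = %d\n", (cookie & BLK_QC_T_INTERNAL) != 0);    /* 1  */
        return 0;
}
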
                                                   >> 443 struct blk_rq_stat {
                                                   >> 444         u64 mean;
                                                   >> 445         u64 min;
                                                   >> 446         u64 max;
                                                   >> 447         u32 nr_samples;
                                                   >> 448         u64 batch;
                                                   >> 449 };
268                                                   450 
269 #endif /* __LINUX_BLK_TYPES_H */                  451 #endif /* __LINUX_BLK_TYPES_H */
270                                                   452 
