~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/include/linux/blk_types.h

Version: ~ [ linux-5.15-rc1 ] ~ [ linux-5.14.5 ] ~ [ linux-5.13.18 ] ~ [ linux-5.12.19 ] ~ [ linux-5.11.22 ] ~ [ linux-5.10.66 ] ~ [ linux-5.9.16 ] ~ [ linux-5.8.18 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.147 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.206 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.246 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.282 ] ~ [ linux-4.8.17 ] ~ [ linux-4.7.10 ] ~ [ linux-4.6.7 ] ~ [ linux-4.5.7 ] ~ [ linux-4.4.283 ] ~ [ linux-4.3.6 ] ~ [ linux-4.2.8 ] ~ [ linux-4.1.52 ] ~ [ linux-4.0.9 ] ~ [ linux-3.18.140 ] ~ [ linux-3.16.85 ] ~ [ linux-3.14.79 ] ~ [ linux-3.12.74 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.5 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

Diff markup

Differences between /include/linux/blk_types.h (Version linux-4.5.7) and /include/linux/blk_types.h (Version linux-4.16.18)


                                                   >>   1 /* SPDX-License-Identifier: GPL-2.0 */
  1 /*                                                  2 /*
  2  * Block data types and constants.  Directly i      3  * Block data types and constants.  Directly include this file only to
  3  * break include dependency loop.                   4  * break include dependency loop.
  4  */                                                 5  */
  5 #ifndef __LINUX_BLK_TYPES_H                         6 #ifndef __LINUX_BLK_TYPES_H
  6 #define __LINUX_BLK_TYPES_H                         7 #define __LINUX_BLK_TYPES_H
  7                                                     8 
  8 #include <linux/types.h>                            9 #include <linux/types.h>
                                                   >>  10 #include <linux/bvec.h>
  9                                                    11 
 10 struct bio_set;                                    12 struct bio_set;
 11 struct bio;                                        13 struct bio;
 12 struct bio_integrity_payload;                      14 struct bio_integrity_payload;
 13 struct page;                                       15 struct page;
 14 struct block_device;                               16 struct block_device;
 15 struct io_context;                                 17 struct io_context;
 16 struct cgroup_subsys_state;                        18 struct cgroup_subsys_state;
 17 typedef void (bio_end_io_t) (struct bio *);        19 typedef void (bio_end_io_t) (struct bio *);
 18 typedef void (bio_destructor_t) (struct bio *) << 
 19                                                    20 
 20 /*                                                 21 /*
 21  * was unsigned short, but we might as well be !!  22  * Block error status values.  See block/blk-core:blk_errors for the details.
                                                   >>  23  * Alpha cannot write a byte atomically, so we need to use 32-bit value.
 22  */                                                24  */
 23 struct bio_vec {                               !!  25 #if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__)
 24         struct page     *bv_page;              !!  26 typedef u32 __bitwise blk_status_t;
 25         unsigned int    bv_len;                !!  27 #else
 26         unsigned int    bv_offset;             !!  28 typedef u8 __bitwise blk_status_t;
 27 };                                             !!  29 #endif
                                                   >>  30 #define BLK_STS_OK 0
                                                   >>  31 #define BLK_STS_NOTSUPP         ((__force blk_status_t)1)
                                                   >>  32 #define BLK_STS_TIMEOUT         ((__force blk_status_t)2)
                                                   >>  33 #define BLK_STS_NOSPC           ((__force blk_status_t)3)
                                                   >>  34 #define BLK_STS_TRANSPORT       ((__force blk_status_t)4)
                                                   >>  35 #define BLK_STS_TARGET          ((__force blk_status_t)5)
                                                   >>  36 #define BLK_STS_NEXUS           ((__force blk_status_t)6)
                                                   >>  37 #define BLK_STS_MEDIUM          ((__force blk_status_t)7)
                                                   >>  38 #define BLK_STS_PROTECTION      ((__force blk_status_t)8)
                                                   >>  39 #define BLK_STS_RESOURCE        ((__force blk_status_t)9)
                                                   >>  40 #define BLK_STS_IOERR           ((__force blk_status_t)10)
                                                   >>  41 
                                                   >>  42 /* hack for device mapper, don't use elsewhere: */
                                                   >>  43 #define BLK_STS_DM_REQUEUE    ((__force blk_status_t)11)
                                                   >>  44 
                                                   >>  45 #define BLK_STS_AGAIN           ((__force blk_status_t)12)
 28                                                    46 
 29 #ifdef CONFIG_BLOCK                            !!  47 /*
                                                   >>  48  * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if
                                                   >>  49  * device related resources are unavailable, but the driver can guarantee
                                                   >>  50  * that the queue will be rerun in the future once resources become
                                                   >>  51  * available again. This is typically the case for device specific
                                                   >>  52  * resources that are consumed for IO. If the driver fails allocating these
                                                   >>  53  * resources, we know that inflight (or pending) IO will free these
                                                   >>  54  * resource upon completion.
                                                   >>  55  *
                                                   >>  56  * This is different from BLK_STS_RESOURCE in that it explicitly references
                                                   >>  57  * a device specific resource. For resources of wider scope, allocation
                                                   >>  58  * failure can happen without having pending IO. This means that we can't
                                                   >>  59  * rely on request completions freeing these resources, as IO may not be in
                                                   >>  60  * flight. Examples of that are kernel memory allocations, DMA mappings, or
                                                   >>  61  * any other system wide resources.
                                                   >>  62  */
                                                   >>  63 #define BLK_STS_DEV_RESOURCE    ((__force blk_status_t)13)
 30                                                    64 
 31 struct bvec_iter {                             !!  65 /**
 32         sector_t                bi_sector;     !!  66  * blk_path_error - returns true if error may be path related
 33                                                !!  67  * @error: status the request was completed with
 34         unsigned int            bi_size;       !!  68  *
                                                   >>  69  * Description:
                                                   >>  70  *     This classifies block error status into non-retryable errors and ones
                                                   >>  71  *     that may be successful if retried on a failover path.
                                                   >>  72  *
                                                   >>  73  * Return:
                                                   >>  74  *     %false - retrying failover path will not help
                                                   >>  75  *     %true  - may succeed if retried
                                                   >>  76  */
                                                   >>  77 static inline bool blk_path_error(blk_status_t error)
                                                   >>  78 {
                                                   >>  79         switch (error) {
                                                   >>  80         case BLK_STS_NOTSUPP:
                                                   >>  81         case BLK_STS_NOSPC:
                                                   >>  82         case BLK_STS_TARGET:
                                                   >>  83         case BLK_STS_NEXUS:
                                                   >>  84         case BLK_STS_MEDIUM:
                                                   >>  85         case BLK_STS_PROTECTION:
                                                   >>  86                 return false;
                                                   >>  87         }
 35                                                    88 
 36         unsigned int            bi_idx;        !!  89         /* Anything else could be a path failure, so should be retried */
                                                   >>  90         return true;
                                                   >>  91 }
 37                                                    92 
 38         unsigned int            bi_bvec_done;  !!  93 struct blk_issue_stat {
 39                                                !!  94         u64 stat;
 40 };                                                 95 };
 41                                                    96 
 42 /*                                                 97 /*
 43  * main unit of I/O for the block layer and lo     98  * main unit of I/O for the block layer and lower layers (ie drivers and
 44  * stacking drivers)                               99  * stacking drivers)
 45  */                                               100  */
 46 struct bio {                                      101 struct bio {
 47         struct bio              *bi_next;         102         struct bio              *bi_next;       /* request queue link */
 48         struct block_device     *bi_bdev;      !! 103         struct gendisk          *bi_disk;
 49         unsigned int            bi_flags;      !! 104         unsigned int            bi_opf;         /* bottom bits req flags,
 50         int                     bi_error;      !! 105                                                  * top bits REQ_OP. Use
 51         unsigned long           bi_rw;         !! 106                                                  * accessors.
 52                                                << 
 53                                                   107                                                  */
 54                                                !! 108         unsigned short          bi_flags;       /* status, etc and bvec pool number */
 55         struct bvec_iter        bi_iter;       !! 109         unsigned short          bi_ioprio;
                                                   >> 110         unsigned short          bi_write_hint;
                                                   >> 111         blk_status_t            bi_status;
                                                   >> 112         u8                      bi_partno;
 56                                                   113 
 57         /* Number of segments in this BIO afte    114         /* Number of segments in this BIO after
 58          * physical address coalescing is perf    115          * physical address coalescing is performed.
 59          */                                       116          */
 60         unsigned int            bi_phys_segmen    117         unsigned int            bi_phys_segments;
 61                                                   118 
 62         /*                                        119         /*
 63          * To keep track of the max segment si    120          * To keep track of the max segment size, we account for the
 64          * sizes of the first and last mergeab    121          * sizes of the first and last mergeable segments in this bio.
 65          */                                       122          */
 66         unsigned int            bi_seg_front_s    123         unsigned int            bi_seg_front_size;
 67         unsigned int            bi_seg_back_si    124         unsigned int            bi_seg_back_size;
 68                                                   125 
 69         atomic_t                __bi_remaining !! 126         struct bvec_iter        bi_iter;
 70                                                   127 
                                                   >> 128         atomic_t                __bi_remaining;
 71         bio_end_io_t            *bi_end_io;       129         bio_end_io_t            *bi_end_io;
 72                                                   130 
 73         void                    *bi_private;      131         void                    *bi_private;
 74 #ifdef CONFIG_BLK_CGROUP                          132 #ifdef CONFIG_BLK_CGROUP
 75         /*                                        133         /*
 76          * Optional ioc and css associated wit    134          * Optional ioc and css associated with this bio.  Put on bio
 77          * release.  Read comment on top of bi    135          * release.  Read comment on top of bio_associate_current().
 78          */                                       136          */
 79         struct io_context       *bi_ioc;          137         struct io_context       *bi_ioc;
 80         struct cgroup_subsys_state *bi_css;       138         struct cgroup_subsys_state *bi_css;
                                                   >> 139 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
                                                   >> 140         void                    *bi_cg_private;
                                                   >> 141         struct blk_issue_stat   bi_issue_stat;
                                                   >> 142 #endif
 81 #endif                                            143 #endif
 82         union {                                   144         union {
 83 #if defined(CONFIG_BLK_DEV_INTEGRITY)             145 #if defined(CONFIG_BLK_DEV_INTEGRITY)
 84                 struct bio_integrity_payload *    146                 struct bio_integrity_payload *bi_integrity; /* data integrity */
 85 #endif                                            147 #endif
 86         };                                        148         };
 87                                                   149 
 88         unsigned short          bi_vcnt;          150         unsigned short          bi_vcnt;        /* how many bio_vec's */
 89                                                   151 
 90         /*                                        152         /*
 91          * Everything starting with bi_max_vec    153          * Everything starting with bi_max_vecs will be preserved by bio_reset()
 92          */                                       154          */
 93                                                   155 
 94         unsigned short          bi_max_vecs;      156         unsigned short          bi_max_vecs;    /* max bvl_vecs we can hold */
 95                                                   157 
 96         atomic_t                __bi_cnt;         158         atomic_t                __bi_cnt;       /* pin count */
 97                                                   159 
 98         struct bio_vec          *bi_io_vec;       160         struct bio_vec          *bi_io_vec;     /* the actual vec list */
 99                                                   161 
100         struct bio_set          *bi_pool;         162         struct bio_set          *bi_pool;
101                                                   163 
102         /*                                        164         /*
103          * We can inline a number of vecs at t    165          * We can inline a number of vecs at the end of the bio, to avoid
104          * double allocations for a small numb    166          * double allocations for a small number of bio_vecs. This member
105          * MUST obviously be kept at the very     167          * MUST obviously be kept at the very end of the bio.
106          */                                       168          */
107         struct bio_vec          bi_inline_vecs    169         struct bio_vec          bi_inline_vecs[0];
108 };                                                170 };
109                                                   171 
110 #define BIO_RESET_BYTES         offsetof(struc    172 #define BIO_RESET_BYTES         offsetof(struct bio, bi_max_vecs)
111                                                   173 
112 /*                                                174 /*
113  * bio flags                                      175  * bio flags
114  */                                               176  */
115 #define BIO_SEG_VALID   1       /* bi_phys_seg    177 #define BIO_SEG_VALID   1       /* bi_phys_segments valid */
116 #define BIO_CLONED      2       /* doesn't own    178 #define BIO_CLONED      2       /* doesn't own data */
117 #define BIO_BOUNCED     3       /* bio is a bo    179 #define BIO_BOUNCED     3       /* bio is a bounce bio */
118 #define BIO_USER_MAPPED 4       /* contains us    180 #define BIO_USER_MAPPED 4       /* contains user pages */
119 #define BIO_NULL_MAPPED 5       /* contains in    181 #define BIO_NULL_MAPPED 5       /* contains invalid user pages */
120 #define BIO_QUIET       6       /* Make BIO Qu    182 #define BIO_QUIET       6       /* Make BIO Quiet */
121 #define BIO_CHAIN       7       /* chained bio    183 #define BIO_CHAIN       7       /* chained bio, ->bi_remaining in effect */
122 #define BIO_REFFED      8       /* bio has ele    184 #define BIO_REFFED      8       /* bio has elevated ->bi_cnt */
                                                   >> 185 #define BIO_THROTTLED   9       /* This bio has already been subjected to
                                                   >> 186                                  * throttling rules. Don't do it again. */
                                                   >> 187 #define BIO_TRACE_COMPLETION 10 /* bio_endio() should trace the final completion
                                                   >> 188                                  * of this bio. */
                                                   >> 189 /* See BVEC_POOL_OFFSET below before adding new flags */
123                                                   190 
124 /*                                                191 /*
125  * Flags starting here get preserved by bio_re !! 192  * We support 6 different bvec pools, the last one is magic in that it
126  * BIO_POOL_IDX()                              !! 193  * is backed by a mempool.
                                                   >> 194  */
                                                   >> 195 #define BVEC_POOL_NR            6
                                                   >> 196 #define BVEC_POOL_MAX           (BVEC_POOL_NR - 1)
                                                   >> 197 
                                                   >> 198 /*
                                                   >> 199  * Top 3 bits of bio flags indicate the pool the bvecs came from.  We add
                                                   >> 200  * 1 to the actual index so that 0 indicates that there are no bvecs to be
                                                   >> 201  * freed.
127  */                                               202  */
128 #define BIO_RESET_BITS  13                     !! 203 #define BVEC_POOL_BITS          (3)
129 #define BIO_OWNS_VEC    13      /* bio_free()  !! 204 #define BVEC_POOL_OFFSET        (16 - BVEC_POOL_BITS)
                                                   >> 205 #define BVEC_POOL_IDX(bio)      ((bio)->bi_flags >> BVEC_POOL_OFFSET)
                                                   >> 206 #if (1<< BVEC_POOL_BITS) < (BVEC_POOL_NR+1)
                                                   >> 207 # error "BVEC_POOL_BITS is too small"
                                                   >> 208 #endif
130                                                   209 
131 /*                                                210 /*
132  * top 4 bits of bio flags indicate the pool t !! 211  * Flags starting here get preserved by bio_reset() - this includes
                                                   >> 212  * only BVEC_POOL_IDX()
133  */                                               213  */
134 #define BIO_POOL_BITS           (4)            !! 214 #define BIO_RESET_BITS  BVEC_POOL_OFFSET
135 #define BIO_POOL_NONE           ((1UL << BIO_P << 
136 #define BIO_POOL_OFFSET         (32 - BIO_POOL << 
137 #define BIO_POOL_MASK           (1UL << BIO_PO << 
138 #define BIO_POOL_IDX(bio)       ((bio)->bi_fla << 
139                                                   215 
140 #endif /* CONFIG_BLOCK */                      !! 216 typedef __u32 __bitwise blk_mq_req_flags_t;
141                                                   217 
142 /*                                                218 /*
143  * Request flags.  For use in the cmd_flags fi !! 219  * Operations and flags common to the bio and request structures.
144  * bi_rw of struct bio.  Note that some flags  !! 220  * We use 8 bits for encoding the operation, and the remaining 24 for flags.
                                                   >> 221  *
                                                   >> 222  * The least significant bit of the operation number indicates the data
                                                   >> 223  * transfer direction:
                                                   >> 224  *
                                                   >> 225  *   - if the least significant bit is set transfers are TO the device
                                                   >> 226  *   - if the least significant bit is not set transfers are FROM the device
                                                   >> 227  *
                                                   >>  228  * If an operation does not transfer data the least significant bit has no
                                                   >> 229  * meaning.
145  */                                               230  */
146 enum rq_flag_bits {                            !! 231 #define REQ_OP_BITS     8
147         /* common flags */                     !! 232 #define REQ_OP_MASK     ((1 << REQ_OP_BITS) - 1)
148         __REQ_WRITE,            /* not set, re !! 233 #define REQ_FLAG_BITS   24
149         __REQ_FAILFAST_DEV,     /* no driver r !! 234 
                                                   >> 235 enum req_opf {
                                                   >> 236         /* read sectors from the device */
                                                   >> 237         REQ_OP_READ             = 0,
                                                   >> 238         /* write sectors to the device */
                                                   >> 239         REQ_OP_WRITE            = 1,
                                                   >> 240         /* flush the volatile write cache */
                                                   >> 241         REQ_OP_FLUSH            = 2,
                                                   >> 242         /* discard sectors */
                                                   >> 243         REQ_OP_DISCARD          = 3,
                                                   >> 244         /* get zone information */
                                                   >> 245         REQ_OP_ZONE_REPORT      = 4,
                                                   >> 246         /* securely erase sectors */
                                                   >> 247         REQ_OP_SECURE_ERASE     = 5,
                                                   >>  248         /* reset a zone write pointer */
                                                   >> 249         REQ_OP_ZONE_RESET       = 6,
                                                   >> 250         /* write the same sector many times */
                                                   >> 251         REQ_OP_WRITE_SAME       = 7,
                                                   >> 252         /* write the zero filled sector many times */
                                                   >> 253         REQ_OP_WRITE_ZEROES     = 9,
                                                   >> 254 
                                                   >> 255         /* SCSI passthrough using struct scsi_request */
                                                   >> 256         REQ_OP_SCSI_IN          = 32,
                                                   >> 257         REQ_OP_SCSI_OUT         = 33,
                                                   >> 258         /* Driver private requests */
                                                   >> 259         REQ_OP_DRV_IN           = 34,
                                                   >> 260         REQ_OP_DRV_OUT          = 35,
                                                   >> 261 
                                                   >> 262         REQ_OP_LAST,
                                                   >> 263 };
                                                   >> 264 
                                                   >> 265 enum req_flag_bits {
                                                   >> 266         __REQ_FAILFAST_DEV =    /* no driver retries of device errors */
                                                   >> 267                 REQ_OP_BITS,
150         __REQ_FAILFAST_TRANSPORT, /* no driver    268         __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
151         __REQ_FAILFAST_DRIVER,  /* no driver r    269         __REQ_FAILFAST_DRIVER,  /* no driver retries of driver errors */
152                                                << 
153         __REQ_SYNC,             /* request is     270         __REQ_SYNC,             /* request is sync (sync write or read) */
154         __REQ_META,             /* metadata io    271         __REQ_META,             /* metadata io request */
155         __REQ_PRIO,             /* boost prior    272         __REQ_PRIO,             /* boost priority in cfq */
156         __REQ_DISCARD,          /* request to  !! 273         __REQ_NOMERGE,          /* don't touch this for merging */
157         __REQ_SECURE,           /* secure disc !! 274         __REQ_IDLE,             /* anticipate more IO after this one */
158         __REQ_WRITE_SAME,       /* write same  << 
159                                                << 
160         __REQ_NOIDLE,           /* don't antic << 
161         __REQ_INTEGRITY,        /* I/O include    275         __REQ_INTEGRITY,        /* I/O includes block integrity payload */
162         __REQ_FUA,              /* forced unit    276         __REQ_FUA,              /* forced unit access */
163         __REQ_FLUSH,            /* request for !! 277         __REQ_PREFLUSH,         /* request for cache flush */
164                                                << 
165         /* bio only flags */                   << 
166         __REQ_RAHEAD,           /* read ahead,    278         __REQ_RAHEAD,           /* read ahead, can fail anytime */
167         __REQ_THROTTLED,        /* This bio ha !! 279         __REQ_BACKGROUND,       /* background IO */
168                                  * throttling  !! 280         __REQ_NOWAIT,           /* Don't wait if request will block */
                                                   >> 281 
                                                   >> 282         /* command specific flags for REQ_OP_WRITE_ZEROES: */
                                                   >> 283         __REQ_NOUNMAP,          /* do not free blocks when zeroing */
                                                   >> 284 
                                                   >> 285         /* for driver use */
                                                   >> 286         __REQ_DRV,
169                                                   287 
170         /* request only flags */               << 
171         __REQ_SORTED,           /* elevator kn << 
172         __REQ_SOFTBARRIER,      /* may not be  << 
173         __REQ_NOMERGE,          /* don't touch << 
174         __REQ_STARTED,          /* drive alrea << 
175         __REQ_DONTPREP,         /* don't call  << 
176         __REQ_QUEUED,           /* uses queuei << 
177         __REQ_ELVPRIV,          /* elevator pr << 
178         __REQ_FAILED,           /* set if the  << 
179         __REQ_QUIET,            /* don't worry << 
180         __REQ_PREEMPT,          /* set for "id << 
181                                    for request << 
182                                    state must  << 
183         __REQ_ALLOCED,          /* request cam << 
184         __REQ_COPY_USER,        /* contains co << 
185         __REQ_FLUSH_SEQ,        /* request for << 
186         __REQ_IO_STAT,          /* account I/O << 
187         __REQ_MIXED_MERGE,      /* merge of di << 
188         __REQ_PM,               /* runtime pm  << 
189         __REQ_HASHED,           /* on IO sched << 
190         __REQ_MQ_INFLIGHT,      /* track infli << 
191         __REQ_NR_BITS,          /* stops here     288         __REQ_NR_BITS,          /* stops here */
192 };                                                289 };
193                                                   290 
194 #define REQ_WRITE               (1ULL << __REQ << 
195 #define REQ_FAILFAST_DEV        (1ULL << __REQ    291 #define REQ_FAILFAST_DEV        (1ULL << __REQ_FAILFAST_DEV)
196 #define REQ_FAILFAST_TRANSPORT  (1ULL << __REQ    292 #define REQ_FAILFAST_TRANSPORT  (1ULL << __REQ_FAILFAST_TRANSPORT)
197 #define REQ_FAILFAST_DRIVER     (1ULL << __REQ    293 #define REQ_FAILFAST_DRIVER     (1ULL << __REQ_FAILFAST_DRIVER)
198 #define REQ_SYNC                (1ULL << __REQ    294 #define REQ_SYNC                (1ULL << __REQ_SYNC)
199 #define REQ_META                (1ULL << __REQ    295 #define REQ_META                (1ULL << __REQ_META)
200 #define REQ_PRIO                (1ULL << __REQ    296 #define REQ_PRIO                (1ULL << __REQ_PRIO)
201 #define REQ_DISCARD             (1ULL << __REQ !! 297 #define REQ_NOMERGE             (1ULL << __REQ_NOMERGE)
202 #define REQ_WRITE_SAME          (1ULL << __REQ !! 298 #define REQ_IDLE                (1ULL << __REQ_IDLE)
203 #define REQ_NOIDLE              (1ULL << __REQ << 
204 #define REQ_INTEGRITY           (1ULL << __REQ    299 #define REQ_INTEGRITY           (1ULL << __REQ_INTEGRITY)
                                                   >> 300 #define REQ_FUA                 (1ULL << __REQ_FUA)
                                                   >> 301 #define REQ_PREFLUSH            (1ULL << __REQ_PREFLUSH)
                                                   >> 302 #define REQ_RAHEAD              (1ULL << __REQ_RAHEAD)
                                                   >> 303 #define REQ_BACKGROUND          (1ULL << __REQ_BACKGROUND)
                                                   >> 304 #define REQ_NOWAIT              (1ULL << __REQ_NOWAIT)
                                                   >> 305 
                                                   >> 306 #define REQ_NOUNMAP             (1ULL << __REQ_NOUNMAP)
                                                   >> 307 
                                                   >> 308 #define REQ_DRV                 (1ULL << __REQ_DRV)
205                                                   309 
206 #define REQ_FAILFAST_MASK \                       310 #define REQ_FAILFAST_MASK \
207         (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANS    311         (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
208 #define REQ_COMMON_MASK \                      << 
209         (REQ_WRITE | REQ_FAILFAST_MASK | REQ_S << 
210          REQ_DISCARD | REQ_WRITE_SAME | REQ_NO << 
211          REQ_SECURE | REQ_INTEGRITY)           << 
212 #define REQ_CLONE_MASK          REQ_COMMON_MAS << 
213                                                   312 
214 #define BIO_NO_ADVANCE_ITER_MASK        (REQ_D << 
215                                                << 
216 /* This mask is used for both bio and request  << 
217 #define REQ_NOMERGE_FLAGS \                       313 #define REQ_NOMERGE_FLAGS \
218         (REQ_NOMERGE | REQ_STARTED | REQ_SOFTB !! 314         (REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)
219                                                   315 
220 #define REQ_RAHEAD              (1ULL << __REQ !! 316 #define bio_op(bio) \
221 #define REQ_THROTTLED           (1ULL << __REQ !! 317         ((bio)->bi_opf & REQ_OP_MASK)
                                                   >> 318 #define req_op(req) \
                                                   >> 319         ((req)->cmd_flags & REQ_OP_MASK)
                                                   >> 320 
                                                   >> 321 /* obsolete, don't use in new code */
                                                   >> 322 static inline void bio_set_op_attrs(struct bio *bio, unsigned op,
                                                   >> 323                 unsigned op_flags)
                                                   >> 324 {
                                                   >> 325         bio->bi_opf = op | op_flags;
                                                   >> 326 }
222                                                   327 
223 #define REQ_SORTED              (1ULL << __REQ !! 328 static inline bool op_is_write(unsigned int op)
224 #define REQ_SOFTBARRIER         (1ULL << __REQ !! 329 {
225 #define REQ_FUA                 (1ULL << __REQ !! 330         return (op & 1);
226 #define REQ_NOMERGE             (1ULL << __REQ !! 331 }
227 #define REQ_STARTED             (1ULL << __REQ !! 332 
228 #define REQ_DONTPREP            (1ULL << __REQ !! 333 /*
229 #define REQ_QUEUED              (1ULL << __REQ !! 334  * Check if the bio or request is one that needs special treatment in the
230 #define REQ_ELVPRIV             (1ULL << __REQ !! 335  * flush state machine.
231 #define REQ_FAILED              (1ULL << __REQ !! 336  */
232 #define REQ_QUIET               (1ULL << __REQ !! 337 static inline bool op_is_flush(unsigned int op)
233 #define REQ_PREEMPT             (1ULL << __REQ !! 338 {
234 #define REQ_ALLOCED             (1ULL << __REQ !! 339         return op & (REQ_FUA | REQ_PREFLUSH);
235 #define REQ_COPY_USER           (1ULL << __REQ !! 340 }
236 #define REQ_FLUSH               (1ULL << __REQ !! 341 
237 #define REQ_FLUSH_SEQ           (1ULL << __REQ !! 342 /*
238 #define REQ_IO_STAT             (1ULL << __REQ !! 343  * Reads are always treated as synchronous, as are requests with the FUA or
239 #define REQ_MIXED_MERGE         (1ULL << __REQ !! 344  * PREFLUSH flag.  Other operations may be marked as synchronous using the
240 #define REQ_SECURE              (1ULL << __REQ !! 345  * REQ_SYNC flag.
241 #define REQ_PM                  (1ULL << __REQ !! 346  */
242 #define REQ_HASHED              (1ULL << __REQ !! 347 static inline bool op_is_sync(unsigned int op)
243 #define REQ_MQ_INFLIGHT         (1ULL << __REQ !! 348 {
                                                   >> 349         return (op & REQ_OP_MASK) == REQ_OP_READ ||
                                                   >> 350                 (op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
                                                   >> 351 }
244                                                   352 
245 typedef unsigned int blk_qc_t;                    353 typedef unsigned int blk_qc_t;
246 #define BLK_QC_T_NONE   -1U                    !! 354 #define BLK_QC_T_NONE           -1U
247 #define BLK_QC_T_SHIFT  16                     !! 355 #define BLK_QC_T_SHIFT          16
                                                   >> 356 #define BLK_QC_T_INTERNAL       (1U << 31)
248                                                   357 
249 static inline bool blk_qc_t_valid(blk_qc_t coo    358 static inline bool blk_qc_t_valid(blk_qc_t cookie)
250 {                                                 359 {
251         return cookie != BLK_QC_T_NONE;           360         return cookie != BLK_QC_T_NONE;
252 }                                                 361 }
253                                                   362 
254 static inline blk_qc_t blk_tag_to_qc_t(unsigne !! 363 static inline blk_qc_t blk_tag_to_qc_t(unsigned int tag, unsigned int queue_num,
                                                   >> 364                                        bool internal)
255 {                                                 365 {
256         return tag | (queue_num << BLK_QC_T_SH !! 366         blk_qc_t ret = tag | (queue_num << BLK_QC_T_SHIFT);
                                                   >> 367 
                                                   >> 368         if (internal)
                                                   >> 369                 ret |= BLK_QC_T_INTERNAL;
                                                   >> 370 
                                                   >> 371         return ret;
257 }                                                 372 }
258                                                   373 
259 static inline unsigned int blk_qc_t_to_queue_n    374 static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
260 {                                                 375 {
261         return cookie >> BLK_QC_T_SHIFT;       !! 376         return (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT;
262 }                                                 377 }
263                                                   378 
264 static inline unsigned int blk_qc_t_to_tag(blk    379 static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
265 {                                                 380 {
266         return cookie & ((1u << BLK_QC_T_SHIFT    381         return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
267 }                                                 382 }
                                                   >> 383 
                                                   >> 384 static inline bool blk_qc_t_is_internal(blk_qc_t cookie)
                                                   >> 385 {
                                                   >> 386         return (cookie & BLK_QC_T_INTERNAL) != 0;
                                                   >> 387 }
                                                   >> 388 
                                                   >> 389 struct blk_rq_stat {
                                                   >> 390         u64 mean;
                                                   >> 391         u64 min;
                                                   >> 392         u64 max;
                                                   >> 393         u32 nr_samples;
                                                   >> 394         u64 batch;
                                                   >> 395 };
268                                                   396 
269 #endif /* __LINUX_BLK_TYPES_H */                  397 #endif /* __LINUX_BLK_TYPES_H */
270                                                   398 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | Wiki (Japanese) | Wiki (English) | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

osdn.jp