
TOMOYO Linux Cross Reference
Linux/block/bio.c

  1 /*
  2  * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
  3  *
  4  * This program is free software; you can redistribute it and/or modify
  5  * it under the terms of the GNU General Public License version 2 as
  6  * published by the Free Software Foundation.
  7  *
  8  * This program is distributed in the hope that it will be useful,
  9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
 10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 11  * GNU General Public License for more details.
 12  *
 13  * You should have received a copy of the GNU General Public License
 14  * along with this program; if not, write to the Free Software
 15  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.
 16  *
 17  */
 18 #include <linux/mm.h>
 19 #include <linux/swap.h>
 20 #include <linux/bio.h>
 21 #include <linux/blkdev.h>
 22 #include <linux/uio.h>
 23 #include <linux/iocontext.h>
 24 #include <linux/slab.h>
 25 #include <linux/init.h>
 26 #include <linux/kernel.h>
 27 #include <linux/export.h>
 28 #include <linux/mempool.h>
 29 #include <linux/workqueue.h>
 30 #include <linux/cgroup.h>
 31 
 32 #include <trace/events/block.h>
 33 #include "blk.h"
 34 
 35 /*
 36  * Test patch to inline a certain number of bi_io_vecs inside the bio
 37  * itself, to shrink a bio data allocation from two mempool calls to one
 38  */
 39 #define BIO_INLINE_VECS         4
 40 
 41 /*
 42  * if you change this list, also change bvec_alloc or things will
 43  * break badly! cannot be bigger than what you can fit into an
 44  * unsigned short
 45  */
 46 #define BV(x, n) { .nr_vecs = x, .name = "biovec-"#n }
 47 static struct biovec_slab bvec_slabs[BVEC_POOL_NR] __read_mostly = {
 48         BV(1, 1), BV(4, 4), BV(16, 16), BV(64, 64), BV(128, 128), BV(BIO_MAX_PAGES, max),
 49 };
 50 #undef BV
 51 
 52 /*
 53  * fs_bio_set is the bio_set containing bio and iovec memory pools used by
 54  * IO code that does not need private memory pools.
 55  */
 56 struct bio_set *fs_bio_set;
 57 EXPORT_SYMBOL(fs_bio_set);
 58 
 59 /*
 60  * Our slab pool management
 61  */
 62 struct bio_slab {
 63         struct kmem_cache *slab;
 64         unsigned int slab_ref;
 65         unsigned int slab_size;
 66         char name[8];
 67 };
 68 static DEFINE_MUTEX(bio_slab_lock);
 69 static struct bio_slab *bio_slabs;
 70 static unsigned int bio_slab_nr, bio_slab_max;
 71 
 72 static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
 73 {
 74         unsigned int sz = sizeof(struct bio) + extra_size;
 75         struct kmem_cache *slab = NULL;
 76         struct bio_slab *bslab, *new_bio_slabs;
 77         unsigned int new_bio_slab_max;
 78         unsigned int i, entry = -1;
 79 
 80         mutex_lock(&bio_slab_lock);
 81 
 82         i = 0;
 83         while (i < bio_slab_nr) {
 84                 bslab = &bio_slabs[i];
 85 
 86                 if (!bslab->slab && entry == -1)
 87                         entry = i;
 88                 else if (bslab->slab_size == sz) {
 89                         slab = bslab->slab;
 90                         bslab->slab_ref++;
 91                         break;
 92                 }
 93                 i++;
 94         }
 95 
 96         if (slab)
 97                 goto out_unlock;
 98 
 99         if (bio_slab_nr == bio_slab_max && entry == -1) {
100                 new_bio_slab_max = bio_slab_max << 1;
101                 new_bio_slabs = krealloc(bio_slabs,
102                                          new_bio_slab_max * sizeof(struct bio_slab),
103                                          GFP_KERNEL);
104                 if (!new_bio_slabs)
105                         goto out_unlock;
106                 bio_slab_max = new_bio_slab_max;
107                 bio_slabs = new_bio_slabs;
108         }
109         if (entry == -1)
110                 entry = bio_slab_nr++;
111 
112         bslab = &bio_slabs[entry];
113 
114         snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
115         slab = kmem_cache_create(bslab->name, sz, ARCH_KMALLOC_MINALIGN,
116                                  SLAB_HWCACHE_ALIGN, NULL);
117         if (!slab)
118                 goto out_unlock;
119 
120         bslab->slab = slab;
121         bslab->slab_ref = 1;
122         bslab->slab_size = sz;
123 out_unlock:
124         mutex_unlock(&bio_slab_lock);
125         return slab;
126 }
127 
128 static void bio_put_slab(struct bio_set *bs)
129 {
130         struct bio_slab *bslab = NULL;
131         unsigned int i;
132 
133         mutex_lock(&bio_slab_lock);
134 
135         for (i = 0; i < bio_slab_nr; i++) {
136                 if (bs->bio_slab == bio_slabs[i].slab) {
137                         bslab = &bio_slabs[i];
138                         break;
139                 }
140         }
141 
142         if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
143                 goto out;
144 
145         WARN_ON(!bslab->slab_ref);
146 
147         if (--bslab->slab_ref)
148                 goto out;
149 
150         kmem_cache_destroy(bslab->slab);
151         bslab->slab = NULL;
152 
153 out:
154         mutex_unlock(&bio_slab_lock);
155 }
156 
157 unsigned int bvec_nr_vecs(unsigned short idx)
158 {
159         return bvec_slabs[idx].nr_vecs;
160 }
161 
162 void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
163 {
164         if (!idx)
165                 return;
166         idx--;
167 
168         BIO_BUG_ON(idx >= BVEC_POOL_NR);
169 
170         if (idx == BVEC_POOL_MAX) {
171                 mempool_free(bv, pool);
172         } else {
173                 struct biovec_slab *bvs = bvec_slabs + idx;
174 
175                 kmem_cache_free(bvs->slab, bv);
176         }
177 }
178 
179 struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
180                            mempool_t *pool)
181 {
182         struct bio_vec *bvl;
183 
184         /*
185          * see the comment above the bvec_slabs[] definition!
186          */
187         switch (nr) {
188         case 1:
189                 *idx = 0;
190                 break;
191         case 2 ... 4:
192                 *idx = 1;
193                 break;
194         case 5 ... 16:
195                 *idx = 2;
196                 break;
197         case 17 ... 64:
198                 *idx = 3;
199                 break;
200         case 65 ... 128:
201                 *idx = 4;
202                 break;
203         case 129 ... BIO_MAX_PAGES:
204                 *idx = 5;
205                 break;
206         default:
207                 return NULL;
208         }
209 
210         /*
211          * idx now points to the pool we want to allocate from. only the
212          * 1-vec entry pool is mempool backed.
213          */
214         if (*idx == BVEC_POOL_MAX) {
215 fallback:
216                 bvl = mempool_alloc(pool, gfp_mask);
217         } else {
218                 struct biovec_slab *bvs = bvec_slabs + *idx;
219                 gfp_t __gfp_mask = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_IO);
220 
221                 /*
222                  * Make this allocation restricted and don't dump info on
223                  * allocation failures, since we'll fallback to the mempool
224                  * in case of failure.
225                  */
226                 __gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
227 
228                 /*
229                  * Try a slab allocation. If this fails and __GFP_DIRECT_RECLAIM
230                  * is set, retry with the 1-entry mempool
231                  */
232                 bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
233                 if (unlikely(!bvl && (gfp_mask & __GFP_DIRECT_RECLAIM))) {
234                         *idx = BVEC_POOL_MAX;
235                         goto fallback;
236                 }
237         }
238 
239         (*idx)++;
240         return bvl;
241 }
242 
243 void bio_uninit(struct bio *bio)
244 {
245         bio_disassociate_task(bio);
246 }
247 EXPORT_SYMBOL(bio_uninit);
248 
249 static void bio_free(struct bio *bio)
250 {
251         struct bio_set *bs = bio->bi_pool;
252         void *p;
253 
254         bio_uninit(bio);
255 
256         if (bs) {
257                 bvec_free(bs->bvec_pool, bio->bi_io_vec, BVEC_POOL_IDX(bio));
258 
259                 /*
260                  * If we have front padding, adjust the bio pointer before freeing
261                  */
262                 p = bio;
263                 p -= bs->front_pad;
264 
265                 mempool_free(p, bs->bio_pool);
266         } else {
267                 /* Bio was allocated by bio_kmalloc() */
268                 kfree(bio);
269         }
270 }
271 
272 /*
273  * Users of this function have their own bio allocation. Subsequently,
274  * they must remember to pair any call to bio_init() with bio_uninit()
275  * when IO has completed, or when the bio is released.
276  */
277 void bio_init(struct bio *bio, struct bio_vec *table,
278               unsigned short max_vecs)
279 {
280         memset(bio, 0, sizeof(*bio));
281         atomic_set(&bio->__bi_remaining, 1);
282         atomic_set(&bio->__bi_cnt, 1);
283 
284         bio->bi_io_vec = table;
285         bio->bi_max_vecs = max_vecs;
286 }
287 EXPORT_SYMBOL(bio_init);
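
/*
 * Editor's annotation - illustrative sketch, not part of the kernel
 * source: a driver embedding a caller-owned bio (plus one inline
 * biovec) in its own structure, pairing bio_init() with bio_uninit()
 * as required above.  All "example_*" names are hypothetical.
 */
struct example_request {
	struct bio	bio;
	struct bio_vec	inline_vec;	/* table handed to bio_init() */
};

static void example_request_init(struct example_request *req)
{
	bio_init(&req->bio, &req->inline_vec, 1);
}

static void example_request_teardown(struct example_request *req)
{
	bio_uninit(&req->bio);		/* pairs with bio_init() */
}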
288 
289 /**
290  * bio_reset - reinitialize a bio
291  * @bio:        bio to reset
292  *
293  * Description:
294  *   After calling bio_reset(), @bio will be in the same state as a freshly
 295  *   allocated bio returned by bio_alloc_bioset() - the only fields that are
296  *   preserved are the ones that are initialized by bio_alloc_bioset(). See
297  *   comment in struct bio.
298  */
299 void bio_reset(struct bio *bio)
300 {
301         unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);
302 
303         bio_uninit(bio);
304 
305         memset(bio, 0, BIO_RESET_BYTES);
306         bio->bi_flags = flags;
307         atomic_set(&bio->__bi_remaining, 1);
308 }
309 EXPORT_SYMBOL(bio_reset);
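
/*
 * Editor's annotation - illustrative sketch, not part of the kernel
 * source: bio_reset() lets a long-lived bio be reused for repeated I/O.
 * Assumes this era's bio_set_dev()/submit_bio_wait() and a bio that was
 * set up with at least one biovec (e.g. via bio_init() above); the
 * helper name is hypothetical.
 */
static int example_reread_page(struct bio *bio, struct block_device *bdev,
			       struct page *page, sector_t sector)
{
	bio_reset(bio);			/* back to freshly-initialized state */
	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_opf = REQ_OP_READ;
	bio_add_page(bio, page, PAGE_SIZE, 0);

	return submit_bio_wait(bio);
}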
310 
311 static struct bio *__bio_chain_endio(struct bio *bio)
312 {
313         struct bio *parent = bio->bi_private;
314 
315         if (!parent->bi_status)
316                 parent->bi_status = bio->bi_status;
317         bio_put(bio);
318         return parent;
319 }
320 
321 static void bio_chain_endio(struct bio *bio)
322 {
323         bio_endio(__bio_chain_endio(bio));
324 }
325 
326 /**
327  * bio_chain - chain bio completions
328  * @bio: the target bio
329  * @parent: the @bio's parent bio
330  *
331  * The caller won't have a bi_end_io called when @bio completes - instead,
332  * @parent's bi_end_io won't be called until both @parent and @bio have
333  * completed; the chained bio will also be freed when it completes.
334  *
335  * The caller must not set bi_private or bi_end_io in @bio.
336  */
337 void bio_chain(struct bio *bio, struct bio *parent)
338 {
339         BUG_ON(bio->bi_private || bio->bi_end_io);
340 
341         bio->bi_private = parent;
342         bio->bi_end_io  = bio_chain_endio;
343         bio_inc_remaining(parent);
344 }
345 EXPORT_SYMBOL(bio_chain);
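
/*
 * Editor's annotation - illustrative sketch, not part of the kernel
 * source: chaining an empty flush bio to a parent so the parent's
 * ->bi_end_io runs only after both complete.  Per the rules above the
 * chained bio must not set bi_private or bi_end_io itself.
 * Hypothetical helper; assumes this era's bio_set_dev().
 */
static void example_chain_flush(struct bio *parent, struct block_device *bdev)
{
	struct bio *flush = bio_alloc(GFP_NOIO, 0);	/* fs_bio_set backed */

	bio_set_dev(flush, bdev);
	flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
	bio_chain(flush, parent);	/* parent now also waits on flush */
	submit_bio(flush);
	/* parent is submitted separately by the caller */
}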
346 
347 static void bio_alloc_rescue(struct work_struct *work)
348 {
349         struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
350         struct bio *bio;
351 
352         while (1) {
353                 spin_lock(&bs->rescue_lock);
354                 bio = bio_list_pop(&bs->rescue_list);
355                 spin_unlock(&bs->rescue_lock);
356 
357                 if (!bio)
358                         break;
359 
360                 generic_make_request(bio);
361         }
362 }
363 
364 static void punt_bios_to_rescuer(struct bio_set *bs)
365 {
366         struct bio_list punt, nopunt;
367         struct bio *bio;
368 
369         if (WARN_ON_ONCE(!bs->rescue_workqueue))
370                 return;
371         /*
372          * In order to guarantee forward progress we must punt only bios that
373          * were allocated from this bio_set; otherwise, if there was a bio on
374          * there for a stacking driver higher up in the stack, processing it
375          * could require allocating bios from this bio_set, and doing that from
376          * our own rescuer would be bad.
377          *
378          * Since bio lists are singly linked, pop them all instead of trying to
379          * remove from the middle of the list:
380          */
381 
382         bio_list_init(&punt);
383         bio_list_init(&nopunt);
384 
385         while ((bio = bio_list_pop(&current->bio_list[0])))
386                 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
387         current->bio_list[0] = nopunt;
388 
389         bio_list_init(&nopunt);
390         while ((bio = bio_list_pop(&current->bio_list[1])))
391                 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
392         current->bio_list[1] = nopunt;
393 
394         spin_lock(&bs->rescue_lock);
395         bio_list_merge(&bs->rescue_list, &punt);
396         spin_unlock(&bs->rescue_lock);
397 
398         queue_work(bs->rescue_workqueue, &bs->rescue_work);
399 }
400 
401 /**
402  * bio_alloc_bioset - allocate a bio for I/O
403  * @gfp_mask:   the GFP_* mask given to the slab allocator
404  * @nr_iovecs:  number of iovecs to pre-allocate
405  * @bs:         the bio_set to allocate from.
406  *
407  * Description:
408  *   If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
409  *   backed by the @bs's mempool.
410  *
411  *   When @bs is not NULL, if %__GFP_DIRECT_RECLAIM is set then bio_alloc will
412  *   always be able to allocate a bio. This is due to the mempool guarantees.
413  *   To make this work, callers must never allocate more than 1 bio at a time
414  *   from this pool. Callers that need to allocate more than 1 bio must always
415  *   submit the previously allocated bio for IO before attempting to allocate
416  *   a new one. Failure to do so can cause deadlocks under memory pressure.
417  *
418  *   Note that when running under generic_make_request() (i.e. any block
419  *   driver), bios are not submitted until after you return - see the code in
420  *   generic_make_request() that converts recursion into iteration, to prevent
421  *   stack overflows.
422  *
423  *   This would normally mean allocating multiple bios under
424  *   generic_make_request() would be susceptible to deadlocks, but we have
425  *   deadlock avoidance code that resubmits any blocked bios from a rescuer
426  *   thread.
427  *
428  *   However, we do not guarantee forward progress for allocations from other
429  *   mempools. Doing multiple allocations from the same mempool under
430  *   generic_make_request() should be avoided - instead, use bio_set's front_pad
431  *   for per bio allocations.
432  *
433  *   RETURNS:
434  *   Pointer to new bio on success, NULL on failure.
435  */
436 struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned int nr_iovecs,
437                              struct bio_set *bs)
438 {
439         gfp_t saved_gfp = gfp_mask;
440         unsigned front_pad;
441         unsigned inline_vecs;
442         struct bio_vec *bvl = NULL;
443         struct bio *bio;
444         void *p;
445 
446         if (!bs) {
447                 if (nr_iovecs > UIO_MAXIOV)
448                         return NULL;
449 
450                 p = kmalloc(sizeof(struct bio) +
451                             nr_iovecs * sizeof(struct bio_vec),
452                             gfp_mask);
453                 front_pad = 0;
454                 inline_vecs = nr_iovecs;
455         } else {
456                 /* should not use nobvec bioset for nr_iovecs > 0 */
457                 if (WARN_ON_ONCE(!bs->bvec_pool && nr_iovecs > 0))
458                         return NULL;
459                 /*
460                  * generic_make_request() converts recursion to iteration; this
461                  * means if we're running beneath it, any bios we allocate and
462                  * submit will not be submitted (and thus freed) until after we
463                  * return.
464                  *
465                  * This exposes us to a potential deadlock if we allocate
466                  * multiple bios from the same bio_set() while running
467                  * underneath generic_make_request(). If we were to allocate
468                  * multiple bios (say a stacking block driver that was splitting
469                  * bios), we would deadlock if we exhausted the mempool's
470                  * reserve.
471                  *
472                  * We solve this, and guarantee forward progress, with a rescuer
473                  * workqueue per bio_set. If we go to allocate and there are
474                  * bios on current->bio_list, we first try the allocation
475                  * without __GFP_DIRECT_RECLAIM; if that fails, we punt those
476                  * bios we would be blocking to the rescuer workqueue before
477                  * we retry with the original gfp_flags.
478                  */
479 
480                 if (current->bio_list &&
481                     (!bio_list_empty(&current->bio_list[0]) ||
482                      !bio_list_empty(&current->bio_list[1])) &&
483                     bs->rescue_workqueue)
484                         gfp_mask &= ~__GFP_DIRECT_RECLAIM;
485 
486                 p = mempool_alloc(bs->bio_pool, gfp_mask);
487                 if (!p && gfp_mask != saved_gfp) {
488                         punt_bios_to_rescuer(bs);
489                         gfp_mask = saved_gfp;
490                         p = mempool_alloc(bs->bio_pool, gfp_mask);
491                 }
492 
493                 front_pad = bs->front_pad;
494                 inline_vecs = BIO_INLINE_VECS;
495         }
496 
497         if (unlikely(!p))
498                 return NULL;
499 
500         bio = p + front_pad;
501         bio_init(bio, NULL, 0);
502 
503         if (nr_iovecs > inline_vecs) {
504                 unsigned long idx = 0;
505 
506                 bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
507                 if (!bvl && gfp_mask != saved_gfp) {
508                         punt_bios_to_rescuer(bs);
509                         gfp_mask = saved_gfp;
510                         bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
511                 }
512 
513                 if (unlikely(!bvl))
514                         goto err_free;
515 
516                 bio->bi_flags |= idx << BVEC_POOL_OFFSET;
517         } else if (nr_iovecs) {
518                 bvl = bio->bi_inline_vecs;
519         }
520 
521         bio->bi_pool = bs;
522         bio->bi_max_vecs = nr_iovecs;
523         bio->bi_io_vec = bvl;
524         return bio;
525 
526 err_free:
527         mempool_free(p, bs->bio_pool);
528         return NULL;
529 }
530 EXPORT_SYMBOL(bio_alloc_bioset);
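
/*
 * Editor's annotation - illustrative sketch, not part of the kernel
 * source: the submit-before-next-alloc rule above in action.  A
 * splitting driver allocates one bio at a time from its private bio_set
 * (here the hypothetical "my_bio_set") and submits it before the next
 * bio_split() allocation, so the mempool guarantee holds.
 */
static void example_split_and_submit(struct bio *bio, unsigned int max_sectors,
				     struct bio_set *my_bio_set)
{
	while (bio_sectors(bio) > max_sectors) {
		struct bio *split = bio_split(bio, max_sectors, GFP_NOIO,
					      my_bio_set);

		bio_chain(split, bio);		/* bio completes after split */
		generic_make_request(split);	/* submit before next alloc */
	}
	generic_make_request(bio);		/* the remainder */
}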
531 
532 void zero_fill_bio(struct bio *bio)
533 {
534         unsigned long flags;
535         struct bio_vec bv;
536         struct bvec_iter iter;
537 
538         bio_for_each_segment(bv, bio, iter) {
539                 char *data = bvec_kmap_irq(&bv, &flags);
540                 memset(data, 0, bv.bv_len);
541                 flush_dcache_page(bv.bv_page);
542                 bvec_kunmap_irq(data, &flags);
543         }
544 }
545 EXPORT_SYMBOL(zero_fill_bio);
546 
547 /**
548  * bio_put - release a reference to a bio
549  * @bio:   bio to release reference to
550  *
551  * Description:
552  *   Put a reference to a &struct bio, either one you have gotten with
553  *   bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it.
554  **/
555 void bio_put(struct bio *bio)
556 {
557         if (!bio_flagged(bio, BIO_REFFED))
558                 bio_free(bio);
559         else {
560                 BIO_BUG_ON(!atomic_read(&bio->__bi_cnt));
561 
562                 /*
563                  * last put frees it
564                  */
565                 if (atomic_dec_and_test(&bio->__bi_cnt))
566                         bio_free(bio);
567         }
568 }
569 EXPORT_SYMBOL(bio_put);
570 
571 inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
572 {
573         if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
574                 blk_recount_segments(q, bio);
575 
576         return bio->bi_phys_segments;
577 }
578 EXPORT_SYMBOL(bio_phys_segments);
579 
580 /**
581  *      __bio_clone_fast - clone a bio that shares the original bio's biovec
582  *      @bio: destination bio
583  *      @bio_src: bio to clone
584  *
 585  *      Clone a &bio into @bio. Caller will own @bio, but not
 586  *      the actual data it points to. Reference count of @bio
 587  *      will be one.
588  *
589  *      Caller must ensure that @bio_src is not freed before @bio.
590  */
591 void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
592 {
593         BUG_ON(bio->bi_pool && BVEC_POOL_IDX(bio));
594 
595         /*
596          * most users will be overriding ->bi_disk with a new target,
597          * so we don't set nor calculate new physical/hw segment counts here
598          */
599         bio->bi_disk = bio_src->bi_disk;
600         bio->bi_partno = bio_src->bi_partno;
601         bio_set_flag(bio, BIO_CLONED);
602         if (bio_flagged(bio_src, BIO_THROTTLED))
603                 bio_set_flag(bio, BIO_THROTTLED);
604         bio->bi_opf = bio_src->bi_opf;
605         bio->bi_write_hint = bio_src->bi_write_hint;
606         bio->bi_iter = bio_src->bi_iter;
607         bio->bi_io_vec = bio_src->bi_io_vec;
608 
609         bio_clone_blkcg_association(bio, bio_src);
610 }
611 EXPORT_SYMBOL(__bio_clone_fast);
612 
613 /**
614  *      bio_clone_fast - clone a bio that shares the original bio's biovec
615  *      @bio: bio to clone
616  *      @gfp_mask: allocation priority
617  *      @bs: bio_set to allocate from
618  *
619  *      Like __bio_clone_fast, only also allocates the returned bio
620  */
621 struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
622 {
623         struct bio *b;
624 
625         b = bio_alloc_bioset(gfp_mask, 0, bs);
626         if (!b)
627                 return NULL;
628 
629         __bio_clone_fast(b, bio);
630 
631         if (bio_integrity(bio)) {
632                 int ret;
633 
634                 ret = bio_integrity_clone(b, bio, gfp_mask);
635 
636                 if (ret < 0) {
637                         bio_put(b);
638                         return NULL;
639                 }
640         }
641 
642         return b;
643 }
644 EXPORT_SYMBOL(bio_clone_fast);
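
/*
 * Editor's annotation - illustrative sketch, not part of the kernel
 * source: a stacking driver redirecting I/O by cloning the incoming bio
 * (sharing its biovec) and completing the original from the clone's
 * endio.  "clone_set" is a hypothetical private bio_set.
 */
static void example_clone_endio(struct bio *clone)
{
	struct bio *orig = clone->bi_private;

	orig->bi_status = clone->bi_status;
	bio_put(clone);
	bio_endio(orig);
}

static struct bio *example_redirect(struct bio *orig,
				    struct block_device *lower,
				    struct bio_set *clone_set)
{
	struct bio *clone = bio_clone_fast(orig, GFP_NOIO, clone_set);

	if (!clone)
		return NULL;

	bio_set_dev(clone, lower);
	clone->bi_private = orig;
	clone->bi_end_io = example_clone_endio;
	return clone;			/* caller submits the clone */
}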
645 
646 /**
647  *      bio_clone_bioset - clone a bio
648  *      @bio_src: bio to clone
649  *      @gfp_mask: allocation priority
650  *      @bs: bio_set to allocate from
651  *
652  *      Clone bio. Caller will own the returned bio, but not the actual data it
653  *      points to. Reference count of returned bio will be one.
654  */
655 struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
656                              struct bio_set *bs)
657 {
658         struct bvec_iter iter;
659         struct bio_vec bv;
660         struct bio *bio;
661 
662         /*
663          * Pre immutable biovecs, __bio_clone() used to just do a memcpy from
664          * bio_src->bi_io_vec to bio->bi_io_vec.
665          *
666          * We can't do that anymore, because:
667          *
668          *  - The point of cloning the biovec is to produce a bio with a biovec
669          *    the caller can modify: bi_idx and bi_bvec_done should be 0.
670          *
671          *  - The original bio could've had more than BIO_MAX_PAGES biovecs; if
672          *    we tried to clone the whole thing bio_alloc_bioset() would fail.
673          *    But the clone should succeed as long as the number of biovecs we
674          *    actually need to allocate is fewer than BIO_MAX_PAGES.
675          *
676          *  - Lastly, bi_vcnt should not be looked at or relied upon by code
677          *    that does not own the bio - reason being drivers don't use it for
678          *    iterating over the biovec anymore, so expecting it to be kept up
679          *    to date (i.e. for clones that share the parent biovec) is just
680          *    asking for trouble and would force extra work on
681          *    __bio_clone_fast() anyways.
682          */
683 
684         bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
685         if (!bio)
686                 return NULL;
687         bio->bi_disk            = bio_src->bi_disk;
688         bio->bi_opf             = bio_src->bi_opf;
689         bio->bi_write_hint      = bio_src->bi_write_hint;
690         bio->bi_iter.bi_sector  = bio_src->bi_iter.bi_sector;
691         bio->bi_iter.bi_size    = bio_src->bi_iter.bi_size;
692 
693         switch (bio_op(bio)) {
694         case REQ_OP_DISCARD:
695         case REQ_OP_SECURE_ERASE:
696         case REQ_OP_WRITE_ZEROES:
697                 break;
698         case REQ_OP_WRITE_SAME:
699                 bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
700                 break;
701         default:
702                 bio_for_each_segment(bv, bio_src, iter)
703                         bio->bi_io_vec[bio->bi_vcnt++] = bv;
704                 break;
705         }
706 
707         if (bio_integrity(bio_src)) {
708                 int ret;
709 
710                 ret = bio_integrity_clone(bio, bio_src, gfp_mask);
711                 if (ret < 0) {
712                         bio_put(bio);
713                         return NULL;
714                 }
715         }
716 
717         bio_clone_blkcg_association(bio, bio_src);
718 
719         return bio;
720 }
721 EXPORT_SYMBOL(bio_clone_bioset);
722 
723 /**
724  *      bio_add_pc_page -       attempt to add page to bio
725  *      @q: the target queue
726  *      @bio: destination bio
727  *      @page: page to add
728  *      @len: vec entry length
729  *      @offset: vec entry offset
730  *
731  *      Attempt to add a page to the bio_vec maplist. This can fail for a
732  *      number of reasons, such as the bio being full or target block device
 733  *      limitations. The target block device must allow bios up to PAGE_SIZE,
734  *      so it is always possible to add a single page to an empty bio.
735  *
736  *      This should only be used by REQ_PC bios.
737  */
738 int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
739                     *page, unsigned int len, unsigned int offset)
740 {
741         int retried_segments = 0;
742         struct bio_vec *bvec;
743 
744         /*
745          * cloned bio must not modify vec list
746          */
747         if (unlikely(bio_flagged(bio, BIO_CLONED)))
748                 return 0;
749 
750         if (((bio->bi_iter.bi_size + len) >> 9) > queue_max_hw_sectors(q))
751                 return 0;
752 
753         /*
754          * For filesystems with a blocksize smaller than the pagesize
755          * we will often be called with the same page as last time and
756          * a consecutive offset.  Optimize this special case.
757          */
758         if (bio->bi_vcnt > 0) {
759                 struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
760 
761                 if (page == prev->bv_page &&
762                     offset == prev->bv_offset + prev->bv_len) {
763                         prev->bv_len += len;
764                         bio->bi_iter.bi_size += len;
765                         goto done;
766                 }
767 
768                 /*
769                  * If the queue doesn't support SG gaps and adding this
770                  * offset would create a gap, disallow it.
771                  */
772                 if (bvec_gap_to_prev(q, prev, offset))
773                         return 0;
774         }
775 
776         if (bio->bi_vcnt >= bio->bi_max_vecs)
777                 return 0;
778 
779         /*
780          * setup the new entry, we might clear it again later if we
781          * cannot add the page
782          */
783         bvec = &bio->bi_io_vec[bio->bi_vcnt];
784         bvec->bv_page = page;
785         bvec->bv_len = len;
786         bvec->bv_offset = offset;
787         bio->bi_vcnt++;
788         bio->bi_phys_segments++;
789         bio->bi_iter.bi_size += len;
790 
791         /*
792          * Perform a recount if the number of segments is greater
793          * than queue_max_segments(q).
794          */
795 
796         while (bio->bi_phys_segments > queue_max_segments(q)) {
797 
798                 if (retried_segments)
799                         goto failed;
800 
801                 retried_segments = 1;
802                 blk_recount_segments(q, bio);
803         }
804 
805         /* If we may be able to merge these biovecs, force a recount */
806         if (bio->bi_vcnt > 1 && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec)))
807                 bio_clear_flag(bio, BIO_SEG_VALID);
808 
809  done:
810         return len;
811 
812  failed:
813         bvec->bv_page = NULL;
814         bvec->bv_len = 0;
815         bvec->bv_offset = 0;
816         bio->bi_vcnt--;
817         bio->bi_iter.bi_size -= len;
818         blk_recount_segments(q, bio);
819         return 0;
820 }
821 EXPORT_SYMBOL(bio_add_pc_page);
822 
823 /**
824  *      bio_add_page    -       attempt to add page to bio
825  *      @bio: destination bio
826  *      @page: page to add
827  *      @len: vec entry length
828  *      @offset: vec entry offset
829  *
830  *      Attempt to add a page to the bio_vec maplist. This will only fail
831  *      if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
832  */
833 int bio_add_page(struct bio *bio, struct page *page,
834                  unsigned int len, unsigned int offset)
835 {
836         struct bio_vec *bv;
837 
838         /*
839          * cloned bio must not modify vec list
840          */
841         if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
842                 return 0;
843 
844         /*
845          * For filesystems with a blocksize smaller than the pagesize
846          * we will often be called with the same page as last time and
847          * a consecutive offset.  Optimize this special case.
848          */
849         if (bio->bi_vcnt > 0) {
850                 bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
851 
852                 if (page == bv->bv_page &&
853                     offset == bv->bv_offset + bv->bv_len) {
854                         bv->bv_len += len;
855                         goto done;
856                 }
857         }
858 
859         if (bio->bi_vcnt >= bio->bi_max_vecs)
860                 return 0;
861 
862         bv              = &bio->bi_io_vec[bio->bi_vcnt];
863         bv->bv_page     = page;
864         bv->bv_len      = len;
865         bv->bv_offset   = offset;
866 
867         bio->bi_vcnt++;
868 done:
869         bio->bi_iter.bi_size += len;
870         return len;
871 }
872 EXPORT_SYMBOL(bio_add_page);
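
/*
 * Editor's annotation - illustrative sketch, not part of the kernel
 * source: filling a bio page by page; bio_add_page() returns the length
 * added, so anything short of PAGE_SIZE here means the bio is full
 * (bi_vcnt == bi_max_vecs).  Hypothetical helper.
 */
static unsigned int example_fill_bio(struct bio *bio, struct page **pages,
				     unsigned int nr_pages)
{
	unsigned int i;

	for (i = 0; i < nr_pages; i++)
		if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) < PAGE_SIZE)
			break;
	return i;			/* number of pages actually added */
}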
873 
874 /**
875  * bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
876  * @bio: bio to add pages to
877  * @iter: iov iterator describing the region to be mapped
878  *
 879  * Pins as many pages from *iter as fit and appends them to @bio's bvec
 880  * array. The pages will have to be released using put_page() when done.
881  */
882 int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
883 {
884         unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
885         struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
886         struct page **pages = (struct page **)bv;
887         size_t offset, diff;
888         ssize_t size;
889 
890         size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
891         if (unlikely(size <= 0))
892                 return size ? size : -EFAULT;
893         nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE;
894 
895         /*
896          * Deep magic below:  We need to walk the pinned pages backwards
897          * because we are abusing the space allocated for the bio_vecs
898          * for the page array.  Because the bio_vecs are larger than the
899          * page pointers by definition this will always work.  But it also
 900  * means we can't use bio_add_page, so any changes to its semantics
901          * need to be reflected here as well.
902          */
903         bio->bi_iter.bi_size += size;
904         bio->bi_vcnt += nr_pages;
905 
906         diff = (nr_pages * PAGE_SIZE - offset) - size;
907         while (nr_pages--) {
908                 bv[nr_pages].bv_page = pages[nr_pages];
909                 bv[nr_pages].bv_len = PAGE_SIZE;
910                 bv[nr_pages].bv_offset = 0;
911         }
912 
913         bv[0].bv_offset += offset;
914         bv[0].bv_len -= offset;
915         if (diff)
916                 bv[bio->bi_vcnt - 1].bv_len -= diff;
917 
918         iov_iter_advance(iter, size);
919         return 0;
920 }
921 EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);
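
/*
 * Editor's annotation - illustrative sketch, not part of the kernel
 * source: the direct-I/O shape this helper is meant for - drain the
 * iterator one bio at a time, then drop the page pins with put_page()
 * as the comment above requires.  Synchronous and simplified;
 * hypothetical helper, assumes this era's bio_set_dev().
 */
static int example_dio_read(struct block_device *bdev, struct iov_iter *iter,
			    sector_t sector)
{
	int ret = 0;

	while (iov_iter_count(iter)) {
		struct bio *bio = bio_alloc(GFP_KERNEL, BIO_MAX_PAGES);
		struct bio_vec *bvec;
		int i;

		bio_set_dev(bio, bdev);
		bio->bi_iter.bi_sector = sector;
		bio->bi_opf = REQ_OP_READ;

		ret = bio_iov_iter_get_pages(bio, iter);
		if (ret < 0) {
			bio_put(bio);
			break;
		}

		sector += bio_sectors(bio);
		ret = submit_bio_wait(bio);

		bio_for_each_segment_all(bvec, bio, i)
			put_page(bvec->bv_page);	/* release the pins */
		bio_put(bio);
		if (ret)
			break;
	}
	return ret;
}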
922 
923 static void submit_bio_wait_endio(struct bio *bio)
924 {
925         complete(bio->bi_private);
926 }
927 
928 /**
929  * submit_bio_wait - submit a bio, and wait until it completes
930  * @bio: The &struct bio which describes the I/O
931  *
932  * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
933  * bio_endio() on failure.
934  *
 935  * WARNING: Unlike how submit_bio() is usually used, this function does not
 936  * consume the bio reference. The caller must drop the reference on their
 937  * own.
938  */
939 int submit_bio_wait(struct bio *bio)
940 {
941         DECLARE_COMPLETION_ONSTACK_MAP(done, bio->bi_disk->lockdep_map);
942 
943         bio->bi_private = &done;
944         bio->bi_end_io = submit_bio_wait_endio;
945         bio->bi_opf |= REQ_SYNC;
946         submit_bio(bio);
947         wait_for_completion_io(&done);
948 
949         return blk_status_to_errno(bio->bi_status);
950 }
951 EXPORT_SYMBOL(submit_bio_wait);
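
/*
 * Editor's annotation - illustrative sketch, not part of the kernel
 * source: synchronous single-page write.  Note the warning above: the
 * caller still owns the bio reference after submit_bio_wait() and must
 * bio_put() it.  Hypothetical helper.
 */
static int example_write_page_sync(struct block_device *bdev,
				   struct page *page, sector_t sector)
{
	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
	int ret;

	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_opf = REQ_OP_WRITE;
	bio_add_page(bio, page, PAGE_SIZE, 0);

	ret = submit_bio_wait(bio);
	bio_put(bio);			/* reference is not consumed */
	return ret;
}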
952 
953 /**
954  * bio_advance - increment/complete a bio by some number of bytes
955  * @bio:        bio to advance
956  * @bytes:      number of bytes to complete
957  *
958  * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
959  * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
960  * be updated on the last bvec as well.
961  *
962  * @bio will then represent the remaining, uncompleted portion of the io.
963  */
964 void bio_advance(struct bio *bio, unsigned bytes)
965 {
966         if (bio_integrity(bio))
967                 bio_integrity_advance(bio, bytes);
968 
969         bio_advance_iter(bio, &bio->bi_iter, bytes);
970 }
971 EXPORT_SYMBOL(bio_advance);
972 
973 /**
974  * bio_copy_data - copy contents of data buffers from one chain of bios to
975  * another
976  * @src: source bio list
977  * @dst: destination bio list
978  *
979  * If @src and @dst are single bios, bi_next must be NULL - otherwise, treats
980  * @src and @dst as linked lists of bios.
981  *
982  * Stops when it reaches the end of either @src or @dst - that is, copies
983  * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
984  */
985 void bio_copy_data(struct bio *dst, struct bio *src)
986 {
987         struct bvec_iter src_iter, dst_iter;
988         struct bio_vec src_bv, dst_bv;
989         void *src_p, *dst_p;
990         unsigned bytes;
991 
992         src_iter = src->bi_iter;
993         dst_iter = dst->bi_iter;
994 
995         while (1) {
996                 if (!src_iter.bi_size) {
997                         src = src->bi_next;
998                         if (!src)
999                                 break;
1000 
1001                         src_iter = src->bi_iter;
1002                 }
1003 
1004                 if (!dst_iter.bi_size) {
1005                         dst = dst->bi_next;
1006                         if (!dst)
1007                                 break;
1008 
1009                         dst_iter = dst->bi_iter;
1010                 }
1011 
1012                 src_bv = bio_iter_iovec(src, src_iter);
1013                 dst_bv = bio_iter_iovec(dst, dst_iter);
1014 
1015                 bytes = min(src_bv.bv_len, dst_bv.bv_len);
1016 
1017                 src_p = kmap_atomic(src_bv.bv_page);
1018                 dst_p = kmap_atomic(dst_bv.bv_page);
1019 
1020                 memcpy(dst_p + dst_bv.bv_offset,
1021                        src_p + src_bv.bv_offset,
1022                        bytes);
1023 
1024                 kunmap_atomic(dst_p);
1025                 kunmap_atomic(src_p);
1026 
1027                 bio_advance_iter(src, &src_iter, bytes);
1028                 bio_advance_iter(dst, &dst_iter, bytes);
1029         }
1030 }
1031 EXPORT_SYMBOL(bio_copy_data);
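
/*
 * Editor's annotation - illustrative sketch, not part of the kernel
 * source: a bounce-buffer completion that copies a finished read back
 * into the original bio with bio_copy_data() (arguments are dst, src).
 * Assumes the bounce bio's pages were alloc_page()d by the driver;
 * names are hypothetical.
 */
static void example_bounce_endio(struct bio *bounce)
{
	struct bio *orig = bounce->bi_private;

	if (!bounce->bi_status && bio_data_dir(orig) == READ)
		bio_copy_data(orig, bounce);

	orig->bi_status = bounce->bi_status;
	bio_endio(orig);

	bio_free_pages(bounce);		/* driver-owned bounce pages */
	bio_put(bounce);
}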
1032 
1033 struct bio_map_data {
1034         int is_our_pages;
1035         struct iov_iter iter;
1036         struct iovec iov[];
1037 };
1038 
1039 static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
1040                                                gfp_t gfp_mask)
1041 {
1042         struct bio_map_data *bmd;
1043         if (data->nr_segs > UIO_MAXIOV)
1044                 return NULL;
1045 
1046         bmd = kmalloc(sizeof(struct bio_map_data) +
1047                        sizeof(struct iovec) * data->nr_segs, gfp_mask);
1048         if (!bmd)
1049                 return NULL;
1050         memcpy(bmd->iov, data->iov, sizeof(struct iovec) * data->nr_segs);
1051         bmd->iter = *data;
1052         bmd->iter.iov = bmd->iov;
1053         return bmd;
1054 }
1055 
1056 /**
1057  * bio_copy_from_iter - copy all pages from iov_iter to bio
1058  * @bio: The &struct bio which describes the I/O as destination
1059  * @iter: iov_iter as source
1060  *
1061  * Copy all pages from iov_iter to bio.
1062  * Returns 0 on success, or error on failure.
1063  */
1064 static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
1065 {
1066         int i;
1067         struct bio_vec *bvec;
1068 
1069         bio_for_each_segment_all(bvec, bio, i) {
1070                 ssize_t ret;
1071 
1072                 ret = copy_page_from_iter(bvec->bv_page,
1073                                           bvec->bv_offset,
1074                                           bvec->bv_len,
1075                                           iter);
1076 
1077                 if (!iov_iter_count(iter))
1078                         break;
1079 
1080                 if (ret < bvec->bv_len)
1081                         return -EFAULT;
1082         }
1083 
1084         return 0;
1085 }
1086 
1087 /**
1088  * bio_copy_to_iter - copy all pages from bio to iov_iter
1089  * @bio: The &struct bio which describes the I/O as source
1090  * @iter: iov_iter as destination
1091  *
1092  * Copy all pages from bio to iov_iter.
1093  * Returns 0 on success, or error on failure.
1094  */
1095 static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
1096 {
1097         int i;
1098         struct bio_vec *bvec;
1099 
1100         bio_for_each_segment_all(bvec, bio, i) {
1101                 ssize_t ret;
1102 
1103                 ret = copy_page_to_iter(bvec->bv_page,
1104                                         bvec->bv_offset,
1105                                         bvec->bv_len,
1106                                         &iter);
1107 
1108                 if (!iov_iter_count(&iter))
1109                         break;
1110 
1111                 if (ret < bvec->bv_len)
1112                         return -EFAULT;
1113         }
1114 
1115         return 0;
1116 }
1117 
1118 void bio_free_pages(struct bio *bio)
1119 {
1120         struct bio_vec *bvec;
1121         int i;
1122 
1123         bio_for_each_segment_all(bvec, bio, i)
1124                 __free_page(bvec->bv_page);
1125 }
1126 EXPORT_SYMBOL(bio_free_pages);
1127 
1128 /**
1129  *      bio_uncopy_user -       finish previously mapped bio
1130  *      @bio: bio being terminated
1131  *
1132  *      Free pages allocated from bio_copy_user_iov() and write back data
1133  *      to user space in case of a read.
1134  */
1135 int bio_uncopy_user(struct bio *bio)
1136 {
1137         struct bio_map_data *bmd = bio->bi_private;
1138         int ret = 0;
1139 
1140         if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
1141                 /*
1142                  * if we're in a workqueue, the request is orphaned, so
1143                  * don't copy into a random user address space, just free
1144                  * and return -EINTR so user space doesn't expect any data.
1145                  */
1146                 if (!current->mm)
1147                         ret = -EINTR;
1148                 else if (bio_data_dir(bio) == READ)
1149                         ret = bio_copy_to_iter(bio, bmd->iter);
1150                 if (bmd->is_our_pages)
1151                         bio_free_pages(bio);
1152         }
1153         kfree(bmd);
1154         bio_put(bio);
1155         return ret;
1156 }
1157 
1158 /**
1159  *      bio_copy_user_iov       -       copy user data to bio
1160  *      @q:             destination block queue
1161  *      @map_data:      pointer to the rq_map_data holding pages (if necessary)
1162  *      @iter:          iovec iterator
1163  *      @gfp_mask:      memory allocation flags
1164  *
1165  *      Prepares and returns a bio for indirect user io, bouncing data
 1166  *      to/from kernel pages as necessary. Must be paired with a
 1167  *      call to bio_uncopy_user() on io completion.
1168  */
1169 struct bio *bio_copy_user_iov(struct request_queue *q,
1170                               struct rq_map_data *map_data,
1171                               struct iov_iter *iter,
1172                               gfp_t gfp_mask)
1173 {
1174         struct bio_map_data *bmd;
1175         struct page *page;
1176         struct bio *bio;
1177         int i = 0, ret;
1178         int nr_pages;
1179         unsigned int len = iter->count;
1180         unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;
1181 
1182         bmd = bio_alloc_map_data(iter, gfp_mask);
1183         if (!bmd)
1184                 return ERR_PTR(-ENOMEM);
1185 
1186         /*
1187          * We need to do a deep copy of the iov_iter including the iovecs.
1188          * The caller provided iov might point to an on-stack or otherwise
1189          * shortlived one.
1190          */
1191         bmd->is_our_pages = map_data ? 0 : 1;
1192 
1193         nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
1194         if (nr_pages > BIO_MAX_PAGES)
1195                 nr_pages = BIO_MAX_PAGES;
1196 
1197         ret = -ENOMEM;
1198         bio = bio_kmalloc(gfp_mask, nr_pages);
1199         if (!bio)
1200                 goto out_bmd;
1201 
1202         ret = 0;
1203 
1204         if (map_data) {
1205                 nr_pages = 1 << map_data->page_order;
1206                 i = map_data->offset / PAGE_SIZE;
1207         }
1208         while (len) {
1209                 unsigned int bytes = PAGE_SIZE;
1210 
1211                 bytes -= offset;
1212 
1213                 if (bytes > len)
1214                         bytes = len;
1215 
1216                 if (map_data) {
1217                         if (i == map_data->nr_entries * nr_pages) {
1218                                 ret = -ENOMEM;
1219                                 break;
1220                         }
1221 
1222                         page = map_data->pages[i / nr_pages];
1223                         page += (i % nr_pages);
1224 
1225                         i++;
1226                 } else {
1227                         page = alloc_page(q->bounce_gfp | gfp_mask);
1228                         if (!page) {
1229                                 ret = -ENOMEM;
1230                                 break;
1231                         }
1232                 }
1233 
1234                 if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
1235                         break;
1236 
1237                 len -= bytes;
1238                 offset = 0;
1239         }
1240 
1241         if (ret)
1242                 goto cleanup;
1243 
1244         if (map_data)
1245                 map_data->offset += bio->bi_iter.bi_size;
1246 
1247         /*
1248          * success
1249          */
1250         if (((iter->type & WRITE) && (!map_data || !map_data->null_mapped)) ||
1251             (map_data && map_data->from_user)) {
1252                 ret = bio_copy_from_iter(bio, iter);
1253                 if (ret)
1254                         goto cleanup;
1255         } else {
1256                 iov_iter_advance(iter, bio->bi_iter.bi_size);
1257         }
1258 
1259         bio->bi_private = bmd;
1260         if (map_data && map_data->null_mapped)
1261                 bio_set_flag(bio, BIO_NULL_MAPPED);
1262         return bio;
1263 cleanup:
1264         if (!map_data)
1265                 bio_free_pages(bio);
1266         bio_put(bio);
1267 out_bmd:
1268         kfree(bmd);
1269         return ERR_PTR(ret);
1270 }
1271 
1272 /**
1273  *      bio_map_user_iov - map user iovec into bio
1274  *      @q:             the struct request_queue for the bio
1275  *      @iter:          iovec iterator
1276  *      @gfp_mask:      memory allocation flags
1277  *
1278  *      Map the user space address into a bio suitable for io to a block
1279  *      device. Returns an error pointer in case of error.
1280  */
1281 struct bio *bio_map_user_iov(struct request_queue *q,
1282                              struct iov_iter *iter,
1283                              gfp_t gfp_mask)
1284 {
1285         int j;
1286         struct bio *bio;
1287         int ret;
1288         struct bio_vec *bvec;
1289 
1290         if (!iov_iter_count(iter))
1291                 return ERR_PTR(-EINVAL);
1292 
1293         bio = bio_kmalloc(gfp_mask, iov_iter_npages(iter, BIO_MAX_PAGES));
1294         if (!bio)
1295                 return ERR_PTR(-ENOMEM);
1296 
1297         while (iov_iter_count(iter)) {
1298                 struct page **pages;
1299                 ssize_t bytes;
1300                 size_t offs, added = 0;
1301                 int npages;
1302 
1303                 bytes = iov_iter_get_pages_alloc(iter, &pages, LONG_MAX, &offs);
1304                 if (unlikely(bytes <= 0)) {
1305                         ret = bytes ? bytes : -EFAULT;
1306                         goto out_unmap;
1307                 }
1308 
1309                 npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE);
1310 
1311                 if (unlikely(offs & queue_dma_alignment(q))) {
1312                         ret = -EINVAL;
1313                         j = 0;
1314                 } else {
1315                         for (j = 0; j < npages; j++) {
1316                                 struct page *page = pages[j];
1317                                 unsigned int n = PAGE_SIZE - offs;
1318                                 unsigned short prev_bi_vcnt = bio->bi_vcnt;
1319 
1320                                 if (n > bytes)
1321                                         n = bytes;
1322 
1323                                 if (!bio_add_pc_page(q, bio, page, n, offs))
1324                                         break;
1325 
1326                                 /*
1327                                  * check if vector was merged with previous
1328                                  * drop page reference if needed
1329                                  */
1330                                 if (bio->bi_vcnt == prev_bi_vcnt)
1331                                         put_page(page);
1332 
1333                                 added += n;
1334                                 bytes -= n;
1335                                 offs = 0;
1336                         }
1337                         iov_iter_advance(iter, added);
1338                 }
1339                 /*
1340                  * release the pages we didn't map into the bio, if any
1341                  */
1342                 while (j < npages)
1343                         put_page(pages[j++]);
1344                 kvfree(pages);
1345                 /* couldn't stuff something into bio? */
1346                 if (bytes)
1347                         break;
1348         }
1349 
1350         bio_set_flag(bio, BIO_USER_MAPPED);
1351 
1352         /*
1353          * subtle -- if bio_map_user_iov() ended up bouncing a bio,
1354          * it would normally disappear when its bi_end_io is run.
1355          * however, we need it for the unmap, so grab an extra
1356          * reference to it
1357          */
1358         bio_get(bio);
1359         return bio;
1360 
1361  out_unmap:
1362         bio_for_each_segment_all(bvec, bio, j) {
1363                 put_page(bvec->bv_page);
1364         }
1365         bio_put(bio);
1366         return ERR_PTR(ret);
1367 }
1368 
1369 static void __bio_unmap_user(struct bio *bio)
1370 {
1371         struct bio_vec *bvec;
1372         int i;
1373 
1374         /*
1375          * make sure we dirty pages we wrote to
1376          */
1377         bio_for_each_segment_all(bvec, bio, i) {
1378                 if (bio_data_dir(bio) == READ)
1379                         set_page_dirty_lock(bvec->bv_page);
1380 
1381                 put_page(bvec->bv_page);
1382         }
1383 
1384         bio_put(bio);
1385 }
1386 
1387 /**
1388  *      bio_unmap_user  -       unmap a bio
1389  *      @bio:           the bio being unmapped
1390  *
1391  *      Unmap a bio previously mapped by bio_map_user_iov(). Must be called from
1392  *      process context.
1393  *
1394  *      bio_unmap_user() may sleep.
1395  */
1396 void bio_unmap_user(struct bio *bio)
1397 {
1398         __bio_unmap_user(bio);
1399         bio_put(bio);
1400 }
1401 
1402 static void bio_map_kern_endio(struct bio *bio)
1403 {
1404         bio_put(bio);
1405 }
1406 
1407 /**
1408  *      bio_map_kern    -       map kernel address into bio
1409  *      @q: the struct request_queue for the bio
1410  *      @data: pointer to buffer to map
1411  *      @len: length in bytes
1412  *      @gfp_mask: allocation flags for bio allocation
1413  *
1414  *      Map the kernel address into a bio suitable for io to a block
1415  *      device. Returns an error pointer in case of error.
1416  */
1417 struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
1418                          gfp_t gfp_mask)
1419 {
1420         unsigned long kaddr = (unsigned long)data;
1421         unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1422         unsigned long start = kaddr >> PAGE_SHIFT;
1423         const int nr_pages = end - start;
1424         int offset, i;
1425         struct bio *bio;
1426 
1427         bio = bio_kmalloc(gfp_mask, nr_pages);
1428         if (!bio)
1429                 return ERR_PTR(-ENOMEM);
1430 
1431         offset = offset_in_page(kaddr);
1432         for (i = 0; i < nr_pages; i++) {
1433                 unsigned int bytes = PAGE_SIZE - offset;
1434 
1435                 if (len <= 0)
1436                         break;
1437 
1438                 if (bytes > len)
1439                         bytes = len;
1440 
1441                 if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
1442                                     offset) < bytes) {
1443                         /* we don't support partial mappings */
1444                         bio_put(bio);
1445                         return ERR_PTR(-EINVAL);
1446                 }
1447 
1448                 data += bytes;
1449                 len -= bytes;
1450                 offset = 0;
1451         }
1452 
1453         bio->bi_end_io = bio_map_kern_endio;
1454         return bio;
1455 }
1456 EXPORT_SYMBOL(bio_map_kern);
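
/*
 * Editor's annotation - illustrative sketch, not part of the kernel
 * source: writing a physically contiguous kernel buffer (no vmalloc -
 * bio_map_kern() uses virt_to_page()) without copying.  Note that
 * submit_bio_wait() replaces the self-freeing completion installed
 * above, so the reference is dropped by hand here; the buffer must stay
 * alive until completion.  Hypothetical helper.
 */
static int example_write_buffer(struct request_queue *q,
				struct block_device *bdev,
				void *buf, unsigned int len, sector_t sector)
{
	struct bio *bio = bio_map_kern(q, buf, len, GFP_KERNEL);
	int ret;

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_opf = REQ_OP_WRITE;

	ret = submit_bio_wait(bio);
	bio_put(bio);
	return ret;
}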
1457 
1458 static void bio_copy_kern_endio(struct bio *bio)
1459 {
1460         bio_free_pages(bio);
1461         bio_put(bio);
1462 }
1463 
1464 static void bio_copy_kern_endio_read(struct bio *bio)
1465 {
1466         char *p = bio->bi_private;
1467         struct bio_vec *bvec;
1468         int i;
1469 
1470         bio_for_each_segment_all(bvec, bio, i) {
1471                 memcpy(p, page_address(bvec->bv_page), bvec->bv_len);
1472                 p += bvec->bv_len;
1473         }
1474 
1475         bio_copy_kern_endio(bio);
1476 }
1477 
1478 /**
1479  *      bio_copy_kern   -       copy kernel address into bio
1480  *      @q: the struct request_queue for the bio
1481  *      @data: pointer to buffer to copy
1482  *      @len: length in bytes
1483  *      @gfp_mask: allocation flags for bio and page allocation
1484  *      @reading: data direction is READ
1485  *
1486  *      copy the kernel address into a bio suitable for io to a block
1487  *      device. Returns an error pointer in case of error.
1488  */
1489 struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
1490                           gfp_t gfp_mask, int reading)
1491 {
1492         unsigned long kaddr = (unsigned long)data;
1493         unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1494         unsigned long start = kaddr >> PAGE_SHIFT;
1495         struct bio *bio;
1496         void *p = data;
1497         int nr_pages = 0;
1498 
1499         /*
1500          * Overflow, abort
1501          */
1502         if (end < start)
1503                 return ERR_PTR(-EINVAL);
1504 
1505         nr_pages = end - start;
1506         bio = bio_kmalloc(gfp_mask, nr_pages);
1507         if (!bio)
1508                 return ERR_PTR(-ENOMEM);
1509 
1510         while (len) {
1511                 struct page *page;
1512                 unsigned int bytes = PAGE_SIZE;
1513 
1514                 if (bytes > len)
1515                         bytes = len;
1516 
1517                 page = alloc_page(q->bounce_gfp | gfp_mask);
1518                 if (!page)
1519                         goto cleanup;
1520 
1521                 if (!reading)
1522                         memcpy(page_address(page), p, bytes);
1523 
1524                 if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
1525                         break;
1526 
1527                 len -= bytes;
1528                 p += bytes;
1529         }
1530 
1531         if (reading) {
1532                 bio->bi_end_io = bio_copy_kern_endio_read;
1533                 bio->bi_private = data;
1534         } else {
1535                 bio->bi_end_io = bio_copy_kern_endio;
1536         }
1537 
1538         return bio;
1539 
1540 cleanup:
1541         bio_free_pages(bio);
1542         bio_put(bio);
1543         return ERR_PTR(-ENOMEM);
1544 }
1545 
1546 /*
1547  * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
1548  * for performing direct-IO in BIOs.
1549  *
1550  * The problem is that we cannot run set_page_dirty() from interrupt context
1551  * because the required locks are not interrupt-safe.  So what we can do is to
1552  * mark the pages dirty _before_ performing IO.  And in interrupt context,
1553  * check that the pages are still dirty.   If so, fine.  If not, redirty them
1554  * in process context.
1555  *
1556  * We special-case compound pages here: normally this means reads into hugetlb
1557  * pages.  The logic in here doesn't really work right for compound pages
1558  * because the VM does not uniformly chase down the head page in all cases.
1559  * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
1560  * handle them at all.  So we skip compound pages here at an early stage.
1561  *
1562  * Note that this code is very hard to test under normal circumstances because
1563  * direct-io pins the pages with get_user_pages().  This makes
1564  * is_page_cache_freeable() return false, and the VM will not clean the pages.
1565  * But other code (e.g., flusher threads) could clean the pages if they are
1566  * mapped pagecache.
1567  *
1568  * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
1569  * deferred bio dirtying paths.
1570  */
1571 
1572 /*
1573  * bio_set_pages_dirty() will mark all the bio's pages as dirty.
1574  */
1575 void bio_set_pages_dirty(struct bio *bio)
1576 {
1577         struct bio_vec *bvec;
1578         int i;
1579 
1580         bio_for_each_segment_all(bvec, bio, i) {
1581                 struct page *page = bvec->bv_page;
1582 
1583                 if (page && !PageCompound(page))
1584                         set_page_dirty_lock(page);
1585         }
1586 }
1587 
1588 static void bio_release_pages(struct bio *bio)
1589 {
1590         struct bio_vec *bvec;
1591         int i;
1592 
1593         bio_for_each_segment_all(bvec, bio, i) {
1594                 struct page *page = bvec->bv_page;
1595 
1596                 if (page)
1597                         put_page(page);
1598         }
1599 }
1600 
1601 /*
1602  * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
1603  * If they are, then fine.  If, however, some pages are clean then they must
1604  * have been written out during the direct-IO read.  So we hang on to the
1605  * BIO and the offending pages and re-dirty those pages in process context.
1606  *
1607  * It is expected that bio_check_pages_dirty() will wholly own the BIO from
1608  * here on.  It will run one put_page() against each page and will run one
1609  * bio_put() against the BIO.
1610  */
1611 
1612 static void bio_dirty_fn(struct work_struct *work);
1613 
1614 static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
1615 static DEFINE_SPINLOCK(bio_dirty_lock);
1616 static struct bio *bio_dirty_list;
1617 
1618 /*
1619  * This runs in process context
1620  */
1621 static void bio_dirty_fn(struct work_struct *work)
1622 {
1623         unsigned long flags;
1624         struct bio *bio;
1625 
1626         spin_lock_irqsave(&bio_dirty_lock, flags);
1627         bio = bio_dirty_list;
1628         bio_dirty_list = NULL;
1629         spin_unlock_irqrestore(&bio_dirty_lock, flags);
1630 
1631         while (bio) {
1632                 struct bio *next = bio->bi_private;
1633 
1634                 bio_set_pages_dirty(bio);
1635                 bio_release_pages(bio);
1636                 bio_put(bio);
1637                 bio = next;
1638         }
1639 }
1640 
1641 void bio_check_pages_dirty(struct bio *bio)
1642 {
1643         struct bio_vec *bvec;
1644         int nr_clean_pages = 0;
1645         int i;
1646 
1647         bio_for_each_segment_all(bvec, bio, i) {
1648                 struct page *page = bvec->bv_page;
1649 
1650                 if (PageDirty(page) || PageCompound(page)) {
1651                         put_page(page);
1652                         bvec->bv_page = NULL;
1653                 } else {
1654                         nr_clean_pages++;
1655                 }
1656         }
1657 
1658         if (nr_clean_pages) {
1659                 unsigned long flags;
1660 
1661                 spin_lock_irqsave(&bio_dirty_lock, flags);
1662                 bio->bi_private = bio_dirty_list;
1663                 bio_dirty_list = bio;
1664                 spin_unlock_irqrestore(&bio_dirty_lock, flags);
1665                 schedule_work(&bio_dirty_work);
1666         } else {
1667                 bio_put(bio);
1668         }
1669 }
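
The scheme described in the comment block above looks roughly like this from
a direct-IO read path; both functions in the sketch are hypothetical
stand-ins for what fs/direct-io.c does:

#include <linux/bio.h>

/* my_dio_read_end_io() and my_submit_dio_read() are hypothetical;
 * fs/direct-io.c is the in-tree user of this pattern. */
static void my_dio_read_end_io(struct bio *bio)
{
        /*
         * Interrupt context: bio_check_pages_dirty() takes ownership of
         * @bio, re-dirties (via the workqueue above) any page the VM
         * cleaned while the read was in flight, and drops the page and
         * bio references.
         */
        bio_check_pages_dirty(bio);
}

static void my_submit_dio_read(struct bio *bio)
{
        bio->bi_end_io = my_dio_read_end_io;
        bio_set_pages_dirty(bio);       /* dirty _before_ the IO */
        submit_bio(bio);
}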
1670 
1671 void generic_start_io_acct(struct request_queue *q, int rw,
1672                            unsigned long sectors, struct hd_struct *part)
1673 {
1674         int cpu = part_stat_lock();
1675 
1676         part_round_stats(q, cpu, part);
1677         part_stat_inc(cpu, part, ios[rw]);
1678         part_stat_add(cpu, part, sectors[rw], sectors);
1679         part_inc_in_flight(q, part, rw);
1680 
1681         part_stat_unlock();
1682 }
1683 EXPORT_SYMBOL(generic_start_io_acct);
1684 
1685 void generic_end_io_acct(struct request_queue *q, int rw,
1686                          struct hd_struct *part, unsigned long start_time)
1687 {
1688         unsigned long duration = jiffies - start_time;
1689         int cpu = part_stat_lock();
1690 
1691         part_stat_add(cpu, part, ticks[rw], duration);
1692         part_round_stats(q, cpu, part);
1693         part_dec_in_flight(q, part, rw);
1694 
1695         part_stat_unlock();
1696 }
1697 EXPORT_SYMBOL(generic_end_io_acct);
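
These two exported helpers let bio-based drivers account IO without a
struct request; a minimal, hypothetical bracket around a single bio:

#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/jiffies.h>

/* my_handle_bio() is hypothetical and brackets the IO synchronously for
 * brevity; real drivers (dm, for example) take the start stamp at
 * submission and call generic_end_io_acct() from their completion path. */
static void my_handle_bio(struct request_queue *q, struct bio *bio,
                          struct hd_struct *part)
{
        unsigned long start = jiffies;
        int rw = bio_data_dir(bio);

        generic_start_io_acct(q, rw, bio_sectors(bio), part);
        /* ... actually service the bio ... */
        generic_end_io_acct(q, rw, part, start);
}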
1698 
1699 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
1700 void bio_flush_dcache_pages(struct bio *bi)
1701 {
1702         struct bio_vec bvec;
1703         struct bvec_iter iter;
1704 
1705         bio_for_each_segment(bvec, bi, iter)
1706                 flush_dcache_page(bvec.bv_page);
1707 }
1708 EXPORT_SYMBOL(bio_flush_dcache_pages);
1709 #endif
1710 
1711 static inline bool bio_remaining_done(struct bio *bio)
1712 {
1713         /*
1714          * If we're not chaining, then ->__bi_remaining is always 1 and
1715          * we always end io on the first invocation.
1716          */
1717         if (!bio_flagged(bio, BIO_CHAIN))
1718                 return true;
1719 
1720         BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);
1721 
1722         if (atomic_dec_and_test(&bio->__bi_remaining)) {
1723                 bio_clear_flag(bio, BIO_CHAIN);
1724                 return true;
1725         }
1726 
1727         return false;
1728 }
1729 
1730 /**
1731  * bio_endio - end I/O on a bio
1732  * @bio:        bio
1733  *
1734  * Description:
1735  *   bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
1736  *   way to end I/O on a bio. No one should call bi_end_io() directly on a
1737  *   bio unless they own it and thus know that it has an end_io function.
1738  *
1739  *   bio_endio() can be called several times on a bio that has been chained
1740  *   using bio_chain().  The ->bi_end_io() function will only be called the
1741  *   last time.  At this point the BLK_TA_COMPLETE tracing event will be
1742  *   generated if BIO_TRACE_COMPLETION is set.
1743  **/
1744 void bio_endio(struct bio *bio)
1745 {
1746 again:
1747         if (!bio_remaining_done(bio))
1748                 return;
1749         if (!bio_integrity_endio(bio))
1750                 return;
1751 
1752         /*
1753          * Need to have a real endio function for chained bios, otherwise
1754          * various corner cases will break (like stacking block devices that
1755          * save/restore bi_end_io) - however, we want to avoid unbounded
1756          * recursion and blowing the stack. Tail call optimization would
1757          * handle this, but compiling with frame pointers also disables
1758          * gcc's sibling call optimization.
1759          */
1760         if (bio->bi_end_io == bio_chain_endio) {
1761                 bio = __bio_chain_endio(bio);
1762                 goto again;
1763         }
1764 
1765         if (bio->bi_disk && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
1766                 trace_block_bio_complete(bio->bi_disk->queue, bio,
1767                                          blk_status_to_errno(bio->bi_status));
1768                 bio_clear_flag(bio, BIO_TRACE_COMPLETION);
1769         }
1770 
1771         blk_throtl_bio_endio(bio);
1772         /* release cgroup info */
1773         bio_uninit(bio);
1774         if (bio->bi_end_io)
1775                 bio->bi_end_io(bio);
1776 }
1777 EXPORT_SYMBOL(bio_endio);
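
A minimal sketch of the owner side of this contract, with a hypothetical
synchronous waiter:

#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/printk.h>

/* struct my_wait and my_end_io() are hypothetical; a submitter would set
 * bio->bi_private = &w and bio->bi_end_io = my_end_io, submit_bio(bio),
 * then wait_for_completion(&w.done). */
struct my_wait {
        struct completion done;
};

static void my_end_io(struct bio *bio)
{
        struct my_wait *w = bio->bi_private;

        if (bio->bi_status)
                pr_err("IO error %d\n", blk_status_to_errno(bio->bi_status));
        complete(&w->done);
        bio_put(bio);           /* drop the owner's reference */
}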
1778 
1779 /**
1780  * bio_split - split a bio
1781  * @bio:        bio to split
1782  * @sectors:    number of sectors to split from the front of @bio
1783  * @gfp:        gfp mask
1784  * @bs:         bio set to allocate from
1785  *
1786  * Allocates and returns a new bio which represents @sectors from the start of
1787  * @bio, and updates @bio to represent the remaining sectors.
1788  *
1789  * Unless this is a discard request the newly allocated bio will point
1790  * to @bio's bi_io_vec; it is the caller's responsibility to ensure that
1791  * @bio is not freed before the split.
1792  */
1793 struct bio *bio_split(struct bio *bio, int sectors,
1794                       gfp_t gfp, struct bio_set *bs)
1795 {
1796         struct bio *split;
1797 
1798         BUG_ON(sectors <= 0);
1799         BUG_ON(sectors >= bio_sectors(bio));
1800 
1801         split = bio_clone_fast(bio, gfp, bs);
1802         if (!split)
1803                 return NULL;
1804 
1805         split->bi_iter.bi_size = sectors << 9;
1806 
1807         if (bio_integrity(split))
1808                 bio_integrity_trim(split);
1809 
1810         bio_advance(bio, split->bi_iter.bi_size);
1811 
1812         if (bio_flagged(bio, BIO_TRACE_COMPLETION))
1813                 bio_set_flag(split, BIO_TRACE_COMPLETION);
1814 
1815         return split;
1816 }
1817 EXPORT_SYMBOL(bio_split);
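
A sketch of the usual consumer pattern, assuming a driver-style
make_request path; my_make_request() is hypothetical and borrows
fs_bio_set purely for brevity:

#include <linux/bio.h>
#include <linux/blkdev.h>

/* my_make_request() is hypothetical; real drivers split from a bio_set
 * of their own to stay deadlock-safe under memory pressure. */
static void my_make_request(struct bio *bio, int max_sectors)
{
        if (bio_sectors(bio) > max_sectors) {
                struct bio *split;

                /* the front max_sectors come back; @bio keeps the rest */
                split = bio_split(bio, max_sectors, GFP_NOIO, fs_bio_set);
                bio_chain(split, bio);     /* @bio completes after @split */
                generic_make_request(bio); /* requeue the remainder */
                bio = split;
        }
        /* ... service @bio, now at most max_sectors long ... */
}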
1818 
1819 /**
1820  * bio_trim - trim a bio
1821  * @bio:        bio to trim
1822  * @offset:     number of sectors to trim from the front of @bio
1823  * @size:       size we want to trim @bio to, in sectors
1824  */
1825 void bio_trim(struct bio *bio, int offset, int size)
1826 {
1827         /* 'bio' is a cloned bio which we need to trim to match
1828          * the given offset and size.
1829          */
1830 
1831         size <<= 9;
1832         if (offset == 0 && size == bio->bi_iter.bi_size)
1833                 return;
1834 
1835         bio_clear_flag(bio, BIO_SEG_VALID);
1836 
1837         bio_advance(bio, offset << 9);
1838 
1839         bio->bi_iter.bi_size = size;
1840 
1841         if (bio_integrity(bio))
1842                 bio_integrity_trim(bio);
1843 
1844 }
1845 EXPORT_SYMBOL_GPL(bio_trim);
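
A short hypothetical helper showing the usual pairing with
bio_clone_fast():

#include <linux/bio.h>

/* my_clone_subrange() is hypothetical (and again borrows fs_bio_set);
 * it mirrors how stacking drivers narrow a clone to a sub-range. */
static struct bio *my_clone_subrange(struct bio *bio, int offset, int sectors)
{
        struct bio *clone = bio_clone_fast(bio, GFP_NOIO, fs_bio_set);

        if (clone)
                bio_trim(clone, offset, sectors);
        return clone;
}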
1846 
1847 /*
1848  * create memory pools for biovec's in a bio_set.
1849  * use the global biovec slabs created for general use.
1850  */
1851 mempool_t *biovec_create_pool(int pool_entries)
1852 {
1853         struct biovec_slab *bp = bvec_slabs + BVEC_POOL_MAX;
1854 
1855         return mempool_create_slab_pool(pool_entries, bp->slab);
1856 }
1857 
1858 void bioset_free(struct bio_set *bs)
1859 {
1860         if (bs->rescue_workqueue)
1861                 destroy_workqueue(bs->rescue_workqueue);
1862 
1863         mempool_destroy(bs->bio_pool);
1864         mempool_destroy(bs->bvec_pool);
1865 
1866         bioset_integrity_free(bs);
1867         bio_put_slab(bs);
1868 
1869         kfree(bs);
1870 }
1871 EXPORT_SYMBOL(bioset_free);
1872 
1873 /**
1874  * bioset_create  - Create a bio_set
1875  * @pool_size:  Number of bio and bio_vecs to cache in the mempool
1876  * @front_pad:  Number of bytes to allocate in front of the returned bio
1877  * @flags:      Flags to modify behavior, currently %BIOSET_NEED_BVECS
1878  *              and %BIOSET_NEED_RESCUER
1879  *
1880  * Description:
1881  *    Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
1882  *    to ask for a number of bytes to be allocated in front of the bio.
1883  *    Front pad allocation is useful for embedding the bio inside
1884  *    another structure, to avoid allocating extra data to go with the bio.
1885  *    Note that the bio must always be embedded at the END of that structure,
1886  *    or things will break badly.
1887  *    If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be allocated
1888  *    for allocating iovecs.  This pool is not needed, e.g., for bio_clone_fast().
1889  *    If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used to
1890  *    dispatch queued requests when the mempool runs out of space.
1891  *
1892  */
1893 struct bio_set *bioset_create(unsigned int pool_size,
1894                               unsigned int front_pad,
1895                               int flags)
1896 {
1897         unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
1898         struct bio_set *bs;
1899 
1900         bs = kzalloc(sizeof(*bs), GFP_KERNEL);
1901         if (!bs)
1902                 return NULL;
1903 
1904         bs->front_pad = front_pad;
1905 
1906         spin_lock_init(&bs->rescue_lock);
1907         bio_list_init(&bs->rescue_list);
1908         INIT_WORK(&bs->rescue_work, bio_alloc_rescue);
1909 
1910         bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
1911         if (!bs->bio_slab) {
1912                 kfree(bs);
1913                 return NULL;
1914         }
1915 
1916         bs->bio_pool = mempool_create_slab_pool(pool_size, bs->bio_slab);
1917         if (!bs->bio_pool)
1918                 goto bad;
1919 
1920         if (flags & BIOSET_NEED_BVECS) {
1921                 bs->bvec_pool = biovec_create_pool(pool_size);
1922                 if (!bs->bvec_pool)
1923                         goto bad;
1924         }
1925 
1926         if (!(flags & BIOSET_NEED_RESCUER))
1927                 return bs;
1928 
1929         bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0);
1930         if (!bs->rescue_workqueue)
1931                 goto bad;
1932 
1933         return bs;
1934 bad:
1935         bioset_free(bs);
1936         return NULL;
1937 }
1938 EXPORT_SYMBOL(bioset_create);
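
The front-pad rule above is easiest to see in code; this is a minimal
sketch, with every my_* name hypothetical:

#include <linux/bio.h>
#include <linux/stddef.h>

/* Everything named my_* is hypothetical; btrfs and dm wrap their
 * per-IO state around an embedded bio in just this shape. */
struct my_io {
        void *driver_state;     /* lives in the front pad */
        struct bio bio;         /* must be the LAST member */
};

static struct bio_set *my_bio_set;

static int my_setup(void)
{
        my_bio_set = bioset_create(BIO_POOL_SIZE,
                                   offsetof(struct my_io, bio),
                                   BIOSET_NEED_BVECS);
        return my_bio_set ? 0 : -ENOMEM;
}

static struct my_io *my_alloc_io(unsigned int nr_vecs)
{
        struct bio *bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, my_bio_set);

        return bio ? container_of(bio, struct my_io, bio) : NULL;
}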
1939 
1940 #ifdef CONFIG_BLK_CGROUP
1941 
1942 /**
1943  * bio_associate_blkcg - associate a bio with the specified blkcg
1944  * @bio: target bio
1945  * @blkcg_css: css of the blkcg to associate
1946  *
1947  * Associate @bio with the blkcg specified by @blkcg_css.  Block layer will
1948  * treat @bio as if it were issued by a task which belongs to the blkcg.
1949  *
1950  * This function takes an extra reference of @blkcg_css which will be put
1951  * when @bio is released.  The caller must own @bio and is responsible for
1952  * synchronizing calls to this function.
1953  */
1954 int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css)
1955 {
1956         if (unlikely(bio->bi_css))
1957                 return -EBUSY;
1958         css_get(blkcg_css);
1959         bio->bi_css = blkcg_css;
1960         return 0;
1961 }
1962 EXPORT_SYMBOL_GPL(bio_associate_blkcg);
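
A trivial hypothetical wrapper, mostly to make the reference-taking and
-EBUSY semantics concrete:

#include <linux/bio.h>
#include <linux/cgroup.h>

/* my_tag_bio() is hypothetical; the writeback path tags bios this way
 * when issuing IO on behalf of a cgroup other than the current task's. */
static int my_tag_bio(struct bio *bio, struct cgroup_subsys_state *blkcg_css)
{
        /* takes an extra css reference; fails with -EBUSY if @bio is
         * already associated */
        return bio_associate_blkcg(bio, blkcg_css);
}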
1963 
1964 /**
1965  * bio_disassociate_task - undo any io_context or blkcg association on a bio
1966  * @bio: target bio
1967  */
1968 void bio_disassociate_task(struct bio *bio)
1969 {
1970         if (bio->bi_ioc) {
1971                 put_io_context(bio->bi_ioc);
1972                 bio->bi_ioc = NULL;
1973         }
1974         if (bio->bi_css) {
1975                 css_put(bio->bi_css);
1976                 bio->bi_css = NULL;
1977         }
1978 }
1979 
1980 /**
1981  * bio_clone_blkcg_association - clone blkcg association from src to dst bio
1982  * @dst: destination bio
1983  * @src: source bio
1984  */
1985 void bio_clone_blkcg_association(struct bio *dst, struct bio *src)
1986 {
1987         if (src->bi_css)
1988                 WARN_ON(bio_associate_blkcg(dst, src->bi_css));
1989 }
1990 EXPORT_SYMBOL_GPL(bio_clone_blkcg_association);
1991 #endif /* CONFIG_BLK_CGROUP */
1992 
1993 static void __init biovec_init_slabs(void)
1994 {
1995         int i;
1996 
1997         for (i = 0; i < BVEC_POOL_NR; i++) {
1998                 int size;
1999                 struct biovec_slab *bvs = bvec_slabs + i;
2000 
2001                 if (bvs->nr_vecs <= BIO_INLINE_VECS) {
2002                         bvs->slab = NULL;
2003                         continue;
2004                 }
2005 
2006                 size = bvs->nr_vecs * sizeof(struct bio_vec);
2007                 bvs->slab = kmem_cache_create(bvs->name, size, 0,
2008                                 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
2009         }
2010 }
2011 
2012 static int __init init_bio(void)
2013 {
2014         bio_slab_max = 2;
2015         bio_slab_nr = 0;
2016         bio_slabs = kzalloc(bio_slab_max * sizeof(struct bio_slab), GFP_KERNEL);
2017         if (!bio_slabs)
2018                 panic("bio: can't allocate bios\n");
2019 
2020         bio_integrity_init();
2021         biovec_init_slabs();
2022 
2023         fs_bio_set = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
2024         if (!fs_bio_set)
2025                 panic("bio: can't allocate bios\n");
2026 
2027         if (bioset_integrity_create(fs_bio_set, BIO_POOL_SIZE))
2028                 panic("bio: can't create integrity pool\n");
2029 
2030         return 0;
2031 }
2032 subsys_initcall(init_bio);
2033 
