root/block/bio.c


DEFINITIONS

This source file includes the following definitions.
  1. bio_find_or_create_slab
  2. bio_put_slab
  3. bvec_nr_vecs
  4. bvec_free
  5. bvec_alloc
  6. __bio_free
  7. bio_free
  8. bio_init
  9. bio_reset
  10. bio_chain_endio
  11. bio_inc_remaining
  12. bio_chain
  13. bio_alloc_rescue
  14. punt_bios_to_rescuer
  15. bio_alloc_bioset
  16. zero_fill_bio
  17. bio_put
  18. bio_phys_segments
  19. __bio_clone_fast
  20. bio_clone_fast
  21. bio_clone_bioset
  22. bio_add_pc_page
  23. bio_add_page
  24. submit_bio_wait_endio
  25. submit_bio_wait
  26. bio_advance
  27. bio_alloc_pages
  28. bio_copy_data
  29. bio_alloc_map_data
  30. bio_copy_from_iter
  31. bio_copy_to_iter
  32. bio_free_pages
  33. bio_uncopy_user
  34. bio_copy_user_iov
  35. bio_map_user_iov
  36. __bio_unmap_user
  37. bio_unmap_user
  38. bio_map_kern_endio
  39. bio_map_kern
  40. bio_copy_kern_endio
  41. bio_copy_kern_endio_read
  42. bio_copy_kern
  43. bio_set_pages_dirty
  44. bio_release_pages
  45. bio_dirty_fn
  46. bio_check_pages_dirty
  47. generic_start_io_acct
  48. generic_end_io_acct
  49. bio_flush_dcache_pages
  50. bio_remaining_done
  51. bio_endio
  52. bio_split
  53. bio_trim
  54. biovec_create_pool
  55. bioset_free
  56. __bioset_create
  57. bioset_create
  58. bioset_create_nobvec
  59. bio_associate_blkcg
  60. bio_associate_current
  61. bio_disassociate_task
  62. biovec_init_slabs
  63. init_bio

   1 /*
   2  * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
   3  *
   4  * This program is free software; you can redistribute it and/or modify
   5  * it under the terms of the GNU General Public License version 2 as
   6  * published by the Free Software Foundation.
   7  *
   8  * This program is distributed in the hope that it will be useful,
   9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  11  * GNU General Public License for more details.
  12  *
  13  * You should have received a copy of the GNU General Public License
  14  * along with this program; if not, write to the Free Software
  15  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA.
  16  *
  17  */
  18 #include <linux/mm.h>
  19 #include <linux/swap.h>
  20 #include <linux/bio.h>
  21 #include <linux/blkdev.h>
  22 #include <linux/uio.h>
  23 #include <linux/iocontext.h>
  24 #include <linux/slab.h>
  25 #include <linux/init.h>
  26 #include <linux/kernel.h>
  27 #include <linux/export.h>
  28 #include <linux/mempool.h>
  29 #include <linux/workqueue.h>
  30 #include <linux/cgroup.h>
  31 
  32 #include <trace/events/block.h>
  33 
  34 /*
  35  * Test patch to inline a certain number of bi_io_vec's inside the bio
  36  * itself, to shrink a bio data allocation from two mempool calls to one
  37  */
  38 #define BIO_INLINE_VECS         4
  39 
  40 /*
  41  * if you change this list, also change bvec_alloc or things will
  42  * break badly! cannot be bigger than what you can fit into an
  43  * unsigned short
  44  */
  45 #define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
  46 static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
  47         BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
  48 };
  49 #undef BV
  50 
  51 /*
  52  * fs_bio_set is the bio_set containing bio and iovec memory pools used by
  53  * IO code that does not need private memory pools.
  54  */
  55 struct bio_set *fs_bio_set;
  56 EXPORT_SYMBOL(fs_bio_set);
  57 
  58 /*
  59  * Our slab pool management
  60  */
  61 struct bio_slab {
  62         struct kmem_cache *slab;
  63         unsigned int slab_ref;
  64         unsigned int slab_size;
  65         char name[8];
  66 };
  67 static DEFINE_MUTEX(bio_slab_lock);
  68 static struct bio_slab *bio_slabs;
  69 static unsigned int bio_slab_nr, bio_slab_max;
  70 
  71 static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
  72 {
  73         unsigned int sz = sizeof(struct bio) + extra_size;
  74         struct kmem_cache *slab = NULL;
  75         struct bio_slab *bslab, *new_bio_slabs;
  76         unsigned int new_bio_slab_max;
  77         unsigned int i, entry = -1;
  78 
  79         mutex_lock(&bio_slab_lock);
  80 
  81         i = 0;
  82         while (i < bio_slab_nr) {
  83                 bslab = &bio_slabs[i];
  84 
  85                 if (!bslab->slab && entry == -1)
  86                         entry = i;
  87                 else if (bslab->slab_size == sz) {
  88                         slab = bslab->slab;
  89                         bslab->slab_ref++;
  90                         break;
  91                 }
  92                 i++;
  93         }
  94 
  95         if (slab)
  96                 goto out_unlock;
  97 
  98         if (bio_slab_nr == bio_slab_max && entry == -1) {
  99                 new_bio_slab_max = bio_slab_max << 1;
 100                 new_bio_slabs = krealloc(bio_slabs,
 101                                          new_bio_slab_max * sizeof(struct bio_slab),
 102                                          GFP_KERNEL);
 103                 if (!new_bio_slabs)
 104                         goto out_unlock;
 105                 bio_slab_max = new_bio_slab_max;
 106                 bio_slabs = new_bio_slabs;
 107         }
 108         if (entry == -1)
 109                 entry = bio_slab_nr++;
 110 
 111         bslab = &bio_slabs[entry];
 112 
 113         snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
 114         slab = kmem_cache_create(bslab->name, sz, ARCH_KMALLOC_MINALIGN,
 115                                  SLAB_HWCACHE_ALIGN, NULL);
 116         if (!slab)
 117                 goto out_unlock;
 118 
 119         bslab->slab = slab;
 120         bslab->slab_ref = 1;
 121         bslab->slab_size = sz;
 122 out_unlock:
 123         mutex_unlock(&bio_slab_lock);
 124         return slab;
 125 }
 126 
 127 static void bio_put_slab(struct bio_set *bs)
 128 {
 129         struct bio_slab *bslab = NULL;
 130         unsigned int i;
 131 
 132         mutex_lock(&bio_slab_lock);
 133 
 134         for (i = 0; i < bio_slab_nr; i++) {
 135                 if (bs->bio_slab == bio_slabs[i].slab) {
 136                         bslab = &bio_slabs[i];
 137                         break;
 138                 }
 139         }
 140 
 141         if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
 142                 goto out;
 143 
 144         WARN_ON(!bslab->slab_ref);
 145 
 146         if (--bslab->slab_ref)
 147                 goto out;
 148 
 149         kmem_cache_destroy(bslab->slab);
 150         bslab->slab = NULL;
 151 
 152 out:
 153         mutex_unlock(&bio_slab_lock);
 154 }
 155 
 156 unsigned int bvec_nr_vecs(unsigned short idx)
 157 {
 158         return bvec_slabs[idx].nr_vecs;
 159 }
 160 
 161 void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
 162 {
 163         BIO_BUG_ON(idx >= BIOVEC_NR_POOLS);
 164 
 165         if (idx == BIOVEC_MAX_IDX)
 166                 mempool_free(bv, pool);
 167         else {
 168                 struct biovec_slab *bvs = bvec_slabs + idx;
 169 
 170                 kmem_cache_free(bvs->slab, bv);
 171         }
 172 }
 173 
 174 struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
 175                            mempool_t *pool)
 176 {
 177         struct bio_vec *bvl;
 178 
 179         /*
 180          * see comment near bvec_array define!
 181          */
 182         switch (nr) {
 183         case 1:
 184                 *idx = 0;
 185                 break;
 186         case 2 ... 4:
 187                 *idx = 1;
 188                 break;
 189         case 5 ... 16:
 190                 *idx = 2;
 191                 break;
 192         case 17 ... 64:
 193                 *idx = 3;
 194                 break;
 195         case 65 ... 128:
 196                 *idx = 4;
 197                 break;
 198         case 129 ... BIO_MAX_PAGES:
 199                 *idx = 5;
 200                 break;
 201         default:
 202                 return NULL;
 203         }
 204 
 205         /*
 206          * idx now points to the pool we want to allocate from. only the
 207          * 1-vec entry pool is mempool backed.
 208          */
 209         if (*idx == BIOVEC_MAX_IDX) {
 210 fallback:
 211                 bvl = mempool_alloc(pool, gfp_mask);
 212         } else {
 213                 struct biovec_slab *bvs = bvec_slabs + *idx;
 214                 gfp_t __gfp_mask = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_IO);
 215 
 216                 /*
 217                  * Make this allocation restricted and don't dump info on
 218                  * allocation failures, since we'll fallback to the mempool
 219                  * in case of failure.
 220                  */
 221                 __gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
 222 
 223                 /*
 224                  * Try a slab allocation. If this fails and __GFP_DIRECT_RECLAIM
 225                  * is set, retry with the 1-entry mempool
 226                  */
 227                 bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
 228                 if (unlikely(!bvl && (gfp_mask & __GFP_DIRECT_RECLAIM))) {
 229                         *idx = BIOVEC_MAX_IDX;
 230                         goto fallback;
 231                 }
 232         }
 233 
 234         return bvl;
 235 }
 236 
 237 static void __bio_free(struct bio *bio)
 238 {
 239         bio_disassociate_task(bio);
 240 
 241         if (bio_integrity(bio))
 242                 bio_integrity_free(bio);
 243 }
 244 
 245 static void bio_free(struct bio *bio)
 246 {
 247         struct bio_set *bs = bio->bi_pool;
 248         void *p;
 249 
 250         __bio_free(bio);
 251 
 252         if (bs) {
 253                 if (bio_flagged(bio, BIO_OWNS_VEC))
 254                         bvec_free(bs->bvec_pool, bio->bi_io_vec, BIO_POOL_IDX(bio));
 255 
 256                 /*
 257                  * If we have front padding, adjust the bio pointer before freeing
 258                  */
 259                 p = bio;
 260                 p -= bs->front_pad;
 261 
 262                 mempool_free(p, bs->bio_pool);
 263         } else {
 264                 /* Bio was allocated by bio_kmalloc() */
 265                 kfree(bio);
 266         }
 267 }
 268 
 269 void bio_init(struct bio *bio)
 270 {
 271         memset(bio, 0, sizeof(*bio));
 272         atomic_set(&bio->__bi_remaining, 1);
 273         atomic_set(&bio->__bi_cnt, 1);
 274 }
 275 EXPORT_SYMBOL(bio_init);
 276 
 277 /**
 278  * bio_reset - reinitialize a bio
 279  * @bio:        bio to reset
 280  *
 281  * Description:
 282  *   After calling bio_reset(), @bio will be in the same state as a freshly
 283  *   allocated bio returned by bio_alloc_bioset() - the only fields that are
 284  *   preserved are the ones that are initialized by bio_alloc_bioset(). See
 285  *   comment in struct bio.
 286  */
 287 void bio_reset(struct bio *bio)
 288 {
 289         unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);
 290 
 291         __bio_free(bio);
 292 
 293         memset(bio, 0, BIO_RESET_BYTES);
 294         bio->bi_flags = flags;
 295         atomic_set(&bio->__bi_remaining, 1);
 296 }
 297 EXPORT_SYMBOL(bio_reset);
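
/*
 * A minimal usage sketch (hypothetical caller, names are illustrative):
 * a driver that embeds a bio in a longer-lived structure can reuse it
 * across submissions with bio_reset(). Note that bi_max_vecs, bi_io_vec
 * and bi_pool sit above BIO_RESET_BYTES and therefore survive the reset.
 *
 *	bio_reset(&dev->flush_bio);		// dev: hypothetical driver state
 *	dev->flush_bio.bi_bdev = dev->bdev;
 *	dev->flush_bio.bi_end_io = my_flush_endio;	// hypothetical completion
 *	submit_bio(WRITE_FLUSH, &dev->flush_bio);
 */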
 298 
 299 static void bio_chain_endio(struct bio *bio)
 300 {
 301         struct bio *parent = bio->bi_private;
 302 
 303         parent->bi_error = bio->bi_error;
 304         bio_endio(parent);
 305         bio_put(bio);
 306 }
 307 
 308 /*
 309  * Increment chain count for the bio. Make sure the CHAIN flag update
 310  * is visible before the raised count.
 311  */
 312 static inline void bio_inc_remaining(struct bio *bio)
 313 {
 314         bio_set_flag(bio, BIO_CHAIN);
 315         smp_mb__before_atomic();
 316         atomic_inc(&bio->__bi_remaining);
 317 }
 318 
 319 /**
 320  * bio_chain - chain bio completions
 321  * @bio: the target bio
 322  * @parent: the @bio's parent bio
 323  *
 324  * The caller won't have a bi_end_io called when @bio completes - instead,
 325  * @parent's bi_end_io won't be called until both @parent and @bio have
 326  * completed; the chained bio will also be freed when it completes.
 327  *
 328  * The caller must not set bi_private or bi_end_io in @bio.
 329  */
 330 void bio_chain(struct bio *bio, struct bio *parent)
 331 {
 332         BUG_ON(bio->bi_private || bio->bi_end_io);
 333 
 334         bio->bi_private = parent;
 335         bio->bi_end_io  = bio_chain_endio;
 336         bio_inc_remaining(parent);
 337 }
 338 EXPORT_SYMBOL(bio_chain);
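
/*
 * A minimal usage sketch (hypothetical caller): a driver that must issue
 * a second bio as part of servicing @parent chains it, so that @parent's
 * bi_end_io only runs once both bios have completed.
 *
 *	struct bio *extra = bio_alloc(GFP_NOIO, 1);	// from fs_bio_set, won't fail
 *
 *	extra->bi_bdev = parent->bi_bdev;
 *	extra->bi_iter.bi_sector = sb_sector;		// hypothetical target
 *	bio_add_page(extra, sb_page, PAGE_SIZE, 0);	// hypothetical payload
 *	bio_chain(extra, parent);
 *	submit_bio(WRITE, extra);
 *	submit_bio(WRITE, parent);
 */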
 339 
 340 static void bio_alloc_rescue(struct work_struct *work)
 341 {
 342         struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
 343         struct bio *bio;
 344 
 345         while (1) {
 346                 spin_lock(&bs->rescue_lock);
 347                 bio = bio_list_pop(&bs->rescue_list);
 348                 spin_unlock(&bs->rescue_lock);
 349 
 350                 if (!bio)
 351                         break;
 352 
 353                 generic_make_request(bio);
 354         }
 355 }
 356 
 357 static void punt_bios_to_rescuer(struct bio_set *bs)
 358 {
 359         struct bio_list punt, nopunt;
 360         struct bio *bio;
 361 
 362         /*
 363          * In order to guarantee forward progress we must punt only bios that
 364          * were allocated from this bio_set; otherwise, if there was a bio on
 365          * there for a stacking driver higher up in the stack, processing it
 366          * could require allocating bios from this bio_set, and doing that from
 367          * our own rescuer would be bad.
 368          *
 369          * Since bio lists are singly linked, pop them all instead of trying to
 370          * remove from the middle of the list:
 371          */
 372 
 373         bio_list_init(&punt);
 374         bio_list_init(&nopunt);
 375 
 376         while ((bio = bio_list_pop(current->bio_list)))
 377                 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
 378 
 379         *current->bio_list = nopunt;
 380 
 381         spin_lock(&bs->rescue_lock);
 382         bio_list_merge(&bs->rescue_list, &punt);
 383         spin_unlock(&bs->rescue_lock);
 384 
 385         queue_work(bs->rescue_workqueue, &bs->rescue_work);
 386 }
 387 
 388 /**
 389  * bio_alloc_bioset - allocate a bio for I/O
 390  * @gfp_mask:   the GFP_ mask given to the slab allocator
 391  * @nr_iovecs:  number of iovecs to pre-allocate
 392  * @bs:         the bio_set to allocate from.
 393  *
 394  * Description:
 395  *   If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
 396  *   backed by the @bs's mempool.
 397  *
 398  *   When @bs is not NULL, if %__GFP_DIRECT_RECLAIM is set then bio_alloc will
 399  *   always be able to allocate a bio. This is due to the mempool guarantees.
 400  *   To make this work, callers must never allocate more than 1 bio at a time
 401  *   from this pool. Callers that need to allocate more than 1 bio must always
 402  *   submit the previously allocated bio for IO before attempting to allocate
 403  *   a new one. Failure to do so can cause deadlocks under memory pressure.
 404  *
 405  *   Note that when running under generic_make_request() (i.e. any block
 406  *   driver), bios are not submitted until after you return - see the code in
 407  *   generic_make_request() that converts recursion into iteration, to prevent
 408  *   stack overflows.
 409  *
 410  *   This would normally mean allocating multiple bios under
 411  *   generic_make_request() would be susceptible to deadlocks, but we have
 412  *   deadlock avoidance code that resubmits any blocked bios from a rescuer
 413  *   thread.
 414  *
 415  *   However, we do not guarantee forward progress for allocations from other
 416  *   mempools. Doing multiple allocations from the same mempool under
 417  *   generic_make_request() should be avoided - instead, use bio_set's front_pad
 418  *   for per bio allocations.
 419  *
 420  *   RETURNS:
 421  *   Pointer to new bio on success, NULL on failure.
 422  */
 423 struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
 424 {
 425         gfp_t saved_gfp = gfp_mask;
 426         unsigned front_pad;
 427         unsigned inline_vecs;
 428         unsigned long idx = BIO_POOL_NONE;
 429         struct bio_vec *bvl = NULL;
 430         struct bio *bio;
 431         void *p;
 432 
 433         if (!bs) {
 434                 if (nr_iovecs > UIO_MAXIOV)
 435                         return NULL;
 436 
 437                 p = kmalloc(sizeof(struct bio) +
 438                             nr_iovecs * sizeof(struct bio_vec),
 439                             gfp_mask);
 440                 front_pad = 0;
 441                 inline_vecs = nr_iovecs;
 442         } else {
 443                 /* should not use nobvec bioset for nr_iovecs > 0 */
 444                 if (WARN_ON_ONCE(!bs->bvec_pool && nr_iovecs > 0))
 445                         return NULL;
 446                 /*
 447                  * generic_make_request() converts recursion to iteration; this
 448                  * means if we're running beneath it, any bios we allocate and
 449                  * submit will not be submitted (and thus freed) until after we
 450                  * return.
 451                  *
 452                  * This exposes us to a potential deadlock if we allocate
 453                  * multiple bios from the same bio_set() while running
 454                  * underneath generic_make_request(). If we were to allocate
 455                  * multiple bios (say a stacking block driver that was splitting
 456                  * bios), we would deadlock if we exhausted the mempool's
 457                  * reserve.
 458                  *
 459                  * We solve this, and guarantee forward progress, with a rescuer
 460                  * workqueue per bio_set. If we go to allocate and there are
 461                  * bios on current->bio_list, we first try the allocation
 462                  * without __GFP_DIRECT_RECLAIM; if that fails, we punt those
 463                  * bios we would be blocking to the rescuer workqueue before
 464                  * we retry with the original gfp_flags.
 465                  */
 466 
 467                 if (current->bio_list && !bio_list_empty(current->bio_list))
 468                         gfp_mask &= ~__GFP_DIRECT_RECLAIM;
 469 
 470                 p = mempool_alloc(bs->bio_pool, gfp_mask);
 471                 if (!p && gfp_mask != saved_gfp) {
 472                         punt_bios_to_rescuer(bs);
 473                         gfp_mask = saved_gfp;
 474                         p = mempool_alloc(bs->bio_pool, gfp_mask);
 475                 }
 476 
 477                 front_pad = bs->front_pad;
 478                 inline_vecs = BIO_INLINE_VECS;
 479         }
 480 
 481         if (unlikely(!p))
 482                 return NULL;
 483 
 484         bio = p + front_pad;
 485         bio_init(bio);
 486 
 487         if (nr_iovecs > inline_vecs) {
 488                 bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
 489                 if (!bvl && gfp_mask != saved_gfp) {
 490                         punt_bios_to_rescuer(bs);
 491                         gfp_mask = saved_gfp;
 492                         bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
 493                 }
 494 
 495                 if (unlikely(!bvl))
 496                         goto err_free;
 497 
 498                 bio_set_flag(bio, BIO_OWNS_VEC);
 499         } else if (nr_iovecs) {
 500                 bvl = bio->bi_inline_vecs;
 501         }
 502 
 503         bio->bi_pool = bs;
 504         bio->bi_flags |= idx << BIO_POOL_OFFSET;
 505         bio->bi_max_vecs = nr_iovecs;
 506         bio->bi_io_vec = bvl;
 507         return bio;
 508 
 509 err_free:
 510         mempool_free(p, bs->bio_pool);
 511         return NULL;
 512 }
 513 EXPORT_SYMBOL(bio_alloc_bioset);
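
/*
 * A minimal usage sketch (hypothetical caller): allocate from a private
 * bio_set and submit each bio before allocating the next, so the mempool
 * guarantee described above really does provide forward progress.
 *
 *	struct bio_set *my_bs = bioset_create(BIO_POOL_SIZE, 0);  // done once, at init
 *
 *	while (work_left) {
 *		struct bio *bio = bio_alloc_bioset(GFP_NOIO, 1, my_bs);
 *
 *		// GFP_NOIO includes __GFP_DIRECT_RECLAIM, so bio is never NULL
 *		setup_and_submit(bio);	// hypothetical: submit before the next alloc
 *	}
 */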
 514 
 515 void zero_fill_bio(struct bio *bio)
 516 {
 517         unsigned long flags;
 518         struct bio_vec bv;
 519         struct bvec_iter iter;
 520 
 521         bio_for_each_segment(bv, bio, iter) {
 522                 char *data = bvec_kmap_irq(&bv, &flags);
 523                 memset(data, 0, bv.bv_len);
 524                 flush_dcache_page(bv.bv_page);
 525                 bvec_kunmap_irq(data, &flags);
 526         }
 527 }
 528 EXPORT_SYMBOL(zero_fill_bio);
 529 
 530 /**
 531  * bio_put - release a reference to a bio
 532  * @bio:   bio to release reference to
 533  *
 534  * Description:
 535  *   Put a reference to a &struct bio, either one you have gotten with
 536  *   bio_alloc, bio_get or bio_clone. The last put of a bio will free it.
 537  **/
 538 void bio_put(struct bio *bio)
 539 {
 540         if (!bio_flagged(bio, BIO_REFFED))
 541                 bio_free(bio);
 542         else {
 543                 BIO_BUG_ON(!atomic_read(&bio->__bi_cnt));
 544 
 545                 /*
 546                  * last put frees it
 547                  */
 548                 if (atomic_dec_and_test(&bio->__bi_cnt))
 549                         bio_free(bio);
 550         }
 551 }
 552 EXPORT_SYMBOL(bio_put);
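
/*
 * A minimal sketch (hypothetical caller): take an extra reference when the
 * bio must stay valid after submission, since bi_end_io may otherwise free
 * it before the submitter gets to look at it again.
 *
 *	bio_get(bio);			// marks the bio BIO_REFFED
 *	submit_bio(READ, bio);
 *	wait_for_my_completion();	// hypothetical; bio still valid here
 *	bio_put(bio);			// last put frees it
 */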
 553 
 554 inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
 555 {
 556         if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
 557                 blk_recount_segments(q, bio);
 558 
 559         return bio->bi_phys_segments;
 560 }
 561 EXPORT_SYMBOL(bio_phys_segments);
 562 
 563 /**
 564  *      __bio_clone_fast - clone a bio that shares the original bio's biovec
 565  *      @bio: destination bio
 566  *      @bio_src: bio to clone
 567  *
 568  *      Clone a &bio. Caller will own the returned bio, but not
 569  *      the actual data it points to. Reference count of returned
 570  *      bio will be one.
 571  *
 572  *      Caller must ensure that @bio_src is not freed before @bio.
 573  */
 574 void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
 575 {
 576         BUG_ON(bio->bi_pool && BIO_POOL_IDX(bio) != BIO_POOL_NONE);
 577 
 578         /*
 579          * most users will be overriding ->bi_bdev with a new target,
 580          * so we don't set nor calculate new physical/hw segment counts here
 581          */
 582         bio->bi_bdev = bio_src->bi_bdev;
 583         bio_set_flag(bio, BIO_CLONED);
 584         bio->bi_rw = bio_src->bi_rw;
 585         bio->bi_iter = bio_src->bi_iter;
 586         bio->bi_io_vec = bio_src->bi_io_vec;
 587 }
 588 EXPORT_SYMBOL(__bio_clone_fast);
 589 
 590 /**
 591  *      bio_clone_fast - clone a bio that shares the original bio's biovec
 592  *      @bio: bio to clone
 593  *      @gfp_mask: allocation priority
 594  *      @bs: bio_set to allocate from
 595  *
 596  *      Like __bio_clone_fast, only also allocates the returned bio
 597  */
 598 struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
 599 {
 600         struct bio *b;
 601 
 602         b = bio_alloc_bioset(gfp_mask, 0, bs);
 603         if (!b)
 604                 return NULL;
 605 
 606         __bio_clone_fast(b, bio);
 607 
 608         if (bio_integrity(bio)) {
 609                 int ret;
 610 
 611                 ret = bio_integrity_clone(b, bio, gfp_mask);
 612 
 613                 if (ret < 0) {
 614                         bio_put(b);
 615                         return NULL;
 616                 }
 617         }
 618 
 619         return b;
 620 }
 621 EXPORT_SYMBOL(bio_clone_fast);
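
/*
 * A minimal usage sketch (hypothetical stacking driver): clone the bio,
 * point the clone at the backing device and remap the sector. The clone
 * shares @bio's biovec, so @bio must outlive it.
 *
 *	struct bio *clone = bio_clone_fast(bio, GFP_NOIO, my_bs);  // my_bs: driver bio_set
 *
 *	clone->bi_bdev = backing_bdev;			// hypothetical target
 *	clone->bi_iter.bi_sector += data_offset;	// hypothetical remap
 *	clone->bi_end_io = my_clone_endio;		// hypothetical completion
 *	clone->bi_private = bio;
 *	generic_make_request(clone);
 */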
 622 
 623 /**
 624  *      bio_clone_bioset - clone a bio
 625  *      @bio_src: bio to clone
 626  *      @gfp_mask: allocation priority
 627  *      @bs: bio_set to allocate from
 628  *
 629  *      Clone bio. Caller will own the returned bio, but not the actual data it
 630  *      points to. Reference count of returned bio will be one.
 631  */
 632 struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
 633                              struct bio_set *bs)
 634 {
 635         struct bvec_iter iter;
 636         struct bio_vec bv;
 637         struct bio *bio;
 638 
 639         /*
 640          * Pre immutable biovecs, __bio_clone() used to just do a memcpy from
 641          * bio_src->bi_io_vec to bio->bi_io_vec.
 642          *
 643          * We can't do that anymore, because:
 644          *
 645          *  - The point of cloning the biovec is to produce a bio with a biovec
 646          *    the caller can modify: bi_idx and bi_bvec_done should be 0.
 647          *
 648          *  - The original bio could've had more than BIO_MAX_PAGES biovecs; if
 649          *    we tried to clone the whole thing bio_alloc_bioset() would fail.
 650          *    But the clone should succeed as long as the number of biovecs we
 651          *    actually need to allocate is fewer than BIO_MAX_PAGES.
 652          *
 653          *  - Lastly, bi_vcnt should not be looked at or relied upon by code
 654          *    that does not own the bio - reason being drivers don't use it for
 655          *    iterating over the biovec anymore, so expecting it to be kept up
 656          *    to date (i.e. for clones that share the parent biovec) is just
 657          *    asking for trouble and would force extra work on
 658          *    __bio_clone_fast() anyways.
 659          */
 660 
 661         bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
 662         if (!bio)
 663                 return NULL;
 664 
 665         bio->bi_bdev            = bio_src->bi_bdev;
 666         bio->bi_rw              = bio_src->bi_rw;
 667         bio->bi_iter.bi_sector  = bio_src->bi_iter.bi_sector;
 668         bio->bi_iter.bi_size    = bio_src->bi_iter.bi_size;
 669 
 670         if (bio->bi_rw & REQ_DISCARD)
 671                 goto integrity_clone;
 672 
 673         if (bio->bi_rw & REQ_WRITE_SAME) {
 674                 bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
 675                 goto integrity_clone;
 676         }
 677 
 678         bio_for_each_segment(bv, bio_src, iter)
 679                 bio->bi_io_vec[bio->bi_vcnt++] = bv;
 680 
 681 integrity_clone:
 682         if (bio_integrity(bio_src)) {
 683                 int ret;
 684 
 685                 ret = bio_integrity_clone(bio, bio_src, gfp_mask);
 686                 if (ret < 0) {
 687                         bio_put(bio);
 688                         return NULL;
 689                 }
 690         }
 691 
 692         return bio;
 693 }
 694 EXPORT_SYMBOL(bio_clone_bioset);
 695 
 696 /**
 697  *      bio_add_pc_page -       attempt to add page to bio
 698  *      @q: the target queue
 699  *      @bio: destination bio
 700  *      @page: page to add
 701  *      @len: vec entry length
 702  *      @offset: vec entry offset
 703  *
 704  *      Attempt to add a page to the bio_vec maplist. This can fail for a
 705  *      number of reasons, such as the bio being full or target block device
 706  *      limitations. The target block device must allow bios up to PAGE_SIZE,
 707  *      so it is always possible to add a single page to an empty bio.
 708  *
 709  *      This should only be used by REQ_PC bios.
 710  */
 711 int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
 712                     *page, unsigned int len, unsigned int offset)
 713 {
 714         int retried_segments = 0;
 715         struct bio_vec *bvec;
 716 
 717         /*
 718          * cloned bio must not modify vec list
 719          */
 720         if (unlikely(bio_flagged(bio, BIO_CLONED)))
 721                 return 0;
 722 
 723         if (((bio->bi_iter.bi_size + len) >> 9) > queue_max_hw_sectors(q))
 724                 return 0;
 725 
 726         /*
 727          * For filesystems with a blocksize smaller than the pagesize
 728          * we will often be called with the same page as last time and
 729          * a consecutive offset.  Optimize this special case.
 730          */
 731         if (bio->bi_vcnt > 0) {
 732                 struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
 733 
 734                 if (page == prev->bv_page &&
 735                     offset == prev->bv_offset + prev->bv_len) {
 736                         prev->bv_len += len;
 737                         bio->bi_iter.bi_size += len;
 738                         goto done;
 739                 }
 740 
 741                 /*
 742                  * If the queue doesn't support SG gaps and adding this
 743                  * offset would create a gap, disallow it.
 744                  */
 745                 if (bvec_gap_to_prev(q, prev, offset))
 746                         return 0;
 747         }
 748 
 749         if (bio->bi_vcnt >= bio->bi_max_vecs)
 750                 return 0;
 751 
 752         /*
 753          * setup the new entry, we might clear it again later if we
 754          * cannot add the page
 755          */
 756         bvec = &bio->bi_io_vec[bio->bi_vcnt];
 757         bvec->bv_page = page;
 758         bvec->bv_len = len;
 759         bvec->bv_offset = offset;
 760         bio->bi_vcnt++;
 761         bio->bi_phys_segments++;
 762         bio->bi_iter.bi_size += len;
 763 
 764         /*
 765          * Perform a recount if the number of segments is greater
 766          * than queue_max_segments(q).
 767          */
 768 
 769         while (bio->bi_phys_segments > queue_max_segments(q)) {
 770 
 771                 if (retried_segments)
 772                         goto failed;
 773 
 774                 retried_segments = 1;
 775                 blk_recount_segments(q, bio);
 776         }
 777 
 778         /* If we may be able to merge these biovecs, force a recount */
 779         if (bio->bi_vcnt > 1 && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec)))
 780                 bio_clear_flag(bio, BIO_SEG_VALID);
 781 
 782  done:
 783         return len;
 784 
 785  failed:
 786         bvec->bv_page = NULL;
 787         bvec->bv_len = 0;
 788         bvec->bv_offset = 0;
 789         bio->bi_vcnt--;
 790         bio->bi_iter.bi_size -= len;
 791         blk_recount_segments(q, bio);
 792         return 0;
 793 }
 794 EXPORT_SYMBOL(bio_add_pc_page);
 795 
 796 /**
 797  *      bio_add_page    -       attempt to add page to bio
 798  *      @bio: destination bio
 799  *      @page: page to add
 800  *      @len: vec entry length
 801  *      @offset: vec entry offset
 802  *
 803  *      Attempt to add a page to the bio_vec maplist. This will only fail
 804  *      if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
 805  */
 806 int bio_add_page(struct bio *bio, struct page *page,
 807                  unsigned int len, unsigned int offset)
 808 {
 809         struct bio_vec *bv;
 810 
 811         /*
 812          * cloned bio must not modify vec list
 813          */
 814         if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
 815                 return 0;
 816 
 817         /*
 818          * For filesystems with a blocksize smaller than the pagesize
 819          * we will often be called with the same page as last time and
 820          * a consecutive offset.  Optimize this special case.
 821          */
 822         if (bio->bi_vcnt > 0) {
 823                 bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
 824 
 825                 if (page == bv->bv_page &&
 826                     offset == bv->bv_offset + bv->bv_len) {
 827                         bv->bv_len += len;
 828                         goto done;
 829                 }
 830         }
 831 
 832         if (bio->bi_vcnt >= bio->bi_max_vecs)
 833                 return 0;
 834 
 835         bv              = &bio->bi_io_vec[bio->bi_vcnt];
 836         bv->bv_page     = page;
 837         bv->bv_len      = len;
 838         bv->bv_offset   = offset;
 839 
 840         bio->bi_vcnt++;
 841 done:
 842         bio->bi_iter.bi_size += len;
 843         return len;
 844 }
 845 EXPORT_SYMBOL(bio_add_page);
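
/*
 * A minimal usage sketch (hypothetical caller): fill a bio one page at a
 * time, stopping when the biovec is full.
 *
 *	struct bio *bio = bio_alloc(GFP_KERNEL, nr_pages);
 *	int i;
 *
 *	for (i = 0; i < nr_pages; i++)
 *		if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) < PAGE_SIZE)
 *			break;		// full: submit this bio, continue with a new one
 */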
 846 
 847 struct submit_bio_ret {
 848         struct completion event;
 849         int error;
 850 };
 851 
 852 static void submit_bio_wait_endio(struct bio *bio)
 853 {
 854         struct submit_bio_ret *ret = bio->bi_private;
 855 
 856         ret->error = bio->bi_error;
 857         complete(&ret->event);
 858 }
 859 
 860 /**
 861  * submit_bio_wait - submit a bio, and wait until it completes
 862  * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
 863  * @bio: The &struct bio which describes the I/O
 864  *
 865  * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
 866  * bio_endio() on failure.
 867  */
 868 int submit_bio_wait(int rw, struct bio *bio)
 869 {
 870         struct submit_bio_ret ret;
 871 
 872         rw |= REQ_SYNC;
 873         init_completion(&ret.event);
 874         bio->bi_private = &ret;
 875         bio->bi_end_io = submit_bio_wait_endio;
 876         submit_bio(rw, bio);
 877         wait_for_completion(&ret.event);
 878 
 879         return ret.error;
 880 }
 881 EXPORT_SYMBOL(submit_bio_wait);
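
/*
 * A minimal usage sketch (hypothetical caller): synchronously read one
 * page at @sector from @bdev.
 *
 *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *	int err;
 *
 *	bio->bi_bdev = bdev;
 *	bio->bi_iter.bi_sector = sector;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	err = submit_bio_wait(READ, bio);	// sleeps until bi_end_io fires
 *	bio_put(bio);
 */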
 882 
 883 /**
 884  * bio_advance - increment/complete a bio by some number of bytes
 885  * @bio:        bio to advance
 886  * @bytes:      number of bytes to complete
 887  *
 888  * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
 889  * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
 890  * be updated on the last bvec as well.
 891  *
 892  * @bio will then represent the remaining, uncompleted portion of the io.
 893  */
 894 void bio_advance(struct bio *bio, unsigned bytes)
 895 {
 896         if (bio_integrity(bio))
 897                 bio_integrity_advance(bio, bytes);
 898 
 899         bio_advance_iter(bio, &bio->bi_iter, bytes);
 900 }
 901 EXPORT_SYMBOL(bio_advance);
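
/*
 * A minimal sketch (hypothetical driver): after transferring @done bytes
 * of the bio, advance it so it describes only the untransferred remainder,
 * and complete it once nothing is left.
 *
 *	bio_advance(bio, done);
 *	if (!bio->bi_iter.bi_size)
 *		bio_endio(bio);
 */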
 902 
 903 /**
 904  * bio_alloc_pages - allocates a single page for each bvec in a bio
 905  * @bio: bio to allocate pages for
 906  * @gfp_mask: flags for allocation
 907  *
 908  * Allocates pages up to @bio->bi_vcnt.
 909  *
 910  * Returns 0 on success, -ENOMEM on failure. On failure, any allocated pages are
 911  * freed.
 912  */
 913 int bio_alloc_pages(struct bio *bio, gfp_t gfp_mask)
 914 {
 915         int i;
 916         struct bio_vec *bv;
 917 
 918         bio_for_each_segment_all(bv, bio, i) {
 919                 bv->bv_page = alloc_page(gfp_mask);
 920                 if (!bv->bv_page) {
 921                         while (--bv >= bio->bi_io_vec)
 922                                 __free_page(bv->bv_page);
 923                         return -ENOMEM;
 924                 }
 925         }
 926 
 927         return 0;
 928 }
 929 EXPORT_SYMBOL(bio_alloc_pages);
 930 
 931 /**
 932  * bio_copy_data - copy contents of data buffers from one chain of bios to
 933  * another
 934  * @src: source bio list
 935  * @dst: destination bio list
 936  *
 937  * If @src and @dst are single bios, bi_next must be NULL - otherwise, treats
 938  * @src and @dst as linked lists of bios.
 939  *
 940  * Stops when it reaches the end of either @src or @dst - that is, copies
 941  * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
 942  */
 943 void bio_copy_data(struct bio *dst, struct bio *src)
 944 {
 945         struct bvec_iter src_iter, dst_iter;
 946         struct bio_vec src_bv, dst_bv;
 947         void *src_p, *dst_p;
 948         unsigned bytes;
 949 
 950         src_iter = src->bi_iter;
 951         dst_iter = dst->bi_iter;
 952 
 953         while (1) {
 954                 if (!src_iter.bi_size) {
 955                         src = src->bi_next;
 956                         if (!src)
 957                                 break;
 958 
 959                         src_iter = src->bi_iter;
 960                 }
 961 
 962                 if (!dst_iter.bi_size) {
 963                         dst = dst->bi_next;
 964                         if (!dst)
 965                                 break;
 966 
 967                         dst_iter = dst->bi_iter;
 968                 }
 969 
 970                 src_bv = bio_iter_iovec(src, src_iter);
 971                 dst_bv = bio_iter_iovec(dst, dst_iter);
 972 
 973                 bytes = min(src_bv.bv_len, dst_bv.bv_len);
 974 
 975                 src_p = kmap_atomic(src_bv.bv_page);
 976                 dst_p = kmap_atomic(dst_bv.bv_page);
 977 
 978                 memcpy(dst_p + dst_bv.bv_offset,
 979                        src_p + src_bv.bv_offset,
 980                        bytes);
 981 
 982                 kunmap_atomic(dst_p);
 983                 kunmap_atomic(src_p);
 984 
 985                 bio_advance_iter(src, &src_iter, bytes);
 986                 bio_advance_iter(dst, &dst_iter, bytes);
 987         }
 988 }
 989 EXPORT_SYMBOL(bio_copy_data);
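
/*
 * A minimal usage sketch (hypothetical bounce path): clone an incoming
 * write, back the clone with freshly allocated pages and copy the payload
 * across, so the original pages can be released before the clone finishes.
 *
 *	struct bio *clone = bio_clone_bioset(bio, GFP_NOIO, my_bs);  // my_bs: private bio_set
 *
 *	if (bio_alloc_pages(clone, GFP_NOIO)) {
 *		bio_put(clone);
 *		return -ENOMEM;
 *	}
 *	bio_copy_data(clone, bio);	// dst first: clone now holds a private copy
 */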
 990 
 991 struct bio_map_data {
 992         int is_our_pages;
 993         struct iov_iter iter;
 994         struct iovec iov[];
 995 };
 996 
 997 static struct bio_map_data *bio_alloc_map_data(unsigned int iov_count,
 998                                                gfp_t gfp_mask)
 999 {
1000         if (iov_count > UIO_MAXIOV)
1001                 return NULL;
1002 
1003         return kmalloc(sizeof(struct bio_map_data) +
1004                        sizeof(struct iovec) * iov_count, gfp_mask);
1005 }
1006 
1007 /**
1008  * bio_copy_from_iter - copy all pages from iov_iter to bio
1009  * @bio: The &struct bio which describes the I/O as destination
1010  * @iter: iov_iter as source
1011  *
1012  * Copy all pages from iov_iter to bio.
1013  * Returns 0 on success, or error on failure.
1014  */
1015 static int bio_copy_from_iter(struct bio *bio, struct iov_iter iter)
1016 {
1017         int i;
1018         struct bio_vec *bvec;
1019 
1020         bio_for_each_segment_all(bvec, bio, i) {
1021                 ssize_t ret;
1022 
1023                 ret = copy_page_from_iter(bvec->bv_page,
1024                                           bvec->bv_offset,
1025                                           bvec->bv_len,
1026                                           &iter);
1027 
1028                 if (!iov_iter_count(&iter))
1029                         break;
1030 
1031                 if (ret < bvec->bv_len)
1032                         return -EFAULT;
1033         }
1034 
1035         return 0;
1036 }
1037 
1038 /**
1039  * bio_copy_to_iter - copy all pages from bio to iov_iter
1040  * @bio: The &struct bio which describes the I/O as source
1041  * @iter: iov_iter as destination
1042  *
1043  * Copy all pages from bio to iov_iter.
1044  * Returns 0 on success, or error on failure.
1045  */
1046 static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
1047 {
1048         int i;
1049         struct bio_vec *bvec;
1050 
1051         bio_for_each_segment_all(bvec, bio, i) {
1052                 ssize_t ret;
1053 
1054                 ret = copy_page_to_iter(bvec->bv_page,
1055                                         bvec->bv_offset,
1056                                         bvec->bv_len,
1057                                         &iter);
1058 
1059                 if (!iov_iter_count(&iter))
1060                         break;
1061 
1062                 if (ret < bvec->bv_len)
1063                         return -EFAULT;
1064         }
1065 
1066         return 0;
1067 }
1068 
1069 static void bio_free_pages(struct bio *bio)
1070 {
1071         struct bio_vec *bvec;
1072         int i;
1073 
1074         bio_for_each_segment_all(bvec, bio, i)
1075                 __free_page(bvec->bv_page);
1076 }
1077 
1078 /**
1079  *      bio_uncopy_user -       finish previously mapped bio
1080  *      @bio: bio being terminated
1081  *
1082  *      Free pages allocated from bio_copy_user_iov() and write back data
1083  *      to user space in case of a read.
1084  */
1085 int bio_uncopy_user(struct bio *bio)
1086 {
1087         struct bio_map_data *bmd = bio->bi_private;
1088         int ret = 0;
1089 
1090         if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
1091                 /*
1092                  * if we're in a workqueue, the request is orphaned, so
1093                  * don't copy into a random user address space, just free.
1094                  */
1095                 if (current->mm && bio_data_dir(bio) == READ)
1096                         ret = bio_copy_to_iter(bio, bmd->iter);
1097                 if (bmd->is_our_pages)
1098                         bio_free_pages(bio);
1099         }
1100         kfree(bmd);
1101         bio_put(bio);
1102         return ret;
1103 }
1104 EXPORT_SYMBOL(bio_uncopy_user);
1105 
1106 /**
1107  *      bio_copy_user_iov       -       copy user data to bio
1108  *      @q:             destination block queue
1109  *      @map_data:      pointer to the rq_map_data holding pages (if necessary)
1110  *      @iter:          iovec iterator
1111  *      @gfp_mask:      memory allocation flags
1112  *
1113  *      Prepares and returns a bio for indirect user io, bouncing data
1114  *      to/from kernel pages as necessary. Must be paired with
 1115  *      a call to bio_uncopy_user() on io completion.
1116  */
1117 struct bio *bio_copy_user_iov(struct request_queue *q,
1118                               struct rq_map_data *map_data,
1119                               const struct iov_iter *iter,
1120                               gfp_t gfp_mask)
1121 {
1122         struct bio_map_data *bmd;
1123         struct page *page;
1124         struct bio *bio;
1125         int i, ret;
1126         int nr_pages = 0;
1127         unsigned int len = iter->count;
1128         unsigned int offset = map_data ? map_data->offset & ~PAGE_MASK : 0;
1129 
1130         for (i = 0; i < iter->nr_segs; i++) {
1131                 unsigned long uaddr;
1132                 unsigned long end;
1133                 unsigned long start;
1134 
1135                 uaddr = (unsigned long) iter->iov[i].iov_base;
1136                 end = (uaddr + iter->iov[i].iov_len + PAGE_SIZE - 1)
1137                         >> PAGE_SHIFT;
1138                 start = uaddr >> PAGE_SHIFT;
1139 
1140                 /*
1141                  * Overflow, abort
1142                  */
1143                 if (end < start)
1144                         return ERR_PTR(-EINVAL);
1145 
1146                 nr_pages += end - start;
1147         }
1148 
1149         if (offset)
1150                 nr_pages++;
1151 
1152         bmd = bio_alloc_map_data(iter->nr_segs, gfp_mask);
1153         if (!bmd)
1154                 return ERR_PTR(-ENOMEM);
1155 
1156         /*
1157          * We need to do a deep copy of the iov_iter including the iovecs.
1158          * The caller provided iov might point to an on-stack or otherwise
1159          * shortlived one.
1160          */
1161         bmd->is_our_pages = map_data ? 0 : 1;
1162         memcpy(bmd->iov, iter->iov, sizeof(struct iovec) * iter->nr_segs);
1163         iov_iter_init(&bmd->iter, iter->type, bmd->iov,
1164                         iter->nr_segs, iter->count);
1165 
1166         ret = -ENOMEM;
1167         bio = bio_kmalloc(gfp_mask, nr_pages);
1168         if (!bio)
1169                 goto out_bmd;
1170 
1171         if (iter->type & WRITE)
1172                 bio->bi_rw |= REQ_WRITE;
1173 
1174         ret = 0;
1175 
1176         if (map_data) {
1177                 nr_pages = 1 << map_data->page_order;
1178                 i = map_data->offset / PAGE_SIZE;
1179         }
1180         while (len) {
1181                 unsigned int bytes = PAGE_SIZE;
1182 
1183                 bytes -= offset;
1184 
1185                 if (bytes > len)
1186                         bytes = len;
1187 
1188                 if (map_data) {
1189                         if (i == map_data->nr_entries * nr_pages) {
1190                                 ret = -ENOMEM;
1191                                 break;
1192                         }
1193 
1194                         page = map_data->pages[i / nr_pages];
1195                         page += (i % nr_pages);
1196 
1197                         i++;
1198                 } else {
1199                         page = alloc_page(q->bounce_gfp | gfp_mask);
1200                         if (!page) {
1201                                 ret = -ENOMEM;
1202                                 break;
1203                         }
1204                 }
1205 
1206                 if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
1207                         break;
1208 
1209                 len -= bytes;
1210                 offset = 0;
1211         }
1212 
1213         if (ret)
1214                 goto cleanup;
1215 
1216         /*
1217          * success
1218          */
1219         if (((iter->type & WRITE) && (!map_data || !map_data->null_mapped)) ||
1220             (map_data && map_data->from_user)) {
1221                 ret = bio_copy_from_iter(bio, *iter);
1222                 if (ret)
1223                         goto cleanup;
1224         }
1225 
1226         bio->bi_private = bmd;
1227         return bio;
1228 cleanup:
1229         if (!map_data)
1230                 bio_free_pages(bio);
1231         bio_put(bio);
1232 out_bmd:
1233         kfree(bmd);
1234         return ERR_PTR(ret);
1235 }
1236 
1237 /**
1238  *      bio_map_user_iov - map user iovec into bio
1239  *      @q:             the struct request_queue for the bio
1240  *      @iter:          iovec iterator
1241  *      @gfp_mask:      memory allocation flags
1242  *
1243  *      Map the user space address into a bio suitable for io to a block
1244  *      device. Returns an error pointer in case of error.
1245  */
1246 struct bio *bio_map_user_iov(struct request_queue *q,
1247                              const struct iov_iter *iter,
1248                              gfp_t gfp_mask)
1249 {
1250         int j;
1251         int nr_pages = 0;
1252         struct page **pages;
1253         struct bio *bio;
1254         int cur_page = 0;
1255         int ret, offset;
1256         struct iov_iter i;
1257         struct iovec iov;
1258 
1259         iov_for_each(iov, i, *iter) {
1260                 unsigned long uaddr = (unsigned long) iov.iov_base;
1261                 unsigned long len = iov.iov_len;
1262                 unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1263                 unsigned long start = uaddr >> PAGE_SHIFT;
1264 
1265                 /*
1266                  * Overflow, abort
1267                  */
1268                 if (end < start)
1269                         return ERR_PTR(-EINVAL);
1270 
1271                 nr_pages += end - start;
1272                 /*
1273                  * buffer must be aligned to at least hardsector size for now
1274                  */
1275                 if (uaddr & queue_dma_alignment(q))
1276                         return ERR_PTR(-EINVAL);
1277         }
1278 
1279         if (!nr_pages)
1280                 return ERR_PTR(-EINVAL);
1281 
1282         bio = bio_kmalloc(gfp_mask, nr_pages);
1283         if (!bio)
1284                 return ERR_PTR(-ENOMEM);
1285 
1286         ret = -ENOMEM;
1287         pages = kcalloc(nr_pages, sizeof(struct page *), gfp_mask);
1288         if (!pages)
1289                 goto out;
1290 
1291         iov_for_each(iov, i, *iter) {
1292                 unsigned long uaddr = (unsigned long) iov.iov_base;
1293                 unsigned long len = iov.iov_len;
1294                 unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1295                 unsigned long start = uaddr >> PAGE_SHIFT;
1296                 const int local_nr_pages = end - start;
1297                 const int page_limit = cur_page + local_nr_pages;
1298 
1299                 ret = get_user_pages_fast(uaddr, local_nr_pages,
1300                                 (iter->type & WRITE) != WRITE,
1301                                 &pages[cur_page]);
1302                 if (ret < local_nr_pages) {
1303                         ret = -EFAULT;
1304                         goto out_unmap;
1305                 }
1306 
1307                 offset = uaddr & ~PAGE_MASK;
1308                 for (j = cur_page; j < page_limit; j++) {
1309                         unsigned int bytes = PAGE_SIZE - offset;
1310 
1311                         if (len <= 0)
1312                                 break;
1313
1314                         if (bytes > len)
1315                                 bytes = len;
1316 
1317                         /*
1318                          * sorry...
1319                          */
1320                         if (bio_add_pc_page(q, bio, pages[j], bytes, offset) <
1321                                             bytes)
1322                                 break;
1323 
1324                         len -= bytes;
1325                         offset = 0;
1326                 }
1327 
1328                 cur_page = j;
1329                 /*
1330                  * release the pages we didn't map into the bio, if any
1331                  */
1332                 while (j < page_limit)
1333                         page_cache_release(pages[j++]);
1334         }
1335 
1336         kfree(pages);
1337 
1338         /*
1339          * set data direction, and check if mapped pages need bouncing
1340          */
1341         if (iter->type & WRITE)
1342                 bio->bi_rw |= REQ_WRITE;
1343 
1344         bio_set_flag(bio, BIO_USER_MAPPED);
1345 
1346         /*
1347          * subtle -- if __bio_map_user() ended up bouncing a bio,
1348          * it would normally disappear when its bi_end_io is run.
1349          * however, we need it for the unmap, so grab an extra
1350          * reference to it
1351          */
1352         bio_get(bio);
1353         return bio;
1354 
1355  out_unmap:
1356         for (j = 0; j < nr_pages; j++) {
1357                 if (!pages[j])
1358                         break;
1359                 page_cache_release(pages[j]);
1360         }
1361  out:
1362         kfree(pages);
1363         bio_put(bio);
1364         return ERR_PTR(ret);
1365 }
1366 
1367 static void __bio_unmap_user(struct bio *bio)
1368 {
1369         struct bio_vec *bvec;
1370         int i;
1371 
1372         /*
1373          * make sure we dirty pages we wrote to
1374          */
1375         bio_for_each_segment_all(bvec, bio, i) {
1376                 if (bio_data_dir(bio) == READ)
1377                         set_page_dirty_lock(bvec->bv_page);
1378 
1379                 page_cache_release(bvec->bv_page);
1380         }
1381 
1382         bio_put(bio);
1383 }
1384 
1385 /**
1386  *      bio_unmap_user  -       unmap a bio
1387  *      @bio:           the bio being unmapped
1388  *
 1389  *      Unmap a bio previously mapped by bio_map_user(). Must be called
 1390  *      from process context.
1391  *
1392  *      bio_unmap_user() may sleep.
1393  */
1394 void bio_unmap_user(struct bio *bio)
1395 {
1396         __bio_unmap_user(bio);
1397         bio_put(bio);
1398 }
1399 EXPORT_SYMBOL(bio_unmap_user);
1400 
1401 static void bio_map_kern_endio(struct bio *bio)
1402 {
1403         bio_put(bio);
1404 }
1405 
1406 /**
1407  *      bio_map_kern    -       map kernel address into bio
1408  *      @q: the struct request_queue for the bio
1409  *      @data: pointer to buffer to map
1410  *      @len: length in bytes
1411  *      @gfp_mask: allocation flags for bio allocation
1412  *
1413  *      Map the kernel address into a bio suitable for io to a block
1414  *      device. Returns an error pointer in case of error.
1415  */
1416 struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
1417                          gfp_t gfp_mask)
1418 {
1419         unsigned long kaddr = (unsigned long)data;
1420         unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1421         unsigned long start = kaddr >> PAGE_SHIFT;
1422         const int nr_pages = end - start;
1423         int offset, i;
1424         struct bio *bio;
1425 
1426         bio = bio_kmalloc(gfp_mask, nr_pages);
1427         if (!bio)
1428                 return ERR_PTR(-ENOMEM);
1429 
1430         offset = offset_in_page(kaddr);
1431         for (i = 0; i < nr_pages; i++) {
1432                 unsigned int bytes = PAGE_SIZE - offset;
1433 
1434                 if (len <= 0)
1435                         break;
1436 
1437                 if (bytes > len)
1438                         bytes = len;
1439 
1440                 if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
1441                                     offset) < bytes) {
1442                         /* we don't support partial mappings */
1443                         bio_put(bio);
1444                         return ERR_PTR(-EINVAL);
1445                 }
1446 
1447                 data += bytes;
1448                 len -= bytes;
1449                 offset = 0;
1450         }
1451 
1452         bio->bi_end_io = bio_map_kern_endio;
1453         return bio;
1454 }
1455 EXPORT_SYMBOL(bio_map_kern);
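
/*
 * A minimal usage sketch (hypothetical caller): wrap a kmalloc'ed,
 * physically contiguous buffer in a bio for a driver-internal request;
 * vmalloc memory must not be used here since the mapping relies on
 * virt_to_page(). The buffer has to stay alive until the bio completes.
 *
 *	struct bio *bio = bio_map_kern(q, buf, buf_len, GFP_KERNEL);
 *
 *	if (IS_ERR(bio))
 *		return PTR_ERR(bio);
 */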
1456 
1457 static void bio_copy_kern_endio(struct bio *bio)
1458 {
1459         bio_free_pages(bio);
1460         bio_put(bio);
1461 }
1462 
1463 static void bio_copy_kern_endio_read(struct bio *bio)
1464 {
1465         char *p = bio->bi_private;
1466         struct bio_vec *bvec;
1467         int i;
1468 
1469         bio_for_each_segment_all(bvec, bio, i) {
1470                 memcpy(p, page_address(bvec->bv_page), bvec->bv_len);
1471                 p += bvec->bv_len;
1472         }
1473 
1474         bio_copy_kern_endio(bio);
1475 }
1476 
1477 /**
1478  *      bio_copy_kern   -       copy kernel address into bio
1479  *      @q: the struct request_queue for the bio
1480  *      @data: pointer to buffer to copy
1481  *      @len: length in bytes
1482  *      @gfp_mask: allocation flags for bio and page allocation
1483  *      @reading: data direction is READ
1484  *
1485  *      Copy the data at the kernel address into a bio suitable for I/O
1486  *      to a block device. Returns an error pointer on failure.
1487  */
1488 struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
1489                           gfp_t gfp_mask, int reading)
1490 {
1491         unsigned long kaddr = (unsigned long)data;
1492         unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1493         unsigned long start = kaddr >> PAGE_SHIFT;
1494         struct bio *bio;
1495         void *p = data;
1496         int nr_pages = 0;
1497 
1498         /*
1499          * Overflow, abort
1500          */
1501         if (end < start)
1502                 return ERR_PTR(-EINVAL);
1503 
1504         nr_pages = end - start;
1505         bio = bio_kmalloc(gfp_mask, nr_pages);
1506         if (!bio)
1507                 return ERR_PTR(-ENOMEM);
1508 
1509         while (len) {
1510                 struct page *page;
1511                 unsigned int bytes = PAGE_SIZE;
1512 
1513                 if (bytes > len)
1514                         bytes = len;
1515 
1516                 page = alloc_page(q->bounce_gfp | gfp_mask);
1517                 if (!page)
1518                         goto cleanup;
1519 
1520                 if (!reading)
1521                         memcpy(page_address(page), p, bytes);
1522 
1523                 if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
1524                         break;
1525 
1526                 len -= bytes;
1527                 p += bytes;
1528         }
1529 
1530         if (reading) {
1531                 bio->bi_end_io = bio_copy_kern_endio_read;
1532                 bio->bi_private = data;
1533         } else {
1534                 bio->bi_end_io = bio_copy_kern_endio;
1535                 bio->bi_rw |= REQ_WRITE;
1536         }
1537 
1538         return bio;
1539 
1540 cleanup:
1541         bio_free_pages(bio);
1542         bio_put(bio);
1543         return ERR_PTR(-ENOMEM);
1544 }
1545 EXPORT_SYMBOL(bio_copy_kern);
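
/*
 * Example (editorial sketch): fire-and-forget write of a buffer that may
 * be unaligned or immediately reused.  Because bio_copy_kern() copies the
 * data into bounce pages, 'buf' may be touched again as soon as this
 * returns; completion frees the bounce pages via bio_copy_kern_endio().
 */
static int example_copy_kern_write(struct request_queue *q,
                                   struct block_device *bdev, sector_t sector,
                                   void *buf, unsigned int len)
{
        struct bio *bio;

        bio = bio_copy_kern(q, buf, len, GFP_NOIO, 0 /* !reading */);
        if (IS_ERR(bio))
                return PTR_ERR(bio);

        bio->bi_bdev = bdev;
        bio->bi_iter.bi_sector = sector;
        submit_bio(WRITE, bio);
        return 0;
}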
1546 
1547 /*
1548  * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
1549  * for performing direct-IO in BIOs.
1550  *
1551  * The problem is that we cannot run set_page_dirty() from interrupt context
1552  * because the required locks are not interrupt-safe.  So we mark the pages
1553  * dirty _before_ performing IO, and in interrupt context we check that the
1554  * pages are still dirty.  If so, fine.  If not, we redirty them in process
1555  * context.
1556  *
1557  * We special-case compound pages here: normally this means reads into hugetlb
1558  * pages.  The logic in here doesn't really work right for compound pages
1559  * because the VM does not uniformly chase down the head page in all cases.
1560  * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
1561  * handle them at all.  So we skip compound pages here at an early stage.
1562  *
1563  * Note that this code is very hard to test under normal circumstances because
1564  * direct-io pins the pages with get_user_pages().  This makes
1565  * is_page_cache_freeable() return false, and the VM will not clean the pages.
1566  * But other code (e.g. flusher threads) could clean the pages if they are mapped
1567  * pagecache.
1568  *
1569  * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
1570  * deferred bio dirtying paths.
1571  */
1572 
1573 /*
1574  * bio_set_pages_dirty() will mark all the bio's pages as dirty.
1575  */
1576 void bio_set_pages_dirty(struct bio *bio)
1577 {
1578         struct bio_vec *bvec;
1579         int i;
1580 
1581         bio_for_each_segment_all(bvec, bio, i) {
1582                 struct page *page = bvec->bv_page;
1583 
1584                 if (page && !PageCompound(page))
1585                         set_page_dirty_lock(page);
1586         }
1587 }
1588 
1589 static void bio_release_pages(struct bio *bio)
1590 {
1591         struct bio_vec *bvec;
1592         int i;
1593 
1594         bio_for_each_segment_all(bvec, bio, i) {
1595                 struct page *page = bvec->bv_page;
1596 
1597                 if (page)
1598                         put_page(page);
1599         }
1600 }
1601 
1602 /*
1603  * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
1604  * If they are, then fine.  If, however, some pages are clean then they must
1605  * have been written out during the direct-IO read.  So we take another ref on
1606  * the BIO and the offending pages and re-dirty the pages in process context.
1607  *
1608  * It is expected that bio_check_pages_dirty() will wholly own the BIO from
1609  * here on.  It will run one page_cache_release() against each page and will
1610  * run one bio_put() against the BIO.
1611  */
1612 
1613 static void bio_dirty_fn(struct work_struct *work);
1614 
1615 static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
1616 static DEFINE_SPINLOCK(bio_dirty_lock);
1617 static struct bio *bio_dirty_list;
1618 
1619 /*
1620  * This runs in process context
1621  */
1622 static void bio_dirty_fn(struct work_struct *work)
1623 {
1624         unsigned long flags;
1625         struct bio *bio;
1626 
1627         spin_lock_irqsave(&bio_dirty_lock, flags);
1628         bio = bio_dirty_list;
1629         bio_dirty_list = NULL;
1630         spin_unlock_irqrestore(&bio_dirty_lock, flags);
1631 
1632         while (bio) {
1633                 struct bio *next = bio->bi_private;
1634 
1635                 bio_set_pages_dirty(bio);
1636                 bio_release_pages(bio);
1637                 bio_put(bio);
1638                 bio = next;
1639         }
1640 }
1641 
1642 void bio_check_pages_dirty(struct bio *bio)
1643 {
1644         struct bio_vec *bvec;
1645         int nr_clean_pages = 0;
1646         int i;
1647 
1648         bio_for_each_segment_all(bvec, bio, i) {
1649                 struct page *page = bvec->bv_page;
1650 
1651                 if (PageDirty(page) || PageCompound(page)) {
1652                         page_cache_release(page);
1653                         bvec->bv_page = NULL;
1654                 } else {
1655                         nr_clean_pages++;
1656                 }
1657         }
1658 
1659         if (nr_clean_pages) {
1660                 unsigned long flags;
1661 
1662                 spin_lock_irqsave(&bio_dirty_lock, flags);
1663                 bio->bi_private = bio_dirty_list;
1664                 bio_dirty_list = bio;
1665                 spin_unlock_irqrestore(&bio_dirty_lock, flags);
1666                 schedule_work(&bio_dirty_work);
1667         } else {
1668                 bio_put(bio);
1669         }
1670 }
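
/*
 * Example (editorial sketch): the deferred-dirtying protocol described
 * above, as used for direct-IO reads into pinned user pages (fs/direct-io.c
 * follows the same shape; the example_* names are hypothetical).
 */
static void example_dio_end_io(struct bio *bio)
{
        /*
         * May run in interrupt context: bio_check_pages_dirty() punts any
         * redirtying to a workqueue and wholly owns the bio from here on.
         */
        bio_check_pages_dirty(bio);
}

static void example_submit_dio_read(struct bio *bio)
{
        bio->bi_end_io = example_dio_end_io;
        /* dirty the pages up front, while still in process context */
        bio_set_pages_dirty(bio);
        submit_bio(READ, bio);
}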
1671 
1672 void generic_start_io_acct(int rw, unsigned long sectors,
1673                            struct hd_struct *part)
1674 {
1675         int cpu = part_stat_lock();
1676 
1677         part_round_stats(cpu, part);
1678         part_stat_inc(cpu, part, ios[rw]);
1679         part_stat_add(cpu, part, sectors[rw], sectors);
1680         part_inc_in_flight(part, rw);
1681 
1682         part_stat_unlock();
1683 }
1684 EXPORT_SYMBOL(generic_start_io_acct);
1685 
1686 void generic_end_io_acct(int rw, struct hd_struct *part,
1687                          unsigned long start_time)
1688 {
1689         unsigned long duration = jiffies - start_time;
1690         int cpu = part_stat_lock();
1691 
1692         part_stat_add(cpu, part, ticks[rw], duration);
1693         part_round_stats(cpu, part);
1694         part_dec_in_flight(part, rw);
1695 
1696         part_stat_unlock();
1697 }
1698 EXPORT_SYMBOL(generic_end_io_acct);
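
/*
 * Example (editorial sketch): how a bio-based driver, which has no request
 * layer doing accounting on its behalf, brackets an I/O with the generic
 * accounting helpers (zram and drbd follow this pattern).
 */
static void example_acct_io(struct gendisk *disk, struct bio *bio)
{
        int rw = bio_data_dir(bio);
        unsigned long start_time = jiffies;

        generic_start_io_acct(rw, bio_sectors(bio), &disk->part0);
        /* ... perform the transfer and wait for completion ... */
        generic_end_io_acct(rw, &disk->part0, start_time);
}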
1699 
1700 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
1701 void bio_flush_dcache_pages(struct bio *bi)
1702 {
1703         struct bio_vec bvec;
1704         struct bvec_iter iter;
1705 
1706         bio_for_each_segment(bvec, bi, iter)
1707                 flush_dcache_page(bvec.bv_page);
1708 }
1709 EXPORT_SYMBOL(bio_flush_dcache_pages);
1710 #endif
1711 
1712 static inline bool bio_remaining_done(struct bio *bio)
1713 {
1714         /*
1715          * If we're not chaining, then ->__bi_remaining is always 1 and
1716          * we always end io on the first invocation.
1717          */
1718         if (!bio_flagged(bio, BIO_CHAIN))
1719                 return true;
1720 
1721         BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);
1722 
1723         if (atomic_dec_and_test(&bio->__bi_remaining)) {
1724                 bio_clear_flag(bio, BIO_CHAIN);
1725                 return true;
1726         }
1727 
1728         return false;
1729 }
1730 
1731 /**
1732  * bio_endio - end I/O on a bio
1733  * @bio:        bio
1734  *
1735  * Description:
1736  *   bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
1737  *   way to end I/O on a bio. No one should call bi_end_io() directly on a
1738  *   bio unless they own it and thus know that it has an end_io function.
1739  **/
1740 void bio_endio(struct bio *bio)
1741 {
1742         while (bio) {
1743                 if (unlikely(!bio_remaining_done(bio)))
1744                         break;
1745 
1746                 /*
1747                  * Need to have a real endio function for chained bios,
1748                  * otherwise various corner cases will break (like stacking
1749                  * block devices that save/restore bi_end_io) - however, we want
1750                  * to avoid unbounded recursion and blowing the stack. Tail call
1751                  * optimization would handle this, but compiling with frame
1752                  * pointers also disables gcc's sibling call optimization.
1753                  */
1754                 if (bio->bi_end_io == bio_chain_endio) {
1755                         struct bio *parent = bio->bi_private;
1756                         parent->bi_error = bio->bi_error;
1757                         bio_put(bio);
1758                         bio = parent;
1759                 } else {
1760                         if (bio->bi_end_io)
1761                                 bio->bi_end_io(bio);
1762                         bio = NULL;
1763                 }
1764         }
1765 }
1766 EXPORT_SYMBOL(bio_endio);
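
/*
 * Example (editorial sketch): a driver finishing a bio it was asked to
 * process.  bio_endio() iterates rather than recurses through chained
 * parents, so this is safe for arbitrarily deep chains.
 */
static void example_complete_bio(struct bio *bio, int err)
{
        bio->bi_error = err;    /* 0 on success, e.g. -EIO on failure */
        bio_endio(bio);
}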
1767 
1768 /**
1769  * bio_split - split a bio
1770  * @bio:        bio to split
1771  * @sectors:    number of sectors to split from the front of @bio
1772  * @gfp:        gfp mask
1773  * @bs:         bio set to allocate from
1774  *
1775  * Allocates and returns a new bio which represents @sectors from the start of
1776  * @bio, and updates @bio to represent the remaining sectors.
1777  *
1778  * Unless this is a discard request, the newly allocated bio will point
1779  * to @bio's bi_io_vec; it is the caller's responsibility to ensure that
1780  * @bio is not freed before the split.
1781  */
1782 struct bio *bio_split(struct bio *bio, int sectors,
1783                       gfp_t gfp, struct bio_set *bs)
1784 {
1785         struct bio *split = NULL;
1786 
1787         BUG_ON(sectors <= 0);
1788         BUG_ON(sectors >= bio_sectors(bio));
1789 
1790         /*
1791          * Discards need a mutable bio_vec to accommodate the payload
1792          * required by the DSM TRIM and UNMAP commands.
1793          */
1794         if (bio->bi_rw & REQ_DISCARD)
1795                 split = bio_clone_bioset(bio, gfp, bs);
1796         else
1797                 split = bio_clone_fast(bio, gfp, bs);
1798 
1799         if (!split)
1800                 return NULL;
1801 
1802         split->bi_iter.bi_size = sectors << 9;
1803 
1804         if (bio_integrity(split))
1805                 bio_integrity_trim(split, 0, sectors);
1806 
1807         bio_advance(bio, split->bi_iter.bi_size);
1808 
1809         return split;
1810 }
1811 EXPORT_SYMBOL(bio_split);
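
/*
 * Example (editorial sketch): the split-and-requeue pattern used when a
 * bio exceeds a device limit, mirroring blk_queue_split().  The remainder
 * is resubmitted, and the chain makes the parent complete only after both
 * halves have finished.
 */
static struct bio *example_split_front(struct bio *bio, int max_sectors)
{
        struct bio *split;

        if (bio_sectors(bio) <= max_sectors)
                return bio;

        split = bio_split(bio, max_sectors, GFP_NOIO, fs_bio_set);
        bio_chain(split, bio);          /* parent 'bio' completes after 'split' */
        generic_make_request(bio);      /* requeue the tail */
        return split;                   /* caller drives the front part */
}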
1812 
1813 /**
1814  * bio_trim - trim a bio
1815  * @bio:        bio to trim
1816  * @offset:     number of sectors to trim from the front of @bio
1817  * @size:       size we want to trim @bio to, in sectors
1818  */
1819 void bio_trim(struct bio *bio, int offset, int size)
1820 {
1821         /* 'bio' is a cloned bio which we need to trim to match
1822          * the given offset and size.
1823          */
1824 
1825         size <<= 9;
1826         if (offset == 0 && size == bio->bi_iter.bi_size)
1827                 return;
1828 
1829         bio_clear_flag(bio, BIO_SEG_VALID);
1830 
1831         bio_advance(bio, offset << 9);
1832 
1833         bio->bi_iter.bi_size = size;
1834 }
1835 EXPORT_SYMBOL_GPL(bio_trim);
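
/*
 * Example (editorial sketch): clone a bio and narrow the clone to a
 * sub-range, roughly the way device-mapper targets remap a slice of an
 * I/O.  'offset' and 'size' are in sectors, relative to the start of 'bio'.
 */
static struct bio *example_clone_range(struct bio *bio, int offset, int size,
                                       struct bio_set *bs)
{
        struct bio *clone = bio_clone_fast(bio, GFP_NOIO, bs);

        if (clone)
                bio_trim(clone, offset, size);
        return clone;
}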
1836 
1837 /*
1838  * Create memory pools for biovecs in a bio_set, using the global
1839  * biovec slabs created for general use.
1840  */
1841 mempool_t *biovec_create_pool(int pool_entries)
1842 {
1843         struct biovec_slab *bp = bvec_slabs + BIOVEC_MAX_IDX;
1844 
1845         return mempool_create_slab_pool(pool_entries, bp->slab);
1846 }
1847 
1848 void bioset_free(struct bio_set *bs)
1849 {
1850         if (bs->rescue_workqueue)
1851                 destroy_workqueue(bs->rescue_workqueue);
1852 
1853         if (bs->bio_pool)
1854                 mempool_destroy(bs->bio_pool);
1855 
1856         if (bs->bvec_pool)
1857                 mempool_destroy(bs->bvec_pool);
1858 
1859         bioset_integrity_free(bs);
1860         bio_put_slab(bs);
1861 
1862         kfree(bs);
1863 }
1864 EXPORT_SYMBOL(bioset_free);
1865 
1866 static struct bio_set *__bioset_create(unsigned int pool_size,
1867                                        unsigned int front_pad,
1868                                        bool create_bvec_pool)
1869 {
1870         unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
1871         struct bio_set *bs;
1872 
1873         bs = kzalloc(sizeof(*bs), GFP_KERNEL);
1874         if (!bs)
1875                 return NULL;
1876 
1877         bs->front_pad = front_pad;
1878 
1879         spin_lock_init(&bs->rescue_lock);
1880         bio_list_init(&bs->rescue_list);
1881         INIT_WORK(&bs->rescue_work, bio_alloc_rescue);
1882 
1883         bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
1884         if (!bs->bio_slab) {
1885                 kfree(bs);
1886                 return NULL;
1887         }
1888 
1889         bs->bio_pool = mempool_create_slab_pool(pool_size, bs->bio_slab);
1890         if (!bs->bio_pool)
1891                 goto bad;
1892 
1893         if (create_bvec_pool) {
1894                 bs->bvec_pool = biovec_create_pool(pool_size);
1895                 if (!bs->bvec_pool)
1896                         goto bad;
1897         }
1898 
1899         bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0);
1900         if (!bs->rescue_workqueue)
1901                 goto bad;
1902 
1903         return bs;
1904 bad:
1905         bioset_free(bs);
1906         return NULL;
1907 }
1908 
1909 /**
1910  * bioset_create  - Create a bio_set
1911  * @pool_size:  Number of bios and bio_vecs to cache in the mempools
1912  * @front_pad:  Number of bytes to allocate in front of the returned bio
1913  *
1914  * Description:
1915  *    Set up a bio_set to be used with bio_alloc_bioset(). Allows the caller
1916  *    to ask for a number of bytes to be allocated in front of the bio.
1917  *    Front pad allocation is useful for embedding the bio inside
1918  *    another structure, to avoid allocating extra data to go with the bio.
1919  *    Note that the bio must always be embedded at the END of that structure,
1920  *    or things will break badly.
1921  */
1922 struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
1923 {
1924         return __bioset_create(pool_size, front_pad, true);
1925 }
1926 EXPORT_SYMBOL(bioset_create);
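
/*
 * Example (editorial sketch): use @front_pad to co-allocate per-I/O driver
 * state with the bio, as dm and bcache do.  The example_* names are
 * hypothetical; note that the bio is the final member of the structure.
 */
struct example_io {
        void *private;                  /* hypothetical per-I/O state */
        struct bio bio;                 /* must be last */
};

static struct bio_set *example_bs;

static int example_setup(void)
{
        example_bs = bioset_create(BIO_POOL_SIZE,
                                   offsetof(struct example_io, bio));
        return example_bs ? 0 : -ENOMEM;
}

static struct example_io *example_alloc_io(unsigned int nr_vecs)
{
        struct bio *bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, example_bs);

        return bio ? container_of(bio, struct example_io, bio) : NULL;
}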
1927 
1928 /**
1929  * bioset_create_nobvec  - Create a bio_set without bio_vec mempool
1930  * @pool_size:  Number of bios to cache in the mempool
1931  * @front_pad:  Number of bytes to allocate in front of the returned bio
1932  *
1933  * Description:
1934  *    Same functionality as bioset_create(), except that no mempool is
1935  *    created for bio_vecs, saving some memory for bio_clone_fast() users.
1936  */
1937 struct bio_set *bioset_create_nobvec(unsigned int pool_size, unsigned int front_pad)
1938 {
1939         return __bioset_create(pool_size, front_pad, false);
1940 }
1941 EXPORT_SYMBOL(bioset_create_nobvec);
1942 
1943 #ifdef CONFIG_BLK_CGROUP
1944 
1945 /**
1946  * bio_associate_blkcg - associate a bio with the specified blkcg
1947  * @bio: target bio
1948  * @blkcg_css: css of the blkcg to associate
1949  *
1950  * Associate @bio with the blkcg specified by @blkcg_css.  The block layer
1951  * will treat @bio as if it were issued by a task belonging to that blkcg.
1952  *
1953  * This function takes an extra reference on @blkcg_css, which is put when
1954  * @bio is released.  The caller must own @bio and is responsible for
1955  * synchronizing calls to this function.
1956  */
1957 int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css)
1958 {
1959         if (unlikely(bio->bi_css))
1960                 return -EBUSY;
1961         css_get(blkcg_css);
1962         bio->bi_css = blkcg_css;
1963         return 0;
1964 }
1965 EXPORT_SYMBOL_GPL(bio_associate_blkcg);
1966 
1967 /**
1968  * bio_associate_current - associate a bio with %current
1969  * @bio: target bio
1970  *
1971  * Associate @bio with %current if it hasn't been associated yet.  The
1972  * block layer will treat @bio as if it were issued by %current no matter
1973  * which task actually issues it.
1974  *
1975  * This function takes extra references on %current's io_context and blkcg,
1976  * which are put when @bio is released.  The caller must own @bio, ensure
1977  * %current->io_context exists, and is responsible for synchronizing calls
1978  * to this function.
1979  */
1980 int bio_associate_current(struct bio *bio)
1981 {
1982         struct io_context *ioc;
1983 
1984         if (bio->bi_css)
1985                 return -EBUSY;
1986 
1987         ioc = current->io_context;
1988         if (!ioc)
1989                 return -ENOENT;
1990 
1991         get_io_context_active(ioc);
1992         bio->bi_ioc = ioc;
1993         bio->bi_css = task_get_css(current, io_cgrp_id);
1994         return 0;
1995 }
1996 EXPORT_SYMBOL_GPL(bio_associate_current);
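
/*
 * Example (editorial sketch): tag a bio with the submitting task before
 * punting it to a worker thread, so that blkcg policies charge the
 * original issuer rather than the kthread that finally submits it.  A
 * return of -ENOENT (no io_context yet) simply leaves the bio untagged.
 */
static void example_tag_and_punt(struct bio *bio)
{
        bio_associate_current(bio);
        /* ... hand the bio to a worker that will call submit_bio() ... */
}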
1997 
1998 /**
1999  * bio_disassociate_task - undo bio_associate_current()
2000  * @bio: target bio
2001  */
2002 void bio_disassociate_task(struct bio *bio)
2003 {
2004         if (bio->bi_ioc) {
2005                 put_io_context(bio->bi_ioc);
2006                 bio->bi_ioc = NULL;
2007         }
2008         if (bio->bi_css) {
2009                 css_put(bio->bi_css);
2010                 bio->bi_css = NULL;
2011         }
2012 }
2013 
2014 #endif /* CONFIG_BLK_CGROUP */
2015 
2016 static void __init biovec_init_slabs(void)
2017 {
2018         int i;
2019 
2020         for (i = 0; i < BIOVEC_NR_POOLS; i++) {
2021                 int size;
2022                 struct biovec_slab *bvs = bvec_slabs + i;
2023 
2024                 if (bvs->nr_vecs <= BIO_INLINE_VECS) {
2025                         bvs->slab = NULL;
2026                         continue;
2027                 }
2028 
2029                 size = bvs->nr_vecs * sizeof(struct bio_vec);
2030                 bvs->slab = kmem_cache_create(bvs->name, size, 0,
2031                                 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
2032         }
2033 }
2034 
2035 static int __init init_bio(void)
2036 {
2037         bio_slab_max = 2;
2038         bio_slab_nr = 0;
2039         bio_slabs = kzalloc(bio_slab_max * sizeof(struct bio_slab), GFP_KERNEL);
2040         if (!bio_slabs)
2041                 panic("bio: can't allocate bios\n");
2042 
2043         bio_integrity_init();
2044         biovec_init_slabs();
2045 
2046         fs_bio_set = bioset_create(BIO_POOL_SIZE, 0);
2047         if (!fs_bio_set)
2048                 panic("bio: can't allocate bios\n");
2049 
2050         if (bioset_integrity_create(fs_bio_set, BIO_POOL_SIZE))
2051                 panic("bio: can't create integrity pool\n");
2052 
2053         return 0;
2054 }
2055 subsys_initcall(init_bio);
