~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/fs/ext4/mballoc.c

Version: ~ [ linux-5.10-rc6 ] ~ [ linux-5.9.12 ] ~ [ linux-5.8.18 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.81 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.161 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.210 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.247 ] ~ [ linux-4.8.17 ] ~ [ linux-4.7.10 ] ~ [ linux-4.6.7 ] ~ [ linux-4.5.7 ] ~ [ linux-4.4.247 ] ~ [ linux-4.3.6 ] ~ [ linux-4.2.8 ] ~ [ linux-4.1.52 ] ~ [ linux-4.0.9 ] ~ [ linux-3.19.8 ] ~ [ linux-3.18.140 ] ~ [ linux-3.17.8 ] ~ [ linux-3.16.85 ] ~ [ linux-3.15.10 ] ~ [ linux-3.14.79 ] ~ [ linux-3.13.11 ] ~ [ linux-3.12.74 ] ~ [ linux-3.11.10 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.5 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 // SPDX-License-Identifier: GPL-2.0
  2 /*
  3  * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
  4  * Written by Alex Tomas <alex@clusterfs.com>
  5  */
  6 
  7 
  8 /*
  9  * mballoc.c contains the multiblocks allocation routines
 10  */
 11 
 12 #include "ext4_jbd2.h"
 13 #include "mballoc.h"
 14 #include <linux/log2.h>
 15 #include <linux/module.h>
 16 #include <linux/slab.h>
 17 #include <linux/nospec.h>
 18 #include <linux/backing-dev.h>
 19 #include <trace/events/ext4.h>
 20 
 21 /*
 22  * MUSTDO:
 23  *   - test ext4_ext_search_left() and ext4_ext_search_right()
 24  *   - search for metadata in few groups
 25  *
 26  * TODO v4:
 27  *   - normalization should take into account whether file is still open
 28  *   - discard preallocations if no free space left (policy?)
 29  *   - don't normalize tails
 30  *   - quota
 31  *   - reservation for superuser
 32  *
 33  * TODO v3:
 34  *   - bitmap read-ahead (proposed by Oleg Drokin aka green)
 35  *   - track min/max extents in each group for better group selection
 36  *   - mb_mark_used() may allocate chunk right after splitting buddy
 37  *   - tree of groups sorted by number of free blocks
 38  *   - error handling
 39  */
 40 
 41 /*
 42  * The allocation request involve request for multiple number of blocks
 43  * near to the goal(block) value specified.
 44  *
 45  * During initialization phase of the allocator we decide to use the
 46  * group preallocation or inode preallocation depending on the size of
 47  * the file. The size of the file could be the resulting file size we
 48  * would have after allocation, or the current file size, which ever
 49  * is larger. If the size is less than sbi->s_mb_stream_request we
 50  * select to use the group preallocation. The default value of
 51  * s_mb_stream_request is 16 blocks. This can also be tuned via
 52  * /sys/fs/ext4/<partition>/mb_stream_req. The value is represented in
 53  * terms of number of blocks.
 54  *
 55  * The main motivation for having small file use group preallocation is to
 56  * ensure that we have small files closer together on the disk.
 57  *
 58  * First stage the allocator looks at the inode prealloc list,
 59  * ext4_inode_info->i_prealloc_list, which contains list of prealloc
 60  * spaces for this particular inode. The inode prealloc space is
 61  * represented as:
 62  *
 63  * pa_lstart -> the logical start block for this prealloc space
 64  * pa_pstart -> the physical start block for this prealloc space
 65  * pa_len    -> length for this prealloc space (in clusters)
 66  * pa_free   ->  free space available in this prealloc space (in clusters)
 67  *
 68  * The inode preallocation space is used looking at the _logical_ start
 69  * block. If only the logical file block falls within the range of prealloc
 70  * space we will consume the particular prealloc space. This makes sure that
 71  * we have contiguous physical blocks representing the file blocks
 72  *
 73  * The important thing to be noted in case of inode prealloc space is that
 74  * we don't modify the values associated to inode prealloc space except
 75  * pa_free.
 76  *
 77  * If we are not able to find blocks in the inode prealloc space and if we
 78  * have the group allocation flag set then we look at the locality group
 79  * prealloc space. These are per CPU prealloc list represented as
 80  *
 81  * ext4_sb_info.s_locality_groups[smp_processor_id()]
 82  *
 83  * The reason for having a per cpu locality group is to reduce the contention
 84  * between CPUs. It is possible to get scheduled at this point.
 85  *
 86  * The locality group prealloc space is used looking at whether we have
 87  * enough free space (pa_free) within the prealloc space.
 88  *
 89  * If we can't allocate blocks via inode prealloc or/and locality group
 90  * prealloc then we look at the buddy cache. The buddy cache is represented
 91  * by ext4_sb_info.s_buddy_cache (struct inode) whose file offset gets
 92  * mapped to the buddy and bitmap information regarding different
 93  * groups. The buddy information is attached to buddy cache inode so that
 94  * we can access them through the page cache. The information regarding
 95  * each group is loaded via ext4_mb_load_buddy.  The information involve
 96  * block bitmap and buddy information. The information are stored in the
 97  * inode as:
 98  *
 99  *  {                        page                        }
100  *  [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
101  *
102  *
103  * one block each for bitmap and buddy information.  So for each group we
104  * take up 2 blocks. A page can contain blocks_per_page (PAGE_SIZE /
105  * blocksize) blocks.  So it can have information regarding groups_per_page
106  * which is blocks_per_page/2
107  *
108  * The buddy cache inode is not stored on disk. The inode is thrown
109  * away when the filesystem is unmounted.
110  *
111  * We look for count number of blocks in the buddy cache. If we were able
112  * to locate that many free blocks we return with additional information
113  * regarding rest of the contiguous physical block available
114  *
115  * Before allocating blocks via buddy cache we normalize the request
116  * blocks. This ensure we ask for more blocks that we needed. The extra
117  * blocks that we get after allocation is added to the respective prealloc
118  * list. In case of inode preallocation we follow a list of heuristics
119  * based on file size. This can be found in ext4_mb_normalize_request. If
120  * we are doing a group prealloc we try to normalize the request to
121  * sbi->s_mb_group_prealloc.  The default value of s_mb_group_prealloc is
122  * dependent on the cluster size; for non-bigalloc file systems, it is
123  * 512 blocks. This can be tuned via
124  * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in
125  * terms of number of blocks. If we have mounted the file system with -O
126  * stripe=<value> option the group prealloc request is normalized to the
127  * smallest multiple of the stripe value (sbi->s_stripe) which is
128  * greater than the default mb_group_prealloc.
129  *
130  * The regular allocator (using the buddy cache) supports a few tunables.
131  *
132  * /sys/fs/ext4/<partition>/mb_min_to_scan
133  * /sys/fs/ext4/<partition>/mb_max_to_scan
134  * /sys/fs/ext4/<partition>/mb_order2_req
135  *
136  * The regular allocator uses buddy scan only if the request len is power of
137  * 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs. The
138  * value of s_mb_order2_reqs can be tuned via
139  * /sys/fs/ext4/<partition>/mb_order2_req.  If the request len is equal to
140  * stripe size (sbi->s_stripe), we try to search for contiguous block in
141  * stripe size. This should result in better allocation on RAID setups. If
142  * not, we search in the specific group using bitmap for best extents. The
143  * tunable min_to_scan and max_to_scan control the behaviour here.
144  * min_to_scan indicate how long the mballoc __must__ look for a best
145  * extent and max_to_scan indicates how long the mballoc __can__ look for a
146  * best extent in the found extents. Searching for the blocks starts with
147  * the group specified as the goal value in allocation context via
148  * ac_g_ex. Each group is first checked based on the criteria whether it
149  * can be used for allocation. ext4_mb_good_group explains how the groups are
150  * checked.
151  *
152  * Both the prealloc space are getting populated as above. So for the first
153  * request we will hit the buddy cache which will result in this prealloc
154  * space getting filled. The prealloc space is then later used for the
155  * subsequent request.
156  */
157 
158 /*
159  * mballoc operates on the following data:
160  *  - on-disk bitmap
161  *  - in-core buddy (actually includes buddy and bitmap)
162  *  - preallocation descriptors (PAs)
163  *
164  * there are two types of preallocations:
165  *  - inode
166  *    assiged to specific inode and can be used for this inode only.
167  *    it describes part of inode's space preallocated to specific
168  *    physical blocks. any block from that preallocated can be used
169  *    independent. the descriptor just tracks number of blocks left
170  *    unused. so, before taking some block from descriptor, one must
171  *    make sure corresponded logical block isn't allocated yet. this
172  *    also means that freeing any block within descriptor's range
173  *    must discard all preallocated blocks.
174  *  - locality group
175  *    assigned to specific locality group which does not translate to
176  *    permanent set of inodes: inode can join and leave group. space
177  *    from this type of preallocation can be used for any inode. thus
178  *    it's consumed from the beginning to the end.
179  *
180  * relation between them can be expressed as:
181  *    in-core buddy = on-disk bitmap + preallocation descriptors
182  *
183  * this mean blocks mballoc considers used are:
184  *  - allocated blocks (persistent)
185  *  - preallocated blocks (non-persistent)
186  *
187  * consistency in mballoc world means that at any time a block is either
188  * free or used in ALL structures. notice: "any time" should not be read
189  * literally -- time is discrete and delimited by locks.
190  *
191  *  to keep it simple, we don't use block numbers, instead we count number of
192  *  blocks: how many blocks marked used/free in on-disk bitmap, buddy and PA.
193  *
194  * all operations can be expressed as:
195  *  - init buddy:                       buddy = on-disk + PAs
196  *  - new PA:                           buddy += N; PA = N
197  *  - use inode PA:                     on-disk += N; PA -= N
198  *  - discard inode PA                  buddy -= on-disk - PA; PA = 0
199  *  - use locality group PA             on-disk += N; PA -= N
200  *  - discard locality group PA         buddy -= PA; PA = 0
201  *  note: 'buddy -= on-disk - PA' is used to show that on-disk bitmap
202  *        is used in real operation because we can't know actual used
203  *        bits from PA, only from on-disk bitmap
204  *
205  * if we follow this strict logic, then all operations above should be atomic.
206  * given some of them can block, we'd have to use something like semaphores
207  * killing performance on high-end SMP hardware. let's try to relax it using
208  * the following knowledge:
209  *  1) if buddy is referenced, it's already initialized
210  *  2) while block is used in buddy and the buddy is referenced,
211  *     nobody can re-allocate that block
212  *  3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
213  *     bit set and PA claims same block, it's OK. IOW, one can set bit in
214  *     on-disk bitmap if buddy has same bit set or/and PA covers corresponded
215  *     block
216  *
217  * so, now we're building a concurrency table:
218  *  - init buddy vs.
219  *    - new PA
220  *      blocks for PA are allocated in the buddy, buddy must be referenced
221  *      until PA is linked to allocation group to avoid concurrent buddy init
222  *    - use inode PA
223  *      we need to make sure that either on-disk bitmap or PA has uptodate data
224  *      given (3) we care that PA-=N operation doesn't interfere with init
225  *    - discard inode PA
226  *      the simplest way would be to have buddy initialized by the discard
227  *    - use locality group PA
228  *      again PA-=N must be serialized with init
229  *    - discard locality group PA
230  *      the simplest way would be to have buddy initialized by the discard
231  *  - new PA vs.
232  *    - use inode PA
233  *      i_data_sem serializes them
234  *    - discard inode PA
235  *      discard process must wait until PA isn't used by another process
236  *    - use locality group PA
237  *      some mutex should serialize them
238  *    - discard locality group PA
239  *      discard process must wait until PA isn't used by another process
240  *  - use inode PA
241  *    - use inode PA
242  *      i_data_sem or another mutex should serializes them
243  *    - discard inode PA
244  *      discard process must wait until PA isn't used by another process
245  *    - use locality group PA
246  *      nothing wrong here -- they're different PAs covering different blocks
247  *    - discard locality group PA
248  *      discard process must wait until PA isn't used by another process
249  *
250  * now we're ready to make few consequences:
251  *  - PA is referenced and while it is no discard is possible
252  *  - PA is referenced until block isn't marked in on-disk bitmap
253  *  - PA changes only after on-disk bitmap
254  *  - discard must not compete with init. either init is done before
255  *    any discard or they're serialized somehow
256  *  - buddy init as sum of on-disk bitmap and PAs is done atomically
257  *
258  * a special case when we've used PA to emptiness. no need to modify buddy
259  * in this case, but we should care about concurrent init
260  *
261  */
262 
263  /*
264  * Logic in few words:
265  *
266  *  - allocation:
267  *    load group
268  *    find blocks
269  *    mark bits in on-disk bitmap
270  *    release group
271  *
272  *  - use preallocation:
273  *    find proper PA (per-inode or group)
274  *    load group
275  *    mark bits in on-disk bitmap
276  *    release group
277  *    release PA
278  *
279  *  - free:
280  *    load group
281  *    mark bits in on-disk bitmap
282  *    release group
283  *
284  *  - discard preallocations in group:
285  *    mark PAs deleted
286  *    move them onto local list
287  *    load on-disk bitmap
288  *    load group
289  *    remove PA from object (inode or locality group)
290  *    mark free blocks in-core
291  *
292  *  - discard inode's preallocations:
293  */
294 
295 /*
296  * Locking rules
297  *
298  * Locks:
299  *  - bitlock on a group        (group)
300  *  - object (inode/locality)   (object)
301  *  - per-pa lock               (pa)
302  *
303  * Paths:
304  *  - new pa
305  *    object
306  *    group
307  *
308  *  - find and use pa:
309  *    pa
310  *
311  *  - release consumed pa:
312  *    pa
313  *    group
314  *    object
315  *
316  *  - generate in-core bitmap:
317  *    group
318  *        pa
319  *
320  *  - discard all for given object (inode, locality group):
321  *    object
322  *        pa
323  *    group
324  *
325  *  - discard all for given group:
326  *    group
327  *        pa
328  *    group
329  *        object
330  *
331  */
332 static struct kmem_cache *ext4_pspace_cachep;
333 static struct kmem_cache *ext4_ac_cachep;
334 static struct kmem_cache *ext4_free_data_cachep;
335 
336 /* We create slab caches for groupinfo data structures based on the
337  * superblock block size.  There will be one per mounted filesystem for
338  * each unique s_blocksize_bits */
339 #define NR_GRPINFO_CACHES 8
340 static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];
341 
342 static const char * const ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
343         "ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
344         "ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
345         "ext4_groupinfo_64k", "ext4_groupinfo_128k"
346 };
347 
348 static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
349                                         ext4_group_t group);
350 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
351                                                 ext4_group_t group);
352 static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac);
353 
354 /*
355  * The algorithm using this percpu seq counter goes below:
356  * 1. We sample the percpu discard_pa_seq counter before trying for block
357  *    allocation in ext4_mb_new_blocks().
358  * 2. We increment this percpu discard_pa_seq counter when we either allocate
359  *    or free these blocks i.e. while marking those blocks as used/free in
360  *    mb_mark_used()/mb_free_blocks().
361  * 3. We also increment this percpu seq counter when we successfully identify
362  *    that the bb_prealloc_list is not empty and hence proceed for discarding
363  *    of those PAs inside ext4_mb_discard_group_preallocations().
364  *
365  * Now to make sure that the regular fast path of block allocation is not
366  * affected, as a small optimization we only sample the percpu seq counter
367  * on that cpu. Only when the block allocation fails and when freed blocks
368  * found were 0, that is when we sample percpu seq counter for all cpus using
369  * below function ext4_get_discard_pa_seq_sum(). This happens after making
370  * sure that all the PAs on grp->bb_prealloc_list got freed or if it's empty.
371  */
372 static DEFINE_PER_CPU(u64, discard_pa_seq);
373 static inline u64 ext4_get_discard_pa_seq_sum(void)
374 {
375         int __cpu;
376         u64 __seq = 0;
377 
378         for_each_possible_cpu(__cpu)
379                 __seq += per_cpu(discard_pa_seq, __cpu);
380         return __seq;
381 }
382 
383 static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
384 {
385 #if BITS_PER_LONG == 64
386         *bit += ((unsigned long) addr & 7UL) << 3;
387         addr = (void *) ((unsigned long) addr & ~7UL);
388 #elif BITS_PER_LONG == 32
389         *bit += ((unsigned long) addr & 3UL) << 3;
390         addr = (void *) ((unsigned long) addr & ~3UL);
391 #else
392 #error "how many bits you are?!"
393 #endif
394         return addr;
395 }
396 
397 static inline int mb_test_bit(int bit, void *addr)
398 {
399         /*
400          * ext4_test_bit on architecture like powerpc
401          * needs unsigned long aligned address
402          */
403         addr = mb_correct_addr_and_bit(&bit, addr);
404         return ext4_test_bit(bit, addr);
405 }
406 
407 static inline void mb_set_bit(int bit, void *addr)
408 {
409         addr = mb_correct_addr_and_bit(&bit, addr);
410         ext4_set_bit(bit, addr);
411 }
412 
413 static inline void mb_clear_bit(int bit, void *addr)
414 {
415         addr = mb_correct_addr_and_bit(&bit, addr);
416         ext4_clear_bit(bit, addr);
417 }
418 
419 static inline int mb_test_and_clear_bit(int bit, void *addr)
420 {
421         addr = mb_correct_addr_and_bit(&bit, addr);
422         return ext4_test_and_clear_bit(bit, addr);
423 }
424 
425 static inline int mb_find_next_zero_bit(void *addr, int max, int start)
426 {
427         int fix = 0, ret, tmpmax;
428         addr = mb_correct_addr_and_bit(&fix, addr);
429         tmpmax = max + fix;
430         start += fix;
431 
432         ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix;
433         if (ret > max)
434                 return max;
435         return ret;
436 }
437 
438 static inline int mb_find_next_bit(void *addr, int max, int start)
439 {
440         int fix = 0, ret, tmpmax;
441         addr = mb_correct_addr_and_bit(&fix, addr);
442         tmpmax = max + fix;
443         start += fix;
444 
445         ret = ext4_find_next_bit(addr, tmpmax, start) - fix;
446         if (ret > max)
447                 return max;
448         return ret;
449 }
450 
451 static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
452 {
453         char *bb;
454 
455         BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
456         BUG_ON(max == NULL);
457 
458         if (order > e4b->bd_blkbits + 1) {
459                 *max = 0;
460                 return NULL;
461         }
462 
463         /* at order 0 we see each particular block */
464         if (order == 0) {
465                 *max = 1 << (e4b->bd_blkbits + 3);
466                 return e4b->bd_bitmap;
467         }
468 
469         bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
470         *max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];
471 
472         return bb;
473 }
474 
475 #ifdef DOUBLE_CHECK
476 static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
477                            int first, int count)
478 {
479         int i;
480         struct super_block *sb = e4b->bd_sb;
481 
482         if (unlikely(e4b->bd_info->bb_bitmap == NULL))
483                 return;
484         assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
485         for (i = 0; i < count; i++) {
486                 if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
487                         ext4_fsblk_t blocknr;
488 
489                         blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
490                         blocknr += EXT4_C2B(EXT4_SB(sb), first + i);
491                         ext4_grp_locked_error(sb, e4b->bd_group,
492                                               inode ? inode->i_ino : 0,
493                                               blocknr,
494                                               "freeing block already freed "
495                                               "(bit %u)",
496                                               first + i);
497                         ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
498                                         EXT4_GROUP_INFO_BBITMAP_CORRUPT);
499                 }
500                 mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
501         }
502 }
503 
504 static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
505 {
506         int i;
507 
508         if (unlikely(e4b->bd_info->bb_bitmap == NULL))
509                 return;
510         assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
511         for (i = 0; i < count; i++) {
512                 BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
513                 mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
514         }
515 }
516 
517 static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
518 {
519         if (unlikely(e4b->bd_info->bb_bitmap == NULL))
520                 return;
521         if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
522                 unsigned char *b1, *b2;
523                 int i;
524                 b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
525                 b2 = (unsigned char *) bitmap;
526                 for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
527                         if (b1[i] != b2[i]) {
528                                 ext4_msg(e4b->bd_sb, KERN_ERR,
529                                          "corruption in group %u "
530                                          "at byte %u(%u): %x in copy != %x "
531                                          "on disk/prealloc",
532                                          e4b->bd_group, i, i * 8, b1[i], b2[i]);
533                                 BUG();
534                         }
535                 }
536         }
537 }
538 
539 static void mb_group_bb_bitmap_alloc(struct super_block *sb,
540                         struct ext4_group_info *grp, ext4_group_t group)
541 {
542         struct buffer_head *bh;
543 
544         grp->bb_bitmap = kmalloc(sb->s_blocksize, GFP_NOFS);
545         if (!grp->bb_bitmap)
546                 return;
547 
548         bh = ext4_read_block_bitmap(sb, group);
549         if (IS_ERR_OR_NULL(bh)) {
550                 kfree(grp->bb_bitmap);
551                 grp->bb_bitmap = NULL;
552                 return;
553         }
554 
555         memcpy(grp->bb_bitmap, bh->b_data, sb->s_blocksize);
556         put_bh(bh);
557 }
558 
559 static void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
560 {
561         kfree(grp->bb_bitmap);
562 }
563 
564 #else
565 static inline void mb_free_blocks_double(struct inode *inode,
566                                 struct ext4_buddy *e4b, int first, int count)
567 {
568         return;
569 }
570 static inline void mb_mark_used_double(struct ext4_buddy *e4b,
571                                                 int first, int count)
572 {
573         return;
574 }
575 static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
576 {
577         return;
578 }
579 
580 static inline void mb_group_bb_bitmap_alloc(struct super_block *sb,
581                         struct ext4_group_info *grp, ext4_group_t group)
582 {
583         return;
584 }
585 
586 static inline void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
587 {
588         return;
589 }
590 #endif
591 
592 #ifdef AGGRESSIVE_CHECK
593 
594 #define MB_CHECK_ASSERT(assert)                                         \
595 do {                                                                    \
596         if (!(assert)) {                                                \
597                 printk(KERN_EMERG                                       \
598                         "Assertion failure in %s() at %s:%d: \"%s\"\n", \
599                         function, file, line, # assert);                \
600                 BUG();                                                  \
601         }                                                               \
602 } while (0)
603 
604 static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
605                                 const char *function, int line)
606 {
607         struct super_block *sb = e4b->bd_sb;
608         int order = e4b->bd_blkbits + 1;
609         int max;
610         int max2;
611         int i;
612         int j;
613         int k;
614         int count;
615         struct ext4_group_info *grp;
616         int fragments = 0;
617         int fstart;
618         struct list_head *cur;
619         void *buddy;
620         void *buddy2;
621 
622         if (e4b->bd_info->bb_check_counter++ % 10)
623                 return 0;
624 
625         while (order > 1) {
626                 buddy = mb_find_buddy(e4b, order, &max);
627                 MB_CHECK_ASSERT(buddy);
628                 buddy2 = mb_find_buddy(e4b, order - 1, &max2);
629                 MB_CHECK_ASSERT(buddy2);
630                 MB_CHECK_ASSERT(buddy != buddy2);
631                 MB_CHECK_ASSERT(max * 2 == max2);
632 
633                 count = 0;
634                 for (i = 0; i < max; i++) {
635 
636                         if (mb_test_bit(i, buddy)) {
637                                 /* only single bit in buddy2 may be 1 */
638                                 if (!mb_test_bit(i << 1, buddy2)) {
639                                         MB_CHECK_ASSERT(
640                                                 mb_test_bit((i<<1)+1, buddy2));
641                                 } else if (!mb_test_bit((i << 1) + 1, buddy2)) {
642                                         MB_CHECK_ASSERT(
643                                                 mb_test_bit(i << 1, buddy2));
644                                 }
645                                 continue;
646                         }
647 
648                         /* both bits in buddy2 must be 1 */
649                         MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
650                         MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));
651 
652                         for (j = 0; j < (1 << order); j++) {
653                                 k = (i * (1 << order)) + j;
654                                 MB_CHECK_ASSERT(
655                                         !mb_test_bit(k, e4b->bd_bitmap));
656                         }
657                         count++;
658                 }
659                 MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
660                 order--;
661         }
662 
663         fstart = -1;
664         buddy = mb_find_buddy(e4b, 0, &max);
665         for (i = 0; i < max; i++) {
666                 if (!mb_test_bit(i, buddy)) {
667                         MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
668                         if (fstart == -1) {
669                                 fragments++;
670                                 fstart = i;
671                         }
672                         continue;
673                 }
674                 fstart = -1;
675                 /* check used bits only */
676                 for (j = 0; j < e4b->bd_blkbits + 1; j++) {
677                         buddy2 = mb_find_buddy(e4b, j, &max2);
678                         k = i >> j;
679                         MB_CHECK_ASSERT(k < max2);
680                         MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
681                 }
682         }
683         MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
684         MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);
685 
686         grp = ext4_get_group_info(sb, e4b->bd_group);
687         list_for_each(cur, &grp->bb_prealloc_list) {
688                 ext4_group_t groupnr;
689                 struct ext4_prealloc_space *pa;
690                 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
691                 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
692                 MB_CHECK_ASSERT(groupnr == e4b->bd_group);
693                 for (i = 0; i < pa->pa_len; i++)
694                         MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
695         }
696         return 0;
697 }
698 #undef MB_CHECK_ASSERT
699 #define mb_check_buddy(e4b) __mb_check_buddy(e4b,       \
700                                         __FILE__, __func__, __LINE__)
701 #else
702 #define mb_check_buddy(e4b)
703 #endif
704 
705 /*
706  * Divide blocks started from @first with length @len into
707  * smaller chunks with power of 2 blocks.
708  * Clear the bits in bitmap which the blocks of the chunk(s) covered,
709  * then increase bb_counters[] for corresponded chunk size.
710  */
711 static void ext4_mb_mark_free_simple(struct super_block *sb,
712                                 void *buddy, ext4_grpblk_t first, ext4_grpblk_t len,
713                                         struct ext4_group_info *grp)
714 {
715         struct ext4_sb_info *sbi = EXT4_SB(sb);
716         ext4_grpblk_t min;
717         ext4_grpblk_t max;
718         ext4_grpblk_t chunk;
719         unsigned int border;
720 
721         BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb));
722 
723         border = 2 << sb->s_blocksize_bits;
724 
725         while (len > 0) {
726                 /* find how many blocks can be covered since this position */
727                 max = ffs(first | border) - 1;
728 
729                 /* find how many blocks of power 2 we need to mark */
730                 min = fls(len) - 1;
731 
732                 if (max < min)
733                         min = max;
734                 chunk = 1 << min;
735 
736                 /* mark multiblock chunks only */
737                 grp->bb_counters[min]++;
738                 if (min > 0)
739                         mb_clear_bit(first >> min,
740                                      buddy + sbi->s_mb_offsets[min]);
741 
742                 len -= chunk;
743                 first += chunk;
744         }
745 }
746 
747 /*
748  * Cache the order of the largest free extent we have available in this block
749  * group.
750  */
751 static void
752 mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
753 {
754         int i;
755         int bits;
756 
757         grp->bb_largest_free_order = -1; /* uninit */
758 
759         bits = sb->s_blocksize_bits + 1;
760         for (i = bits; i >= 0; i--) {
761                 if (grp->bb_counters[i] > 0) {
762                         grp->bb_largest_free_order = i;
763                         break;
764                 }
765         }
766 }
767 
768 static noinline_for_stack
769 void ext4_mb_generate_buddy(struct super_block *sb,
770                                 void *buddy, void *bitmap, ext4_group_t group)
771 {
772         struct ext4_group_info *grp = ext4_get_group_info(sb, group);
773         struct ext4_sb_info *sbi = EXT4_SB(sb);
774         ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
775         ext4_grpblk_t i = 0;
776         ext4_grpblk_t first;
777         ext4_grpblk_t len;
778         unsigned free = 0;
779         unsigned fragments = 0;
780         unsigned long long period = get_cycles();
781 
782         /* initialize buddy from bitmap which is aggregation
783          * of on-disk bitmap and preallocations */
784         i = mb_find_next_zero_bit(bitmap, max, 0);
785         grp->bb_first_free = i;
786         while (i < max) {
787                 fragments++;
788                 first = i;
789                 i = mb_find_next_bit(bitmap, max, i);
790                 len = i - first;
791                 free += len;
792                 if (len > 1)
793                         ext4_mb_mark_free_simple(sb, buddy, first, len, grp);
794                 else
795                         grp->bb_counters[0]++;
796                 if (i < max)
797                         i = mb_find_next_zero_bit(bitmap, max, i);
798         }
799         grp->bb_fragments = fragments;
800 
801         if (free != grp->bb_free) {
802                 ext4_grp_locked_error(sb, group, 0, 0,
803                                       "block bitmap and bg descriptor "
804                                       "inconsistent: %u vs %u free clusters",
805                                       free, grp->bb_free);
806                 /*
807                  * If we intend to continue, we consider group descriptor
808                  * corrupt and update bb_free using bitmap value
809                  */
810                 grp->bb_free = free;
811                 ext4_mark_group_bitmap_corrupted(sb, group,
812                                         EXT4_GROUP_INFO_BBITMAP_CORRUPT);
813         }
814         mb_set_largest_free_order(sb, grp);
815 
816         clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));
817 
818         period = get_cycles() - period;
819         spin_lock(&sbi->s_bal_lock);
820         sbi->s_mb_buddies_generated++;
821         sbi->s_mb_generation_time += period;
822         spin_unlock(&sbi->s_bal_lock);
823 }
824 
825 static void mb_regenerate_buddy(struct ext4_buddy *e4b)
826 {
827         int count;
828         int order = 1;
829         void *buddy;
830 
831         while ((buddy = mb_find_buddy(e4b, order++, &count))) {
832                 ext4_set_bits(buddy, 0, count);
833         }
834         e4b->bd_info->bb_fragments = 0;
835         memset(e4b->bd_info->bb_counters, 0,
836                 sizeof(*e4b->bd_info->bb_counters) *
837                 (e4b->bd_sb->s_blocksize_bits + 2));
838 
839         ext4_mb_generate_buddy(e4b->bd_sb, e4b->bd_buddy,
840                 e4b->bd_bitmap, e4b->bd_group);
841 }
842 
843 /* The buddy information is attached the buddy cache inode
844  * for convenience. The information regarding each group
845  * is loaded via ext4_mb_load_buddy. The information involve
846  * block bitmap and buddy information. The information are
847  * stored in the inode as
848  *
849  * {                        page                        }
850  * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
851  *
852  *
853  * one block each for bitmap and buddy information.
854  * So for each group we take up 2 blocks. A page can
855  * contain blocks_per_page (PAGE_SIZE / blocksize)  blocks.
856  * So it can have information regarding groups_per_page which
857  * is blocks_per_page/2
858  *
859  * Locking note:  This routine takes the block group lock of all groups
860  * for this page; do not hold this lock when calling this routine!
861  */
862 
863 static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
864 {
865         ext4_group_t ngroups;
866         int blocksize;
867         int blocks_per_page;
868         int groups_per_page;
869         int err = 0;
870         int i;
871         ext4_group_t first_group, group;
872         int first_block;
873         struct super_block *sb;
874         struct buffer_head *bhs;
875         struct buffer_head **bh = NULL;
876         struct inode *inode;
877         char *data;
878         char *bitmap;
879         struct ext4_group_info *grinfo;
880 
881         inode = page->mapping->host;
882         sb = inode->i_sb;
883         ngroups = ext4_get_groups_count(sb);
884         blocksize = i_blocksize(inode);
885         blocks_per_page = PAGE_SIZE / blocksize;
886 
887         mb_debug(sb, "init page %lu\n", page->index);
888 
889         groups_per_page = blocks_per_page >> 1;
890         if (groups_per_page == 0)
891                 groups_per_page = 1;
892 
893         /* allocate buffer_heads to read bitmaps */
894         if (groups_per_page > 1) {
895                 i = sizeof(struct buffer_head *) * groups_per_page;
896                 bh = kzalloc(i, gfp);
897                 if (bh == NULL) {
898                         err = -ENOMEM;
899                         goto out;
900                 }
901         } else
902                 bh = &bhs;
903 
904         first_group = page->index * blocks_per_page / 2;
905 
906         /* read all groups the page covers into the cache */
907         for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
908                 if (group >= ngroups)
909                         break;
910 
911                 grinfo = ext4_get_group_info(sb, group);
912                 /*
913                  * If page is uptodate then we came here after online resize
914                  * which added some new uninitialized group info structs, so
915                  * we must skip all initialized uptodate buddies on the page,
916                  * which may be currently in use by an allocating task.
917                  */
918                 if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) {
919                         bh[i] = NULL;
920                         continue;
921                 }
922                 bh[i] = ext4_read_block_bitmap_nowait(sb, group, false);
923                 if (IS_ERR(bh[i])) {
924                         err = PTR_ERR(bh[i]);
925                         bh[i] = NULL;
926                         goto out;
927                 }
928                 mb_debug(sb, "read bitmap for group %u\n", group);
929         }
930 
931         /* wait for I/O completion */
932         for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
933                 int err2;
934 
935                 if (!bh[i])
936                         continue;
937                 err2 = ext4_wait_block_bitmap(sb, group, bh[i]);
938                 if (!err)
939                         err = err2;
940         }
941 
942         first_block = page->index * blocks_per_page;
943         for (i = 0; i < blocks_per_page; i++) {
944                 group = (first_block + i) >> 1;
945                 if (group >= ngroups)
946                         break;
947 
948                 if (!bh[group - first_group])
949                         /* skip initialized uptodate buddy */
950                         continue;
951 
952                 if (!buffer_verified(bh[group - first_group]))
953                         /* Skip faulty bitmaps */
954                         continue;
955                 err = 0;
956 
957                 /*
958                  * data carry information regarding this
959                  * particular group in the format specified
960                  * above
961                  *
962                  */
963                 data = page_address(page) + (i * blocksize);
964                 bitmap = bh[group - first_group]->b_data;
965 
966                 /*
967                  * We place the buddy block and bitmap block
968                  * close together
969                  */
970                 if ((first_block + i) & 1) {
971                         /* this is block of buddy */
972                         BUG_ON(incore == NULL);
973                         mb_debug(sb, "put buddy for group %u in page %lu/%x\n",
974                                 group, page->index, i * blocksize);
975                         trace_ext4_mb_buddy_bitmap_load(sb, group);
976                         grinfo = ext4_get_group_info(sb, group);
977                         grinfo->bb_fragments = 0;
978                         memset(grinfo->bb_counters, 0,
979                                sizeof(*grinfo->bb_counters) *
980                                 (sb->s_blocksize_bits+2));
981                         /*
982                          * incore got set to the group block bitmap below
983                          */
984                         ext4_lock_group(sb, group);
985                         /* init the buddy */
986                         memset(data, 0xff, blocksize);
987                         ext4_mb_generate_buddy(sb, data, incore, group);
988                         ext4_unlock_group(sb, group);
989                         incore = NULL;
990                 } else {
991                         /* this is block of bitmap */
992                         BUG_ON(incore != NULL);
993                         mb_debug(sb, "put bitmap for group %u in page %lu/%x\n",
994                                 group, page->index, i * blocksize);
995                         trace_ext4_mb_bitmap_load(sb, group);
996 
997                         /* see comments in ext4_mb_put_pa() */
998                         ext4_lock_group(sb, group);
999                         memcpy(data, bitmap, blocksize);
1000 
1001                         /* mark all preallocated blks used in in-core bitmap */
1002                         ext4_mb_generate_from_pa(sb, data, group);
1003                         ext4_mb_generate_from_freelist(sb, data, group);
1004                         ext4_unlock_group(sb, group);
1005 
1006                         /* set incore so that the buddy information can be
1007                          * generated using this
1008                          */
1009                         incore = data;
1010                 }
1011         }
1012         SetPageUptodate(page);
1013 
1014 out:
1015         if (bh) {
1016                 for (i = 0; i < groups_per_page; i++)
1017                         brelse(bh[i]);
1018                 if (bh != &bhs)
1019                         kfree(bh);
1020         }
1021         return err;
1022 }
1023 
1024 /*
1025  * Lock the buddy and bitmap pages. This make sure other parallel init_group
1026  * on the same buddy page doesn't happen whild holding the buddy page lock.
1027  * Return locked buddy and bitmap pages on e4b struct. If buddy and bitmap
1028  * are on the same page e4b->bd_buddy_page is NULL and return value is 0.
1029  */
1030 static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
1031                 ext4_group_t group, struct ext4_buddy *e4b, gfp_t gfp)
1032 {
1033         struct inode *inode = EXT4_SB(sb)->s_buddy_cache;
1034         int block, pnum, poff;
1035         int blocks_per_page;
1036         struct page *page;
1037 
1038         e4b->bd_buddy_page = NULL;
1039         e4b->bd_bitmap_page = NULL;
1040 
1041         blocks_per_page = PAGE_SIZE / sb->s_blocksize;
1042         /*
1043          * the buddy cache inode stores the block bitmap
1044          * and buddy information in consecutive blocks.
1045          * So for each group we need two blocks.
1046          */
1047         block = group * 2;
1048         pnum = block / blocks_per_page;
1049         poff = block % blocks_per_page;
1050         page = find_or_create_page(inode->i_mapping, pnum, gfp);
1051         if (!page)
1052                 return -ENOMEM;
1053         BUG_ON(page->mapping != inode->i_mapping);
1054         e4b->bd_bitmap_page = page;
1055         e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
1056 
1057         if (blocks_per_page >= 2) {
1058                 /* buddy and bitmap are on the same page */
1059                 return 0;
1060         }
1061 
1062         block++;
1063         pnum = block / blocks_per_page;
1064         page = find_or_create_page(inode->i_mapping, pnum, gfp);
1065         if (!page)
1066                 return -ENOMEM;
1067         BUG_ON(page->mapping != inode->i_mapping);
1068         e4b->bd_buddy_page = page;
1069         return 0;
1070 }
1071 
1072 static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
1073 {
1074         if (e4b->bd_bitmap_page) {
1075                 unlock_page(e4b->bd_bitmap_page);
1076                 put_page(e4b->bd_bitmap_page);
1077         }
1078         if (e4b->bd_buddy_page) {
1079                 unlock_page(e4b->bd_buddy_page);
1080                 put_page(e4b->bd_buddy_page);
1081         }
1082 }
1083 
1084 /*
1085  * Locking note:  This routine calls ext4_mb_init_cache(), which takes the
1086  * block group lock of all groups for this page; do not hold the BG lock when
1087  * calling this routine!
1088  */
1089 static noinline_for_stack
1090 int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
1091 {
1092 
1093         struct ext4_group_info *this_grp;
1094         struct ext4_buddy e4b;
1095         struct page *page;
1096         int ret = 0;
1097 
1098         might_sleep();
1099         mb_debug(sb, "init group %u\n", group);
1100         this_grp = ext4_get_group_info(sb, group);
1101         /*
1102          * This ensures that we don't reinit the buddy cache
1103          * page which map to the group from which we are already
1104          * allocating. If we are looking at the buddy cache we would
1105          * have taken a reference using ext4_mb_load_buddy and that
1106          * would have pinned buddy page to page cache.
1107          * The call to ext4_mb_get_buddy_page_lock will mark the
1108          * page accessed.
1109          */
1110         ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b, gfp);
1111         if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) {
1112                 /*
1113                  * somebody initialized the group
1114                  * return without doing anything
1115                  */
1116                 goto err;
1117         }
1118 
1119         page = e4b.bd_bitmap_page;
1120         ret = ext4_mb_init_cache(page, NULL, gfp);
1121         if (ret)
1122                 goto err;
1123         if (!PageUptodate(page)) {
1124                 ret = -EIO;
1125                 goto err;
1126         }
1127 
1128         if (e4b.bd_buddy_page == NULL) {
1129                 /*
1130                  * If both the bitmap and buddy are in
1131                  * the same page we don't need to force
1132                  * init the buddy
1133                  */
1134                 ret = 0;
1135                 goto err;
1136         }
1137         /* init buddy cache */
1138         page = e4b.bd_buddy_page;
1139         ret = ext4_mb_init_cache(page, e4b.bd_bitmap, gfp);
1140         if (ret)
1141                 goto err;
1142         if (!PageUptodate(page)) {
1143                 ret = -EIO;
1144                 goto err;
1145         }
1146 err:
1147         ext4_mb_put_buddy_page_lock(&e4b);
1148         return ret;
1149 }
1150 
1151 /*
1152  * Locking note:  This routine calls ext4_mb_init_cache(), which takes the
1153  * block group lock of all groups for this page; do not hold the BG lock when
1154  * calling this routine!
1155  */
1156 static noinline_for_stack int
1157 ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
1158                        struct ext4_buddy *e4b, gfp_t gfp)
1159 {
1160         int blocks_per_page;
1161         int block;
1162         int pnum;
1163         int poff;
1164         struct page *page;
1165         int ret;
1166         struct ext4_group_info *grp;
1167         struct ext4_sb_info *sbi = EXT4_SB(sb);
1168         struct inode *inode = sbi->s_buddy_cache;
1169 
1170         might_sleep();
1171         mb_debug(sb, "load group %u\n", group);
1172 
1173         blocks_per_page = PAGE_SIZE / sb->s_blocksize;
1174         grp = ext4_get_group_info(sb, group);
1175 
1176         e4b->bd_blkbits = sb->s_blocksize_bits;
1177         e4b->bd_info = grp;
1178         e4b->bd_sb = sb;
1179         e4b->bd_group = group;
1180         e4b->bd_buddy_page = NULL;
1181         e4b->bd_bitmap_page = NULL;
1182 
1183         if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
1184                 /*
1185                  * we need full data about the group
1186                  * to make a good selection
1187                  */
1188                 ret = ext4_mb_init_group(sb, group, gfp);
1189                 if (ret)
1190                         return ret;
1191         }
1192 
1193         /*
1194          * the buddy cache inode stores the block bitmap
1195          * and buddy information in consecutive blocks.
1196          * So for each group we need two blocks.
1197          */
1198         block = group * 2;
1199         pnum = block / blocks_per_page;
1200         poff = block % blocks_per_page;
1201 
1202         /* we could use find_or_create_page(), but it locks page
1203          * what we'd like to avoid in fast path ... */
1204         page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
1205         if (page == NULL || !PageUptodate(page)) {
1206                 if (page)
1207                         /*
1208                          * drop the page reference and try
1209                          * to get the page with lock. If we
1210                          * are not uptodate that implies
1211                          * somebody just created the page but
1212                          * is yet to initialize the same. So
1213                          * wait for it to initialize.
1214                          */
1215                         put_page(page);
1216                 page = find_or_create_page(inode->i_mapping, pnum, gfp);
1217                 if (page) {
1218                         BUG_ON(page->mapping != inode->i_mapping);
1219                         if (!PageUptodate(page)) {
1220                                 ret = ext4_mb_init_cache(page, NULL, gfp);
1221                                 if (ret) {
1222                                         unlock_page(page);
1223                                         goto err;
1224                                 }
1225                                 mb_cmp_bitmaps(e4b, page_address(page) +
1226                                                (poff * sb->s_blocksize));
1227                         }
1228                         unlock_page(page);
1229                 }
1230         }
1231         if (page == NULL) {
1232                 ret = -ENOMEM;
1233                 goto err;
1234         }
1235         if (!PageUptodate(page)) {
1236                 ret = -EIO;
1237                 goto err;
1238         }
1239 
1240         /* Pages marked accessed already */
1241         e4b->bd_bitmap_page = page;
1242         e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
1243 
1244         block++;
1245         pnum = block / blocks_per_page;
1246         poff = block % blocks_per_page;
1247 
1248         page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
1249         if (page == NULL || !PageUptodate(page)) {
1250                 if (page)
1251                         put_page(page);
1252                 page = find_or_create_page(inode->i_mapping, pnum, gfp);
1253                 if (page) {
1254                         BUG_ON(page->mapping != inode->i_mapping);
1255                         if (!PageUptodate(page)) {
1256                                 ret = ext4_mb_init_cache(page, e4b->bd_bitmap,
1257                                                          gfp);
1258                                 if (ret) {
1259                                         unlock_page(page);
1260                                         goto err;
1261                                 }
1262                         }
1263                         unlock_page(page);
1264                 }
1265         }
1266         if (page == NULL) {
1267                 ret = -ENOMEM;
1268                 goto err;
1269         }
1270         if (!PageUptodate(page)) {
1271                 ret = -EIO;
1272                 goto err;
1273         }
1274 
1275         /* Pages marked accessed already */
1276         e4b->bd_buddy_page = page;
1277         e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
1278 
1279         return 0;
1280 
1281 err:
1282         if (page)
1283                 put_page(page);
1284         if (e4b->bd_bitmap_page)
1285                 put_page(e4b->bd_bitmap_page);
1286         if (e4b->bd_buddy_page)
1287                 put_page(e4b->bd_buddy_page);
1288         e4b->bd_buddy = NULL;
1289         e4b->bd_bitmap = NULL;
1290         return ret;
1291 }
1292 
1293 static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
1294                               struct ext4_buddy *e4b)
1295 {
1296         return ext4_mb_load_buddy_gfp(sb, group, e4b, GFP_NOFS);
1297 }
1298 
1299 static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
1300 {
1301         if (e4b->bd_bitmap_page)
1302                 put_page(e4b->bd_bitmap_page);
1303         if (e4b->bd_buddy_page)
1304                 put_page(e4b->bd_buddy_page);
1305 }
1306 
1307 
1308 static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
1309 {
1310         int order = 1;
1311         int bb_incr = 1 << (e4b->bd_blkbits - 1);
1312         void *bb;
1313 
1314         BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
1315         BUG_ON(block >= (1 << (e4b->bd_blkbits + 3)));
1316 
1317         bb = e4b->bd_buddy;
1318         while (order <= e4b->bd_blkbits + 1) {
1319                 block = block >> 1;
1320                 if (!mb_test_bit(block, bb)) {
1321                         /* this block is part of buddy of order 'order' */
1322                         return order;
1323                 }
1324                 bb += bb_incr;
1325                 bb_incr >>= 1;
1326                 order++;
1327         }
1328         return 0;
1329 }
1330 
1331 static void mb_clear_bits(void *bm, int cur, int len)
1332 {
1333         __u32 *addr;
1334 
1335         len = cur + len;
1336         while (cur < len) {
1337                 if ((cur & 31) == 0 && (len - cur) >= 32) {
1338                         /* fast path: clear whole word at once */
1339                         addr = bm + (cur >> 3);
1340                         *addr = 0;
1341                         cur += 32;
1342                         continue;
1343                 }
1344                 mb_clear_bit(cur, bm);
1345                 cur++;
1346         }
1347 }
1348 
1349 /* clear bits in given range
1350  * will return first found zero bit if any, -1 otherwise
1351  */
1352 static int mb_test_and_clear_bits(void *bm, int cur, int len)
1353 {
1354         __u32 *addr;
1355         int zero_bit = -1;
1356 
1357         len = cur + len;
1358         while (cur < len) {
1359                 if ((cur & 31) == 0 && (len - cur) >= 32) {
1360                         /* fast path: clear whole word at once */
1361                         addr = bm + (cur >> 3);
1362                         if (*addr != (__u32)(-1) && zero_bit == -1)
1363                                 zero_bit = cur + mb_find_next_zero_bit(addr, 32, 0);
1364                         *addr = 0;
1365                         cur += 32;
1366                         continue;
1367                 }
1368                 if (!mb_test_and_clear_bit(cur, bm) && zero_bit == -1)
1369                         zero_bit = cur;
1370                 cur++;
1371         }
1372 
1373         return zero_bit;
1374 }
1375 
1376 void ext4_set_bits(void *bm, int cur, int len)
1377 {
1378         __u32 *addr;
1379 
1380         len = cur + len;
1381         while (cur < len) {
1382                 if ((cur & 31) == 0 && (len - cur) >= 32) {
1383                         /* fast path: set whole word at once */
1384                         addr = bm + (cur >> 3);
1385                         *addr = 0xffffffff;
1386                         cur += 32;
1387                         continue;
1388                 }
1389                 mb_set_bit(cur, bm);
1390                 cur++;
1391         }
1392 }
1393 
1394 static inline int mb_buddy_adjust_border(int* bit, void* bitmap, int side)
1395 {
1396         if (mb_test_bit(*bit + side, bitmap)) {
1397                 mb_clear_bit(*bit, bitmap);
1398                 (*bit) -= side;
1399                 return 1;
1400         }
1401         else {
1402                 (*bit) += side;
1403                 mb_set_bit(*bit, bitmap);
1404                 return -1;
1405         }
1406 }
1407 
1408 static void mb_buddy_mark_free(struct ext4_buddy *e4b, int first, int last)
1409 {
1410         int max;
1411         int order = 1;
1412         void *buddy = mb_find_buddy(e4b, order, &max);
1413 
1414         while (buddy) {
1415                 void *buddy2;
1416 
1417                 /* Bits in range [first; last] are known to be set since
1418                  * corresponding blocks were allocated. Bits in range
1419                  * (first; last) will stay set because they form buddies on
1420                  * upper layer. We just deal with borders if they don't
1421                  * align with upper layer and then go up.
1422                  * Releasing entire group is all about clearing
1423                  * single bit of highest order buddy.
1424                  */
1425 
1426                 /* Example:
1427                  * ---------------------------------
1428                  * |   1   |   1   |   1   |   1   |
1429                  * ---------------------------------
1430                  * | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 |
1431                  * ---------------------------------
1432                  *   0   1   2   3   4   5   6   7
1433                  *      \_____________________/
1434                  *
1435                  * Neither [1] nor [6] is aligned to above layer.
1436                  * Left neighbour [0] is free, so mark it busy,
1437                  * decrease bb_counters and extend range to
1438                  * [0; 6]
1439                  * Right neighbour [7] is busy. It can't be coaleasced with [6], so
1440                  * mark [6] free, increase bb_counters and shrink range to
1441                  * [0; 5].
1442                  * Then shift range to [0; 2], go up and do the same.
1443                  */
1444 
1445 
1446                 if (first & 1)
1447                         e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&first, buddy, -1);
1448                 if (!(last & 1))
1449                         e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&last, buddy, 1);
1450                 if (first > last)
1451                         break;
1452                 order++;
1453 
1454                 if (first == last || !(buddy2 = mb_find_buddy(e4b, order, &max))) {
1455                         mb_clear_bits(buddy, first, last - first + 1);
1456                         e4b->bd_info->bb_counters[order - 1] += last - first + 1;
1457                         break;
1458                 }
1459                 first >>= 1;
1460                 last >>= 1;
1461                 buddy = buddy2;
1462         }
1463 }
1464 
1465 static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
1466                            int first, int count)
1467 {
1468         int left_is_free = 0;
1469         int right_is_free = 0;
1470         int block;
1471         int last = first + count - 1;
1472         struct super_block *sb = e4b->bd_sb;
1473 
1474         if (WARN_ON(count == 0))
1475                 return;
1476         BUG_ON(last >= (sb->s_blocksize << 3));
1477         assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
1478         /* Don't bother if the block group is corrupt. */
1479         if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
1480                 return;
1481 
1482         mb_check_buddy(e4b);
1483         mb_free_blocks_double(inode, e4b, first, count);
1484 
1485         this_cpu_inc(discard_pa_seq);
1486         e4b->bd_info->bb_free += count;
1487         if (first < e4b->bd_info->bb_first_free)
1488                 e4b->bd_info->bb_first_free = first;
1489 
1490         /* access memory sequentially: check left neighbour,
1491          * clear range and then check right neighbour
1492          */
1493         if (first != 0)
1494                 left_is_free = !mb_test_bit(first - 1, e4b->bd_bitmap);
1495         block = mb_test_and_clear_bits(e4b->bd_bitmap, first, count);
1496         if (last + 1 < EXT4_SB(sb)->s_mb_maxs[0])
1497                 right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap);
1498 
1499         if (unlikely(block != -1)) {
1500                 struct ext4_sb_info *sbi = EXT4_SB(sb);
1501                 ext4_fsblk_t blocknr;
1502 
1503                 blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
1504                 blocknr += EXT4_C2B(sbi, block);
1505                 if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) {
1506                         ext4_grp_locked_error(sb, e4b->bd_group,
1507                                               inode ? inode->i_ino : 0,
1508                                               blocknr,
1509                                               "freeing already freed block (bit %u); block bitmap corrupt.",
1510                                               block);
1511                         ext4_mark_group_bitmap_corrupted(
1512                                 sb, e4b->bd_group,
1513                                 EXT4_GROUP_INFO_BBITMAP_CORRUPT);
1514                 }
1515                 mb_regenerate_buddy(e4b);
1516                 goto done;
1517         }
1518 
1519         /* let's maintain fragments counter */
1520         if (left_is_free && right_is_free)
1521                 e4b->bd_info->bb_fragments--;
1522         else if (!left_is_free && !right_is_free)
1523                 e4b->bd_info->bb_fragments++;
1524 
1525         /* buddy[0] == bd_bitmap is a special case, so handle
1526          * it right away and let mb_buddy_mark_free stay free of
1527          * zero order checks.
1528          * Check if neighbours are to be coaleasced,
1529          * adjust bitmap bb_counters and borders appropriately.
1530          */
1531         if (first & 1) {
1532                 first += !left_is_free;
1533                 e4b->bd_info->bb_counters[0] += left_is_free ? -1 : 1;
1534         }
1535         if (!(last & 1)) {
1536                 last -= !right_is_free;
1537                 e4b->bd_info->bb_counters[0] += right_is_free ? -1 : 1;
1538         }
1539 
1540         if (first <= last)
1541                 mb_buddy_mark_free(e4b, first >> 1, last >> 1);
1542 
1543 done:
1544         mb_set_largest_free_order(sb, e4b->bd_info);
1545         mb_check_buddy(e4b);
1546 }
1547 
1548 static int mb_find_extent(struct ext4_buddy *e4b, int block,
1549                                 int needed, struct ext4_free_extent *ex)
1550 {
1551         int next = block;
1552         int max, order;
1553         void *buddy;
1554 
1555         assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
1556         BUG_ON(ex == NULL);
1557 
1558         buddy = mb_find_buddy(e4b, 0, &max);
1559         BUG_ON(buddy == NULL);
1560         BUG_ON(block >= max);
1561         if (mb_test_bit(block, buddy)) {
1562                 ex->fe_len = 0;
1563                 ex->fe_start = 0;
1564                 ex->fe_group = 0;
1565                 return 0;
1566         }
1567 
1568         /* find actual order */
1569         order = mb_find_order_for_block(e4b, block);
1570         block = block >> order;
1571 
1572         ex->fe_len = 1 << order;
1573         ex->fe_start = block << order;
1574         ex->fe_group = e4b->bd_group;
1575 
1576         /* calc difference from given start */
1577         next = next - ex->fe_start;
1578         ex->fe_len -= next;
1579         ex->fe_start += next;
1580 
1581         while (needed > ex->fe_len &&
1582                mb_find_buddy(e4b, order, &max)) {
1583 
1584                 if (block + 1 >= max)
1585                         break;
1586 
1587                 next = (block + 1) * (1 << order);
1588                 if (mb_test_bit(next, e4b->bd_bitmap))
1589                         break;
1590 
1591                 order = mb_find_order_for_block(e4b, next);
1592 
1593                 block = next >> order;
1594                 ex->fe_len += 1 << order;
1595         }
1596 
1597         if (ex->fe_start + ex->fe_len > EXT4_CLUSTERS_PER_GROUP(e4b->bd_sb)) {
1598                 /* Should never happen! (but apparently sometimes does?!?) */
1599                 WARN_ON(1);
1600                 ext4_error(e4b->bd_sb, "corruption or bug in mb_find_extent "
1601                            "block=%d, order=%d needed=%d ex=%u/%d/%d@%u",
1602                            block, order, needed, ex->fe_group, ex->fe_start,
1603                            ex->fe_len, ex->fe_logical);
1604                 ex->fe_len = 0;
1605                 ex->fe_start = 0;
1606                 ex->fe_group = 0;
1607         }
1608         return ex->fe_len;
1609 }
1610 
1611 static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
1612 {
1613         int ord;
1614         int mlen = 0;
1615         int max = 0;
1616         int cur;
1617         int start = ex->fe_start;
1618         int len = ex->fe_len;
1619         unsigned ret = 0;
1620         int len0 = len;
1621         void *buddy;
1622 
1623         BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3));
1624         BUG_ON(e4b->bd_group != ex->fe_group);
1625         assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
1626         mb_check_buddy(e4b);
1627         mb_mark_used_double(e4b, start, len);
1628 
1629         this_cpu_inc(discard_pa_seq);
1630         e4b->bd_info->bb_free -= len;
1631         if (e4b->bd_info->bb_first_free == start)
1632                 e4b->bd_info->bb_first_free += len;
1633 
1634         /* let's maintain fragments counter */
1635         if (start != 0)
1636                 mlen = !mb_test_bit(start - 1, e4b->bd_bitmap);
1637         if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0])
1638                 max = !mb_test_bit(start + len, e4b->bd_bitmap);
1639         if (mlen && max)
1640                 e4b->bd_info->bb_fragments++;
1641         else if (!mlen && !max)
1642                 e4b->bd_info->bb_fragments--;
1643 
1644         /* let's maintain buddy itself */
1645         while (len) {
1646                 ord = mb_find_order_for_block(e4b, start);
1647 
1648                 if (((start >> ord) << ord) == start && len >= (1 << ord)) {
1649                         /* the whole chunk may be allocated at once! */
1650                         mlen = 1 << ord;
1651                         buddy = mb_find_buddy(e4b, ord, &max);
1652                         BUG_ON((start >> ord) >= max);
1653                         mb_set_bit(start >> ord, buddy);
1654                         e4b->bd_info->bb_counters[ord]--;
1655                         start += mlen;
1656                         len -= mlen;
1657                         BUG_ON(len < 0);
1658                         continue;
1659                 }
1660 
1661                 /* store for history */
1662                 if (ret == 0)
1663                         ret = len | (ord << 16);
1664 
1665                 /* we have to split large buddy */
1666                 BUG_ON(ord <= 0);
1667                 buddy = mb_find_buddy(e4b, ord, &max);
1668                 mb_set_bit(start >> ord, buddy);
1669                 e4b->bd_info->bb_counters[ord]--;
1670 
1671                 ord--;
1672                 cur = (start >> ord) & ~1U;
1673                 buddy = mb_find_buddy(e4b, ord, &max);
1674                 mb_clear_bit(cur, buddy);
1675                 mb_clear_bit(cur + 1, buddy);
1676                 e4b->bd_info->bb_counters[ord]++;
1677                 e4b->bd_info->bb_counters[ord]++;
1678         }
1679         mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info);
1680 
1681         ext4_set_bits(e4b->bd_bitmap, ex->fe_start, len0);
1682         mb_check_buddy(e4b);
1683 
1684         return ret;
1685 }
1686 
1687 /*
1688  * Must be called under group lock!
1689  */
1690 static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
1691                                         struct ext4_buddy *e4b)
1692 {
1693         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1694         int ret;
1695 
1696         BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group);
1697         BUG_ON(ac->ac_status == AC_STATUS_FOUND);
1698 
1699         ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
1700         ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
1701         ret = mb_mark_used(e4b, &ac->ac_b_ex);
1702 
1703         /* preallocation can change ac_b_ex, thus we store actually
1704          * allocated blocks for history */
1705         ac->ac_f_ex = ac->ac_b_ex;
1706 
1707         ac->ac_status = AC_STATUS_FOUND;
1708         ac->ac_tail = ret & 0xffff;
1709         ac->ac_buddy = ret >> 16;
1710 
1711         /*
1712          * take the page reference. We want the page to be pinned
1713          * so that we don't get a ext4_mb_init_cache_call for this
1714          * group until we update the bitmap. That would mean we
1715          * double allocate blocks. The reference is dropped
1716          * in ext4_mb_release_context
1717          */
1718         ac->ac_bitmap_page = e4b->bd_bitmap_page;
1719         get_page(ac->ac_bitmap_page);
1720         ac->ac_buddy_page = e4b->bd_buddy_page;
1721         get_page(ac->ac_buddy_page);
1722         /* store last allocated for subsequent stream allocation */
1723         if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
1724                 spin_lock(&sbi->s_md_lock);
1725                 sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
1726                 sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
1727                 spin_unlock(&sbi->s_md_lock);
1728         }
1729         /*
1730          * As we've just preallocated more space than
1731          * user requested originally, we store allocated
1732          * space in a special descriptor.
1733          */
1734         if (ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
1735                 ext4_mb_new_preallocation(ac);
1736 
1737 }
1738 
1739 static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
1740                                         struct ext4_buddy *e4b,
1741                                         int finish_group)
1742 {
1743         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1744         struct ext4_free_extent *bex = &ac->ac_b_ex;
1745         struct ext4_free_extent *gex = &ac->ac_g_ex;
1746         struct ext4_free_extent ex;
1747         int max;
1748 
1749         if (ac->ac_status == AC_STATUS_FOUND)
1750                 return;
1751         /*
1752          * We don't want to scan for a whole year
1753          */
1754         if (ac->ac_found > sbi->s_mb_max_to_scan &&
1755                         !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
1756                 ac->ac_status = AC_STATUS_BREAK;
1757                 return;
1758         }
1759 
1760         /*
1761          * Haven't found good chunk so far, let's continue
1762          */
1763         if (bex->fe_len < gex->fe_len)
1764                 return;
1765 
1766         if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
1767                         && bex->fe_group == e4b->bd_group) {
1768                 /* recheck chunk's availability - we don't know
1769                  * when it was found (within this lock-unlock
1770                  * period or not) */
1771                 max = mb_find_extent(e4b, bex->fe_start, gex->fe_len, &ex);
1772                 if (max >= gex->fe_len) {
1773                         ext4_mb_use_best_found(ac, e4b);
1774                         return;
1775                 }
1776         }
1777 }
1778 
1779 /*
1780  * The routine checks whether found extent is good enough. If it is,
1781  * then the extent gets marked used and flag is set to the context
1782  * to stop scanning. Otherwise, the extent is compared with the
1783  * previous found extent and if new one is better, then it's stored
1784  * in the context. Later, the best found extent will be used, if
1785  * mballoc can't find good enough extent.
1786  *
1787  * FIXME: real allocation policy is to be designed yet!
1788  */
1789 static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
1790                                         struct ext4_free_extent *ex,
1791                                         struct ext4_buddy *e4b)
1792 {
1793         struct ext4_free_extent *bex = &ac->ac_b_ex;
1794         struct ext4_free_extent *gex = &ac->ac_g_ex;
1795 
1796         BUG_ON(ex->fe_len <= 0);
1797         BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
1798         BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
1799         BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);
1800 
1801         ac->ac_found++;
1802 
1803         /*
1804          * The special case - take what you catch first
1805          */
1806         if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
1807                 *bex = *ex;
1808                 ext4_mb_use_best_found(ac, e4b);
1809                 return;
1810         }
1811 
1812         /*
1813          * Let's check whether the chuck is good enough
1814          */
1815         if (ex->fe_len == gex->fe_len) {
1816                 *bex = *ex;
1817                 ext4_mb_use_best_found(ac, e4b);
1818                 return;
1819         }
1820 
1821         /*
1822          * If this is first found extent, just store it in the context
1823          */
1824         if (bex->fe_len == 0) {
1825                 *bex = *ex;
1826                 return;
1827         }
1828 
1829         /*
1830          * If new found extent is better, store it in the context
1831          */
1832         if (bex->fe_len < gex->fe_len) {
1833                 /* if the request isn't satisfied, any found extent
1834                  * larger than previous best one is better */
1835                 if (ex->fe_len > bex->fe_len)
1836                         *bex = *ex;
1837         } else if (ex->fe_len > gex->fe_len) {
1838                 /* if the request is satisfied, then we try to find
1839                  * an extent that still satisfy the request, but is
1840                  * smaller than previous one */
1841                 if (ex->fe_len < bex->fe_len)
1842                         *bex = *ex;
1843         }
1844 
1845         ext4_mb_check_limits(ac, e4b, 0);
1846 }
1847 
1848 static noinline_for_stack
1849 int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
1850                                         struct ext4_buddy *e4b)
1851 {
1852         struct ext4_free_extent ex = ac->ac_b_ex;
1853         ext4_group_t group = ex.fe_group;
1854         int max;
1855         int err;
1856 
1857         BUG_ON(ex.fe_len <= 0);
1858         err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
1859         if (err)
1860                 return err;
1861 
1862         ext4_lock_group(ac->ac_sb, group);
1863         max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex);
1864 
1865         if (max > 0) {
1866                 ac->ac_b_ex = ex;
1867                 ext4_mb_use_best_found(ac, e4b);
1868         }
1869 
1870         ext4_unlock_group(ac->ac_sb, group);
1871         ext4_mb_unload_buddy(e4b);
1872 
1873         return 0;
1874 }
1875 
1876 static noinline_for_stack
1877 int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
1878                                 struct ext4_buddy *e4b)
1879 {
1880         ext4_group_t group = ac->ac_g_ex.fe_group;
1881         int max;
1882         int err;
1883         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1884         struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
1885         struct ext4_free_extent ex;
1886 
1887         if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL))
1888                 return 0;
1889         if (grp->bb_free == 0)
1890                 return 0;
1891 
1892         err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
1893         if (err)
1894                 return err;
1895 
1896         if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) {
1897                 ext4_mb_unload_buddy(e4b);
1898                 return 0;
1899         }
1900 
1901         ext4_lock_group(ac->ac_sb, group);
1902         max = mb_find_extent(e4b, ac->ac_g_ex.fe_start,
1903                              ac->ac_g_ex.fe_len, &ex);
1904         ex.fe_logical = 0xDEADFA11; /* debug value */
1905 
1906         if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
1907                 ext4_fsblk_t start;
1908 
1909                 start = ext4_group_first_block_no(ac->ac_sb, e4b->bd_group) +
1910                         ex.fe_start;
1911                 /* use do_div to get remainder (would be 64-bit modulo) */
1912                 if (do_div(start, sbi->s_stripe) == 0) {
1913                         ac->ac_found++;
1914                         ac->ac_b_ex = ex;
1915                         ext4_mb_use_best_found(ac, e4b);
1916                 }
1917         } else if (max >= ac->ac_g_ex.fe_len) {
1918                 BUG_ON(ex.fe_len <= 0);
1919                 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
1920                 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
1921                 ac->ac_found++;
1922                 ac->ac_b_ex = ex;
1923                 ext4_mb_use_best_found(ac, e4b);
1924         } else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) {
1925                 /* Sometimes, caller may want to merge even small
1926                  * number of blocks to an existing extent */
1927                 BUG_ON(ex.fe_len <= 0);
1928                 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
1929                 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
1930                 ac->ac_found++;
1931                 ac->ac_b_ex = ex;
1932                 ext4_mb_use_best_found(ac, e4b);
1933         }
1934         ext4_unlock_group(ac->ac_sb, group);
1935         ext4_mb_unload_buddy(e4b);
1936 
1937         return 0;
1938 }
1939 
1940 /*
1941  * The routine scans buddy structures (not bitmap!) from given order
1942  * to max order and tries to find big enough chunk to satisfy the req
1943  */
1944 static noinline_for_stack
1945 void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
1946                                         struct ext4_buddy *e4b)
1947 {
1948         struct super_block *sb = ac->ac_sb;
1949         struct ext4_group_info *grp = e4b->bd_info;
1950         void *buddy;
1951         int i;
1952         int k;
1953         int max;
1954 
1955         BUG_ON(ac->ac_2order <= 0);
1956         for (i = ac->ac_2order; i <= sb->s_blocksize_bits + 1; i++) {
1957                 if (grp->bb_counters[i] == 0)
1958                         continue;
1959 
1960                 buddy = mb_find_buddy(e4b, i, &max);
1961                 BUG_ON(buddy == NULL);
1962 
1963                 k = mb_find_next_zero_bit(buddy, max, 0);
1964                 if (k >= max) {
1965                         ext4_grp_locked_error(ac->ac_sb, e4b->bd_group, 0, 0,
1966                                 "%d free clusters of order %d. But found 0",
1967                                 grp->bb_counters[i], i);
1968                         ext4_mark_group_bitmap_corrupted(ac->ac_sb,
1969                                          e4b->bd_group,
1970                                         EXT4_GROUP_INFO_BBITMAP_CORRUPT);
1971                         break;
1972                 }
1973                 ac->ac_found++;
1974 
1975                 ac->ac_b_ex.fe_len = 1 << i;
1976                 ac->ac_b_ex.fe_start = k << i;
1977                 ac->ac_b_ex.fe_group = e4b->bd_group;
1978 
1979                 ext4_mb_use_best_found(ac, e4b);
1980 
1981                 BUG_ON(ac->ac_f_ex.fe_len != ac->ac_g_ex.fe_len);
1982 
1983                 if (EXT4_SB(sb)->s_mb_stats)
1984                         atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
1985 
1986                 break;
1987         }
1988 }
1989 
1990 /*
1991  * The routine scans the group and measures all found extents.
1992  * In order to optimize scanning, caller must pass number of
1993  * free blocks in the group, so the routine can know upper limit.
1994  */
1995 static noinline_for_stack
1996 void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
1997                                         struct ext4_buddy *e4b)
1998 {
1999         struct super_block *sb = ac->ac_sb;
2000         void *bitmap = e4b->bd_bitmap;
2001         struct ext4_free_extent ex;
2002         int i;
2003         int free;
2004 
2005         free = e4b->bd_info->bb_free;
2006         if (WARN_ON(free <= 0))
2007                 return;
2008 
2009         i = e4b->bd_info->bb_first_free;
2010 
2011         while (free && ac->ac_status == AC_STATUS_CONTINUE) {
2012                 i = mb_find_next_zero_bit(bitmap,
2013                                                 EXT4_CLUSTERS_PER_GROUP(sb), i);
2014                 if (i >= EXT4_CLUSTERS_PER_GROUP(sb)) {
2015                         /*
2016                          * IF we have corrupt bitmap, we won't find any
2017                          * free blocks even though group info says we
2018                          * have free blocks
2019                          */
2020                         ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
2021                                         "%d free clusters as per "
2022                                         "group info. But bitmap says 0",
2023                                         free);
2024                         ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
2025                                         EXT4_GROUP_INFO_BBITMAP_CORRUPT);
2026                         break;
2027                 }
2028 
2029                 mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex);
2030                 if (WARN_ON(ex.fe_len <= 0))
2031                         break;
2032                 if (free < ex.fe_len) {
2033                         ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
2034                                         "%d free clusters as per "
2035                                         "group info. But got %d blocks",
2036                                         free, ex.fe_len);
2037                         ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
2038                                         EXT4_GROUP_INFO_BBITMAP_CORRUPT);
2039                         /*
2040                          * The number of free blocks differs. This mostly
2041                          * indicate that the bitmap is corrupt. So exit
2042                          * without claiming the space.
2043                          */
2044                         break;
2045                 }
2046                 ex.fe_logical = 0xDEADC0DE; /* debug value */
2047                 ext4_mb_measure_extent(ac, &ex, e4b);
2048 
2049                 i += ex.fe_len;
2050                 free -= ex.fe_len;
2051         }
2052 
2053         ext4_mb_check_limits(ac, e4b, 1);
2054 }
2055 
2056 /*
2057  * This is a special case for storages like raid5
2058  * we try to find stripe-aligned chunks for stripe-size-multiple requests
2059  */
2060 static noinline_for_stack
2061 void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
2062                                  struct ext4_buddy *e4b)
2063 {
2064         struct super_block *sb = ac->ac_sb;
2065         struct ext4_sb_info *sbi = EXT4_SB(sb);
2066         void *bitmap = e4b->bd_bitmap;
2067         struct ext4_free_extent ex;
2068         ext4_fsblk_t first_group_block;
2069         ext4_fsblk_t a;
2070         ext4_grpblk_t i;
2071         int max;
2072 
2073         BUG_ON(sbi->s_stripe == 0);
2074 
2075         /* find first stripe-aligned block in group */
2076         first_group_block = ext4_group_first_block_no(sb, e4b->bd_group);
2077 
2078         a = first_group_block + sbi->s_stripe - 1;
2079         do_div(a, sbi->s_stripe);
2080         i = (a * sbi->s_stripe) - first_group_block;
2081 
2082         while (i < EXT4_CLUSTERS_PER_GROUP(sb)) {
2083                 if (!mb_test_bit(i, bitmap)) {
2084                         max = mb_find_extent(e4b, i, sbi->s_stripe, &ex);
2085                         if (max >= sbi->s_stripe) {
2086                                 ac->ac_found++;
2087                                 ex.fe_logical = 0xDEADF00D; /* debug value */
2088                                 ac->ac_b_ex = ex;
2089                                 ext4_mb_use_best_found(ac, e4b);
2090                                 break;
2091                         }
2092                 }
2093                 i += sbi->s_stripe;
2094         }
2095 }
2096 
2097 /*
2098  * This is also called BEFORE we load the buddy bitmap.
2099  * Returns either 1 or 0 indicating that the group is either suitable
2100  * for the allocation or not.
2101  */
2102 static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
2103                                 ext4_group_t group, int cr)
2104 {
2105         ext4_grpblk_t free, fragments;
2106         int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb));
2107         struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2108 
2109         BUG_ON(cr < 0 || cr >= 4);
2110 
2111         if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
2112                 return false;
2113 
2114         free = grp->bb_free;
2115         if (free == 0)
2116                 return false;
2117 
2118         fragments = grp->bb_fragments;
2119         if (fragments == 0)
2120                 return false;
2121 
2122         switch (cr) {
2123         case 0:
2124                 BUG_ON(ac->ac_2order == 0);
2125 
2126                 /* Avoid using the first bg of a flexgroup for data files */
2127                 if ((ac->ac_flags & EXT4_MB_HINT_DATA) &&
2128                     (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) &&
2129                     ((group % flex_size) == 0))
2130                         return false;
2131 
2132                 if (free < ac->ac_g_ex.fe_len)
2133                         return false;
2134 
2135                 if (ac->ac_2order > ac->ac_sb->s_blocksize_bits+1)
2136                         return true;
2137 
2138                 if (grp->bb_largest_free_order < ac->ac_2order)
2139                         return false;
2140 
2141                 return true;
2142         case 1:
2143                 if ((free / fragments) >= ac->ac_g_ex.fe_len)
2144                         return true;
2145                 break;
2146         case 2:
2147                 if (free >= ac->ac_g_ex.fe_len)
2148                         return true;
2149                 break;
2150         case 3:
2151                 return true;
2152         default:
2153                 BUG();
2154         }
2155 
2156         return false;
2157 }
2158 
2159 /*
2160  * This could return negative error code if something goes wrong
2161  * during ext4_mb_init_group(). This should not be called with
2162  * ext4_lock_group() held.
2163  */
2164 static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac,
2165                                      ext4_group_t group, int cr)
2166 {
2167         struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2168         struct super_block *sb = ac->ac_sb;
2169         struct ext4_sb_info *sbi = EXT4_SB(sb);
2170         bool should_lock = ac->ac_flags & EXT4_MB_STRICT_CHECK;
2171         ext4_grpblk_t free;
2172         int ret = 0;
2173 
2174         if (should_lock)
2175                 ext4_lock_group(sb, group);
2176         free = grp->bb_free;
2177         if (free == 0)
2178                 goto out;
2179         if (cr <= 2 && free < ac->ac_g_ex.fe_len)
2180                 goto out;
2181         if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
2182                 goto out;
2183         if (should_lock)
2184                 ext4_unlock_group(sb, group);
2185 
2186         /* We only do this if the grp has never been initialized */
2187         if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
2188                 struct ext4_group_desc *gdp =
2189                         ext4_get_group_desc(sb, group, NULL);
2190                 int ret;
2191 
2192                 /* cr=0/1 is a very optimistic search to find large
2193                  * good chunks almost for free.  If buddy data is not
2194                  * ready, then this optimization makes no sense.  But
2195                  * we never skip the first block group in a flex_bg,
2196                  * since this gets used for metadata block allocation,
2197                  * and we want to make sure we locate metadata blocks
2198                  * in the first block group in the flex_bg if possible.
2199                  */
2200                 if (cr < 2 &&
2201                     (!sbi->s_log_groups_per_flex ||
2202                      ((group & ((1 << sbi->s_log_groups_per_flex) - 1)) != 0)) &&
2203                     !(ext4_has_group_desc_csum(sb) &&
2204                       (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))))
2205                         return 0;
2206                 ret = ext4_mb_init_group(sb, group, GFP_NOFS);
2207                 if (ret)
2208                         return ret;
2209         }
2210 
2211         if (should_lock)
2212                 ext4_lock_group(sb, group);
2213         ret = ext4_mb_good_group(ac, group, cr);
2214 out:
2215         if (should_lock)
2216                 ext4_unlock_group(sb, group);
2217         return ret;
2218 }
2219 
2220 /*
2221  * Start prefetching @nr block bitmaps starting at @group.
2222  * Return the next group which needs to be prefetched.
2223  */
2224 ext4_group_t ext4_mb_prefetch(struct super_block *sb, ext4_group_t group,
2225                               unsigned int nr, int *cnt)
2226 {
2227         ext4_group_t ngroups = ext4_get_groups_count(sb);
2228         struct buffer_head *bh;
2229         struct blk_plug plug;
2230 
2231         blk_start_plug(&plug);
2232         while (nr-- > 0) {
2233                 struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group,
2234                                                                   NULL);
2235                 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
2236 
2237                 /*
2238                  * Prefetch block groups with free blocks; but don't
2239                  * bother if it is marked uninitialized on disk, since
2240                  * it won't require I/O to read.  Also only try to
2241                  * prefetch once, so we avoid getblk() call, which can
2242                  * be expensive.
2243                  */
2244                 if (!EXT4_MB_GRP_TEST_AND_SET_READ(grp) &&
2245                     EXT4_MB_GRP_NEED_INIT(grp) &&
2246                     ext4_free_group_clusters(sb, gdp) > 0 &&
2247                     !(ext4_has_group_desc_csum(sb) &&
2248                       (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) {
2249                         bh = ext4_read_block_bitmap_nowait(sb, group, true);
2250                         if (bh && !IS_ERR(bh)) {
2251                                 if (!buffer_uptodate(bh) && cnt)
2252                                         (*cnt)++;
2253                                 brelse(bh);
2254                         }
2255                 }
2256                 if (++group >= ngroups)
2257                         group = 0;
2258         }
2259         blk_finish_plug(&plug);
2260         return group;
2261 }
2262 
2263 /*
2264  * Prefetching reads the block bitmap into the buffer cache; but we
2265  * need to make sure that the buddy bitmap in the page cache has been
2266  * initialized.  Note that ext4_mb_init_group() will block if the I/O
2267  * is not yet completed, or indeed if it was not initiated by
2268  * ext4_mb_prefetch did not start the I/O.
2269  *
2270  * TODO: We should actually kick off the buddy bitmap setup in a work
2271  * queue when the buffer I/O is completed, so that we don't block
2272  * waiting for the block allocation bitmap read to finish when
2273  * ext4_mb_prefetch_fini is called from ext4_mb_regular_allocator().
2274  */
2275 void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group,
2276                            unsigned int nr)
2277 {
2278         while (nr-- > 0) {
2279                 struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group,
2280                                                                   NULL);
2281                 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
2282 
2283                 if (!group)
2284                         group = ext4_get_groups_count(sb);
2285                 group--;
2286                 grp = ext4_get_group_info(sb, group);
2287 
2288                 if (EXT4_MB_GRP_NEED_INIT(grp) &&
2289                     ext4_free_group_clusters(sb, gdp) > 0 &&
2290                     !(ext4_has_group_desc_csum(sb) &&
2291                       (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) {
2292                         if (ext4_mb_init_group(sb, group, GFP_NOFS))
2293                                 break;
2294                 }
2295         }
2296 }
2297 
2298 static noinline_for_stack int
2299 ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
2300 {
2301         ext4_group_t prefetch_grp = 0, ngroups, group, i;
2302         int cr = -1;
2303         int err = 0, first_err = 0;
2304         unsigned int nr = 0, prefetch_ios = 0;
2305         struct ext4_sb_info *sbi;
2306         struct super_block *sb;
2307         struct ext4_buddy e4b;
2308         int lost;
2309 
2310         sb = ac->ac_sb;
2311         sbi = EXT4_SB(sb);
2312         ngroups = ext4_get_groups_count(sb);
2313         /* non-extent files are limited to low blocks/groups */
2314         if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
2315                 ngroups = sbi->s_blockfile_groups;
2316 
2317         BUG_ON(ac->ac_status == AC_STATUS_FOUND);
2318 
2319         /* first, try the goal */
2320         err = ext4_mb_find_by_goal(ac, &e4b);
2321         if (err || ac->ac_status == AC_STATUS_FOUND)
2322                 goto out;
2323 
2324         if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
2325                 goto out;
2326 
2327         /*
2328          * ac->ac_2order is set only if the fe_len is a power of 2
2329          * if ac->ac_2order is set we also set criteria to 0 so that we
2330          * try exact allocation using buddy.
2331          */
2332         i = fls(ac->ac_g_ex.fe_len);
2333         ac->ac_2order = 0;
2334         /*
2335          * We search using buddy data only if the order of the request
2336          * is greater than equal to the sbi_s_mb_order2_reqs
2337          * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req
2338          * We also support searching for power-of-two requests only for
2339          * requests upto maximum buddy size we have constructed.
2340          */
2341         if (i >= sbi->s_mb_order2_reqs && i <= sb->s_blocksize_bits + 2) {
2342                 /*
2343                  * This should tell if fe_len is exactly power of 2
2344                  */
2345                 if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0)
2346                         ac->ac_2order = array_index_nospec(i - 1,
2347                                                            sb->s_blocksize_bits + 2);
2348         }
2349 
2350         /* if stream allocation is enabled, use global goal */
2351         if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
2352                 /* TBD: may be hot point */
2353                 spin_lock(&sbi->s_md_lock);
2354                 ac->ac_g_ex.fe_group = sbi->s_mb_last_group;
2355                 ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
2356                 spin_unlock(&sbi->s_md_lock);
2357         }
2358 
2359         /* Let's just scan groups to find more-less suitable blocks */
2360         cr = ac->ac_2order ? 0 : 1;
2361         /*
2362          * cr == 0 try to get exact allocation,
2363          * cr == 3  try to get anything
2364          */
2365 repeat:
2366         for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
2367                 ac->ac_criteria = cr;
2368                 /*
2369                  * searching for the right group start
2370                  * from the goal value specified
2371                  */
2372                 group = ac->ac_g_ex.fe_group;
2373                 prefetch_grp = group;
2374 
2375                 for (i = 0; i < ngroups; group++, i++) {
2376                         int ret = 0;
2377                         cond_resched();
2378                         /*
2379                          * Artificially restricted ngroups for non-extent
2380                          * files makes group > ngroups possible on first loop.
2381                          */
2382                         if (group >= ngroups)
2383                                 group = 0;
2384 
2385                         /*
2386                          * Batch reads of the block allocation bitmaps
2387                          * to get multiple READs in flight; limit
2388                          * prefetching at cr=0/1, otherwise mballoc can
2389                          * spend a lot of time loading imperfect groups
2390                          */
2391                         if ((prefetch_grp == group) &&
2392                             (cr > 1 ||
2393                              prefetch_ios < sbi->s_mb_prefetch_limit)) {
2394                                 unsigned int curr_ios = prefetch_ios;
2395 
2396                                 nr = sbi->s_mb_prefetch;
2397                                 if (ext4_has_feature_flex_bg(sb)) {
2398                                         nr = (group / sbi->s_mb_prefetch) *
2399                                                 sbi->s_mb_prefetch;
2400                                         nr = nr + sbi->s_mb_prefetch - group;
2401                                 }
2402                                 prefetch_grp = ext4_mb_prefetch(sb, group,
2403                                                         nr, &prefetch_ios);
2404                                 if (prefetch_ios == curr_ios)
2405                                         nr = 0;
2406                         }
2407 
2408                         /* This now checks without needing the buddy page */
2409                         ret = ext4_mb_good_group_nolock(ac, group, cr);
2410                         if (ret <= 0) {
2411                                 if (!first_err)
2412                                         first_err = ret;
2413                                 continue;
2414                         }
2415 
2416                         err = ext4_mb_load_buddy(sb, group, &e4b);
2417                         if (err)
2418                                 goto out;
2419 
2420                         ext4_lock_group(sb, group);
2421 
2422                         /*
2423                          * We need to check again after locking the
2424                          * block group
2425                          */
2426                         ret = ext4_mb_good_group(ac, group, cr);
2427                         if (ret == 0) {
2428                                 ext4_unlock_group(sb, group);
2429                                 ext4_mb_unload_buddy(&e4b);
2430                                 continue;
2431                         }
2432 
2433                         ac->ac_groups_scanned++;
2434                         if (cr == 0)
2435                                 ext4_mb_simple_scan_group(ac, &e4b);
2436                         else if (cr == 1 && sbi->s_stripe &&
2437                                         !(ac->ac_g_ex.fe_len % sbi->s_stripe))
2438                                 ext4_mb_scan_aligned(ac, &e4b);
2439                         else
2440                                 ext4_mb_complex_scan_group(ac, &e4b);
2441 
2442                         ext4_unlock_group(sb, group);
2443                         ext4_mb_unload_buddy(&e4b);
2444 
2445                         if (ac->ac_status != AC_STATUS_CONTINUE)
2446                                 break;
2447                 }
2448         }
2449 
2450         if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND &&
2451             !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2452                 /*
2453                  * We've been searching too long. Let's try to allocate
2454                  * the best chunk we've found so far
2455                  */
2456                 ext4_mb_try_best_found(ac, &e4b);
2457                 if (ac->ac_status != AC_STATUS_FOUND) {
2458                         /*
2459                          * Someone more lucky has already allocated it.
2460                          * The only thing we can do is just take first
2461                          * found block(s)
2462                          */
2463                         lost = atomic_inc_return(&sbi->s_mb_lost_chunks);
2464                         mb_debug(sb, "lost chunk, group: %u, start: %d, len: %d, lost: %d\n",
2465                                  ac->ac_b_ex.fe_group, ac->ac_b_ex.fe_start,
2466                                  ac->ac_b_ex.fe_len, lost);
2467 
2468                         ac->ac_b_ex.fe_group = 0;
2469                         ac->ac_b_ex.fe_start = 0;
2470                         ac->ac_b_ex.fe_len = 0;
2471                         ac->ac_status = AC_STATUS_CONTINUE;
2472                         ac->ac_flags |= EXT4_MB_HINT_FIRST;
2473                         cr = 3;
2474                         goto repeat;
2475                 }
2476         }
2477 out:
2478         if (!err && ac->ac_status != AC_STATUS_FOUND && first_err)
2479                 err = first_err;
2480 
2481         mb_debug(sb, "Best len %d, origin len %d, ac_status %u, ac_flags 0x%x, cr %d ret %d\n",
2482                  ac->ac_b_ex.fe_len, ac->ac_o_ex.fe_len, ac->ac_status,
2483                  ac->ac_flags, cr, err);
2484 
2485         if (nr)
2486                 ext4_mb_prefetch_fini(sb, prefetch_grp, nr);
2487 
2488         return err;
2489 }
2490 
2491 static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
2492 {
2493         struct super_block *sb = PDE_DATA(file_inode(seq->file));
2494         ext4_group_t group;
2495 
2496         if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
2497                 return NULL;
2498         group = *pos + 1;
2499         return (void *) ((unsigned long) group);
2500 }
2501 
2502 static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
2503 {
2504         struct super_block *sb = PDE_DATA(file_inode(seq->file));
2505         ext4_group_t group;
2506 
2507         ++*pos;
2508         if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
2509                 return NULL;
2510         group = *pos + 1;
2511         return (void *) ((unsigned long) group);
2512 }
2513 
2514 static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
2515 {
2516         struct super_block *sb = PDE_DATA(file_inode(seq->file));
2517         ext4_group_t group = (ext4_group_t) ((unsigned long) v);
2518         int i;
2519         int err, buddy_loaded = 0;
2520         struct ext4_buddy e4b;
2521         struct ext4_group_info *grinfo;
2522         unsigned char blocksize_bits = min_t(unsigned char,
2523                                              sb->s_blocksize_bits,
2524                                              EXT4_MAX_BLOCK_LOG_SIZE);
2525         struct sg {
2526                 struct ext4_group_info info;
2527                 ext4_grpblk_t counters[EXT4_MAX_BLOCK_LOG_SIZE + 2];
2528         } sg;
2529 
2530         group--;
2531         if (group == 0)
2532                 seq_puts(seq, "#group: free  frags first ["
2533                               " 2^0   2^1   2^2   2^3   2^4   2^5   2^6  "
2534                               " 2^7   2^8   2^9   2^10  2^11  2^12  2^13  ]\n");
2535 
2536         i = (blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
2537                 sizeof(struct ext4_group_info);
2538 
2539         grinfo = ext4_get_group_info(sb, group);
2540         /* Load the group info in memory only if not already loaded. */
2541         if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) {
2542                 err = ext4_mb_load_buddy(sb, group, &e4b);
2543                 if (err) {
2544                         seq_printf(seq, "#%-5u: I/O error\n", group);
2545                         return 0;
2546                 }
2547                 buddy_loaded = 1;
2548         }
2549 
2550         memcpy(&sg, ext4_get_group_info(sb, group), i);
2551 
2552         if (buddy_loaded)
2553                 ext4_mb_unload_buddy(&e4b);
2554 
2555         seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
2556                         sg.info.bb_fragments, sg.info.bb_first_free);
2557         for (i = 0; i <= 13; i++)
2558                 seq_printf(seq, " %-5u", i <= blocksize_bits + 1 ?
2559                                 sg.info.bb_counters[i] : 0);
2560         seq_puts(seq, " ]\n");
2561 
2562         return 0;
2563 }
2564 
2565 static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v)
2566 {
2567 }
2568 
2569 const struct seq_operations ext4_mb_seq_groups_ops = {
2570         .start  = ext4_mb_seq_groups_start,
2571         .next   = ext4_mb_seq_groups_next,
2572         .stop   = ext4_mb_seq_groups_stop,
2573         .show   = ext4_mb_seq_groups_show,
2574 };
2575 
2576 static struct kmem_cache *get_groupinfo_cache(int blocksize_bits)
2577 {
2578         int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
2579         struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index];
2580 
2581         BUG_ON(!cachep);
2582         return cachep;
2583 }
2584 
2585 /*
2586  * Allocate the top-level s_group_info array for the specified number
2587  * of groups
2588  */
2589 int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
2590 {
2591         struct ext4_sb_info *sbi = EXT4_SB(sb);
2592         unsigned size;
2593         struct ext4_group_info ***old_groupinfo, ***new_groupinfo;
2594 
2595         size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >>
2596                 EXT4_DESC_PER_BLOCK_BITS(sb);
2597         if (size <= sbi->s_group_info_size)
2598                 return 0;
2599 
2600         size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size);
2601         new_groupinfo = kvzalloc(size, GFP_KERNEL);
2602         if (!new_groupinfo) {
2603                 ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
2604                 return -ENOMEM;
2605         }
2606         rcu_read_lock();
2607         old_groupinfo = rcu_dereference(sbi->s_group_info);
2608         if (old_groupinfo)
2609                 memcpy(new_groupinfo, old_groupinfo,
2610                        sbi->s_group_info_size * sizeof(*sbi->s_group_info));
2611         rcu_read_unlock();
2612         rcu_assign_pointer(sbi->s_group_info, new_groupinfo);
2613         sbi->s_group_info_size = size / sizeof(*sbi->s_group_info);
2614         if (old_groupinfo)
2615                 ext4_kvfree_array_rcu(old_groupinfo);
2616         ext4_debug("allocated s_groupinfo array for %d meta_bg's\n", 
2617                    sbi->s_group_info_size);
2618         return 0;
2619 }
2620 
2621 /* Create and initialize ext4_group_info data for the given group. */
2622 int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
2623                           struct ext4_group_desc *desc)
2624 {
2625         int i;
2626         int metalen = 0;
2627         int idx = group >> EXT4_DESC_PER_BLOCK_BITS(sb);
2628         struct ext4_sb_info *sbi = EXT4_SB(sb);
2629         struct ext4_group_info **meta_group_info;
2630         struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
2631 
2632         /*
2633          * First check if this group is the first of a reserved block.
2634          * If it's true, we have to allocate a new table of pointers
2635          * to ext4_group_info structures
2636          */
2637         if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
2638                 metalen = sizeof(*meta_group_info) <<
2639                         EXT4_DESC_PER_BLOCK_BITS(sb);
2640                 meta_group_info = kmalloc(metalen, GFP_NOFS);
2641                 if (meta_group_info == NULL) {
2642                         ext4_msg(sb, KERN_ERR, "can't allocate mem "
2643                                  "for a buddy group");
2644                         goto exit_meta_group_info;
2645                 }
2646                 rcu_read_lock();
2647                 rcu_dereference(sbi->s_group_info)[idx] = meta_group_info;
2648                 rcu_read_unlock();
2649         }
2650 
2651         meta_group_info = sbi_array_rcu_deref(sbi, s_group_info, idx);
2652         i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
2653 
2654         meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS);
2655         if (meta_group_info[i] == NULL) {
2656                 ext4_msg(sb, KERN_ERR, "can't allocate buddy mem");
2657                 goto exit_group_info;
2658         }
2659         set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT,
2660                 &(meta_group_info[i]->bb_state));
2661 
2662         /*
2663          * initialize bb_free to be able to skip
2664          * empty groups without initialization
2665          */
2666         if (ext4_has_group_desc_csum(sb) &&
2667             (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
2668                 meta_group_info[i]->bb_free =
2669                         ext4_free_clusters_after_init(sb, group, desc);
2670         } else {
2671                 meta_group_info[i]->bb_free =
2672                         ext4_free_group_clusters(sb, desc);
2673         }
2674 
2675         INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
2676         init_rwsem(&meta_group_info[i]->alloc_sem);
2677         meta_group_info[i]->bb_free_root = RB_ROOT;
2678         meta_group_info[i]->bb_largest_free_order = -1;  /* uninit */
2679 
2680         mb_group_bb_bitmap_alloc(sb, meta_group_info[i], group);
2681         return 0;
2682 
2683 exit_group_info:
2684         /* If a meta_group_info table has been allocated, release it now */
2685         if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
2686                 struct ext4_group_info ***group_info;
2687 
2688                 rcu_read_lock();
2689                 group_info = rcu_dereference(sbi->s_group_info);
2690                 kfree(group_info[idx]);
2691                 group_info[idx] = NULL;
2692                 rcu_read_unlock();
2693         }
2694 exit_meta_group_info:
2695         return -ENOMEM;
2696 } /* ext4_mb_add_groupinfo */
2697 
2698 static int ext4_mb_init_backend(struct super_block *sb)
2699 {
2700         ext4_group_t ngroups = ext4_get_groups_count(sb);
2701         ext4_group_t i;
2702         struct ext4_sb_info *sbi = EXT4_SB(sb);
2703         int err;
2704         struct ext4_group_desc *desc;
2705         struct ext4_group_info ***group_info;
2706         struct kmem_cache *cachep;
2707 
2708         err = ext4_mb_alloc_groupinfo(sb, ngroups);
2709         if (err)
2710                 return err;
2711 
2712         sbi->s_buddy_cache = new_inode(sb);
2713         if (sbi->s_buddy_cache == NULL) {
2714                 ext4_msg(sb, KERN_ERR, "can't get new inode");
2715                 goto err_freesgi;
2716         }
2717         /* To avoid potentially colliding with an valid on-disk inode number,
2718          * use EXT4_BAD_INO for the buddy cache inode number.  This inode is
2719          * not in the inode hash, so it should never be found by iget(), but
2720          * this will avoid confusion if it ever shows up during debugging. */
2721         sbi->s_buddy_cache->i_ino = EXT4_BAD_INO;
2722         EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
2723         for (i = 0; i < ngroups; i++) {
2724                 cond_resched();
2725                 desc = ext4_get_group_desc(sb, i, NULL);
2726                 if (desc == NULL) {
2727                         ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i);
2728                         goto err_freebuddy;
2729                 }
2730                 if (ext4_mb_add_groupinfo(sb, i, desc) != 0)
2731                         goto err_freebuddy;
2732         }
2733 
2734         if (ext4_has_feature_flex_bg(sb)) {
2735                 /* a single flex group is supposed to be read by a single IO */
2736                 sbi->s_mb_prefetch = 1 << sbi->s_es->s_log_groups_per_flex;
2737                 sbi->s_mb_prefetch *= 8; /* 8 prefetch IOs in flight at most */
2738         } else {
2739                 sbi->s_mb_prefetch = 32;
2740         }
2741         if (sbi->s_mb_prefetch > ext4_get_groups_count(sb))
2742                 sbi->s_mb_prefetch = ext4_get_groups_count(sb);
2743         /* now many real IOs to prefetch within a single allocation at cr=0
2744          * given cr=0 is an CPU-related optimization we shouldn't try to
2745          * load too many groups, at some point we should start to use what
2746          * we've got in memory.
2747          * with an average random access time 5ms, it'd take a second to get
2748          * 200 groups (* N with flex_bg), so let's make this limit 4
2749          */
2750         sbi->s_mb_prefetch_limit = sbi->s_mb_prefetch * 4;
2751         if (sbi->s_mb_prefetch_limit > ext4_get_groups_count(sb))
2752                 sbi->s_mb_prefetch_limit = ext4_get_groups_count(sb);
2753 
2754         return 0;
2755 
2756 err_freebuddy:
2757         cachep = get_groupinfo_cache(sb->s_blocksize_bits);
2758         while (i-- > 0)
2759                 kmem_cache_free(cachep, ext4_get_group_info(sb, i));
2760         i = sbi->s_group_info_size;
2761         rcu_read_lock();
2762         group_info = rcu_dereference(sbi->s_group_info);
2763         while (i-- > 0)
2764                 kfree(group_info[i]);
2765         rcu_read_unlock();
2766         iput(sbi->s_buddy_cache);
2767 err_freesgi:
2768         rcu_read_lock();
2769         kvfree(rcu_dereference(sbi->s_group_info));
2770         rcu_read_unlock();
2771         return -ENOMEM;
2772 }
2773 
2774 static void ext4_groupinfo_destroy_slabs(void)
2775 {
2776         int i;
2777 
2778         for (i = 0; i < NR_GRPINFO_CACHES; i++) {
2779                 kmem_cache_destroy(ext4_groupinfo_caches[i]);
2780                 ext4_groupinfo_caches[i] = NULL;
2781         }
2782 }
2783 
2784 static int ext4_groupinfo_create_slab(size_t size)
2785 {
2786         static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex);
2787         int slab_size;
2788         int blocksize_bits = order_base_2(size);
2789         int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
2790         struct kmem_cache *cachep;
2791 
2792         if (cache_index >= NR_GRPINFO_CACHES)
2793                 return -EINVAL;
2794 
2795         if (unlikely(cache_index < 0))
2796                 cache_index = 0;
2797 
2798         mutex_lock(&ext4_grpinfo_slab_create_mutex);
2799         if (ext4_groupinfo_caches[cache_index]) {
2800                 mutex_unlock(&ext4_grpinfo_slab_create_mutex);
2801                 return 0;       /* Already created */
2802         }
2803 
2804         slab_size = offsetof(struct ext4_group_info,
2805                                 bb_counters[blocksize_bits + 2]);
2806 
2807         cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index],
2808                                         slab_size, 0, SLAB_RECLAIM_ACCOUNT,
2809                                         NULL);
2810 
2811         ext4_groupinfo_caches[cache_index] = cachep;
2812 
2813         mutex_unlock(&ext4_grpinfo_slab_create_mutex);
2814         if (!cachep) {
2815                 printk(KERN_EMERG
2816                        "EXT4-fs: no memory for groupinfo slab cache\n");
2817                 return -ENOMEM;
2818         }
2819 
2820         return 0;
2821 }
2822 
2823 int ext4_mb_init(struct super_block *sb)
2824 {
2825         struct ext4_sb_info *sbi = EXT4_SB(sb);
2826         unsigned i, j;
2827         unsigned offset, offset_incr;
2828         unsigned max;
2829         int ret;
2830 
2831         i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_offsets);
2832 
2833         sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
2834         if (sbi->s_mb_offsets == NULL) {
2835                 ret = -ENOMEM;
2836                 goto out;
2837         }
2838 
2839         i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_maxs);
2840         sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
2841         if (sbi->s_mb_maxs == NULL) {
2842                 ret = -ENOMEM;
2843                 goto out;
2844         }
2845 
2846         ret = ext4_groupinfo_create_slab(sb->s_blocksize);
2847         if (ret < 0)
2848                 goto out;
2849 
2850         /* order 0 is regular bitmap */
2851         sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
2852         sbi->s_mb_offsets[0] = 0;
2853 
2854         i = 1;
2855         offset = 0;
2856         offset_incr = 1 << (sb->s_blocksize_bits - 1);
2857         max = sb->s_blocksize << 2;
2858         do {
2859                 sbi->s_mb_offsets[i] = offset;
2860                 sbi->s_mb_maxs[i] = max;
2861                 offset += offset_incr;
2862                 offset_incr = offset_incr >> 1;
2863                 max = max >> 1;
2864                 i++;
2865         } while (i <= sb->s_blocksize_bits + 1);
2866 
2867         spin_lock_init(&sbi->s_md_lock);
2868         spin_lock_init(&sbi->s_bal_lock);
2869         sbi->s_mb_free_pending = 0;
2870         INIT_LIST_HEAD(&sbi->s_freed_data_list);
2871 
2872         sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
2873         sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
2874         sbi->s_mb_stats = MB_DEFAULT_STATS;
2875         sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
2876         sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
2877         sbi->s_mb_max_inode_prealloc = MB_DEFAULT_MAX_INODE_PREALLOC;
2878         /*
2879          * The default group preallocation is 512, which for 4k block
2880          * sizes translates to 2 megabytes.  However for bigalloc file
2881          * systems, this is probably too big (i.e, if the cluster size
2882          * is 1 megabyte, then group preallocation size becomes half a
2883          * gigabyte!).  As a default, we will keep a two megabyte
2884          * group pralloc size for cluster sizes up to 64k, and after
2885          * that, we will force a minimum group preallocation size of
2886          * 32 clusters.  This translates to 8 megs when the cluster
2887          * size is 256k, and 32 megs when the cluster size is 1 meg,
2888          * which seems reasonable as a default.
2889          */
2890         sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >>
2891                                        sbi->s_cluster_bits, 32);
2892         /*
2893          * If there is a s_stripe > 1, then we set the s_mb_group_prealloc
2894          * to the lowest multiple of s_stripe which is bigger than
2895          * the s_mb_group_prealloc as determined above. We want
2896          * the preallocation size to be an exact multiple of the
2897          * RAID stripe size so that preallocations don't fragment
2898          * the stripes.
2899          */
2900         if (sbi->s_stripe > 1) {
2901                 sbi->s_mb_group_prealloc = roundup(
2902                         sbi->s_mb_group_prealloc, sbi->s_stripe);
2903         }
2904 
2905         sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
2906         if (sbi->s_locality_groups == NULL) {
2907                 ret = -ENOMEM;
2908                 goto out;
2909         }
2910         for_each_possible_cpu(i) {
2911                 struct ext4_locality_group *lg;
2912                 lg = per_cpu_ptr(sbi->s_locality_groups, i);
2913                 mutex_init(&lg->lg_mutex);
2914                 for (j = 0; j < PREALLOC_TB_SIZE; j++)
2915                         INIT_LIST_HEAD(&lg->lg_prealloc_list[j]);
2916                 spin_lock_init(&lg->lg_prealloc_lock);
2917         }
2918 
2919         /* init file for buddy data */
2920         ret = ext4_mb_init_backend(sb);
2921         if (ret != 0)
2922                 goto out_free_locality_groups;
2923 
2924         return 0;
2925 
2926 out_free_locality_groups:
2927         free_percpu(sbi->s_locality_groups);
2928         sbi->s_locality_groups = NULL;
2929 out:
2930         kfree(sbi->s_mb_offsets);
2931         sbi->s_mb_offsets = NULL;
2932         kfree(sbi->s_mb_maxs);
2933         sbi->s_mb_maxs = NULL;
2934         return ret;
2935 }
2936 
2937 /* need to called with the ext4 group lock held */
2938 static int ext4_mb_cleanup_pa(struct ext4_group_info *grp)
2939 {
2940         struct ext4_prealloc_space *pa;
2941         struct list_head *cur, *tmp;
2942         int count = 0;
2943 
2944         list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
2945                 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
2946                 list_del(&pa->pa_group_list);
2947                 count++;
2948                 kmem_cache_free(ext4_pspace_cachep, pa);
2949         }
2950         return count;
2951 }
2952 
2953 int ext4_mb_release(struct super_block *sb)
2954 {
2955         ext4_group_t ngroups = ext4_get_groups_count(sb);
2956         ext4_group_t i;
2957         int num_meta_group_infos;
2958         struct ext4_group_info *grinfo, ***group_info;
2959         struct ext4_sb_info *sbi = EXT4_SB(sb);
2960         struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
2961         int count;
2962 
2963         if (sbi->s_group_info) {
2964                 for (i = 0; i < ngroups; i++) {
2965                         cond_resched();
2966                         grinfo = ext4_get_group_info(sb, i);
2967                         mb_group_bb_bitmap_free(grinfo);
2968                         ext4_lock_group(sb, i);
2969                         count = ext4_mb_cleanup_pa(grinfo);
2970                         if (count)
2971                                 mb_debug(sb, "mballoc: %d PAs left\n",
2972                                          count);
2973                         ext4_unlock_group(sb, i);
2974                         kmem_cache_free(cachep, grinfo);
2975                 }
2976                 num_meta_group_infos = (ngroups +
2977                                 EXT4_DESC_PER_BLOCK(sb) - 1) >>
2978                         EXT4_DESC_PER_BLOCK_BITS(sb);
2979                 rcu_read_lock();
2980                 group_info = rcu_dereference(sbi->s_group_info);
2981                 for (i = 0; i < num_meta_group_infos; i++)
2982                         kfree(group_info[i]);
2983                 kvfree(group_info);
2984                 rcu_read_unlock();
2985         }
2986         kfree(sbi->s_mb_offsets);
2987         kfree(sbi->s_mb_maxs);
2988         iput(sbi->s_buddy_cache);
2989         if (sbi->s_mb_stats) {
2990                 ext4_msg(sb, KERN_INFO,
2991                        "mballoc: %u blocks %u reqs (%u success)",
2992                                 atomic_read(&sbi->s_bal_allocated),
2993                                 atomic_read(&sbi->s_bal_reqs),
2994                                 atomic_read(&sbi->s_bal_success));
2995                 ext4_msg(sb, KERN_INFO,
2996                       "mballoc: %u extents scanned, %u goal hits, "
2997                                 "%u 2^N hits, %u breaks, %u lost",
2998                                 atomic_read(&sbi->s_bal_ex_scanned),
2999                                 atomic_read(&sbi->s_bal_goals),
3000                                 atomic_read(&sbi->s_bal_2orders),
3001                                 atomic_read(&sbi->s_bal_breaks),
3002                                 atomic_read(&sbi->s_mb_lost_chunks));
3003                 ext4_msg(sb, KERN_INFO,
3004                        "mballoc: %lu generated and it took %Lu",
3005                                 sbi->s_mb_buddies_generated,
3006                                 sbi->s_mb_generation_time);
3007                 ext4_msg(sb, KERN_INFO,
3008                        "mballoc: %u preallocated, %u discarded",
3009                                 atomic_read(&sbi->s_mb_preallocated),
3010                                 atomic_read(&sbi->s_mb_discarded));
3011         }
3012 
3013         free_percpu(sbi->s_locality_groups);
3014 
3015         return 0;
3016 }
3017 
3018 static inline int ext4_issue_discard(struct super_block *sb,
3019                 ext4_group_t block_group, ext4_grpblk_t cluster, int count,
3020                 struct bio **biop)
3021 {
3022         ext4_fsblk_t discard_block;
3023 
3024         discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) +
3025                          ext4_group_first_block_no(sb, block_group));
3026         count = EXT4_C2B(EXT4_SB(sb), count);
3027         trace_ext4_discard_blocks(sb,
3028                         (unsigned long long) discard_block, count);
3029         if (biop) {
3030                 return __blkdev_issue_discard(sb->s_bdev,
3031                         (sector_t)discard_block << (sb->s_blocksize_bits - 9),
3032                         (sector_t)count << (sb->s_blocksize_bits - 9),
3033                         GFP_NOFS, 0, biop);
3034         } else
3035                 return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
3036 }
3037 
3038 static void ext4_free_data_in_buddy(struct super_block *sb,
3039                                     struct ext4_free_data *entry)
3040 {
3041         struct ext4_buddy e4b;
3042         struct ext4_group_info *db;
3043         int err, count = 0, count2 = 0;
3044 
3045         mb_debug(sb, "gonna free %u blocks in group %u (0x%p):",
3046                  entry->efd_count, entry->efd_group, entry);
3047 
3048         err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b);
3049         /* we expect to find existing buddy because it's pinned */
3050         BUG_ON(err != 0);
3051 
3052         spin_lock(&EXT4_SB(sb)->s_md_lock);
3053         EXT4_SB(sb)->s_mb_free_pending -= entry->efd_count;
3054         spin_unlock(&EXT4_SB(sb)->s_md_lock);
3055 
3056         db = e4b.bd_info;
3057         /* there are blocks to put in buddy to make them really free */
3058         count += entry->efd_count;
3059         count2++;
3060         ext4_lock_group(sb, entry->efd_group);
3061         /* Take it out of per group rb tree */
3062         rb_erase(&entry->efd_node, &(db->bb_free_root));
3063         mb_free_blocks(NULL, &e4b, entry->efd_start_cluster, entry->efd_count);
3064 
3065         /*
3066          * Clear the trimmed flag for the group so that the next
3067          * ext4_trim_fs can trim it.
3068          * If the volume is mounted with -o discard, online discard
3069          * is supported and the free blocks will be trimmed online.
3070          */
3071         if (!test_opt(sb, DISCARD))
3072                 EXT4_MB_GRP_CLEAR_TRIMMED(db);
3073 
3074         if (!db->bb_free_root.rb_node) {
3075                 /* No more items in the per group rb tree
3076                  * balance refcounts from ext4_mb_free_metadata()
3077                  */
3078                 put_page(e4b.bd_buddy_page);
3079                 put_page(e4b.bd_bitmap_page);
3080         }
3081         ext4_unlock_group(sb, entry->efd_group);
3082         kmem_cache_free(ext4_free_data_cachep, entry);
3083         ext4_mb_unload_buddy(&e4b);
3084 
3085         mb_debug(sb, "freed %d blocks in %d structures\n", count,
3086                  count2);
3087 }
3088 
3089 /*
3090  * This function is called by the jbd2 layer once the commit has finished,
3091  * so we know we can free the blocks that were released with that commit.
3092  */
3093 void ext4_process_freed_data(struct super_block *sb, tid_t commit_tid)
3094 {
3095         struct ext4_sb_info *sbi = EXT4_SB(sb);
3096         struct ext4_free_data *entry, *tmp;
3097         struct bio *discard_bio = NULL;
3098         struct list_head freed_data_list;
3099         struct list_head *cut_pos = NULL;
3100         int err;
3101 
3102         INIT_LIST_HEAD(&freed_data_list);
3103 
3104         spin_lock(&sbi->s_md_lock);
3105         list_for_each_entry(entry, &sbi->s_freed_data_list, efd_list) {
3106                 if (entry->efd_tid != commit_tid)
3107                         break;
3108                 cut_pos = &entry->efd_list;
3109         }
3110         if (cut_pos)
3111                 list_cut_position(&freed_data_list, &sbi->s_freed_data_list,
3112                                   cut_pos);
3113         spin_unlock(&sbi->s_md_lock);
3114 
3115         if (test_opt(sb, DISCARD)) {
3116                 list_for_each_entry(entry, &freed_data_list, efd_list) {
3117                         err = ext4_issue_discard(sb, entry->efd_group,
3118                                                  entry->efd_start_cluster,
3119                                                  entry->efd_count,
3120                                                  &discard_bio);
3121                         if (err && err != -EOPNOTSUPP) {
3122                                 ext4_msg(sb, KERN_WARNING, "discard request in"
3123                                          " group:%d block:%d count:%d failed"
3124                                          " with %d", entry->efd_group,
3125                                          entry->efd_start_cluster,
3126                                          entry->efd_count, err);
3127                         } else if (err == -EOPNOTSUPP)
3128                                 break;
3129                 }
3130 
3131                 if (discard_bio) {
3132                         submit_bio_wait(discard_bio);
3133                         bio_put(discard_bio);
3134                 }
3135         }
3136 
3137         list_for_each_entry_safe(entry, tmp, &freed_data_list, efd_list)
3138                 ext4_free_data_in_buddy(sb, entry);
3139 }
3140 
3141 int __init ext4_init_mballoc(void)
3142 {
3143         ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space,
3144                                         SLAB_RECLAIM_ACCOUNT);
3145         if (ext4_pspace_cachep == NULL)
3146                 goto out;
3147 
3148         ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context,
3149                                     SLAB_RECLAIM_ACCOUNT);
3150         if (ext4_ac_cachep == NULL)
3151                 goto out_pa_free;
3152 
3153         ext4_free_data_cachep = KMEM_CACHE(ext4_free_data,
3154                                            SLAB_RECLAIM_ACCOUNT);
3155         if (ext4_free_data_cachep == NULL)
3156                 goto out_ac_free;
3157 
3158         return 0;
3159 
3160 out_ac_free:
3161         kmem_cache_destroy(ext4_ac_cachep);
3162 out_pa_free:
3163         kmem_cache_destroy(ext4_pspace_cachep);
3164 out:
3165         return -ENOMEM;
3166 }
3167 
3168 void ext4_exit_mballoc(void)
3169 {
3170         /*
3171          * Wait for completion of call_rcu()'s on ext4_pspace_cachep
3172          * before destroying the slab cache.
3173          */
3174         rcu_barrier();
3175         kmem_cache_destroy(ext4_pspace_cachep);
3176         kmem_cache_destroy(ext4_ac_cachep);
3177         kmem_cache_destroy(ext4_free_data_cachep);
3178         ext4_groupinfo_destroy_slabs();
3179 }
3180 
3181 
3182 /*
3183  * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps
3184  * Returns 0 if success or error code
3185  */
3186 static noinline_for_stack int
3187 ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
3188                                 handle_t *handle, unsigned int reserv_clstrs)
3189 {
3190         struct buffer_head *bitmap_bh = NULL;
3191         struct ext4_group_desc *gdp;
3192         struct buffer_head *gdp_bh;
3193         struct ext4_sb_info *sbi;
3194         struct super_block *sb;
3195         ext4_fsblk_t block;
3196         int err, len;
3197 
3198         BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3199         BUG_ON(ac->ac_b_ex.fe_len <= 0);
3200 
3201         sb = ac->ac_sb;
3202         sbi = EXT4_SB(sb);
3203 
3204         bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group);
3205         if (IS_ERR(bitmap_bh)) {
3206                 err = PTR_ERR(bitmap_bh);
3207                 bitmap_bh = NULL;
3208                 goto out_err;
3209         }
3210 
3211         BUFFER_TRACE(bitmap_bh, "getting write access");
3212         err = ext4_journal_get_write_access(handle, bitmap_bh);
3213         if (err)
3214                 goto out_err;
3215 
3216         err = -EIO;
3217         gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh);
3218         if (!gdp)
3219                 goto out_err;
3220 
3221         ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group,
3222                         ext4_free_group_clusters(sb, gdp));
3223 
3224         BUFFER_TRACE(gdp_bh, "get_write_access");
3225         err = ext4_journal_get_write_access(handle, gdp_bh);
3226         if (err)
3227                 goto out_err;
3228 
3229         block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3230 
3231         len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
3232         if (!ext4_inode_block_valid(ac->ac_inode, block, len)) {
3233                 ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
3234                            "fs metadata", block, block+len);
3235                 /* File system mounted not to panic on error
3236                  * Fix the bitmap and return EFSCORRUPTED
3237                  * We leak some of the blocks here.
3238                  */
3239                 ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3240                 ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
3241                               ac->ac_b_ex.fe_len);
3242                 ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3243                 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
3244                 if (!err)
3245                         err = -EFSCORRUPTED;
3246                 goto out_err;
3247         }
3248 
3249         ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3250 #ifdef AGGRESSIVE_CHECK
3251         {
3252                 int i;
3253                 for (i = 0; i < ac->ac_b_ex.fe_len; i++) {
3254                         BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i,
3255                                                 bitmap_bh->b_data));
3256                 }
3257         }
3258 #endif
3259         ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
3260                       ac->ac_b_ex.fe_len);
3261         if (ext4_has_group_desc_csum(sb) &&
3262             (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
3263                 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
3264                 ext4_free_group_clusters_set(sb, gdp,
3265                                              ext4_free_clusters_after_init(sb,
3266                                                 ac->ac_b_ex.fe_group, gdp));
3267         }
3268         len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len;
3269         ext4_free_group_clusters_set(sb, gdp, len);
3270         ext4_block_bitmap_csum_set(sb, ac->ac_b_ex.fe_group, gdp, bitmap_bh);
3271         ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp);
3272 
3273         ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3274         percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len);
3275         /*
3276          * Now reduce the dirty block count also. Should not go negative
3277          */
3278         if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
3279                 /* release all the reserved blocks if non delalloc */
3280                 percpu_counter_sub(&sbi->s_dirtyclusters_counter,
3281                                    reserv_clstrs);
3282 
3283         if (sbi->s_log_groups_per_flex) {
3284                 ext4_group_t flex_group = ext4_flex_group(sbi,
3285                                                           ac->ac_b_ex.fe_group);
3286                 atomic64_sub(ac->ac_b_ex.fe_len,
3287                              &sbi_array_rcu_deref(sbi, s_flex_groups,
3288                                                   flex_group)->free_clusters);
3289         }
3290 
3291         err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
3292         if (err)
3293                 goto out_err;
3294         err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh);
3295 
3296 out_err:
3297         brelse(bitmap_bh);
3298         return err;
3299 }
3300 
3301 /*
3302  * Idempotent helper for Ext4 fast commit replay path to set the state of
3303  * blocks in bitmaps and update counters.
3304  */
3305 void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block,
3306                         int len, int state)
3307 {
3308         struct buffer_head *bitmap_bh = NULL;
3309         struct ext4_group_desc *gdp;
3310         struct buffer_head *gdp_bh;
3311         struct ext4_sb_info *sbi = EXT4_SB(sb);
3312         ext4_group_t group;
3313         ext4_grpblk_t blkoff;
3314         int i, clen, err;
3315         int already;
3316 
3317         clen = EXT4_B2C(sbi, len);
3318 
3319         ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
3320         bitmap_bh = ext4_read_block_bitmap(sb, group);
3321         if (IS_ERR(bitmap_bh)) {
3322                 err = PTR_ERR(bitmap_bh);
3323                 bitmap_bh = NULL;
3324                 goto out_err;
3325         }
3326 
3327         err = -EIO;
3328         gdp = ext4_get_group_desc(sb, group, &gdp_bh);
3329         if (!gdp)
3330                 goto out_err;
3331 
3332         ext4_lock_group(sb, group);
3333         already = 0;
3334         for (i = 0; i < clen; i++)
3335                 if (!mb_test_bit(blkoff + i, bitmap_bh->b_data) == !state)
3336                         already++;
3337 
3338         if (state)
3339                 ext4_set_bits(bitmap_bh->b_data, blkoff, clen);
3340         else
3341                 mb_test_and_clear_bits(bitmap_bh->b_data, blkoff, clen);
3342         if (ext4_has_group_desc_csum(sb) &&
3343             (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
3344                 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
3345                 ext4_free_group_clusters_set(sb, gdp,
3346                                              ext4_free_clusters_after_init(sb,
3347                                                 group, gdp));
3348         }
3349         if (state)
3350                 clen = ext4_free_group_clusters(sb, gdp) - clen + already;
3351         else
3352                 clen = ext4_free_group_clusters(sb, gdp) + clen - already;
3353 
3354         ext4_free_group_clusters_set(sb, gdp, clen);
3355         ext4_block_bitmap_csum_set(sb, group, gdp, bitmap_bh);
3356         ext4_group_desc_csum_set(sb, group, gdp);
3357 
3358         ext4_unlock_group(sb, group);
3359 
3360         if (sbi->s_log_groups_per_flex) {
3361                 ext4_group_t flex_group = ext4_flex_group(sbi, group);
3362 
3363                 atomic64_sub(len,
3364                              &sbi_array_rcu_deref(sbi, s_flex_groups,
3365                                                   flex_group)->free_clusters);
3366         }
3367 
3368         err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh);
3369         if (err)
3370                 goto out_err;
3371         sync_dirty_buffer(bitmap_bh);
3372         err = ext4_handle_dirty_metadata(NULL, NULL, gdp_bh);
3373         sync_dirty_buffer(gdp_bh);
3374 
3375 out_err:
3376         brelse(bitmap_bh);
3377 }
3378 
3379 /*
3380  * here we normalize request for locality group
3381  * Group request are normalized to s_mb_group_prealloc, which goes to
3382  * s_strip if we set the same via mount option.
3383  * s_mb_group_prealloc can be configured via
3384  * /sys/fs/ext4/<partition>/mb_group_prealloc
3385  *
3386  * XXX: should we try to preallocate more than the group has now?
3387  */
3388 static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
3389 {
3390         struct super_block *sb = ac->ac_sb;
3391         struct ext4_locality_group *lg = ac->ac_lg;
3392 
3393         BUG_ON(lg == NULL);
3394         ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
3395         mb_debug(sb, "goal %u blocks for locality group\n", ac->ac_g_ex.fe_len);
3396 }
3397 
3398 /*
3399  * Normalization means making request better in terms of
3400  * size and alignment
3401  */
3402 static noinline_for_stack void
3403 ext4_mb_normalize_request(struct ext4_allocation_context *ac,
3404                                 struct ext4_allocation_request *ar)
3405 {
3406         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3407         int bsbits, max;
3408         ext4_lblk_t end;
3409         loff_t size, start_off;
3410         loff_t orig_size __maybe_unused;
3411         ext4_lblk_t start;
3412         struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
3413         struct ext4_prealloc_space *pa;
3414 
3415         /* do normalize only data requests, metadata requests
3416            do not need preallocation */
3417         if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
3418                 return;
3419 
3420         /* sometime caller may want exact blocks */
3421         if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
3422                 return;
3423 
3424         /* caller may indicate that preallocation isn't
3425          * required (it's a tail, for example) */
3426         if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC)
3427                 return;
3428 
3429         if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) {
3430                 ext4_mb_normalize_group_request(ac);
3431                 return ;
3432         }
3433 
3434         bsbits = ac->ac_sb->s_blocksize_bits;
3435 
3436         /* first, let's learn actual file size
3437          * given current request is allocated */
3438         size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
3439         size = size << bsbits;
3440         if (size < i_size_read(ac->ac_inode))
3441                 size = i_size_read(ac->ac_inode);
3442         orig_size = size;
3443 
3444         /* max size of free chunks */
3445         max = 2 << bsbits;
3446 
3447 #define NRL_CHECK_SIZE(req, size, max, chunk_size)      \
3448                 (req <= (size) || max <= (chunk_size))
3449 
3450         /* first, try to predict filesize */
3451         /* XXX: should this table be tunable? */
3452         start_off = 0;
3453         if (size <= 16 * 1024) {
3454                 size = 16 * 1024;
3455         } else if (size <= 32 * 1024) {
3456                 size = 32 * 1024;
3457         } else if (size <= 64 * 1024) {
3458                 size = 64 * 1024;
3459         } else if (size <= 128 * 1024) {
3460                 size = 128 * 1024;
3461         } else if (size <= 256 * 1024) {
3462                 size = 256 * 1024;
3463         } else if (size <= 512 * 1024) {
3464                 size = 512 * 1024;
3465         } else if (size <= 1024 * 1024) {
3466                 size = 1024 * 1024;
3467         } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
3468                 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
3469                                                 (21 - bsbits)) << 21;
3470                 size = 2 * 1024 * 1024;
3471         } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
3472                 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
3473                                                         (22 - bsbits)) << 22;
3474                 size = 4 * 1024 * 1024;
3475         } else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,
3476                                         (8<<20)>>bsbits, max, 8 * 1024)) {
3477                 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
3478                                                         (23 - bsbits)) << 23;
3479                 size = 8 * 1024 * 1024;
3480         } else {
3481                 start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits;
3482                 size      = (loff_t) EXT4_C2B(EXT4_SB(ac->ac_sb),
3483                                               ac->ac_o_ex.fe_len) << bsbits;
3484         }
3485         size = size >> bsbits;
3486         start = start_off >> bsbits;
3487 
3488         /* don't cover already allocated blocks in selected range */
3489         if (ar->pleft && start <= ar->lleft) {
3490                 size -= ar->lleft + 1 - start;
3491                 start = ar->lleft + 1;
3492         }
3493         if (ar->pright && start + size - 1 >= ar->lright)
3494                 size -= start + size - ar->lright;
3495 
3496         /*
3497          * Trim allocation request for filesystems with artificially small
3498          * groups.
3499          */
3500         if (size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb))
3501                 size = EXT4_BLOCKS_PER_GROUP(ac->ac_sb);
3502 
3503         end = start + size;
3504 
3505         /* check we don't cross already preallocated blocks */
3506         rcu_read_lock();
3507         list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
3508                 ext4_lblk_t pa_end;
3509 
3510                 if (pa->pa_deleted)
3511                         continue;
3512                 spin_lock(&pa->pa_lock);
3513                 if (pa->pa_deleted) {
3514                         spin_unlock(&pa->pa_lock);
3515                         continue;
3516                 }
3517 
3518                 pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
3519                                                   pa->pa_len);
3520 
3521                 /* PA must not overlap original request */
3522                 BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end ||
3523                         ac->ac_o_ex.fe_logical < pa->pa_lstart));
3524 
3525                 /* skip PAs this normalized request doesn't overlap with */
3526                 if (pa->pa_lstart >= end || pa_end <= start) {
3527                         spin_unlock(&pa->pa_lock);
3528                         continue;
3529                 }
3530                 BUG_ON(pa->pa_lstart <= start && pa_end >= end);
3531 
3532                 /* adjust start or end to be adjacent to this pa */
3533                 if (pa_end <= ac->ac_o_ex.fe_logical) {
3534                         BUG_ON(pa_end < start);
3535                         start = pa_end;
3536                 } else if (pa->pa_lstart > ac->ac_o_ex.fe_logical) {
3537                         BUG_ON(pa->pa_lstart > end);
3538                         end = pa->pa_lstart;
3539                 }
3540                 spin_unlock(&pa->pa_lock);
3541         }
3542         rcu_read_unlock();
3543         size = end - start;
3544 
3545         /* XXX: extra loop to check we really don't overlap preallocations */
3546         rcu_read_lock();
3547         list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
3548                 ext4_lblk_t pa_end;
3549 
3550                 spin_lock(&pa->pa_lock);
3551                 if (pa->pa_deleted == 0) {
3552                         pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
3553                                                           pa->pa_len);
3554                         BUG_ON(!(start >= pa_end || end <= pa->pa_lstart));
3555                 }
3556                 spin_unlock(&pa->pa_lock);
3557         }
3558         rcu_read_unlock();
3559 
3560         if (start + size <= ac->ac_o_ex.fe_logical &&
3561                         start > ac->ac_o_ex.fe_logical) {
3562                 ext4_msg(ac->ac_sb, KERN_ERR,
3563                          "start %lu, size %lu, fe_logical %lu",
3564                          (unsigned long) start, (unsigned long) size,
3565                          (unsigned long) ac->ac_o_ex.fe_logical);
3566                 BUG();
3567         }
3568         BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
3569 
3570         /* now prepare goal request */
3571 
3572         /* XXX: is it better to align blocks WRT to logical
3573          * placement or satisfy big request as is */
3574         ac->ac_g_ex.fe_logical = start;
3575         ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size);
3576 
3577         /* define goal start in order to merge */
3578         if (ar->pright && (ar->lright == (start + size))) {
3579                 /* merge to the right */
3580                 ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
3581                                                 &ac->ac_f_ex.fe_group,
3582                                                 &ac->ac_f_ex.fe_start);
3583                 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
3584         }
3585         if (ar->pleft && (ar->lleft + 1 == start)) {
3586                 /* merge to the left */
3587                 ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
3588                                                 &ac->ac_f_ex.fe_group,
3589                                                 &ac->ac_f_ex.fe_start);
3590                 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
3591         }
3592 
3593         mb_debug(ac->ac_sb, "goal: %lld(was %lld) blocks at %u\n", size,
3594                  orig_size, start);
3595 }
3596 
3597 static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
3598 {
3599         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3600 
3601         if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
3602                 atomic_inc(&sbi->s_bal_reqs);
3603                 atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
3604                 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
3605                         atomic_inc(&sbi->s_bal_success);
3606                 atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
3607                 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
3608                                 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
3609                         atomic_inc(&sbi->s_bal_goals);
3610                 if (ac->ac_found > sbi->s_mb_max_to_scan)
3611                         atomic_inc(&sbi->s_bal_breaks);
3612         }
3613 
3614         if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
3615                 trace_ext4_mballoc_alloc(ac);
3616         else
3617                 trace_ext4_mballoc_prealloc(ac);
3618 }
3619 
3620 /*
3621  * Called on failure; free up any blocks from the inode PA for this
3622  * context.  We don't need this for MB_GROUP_PA because we only change
3623  * pa_free in ext4_mb_release_context(), but on failure, we've already
3624  * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed.
3625  */
3626 static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
3627 {
3628         struct ext4_prealloc_space *pa = ac->ac_pa;
3629         struct ext4_buddy e4b;
3630         int err;
3631 
3632         if (pa == NULL) {
3633                 if (ac->ac_f_ex.fe_len == 0)
3634                         return;
3635                 err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b);
3636                 if (err) {
3637                         /*
3638                          * This should never happen since we pin the
3639                          * pages in the ext4_allocation_context so
3640                          * ext4_mb_load_buddy() should never fail.
3641                          */
3642                         WARN(1, "mb_load_buddy failed (%d)", err);
3643                         return;
3644                 }
3645                 ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
3646                 mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start,
3647                                ac->ac_f_ex.fe_len);
3648                 ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
3649                 ext4_mb_unload_buddy(&e4b);
3650                 return;
3651         }
3652         if (pa->pa_type == MB_INODE_PA)
3653                 pa->pa_free += ac->ac_b_ex.fe_len;
3654 }
3655 
3656 /*
3657  * use blocks preallocated to inode
3658  */
3659 static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
3660                                 struct ext4_prealloc_space *pa)
3661 {
3662         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3663         ext4_fsblk_t start;
3664         ext4_fsblk_t end;
3665         int len;
3666 
3667         /* found preallocated blocks, use them */
3668         start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart);
3669         end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len),
3670                   start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len));
3671         len = EXT4_NUM_B2C(sbi, end - start);
3672         ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group,
3673                                         &ac->ac_b_ex.fe_start);
3674         ac->ac_b_ex.fe_len = len;
3675         ac->ac_status = AC_STATUS_FOUND;
3676         ac->ac_pa = pa;
3677 
3678         BUG_ON(start < pa->pa_pstart);
3679         BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len));
3680         BUG_ON(pa->pa_free < len);
3681         pa->pa_free -= len;
3682 
3683         mb_debug(ac->ac_sb, "use %llu/%d from inode pa %p\n", start, len, pa);
3684 }
3685 
3686 /*
3687  * use blocks preallocated to locality group
3688  */
3689 static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
3690                                 struct ext4_prealloc_space *pa)
3691 {
3692         unsigned int len = ac->ac_o_ex.fe_len;
3693 
3694         ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
3695                                         &ac->ac_b_ex.fe_group,
3696                                         &ac->ac_b_ex.fe_start);
3697         ac->ac_b_ex.fe_len = len;
3698         ac->ac_status = AC_STATUS_FOUND;
3699         ac->ac_pa = pa;
3700 
3701         /* we don't correct pa_pstart or pa_plen here to avoid
3702          * possible race when the group is being loaded concurrently
3703          * instead we correct pa later, after blocks are marked
3704          * in on-disk bitmap -- see ext4_mb_release_context()
3705          * Other CPUs are prevented from allocating from this pa by lg_mutex
3706          */
3707         mb_debug(ac->ac_sb, "use %u/%u from group pa %p\n",
3708                  pa->pa_lstart-len, len, pa);
3709 }
3710 
3711 /*
3712  * Return the prealloc space that have minimal distance
3713  * from the goal block. @cpa is the prealloc
3714  * space that is having currently known minimal distance
3715  * from the goal block.
3716  */
3717 static struct ext4_prealloc_space *
3718 ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
3719                         struct ext4_prealloc_space *pa,
3720                         struct ext4_prealloc_space *cpa)
3721 {
3722         ext4_fsblk_t cur_distance, new_distance;
3723 
3724         if (cpa == NULL) {
3725                 atomic_inc(&pa->pa_count);
3726                 return pa;
3727         }
3728         cur_distance = abs(goal_block - cpa->pa_pstart);
3729         new_distance = abs(goal_block - pa->pa_pstart);
3730 
3731         if (cur_distance <= new_distance)
3732                 return cpa;
3733 
3734         /* drop the previous reference */
3735         atomic_dec(&cpa->pa_count);
3736         atomic_inc(&pa->pa_count);
3737         return pa;
3738 }
3739 
3740 /*
3741  * search goal blocks in preallocated space
3742  */
3743 static noinline_for_stack bool
3744 ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
3745 {
3746         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3747         int order, i;
3748         struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
3749         struct ext4_locality_group *lg;
3750         struct ext4_prealloc_space *pa, *cpa = NULL;
3751         ext4_fsblk_t goal_block;
3752 
3753         /* only data can be preallocated */
3754         if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
3755                 return false;
3756 
3757         /* first, try per-file preallocation */
3758         rcu_read_lock();
3759         list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
3760 
3761                 /* all fields in this condition don't change,
3762                  * so we can skip locking for them */
3763                 if (ac->ac_o_ex.fe_logical < pa->pa_lstart ||
3764                     ac->ac_o_ex.fe_logical >= (pa->pa_lstart +
3765                                                EXT4_C2B(sbi, pa->pa_len)))
3766                         continue;
3767 
3768                 /* non-extent files can't have physical blocks past 2^32 */
3769                 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) &&
3770                     (pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len) >
3771                      EXT4_MAX_BLOCK_FILE_PHYS))
3772                         continue;
3773 
3774                 /* found preallocated blocks, use them */
3775                 spin_lock(&pa->pa_lock);
3776                 if (pa->pa_deleted == 0 && pa->pa_free) {
3777                         atomic_inc(&pa->pa_count);
3778                         ext4_mb_use_inode_pa(ac, pa);
3779                         spin_unlock(&pa->pa_lock);
3780                         ac->ac_criteria = 10;
3781                         rcu_read_unlock();
3782                         return true;
3783                 }
3784                 spin_unlock(&pa->pa_lock);
3785         }
3786         rcu_read_unlock();
3787 
3788         /* can we use group allocation? */
3789         if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC))
3790                 return false;
3791 
3792         /* inode may have no locality group for some reason */
3793         lg = ac->ac_lg;
3794         if (lg == NULL)
3795                 return false;
3796         order  = fls(ac->ac_o_ex.fe_len) - 1;
3797         if (order > PREALLOC_TB_SIZE - 1)
3798                 /* The max size of hash table is PREALLOC_TB_SIZE */
3799                 order = PREALLOC_TB_SIZE - 1;
3800 
3801         goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex);
3802         /*
3803          * search for the prealloc space that is having
3804          * minimal distance from the goal block.
3805          */
3806         for (i = order; i < PREALLOC_TB_SIZE; i++) {
3807                 rcu_read_lock();
3808                 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i],
3809                                         pa_inode_list) {
3810                         spin_lock(&pa->pa_lock);
3811                         if (pa->pa_deleted == 0 &&
3812                                         pa->pa_free >= ac->ac_o_ex.fe_len) {
3813 
3814                                 cpa = ext4_mb_check_group_pa(goal_block,
3815                                                                 pa, cpa);
3816                         }
3817                         spin_unlock(&pa->pa_lock);
3818                 }
3819                 rcu_read_unlock();
3820         }
3821         if (cpa) {
3822                 ext4_mb_use_group_pa(ac, cpa);
3823                 ac->ac_criteria = 20;
3824                 return true;
3825         }
3826         return false;
3827 }
3828 
3829 /*
3830  * the function goes through all block freed in the group
3831  * but not yet committed and marks them used in in-core bitmap.
3832  * buddy must be generated from this bitmap
3833  * Need to be called with the ext4 group lock held
3834  */
3835 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
3836                                                 ext4_group_t group)
3837 {
3838         struct rb_node *n;
3839         struct ext4_group_info *grp;
3840         struct ext4_free_data *entry;
3841 
3842         grp = ext4_get_group_info(sb, group);
3843         n = rb_first(&(grp->bb_free_root));
3844 
3845         while (n) {
3846                 entry = rb_entry(n, struct ext4_free_data, efd_node);
3847                 ext4_set_bits(bitmap, entry->efd_start_cluster, entry->efd_count);
3848                 n = rb_next(n);
3849         }
3850         return;
3851 }
3852 
3853 /*
3854  * the function goes through all preallocation in this group and marks them
3855  * used in in-core bitmap. buddy must be generated from this bitmap
3856  * Need to be called with ext4 group lock held
3857  */
3858 static noinline_for_stack
3859 void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
3860                                         ext4_group_t group)
3861 {
3862         struct ext4_group_info *grp = ext4_get_group_info(sb, group);
3863         struct ext4_prealloc_space *pa;
3864         struct list_head *cur;
3865         ext4_group_t groupnr;
3866         ext4_grpblk_t start;
3867         int preallocated = 0;
3868         int len;
3869 
3870         /* all form of preallocation discards first load group,
3871          * so the only competing code is preallocation use.
3872          * we don't need any locking here
3873          * notice we do NOT ignore preallocations with pa_deleted
3874          * otherwise we could leave used blocks available for
3875          * allocation in buddy when concurrent ext4_mb_put_pa()
3876          * is dropping preallocation
3877          */
3878         list_for_each(cur, &grp->bb_prealloc_list) {
3879                 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
3880                 spin_lock(&pa->pa_lock);
3881                 ext4_get_group_no_and_offset(sb, pa->pa_pstart,
3882                                              &groupnr, &start);
3883                 len = pa->pa_len;
3884                 spin_unlock(&pa->pa_lock);
3885                 if (unlikely(len == 0))
3886                         continue;
3887                 BUG_ON(groupnr != group);
3888                 ext4_set_bits(bitmap, start, len);
3889                 preallocated += len;
3890         }
3891         mb_debug(sb, "preallocated %d for group %u\n", preallocated, group);
3892 }
3893 
3894 static void ext4_mb_mark_pa_deleted(struct super_block *sb,
3895                                     struct ext4_prealloc_space *pa)
3896 {
3897         struct ext4_inode_info *ei;
3898 
3899         if (pa->pa_deleted) {
3900                 ext4_warning(sb, "deleted pa, type:%d, pblk:%llu, lblk:%u, len:%d\n",
3901                              pa->pa_type, pa->pa_pstart, pa->pa_lstart,
3902                              pa->pa_len);
3903                 return;
3904         }
3905 
3906         pa->pa_deleted = 1;
3907 
3908         if (pa->pa_type == MB_INODE_PA) {
3909                 ei = EXT4_I(pa->pa_inode);
3910                 atomic_dec(&ei->i_prealloc_active);
3911         }
3912 }
3913 
3914 static void ext4_mb_pa_callback(struct rcu_head *head)
3915 {
3916         struct ext4_prealloc_space *pa;
3917         pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
3918 
3919         BUG_ON(atomic_read(&pa->pa_count));
3920         BUG_ON(pa->pa_deleted == 0);
3921         kmem_cache_free(ext4_pspace_cachep, pa);
3922 }
3923 
3924 /*
3925  * drops a reference to preallocated space descriptor
3926  * if this was the last reference and the space is consumed
3927  */
3928 static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
3929                         struct super_block *sb, struct ext4_prealloc_space *pa)
3930 {
3931         ext4_group_t grp;
3932         ext4_fsblk_t grp_blk;
3933 
3934         /* in this short window concurrent discard can set pa_deleted */
3935         spin_lock(&pa->pa_lock);
3936         if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) {
3937                 spin_unlock(&pa->pa_lock);
3938                 return;
3939         }
3940 
3941         if (pa->pa_deleted == 1) {
3942                 spin_unlock(&pa->pa_lock);
3943                 return;
3944         }
3945 
3946         ext4_mb_mark_pa_deleted(sb, pa);
3947         spin_unlock(&pa->pa_lock);
3948 
3949         grp_blk = pa->pa_pstart;
3950         /*
3951          * If doing group-based preallocation, pa_pstart may be in the
3952          * next group when pa is used up
3953          */
3954         if (pa->pa_type == MB_GROUP_PA)
3955                 grp_blk--;
3956 
3957         grp = ext4_get_group_number(sb, grp_blk);
3958 
3959         /*
3960          * possible race:
3961          *
3962          *  P1 (buddy init)                     P2 (regular allocation)
3963          *                                      find block B in PA
3964          *  copy on-disk bitmap to buddy
3965          *                                      mark B in on-disk bitmap
3966          *                                      drop PA from group
3967          *  mark all PAs in buddy
3968          *
3969          * thus, P1 initializes buddy with B available. to prevent this
3970          * we make "copy" and "mark all PAs" atomic and serialize "drop PA"
3971          * against that pair
3972          */
3973         ext4_lock_group(sb, grp);
3974         list_del(&pa->pa_group_list);
3975         ext4_unlock_group(sb, grp);
3976 
3977         spin_lock(pa->pa_obj_lock);
3978         list_del_rcu(&pa->pa_inode_list);
3979         spin_unlock(pa->pa_obj_lock);
3980 
3981         call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3982 }
3983 
3984 /*
3985  * creates new preallocated space for given inode
3986  */
3987 static noinline_for_stack void
3988 ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
3989 {
3990         struct super_block *sb = ac->ac_sb;
3991         struct ext4_sb_info *sbi = EXT4_SB(sb);
3992         struct ext4_prealloc_space *pa;
3993         struct ext4_group_info *grp;
3994         struct ext4_inode_info *ei;
3995 
3996         /* preallocate only when found space is larger then requested */
3997         BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
3998         BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3999         BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
4000         BUG_ON(ac->ac_pa == NULL);
4001 
4002         pa = ac->ac_pa;
4003 
4004         if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) {
4005                 int winl;
4006                 int wins;
4007                 int win;
4008                 int offs;
4009 
4010                 /* we can't allocate as much as normalizer wants.
4011                  * so, found space must get proper lstart
4012                  * to cover original request */
4013                 BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
4014                 BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
4015 
4016                 /* we're limited by original request in that
4017                  * logical block must be covered any way
4018                  * winl is window we can move our chunk within */
4019                 winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical;
4020 
4021                 /* also, we should cover whole original request */
4022                 wins = EXT4_C2B(sbi, ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len);
4023 
4024                 /* the smallest one defines real window */
4025                 win = min(winl, wins);
4026 
4027                 offs = ac->ac_o_ex.fe_logical %
4028                         EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
4029                 if (offs && offs < win)
4030                         win = offs;
4031 
4032                 ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical -
4033                         EXT4_NUM_B2C(sbi, win);
4034                 BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
4035                 BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
4036         }
4037 
4038         /* preallocation can change ac_b_ex, thus we store actually
4039          * allocated blocks for history */
4040         ac->ac_f_ex = ac->ac_b_ex;
4041 
4042         pa->pa_lstart = ac->ac_b_ex.fe_logical;
4043         pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4044         pa->pa_len = ac->ac_b_ex.fe_len;
4045         pa->pa_free = pa->pa_len;
4046         spin_lock_init(&pa->pa_lock);
4047         INIT_LIST_HEAD(&pa->pa_inode_list);
4048         INIT_LIST_HEAD(&pa->pa_group_list);
4049         pa->pa_deleted = 0;
4050         pa->pa_type = MB_INODE_PA;
4051 
4052         mb_debug(sb, "new inode pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
4053                  pa->pa_len, pa->pa_lstart);
4054         trace_ext4_mb_new_inode_pa(ac, pa);
4055 
4056         ext4_mb_use_inode_pa(ac, pa);
4057         atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
4058 
4059         ei = EXT4_I(ac->ac_inode);
4060         grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
4061 
4062         pa->pa_obj_lock = &ei->i_prealloc_lock;
4063         pa->pa_inode = ac->ac_inode;
4064 
4065         list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
4066 
4067         spin_lock(pa->pa_obj_lock);
4068         list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list);
4069         spin_unlock(pa->pa_obj_lock);
4070         atomic_inc(&ei->i_prealloc_active);
4071 }
4072 
4073 /*
4074  * creates new preallocated space for locality group inodes belongs to
4075  */
4076 static noinline_for_stack void
4077 ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
4078 {
4079         struct super_block *sb = ac->ac_sb;
4080         struct ext4_locality_group *lg;
4081         struct ext4_prealloc_space *pa;
4082         struct ext4_group_info *grp;
4083 
4084         /* preallocate only when found space is larger then requested */
4085         BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
4086         BUG_ON(ac->ac_status != AC_STATUS_FOUND);
4087         BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
4088         BUG_ON(ac->ac_pa == NULL);
4089 
4090         pa = ac->ac_pa;
4091 
4092         /* preallocation can change ac_b_ex, thus we store actually
4093          * allocated blocks for history */
4094         ac->ac_f_ex = ac->ac_b_ex;
4095 
4096         pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4097         pa->pa_lstart = pa->pa_pstart;
4098         pa->pa_len = ac->ac_b_ex.fe_len;
4099         pa->pa_free = pa->pa_len;
4100         spin_lock_init(&pa->pa_lock);
4101         INIT_LIST_HEAD(&pa->pa_inode_list);
4102         INIT_LIST_HEAD(&pa->pa_group_list);
4103         pa->pa_deleted = 0;
4104         pa->pa_type = MB_GROUP_PA;
4105 
4106         mb_debug(sb, "new group pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
4107                  pa->pa_len, pa->pa_lstart);
4108         trace_ext4_mb_new_group_pa(ac, pa);
4109 
4110         ext4_mb_use_group_pa(ac, pa);
4111         atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
4112 
4113         grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
4114         lg = ac->ac_lg;
4115         BUG_ON(lg == NULL);
4116 
4117         pa->pa_obj_lock = &lg->lg_prealloc_lock;
4118         pa->pa_inode = NULL;
4119 
4120         list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
4121 
4122         /*
4123          * We will later add the new pa to the right bucket
4124          * after updating the pa_free in ext4_mb_release_context
4125          */
4126 }
4127 
4128 static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
4129 {
4130         if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
4131                 ext4_mb_new_group_pa(ac);
4132         else
4133                 ext4_mb_new_inode_pa(ac);
4134 }
4135 
4136 /*
4137  * finds all unused blocks in on-disk bitmap, frees them in
4138  * in-core bitmap and buddy.
4139  * @pa must be unlinked from inode and group lists, so that
4140  * nobody else can find/use it.
4141  * the caller MUST hold group/inode locks.
4142  * TODO: optimize the case when there are no in-core structures yet
4143  */
4144 static noinline_for_stack int
4145 ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
4146                         struct ext4_prealloc_space *pa)
4147 {
4148         struct super_block *sb = e4b->bd_sb;
4149         struct ext4_sb_info *sbi = EXT4_SB(sb);
4150         unsigned int end;
4151         unsigned int next;
4152         ext4_group_t group;
4153         ext4_grpblk_t bit;
4154         unsigned long long grp_blk_start;
4155         int free = 0;
4156 
4157         BUG_ON(pa->pa_deleted == 0);
4158         ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
4159         grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit);
4160         BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
4161         end = bit + pa->pa_len;
4162 
4163         while (bit < end) {
4164                 bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit);
4165                 if (bit >= end)
4166                         break;
4167                 next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
4168                 mb_debug(sb, "free preallocated %u/%u in group %u\n",
4169                          (unsigned) ext4_group_first_block_no(sb, group) + bit,
4170                          (unsigned) next - bit, (unsigned) group);
4171                 free += next - bit;
4172 
4173                 trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit);
4174                 trace_ext4_mb_release_inode_pa(pa, (grp_blk_start +
4175                                                     EXT4_C2B(sbi, bit)),
4176                                                next - bit);
4177                 mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
4178                 bit = next + 1;
4179         }
4180         if (free != pa->pa_free) {
4181                 ext4_msg(e4b->bd_sb, KERN_CRIT,
4182                          "pa %p: logic %lu, phys. %lu, len %d",
4183                          pa, (unsigned long) pa->pa_lstart,
4184                          (unsigned long) pa->pa_pstart,
4185                          pa->pa_len);
4186                 ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u",
4187                                         free, pa->pa_free);
4188                 /*
4189                  * pa is already deleted so we use the value obtained
4190                  * from the bitmap and continue.
4191                  */
4192         }
4193         atomic_add(free, &sbi->s_mb_discarded);
4194 
4195         return 0;
4196 }
4197 
4198 static noinline_for_stack int
4199 ext4_mb_release_group_pa(struct ext4_buddy *e4b,
4200                                 struct ext4_prealloc_space *pa)
4201 {
4202         struct super_block *sb = e4b->bd_sb;
4203         ext4_group_t group;
4204         ext4_grpblk_t bit;
4205 
4206         trace_ext4_mb_release_group_pa(sb, pa);
4207         BUG_ON(pa->pa_deleted == 0);
4208         ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
4209         BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
4210         mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
4211         atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
4212         trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
4213 
4214         return 0;
4215 }
4216 
4217 /*
4218  * releases all preallocations in given group
4219  *
4220  * first, we need to decide discard policy:
4221  * - when do we discard
4222  *   1) ENOSPC
4223  * - how many do we discard
4224  *   1) how many requested
4225  */
4226 static noinline_for_stack int
4227 ext4_mb_discard_group_preallocations(struct super_block *sb,
4228                                         ext4_group_t group, int needed)
4229 {
4230         struct ext4_group_info *grp = ext4_get_group_info(sb, group);
4231         struct buffer_head *bitmap_bh = NULL;
4232         struct ext4_prealloc_space *pa, *tmp;
4233         struct list_head list;
4234         struct ext4_buddy e4b;
4235         int err;
4236         int busy = 0;
4237         int free, free_total = 0;
4238 
4239         mb_debug(sb, "discard preallocation for group %u\n", group);
4240         if (list_empty(&grp->bb_prealloc_list))
4241                 goto out_dbg;
4242 
4243         bitmap_bh = ext4_read_block_bitmap(sb, group);
4244         if (IS_ERR(bitmap_bh)) {
4245                 err = PTR_ERR(bitmap_bh);
4246                 ext4_error_err(sb, -err,
4247                                "Error %d reading block bitmap for %u",
4248                                err, group);
4249                 goto out_dbg;
4250         }
4251 
4252         err = ext4_mb_load_buddy(sb, group, &e4b);
4253         if (err) {
4254                 ext4_warning(sb, "Error %d loading buddy information for %u",
4255                              err, group);
4256                 put_bh(bitmap_bh);
4257                 goto out_dbg;
4258         }
4259 
4260         if (needed == 0)
4261                 needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1;
4262 
4263         INIT_LIST_HEAD(&list);
4264 repeat:
4265         free = 0;
4266         ext4_lock_group(sb, group);
4267         list_for_each_entry_safe(pa, tmp,
4268                                 &grp->bb_prealloc_list, pa_group_list) {
4269                 spin_lock(&pa->pa_lock);
4270                 if (atomic_read(&pa->pa_count)) {
4271                         spin_unlock(&pa->pa_lock);
4272                         busy = 1;
4273                         continue;
4274                 }
4275                 if (pa->pa_deleted) {
4276                         spin_unlock(&pa->pa_lock);
4277                         continue;
4278                 }
4279 
4280                 /* seems this one can be freed ... */
4281                 ext4_mb_mark_pa_deleted(sb, pa);
4282 
4283                 if (!free)
4284                         this_cpu_inc(discard_pa_seq);
4285 
4286                 /* we can trust pa_free ... */
4287                 free += pa->pa_free;
4288 
4289                 spin_unlock(&pa->pa_lock);
4290 
4291                 list_del(&pa->pa_group_list);
4292                 list_add(&pa->u.pa_tmp_list, &list);
4293         }
4294 
4295         /* now free all selected PAs */
4296         list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
4297 
4298                 /* remove from object (inode or locality group) */
4299                 spin_lock(pa->pa_obj_lock);
4300                 list_del_rcu(&pa->pa_inode_list);
4301                 spin_unlock(pa->pa_obj_lock);
4302 
4303                 if (pa->pa_type == MB_GROUP_PA)
4304                         ext4_mb_release_group_pa(&e4b, pa);
4305                 else
4306                         ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
4307 
4308                 list_del(&pa->u.pa_tmp_list);
4309                 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4310         }
4311 
4312         free_total += free;
4313 
4314         /* if we still need more blocks and some PAs were used, try again */
4315         if (free_total < needed && busy) {
4316                 ext4_unlock_group(sb, group);
4317                 cond_resched();
4318                 busy = 0;
4319                 goto repeat;
4320         }
4321         ext4_unlock_group(sb, group);
4322         ext4_mb_unload_buddy(&e4b);
4323         put_bh(bitmap_bh);
4324 out_dbg:
4325         mb_debug(sb, "discarded (%d) blocks preallocated for group %u bb_free (%d)\n",
4326                  free_total, group, grp->bb_free);
4327         return free_total;
4328 }
4329 
4330 /*
4331  * releases all non-used preallocated blocks for given inode
4332  *
4333  * It's important to discard preallocations under i_data_sem
4334  * We don't want another block to be served from the prealloc
4335  * space when we are discarding the inode prealloc space.
4336  *
4337  * FIXME!! Make sure it is valid at all the call sites
4338  */
4339 void ext4_discard_preallocations(struct inode *inode, unsigned int needed)
4340 {
4341         struct ext4_inode_info *ei = EXT4_I(inode);
4342         struct super_block *sb = inode->i_sb;
4343         struct buffer_head *bitmap_bh = NULL;
4344         struct ext4_prealloc_space *pa, *tmp;
4345         ext4_group_t group = 0;
4346         struct list_head list;
4347         struct ext4_buddy e4b;
4348         int err;
4349 
4350         if (!S_ISREG(inode->i_mode)) {
4351                 /*BUG_ON(!list_empty(&ei->i_prealloc_list));*/
4352                 return;
4353         }
4354 
4355         if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY)
4356                 return;
4357 
4358         mb_debug(sb, "discard preallocation for inode %lu\n",
4359                  inode->i_ino);
4360         trace_ext4_discard_preallocations(inode,
4361                         atomic_read(&ei->i_prealloc_active), needed);
4362 
4363         INIT_LIST_HEAD(&list);
4364 
4365         if (needed == 0)
4366                 needed = UINT_MAX;
4367 
4368 repeat:
4369         /* first, collect all pa's in the inode */
4370         spin_lock(&ei->i_prealloc_lock);
4371         while (!list_empty(&ei->i_prealloc_list) && needed) {
4372                 pa = list_entry(ei->i_prealloc_list.prev,
4373                                 struct ext4_prealloc_space, pa_inode_list);
4374                 BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock);
4375                 spin_lock(&pa->pa_lock);
4376                 if (atomic_read(&pa->pa_count)) {
4377                         /* this shouldn't happen often - nobody should
4378                          * use preallocation while we're discarding it */
4379                         spin_unlock(&pa->pa_lock);
4380                         spin_unlock(&ei->i_prealloc_lock);
4381                         ext4_msg(sb, KERN_ERR,
4382                                  "uh-oh! used pa while discarding");
4383                         WARN_ON(1);
4384                         schedule_timeout_uninterruptible(HZ);
4385                         goto repeat;
4386 
4387                 }
4388                 if (pa->pa_deleted == 0) {
4389                         ext4_mb_mark_pa_deleted(sb, pa);
4390                         spin_unlock(&pa->pa_lock);
4391                         list_del_rcu(&pa->pa_inode_list);
4392                         list_add(&pa->u.pa_tmp_list, &list);
4393                         needed--;
4394                         continue;
4395                 }
4396 
4397                 /* someone is deleting pa right now */
4398                 spin_unlock(&pa->pa_lock);
4399                 spin_unlock(&ei->i_prealloc_lock);
4400 
4401                 /* we have to wait here because pa_deleted
4402                  * doesn't mean pa is already unlinked from
4403                  * the list. as we might be called from
4404                  * ->clear_inode() the inode will get freed
4405                  * and concurrent thread which is unlinking
4406                  * pa from inode's list may access already
4407                  * freed memory, bad-bad-bad */
4408 
4409                 /* XXX: if this happens too often, we can
4410                  * add a flag to force wait only in case
4411                  * of ->clear_inode(), but not in case of
4412                  * regular truncate */
4413                 schedule_timeout_uninterruptible(HZ);
4414                 goto repeat;
4415         }
4416         spin_unlock(&ei->i_prealloc_lock);
4417 
4418         list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
4419                 BUG_ON(pa->pa_type != MB_INODE_PA);
4420                 group = ext4_get_group_number(sb, pa->pa_pstart);
4421 
4422                 err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
4423                                              GFP_NOFS|__GFP_NOFAIL);
4424                 if (err) {
4425                         ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
4426                                        err, group);
4427                         continue;
4428                 }
4429 
4430                 bitmap_bh = ext4_read_block_bitmap(sb, group);
4431                 if (IS_ERR(bitmap_bh)) {
4432                         err = PTR_ERR(bitmap_bh);
4433                         ext4_error_err(sb, -err, "Error %d reading block bitmap for %u",
4434                                        err, group);
4435                         ext4_mb_unload_buddy(&e4b);
4436                         continue;
4437                 }
4438 
4439                 ext4_lock_group(sb, group);
4440                 list_del(&pa->pa_group_list);
4441                 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
4442                 ext4_unlock_group(sb, group);
4443 
4444                 ext4_mb_unload_buddy(&e4b);
4445                 put_bh(bitmap_bh);
4446 
4447                 list_del(&pa->u.pa_tmp_list);
4448                 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4449         }
4450 }
4451 
4452 static int ext4_mb_pa_alloc(struct ext4_allocation_context *ac)
4453 {
4454         struct ext4_prealloc_space *pa;
4455 
4456         BUG_ON(ext4_pspace_cachep == NULL);
4457         pa = kmem_cache_zalloc(ext4_pspace_cachep, GFP_NOFS);
4458         if (!pa)
4459                 return -ENOMEM;
4460         atomic_set(&pa->pa_count, 1);
4461         ac->ac_pa = pa;
4462         return 0;
4463 }
4464 
4465 static void ext4_mb_pa_free(struct ext4_allocation_context *ac)
4466 {
4467         struct ext4_prealloc_space *pa = ac->ac_pa;
4468 
4469         BUG_ON(!pa);
4470         ac->ac_pa = NULL;
4471         WARN_ON(!atomic_dec_and_test(&pa->pa_count));
4472         kmem_cache_free(ext4_pspace_cachep, pa);
4473 }
4474 
4475 #ifdef CONFIG_EXT4_DEBUG
4476 static inline void ext4_mb_show_pa(struct super_block *sb)
4477 {
4478         ext4_group_t i, ngroups;
4479 
4480         if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED))
4481                 return;
4482 
4483         ngroups = ext4_get_groups_count(sb);
4484         mb_debug(sb, "groups: ");
4485         for (i = 0; i < ngroups; i++) {
4486                 struct ext4_group_info *grp = ext4_get_group_info(sb, i);
4487                 struct ext4_prealloc_space *pa;
4488                 ext4_grpblk_t start;
4489                 struct list_head *cur;
4490                 ext4_lock_group(sb, i);
4491                 list_for_each(cur, &grp->bb_prealloc_list) {
4492                         pa = list_entry(cur, struct ext4_prealloc_space,
4493                                         pa_group_list);
4494                         spin_lock(&pa->pa_lock);
4495                         ext4_get_group_no_and_offset(sb, pa->pa_pstart,
4496                                                      NULL, &start);
4497                         spin_unlock(&pa->pa_lock);
4498                         mb_debug(sb, "PA:%u:%d:%d\n", i, start,
4499                                  pa->pa_len);
4500                 }
4501                 ext4_unlock_group(sb, i);
4502                 mb_debug(sb, "%u: %d/%d\n", i, grp->bb_free,
4503                          grp->bb_fragments);
4504         }
4505 }
4506 
4507 static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
4508 {
4509         struct super_block *sb = ac->ac_sb;
4510 
4511         if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED))
4512                 return;
4513 
4514         mb_debug(sb, "Can't allocate:"
4515                         " Allocation context details:");
4516         mb_debug(sb, "status %u flags 0x%x",
4517                         ac->ac_status, ac->ac_flags);
4518         mb_debug(sb, "orig %lu/%lu/%lu@%lu, "
4519                         "goal %lu/%lu/%lu@%lu, "
4520                         "best %lu/%lu/%lu@%lu cr %d",
4521                         (unsigned long)ac->ac_o_ex.fe_group,
4522                         (unsigned long)ac->ac_o_ex.fe_start,
4523                         (unsigned long)ac->ac_o_ex.fe_len,
4524                         (unsigned long)ac->ac_o_ex.fe_logical,
4525                         (unsigned long)ac->ac_g_ex.fe_group,
4526                         (unsigned long)ac->ac_g_ex.fe_start,
4527                         (unsigned long)ac->ac_g_ex.fe_len,
4528                         (unsigned long)ac->ac_g_ex.fe_logical,
4529                         (unsigned long)ac->ac_b_ex.fe_group,
4530                         (unsigned long)ac->ac_b_ex.fe_start,
4531                         (unsigned long)ac->ac_b_ex.fe_len,
4532                         (unsigned long)ac->ac_b_ex.fe_logical,
4533                         (int)ac->ac_criteria);
4534         mb_debug(sb, "%u found", ac->ac_found);
4535         ext4_mb_show_pa(sb);
4536 }
4537 #else
4538 static inline void ext4_mb_show_pa(struct super_block *sb)
4539 {
4540         return;
4541 }
4542 static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac)
4543 {
4544         ext4_mb_show_pa(ac->ac_sb);
4545         return;
4546 }
4547 #endif
4548 
4549 /*
4550  * We use locality group preallocation for small size file. The size of the
4551  * file is determined by the current size or the resulting size after
4552  * allocation which ever is larger
4553  *
4554  * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req
4555  */
4556 static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
4557 {
4558         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4559         int bsbits = ac->ac_sb->s_blocksize_bits;
4560         loff_t size, isize;
4561 
4562         if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
4563                 return;
4564 
4565         if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
4566                 return;
4567 
4568         size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
4569         isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
4570                 >> bsbits;
4571 
4572         if ((size == isize) && !ext4_fs_is_busy(sbi) &&
4573             !inode_is_open_for_write(ac->ac_inode)) {
4574                 ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
4575                 return;
4576         }
4577 
4578         if (sbi->s_mb_group_prealloc <= 0) {
4579                 ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
4580                 return;
4581         }
4582 
4583         /* don't use group allocation for large files */
4584         size = max(size, isize);
4585         if (size > sbi->s_mb_stream_request) {
4586                 ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
4587                 return;
4588         }
4589 
4590         BUG_ON(ac->ac_lg != NULL);
4591         /*
4592          * locality group prealloc space are per cpu. The reason for having
4593          * per cpu locality group is to reduce the contention between block
4594          * request from multiple CPUs.
4595          */
4596         ac->ac_lg = raw_cpu_ptr(sbi->s_locality_groups);
4597 
4598         /* we're going to use group allocation */
4599         ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
4600 
4601         /* serialize all allocations in the group */
4602         mutex_lock(&ac->ac_lg->lg_mutex);
4603 }
4604 
4605 static noinline_for_stack int
4606 ext4_mb_initialize_context(struct ext4_allocation_context *ac,
4607                                 struct ext4_allocation_request *ar)
4608 {
4609         struct super_block *sb = ar->inode->i_sb;
4610         struct ext4_sb_info *sbi = EXT4_SB(sb);
4611         struct ext4_super_block *es = sbi->s_es;
4612         ext4_group_t group;
4613         unsigned int len;
4614         ext4_fsblk_t goal;
4615         ext4_grpblk_t block;
4616 
4617         /* we can't allocate > group size */
4618         len = ar->len;
4619 
4620         /* just a dirty hack to filter too big requests  */
4621         if (len >= EXT4_CLUSTERS_PER_GROUP(sb))
4622                 len = EXT4_CLUSTERS_PER_GROUP(sb);
4623 
4624         /* start searching from the goal */
4625         goal = ar->goal;
4626         if (goal < le32_to_cpu(es->s_first_data_block) ||
4627                         goal >= ext4_blocks_count(es))
4628                 goal = le32_to_cpu(es->s_first_data_block);
4629         ext4_get_group_no_and_offset(sb, goal, &group, &block);
4630 
4631         /* set up allocation goals */
4632         ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical);
4633         ac->ac_status = AC_STATUS_CONTINUE;
4634         ac->ac_sb = sb;
4635         ac->ac_inode = ar->inode;
4636         ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical;
4637         ac->ac_o_ex.fe_group = group;
4638         ac->ac_o_ex.fe_start = block;
4639         ac->ac_o_ex.fe_len = len;
4640         ac->ac_g_ex = ac->ac_o_ex;
4641         ac->ac_flags = ar->flags;
4642 
4643         /* we have to define context: we'll work with a file or
4644          * locality group. this is a policy, actually */
4645         ext4_mb_group_or_file(ac);
4646 
4647         mb_debug(sb, "init ac: %u blocks @ %u, goal %u, flags 0x%x, 2^%d, "
4648                         "left: %u/%u, right %u/%u to %swritable\n",
4649                         (unsigned) ar->len, (unsigned) ar->logical,
4650                         (unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
4651                         (unsigned) ar->lleft, (unsigned) ar->pleft,
4652                         (unsigned) ar->lright, (unsigned) ar->pright,
4653                         inode_is_open_for_write(ar->inode) ? "" : "non-");
4654         return 0;
4655 
4656 }
4657 
4658 static noinline_for_stack void
4659 ext4_mb_discard_lg_preallocations(struct super_block *sb,
4660                                         struct ext4_locality_group *lg,
4661                                         int order, int total_entries)
4662 {
4663         ext4_group_t group = 0;
4664         struct ext4_buddy e4b;
4665         struct list_head discard_list;
4666         struct ext4_prealloc_space *pa, *tmp;
4667 
4668         mb_debug(sb, "discard locality group preallocation\n");
4669 
4670         INIT_LIST_HEAD(&discard_list);
4671 
4672         spin_lock(&lg->lg_prealloc_lock);
4673         list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
4674                                 pa_inode_list,
4675                                 lockdep_is_held(&lg->lg_prealloc_lock)) {
4676                 spin_lock(&pa->pa_lock);
4677                 if (atomic_read(&pa->pa_count)) {
4678                         /*
4679                          * This is the pa that we just used
4680                          * for block allocation. So don't
4681                          * free that
4682                          */
4683                         spin_unlock(&pa->pa_lock);
4684                         continue;
4685                 }
4686                 if (pa->pa_deleted) {
4687                         spin_unlock(&pa->pa_lock);
4688                         continue;
4689                 }
4690                 /* only lg prealloc space */
4691                 BUG_ON(pa->pa_type != MB_GROUP_PA);
4692 
4693                 /* seems this one can be freed ... */
4694                 ext4_mb_mark_pa_deleted(sb, pa);
4695                 spin_unlock(&pa->pa_lock);
4696 
4697                 list_del_rcu(&pa->pa_inode_list);
4698                 list_add(&pa->u.pa_tmp_list, &discard_list);
4699 
4700                 total_entries--;
4701                 if (total_entries <= 5) {
4702                         /*
4703                          * we want to keep only 5 entries
4704                          * allowing it to grow to 8. This
4705                          * mak sure we don't call discard
4706                          * soon for this list.
4707                          */
4708                         break;
4709                 }
4710         }
4711         spin_unlock(&lg->lg_prealloc_lock);
4712 
4713         list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
4714                 int err;
4715 
4716                 group = ext4_get_group_number(sb, pa->pa_pstart);
4717                 err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
4718                                              GFP_NOFS|__GFP_NOFAIL);
4719                 if (err) {
4720                         ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
4721                                        err, group);
4722                         continue;
4723                 }
4724                 ext4_lock_group(sb, group);
4725                 list_del(&pa->pa_group_list);
4726                 ext4_mb_release_group_pa(&e4b, pa);
4727                 ext4_unlock_group(sb, group);
4728 
4729                 ext4_mb_unload_buddy(&e4b);
4730                 list_del(&pa->u.pa_tmp_list);
4731                 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4732         }
4733 }
4734 
4735 /*
4736  * We have incremented pa_count. So it cannot be freed at this
4737  * point. Also we hold lg_mutex. So no parallel allocation is
4738  * possible from this lg. That means pa_free cannot be updated.
4739  *
4740  * A parallel ext4_mb_discard_group_preallocations is possible.
4741  * which can cause the lg_prealloc_list to be updated.
4742  */
4743 
4744 static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
4745 {
4746         int order, added = 0, lg_prealloc_count = 1;
4747         struct super_block *sb = ac->ac_sb;
4748         struct ext4_locality_group *lg = ac->ac_lg;
4749         struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa;
4750 
4751         order = fls(pa->pa_free) - 1;
4752         if (order > PREALLOC_TB_SIZE - 1)
4753                 /* The max size of hash table is PREALLOC_TB_SIZE */
4754                 order = PREALLOC_TB_SIZE - 1;
4755         /* Add the prealloc space to lg */
4756         spin_lock(&lg->lg_prealloc_lock);
4757         list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
4758                                 pa_inode_list,
4759                                 lockdep_is_held(&lg->lg_prealloc_lock)) {
4760                 spin_lock(&tmp_pa->pa_lock);
4761                 if (tmp_pa->pa_deleted) {
4762                         spin_unlock(&tmp_pa->pa_lock);
4763                         continue;
4764                 }
4765                 if (!added && pa->pa_free < tmp_pa->pa_free) {
4766                         /* Add to the tail of the previous entry */
4767                         list_add_tail_rcu(&pa->pa_inode_list,
4768                                                 &tmp_pa->pa_inode_list);
4769                         added = 1;
4770                         /*
4771                          * we want to count the total
4772                          * number of entries in the list
4773                          */
4774                 }
4775                 spin_unlock(&tmp_pa->pa_lock);
4776                 lg_prealloc_count++;
4777         }
4778         if (!added)
4779                 list_add_tail_rcu(&pa->pa_inode_list,
4780                                         &lg->lg_prealloc_list[order]);
4781         spin_unlock(&lg->lg_prealloc_lock);
4782 
4783         /* Now trim the list to be not more than 8 elements */
4784         if (lg_prealloc_count > 8) {
4785                 ext4_mb_discard_lg_preallocations(sb, lg,
4786                                                   order, lg_prealloc_count);
4787                 return;
4788         }
4789         return ;
4790 }
4791 
4792 /*
4793  * if per-inode prealloc list is too long, trim some PA
4794  */
4795 static void ext4_mb_trim_inode_pa(struct inode *inode)
4796 {
4797         struct ext4_inode_info *ei = EXT4_I(inode);
4798         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4799         int count, delta;
4800 
4801         count = atomic_read(&ei->i_prealloc_active);
4802         delta = (sbi->s_mb_max_inode_prealloc >> 2) + 1;
4803         if (count > sbi->s_mb_max_inode_prealloc + delta) {
4804                 count -= sbi->s_mb_max_inode_prealloc;
4805                 ext4_discard_preallocations(inode, count);
4806         }
4807 }
4808 
4809 /*
4810  * release all resource we used in allocation
4811  */
4812 static int ext4_mb_release_context(struct ext4_allocation_context *ac)
4813 {
4814         struct inode *inode = ac->ac_inode;
4815         struct ext4_inode_info *ei = EXT4_I(inode);
4816         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4817         struct ext4_prealloc_space *pa = ac->ac_pa;
4818         if (pa) {
4819                 if (pa->pa_type == MB_GROUP_PA) {
4820                         /* see comment in ext4_mb_use_group_pa() */
4821                         spin_lock(&pa->pa_lock);
4822                         pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
4823                         pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
4824                         pa->pa_free -= ac->ac_b_ex.fe_len;
4825                         pa->pa_len -= ac->ac_b_ex.fe_len;
4826                         spin_unlock(&pa->pa_lock);
4827 
4828                         /*
4829                          * We want to add the pa to the right bucket.
4830                          * Remove it from the list and while adding
4831                          * make sure the list to which we are adding
4832                          * doesn't grow big.
4833                          */
4834                         if (likely(pa->pa_free)) {
4835                                 spin_lock(pa->pa_obj_lock);
4836                                 list_del_rcu(&pa->pa_inode_list);
4837                                 spin_unlock(pa->pa_obj_lock);
4838                                 ext4_mb_add_n_trim(ac);
4839                         }
4840                 }
4841 
4842                 if (pa->pa_type == MB_INODE_PA) {
4843                         /*
4844                          * treat per-inode prealloc list as a lru list, then try
4845                          * to trim the least recently used PA.
4846                          */
4847                         spin_lock(pa->pa_obj_lock);
4848                         list_move(&pa->pa_inode_list, &ei->i_prealloc_list);
4849                         spin_unlock(pa->pa_obj_lock);
4850                 }
4851 
4852                 ext4_mb_put_pa(ac, ac->ac_sb, pa);
4853         }
4854         if (ac->ac_bitmap_page)
4855                 put_page(ac->ac_bitmap_page);
4856         if (ac->ac_buddy_page)
4857                 put_page(ac->ac_buddy_page);
4858         if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
4859                 mutex_unlock(&ac->ac_lg->lg_mutex);
4860         ext4_mb_collect_stats(ac);
4861         ext4_mb_trim_inode_pa(inode);
4862         return 0;
4863 }
4864 
4865 static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
4866 {
4867         ext4_group_t i, ngroups = ext4_get_groups_count(sb);
4868         int ret;
4869         int freed = 0;
4870 
4871         trace_ext4_mb_discard_preallocations(sb, needed);
4872         for (i = 0; i < ngroups && needed > 0; i++) {
4873                 ret = ext4_mb_discard_group_preallocations(sb, i, needed);
4874                 freed += ret;
4875                 needed -= ret;
4876         }
4877 
4878         return freed;
4879 }
4880 
4881 static bool ext4_mb_discard_preallocations_should_retry(struct super_block *sb,
4882                         struct ext4_allocation_context *ac, u64 *seq)
4883 {
4884         int freed;
4885         u64 seq_retry = 0;
4886         bool ret = false;
4887 
4888         freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len);
4889         if (freed) {
4890                 ret = true;
4891                 goto out_dbg;
4892         }
4893         seq_retry = ext4_get_discard_pa_seq_sum();
4894         if (!(ac->ac_flags & EXT4_MB_STRICT_CHECK) || seq_retry != *seq) {
4895                 ac->ac_flags |= EXT4_MB_STRICT_CHECK;
4896                 *seq = seq_retry;
4897                 ret = true;
4898         }
4899 
4900 out_dbg:
4901         mb_debug(sb, "freed %d, retry ? %s\n", freed, ret ? "yes" : "no");
4902         return ret;
4903 }
4904 
4905 static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle,
4906                                 struct ext4_allocation_request *ar, int *errp);
4907 
4908 /*
4909  * Main entry point into mballoc to allocate blocks
4910  * it tries to use preallocation first, then falls back
4911  * to usual allocation
4912  */
4913 ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
4914                                 struct ext4_allocation_request *ar, int *errp)
4915 {
4916         struct ext4_allocation_context *ac = NULL;
4917         struct ext4_sb_info *sbi;
4918         struct super_block *sb;
4919         ext4_fsblk_t block = 0;
4920         unsigned int inquota = 0;
4921         unsigned int reserv_clstrs = 0;
4922         u64 seq;
4923 
4924         might_sleep();
4925         sb = ar->inode->i_sb;
4926         sbi = EXT4_SB(sb);
4927 
4928         trace_ext4_request_blocks(ar);
4929         if (sbi->s_mount_state & EXT4_FC_REPLAY)
4930                 return ext4_mb_new_blocks_simple(handle, ar, errp);
4931 
4932         /* Allow to use superuser reservation for quota file */
4933         if (ext4_is_quota_file(ar->inode))
4934                 ar->flags |= EXT4_MB_USE_ROOT_BLOCKS;
4935 
4936         if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) {
4937                 /* Without delayed allocation we need to verify
4938                  * there is enough free blocks to do block allocation
4939                  * and verify allocation doesn't exceed the quota limits.
4940                  */
4941                 while (ar->len &&
4942                         ext4_claim_free_clusters(sbi, ar->len, ar->flags)) {
4943 
4944                         /* let others to free the space */
4945                         cond_resched();
4946                         ar->len = ar->len >> 1;
4947                 }
4948                 if (!ar->len) {
4949                         ext4_mb_show_pa(sb);
4950                         *errp = -ENOSPC;
4951                         return 0;
4952                 }
4953                 reserv_clstrs = ar->len;
4954                 if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) {
4955                         dquot_alloc_block_nofail(ar->inode,
4956                                                  EXT4_C2B(sbi, ar->len));
4957                 } else {
4958                         while (ar->len &&
4959                                 dquot_alloc_block(ar->inode,
4960                                                   EXT4_C2B(sbi, ar->len))) {
4961 
4962                                 ar->flags |= EXT4_MB_HINT_NOPREALLOC;
4963                                 ar->len--;
4964                         }
4965                 }
4966                 inquota = ar->len;
4967                 if (ar->len == 0) {
4968                         *errp = -EDQUOT;
4969                         goto out;
4970                 }
4971         }
4972 
4973         ac = kmem_cache_zalloc(ext4_ac_cachep, GFP_NOFS);
4974         if (!ac) {
4975                 ar->len = 0;
4976                 *errp = -ENOMEM;
4977                 goto out;
4978         }
4979 
4980         *errp = ext4_mb_initialize_context(ac, ar);
4981         if (*errp) {
4982                 ar->len = 0;
4983                 goto out;
4984         }
4985 
4986         ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
4987         seq = this_cpu_read(discard_pa_seq);
4988         if (!ext4_mb_use_preallocated(ac)) {
4989                 ac->ac_op = EXT4_MB_HISTORY_ALLOC;
4990                 ext4_mb_normalize_request(ac, ar);
4991 
4992                 *errp = ext4_mb_pa_alloc(ac);
4993                 if (*errp)
4994                         goto errout;
4995 repeat:
4996                 /* allocate space in core */
4997                 *errp = ext4_mb_regular_allocator(ac);
4998                 /*
4999                  * pa allocated above is added to grp->bb_prealloc_list only
5000                  * when we were able to allocate some block i.e. when
5001                  * ac->ac_status == AC_STATUS_FOUND.
5002                  * And error from above mean ac->ac_status != AC_STATUS_FOUND
5003                  * So we have to free this pa here itself.
5004                  */
5005                 if (*errp) {
5006                         ext4_mb_pa_free(ac);
5007                         ext4_discard_allocated_blocks(ac);
5008                         goto errout;
5009                 }
5010                 if (ac->ac_status == AC_STATUS_FOUND &&
5011                         ac->ac_o_ex.fe_len >= ac->ac_f_ex.fe_len)
5012                         ext4_mb_pa_free(ac);
5013         }
5014         if (likely(ac->ac_status == AC_STATUS_FOUND)) {
5015                 *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs);
5016                 if (*errp) {
5017                         ext4_discard_allocated_blocks(ac);
5018                         goto errout;
5019                 } else {
5020                         block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
5021                         ar->len = ac->ac_b_ex.fe_len;
5022                 }
5023         } else {
5024                 if (ext4_mb_discard_preallocations_should_retry(sb, ac, &seq))
5025                         goto repeat;
5026                 /*
5027                  * If block allocation fails then the pa allocated above
5028                  * needs to be freed here itself.
5029                  */
5030                 ext4_mb_pa_free(ac);
5031                 *errp = -ENOSPC;
5032         }
5033 
5034 errout:
5035         if (*errp) {
5036                 ac->ac_b_ex.fe_len = 0;
5037                 ar->len = 0;
5038                 ext4_mb_show_ac(ac);
5039         }
5040         ext4_mb_release_context(ac);
5041 out:
5042         if (ac)
5043                 kmem_cache_free(ext4_ac_cachep, ac);
5044         if (inquota && ar->len < inquota)
5045                 dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len));
5046         if (!ar->len) {
5047                 if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0)
5048                         /* release all the reserved blocks if non delalloc */
5049                         percpu_counter_sub(&sbi->s_dirtyclusters_counter,
5050                                                 reserv_clstrs);
5051         }
5052 
5053         trace_ext4_allocate_blocks(ar, (unsigned long long)block);
5054 
5055         return block;
5056 }
5057 
5058 /*
5059  * We can merge two free data extents only if the physical blocks
5060  * are contiguous, AND the extents were freed by the same transaction,
5061  * AND the blocks are associated with the same group.
5062  */
5063 static void ext4_try_merge_freed_extent(struct ext4_sb_info *sbi,
5064                                         struct ext4_free_data *entry,
5065                                         struct ext4_free_data *new_entry,
5066                                         struct rb_root *entry_rb_root)
5067 {
5068         if ((entry->efd_tid != new_entry->efd_tid) ||
5069             (entry->efd_group != new_entry->efd_group))
5070                 return;
5071         if (entry->efd_start_cluster + entry->efd_count ==
5072             new_entry->efd_start_cluster) {
5073                 new_entry->efd_start_cluster = entry->efd_start_cluster;
5074                 new_entry->efd_count += entry->efd_count;
5075         } else if (new_entry->efd_start_cluster + new_entry->efd_count ==
5076                    entry->efd_start_cluster) {
5077                 new_entry->efd_count += entry->efd_count;
5078         } else
5079                 return;
5080         spin_lock(&sbi->s_md_lock);
5081         list_del(&entry->efd_list);
5082         spin_unlock(&sbi->s_md_lock);
5083         rb_erase(&entry->efd_node, entry_rb_root);
5084         kmem_cache_free(ext4_free_data_cachep, entry);
5085 }
5086 
5087 static noinline_for_stack int
5088 ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
5089                       struct ext4_free_data *new_entry)
5090 {
5091         ext4_group_t group = e4b->bd_group;
5092         ext4_grpblk_t cluster;
5093         ext4_grpblk_t clusters = new_entry->efd_count;
5094         struct ext4_free_data *entry;
5095         struct ext4_group_info *db = e4b->bd_info;
5096         struct super_block *sb = e4b->bd_sb;
5097         struct ext4_sb_info *sbi = EXT4_SB(sb);
5098         struct rb_node **n = &db->bb_free_root.rb_node, *node;
5099         struct rb_node *parent = NULL, *new_node;
5100 
5101         BUG_ON(!ext4_handle_valid(handle));
5102         BUG_ON(e4b->bd_bitmap_page == NULL);
5103         BUG_ON(e4b->bd_buddy_page == NULL);
5104 
5105         new_node = &new_entry->efd_node;
5106         cluster = new_entry->efd_start_cluster;
5107 
5108         if (!*n) {
5109                 /* first free block exent. We need to
5110                    protect buddy cache from being freed,
5111                  * otherwise we'll refresh it from
5112                  * on-disk bitmap and lose not-yet-available
5113                  * blocks */
5114                 get_page(e4b->bd_buddy_page);
5115                 get_page(e4b->bd_bitmap_page);
5116         }
5117         while (*n) {
5118                 parent = *n;
5119                 entry = rb_entry(parent, struct ext4_free_data, efd_node);
5120                 if (cluster < entry->efd_start_cluster)
5121                         n = &(*n)->rb_left;
5122                 else if (cluster >= (entry->efd_start_cluster + entry->efd_count))
5123                         n = &(*n)->rb_right;
5124                 else {
5125                         ext4_grp_locked_error(sb, group, 0,
5126                                 ext4_group_first_block_no(sb, group) +
5127                                 EXT4_C2B(sbi, cluster),
5128                                 "Block already on to-be-freed list");
5129                         return 0;
5130                 }
5131         }
5132 
5133         rb_link_node(new_node, parent, n);
5134         rb_insert_color(new_node, &db->bb_free_root);
5135 
5136         /* Now try to see the extent can be merged to left and right */
5137         node = rb_prev(new_node);
5138         if (node) {
5139                 entry = rb_entry(node, struct ext4_free_data, efd_node);
5140                 ext4_try_merge_freed_extent(sbi, entry, new_entry,
5141                                             &(db->bb_free_root));
5142         }
5143 
5144         node = rb_next(new_node);
5145         if (node) {
5146                 entry = rb_entry(node, struct ext4_free_data, efd_node);
5147                 ext4_try_merge_freed_extent(sbi, entry, new_entry,
5148                                             &(db->bb_free_root));
5149         }
5150 
5151         spin_lock(&sbi->s_md_lock);
5152         list_add_tail(&new_entry->efd_list, &sbi->s_freed_data_list);
5153         sbi->s_mb_free_pending += clusters;
5154         spin_unlock(&sbi->s_md_lock);
5155         return 0;
5156 }
5157 
5158 /*
5159  * Simple allocator for Ext4 fast commit replay path. It searches for blocks
5160  * linearly starting at the goal block and also excludes the blocks which
5161  * are going to be in use after fast commit replay.
5162  */
5163 static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle,
5164                                 struct ext4_allocation_request *ar, int *errp)
5165 {
5166         struct buffer_head *bitmap_bh;
5167         struct super_block *sb = ar->inode->i_sb;
5168         ext4_group_t group;
5169         ext4_grpblk_t blkoff;
5170         int i = sb->s_blocksize;
5171         ext4_fsblk_t goal, block;
5172         struct ext4_super_block *es = EXT4_SB(sb)->s_es;
5173 
5174         goal = ar->goal;
5175         if (goal < le32_to_cpu(es->s_first_data_block) ||
5176                         goal >= ext4_blocks_count(es))
5177                 goal = le32_to_cpu(es->s_first_data_block);
5178 
5179         ar->len = 0;
5180         ext4_get_group_no_and_offset(sb, goal, &group, &blkoff);
5181         for (; group < ext4_get_groups_count(sb); group++) {
5182                 bitmap_bh = ext4_read_block_bitmap(sb, group);
5183                 if (IS_ERR(bitmap_bh)) {
5184                         *errp = PTR_ERR(bitmap_bh);
5185                         pr_warn("Failed to read block bitmap\n");
5186                         return 0;
5187                 }
5188 
5189                 ext4_get_group_no_and_offset(sb,
5190                         max(ext4_group_first_block_no(sb, group), goal),
5191                         NULL, &blkoff);
5192                 i = mb_find_next_zero_bit(bitmap_bh->b_data, sb->s_blocksize,
5193                                                 blkoff);
5194                 brelse(bitmap_bh);
5195                 if (i >= sb->s_blocksize)
5196                         continue;
5197                 if (ext4_fc_replay_check_excluded(sb,
5198                         ext4_group_first_block_no(sb, group) + i))
5199                         continue;
5200                 break;
5201         }
5202 
5203         if (group >= ext4_get_groups_count(sb) && i >= sb->s_blocksize)
5204                 return 0;
5205 
5206         block = ext4_group_first_block_no(sb, group) + i;
5207         ext4_mb_mark_bb(sb, block, 1, 1);
5208         ar->len = 1;
5209 
5210         return block;
5211 }
5212 
5213 static void ext4_free_blocks_simple(struct inode *inode, ext4_fsblk_t block,
5214                                         unsigned long count)
5215 {
5216         struct buffer_head *bitmap_bh;
5217         struct super_block *sb = inode->i_sb;
5218         struct ext4_group_desc *gdp;
5219         struct buffer_head *gdp_bh;
5220         ext4_group_t group;
5221         ext4_grpblk_t blkoff;
5222         int already_freed = 0, err, i;
5223 
5224         ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
5225         bitmap_bh = ext4_read_block_bitmap(sb, group);
5226         if (IS_ERR(bitmap_bh)) {
5227                 err = PTR_ERR(bitmap_bh);
5228                 pr_warn("Failed to read block bitmap\n");
5229                 return;
5230         }
5231         gdp = ext4_get_group_desc(sb, group, &gdp_bh);
5232         if (!gdp)
5233                 return;
5234 
5235         for (i = 0; i < count; i++) {
5236                 if (!mb_test_bit(blkoff + i, bitmap_bh->b_data))
5237                         already_freed++;
5238         }
5239         mb_clear_bits(bitmap_bh->b_data, blkoff, count);
5240         err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh);
5241         if (err)
5242                 return;
5243         ext4_free_group_clusters_set(
5244                 sb, gdp, ext4_free_group_clusters(sb, gdp) +
5245                 count - already_freed);
5246         ext4_block_bitmap_csum_set(sb, group, gdp, bitmap_bh);
5247         ext4_group_desc_csum_set(sb, group, gdp);
5248         ext4_handle_dirty_metadata(NULL, NULL, gdp_bh);
5249         sync_dirty_buffer(bitmap_bh);
5250         sync_dirty_buffer(gdp_bh);
5251         brelse(bitmap_bh);
5252 }
5253 
5254 /**
5255  * ext4_free_blocks() -- Free given blocks and update quota
5256  * @handle:             handle for this transaction
5257  * @inode:              inode
5258  * @bh:                 optional buffer of the block to be freed
5259  * @block:              starting physical block to be freed
5260  * @count:              number of blocks to be freed
5261  * @flags:              flags used by ext4_free_blocks
5262  */
5263 void ext4_free_blocks(handle_t *handle, struct inode *inode,
5264                       struct buffer_head *bh, ext4_fsblk_t block,
5265                       unsigned long count, int flags)
5266 {
5267         struct buffer_head *bitmap_bh = NULL;
5268         struct super_block *sb = inode->i_sb;
5269         struct ext4_group_desc *gdp;
5270         unsigned int overflow;
5271         ext4_grpblk_t bit;
5272         struct buffer_head *gd_bh;
5273         ext4_group_t block_group;
5274         struct ext4_sb_info *sbi;
5275         struct ext4_buddy e4b;
5276         unsigned int count_clusters;
5277         int err = 0;
5278         int ret;
5279 
5280         sbi = EXT4_SB(sb);
5281 
5282         if (sbi->s_mount_state & EXT4_FC_REPLAY) {
5283                 ext4_free_blocks_simple(inode, block, count);
5284                 return;
5285         }
5286 
5287         might_sleep();
5288         if (bh) {
5289                 if (block)
5290                         BUG_ON(block != bh->b_blocknr);
5291                 else
5292                         block = bh->b_blocknr;
5293         }
5294 
5295         if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
5296             !ext4_inode_block_valid(inode, block, count)) {
5297                 ext4_error(sb, "Freeing blocks not in datazone - "
5298                            "block = %llu, count = %lu", block, count);
5299                 goto error_return;
5300         }
5301 
5302         ext4_debug("freeing block %llu\n", block);
5303         trace_ext4_free_blocks(inode, block, count, flags);
5304 
5305         if (bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
5306                 BUG_ON(count > 1);
5307 
5308                 ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA,
5309                             inode, bh, block);
5310         }
5311 
5312         /*
5313          * If the extent to be freed does not begin on a cluster
5314          * boundary, we need to deal with partial clusters at the
5315          * beginning and end of the extent.  Normally we will free
5316          * blocks at the beginning or the end unless we are explicitly
5317          * requested to avoid doing so.
5318          */
5319         overflow = EXT4_PBLK_COFF(sbi, block);
5320         if (overflow) {
5321                 if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) {
5322                         overflow = sbi->s_cluster_ratio - overflow;
5323                         block += overflow;
5324                         if (count > overflow)
5325                                 count -= overflow;
5326                         else
5327                                 return;
5328                 } else {
5329                         block -= overflow;
5330                         count += overflow;
5331                 }
5332         }
5333         overflow = EXT4_LBLK_COFF(sbi, count);
5334         if (overflow) {
5335                 if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) {
5336                         if (count > overflow)
5337                                 count -= overflow;
5338                         else
5339                                 return;
5340                 } else
5341                         count += sbi->s_cluster_ratio - overflow;
5342         }
5343 
5344         if (!bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
5345                 int i;
5346                 int is_metadata = flags & EXT4_FREE_BLOCKS_METADATA;
5347 
5348                 for (i = 0; i < count; i++) {
5349                         cond_resched();
5350                         if (is_metadata)
5351                                 bh = sb_find_get_block(inode->i_sb, block + i);
5352                         ext4_forget(handle, is_metadata, inode, bh, block + i);
5353                 }
5354         }
5355 
5356 do_more:
5357         overflow = 0;
5358         ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
5359 
5360         if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(
5361                         ext4_get_group_info(sb, block_group))))
5362                 return;
5363 
5364         /*
5365          * Check to see if we are freeing blocks across a group
5366          * boundary.
5367          */
5368         if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) {
5369                 overflow = EXT4_C2B(sbi, bit) + count -
5370                         EXT4_BLOCKS_PER_GROUP(sb);
5371                 count -= overflow;
5372         }
5373         count_clusters = EXT4_NUM_B2C(sbi, count);
5374         bitmap_bh = ext4_read_block_bitmap(sb, block_group);
5375         if (IS_ERR(bitmap_bh)) {
5376                 err = PTR_ERR(bitmap_bh);
5377                 bitmap_bh = NULL;
5378                 goto error_return;
5379         }
5380         gdp = ext4_get_group_desc(sb, block_group, &gd_bh);
5381         if (!gdp) {
5382                 err = -EIO;
5383                 goto error_return;
5384         }
5385 
5386         if (in_range(ext4_block_bitmap(sb, gdp), block, count) ||
5387             in_range(ext4_inode_bitmap(sb, gdp), block, count) ||
5388             in_range(block, ext4_inode_table(sb, gdp),
5389                      sbi->s_itb_per_group) ||
5390             in_range(block + count - 1, ext4_inode_table(sb, gdp),
5391                      sbi->s_itb_per_group)) {
5392 
5393                 ext4_error(sb, "Freeing blocks in system zone - "
5394                            "Block = %llu, count = %lu", block, count);
5395                 /* err = 0. ext4_std_error should be a no op */
5396                 goto error_return;
5397         }
5398 
5399         BUFFER_TRACE(bitmap_bh, "getting write access");
5400         err = ext4_journal_get_write_access(handle, bitmap_bh);
5401         if (err)
5402                 goto error_return;
5403 
5404         /*
5405          * We are about to modify some metadata.  Call the journal APIs
5406          * to unshare ->b_data if a currently-committing transaction is
5407          * using it
5408          */
5409         BUFFER_TRACE(gd_bh, "get_write_access");
5410         err = ext4_journal_get_write_access(handle, gd_bh);
5411         if (err)
5412                 goto error_return;
5413 #ifdef AGGRESSIVE_CHECK
5414         {
5415                 int i;
5416                 for (i = 0; i < count_clusters; i++)
5417                         BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data));
5418         }
5419 #endif
5420         trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters);
5421 
5422         /* __GFP_NOFAIL: retry infinitely, ignore TIF_MEMDIE and memcg limit. */
5423         err = ext4_mb_load_buddy_gfp(sb, block_group, &e4b,
5424                                      GFP_NOFS|__GFP_NOFAIL);
5425         if (err)
5426                 goto error_return;
5427 
5428         /*
5429          * We need to make sure we don't reuse the freed block until after the
5430          * transaction is committed. We make an exception if the inode is to be
5431          * written in writeback mode since writeback mode has weak data
5432          * consistency guarantees.
5433          */
5434         if (ext4_handle_valid(handle) &&
5435             ((flags & EXT4_FREE_BLOCKS_METADATA) ||
5436              !ext4_should_writeback_data(inode))) {
5437                 struct ext4_free_data *new_entry;
5438                 /*
5439                  * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed
5440                  * to fail.
5441                  */
5442                 new_entry = kmem_cache_alloc(ext4_free_data_cachep,
5443                                 GFP_NOFS|__GFP_NOFAIL);
5444                 new_entry->efd_start_cluster = bit;
5445                 new_entry->efd_group = block_group;
5446                 new_entry->efd_count = count_clusters;
5447                 new_entry->efd_tid = handle->h_transaction->t_tid;
5448 
5449                 ext4_lock_group(sb, block_group);
5450                 mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
5451                 ext4_mb_free_metadata(handle, &e4b, new_entry);
5452         } else {
5453                 /* need to update group_info->bb_free and bitmap
5454                  * with group lock held. generate_buddy look at
5455                  * them with group lock_held
5456                  */
5457                 if (test_opt(sb, DISCARD)) {
5458                         err = ext4_issue_discard(sb, block_group, bit, count,
5459                                                  NULL);
5460                         if (err && err != -EOPNOTSUPP)
5461                                 ext4_msg(sb, KERN_WARNING, "discard request in"
5462                                          " group:%d block:%d count:%lu failed"
5463                                          " with %d", block_group, bit, count,
5464                                          err);
5465                 } else
5466                         EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info);
5467 
5468                 ext4_lock_group(sb, block_group);
5469                 mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
5470                 mb_free_blocks(inode, &e4b, bit, count_clusters);
5471         }
5472 
5473         ret = ext4_free_group_clusters(sb, gdp) + count_clusters;
5474         ext4_free_group_clusters_set(sb, gdp, ret);
5475         ext4_block_bitmap_csum_set(sb, block_group, gdp, bitmap_bh);
5476         ext4_group_desc_csum_set(sb, block_group, gdp);
5477         ext4_unlock_group(sb, block_group);
5478 
5479         if (sbi->s_log_groups_per_flex) {
5480                 ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
5481                 atomic64_add(count_clusters,
5482                              &sbi_array_rcu_deref(sbi, s_flex_groups,
5483                                                   flex_group)->free_clusters);
5484         }
5485 
5486         /*
5487          * on a bigalloc file system, defer the s_freeclusters_counter
5488          * update to the caller (ext4_remove_space and friends) so they
5489          * can determine if a cluster freed here should be rereserved
5490          */
5491         if (!(flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)) {
5492                 if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
5493                         dquot_free_block(inode, EXT4_C2B(sbi, count_clusters));
5494                 percpu_counter_add(&sbi->s_freeclusters_counter,
5495                                    count_clusters);
5496         }
5497 
5498         ext4_mb_unload_buddy(&e4b);
5499 
5500         /* We dirtied the bitmap block */
5501         BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
5502         err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
5503 
5504         /* And the group descriptor block */
5505         BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
5506         ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
5507         if (!err)
5508                 err = ret;
5509 
5510         if (overflow && !err) {
5511                 block += count;
5512                 count = overflow;
5513                 put_bh(bitmap_bh);
5514                 goto do_more;
5515         }
5516 error_return:
5517         brelse(bitmap_bh);
5518         ext4_std_error(sb, err);
5519         return;
5520 }
5521 
5522 /**
5523  * ext4_group_add_blocks() -- Add given blocks to an existing group
5524  * @handle:                     handle to this transaction
5525  * @sb:                         super block
5526  * @block:                      start physical block to add to the block group
5527  * @count:                      number of blocks to free
5528  *
5529  * This marks the blocks as free in the bitmap and buddy.
5530  */
5531 int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
5532                          ext4_fsblk_t block, unsigned long count)
5533 {
5534         struct buffer_head *bitmap_bh = NULL;
5535         struct buffer_head *gd_bh;
5536         ext4_group_t block_group;
5537         ext4_grpblk_t bit;
5538         unsigned int i;
5539         struct ext4_group_desc *desc;
5540         struct ext4_sb_info *sbi = EXT4_SB(sb);
5541         struct ext4_buddy e4b;
5542         int err = 0, ret, free_clusters_count;
5543         ext4_grpblk_t clusters_freed;
5544         ext4_fsblk_t first_cluster = EXT4_B2C(sbi, block);
5545         ext4_fsblk_t last_cluster = EXT4_B2C(sbi, block + count - 1);
5546         unsigned long cluster_count = last_cluster - first_cluster + 1;
5547 
5548         ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);
5549 
5550         if (count == 0)
5551                 return 0;
5552 
5553         ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
5554         /*
5555          * Check to see if we are freeing blocks across a group
5556          * boundary.
5557          */
5558         if (bit + cluster_count > EXT4_CLUSTERS_PER_GROUP(sb)) {
5559                 ext4_warning(sb, "too many blocks added to group %u",
5560                              block_group);
5561                 err = -EINVAL;
5562                 goto error_return;
5563         }
5564 
5565         bitmap_bh = ext4_read_block_bitmap(sb, block_group);
5566         if (IS_ERR(bitmap_bh)) {
5567                 err = PTR_ERR(bitmap_bh);
5568                 bitmap_bh = NULL;
5569                 goto error_return;
5570         }
5571 
5572         desc = ext4_get_group_desc(sb, block_group, &gd_bh);
5573         if (!desc) {
5574                 err = -EIO;
5575                 goto error_return;
5576         }
5577 
5578         if (in_range(ext4_block_bitmap(sb, desc), block, count) ||
5579             in_range(ext4_inode_bitmap(sb, desc), block, count) ||
5580             in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) ||
5581             in_range(block + count - 1, ext4_inode_table(sb, desc),
5582                      sbi->s_itb_per_group)) {
5583                 ext4_error(sb, "Adding blocks in system zones - "
5584                            "Block = %llu, count = %lu",
5585                            block, count);
5586                 err = -EINVAL;
5587                 goto error_return;
5588         }
5589 
5590         BUFFER_TRACE(bitmap_bh, "getting write access");
5591         err = ext4_journal_get_write_access(handle, bitmap_bh);
5592         if (err)
5593                 goto error_return;
5594 
5595         /*
5596          * We are about to modify some metadata.  Call the journal APIs
5597          * to unshare ->b_data if a currently-committing transaction is
5598          * using it
5599          */
5600         BUFFER_TRACE(gd_bh, "get_write_access");
5601         err = ext4_journal_get_write_access(handle, gd_bh);
5602         if (err)
5603                 goto error_return;
5604 
5605         for (i = 0, clusters_freed = 0; i < cluster_count; i++) {
5606                 BUFFER_TRACE(bitmap_bh, "clear bit");
5607                 if (!mb_test_bit(bit + i, bitmap_bh->b_data)) {
5608                         ext4_error(sb, "bit already cleared for block %llu",
5609                                    (ext4_fsblk_t)(block + i));
5610                         BUFFER_TRACE(bitmap_bh, "bit already cleared");
5611                 } else {
5612                         clusters_freed++;
5613                 }
5614         }
5615 
5616         err = ext4_mb_load_buddy(sb, block_group, &e4b);
5617         if (err)
5618                 goto error_return;
5619 
5620         /*
5621          * need to update group_info->bb_free and bitmap
5622          * with group lock held. generate_buddy look at
5623          * them with group lock_held
5624          */
5625         ext4_lock_group(sb, block_group);
5626         mb_clear_bits(bitmap_bh->b_data, bit, cluster_count);
5627         mb_free_blocks(NULL, &e4b, bit, cluster_count);
5628         free_clusters_count = clusters_freed +
5629                 ext4_free_group_clusters(sb, desc);
5630         ext4_free_group_clusters_set(sb, desc, free_clusters_count);
5631         ext4_block_bitmap_csum_set(sb, block_group, desc, bitmap_bh);
5632         ext4_group_desc_csum_set(sb, block_group, desc);
5633         ext4_unlock_group(sb, block_group);
5634         percpu_counter_add(&sbi->s_freeclusters_counter,
5635                            clusters_freed);
5636 
5637         if (sbi->s_log_groups_per_flex) {
5638                 ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
5639                 atomic64_add(clusters_freed,
5640                              &sbi_array_rcu_deref(sbi, s_flex_groups,
5641                                                   flex_group)->free_clusters);
5642         }
5643 
5644         ext4_mb_unload_buddy(&e4b);
5645 
5646         /* We dirtied the bitmap block */
5647         BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
5648         err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
5649 
5650         /* And the group descriptor block */
5651         BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
5652         ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
5653         if (!err)
5654                 err = ret;
5655 
5656 error_return:
5657         brelse(bitmap_bh);
5658         ext4_std_error(sb, err);
5659         return err;
5660 }
5661 
5662 /**
5663  * ext4_trim_extent -- function to TRIM one single free extent in the group
5664  * @sb:         super block for the file system
5665  * @start:      starting block of the free extent in the alloc. group
5666  * @count:      number of blocks to TRIM
5667  * @group:      alloc. group we are working with
5668  * @e4b:        ext4 buddy for the group
5669  *
5670  * Trim "count" blocks starting at "start" in the "group". To assure that no
5671  * one will allocate those blocks, mark it as used in buddy bitmap. This must
5672  * be called with under the group lock.
5673  */
5674 static int ext4_trim_extent(struct super_block *sb, int start, int count,
5675                              ext4_group_t group, struct ext4_buddy *e4b)
5676 __releases(bitlock)
5677 __acquires(bitlock)
5678 {
5679         struct ext4_free_extent ex;
5680         int ret = 0;
5681 
5682         trace_ext4_trim_extent(sb, group, start, count);
5683 
5684         assert_spin_locked(ext4_group_lock_ptr(sb, group));
5685 
5686         ex.fe_start = start;
5687         ex.fe_group = group;
5688         ex.fe_len = count;
5689 
5690         /*
5691          * Mark blocks used, so no one can reuse them while
5692          * being trimmed.
5693          */
5694         mb_mark_used(e4b, &ex);
5695         ext4_unlock_group(sb, group);
5696         ret = ext4_issue_discard(sb, group, start, count, NULL);
5697         ext4_lock_group(sb, group);
5698         mb_free_blocks(NULL, e4b, start, ex.fe_len);
5699         return ret;
5700 }
5701 
5702 /**
5703  * ext4_trim_all_free -- function to trim all free space in alloc. group
5704  * @sb:                 super block for file system
5705  * @group:              group to be trimmed
5706  * @start:              first group block to examine
5707  * @max:                last group block to examine
5708  * @minblocks:          minimum extent block count
5709  *
5710  * ext4_trim_all_free walks through group's buddy bitmap searching for free
5711  * extents. When the free block is found, ext4_trim_extent is called to TRIM
5712  * the extent.
5713  *
5714  *
5715  * ext4_trim_all_free walks through group's block bitmap searching for free
5716  * extents. When the free extent is found, mark it as used in group buddy
5717  * bitmap. Then issue a TRIM command on this extent and free the extent in
5718  * the group buddy bitmap. This is done until whole group is scanned.
5719  */
5720 static ext4_grpblk_t
5721 ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
5722                    ext4_grpblk_t start, ext4_grpblk_t max,
5723                    ext4_grpblk_t minblocks)
5724 {
5725         void *bitmap;
5726         ext4_grpblk_t next, count = 0, free_count = 0;
5727         struct ext4_buddy e4b;
5728         int ret = 0;
5729 
5730         trace_ext4_trim_all_free(sb, group, start, max);
5731 
5732         ret = ext4_mb_load_buddy(sb, group, &e4b);
5733         if (ret) {
5734                 ext4_warning(sb, "Error %d loading buddy information for %u",
5735                              ret, group);
5736                 return ret;
5737         }
5738         bitmap = e4b.bd_bitmap;
5739 
5740         ext4_lock_group(sb, group);
5741         if (EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) &&
5742             minblocks >= atomic_read(&EXT4_SB(sb)->s_last_trim_minblks))
5743                 goto out;
5744 
5745         start = (e4b.bd_info->bb_first_free > start) ?
5746                 e4b.bd_info->bb_first_free : start;
5747 
5748         while (start <= max) {
5749                 start = mb_find_next_zero_bit(bitmap, max + 1, start);
5750                 if (start > max)
5751                         break;
5752                 next = mb_find_next_bit(bitmap, max + 1, start);
5753 
5754                 if ((next - start) >= minblocks) {
5755                         ret = ext4_trim_extent(sb, start,
5756                                                next - start, group, &e4b);
5757                         if (ret && ret != -EOPNOTSUPP)
5758                                 break;
5759                         ret = 0;
5760                         count += next - start;
5761                 }
5762                 free_count += next - start;
5763                 start = next + 1;
5764 
5765                 if (fatal_signal_pending(current)) {
5766                         count = -ERESTARTSYS;
5767                         break;
5768                 }
5769 
5770                 if (need_resched()) {
5771                         ext4_unlock_group(sb, group);
5772                         cond_resched();
5773                         ext4_lock_group(sb, group);
5774                 }
5775 
5776                 if ((e4b.bd_info->bb_free - free_count) < minblocks)
5777                         break;
5778         }
5779 
5780         if (!ret) {
5781                 ret = count;
5782                 EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info);
5783         }
5784 out:
5785         ext4_unlock_group(sb, group);
5786         ext4_mb_unload_buddy(&e4b);
5787 
5788         ext4_debug("trimmed %d blocks in the group %d\n",
5789                 count, group);
5790 
5791         return ret;
5792 }
5793 
5794 /**
5795  * ext4_trim_fs() -- trim ioctl handle function
5796  * @sb:                 superblock for filesystem
5797  * @range:              fstrim_range structure
5798  *
5799  * start:       First Byte to trim
5800  * len:         number of Bytes to trim from start
5801  * minlen:      minimum extent length in Bytes
5802  * ext4_trim_fs goes through all allocation groups containing Bytes from
5803  * start to start+len. For each such a group ext4_trim_all_free function
5804  * is invoked to trim all free space.
5805  */
5806 int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
5807 {
5808         struct ext4_group_info *grp;
5809         ext4_group_t group, first_group, last_group;
5810         ext4_grpblk_t cnt = 0, first_cluster, last_cluster;
5811         uint64_t start, end, minlen, trimmed = 0;
5812         ext4_fsblk_t first_data_blk =
5813                         le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
5814         ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es);
5815         int ret = 0;
5816 
5817         start = range->start >> sb->s_blocksize_bits;
5818         end = start + (range->len >> sb->s_blocksize_bits) - 1;
5819         minlen = EXT4_NUM_B2C(EXT4_SB(sb),
5820                               range->minlen >> sb->s_blocksize_bits);
5821 
5822         if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) ||
5823             start >= max_blks ||
5824             range->len < sb->s_blocksize)
5825                 return -EINVAL;
5826         if (end >= max_blks)
5827                 end = max_blks - 1;
5828         if (end <= first_data_blk)
5829                 goto out;
5830         if (start < first_data_blk)
5831                 start = first_data_blk;
5832 
5833         /* Determine first and last group to examine based on start and end */
5834         ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start,
5835                                      &first_group, &first_cluster);
5836         ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) end,
5837                                      &last_group, &last_cluster);
5838 
5839         /* end now represents the last cluster to discard in this group */
5840         end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
5841 
5842         for (group = first_group; group <= last_group; group++) {
5843                 grp = ext4_get_group_info(sb, group);
5844                 /* We only do this if the grp has never been initialized */
5845                 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
5846                         ret = ext4_mb_init_group(sb, group, GFP_NOFS);
5847                         if (ret)
5848                                 break;
5849                 }
5850 
5851                 /*
5852                  * For all the groups except the last one, last cluster will
5853                  * always be EXT4_CLUSTERS_PER_GROUP(sb)-1, so we only need to
5854                  * change it for the last group, note that last_cluster is
5855                  * already computed earlier by ext4_get_group_no_and_offset()
5856                  */
5857                 if (group == last_group)
5858                         end = last_cluster;
5859 
5860                 if (grp->bb_free >= minlen) {
5861                         cnt = ext4_trim_all_free(sb, group, first_cluster,
5862                                                 end, minlen);
5863                         if (cnt < 0) {
5864                                 ret = cnt;
5865                                 break;
5866                         }
5867                         trimmed += cnt;
5868                 }
5869 
5870                 /*
5871                  * For every group except the first one, we are sure
5872                  * that the first cluster to discard will be cluster #0.
5873                  */
5874                 first_cluster = 0;
5875         }
5876 
5877         if (!ret)
5878                 atomic_set(&EXT4_SB(sb)->s_last_trim_minblks, minlen);
5879 
5880 out:
5881         range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits;
5882         return ret;
5883 }
5884 
5885 /* Iterate all the free extents in the group. */
5886 int
5887 ext4_mballoc_query_range(
5888         struct super_block              *sb,
5889         ext4_group_t                    group,
5890         ext4_grpblk_t                   start,
5891         ext4_grpblk_t                   end,
5892         ext4_mballoc_query_range_fn     formatter,
5893         void                            *priv)
5894 {
5895         void                            *bitmap;
5896         ext4_grpblk_t                   next;
5897         struct ext4_buddy               e4b;
5898         int                             error;
5899 
5900         error = ext4_mb_load_buddy(sb, group, &e4b);
5901         if (error)
5902                 return error;
5903         bitmap = e4b.bd_bitmap;
5904 
5905         ext4_lock_group(sb, group);
5906 
5907         start = (e4b.bd_info->bb_first_free > start) ?
5908                 e4b.bd_info->bb_first_free : start;
5909         if (end >= EXT4_CLUSTERS_PER_GROUP(sb))
5910                 end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
5911 
5912         while (start <= end) {
5913                 start = mb_find_next_zero_bit(bitmap, end + 1, start);
5914                 if (start > end)
5915                         break;
5916                 next = mb_find_next_bit(bitmap, end + 1, start);
5917 
5918                 ext4_unlock_group(sb, group);
5919                 error = formatter(sb, group, start, next - start, priv);
5920                 if (error)
5921                         goto out_unload;
5922                 ext4_lock_group(sb, group);
5923 
5924                 start = next + 1;
5925         }
5926 
5927         ext4_unlock_group(sb, group);
5928 out_unload:
5929         ext4_mb_unload_buddy(&e4b);
5930 
5931         return error;
5932 }
5933 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | Wiki (Japanese) | Wiki (English) | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

osdn.jp