TOMOYO Linux Cross Reference
Linux/fs/xfs/scrub/refcount.c

  1 // SPDX-License-Identifier: GPL-2.0+
  2 /*
  3  * Copyright (C) 2017 Oracle.  All Rights Reserved.
  4  * Author: Darrick J. Wong <darrick.wong@oracle.com>
  5  */
  6 #include "xfs.h"
  7 #include "xfs_fs.h"
  8 #include "xfs_shared.h"
  9 #include "xfs_format.h"
 10 #include "xfs_btree.h"
 11 #include "xfs_rmap.h"
 12 #include "xfs_refcount.h"
 13 #include "scrub/scrub.h"
 14 #include "scrub/common.h"
 15 #include "scrub/btree.h"
 16 
 17 /*
 18  * Set us up to scrub reference count btrees.
 19  */
 20 int
 21 xchk_setup_ag_refcountbt(
 22         struct xfs_scrub        *sc,
 23         struct xfs_inode        *ip)
 24 {
 25         return xchk_setup_ag_btree(sc, ip, false);
 26 }
 27 
 28 /* Reference count btree scrubber. */
 29 
 30 /*
 31  * Confirming Reference Counts via Reverse Mappings
 32  *
 33  * We want to count the reverse mappings overlapping a refcount record
 34  * (bno, len, refcount), allowing for the possibility that some of the
 35  * overlap may come from smaller adjoining reverse mappings, while some
 36  * comes from single extents which overlap the range entirely.  The
 37  * outer loop is as follows:
 38  *
 39  * 1. For all reverse mappings overlapping the refcount extent,
 40  *    a. If a given rmap completely overlaps, mark it as seen.
 41  *    b. Otherwise, record the fragment (in agbno order) for later
 42  *       processing.
 43  *
 44  * Once we've seen all the rmaps, we know that for all blocks in the
 45  * refcount record we want to find $refcount owners and we've already
 46  * visited $seen extents that overlap all the blocks.  Therefore, we
 47  * need to find ($refcount - $seen) owners for every block in the
 48  * extent; call that quantity $target_nr.  Proceed as follows:
 49  *
 50  * 2. Pull the first $target_nr fragments from the list; all of them
 51  *    should start at or before the start of the extent.
 52  *    Call this subset of fragments the working set.
 53  * 3. Until there are no more unprocessed fragments,
 54  *    a. Find the shortest fragments in the set and remove them.
 55  *    b. Note the block number of the end of these fragments.
 56  *    c. Pull the same number of fragments from the list.  All of these
 57  *       fragments should start at the block number recorded in the
 58  *       previous step.
 59  *    d. Put those fragments in the set.
 60  * 4. Check that there are $target_nr fragments remaining in the list,
 61  *    and that they all end at or beyond the end of the refcount extent.
 62  *
 63  * If the refcount is correct, all the check conditions in the algorithm
 64  * should always hold true.  If not, the refcount is incorrect.
 65  */
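
/*
 * Editor's sketch (not part of the original file): the walk described
 * above exists so the scrubber never has to visit every block in the
 * refcount record.  The property it verifies is simply that each block
 * in [bno, bno + len) is covered by exactly @refcount reverse mappings.
 * The self-contained helper below states that property brute-force with
 * plain arrays; the type and function names (ex_rmap, ex_refcount_ok)
 * are hypothetical and only illustrate the invariant, not the kernel
 * implementation.
 */
struct ex_rmap {
	unsigned int		startblock;
	unsigned int		blockcount;
};

static inline bool
ex_refcount_ok(
	const struct ex_rmap	*rmaps,
	unsigned int		nr_rmaps,
	unsigned int		bno,
	unsigned int		len,
	unsigned int		refcount)
{
	unsigned int		block, i, owners;

	for (block = bno; block < bno + len; block++) {
		owners = 0;
		for (i = 0; i < nr_rmaps; i++) {
			/* does this rmap cover @block? */
			if (rmaps[i].startblock <= block &&
			    block < rmaps[i].startblock + rmaps[i].blockcount)
				owners++;
		}
		/* every block must have exactly @refcount owners */
		if (owners != refcount)
			return false;
	}
	return true;
}
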
 66 struct xchk_refcnt_frag {
 67         struct list_head        list;
 68         struct xfs_rmap_irec    rm;
 69 };
 70 
 71 struct xchk_refcnt_check {
 72         struct xfs_scrub        *sc;
 73         struct list_head        fragments;
 74 
 75         /* refcount extent we're examining */
 76         xfs_agblock_t           bno;
 77         xfs_extlen_t            len;
 78         xfs_nlink_t             refcount;
 79 
 80         /* number of owners seen */
 81         xfs_nlink_t             seen;
 82 };
 83 
 84 /*
 85  * Decide if the given rmap is large enough that we can redeem it
 86  * towards refcount verification now, or if it's a fragment, in
 87  * which case we'll hang onto it in the hopes that we'll later
 88  * discover that we've collected exactly the correct number of
 89  * fragments as the refcountbt says we should have.
 90  */
 91 STATIC int
 92 xchk_refcountbt_rmap_check(
 93         struct xfs_btree_cur            *cur,
 94         struct xfs_rmap_irec            *rec,
 95         void                            *priv)
 96 {
 97         struct xchk_refcnt_check        *refchk = priv;
 98         struct xchk_refcnt_frag         *frag;
 99         xfs_agblock_t                   rm_last;
100         xfs_agblock_t                   rc_last;
101         int                             error = 0;
102 
103         if (xchk_should_terminate(refchk->sc, &error))
104                 return error;
105 
106         rm_last = rec->rm_startblock + rec->rm_blockcount - 1;
107         rc_last = refchk->bno + refchk->len - 1;
108 
109         /* Confirm that a single-owner refc extent is a CoW stage. */
110         if (refchk->refcount == 1 && rec->rm_owner != XFS_RMAP_OWN_COW) {
111                 xchk_btree_xref_set_corrupt(refchk->sc, cur, 0);
112                 return 0;
113         }
114 
115         if (rec->rm_startblock <= refchk->bno && rm_last >= rc_last) {
116                 /*
117                  * The rmap overlaps the refcount record, so we can confirm
118                  * one refcount owner seen.
119                  */
120                 refchk->seen++;
121         } else {
122                 /*
123                  * This rmap covers only part of the refcount record, so
124                  * save the fragment for later processing.  If the rmapbt
125                  * is healthy each rmap_irec we see will be in agbno order
126                  * so we don't need insertion sort here.
127                  */
128                 frag = kmem_alloc(sizeof(struct xchk_refcnt_frag),
129                                 KM_MAYFAIL);
130                 if (!frag)
131                         return -ENOMEM;
132                 memcpy(&frag->rm, rec, sizeof(frag->rm));
133                 list_add_tail(&frag->list, &refchk->fragments);
134         }
135 
136         return 0;
137 }
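
/*
 * Editor's worked example (illustrative only, not part of the original
 * file): for a refcount record (bno = 100, len = 10, refcount = 2),
 * rc_last is 109.  An rmap (rm_startblock = 90, rm_blockcount = 30)
 * has rm_last = 119; since 90 <= 100 and 119 >= 109 it covers the
 * whole record, so refchk->seen is incremented.  An rmap
 * (rm_startblock = 100, rm_blockcount = 4) has rm_last = 103 < 109,
 * so it only partially covers the record and is queued on
 * refchk->fragments for xchk_refcountbt_process_rmap_fragments().
 */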
138 
139 /*
140  * Given a bunch of rmap fragments, iterate through them, keeping
141  * a running tally of the refcount.  If this ever deviates from
142  * what we expect (which is the refcountbt's refcount minus the
143  * number of extents that totally covered the refcountbt extent),
144  * we have a refcountbt error.
145  */
146 STATIC void
147 xchk_refcountbt_process_rmap_fragments(
148         struct xchk_refcnt_check        *refchk)
149 {
150         struct list_head                worklist;
151         struct xchk_refcnt_frag         *frag;
152         struct xchk_refcnt_frag         *n;
153         xfs_agblock_t                   bno;
154         xfs_agblock_t                   rbno;
155         xfs_agblock_t                   next_rbno;
156         xfs_nlink_t                     nr;
157         xfs_nlink_t                     target_nr;
158 
159         target_nr = refchk->refcount - refchk->seen;
160         if (target_nr == 0)
161                 return;
162 
163         /*
164          * There are (refchk->refcount - refchk->seen) references we
165          * haven't found yet.  Pull that many off the fragment list and
166          * figure out where the smallest rmap ends (and therefore the
167          * next rmap should start).  All the rmaps we pull off should
168          * start at or before the beginning of the refcount record's
169          * range.
170          */
171         INIT_LIST_HEAD(&worklist);
172         rbno = NULLAGBLOCK;
173         nr = 1;
174 
175         /* Make sure the fragments actually /are/ in agbno order. */
176         bno = 0;
177         list_for_each_entry(frag, &refchk->fragments, list) {
178                 if (frag->rm.rm_startblock < bno)
179                         goto done;
180                 bno = frag->rm.rm_startblock;
181         }
182 
183         /*
184          * Find all the rmaps that start at or before the refc extent,
185          * and put them on the worklist.
186          */
187         list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
188                 if (frag->rm.rm_startblock > refchk->bno)
189                         goto done;
190                 bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
191                 if (bno < rbno)
192                         rbno = bno;
193                 list_move_tail(&frag->list, &worklist);
194                 if (nr == target_nr)
195                         break;
196                 nr++;
197         }
198 
199         /*
200          * We should have found exactly $target_nr rmap fragments starting
201          * at or before the refcount extent.
202          */
203         if (nr != target_nr)
204                 goto done;
205 
206         while (!list_empty(&refchk->fragments)) {
207                 /* Discard any fragments ending at rbno from the worklist. */
208                 nr = 0;
209                 next_rbno = NULLAGBLOCK;
210                 list_for_each_entry_safe(frag, n, &worklist, list) {
211                         bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
212                         if (bno != rbno) {
213                                 if (bno < next_rbno)
214                                         next_rbno = bno;
215                                 continue;
216                         }
217                         list_del(&frag->list);
218                         kmem_free(frag);
219                         nr++;
220                 }
221 
222                 /* Try to add nr rmaps starting at rbno to the worklist. */
223                 list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
224                         bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
225                         if (frag->rm.rm_startblock != rbno)
226                                 goto done;
227                         list_move_tail(&frag->list, &worklist);
228                         if (next_rbno > bno)
229                                 next_rbno = bno;
230                         nr--;
231                         if (nr == 0)
232                                 break;
233                 }
234 
235                 /*
236                  * If we get here and nr > 0, this means that we added fewer
237                  * items to the worklist than we discarded because the fragment
238                  * list ran out of items.  Therefore, we cannot maintain the
239                  * required refcount.  Something is wrong, so we're done.
240                  */
241                 if (nr)
242                         goto done;
243 
244                 rbno = next_rbno;
245         }
246 
247         /*
248          * Make sure the last extent we processed ends at or beyond
249          * the end of the refcount extent.
250          */
251         if (rbno < refchk->bno + refchk->len)
252                 goto done;
253 
254         /* Actually record us having seen the remaining refcount. */
255         refchk->seen = refchk->refcount;
256 done:
257         /* Delete fragments and work list. */
258         list_for_each_entry_safe(frag, n, &worklist, list) {
259                 list_del(&frag->list);
260                 kmem_free(frag);
261         }
262         list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
263                 list_del(&frag->list);
264                 kmem_free(frag);
265         }
266 }
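
/*
 * Editor's worked trace (illustrative only, not part of the original
 * file): take a refcount record (bno = 100, len = 10, refcount = 3)
 * with no fully covering rmaps, so target_nr = 3, and fragments in
 * agbno order: (95,8) (100,5) (100,10) (103,7) (105,5), written as
 * (rm_startblock, rm_blockcount).  The first pass moves (95,8),
 * (100,5) and (100,10) to the worklist; the smallest end is rbno = 103.
 * The first loop iteration discards (95,8), which ends at 103, and
 * pulls (103,7), which starts there; the next smallest end is 105.
 * The second iteration discards (100,5) and pulls (105,5).  The
 * fragment list is now empty and rbno = 110 = bno + len, so every
 * block had exactly three owners and refchk->seen is bumped to the
 * full refcount.
 */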
267 
268 /* Use the rmap entries covering this extent to verify the refcount. */
269 STATIC void
270 xchk_refcountbt_xref_rmap(
271         struct xfs_scrub                *sc,
272         xfs_agblock_t                   bno,
273         xfs_extlen_t                    len,
274         xfs_nlink_t                     refcount)
275 {
276         struct xchk_refcnt_check        refchk = {
277                 .sc = sc,
278                 .bno = bno,
279                 .len = len,
280                 .refcount = refcount,
281                 .seen = 0,
282         };
283         struct xfs_rmap_irec            low;
284         struct xfs_rmap_irec            high;
285         struct xchk_refcnt_frag         *frag;
286         struct xchk_refcnt_frag         *n;
287         int                             error;
288 
289         if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
290                 return;
291 
292         /* Cross-reference with the rmapbt to confirm the refcount. */
293         memset(&low, 0, sizeof(low));
294         low.rm_startblock = bno;
295         memset(&high, 0xFF, sizeof(high));
296         high.rm_startblock = bno + len - 1;
297 
298         INIT_LIST_HEAD(&refchk.fragments);
299         error = xfs_rmap_query_range(sc->sa.rmap_cur, &low, &high,
300                         &xchk_refcountbt_rmap_check, &refchk);
301         if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
302                 goto out_free;
303 
304         xchk_refcountbt_process_rmap_fragments(&refchk);
305         if (refcount != refchk.seen)
306                 xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
307 
308 out_free:
309         list_for_each_entry_safe(frag, n, &refchk.fragments, list) {
310                 list_del(&frag->list);
311                 kmem_free(frag);
312         }
313 }
314 
315 /* Cross-reference with the other btrees. */
316 STATIC void
317 xchk_refcountbt_xref(
318         struct xfs_scrub        *sc,
319         xfs_agblock_t           agbno,
320         xfs_extlen_t            len,
321         xfs_nlink_t             refcount)
322 {
323         if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
324                 return;
325 
326         xchk_xref_is_used_space(sc, agbno, len);
327         xchk_xref_is_not_inode_chunk(sc, agbno, len);
328         xchk_refcountbt_xref_rmap(sc, agbno, len, refcount);
329 }
330 
331 /* Scrub a refcountbt record. */
332 STATIC int
333 xchk_refcountbt_rec(
334         struct xchk_btree       *bs,
335         union xfs_btree_rec     *rec)
336 {
337         struct xfs_mount        *mp = bs->cur->bc_mp;
338         xfs_agblock_t           *cow_blocks = bs->private;
339         xfs_agnumber_t          agno = bs->cur->bc_private.a.agno;
340         xfs_agblock_t           bno;
341         xfs_extlen_t            len;
342         xfs_nlink_t             refcount;
343         bool                    has_cowflag;
344         int                     error = 0;
345 
346         bno = be32_to_cpu(rec->refc.rc_startblock);
347         len = be32_to_cpu(rec->refc.rc_blockcount);
348         refcount = be32_to_cpu(rec->refc.rc_refcount);
349 
350         /* Only CoW records can have refcount == 1. */
351         has_cowflag = (bno & XFS_REFC_COW_START);
352         if ((refcount == 1 && !has_cowflag) || (refcount != 1 && has_cowflag))
353                 xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
354         if (has_cowflag)
355                 (*cow_blocks) += len;
356 
357         /* Check the extent. */
358         bno &= ~XFS_REFC_COW_START;
359         if (bno + len <= bno ||
360             !xfs_verify_agbno(mp, agno, bno) ||
361             !xfs_verify_agbno(mp, agno, bno + len - 1))
362                 xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
363 
364         if (refcount == 0)
365                 xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
366 
367         xchk_refcountbt_xref(bs->sc, bno, len, refcount);
368 
369         return error;
370 }
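
/*
 * Editor's sketch (not part of the original file): CoW staging records
 * carry a flag bit in rc_startblock, which is why the record scrubber
 * above masks it off before the agbno range checks.  Assuming the flag
 * is the high bit of the 32-bit AG block number (as XFS_REFC_COW_START
 * is defined elsewhere in XFS), decoding a raw start block looks
 * roughly like this; ex_decode_refc_start() is a hypothetical helper.
 */
static inline void
ex_decode_refc_start(
	xfs_agblock_t		raw,	/* rc_startblock as stored on disk */
	xfs_agblock_t		*bno,	/* physical AG block number */
	bool			*is_cow)	/* true for a CoW staging extent */
{
	*is_cow = (raw & XFS_REFC_COW_START) != 0;
	*bno = raw & ~XFS_REFC_COW_START;
}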
371 
372 /* Make sure we have as many refc blocks as the rmap says. */
373 STATIC void
374 xchk_refcount_xref_rmap(
375         struct xfs_scrub        *sc,
376         xfs_filblks_t           cow_blocks)
377 {
378         xfs_extlen_t            refcbt_blocks = 0;
379         xfs_filblks_t           blocks;
380         int                     error;
381 
382         if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
383                 return;
384 
385         /* Check that we saw as many refcbt blocks as the rmap knows about. */
386         error = xfs_btree_count_blocks(sc->sa.refc_cur, &refcbt_blocks);
387         if (!xchk_btree_process_error(sc, sc->sa.refc_cur, 0, &error))
388                 return;
389         error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
390                         &XFS_RMAP_OINFO_REFC, &blocks);
391         if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
392                 return;
393         if (blocks != refcbt_blocks)
394                 xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
395 
396         /* Check that we saw as many cow blocks as the rmap knows about. */
397         error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
398                         &XFS_RMAP_OINFO_COW, &blocks);
399         if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
400                 return;
401         if (blocks != cow_blocks)
402                 xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
403 }
404 
405 /* Scrub the refcount btree for some AG. */
406 int
407 xchk_refcountbt(
408         struct xfs_scrub        *sc)
409 {
410         xfs_agblock_t           cow_blocks = 0;
411         int                     error;
412 
413         error = xchk_btree(sc, sc->sa.refc_cur, xchk_refcountbt_rec,
414                         &XFS_RMAP_OINFO_REFC, &cow_blocks);
415         if (error)
416                 return error;
417 
418         xchk_refcount_xref_rmap(sc, cow_blocks);
419 
420         return 0;
421 }
422 
423 /* xref check that a cow staging extent is marked in the refcountbt. */
424 void
425 xchk_xref_is_cow_staging(
426         struct xfs_scrub                *sc,
427         xfs_agblock_t                   agbno,
428         xfs_extlen_t                    len)
429 {
430         struct xfs_refcount_irec        rc;
431         bool                            has_cowflag;
432         int                             has_refcount;
433         int                             error;
434 
435         if (!sc->sa.refc_cur || xchk_skip_xref(sc->sm))
436                 return;
437 
438         /* Find the CoW staging extent. */
439         error = xfs_refcount_lookup_le(sc->sa.refc_cur,
440                         agbno + XFS_REFC_COW_START, &has_refcount);
441         if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
442                 return;
443         if (!has_refcount) {
444                 xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
445                 return;
446         }
447 
448         error = xfs_refcount_get_rec(sc->sa.refc_cur, &rc, &has_refcount);
449         if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
450                 return;
451         if (!has_refcount) {
452                 xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
453                 return;
454         }
455 
456         /* CoW flag must be set, refcount must be 1. */
457         has_cowflag = (rc.rc_startblock & XFS_REFC_COW_START);
458         if (!has_cowflag || rc.rc_refcount != 1)
459                 xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
460 
461         /* Must be at least as long as what was passed in */
462         if (rc.rc_blockcount < len)
463                 xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
464 }
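
/*
 * Editor's note with a worked value (illustrative only, not part of the
 * original file): CoW staging records are keyed with the flag bit set in
 * their start block, which is why the lookup above uses
 * agbno + XFS_REFC_COW_START.  Assuming the flag is the high bit of the
 * 32-bit agblock, a staging extent at agbno 0x1234 is looked up with key
 * 0x80001234.
 */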
465 
466 /*
467  * xref check that the extent is not shared.  Only file data blocks
468  * can have multiple owners.
469  */
470 void
471 xchk_xref_is_not_shared(
472         struct xfs_scrub        *sc,
473         xfs_agblock_t           agbno,
474         xfs_extlen_t            len)
475 {
476         bool                    shared;
477         int                     error;
478 
479         if (!sc->sa.refc_cur || xchk_skip_xref(sc->sm))
480                 return;
481 
482         error = xfs_refcount_has_record(sc->sa.refc_cur, agbno, len, &shared);
483         if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
484                 return;
485         if (shared)
486                 xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
487 }
488 
