
TOMOYO Linux Cross Reference
Linux/fs/xfs/xfs_dquot.c

  1 // SPDX-License-Identifier: GPL-2.0
  2 /*
  3  * Copyright (c) 2000-2003 Silicon Graphics, Inc.
  4  * All Rights Reserved.
  5  */
  6 #include "xfs.h"
  7 #include "xfs_fs.h"
  8 #include "xfs_format.h"
  9 #include "xfs_log_format.h"
 10 #include "xfs_shared.h"
 11 #include "xfs_trans_resv.h"
 12 #include "xfs_bit.h"
 13 #include "xfs_mount.h"
 14 #include "xfs_defer.h"
 15 #include "xfs_inode.h"
 16 #include "xfs_bmap.h"
 17 #include "xfs_quota.h"
 18 #include "xfs_trans.h"
 19 #include "xfs_buf_item.h"
 20 #include "xfs_trans_space.h"
 21 #include "xfs_trans_priv.h"
 22 #include "xfs_qm.h"
 23 #include "xfs_trace.h"
 24 #include "xfs_log.h"
 25 #include "xfs_bmap_btree.h"
 26 #include "xfs_error.h"
 27 
 28 /*
 29  * Lock order:
 30  *
 31  * ip->i_lock
 32  *   qi->qi_tree_lock
 33  *     dquot->q_qlock (xfs_dqlock() and friends)
 34  *       dquot->q_flush (xfs_dqflock() and friends)
 35  *       qi->qi_lru_lock
 36  *
 37  * If two dquots need to be locked the order is user before group/project,
 38  * otherwise by the lowest id first, see xfs_dqlock2.
 39  */
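
/*
 * Illustrative sketch (not part of the original file): taking the dquot
 * lock and then the flush "lock" in the nesting order documented above,
 * using the helpers declared in xfs_dquot.h:
 *
 *	xfs_dqlock(dqp);			dquot->q_qlock first
 *	if (xfs_dqflock_nowait(dqp)) {		then dquot->q_flush
 *		... write the dquot back ...
 *		xfs_dqfunlock(dqp);
 *	}
 *	xfs_dqunlock(dqp);
 */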
 40 
 41 struct kmem_zone                *xfs_qm_dqtrxzone;
 42 static struct kmem_zone         *xfs_qm_dqzone;
 43 
 44 static struct lock_class_key xfs_dquot_group_class;
 45 static struct lock_class_key xfs_dquot_project_class;
 46 
 47 /*
 48  * This is called to free all the memory associated with a dquot
 49  */
 50 void
 51 xfs_qm_dqdestroy(
 52         struct xfs_dquot        *dqp)
 53 {
 54         ASSERT(list_empty(&dqp->q_lru));
 55 
 56         kmem_free(dqp->q_logitem.qli_item.li_lv_shadow);
 57         mutex_destroy(&dqp->q_qlock);
 58 
 59         XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot);
 60         kmem_cache_free(xfs_qm_dqzone, dqp);
 61 }
 62 
 63 /*
 64  * If default limits are in force, push them into the dquot now.
 65  * We overwrite the dquot limits only if they are zero and this
 66  * is not the root dquot.
 67  */
 68 void
 69 xfs_qm_adjust_dqlimits(
 70         struct xfs_dquot        *dq)
 71 {
 72         struct xfs_mount        *mp = dq->q_mount;
 73         struct xfs_quotainfo    *q = mp->m_quotainfo;
 74         struct xfs_def_quota    *defq;
 75         int                     prealloc = 0;
 76 
 77         ASSERT(dq->q_id);
 78         defq = xfs_get_defquota(q, xfs_dquot_type(dq));
 79 
 80         if (!dq->q_blk.softlimit) {
 81                 dq->q_blk.softlimit = defq->blk.soft;
 82                 prealloc = 1;
 83         }
 84         if (!dq->q_blk.hardlimit) {
 85                 dq->q_blk.hardlimit = defq->blk.hard;
 86                 prealloc = 1;
 87         }
 88         if (!dq->q_ino.softlimit)
 89                 dq->q_ino.softlimit = defq->ino.soft;
 90         if (!dq->q_ino.hardlimit)
 91                 dq->q_ino.hardlimit = defq->ino.hard;
 92         if (!dq->q_rtb.softlimit)
 93                 dq->q_rtb.softlimit = defq->rtb.soft;
 94         if (!dq->q_rtb.hardlimit)
 95                 dq->q_rtb.hardlimit = defq->rtb.hard;
 96 
 97         if (prealloc)
 98                 xfs_dquot_set_prealloc_limits(dq);
 99 }
100 
101 /* Set the expiration time of a quota's grace period. */
102 time64_t
103 xfs_dquot_set_timeout(
104         struct xfs_mount        *mp,
105         time64_t                timeout)
106 {
107         struct xfs_quotainfo    *qi = mp->m_quotainfo;
108 
109         return clamp_t(time64_t, timeout, qi->qi_expiry_min,
110                                           qi->qi_expiry_max);
111 }
112 
113 /* Set the length of the default grace period. */
114 time64_t
115 xfs_dquot_set_grace_period(
116         time64_t                grace)
117 {
118         return clamp_t(time64_t, grace, XFS_DQ_GRACE_MIN, XFS_DQ_GRACE_MAX);
119 }
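
/*
 * Worked example (illustrative): clamp_t() keeps the result inside the
 * inclusive [min, max] range, so
 *
 *	xfs_dquot_set_grace_period(-1)
 *
 * comes back as XFS_DQ_GRACE_MIN, and a value beyond XFS_DQ_GRACE_MAX
 * comes back as XFS_DQ_GRACE_MAX; xfs_dquot_set_timeout() behaves the
 * same way with the per-mount qi_expiry_min/qi_expiry_max bounds.
 */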
120 
121 /*
122  * Determine if this quota counter is over either limit and set the quota
123  * timers as appropriate.
124  */
125 static inline void
126 xfs_qm_adjust_res_timer(
127         struct xfs_mount        *mp,
128         struct xfs_dquot_res    *res,
129         struct xfs_quota_limits *qlim)
130 {
131         ASSERT(res->hardlimit == 0 || res->softlimit <= res->hardlimit);
132 
133         if ((res->softlimit && res->count > res->softlimit) ||
134             (res->hardlimit && res->count > res->hardlimit)) {
135                 if (res->timer == 0)
136                         res->timer = xfs_dquot_set_timeout(mp,
137                                         ktime_get_real_seconds() + qlim->time);
138         } else {
139                 if (res->timer == 0)
140                         res->warnings = 0;
141                 else
142                         res->timer = 0;
143         }
144 }
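
/*
 * Worked example (hypothetical numbers): with res->softlimit = 100 blocks,
 * res->count = 150 and qlim->time = 7 days, the first call arms res->timer
 * at "now + 7 days" (clamped by xfs_dquot_set_timeout()).  Once the count
 * drops back under the soft limit, the next call clears the timer, and the
 * call after that resets res->warnings to zero.
 */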
145 
146 /*
147  * Check the limits and timers of a dquot and start or reset timers
148  * if necessary.
 149  * This gets called even when quota enforcement is OFF, which makes our
 150  * life a little less complicated. (We just don't reject any quota
 151  * reservations in that case.)
 152  * We also return 0 as the values of the timers in Q_GETQUOTA calls when
 153  * enforcement is off.
 154  * Warnings are a little different in that they don't get started
 155  * 'automatically' when limits get exceeded.  They do get reset to
 156  * zero, however, when we find the count to be under the soft limit
 157  * (they are only ever set non-zero via userspace).
158  */
159 void
160 xfs_qm_adjust_dqtimers(
161         struct xfs_dquot        *dq)
162 {
163         struct xfs_mount        *mp = dq->q_mount;
164         struct xfs_quotainfo    *qi = mp->m_quotainfo;
165         struct xfs_def_quota    *defq;
166 
167         ASSERT(dq->q_id);
168         defq = xfs_get_defquota(qi, xfs_dquot_type(dq));
169 
170         xfs_qm_adjust_res_timer(dq->q_mount, &dq->q_blk, &defq->blk);
171         xfs_qm_adjust_res_timer(dq->q_mount, &dq->q_ino, &defq->ino);
172         xfs_qm_adjust_res_timer(dq->q_mount, &dq->q_rtb, &defq->rtb);
173 }
174 
175 /*
176  * initialize a buffer full of dquots and log the whole thing
177  */
178 STATIC void
179 xfs_qm_init_dquot_blk(
180         struct xfs_trans        *tp,
181         struct xfs_mount        *mp,
182         xfs_dqid_t              id,
183         xfs_dqtype_t            type,
184         struct xfs_buf          *bp)
185 {
186         struct xfs_quotainfo    *q = mp->m_quotainfo;
187         struct xfs_dqblk        *d;
188         xfs_dqid_t              curid;
189         unsigned int            qflag;
190         unsigned int            blftype;
191         int                     i;
192 
193         ASSERT(tp);
194         ASSERT(xfs_buf_islocked(bp));
195 
196         switch (type) {
197         case XFS_DQTYPE_USER:
198                 qflag = XFS_UQUOTA_CHKD;
199                 blftype = XFS_BLF_UDQUOT_BUF;
200                 break;
201         case XFS_DQTYPE_PROJ:
202                 qflag = XFS_PQUOTA_CHKD;
203                 blftype = XFS_BLF_PDQUOT_BUF;
204                 break;
205         case XFS_DQTYPE_GROUP:
206                 qflag = XFS_GQUOTA_CHKD;
207                 blftype = XFS_BLF_GDQUOT_BUF;
208                 break;
209         default:
210                 ASSERT(0);
211                 return;
212         }
213 
214         d = bp->b_addr;
215 
216         /*
 217          * ID of the first dquot in the block - IDs are zero based.
218          */
219         curid = id - (id % q->qi_dqperchunk);
220         memset(d, 0, BBTOB(q->qi_dqchunklen));
221         for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++) {
222                 d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
223                 d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
224                 d->dd_diskdq.d_id = cpu_to_be32(curid);
225                 d->dd_diskdq.d_type = type;
226                 if (curid > 0 && xfs_sb_version_hasbigtime(&mp->m_sb))
227                         d->dd_diskdq.d_type |= XFS_DQTYPE_BIGTIME;
228                 if (xfs_sb_version_hascrc(&mp->m_sb)) {
229                         uuid_copy(&d->dd_uuid, &mp->m_sb.sb_meta_uuid);
230                         xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
231                                          XFS_DQUOT_CRC_OFF);
232                 }
233         }
234 
235         xfs_trans_dquot_buf(tp, bp, blftype);
236 
237         /*
238          * quotacheck uses delayed writes to update all the dquots on disk in an
239          * efficient manner instead of logging the individual dquot changes as
240          * they are made. However if we log the buffer allocated here and crash
241          * after quotacheck while the logged initialisation is still in the
242          * active region of the log, log recovery can replay the dquot buffer
243          * initialisation over the top of the checked dquots and corrupt quota
244          * accounting.
245          *
246          * To avoid this problem, quotacheck cannot log the initialised buffer.
247          * We must still dirty the buffer and write it back before the
248          * allocation transaction clears the log. Therefore, mark the buffer as
249          * ordered instead of logging it directly. This is safe for quotacheck
 250          * because it detects and repairs allocated but uninitialized dquot
 251          * blocks in the quota inodes.
252          */
253         if (!(mp->m_qflags & qflag))
254                 xfs_trans_ordered_buf(tp, bp);
255         else
256                 xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1);
257 }
258 
259 /*
260  * Initialize the dynamic speculative preallocation thresholds. The lo/hi
261  * watermarks correspond to the soft and hard limits by default. If a soft limit
262  * is not specified, we use 95% of the hard limit.
263  */
264 void
265 xfs_dquot_set_prealloc_limits(struct xfs_dquot *dqp)
266 {
267         uint64_t space;
268 
269         dqp->q_prealloc_hi_wmark = dqp->q_blk.hardlimit;
270         dqp->q_prealloc_lo_wmark = dqp->q_blk.softlimit;
271         if (!dqp->q_prealloc_lo_wmark) {
272                 dqp->q_prealloc_lo_wmark = dqp->q_prealloc_hi_wmark;
273                 do_div(dqp->q_prealloc_lo_wmark, 100);
274                 dqp->q_prealloc_lo_wmark *= 95;
275         }
276 
277         space = dqp->q_prealloc_hi_wmark;
278 
279         do_div(space, 100);
280         dqp->q_low_space[XFS_QLOWSP_1_PCNT] = space;
281         dqp->q_low_space[XFS_QLOWSP_3_PCNT] = space * 3;
282         dqp->q_low_space[XFS_QLOWSP_5_PCNT] = space * 5;
283 }
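
/*
 * Worked example (hypothetical limits): with q_blk.hardlimit = 1000 blocks
 * and no soft limit, the function above sets q_prealloc_hi_wmark = 1000,
 * q_prealloc_lo_wmark = 950 (95% of the hard limit), and q_low_space[] =
 * { 10, 30, 50 }, i.e. the 1%, 3% and 5% thresholds that the speculative
 * preallocation throttling checks against.
 */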
284 
285 /*
286  * Ensure that the given in-core dquot has a buffer on disk backing it, and
287  * return the buffer locked and held. This is called when the bmapi finds a
288  * hole.
289  */
290 STATIC int
291 xfs_dquot_disk_alloc(
292         struct xfs_trans        **tpp,
293         struct xfs_dquot        *dqp,
294         struct xfs_buf          **bpp)
295 {
296         struct xfs_bmbt_irec    map;
297         struct xfs_trans        *tp = *tpp;
298         struct xfs_mount        *mp = tp->t_mountp;
299         struct xfs_buf          *bp;
300         xfs_dqtype_t            qtype = xfs_dquot_type(dqp);
301         struct xfs_inode        *quotip = xfs_quota_inode(mp, qtype);
302         int                     nmaps = 1;
303         int                     error;
304 
305         trace_xfs_dqalloc(dqp);
306 
307         xfs_ilock(quotip, XFS_ILOCK_EXCL);
308         if (!xfs_this_quota_on(dqp->q_mount, qtype)) {
309                 /*
 310                  * Return if this type of quota was turned off while we
 311                  * didn't hold the inode lock.
312                  */
313                 xfs_iunlock(quotip, XFS_ILOCK_EXCL);
314                 return -ESRCH;
315         }
316 
317         /* Create the block mapping. */
318         xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL);
319         error = xfs_bmapi_write(tp, quotip, dqp->q_fileoffset,
320                         XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA, 0, &map,
321                         &nmaps);
322         if (error)
323                 return error;
324         ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
325         ASSERT(nmaps == 1);
326         ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
327                (map.br_startblock != HOLESTARTBLOCK));
328 
329         /*
330          * Keep track of the blkno to save a lookup later
331          */
332         dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
333 
334         /* now we can just get the buffer (there's nothing to read yet) */
335         error = xfs_trans_get_buf(tp, mp->m_ddev_targp, dqp->q_blkno,
336                         mp->m_quotainfo->qi_dqchunklen, 0, &bp);
337         if (error)
338                 return error;
339         bp->b_ops = &xfs_dquot_buf_ops;
340 
341         /*
342          * Make a chunk of dquots out of this buffer and log
343          * the entire thing.
344          */
345         xfs_qm_init_dquot_blk(tp, mp, dqp->q_id, qtype, bp);
346         xfs_buf_set_ref(bp, XFS_DQUOT_REF);
347 
348         /*
349          * Hold the buffer and join it to the dfops so that we'll still own
350          * the buffer when we return to the caller.  The buffer disposal on
351          * error must be paid attention to very carefully, as it has been
352          * broken since commit efa092f3d4c6 "[XFS] Fixes a bug in the quota
353          * code when allocating a new dquot record" in 2005, and the later
354          * conversion to xfs_defer_ops in commit 310a75a3c6c747 failed to keep
355          * the buffer locked across the _defer_finish call.  We can now do
356          * this correctly with xfs_defer_bjoin.
357          *
358          * Above, we allocated a disk block for the dquot information and used
359          * get_buf to initialize the dquot. If the _defer_finish fails, the old
360          * transaction is gone but the new buffer is not joined or held to any
361          * transaction, so we must _buf_relse it.
362          *
363          * If everything succeeds, the caller of this function is returned a
364          * buffer that is locked and held to the transaction.  The caller
365          * is responsible for unlocking any buffer passed back, either
366          * manually or by committing the transaction.  On error, the buffer is
367          * released and not passed back.
368          */
369         xfs_trans_bhold(tp, bp);
370         error = xfs_defer_finish(tpp);
371         if (error) {
372                 xfs_trans_bhold_release(*tpp, bp);
373                 xfs_trans_brelse(*tpp, bp);
374                 return error;
375         }
376         *bpp = bp;
377         return 0;
378 }
379 
380 /*
381  * Read in the in-core dquot's on-disk metadata and return the buffer.
382  * Returns ENOENT to signal a hole.
383  */
384 STATIC int
385 xfs_dquot_disk_read(
386         struct xfs_mount        *mp,
387         struct xfs_dquot        *dqp,
388         struct xfs_buf          **bpp)
389 {
390         struct xfs_bmbt_irec    map;
391         struct xfs_buf          *bp;
392         xfs_dqtype_t            qtype = xfs_dquot_type(dqp);
393         struct xfs_inode        *quotip = xfs_quota_inode(mp, qtype);
394         uint                    lock_mode;
395         int                     nmaps = 1;
396         int                     error;
397 
398         lock_mode = xfs_ilock_data_map_shared(quotip);
399         if (!xfs_this_quota_on(mp, qtype)) {
400                 /*
 401                  * Return if this type of quota was turned off while we
 402                  * didn't hold the quota inode lock.
403                  */
404                 xfs_iunlock(quotip, lock_mode);
405                 return -ESRCH;
406         }
407 
408         /*
409          * Find the block map; no allocations yet
410          */
411         error = xfs_bmapi_read(quotip, dqp->q_fileoffset,
412                         XFS_DQUOT_CLUSTER_SIZE_FSB, &map, &nmaps, 0);
413         xfs_iunlock(quotip, lock_mode);
414         if (error)
415                 return error;
416 
417         ASSERT(nmaps == 1);
418         ASSERT(map.br_blockcount >= 1);
419         ASSERT(map.br_startblock != DELAYSTARTBLOCK);
420         if (map.br_startblock == HOLESTARTBLOCK)
421                 return -ENOENT;
422 
423         trace_xfs_dqtobp_read(dqp);
424 
425         /*
 426          * Store the blkno etc. so that we don't have to do the
 427          * mapping all the time.
428          */
429         dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
430 
431         error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
432                         mp->m_quotainfo->qi_dqchunklen, 0, &bp,
433                         &xfs_dquot_buf_ops);
434         if (error) {
435                 ASSERT(bp == NULL);
436                 return error;
437         }
438 
439         ASSERT(xfs_buf_islocked(bp));
440         xfs_buf_set_ref(bp, XFS_DQUOT_REF);
441         *bpp = bp;
442 
443         return 0;
444 }
445 
446 /* Allocate and initialize everything we need for an incore dquot. */
447 STATIC struct xfs_dquot *
448 xfs_dquot_alloc(
449         struct xfs_mount        *mp,
450         xfs_dqid_t              id,
451         xfs_dqtype_t            type)
452 {
453         struct xfs_dquot        *dqp;
454 
455         dqp = kmem_cache_zalloc(xfs_qm_dqzone, GFP_KERNEL | __GFP_NOFAIL);
456 
457         dqp->q_type = type;
458         dqp->q_id = id;
459         dqp->q_mount = mp;
460         INIT_LIST_HEAD(&dqp->q_lru);
461         mutex_init(&dqp->q_qlock);
462         init_waitqueue_head(&dqp->q_pinwait);
463         dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;
464         /*
465          * Offset of dquot in the (fixed sized) dquot chunk.
466          */
467         dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) *
468                         sizeof(xfs_dqblk_t);
469 
470         /*
 471          * The flush "lock" is implemented as a counting completion;
 472          * complete it once here so that the first flush lock attempt
 473          * can proceed without blocking.
474          */
475         init_completion(&dqp->q_flush);
476         complete(&dqp->q_flush);
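
        /*
         * Resulting protocol (sketch): xfs_dqflock_nowait() wraps
         * try_wait_for_completion(), which now succeeds exactly once;
         * xfs_dqfunlock() wraps complete(), making the flush "lock"
         * available again.
         */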
477 
478         /*
479          * Make sure group quotas have a different lock class than user
480          * quotas.
481          */
482         switch (type) {
483         case XFS_DQTYPE_USER:
484                 /* uses the default lock class */
485                 break;
486         case XFS_DQTYPE_GROUP:
487                 lockdep_set_class(&dqp->q_qlock, &xfs_dquot_group_class);
488                 break;
489         case XFS_DQTYPE_PROJ:
490                 lockdep_set_class(&dqp->q_qlock, &xfs_dquot_project_class);
491                 break;
492         default:
493                 ASSERT(0);
494                 break;
495         }
496 
497         xfs_qm_dquot_logitem_init(dqp);
498 
499         XFS_STATS_INC(mp, xs_qm_dquot);
500         return dqp;
501 }
502 
503 /* Copy the in-core quota fields in from the on-disk buffer. */
504 STATIC int
505 xfs_dquot_from_disk(
506         struct xfs_dquot        *dqp,
507         struct xfs_buf          *bp)
508 {
509         struct xfs_disk_dquot   *ddqp = bp->b_addr + dqp->q_bufoffset;
510 
511         /*
512          * Ensure that we got the type and ID we were looking for.
513          * Everything else was checked by the dquot buffer verifier.
514          */
515         if ((ddqp->d_type & XFS_DQTYPE_REC_MASK) != xfs_dquot_type(dqp) ||
516             be32_to_cpu(ddqp->d_id) != dqp->q_id) {
517                 xfs_alert_tag(bp->b_mount, XFS_PTAG_VERIFIER_ERROR,
518                           "Metadata corruption detected at %pS, quota %u",
519                           __this_address, dqp->q_id);
520                 xfs_alert(bp->b_mount, "Unmount and run xfs_repair");
521                 return -EFSCORRUPTED;
522         }
523 
524         /* copy everything from disk dquot to the incore dquot */
525         dqp->q_type = ddqp->d_type;
526         dqp->q_blk.hardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
527         dqp->q_blk.softlimit = be64_to_cpu(ddqp->d_blk_softlimit);
528         dqp->q_ino.hardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
529         dqp->q_ino.softlimit = be64_to_cpu(ddqp->d_ino_softlimit);
530         dqp->q_rtb.hardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
531         dqp->q_rtb.softlimit = be64_to_cpu(ddqp->d_rtb_softlimit);
532 
533         dqp->q_blk.count = be64_to_cpu(ddqp->d_bcount);
534         dqp->q_ino.count = be64_to_cpu(ddqp->d_icount);
535         dqp->q_rtb.count = be64_to_cpu(ddqp->d_rtbcount);
536 
537         dqp->q_blk.warnings = be16_to_cpu(ddqp->d_bwarns);
538         dqp->q_ino.warnings = be16_to_cpu(ddqp->d_iwarns);
539         dqp->q_rtb.warnings = be16_to_cpu(ddqp->d_rtbwarns);
540 
541         dqp->q_blk.timer = xfs_dquot_from_disk_ts(ddqp, ddqp->d_btimer);
542         dqp->q_ino.timer = xfs_dquot_from_disk_ts(ddqp, ddqp->d_itimer);
543         dqp->q_rtb.timer = xfs_dquot_from_disk_ts(ddqp, ddqp->d_rtbtimer);
544 
545         /*
546          * Reservation counters are defined as reservation plus current usage
547          * to avoid having to add every time.
548          */
549         dqp->q_blk.reserved = dqp->q_blk.count;
550         dqp->q_ino.reserved = dqp->q_ino.count;
551         dqp->q_rtb.reserved = dqp->q_rtb.count;
552 
553         /* initialize the dquot speculative prealloc thresholds */
554         xfs_dquot_set_prealloc_limits(dqp);
555         return 0;
556 }
557 
558 /* Copy the in-core quota fields into the on-disk buffer. */
559 void
560 xfs_dquot_to_disk(
561         struct xfs_disk_dquot   *ddqp,
562         struct xfs_dquot        *dqp)
563 {
564         ddqp->d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
565         ddqp->d_version = XFS_DQUOT_VERSION;
566         ddqp->d_type = dqp->q_type;
567         ddqp->d_id = cpu_to_be32(dqp->q_id);
568         ddqp->d_pad0 = 0;
569         ddqp->d_pad = 0;
570 
571         ddqp->d_blk_hardlimit = cpu_to_be64(dqp->q_blk.hardlimit);
572         ddqp->d_blk_softlimit = cpu_to_be64(dqp->q_blk.softlimit);
573         ddqp->d_ino_hardlimit = cpu_to_be64(dqp->q_ino.hardlimit);
574         ddqp->d_ino_softlimit = cpu_to_be64(dqp->q_ino.softlimit);
575         ddqp->d_rtb_hardlimit = cpu_to_be64(dqp->q_rtb.hardlimit);
576         ddqp->d_rtb_softlimit = cpu_to_be64(dqp->q_rtb.softlimit);
577 
578         ddqp->d_bcount = cpu_to_be64(dqp->q_blk.count);
579         ddqp->d_icount = cpu_to_be64(dqp->q_ino.count);
580         ddqp->d_rtbcount = cpu_to_be64(dqp->q_rtb.count);
581 
582         ddqp->d_bwarns = cpu_to_be16(dqp->q_blk.warnings);
583         ddqp->d_iwarns = cpu_to_be16(dqp->q_ino.warnings);
584         ddqp->d_rtbwarns = cpu_to_be16(dqp->q_rtb.warnings);
585 
586         ddqp->d_btimer = xfs_dquot_to_disk_ts(dqp, dqp->q_blk.timer);
587         ddqp->d_itimer = xfs_dquot_to_disk_ts(dqp, dqp->q_ino.timer);
588         ddqp->d_rtbtimer = xfs_dquot_to_disk_ts(dqp, dqp->q_rtb.timer);
589 }
590 
591 /* Allocate and initialize the dquot buffer for this in-core dquot. */
592 static int
593 xfs_qm_dqread_alloc(
594         struct xfs_mount        *mp,
595         struct xfs_dquot        *dqp,
596         struct xfs_buf          **bpp)
597 {
598         struct xfs_trans        *tp;
599         int                     error;
600 
601         error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_dqalloc,
602                         XFS_QM_DQALLOC_SPACE_RES(mp), 0, 0, &tp);
603         if (error)
604                 goto err;
605 
606         error = xfs_dquot_disk_alloc(&tp, dqp, bpp);
607         if (error)
608                 goto err_cancel;
609 
610         error = xfs_trans_commit(tp);
611         if (error) {
612                 /*
613                  * Buffer was held to the transaction, so we have to unlock it
614                  * manually here because we're not passing it back.
615                  */
616                 xfs_buf_relse(*bpp);
617                 *bpp = NULL;
618                 goto err;
619         }
620         return 0;
621 
622 err_cancel:
623         xfs_trans_cancel(tp);
624 err:
625         return error;
626 }
627 
628 /*
 629  * Read in the ondisk dquot using xfs_dquot_disk_read(), then copy it to an
 630  * incore version and release the buffer immediately.  If @can_alloc is true,
 631  * fill any holes in the on-disk metadata.
632  */
633 static int
634 xfs_qm_dqread(
635         struct xfs_mount        *mp,
636         xfs_dqid_t              id,
637         xfs_dqtype_t            type,
638         bool                    can_alloc,
639         struct xfs_dquot        **dqpp)
640 {
641         struct xfs_dquot        *dqp;
642         struct xfs_buf          *bp;
643         int                     error;
644 
645         dqp = xfs_dquot_alloc(mp, id, type);
646         trace_xfs_dqread(dqp);
647 
648         /* Try to read the buffer, allocating if necessary. */
649         error = xfs_dquot_disk_read(mp, dqp, &bp);
650         if (error == -ENOENT && can_alloc)
651                 error = xfs_qm_dqread_alloc(mp, dqp, &bp);
652         if (error)
653                 goto err;
654 
655         /*
656          * At this point we should have a clean locked buffer.  Copy the data
657          * to the incore dquot and release the buffer since the incore dquot
658          * has its own locking protocol so we needn't tie up the buffer any
659          * further.
660          */
661         ASSERT(xfs_buf_islocked(bp));
662         error = xfs_dquot_from_disk(dqp, bp);
663         xfs_buf_relse(bp);
664         if (error)
665                 goto err;
666 
667         *dqpp = dqp;
668         return error;
669 
670 err:
671         trace_xfs_dqread_fail(dqp);
672         xfs_qm_dqdestroy(dqp);
673         *dqpp = NULL;
674         return error;
675 }
676 
677 /*
678  * Advance to the next id in the current chunk, or if at the
 679  * end of the chunk, skip ahead to the first id in the next allocated
 680  * chunk using the SEEK_DATA interface.
681  */
682 static int
683 xfs_dq_get_next_id(
684         struct xfs_mount        *mp,
685         xfs_dqtype_t            type,
686         xfs_dqid_t              *id)
687 {
688         struct xfs_inode        *quotip = xfs_quota_inode(mp, type);
689         xfs_dqid_t              next_id = *id + 1; /* simple advance */
690         uint                    lock_flags;
691         struct xfs_bmbt_irec    got;
692         struct xfs_iext_cursor  cur;
693         xfs_fsblock_t           start;
694         int                     error = 0;
695 
696         /* If we'd wrap past the max ID, stop */
697         if (next_id < *id)
698                 return -ENOENT;
699 
700         /* If new ID is within the current chunk, advancing it sufficed */
701         if (next_id % mp->m_quotainfo->qi_dqperchunk) {
702                 *id = next_id;
703                 return 0;
704         }
705 
706         /* Nope, next_id is now past the current chunk, so find the next one */
707         start = (xfs_fsblock_t)next_id / mp->m_quotainfo->qi_dqperchunk;
708 
709         lock_flags = xfs_ilock_data_map_shared(quotip);
710         if (!(quotip->i_df.if_flags & XFS_IFEXTENTS)) {
711                 error = xfs_iread_extents(NULL, quotip, XFS_DATA_FORK);
 712                 if (error)
 713                         goto out_unlock;
714         }
715 
716         if (xfs_iext_lookup_extent(quotip, &quotip->i_df, start, &cur, &got)) {
717                 /* contiguous chunk, bump startoff for the id calculation */
718                 if (got.br_startoff < start)
719                         got.br_startoff = start;
720                 *id = got.br_startoff * mp->m_quotainfo->qi_dqperchunk;
721         } else {
722                 error = -ENOENT;
723         }
 724 out_unlock:
 725         xfs_iunlock(quotip, lock_flags);
726 
727         return error;
728 }
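
/*
 * Worked example for xfs_dq_get_next_id() (assuming 30 dquots per chunk):
 * id 57 simply advances to 58, since 58 % 30 != 0 keeps us inside the
 * current chunk.  Id 59 advances to 60, the first id of the next chunk, so
 * we look up the extent covering file block 60 / 30 = 2; if that block is
 * a hole, the id skips ahead to the start of the next allocated chunk.
 */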
729 
730 /*
731  * Look up the dquot in the in-core cache.  If found, the dquot is returned
732  * locked and ready to go.
733  */
734 static struct xfs_dquot *
735 xfs_qm_dqget_cache_lookup(
736         struct xfs_mount        *mp,
737         struct xfs_quotainfo    *qi,
738         struct radix_tree_root  *tree,
739         xfs_dqid_t              id)
740 {
741         struct xfs_dquot        *dqp;
742 
743 restart:
744         mutex_lock(&qi->qi_tree_lock);
745         dqp = radix_tree_lookup(tree, id);
746         if (!dqp) {
747                 mutex_unlock(&qi->qi_tree_lock);
748                 XFS_STATS_INC(mp, xs_qm_dqcachemisses);
749                 return NULL;
750         }
751 
752         xfs_dqlock(dqp);
753         if (dqp->q_flags & XFS_DQFLAG_FREEING) {
754                 xfs_dqunlock(dqp);
755                 mutex_unlock(&qi->qi_tree_lock);
756                 trace_xfs_dqget_freeing(dqp);
757                 delay(1);
758                 goto restart;
759         }
760 
761         dqp->q_nrefs++;
762         mutex_unlock(&qi->qi_tree_lock);
763 
764         trace_xfs_dqget_hit(dqp);
765         XFS_STATS_INC(mp, xs_qm_dqcachehits);
766         return dqp;
767 }
768 
769 /*
770  * Try to insert a new dquot into the in-core cache.  If an error occurs the
771  * caller should throw away the dquot and start over.  Otherwise, the dquot
772  * is returned locked (and held by the cache) as if there had been a cache
773  * hit.
774  */
775 static int
776 xfs_qm_dqget_cache_insert(
777         struct xfs_mount        *mp,
778         struct xfs_quotainfo    *qi,
779         struct radix_tree_root  *tree,
780         xfs_dqid_t              id,
781         struct xfs_dquot        *dqp)
782 {
783         int                     error;
784 
785         mutex_lock(&qi->qi_tree_lock);
786         error = radix_tree_insert(tree, id, dqp);
787         if (unlikely(error)) {
788                 /* Duplicate found!  Caller must try again. */
789                 WARN_ON(error != -EEXIST);
790                 mutex_unlock(&qi->qi_tree_lock);
791                 trace_xfs_dqget_dup(dqp);
792                 return error;
793         }
794 
795         /* Return a locked dquot to the caller, with a reference taken. */
796         xfs_dqlock(dqp);
797         dqp->q_nrefs = 1;
798 
799         qi->qi_dquots++;
800         mutex_unlock(&qi->qi_tree_lock);
801 
802         return 0;
803 }
804 
805 /* Check our input parameters. */
806 static int
807 xfs_qm_dqget_checks(
808         struct xfs_mount        *mp,
809         xfs_dqtype_t            type)
810 {
811         if (WARN_ON_ONCE(!XFS_IS_QUOTA_RUNNING(mp)))
812                 return -ESRCH;
813 
814         switch (type) {
815         case XFS_DQTYPE_USER:
816                 if (!XFS_IS_UQUOTA_ON(mp))
817                         return -ESRCH;
818                 return 0;
819         case XFS_DQTYPE_GROUP:
820                 if (!XFS_IS_GQUOTA_ON(mp))
821                         return -ESRCH;
822                 return 0;
823         case XFS_DQTYPE_PROJ:
824                 if (!XFS_IS_PQUOTA_ON(mp))
825                         return -ESRCH;
826                 return 0;
827         default:
828                 WARN_ON_ONCE(0);
829                 return -EINVAL;
830         }
831 }
832 
833 /*
834  * Given the file system, id, and type (UDQUOT/GDQUOT/PDQUOT), return a
835  * locked dquot, doing an allocation (if requested) as needed.
836  */
837 int
838 xfs_qm_dqget(
839         struct xfs_mount        *mp,
840         xfs_dqid_t              id,
841         xfs_dqtype_t            type,
842         bool                    can_alloc,
843         struct xfs_dquot        **O_dqpp)
844 {
845         struct xfs_quotainfo    *qi = mp->m_quotainfo;
846         struct radix_tree_root  *tree = xfs_dquot_tree(qi, type);
847         struct xfs_dquot        *dqp;
848         int                     error;
849 
850         error = xfs_qm_dqget_checks(mp, type);
851         if (error)
852                 return error;
853 
854 restart:
855         dqp = xfs_qm_dqget_cache_lookup(mp, qi, tree, id);
856         if (dqp) {
857                 *O_dqpp = dqp;
858                 return 0;
859         }
860 
861         error = xfs_qm_dqread(mp, id, type, can_alloc, &dqp);
862         if (error)
863                 return error;
864 
865         error = xfs_qm_dqget_cache_insert(mp, qi, tree, id, dqp);
866         if (error) {
867                 /*
868                  * Duplicate found. Just throw away the new dquot and start
869                  * over.
870                  */
871                 xfs_qm_dqdestroy(dqp);
872                 XFS_STATS_INC(mp, xs_qm_dquot_dups);
873                 goto restart;
874         }
875 
876         trace_xfs_dqget_miss(dqp);
877         *O_dqpp = dqp;
878         return 0;
879 }
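
/*
 * Illustrative caller sketch (not from this file): look up the user dquot
 * for @id and drop it again.  xfs_qm_dqget() returns the dquot locked with
 * one reference held:
 *
 *	struct xfs_dquot	*dqp;
 *	int			error;
 *
 *	error = xfs_qm_dqget(mp, id, XFS_DQTYPE_USER, false, &dqp);
 *	if (!error) {
 *		... inspect dqp->q_blk.count etc ...
 *		xfs_qm_dqput(dqp);	drops the reference and unlocks
 *	}
 */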
880 
881 /*
882  * Given a dquot id and type, read and initialize a dquot from the on-disk
883  * metadata.  This function is only for use during quota initialization so
884  * it ignores the dquot cache assuming that the dquot shrinker isn't set up.
885  * The caller is responsible for _qm_dqdestroy'ing the returned dquot.
886  */
887 int
888 xfs_qm_dqget_uncached(
889         struct xfs_mount        *mp,
890         xfs_dqid_t              id,
891         xfs_dqtype_t            type,
892         struct xfs_dquot        **dqpp)
893 {
894         int                     error;
895 
896         error = xfs_qm_dqget_checks(mp, type);
897         if (error)
898                 return error;
899 
 900         return xfs_qm_dqread(mp, id, type, false, dqpp);
901 }
902 
903 /* Return the quota id for a given inode and type. */
904 xfs_dqid_t
905 xfs_qm_id_for_quotatype(
906         struct xfs_inode        *ip,
907         xfs_dqtype_t            type)
908 {
909         switch (type) {
910         case XFS_DQTYPE_USER:
911                 return i_uid_read(VFS_I(ip));
912         case XFS_DQTYPE_GROUP:
913                 return i_gid_read(VFS_I(ip));
914         case XFS_DQTYPE_PROJ:
915                 return ip->i_d.di_projid;
916         }
917         ASSERT(0);
918         return 0;
919 }
920 
921 /*
922  * Return the dquot for a given inode and type.  If @can_alloc is true, then
923  * allocate blocks if needed.  The inode's ILOCK must be held and it must not
 924  * already have a dquot of this type attached.
925  */
926 int
927 xfs_qm_dqget_inode(
928         struct xfs_inode        *ip,
929         xfs_dqtype_t            type,
930         bool                    can_alloc,
931         struct xfs_dquot        **O_dqpp)
932 {
933         struct xfs_mount        *mp = ip->i_mount;
934         struct xfs_quotainfo    *qi = mp->m_quotainfo;
935         struct radix_tree_root  *tree = xfs_dquot_tree(qi, type);
936         struct xfs_dquot        *dqp;
937         xfs_dqid_t              id;
938         int                     error;
939 
940         error = xfs_qm_dqget_checks(mp, type);
941         if (error)
942                 return error;
943 
944         ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
945         ASSERT(xfs_inode_dquot(ip, type) == NULL);
946 
947         id = xfs_qm_id_for_quotatype(ip, type);
948 
949 restart:
950         dqp = xfs_qm_dqget_cache_lookup(mp, qi, tree, id);
951         if (dqp) {
952                 *O_dqpp = dqp;
953                 return 0;
954         }
955 
956         /*
 957          * Dquot cache miss. We don't want to keep the inode lock across
 958          * a (potential) disk read, and we don't want to deal with the lock
 959          * ordering between the quota inode and this inode. On the other
 960          * hand, dropping the inode lock here means dealing with a chown
 961          * that can happen before we re-acquire the lock.
962          */
963         xfs_iunlock(ip, XFS_ILOCK_EXCL);
964         error = xfs_qm_dqread(mp, id, type, can_alloc, &dqp);
965         xfs_ilock(ip, XFS_ILOCK_EXCL);
966         if (error)
967                 return error;
968 
969         /*
970          * A dquot could be attached to this inode by now, since we had
971          * dropped the ilock.
972          */
973         if (xfs_this_quota_on(mp, type)) {
974                 struct xfs_dquot        *dqp1;
975 
976                 dqp1 = xfs_inode_dquot(ip, type);
977                 if (dqp1) {
978                         xfs_qm_dqdestroy(dqp);
979                         dqp = dqp1;
980                         xfs_dqlock(dqp);
981                         goto dqret;
982                 }
983         } else {
984                 /* inode stays locked on return */
985                 xfs_qm_dqdestroy(dqp);
986                 return -ESRCH;
987         }
988 
989         error = xfs_qm_dqget_cache_insert(mp, qi, tree, id, dqp);
990         if (error) {
991                 /*
992                  * Duplicate found. Just throw away the new dquot and start
993                  * over.
994                  */
995                 xfs_qm_dqdestroy(dqp);
996                 XFS_STATS_INC(mp, xs_qm_dquot_dups);
997                 goto restart;
998         }
999 
1000 dqret:
1001         ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1002         trace_xfs_dqget_miss(dqp);
1003         *O_dqpp = dqp;
1004         return 0;
1005 }
1006 
1007 /*
1008  * Starting at @id and progressing upwards, look for an initialized incore
1009  * dquot, lock it, and return it.
1010  */
1011 int
1012 xfs_qm_dqget_next(
1013         struct xfs_mount        *mp,
1014         xfs_dqid_t              id,
1015         xfs_dqtype_t            type,
1016         struct xfs_dquot        **dqpp)
1017 {
1018         struct xfs_dquot        *dqp;
1019         int                     error = 0;
1020 
1021         *dqpp = NULL;
1022         for (; !error; error = xfs_dq_get_next_id(mp, type, &id)) {
1023                 error = xfs_qm_dqget(mp, id, type, false, &dqp);
1024                 if (error == -ENOENT)
1025                         continue;
1026                 else if (error != 0)
1027                         break;
1028 
1029                 if (!XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
1030                         *dqpp = dqp;
1031                         return 0;
1032                 }
1033 
1034                 xfs_qm_dqput(dqp);
1035         }
1036 
1037         return error;
1038 }
1039 
1040 /*
1041  * Release a reference to the dquot (decrement ref-count) and unlock it.
1042  *
1043  * If there is a group quota attached to this dquot, carefully release that
1044  * too without tripping over deadlocks.
1045  */
1046 void
1047 xfs_qm_dqput(
1048         struct xfs_dquot        *dqp)
1049 {
1050         ASSERT(dqp->q_nrefs > 0);
1051         ASSERT(XFS_DQ_IS_LOCKED(dqp));
1052 
1053         trace_xfs_dqput(dqp);
1054 
1055         if (--dqp->q_nrefs == 0) {
1056                 struct xfs_quotainfo    *qi = dqp->q_mount->m_quotainfo;
1057                 trace_xfs_dqput_free(dqp);
1058 
1059                 if (list_lru_add(&qi->qi_lru, &dqp->q_lru))
1060                         XFS_STATS_INC(dqp->q_mount, xs_qm_dquot_unused);
1061         }
1062         xfs_dqunlock(dqp);
1063 }
1064 
1065 /*
1066  * Release a dquot. Flush it if dirty, then dqput() it.
1067  * dquot must not be locked.
1068  */
1069 void
1070 xfs_qm_dqrele(
1071         struct xfs_dquot        *dqp)
1072 {
1073         if (!dqp)
1074                 return;
1075 
1076         trace_xfs_dqrele(dqp);
1077 
1078         xfs_dqlock(dqp);
1079         /*
1080          * We don't flush the dquot here even if it is dirty;
1081          * that would create stutters that we want to avoid.
1082          * Instead we do a delayed write when we try to reclaim
1083          * a dirty dquot. Also xfs_sync will take part of the burden...
1084          */
1085         xfs_qm_dqput(dqp);
1086 }
1087 
1088 /*
1089  * This is the dquot flushing I/O completion routine.  It is called
1090  * from interrupt level when the buffer containing the dquot is
1091  * flushed to disk.  It is responsible for removing the dquot logitem
1092  * from the AIL if it has not been re-logged, and unlocking the dquot's
1093  * flush lock. This behavior is very similar to that of inodes.
1094  */
1095 static void
1096 xfs_qm_dqflush_done(
1097         struct xfs_log_item     *lip)
1098 {
1099         struct xfs_dq_logitem   *qip = (struct xfs_dq_logitem *)lip;
1100         struct xfs_dquot        *dqp = qip->qli_dquot;
1101         struct xfs_ail          *ailp = lip->li_ailp;
1102         xfs_lsn_t               tail_lsn;
1103 
1104         /*
1105          * We only want to pull the item from the AIL if its
1106          * location in the log has not changed since we started the
1107          * flush, i.e. if the dquot's lsn is unchanged. First we
1108          * check the lsn outside the lock since it's cheaper, and
1109          * then we recheck while holding the lock before removing
1110          * the dquot from the AIL.
1111          */
1112         if (test_bit(XFS_LI_IN_AIL, &lip->li_flags) &&
1113             ((lip->li_lsn == qip->qli_flush_lsn) ||
1114              test_bit(XFS_LI_FAILED, &lip->li_flags))) {
1115 
1116                 spin_lock(&ailp->ail_lock);
1117                 xfs_clear_li_failed(lip);
1118                 if (lip->li_lsn == qip->qli_flush_lsn) {
1119                         /* xfs_ail_update_finish() drops the AIL lock */
1120                         tail_lsn = xfs_ail_delete_one(ailp, lip);
1121                         xfs_ail_update_finish(ailp, tail_lsn);
1122                 } else {
1123                         spin_unlock(&ailp->ail_lock);
1124                 }
1125         }
1126 
1127         /*
1128          * Release the dq's flush lock since we're done with it.
1129          */
1130         xfs_dqfunlock(dqp);
1131 }
1132 
1133 void
1134 xfs_buf_dquot_iodone(
1135         struct xfs_buf          *bp)
1136 {
1137         struct xfs_log_item     *lip, *n;
1138 
1139         list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) {
1140                 list_del_init(&lip->li_bio_list);
1141                 xfs_qm_dqflush_done(lip);
1142         }
1143 }
1144 
1145 void
1146 xfs_buf_dquot_io_fail(
1147         struct xfs_buf          *bp)
1148 {
1149         struct xfs_log_item     *lip;
1150 
1151         spin_lock(&bp->b_mount->m_ail->ail_lock);
1152         list_for_each_entry(lip, &bp->b_li_list, li_bio_list)
1153                 xfs_set_li_failed(lip, bp);
1154         spin_unlock(&bp->b_mount->m_ail->ail_lock);
1155 }
1156 
1157 /* Check incore dquot for errors before we flush. */
1158 static xfs_failaddr_t
1159 xfs_qm_dqflush_check(
1160         struct xfs_dquot        *dqp)
1161 {
1162         xfs_dqtype_t            type = xfs_dquot_type(dqp);
1163 
1164         if (type != XFS_DQTYPE_USER &&
1165             type != XFS_DQTYPE_GROUP &&
1166             type != XFS_DQTYPE_PROJ)
1167                 return __this_address;
1168 
1169         if (dqp->q_id == 0)
1170                 return NULL;
1171 
1172         if (dqp->q_blk.softlimit && dqp->q_blk.count > dqp->q_blk.softlimit &&
1173             !dqp->q_blk.timer)
1174                 return __this_address;
1175 
1176         if (dqp->q_ino.softlimit && dqp->q_ino.count > dqp->q_ino.softlimit &&
1177             !dqp->q_ino.timer)
1178                 return __this_address;
1179 
1180         if (dqp->q_rtb.softlimit && dqp->q_rtb.count > dqp->q_rtb.softlimit &&
1181             !dqp->q_rtb.timer)
1182                 return __this_address;
1183 
1184         /* bigtime flag should never be set on root dquots */
1185         if (dqp->q_type & XFS_DQTYPE_BIGTIME) {
1186                 if (!xfs_sb_version_hasbigtime(&dqp->q_mount->m_sb))
1187                         return __this_address;
1188                 if (dqp->q_id == 0)
1189                         return __this_address;
1190         }
1191 
1192         return NULL;
1193 }
1194 
1195 /*
1196  * Write a modified dquot to disk.
1197  * The caller must hold both the dquot lock and the flush lock.
1198  * The flush lock will not be unlocked until the dquot reaches the disk,
1199  * but the dquot is free to be unlocked and modified by the caller
1200  * in the interim. Dquot is still locked on return. This behavior is
1201  * identical to that of inodes.
1202  */
1203 int
1204 xfs_qm_dqflush(
1205         struct xfs_dquot        *dqp,
1206         struct xfs_buf          **bpp)
1207 {
1208         struct xfs_mount        *mp = dqp->q_mount;
1209         struct xfs_log_item     *lip = &dqp->q_logitem.qli_item;
1210         struct xfs_buf          *bp;
1211         struct xfs_dqblk        *dqblk;
1212         xfs_failaddr_t          fa;
1213         int                     error;
1214 
1215         ASSERT(XFS_DQ_IS_LOCKED(dqp));
1216         ASSERT(!completion_done(&dqp->q_flush));
1217 
1218         trace_xfs_dqflush(dqp);
1219 
1220         *bpp = NULL;
1221 
1222         xfs_qm_dqunpin_wait(dqp);
1223 
1224         /*
1225          * Get the buffer containing the on-disk dquot
1226          */
1227         error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
1228                                    mp->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK,
1229                                    &bp, &xfs_dquot_buf_ops);
1230         if (error == -EAGAIN)
1231                 goto out_unlock;
1232         if (error)
1233                 goto out_abort;
1234 
1235         fa = xfs_qm_dqflush_check(dqp);
1236         if (fa) {
1237                 xfs_alert(mp, "corrupt dquot ID 0x%x in memory at %pS",
1238                                 dqp->q_id, fa);
1239                 xfs_buf_relse(bp);
1240                 error = -EFSCORRUPTED;
1241                 goto out_abort;
1242         }
1243 
1244         /* Flush the incore dquot to the ondisk buffer. */
1245         dqblk = bp->b_addr + dqp->q_bufoffset;
1246         xfs_dquot_to_disk(&dqblk->dd_diskdq, dqp);
1247 
1248         /*
1249          * Clear the dirty field and remember the flush lsn for later use.
1250          */
1251         dqp->q_flags &= ~XFS_DQFLAG_DIRTY;
1252 
1253         xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn,
1254                                         &dqp->q_logitem.qli_item.li_lsn);
1255 
1256         /*
1257          * copy the lsn into the on-disk dquot now while we have the in memory
1258          * dquot here. This can't be done later in the write verifier as we
1259          * can't get access to the log item at that point in time.
1260          *
1261          * We also calculate the CRC here so that the on-disk dquot in the
1262          * buffer always has a valid CRC. This ensures there is no possibility
1263          * of a dquot without an up-to-date CRC getting to disk.
1264          */
1265         if (xfs_sb_version_hascrc(&mp->m_sb)) {
1266                 dqblk->dd_lsn = cpu_to_be64(dqp->q_logitem.qli_item.li_lsn);
1267                 xfs_update_cksum((char *)dqblk, sizeof(struct xfs_dqblk),
1268                                  XFS_DQUOT_CRC_OFF);
1269         }
1270 
1271         /*
1272          * Attach the dquot to the buffer so that we can remove this dquot from
1273          * the AIL and release the flush lock once the dquot is synced to disk.
1274          */
1275         bp->b_flags |= _XBF_DQUOTS;
1276         list_add_tail(&dqp->q_logitem.qli_item.li_bio_list, &bp->b_li_list);
1277 
1278         /*
1279          * If the buffer is pinned then push on the log so we won't
1280          * get stuck waiting in the write for too long.
1281          */
1282         if (xfs_buf_ispinned(bp)) {
1283                 trace_xfs_dqflush_force(dqp);
1284                 xfs_log_force(mp, 0);
1285         }
1286 
1287         trace_xfs_dqflush_done(dqp);
1288         *bpp = bp;
1289         return 0;
1290 
1291 out_abort:
1292         dqp->q_flags &= ~XFS_DQFLAG_DIRTY;
1293         xfs_trans_ail_delete(lip, 0);
1294         xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1295 out_unlock:
1296         xfs_dqfunlock(dqp);
1297         return error;
1298 }
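
/*
 * Caller-side sketch (cf. xfs_qm_dqpurge() in xfs_qm.c): take the dquot
 * lock and the flush lock, flush, then write out the buffer we got back:
 *
 *	xfs_dqlock(dqp);
 *	xfs_dqflock(dqp);
 *	error = xfs_qm_dqflush(dqp, &bp);
 *	xfs_dqunlock(dqp);
 *	if (!error) {
 *		error = xfs_bwrite(bp);
 *		xfs_buf_relse(bp);
 *	}
 */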
1299 
1300 /*
1301  * Lock two xfs_dquot structures.
1302  *
1303  * To avoid deadlocks we always lock the quota structure with
1304  * the lower id first.
1305  */
1306 void
1307 xfs_dqlock2(
1308         struct xfs_dquot        *d1,
1309         struct xfs_dquot        *d2)
1310 {
1311         if (d1 && d2) {
1312                 ASSERT(d1 != d2);
1313                 if (d1->q_id > d2->q_id) {
1314                         mutex_lock(&d2->q_qlock);
1315                         mutex_lock_nested(&d1->q_qlock, XFS_QLOCK_NESTED);
1316                 } else {
1317                         mutex_lock(&d1->q_qlock);
1318                         mutex_lock_nested(&d2->q_qlock, XFS_QLOCK_NESTED);
1319                 }
1320         } else if (d1) {
1321                 mutex_lock(&d1->q_qlock);
1322         } else if (d2) {
1323                 mutex_lock(&d2->q_qlock);
1324         }
1325 }
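
/*
 * Illustrative pairing (sketch): callers drop the two dquot locks
 * individually once they are done:
 *
 *	xfs_dqlock2(udqp, gdqp);	safe regardless of id order
 *	...
 *	xfs_dqunlock(udqp);
 *	xfs_dqunlock(gdqp);
 */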
1326 
1327 int __init
1328 xfs_qm_init(void)
1329 {
1330         xfs_qm_dqzone = kmem_cache_create("xfs_dquot",
1331                                           sizeof(struct xfs_dquot),
1332                                           0, 0, NULL);
1333         if (!xfs_qm_dqzone)
1334                 goto out;
1335 
1336         xfs_qm_dqtrxzone = kmem_cache_create("xfs_dqtrx",
1337                                              sizeof(struct xfs_dquot_acct),
1338                                              0, 0, NULL);
1339         if (!xfs_qm_dqtrxzone)
1340                 goto out_free_dqzone;
1341 
1342         return 0;
1343 
1344 out_free_dqzone:
1345         kmem_cache_destroy(xfs_qm_dqzone);
1346 out:
1347         return -ENOMEM;
1348 }
1349 
1350 void
1351 xfs_qm_exit(void)
1352 {
1353         kmem_cache_destroy(xfs_qm_dqtrxzone);
1354         kmem_cache_destroy(xfs_qm_dqzone);
1355 }
1356 
1357 /*
1358  * Iterate every dquot of a particular type.  The caller must ensure that the
1359  * particular quota type is active.  iter_fn can return negative error codes,
1360  * or -ECANCELED to indicate that it wants to stop iterating.
1361  */
1362 int
1363 xfs_qm_dqiterate(
1364         struct xfs_mount        *mp,
1365         xfs_dqtype_t            type,
1366         xfs_qm_dqiterate_fn     iter_fn,
1367         void                    *priv)
1368 {
1369         struct xfs_dquot        *dq;
1370         xfs_dqid_t              id = 0;
1371         int                     error;
1372 
1373         do {
1374                 error = xfs_qm_dqget_next(mp, id, type, &dq);
1375                 if (error == -ENOENT)
1376                         return 0;
1377                 if (error)
1378                         return error;
1379 
1380                 error = iter_fn(dq, type, priv);
1381                 id = dq->q_id;
1382                 xfs_qm_dqput(dq);
1383         } while (error == 0 && id != 0);
1384 
1385         return error;
1386 }
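
/*
 * Illustrative iter_fn sketch (hypothetical helper, not part of this file):
 * count every dquot of a type; returning -ECANCELED instead would stop the
 * walk early:
 *
 *	static int
 *	xfs_qm_count_dquot(
 *		struct xfs_dquot	*dq,
 *		xfs_dqtype_t		type,
 *		void			*priv)
 *	{
 *		uint64_t		*count = priv;
 *
 *		(*count)++;
 *		return 0;
 *	}
 *
 *	uint64_t		count = 0;
 *
 *	error = xfs_qm_dqiterate(mp, XFS_DQTYPE_USER, xfs_qm_count_dquot,
 *			&count);
 */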
1387 
