xfs: remove xfs_qm_dqput and optimize dropping dquot references

With the new lockref-based dquot reference counting, there is no need to
hold q_qlock to drop a reference.  Make xfs_qm_dqrele the main function
for dropping dquot references without taking q_qlock, and convert all
callers of xfs_qm_dqput to unlock q_qlock and then call xfs_qm_dqrele
instead.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Carlos Maiolino <cem@kernel.org>
Christoph Hellwig 2025-11-10 14:22:58 +01:00, committed by Carlos Maiolino
parent 0c5e80bd57
commit 6b6e6e7521
10 changed files with 36 additions and 54 deletions
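For context, the sketch below (not part of this commit; plain userspace C with invented names such as struct fake_dquot, dq_put_fast(), dq_rele() and fake_lru) illustrates the reference-drop pattern the new xfs_qm_dqrele() follows: a lockless fast path that only decrements the count while more than one reference remains, and a locked slow path that handles the final reference by parking the object on an LRU list.  In the kernel the fast path is provided by lockref_put_or_lock() and the LRU insertion by list_lru_add_obj(), as shown in the xfs_qm_dqrele() hunk below.

/*
 * Standalone illustration only -- not XFS code.  struct fake_dquot, dq_rele()
 * and fake_lru are made-up stand-ins for struct xfs_dquot, xfs_qm_dqrele()
 * and qi_lru.  Build with: cc -std=c11 -pthread sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_dquot {
	atomic_int		refcount;	/* stands in for q_lockref.count */
	pthread_mutex_t		lock;		/* stands in for q_lockref.lock */
	struct fake_dquot	*lru_next;	/* stands in for q_lru */
};

static struct fake_dquot	*fake_lru;	/* stands in for qi_lru */
static pthread_mutex_t		fake_lru_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Fast path: drop a reference without taking any lock as long as at least
 * one other reference remains (this is roughly what lockref_put_or_lock()
 * does, using a cmpxchg on its combined lock/count word).
 */
static bool
dq_put_fast(struct fake_dquot *dq)
{
	int old = atomic_load(&dq->refcount);

	while (old > 1) {
		if (atomic_compare_exchange_weak(&dq->refcount, &old, old - 1))
			return true;
	}
	return false;		/* possibly the last reference */
}

/* Shape of the new xfs_qm_dqrele(): lockless put, else locked slow path. */
static void
dq_rele(struct fake_dquot *dq)
{
	if (dq_put_fast(dq))
		return;

	pthread_mutex_lock(&dq->lock);
	if (atomic_fetch_sub(&dq->refcount, 1) == 1) {
		/* Final reference: park the object on the LRU for reclaim. */
		pthread_mutex_lock(&fake_lru_lock);
		dq->lru_next = fake_lru;
		fake_lru = dq;
		pthread_mutex_unlock(&fake_lru_lock);
	}
	pthread_mutex_unlock(&dq->lock);
}

int
main(void)
{
	struct fake_dquot dq = {
		.refcount = 2,
		.lock = PTHREAD_MUTEX_INITIALIZER,
	};

	dq_rele(&dq);	/* count 2 -> 1, no lock taken */
	dq_rele(&dq);	/* count 1 -> 0, slow path, parked on fake_lru */
	printf("refcount=%d on_lru=%d\n", atomic_load(&dq.refcount),
			fake_lru == &dq);
	return 0;
}

The difference from the removed xfs_qm_dqput() is purely in locking: callers now drop q_qlock themselves before the put, so the release path never has to juggle the mutex.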


@@ -330,7 +330,8 @@ xchk_quota(
 	xchk_dqiter_init(&cursor, sc, dqtype);
 	while ((error = xchk_dquot_iter(&cursor, &dq)) == 1) {
 		error = xchk_quota_item(&sqi, dq);
-		xfs_qm_dqput(dq);
+		mutex_unlock(&dq->q_qlock);
+		xfs_qm_dqrele(dq);
 		if (error)
 			break;
 	}


@@ -513,7 +513,8 @@ xrep_quota_problems(
 	xchk_dqiter_init(&cursor, sc, dqtype);
 	while ((error = xchk_dquot_iter(&cursor, &dq)) == 1) {
 		error = xrep_quota_item(&rqi, dq);
-		xfs_qm_dqput(dq);
+		mutex_unlock(&dq->q_qlock);
+		xfs_qm_dqrele(dq);
 		if (error)
 			break;
 	}


@@ -636,7 +636,8 @@ xqcheck_walk_observations(
 			return error;
 
 		error = xqcheck_compare_dquot(xqc, dqtype, dq);
-		xfs_qm_dqput(dq);
+		mutex_unlock(&dq->q_qlock);
+		xfs_qm_dqrele(dq);
 		if (error)
 			return error;
 
@@ -674,7 +675,8 @@ xqcheck_compare_dqtype(
 	xchk_dqiter_init(&cursor, sc, dqtype);
 	while ((error = xchk_dquot_iter(&cursor, &dq)) == 1) {
 		error = xqcheck_compare_dquot(xqc, dqtype, dq);
-		xfs_qm_dqput(dq);
+		mutex_unlock(&dq->q_qlock);
+		xfs_qm_dqrele(dq);
 		if (error)
 			break;
 	}


@@ -156,7 +156,8 @@ xqcheck_commit_dqtype(
 	xchk_dqiter_init(&cursor, sc, dqtype);
 	while ((error = xchk_dquot_iter(&cursor, &dq)) == 1) {
 		error = xqcheck_commit_dquot(xqc, dqtype, dq);
-		xfs_qm_dqput(dq);
+		mutex_unlock(&dq->q_qlock);
+		xfs_qm_dqrele(dq);
 		if (error)
 			break;
 	}
@@ -187,7 +188,8 @@ xqcheck_commit_dqtype(
 			return error;
 
 		error = xqcheck_commit_dquot(xqc, dqtype, dq);
-		xfs_qm_dqput(dq);
+		mutex_unlock(&dq->q_qlock);
+		xfs_qm_dqrele(dq);
 		if (error)
 			return error;
 


@@ -1105,44 +1105,15 @@ xfs_qm_dqget_next(
 			return 0;
 		}
 
-		xfs_qm_dqput(dqp);
+		mutex_unlock(&dqp->q_qlock);
+		xfs_qm_dqrele(dqp);
 	}
 
 	return error;
 }
 
-/*
- * Release a reference to the dquot (decrement ref-count) and unlock it.
- *
- * If there is a group quota attached to this dquot, carefully release that
- * too without tripping over deadlocks'n'stuff.
- */
-void
-xfs_qm_dqput(
-	struct xfs_dquot	*dqp)
-{
-	ASSERT(XFS_DQ_IS_LOCKED(dqp));
-
-	trace_xfs_dqput(dqp);
-
-	if (lockref_put_or_lock(&dqp->q_lockref))
-		goto out_unlock;
-
-	if (!--dqp->q_lockref.count) {
-		struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
-
-		trace_xfs_dqput_free(dqp);
-		if (list_lru_add_obj(&qi->qi_lru, &dqp->q_lru))
-			XFS_STATS_INC(dqp->q_mount, xs_qm_dquot_unused);
-	}
-	spin_unlock(&dqp->q_lockref.lock);
-out_unlock:
-	mutex_unlock(&dqp->q_qlock);
-}
-
 /*
- * Release a dquot. Flush it if dirty, then dqput() it.
- * dquot must not be locked.
+ * Release a reference to the dquot.
  */
 void
 xfs_qm_dqrele(
@@ -1153,14 +1124,16 @@ xfs_qm_dqrele(
 
 	trace_xfs_dqrele(dqp);
 
-	mutex_lock(&dqp->q_qlock);
-	/*
-	 * We don't care to flush it if the dquot is dirty here.
-	 * That will create stutters that we want to avoid.
-	 * Instead we do a delayed write when we try to reclaim
-	 * a dirty dquot. Also xfs_sync will take part of the burden...
-	 */
-	xfs_qm_dqput(dqp);
+	if (lockref_put_or_lock(&dqp->q_lockref))
+		return;
+	if (!--dqp->q_lockref.count) {
+		struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
+
+		trace_xfs_dqrele_free(dqp);
+		if (list_lru_add_obj(&qi->qi_lru, &dqp->q_lru))
+			XFS_STATS_INC(dqp->q_mount, xs_qm_dquot_unused);
+	}
+	spin_unlock(&dqp->q_lockref.lock);
 }
 
 /*


@@ -218,7 +218,6 @@ int xfs_qm_dqget_next(struct xfs_mount *mp, xfs_dqid_t id,
 int		xfs_qm_dqget_uncached(struct xfs_mount *mp,
 			xfs_dqid_t id, xfs_dqtype_t type,
 			struct xfs_dquot **dqpp);
-void		xfs_qm_dqput(struct xfs_dquot *dqp);
 
 void		xfs_dqlock2(struct xfs_dquot *, struct xfs_dquot *);
 void		xfs_dqlockn(struct xfs_dqtrx *q);


@@ -1346,7 +1346,8 @@ xfs_qm_quotacheck_dqadjust(
 
 	dqp->q_flags |= XFS_DQFLAG_DIRTY;
 out_unlock:
-	xfs_qm_dqput(dqp);
+	mutex_unlock(&dqp->q_qlock);
+	xfs_qm_dqrele(dqp);
 	return error;
 }
 
@@ -1487,7 +1488,8 @@ xfs_qm_flush_one(
 		xfs_buf_delwri_queue(bp, buffer_list);
 	xfs_buf_relse(bp);
 out_unlock:
-	xfs_qm_dqput(dqp);
+	mutex_unlock(&dqp->q_qlock);
+	xfs_qm_dqrele(dqp);
 	return error;
 }
 


@@ -74,7 +74,8 @@ xfs_qm_statvfs(
 
 	if (!xfs_qm_dqget(mp, ip->i_projid, XFS_DQTYPE_PROJ, false, &dqp)) {
 		xfs_fill_statvfs_from_dquot(statp, ip, dqp);
-		xfs_qm_dqput(dqp);
+		mutex_unlock(&dqp->q_qlock);
+		xfs_qm_dqrele(dqp);
 	}
 }
 


@@ -467,7 +467,8 @@ xfs_qm_scall_getquota(
 	xfs_qm_scall_getquota_fill_qc(mp, type, dqp, dst);
 
 out_put:
-	xfs_qm_dqput(dqp);
+	mutex_unlock(&dqp->q_qlock);
+	xfs_qm_dqrele(dqp);
 	return error;
 }
 
@@ -497,7 +498,8 @@ xfs_qm_scall_getquota_next(
 	*id = dqp->q_id;
 
 	xfs_qm_scall_getquota_fill_qc(mp, type, dqp, dst);
+	mutex_unlock(&dqp->q_qlock);
 
-	xfs_qm_dqput(dqp);
+	xfs_qm_dqrele(dqp);
 	return error;
 }


@@ -1409,9 +1409,8 @@ DEFINE_DQUOT_EVENT(xfs_dqget_hit);
 DEFINE_DQUOT_EVENT(xfs_dqget_miss);
 DEFINE_DQUOT_EVENT(xfs_dqget_freeing);
 DEFINE_DQUOT_EVENT(xfs_dqget_dup);
-DEFINE_DQUOT_EVENT(xfs_dqput);
-DEFINE_DQUOT_EVENT(xfs_dqput_free);
 DEFINE_DQUOT_EVENT(xfs_dqrele);
+DEFINE_DQUOT_EVENT(xfs_dqrele_free);
 DEFINE_DQUOT_EVENT(xfs_dqflush);
 DEFINE_DQUOT_EVENT(xfs_dqflush_force);
 DEFINE_DQUOT_EVENT(xfs_dqflush_done);