mirror of https://github.com/torvalds/linux.git
gfs2 changes
- Major withdraw / error handling overhaul based on dlm's new
  DLM_RELEASE_RECOVER feature: this allows gfs to treat withdraws like
  node failures. Make withdraws asynchronous.

- Fix a bug in commit e4a8b5481c that caused 'df' to remain out of sync.
  ('df' is still allowed to go slightly out of sync for short periods of
  time.)

- Prevent recursive memory reclaim in gfs2_unstuff_dinode().

- Clean up SDF_JOURNAL_LIVE flag handling.

- Fix remote evict for read-only filesystems.

- Fix a misuse of bio_chain().

- Various other minor cleanups.

-----BEGIN PGP SIGNATURE-----

iQJIBAABCAAyFiEEJZs3krPW0xkhLMTc1b+f6wMTZToFAmkvM/oUHGFncnVlbmJh
QHJlZGhhdC5jb20ACgkQ1b+f6wMTZTqFLQ//Ra+FUIMYRrU83xd8OB+rXaFixymk
riPnoXZHrccoRAARc6d/p/nTYmV7FSnTa2jlsUFshsO2jl/1OIRI6xqScL+W5dp9
ToxHKnezOsRmy5tXvomYy9Teo/ngz6dpv6ixVrn7qTGJJ5zpir0ji2fO+xUvCUQq
IVGgvQN0O7ECUmmRXxjkRLhoEdus2Ye4jQkpJmqY9I1+H/0E43HlF2zDgoisTkiH
baNtFkTgE65Hba2xhoTCt3NO17POgqGcRaLVOLv5dqGhLAQ33+jXeYduM6ujbqwk
1FznGf09NRBJVQkyJvbgZELdX+mWXvWf/2kPmW6TXd7phFiO7cByxN2aHHcu/wBp
kh0ZgLFtvjyuR7wiyKf0tAKKPh250YiSkBuJU64dlHha2ZQuME6Z4dlYqBKwvPXf
AwxAu0tB2bAjc2OXvPaHhLHjdpySEWqNZIITbfQPVPonRKed10IqCwlTd+j5/64H
mnZCCxr2p2akrSYbQjl7h1k4LO7eX3cYJ23JoKIjwQ2hJE+Pd4li9+xhQCEQUVyE
ou/ezUKc19Xe7uxG1412PrbmOtIMgKfpq2KW151b1KA3pfCUt7GMvpYlvmnf9Wzs
XGjrQ1p4XdlxzQqe9XTrdO0IrNUNGDcky1Kqishje9SPa8FthwrtTuBSYL8nkcX6
3KRRzlFCRHfM5ng=
=AWK2
-----END PGP SIGNATURE-----

Merge tag 'gfs2-for-6.19' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2

Pull gfs2 updates from Andreas Gruenbacher:

 - Major withdraw / error handling overhaul based on dlm's new
   DLM_RELEASE_RECOVER feature: this allows gfs to treat withdraws like
   node failures. Make withdraws asynchronous

 - Fix a bug in commit e4a8b5481c that caused 'df' to remain out of sync.
   ('df' is still allowed to go slightly out of sync for short periods of
   time)

 - Prevent recursive memory reclaim in gfs2_unstuff_dinode()

 - Clean up SDF_JOURNAL_LIVE flag handling

 - Fix remote evict for read-only filesystems

 - Fix a misuse of bio_chain()

 - Various other minor cleanups

* tag 'gfs2-for-6.19' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2: (35 commits)
  gfs2: Fix use of bio_chain
  gfs2: Clean up SDF_JOURNAL_LIVE flag handling
  gfs2: No longer thaw filesystems during a withdraw
  gfs2: Withdraw immediately in gfs2_trans_add_meta
  gfs2: New gfs2_withdraw_helper
  gfs2: Clean up properly during a withdraw
  gfs2: Rename gfs2_{gl_dq_holders => withdraw_glocks}
  Revert "gfs2: fix infinite loop when checking ail item count before go_inval"
  Revert "gfs2: Allow some glocks to be used during withdraw"
  Revert "gfs2: Check for log write errors before telling dlm to unlock"
  Revert "gfs2: fix a deadlock on withdraw-during-mount"
  Revert "gfs2: Force withdraw to replay journals and wait for it to finish" (6/6)
  Revert "gfs2: Force withdraw to replay journals and wait for it to finish" (5/6)
  Revert "gfs2: Force withdraw to replay journals and wait for it to finish" (4/6)
  Revert "gfs2: Force withdraw to replay journals and wait for it to finish" (3/6)
  Revert "gfs2: Force withdraw to replay journals and wait for it to finish" (2/6)
  Revert "gfs2: Force withdraw to replay journals and wait for it to finish" (1/6)
  Revert "gfs2: don't stop reads while withdraw in progress"
  gfs2: Rename LM_FLAG_{NOEXP -> RECOVER}
  gfs2: Kill gfs2_io_error_bh_wd
  ...
This commit is contained in:
commit afcbce74f3
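The DLM_RELEASE_RECOVER-based withdraw handling summarized above comes down to how the lockspace is released. Below is a minimal sketch, adapted from the gdlm_unmount() hunk later in this diff, of how the new boolean "clean" argument to lm_unmount() maps onto dlm's release modes; the function name is illustrative, the surrounding gfs2/dlm kernel definitions are assumed, and this is not the exact upstream code.

/*
 * Sketch only: assumes gfs2's struct gfs2_sbd / struct lm_lockstruct and
 * dlm's DLM_RELEASE_* constants.  A clean unmount releases the lockspace
 * normally; an unclean release (withdraw) uses DLM_RELEASE_RECOVER so the
 * remaining nodes treat this node like a failed node and replay its journal.
 */
static void gdlm_unmount_sketch(struct gfs2_sbd *sdp, bool clean)
{
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;

        down_write(&ls->ls_sem);
        if (ls->ls_dlm) {
                dlm_release_lockspace(ls->ls_dlm,
                                      clean ? DLM_RELEASE_NORMAL :
                                              DLM_RELEASE_RECOVER);
                ls->ls_dlm = NULL;
        }
        up_write(&ls->ls_sem);
}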
@ -4,6 +4,9 @@
Global File System 2
====================

Overview
========

GFS2 is a cluster file system. It allows a cluster of computers to
simultaneously use a block device that is shared between them (with FC,
iSCSI, NBD, etc). GFS2 reads and writes to the block device like a local
@ -50,3 +53,12 @@ The following man pages are available from gfs2-utils:
gfs2_convert to convert a gfs filesystem to GFS2 in-place
mkfs.gfs2 to make a filesystem
============ =============================================

Implementation Notes
====================

.. toctree::
   :maxdepth: 1

   glocks
   uevents
@ -89,9 +89,7 @@ Documentation for filesystem implementations.
ext3
ext4/index
f2fs
gfs2
gfs2-uevents
gfs2-glocks
gfs2/index
hfs
hfsplus
hpfs
@ -10535,7 +10535,7 @@ L: gfs2@lists.linux.dev
S: Supported
B: https://bugzilla.kernel.org/enter_bug.cgi?product=File%20System&component=gfs2
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2.git
F: Documentation/filesystems/gfs2*
F: Documentation/filesystems/gfs2/
F: fs/gfs2/
F: include/uapi/linux/gfs2_ondisk.h
@ -431,7 +431,7 @@ static int gfs2_read_folio(struct file *file, struct folio *folio)
error = mpage_read_folio(folio, gfs2_block_map);
}

if (gfs2_withdrawing_or_withdrawn(sdp))
if (gfs2_withdrawn(sdp))
return -EIO;

return error;
@ -1446,7 +1446,7 @@ static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)

if (!(fl->c.flc_flags & FL_POSIX))
return -ENOLCK;
if (gfs2_withdrawing_or_withdrawn(sdp)) {
if (gfs2_withdrawn(sdp)) {
if (lock_is_unlock(fl))
locks_lock_file_wait(file, fl);
return -EIO;
217 fs/gfs2/glock.c
@ -137,33 +137,6 @@ static void gfs2_glock_dealloc(struct rcu_head *rcu)
kmem_cache_free(gfs2_glock_cachep, gl);
}

/**
* glock_blocked_by_withdraw - determine if we can still use a glock
* @gl: the glock
*
* We need to allow some glocks to be enqueued, dequeued, promoted, and demoted
* when we're withdrawn. For example, to maintain metadata integrity, we should
* disallow the use of inode and rgrp glocks when withdrawn. Other glocks like
* the iopen or freeze glock may be safely used because none of their
* metadata goes through the journal. So in general, we should disallow all
* glocks that are journaled, and allow all the others. One exception is:
* we need to allow our active journal to be promoted and demoted so others
* may recover it and we can reacquire it when they're done.
*/
static bool glock_blocked_by_withdraw(struct gfs2_glock *gl)
{
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

if (!gfs2_withdrawing_or_withdrawn(sdp))
return false;
if (gl->gl_ops->go_flags & GLOF_NONDISK)
return false;
if (!sdp->sd_jdesc ||
gl->gl_name.ln_number == sdp->sd_jdesc->jd_no_addr)
return false;
return true;
}

static void __gfs2_glock_free(struct gfs2_glock *gl)
{
rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
@ -270,7 +243,7 @@ static void __gfs2_glock_put(struct gfs2_glock *gl)
GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
if (mapping) {
truncate_inode_pages_final(mapping);
if (!gfs2_withdrawing_or_withdrawn(sdp))
if (!gfs2_withdrawn(sdp))
GLOCK_BUG_ON(gl, !mapping_empty(mapping));
}
trace_gfs2_glock_put(gl);
@ -485,8 +458,14 @@ int gfs2_instantiate(struct gfs2_holder *gh)

static void do_promote(struct gfs2_glock *gl)
{
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
struct gfs2_holder *gh, *current_gh;

if (gfs2_withdrawn(sdp)) {
do_error(gl, LM_OUT_ERROR);
return;
}

current_gh = find_first_holder(gl);
list_for_each_entry(gh, &gl->gl_holders, gh_list) {
if (test_bit(HIF_HOLDER, &gh->gh_iflags))
@ -592,7 +571,6 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
state_change(gl, state);
}

/* Demote to UN request arrived during demote to SH or DF */
if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
gl->gl_state != LM_ST_UNLOCKED &&
@ -663,16 +641,6 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
clear_bit(GLF_LOCK, &gl->gl_flags);
}

static bool is_system_glock(struct gfs2_glock *gl)
{
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);

if (gl == m_ip->i_gl)
return true;
return false;
}

/**
* do_xmote - Calls the DLM to change the state of a lock
* @gl: The lock state
@ -691,95 +659,47 @@ __acquires(&gl->gl_lockref.lock)
|
|||
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
|
||||
int ret;
|
||||
|
||||
if (target != LM_ST_UNLOCKED && glock_blocked_by_withdraw(gl) &&
|
||||
gh && !(gh->gh_flags & LM_FLAG_NOEXP))
|
||||
goto skip_inval;
|
||||
/*
|
||||
* When a filesystem is withdrawing, the remaining cluster nodes will
|
||||
* take care of recovering the withdrawing node's journal. We only
|
||||
* need to make sure that once we trigger remote recovery, we won't
|
||||
* write to the shared block device anymore. This means that here,
|
||||
*
|
||||
* - no new writes to the filesystem must be triggered (->go_sync()).
|
||||
*
|
||||
* - any cached data should be discarded by calling ->go_inval(), dirty
|
||||
* or not and journaled or unjournaled.
|
||||
*
|
||||
* - no more dlm locking operations should be issued (->lm_lock()).
|
||||
*/
|
||||
|
||||
GLOCK_BUG_ON(gl, gl->gl_state == target);
|
||||
GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
|
||||
|
||||
if (!glops->go_inval || !glops->go_sync)
|
||||
goto skip_inval;
|
||||
|
||||
spin_unlock(&gl->gl_lockref.lock);
|
||||
if (!gfs2_withdrawn(sdp)) {
|
||||
ret = glops->go_sync(gl);
|
||||
/* If we had a problem syncing (due to io errors or whatever,
|
||||
* we should not invalidate the metadata or tell dlm to
|
||||
* release the glock to other nodes.
|
||||
*/
|
||||
if (ret) {
|
||||
if (cmpxchg(&sdp->sd_log_error, 0, ret)) {
|
||||
fs_err(sdp, "Error %d syncing glock\n", ret);
|
||||
gfs2_dump_glock(NULL, gl, true);
|
||||
gfs2_withdraw(sdp);
|
||||
}
|
||||
}
|
||||
spin_lock(&gl->gl_lockref.lock);
|
||||
goto skip_inval;
|
||||
}
|
||||
|
||||
if (target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) {
|
||||
/*
|
||||
* The call to go_sync should have cleared out the ail list.
|
||||
* If there are still items, we have a problem. We ought to
|
||||
* withdraw, but we can't because the withdraw code also uses
|
||||
* glocks. Warn about the error, dump the glock, then fall
|
||||
* through and wait for logd to do the withdraw for us.
|
||||
*/
|
||||
if ((atomic_read(&gl->gl_ail_count) != 0) &&
|
||||
(!cmpxchg(&sdp->sd_log_error, 0, -EIO))) {
|
||||
gfs2_glock_assert_warn(gl,
|
||||
!atomic_read(&gl->gl_ail_count));
|
||||
gfs2_dump_glock(NULL, gl, true);
|
||||
}
|
||||
if (target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED)
|
||||
glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
|
||||
}
|
||||
spin_lock(&gl->gl_lockref.lock);
|
||||
|
||||
skip_inval:
|
||||
/*
|
||||
* Check for an error encountered since we called go_sync and go_inval.
|
||||
* If so, we can't withdraw from the glock code because the withdraw
|
||||
* code itself uses glocks (see function signal_our_withdraw) to
|
||||
* change the mount to read-only. Most importantly, we must not call
|
||||
* dlm to unlock the glock until the journal is in a known good state
|
||||
* (after journal replay) otherwise other nodes may use the object
|
||||
* (rgrp or dinode) and then later, journal replay will corrupt the
|
||||
* file system. The best we can do here is wait for the logd daemon
|
||||
* to see sd_log_error and withdraw, and in the meantime, requeue the
|
||||
* work for later.
|
||||
*
|
||||
* We make a special exception for some system glocks, such as the
|
||||
* system statfs inode glock, which needs to be granted before the
|
||||
* gfs2_quotad daemon can exit, and that exit needs to finish before
|
||||
* we can unmount the withdrawn file system.
|
||||
*
|
||||
* However, if we're just unlocking the lock (say, for unmount, when
|
||||
* gfs2_gl_hash_clear calls clear_glock) and recovery is complete
|
||||
* then it's okay to tell dlm to unlock it.
|
||||
*/
|
||||
if (unlikely(sdp->sd_log_error) && !gfs2_withdrawing_or_withdrawn(sdp))
|
||||
gfs2_withdraw_delayed(sdp);
|
||||
if (glock_blocked_by_withdraw(gl) &&
|
||||
(target != LM_ST_UNLOCKED ||
|
||||
test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags))) {
|
||||
if (!is_system_glock(gl)) {
|
||||
request_demote(gl, LM_ST_UNLOCKED, 0, false);
|
||||
/*
|
||||
* Ordinarily, we would call dlm and its callback would call
|
||||
* finish_xmote, which would call state_change() to the new state.
|
||||
* Since we withdrew, we won't call dlm, so call state_change
|
||||
* manually, but to the UNLOCKED state we desire.
|
||||
*/
|
||||
state_change(gl, LM_ST_UNLOCKED);
|
||||
/*
|
||||
* We skip telling dlm to do the locking, so we won't get a
|
||||
* reply that would otherwise clear GLF_LOCK. So we clear it here.
|
||||
*/
|
||||
if (!test_bit(GLF_CANCELING, &gl->gl_flags))
|
||||
clear_bit(GLF_LOCK, &gl->gl_flags);
|
||||
clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
|
||||
gl->gl_lockref.count++;
|
||||
gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD);
|
||||
return;
|
||||
}
|
||||
if (gfs2_withdrawn(sdp)) {
|
||||
if (target != LM_ST_UNLOCKED)
|
||||
target = LM_OUT_ERROR;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (ls->ls_ops->lm_lock) {
|
||||
|
|
@ -795,19 +715,23 @@ __acquires(&gl->gl_lockref.lock)
|
|||
}
|
||||
clear_bit(GLF_PENDING_REPLY, &gl->gl_flags);
|
||||
|
||||
if (ret == -ENODEV && gl->gl_target == LM_ST_UNLOCKED &&
|
||||
target == LM_ST_UNLOCKED) {
|
||||
if (ret == -ENODEV) {
|
||||
/*
|
||||
* The lockspace has been released and the lock has
|
||||
* been unlocked implicitly.
|
||||
*/
|
||||
if (target != LM_ST_UNLOCKED) {
|
||||
target = LM_OUT_ERROR;
|
||||
goto out;
|
||||
}
|
||||
} else {
|
||||
fs_err(sdp, "lm_lock ret %d\n", ret);
|
||||
GLOCK_BUG_ON(gl, !gfs2_withdrawing_or_withdrawn(sdp));
|
||||
GLOCK_BUG_ON(gl, !gfs2_withdrawn(sdp));
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
out:
|
||||
/* Complete the operation now. */
|
||||
finish_xmote(gl, target);
|
||||
gl->gl_lockref.count++;
|
||||
|
|
@ -966,14 +890,14 @@ static struct gfs2_inode *gfs2_grab_existing_inode(struct gfs2_glock *gl)
|
|||
return ip;
|
||||
}
|
||||
|
||||
static void gfs2_try_evict(struct gfs2_glock *gl)
|
||||
static void gfs2_try_to_evict(struct gfs2_glock *gl)
|
||||
{
|
||||
struct gfs2_inode *ip;
|
||||
|
||||
/*
|
||||
* If there is contention on the iopen glock and we have an inode, try
|
||||
* to grab and release the inode so that it can be evicted. The
|
||||
* GIF_DEFER_DELETE flag indicates to gfs2_evict_inode() that the inode
|
||||
* GLF_DEFER_DELETE flag indicates to gfs2_evict_inode() that the inode
|
||||
* should not be deleted locally. This will allow the remote node to
|
||||
* go ahead and delete the inode without us having to do it, which will
|
||||
* avoid rgrp glock thrashing.
|
||||
|
|
@ -1026,8 +950,14 @@ static void delete_work_func(struct work_struct *work)
|
|||
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
|
||||
bool verify_delete = test_and_clear_bit(GLF_VERIFY_DELETE, &gl->gl_flags);
|
||||
|
||||
/*
|
||||
* Check for the GLF_VERIFY_DELETE above: this ensures that we won't
|
||||
* immediately process GLF_VERIFY_DELETE work that the below call to
|
||||
* gfs2_try_to_evict() queues.
|
||||
*/
|
||||
|
||||
if (test_and_clear_bit(GLF_TRY_TO_EVICT, &gl->gl_flags))
|
||||
gfs2_try_evict(gl);
|
||||
gfs2_try_to_evict(gl);
|
||||
|
||||
if (verify_delete) {
|
||||
u64 no_addr = gl->gl_name.ln_number;
|
||||
|
|
@ -1211,10 +1141,13 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
|
|||
|
||||
mapping = gfs2_glock2aspace(gl);
|
||||
if (mapping) {
|
||||
gfp_t gfp_mask;
|
||||
|
||||
mapping->a_ops = &gfs2_meta_aops;
|
||||
mapping->host = sdp->sd_inode;
|
||||
mapping->flags = 0;
|
||||
mapping_set_gfp_mask(mapping, GFP_NOFS);
|
||||
gfp_mask = mapping_gfp_mask(sdp->sd_inode->i_mapping);
|
||||
mapping_set_gfp_mask(mapping, gfp_mask);
|
||||
mapping->i_private_data = NULL;
|
||||
mapping->writeback_index = 0;
|
||||
}
|
||||
|
|
@ -1241,7 +1174,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
|
|||
* @state: the state we're requesting
|
||||
* @flags: the modifier flags
|
||||
* @gh: the holder structure
|
||||
*
|
||||
* @ip: caller's return address for debugging
|
||||
*/
|
||||
|
||||
void __gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, u16 flags,
|
||||
|
|
@ -1539,9 +1472,10 @@ static inline void add_to_queue(struct gfs2_holder *gh)
|
|||
int gfs2_glock_nq(struct gfs2_holder *gh)
|
||||
{
|
||||
struct gfs2_glock *gl = gh->gh_gl;
|
||||
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
|
||||
int error;
|
||||
|
||||
if (glock_blocked_by_withdraw(gl) && !(gh->gh_flags & LM_FLAG_NOEXP))
|
||||
if (gfs2_withdrawn(sdp))
|
||||
return -EIO;
|
||||
|
||||
if (gh->gh_flags & GL_NOBLOCK) {
|
||||
|
|
@ -1566,7 +1500,7 @@ int gfs2_glock_nq(struct gfs2_holder *gh)
|
|||
gh->gh_error = 0;
|
||||
spin_lock(&gl->gl_lockref.lock);
|
||||
add_to_queue(gh);
|
||||
if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) &&
|
||||
if (unlikely((LM_FLAG_RECOVER & gh->gh_flags) &&
|
||||
test_and_clear_bit(GLF_HAVE_FROZEN_REPLY, &gl->gl_flags))) {
|
||||
set_bit(GLF_HAVE_REPLY, &gl->gl_flags);
|
||||
gl->gl_lockref.count++;
|
||||
|
|
@ -1639,7 +1573,6 @@ static void __gfs2_glock_dq(struct gfs2_holder *gh)
|
|||
void gfs2_glock_dq(struct gfs2_holder *gh)
|
||||
{
|
||||
struct gfs2_glock *gl = gh->gh_gl;
|
||||
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
|
||||
|
||||
spin_lock(&gl->gl_lockref.lock);
|
||||
if (!gfs2_holder_queued(gh)) {
|
||||
|
|
@ -1666,24 +1599,6 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
|
|||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* If we're in the process of file system withdraw, we cannot just
|
||||
* dequeue any glocks until our journal is recovered, lest we introduce
|
||||
* file system corruption. We need two exceptions to this rule: We need
|
||||
* to allow unlocking of nondisk glocks and the glock for our own
|
||||
* journal that needs recovery.
|
||||
*/
|
||||
if (test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags) &&
|
||||
glock_blocked_by_withdraw(gl) &&
|
||||
gh->gh_gl != sdp->sd_jinode_gl) {
|
||||
sdp->sd_glock_dqs_held++;
|
||||
spin_unlock(&gl->gl_lockref.lock);
|
||||
might_sleep();
|
||||
wait_on_bit(&sdp->sd_flags, SDF_WITHDRAW_RECOVERY,
|
||||
TASK_UNINTERRUPTIBLE);
|
||||
spin_lock(&gl->gl_lockref.lock);
|
||||
}
|
||||
|
||||
__gfs2_glock_dq(gh);
|
||||
out:
|
||||
spin_unlock(&gl->gl_lockref.lock);
|
||||
|
|
@ -1871,7 +1786,7 @@ void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
|
|||
*
|
||||
* Glocks are not frozen if (a) the result of the dlm operation is
|
||||
* an error, (b) the locking operation was an unlock operation or
|
||||
* (c) if there is a "noexp" flagged request anywhere in the queue
|
||||
* (c) if there is a "recover" flagged request anywhere in the queue
|
||||
*
|
||||
* Returns: 1 if freezing should occur, 0 otherwise
|
||||
*/
|
||||
|
|
@ -1888,7 +1803,7 @@ static int gfs2_should_freeze(const struct gfs2_glock *gl)
|
|||
list_for_each_entry(gh, &gl->gl_holders, gh_list) {
|
||||
if (test_bit(HIF_HOLDER, &gh->gh_iflags))
|
||||
continue;
|
||||
if (LM_FLAG_NOEXP & gh->gh_flags)
|
||||
if (LM_FLAG_RECOVER & gh->gh_flags)
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
@ -2165,18 +2080,26 @@ static void dump_glock_func(struct gfs2_glock *gl)
|
|||
dump_glock(NULL, gl, true);
|
||||
}
|
||||
|
||||
static void withdraw_dq(struct gfs2_glock *gl)
|
||||
static void withdraw_glock(struct gfs2_glock *gl)
|
||||
{
|
||||
spin_lock(&gl->gl_lockref.lock);
|
||||
if (!__lockref_is_dead(&gl->gl_lockref) &&
|
||||
glock_blocked_by_withdraw(gl))
|
||||
if (!__lockref_is_dead(&gl->gl_lockref)) {
|
||||
/*
|
||||
* We don't want to write back any more dirty data. Unlock the
|
||||
* remaining inode and resource group glocks; this will cause
|
||||
* their ->go_inval() hooks to toss out all the remaining
|
||||
* cached data, dirty or not.
|
||||
*/
|
||||
if (gl->gl_ops->go_inval && gl->gl_state != LM_ST_UNLOCKED)
|
||||
request_demote(gl, LM_ST_UNLOCKED, 0, false);
|
||||
do_error(gl, LM_OUT_ERROR); /* remove pending waiters */
|
||||
}
|
||||
spin_unlock(&gl->gl_lockref.lock);
|
||||
}
|
||||
|
||||
void gfs2_gl_dq_holders(struct gfs2_sbd *sdp)
|
||||
void gfs2_withdraw_glocks(struct gfs2_sbd *sdp)
|
||||
{
|
||||
glock_hash_walk(withdraw_dq, sdp);
|
||||
glock_hash_walk(withdraw_glock, sdp);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
@ -2237,7 +2160,7 @@ static const char *hflags2str(char *buf, u16 flags, unsigned long iflags)
|
|||
*p++ = 't';
|
||||
if (flags & LM_FLAG_TRY_1CB)
|
||||
*p++ = 'T';
|
||||
if (flags & LM_FLAG_NOEXP)
|
||||
if (flags & LM_FLAG_RECOVER)
|
||||
*p++ = 'e';
|
||||
if (flags & LM_FLAG_ANY)
|
||||
*p++ = 'A';
|
||||
|
|
@ -2324,8 +2247,6 @@ static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
|
|||
*p++ = 'o';
|
||||
if (test_bit(GLF_BLOCKING, gflags))
|
||||
*p++ = 'b';
|
||||
if (test_bit(GLF_UNLOCKED, gflags))
|
||||
*p++ = 'x';
|
||||
if (test_bit(GLF_INSTANTIATE_NEEDED, gflags))
|
||||
*p++ = 'n';
|
||||
if (test_bit(GLF_INSTANTIATE_IN_PROG, gflags))
|
||||
|
|
|
|||
|
|
@ -58,10 +58,10 @@ enum {
|
|||
* LM_FLAG_TRY_1CB
|
||||
* Send one blocking callback if TRY is set and the lock is not granted.
|
||||
*
|
||||
* LM_FLAG_NOEXP
|
||||
* LM_FLAG_RECOVER
|
||||
* GFS sets this flag on lock requests it makes while doing journal recovery.
|
||||
* These special requests should not be blocked due to the recovery like
|
||||
* ordinary locks would be.
|
||||
* While ordinary requests are blocked until the end of recovery, requests
|
||||
* with this flag set do proceed.
|
||||
*
|
||||
* LM_FLAG_ANY
|
||||
* A SHARED request may also be granted in DEFERRED, or a DEFERRED request may
|
||||
|
|
@ -80,7 +80,7 @@ enum {
|
|||
|
||||
#define LM_FLAG_TRY 0x0001
|
||||
#define LM_FLAG_TRY_1CB 0x0002
|
||||
#define LM_FLAG_NOEXP 0x0004
|
||||
#define LM_FLAG_RECOVER 0x0004
|
||||
#define LM_FLAG_ANY 0x0008
|
||||
#define LM_FLAG_NODE_SCOPE 0x0020
|
||||
#define GL_ASYNC 0x0040
|
||||
|
|
@ -136,7 +136,7 @@ struct lm_lockops {
|
|||
void (*lm_first_done) (struct gfs2_sbd *sdp);
|
||||
void (*lm_recovery_result) (struct gfs2_sbd *sdp, unsigned int jid,
|
||||
unsigned int result);
|
||||
void (*lm_unmount) (struct gfs2_sbd *sdp);
|
||||
void (*lm_unmount) (struct gfs2_sbd *sdp, bool clean);
|
||||
void (*lm_withdraw) (struct gfs2_sbd *sdp);
|
||||
void (*lm_put_lock) (struct gfs2_glock *gl);
|
||||
int (*lm_lock) (struct gfs2_glock *gl, unsigned int req_state,
|
||||
|
|
@ -263,7 +263,7 @@ bool gfs2_queue_verify_delete(struct gfs2_glock *gl, bool later);
|
|||
void gfs2_cancel_delete_work(struct gfs2_glock *gl);
|
||||
void gfs2_flush_delete_work(struct gfs2_sbd *sdp);
|
||||
void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
|
||||
void gfs2_gl_dq_holders(struct gfs2_sbd *sdp);
|
||||
void gfs2_withdraw_glocks(struct gfs2_sbd *sdp);
|
||||
void gfs2_glock_thaw(struct gfs2_sbd *sdp);
|
||||
void gfs2_glock_free(struct gfs2_glock *gl);
|
||||
void gfs2_glock_free_later(struct gfs2_glock *gl);
|
||||
|
|
|
|||
|
|
@ -30,8 +30,6 @@
|
|||
|
||||
struct workqueue_struct *gfs2_freeze_wq;
|
||||
|
||||
extern struct workqueue_struct *gfs2_control_wq;
|
||||
|
||||
static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
|
||||
{
|
||||
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
|
||||
|
|
@ -45,7 +43,7 @@ static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
|
|||
gl->gl_name.ln_type, gl->gl_name.ln_number,
|
||||
gfs2_glock2aspace(gl));
|
||||
gfs2_lm(sdp, "AIL error\n");
|
||||
gfs2_withdraw_delayed(sdp);
|
||||
gfs2_withdraw(sdp);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
@ -83,9 +81,6 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
|
|||
GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
|
||||
spin_unlock(&sdp->sd_ail_lock);
|
||||
gfs2_log_unlock(sdp);
|
||||
|
||||
if (gfs2_withdrawing(sdp))
|
||||
gfs2_withdraw(sdp);
|
||||
}
|
||||
|
||||
|
||||
|
|
@ -178,7 +173,7 @@ static int gfs2_rgrp_metasync(struct gfs2_glock *gl)
|
|||
|
||||
filemap_fdatawrite_range(metamapping, start, end);
|
||||
error = filemap_fdatawait_range(metamapping, start, end);
|
||||
WARN_ON_ONCE(error && !gfs2_withdrawing_or_withdrawn(sdp));
|
||||
WARN_ON_ONCE(error && !gfs2_withdrawn(sdp));
|
||||
mapping_set_error(metamapping, error);
|
||||
if (error)
|
||||
gfs2_io_error(sdp);
|
||||
|
|
@ -237,6 +232,7 @@ static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
|
|||
end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
|
||||
gfs2_rgrp_brelse(rgd);
|
||||
WARN_ON_ONCE(!(flags & DIO_METADATA));
|
||||
gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
|
||||
truncate_inode_pages_range(mapping, start, end);
|
||||
}
|
||||
|
||||
|
|
@ -363,6 +359,8 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags)
|
|||
{
|
||||
struct gfs2_inode *ip = gfs2_glock2inode(gl);
|
||||
|
||||
gfs2_assert_withdraw(gl->gl_name.ln_sbd, !atomic_read(&gl->gl_ail_count));
|
||||
|
||||
if (flags & DIO_METADATA) {
|
||||
struct address_space *mapping = gfs2_glock2aspace(gl);
|
||||
truncate_inode_pages(mapping, 0);
|
||||
|
|
@ -608,9 +606,9 @@ static int freeze_go_xmote_bh(struct gfs2_glock *gl)
|
|||
j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
|
||||
|
||||
error = gfs2_find_jhead(sdp->sd_jdesc, &head);
|
||||
if (gfs2_assert_withdraw_delayed(sdp, !error))
|
||||
if (gfs2_assert_withdraw(sdp, !error))
|
||||
return error;
|
||||
if (gfs2_assert_withdraw_delayed(sdp, head.lh_flags &
|
||||
if (gfs2_assert_withdraw(sdp, head.lh_flags &
|
||||
GFS2_LOG_HEAD_UNMOUNT))
|
||||
return -EIO;
|
||||
gfs2_log_pointers_init(sdp, &head);
|
||||
|
|
@ -630,8 +628,7 @@ static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
|
|||
struct gfs2_inode *ip = gl->gl_object;
|
||||
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
|
||||
|
||||
if (!remote || sb_rdonly(sdp->sd_vfs) ||
|
||||
test_bit(SDF_KILL, &sdp->sd_flags))
|
||||
if (!remote || test_bit(SDF_KILL, &sdp->sd_flags))
|
||||
return;
|
||||
|
||||
if (gl->gl_demote_state == LM_ST_UNLOCKED &&
|
||||
|
|
@ -642,76 +639,8 @@ static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* inode_go_unlocked - wake up anyone waiting for dlm's unlock ast
|
||||
* @gl: glock being unlocked
|
||||
*
|
||||
* For now, this is only used for the journal inode glock. In withdraw
|
||||
* situations, we need to wait for the glock to be unlocked so that we know
|
||||
* other nodes may proceed with recovery / journal replay.
|
||||
*/
|
||||
static void inode_go_unlocked(struct gfs2_glock *gl)
|
||||
{
|
||||
/* Note that we cannot reference gl_object because it's already set
|
||||
* to NULL by this point in its lifecycle. */
|
||||
if (!test_bit(GLF_UNLOCKED, &gl->gl_flags))
|
||||
return;
|
||||
clear_bit_unlock(GLF_UNLOCKED, &gl->gl_flags);
|
||||
wake_up_bit(&gl->gl_flags, GLF_UNLOCKED);
|
||||
}
|
||||
|
||||
/**
|
||||
* nondisk_go_callback - used to signal when a node did a withdraw
|
||||
* @gl: the nondisk glock
|
||||
* @remote: true if this came from a different cluster node
|
||||
*
|
||||
*/
|
||||
static void nondisk_go_callback(struct gfs2_glock *gl, bool remote)
|
||||
{
|
||||
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
|
||||
|
||||
/* Ignore the callback unless it's from another node, and it's the
|
||||
live lock. */
|
||||
if (!remote || gl->gl_name.ln_number != GFS2_LIVE_LOCK)
|
||||
return;
|
||||
|
||||
/* First order of business is to cancel the demote request. We don't
|
||||
* really want to demote a nondisk glock. At best it's just to inform
|
||||
* us of another node's withdraw. We'll keep it in SH mode. */
|
||||
clear_bit(GLF_DEMOTE, &gl->gl_flags);
|
||||
clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
|
||||
|
||||
/* Ignore the unlock if we're withdrawn, unmounting, or in recovery. */
|
||||
if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) ||
|
||||
test_bit(SDF_WITHDRAWN, &sdp->sd_flags) ||
|
||||
test_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags))
|
||||
return;
|
||||
|
||||
/* We only care when a node wants us to unlock, because that means
|
||||
* they want a journal recovered. */
|
||||
if (gl->gl_demote_state != LM_ST_UNLOCKED)
|
||||
return;
|
||||
|
||||
if (sdp->sd_args.ar_spectator) {
|
||||
fs_warn(sdp, "Spectator node cannot recover journals.\n");
|
||||
return;
|
||||
}
|
||||
|
||||
fs_warn(sdp, "Some node has withdrawn; checking for recovery.\n");
|
||||
set_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags);
|
||||
/*
|
||||
* We can't call remote_withdraw directly here or gfs2_recover_journal
|
||||
* because this is called from the glock unlock function and the
|
||||
* remote_withdraw needs to enqueue and dequeue the same "live" glock
|
||||
* we were called from. So we queue it to the control work queue in
|
||||
* lock_dlm.
|
||||
*/
|
||||
queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0);
|
||||
}
|
||||
|
||||
const struct gfs2_glock_operations gfs2_meta_glops = {
|
||||
.go_type = LM_TYPE_META,
|
||||
.go_flags = GLOF_NONDISK,
|
||||
};
|
||||
|
||||
const struct gfs2_glock_operations gfs2_inode_glops = {
|
||||
|
|
@ -722,7 +651,6 @@ const struct gfs2_glock_operations gfs2_inode_glops = {
|
|||
.go_dump = inode_go_dump,
|
||||
.go_type = LM_TYPE_INODE,
|
||||
.go_flags = GLOF_ASPACE | GLOF_LVB,
|
||||
.go_unlocked = inode_go_unlocked,
|
||||
};
|
||||
|
||||
const struct gfs2_glock_operations gfs2_rgrp_glops = {
|
||||
|
|
@ -738,36 +666,30 @@ const struct gfs2_glock_operations gfs2_freeze_glops = {
|
|||
.go_xmote_bh = freeze_go_xmote_bh,
|
||||
.go_callback = freeze_go_callback,
|
||||
.go_type = LM_TYPE_NONDISK,
|
||||
.go_flags = GLOF_NONDISK,
|
||||
};
|
||||
|
||||
const struct gfs2_glock_operations gfs2_iopen_glops = {
|
||||
.go_type = LM_TYPE_IOPEN,
|
||||
.go_callback = iopen_go_callback,
|
||||
.go_dump = inode_go_dump,
|
||||
.go_flags = GLOF_NONDISK,
|
||||
.go_subclass = 1,
|
||||
};
|
||||
|
||||
const struct gfs2_glock_operations gfs2_flock_glops = {
|
||||
.go_type = LM_TYPE_FLOCK,
|
||||
.go_flags = GLOF_NONDISK,
|
||||
};
|
||||
|
||||
const struct gfs2_glock_operations gfs2_nondisk_glops = {
|
||||
.go_type = LM_TYPE_NONDISK,
|
||||
.go_flags = GLOF_NONDISK,
|
||||
.go_callback = nondisk_go_callback,
|
||||
};
|
||||
|
||||
const struct gfs2_glock_operations gfs2_quota_glops = {
|
||||
.go_type = LM_TYPE_QUOTA,
|
||||
.go_flags = GLOF_LVB | GLOF_NONDISK,
|
||||
.go_flags = GLOF_LVB,
|
||||
};
|
||||
|
||||
const struct gfs2_glock_operations gfs2_journal_glops = {
|
||||
.go_type = LM_TYPE_JOURNAL,
|
||||
.go_flags = GLOF_NONDISK,
|
||||
};
|
||||
|
||||
const struct gfs2_glock_operations *gfs2_glops_list[] = {
|
||||
|
|
|
|||
|
|
@ -223,13 +223,11 @@ struct gfs2_glock_operations {
|
|||
void (*go_dump)(struct seq_file *seq, const struct gfs2_glock *gl,
|
||||
const char *fs_id_buf);
|
||||
void (*go_callback)(struct gfs2_glock *gl, bool remote);
|
||||
void (*go_unlocked)(struct gfs2_glock *gl);
|
||||
const int go_subclass;
|
||||
const int go_type;
|
||||
const unsigned long go_flags;
|
||||
#define GLOF_ASPACE 1 /* address space attached */
|
||||
#define GLOF_LVB 2 /* Lock Value Block attached */
|
||||
#define GLOF_NONDISK 8 /* not I/O related */
|
||||
};
|
||||
|
||||
enum {
|
||||
|
|
@ -326,7 +324,6 @@ enum {
|
|||
GLF_LRU = 13,
|
||||
GLF_OBJECT = 14, /* Used only for tracing */
|
||||
GLF_BLOCKING = 15,
|
||||
GLF_UNLOCKED = 16, /* Wait for glock to be unlocked */
|
||||
GLF_TRY_TO_EVICT = 17, /* iopen glocks only */
|
||||
GLF_VERIFY_DELETE = 18, /* iopen glocks only */
|
||||
GLF_PENDING_REPLY = 19,
|
||||
|
|
@ -520,8 +517,6 @@ struct gfs2_jdesc {
|
|||
|
||||
struct list_head jd_revoke_list;
|
||||
unsigned int jd_replay_tail;
|
||||
|
||||
u64 jd_no_addr;
|
||||
};
|
||||
|
||||
struct gfs2_statfs_change_host {
|
||||
|
|
@ -542,8 +537,7 @@ struct gfs2_statfs_change_host {
|
|||
|
||||
#define GFS2_ERRORS_DEFAULT GFS2_ERRORS_WITHDRAW
|
||||
#define GFS2_ERRORS_WITHDRAW 0
|
||||
#define GFS2_ERRORS_CONTINUE 1 /* place holder for future feature */
|
||||
#define GFS2_ERRORS_RO 2 /* place holder for future feature */
|
||||
#define GFS2_ERRORS_DEACTIVATE 1
|
||||
#define GFS2_ERRORS_PANIC 3
|
||||
|
||||
struct gfs2_args {
|
||||
|
|
@ -559,7 +553,7 @@ struct gfs2_args {
|
|||
unsigned int ar_data:2; /* ordered/writeback */
|
||||
unsigned int ar_meta:1; /* mount metafs */
|
||||
unsigned int ar_discard:1; /* discard requests */
|
||||
unsigned int ar_errors:2; /* errors=withdraw | panic */
|
||||
unsigned int ar_errors:2; /* errors=withdraw | deactivate | panic */
|
||||
unsigned int ar_nobarrier:1; /* do not send barriers */
|
||||
unsigned int ar_rgrplvb:1; /* use lvbs for rgrp info */
|
||||
unsigned int ar_got_rgrplvb:1; /* Was the rgrplvb opt given? */
|
||||
|
|
@ -585,6 +579,7 @@ struct gfs2_tune {
|
|||
unsigned int gt_complain_secs;
|
||||
unsigned int gt_statfs_quantum;
|
||||
unsigned int gt_statfs_slow;
|
||||
unsigned int gt_withdraw_helper_timeout;
|
||||
};
|
||||
|
||||
enum {
|
||||
|
|
@ -599,11 +594,6 @@ enum {
|
|||
SDF_SKIP_DLM_UNLOCK = 8,
|
||||
SDF_FORCE_AIL_FLUSH = 9,
|
||||
SDF_FREEZE_INITIATOR = 10,
|
||||
SDF_WITHDRAWING = 11, /* Will withdraw eventually */
|
||||
SDF_WITHDRAW_IN_PROG = 12, /* Withdraw is in progress */
|
||||
SDF_REMOTE_WITHDRAW = 13, /* Performing remote recovery */
|
||||
SDF_WITHDRAW_RECOVERY = 14, /* Wait for journal recovery when we are
|
||||
withdrawing */
|
||||
SDF_KILL = 15,
|
||||
SDF_EVICTING = 16,
|
||||
SDF_FROZEN = 17,
|
||||
|
|
@ -716,11 +706,13 @@ struct gfs2_sbd {
|
|||
struct gfs2_glock *sd_rename_gl;
|
||||
struct gfs2_glock *sd_freeze_gl;
|
||||
struct work_struct sd_freeze_work;
|
||||
struct work_struct sd_withdraw_work;
|
||||
wait_queue_head_t sd_kill_wait;
|
||||
wait_queue_head_t sd_async_glock_wait;
|
||||
atomic_t sd_glock_disposal;
|
||||
struct completion sd_locking_init;
|
||||
struct completion sd_wdack;
|
||||
struct completion sd_withdraw_helper;
|
||||
int sd_withdraw_helper_status;
|
||||
struct delayed_work sd_control_work;
|
||||
|
||||
/* Inode Stuff */
|
||||
|
|
@ -761,7 +753,6 @@ struct gfs2_sbd {
|
|||
struct gfs2_jdesc *sd_jdesc;
|
||||
struct gfs2_holder sd_journal_gh;
|
||||
struct gfs2_holder sd_jinode_gh;
|
||||
struct gfs2_glock *sd_jinode_gl;
|
||||
|
||||
struct gfs2_holder sd_sc_gh;
|
||||
struct buffer_head *sd_sc_bh;
|
||||
|
|
@ -846,7 +837,6 @@ struct gfs2_sbd {
|
|||
|
||||
unsigned long sd_last_warning;
|
||||
struct dentry *debugfs_dir; /* debugfs directory */
|
||||
unsigned long sd_glock_dqs_held;
|
||||
};
|
||||
|
||||
#define GFS2_BAD_INO 1
|
||||
|
|
|
|||
|
|
@ -89,6 +89,19 @@ static int iget_set(struct inode *inode, void *opaque)
|
|||
return 0;
|
||||
}
|
||||
|
||||
void gfs2_setup_inode(struct inode *inode)
|
||||
{
|
||||
gfp_t gfp_mask;
|
||||
|
||||
/*
|
||||
* Ensure all page cache allocations are done from GFP_NOFS context to
|
||||
* prevent direct reclaim recursion back into the filesystem and blowing
|
||||
* stacks or deadlocking.
|
||||
*/
|
||||
gfp_mask = mapping_gfp_mask(inode->i_mapping);
|
||||
mapping_set_gfp_mask(inode->i_mapping, gfp_mask & ~__GFP_FS);
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_inode_lookup - Lookup an inode
|
||||
* @sb: The super block
|
||||
|
|
@ -132,6 +145,7 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
|
|||
struct gfs2_glock *io_gl;
|
||||
int extra_flags = 0;
|
||||
|
||||
gfs2_setup_inode(inode);
|
||||
error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE,
|
||||
&ip->i_gl);
|
||||
if (unlikely(error))
|
||||
|
|
@ -752,6 +766,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
|
|||
error = -ENOMEM;
|
||||
if (!inode)
|
||||
goto fail_gunlock;
|
||||
gfs2_setup_inode(inode);
|
||||
ip = GFS2_I(inode);
|
||||
|
||||
error = posix_acl_create(dir, &mode, &default_acl, &acl);
|
||||
|
|
|
|||
|
|
@ -86,6 +86,7 @@ static inline int gfs2_check_internal_file_size(struct inode *inode,
|
|||
return -EIO;
|
||||
}
|
||||
|
||||
void gfs2_setup_inode(struct inode *inode);
|
||||
struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type,
|
||||
u64 no_addr, u64 no_formal_ino,
|
||||
unsigned int blktype);
|
||||
|
|
|
|||
|
|
@ -15,9 +15,6 @@
|
|||
#include <linux/sched/signal.h>
|
||||
|
||||
#include "incore.h"
|
||||
#include "glock.h"
|
||||
#include "glops.h"
|
||||
#include "recovery.h"
|
||||
#include "util.h"
|
||||
#include "sys.h"
|
||||
#include "trace_gfs2.h"
|
||||
|
|
@ -139,8 +136,6 @@ static void gdlm_ast(void *arg)
|
|||
|
||||
switch (gl->gl_lksb.sb_status) {
|
||||
case -DLM_EUNLOCK: /* Unlocked, so glock can be freed */
|
||||
if (gl->gl_ops->go_unlocked)
|
||||
gl->gl_ops->go_unlocked(gl);
|
||||
gfs2_glock_free(gl);
|
||||
return;
|
||||
case -DLM_ECANCEL: /* Cancel while getting lock */
|
||||
|
|
@ -399,7 +394,6 @@ static void gdlm_cancel(struct gfs2_glock *gl)
|
|||
/*
|
||||
* dlm/gfs2 recovery coordination using dlm_recover callbacks
|
||||
*
|
||||
* 0. gfs2 checks for another cluster node withdraw, needing journal replay
|
||||
* 1. dlm_controld sees lockspace members change
|
||||
* 2. dlm_controld blocks dlm-kernel locking activity
|
||||
* 3. dlm_controld within dlm-kernel notifies gfs2 (recover_prep)
|
||||
|
|
@ -657,28 +651,6 @@ static int control_lock(struct gfs2_sbd *sdp, int mode, uint32_t flags)
|
|||
&ls->ls_control_lksb, "control_lock");
|
||||
}
|
||||
|
||||
/**
|
||||
* remote_withdraw - react to a node withdrawing from the file system
|
||||
* @sdp: The superblock
|
||||
*/
|
||||
static void remote_withdraw(struct gfs2_sbd *sdp)
|
||||
{
|
||||
struct gfs2_jdesc *jd;
|
||||
int ret = 0, count = 0;
|
||||
|
||||
list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
|
||||
if (jd->jd_jid == sdp->sd_lockstruct.ls_jid)
|
||||
continue;
|
||||
ret = gfs2_recover_journal(jd, true);
|
||||
if (ret)
|
||||
break;
|
||||
count++;
|
||||
}
|
||||
|
||||
/* Now drop the additional reference we acquired */
|
||||
fs_err(sdp, "Journals checked: %d, ret = %d.\n", count, ret);
|
||||
}
|
||||
|
||||
static void gfs2_control_func(struct work_struct *work)
|
||||
{
|
||||
struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_control_work.work);
|
||||
|
|
@ -689,13 +661,6 @@ static void gfs2_control_func(struct work_struct *work)
|
|||
int recover_size;
|
||||
int i, error;
|
||||
|
||||
/* First check for other nodes that may have done a withdraw. */
|
||||
if (test_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags)) {
|
||||
remote_withdraw(sdp);
|
||||
clear_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags);
|
||||
return;
|
||||
}
|
||||
|
||||
spin_lock(&ls->ls_recover_spin);
|
||||
/*
|
||||
* No MOUNT_DONE means we're still mounting; control_mount()
|
||||
|
|
@ -1195,7 +1160,7 @@ static void gdlm_recover_prep(void *arg)
|
|||
struct gfs2_sbd *sdp = arg;
|
||||
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
|
||||
|
||||
if (gfs2_withdrawing_or_withdrawn(sdp)) {
|
||||
if (gfs2_withdrawn(sdp)) {
|
||||
fs_err(sdp, "recover_prep ignored due to withdraw.\n");
|
||||
return;
|
||||
}
|
||||
|
|
@ -1221,7 +1186,7 @@ static void gdlm_recover_slot(void *arg, struct dlm_slot *slot)
|
|||
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
|
||||
int jid = slot->slot - 1;
|
||||
|
||||
if (gfs2_withdrawing_or_withdrawn(sdp)) {
|
||||
if (gfs2_withdrawn(sdp)) {
|
||||
fs_err(sdp, "recover_slot jid %d ignored due to withdraw.\n",
|
||||
jid);
|
||||
return;
|
||||
|
|
@ -1250,7 +1215,7 @@ static void gdlm_recover_done(void *arg, struct dlm_slot *slots, int num_slots,
|
|||
struct gfs2_sbd *sdp = arg;
|
||||
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
|
||||
|
||||
if (gfs2_withdrawing_or_withdrawn(sdp)) {
|
||||
if (gfs2_withdrawn(sdp)) {
|
||||
fs_err(sdp, "recover_done ignored due to withdraw.\n");
|
||||
return;
|
||||
}
|
||||
|
|
@ -1281,7 +1246,7 @@ static void gdlm_recovery_result(struct gfs2_sbd *sdp, unsigned int jid,
|
|||
{
|
||||
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
|
||||
|
||||
if (gfs2_withdrawing_or_withdrawn(sdp)) {
|
||||
if (gfs2_withdrawn(sdp)) {
|
||||
fs_err(sdp, "recovery_result jid %d ignored due to withdraw.\n",
|
||||
jid);
|
||||
return;
|
||||
|
|
@ -1438,7 +1403,15 @@ static void gdlm_first_done(struct gfs2_sbd *sdp)
|
|||
fs_err(sdp, "mount first_done error %d\n", error);
|
||||
}
|
||||
|
||||
static void gdlm_unmount(struct gfs2_sbd *sdp)
|
||||
/*
|
||||
* gdlm_unmount - release our lockspace
|
||||
* @sdp: the superblock
|
||||
* @clean: Indicates whether or not the remaining nodes in the cluster should
|
||||
* perform recovery. Recovery is necessary when a node withdraws and
|
||||
* its journal remains dirty. Recovery isn't necessary when a node
|
||||
* cleanly unmounts a filesystem.
|
||||
*/
|
||||
static void gdlm_unmount(struct gfs2_sbd *sdp, bool clean)
|
||||
{
|
||||
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
|
||||
|
||||
|
|
@ -1456,7 +1429,9 @@ static void gdlm_unmount(struct gfs2_sbd *sdp)
|
|||
release:
|
||||
down_write(&ls->ls_sem);
|
||||
if (ls->ls_dlm) {
|
||||
dlm_release_lockspace(ls->ls_dlm, DLM_RELEASE_NORMAL);
|
||||
dlm_release_lockspace(ls->ls_dlm,
|
||||
clean ? DLM_RELEASE_NORMAL :
|
||||
DLM_RELEASE_RECOVER);
|
||||
ls->ls_dlm = NULL;
|
||||
}
|
||||
up_write(&ls->ls_sem);
|
||||
|
|
|
|||
|
|
@ -112,13 +112,11 @@ __acquires(&sdp->sd_ail_lock)
|
|||
&tr->tr_ail2_list);
|
||||
continue;
|
||||
}
|
||||
if (!cmpxchg(&sdp->sd_log_error, 0, -EIO)) {
|
||||
if (!cmpxchg(&sdp->sd_log_error, 0, -EIO))
|
||||
gfs2_io_error_bh(sdp, bh);
|
||||
gfs2_withdraw_delayed(sdp);
|
||||
}
|
||||
}
|
||||
|
||||
if (gfs2_withdrawing_or_withdrawn(sdp)) {
|
||||
if (gfs2_withdrawn(sdp)) {
|
||||
gfs2_remove_from_ail(bd);
|
||||
continue;
|
||||
}
|
||||
|
|
@ -324,10 +322,8 @@ static int gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
|
|||
continue;
|
||||
}
|
||||
if (!buffer_uptodate(bh) &&
|
||||
!cmpxchg(&sdp->sd_log_error, 0, -EIO)) {
|
||||
!cmpxchg(&sdp->sd_log_error, 0, -EIO))
|
||||
gfs2_io_error_bh(sdp, bh);
|
||||
gfs2_withdraw_delayed(sdp);
|
||||
}
|
||||
/*
|
||||
* If we have space for revokes and the bd is no longer on any
|
||||
* buf list, we can just add a revoke for it immediately and
|
||||
|
|
@ -807,9 +803,6 @@ void gfs2_flush_revokes(struct gfs2_sbd *sdp)
|
|||
gfs2_log_lock(sdp);
|
||||
gfs2_ail1_empty(sdp, max_revokes);
|
||||
gfs2_log_unlock(sdp);
|
||||
|
||||
if (gfs2_withdrawing(sdp))
|
||||
gfs2_withdraw(sdp);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
@ -837,7 +830,7 @@ void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
|
|||
struct super_block *sb = sdp->sd_vfs;
|
||||
u64 dblock;
|
||||
|
||||
if (gfs2_withdrawing_or_withdrawn(sdp))
|
||||
if (gfs2_withdrawn(sdp))
|
||||
return;
|
||||
|
||||
page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
|
||||
|
|
@ -984,12 +977,9 @@ static void empty_ail1_list(struct gfs2_sbd *sdp)
|
|||
gfs2_ail1_wait(sdp);
|
||||
empty = gfs2_ail1_empty(sdp, 0);
|
||||
|
||||
if (gfs2_withdrawing_or_withdrawn(sdp))
|
||||
if (gfs2_withdrawn(sdp))
|
||||
break;
|
||||
}
|
||||
|
||||
if (gfs2_withdrawing(sdp))
|
||||
gfs2_withdraw(sdp);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
@ -1050,7 +1040,7 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
|
|||
* Do this check while holding the log_flush_lock to prevent new
|
||||
* buffers from being added to the ail via gfs2_pin()
|
||||
*/
|
||||
if (gfs2_withdrawing_or_withdrawn(sdp) ||
|
||||
if (gfs2_withdrawn(sdp) ||
|
||||
!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
|
||||
goto out;
|
||||
|
||||
|
|
@ -1071,7 +1061,7 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
|
|||
sdp->sd_log_tr = NULL;
|
||||
tr->tr_first = first_log_head;
|
||||
if (unlikely(frozen)) {
|
||||
if (gfs2_assert_withdraw_delayed(sdp,
|
||||
if (gfs2_assert_withdraw(sdp,
|
||||
!tr->tr_num_buf_new && !tr->tr_num_databuf_new))
|
||||
goto out_withdraw;
|
||||
}
|
||||
|
|
@ -1096,18 +1086,18 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
|
|||
clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
|
||||
|
||||
if (unlikely(frozen))
|
||||
if (gfs2_assert_withdraw_delayed(sdp, !reserved_revokes))
|
||||
if (gfs2_assert_withdraw(sdp, !reserved_revokes))
|
||||
goto out_withdraw;
|
||||
|
||||
gfs2_ordered_write(sdp);
|
||||
if (gfs2_withdrawing_or_withdrawn(sdp))
|
||||
if (gfs2_withdrawn(sdp))
|
||||
goto out_withdraw;
|
||||
lops_before_commit(sdp, tr);
|
||||
if (gfs2_withdrawing_or_withdrawn(sdp))
|
||||
if (gfs2_withdrawn(sdp))
|
||||
goto out_withdraw;
|
||||
if (sdp->sd_jdesc)
|
||||
gfs2_log_submit_bio(&sdp->sd_jdesc->jd_log_bio, REQ_OP_WRITE);
|
||||
if (gfs2_withdrawing_or_withdrawn(sdp))
|
||||
if (gfs2_withdrawn(sdp))
|
||||
goto out_withdraw;
|
||||
|
||||
if (sdp->sd_log_head != sdp->sd_log_flush_head) {
|
||||
|
|
@ -1115,7 +1105,7 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
|
|||
} else if (sdp->sd_log_tail != sdp->sd_log_flush_tail && !sdp->sd_log_idle) {
|
||||
log_write_header(sdp, flags);
|
||||
}
|
||||
if (gfs2_withdrawing_or_withdrawn(sdp))
|
||||
if (gfs2_withdrawn(sdp))
|
||||
goto out_withdraw;
|
||||
lops_after_commit(sdp, tr);
|
||||
|
||||
|
|
@ -1133,7 +1123,7 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
|
|||
if (!(flags & GFS2_LOG_HEAD_FLUSH_NORMAL)) {
|
||||
if (!sdp->sd_log_idle) {
|
||||
empty_ail1_list(sdp);
|
||||
if (gfs2_withdrawing_or_withdrawn(sdp))
|
||||
if (gfs2_withdrawn(sdp))
|
||||
goto out_withdraw;
|
||||
log_write_header(sdp, flags);
|
||||
}
|
||||
|
|
@ -1151,13 +1141,11 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
|
|||
reserved_blocks += (reserved_revokes - sdp->sd_ldptrs) / sdp->sd_inptrs;
|
||||
out:
|
||||
if (used_blocks != reserved_blocks) {
|
||||
gfs2_assert_withdraw_delayed(sdp, used_blocks < reserved_blocks);
|
||||
gfs2_assert_withdraw(sdp, used_blocks < reserved_blocks);
|
||||
gfs2_log_release(sdp, reserved_blocks - used_blocks);
|
||||
}
|
||||
up_write(&sdp->sd_log_flush_lock);
|
||||
gfs2_trans_free(sdp, tr);
|
||||
if (gfs2_withdrawing(sdp))
|
||||
gfs2_withdraw(sdp);
|
||||
trace_gfs2_log_flush(sdp, 0, flags);
|
||||
return;
|
||||
|
||||
|
|
@ -1304,20 +1292,9 @@ int gfs2_logd(void *data)
|
|||
|
||||
set_freezable();
|
||||
while (!kthread_should_stop()) {
|
||||
if (gfs2_withdrawing_or_withdrawn(sdp))
|
||||
if (gfs2_withdrawn(sdp))
|
||||
break;
|
||||
|
||||
/* Check for errors writing to the journal */
|
||||
if (sdp->sd_log_error) {
|
||||
gfs2_lm(sdp,
|
||||
"GFS2: fsid=%s: error %d: "
|
||||
"withdrawing the file system to "
|
||||
"prevent further damage.\n",
|
||||
sdp->sd_fsname, sdp->sd_log_error);
|
||||
gfs2_withdraw(sdp);
|
||||
break;
|
||||
}
|
||||
|
||||
if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
|
||||
gfs2_ail1_empty(sdp, 0);
|
||||
gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
|
||||
|
|
@ -1340,15 +1317,11 @@ int gfs2_logd(void *data)
|
|||
test_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags) ||
|
||||
gfs2_ail_flush_reqd(sdp) ||
|
||||
gfs2_jrnl_flush_reqd(sdp) ||
|
||||
sdp->sd_log_error ||
|
||||
gfs2_withdrawing_or_withdrawn(sdp) ||
|
||||
gfs2_withdrawn(sdp) ||
|
||||
kthread_should_stop(),
|
||||
t);
|
||||
}
|
||||
|
||||
if (gfs2_withdrawing(sdp))
|
||||
gfs2_withdraw(sdp);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -49,7 +49,7 @@ void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
|
|||
if (test_set_buffer_pinned(bh))
|
||||
gfs2_assert_withdraw(sdp, 0);
|
||||
if (!buffer_uptodate(bh))
|
||||
gfs2_io_error_bh_wd(sdp, bh);
|
||||
gfs2_io_error_bh(sdp, bh);
|
||||
bd = bh->b_private;
|
||||
/* If this buffer is in the AIL and it has already been written
|
||||
* to in-place disk block, remove it from the AIL.
|
||||
|
|
@ -209,10 +209,7 @@ static void gfs2_end_log_write(struct bio *bio)
|
|||
if (!cmpxchg(&sdp->sd_log_error, 0, err))
|
||||
fs_err(sdp, "Error %d writing to journal, jid=%u\n",
|
||||
err, sdp->sd_jdesc->jd_jid);
|
||||
gfs2_withdraw_delayed(sdp);
|
||||
/* prevent more writes to the journal */
|
||||
clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
|
||||
wake_up(&sdp->sd_logd_waitq);
|
||||
gfs2_withdraw(sdp);
|
||||
}
|
||||
|
||||
bio_for_each_segment_all(bvec, bio, iter_all) {
|
||||
|
|
@ -487,7 +484,7 @@ static struct bio *gfs2_chain_bio(struct bio *prev, unsigned int nr_iovecs)
|
|||
new = bio_alloc(prev->bi_bdev, nr_iovecs, prev->bi_opf, GFP_NOIO);
|
||||
bio_clone_blkg_association(new, prev);
|
||||
new->bi_iter.bi_sector = bio_end_sector(prev);
|
||||
bio_chain(new, prev);
|
||||
bio_chain(prev, new);
|
||||
submit_bio(prev);
|
||||
return new;
|
||||
}
|
||||
|
|
@ -562,8 +559,7 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head)
|
|||
bio = gfs2_log_alloc_bio(sdp, dblock, gfs2_end_log_read);
|
||||
bio->bi_opf = REQ_OP_READ;
|
||||
add_block_to_new_bio:
|
||||
if (!bio_add_folio(bio, folio, bsize, off))
|
||||
BUG();
|
||||
bio_add_folio_nofail(bio, folio, bsize, off);
|
||||
block_added:
|
||||
off += bsize;
|
||||
if (off == folio_size(folio))
|
||||
|
|
|
|||
|
|
@ -263,8 +263,7 @@ int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
|
|||
struct buffer_head *bh, *bhs[2];
|
||||
int num = 0;
|
||||
|
||||
if (gfs2_withdrawing_or_withdrawn(sdp) &&
|
||||
!gfs2_withdraw_in_prog(sdp)) {
|
||||
if (gfs2_withdrawn(sdp)) {
|
||||
*bhp = NULL;
|
||||
return -EIO;
|
||||
}
|
||||
|
|
@ -303,7 +302,7 @@ int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
|
|||
if (unlikely(!buffer_uptodate(bh))) {
|
||||
struct gfs2_trans *tr = current->journal_info;
|
||||
if (tr && test_bit(TR_TOUCHED, &tr->tr_flags))
|
||||
gfs2_io_error_bh_wd(sdp, bh);
|
||||
gfs2_io_error_bh(sdp, bh);
|
||||
brelse(bh);
|
||||
*bhp = NULL;
|
||||
return -EIO;
|
||||
|
|
@ -322,8 +321,7 @@ int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
|
|||
|
||||
int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh)
|
||||
{
|
||||
if (gfs2_withdrawing_or_withdrawn(sdp) &&
|
||||
!gfs2_withdraw_in_prog(sdp))
|
||||
if (gfs2_withdrawn(sdp))
|
||||
return -EIO;
|
||||
|
||||
wait_on_buffer(bh);
|
||||
|
|
@ -331,11 +329,10 @@ int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh)
|
|||
if (!buffer_uptodate(bh)) {
|
||||
struct gfs2_trans *tr = current->journal_info;
|
||||
if (tr && test_bit(TR_TOUCHED, &tr->tr_flags))
|
||||
gfs2_io_error_bh_wd(sdp, bh);
|
||||
gfs2_io_error_bh(sdp, bh);
|
||||
return -EIO;
|
||||
}
|
||||
if (gfs2_withdrawing_or_withdrawn(sdp) &&
|
||||
!gfs2_withdraw_in_prog(sdp))
|
||||
if (gfs2_withdrawn(sdp))
|
||||
return -EIO;
|
||||
|
||||
return 0;
|
||||
|
|
|
|||
|
|
@ -60,6 +60,7 @@ static void gfs2_tune_init(struct gfs2_tune *gt)
|
|||
gt->gt_new_files_jdata = 0;
|
||||
gt->gt_max_readahead = BIT(18);
|
||||
gt->gt_complain_secs = 10;
|
||||
gt->gt_withdraw_helper_timeout = 5;
|
||||
}
|
||||
|
||||
void free_sbd(struct gfs2_sbd *sdp)
|
||||
|
|
@ -92,7 +93,7 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
|
|||
init_waitqueue_head(&sdp->sd_async_glock_wait);
|
||||
atomic_set(&sdp->sd_glock_disposal, 0);
|
||||
init_completion(&sdp->sd_locking_init);
|
||||
init_completion(&sdp->sd_wdack);
|
||||
init_completion(&sdp->sd_withdraw_helper);
|
||||
spin_lock_init(&sdp->sd_statfs_spin);
|
||||
|
||||
spin_lock_init(&sdp->sd_rindex_spin);
|
||||
|
|
@ -370,7 +371,7 @@ static int init_locking(struct gfs2_sbd *sdp, struct gfs2_holder *mount_gh,
|
|||
error = gfs2_glock_nq_num(sdp,
|
||||
GFS2_MOUNT_LOCK, &gfs2_nondisk_glops,
|
||||
LM_ST_EXCLUSIVE,
|
||||
LM_FLAG_NOEXP | GL_NOCACHE | GL_NOPID,
|
||||
LM_FLAG_RECOVER | GL_NOCACHE | GL_NOPID,
|
||||
mount_gh);
|
||||
if (error) {
|
||||
fs_err(sdp, "can't acquire mount glock: %d\n", error);
|
||||
|
|
@ -380,7 +381,7 @@ static int init_locking(struct gfs2_sbd *sdp, struct gfs2_holder *mount_gh,
|
|||
error = gfs2_glock_nq_num(sdp,
|
||||
GFS2_LIVE_LOCK, &gfs2_nondisk_glops,
|
||||
LM_ST_SHARED,
|
||||
LM_FLAG_NOEXP | GL_EXACT | GL_NOPID,
|
||||
LM_FLAG_RECOVER | GL_EXACT | GL_NOPID,
|
||||
&sdp->sd_live_gh);
|
||||
if (error) {
|
||||
fs_err(sdp, "can't acquire live glock: %d\n", error);
|
||||
|
|
@ -542,8 +543,6 @@ static int gfs2_jindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ji_gh)
|
|||
mutex_lock(&sdp->sd_jindex_mutex);
|
||||
|
||||
for (;;) {
|
||||
struct gfs2_inode *jip;
|
||||
|
||||
error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, ji_gh);
|
||||
if (error)
|
||||
break;
|
||||
|
|
@ -584,8 +583,6 @@ static int gfs2_jindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ji_gh)
|
|||
d_mark_dontcache(jd->jd_inode);
|
||||
spin_lock(&sdp->sd_jindex_spin);
|
||||
jd->jd_jid = sdp->sd_journals++;
|
||||
jip = GFS2_I(jd->jd_inode);
|
||||
jd->jd_no_addr = jip->i_no_addr;
|
||||
list_add_tail(&jd->jd_list, &sdp->sd_jindex_list);
|
||||
spin_unlock(&sdp->sd_jindex_spin);
|
||||
}
|
||||
|
|
@ -745,7 +742,7 @@ static int init_journal(struct gfs2_sbd *sdp, int undo)
|
|||
error = gfs2_glock_nq_num(sdp, sdp->sd_lockstruct.ls_jid,
|
||||
&gfs2_journal_glops,
|
||||
LM_ST_EXCLUSIVE,
|
||||
LM_FLAG_NOEXP | GL_NOCACHE | GL_NOPID,
|
||||
LM_FLAG_RECOVER | GL_NOPID,
|
||||
&sdp->sd_journal_gh);
|
||||
if (error) {
|
||||
fs_err(sdp, "can't acquire journal glock: %d\n", error);
|
||||
|
|
@ -753,9 +750,8 @@ static int init_journal(struct gfs2_sbd *sdp, int undo)
|
|||
}
|
||||
|
||||
ip = GFS2_I(sdp->sd_jdesc->jd_inode);
|
||||
sdp->sd_jinode_gl = ip->i_gl;
|
||||
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED,
|
||||
LM_FLAG_NOEXP | GL_EXACT |
|
||||
LM_FLAG_RECOVER | GL_EXACT |
|
||||
GL_NOCACHE | GL_NOPID,
|
||||
&sdp->sd_jinode_gh);
|
||||
if (error) {
|
||||
|
|
@ -821,13 +817,10 @@ static int init_journal(struct gfs2_sbd *sdp, int undo)
|
|||
fail_statfs:
|
||||
uninit_statfs(sdp);
|
||||
fail_jinode_gh:
|
||||
/* A withdraw may have done dq/uninit so now we need to check it */
|
||||
if (!sdp->sd_args.ar_spectator &&
|
||||
gfs2_holder_initialized(&sdp->sd_jinode_gh))
|
||||
if (!sdp->sd_args.ar_spectator)
|
||||
gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);
|
||||
fail_journal_gh:
|
||||
if (!sdp->sd_args.ar_spectator &&
|
||||
gfs2_holder_initialized(&sdp->sd_journal_gh))
|
||||
if (!sdp->sd_args.ar_spectator)
|
||||
gfs2_glock_dq_uninit(&sdp->sd_journal_gh);
|
||||
fail_jindex:
|
||||
gfs2_jindex_free(sdp);
|
||||
|
|
@ -1040,8 +1033,8 @@ static int gfs2_lm_mount(struct gfs2_sbd *sdp, int silent)
|
|||
void gfs2_lm_unmount(struct gfs2_sbd *sdp)
|
||||
{
|
||||
const struct lm_lockops *lm = sdp->sd_lockstruct.ls_ops;
|
||||
if (!gfs2_withdrawing_or_withdrawn(sdp) && lm->lm_unmount)
|
||||
lm->lm_unmount(sdp);
|
||||
if (!gfs2_withdrawn(sdp) && lm->lm_unmount)
|
||||
lm->lm_unmount(sdp, true);
|
||||
}
|
||||
|
||||
static int wait_on_journal(struct gfs2_sbd *sdp)
|
||||
|
|
@ -1183,7 +1176,7 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
|
|||
|
||||
mapping = gfs2_aspace(sdp);
|
||||
mapping->a_ops = &gfs2_rgrp_aops;
|
||||
mapping_set_gfp_mask(mapping, GFP_NOFS);
|
||||
gfs2_setup_inode(sdp->sd_inode);
|
||||
|
||||
error = init_names(sdp, silent);
|
||||
if (error)
|
||||
|
|
@ -1215,6 +1208,8 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
|
|||
if (error)
|
||||
goto fail_debug;
|
||||
|
||||
INIT_WORK(&sdp->sd_withdraw_work, gfs2_withdraw_func);
|
||||
|
||||
error = init_locking(sdp, &mount_gh, DO);
|
||||
if (error)
|
||||
goto fail_lm;
|
||||
|
|
@ -1402,11 +1397,13 @@ static const struct constant_table gfs2_param_data[] = {
|
|||
|
||||
enum opt_errors {
|
||||
Opt_errors_withdraw = GFS2_ERRORS_WITHDRAW,
|
||||
Opt_errors_deactivate = GFS2_ERRORS_DEACTIVATE,
|
||||
Opt_errors_panic = GFS2_ERRORS_PANIC,
|
||||
};
|
||||
|
||||
static const struct constant_table gfs2_param_errors[] = {
|
||||
{"withdraw", Opt_errors_withdraw },
|
||||
{"deactivate", Opt_errors_deactivate },
|
||||
{"panic", Opt_errors_panic },
|
||||
{}
|
||||
};
|
||||
|
|
|
|||
|
|
@@ -125,7 +125,7 @@ static void gfs2_qd_dispose(struct gfs2_quota_data *qd)
hlist_bl_del_rcu(&qd->qd_hlist);
spin_unlock_bucket(qd->qd_hash);

if (!gfs2_withdrawing_or_withdrawn(sdp)) {
if (!gfs2_withdrawn(sdp)) {
gfs2_assert_warn(sdp, !qd->qd_change);
gfs2_assert_warn(sdp, !qd->qd_slot_ref);
gfs2_assert_warn(sdp, !qd->qd_bh_count);
@@ -1551,27 +1551,13 @@ static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
{
if (error == 0 || error == -EROFS)
return;
if (!gfs2_withdrawing_or_withdrawn(sdp)) {
if (!gfs2_withdrawn(sdp)) {
if (!cmpxchg(&sdp->sd_log_error, 0, error))
fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
wake_up(&sdp->sd_logd_waitq);
}
}

static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
int (*fxn)(struct super_block *sb, int type),
unsigned long t, unsigned long *timeo,
unsigned int *new_timeo)
{
if (t >= *timeo) {
int error = fxn(sdp->sd_vfs, 0);
quotad_error(sdp, msg, error);
*timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
} else {
*timeo -= t;
}
}

void gfs2_wake_up_statfs(struct gfs2_sbd *sdp) {
if (!sdp->sd_statfs_force_sync) {
sdp->sd_statfs_force_sync = 1;
@@ -1589,36 +1575,46 @@ void gfs2_wake_up_statfs(struct gfs2_sbd *sdp) {
int gfs2_quotad(void *data)
{
struct gfs2_sbd *sdp = data;
struct gfs2_tune *tune = &sdp->sd_tune;
unsigned long statfs_timeo = 0;
unsigned long quotad_timeo = 0;
unsigned long t = 0;
unsigned long now = jiffies;
unsigned long statfs_deadline = now;
unsigned long quotad_deadline = now;

set_freezable();
while (!kthread_should_stop()) {
if (gfs2_withdrawing_or_withdrawn(sdp))
unsigned long t;

if (gfs2_withdrawn(sdp))
break;

now = jiffies;
if (sdp->sd_statfs_force_sync ||
time_after(now, statfs_deadline)) {
unsigned int quantum;
int error;

/* Update the master statfs file */
if (sdp->sd_statfs_force_sync) {
int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
error = gfs2_statfs_sync(sdp->sd_vfs, 0);
quotad_error(sdp, "statfs", error);
statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;

quantum = gfs2_tune_get(sdp, gt_statfs_quantum);
statfs_deadline = now + quantum * HZ;
}
else
quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
&statfs_timeo,
&tune->gt_statfs_quantum);
if (time_after(now, quotad_deadline)) {
unsigned int quantum;
int error;

/* Update quota file */
quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
&quotad_timeo, &tune->gt_quota_quantum);
/* Update the quota file */
error = gfs2_quota_sync(sdp->sd_vfs, 0);
quotad_error(sdp, "sync", error);

t = min(quotad_timeo, statfs_timeo);
quantum = gfs2_tune_get(sdp, gt_quota_quantum);
quotad_deadline = now + quantum * HZ;
}

t = wait_event_freezable_timeout(sdp->sd_quota_wait,
t = min(statfs_deadline - now, quotad_deadline - now);
wait_event_freezable_timeout(sdp->sd_quota_wait,
sdp->sd_statfs_force_sync ||
gfs2_withdrawing_or_withdrawn(sdp) ||
gfs2_withdrawn(sdp) ||
kthread_should_stop(),
t);
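The reworked gfs2_quotad() drops the relative-timeout helper quotad_check_timeo() in favour of absolute deadlines: each pass samples the clock once, runs whichever sync is due, pushes that deadline forward by its quantum, and then sleeps until the nearer of the two deadlines. A minimal userspace sketch of the same two-deadline loop (plain C with time(), illustrative names only, not kernel code):

#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* Illustrative stand-ins for the two periodic sync operations. */
static void statfs_sync(void) { puts("statfs sync"); }
static void quota_sync(void)  { puts("quota sync"); }

int main(void)
{
	const time_t statfs_quantum = 3;	/* seconds between statfs syncs */
	const time_t quota_quantum = 6;		/* seconds between quota syncs */
	time_t now = time(NULL);
	time_t statfs_deadline = now;
	time_t quota_deadline = now;

	for (int i = 0; i < 5; i++) {
		now = time(NULL);
		if (now >= statfs_deadline) {		/* statfs work is due */
			statfs_sync();
			statfs_deadline = now + statfs_quantum;
		}
		if (now >= quota_deadline) {		/* quota work is due */
			quota_sync();
			quota_deadline = now + quota_quantum;
		}
		/* Sleep until the nearer deadline, like the kthread's
		 * wait_event_freezable_timeout() call. */
		time_t t = statfs_deadline < quota_deadline ?
			   statfs_deadline - now : quota_deadline - now;
		sleep(t > 0 ? (unsigned int)t : 0);
	}
	return 0;
}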
@@ -408,7 +408,7 @@ void gfs2_recover_func(struct work_struct *work)
int error = 0;
int jlocked = 0;

if (gfs2_withdrawing_or_withdrawn(sdp)) {
if (gfs2_withdrawn(sdp)) {
fs_err(sdp, "jid=%u: Recovery not attempted due to withdraw.\n",
jd->jd_jid);
goto fail;

@@ -424,7 +424,8 @@ void gfs2_recover_func(struct work_struct *work)

error = gfs2_glock_nq_num(sdp, jd->jd_jid, &gfs2_journal_glops,
LM_ST_EXCLUSIVE,
LM_FLAG_NOEXP | LM_FLAG_TRY | GL_NOCACHE,
LM_FLAG_RECOVER | LM_FLAG_TRY |
GL_NOCACHE,
&j_gh);
switch (error) {
case 0:

@@ -440,7 +441,8 @@ void gfs2_recover_func(struct work_struct *work)
}

error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED,
LM_FLAG_NOEXP | GL_NOCACHE, &ji_gh);
LM_FLAG_RECOVER | GL_NOCACHE,
&ji_gh);
if (error)
goto fail_gunlock_j;
} else {
@@ -137,7 +137,7 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
int error;

j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
if (gfs2_withdrawing_or_withdrawn(sdp))
if (gfs2_withdrawn(sdp))
return -EIO;

if (sdp->sd_log_sequence == 0) {

@@ -147,7 +147,7 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
}

error = gfs2_quota_init(sdp);
if (!error && gfs2_withdrawing_or_withdrawn(sdp))
if (!error && gfs2_withdrawn(sdp))
error = -EIO;
if (!error)
set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

@@ -351,7 +351,7 @@ static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp)
gfs2_freeze_unlock(sdp);

error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_EXCLUSIVE,
LM_FLAG_NOEXP | GL_NOPID,
LM_FLAG_RECOVER | GL_NOPID,
&sdp->sd_freeze_gh);
if (error)
goto relock_shared;

@@ -491,7 +491,7 @@ static void gfs2_dirty_inode(struct inode *inode, int flags)
if (unlikely(!ip->i_gl))
return;

if (gfs2_withdrawing_or_withdrawn(sdp))
if (gfs2_withdrawn(sdp))
return;
if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);

@@ -597,13 +597,13 @@ static void gfs2_put_super(struct super_block *sb)
if (!sb_rdonly(sb))
gfs2_make_fs_ro(sdp);
else {
if (gfs2_withdrawing_or_withdrawn(sdp))
if (gfs2_withdrawn(sdp))
gfs2_destroy_threads(sdp);

gfs2_quota_cleanup(sdp);
}

WARN_ON(gfs2_withdrawing(sdp));
flush_work(&sdp->sd_withdraw_work);

/* At this point, we're through modifying the disk */

@@ -749,9 +749,7 @@ static int gfs2_freeze_super(struct super_block *sb, enum freeze_holder who,
break;
}

error = gfs2_do_thaw(sdp, who, freeze_owner);
if (error)
goto out;
(void)gfs2_do_thaw(sdp, who, freeze_owner);

if (error == -EBUSY)
fs_err(sdp, "waiting for recovery before freeze\n");

@@ -778,7 +776,7 @@ static int gfs2_freeze_fs(struct super_block *sb)
if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
GFS2_LFC_FREEZE_GO_SYNC);
if (gfs2_withdrawing_or_withdrawn(sdp))
if (gfs2_withdrawn(sdp))
return -EIO;
}
return 0;

@@ -819,20 +817,6 @@ static int gfs2_thaw_super(struct super_block *sb, enum freeze_holder who,
return error;
}

void gfs2_thaw_freeze_initiator(struct super_block *sb)
{
struct gfs2_sbd *sdp = sb->s_fs_info;

mutex_lock(&sdp->sd_freeze_mutex);
if (!test_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags))
goto out;

gfs2_freeze_unlock(sdp);

out:
mutex_unlock(&sdp->sd_freeze_mutex);
}

/**
* statfs_slow_fill - fill in the sg for a given RG
* @rgd: the RG

@@ -1147,6 +1131,9 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root)
case GFS2_ERRORS_WITHDRAW:
state = "withdraw";
break;
case GFS2_ERRORS_DEACTIVATE:
state = "deactivate";
break;
case GFS2_ERRORS_PANIC:
state = "panic";
break;
@@ -47,7 +47,6 @@ void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc,
void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh);
int gfs2_statfs_sync(struct super_block *sb, int type);
void gfs2_freeze_func(struct work_struct *work);
void gfs2_thaw_freeze_initiator(struct super_block *sb);

void free_local_statfs_inodes(struct gfs2_sbd *sdp);
struct inode *find_local_statfs_inode(struct gfs2_sbd *sdp,
@@ -59,7 +59,7 @@ static struct kset *gfs2_kset;

static ssize_t id_show(struct gfs2_sbd *sdp, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%u:%u\n",
return sysfs_emit(buf, "%u:%u\n",
MAJOR(sdp->sd_vfs->s_dev), MINOR(sdp->sd_vfs->s_dev));
}

@@ -68,7 +68,7 @@ static ssize_t status_show(struct gfs2_sbd *sdp, char *buf)
unsigned long f = sdp->sd_flags;
ssize_t s;

s = snprintf(buf, PAGE_SIZE,
s = sysfs_emit(buf,
"Journal Checked: %d\n"
"Journal Live: %d\n"
"Journal ID: %d\n"

@@ -84,10 +84,6 @@ static ssize_t status_show(struct gfs2_sbd *sdp, char *buf)
"Force AIL Flush: %d\n"
"FS Freeze Initiator: %d\n"
"FS Frozen: %d\n"
"Withdrawing: %d\n"
"Withdraw In Prog: %d\n"
"Remote Withdraw: %d\n"
"Withdraw Recovery: %d\n"
"Killing: %d\n"
"sd_log_error: %d\n"
"sd_log_flush_lock: %d\n"

@@ -117,10 +113,6 @@ static ssize_t status_show(struct gfs2_sbd *sdp, char *buf)
test_bit(SDF_FORCE_AIL_FLUSH, &f),
test_bit(SDF_FREEZE_INITIATOR, &f),
test_bit(SDF_FROZEN, &f),
test_bit(SDF_WITHDRAWING, &f),
test_bit(SDF_WITHDRAW_IN_PROG, &f),
test_bit(SDF_REMOTE_WITHDRAW, &f),
test_bit(SDF_WITHDRAW_RECOVERY, &f),
test_bit(SDF_KILL, &f),
sdp->sd_log_error,
rwsem_is_locked(&sdp->sd_log_flush_lock),

@@ -140,7 +132,7 @@ static ssize_t status_show(struct gfs2_sbd *sdp, char *buf)

static ssize_t fsname_show(struct gfs2_sbd *sdp, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%s\n", sdp->sd_fsname);
return sysfs_emit(buf, "%s\n", sdp->sd_fsname);
}

static ssize_t uuid_show(struct gfs2_sbd *sdp, char *buf)

@@ -150,7 +142,7 @@ static ssize_t uuid_show(struct gfs2_sbd *sdp, char *buf)
buf[0] = '\0';
if (uuid_is_null(&s->s_uuid))
return 0;
return snprintf(buf, PAGE_SIZE, "%pUB\n", &s->s_uuid);
return sysfs_emit(buf, "%pUB\n", &s->s_uuid);
}

static ssize_t freeze_show(struct gfs2_sbd *sdp, char *buf)

@@ -158,7 +150,7 @@ static ssize_t freeze_show(struct gfs2_sbd *sdp, char *buf)
struct super_block *sb = sdp->sd_vfs;
int frozen = (sb->s_writers.frozen == SB_UNFROZEN) ? 0 : 1;

return snprintf(buf, PAGE_SIZE, "%d\n", frozen);
return sysfs_emit(buf, "%d\n", frozen);
}

static ssize_t freeze_store(struct gfs2_sbd *sdp, const char *buf, size_t len)

@@ -193,8 +185,8 @@ static ssize_t freeze_store(struct gfs2_sbd *sdp, const char *buf, size_t len)

static ssize_t withdraw_show(struct gfs2_sbd *sdp, char *buf)
{
unsigned int b = gfs2_withdrawing_or_withdrawn(sdp);
return snprintf(buf, PAGE_SIZE, "%u\n", b);
unsigned int b = gfs2_withdrawn(sdp);
return sysfs_emit(buf, "%u\n", b);
}

static ssize_t withdraw_store(struct gfs2_sbd *sdp, const char *buf, size_t len)

@@ -397,7 +389,7 @@ static struct kobj_type gfs2_ktype = {
static ssize_t proto_name_show(struct gfs2_sbd *sdp, char *buf)
{
const struct lm_lockops *ops = sdp->sd_lockstruct.ls_ops;
return sprintf(buf, "%s\n", ops->lm_proto_name);
return sysfs_emit(buf, "%s\n", ops->lm_proto_name);
}

static ssize_t block_show(struct gfs2_sbd *sdp, char *buf)

@@ -408,7 +400,7 @@ static ssize_t block_show(struct gfs2_sbd *sdp, char *buf)

if (test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))
val = 1;
ret = sprintf(buf, "%d\n", val);
ret = sysfs_emit(buf, "%d\n", val);
return ret;
}
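Most of the churn in sys.c is mechanical: sprintf()/snprintf(buf, PAGE_SIZE, ...) in the show callbacks becomes sysfs_emit(buf, ...), which already knows sysfs buffers are one page and warns on misuse. A hedged sketch of the pattern with a made-up attribute (kernel-module style, not part of this series):

/* Illustrative sysfs show callback; the attribute and value are made up. */
#include <linux/kobject.h>
#include <linux/sysfs.h>

static unsigned int example_value;

static ssize_t example_show(struct kobject *kobj, struct kobj_attribute *attr,
			    char *buf)
{
	/* sysfs_emit() bounds the output to PAGE_SIZE internally, so the
	 * caller no longer passes the buffer size around by hand. */
	return sysfs_emit(buf, "%u\n", example_value);
}

/* Would normally be added to an attribute_group and registered. */
static struct kobj_attribute example_attr = __ATTR_RO(example);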
@@ -433,33 +425,27 @@ static ssize_t block_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
return len;
}

static ssize_t wdack_show(struct gfs2_sbd *sdp, char *buf)
{
int val = completion_done(&sdp->sd_wdack) ? 1 : 0;

return sprintf(buf, "%d\n", val);
}

static ssize_t wdack_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
static ssize_t withdraw_helper_status_store(struct gfs2_sbd *sdp,
const char *buf,
size_t len)
{
int ret, val;

ret = kstrtoint(buf, 0, &val);
if (ret)
return ret;

if ((val == 1) &&
!strcmp(sdp->sd_lockstruct.ls_ops->lm_proto_name, "lock_dlm"))
complete(&sdp->sd_wdack);
else
if (val < 0 || val > 1)
return -EINVAL;

sdp->sd_withdraw_helper_status = val;
complete(&sdp->sd_withdraw_helper);
return len;
}

static ssize_t lkfirst_show(struct gfs2_sbd *sdp, char *buf)
{
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
return sprintf(buf, "%d\n", ls->ls_first);
return sysfs_emit(buf, "%d\n", ls->ls_first);
}

static ssize_t lkfirst_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
@@ -492,7 +478,7 @@ static ssize_t lkfirst_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
static ssize_t first_done_show(struct gfs2_sbd *sdp, char *buf)
{
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
return sprintf(buf, "%d\n", !!test_bit(DFL_FIRST_MOUNT_DONE, &ls->ls_recover_flags));
return sysfs_emit(buf, "%d\n", !!test_bit(DFL_FIRST_MOUNT_DONE, &ls->ls_recover_flags));
}

int gfs2_recover_set(struct gfs2_sbd *sdp, unsigned jid)

@@ -550,18 +536,18 @@ static ssize_t recover_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
static ssize_t recover_done_show(struct gfs2_sbd *sdp, char *buf)
{
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
return sprintf(buf, "%d\n", ls->ls_recover_jid_done);
return sysfs_emit(buf, "%d\n", ls->ls_recover_jid_done);
}

static ssize_t recover_status_show(struct gfs2_sbd *sdp, char *buf)
{
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
return sprintf(buf, "%d\n", ls->ls_recover_jid_status);
return sysfs_emit(buf, "%d\n", ls->ls_recover_jid_status);
}

static ssize_t jid_show(struct gfs2_sbd *sdp, char *buf)
{
return sprintf(buf, "%d\n", sdp->sd_lockstruct.ls_jid);
return sysfs_emit(buf, "%d\n", sdp->sd_lockstruct.ls_jid);
}

static ssize_t jid_store(struct gfs2_sbd *sdp, const char *buf, size_t len)

@@ -599,7 +585,7 @@ static struct gfs2_attr gdlm_attr_##_name = __ATTR(_name,_mode,_show,_store)

GDLM_ATTR(proto_name, 0444, proto_name_show, NULL);
GDLM_ATTR(block, 0644, block_show, block_store);
GDLM_ATTR(withdraw, 0644, wdack_show, wdack_store);
GDLM_ATTR(withdraw, 0200, NULL, withdraw_helper_status_store);
GDLM_ATTR(jid, 0644, jid_show, jid_store);
GDLM_ATTR(first, 0644, lkfirst_show, lkfirst_store);
GDLM_ATTR(first_done, 0444, first_done_show, NULL);

@@ -626,7 +612,7 @@ static struct attribute *lock_module_attrs[] = {

static ssize_t quota_scale_show(struct gfs2_sbd *sdp, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%u %u\n",
return sysfs_emit(buf, "%u %u\n",
sdp->sd_tune.gt_quota_scale_num,
sdp->sd_tune.gt_quota_scale_den);
}

@@ -679,7 +665,7 @@ static struct gfs2_attr tune_attr_##name = __ATTR(name, 0644, show, store)
#define TUNE_ATTR_2(name, store) \
static ssize_t name##_show(struct gfs2_sbd *sdp, char *buf) \
{ \
return snprintf(buf, PAGE_SIZE, "%u\n", sdp->sd_tune.gt_##name); \
return sysfs_emit(buf, "%u\n", sdp->sd_tune.gt_##name); \
} \
TUNE_ATTR_3(name, name##_show, store)

@@ -698,6 +684,7 @@ TUNE_ATTR(statfs_slow, 0);
TUNE_ATTR(new_files_jdata, 0);
TUNE_ATTR(statfs_quantum, 1);
TUNE_ATTR_3(quota_scale, quota_scale_show, quota_scale_store);
TUNE_ATTR(withdraw_helper_timeout, 1);

static struct attribute *tune_attrs[] = {
&tune_attr_quota_warn_period.attr,

@@ -708,6 +695,7 @@ static struct attribute *tune_attrs[] = {
&tune_attr_statfs_quantum.attr,
&tune_attr_quota_scale.attr,
&tune_attr_new_files_jdata.attr,
&tune_attr_withdraw_helper_timeout.attr,
NULL,
};
@@ -59,7 +59,6 @@
{(1UL << GLF_LRU), "L" }, \
{(1UL << GLF_OBJECT), "o" }, \
{(1UL << GLF_BLOCKING), "b" }, \
{(1UL << GLF_UNLOCKED), "x" }, \
{(1UL << GLF_INSTANTIATE_NEEDED), "n" }, \
{(1UL << GLF_INSTANTIATE_IN_PROG), "N" }, \
{(1UL << GLF_TRY_TO_EVICT), "e" }, \
@@ -49,7 +49,7 @@ int __gfs2_trans_begin(struct gfs2_trans *tr, struct gfs2_sbd *sdp,
}
BUG_ON(blocks == 0 && revokes == 0);

if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
if (gfs2_withdrawn(sdp))
return -EROFS;

tr->tr_ip = ip;
@@ -85,27 +85,32 @@ int __gfs2_trans_begin(struct gfs2_trans *tr, struct gfs2_sbd *sdp,
*/

down_read(&sdp->sd_log_flush_lock);
if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)))
goto out_not_live;
if (gfs2_log_try_reserve(sdp, tr, &extra_revokes))
goto reserved;

up_read(&sdp->sd_log_flush_lock);
gfs2_log_reserve(sdp, tr, &extra_revokes);
down_read(&sdp->sd_log_flush_lock);
if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) {
revokes = tr->tr_revokes + extra_revokes;
gfs2_log_release_revokes(sdp, revokes);
gfs2_log_release(sdp, tr->tr_reserved);
goto out_not_live;
}

reserved:
gfs2_log_release_revokes(sdp, extra_revokes);
if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) {
gfs2_log_release_revokes(sdp, tr->tr_revokes);
current->journal_info = tr;
return 0;

out_not_live:
up_read(&sdp->sd_log_flush_lock);
gfs2_log_release(sdp, tr->tr_reserved);
sb_end_intwrite(sdp->sd_vfs);
return -EROFS;
}

current->journal_info = tr;

return 0;
}

int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
unsigned int revokes)
{
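The restructured __gfs2_trans_begin() checks SDF_JOURNAL_LIVE both before the fast-path reservation and again after the blocking reservation has reacquired sd_log_flush_lock, undoing the reservation if the journal died in between. A userspace sketch of that try-under-read-lock / revalidate-after-reacquire pattern (illustrative names, not gfs2 API):

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t log_flush_lock = PTHREAD_RWLOCK_INITIALIZER;
static atomic_bool journal_live = true;
static atomic_int log_space = 8;

static bool try_reserve(int blocks)
{
	int old = atomic_load(&log_space);

	while (old >= blocks) {
		if (atomic_compare_exchange_weak(&log_space, &old, old - blocks))
			return true;
	}
	return false;
}

static int trans_begin(int blocks)
{
	pthread_rwlock_rdlock(&log_flush_lock);
	if (!atomic_load(&journal_live))
		goto out_not_live;
	if (try_reserve(blocks))
		goto reserved;

	/* Slow path: wait for log space without holding the read lock. */
	pthread_rwlock_unlock(&log_flush_lock);
	while (!try_reserve(blocks))
		sched_yield();
	pthread_rwlock_rdlock(&log_flush_lock);
	if (!atomic_load(&journal_live)) {
		atomic_fetch_add(&log_space, blocks);	/* give the space back */
		goto out_not_live;
	}

reserved:
	/* gfs2 keeps the read lock until the transaction ends; this
	 * standalone demo simply drops it again. */
	pthread_rwlock_unlock(&log_flush_lock);
	return 0;

out_not_live:
	pthread_rwlock_unlock(&log_flush_lock);
	return -1;
}

int main(void)
{
	printf("trans_begin -> %d\n", trans_begin(4));
	return 0;
}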
@@ -255,7 +260,6 @@ void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh)
struct gfs2_bufdata *bd;
struct gfs2_meta_header *mh;
struct gfs2_trans *tr = current->journal_info;
bool withdraw = false;

lock_buffer(bh);
if (buffer_pinned(bh)) {

@@ -289,14 +293,14 @@ void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh)
(unsigned long long)bd->bd_bh->b_blocknr);
BUG();
}
if (gfs2_withdrawing_or_withdrawn(sdp)) {
if (gfs2_withdrawn(sdp)) {
fs_info(sdp, "GFS2:adding buf while withdrawn! 0x%llx\n",
(unsigned long long)bd->bd_bh->b_blocknr);
goto out_unlock;
}
if (unlikely(sb->s_writers.frozen == SB_FREEZE_COMPLETE)) {
fs_info(sdp, "GFS2:adding buf while frozen\n");
withdraw = true;
gfs2_withdraw(sdp);
goto out_unlock;
}
gfs2_pin(sdp, bd->bd_bh);

@@ -306,8 +310,6 @@ void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh)
tr->tr_num_buf_new++;
out_unlock:
gfs2_log_unlock(sdp);
if (withdraw)
gfs2_assert_withdraw(sdp, 0);
out:
unlock_buffer(bh);
}
fs/gfs2/util.c
@@ -58,7 +58,7 @@ int check_journal_clean(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
struct gfs2_inode *ip;

ip = GFS2_I(jd->jd_inode);
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_NOEXP |
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_RECOVER |
GL_EXACT | GL_NOCACHE, &j_gh);
if (error) {
if (verbose)

@@ -99,7 +99,7 @@ int check_journal_clean(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
*/
int gfs2_freeze_lock_shared(struct gfs2_sbd *sdp)
{
int flags = LM_FLAG_NOEXP | GL_EXACT;
int flags = LM_FLAG_RECOVER | GL_EXACT;
int error;

error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, flags,
@@ -115,41 +115,17 @@ void gfs2_freeze_unlock(struct gfs2_sbd *sdp)
gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);
}

static void signal_our_withdraw(struct gfs2_sbd *sdp)
static void do_withdraw(struct gfs2_sbd *sdp)
{
struct gfs2_glock *live_gl = sdp->sd_live_gh.gh_gl;
struct inode *inode;
struct gfs2_inode *ip;
struct gfs2_glock *i_gl;
u64 no_formal_ino;
int ret = 0;
int tries;

if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) || !sdp->sd_jdesc)
down_write(&sdp->sd_log_flush_lock);
if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
up_write(&sdp->sd_log_flush_lock);
return;
}
clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
up_write(&sdp->sd_log_flush_lock);

gfs2_ail_drain(sdp); /* frees all transactions */
inode = sdp->sd_jdesc->jd_inode;
ip = GFS2_I(inode);
i_gl = ip->i_gl;
no_formal_ino = ip->i_no_formal_ino;

/* Prevent any glock dq until withdraw recovery is complete */
set_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags);
/*
* Don't tell dlm we're bailing until we have no more buffers in the
* wind. If journal had an IO error, the log code should just purge
* the outstanding buffers rather than submitting new IO. Making the
* file system read-only will flush the journal, etc.
*
* During a normal unmount, gfs2_make_fs_ro calls gfs2_log_shutdown
* which clears SDF_JOURNAL_LIVE. In a withdraw, we must not write
* any UNMOUNT log header, so we can't call gfs2_log_shutdown, and
* therefore we need to clear SDF_JOURNAL_LIVE manually.
*/
clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
if (!sb_rdonly(sdp->sd_vfs)) {
bool locked = mutex_trylock(&sdp->sd_freeze_mutex);

wake_up(&sdp->sd_logd_waitq);
wake_up(&sdp->sd_quota_wait);
@@ -160,137 +136,11 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp)

sdp->sd_vfs->s_flags |= SB_RDONLY;

if (locked)
mutex_unlock(&sdp->sd_freeze_mutex);

/*
* Dequeue any pending non-system glock holders that can no
* longer be granted because the file system is withdrawn.
*/
gfs2_gl_dq_holders(sdp);
}

if (sdp->sd_lockstruct.ls_ops->lm_lock == NULL) { /* lock_nolock */
if (!ret)
ret = -EIO;
clear_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags);
goto skip_recovery;
}
/*
* Drop the glock for our journal so another node can recover it.
*/
if (gfs2_holder_initialized(&sdp->sd_journal_gh)) {
gfs2_glock_dq_wait(&sdp->sd_journal_gh);
gfs2_holder_uninit(&sdp->sd_journal_gh);
}
sdp->sd_jinode_gh.gh_flags |= GL_NOCACHE;
gfs2_glock_dq(&sdp->sd_jinode_gh);
gfs2_thaw_freeze_initiator(sdp->sd_vfs);
wait_on_bit(&i_gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE);

/*
* holder_uninit to force glock_put, to force dlm to let go
*/
gfs2_holder_uninit(&sdp->sd_jinode_gh);

/*
* Note: We need to be careful here:
* Our iput of jd_inode will evict it. The evict will dequeue its
* glock, but the glock dq will wait for the withdraw unless we have
* exception code in glock_dq.
*/
iput(inode);
sdp->sd_jdesc->jd_inode = NULL;
/*
* Wait until the journal inode's glock is freed. This allows try locks
* on other nodes to be successful, otherwise we remain the owner of
* the glock as far as dlm is concerned.
*/
if (i_gl->gl_ops->go_unlocked) {
set_bit(GLF_UNLOCKED, &i_gl->gl_flags);
wait_on_bit(&i_gl->gl_flags, GLF_UNLOCKED, TASK_UNINTERRUPTIBLE);
}

/*
* Dequeue the "live" glock, but keep a reference so it's never freed.
*/
gfs2_glock_hold(live_gl);
gfs2_glock_dq_wait(&sdp->sd_live_gh);
/*
* We enqueue the "live" glock in EX so that all other nodes
* get a demote request and act on it. We don't really want the
* lock in EX, so we send a "try" lock with 1CB to produce a callback.
*/
fs_warn(sdp, "Requesting recovery of jid %d.\n",
sdp->sd_lockstruct.ls_jid);
gfs2_holder_reinit(LM_ST_EXCLUSIVE,
LM_FLAG_TRY_1CB | LM_FLAG_NOEXP | GL_NOPID,
&sdp->sd_live_gh);
msleep(GL_GLOCK_MAX_HOLD);
/*
* This will likely fail in a cluster, but succeed standalone:
*/
ret = gfs2_glock_nq(&sdp->sd_live_gh);

gfs2_glock_put(live_gl); /* drop extra reference we acquired */
clear_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags);

/*
* If we actually got the "live" lock in EX mode, there are no other
* nodes available to replay our journal.
*/
if (ret == 0) {
fs_warn(sdp, "No other mounters found.\n");
/*
* We are about to release the lockspace. By keeping live_gl
* locked here, we ensure that the next mounter coming along
* will be a "first" mounter which will perform recovery.
*/
goto skip_recovery;
}

/*
* At this point our journal is evicted, so we need to get a new inode
* for it. Once done, we need to call gfs2_find_jhead which
* calls gfs2_map_journal_extents to map it for us again.
*
* Note that we don't really want it to look up a FREE block. The
* GFS2_BLKST_FREE simply overrides a block check in gfs2_inode_lookup
* which would otherwise fail because it requires grabbing an rgrp
* glock, which would fail with -EIO because we're withdrawing.
*/
inode = gfs2_inode_lookup(sdp->sd_vfs, DT_UNKNOWN,
sdp->sd_jdesc->jd_no_addr, no_formal_ino,
GFS2_BLKST_FREE);
if (IS_ERR(inode)) {
fs_warn(sdp, "Reprocessing of jid %d failed with %ld.\n",
sdp->sd_lockstruct.ls_jid, PTR_ERR(inode));
goto skip_recovery;
}
sdp->sd_jdesc->jd_inode = inode;
d_mark_dontcache(inode);

/*
* Now wait until recovery is complete.
*/
for (tries = 0; tries < 10; tries++) {
ret = check_journal_clean(sdp, sdp->sd_jdesc, false);
if (!ret)
break;
msleep(HZ);
fs_warn(sdp, "Waiting for journal recovery jid %d.\n",
sdp->sd_lockstruct.ls_jid);
}
skip_recovery:
if (!ret)
fs_warn(sdp, "Journal recovery complete for jid %d.\n",
sdp->sd_lockstruct.ls_jid);
else
fs_warn(sdp, "Journal recovery skipped for jid %d until next "
"mount.\n", sdp->sd_lockstruct.ls_jid);
fs_warn(sdp, "Glock dequeues delayed: %lu\n", sdp->sd_glock_dqs_held);
sdp->sd_glock_dqs_held = 0;
wake_up_bit(&sdp->sd_flags, SDF_WITHDRAW_RECOVERY);
gfs2_withdraw_glocks(sdp);
}
void gfs2_lm(struct gfs2_sbd *sdp, const char *fmt, ...)

@@ -309,43 +159,104 @@ void gfs2_lm(struct gfs2_sbd *sdp, const char *fmt, ...)
va_end(args);
}

void gfs2_withdraw(struct gfs2_sbd *sdp)
/**
* gfs2_offline_uevent - run gfs2_withdraw_helper
* @sdp: The GFS2 superblock
*/
static bool gfs2_offline_uevent(struct gfs2_sbd *sdp)
{
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
const struct lm_lockops *lm = ls->ls_ops;
long timeout;

if (sdp->sd_args.ar_errors == GFS2_ERRORS_WITHDRAW) {
unsigned long old = READ_ONCE(sdp->sd_flags), new;
/* Skip protocol "lock_nolock" which doesn't require shared storage. */
if (!ls->ls_ops->lm_lock)
return false;

do {
if (old & BIT(SDF_WITHDRAWN)) {
wait_on_bit(&sdp->sd_flags,
SDF_WITHDRAW_IN_PROG,
TASK_UNINTERRUPTIBLE);
return;
/*
* The gfs2_withdraw_helper replies by writing one of the following
* status codes to "/sys$DEVPATH/lock_module/withdraw":
*
* 0 - The shared block device has been marked inactive. Future write
* operations will fail.
*
* 1 - The shared block device may still be active and carry out
* write operations.
*
* If the "offline" uevent isn't reacted upon in time, the event
* handler is assumed to have failed.
*/

sdp->sd_withdraw_helper_status = -1;
kobject_uevent(&sdp->sd_kobj, KOBJ_OFFLINE);
timeout = gfs2_tune_get(sdp, gt_withdraw_helper_timeout) * HZ;
wait_for_completion_timeout(&sdp->sd_withdraw_helper, timeout);
if (sdp->sd_withdraw_helper_status == -1) {
fs_err(sdp, "%s timed out\n", "gfs2_withdraw_helper");
} else {
fs_err(sdp, "%s %s with status %d\n",
"gfs2_withdraw_helper",
sdp->sd_withdraw_helper_status == 0 ?
"succeeded" : "failed",
sdp->sd_withdraw_helper_status);
}
return sdp->sd_withdraw_helper_status == 0;
}
new = old | BIT(SDF_WITHDRAWN) | BIT(SDF_WITHDRAW_IN_PROG);
} while (unlikely(!try_cmpxchg(&sdp->sd_flags, &old, new)));

fs_err(sdp, "about to withdraw this file system\n");
void gfs2_withdraw_func(struct work_struct *work)
{
struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_withdraw_work);
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
const struct lm_lockops *lm = ls->ls_ops;
bool device_inactive;

if (test_bit(SDF_KILL, &sdp->sd_flags))
return;

BUG_ON(sdp->sd_args.ar_debug);

signal_our_withdraw(sdp);
/*
* Try to deactivate the shared block device so that no more I/O will
* go through. If successful, we can immediately trigger remote
* recovery. Otherwise, we must first empty out all our local caches.
*/

kobject_uevent(&sdp->sd_kobj, KOBJ_OFFLINE);
device_inactive = gfs2_offline_uevent(sdp);

if (!strcmp(sdp->sd_lockstruct.ls_ops->lm_proto_name, "lock_dlm"))
wait_for_completion(&sdp->sd_wdack);
if (sdp->sd_args.ar_errors == GFS2_ERRORS_DEACTIVATE && !device_inactive)
panic("GFS2: fsid=%s: panic requested\n", sdp->sd_fsname);

if (lm->lm_unmount) {
fs_err(sdp, "telling LM to unmount\n");
lm->lm_unmount(sdp);
if (device_inactive) {
lm->lm_unmount(sdp, false);
do_withdraw(sdp);
} else {
do_withdraw(sdp);
lm->lm_unmount(sdp, false);
}
fs_err(sdp, "File system withdrawn\n");
} else {
do_withdraw(sdp);
}

fs_err(sdp, "file system withdrawn\n");
}

void gfs2_withdraw(struct gfs2_sbd *sdp)
{
if (sdp->sd_args.ar_errors == GFS2_ERRORS_WITHDRAW ||
sdp->sd_args.ar_errors == GFS2_ERRORS_DEACTIVATE) {
if (test_and_set_bit(SDF_WITHDRAWN, &sdp->sd_flags))
return;

dump_stack();
clear_bit(SDF_WITHDRAW_IN_PROG, &sdp->sd_flags);
smp_mb__after_atomic();
wake_up_bit(&sdp->sd_flags, SDF_WITHDRAW_IN_PROG);
/*
* There is no need to withdraw when the superblock hasn't been
* fully initialized, yet.
*/
if (!(sdp->sd_vfs->s_flags & SB_BORN))
return;
fs_err(sdp, "about to withdraw this file system\n");
schedule_work(&sdp->sd_withdraw_work);
return;
}

if (sdp->sd_args.ar_errors == GFS2_ERRORS_PANIC)
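The comment in gfs2_offline_uevent() defines a small userspace contract: on withdraw, gfs2 emits a KOBJ_OFFLINE uevent and waits up to the withdraw_helper_timeout tunable for a helper to deactivate the shared block device and write 0 (inactive) or 1 (possibly still active) to /sys$DEVPATH/lock_module/withdraw. A rough sketch of such a helper follows; the real gfs2_withdraw_helper lives in userspace and may look different, and the deactivation step is stubbed out here:

/* Rough userspace sketch of a withdraw-helper uevent handler.
 * DEVPATH comes from the uevent environment (udev convention); how the
 * shared block device actually gets deactivated is left as a stub. */
#include <stdio.h>
#include <stdlib.h>

/* Stub: return 0 if the device was made inactive, nonzero otherwise. */
static int deactivate_shared_device(const char *devpath)
{
	(void)devpath;
	return -1;	/* assume failure in this sketch */
}

int main(void)
{
	const char *devpath = getenv("DEVPATH");
	char sysfs_path[4096];
	FILE *f;
	int status;

	if (!devpath)
		return 1;

	status = deactivate_shared_device(devpath) == 0 ? 0 : 1;

	/* Report the result back to gfs2, as described in
	 * gfs2_offline_uevent(): 0 = device inactive, 1 = possibly active. */
	snprintf(sysfs_path, sizeof(sysfs_path),
		 "/sys%s/lock_module/withdraw", devpath);
	f = fopen(sysfs_path, "w");
	if (!f)
		return 1;
	fprintf(f, "%d\n", status);
	fclose(f);
	return 0;
}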
@@ -357,10 +268,9 @@ void gfs2_withdraw(struct gfs2_sbd *sdp)
*/

void gfs2_assert_withdraw_i(struct gfs2_sbd *sdp, char *assertion,
const char *function, char *file, unsigned int line,
bool delayed)
const char *function, char *file, unsigned int line)
{
if (gfs2_withdrawing_or_withdrawn(sdp))
if (gfs2_withdrawn(sdp))
return;

fs_err(sdp,

@@ -368,16 +278,6 @@ void gfs2_assert_withdraw_i(struct gfs2_sbd *sdp, char *assertion,
"function = %s, file = %s, line = %u\n",
assertion, function, file, line);

/*
* If errors=panic was specified on mount, it won't help to delay the
* withdraw.
*/
if (sdp->sd_args.ar_errors == GFS2_ERRORS_PANIC)
delayed = false;

if (delayed)
gfs2_withdraw_delayed(sdp);
else
gfs2_withdraw(sdp);
dump_stack();
}
@@ -520,22 +420,18 @@ void gfs2_io_error_i(struct gfs2_sbd *sdp, const char *function, char *file,
}

/*
* gfs2_io_error_bh_i - Flag a buffer I/O error
* @withdraw: withdraw the filesystem
* gfs2_io_error_bh_i - Flag a buffer I/O error and withdraw
*/

void gfs2_io_error_bh_i(struct gfs2_sbd *sdp, struct buffer_head *bh,
const char *function, char *file, unsigned int line,
bool withdraw)
const char *function, char *file, unsigned int line)
{
if (gfs2_withdrawing_or_withdrawn(sdp))
if (gfs2_withdrawn(sdp))
return;

fs_err(sdp, "fatal: I/O error - "
"block = %llu, "
"function = %s, file = %s, line = %u\n",
(unsigned long long)bh->b_blocknr, function, file, line);
if (withdraw)
gfs2_withdraw(sdp);
}
@@ -37,24 +37,14 @@ do { \

void gfs2_assert_withdraw_i(struct gfs2_sbd *sdp, char *assertion,
const char *function, char *file, unsigned int line,
bool delayed);
const char *function, char *file, unsigned int line);

#define gfs2_assert_withdraw(sdp, assertion) \
({ \
bool _bool = (assertion); \
if (unlikely(!_bool)) \
gfs2_assert_withdraw_i((sdp), #assertion, \
__func__, __FILE__, __LINE__, false); \
!_bool; \
})

#define gfs2_assert_withdraw_delayed(sdp, assertion) \
({ \
bool _bool = (assertion); \
if (unlikely(!_bool)) \
gfs2_assert_withdraw_i((sdp), #assertion, \
__func__, __FILE__, __LINE__, true); \
__func__, __FILE__, __LINE__); \
!_bool; \
})
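With the delayed variant gone, both assert macros end up calling the same helper, and the statement expression still evaluates to !_bool, so callers can test and withdraw in one step. A hypothetical caller (not from this series) might look like:

/* Hypothetical caller; gfs2_assert_withdraw() returns true when the
 * assertion failed (and the withdraw has already been triggered). */
static int example_check_block(struct gfs2_sbd *sdp, u64 block, u64 max_block)
{
	if (gfs2_assert_withdraw(sdp, block < max_block))
		return -EIO;	/* file system is withdrawing; bail out */
	return 0;
}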
@@ -161,14 +151,10 @@ gfs2_io_error_i((sdp), __func__, __FILE__, __LINE__)

void gfs2_io_error_bh_i(struct gfs2_sbd *sdp, struct buffer_head *bh,
const char *function, char *file, unsigned int line,
bool withdraw);

#define gfs2_io_error_bh_wd(sdp, bh) \
gfs2_io_error_bh_i((sdp), (bh), __func__, __FILE__, __LINE__, true)
const char *function, char *file, unsigned int line);

#define gfs2_io_error_bh(sdp, bh) \
gfs2_io_error_bh_i((sdp), (bh), __func__, __FILE__, __LINE__, false)
gfs2_io_error_bh_i((sdp), (bh), __func__, __FILE__, __LINE__)

extern struct kmem_cache *gfs2_glock_cachep;

@@ -193,38 +179,12 @@ static inline unsigned int gfs2_tune_get_i(struct gfs2_tune *gt,
}

/**
* gfs2_withdraw_delayed - withdraw as soon as possible without deadlocks
* gfs2_withdrawn - test whether the file system is withdrawn
* @sdp: the superblock
*/
static inline void gfs2_withdraw_delayed(struct gfs2_sbd *sdp)
static inline bool gfs2_withdrawn(struct gfs2_sbd *sdp)
{
set_bit(SDF_WITHDRAWING, &sdp->sd_flags);
}

/**
* gfs2_withdrawing_or_withdrawn - test whether the file system is withdrawing
* or withdrawn
* @sdp: the superblock
*/
static inline bool gfs2_withdrawing_or_withdrawn(struct gfs2_sbd *sdp)
{
return unlikely(test_bit(SDF_WITHDRAWN, &sdp->sd_flags) ||
test_bit(SDF_WITHDRAWING, &sdp->sd_flags));
}

/**
* gfs2_withdrawing - check if a withdraw is pending
* @sdp: the superblock
*/
static inline bool gfs2_withdrawing(struct gfs2_sbd *sdp)
{
return unlikely(test_bit(SDF_WITHDRAWING, &sdp->sd_flags) &&
!test_bit(SDF_WITHDRAWN, &sdp->sd_flags));
}

static inline bool gfs2_withdraw_in_prog(struct gfs2_sbd *sdp)
{
return unlikely(test_bit(SDF_WITHDRAW_IN_PROG, &sdp->sd_flags));
return unlikely(test_bit(SDF_WITHDRAWN, &sdp->sd_flags));
}

#define gfs2_tune_get(sdp, field) \

@@ -232,6 +192,8 @@ gfs2_tune_get_i(&(sdp)->sd_tune, &(sdp)->sd_tune.field)

__printf(2, 3)
void gfs2_lm(struct gfs2_sbd *sdp, const char *fmt, ...);

void gfs2_withdraw_func(struct work_struct *work);
void gfs2_withdraw(struct gfs2_sbd *sdp);

#endif /* __UTIL_DOT_H__ */