fs/ntfs3: correct attr_collapse_range when file is too fragmented

Fix incorrect VCN adjustments in attr_collapse_range() that caused
filesystem errors or corruption on very fragmented NTFS files when
performing collapse-range operations.

Signed-off-by: Konstantin Komarov <almaz.alexandrovich@paragon-software.com>
Author: Konstantin Komarov <almaz.alexandrovich@paragon-software.com>
Date:   2025-10-30 23:35:24 +03:00
Commit: 2109b08024
Parent: aee4d5a521
4 changed files with 53 additions and 48 deletions
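
Collapse-range reaches attr_collapse_range() from user space through fallocate(2) with FALLOC_FL_COLLAPSE_RANGE. A minimal reproducer sketch of the scenario the fix targets — a heavily fragmented data attribute followed by a collapse in the middle — could look like the following; the path, sizes, and fragmentation pattern are illustrative only and not taken from the patch:

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/mnt/ntfs/frag.bin";	/* hypothetical NTFS mount */
	const size_t blk = 64 * 1024;
	char *buf = malloc(blk);
	int fd = open(path, O_CREAT | O_RDWR | O_TRUNC, 0644);

	if (fd < 0 || !buf) {
		perror("setup");
		return 1;
	}
	memset(buf, 0xab, blk);

	/*
	 * Write every other 64 KiB block so the allocator is likely to give
	 * the file many separate runs (the exact layout depends on the
	 * filesystem state).
	 */
	for (long i = 0; i < 4096; i++) {
		if (pwrite(fd, buf, blk, (off_t)i * 2 * blk) != (ssize_t)blk) {
			perror("pwrite");
			return 1;
		}
	}
	fsync(fd);

	/* Collapse 16 MiB out of the middle; this drives attr_collapse_range(). */
	if (fallocate(fd, FALLOC_FL_COLLAPSE_RANGE,
		      32LL * 1024 * 1024, 16LL * 1024 * 1024) < 0) {
		perror("fallocate(FALLOC_FL_COLLAPSE_RANGE)");
		return 1;
	}

	free(buf);
	close(fd);
	return 0;
}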

diff --git a/fs/ntfs3/attrib.c b/fs/ntfs3/attrib.c

@@ -1860,7 +1860,7 @@ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
 	struct ATTRIB *attr = NULL, *attr_b;
 	struct ATTR_LIST_ENTRY *le, *le_b;
 	struct mft_inode *mi, *mi_b;
-	CLST svcn, evcn1, len, dealloc, alen;
+	CLST svcn, evcn1, len, dealloc, alen, done;
 	CLST vcn, end;
 	u64 valid_size, data_size, alloc_size, total_size;
 	u32 mask;
@@ -1923,6 +1923,7 @@ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
 	len = bytes >> sbi->cluster_bits;
 	end = vcn + len;
 	dealloc = 0;
+	done = 0;
 
 	svcn = le64_to_cpu(attr_b->nres.svcn);
 	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
@@ -1931,23 +1932,28 @@ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
 		attr = attr_b;
 		le = le_b;
 		mi = mi_b;
-	} else if (!le_b) {
+		goto check_seg;
+	}
+
+	if (!le_b) {
 		err = -EINVAL;
 		goto out;
-	} else {
-		le = le_b;
-		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
-				    &mi);
-		if (!attr) {
-			err = -EINVAL;
-			goto out;
-		}
+	}
 
-		svcn = le64_to_cpu(attr->nres.svcn);
-		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
-	}
+	le = le_b;
+	attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn, &mi);
+	if (!attr) {
+		err = -EINVAL;
+		goto out;
+	}
 
 	for (;;) {
+		CLST vcn1, eat, next_svcn;
+
+		svcn = le64_to_cpu(attr->nres.svcn);
+		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
+
+check_seg:
 		if (svcn >= end) {
 			/* Shift VCN- */
 			attr->nres.svcn = cpu_to_le64(svcn - len);
@@ -1957,22 +1963,25 @@ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
 				ni->attr_list.dirty = true;
 			}
 			mi->dirty = true;
-		} else if (svcn < vcn || end < evcn1) {
-			CLST vcn1, eat, next_svcn;
+			goto next_attr;
+		}
+
+		run_truncate(run, 0);
+		err = attr_load_runs(attr, ni, run, &svcn);
+		if (err)
+			goto out;
+
+		vcn1 = vcn + done; /* original vcn in attr/run. */
+		eat = min(end, evcn1) - vcn1;
+		err = run_deallocate_ex(sbi, run, vcn1, eat, &dealloc, true);
+		if (err)
+			goto out;
 
+		if (svcn + eat < evcn1) {
 			/* Collapse a part of this attribute segment. */
-			err = attr_load_runs(attr, ni, run, &svcn);
-			if (err)
-				goto out;
-			vcn1 = max(vcn, svcn);
-			eat = min(end, evcn1) - vcn1;
-			err = run_deallocate_ex(sbi, run, vcn1, eat, &dealloc,
-						true);
-			if (err)
-				goto out;
-
-			if (!run_collapse_range(run, vcn1, eat)) {
+			if (!run_collapse_range(run, vcn1, eat, done)) {
 				err = -ENOMEM;
 				goto out;
 			}
@@ -1980,7 +1989,7 @@ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
 			if (svcn >= vcn) {
 				/* Shift VCN */
 				attr->nres.svcn = cpu_to_le64(vcn);
-				if (le) {
+				if (le && attr->nres.svcn != le->vcn) {
 					le->vcn = attr->nres.svcn;
 					ni->attr_list.dirty = true;
 				}
@@ -1991,7 +2000,7 @@ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
 				goto out;
 
 			next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
-			if (next_svcn + eat < evcn1) {
+			if (next_svcn + eat + done < evcn1) {
 				err = ni_insert_nonresident(
 					ni, ATTR_DATA, NULL, 0, run, next_svcn,
 					evcn1 - eat - next_svcn, a_flags, &attr,
@@ -2005,18 +2014,9 @@ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
 			/* Free all allocated memory. */
 			run_truncate(run, 0);
+			done += eat;
 		} else {
 			u16 le_sz;
-			u16 roff = le16_to_cpu(attr->nres.run_off);
-
-			if (roff > le32_to_cpu(attr->size)) {
-				err = -EINVAL;
-				goto out;
-			}
-			run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn,
-				      evcn1 - 1, svcn, Add2Ptr(attr, roff),
-				      le32_to_cpu(attr->size) - roff);
 
 			/* Delete this attribute segment. */
 			mi_remove_attr(NULL, mi, attr);
@@ -2029,6 +2029,7 @@ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
 				goto out;
 			}
 
+			done += evcn1 - svcn;
 			if (evcn1 >= alen)
 				break;
@@ -2046,11 +2047,12 @@ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
 					err = -EINVAL;
 					goto out;
 				}
-				goto next_attr;
+				continue;
 			}
 
 			le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
 		}
 
+next_attr:
 		if (evcn1 >= alen)
 			break;
@@ -2059,10 +2061,6 @@ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
 			err = -EINVAL;
 			goto out;
 		}
-
-next_attr:
-		svcn = le64_to_cpu(attr->nres.svcn);
-		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
 	}
 
 	if (!attr_b) {
@@ -2552,7 +2550,7 @@ int attr_insert_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
 	if (attr_load_runs(attr, ni, run, NULL))
 		goto bad_inode;
 
-	if (!run_collapse_range(run, vcn, len))
+	if (!run_collapse_range(run, vcn, len, 0))
 		goto bad_inode;
 
 	if (mi_pack_runs(mi, attr, run, evcn1 + len - svcn))

diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h

@@ -777,7 +777,7 @@ bool mi_remove_attr(struct ntfs_inode *ni, struct mft_inode *mi,
 		    struct ATTRIB *attr);
 bool mi_resize_attr(struct mft_inode *mi, struct ATTRIB *attr, int bytes);
 int mi_pack_runs(struct mft_inode *mi, struct ATTRIB *attr,
-		 struct runs_tree *run, CLST len);
+		 const struct runs_tree *run, CLST len);
 static inline bool mi_is_ref(const struct mft_inode *mi,
 			     const struct MFT_REF *ref)
 {
@@ -812,7 +812,7 @@ void run_truncate_head(struct runs_tree *run, CLST vcn);
 void run_truncate_around(struct runs_tree *run, CLST vcn);
 bool run_add_entry(struct runs_tree *run, CLST vcn, CLST lcn, CLST len,
 		   bool is_mft);
-bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len);
+bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len, CLST sub);
 bool run_insert_range(struct runs_tree *run, CLST vcn, CLST len);
 bool run_get_entry(const struct runs_tree *run, size_t index, CLST *vcn,
 		   CLST *lcn, CLST *len);

diff --git a/fs/ntfs3/record.c b/fs/ntfs3/record.c

@@ -621,7 +621,7 @@ bool mi_resize_attr(struct mft_inode *mi, struct ATTRIB *attr, int bytes)
  * If failed record is not changed.
  */
 int mi_pack_runs(struct mft_inode *mi, struct ATTRIB *attr,
-		 struct runs_tree *run, CLST len)
+		 const struct runs_tree *run, CLST len)
 {
 	int err = 0;
 	struct ntfs_sb_info *sbi = mi->sbi;

diff --git a/fs/ntfs3/run.c b/fs/ntfs3/run.c

@@ -487,7 +487,7 @@ bool run_add_entry(struct runs_tree *run, CLST vcn, CLST lcn, CLST len,
  * Helper for attr_collapse_range(),
  * which is helper for fallocate(collapse_range).
  */
-bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len)
+bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len, CLST sub)
 {
 	size_t index, eat;
 	struct ntfs_run *r, *e, *eat_start, *eat_end;
@@ -511,7 +511,7 @@ bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len)
 			/* Collapse a middle part of normal run, split. */
 			if (!run_add_entry(run, vcn, SPARSE_LCN, len, false))
 				return false;
-			return run_collapse_range(run, vcn, len);
+			return run_collapse_range(run, vcn, len, sub);
 		}
 
 		r += 1;
@@ -545,6 +545,13 @@ bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len)
 	memmove(eat_start, eat_end, (e - eat_end) * sizeof(*r));
 	run->count -= eat;
 
+	if (sub) {
+		e -= eat;
+		for (r = run->runs; r < e; r++) {
+			r->vcn -= sub;
+		}
+	}
+
 	return true;
 }
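
The new sub argument lets the caller re-base the runs that survive a collapse by the number of clusters already collapsed in earlier attribute segments. A standalone model of that behavior (a simplification for illustration, not the kernel implementation: whole-run granularity only, no splitting of runs that straddle the range):

#include <stdint.h>
#include <stdio.h>

/* One extent: len clusters mapped at virtual cluster vcn. */
struct demo_run {
	uint64_t vcn;
	uint64_t lcn;
	uint64_t len;
};

/*
 * Drop runs covering [vcn, vcn + len), slide later runs down by len, then
 * lower every surviving run by sub clusters, mirroring the extra shift the
 * patch adds to run_collapse_range().
 */
static size_t demo_collapse(struct demo_run *r, size_t count,
			    uint64_t vcn, uint64_t len, uint64_t sub)
{
	size_t out = 0;

	for (size_t i = 0; i < count; i++) {
		if (r[i].vcn >= vcn && r[i].vcn + r[i].len <= vcn + len)
			continue;		/* fully inside the hole: drop */
		r[out] = r[i];
		if (r[out].vcn >= vcn + len)
			r[out].vcn -= len;	/* slide into the hole */
		r[out].vcn -= sub;		/* shift from earlier segments */
		out++;
	}
	return out;
}

int main(void)
{
	/* Runs of a later attribute segment, still in the original VCN space. */
	struct demo_run runs[] = {
		{ 16, 500, 4 }, { 20, 600, 4 }, { 24, 700, 4 },
	};
	/*
	 * 8 clusters were already collapsed in earlier segments (sub = 8);
	 * now collapse [16, 20) inside this segment as well.
	 */
	size_t n = demo_collapse(runs, 3, 16, 4, 8);

	for (size_t i = 0; i < n; i++)
		printf("vcn=%llu lcn=%llu len=%llu\n",
		       (unsigned long long)runs[i].vcn,
		       (unsigned long long)runs[i].lcn,
		       (unsigned long long)runs[i].len);
	return 0;
}

With the sample data, the two surviving runs come out at vcn 8 and 12: shifted by the 4 clusters collapsed in this segment plus the 8 collapsed earlier, which is the bookkeeping the done/sub plumbing keeps consistent across a badly fragmented file.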