Changes for 6.19-rc1

Added:
     support timestamps prior to epoch;
     do not overwrite uptodate pages;
 	disable readahead for compressed files;
 	setting of dummy blocksize to read boot_block when mounting;
 	the run_lock initialization when loading $Extend;
 	initialization of allocated memory before use;
 	support for the NTFS3_IOC_SHUTDOWN ioctl;
 	check for minimum alignment when performing direct I/O reads;
 	check for shutdown in fsync.
 
 Fixed:
     mount failure for sparse runs in run_unpack();
 	use-after-free of sbi->options in cmp_fnames;
 	KMSAN uninit bug after failed mi_read in mi_format_new;
 	uninit error after buffer allocation by __getname();
 	KMSAN uninit-value in ni_create_attr_list;
 	double free of sbi->options->nls and ownership of fc->fs_private;
 	incorrect vcn adjustments in attr_collapse_range();
 	mode update when ACL can be reduced to mode;
 	memory leaks in add sub record.
 
 Changed:
     refactor code, updated terminology, spelling;
 	do not kmap pages in (de)compression code;
 	after ntfs_look_free_mft(), code that fails must put mi;
 	default mount options for "acl" and "prealloc".
 
 Replaced:
 	use unsafe_memcpy() to avoid memcpy size warning;
 	ntfs_bio_pages with page cache for compressed files.
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEh0DEKNP0I9IjwfWEqbAzH4MkB7YFAmkwF6UACgkQqbAzH4Mk
 B7aG3w/9H3iUua41pQWH1g94PW7qhRxyN+2hVvnlJvNRB5bV/rhVHnEEkyxNHjau
 I+pknP2KceUrHYQO032b3kuuwab7sMCfVhVQCIzQ1dsp0V3HFwx5IWz8hGiAMrAN
 7pIQuUqqG5NxArVM028HWUXOq1myjRWxuwLTEaSYDOT0/Fl+a/nXKhlg+S5Js8K1
 FlkorvWdmXKUp3Dp96yzlcmcbqNFCeyfAlUu57KDx1wLjSvzEnCol8VHpSl9myc3
 FET6RcsL00jy3Tr3/6xBk6ef+9Eoas6hiafLaHay6jMkzW2hPzVwNgUsGOYQ0IwQ
 6jIWiTsAGfi6/GZRVQrEMFr4pvlIlG//uTezfu9pE7ld6wMwYfT4ZvDuHMYvshiw
 keRIEeF4oCakut9Cy8OVh0FtUCv/RYaT332rKJhtFZyCnewqwHd29/ihjB9yNnDx
 qruFEpjBHOffsbcd0PLDsATutGi8sjd4eNyv1KuRf2xKL3fZ22d79LOzBHRGhAnR
 1MyBNo6Q9eF3W8hbf1aMJ0X3O76EVwXxuAWc32EwHn/35XMjb9ajjmn0G1Xeix7+
 0N/cdXZO4fiZhj2fOVUuLduHpoNCcaQya0GD6YJ+jgVnFSs0ms/pGDryQa2kooqu
 ewav36eEmtU6WOXAxMKVYzok1A+/fMtpDK7cOvW61Vz91X6woCU=
 =RGJu
 -----END PGP SIGNATURE-----

Merge tag 'ntfs3_for_6.19' of https://github.com/Paragon-Software-Group/linux-ntfs3

Pull ntfs3 updates from Konstantin Komarov:
 "New code:
   - support timestamps prior to epoch
   - do not overwrite uptodate pages
   - disable readahead for compressed files
   - setting of dummy blocksize to read boot_block when mounting
   - the run_lock initialization when loading $Extend
   - initialization of allocated memory before use
   - support for the NTFS3_IOC_SHUTDOWN ioctl
   - check for minimum alignment when performing direct I/O reads
   - check for shutdown in fsync

  Fixes:
   - mount failure for sparse runs in run_unpack()
   - use-after-free of sbi->options in cmp_fnames
   - KMSAN uninit bug after failed mi_read in mi_format_new
   - uninit error after buffer allocation by __getname()
   - KMSAN uninit-value in ni_create_attr_list
   - double free of sbi->options->nls and ownership of fc->fs_private
   - incorrect vcn adjustments in attr_collapse_range()
   - mode update when ACL can be reduced to mode
   - memory leaks in add sub record

  Changes:
   - refactor code, updated terminology, spelling
   - do not kmap pages in (de)compression code
   - after ntfs_look_free_mft(), code that fails must put mft_inode
   - default mount options for "acl" and "prealloc"

  Replaced:
   - use unsafe_memcpy() to avoid memcpy size warning
   - ntfs_bio_pages with page cache for compressed files"

* tag 'ntfs3_for_6.19' of https://github.com/Paragon-Software-Group/linux-ntfs3: (26 commits)
  fs/ntfs3: check for shutdown in fsync
  fs/ntfs3: change the default mount options for "acl" and "prealloc"
  fs/ntfs3: Prevent memory leaks in add sub record
  fs/ntfs3: out1 also needs to put mi
  fs/ntfs3: Fix spelling mistake "recommened" -> "recommended"
  fs/ntfs3: update mode in xattr when ACL can be reduced to mode
  fs/ntfs3: check minimum alignment for direct I/O
  fs/ntfs3: implement NTFS3_IOC_SHUTDOWN ioctl
  fs/ntfs3: correct attr_collapse_range when file is too fragmented
  ntfs3: fix double free of sbi->options->nls and clarify ownership of fc->fs_private
  fs/ntfs3: Initialize allocated memory before use
  fs/ntfs3: remove ntfs_bio_pages and use page cache for compressed I/O
  ntfs3: avoid memcpy size warning
  fs/ntfs3: fix KMSAN uninit-value in ni_create_attr_list
  ntfs3: init run lock for extend inode
  ntfs: set dummy blocksize to read boot_block when mounting
  fs/ntfs3: disable readahead for compressed files
  ntfs3: Fix uninit buffer allocated by __getname()
  ntfs3: fix uninit memory after failed mi_read in mi_format_new
  ntfs3: fix use-after-free of sbi->options in cmp_fnames
  ...
This commit is contained in:
Linus Torvalds 2025-12-03 20:45:43 -08:00
commit 559e608c46
13 changed files with 423 additions and 333 deletions

View File

@ -1457,7 +1457,6 @@ int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
pgoff_t index = vbo[i] >> PAGE_SHIFT;
if (index != folio->index) {
struct page *page = &folio->page;
u64 from = vbo[i] & ~(u64)(PAGE_SIZE - 1);
u64 to = min(from + PAGE_SIZE, wof_size);
@ -1467,8 +1466,7 @@ int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
if (err)
goto out1;
err = ntfs_bio_pages(sbi, run, &page, 1, from,
to - from, REQ_OP_READ);
err = ntfs_read_run(sbi, run, addr, from, to - from);
if (err) {
folio->index = -1;
goto out1;
@ -1862,7 +1860,7 @@ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
struct ATTRIB *attr = NULL, *attr_b;
struct ATTR_LIST_ENTRY *le, *le_b;
struct mft_inode *mi, *mi_b;
CLST svcn, evcn1, len, dealloc, alen;
CLST svcn, evcn1, len, dealloc, alen, done;
CLST vcn, end;
u64 valid_size, data_size, alloc_size, total_size;
u32 mask;
@ -1925,6 +1923,7 @@ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
len = bytes >> sbi->cluster_bits;
end = vcn + len;
dealloc = 0;
done = 0;
svcn = le64_to_cpu(attr_b->nres.svcn);
evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
@ -1933,23 +1932,28 @@ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
attr = attr_b;
le = le_b;
mi = mi_b;
} else if (!le_b) {
goto check_seg;
}
if (!le_b) {
err = -EINVAL;
goto out;
} else {
le = le_b;
attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
&mi);
if (!attr) {
err = -EINVAL;
goto out;
}
}
svcn = le64_to_cpu(attr->nres.svcn);
evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
le = le_b;
attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn, &mi);
if (!attr) {
err = -EINVAL;
goto out;
}
for (;;) {
CLST vcn1, eat, next_svcn;
svcn = le64_to_cpu(attr->nres.svcn);
evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
check_seg:
if (svcn >= end) {
/* Shift VCN- */
attr->nres.svcn = cpu_to_le64(svcn - len);
@ -1959,22 +1963,25 @@ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
ni->attr_list.dirty = true;
}
mi->dirty = true;
} else if (svcn < vcn || end < evcn1) {
CLST vcn1, eat, next_svcn;
goto next_attr;
}
run_truncate(run, 0);
err = attr_load_runs(attr, ni, run, &svcn);
if (err)
goto out;
vcn1 = vcn + done; /* original vcn in attr/run. */
eat = min(end, evcn1) - vcn1;
err = run_deallocate_ex(sbi, run, vcn1, eat, &dealloc, true);
if (err)
goto out;
if (svcn + eat < evcn1) {
/* Collapse a part of this attribute segment. */
err = attr_load_runs(attr, ni, run, &svcn);
if (err)
goto out;
vcn1 = max(vcn, svcn);
eat = min(end, evcn1) - vcn1;
err = run_deallocate_ex(sbi, run, vcn1, eat, &dealloc,
true);
if (err)
goto out;
if (!run_collapse_range(run, vcn1, eat)) {
if (!run_collapse_range(run, vcn1, eat, done)) {
err = -ENOMEM;
goto out;
}
@ -1982,7 +1989,7 @@ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
if (svcn >= vcn) {
/* Shift VCN */
attr->nres.svcn = cpu_to_le64(vcn);
if (le) {
if (le && attr->nres.svcn != le->vcn) {
le->vcn = attr->nres.svcn;
ni->attr_list.dirty = true;
}
@ -1993,7 +2000,7 @@ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
goto out;
next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
if (next_svcn + eat < evcn1) {
if (next_svcn + eat + done < evcn1) {
err = ni_insert_nonresident(
ni, ATTR_DATA, NULL, 0, run, next_svcn,
evcn1 - eat - next_svcn, a_flags, &attr,
@ -2007,18 +2014,9 @@ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
/* Free all allocated memory. */
run_truncate(run, 0);
done += eat;
} else {
u16 le_sz;
u16 roff = le16_to_cpu(attr->nres.run_off);
if (roff > le32_to_cpu(attr->size)) {
err = -EINVAL;
goto out;
}
run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn,
evcn1 - 1, svcn, Add2Ptr(attr, roff),
le32_to_cpu(attr->size) - roff);
/* Delete this attribute segment. */
mi_remove_attr(NULL, mi, attr);
@ -2031,6 +2029,7 @@ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
goto out;
}
done += evcn1 - svcn;
if (evcn1 >= alen)
break;
@ -2048,11 +2047,12 @@ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
err = -EINVAL;
goto out;
}
goto next_attr;
continue;
}
le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
}
next_attr:
if (evcn1 >= alen)
break;
@ -2061,10 +2061,6 @@ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
err = -EINVAL;
goto out;
}
next_attr:
svcn = le64_to_cpu(attr->nres.svcn);
evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
}
if (!attr_b) {
@ -2554,7 +2550,7 @@ int attr_insert_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
if (attr_load_runs(attr, ni, run, NULL))
goto bad_inode;
if (!run_collapse_range(run, vcn, len))
if (!run_collapse_range(run, vcn, len, 0))
goto bad_inode;
if (mi_pack_runs(mi, attr, run, evcn1 + len - svcn))

View File

@ -332,8 +332,7 @@ static inline bool ntfs_dir_emit(struct ntfs_sb_info *sbi,
* It does additional locks/reads just to get the type of name.
* Should we use additional mount option to enable branch below?
*/
if (fname->dup.extend_data &&
ino != ni->mi.rno) {
if (fname->dup.extend_data && ino != ni->mi.rno) {
struct inode *inode = ntfs_iget5(sbi->sb, &e->ref, NULL);
if (!IS_ERR_OR_NULL(inode)) {
dt_type = fs_umode_to_dtype(inode->i_mode);

View File

@ -19,6 +19,12 @@
#include "ntfs.h"
#include "ntfs_fs.h"
/*
 * cifs, btrfs, exfat, ext4, f2fs use this constant.
* Hope this value will become common to all fs.
*/
#define NTFS3_IOC_SHUTDOWN _IOR('X', 125, __u32)
static int ntfs_ioctl_fitrim(struct ntfs_sb_info *sbi, unsigned long arg)
{
struct fstrim_range __user *user_range;
@ -59,7 +65,7 @@ static int ntfs_ioctl_get_volume_label(struct ntfs_sb_info *sbi, u8 __user *buf)
static int ntfs_ioctl_set_volume_label(struct ntfs_sb_info *sbi, u8 __user *buf)
{
u8 user[FSLABEL_MAX] = {0};
u8 user[FSLABEL_MAX] = { 0 };
int len;
if (!capable(CAP_SYS_ADMIN))
@ -73,13 +79,47 @@ static int ntfs_ioctl_set_volume_label(struct ntfs_sb_info *sbi, u8 __user *buf)
return ntfs_set_label(sbi, user, len);
}
/*
* ntfs_force_shutdown - helper function. Called from ioctl
*/
static int ntfs_force_shutdown(struct super_block *sb, u32 flags)
{
int err;
struct ntfs_sb_info *sbi = sb->s_fs_info;
if (unlikely(ntfs3_forced_shutdown(sb)))
return 0;
/* No additional options yet (flags). */
err = bdev_freeze(sb->s_bdev);
if (err)
return err;
set_bit(NTFS_FLAGS_SHUTDOWN_BIT, &sbi->flags);
bdev_thaw(sb->s_bdev);
return 0;
}
static int ntfs_ioctl_shutdown(struct super_block *sb, unsigned long arg)
{
u32 flags;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (get_user(flags, (__u32 __user *)arg))
return -EFAULT;
return ntfs_force_shutdown(sb, flags);
}
/*
* ntfs_ioctl - file_operations::unlocked_ioctl
*/
long ntfs_ioctl(struct file *filp, u32 cmd, unsigned long arg)
{
struct inode *inode = file_inode(filp);
struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
struct super_block *sb = inode->i_sb;
struct ntfs_sb_info *sbi = sb->s_fs_info;
/* Avoid any operation if inode is bad. */
if (unlikely(is_bad_ni(ntfs_i(inode))))
@ -92,6 +132,8 @@ long ntfs_ioctl(struct file *filp, u32 cmd, unsigned long arg)
return ntfs_ioctl_get_volume_label(sbi, (u8 __user *)arg);
case FS_IOC_SETFSLABEL:
return ntfs_ioctl_set_volume_label(sbi, (u8 __user *)arg);
case NTFS3_IOC_SHUTDOWN:
return ntfs_ioctl_shutdown(sb, arg);
}
return -ENOTTY; /* Inappropriate ioctl for device. */
}
@ -325,9 +367,14 @@ static int ntfs_file_mmap_prepare(struct vm_area_desc *desc)
return -EOPNOTSUPP;
}
if (is_compressed(ni) && rw) {
ntfs_inode_warn(inode, "mmap(write) compressed not supported");
return -EOPNOTSUPP;
if (is_compressed(ni)) {
if (rw) {
ntfs_inode_warn(inode,
"mmap(write) compressed not supported");
return -EOPNOTSUPP;
}
/* Turn off readahead for compressed files. */
file->f_ra.ra_pages = 0;
}
if (rw) {
@ -503,8 +550,6 @@ static int ntfs_truncate(struct inode *inode, loff_t new_size)
if (dirty)
mark_inode_dirty(inode);
/*ntfs_flush_inodes(inode->i_sb, inode, NULL);*/
return 0;
}
@ -886,9 +931,24 @@ static ssize_t ntfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
if (err)
return err;
if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
ntfs_inode_warn(inode, "direct i/o + compressed not supported");
return -EOPNOTSUPP;
if (is_compressed(ni)) {
if (iocb->ki_flags & IOCB_DIRECT) {
ntfs_inode_warn(
inode, "direct i/o + compressed not supported");
return -EOPNOTSUPP;
}
/* Turn off readahead for compressed files. */
file->f_ra.ra_pages = 0;
}
/* Check minimum alignment for dio. */
if (iocb->ki_flags & IOCB_DIRECT) {
struct super_block *sb = inode->i_sb;
struct ntfs_sb_info *sbi = sb->s_fs_info;
if ((iocb->ki_pos | iov_iter_alignment(iter)) &
sbi->bdev_blocksize_mask) {
iocb->ki_flags &= ~IOCB_DIRECT;
}
}
return generic_file_read_iter(iocb, iter);
@ -908,6 +968,11 @@ static ssize_t ntfs_file_splice_read(struct file *in, loff_t *ppos,
if (err)
return err;
if (is_compressed(ntfs_i(inode))) {
/* Turn off readahead for compressed files. */
in->f_ra.ra_pages = 0;
}
return filemap_splice_read(in, ppos, pipe, len, flags);
}
@ -1026,7 +1091,7 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
if (!frame_uptodate && off) {
err = ni_read_frame(ni, frame_vbo, pages,
pages_per_frame);
pages_per_frame, 0);
if (err) {
for (ip = 0; ip < pages_per_frame; ip++) {
folio = page_folio(pages[ip]);
@ -1091,7 +1156,7 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
if (off || (to < i_size && (to & (frame_size - 1)))) {
err = ni_read_frame(ni, frame_vbo, pages,
pages_per_frame);
pages_per_frame, 0);
if (err) {
for (ip = 0; ip < pages_per_frame;
ip++) {
@ -1114,8 +1179,8 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
size_t cp, tail = PAGE_SIZE - off;
folio = page_folio(pages[ip]);
cp = copy_folio_from_iter_atomic(folio, off,
min(tail, bytes), from);
cp = copy_folio_from_iter_atomic(
folio, off, min(tail, bytes), from);
flush_dcache_folio(folio);
copied += cp;
@ -1312,7 +1377,7 @@ static int ntfs_file_release(struct inode *inode, struct file *file)
if (sbi->options->prealloc &&
((file->f_mode & FMODE_WRITE) &&
atomic_read(&inode->i_writecount) == 1)
/*
/*
* The only file when inode->i_fop = &ntfs_file_operations and
* init_rwsem(&ni->file.run_lock) is not called explicitly is MFT.
*
@ -1375,6 +1440,18 @@ static ssize_t ntfs_file_splice_write(struct pipe_inode_info *pipe,
return iter_file_splice_write(pipe, file, ppos, len, flags);
}
/*
* ntfs_file_fsync - file_operations::fsync
*/
static int ntfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
struct inode *inode = file_inode(file);
if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
return -EIO;
return generic_file_fsync(file, start, end, datasync);
}
// clang-format off
const struct inode_operations ntfs_file_inode_operations = {
.getattr = ntfs_getattr,
@ -1397,7 +1474,7 @@ const struct file_operations ntfs_file_operations = {
.splice_write = ntfs_file_splice_write,
.mmap_prepare = ntfs_file_mmap_prepare,
.open = ntfs_file_open,
.fsync = generic_file_fsync,
.fsync = ntfs_file_fsync,
.fallocate = ntfs_fallocate,
.release = ntfs_file_release,
};

View File

@ -325,8 +325,10 @@ bool ni_add_subrecord(struct ntfs_inode *ni, CLST rno, struct mft_inode **mi)
mi_get_ref(&ni->mi, &m->mrec->parent_ref);
ni_add_mi(ni, m);
*mi = m;
*mi = ni_ins_mi(ni, &ni->mi_tree, m->rno, &m->node);
if (*mi != m)
mi_put(m);
return true;
}
@ -767,7 +769,7 @@ int ni_create_attr_list(struct ntfs_inode *ni)
* Skip estimating exact memory requirement.
* Looks like one record_size is always enough.
*/
le = kmalloc(al_aligned(rs), GFP_NOFS);
le = kzalloc(al_aligned(rs), GFP_NOFS);
if (!le)
return -ENOMEM;
@ -1015,9 +1017,9 @@ static int ni_ins_attr_ext(struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le,
out2:
ni_remove_mi(ni, mi);
mi_put(mi);
out1:
mi_put(mi);
ntfs_mark_rec_free(sbi, rno, is_mft);
out:
@ -2020,6 +2022,29 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
return err;
}
static struct page *ntfs_lock_new_page(struct address_space *mapping,
pgoff_t index, gfp_t gfp)
{
struct folio *folio = __filemap_get_folio(mapping, index,
FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
struct page *page;
if (IS_ERR(folio))
return ERR_CAST(folio);
if (!folio_test_uptodate(folio))
return folio_file_page(folio, index);
/* Use a temporary page to avoid data corruption */
folio_unlock(folio);
folio_put(folio);
page = alloc_page(gfp);
if (!page)
return ERR_PTR(-ENOMEM);
__SetPageLocked(page);
return page;
}
/*
* ni_readpage_cmpr
*
@ -2074,15 +2099,15 @@ int ni_readpage_cmpr(struct ntfs_inode *ni, struct folio *folio)
if (i == idx)
continue;
pg = find_or_create_page(mapping, index, gfp_mask);
if (!pg) {
err = -ENOMEM;
pg = ntfs_lock_new_page(mapping, index, gfp_mask);
if (IS_ERR(pg)) {
err = PTR_ERR(pg);
goto out1;
}
pages[i] = pg;
}
err = ni_read_frame(ni, frame_vbo, pages, pages_per_frame);
err = ni_read_frame(ni, frame_vbo, pages, pages_per_frame, 0);
out1:
for (i = 0; i < pages_per_frame; i++) {
@ -2152,17 +2177,9 @@ int ni_decompress_file(struct ntfs_inode *ni)
*/
index = 0;
for (vbo = 0; vbo < i_size; vbo += bytes) {
u32 nr_pages;
bool new;
if (vbo + frame_size > i_size) {
bytes = i_size - vbo;
nr_pages = (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
} else {
nr_pages = pages_per_frame;
bytes = frame_size;
}
bytes = vbo + frame_size > i_size ? (i_size - vbo) : frame_size;
end = bytes_to_cluster(sbi, vbo + bytes);
for (vcn = vbo >> sbi->cluster_bits; vcn < end; vcn += clen) {
@ -2175,27 +2192,19 @@ int ni_decompress_file(struct ntfs_inode *ni)
for (i = 0; i < pages_per_frame; i++, index++) {
struct page *pg;
pg = find_or_create_page(mapping, index, gfp_mask);
if (!pg) {
pg = ntfs_lock_new_page(mapping, index, gfp_mask);
if (IS_ERR(pg)) {
while (i--) {
unlock_page(pages[i]);
put_page(pages[i]);
}
err = -ENOMEM;
err = PTR_ERR(pg);
goto out;
}
pages[i] = pg;
}
err = ni_read_frame(ni, vbo, pages, pages_per_frame);
if (!err) {
down_read(&ni->file.run_lock);
err = ntfs_bio_pages(sbi, &ni->file.run, pages,
nr_pages, vbo, bytes,
REQ_OP_WRITE);
up_read(&ni->file.run_lock);
}
err = ni_read_frame(ni, vbo, pages, pages_per_frame, 1);
for (i = 0; i < pages_per_frame; i++) {
unlock_page(pages[i]);
@ -2385,20 +2394,19 @@ static int decompress_lzx_xpress(struct ntfs_sb_info *sbi, const char *cmpr,
* Pages - Array of locked pages.
*/
int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages,
u32 pages_per_frame)
u32 pages_per_frame, int copy)
{
int err;
struct ntfs_sb_info *sbi = ni->mi.sbi;
u8 cluster_bits = sbi->cluster_bits;
char *frame_ondisk = NULL;
char *frame_mem = NULL;
struct page **pages_disk = NULL;
struct ATTR_LIST_ENTRY *le = NULL;
struct runs_tree *run = &ni->file.run;
u64 valid_size = ni->i_valid;
u64 vbo_disk;
size_t unc_size;
u32 frame_size, i, npages_disk, ondisk_size;
u32 frame_size, i, ondisk_size;
struct page *pg;
struct ATTRIB *attr;
CLST frame, clst_data;
@ -2407,9 +2415,6 @@ int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages,
* To simplify decompress algorithm do vmap for source
* and target pages.
*/
for (i = 0; i < pages_per_frame; i++)
kmap(pages[i]);
frame_size = pages_per_frame << PAGE_SHIFT;
frame_mem = vmap(pages, pages_per_frame, VM_MAP, PAGE_KERNEL);
if (!frame_mem) {
@ -2493,7 +2498,7 @@ int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages,
err = attr_wof_frame_info(ni, attr, run, frame64, frames,
frame_bits, &ondisk_size, &vbo_data);
if (err)
goto out2;
goto out1;
if (frame64 == frames) {
unc_size = 1 + ((i_size - 1) & (frame_size - 1));
@ -2504,7 +2509,7 @@ int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages,
if (ondisk_size > frame_size) {
err = -EINVAL;
goto out2;
goto out1;
}
if (!attr->non_res) {
@ -2525,10 +2530,7 @@ int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages,
ARRAY_SIZE(WOF_NAME), run, vbo_disk,
vbo_data + ondisk_size);
if (err)
goto out2;
npages_disk = (ondisk_size + (vbo_disk & (PAGE_SIZE - 1)) +
PAGE_SIZE - 1) >>
PAGE_SHIFT;
goto out1;
#endif
} else if (is_attr_compressed(attr)) {
/* LZNT compression. */
@ -2562,61 +2564,37 @@ int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages,
if (clst_data >= NTFS_LZNT_CLUSTERS) {
/* Frame is not compressed. */
down_read(&ni->file.run_lock);
err = ntfs_bio_pages(sbi, run, pages, pages_per_frame,
frame_vbo, ondisk_size,
REQ_OP_READ);
err = ntfs_read_run(sbi, run, frame_mem, frame_vbo,
ondisk_size);
up_read(&ni->file.run_lock);
goto out1;
}
vbo_disk = frame_vbo;
npages_disk = (ondisk_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
} else {
__builtin_unreachable();
err = -EINVAL;
goto out1;
}
pages_disk = kcalloc(npages_disk, sizeof(*pages_disk), GFP_NOFS);
if (!pages_disk) {
/* Allocate memory to read compressed data to. */
frame_ondisk = kvmalloc(ondisk_size, GFP_KERNEL);
if (!frame_ondisk) {
err = -ENOMEM;
goto out2;
}
for (i = 0; i < npages_disk; i++) {
pg = alloc_page(GFP_KERNEL);
if (!pg) {
err = -ENOMEM;
goto out3;
}
pages_disk[i] = pg;
lock_page(pg);
kmap(pg);
goto out1;
}
/* Read 'ondisk_size' bytes from disk. */
down_read(&ni->file.run_lock);
err = ntfs_bio_pages(sbi, run, pages_disk, npages_disk, vbo_disk,
ondisk_size, REQ_OP_READ);
err = ntfs_read_run(sbi, run, frame_ondisk, vbo_disk, ondisk_size);
up_read(&ni->file.run_lock);
if (err)
goto out3;
goto out2;
/*
* To simplify decompress algorithm do vmap for source and target pages.
*/
frame_ondisk = vmap(pages_disk, npages_disk, VM_MAP, PAGE_KERNEL_RO);
if (!frame_ondisk) {
err = -ENOMEM;
goto out3;
}
/* Decompress: Frame_ondisk -> frame_mem. */
#ifdef CONFIG_NTFS3_LZX_XPRESS
if (run != &ni->file.run) {
/* LZX or XPRESS */
err = decompress_lzx_xpress(
sbi, frame_ondisk + (vbo_disk & (PAGE_SIZE - 1)),
ondisk_size, frame_mem, unc_size, frame_size);
err = decompress_lzx_xpress(sbi, frame_ondisk, ondisk_size,
frame_mem, unc_size, frame_size);
} else
#endif
{
@ -2634,30 +2612,25 @@ int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages,
memset(frame_mem + ok, 0, frame_size - ok);
}
vunmap(frame_ondisk);
out3:
for (i = 0; i < npages_disk; i++) {
pg = pages_disk[i];
if (pg) {
kunmap(pg);
unlock_page(pg);
put_page(pg);
}
}
kfree(pages_disk);
out2:
kvfree(frame_ondisk);
out1:
#ifdef CONFIG_NTFS3_LZX_XPRESS
if (run != &ni->file.run)
run_free(run);
if (!err && copy) {
/* We are called from 'ni_decompress_file' */
/* Copy decompressed LZX or XPRESS data into new place. */
down_read(&ni->file.run_lock);
err = ntfs_write_run(sbi, &ni->file.run, frame_mem, frame_vbo,
frame_size);
up_read(&ni->file.run_lock);
}
#endif
out1:
vunmap(frame_mem);
out:
for (i = 0; i < pages_per_frame; i++) {
pg = pages[i];
kunmap(pg);
SetPageUptodate(pg);
}
@ -2680,13 +2653,10 @@ int ni_write_frame(struct ntfs_inode *ni, struct page **pages,
u64 frame_vbo = folio_pos(folio);
CLST frame = frame_vbo >> frame_bits;
char *frame_ondisk = NULL;
struct page **pages_disk = NULL;
struct ATTR_LIST_ENTRY *le = NULL;
char *frame_mem;
struct ATTRIB *attr;
struct mft_inode *mi;
u32 i;
struct page *pg;
size_t compr_size, ondisk_size;
struct lznt *lznt;
@ -2721,38 +2691,18 @@ int ni_write_frame(struct ntfs_inode *ni, struct page **pages,
goto out;
}
pages_disk = kcalloc(pages_per_frame, sizeof(struct page *), GFP_NOFS);
if (!pages_disk) {
/* Allocate memory to write compressed data to. */
frame_ondisk = kvmalloc(frame_size, GFP_KERNEL);
if (!frame_ondisk) {
err = -ENOMEM;
goto out;
}
for (i = 0; i < pages_per_frame; i++) {
pg = alloc_page(GFP_KERNEL);
if (!pg) {
err = -ENOMEM;
goto out1;
}
pages_disk[i] = pg;
lock_page(pg);
kmap(pg);
}
/* To simplify compress algorithm do vmap for source and target pages. */
frame_ondisk = vmap(pages_disk, pages_per_frame, VM_MAP, PAGE_KERNEL);
if (!frame_ondisk) {
err = -ENOMEM;
goto out1;
}
for (i = 0; i < pages_per_frame; i++)
kmap(pages[i]);
/* Map in-memory frame for read-only. */
frame_mem = vmap(pages, pages_per_frame, VM_MAP, PAGE_KERNEL_RO);
if (!frame_mem) {
err = -ENOMEM;
goto out2;
goto out1;
}
mutex_lock(&sbi->compress.mtx_lznt);
@ -2768,7 +2718,7 @@ int ni_write_frame(struct ntfs_inode *ni, struct page **pages,
if (!lznt) {
mutex_unlock(&sbi->compress.mtx_lznt);
err = -ENOMEM;
goto out3;
goto out2;
}
sbi->compress.lznt = lznt;
@ -2805,30 +2755,16 @@ int ni_write_frame(struct ntfs_inode *ni, struct page **pages,
goto out2;
down_read(&ni->file.run_lock);
err = ntfs_bio_pages(sbi, &ni->file.run,
ondisk_size < frame_size ? pages_disk : pages,
pages_per_frame, frame_vbo, ondisk_size,
REQ_OP_WRITE);
err = ntfs_write_run(sbi, &ni->file.run,
ondisk_size < frame_size ? frame_ondisk :
frame_mem,
frame_vbo, ondisk_size);
up_read(&ni->file.run_lock);
out3:
vunmap(frame_mem);
out2:
for (i = 0; i < pages_per_frame; i++)
kunmap(pages[i]);
vunmap(frame_ondisk);
vunmap(frame_mem);
out1:
for (i = 0; i < pages_per_frame; i++) {
pg = pages_disk[i];
if (pg) {
kunmap(pg);
unlock_page(pg);
put_page(pg);
}
}
kfree(pages_disk);
kvfree(frame_ondisk);
out:
return err;
}
@ -3026,8 +2962,8 @@ int ni_rename(struct ntfs_inode *dir_ni, struct ntfs_inode *new_dir_ni,
err = ni_add_name(new_dir_ni, ni, new_de);
if (!err) {
err = ni_remove_name(dir_ni, ni, de, &de2, &undo);
WARN_ON(err && ni_remove_name(new_dir_ni, ni, new_de, &de2,
&undo));
WARN_ON(err &&
ni_remove_name(new_dir_ni, ni, new_de, &de2, &undo));
}
/*
@ -3127,7 +3063,8 @@ static bool ni_update_parent(struct ntfs_inode *ni, struct NTFS_DUP_INFO *dup,
if (attr) {
const struct REPARSE_POINT *rp;
rp = resident_data_ex(attr, sizeof(struct REPARSE_POINT));
rp = resident_data_ex(attr,
sizeof(struct REPARSE_POINT));
/* If ATTR_REPARSE exists 'rp' can't be NULL. */
if (rp)
dup->extend_data = rp->ReparseTag;

View File

@ -1349,7 +1349,14 @@ int ntfs_get_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
}
if (buffer_locked(bh))
__wait_on_buffer(bh);
set_buffer_uptodate(bh);
lock_buffer(bh);
if (!buffer_uptodate(bh))
{
memset(bh->b_data, 0, blocksize);
set_buffer_uptodate(bh);
}
unlock_buffer(bh);
} else {
bh = ntfs_bread(sb, block);
if (!bh) {
@ -1472,99 +1479,86 @@ int ntfs_write_bh(struct ntfs_sb_info *sbi, struct NTFS_RECORD_HEADER *rhdr,
}
/*
* ntfs_bio_pages - Read/write pages from/to disk.
* ntfs_read_write_run - Read/Write disk's page cache.
*/
int ntfs_bio_pages(struct ntfs_sb_info *sbi, const struct runs_tree *run,
struct page **pages, u32 nr_pages, u64 vbo, u32 bytes,
enum req_op op)
int ntfs_read_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
void *buf, u64 vbo, size_t bytes, int wr)
{
int err = 0;
struct bio *new, *bio = NULL;
struct super_block *sb = sbi->sb;
struct block_device *bdev = sb->s_bdev;
struct page *page;
struct address_space *mapping = sb->s_bdev->bd_mapping;
u8 cluster_bits = sbi->cluster_bits;
CLST lcn, clen, vcn, vcn_next;
u32 add, off, page_idx;
CLST vcn_next, vcn = vbo >> cluster_bits;
CLST lcn, clen;
u64 lbo, len;
size_t run_idx;
struct blk_plug plug;
size_t idx;
u32 off, op;
struct folio *folio;
char *kaddr;
if (!bytes)
return 0;
blk_start_plug(&plug);
if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
return -ENOENT;
/* Align vbo and bytes to be 512 bytes aligned. */
lbo = (vbo + bytes + 511) & ~511ull;
vbo = vbo & ~511ull;
bytes = lbo - vbo;
if (lcn == SPARSE_LCN)
return -EINVAL;
vcn = vbo >> cluster_bits;
if (!run_lookup_entry(run, vcn, &lcn, &clen, &run_idx)) {
err = -ENOENT;
goto out;
}
off = vbo & sbi->cluster_mask;
page_idx = 0;
page = pages[0];
lbo = ((u64)lcn << cluster_bits) + off;
len = ((u64)clen << cluster_bits) - off;
for (;;) {
lbo = ((u64)lcn << cluster_bits) + off;
len = ((u64)clen << cluster_bits) - off;
new_bio:
new = bio_alloc(bdev, nr_pages - page_idx, op, GFP_NOFS);
if (bio) {
bio_chain(bio, new);
submit_bio(bio);
/* Read range [lbo, lbo+len). */
folio = read_mapping_folio(mapping, lbo >> PAGE_SHIFT, NULL);
if (IS_ERR(folio))
return PTR_ERR(folio);
off = offset_in_page(lbo);
op = PAGE_SIZE - off;
if (op > len)
op = len;
if (op > bytes)
op = bytes;
kaddr = kmap_local_folio(folio, 0);
if (wr) {
memcpy(kaddr + off, buf, op);
folio_mark_dirty(folio);
} else {
memcpy(buf, kaddr + off, op);
flush_dcache_folio(folio);
}
bio = new;
bio->bi_iter.bi_sector = lbo >> 9;
kunmap_local(kaddr);
folio_put(folio);
while (len) {
off = vbo & (PAGE_SIZE - 1);
add = off + len > PAGE_SIZE ? (PAGE_SIZE - off) : len;
bytes -= op;
if (!bytes)
return 0;
if (bio_add_page(bio, page, add, off) < add)
goto new_bio;
if (bytes <= add)
goto out;
bytes -= add;
vbo += add;
if (add + off == PAGE_SIZE) {
page_idx += 1;
if (WARN_ON(page_idx >= nr_pages)) {
err = -EINVAL;
goto out;
}
page = pages[page_idx];
}
if (len <= add)
break;
len -= add;
lbo += add;
buf += op;
len -= op;
if (len) {
/* next volume's page. */
lbo += op;
continue;
}
/* get next range. */
vcn_next = vcn + clen;
if (!run_get_entry(run, ++run_idx, &vcn, &lcn, &clen) ||
if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
vcn != vcn_next) {
err = -ENOENT;
goto out;
return -ENOENT;
}
off = 0;
}
out:
if (bio) {
if (!err)
err = submit_bio_wait(bio);
bio_put(bio);
}
blk_finish_plug(&plug);
return err;
if (lcn == SPARSE_LCN)
return -EINVAL;
lbo = ((u64)lcn << cluster_bits);
len = ((u64)clen << cluster_bits);
}
}
/*

View File

@ -1924,7 +1924,8 @@ indx_insert_into_buffer(struct ntfs_index *indx, struct ntfs_inode *ni,
* Undo critical operations.
*/
indx_mark_free(indx, ni, new_vbn >> indx->idx2vbn_bits);
memcpy(hdr1, hdr1_saved, used1);
unsafe_memcpy(hdr1, hdr1_saved, used1,
"There are entries after the structure");
indx_write(indx, ni, n1, 0);
}

View File

@ -472,6 +472,7 @@ static struct inode *ntfs_read_mft(struct inode *inode,
/* Records in $Extend are not a files or general directories. */
inode->i_op = &ntfs_file_inode_operations;
mode = S_IFREG;
init_rwsem(&ni->file.run_lock);
} else {
err = -EINVAL;
goto out;
@ -975,9 +976,9 @@ int ntfs_write_begin(const struct kiocb *iocb, struct address_space *mapping,
/*
* ntfs_write_end - Address_space_operations::write_end.
*/
int ntfs_write_end(const struct kiocb *iocb,
struct address_space *mapping, loff_t pos,
u32 len, u32 copied, struct folio *folio, void *fsdata)
int ntfs_write_end(const struct kiocb *iocb, struct address_space *mapping,
loff_t pos, u32 len, u32 copied, struct folio *folio,
void *fsdata)
{
struct inode *inode = mapping->host;
struct ntfs_inode *ni = ntfs_i(inode);
@ -1099,7 +1100,7 @@ ntfs_create_reparse_buffer(struct ntfs_sb_info *sbi, const char *symname,
typeof(rp->SymbolicLinkReparseBuffer) *rs;
bool is_absolute;
is_absolute = (strlen(symname) > 1 && symname[1] == ':');
is_absolute = symname[0] && symname[1] == ':';
rp = kzalloc(ntfs_reparse_bytes(2 * size + 2, is_absolute), GFP_NOFS);
if (!rp)
@ -1136,17 +1137,19 @@ ntfs_create_reparse_buffer(struct ntfs_sb_info *sbi, const char *symname,
/* PrintName + SubstituteName. */
rs->SubstituteNameOffset = cpu_to_le16(sizeof(short) * err);
rs->SubstituteNameLength = cpu_to_le16(sizeof(short) * err + (is_absolute ? 8 : 0));
rs->SubstituteNameLength =
cpu_to_le16(sizeof(short) * err + (is_absolute ? 8 : 0));
rs->PrintNameLength = rs->SubstituteNameOffset;
/*
* TODO: Use relative path if possible to allow Windows to
* parse this path.
* 0-absolute path 1- relative path (SYMLINK_FLAG_RELATIVE).
* 0-absolute path, 1- relative path (SYMLINK_FLAG_RELATIVE).
*/
rs->Flags = cpu_to_le32(is_absolute ? 0 : SYMLINK_FLAG_RELATIVE);
memmove(rp_name + err + (is_absolute ? 4 : 0), rp_name, sizeof(short) * err);
memmove(rp_name + err + (is_absolute ? 4 : 0), rp_name,
sizeof(short) * err);
if (is_absolute) {
/* Decorate SubstituteName. */
@ -1278,7 +1281,7 @@ int ntfs_create_inode(struct mnt_idmap *idmap, struct inode *dir,
fa |= FILE_ATTRIBUTE_READONLY;
/* Allocate PATH_MAX bytes. */
new_de = __getname();
new_de = kmem_cache_zalloc(names_cachep, GFP_KERNEL);
if (!new_de) {
err = -ENOMEM;
goto out1;
@ -1635,7 +1638,8 @@ int ntfs_create_inode(struct mnt_idmap *idmap, struct inode *dir,
* Use ni_find_attr cause layout of MFT record may be changed
* in ntfs_init_acl and ntfs_save_wsl_perm.
*/
attr = ni_find_attr(ni, NULL, NULL, ATTR_NAME, NULL, 0, NULL, NULL);
attr = ni_find_attr(ni, NULL, NULL, ATTR_NAME, NULL, 0, NULL,
NULL);
if (attr) {
struct ATTR_FILE_NAME *fn;
@ -1719,7 +1723,7 @@ int ntfs_link_inode(struct inode *inode, struct dentry *dentry)
struct NTFS_DE *de;
/* Allocate PATH_MAX bytes. */
de = __getname();
de = kmem_cache_zalloc(names_cachep, GFP_KERNEL);
if (!de)
return -ENOMEM;
@ -1757,7 +1761,7 @@ int ntfs_unlink_inode(struct inode *dir, const struct dentry *dentry)
return -EINVAL;
/* Allocate PATH_MAX bytes. */
de = __getname();
de = kmem_cache_zalloc(names_cachep, GFP_KERNEL);
if (!de)
return -ENOMEM;
@ -2102,7 +2106,6 @@ const struct address_space_operations ntfs_aops = {
const struct address_space_operations ntfs_aops_cmpr = {
.read_folio = ntfs_read_folio,
.readahead = ntfs_readahead,
.dirty_folio = block_dirty_folio,
.direct_IO = ntfs_direct_IO,
};

View File

@ -207,13 +207,13 @@ static int ntfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
}
/*
* ntfs_mkdir- inode_operations::mkdir
* ntfs_mkdir - inode_operations::mkdir
*/
static struct dentry *ntfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode)
{
return ERR_PTR(ntfs_create_inode(idmap, dir, dentry, NULL, S_IFDIR | mode, 0,
NULL, 0, NULL));
return ERR_PTR(ntfs_create_inode(idmap, dir, dentry, NULL,
S_IFDIR | mode, 0, NULL, 0, NULL));
}
/*

View File

@ -212,6 +212,7 @@ struct ntfs_sb_info {
u32 discard_granularity;
u64 discard_granularity_mask_inv; // ~(discard_granularity - 1)
u32 bdev_blocksize_mask; // bdev_logical_block_size(bdev) - 1;
u32 cluster_size; // bytes per cluster
u32 cluster_mask; // == cluster_size - 1
@ -570,7 +571,7 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
int ni_readpage_cmpr(struct ntfs_inode *ni, struct folio *folio);
int ni_decompress_file(struct ntfs_inode *ni);
int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages,
u32 pages_per_frame);
u32 pages_per_frame, int copy);
int ni_write_frame(struct ntfs_inode *ni, struct page **pages,
u32 pages_per_frame);
int ni_remove_name(struct ntfs_inode *dir_ni, struct ntfs_inode *ni,
@ -584,7 +585,8 @@ int ni_add_name(struct ntfs_inode *dir_ni, struct ntfs_inode *ni,
struct NTFS_DE *de);
int ni_rename(struct ntfs_inode *dir_ni, struct ntfs_inode *new_dir_ni,
struct ntfs_inode *ni, struct NTFS_DE *de, struct NTFS_DE *new_de);
struct ntfs_inode *ni, struct NTFS_DE *de,
struct NTFS_DE *new_de);
bool ni_is_dirty(struct inode *inode);
@ -632,9 +634,21 @@ int ntfs_get_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
u32 bytes, struct ntfs_buffers *nb);
int ntfs_write_bh(struct ntfs_sb_info *sbi, struct NTFS_RECORD_HEADER *rhdr,
struct ntfs_buffers *nb, int sync);
int ntfs_bio_pages(struct ntfs_sb_info *sbi, const struct runs_tree *run,
struct page **pages, u32 nr_pages, u64 vbo, u32 bytes,
enum req_op op);
int ntfs_read_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
void *buf, u64 vbo, size_t bytes, int wr);
/*
 * ntfs_read_run - Read @bytes at byte offset @vbo from the clusters
 * described by @run into @buf.
 *
 * Thin wrapper over ntfs_read_write_run() with wr == 0 (read direction).
 */
static inline int ntfs_read_run(struct ntfs_sb_info *sbi,
const struct runs_tree *run, void *buf, u64 vbo,
size_t bytes)
{
return ntfs_read_write_run(sbi, run, buf, vbo, bytes, 0);
}
/*
 * ntfs_write_run - Write @bytes from @buf at byte offset @vbo into the
 * clusters described by @run.
 *
 * Thin wrapper over ntfs_read_write_run() with wr == 1 (write direction).
 */
static inline int ntfs_write_run(struct ntfs_sb_info *sbi,
const struct runs_tree *run, void *buf,
u64 vbo, size_t bytes)
{
return ntfs_read_write_run(sbi, run, buf, vbo, bytes, 1);
}
int ntfs_bio_fill_1(struct ntfs_sb_info *sbi, const struct runs_tree *run);
int ntfs_vbo_to_lbo(struct ntfs_sb_info *sbi, const struct runs_tree *run,
u64 vbo, u64 *lbo, u64 *bytes);
@ -709,8 +723,7 @@ int ntfs_set_size(struct inode *inode, u64 new_size);
int ntfs_get_block(struct inode *inode, sector_t vbn,
struct buffer_head *bh_result, int create);
int ntfs_write_begin(const struct kiocb *iocb, struct address_space *mapping,
loff_t pos, u32 len, struct folio **foliop,
void **fsdata);
loff_t pos, u32 len, struct folio **foliop, void **fsdata);
int ntfs_write_end(const struct kiocb *iocb, struct address_space *mapping,
loff_t pos, u32 len, u32 copied, struct folio *folio,
void *fsdata);
@ -765,7 +778,7 @@ bool mi_remove_attr(struct ntfs_inode *ni, struct mft_inode *mi,
struct ATTRIB *attr);
bool mi_resize_attr(struct mft_inode *mi, struct ATTRIB *attr, int bytes);
int mi_pack_runs(struct mft_inode *mi, struct ATTRIB *attr,
struct runs_tree *run, CLST len);
const struct runs_tree *run, CLST len);
static inline bool mi_is_ref(const struct mft_inode *mi,
const struct MFT_REF *ref)
{
@ -800,7 +813,7 @@ void run_truncate_head(struct runs_tree *run, CLST vcn);
void run_truncate_around(struct runs_tree *run, CLST vcn);
bool run_add_entry(struct runs_tree *run, CLST vcn, CLST lcn, CLST len,
bool is_mft);
bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len);
bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len, CLST sub);
bool run_insert_range(struct runs_tree *run, CLST vcn, CLST len);
bool run_get_entry(const struct runs_tree *run, size_t index, CLST *vcn,
CLST *lcn, CLST *len);
@ -979,11 +992,12 @@ static inline __le64 kernel2nt(const struct timespec64 *ts)
*/
static inline void nt2kernel(const __le64 tm, struct timespec64 *ts)
{
u64 t = le64_to_cpu(tm) - _100ns2seconds * SecondsToStartOf1970;
s32 t32;
/* use signed 64 bit to support timestamps prior to epoch. xfstest 258. */
s64 t = le64_to_cpu(tm) - _100ns2seconds * SecondsToStartOf1970;
// WARNING: do_div changes its first argument(!)
ts->tv_nsec = do_div(t, _100ns2seconds) * 100;
ts->tv_sec = t;
ts->tv_sec = div_s64_rem(t, _100ns2seconds, &t32);
ts->tv_nsec = t32 * 100;
}
static inline struct ntfs_sb_info *ntfs_sb(struct super_block *sb)

View File

@ -621,7 +621,7 @@ bool mi_resize_attr(struct mft_inode *mi, struct ATTRIB *attr, int bytes)
* If failed, record is not changed.
*/
int mi_pack_runs(struct mft_inode *mi, struct ATTRIB *attr,
struct runs_tree *run, CLST len)
const struct runs_tree *run, CLST len)
{
int err = 0;
struct ntfs_sb_info *sbi = mi->sbi;

View File

@ -487,7 +487,7 @@ bool run_add_entry(struct runs_tree *run, CLST vcn, CLST lcn, CLST len,
* Helper for attr_collapse_range(),
* which is helper for fallocate(collapse_range).
*/
bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len)
bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len, CLST sub)
{
size_t index, eat;
struct ntfs_run *r, *e, *eat_start, *eat_end;
@ -511,7 +511,7 @@ bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len)
/* Collapse a middle part of normal run, split. */
if (!run_add_entry(run, vcn, SPARSE_LCN, len, false))
return false;
return run_collapse_range(run, vcn, len);
return run_collapse_range(run, vcn, len, sub);
}
r += 1;
@ -545,6 +545,13 @@ bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len)
memmove(eat_start, eat_end, (e - eat_end) * sizeof(*r));
run->count -= eat;
if (sub) {
e -= eat;
for (r = run->runs; r < e; r++) {
r->vcn -= sub;
}
}
return true;
}
@ -984,8 +991,12 @@ int run_unpack(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
if (!dlcn)
return -EINVAL;
if (check_add_overflow(prev_lcn, dlcn, &lcn))
/* Check special combination: 0 + SPARSE_LCN64. */
if (!prev_lcn && dlcn == SPARSE_LCN64) {
lcn = SPARSE_LCN64;
} else if (check_add_overflow(prev_lcn, dlcn, &lcn)) {
return -EINVAL;
}
prev_lcn = lcn;
} else {
/* The size of 'dlcn' can't be > 8. */

View File

@ -16,6 +16,13 @@
* mi - MFT inode - One MFT record (usually 1024 bytes or 4K), consists of attributes.
* ni - NTFS inode - Extends Linux inode. Consists of one or more MFT inodes.
* index - unit inside directory - 2K, 4K, <=page size, does not depend on cluster size.
* resident attribute - Attribute with content stored directly in the MFT record
* non-resident attribute - Attribute with content stored in clusters
* data_size - Size of attribute content in bytes. Equal to inode->i_size
* valid_size - Number of bytes written to the non-resident attribute
* allocated_size - Total size of clusters allocated for non-resident content
* total_size - Actual size of allocated clusters for sparse or compressed attributes
* - Constraint: valid_size <= data_size <= allocated_size
*
* WSL - Windows Subsystem for Linux
* https://docs.microsoft.com/en-us/windows/wsl/file-permissions
@ -278,9 +285,9 @@ static const struct fs_parameter_spec ntfs_fs_parameters[] = {
fsparam_flag("hide_dot_files", Opt_hide_dot_files),
fsparam_flag("windows_names", Opt_windows_names),
fsparam_flag("showmeta", Opt_showmeta),
fsparam_flag("acl", Opt_acl),
fsparam_flag_no("acl", Opt_acl),
fsparam_string("iocharset", Opt_iocharset),
fsparam_flag("prealloc", Opt_prealloc),
fsparam_flag_no("prealloc", Opt_prealloc),
fsparam_flag("nocase", Opt_nocase),
{}
};
@ -289,10 +296,8 @@ static const struct fs_parameter_spec ntfs_fs_parameters[] = {
/*
* Load nls table or if @nls is utf8 then return NULL.
*
* It is good idea to use here "const char *nls".
* But load_nls accepts "char*".
*/
static struct nls_table *ntfs_load_nls(char *nls)
static struct nls_table *ntfs_load_nls(const char *nls)
{
struct nls_table *ret;
@ -391,7 +396,7 @@ static int ntfs_fs_parse_param(struct fs_context *fc,
param->string = NULL;
break;
case Opt_prealloc:
opts->prealloc = 1;
opts->prealloc = !result.negated;
break;
case Opt_nocase:
opts->nocase = 1;
@ -567,10 +572,8 @@ static void ntfs_create_procdir(struct super_block *sb)
if (e) {
struct ntfs_sb_info *sbi = sb->s_fs_info;
proc_create_data("volinfo", 0444, e,
&ntfs3_volinfo_fops, sb);
proc_create_data("label", 0644, e,
&ntfs3_label_fops, sb);
proc_create_data("volinfo", 0444, e, &ntfs3_volinfo_fops, sb);
proc_create_data("label", 0644, e, &ntfs3_label_fops, sb);
sbi->procdir = e;
}
}
@ -601,10 +604,12 @@ static void ntfs_remove_proc_root(void)
}
}
#else
static void ntfs_create_procdir(struct super_block *sb) {}
static void ntfs_remove_procdir(struct super_block *sb) {}
static void ntfs_create_proc_root(void) {}
static void ntfs_remove_proc_root(void) {}
// clang-format off
static void ntfs_create_procdir(struct super_block *sb){}
static void ntfs_remove_procdir(struct super_block *sb){}
static void ntfs_create_proc_root(void){}
static void ntfs_remove_proc_root(void){}
// clang-format on
#endif
static struct kmem_cache *ntfs_inode_cachep;
@ -698,6 +703,14 @@ static void ntfs_put_super(struct super_block *sb)
/* Mark rw ntfs as clear, if possible. */
ntfs_set_state(sbi, NTFS_DIRTY_CLEAR);
if (sbi->options) {
unload_nls(sbi->options->nls);
kfree(sbi->options->nls_name);
kfree(sbi->options);
sbi->options = NULL;
}
ntfs3_put_sbi(sbi);
}
@ -934,6 +947,11 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
sbi->volume.blocks = dev_size >> PAGE_SHIFT;
/* Set dummy blocksize to read boot_block. */
if (!sb_min_blocksize(sb, PAGE_SIZE)) {
return -EINVAL;
}
read_boot:
bh = ntfs_bread(sb, boot_block);
if (!bh)
@ -1058,6 +1076,7 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
dev_size += sector_size - 1;
}
sbi->bdev_blocksize_mask = max(boot_sector_size, sector_size) - 1;
sbi->mft.lbo = mlcn << cluster_bits;
sbi->mft.lbo2 = mlcn2 << cluster_bits;
@ -1199,7 +1218,8 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
int err;
struct ntfs_sb_info *sbi = sb->s_fs_info;
struct block_device *bdev = sb->s_bdev;
struct ntfs_mount_options *options;
struct ntfs_mount_options *fc_opts;
struct ntfs_mount_options *options = NULL;
struct inode *inode;
struct ntfs_inode *ni;
size_t i, tt, bad_len, bad_frags;
@ -1216,7 +1236,23 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
ref.high = 0;
sbi->sb = sb;
sbi->options = options = fc->fs_private;
fc_opts = fc->fs_private;
if (!fc_opts) {
errorf(fc, "missing mount options");
return -EINVAL;
}
options = kmemdup(fc_opts, sizeof(*fc_opts), GFP_KERNEL);
if (!options)
return -ENOMEM;
if (fc_opts->nls_name) {
options->nls_name = kstrdup(fc_opts->nls_name, GFP_KERNEL);
if (!options->nls_name) {
kfree(options);
return -ENOMEM;
}
}
sbi->options = options;
fc->fs_private = NULL;
sb->s_flags |= SB_NODIRATIME;
sb->s_magic = 0x7366746e; // "ntfs"
@ -1224,8 +1260,7 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
sb->s_export_op = &ntfs_export_ops;
sb->s_time_gran = NTFS_TIME_GRAN; // 100 nsec
sb->s_xattr = ntfs_xattr_handlers;
if (options->nocase)
set_default_d_op(sb, &ntfs_dentry_ops);
set_default_d_op(sb, options->nocase ? &ntfs_dentry_ops : NULL);
options->nls = ntfs_load_nls(options->nls_name);
if (IS_ERR(options->nls)) {
@ -1295,7 +1330,7 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
sbi->volume.ni = ni;
if (info->flags & VOLUME_FLAG_DIRTY) {
sbi->volume.real_dirty = true;
ntfs_info(sb, "It is recommened to use chkdsk.");
ntfs_info(sb, "It is recommended to use chkdsk.");
}
/* Load $MFTMirr to estimate recs_mirr. */
@ -1642,9 +1677,16 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
put_inode_out:
iput(inode);
out:
/* sbi->options == options */
if (options) {
unload_nls(options->nls);
kfree(options->nls_name);
kfree(options);
sbi->options = NULL;
}
ntfs3_put_sbi(sbi);
kfree(boot2);
ntfs3_put_sbi(sbi);
return err;
}
@ -1768,6 +1810,12 @@ static int __ntfs_init_fs_context(struct fs_context *fc)
opts->fs_gid = current_gid();
opts->fs_fmask_inv = ~current_umask();
opts->fs_dmask_inv = ~current_umask();
opts->prealloc = 1;
#ifdef CONFIG_NTFS3_FS_POSIX_ACL
/* Set the default value 'acl' */
fc->sb_flags |= SB_POSIXACL;
#endif
if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE)
goto ok;

View File

@ -654,12 +654,22 @@ static noinline int ntfs_set_acl_ex(struct mnt_idmap *idmap,
err = ntfs_set_ea(inode, name, name_len, value, size, flags, 0, NULL);
if (err == -ENODATA && !size)
err = 0; /* Removing non-existent xattr. */
if (!err) {
set_cached_acl(inode, type, acl);
if (err)
goto out;
if (inode->i_mode != mode) {
umode_t old_mode = inode->i_mode;
inode->i_mode = mode;
err = ntfs_save_wsl_perm(inode, NULL);
if (err) {
inode->i_mode = old_mode;
goto out;
}
inode->i_mode = mode;
inode_set_ctime_current(inode);
mark_inode_dirty(inode);
}
set_cached_acl(inode, type, acl);
inode_set_ctime_current(inode);
mark_inode_dirty(inode);
out:
kfree(value);