KVM: guest_memfd: Add slab-allocated inode cache

Add a dedicated gmem_inode structure and a slab-allocated inode cache for
guest memory backing, similar to how shmem handles inodes.

This adds the necessary allocation/destruction functions and prepares
for upcoming guest_memfd NUMA policy support changes.  Using a dedicated
structure will also allow for additional cleanups, e.g. to track flags in
gmem_inode instead of i_private.

Signed-off-by: Shivank Garg <shivankg@amd.com>
Tested-by: Ashish Kalra <ashish.kalra@amd.com>
[sean: s/kvm_gmem_inode_info/gmem_inode, name init_once()]
Reviewed-by: Ackerley Tng <ackerleytng@google.com>
Tested-by: Ackerley Tng <ackerleytng@google.com>
Link: https://lore.kernel.org/r/20251016172853.52451-5-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
Author:    Shivank Garg <shivankg@amd.com>
Date:      2025-10-16 10:28:45 -07:00
Committer: Sean Christopherson <seanjc@google.com>
Parent:    a63ca4236e
Commit:    f609e89ae8

1 file changed, 77 insertions(+), 2 deletions(-)
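The patch hinges on the embedded-inode idiom: struct gmem_inode embeds a full struct inode by value, and GMEM_I() converts a VFS-visible inode pointer back into the containing gmem_inode with container_of(). As a rough, self-contained userspace sketch of that pattern (demo_inode, demo_gmem_inode, DEMO_GMEM_I and the flags field are made-up stand-ins, not part of the patch; flags only illustrates the "track flags in gmem_inode instead of i_private" cleanup the changelog mentions):

#include <stddef.h>
#include <stdio.h>

/* Minimal stand-in for the kernel's container_of() helper. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* Stand-in for struct inode; only the bits this demo touches. */
struct demo_inode {
        unsigned long i_ino;
};

/*
 * Stand-in for struct gmem_inode: the generic object is embedded by
 * value, so a pointer to it can be converted back to the containing
 * structure.  The "flags" field is hypothetical.
 */
struct demo_gmem_inode {
        struct demo_inode vfs_inode;
        unsigned long flags;
};

static struct demo_gmem_inode *DEMO_GMEM_I(struct demo_inode *inode)
{
        return container_of(inode, struct demo_gmem_inode, vfs_inode);
}

int main(void)
{
        struct demo_gmem_inode gi = {
                .vfs_inode = { .i_ino = 42 },
                .flags = 0x1,
        };
        /* Callers typically only see the embedded generic object... */
        struct demo_inode *inode = &gi.vfs_inode;

        /* ...and recover the per-subsystem wrapper on demand. */
        printf("ino=%lu flags=%#lx\n", inode->i_ino, DEMO_GMEM_I(inode)->flags);
        return 0;
}

The conversion is only valid because the inode is embedded rather than pointed to; that is why kvm_gmem_alloc_inode() in the diff below allocates the whole gmem_inode and hands the VFS the address of its vfs_inode member.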


@@ -26,6 +26,15 @@ struct gmem_file {
 	struct list_head entry;
 };
 
+struct gmem_inode {
+	struct inode vfs_inode;
+};
+
+static __always_inline struct gmem_inode *GMEM_I(struct inode *inode)
+{
+	return container_of(inode, struct gmem_inode, vfs_inode);
+}
+
 #define kvm_gmem_for_each_file(f, mapping) \
 	list_for_each_entry(f, &(mapping)->i_private_list, entry)
 
@@ -830,13 +839,61 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_gmem_populate);
 #endif
 
+static struct kmem_cache *kvm_gmem_inode_cachep;
+
+static void kvm_gmem_init_inode_once(void *__gi)
+{
+	struct gmem_inode *gi = __gi;
+
+	/*
+	 * Note!  Don't initialize the inode with anything specific to the
+	 * guest_memfd instance, or that might be specific to how the inode is
+	 * used (from the VFS-layer's perspective).  This hook is called only
+	 * during the initial slab allocation, i.e. only fields/state that are
+	 * idempotent across _all_ use of the inode _object_ can be initialized
+	 * at this time!
+	 */
+	inode_init_once(&gi->vfs_inode);
+}
+
+static struct inode *kvm_gmem_alloc_inode(struct super_block *sb)
+{
+	struct gmem_inode *gi;
+
+	gi = alloc_inode_sb(sb, kvm_gmem_inode_cachep, GFP_KERNEL);
+	if (!gi)
+		return NULL;
+
+	return &gi->vfs_inode;
+}
+
+static void kvm_gmem_destroy_inode(struct inode *inode)
+{
+}
+
+static void kvm_gmem_free_inode(struct inode *inode)
+{
+	kmem_cache_free(kvm_gmem_inode_cachep, GMEM_I(inode));
+}
+
+static const struct super_operations kvm_gmem_super_operations = {
+	.statfs = simple_statfs,
+	.alloc_inode = kvm_gmem_alloc_inode,
+	.destroy_inode = kvm_gmem_destroy_inode,
+	.free_inode = kvm_gmem_free_inode,
+};
+
 static int kvm_gmem_init_fs_context(struct fs_context *fc)
 {
+	struct pseudo_fs_context *ctx;
+
 	if (!init_pseudo(fc, GUEST_MEMFD_MAGIC))
 		return -ENOMEM;
 
 	fc->s_iflags |= SB_I_NOEXEC;
 	fc->s_iflags |= SB_I_NODEV;
 
+	ctx = fc->fs_private;
+	ctx->ops = &kvm_gmem_super_operations;
+
 	return 0;
 }
 
@@ -860,13 +917,31 @@ static int kvm_gmem_init_mount(void)
 
 int kvm_gmem_init(struct module *module)
 {
-	kvm_gmem_fops.owner = module;
+	struct kmem_cache_args args = {
+		.align = 0,
+		.ctor = kvm_gmem_init_inode_once,
+	};
+	int ret;
 
-	return kvm_gmem_init_mount();
+	kvm_gmem_fops.owner = module;
+
+	kvm_gmem_inode_cachep = kmem_cache_create("kvm_gmem_inode_cache",
+						  sizeof(struct gmem_inode),
+						  &args, SLAB_ACCOUNT);
+	if (!kvm_gmem_inode_cachep)
+		return -ENOMEM;
+
+	ret = kvm_gmem_init_mount();
+	if (ret) {
+		kmem_cache_destroy(kvm_gmem_inode_cachep);
+		return ret;
+	}
+
+	return 0;
 }
 
 void kvm_gmem_exit(void)
 {
 	kern_unmount(kvm_gmem_mnt);
 	kvm_gmem_mnt = NULL;
+	rcu_barrier();
+	kmem_cache_destroy(kvm_gmem_inode_cachep);
 }
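The comment in kvm_gmem_init_inode_once() captures the subtle part of using a slab constructor: the ctor runs when a slab object is first created, not on every allocation from the cache, so only state that remains valid across reuse of the object may be set there. A toy userspace model of that lifecycle, assuming nothing more than a hand-rolled freelist cache (toy_cache, toy_obj and toy_init_once are invented names for illustration):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_obj {
        bool once_initialized;  /* set by the ctor, survives reuse */
        int scratch;            /* per-use state; callers must reset it */
};

struct toy_cache {
        struct toy_obj *freelist[8];
        int nr_free;
        void (*ctor)(struct toy_obj *obj);
};

/* Like kmem_cache_alloc(): the ctor only runs for newly created objects. */
static struct toy_obj *toy_cache_alloc(struct toy_cache *c)
{
        if (c->nr_free)
                return c->freelist[--c->nr_free];       /* reuse: ctor is NOT re-run */

        struct toy_obj *obj = calloc(1, sizeof(*obj));
        if (obj && c->ctor)
                c->ctor(obj);                           /* "init once", at first creation only */
        return obj;
}

/* Like kmem_cache_free(): the object goes back to the cache, state intact. */
static void toy_cache_free(struct toy_cache *c, struct toy_obj *obj)
{
        if (c->nr_free < 8)
                c->freelist[c->nr_free++] = obj;
        else
                free(obj);
}

static void toy_init_once(struct toy_obj *obj)
{
        obj->once_initialized = true;   /* idempotent across every use of the object */
}

int main(void)
{
        struct toy_cache cache = { .ctor = toy_init_once };
        struct toy_obj *a, *b;

        a = toy_cache_alloc(&cache);
        a->scratch = 123;               /* per-use state */
        toy_cache_free(&cache, a);

        b = toy_cache_alloc(&cache);    /* same object handed back out */
        printf("once=%d scratch=%d\n", b->once_initialized, b->scratch);
        /* Prints once=1 scratch=123: the "once" state persists, the per-use state is stale. */
        return 0;
}

The same reuse property is why kvm_gmem_exit() calls rcu_barrier() before kmem_cache_destroy(): inodes are returned to the cache from an RCU callback, and the barrier is the usual way filesystems ensure those deferred frees have completed before the cache itself is torn down.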