Merge branch 'mm-hotfixes-stable' into mm-stable in order to pick up
changes required by mm-stable material: hugetlb and damon.
commit bc9950b56f
@@ -16127,6 +16127,7 @@ M:	Andrew Morton <akpm@linux-foundation.org>
 M:	Mike Rapoport <rppt@kernel.org>
 L:	linux-mm@kvack.org
 S:	Maintained
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/rppt/memblock.git
 F:	include/linux/numa_memblks.h
 F:	mm/numa.c
 F:	mm/numa_emulation.c
@@ -16194,6 +16195,7 @@ R:	Rik van Riel <riel@surriel.com>
 R:	Liam R. Howlett <Liam.Howlett@oracle.com>
 R:	Vlastimil Babka <vbabka@suse.cz>
 R:	Harry Yoo <harry.yoo@oracle.com>
+R:	Jann Horn <jannh@google.com>
 L:	linux-mm@kvack.org
 S:	Maintained
 F:	include/linux/rmap.h
@@ -16239,6 +16241,7 @@ R:	Nico Pache <npache@redhat.com>
 R:	Ryan Roberts <ryan.roberts@arm.com>
 R:	Dev Jain <dev.jain@arm.com>
 R:	Barry Song <baohua@kernel.org>
+R:	Lance Yang <lance.yang@linux.dev>
 L:	linux-mm@kvack.org
 S:	Maintained
 W:	http://www.linux-mm.org
@@ -94,7 +94,7 @@ int load_other_segments(struct kimage *image,
			 char *initrd, unsigned long initrd_len,
			 char *cmdline)
 {
-	struct kexec_buf kbuf;
+	struct kexec_buf kbuf = {};
	void *dtb = NULL;
	unsigned long initrd_load_addr = 0, dtb_len,
		      orig_segments = image->nr_segments;
@@ -28,7 +28,7 @@ static int riscv_kexec_elf_load(struct kimage *image, struct elfhdr *ehdr,
	int i;
	int ret = 0;
	size_t size;
-	struct kexec_buf kbuf;
+	struct kexec_buf kbuf = {};
	const struct elf_phdr *phdr;

	kbuf.image = image;
@@ -66,7 +66,7 @@ static int elf_find_pbase(struct kimage *image, unsigned long kernel_len,
 {
	int i;
	int ret;
-	struct kexec_buf kbuf;
+	struct kexec_buf kbuf = {};
	const struct elf_phdr *phdr;
	unsigned long lowest_paddr = ULONG_MAX;
	unsigned long lowest_vaddr = ULONG_MAX;
@@ -41,7 +41,7 @@ static void *image_load(struct kimage *image,
	struct riscv_image_header *h;
	u64 flags;
	bool be_image, be_kernel;
-	struct kexec_buf kbuf;
+	struct kexec_buf kbuf = {};
	int ret;

	/* Check Image header */
@@ -261,7 +261,7 @@ int load_extra_segments(struct kimage *image, unsigned long kernel_start,
	int ret;
	void *fdt;
	unsigned long initrd_pbase = 0UL;
-	struct kexec_buf kbuf;
+	struct kexec_buf kbuf = {};
	char *modified_cmdline = NULL;

	kbuf.image = image;
@@ -16,7 +16,7 @@
 static int kexec_file_add_kernel_elf(struct kimage *image,
				     struct s390_load_data *data)
 {
-	struct kexec_buf buf;
+	struct kexec_buf buf = {};
	const Elf_Ehdr *ehdr;
	const Elf_Phdr *phdr;
	Elf_Addr entry;
@@ -16,7 +16,7 @@
 static int kexec_file_add_kernel_image(struct kimage *image,
				       struct s390_load_data *data)
 {
-	struct kexec_buf buf;
+	struct kexec_buf buf = {};

	buf.image = image;

@@ -129,7 +129,7 @@ static int kexec_file_update_purgatory(struct kimage *image,
 static int kexec_file_add_purgatory(struct kimage *image,
				    struct s390_load_data *data)
 {
-	struct kexec_buf buf;
+	struct kexec_buf buf = {};
	int ret;

	buf.image = image;
@@ -152,7 +152,7 @@ static int kexec_file_add_purgatory(struct kimage *image,
 static int kexec_file_add_initrd(struct kimage *image,
				 struct s390_load_data *data)
 {
-	struct kexec_buf buf;
+	struct kexec_buf buf = {};
	int ret;

	buf.image = image;
@@ -184,7 +184,7 @@ static int kexec_file_add_ipl_report(struct kimage *image,
 {
	__u32 *lc_ipl_parmblock_ptr;
	unsigned int len, ncerts;
-	struct kexec_buf buf;
+	struct kexec_buf buf = {};
	unsigned long addr;
	void *ptr, *end;
	int ret;
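Reviewer note: every kexec_file hunk above is the same one-line change, zero-initializing the on-stack kexec_buf so any field a particular loader never sets reads as zero rather than stack garbage. A minimal standalone sketch of that pattern, using a made-up struct buf_desc instead of the kernel's struct kexec_buf:

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for a struct whose fields are only partly set. */
struct buf_desc {
	void *mem;
	unsigned long mem_len;
	int top_down;   /* flag some callers never touch */
	int random;     /* flag some callers never touch */
};

static void add_buffer(const struct buf_desc *b)
{
	/* A consumer that looks at every field, whether the caller set it or not. */
	printf("len=%lu top_down=%d random=%d\n",
	       b->mem_len, b->top_down, b->random);
}

int main(void)
{
	struct buf_desc uninit;            /* unset fields are indeterminate */
	struct buf_desc zeroed = {0};      /* every field starts at zero */

	memset(&uninit, 0xa5, sizeof(uninit)); /* simulate stale stack data */
	uninit.mem = NULL;
	uninit.mem_len = 4096;             /* the two flags keep the stale 0xa5 pattern */

	zeroed.mem = NULL;
	zeroed.mem_len = 4096;             /* the two flags are reliably zero */

	add_buffer(&uninit);
	add_buffer(&zeroed);
	return 0;
}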
@@ -1788,6 +1788,7 @@ static int write_same_filled_page(struct zram *zram, unsigned long fill,
				  u32 index)
 {
	zram_slot_lock(zram, index);
+	zram_free_page(zram, index);
	zram_set_flag(zram, index, ZRAM_SAME);
	zram_set_handle(zram, index, fill);
	zram_slot_unlock(zram, index);
@@ -1825,6 +1826,7 @@ static int write_incompressible_page(struct zram *zram, struct page *page,
	kunmap_local(src);

	zram_slot_lock(zram, index);
+	zram_free_page(zram, index);
	zram_set_flag(zram, index, ZRAM_HUGE);
	zram_set_handle(zram, index, handle);
	zram_set_obj_size(zram, index, PAGE_SIZE);
@@ -1848,11 +1850,6 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
	unsigned long element;
	bool same_filled;

-	/* First, free memory allocated to this slot (if any) */
-	zram_slot_lock(zram, index);
-	zram_free_page(zram, index);
-	zram_slot_unlock(zram, index);
-
	mem = kmap_local_page(page);
	same_filled = page_same_filled(mem, &element);
	kunmap_local(mem);
@@ -1894,6 +1891,7 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
	zcomp_stream_put(zstrm);

	zram_slot_lock(zram, index);
+	zram_free_page(zram, index);
	zram_set_handle(zram, index, handle);
	zram_set_obj_size(zram, index, comp_len);
	zram_slot_unlock(zram, index);
@@ -1075,7 +1075,7 @@ void nilfs_sysfs_delete_device_group(struct the_nilfs *nilfs)
 ************************************************************************/

 static ssize_t nilfs_feature_revision_show(struct kobject *kobj,
-					    struct attribute *attr, char *buf)
+					    struct kobj_attribute *attr, char *buf)
 {
	return sysfs_emit(buf, "%d.%d\n",
			NILFS_CURRENT_REV, NILFS_MINOR_REV);
@@ -1087,7 +1087,7 @@ static const char features_readme_str[] =
	"(1) revision\n\tshow current revision of NILFS file system driver.\n";

 static ssize_t nilfs_feature_README_show(struct kobject *kobj,
-					 struct attribute *attr,
+					 struct kobj_attribute *attr,
					 char *buf)
 {
	return sysfs_emit(buf, features_readme_str);
@@ -50,16 +50,16 @@ struct nilfs_sysfs_dev_subgroups {
	struct completion sg_segments_kobj_unregister;
 };

-#define NILFS_COMMON_ATTR_STRUCT(name) \
+#define NILFS_KOBJ_ATTR_STRUCT(name) \
 struct nilfs_##name##_attr { \
	struct attribute attr; \
-	ssize_t (*show)(struct kobject *, struct attribute *, \
+	ssize_t (*show)(struct kobject *, struct kobj_attribute *, \
			char *); \
-	ssize_t (*store)(struct kobject *, struct attribute *, \
+	ssize_t (*store)(struct kobject *, struct kobj_attribute *, \
			 const char *, size_t); \
 }

-NILFS_COMMON_ATTR_STRUCT(feature);
+NILFS_KOBJ_ATTR_STRUCT(feature);

 #define NILFS_DEV_ATTR_STRUCT(name) \
 struct nilfs_##name##_attr { \
@@ -706,6 +706,8 @@ int ocfs2_extent_map_get_blocks(struct inode *inode, u64 v_blkno, u64 *p_blkno,
 * it not only handles the fiemap for inlined files, but also deals
 * with the fast symlink, cause they have no difference for extent
 * mapping per se.
+ *
+ * Must be called with ip_alloc_sem semaphore held.
 */
 static int ocfs2_fiemap_inline(struct inode *inode, struct buffer_head *di_bh,
			       struct fiemap_extent_info *fieinfo,
@@ -717,6 +719,7 @@ static int ocfs2_fiemap_inline(struct inode *inode, struct buffer_head *di_bh,
	u64 phys;
	u32 flags = FIEMAP_EXTENT_DATA_INLINE|FIEMAP_EXTENT_LAST;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
+	lockdep_assert_held_read(&oi->ip_alloc_sem);

	di = (struct ocfs2_dinode *)di_bh->b_data;
	if (ocfs2_inode_is_fast_symlink(inode))
@@ -732,8 +735,11 @@ static int ocfs2_fiemap_inline(struct inode *inode, struct buffer_head *di_bh,
		phys += offsetof(struct ocfs2_dinode,
				 id2.i_data.id_data);

+		/* Release the ip_alloc_sem to prevent deadlock on page fault */
+		up_read(&OCFS2_I(inode)->ip_alloc_sem);
		ret = fiemap_fill_next_extent(fieinfo, 0, phys, id_count,
					      flags);
+		down_read(&OCFS2_I(inode)->ip_alloc_sem);
		if (ret < 0)
			return ret;
	}
@@ -802,9 +808,11 @@ int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		len_bytes = (u64)le16_to_cpu(rec.e_leaf_clusters) << osb->s_clustersize_bits;
		phys_bytes = le64_to_cpu(rec.e_blkno) << osb->sb->s_blocksize_bits;
		virt_bytes = (u64)le32_to_cpu(rec.e_cpos) << osb->s_clustersize_bits;

+		/* Release the ip_alloc_sem to prevent deadlock on page fault */
+		up_read(&OCFS2_I(inode)->ip_alloc_sem);
		ret = fiemap_fill_next_extent(fieinfo, virt_bytes, phys_bytes,
					      len_bytes, fe_flags);
+		down_read(&OCFS2_I(inode)->ip_alloc_sem);
		if (ret)
			break;

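Reviewer note: both ocfs2 hunks apply the workaround their new comment names: drop ip_alloc_sem across fiemap_fill_next_extent(), which writes into a user-supplied buffer and can therefore fault, with the fault path potentially needing the same semaphore. A rough userspace analogue of the unlock-around-callback pattern, using a pthread rwlock instead of the kernel rw_semaphore (all names below are illustrative, not ocfs2 code):

#include <pthread.h>
#include <stdio.h>

/* Toy stand-in for the inode's ip_alloc_sem. */
static pthread_rwlock_t alloc_sem = PTHREAD_RWLOCK_INITIALIZER;

/*
 * Stand-in for fiemap_fill_next_extent(): copies data out to the caller,
 * which in the kernel can fault and re-enter paths that take alloc_sem.
 */
static int fill_next_extent(unsigned long phys, unsigned long len)
{
	printf("extent: phys=%#lx len=%lu\n", phys, len);
	return 0;
}

static int report_extent(unsigned long phys, unsigned long len)
{
	int ret;

	pthread_rwlock_rdlock(&alloc_sem);
	/* ... look up the extent under the lock ... */

	/* Drop the lock across the callback, as the ocfs2 fix does. */
	pthread_rwlock_unlock(&alloc_sem);
	ret = fill_next_extent(phys, len);
	pthread_rwlock_rdlock(&alloc_sem);

	/* ... continue the walk; any state read earlier must be revalidated ... */
	pthread_rwlock_unlock(&alloc_sem);
	return ret;
}

int main(void)
{
	return report_extent(0x1000, 4096);
}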
@@ -393,7 +393,8 @@ struct proc_dir_entry *proc_register(struct proc_dir_entry *dir,
	if (proc_alloc_inum(&dp->low_ino))
		goto out_free_entry;

-	pde_set_flags(dp);
+	if (!S_ISDIR(dp->mode))
+		pde_set_flags(dp);

	write_lock(&proc_subdir_lock);
	dp->parent = dir;
@@ -18,23 +18,42 @@
 #define KASAN_ABI_VERSION 5

 /*
+ * Clang 22 added preprocessor macros to match GCC, in hopes of eventually
+ * dropping __has_feature support for sanitizers:
+ * https://github.com/llvm/llvm-project/commit/568c23bbd3303518c5056d7f03444dae4fdc8a9c
+ * Create these macros for older versions of clang so that it is easy to clean
+ * up once the minimum supported version of LLVM for building the kernel always
+ * creates these macros.
+ *
 * Note: Checking __has_feature(*_sanitizer) is only true if the feature is
 * enabled. Therefore it is not required to additionally check defined(CONFIG_*)
 * to avoid adding redundant attributes in other configurations.
 */
-#if __has_feature(address_sanitizer) || __has_feature(hwaddress_sanitizer)
-/* Emulate GCC's __SANITIZE_ADDRESS__ flag */
+#if __has_feature(address_sanitizer) && !defined(__SANITIZE_ADDRESS__)
 #define __SANITIZE_ADDRESS__
+#endif
+#if __has_feature(hwaddress_sanitizer) && !defined(__SANITIZE_HWADDRESS__)
+#define __SANITIZE_HWADDRESS__
+#endif
+#if __has_feature(thread_sanitizer) && !defined(__SANITIZE_THREAD__)
+#define __SANITIZE_THREAD__
+#endif
+
+/*
+ * Treat __SANITIZE_HWADDRESS__ the same as __SANITIZE_ADDRESS__ in the kernel.
+ */
+#ifdef __SANITIZE_HWADDRESS__
+#define __SANITIZE_ADDRESS__
+#endif
+
+#ifdef __SANITIZE_ADDRESS__
 #define __no_sanitize_address \
	__attribute__((no_sanitize("address", "hwaddress")))
 #else
 #define __no_sanitize_address
 #endif

-#if __has_feature(thread_sanitizer)
-/* emulate gcc's __SANITIZE_THREAD__ flag */
-#define __SANITIZE_THREAD__
+#ifdef __SANITIZE_THREAD__
 #define __no_sanitize_thread \
	__attribute__((no_sanitize("thread")))
 #else
@@ -636,6 +636,7 @@ struct damon_operations {
 * @data:	Data that will be passed to @fn.
 * @repeat:	Repeat invocations.
 * @return_code:	Return code from @fn invocation.
+ * @dealloc_on_cancel:	De-allocate when canceled.
 *
 * Control damon_call(), which requests specific kdamond to invoke a given
 * function.  Refer to damon_call() for more details.
@@ -645,6 +646,7 @@ struct damon_call_control {
	void *data;
	bool repeat;
	int return_code;
+	bool dealloc_on_cancel;
	/* private: internal use only */
	/* informs if the kdamond finished handling of the request */
	struct completion completion;
@@ -562,7 +562,7 @@ static inline void kasan_init_hw_tags(void) { }
 #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

 void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
-int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
+int kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask);
 void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end,
@@ -574,7 +574,7 @@ static inline void kasan_populate_early_vm_area_shadow(void *start,
						       unsigned long size)
 { }
 static inline int kasan_populate_vmalloc(unsigned long start,
-					unsigned long size)
+					unsigned long size, gfp_t gfp_mask)
 {
	return 0;
 }
@@ -610,7 +610,7 @@ static __always_inline void kasan_poison_vmalloc(const void *start,
 static inline void kasan_populate_early_vm_area_shadow(void *start,
						       unsigned long size) { }
 static inline int kasan_populate_vmalloc(unsigned long start,
-					unsigned long size)
+					unsigned long size, gfp_t gfp_mask)
 {
	return 0;
 }
@@ -381,6 +381,16 @@ void folio_add_lru_vma(struct folio *, struct vm_area_struct *);
 void mark_page_accessed(struct page *);
 void folio_mark_accessed(struct folio *);

+static inline bool folio_may_be_lru_cached(struct folio *folio)
+{
+	/*
+	 * Holding PMD-sized folios in per-CPU LRU cache unbalances accounting.
+	 * Holding small numbers of low-order mTHP folios in per-CPU LRU cache
+	 * will be sensible, but nobody has implemented and tested that yet.
+	 */
+	return !folio_test_large(folio);
+}
+
 extern atomic_t lru_disable_count;

 static inline bool lru_cache_disabled(void)
@@ -956,6 +956,7 @@ void start_kernel(void)
	sort_main_extable();
	trap_init();
	mm_core_init();
+	maple_tree_init();
	poking_init();
	ftrace_init();

@@ -973,7 +974,6 @@ void start_kernel(void)
			"Interrupts were enabled *very* early, fixing it\n"))
		local_irq_disable();
	radix_tree_init();
-	maple_tree_init();

	/*
	 * Set up housekeeping before setting up workqueues to allow the unbound
@@ -2141,6 +2141,10 @@ static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
	if (!quota->ms && !quota->sz && list_empty(&quota->goals))
		return;

+	/* First charge window */
+	if (!quota->total_charged_sz && !quota->charged_from)
+		quota->charged_from = jiffies;
+
	/* New charge window starts */
	if (time_after_eq(jiffies, quota->charged_from +
				msecs_to_jiffies(quota->reset_interval))) {
@@ -2506,10 +2510,14 @@ static void kdamond_call(struct damon_ctx *ctx, bool cancel)
		mutex_lock(&ctx->call_controls_lock);
		list_del(&control->list);
		mutex_unlock(&ctx->call_controls_lock);
-		if (!control->repeat)
+		if (!control->repeat) {
			complete(&control->completion);
-		else
+		} else if (control->canceled && control->dealloc_on_cancel) {
+			kfree(control);
+			continue;
+		} else {
			list_add(&control->list, &repeat_controls);
+		}
	}
	control = list_first_entry_or_null(&repeat_controls,
			struct damon_call_control, list);
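Reviewer note: the damos_adjust_quota() hunk seeds the charge window on first use, so the first window starts counting from the moment charging begins instead of inheriting a zero start time. A standalone sketch of the same windowed-quota bookkeeping, using monotonic milliseconds in place of jiffies (the struct and helper names below are made up for illustration):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct quota {
	uint64_t reset_interval_ms;  /* window length */
	uint64_t charged_from_ms;    /* window start, 0 = not started yet */
	uint64_t total_charged;      /* charged across all windows */
	uint64_t charged_in_window;  /* charged in the current window */
	uint64_t limit;              /* per-window budget */
};

static uint64_t now_ms(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

/* Returns true if 'amount' fits into the current window's budget. */
static bool quota_charge(struct quota *q, uint64_t amount)
{
	uint64_t now = now_ms();

	/* First charge window: start counting from the first use. */
	if (!q->total_charged && !q->charged_from_ms)
		q->charged_from_ms = now;

	/* New charge window starts. */
	if (now >= q->charged_from_ms + q->reset_interval_ms) {
		q->charged_from_ms = now;
		q->charged_in_window = 0;
	}

	if (q->charged_in_window + amount > q->limit)
		return false;

	q->charged_in_window += amount;
	q->total_charged += amount;
	return true;
}

int main(void)
{
	struct quota q = { .reset_interval_ms = 1000, .limit = 100 };

	printf("%d %d\n", quota_charge(&q, 60), quota_charge(&q, 60));
	return 0;
}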
@@ -198,6 +198,11 @@ static int damon_lru_sort_apply_parameters(void)
	if (err)
		return err;

+	if (!damon_lru_sort_mon_attrs.sample_interval) {
+		err = -EINVAL;
+		goto out;
+	}
+
	err = damon_set_attrs(ctx, &damon_lru_sort_mon_attrs);
	if (err)
		goto out;
@@ -194,6 +194,11 @@ static int damon_reclaim_apply_parameters(void)
	if (err)
		return err;

+	if (!damon_reclaim_mon_attrs.aggr_interval) {
+		err = -EINVAL;
+		goto out;
+	}
+
	err = damon_set_attrs(param_ctx, &damon_reclaim_mon_attrs);
	if (err)
		goto out;
@@ -1292,14 +1292,18 @@ static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
 {
	struct damon_sysfs_kdamond *kdamond = container_of(kobj,
			struct damon_sysfs_kdamond, kobj);
-	struct damon_ctx *ctx = kdamond->damon_ctx;
-	bool running;
+	struct damon_ctx *ctx;
+	bool running = false;

-	if (!ctx)
-		running = false;
-	else
+	if (!mutex_trylock(&damon_sysfs_lock))
+		return -EBUSY;
+
+	ctx = kdamond->damon_ctx;
+	if (ctx)
		running = damon_is_running(ctx);

+	mutex_unlock(&damon_sysfs_lock);
+
	return sysfs_emit(buf, "%s\n", running ?
			damon_sysfs_cmd_strs[DAMON_SYSFS_CMD_ON] :
			damon_sysfs_cmd_strs[DAMON_SYSFS_CMD_OFF]);
@@ -1565,14 +1569,10 @@ static int damon_sysfs_repeat_call_fn(void *data)
	return 0;
 }

-static struct damon_call_control damon_sysfs_repeat_call_control = {
-	.fn = damon_sysfs_repeat_call_fn,
-	.repeat = true,
-};
-
 static int damon_sysfs_turn_damon_on(struct damon_sysfs_kdamond *kdamond)
 {
	struct damon_ctx *ctx;
+	struct damon_call_control *repeat_call_control;
	int err;

	if (damon_sysfs_kdamond_running(kdamond))
@@ -1585,18 +1585,29 @@ static int damon_sysfs_turn_damon_on(struct damon_sysfs_kdamond *kdamond)
		damon_destroy_ctx(kdamond->damon_ctx);
	kdamond->damon_ctx = NULL;

+	repeat_call_control = kmalloc(sizeof(*repeat_call_control),
+			GFP_KERNEL);
+	if (!repeat_call_control)
+		return -ENOMEM;
+
	ctx = damon_sysfs_build_ctx(kdamond->contexts->contexts_arr[0]);
-	if (IS_ERR(ctx))
+	if (IS_ERR(ctx)) {
+		kfree(repeat_call_control);
		return PTR_ERR(ctx);
+	}
	err = damon_start(&ctx, 1, false);
	if (err) {
+		kfree(repeat_call_control);
		damon_destroy_ctx(ctx);
		return err;
	}
	kdamond->damon_ctx = ctx;

-	damon_sysfs_repeat_call_control.data = kdamond;
-	damon_call(ctx, &damon_sysfs_repeat_call_control);
+	repeat_call_control->fn = damon_sysfs_repeat_call_fn;
+	repeat_call_control->data = kdamond;
+	repeat_call_control->repeat = true;
+	repeat_call_control->dealloc_on_cancel = true;
+	damon_call(ctx, repeat_call_control);
	return err;
 }
 mm/gup.c | 14
@@ -2287,8 +2287,8 @@ static unsigned long collect_longterm_unpinnable_folios(
		struct pages_or_folios *pofs)
 {
	unsigned long collected = 0;
-	bool drain_allow = true;
	struct folio *folio;
+	int drained = 0;
	long i = 0;

	for (folio = pofs_get_folio(pofs, i); folio;
@@ -2307,9 +2307,17 @@ static unsigned long collect_longterm_unpinnable_folios(
			continue;
		}

-		if (!folio_test_lru(folio) && drain_allow) {
+		if (drained == 0 && folio_may_be_lru_cached(folio) &&
+		    folio_ref_count(folio) !=
+		    folio_expected_ref_count(folio) + 1) {
+			lru_add_drain();
+			drained = 1;
+		}
+		if (drained == 1 && folio_may_be_lru_cached(folio) &&
+		    folio_ref_count(folio) !=
+		    folio_expected_ref_count(folio) + 1) {
			lru_add_drain_all();
-			drain_allow = false;
+			drained = 2;
		}

		if (!folio_isolate_lru(folio))
@@ -5854,7 +5854,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
	spinlock_t *ptl;
	struct hstate *h = hstate_vma(vma);
	unsigned long sz = huge_page_size(h);
-	bool adjust_reservation = false;
+	bool adjust_reservation;
	unsigned long last_addr_mask;
	bool force_flush = false;

@@ -5947,6 +5947,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
				sz);
		hugetlb_count_sub(pages_per_huge_page(h), mm);
		hugetlb_remove_rmap(folio);
+		spin_unlock(ptl);

		/*
		 * Restore the reservation for anonymous page, otherwise the
@@ -5954,14 +5955,16 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
		 * If there we are freeing a surplus, do not set the restore
		 * reservation bit.
		 */
+		adjust_reservation = false;
+
+		spin_lock_irq(&hugetlb_lock);
		if (!h->surplus_huge_pages && __vma_private_lock(vma) &&
		    folio_test_anon(folio)) {
			folio_set_hugetlb_restore_reserve(folio);
			/* Reservation to be adjusted after the spin lock */
			adjust_reservation = true;
		}
-		spin_unlock(ptl);
+		spin_unlock_irq(&hugetlb_lock);

		/*
		 * Adjust the reservation for the region that will have the
@@ -336,13 +336,13 @@ static void ___free_pages_bulk(struct page **pages, int nr_pages)
	}
 }

-static int ___alloc_pages_bulk(struct page **pages, int nr_pages)
+static int ___alloc_pages_bulk(struct page **pages, int nr_pages, gfp_t gfp_mask)
 {
	unsigned long nr_populated, nr_total = nr_pages;
	struct page **page_array = pages;

	while (nr_pages) {
-		nr_populated = alloc_pages_bulk(GFP_KERNEL, nr_pages, pages);
+		nr_populated = alloc_pages_bulk(gfp_mask, nr_pages, pages);
		if (!nr_populated) {
			___free_pages_bulk(page_array, nr_total - nr_pages);
			return -ENOMEM;
@@ -354,25 +354,42 @@ static int ___alloc_pages_bulk(struct page **pages, int nr_pages)
	return 0;
 }

-static int __kasan_populate_vmalloc(unsigned long start, unsigned long end)
+static int __kasan_populate_vmalloc(unsigned long start, unsigned long end, gfp_t gfp_mask)
 {
	unsigned long nr_pages, nr_total = PFN_UP(end - start);
	struct vmalloc_populate_data data;
+	unsigned int flags;
	int ret = 0;

-	data.pages = (struct page **)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+	data.pages = (struct page **)__get_free_page(gfp_mask | __GFP_ZERO);
	if (!data.pages)
		return -ENOMEM;

	while (nr_total) {
		nr_pages = min(nr_total, PAGE_SIZE / sizeof(data.pages[0]));
-		ret = ___alloc_pages_bulk(data.pages, nr_pages);
+		ret = ___alloc_pages_bulk(data.pages, nr_pages, gfp_mask);
		if (ret)
			break;

		data.start = start;
+
+		/*
+		 * page tables allocations ignore external gfp mask, enforce it
+		 * by the scope API
+		 */
+		if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
+			flags = memalloc_nofs_save();
+		else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
+			flags = memalloc_noio_save();
+
		ret = apply_to_page_range(&init_mm, start, nr_pages * PAGE_SIZE,
					  kasan_populate_vmalloc_pte, &data);
+
+		if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
+			memalloc_nofs_restore(flags);
+		else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
+			memalloc_noio_restore(flags);
+
		___free_pages_bulk(data.pages, nr_pages);
		if (ret)
			break;
@@ -386,7 +403,7 @@ static int __kasan_populate_vmalloc(unsigned long start, unsigned long end)
	return ret;
 }

-int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
+int kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask)
 {
	unsigned long shadow_start, shadow_end;
	int ret;
@@ -415,7 +432,7 @@ int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
	shadow_start = PAGE_ALIGN_DOWN(shadow_start);
	shadow_end = PAGE_ALIGN(shadow_end);

-	ret = __kasan_populate_vmalloc(shadow_start, shadow_end);
+	ret = __kasan_populate_vmalloc(shadow_start, shadow_end, gfp_mask);
	if (ret)
		return ret;

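Reviewer note: the new comment in the hunk above gives the reasoning for the extra save/restore calls: apply_to_page_range() allocates page tables with its own GFP flags, so the caller's gfp_mask has to be enforced indirectly through the memalloc scope API. A compressed, kernel-style sketch of that pattern; do_populate() and the surrounding function are hypothetical, only memalloc_nofs_save/restore and memalloc_noio_save/restore are the real scope-API calls, and this is an illustration rather than the shadow.c implementation:

#include <linux/gfp.h>
#include <linux/sched/mm.h>

/* Hypothetical helper that calls into code which ignores our gfp_mask. */
static int populate_with_mask(unsigned long start, unsigned long size,
			      gfp_t gfp_mask)
{
	unsigned int flags = 0;
	bool nofs = (gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO;
	bool noio = (gfp_mask & (__GFP_FS | __GFP_IO)) == 0;
	int ret;

	/* Make every nested allocation behave as GFP_NOFS / GFP_NOIO. */
	if (nofs)
		flags = memalloc_nofs_save();
	else if (noio)
		flags = memalloc_noio_save();

	ret = do_populate(start, size);	/* hypothetical; allocates page tables internally */

	if (nofs)
		memalloc_nofs_restore(flags);
	else if (noio)
		memalloc_noio_restore(flags);

	return ret;
}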
@@ -1417,8 +1417,8 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
		 */
		if (cc->is_khugepaged &&
		    (pte_young(pteval) || folio_test_young(folio) ||
-		     folio_test_referenced(folio) || mmu_notifier_test_young(vma->vm_mm,
-								     address)))
+		     folio_test_referenced(folio) ||
+		     mmu_notifier_test_young(vma->vm_mm, _address)))
			referenced++;
	}
	if (!writable) {
@@ -956,7 +956,7 @@ static const char * const action_page_types[] = {
	[MF_MSG_BUDDY]			= "free buddy page",
	[MF_MSG_DAX]			= "dax page",
	[MF_MSG_UNSPLIT_THP]		= "unsplit thp",
-	[MF_MSG_ALREADY_POISONED]	= "already poisoned",
+	[MF_MSG_ALREADY_POISONED]	= "already poisoned page",
	[MF_MSG_UNKNOWN]		= "unknown page",
 };

@@ -1349,9 +1349,10 @@ static int action_result(unsigned long pfn, enum mf_action_page_type type,
 {
	trace_memory_failure_event(pfn, type, result);

-	num_poisoned_pages_inc(pfn);
-
-	update_per_node_mf_stats(pfn, result);
+	if (type != MF_MSG_ALREADY_POISONED) {
+		num_poisoned_pages_inc(pfn);
+		update_per_node_mf_stats(pfn, result);
+	}

	pr_err("%#lx: recovery action for %s: %s\n",
			pfn, action_page_types[type], action_name[result]);
@@ -2094,12 +2095,11 @@ static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb
		*hugetlb = 0;
		return 0;
	} else if (res == -EHWPOISON) {
-		pr_err("%#lx: already hardware poisoned\n", pfn);
		if (flags & MF_ACTION_REQUIRED) {
			folio = page_folio(p);
			res = kill_accessing_process(current, folio_pfn(folio), flags);
-			action_result(pfn, MF_MSG_ALREADY_POISONED, MF_FAILED);
		}
+		action_result(pfn, MF_MSG_ALREADY_POISONED, MF_FAILED);
		return res;
	} else if (res == -EBUSY) {
		if (!(flags & MF_NO_RETRY)) {
@@ -2285,7 +2285,6 @@ int memory_failure(unsigned long pfn, int flags)
		goto unlock_mutex;

	if (TestSetPageHWPoison(p)) {
-		pr_err("%#lx: already hardware poisoned\n", pfn);
		res = -EHWPOISON;
		if (flags & MF_ACTION_REQUIRED)
			res = kill_accessing_process(current, pfn, flags);
@@ -2569,10 +2568,9 @@ int unpoison_memory(unsigned long pfn)
	static DEFINE_RATELIMIT_STATE(unpoison_rs, DEFAULT_RATELIMIT_INTERVAL,
					DEFAULT_RATELIMIT_BURST);

-	if (!pfn_valid(pfn))
-		return -ENXIO;
-
-	p = pfn_to_page(pfn);
+	p = pfn_to_online_page(pfn);
+	if (!p)
+		return -EIO;
	folio = page_folio(p);

	mutex_lock(&mf_mutex);
@@ -1815,8 +1815,14 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
		pfn = folio_pfn(folio) + folio_nr_pages(folio) - 1;

		if (folio_contain_hwpoisoned_page(folio)) {
-			if (WARN_ON(folio_test_lru(folio)))
-				folio_isolate_lru(folio);
+			/*
+			 * unmap_poisoned_folio() cannot handle large folios
+			 * in all cases yet.
+			 */
+			if (folio_test_large(folio) && !folio_test_hugetlb(folio))
+				goto put_folio;
+			if (folio_test_lru(folio) && !folio_isolate_lru(folio))
+				goto put_folio;
			if (folio_mapped(folio)) {
				folio_lock(folio);
				unmap_poisoned_folio(folio, pfn, false);
@@ -255,7 +255,7 @@ void mlock_folio(struct folio *folio)

	folio_get(folio);
	if (!folio_batch_add(fbatch, mlock_lru(folio)) ||
-	    folio_test_large(folio) || lru_cache_disabled())
+	    !folio_may_be_lru_cached(folio) || lru_cache_disabled())
		mlock_folio_batch(fbatch);
	local_unlock(&mlock_fbatch.lock);
 }
@@ -278,7 +278,7 @@ void mlock_new_folio(struct folio *folio)

	folio_get(folio);
	if (!folio_batch_add(fbatch, mlock_new(folio)) ||
-	    folio_test_large(folio) || lru_cache_disabled())
+	    !folio_may_be_lru_cached(folio) || lru_cache_disabled())
		mlock_folio_batch(fbatch);
	local_unlock(&mlock_fbatch.lock);
 }
@@ -299,7 +299,7 @@ void munlock_folio(struct folio *folio)
	 */
	folio_get(folio);
	if (!folio_batch_add(fbatch, folio) ||
-	    folio_test_large(folio) || lru_cache_disabled())
+	    !folio_may_be_lru_cached(folio) || lru_cache_disabled())
		mlock_folio_batch(fbatch);
	local_unlock(&mlock_fbatch.lock);
 }
@@ -1774,15 +1774,18 @@ static unsigned long check_mremap_params(struct vma_remap_struct *vrm)
	if (!vrm->new_len)
		return -EINVAL;

-	/* Is the new length or address silly? */
-	if (vrm->new_len > TASK_SIZE ||
-	    vrm->new_addr > TASK_SIZE - vrm->new_len)
+	/* Is the new length silly? */
+	if (vrm->new_len > TASK_SIZE)
		return -EINVAL;

	/* Remainder of checks are for cases with specific new_addr. */
	if (!vrm_implies_new_addr(vrm))
		return 0;

+	/* Is the new address silly? */
+	if (vrm->new_addr > TASK_SIZE - vrm->new_len)
+		return -EINVAL;
+
	/* The new address must be page-aligned. */
	if (offset_in_page(vrm->new_addr))
		return -EINVAL;
 mm/percpu.c | 20
@@ -1734,7 +1734,7 @@ void __percpu *pcpu_alloc_noprof(size_t size, size_t align, bool reserved,
	bool is_atomic;
	bool do_warn;
	struct obj_cgroup *objcg = NULL;
-	static int warn_limit = 10;
+	static atomic_t warn_limit = ATOMIC_INIT(10);
	struct pcpu_chunk *chunk, *next;
	const char *err;
	int slot, off, cpu, ret;
@@ -1904,13 +1904,17 @@ void __percpu *pcpu_alloc_noprof(size_t size, size_t align, bool reserved,
 fail:
	trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align);

-	if (do_warn && warn_limit) {
-		pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
-			size, align, is_atomic, err);
-		if (!is_atomic)
-			dump_stack();
-		if (!--warn_limit)
-			pr_info("limit reached, disable warning\n");
+	if (do_warn) {
+		int remaining = atomic_dec_if_positive(&warn_limit);
+
+		if (remaining >= 0) {
+			pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
+				size, align, is_atomic, err);
+			if (!is_atomic)
+				dump_stack();
+			if (remaining == 0)
+				pr_info("limit reached, disable warning\n");
+		}
	}

	if (is_atomic) {
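Reviewer note: the pcpu_alloc_noprof() change replaces a plain static int warning budget, which concurrent failing allocations can race on, with an atomic decrement-if-positive so at most the configured number of warnings is ever printed. A small userspace analogue of the idea, built on C11 atomics with a compare-and-swap loop standing in for the kernel's atomic_dec_if_positive() (all names below are illustrative):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int warn_limit = 10;

/* Decrement only if the value is still positive; return the new value, else -1. */
static int dec_if_positive(atomic_int *v)
{
	int old = atomic_load(v);

	while (old > 0) {
		if (atomic_compare_exchange_weak(v, &old, old - 1))
			return old - 1;
		/* 'old' was reloaded by the failed CAS; retry. */
	}
	return -1;
}

static void warn_alloc_failed(size_t size)
{
	int remaining = dec_if_positive(&warn_limit);

	if (remaining >= 0) {
		fprintf(stderr, "allocation failed, size=%zu\n", size);
		if (remaining == 0)
			fprintf(stderr, "limit reached, disable warning\n");
	}
}

int main(void)
{
	for (int i = 0; i < 15; i++)
		warn_alloc_failed(4096);
	return 0;
}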
 mm/swap.c | 50
@@ -164,6 +164,10 @@ static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
	for (i = 0; i < folio_batch_count(fbatch); i++) {
		struct folio *folio = fbatch->folios[i];

+		/* block memcg migration while the folio moves between lru */
+		if (move_fn != lru_add && !folio_test_clear_lru(folio))
+			continue;
+
		folio_lruvec_relock_irqsave(folio, &lruvec, &flags);
		move_fn(lruvec, folio);

@@ -176,14 +180,10 @@ static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
 }

 static void __folio_batch_add_and_move(struct folio_batch __percpu *fbatch,
-		struct folio *folio, move_fn_t move_fn,
-		bool on_lru, bool disable_irq)
+		struct folio *folio, move_fn_t move_fn, bool disable_irq)
 {
	unsigned long flags;

-	if (on_lru && !folio_test_clear_lru(folio))
-		return;
-
	folio_get(folio);

	if (disable_irq)
@@ -191,8 +191,8 @@ static void __folio_batch_add_and_move(struct folio_batch __percpu *fbatch,
	else
		local_lock(&cpu_fbatches.lock);

-	if (!folio_batch_add(this_cpu_ptr(fbatch), folio) || folio_test_large(folio) ||
-	    lru_cache_disabled())
+	if (!folio_batch_add(this_cpu_ptr(fbatch), folio) ||
+	    !folio_may_be_lru_cached(folio) || lru_cache_disabled())
		folio_batch_move_lru(this_cpu_ptr(fbatch), move_fn);

	if (disable_irq)
@@ -201,13 +201,13 @@ static void __folio_batch_add_and_move(struct folio_batch __percpu *fbatch,
	local_unlock(&cpu_fbatches.lock);
 }

-#define folio_batch_add_and_move(folio, op, on_lru)					\
-	__folio_batch_add_and_move(							\
-		&cpu_fbatches.op,							\
-		folio,									\
-		op,									\
-		on_lru,									\
-		offsetof(struct cpu_fbatches, op) >= offsetof(struct cpu_fbatches, lock_irq)	\
+#define folio_batch_add_and_move(folio, op)						\
+	__folio_batch_add_and_move(							\
+		&cpu_fbatches.op,							\
+		folio,									\
+		op,									\
+		offsetof(struct cpu_fbatches, op) >=					\
+		offsetof(struct cpu_fbatches, lock_irq)					\
	)

 static void lru_move_tail(struct lruvec *lruvec, struct folio *folio)
@@ -231,10 +231,10 @@ static void lru_move_tail(struct lruvec *lruvec, struct folio *folio)
 void folio_rotate_reclaimable(struct folio *folio)
 {
	if (folio_test_locked(folio) || folio_test_dirty(folio) ||
-	    folio_test_unevictable(folio))
+	    folio_test_unevictable(folio) || !folio_test_lru(folio))
		return;

-	folio_batch_add_and_move(folio, lru_move_tail, true);
+	folio_batch_add_and_move(folio, lru_move_tail);
 }

 void lru_note_cost_unlock_irq(struct lruvec *lruvec, bool file,
@@ -328,10 +328,11 @@ static void folio_activate_drain(int cpu)

 void folio_activate(struct folio *folio)
 {
-	if (folio_test_active(folio) || folio_test_unevictable(folio))
+	if (folio_test_active(folio) || folio_test_unevictable(folio) ||
+	    !folio_test_lru(folio))
		return;

-	folio_batch_add_and_move(folio, lru_activate, true);
+	folio_batch_add_and_move(folio, lru_activate);
 }

 #else
@@ -507,7 +508,7 @@ void folio_add_lru(struct folio *folio)
	    lru_gen_in_fault() && !(current->flags & PF_MEMALLOC))
		folio_set_active(folio);

-	folio_batch_add_and_move(folio, lru_add, false);
+	folio_batch_add_and_move(folio, lru_add);
 }
 EXPORT_SYMBOL(folio_add_lru);

@@ -685,13 +686,13 @@ void lru_add_drain_cpu(int cpu)
 void deactivate_file_folio(struct folio *folio)
 {
	/* Deactivating an unevictable folio will not accelerate reclaim */
-	if (folio_test_unevictable(folio))
+	if (folio_test_unevictable(folio) || !folio_test_lru(folio))
		return;

	if (lru_gen_enabled() && lru_gen_clear_refs(folio))
		return;

-	folio_batch_add_and_move(folio, lru_deactivate_file, true);
+	folio_batch_add_and_move(folio, lru_deactivate_file);
 }

 /*
@@ -704,13 +705,13 @@ void deactivate_file_folio(struct folio *folio)
 */
 void folio_deactivate(struct folio *folio)
 {
-	if (folio_test_unevictable(folio))
+	if (folio_test_unevictable(folio) || !folio_test_lru(folio))
		return;

	if (lru_gen_enabled() ? lru_gen_clear_refs(folio) : !folio_test_active(folio))
		return;

-	folio_batch_add_and_move(folio, lru_deactivate, true);
+	folio_batch_add_and_move(folio, lru_deactivate);
 }

 /**
@@ -723,10 +724,11 @@ void folio_deactivate(struct folio *folio)
 void folio_mark_lazyfree(struct folio *folio)
 {
	if (!folio_test_anon(folio) || !folio_test_swapbacked(folio) ||
+	    !folio_test_lru(folio) ||
	    folio_test_swapcache(folio) || folio_test_unevictable(folio))
		return;

-	folio_batch_add_and_move(folio, lru_lazyfree, true);
+	folio_batch_add_and_move(folio, lru_lazyfree);
 }

 void lru_add_drain(void)
||||||
|
|
@ -2026,6 +2026,8 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
|
||||||
if (unlikely(!vmap_initialized))
|
if (unlikely(!vmap_initialized))
|
||||||
return ERR_PTR(-EBUSY);
|
return ERR_PTR(-EBUSY);
|
||||||
|
|
||||||
|
/* Only reclaim behaviour flags are relevant. */
|
||||||
|
gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
|
||||||
might_sleep();
|
might_sleep();
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
|
@ -2038,8 +2040,6 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
|
||||||
*/
|
*/
|
||||||
va = node_alloc(size, align, vstart, vend, &addr, &vn_id);
|
va = node_alloc(size, align, vstart, vend, &addr, &vn_id);
|
||||||
if (!va) {
|
if (!va) {
|
||||||
gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
|
|
||||||
|
|
||||||
va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
|
va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
|
||||||
if (unlikely(!va))
|
if (unlikely(!va))
|
||||||
return ERR_PTR(-ENOMEM);
|
return ERR_PTR(-ENOMEM);
|
||||||
|
|
@ -2089,7 +2089,7 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
|
||||||
BUG_ON(va->va_start < vstart);
|
BUG_ON(va->va_start < vstart);
|
||||||
BUG_ON(va->va_end > vend);
|
BUG_ON(va->va_end > vend);
|
||||||
|
|
||||||
ret = kasan_populate_vmalloc(addr, size);
|
ret = kasan_populate_vmalloc(addr, size, gfp_mask);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
free_vmap_area(va);
|
free_vmap_area(va);
|
||||||
return ERR_PTR(ret);
|
return ERR_PTR(ret);
|
||||||
|
|
@ -4845,7 +4845,7 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
|
||||||
|
|
||||||
/* populate the kasan shadow space */
|
/* populate the kasan shadow space */
|
||||||
for (area = 0; area < nr_vms; area++) {
|
for (area = 0; area < nr_vms; area++) {
|
||||||
if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area]))
|
if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area], GFP_KERNEL))
|
||||||
goto err_free_shadow;
|
goto err_free_shadow;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@@ -4500,7 +4500,7 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_c
	}

	/* ineligible */
-	if (!folio_test_lru(folio) || zone > sc->reclaim_idx) {
+	if (zone > sc->reclaim_idx) {
		gen = folio_inc_gen(lruvec, folio, false);
		list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
		return true;
@@ -208,6 +208,9 @@ static int damon_sample_mtier_enable_store(
	if (enabled == is_enabled)
		return 0;

+	if (!init_called)
+		return 0;
+
	if (enabled) {
		err = damon_sample_mtier_start();
		if (err)
@@ -137,6 +137,9 @@ static int damon_sample_prcl_enable_store(
	if (enabled == is_enabled)
		return 0;

+	if (!init_called)
+		return 0;
+
	if (enabled) {
		err = damon_sample_prcl_start();
		if (err)
@@ -118,6 +118,9 @@ static int damon_sample_wsse_enable_store(
		return 0;

	if (enabled) {
+		if (!init_called)
+			return 0;
+
		err = damon_sample_wsse_start();
		if (err)
			enabled = false;