mirror of https://github.com/torvalds/linux.git
17 hotfixes. 12 are cc:stable and 14 are for MM.

There's a two-patch DAMON series from SeongJae Park which addresses a missed
check and possible memory leak. Apart from that it's all singletons - please
see the changelogs for details.
-----BEGIN PGP SIGNATURE-----

iHUEABYIAB0WIQTTMBEPP41GrTpTJgfdBJ7gKXxAjgUCaPkz/QAKCRDdBJ7gKXxA
js6eAQCdnA10LouzzVdqA+HuYh206z8qE2KGsGKpUGDfJv40uAEA4ZbxYrMJmwhU
MXFn7Czphh/NOfFFCnrDnOlAFH7MmQc=
=hc/P
-----END PGP SIGNATURE-----

Merge tag 'mm-hotfixes-stable-2025-10-22-12-43' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull hotfixes from Andrew Morton:
 "17 hotfixes. 12 are cc:stable and 14 are for MM.

  There's a two-patch DAMON series from SeongJae Park which addresses a
  missed check and possible memory leak. Apart from that it's all
  singletons - please see the changelogs for details"

* tag 'mm-hotfixes-stable-2025-10-22-12-43' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  csky: abiv2: adapt to new folio flags field
  mm/damon/core: use damos_commit_quota_goal() for new goal commit
  mm/damon/core: fix potential memory leak by cleaning ops_filter in damon_destroy_scheme
  hugetlbfs: move lock assertions after early returns in huge_pmd_unshare()
  vmw_balloon: indicate success when effectively deflating during migration
  mm/damon/core: fix list_add_tail() call on damon_call()
  mm/mremap: correctly account old mapping after MREMAP_DONTUNMAP remap
  mm: prevent poison consumption when splitting THP
  ocfs2: clear extent cache after moving/defragmenting extents
  mm: don't spin in add_stack_record when gfp flags don't allow
  dma-debug: don't report false positives with DMA_BOUNCE_UNALIGNED_KMALLOC
  mm/damon/sysfs: dealloc commit test ctx always
  mm/damon/sysfs: catch commit test ctx alloc failure
  hung_task: fix warnings caused by unaligned lock pointers
commit 0f3ad9c610
@@ -21,7 +21,7 @@ void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
 	folio = page_folio(pfn_to_page(pfn));
 
-	if (test_and_set_bit(PG_dcache_clean, &folio->flags))
+	if (test_and_set_bit(PG_dcache_clean, &folio->flags.f))
 		return;
 
 	icache_inv_range(address, address + nr*PAGE_SIZE);
@@ -20,8 +20,8 @@
 
 static inline void flush_dcache_folio(struct folio *folio)
 {
-	if (test_bit(PG_dcache_clean, &folio->flags))
-		clear_bit(PG_dcache_clean, &folio->flags);
+	if (test_bit(PG_dcache_clean, &folio->flags.f))
+		clear_bit(PG_dcache_clean, &folio->flags.f);
 }
 #define flush_dcache_folio flush_dcache_folio
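Both csky hunks above make the same mechanical change: the folio flags word is no longer passed to the bit helpers directly, but through its inner field. The new type is not shown in this diff, so the following is only a hedged userspace sketch, assuming a wrapper struct whose single member is named f; all demo_* names are invented for the illustration.

	/* Userspace sketch only -- the wrapper type and names are assumptions. */
	#include <stdio.h>

	typedef struct {
		unsigned long f;		/* the raw flags word */
	} demo_flags_t;

	struct demo_folio {
		demo_flags_t flags;		/* was: unsigned long flags; */
	};

	/* Minimal stand-in for test_and_set_bit(): returns the old bit value. */
	static int demo_test_and_set_bit(int nr, unsigned long *addr)
	{
		unsigned long mask = 1UL << nr;
		int old = !!(*addr & mask);

		*addr |= mask;
		return old;
	}

	int main(void)
	{
		struct demo_folio folio = { .flags = { 0 } };

		/* &folio.flags is no longer an unsigned long *; &folio.flags.f is. */
		if (!demo_test_and_set_bit(0, &folio.flags.f))
			printf("bit 0 was clear, now set\n");
		return 0;
	}

The point of such a wrapper is type safety: arch code still passing &folio->flags fails to compile instead of silently operating on the wrong word, which is exactly why these two call sites had to be adapted.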
@@ -1737,7 +1737,7 @@ static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
 {
 	unsigned long status, flags;
 	struct vmballoon *b;
-	int ret;
+	int ret = 0;
 
 	b = container_of(b_dev_info, struct vmballoon, b_dev_info);
 
@@ -1796,17 +1796,15 @@ static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
 		 * A failure happened. While we can deflate the page we just
 		 * inflated, this deflation can also encounter an error. Instead
 		 * we will decrease the size of the balloon to reflect the
-		 * change and report failure.
+		 * change.
 		 */
 		atomic64_dec(&b->size);
-		ret = -EBUSY;
 	} else {
 		/*
 		 * Success. Take a reference for the page, and we will add it to
 		 * the list after acquiring the lock.
 		 */
 		get_page(newpage);
-		ret = 0;
 	}
 
 	/* Update the balloon list under the @pages_lock */
@@ -1817,7 +1815,7 @@ static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
 	 * If we succeed just insert it to the list and update the statistics
 	 * under the lock.
 	 */
-	if (!ret) {
+	if (status == VMW_BALLOON_SUCCESS) {
 		balloon_page_insert(&b->b_dev_info, newpage);
 		__count_vm_event(BALLOON_MIGRATE);
 	}
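Taken together, the three vmw_balloon hunks make a failed re-inflation at the destination count as an effective deflation: ret starts at 0 and is never set to -EBUSY, and the decision to insert the new page into the balloon list is keyed on the hypervisor status rather than on ret. Below is a hedged userspace sketch of that control-flow pattern; the names are illustrative and this is not the driver's API.

	/* Userspace sketch of "report success, key insertion on status". */
	#include <stdio.h>

	enum demo_status { DEMO_SUCCESS, DEMO_FAILURE };

	static long balloon_size = 8;	/* pages currently held by the balloon */

	static int demo_migratepage(enum demo_status status)
	{
		int ret = 0;	/* the migration itself is treated as having succeeded */

		if (status != DEMO_SUCCESS) {
			/* Could not re-inflate at the new page: the balloon just
			 * shrank by one page. Account for that instead of failing. */
			balloon_size--;
		}

		/* Only track the new page if the hypervisor accepted it. */
		if (status == DEMO_SUCCESS)
			printf("insert new page into balloon list\n");

		return ret;
	}

	int main(void)
	{
		printf("ret=%d size=%ld\n", demo_migratepage(DEMO_FAILURE), balloon_size);
		printf("ret=%d size=%ld\n", demo_migratepage(DEMO_SUCCESS), balloon_size);
		return 0;
	}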
@@ -478,14 +478,6 @@ hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end,
 		if (!hugetlb_vma_trylock_write(vma))
 			continue;
 
-		/*
-		 * Skip VMAs without shareable locks. Per the design in commit
-		 * 40549ba8f8e0, these will be handled by remove_inode_hugepages()
-		 * called after this function with proper locking.
-		 */
-		if (!__vma_shareable_lock(vma))
-			goto skip;
-
 		v_start = vma_offset_start(vma, start);
 		v_end = vma_offset_end(vma, end);
 
@@ -496,7 +488,6 @@ hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end,
 		 * vmas. Therefore, lock is not held when calling
 		 * unmap_hugepage_range for private vmas.
 		 */
-skip:
 		hugetlb_vma_unlock_write(vma);
 	}
 }
@@ -867,6 +867,11 @@ static int __ocfs2_move_extents_range(struct buffer_head *di_bh,
 			mlog_errno(ret);
 			goto out;
 		}
+		/*
+		 * Invalidate extent cache after moving/defragging to prevent
+		 * stale cached data with outdated extent flags.
+		 */
+		ocfs2_extent_map_trunc(inode, cpos);
 
 		context->clusters_moved += alloc_size;
 next:
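The ocfs2 hunk inserts a cache invalidation right after the extents are rewritten, so a later lookup cannot hand back an extent record carrying pre-move flags. A tiny hedged sketch of that invalidate-after-mutation rule follows; nothing in it is ocfs2 code, all names are stand-ins.

	/* Userspace sketch of invalidate-after-mutation; not ocfs2 code. */
	#include <stdbool.h>
	#include <stdio.h>

	struct extent { int flags; };

	static struct extent on_disk = { .flags = 1 };
	static struct extent cache;
	static bool cache_valid;

	static int lookup_flags(void)
	{
		if (!cache_valid) {		/* (re)fill the cache from "disk" */
			cache = on_disk;
			cache_valid = true;
		}
		return cache.flags;
	}

	int main(void)
	{
		printf("before move: %d\n", lookup_flags());

		on_disk.flags = 2;		/* the move/defrag rewrites the extent */
		cache_valid = false;		/* the step the added call performs */

		printf("after move:  %d\n", lookup_flags());	/* 2, not a stale 1 */
		return 0;
	}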
@@ -20,6 +20,10 @@
  * always zero. So we can use these bits to encode the specific blocking
  * type.
  *
+ * Note that on architectures where this is not guaranteed, or for any
+ * unaligned lock, this tracking mechanism is silently skipped for that
+ * lock.
+ *
  * Type encoding:
  * 00 - Blocked on mutex (BLOCKER_TYPE_MUTEX)
  * 01 - Blocked on semaphore (BLOCKER_TYPE_SEM)
@@ -45,7 +49,7 @@ static inline void hung_task_set_blocker(void *lock, unsigned long type)
 	 * If the lock pointer matches the BLOCKER_TYPE_MASK, return
 	 * without writing anything.
 	 */
-	if (WARN_ON_ONCE(lock_ptr & BLOCKER_TYPE_MASK))
+	if (lock_ptr & BLOCKER_TYPE_MASK)
 		return;
 
 	WRITE_ONCE(current->blocker, lock_ptr | type);
@@ -53,8 +57,6 @@ static inline void hung_task_set_blocker(void *lock, unsigned long type)
 
 static inline void hung_task_clear_blocker(void)
 {
-	WARN_ON_ONCE(!READ_ONCE(current->blocker));
-
 	WRITE_ONCE(current->blocker, 0UL);
 }
 
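The hung_task hunks rely on lock objects being word-aligned, so the two low bits of the pointer are free to carry a blocker type, and they now silently skip (rather than WARN about) any pointer where that assumption does not hold. A hedged standalone sketch of the low-bit tagging scheme follows; the mask value and names are assumptions drawn from the comment above, not copied from the header.

	/* Userspace sketch of low-bit pointer tagging; names/mask are assumptions. */
	#include <stdint.h>
	#include <stdio.h>

	#define DEMO_TYPE_MASK	0x3UL	/* two low bits carry the blocker type */

	static uintptr_t blocker;	/* stand-in for current->blocker */

	static void demo_set_blocker(void *lock, unsigned long type)
	{
		uintptr_t lock_ptr = (uintptr_t)lock;

		/* An unaligned lock would corrupt the type bits: silently skip it. */
		if (lock_ptr & DEMO_TYPE_MASK)
			return;

		blocker = lock_ptr | type;
	}

	int main(void)
	{
		long aligned_lock;	/* naturally word-aligned object */

		demo_set_blocker(&aligned_lock, 0x1);
		printf("lock=%p type=%lu\n",
		       (void *)(blocker & ~DEMO_TYPE_MASK),
		       (unsigned long)(blocker & DEMO_TYPE_MASK));
		return 0;
	}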
@@ -23,6 +23,7 @@
 #include <linux/ctype.h>
 #include <linux/list.h>
 #include <linux/slab.h>
+#include <linux/swiotlb.h>
 #include <asm/sections.h>
 #include "debug.h"
 
@@ -594,7 +595,9 @@ static void add_dma_entry(struct dma_debug_entry *entry, unsigned long attrs)
 	if (rc == -ENOMEM) {
 		pr_err_once("cacheline tracking ENOMEM, dma-debug disabled\n");
 		global_disable = true;
-	} else if (rc == -EEXIST && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
+	} else if (rc == -EEXIST && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+		   !(IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC) &&
+		     is_swiotlb_active(entry->dev))) {
 		err_printk(entry->dev, entry,
 			   "cacheline tracking EEXIST, overlapping mappings aren't supported\n");
 	}
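The widened condition suppresses the EEXIST report only when the kernel may legitimately bounce small unaligned kmalloc buffers through SWIOTLB, where two mappings can share a cacheline without being a driver bug. A minimal hedged sketch of that reporting guard, with illustrative predicate names rather than the real config/API symbols:

	/* Userspace sketch of the reporting guard; predicates are illustrative. */
	#include <stdbool.h>
	#include <stdio.h>

	static bool should_report_overlap(bool eexist, bool skip_cpu_sync,
					  bool bounce_kmalloc_enabled,
					  bool swiotlb_active)
	{
		return eexist && !skip_cpu_sync &&
		       !(bounce_kmalloc_enabled && swiotlb_active);
	}

	int main(void)
	{
		/* Overlap on a system that bounces unaligned kmalloc: stay quiet. */
		printf("%d\n", should_report_overlap(true, false, true, true));   /* 0 */
		/* Same overlap without bouncing: still report the driver bug. */
		printf("%d\n", should_report_overlap(true, false, false, false)); /* 1 */
		return 0;
	}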
@@ -452,6 +452,9 @@ void damon_destroy_scheme(struct damos *s)
 	damos_for_each_filter_safe(f, next, s)
 		damos_destroy_filter(f);
 
+	damos_for_each_ops_filter_safe(f, next, s)
+		damos_destroy_filter(f);
+
 	kfree(s->migrate_dests.node_id_arr);
 	kfree(s->migrate_dests.weight_arr);
 	damon_del_scheme(s);
@@ -832,7 +835,7 @@ int damos_commit_quota_goals(struct damos_quota *dst, struct damos_quota *src)
 				src_goal->metric, src_goal->target_value);
 		if (!new_goal)
 			return -ENOMEM;
-		damos_commit_quota_goal_union(new_goal, src_goal);
+		damos_commit_quota_goal(new_goal, src_goal);
 		damos_add_quota_goal(dst, new_goal);
 	}
 	return 0;
@@ -1450,7 +1453,7 @@ int damon_call(struct damon_ctx *ctx, struct damon_call_control *control)
 		INIT_LIST_HEAD(&control->list);
 
 	mutex_lock(&ctx->call_controls_lock);
-	list_add_tail(&ctx->call_controls, &control->list);
+	list_add_tail(&control->list, &ctx->call_controls);
 	mutex_unlock(&ctx->call_controls_lock);
 	if (!damon_is_running(ctx))
 		return -EINVAL;
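The one-line fix matters because list_add_tail(new, head) takes the entry first and the list head second; with the arguments swapped, the context's call_controls head gets spliced onto the caller's control->list instead of the control being queued. A hedged userspace sketch using a minimal circular list in the same style (a re-implementation for illustration, not the kernel's list.h):

	/* Minimal circular doubly linked list, re-implemented for illustration. */
	#include <stddef.h>
	#include <stdio.h>

	struct list_head { struct list_head *next, *prev; };

	#define LIST_HEAD_INIT(name) { &(name), &(name) }

	static void list_add_tail(struct list_head *new, struct list_head *head)
	{
		new->prev = head->prev;
		new->next = head;
		head->prev->next = new;
		head->prev = new;
	}

	struct control { int id; struct list_head list; };

	int main(void)
	{
		struct list_head call_controls = LIST_HEAD_INIT(call_controls);
		struct control a = { .id = 1, .list = LIST_HEAD_INIT(a.list) };
		struct control b = { .id = 2, .list = LIST_HEAD_INIT(b.list) };
		struct list_head *pos;

		/* Correct order: entry first, list head second. */
		list_add_tail(&a.list, &call_controls);
		list_add_tail(&b.list, &call_controls);

		for (pos = call_controls.next; pos != &call_controls; pos = pos->next)
			printf("queued control %d\n",
			       ((struct control *)((char *)pos -
						   offsetof(struct control, list)))->id);
		return 0;
	}

With the arguments reversed, the loop above would walk an empty call_controls list and the queued controls would never be seen, which is the failure mode the patch addresses.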
@@ -1473,13 +1473,14 @@ static int damon_sysfs_commit_input(void *data)
 	if (IS_ERR(param_ctx))
 		return PTR_ERR(param_ctx);
 	test_ctx = damon_new_ctx();
+	if (!test_ctx)
+		return -ENOMEM;
 	err = damon_commit_ctx(test_ctx, param_ctx);
-	if (err) {
-		damon_destroy_ctx(test_ctx);
+	if (err)
 		goto out;
-	}
 	err = damon_commit_ctx(kdamond->damon_ctx, param_ctx);
 out:
+	damon_destroy_ctx(test_ctx);
 	damon_destroy_ctx(param_ctx);
 	return err;
 }
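The reshaped error path covers both DAMON sysfs fixes in the series: the test context allocation is now checked, and its destruction is unconditional because every exit after damon_new_ctx() succeeds funnels through the single out: label. A hedged sketch of that single-exit cleanup shape with stand-in functions (none of these names are DAMON's):

	/* Userspace sketch of the single-exit cleanup shape; names are stand-ins. */
	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct ctx { int dummy; };

	static struct ctx *demo_new_ctx(void)             { return calloc(1, sizeof(struct ctx)); }
	static void demo_destroy_ctx(struct ctx *c)       { free(c); }
	static int demo_commit(struct ctx *dst, int fail) { (void)dst; return fail ? -EINVAL : 0; }

	static int demo_commit_input(int fail_test_commit)
	{
		struct ctx *test_ctx;
		int err;

		test_ctx = demo_new_ctx();
		if (!test_ctx)
			return -ENOMEM;
		err = demo_commit(test_ctx, fail_test_commit);	/* dry-run commit */
		if (err)
			goto out;
		err = demo_commit(test_ctx, 0);		/* stands in for committing the live ctx */
	out:
		demo_destroy_ctx(test_ctx);		/* freed on every path past the alloc */
		return err;
	}

	int main(void)
	{
		printf("%d %d\n", demo_commit_input(0), demo_commit_input(1));
		return 0;
	}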
@@ -4109,6 +4109,9 @@ static bool thp_underused(struct folio *folio)
 	if (khugepaged_max_ptes_none == HPAGE_PMD_NR - 1)
 		return false;
 
+	if (folio_contain_hwpoisoned_page(folio))
+		return false;
+
 	for (i = 0; i < folio_nr_pages(folio); i++) {
 		if (pages_identical(folio_page(folio, i), ZERO_PAGE(0))) {
 			if (++num_zero_pages > khugepaged_max_ptes_none)
@@ -7614,13 +7614,12 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
 	p4d_t *p4d = p4d_offset(pgd, addr);
 	pud_t *pud = pud_offset(p4d, addr);
 
-	i_mmap_assert_write_locked(vma->vm_file->f_mapping);
-	hugetlb_vma_assert_locked(vma);
 	if (sz != PMD_SIZE)
 		return 0;
 	if (!ptdesc_pmd_is_shared(virt_to_ptdesc(ptep)))
 		return 0;
 
+	i_mmap_assert_write_locked(vma->vm_file->f_mapping);
+	hugetlb_vma_assert_locked(vma);
 	pud_clear(pud);
 	/*
 	 * Once our caller drops the rmap lock, some other process might be
@@ -301,8 +301,9 @@ static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw,
 	struct page *page = folio_page(folio, idx);
 	pte_t newpte;
 
-	if (PageCompound(page))
+	if (PageCompound(page) || PageHWPoison(page))
 		return false;
 
 	VM_BUG_ON_PAGE(!PageAnon(page), page);
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	VM_BUG_ON_PAGE(pte_present(old_pte), page);
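This hunk and the thp_underused hunk above add the same kind of guard: check for hardware poison before any path that would read the page's contents (here, the later comparison against the zero page), because touching a poisoned page can consume the poison and trigger a machine check. A hedged userspace sketch of the guard-before-read ordering, with illustrative names only:

	/* Userspace sketch of "check poison before reading"; names illustrative. */
	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	struct demo_page {
		bool hwpoison;		/* stand-in for PageHWPoison() */
		char data[16];		/* reading this on real poison would fault */
	};

	static bool demo_is_zero_page(const struct demo_page *page)
	{
		/* The guard must come first: never touch data on a poisoned page. */
		if (page->hwpoison)
			return false;

		for (size_t i = 0; i < sizeof(page->data); i++)
			if (page->data[i])
				return false;
		return true;
	}

	int main(void)
	{
		struct demo_page ok = { .hwpoison = false };
		struct demo_page bad = { .hwpoison = true };

		printf("%d %d\n", demo_is_zero_page(&ok), demo_is_zero_page(&bad));
		return 0;
	}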
mm/mremap.c
@@ -1237,10 +1237,10 @@ static int copy_vma_and_data(struct vma_remap_struct *vrm,
 }
 
 /*
- * Perform final tasks for MADV_DONTUNMAP operation, clearing mlock() and
- * account flags on remaining VMA by convention (it cannot be mlock()'d any
- * longer, as pages in range are no longer mapped), and removing anon_vma_chain
- * links from it (if the entire VMA was copied over).
+ * Perform final tasks for MADV_DONTUNMAP operation, clearing mlock() flag on
+ * remaining VMA by convention (it cannot be mlock()'d any longer, as pages in
+ * range are no longer mapped), and removing anon_vma_chain links from it if the
+ * entire VMA was copied over.
  */
 static void dontunmap_complete(struct vma_remap_struct *vrm,
 			       struct vm_area_struct *new_vma)
@@ -1250,11 +1250,8 @@ static void dontunmap_complete(struct vma_remap_struct *vrm,
 	unsigned long old_start = vrm->vma->vm_start;
 	unsigned long old_end = vrm->vma->vm_end;
 
-	/*
-	 * We always clear VM_LOCKED[ONFAULT] | VM_ACCOUNT on the old
-	 * vma.
-	 */
-	vm_flags_clear(vrm->vma, VM_LOCKED_MASK | VM_ACCOUNT);
+	/* We always clear VM_LOCKED[ONFAULT] on the old VMA. */
+	vm_flags_clear(vrm->vma, VM_LOCKED_MASK);
 
 	/*
 	 * anon_vma links of the old vma is no longer needed after its page
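The mremap fix narrows the mask passed to vm_flags_clear(): mlock state is still dropped from the old VMA, but VM_ACCOUNT is kept so the mapping that remains behind after MREMAP_DONTUNMAP stays charged against commit accounting. A tiny hedged sketch of clearing one flag group while preserving another; the flag values are made up for the example.

	/* Userspace sketch of selective flag clearing; flag values are made up. */
	#include <stdio.h>

	#define DEMO_VM_LOCKED		0x1UL
	#define DEMO_VM_LOCKONFAULT	0x2UL
	#define DEMO_VM_ACCOUNT		0x4UL
	#define DEMO_VM_LOCKED_MASK	(DEMO_VM_LOCKED | DEMO_VM_LOCKONFAULT)

	int main(void)
	{
		unsigned long vm_flags = DEMO_VM_LOCKED | DEMO_VM_ACCOUNT;

		/* old behaviour also dropped the accounting flag */
		unsigned long old_way = vm_flags & ~(DEMO_VM_LOCKED_MASK | DEMO_VM_ACCOUNT);
		/* fixed behaviour keeps the accounting flag on the old mapping */
		unsigned long new_way = vm_flags & ~DEMO_VM_LOCKED_MASK;

		printf("old=%#lx new=%#lx\n", old_way, new_way);	/* old=0 new=0x4 */
		return 0;
	}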
@@ -168,6 +168,9 @@ static void add_stack_record_to_list(struct stack_record *stack_record,
 	unsigned long flags;
 	struct stack *stack;
 
+	if (!gfpflags_allow_spinning(gfp_mask))
+		return;
+
 	set_current_in_page_owner();
 	stack = kmalloc(sizeof(*stack), gfp_nested_mask(gfp_mask));
 	if (!stack) {
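The added check bails out before this page_owner path allocates or takes its lock, because the caller's gfp mask can indicate a context where spinning is not allowed. A hedged sketch of the guard-before-lock pattern with a stand-in predicate (compile with -pthread); none of these names are the kernel's.

	/* Userspace sketch of bailing out early when the context forbids locking. */
	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
	static int records;

	/* Stand-in for gfpflags_allow_spinning(): derived from the caller's context. */
	static bool allow_spinning(unsigned int gfp_like_mask)
	{
		return gfp_like_mask != 0;
	}

	static void add_record(unsigned int gfp_like_mask)
	{
		if (!allow_spinning(gfp_like_mask))
			return;		/* never reach the lock from a restricted context */

		pthread_mutex_lock(&list_lock);
		records++;
		pthread_mutex_unlock(&list_lock);
	}

	int main(void)
	{
		add_record(0);		/* restricted context: skipped */
		add_record(1);		/* normal context: recorded */
		printf("records=%d\n", records);
		return 0;
	}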