
Commit 3cae1ca

Author: CKI KWF Bot (committed)
Merge: RHEL 9 MM subsystem proactive follow-up fixes

MR: https://gitlab.com/redhat/centos-stream/src/kernel/centos-stream-9/-/merge_requests/7543

Updating the RHEL 9 tree with mm follow-up fixes to proactively prevent
issues from cropping up.

JIRA: https://issues.redhat.com/browse/RHEL-104908

Backport of the following patches:

6db07bb8df55 kasan: use vmalloc_dump_obj() for vmalloc error reports
87bc4b5ce41a mm/damon/lru_sort: reset enabled when DAMON start failed
5abc4befc7e3 mm/damon/reclaim: reset enabled when DAMON start failed
6ddc9fb45b26 kasan: remove kasan_find_vm_area() to prevent possible deadlock
4eb5e19038c2 mm/migrate: fix do_pages_stat in compat mode
ec62cdf3079b mm/hugetlb: don't crash when allocating a folio if there are no resv
3f9d1b876d09 mm/damon/sysfs-schemes: free old damon_sysfs_scheme_filter->memcg_path on write
662b5582abb3 mm: shmem: add missing shmem_unacct_size() in __shmem_file_setup()
634d7535a52f mm: pcp: increase pcp->free_count threshold to trigger free_high
eca8dc5f08b4 mm: page_alloc: remove redundant READ_ONCE
2aca24130463 mm/gup: fix wrongly calculated returned value in fault_in_safe_writeable()
8bea129b6965 kernel/fork: only call untrack_pfn_clear() on VMAs duplicated for fork()
c83bf8fca6e8 mm: (un)track_pfn_copy() fix + doc improvements
8510ab2669cb mm/gup: remove unnecessary check in memfd_pin_folios()
804040866398 mm: fix filemap_get_folios_contig returning batches of identical folios
ee2dc58e5161 mm/memblock: repeat setting reserved region nid if array is doubled
4d83684333a2 mm/memblock: pass size instead of end to memblock_set_node()
2a0710282377 x86/mm/pat: Fix VM_PAT handling when fork() fails in copy_page_range()
dd314f2d74f6 mm/hwpoison: do not send SIGBUS to processes with recovered clean pages
a046fca559fa mm/damon/sysfs-schemes: avoid Wformat-security warning on damon_sysfs_access_pattern_add_range_dir()
07275590f7fe mm/mremap: correctly handle partial mremap() of VMA starting at 0
f1549cdcdbd2 mm: respect mmap hint address when aligning for THP
27d7f8024675 mm: multi-gen LRU: use {ptep,pmdp}_clear_young_notify()
092fc8165eae mm: multi-gen LRU: remove MM_LEAF_OLD and MM_NONLEAF_TOTAL stats
68e78c47542c mm: multi-gen LRU: ignore non-leaf pmd_young for force_scan=true
3c033ac7304d kmsan: remove an x86-specific #include from kmsan.h
775cd2e44130 kmsan: remove a useless assignment from kmsan_vmap_pages_range_noflush()
432d611e7ea5 mm: hugetlb_vmemmap: fix reference to nonexistent file

Includes the following CVEs:
CVE: CVE-2025-22090
CVE: CVE-2025-38258
CVE: CVE-2025-38510

Signed-off-by: Audra Mitchell <audra@redhat.com>
Approved-by: Herton R. Krzesinski <herton@redhat.com>
Approved-by: Rafael Aquini <raquini@redhat.com>
Approved-by: Luiz Capitulino <luizcap@redhat.com>
Approved-by: CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com>
Merged-by: CKI GitLab Kmaint Pipeline Bot <26919896-cki-kmaint-pipeline-bot@users.noreply.gitlab.com>
2 parents: 7b2e1fc + 61c98d2

File tree: 24 files changed (+199, -185 lines)


arch/x86/mm/pat/memtype.c

Lines changed: 28 additions & 24 deletions
@@ -1000,29 +1000,42 @@ static int get_pat_info(struct vm_area_struct *vma, resource_size_t *paddr,
 		return -EINVAL;
 	}
 
-/*
- * track_pfn_copy is called when vma that is covering the pfnmap gets
- * copied through copy_page_range().
- *
- * If the vma has a linear pfn mapping for the entire range, we get the prot
- * from pte and reserve the entire vma range with single reserve_pfn_range call.
- */
-int track_pfn_copy(struct vm_area_struct *vma)
+int track_pfn_copy(struct vm_area_struct *dst_vma,
+		struct vm_area_struct *src_vma, unsigned long *pfn)
 {
+	const unsigned long vma_size = src_vma->vm_end - src_vma->vm_start;
 	resource_size_t paddr;
-	unsigned long vma_size = vma->vm_end - vma->vm_start;
 	pgprot_t pgprot;
+	int rc;
 
-	if (vma->vm_flags & VM_PAT) {
-		if (get_pat_info(vma, &paddr, &pgprot))
-			return -EINVAL;
-		/* reserve the whole chunk covered by vma. */
-		return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
-	}
+	if (!(src_vma->vm_flags & VM_PAT))
+		return 0;
+
+	/*
+	 * Duplicate the PAT information for the dst VMA based on the src
+	 * VMA.
+	 */
+	if (get_pat_info(src_vma, &paddr, &pgprot))
+		return -EINVAL;
+	rc = reserve_pfn_range(paddr, vma_size, &pgprot, 1);
+	if (rc)
+		return rc;
 
+	/* Reservation for the destination VMA succeeded. */
+	vm_flags_set(dst_vma, VM_PAT);
+	*pfn = PHYS_PFN(paddr);
 	return 0;
 }
 
+void untrack_pfn_copy(struct vm_area_struct *dst_vma, unsigned long pfn)
+{
+	untrack_pfn(dst_vma, pfn, dst_vma->vm_end - dst_vma->vm_start, true);
+	/*
+	 * Reservation was freed, any copied page tables will get cleaned
+	 * up later, but without getting PAT involved again.
+	 */
+}
+
 /*
  * prot is passed in as a parameter for the new mapping. If the vma has
  * a linear pfn mapping for the entire range, or no vma is provided,
@@ -1111,15 +1124,6 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
 	}
 }
 
-/*
- * untrack_pfn_clear is called if the following situation fits:
- *
- * 1) while mremapping a pfnmap for a new region, with the old vma after
- *    its pfnmap page table has been removed. The new vma has a new pfnmap
- *    to the same pfn & cache type with VM_PAT set.
- * 2) while duplicating vm area, the new vma fails to copy the pgtable from
- *    old vma.
- */
 void untrack_pfn_clear(struct vm_area_struct *vma)
 {
 	vm_flags_clear(vma, VM_PAT);
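
For orientation, the calling protocol this rework establishes during fork() looks roughly like the following. This is a simplified sketch of the copy_page_range() flow, not the verbatim upstream code; copy_all_page_tables() is a hypothetical stand-in for the real page-table copy:

	unsigned long pfn = 0;	/* callers must initialize the pfn to 0 */
	int ret = 0;

	if (unlikely(src_vma->vm_flags & VM_PFNMAP)) {
		/* On success, reserves for dst_vma and sets VM_PAT + *pfn. */
		ret = track_pfn_copy(dst_vma, src_vma, &pfn);
		if (ret)
			return ret;
	}

	ret = copy_all_page_tables(dst_vma, src_vma);	/* may fail, e.g. -ENOMEM */

	if (ret && unlikely(src_vma->vm_flags & VM_PFNMAP))
		/* Undo the dst reservation; safe even if nothing was tracked. */
		untrack_pfn_copy(dst_vma, pfn);
	return ret;

The behavioral change to note: VM_PAT is now set on the destination VMA only after reserve_pfn_range() succeeds, so a fork() that fails mid-copy can be unwound without leaking or double-freeing a PAT reservation.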

include/linux/mmzone.h

Lines changed: 3 additions & 4 deletions
@@ -458,9 +458,7 @@ struct lru_gen_folio {
 
 enum {
 	MM_LEAF_TOTAL,		/* total leaf entries */
-	MM_LEAF_OLD,		/* old leaf entries */
 	MM_LEAF_YOUNG,		/* young leaf entries */
-	MM_NONLEAF_TOTAL,	/* total non-leaf entries */
 	MM_NONLEAF_FOUND,	/* non-leaf entries found in Bloom filters */
 	MM_NONLEAF_ADDED,	/* non-leaf entries added to Bloom filters */
 	NR_MM_STATS
@@ -500,7 +498,7 @@ struct lru_gen_mm_walk {
 };
 
 void lru_gen_init_lruvec(struct lruvec *lruvec);
-void lru_gen_look_around(struct page_vma_mapped_walk *pvmw);
+bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw);
 
 #ifdef CONFIG_MEMCG
 
@@ -592,8 +590,9 @@ static inline void lru_gen_init_lruvec(struct lruvec *lruvec)
 {
 }
 
-static inline void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
+static inline bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
 {
+	return false;
 }
 
 #ifdef CONFIG_MEMCG
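
Since look-around now reports whether it found young entries, an rmap-side caller can use it directly as a referenced test. A minimal sketch of the intended usage (hedged: the exact conditions in mm/rmap.c differ):

	/* Inside a folio_referenced()-style walk, under the PTL. */
	if (lru_gen_enabled() && pvmw->pte) {
		if (lru_gen_look_around(pvmw))
			referenced++;	/* look-around saw a young entry */
	}

The `return false` added to the !CONFIG_LRU_GEN stub keeps such callers correct when MGLRU is compiled out.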

include/linux/pgtable.h

Lines changed: 25 additions & 6 deletions
@@ -1277,14 +1277,28 @@ static inline void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
 }
 
 /*
- * track_pfn_copy is called when vma that is covering the pfnmap gets
- * copied through copy_page_range().
+ * track_pfn_copy is called when a VM_PFNMAP VMA is about to get the page
+ * tables copied during copy_page_range(). Will store the pfn to be
+ * passed to untrack_pfn_copy() only if there is something to be untracked.
+ * Callers should initialize the pfn to 0.
  */
-static inline int track_pfn_copy(struct vm_area_struct *vma)
+static inline int track_pfn_copy(struct vm_area_struct *dst_vma,
+		struct vm_area_struct *src_vma, unsigned long *pfn)
 {
 	return 0;
 }
 
+/*
+ * untrack_pfn_copy is called when a VM_PFNMAP VMA failed to copy during
+ * copy_page_range(), but after track_pfn_copy() was already called. Can
+ * be called even if track_pfn_copy() did not actually track anything:
+ * handled internally.
+ */
+static inline void untrack_pfn_copy(struct vm_area_struct *dst_vma,
+		unsigned long pfn)
+{
+}
+
 /*
  * untrack_pfn is called while unmapping a pfnmap for a region.
  * untrack can be called for a specific region indicated by pfn and size or
@@ -1297,8 +1311,10 @@ static inline void untrack_pfn(struct vm_area_struct *vma,
 }
 
 /*
- * untrack_pfn_clear is called while mremapping a pfnmap for a new region
- * or fails to copy pgtable during duplicate vm area.
+ * untrack_pfn_clear is called in the following cases on a VM_PFNMAP VMA:
+ *
+ * 1) During mremap() on the src VMA after the page tables were moved.
+ * 2) During fork() on the dst VMA, immediately after duplicating the src VMA.
  */
 static inline void untrack_pfn_clear(struct vm_area_struct *vma)
 {
@@ -1309,7 +1325,10 @@ extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
 		unsigned long size);
 extern void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
 		pfn_t pfn);
-extern int track_pfn_copy(struct vm_area_struct *vma);
+extern int track_pfn_copy(struct vm_area_struct *dst_vma,
+		struct vm_area_struct *src_vma, unsigned long *pfn);
+extern void untrack_pfn_copy(struct vm_area_struct *dst_vma,
+		unsigned long pfn);
 extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
 		unsigned long size, bool mm_wr_locked);
 extern void untrack_pfn_clear(struct vm_area_struct *vma);

kernel/fork.c

Lines changed: 5 additions & 0 deletions
@@ -711,6 +711,11 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
 		tmp = vm_area_dup(mpnt);
 		if (!tmp)
 			goto fail_nomem;
+
+		/* track_pfn_copy() will later take care of copying internal state. */
+		if (unlikely(tmp->vm_flags & VM_PFNMAP))
+			untrack_pfn_clear(tmp);
+
 		retval = vma_dup_policy(mpnt, tmp);
 		if (retval)
 			goto fail_nomem_policy;
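
The clear is needed because vm_area_dup() copies vm_flags wholesale, so immediately after duplication the child VMA claims VM_PAT without owning a PAT reservation. A commented sketch of the hypothetical failure timeline this hunk closes:

	/*
	 * tmp = vm_area_dup(mpnt);   -> tmp inherits VM_PAT from mpnt
	 * vma_dup_policy() fails     -> before copy_page_range() ever runs
	 * teardown of tmp            -> would untrack a PAT reservation
	 *                               that was never taken for tmp
	 *
	 * Clearing VM_PAT up front keeps tmp's state honest until
	 * track_pfn_copy() re-establishes it for a successful copy.
	 */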

mm/damon/lru_sort.c

Lines changed: 4 additions & 1 deletion
@@ -335,7 +335,7 @@ static int __init damon_lru_sort_init(void)
 	int err = damon_modules_new_paddr_ctx_target(&ctx, &target);
 
 	if (err)
-		return err;
+		goto out;
 
 	ctx->callback.after_wmarks_check = damon_lru_sort_after_wmarks_check;
 	ctx->callback.after_aggregation = damon_lru_sort_after_aggregation;
@@ -344,6 +344,9 @@ static int __init damon_lru_sort_init(void)
 	if (enabled)
 		err = damon_lru_sort_turn(true);
 
+out:
+	if (err && enabled)
+		enabled = false;
 	return err;
 }

mm/damon/reclaim.c

Lines changed: 4 additions & 1 deletion
@@ -285,7 +285,7 @@ static int __init damon_reclaim_init(void)
 	int err = damon_modules_new_paddr_ctx_target(&ctx, &target);
 
 	if (err)
-		return err;
+		goto out;
 
 	ctx->callback.after_wmarks_check = damon_reclaim_after_wmarks_check;
 	ctx->callback.after_aggregation = damon_reclaim_after_aggregation;
@@ -294,6 +294,9 @@ static int __init damon_reclaim_init(void)
 	if (enabled)
 		err = damon_reclaim_turn(true);
 
+out:
+	if (err && enabled)
+		enabled = false;
 	return err;
 }
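
The lru_sort and reclaim fixes apply the same invariant: if module initialization fails while the boot-time `enabled` parameter was set, reset the parameter so that /sys/module/.../parameters/enabled does not report a module that never actually started. The pattern in isolation, as a sketch with hypothetical names:

	static bool enabled __read_mostly;	/* exposed as a module parameter */

	static int __init example_init(void)
	{
		int err = example_setup_ctx();	/* hypothetical setup step */

		if (err)
			goto out;

		if (enabled)
			err = example_turn_on();

	out:
		/* Keep the user-visible flag consistent with reality. */
		if (err && enabled)
			enabled = false;
		return err;
	}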

mm/damon/sysfs-schemes.c

Lines changed: 2 additions & 1 deletion
@@ -376,6 +376,7 @@ static ssize_t memcg_path_store(struct kobject *kobj,
 		return -ENOMEM;
 
 	strscpy(path, buf, count + 1);
+	kfree(filter->memcg_path);
 	filter->memcg_path = path;
 	return count;
 }
@@ -1051,7 +1052,7 @@ static int damon_sysfs_access_pattern_add_range_dir(
 	if (!range)
 		return -ENOMEM;
 	err = kobject_init_and_add(&range->kobj, &damon_sysfs_ul_range_ktype,
-			&access_pattern->kobj, name);
+			&access_pattern->kobj, "%s", name);
 	if (err)
 		kobject_put(&range->kobj);
 	else
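
Two independent fixes land here. The first plugs a leak: every write to memcg_path allocated a new buffer and overwrote the old pointer, so repeated writes leaked the previous allocation; freeing the old pointer first fixes that (kfree(NULL) is a no-op, so the first write needs no special case). The second silences -Wformat-security: kobject_init_and_add() takes a printf-style format string, so passing the non-literal `name` in format position would let any '%' in it be parsed as a conversion. The general rule, sketched with printk():

	/* Risky: if s can contain '%', it is interpreted as a format. */
	printk(s);

	/* Safe: s is plain data; "%s" is the only format string. */
	printk("%s", s);

The same discipline applies to every printf-family sink, kobject_init_and_add() included.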

mm/filemap.c

Lines changed: 1 addition & 0 deletions
@@ -2205,6 +2205,7 @@ unsigned filemap_get_folios_contig(struct address_space *mapping,
 			*start = folio->index + nr;
 			goto out;
 		}
+		xas_advance(&xas, folio_next_index(folio) - 1);
 		continue;
 put_folio:
 		folio_put(folio);
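
The missing advance matters for large folios, which occupy a run of consecutive page-cache indices: without it, the next loop iteration could land inside the folio that was just added and return the same folio again. A worked example of the index arithmetic, assuming a 4-page folio at index 8:

	/*
	 * folio->index == 8, folio_nr_pages(folio) == 4,
	 * folio_next_index(folio) == 12.
	 *
	 * xas_advance(&xas, 11) parks the cursor on the folio's last
	 * index, so the next lookup moves on to index 12, the first
	 * index past this folio, instead of re-returning the same
	 * entry at indices 9, 10 and 11.
	 */
	xas_advance(&xas, folio_next_index(folio) - 1);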

mm/gup.c

Lines changed: 3 additions & 16 deletions
@@ -1956,8 +1956,8 @@ size_t fault_in_safe_writeable(const char __user *uaddr, size_t size)
 	} while (start != end);
 	mmap_read_unlock(mm);
 
-	if (size > (unsigned long)uaddr - start)
-		return size - ((unsigned long)uaddr - start);
+	if (size > start - (unsigned long)uaddr)
+		return size - (start - (unsigned long)uaddr);
 	return 0;
 }
 EXPORT_SYMBOL(fault_in_safe_writeable);
@@ -3614,7 +3614,7 @@ long memfd_pin_folios(struct file *memfd, loff_t start, loff_t end,
 {
 	unsigned int flags, nr_folios, nr_found;
 	unsigned int i, pgshift = PAGE_SHIFT;
-	pgoff_t start_idx, end_idx, next_idx;
+	pgoff_t start_idx, end_idx;
 	struct folio *folio = NULL;
 	struct folio_batch fbatch;
 	struct hstate *h;
@@ -3664,19 +3664,7 @@ long memfd_pin_folios(struct file *memfd, loff_t start, loff_t end,
 			folio = NULL;
 		}
 
-		next_idx = 0;
 		for (i = 0; i < nr_found; i++) {
-			/*
-			 * As there can be multiple entries for a
-			 * given folio in the batch returned by
-			 * filemap_get_folios_contig(), the below
-			 * check is to ensure that we pin and return a
-			 * unique set of folios between start and end.
-			 */
-			if (next_idx &&
-			    next_idx != folio_index(fbatch.folios[i]))
-				continue;
-
 			folio = page_folio(&fbatch.folios[i]->page);
 
 			if (try_grab_folio(folio, 1, FOLL_PIN)) {
@@ -3689,7 +3677,6 @@ long memfd_pin_folios(struct file *memfd, loff_t start, loff_t end,
 			*offset = offset_in_folio(folio, start);
 
 			folios[nr_folios] = folio;
-			next_idx = folio_next_index(folio);
 			if (++nr_folios == max_folios)
 				break;
 		}
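
The fault_in_safe_writeable() hunk fixes inverted operands: the fault loop advances `start`, so the bytes successfully faulted in are `start - uaddr` and the unfaulted remainder is `size - (start - uaddr)`. A worked example with page-aligned numbers:

	/*
	 * uaddr = 0x1000, size = 0x3000; the loop stopped at start = 0x2000.
	 *
	 * faulted   = start - uaddr          = 0x1000
	 * remainder = size - (start - uaddr) = 0x2000   <- correct return
	 *
	 * The old expression computed uaddr - start, which wraps to a huge
	 * unsigned value once any progress was made, so the size check
	 * failed and the function returned 0, claiming the whole range was
	 * faulted in when most of it was not.
	 */

The memfd_pin_folios() hunks are a follow-on cleanup: once filemap_get_folios_contig() stops returning duplicate entries for a single large folio (the mm/filemap.c fix above), the next_idx de-duplication is dead code and can be dropped.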

mm/hugetlb.c

Lines changed: 6 additions & 3 deletions
@@ -2481,12 +2481,15 @@ struct folio *alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid,
 	struct folio *folio;
 
 	spin_lock_irq(&hugetlb_lock);
+	if (!h->resv_huge_pages) {
+		spin_unlock_irq(&hugetlb_lock);
+		return NULL;
+	}
+
 	folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, preferred_nid,
 					       nmask);
-	if (folio) {
-		VM_BUG_ON(!h->resv_huge_pages);
+	if (folio)
 		h->resv_huge_pages--;
-	}
 
 	spin_unlock_irq(&hugetlb_lock);
 	return folio;
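The reordering turns "no reserves" from a VM_BUG_ON() crash into an ordinary allocation failure: the reserve count is tested under hugetlb_lock before any dequeue is attempted, and because the same lock protects the counter, it cannot drop to zero between the check and the decrement. Callers simply see a NULL folio, which they already have to handle; a hypothetical caller-side sketch:

	folio = alloc_hugetlb_folio_reserve(h, preferred_nid, nmask, gfp_mask);
	if (!folio)
		return -ENOMEM;	/* reserves exhausted: fail cleanly, don't crash */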
