| author | David Hildenbrand (Arm) <david@kernel.org> | 2026-02-27 21:08:44 +0100 |
|---|---|---|
| committer | Andrew Morton <akpm@linux-foundation.org> | 2026-04-05 13:53:14 -0700 |
| commit | 784a742e7b6db236ef1ccfcdbfe29bf6234b3ccb | |
| tree | 2fa38323dbe0a1f78e630fc0f9f6e4534c111254 | |
| parent | 32bc7fe4a6f4d359b6de96cbc106d2cac695154e | |
mm: rename zap_page_range_single_batched() to zap_vma_range_batched()
Let's make the naming more consistent with our new naming scheme.
While at it, polish the kerneldoc a bit.
Link: https://lkml.kernel.org/r/20260227200848.114019-14-david@kernel.org
Signed-off-by: David Hildenbrand (Arm) <david@kernel.org>
Reviewed-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Alice Ryhl <aliceryhl@google.com>
Cc: Andrii Nakryiko <andrii@kernel.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: "Borislav Petkov (AMD)" <bp@alien8.de>
Cc: Carlos Llamas <cmllamas@google.com>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Claudio Imbrenda <imbrenda@linux.ibm.com>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: Dave Airlie <airlied@gmail.com>
Cc: David Ahern <dsahern@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Dimitri Sivanich <dimitri.sivanich@hpe.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Hartley Sweeten <hsweeten@visionengravers.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Ian Abbott <abbotti@mev.co.uk>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jakub Kicinski <kuba@kernel.org>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: Jann Horn <jannh@google.com>
Cc: Janosch Frank <frankja@linux.ibm.com>
Cc: Jarkko Sakkinen <jarkko@kernel.org>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Leon Romanovsky <leon@kernel.org>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Miguel Ojeda <ojeda@kernel.org>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Neal Cardwell <ncardwell@google.com>
Cc: Paolo Abeni <pabeni@redhat.com>
Cc: Pedro Falcato <pfalcato@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Todd Kjos <tkjos@android.com>
Cc: Tvrtko Ursulin <tursulin@ursulin.net>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | mm/internal.h | 2 |
| -rw-r--r-- | mm/madvise.c | 5 |
| -rw-r--r-- | mm/memory.c | 23 |

3 files changed, 16 insertions, 14 deletions
```diff
diff --git a/mm/internal.h b/mm/internal.h
index b0ac179d3a5d..6e1162e13289 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -536,7 +536,7 @@ static inline void sync_with_folio_pmd_zap(struct mm_struct *mm, pmd_t *pmdp)
 }
 
 struct zap_details;
-void zap_page_range_single_batched(struct mmu_gather *tlb,
+void zap_vma_range_batched(struct mmu_gather *tlb,
 		struct vm_area_struct *vma, unsigned long addr,
 		unsigned long size, struct zap_details *details);
 int zap_vma_for_reaping(struct vm_area_struct *vma);
diff --git a/mm/madvise.c b/mm/madvise.c
index e86228682842..a50ec5f90e3e 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -855,9 +855,8 @@ static long madvise_dontneed_single_vma(struct madvise_behavior *madv_behavior)
 		.reclaim_pt = true,
 	};
 
-	zap_page_range_single_batched(
-			madv_behavior->tlb, madv_behavior->vma, range->start,
-			range->end - range->start, &details);
+	zap_vma_range_batched(madv_behavior->tlb, madv_behavior->vma,
+			range->start, range->end - range->start, &details);
 	return 0;
 }
 
diff --git a/mm/memory.c b/mm/memory.c
index 8c77a765036f..879858e466ef 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2167,17 +2167,20 @@ void unmap_vmas(struct mmu_gather *tlb, struct unmap_desc *unmap)
 }
 
 /**
- * zap_page_range_single_batched - remove user pages in a given range
+ * zap_vma_range_batched - zap page table entries in a vma range
  * @tlb: pointer to the caller's struct mmu_gather
- * @vma: vm_area_struct holding the applicable pages
- * @address: starting address of pages to remove
- * @size: number of bytes to remove
- * @details: details of shared cache invalidation
+ * @vma: the vma covering the range to zap
+ * @address: starting address of the range to zap
+ * @size: number of bytes to zap
+ * @details: details specifying zapping behavior
+ *
+ * @tlb must not be NULL. The provided address range must be fully
+ * contained within @vma. If @vma is for hugetlb, @tlb is flushed and
+ * re-initialized by this function.
  *
- * @tlb shouldn't be NULL.  The range must fit into one VMA.  If @vma is for
- * hugetlb, @tlb is flushed and re-initialized by this function.
+ * If @details is NULL, this function will zap all page table entries.
  */
-void zap_page_range_single_batched(struct mmu_gather *tlb,
+void zap_vma_range_batched(struct mmu_gather *tlb,
 		struct vm_area_struct *vma, unsigned long address,
 		unsigned long size, struct zap_details *details)
 {
@@ -2225,7 +2228,7 @@ void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
 	struct mmu_gather tlb;
 
 	tlb_gather_mmu(&tlb, vma->vm_mm);
-	zap_page_range_single_batched(&tlb, vma, address, size, NULL);
+	zap_vma_range_batched(&tlb, vma, address, size, NULL);
 	tlb_finish_mmu(&tlb);
 }
 
@@ -4251,7 +4254,7 @@ static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
 	size = (end_idx - start_idx) << PAGE_SHIFT;
 
 	tlb_gather_mmu(&tlb, vma->vm_mm);
-	zap_page_range_single_batched(&tlb, vma, start, size, details);
+	zap_vma_range_batched(&tlb, vma, start, size, details);
 	tlb_finish_mmu(&tlb);
 	}
 }
```
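For context, the _batched suffix means the caller owns the struct mmu_gather: several ranges can be zapped under one gather, and the TLB is flushed once in tlb_finish_mmu() rather than once per zap, which is how madvise_dontneed_single_vma() drives it above. A minimal sketch of that calling convention, assuming only the signatures visible in this diff; zap_two_subranges() and its parameters are hypothetical, purely for illustration:

```c
/*
 * Illustrative sketch only (not part of the patch): zap two sub-ranges
 * of one VMA while reusing a single mmu_gather, so the TLB is flushed
 * once in tlb_finish_mmu() instead of once per zap. Per the kerneldoc
 * above, each range must be fully contained within @vma.
 */
static void zap_two_subranges(struct vm_area_struct *vma,
		unsigned long a_start, unsigned long a_size,
		unsigned long b_start, unsigned long b_size)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, vma->vm_mm);
	/* NULL details: zap all page table entries in each range. */
	zap_vma_range_batched(&tlb, vma, a_start, a_size, NULL);
	zap_vma_range_batched(&tlb, vma, b_start, b_size, NULL);
	tlb_finish_mmu(&tlb);
}
```

The updated zap_page_range_single() wrapper in mm/memory.c is this same pattern collapsed to a single range.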
