author	Matthew Wilcox (Oracle) <willy@infradead.org>	2025-08-05 18:22:51 +0100
committer	Andrew Morton <akpm@linux-foundation.org>	2025-09-13 16:55:07 -0700
commit	53fbef56e07df822ea3029109ffca25328c2e5ac (patch)
tree	8f68e237755cc342682e0515e5b691536859312e /arch/sh/mm
parent	mm/damon/Kconfig: make DAMON_STAT_ENABLED_DEFAULT depend on DAMON_STAT (diff)
mm: introduce memdesc_flags_t
Patch series "Add and use memdesc_flags_t".

At some point struct page will be separated from struct slab and struct folio. This is a step towards that by introducing a type for the 'flags' word of all three structures. This gives us a certain amount of type safety by establishing that some of these unsigned longs are different from other unsigned longs in that they contain things like node ID, section number and zone number in the upper bits. That lets us have functions that can easily be called by anyone who has a slab, folio or page (but not easily by anyone else) to get the node or zone.

There are going to be some unusual merge problems with this, as some odd bits of the kernel decide they want to print out the flags value or something similar by writing page->flags, and they will now need to write page->flags.f instead. That's most of the churn here. Maybe we should be removing these things from the debug output?

This patch (of 11):

Wrap the unsigned long flags in a typedef. In upcoming patches, this will provide a strong hint that you can't just pass a random unsigned long to functions which take this as an argument.

[willy@infradead.org: s/flags/flags.f/ in several architectures]
Link: https://lkml.kernel.org/r/aKMgPRLD-WnkPxYm@casper.infradead.org
[nicola.vetrini@gmail.com: mips: fix compilation error]
Link: https://lore.kernel.org/lkml/CA+G9fYvkpmqGr6wjBNHY=dRp71PLCoi2341JxOudi60yqaeUdg@mail.gmail.com/
Link: https://lkml.kernel.org/r/20250825214245.1838158-1-nicola.vetrini@gmail.com
Link: https://lkml.kernel.org/r/20250805172307.1302730-1-willy@infradead.org
Link: https://lkml.kernel.org/r/20250805172307.1302730-2-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Zi Yan <ziy@nvidia.com>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
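In concrete terms, the series wraps the flags word in a one-member struct, so every direct reader of page->flags, folio->flags or slab->flags gains a ".f". A minimal sketch of the shape of the change follows; the struct layouts are abbreviated illustrations, not the full kernel definitions, and the folio_mark_dcache_dirty() helper is hypothetical:

	/*
	 * The new type: a struct, so it no longer silently converts
	 * to or from a plain unsigned long.
	 */
	typedef struct {
		unsigned long f;	/* page flags, plus node/zone/section in the upper bits */
	} memdesc_flags_t;

	struct folio {
		memdesc_flags_t flags;	/* was: unsigned long flags; */
		/* ... */
	};

	/*
	 * Callers that used to pass &folio->flags to the bit helpers now
	 * pass &folio->flags.f, as in this hypothetical wrapper:
	 */
	static inline void folio_mark_dcache_dirty(struct folio *folio)
	{
		clear_bit(PG_dcache_clean, &folio->flags.f);
	}

Passing a bare unsigned long where a memdesc_flags_t is expected now fails to compile, which is the type safety the cover letter describes. The arch/sh hunks below are exactly this mechanical s/flags/flags.f/ conversion.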
Diffstat (limited to 'arch/sh/mm')
-rw-r--r--	arch/sh/mm/cache-sh4.c	2
-rw-r--r--	arch/sh/mm/cache-sh7705.c	2
-rw-r--r--	arch/sh/mm/cache.c	14
-rw-r--r--	arch/sh/mm/kmap.c	2
4 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 46393b00137e..83fb34b39ca7 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -114,7 +114,7 @@ static void sh4_flush_dcache_folio(void *arg)
 	struct address_space *mapping = folio_flush_mapping(folio);
 
 	if (mapping && !mapping_mapped(mapping))
-		clear_bit(PG_dcache_clean, &folio->flags);
+		clear_bit(PG_dcache_clean, &folio->flags.f);
 	else
 #endif
 	{
diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c
index b509a407588f..71f8be9fc8e0 100644
--- a/arch/sh/mm/cache-sh7705.c
+++ b/arch/sh/mm/cache-sh7705.c
@@ -138,7 +138,7 @@ static void sh7705_flush_dcache_folio(void *arg)
 	struct address_space *mapping = folio_flush_mapping(folio);
 
 	if (mapping && !mapping_mapped(mapping))
-		clear_bit(PG_dcache_clean, &folio->flags);
+		clear_bit(PG_dcache_clean, &folio->flags.f);
 	else {
 		unsigned long pfn = folio_pfn(folio);
 		unsigned int i, nr = folio_nr_pages(folio);
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index 6ebdeaff3021..c3f028bed049 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -64,14 +64,14 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 	struct folio *folio = page_folio(page);
 
 	if (boot_cpu_data.dcache.n_aliases && folio_mapped(folio) &&
-	    test_bit(PG_dcache_clean, &folio->flags)) {
+	    test_bit(PG_dcache_clean, &folio->flags.f)) {
 		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
 		memcpy(vto, src, len);
 		kunmap_coherent(vto);
 	} else {
 		memcpy(dst, src, len);
 		if (boot_cpu_data.dcache.n_aliases)
-			clear_bit(PG_dcache_clean, &folio->flags);
+			clear_bit(PG_dcache_clean, &folio->flags.f);
 	}
 
 	if (vma->vm_flags & VM_EXEC)
@@ -85,14 +85,14 @@ void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
 	struct folio *folio = page_folio(page);
 
 	if (boot_cpu_data.dcache.n_aliases && folio_mapped(folio) &&
-	    test_bit(PG_dcache_clean, &folio->flags)) {
+	    test_bit(PG_dcache_clean, &folio->flags.f)) {
 		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
 		memcpy(dst, vfrom, len);
 		kunmap_coherent(vfrom);
 	} else {
 		memcpy(dst, src, len);
 		if (boot_cpu_data.dcache.n_aliases)
-			clear_bit(PG_dcache_clean, &folio->flags);
+			clear_bit(PG_dcache_clean, &folio->flags.f);
 	}
 }
@@ -105,7 +105,7 @@ void copy_user_highpage(struct page *to, struct page *from,
 	vto = kmap_atomic(to);
 
 	if (boot_cpu_data.dcache.n_aliases && folio_mapped(src) &&
-	    test_bit(PG_dcache_clean, &src->flags)) {
+	    test_bit(PG_dcache_clean, &src->flags.f)) {
 		vfrom = kmap_coherent(from, vaddr);
 		copy_page(vto, vfrom);
 		kunmap_coherent(vfrom);
@@ -148,7 +148,7 @@ void __update_cache(struct vm_area_struct *vma,
 
 	if (pfn_valid(pfn)) {
 		struct folio *folio = page_folio(pfn_to_page(pfn));
-		int dirty = !test_and_set_bit(PG_dcache_clean, &folio->flags);
+		int dirty = !test_and_set_bit(PG_dcache_clean, &folio->flags.f);
 		if (dirty)
 			__flush_purge_region(folio_address(folio),
 					folio_size(folio));
@@ -162,7 +162,7 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr)
 
 	if (pages_do_alias(addr, vmaddr)) {
 		if (boot_cpu_data.dcache.n_aliases && folio_mapped(folio) &&
-		    test_bit(PG_dcache_clean, &folio->flags)) {
+		    test_bit(PG_dcache_clean, &folio->flags.f)) {
 			void *kaddr;
 
 			kaddr = kmap_coherent(page, vmaddr);
diff --git a/arch/sh/mm/kmap.c b/arch/sh/mm/kmap.c
index fa50e8f6e7a9..c9f32d5a54b8 100644
--- a/arch/sh/mm/kmap.c
+++ b/arch/sh/mm/kmap.c
@@ -31,7 +31,7 @@ void *kmap_coherent(struct page *page, unsigned long addr)
 	enum fixed_addresses idx;
 	unsigned long vaddr;
 
-	BUG_ON(!test_bit(PG_dcache_clean, &folio->flags));
+	BUG_ON(!test_bit(PG_dcache_clean, &folio->flags.f));
 
 	preempt_disable();
 	pagefault_disable();