From 25352d2f2dc630b33cb61228dbdf7a0a2d6a4117 Mon Sep 17 00:00:00 2001
From: Shakeel Butt
Date: Sun, 18 May 2025 23:31:38 -0700
Subject: memcg: disable kmem charging in nmi for unsupported arch

Patch series "memcg: nmi-safe kmem charging", v4.

Users can attach their BPF programs at arbitrary execution points in the
kernel, and such BPF programs may run in nmi context.  In addition, these
programs can trigger memcg-charged kernel allocations in nmi context.
However, the memcg charging infra for kernel memory is not equipped to
handle nmi context on all architectures.

This series removes the hurdles to enable kmem charging in nmi context
for most archs.  For archs without CONFIG_HAVE_NMI, this series is a
noop.  For archs with NMI support that have
CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS, the previous work to make memcg
stats re-entrant is sufficient to allow kmem charging in nmi context.
For archs with NMI support but without
CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS and with
ARCH_HAVE_NMI_SAFE_CMPXCHG, this series adds the infra to support kmem
charging in nmi context.  Lastly, for archs with NMI support but with
neither CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS nor
ARCH_HAVE_NMI_SAFE_CMPXCHG, kmem charging in nmi context is not
supported at all.

Most widely used archs have CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS, and
for them this series should be almost a noop (other than making
memcg_rstat_updated nmi safe).

This patch (of 5):

The memcg accounting and stats use this_cpu* and atomic* ops.  There are
archs which define CONFIG_HAVE_NMI but define neither
CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS nor ARCH_HAVE_NMI_SAFE_CMPXCHG, so
memcg accounting in nmi context cannot be supported on them.  Let's just
disable memcg accounting in nmi context for such archs.

Link: https://lkml.kernel.org/r/20250519063142.111219-1-shakeel.butt@linux.dev
Link: https://lkml.kernel.org/r/20250519063142.111219-2-shakeel.butt@linux.dev
Signed-off-by: Shakeel Butt
Acked-by: Vlastimil Babka
Cc: Alexei Starovoitov
Cc: Johannes Weiner
Cc: Mathieu Desnoyers
Cc: Michal Hocko
Cc: Muchun Song
Cc: Peter Zijlstra
Cc: Roman Gushchin
Cc: Sebastian Andrzej Siewior
Cc: Tejun Heo
Signed-off-by: Andrew Morton
---
 init/Kconfig | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/init/Kconfig b/init/Kconfig
index 4cdd1049283c..a2aa49cfb8bd 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1006,6 +1006,13 @@ config MEMCG
 	help
 	  Provides control over the memory footprint of tasks in a cgroup.
 
+config MEMCG_NMI_UNSAFE
+	bool
+	depends on MEMCG
+	depends on HAVE_NMI
+	depends on !ARCH_HAS_NMI_SAFE_THIS_CPU_OPS && !ARCH_HAVE_NMI_SAFE_CMPXCHG
+	default y
+
 config MEMCG_V1
 	bool "Legacy cgroup v1 memory controller"
 	depends on MEMCG
-- 
cgit v1.2.3


From 940b01fc8dc1aead398819215650727cb9e7335e Mon Sep 17 00:00:00 2001
From: Shakeel Butt
Date: Sun, 18 May 2025 23:31:39 -0700
Subject: memcg: nmi safe memcg stats for specific archs

There are archs which have NMI support but cannot use this_cpu_* ops
safely in nmi context; they do, however, support safe atomic ops in nmi
context.  For such archs, let's add the infra to use atomic ops for the
memcg stats which can be updated in nmi context.

At the moment, the memcg stats which get updated in the objcg charging
path are MEMCG_KMEM, NR_SLAB_RECLAIMABLE_B & NR_SLAB_UNRECLAIMABLE_B.
Rather than adding support for all memcg stats to be nmi safe, let's
just add the infra to make these three stats nmi safe, which is what
this patch does.
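To make the shape of that infra concrete, a minimal editorial sketch (not
part of this patch) of how an objcg charging path could update one of
these slab stats; pn->slab_reclaimable/pn->slab_unreclaimable are the
atomic counters this patch adds, while account_slab_nmi_safe() and
mod_slab_state_normal() are hypothetical names standing in for the
existing this_cpu-based update path:

/*
 * Sketch only: in nmi context the update lands in the nmi-safe
 * atomic_t, to be drained back into the regular stats by
 * flush_nmi_stats() at rstat flush time; otherwise the usual
 * per-cpu machinery is used.
 */
static void account_slab_nmi_safe(struct mem_cgroup_per_node *pn,
				  enum node_stat_item idx, int nr)
{
	if (unlikely(in_nmi())) {
		/* atomic ops are nmi safe given ARCH_HAVE_NMI_SAFE_CMPXCHG */
		if (idx == NR_SLAB_RECLAIMABLE_B)
			atomic_add(nr, &pn->slab_reclaimable);
		else
			atomic_add(nr, &pn->slab_unreclaimable);
	} else {
		mod_slab_state_normal(pn, idx, nr);	/* hypothetical */
	}
}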
Link: https://lkml.kernel.org/r/20250519063142.111219-3-shakeel.butt@linux.dev
Signed-off-by: Shakeel Butt
Acked-by: Vlastimil Babka
Cc: Alexei Starovoitov
Cc: Johannes Weiner
Cc: Mathieu Desnoyers
Cc: Michal Hocko
Cc: Muchun Song
Cc: Peter Zijlstra
Cc: Roman Gushchin
Cc: Sebastian Andrzej Siewior
Cc: Tejun Heo
Signed-off-by: Andrew Morton
---
 include/linux/memcontrol.h | 10 ++++++++++
 init/Kconfig               |  7 +++++++
 mm/memcontrol.c            | 49 ++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 66 insertions(+)

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index f7848f73f41c..87b6688f124a 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -113,6 +113,12 @@ struct mem_cgroup_per_node {
 	CACHELINE_PADDING(_pad2_);
 	unsigned long		lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
 	struct mem_cgroup_reclaim_iter	iter;
+
+#ifdef CONFIG_MEMCG_NMI_SAFETY_REQUIRES_ATOMIC
+	/* slab stats for nmi context */
+	atomic_t		slab_reclaimable;
+	atomic_t		slab_unreclaimable;
+#endif
 };
 
 struct mem_cgroup_threshold {
@@ -236,6 +242,10 @@ struct mem_cgroup {
 	atomic_long_t		memory_events[MEMCG_NR_MEMORY_EVENTS];
 	atomic_long_t		memory_events_local[MEMCG_NR_MEMORY_EVENTS];
 
+#ifdef CONFIG_MEMCG_NMI_SAFETY_REQUIRES_ATOMIC
+	/* MEMCG_KMEM for nmi context */
+	atomic_t		kmem_stat;
+#endif
 	/*
 	 * Hint of reclaim pressure for socket memroy management. Note
 	 * that this indicator should NOT be used in legacy cgroup mode
diff --git a/init/Kconfig b/init/Kconfig
index a2aa49cfb8bd..e9f47baa34e0 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1013,6 +1013,13 @@ config MEMCG_NMI_UNSAFE
 	depends on !ARCH_HAS_NMI_SAFE_THIS_CPU_OPS && !ARCH_HAVE_NMI_SAFE_CMPXCHG
 	default y
 
+config MEMCG_NMI_SAFETY_REQUIRES_ATOMIC
+	bool
+	depends on MEMCG
+	depends on HAVE_NMI
+	depends on !ARCH_HAS_NMI_SAFE_THIS_CPU_OPS && ARCH_HAVE_NMI_SAFE_CMPXCHG
+	default y
+
 config MEMCG_V1
 	bool "Legacy cgroup v1 memory controller"
 	depends on MEMCG
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 2532cc2316ee..e8e8becbe926 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3966,6 +3966,53 @@ static void mem_cgroup_stat_aggregate(struct aggregate_control *ac)
 	}
 }
 
+#ifdef CONFIG_MEMCG_NMI_SAFETY_REQUIRES_ATOMIC
+static void flush_nmi_stats(struct mem_cgroup *memcg, struct mem_cgroup *parent,
+			    int cpu)
+{
+	int nid;
+
+	if (atomic_read(&memcg->kmem_stat)) {
+		int kmem = atomic_xchg(&memcg->kmem_stat, 0);
+		int index = memcg_stats_index(MEMCG_KMEM);
+
+		memcg->vmstats->state[index] += kmem;
+		if (parent)
+			parent->vmstats->state_pending[index] += kmem;
+	}
+
+	for_each_node_state(nid, N_MEMORY) {
+		struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
+		struct lruvec_stats *lstats = pn->lruvec_stats;
+		struct lruvec_stats *plstats = NULL;
+
+		if (parent)
+			plstats = parent->nodeinfo[nid]->lruvec_stats;
+
+		if (atomic_read(&pn->slab_reclaimable)) {
+			int slab = atomic_xchg(&pn->slab_reclaimable, 0);
+			int index = memcg_stats_index(NR_SLAB_RECLAIMABLE_B);
+
+			lstats->state[index] += slab;
+			if (plstats)
+				plstats->state_pending[index] += slab;
+		}
+		if (atomic_read(&pn->slab_unreclaimable)) {
+			int slab = atomic_xchg(&pn->slab_unreclaimable, 0);
+			int index = memcg_stats_index(NR_SLAB_UNRECLAIMABLE_B);
+
+			lstats->state[index] += slab;
+			if (plstats)
+				plstats->state_pending[index] += slab;
+		}
+	}
+}
+#else
+static void flush_nmi_stats(struct mem_cgroup *memcg, struct mem_cgroup *parent,
+			    int cpu)
+{}
+#endif
+
 static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
@@ -3974,6 +4021,8 @@ static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
 	struct aggregate_control ac;
 	int nid;
 
+	flush_nmi_stats(memcg, parent, cpu);
+
 	statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
 
 	ac = (struct aggregate_control) {
-- 
cgit v1.2.3
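A closing editorial sketch spanning both patches above (not part of
either patch; memcg_kmem_charge_allowed() and account_kmem_nmi_safe() are
hypothetical names, while mod_memcg_state(), in_nmi(), atomic_add() and
IS_ENABLED() are existing kernel primitives): how a kmem charging path
could consult the two new Kconfig symbols, refusing the charge on
MEMCG_NMI_UNSAFE archs and falling back to the nmi-safe atomic on
MEMCG_NMI_SAFETY_REQUIRES_ATOMIC archs.

static bool memcg_kmem_charge_allowed(void)
{
	/* neither nmi-safe this_cpu ops nor nmi-safe cmpxchg: refuse in nmi */
	if (IS_ENABLED(CONFIG_MEMCG_NMI_UNSAFE) && in_nmi())
		return false;
	return true;
}

#ifdef CONFIG_MEMCG_NMI_SAFETY_REQUIRES_ATOMIC
static void account_kmem_nmi_safe(struct mem_cgroup *memcg, int nr_pages)
{
	if (likely(!in_nmi()))
		mod_memcg_state(memcg, MEMCG_KMEM, nr_pages);
	else
		/* drained by flush_nmi_stats() at the next rstat flush */
		atomic_add(nr_pages, &memcg->kmem_stat);
}
#else
static void account_kmem_nmi_safe(struct mem_cgroup *memcg, int nr_pages)
{
	/*
	 * Either we can never be in nmi here (charge refused above), or the
	 * arch has nmi-safe this_cpu ops and the re-entrant stats suffice.
	 */
	mod_memcg_state(memcg, MEMCG_KMEM, nr_pages);
}
#endif

On archs with CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS, the plain
mod_memcg_state() path is already usable from nmi context once the stats
are re-entrant, which is why no atomic fallback is needed there.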