path: root/arch/x86/kernel/cpu
author     Xin Li (Intel) <xin@zytor.com>  2025-04-27 02:20:17 -0700
committer  Ingo Molnar <mingo@kernel.org>  2025-05-02 10:26:56 +0200
commit     795ada52875fe61469f635f226d19a4cd733d1e8 (patch)
tree       254898b9bd47e620c200935fec32f307409856cf /arch/x86/kernel/cpu
parent     x86/msr: Rename rdpmcl() to rdpmc() (diff)
download   linux-795ada52875fe61469f635f226d19a4cd733d1e8.tar.gz
           linux-795ada52875fe61469f635f226d19a4cd733d1e8.zip
x86/msr: Convert the rdpmc() macro to an __always_inline function
Functions offer type safety and better readability compared to macros. Additionally, always inline functions can match the performance of macros. Converting the rdpmc() macro into an always inline function is simple and straightforward, so just make the change.

Moreover, the read result is now the returned value, further enhancing readability.

Signed-off-by: Xin Li (Intel) <xin@zytor.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Dave Hansen <dave.hansen@linux.intel.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Kees Cook <keescook@chromium.org>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Uros Bizjak <ubizjak@gmail.com>
Link: https://lore.kernel.org/r/20250427092027.1598740-6-xin@zytor.com
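For context, the rdpmc() definition this patch converts lives in arch/x86/include/asm/msr.h and is not part of this diffstat-limited view. A minimal sketch of the conversion, assuming the old macro simply wrapped native_read_pmc() and wrote the result into its second argument:

	/* Old form (as assumed here): result written into the second argument. */
	#define rdpmc(counter, val) ((val) = native_read_pmc(counter))

	/* New form (sketch): type-checked, result returned to the caller. */
	static __always_inline u64 rdpmc(int counter)
	{
		return native_read_pmc(counter);
	}

With the function form, call sites such as those in the diff below become ordinary assignments, e.g. hits_before = rdpmc(hit_pmcnum);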
Diffstat (limited to 'arch/x86/kernel/cpu')
-rw-r--r--  arch/x86/kernel/cpu/resctrl/pseudo_lock.c | 12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
index 15ff62d83bd8..61d762555a79 100644
--- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
+++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
@@ -1019,8 +1019,8 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr,
* used in L1 cache, second to capture accurate value that does not
* include cache misses incurred because of instruction loads.
*/
- rdpmc(hit_pmcnum, hits_before);
- rdpmc(miss_pmcnum, miss_before);
+ hits_before = rdpmc(hit_pmcnum);
+ miss_before = rdpmc(miss_pmcnum);
/*
* From SDM: Performing back-to-back fast reads are not guaranteed
* to be monotonic.
@@ -1028,8 +1028,8 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr,
* before proceeding.
*/
rmb();
- rdpmc(hit_pmcnum, hits_before);
- rdpmc(miss_pmcnum, miss_before);
+ hits_before = rdpmc(hit_pmcnum);
+ miss_before = rdpmc(miss_pmcnum);
/*
* Use LFENCE to ensure all previous instructions are retired
* before proceeding.
@@ -1051,8 +1051,8 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr,
* before proceeding.
*/
rmb();
- rdpmc(hit_pmcnum, hits_after);
- rdpmc(miss_pmcnum, miss_after);
+ hits_after = rdpmc(hit_pmcnum);
+ miss_after = rdpmc(miss_pmcnum);
/*
* Use LFENCE to ensure all previous instructions are retired
* before proceeding.