From 339ff8995a8fdd33c5ef1ac492e627c22fee941c Mon Sep 17 00:00:00 2001
From: Travis Geiselbrecht
Date: Thu, 9 May 2024 19:51:32 -0700
Subject: [PATCH] [arch][barriers] add default memory barriers for all of the
 architectures

Most are pretty straightforward, but for a few of the more esoteric
architectures only defaults are implemented.
---
 arch/arm/include/arch/arch_ops.h        |  1 +
 arch/m68k/include/arch/arch_ops.h       |  2 +-
 arch/microblaze/include/arch/arch_ops.h |  9 +++++++++
 arch/mips/include/arch/arch_ops.h       | 10 ++++++++++
 arch/or1k/include/arch/arch_ops.h       | 10 ++++++++++
 arch/x86/include/arch/arch_ops.h        | 27 +++++++++++++++++++++++++++
 6 files changed, 58 insertions(+), 1 deletion(-)

diff --git a/arch/arm/include/arch/arch_ops.h b/arch/arm/include/arch/arch_ops.h
index 73a21d23de..4a2c7f8b1e 100644
--- a/arch/arm/include/arch/arch_ops.h
+++ b/arch/arm/include/arch/arch_ops.h
@@ -210,6 +210,7 @@ static inline void arch_set_current_thread(struct thread *t) {
 
 #endif
 
+// TODO: use less strong versions of these (dsb sy/ld/st)
 #define mb() DSB
 #define wmb() DSB
 #define rmb() DSB
diff --git a/arch/m68k/include/arch/arch_ops.h b/arch/m68k/include/arch/arch_ops.h
index 552f70d702..1e651d6cfe 100644
--- a/arch/m68k/include/arch/arch_ops.h
+++ b/arch/m68k/include/arch/arch_ops.h
@@ -47,7 +47,7 @@ static inline uint arch_curr_cpu_num(void) {
     return 0;
 }
 
-// TODO: see if there's a proper (or required) memory barrier on 68k
+// Default barriers for architectures that generally don't need them
 #define mb() CF
 #define wmb() CF
 #define rmb() CF
diff --git a/arch/microblaze/include/arch/arch_ops.h b/arch/microblaze/include/arch/arch_ops.h
index 8e2d85f8d5..ab22f5dee7 100644
--- a/arch/microblaze/include/arch/arch_ops.h
+++ b/arch/microblaze/include/arch/arch_ops.h
@@ -66,3 +66,12 @@ static inline uint arch_curr_cpu_num(void) {
     return 0;
 }
 
+// Default barriers for architectures that generally don't need them
+// TODO: do we need these for microblaze?
+#define mb() CF
+#define wmb() CF
+#define rmb() CF
+#define smp_mb() CF
+#define smp_wmb() CF
+#define smp_rmb() CF
+
diff --git a/arch/mips/include/arch/arch_ops.h b/arch/mips/include/arch/arch_ops.h
index a20838a5a0..2e3fcb4e9b 100644
--- a/arch/mips/include/arch/arch_ops.h
+++ b/arch/mips/include/arch/arch_ops.h
@@ -57,3 +57,13 @@ static inline uint arch_curr_cpu_num(void) {
     return 0;
 }
 
+// Default barriers for architectures that generally don't need them
+// TODO: do we need these for mips?
+#define mb() CF
+#define wmb() CF
+#define rmb() CF
+#define smp_mb() CF
+#define smp_wmb() CF
+#define smp_rmb() CF
+
+
diff --git a/arch/or1k/include/arch/arch_ops.h b/arch/or1k/include/arch/arch_ops.h
index 32356c7b94..ff8f785fc1 100644
--- a/arch/or1k/include/arch/arch_ops.h
+++ b/arch/or1k/include/arch/arch_ops.h
@@ -82,4 +82,14 @@ static inline ulong arch_cycle_count(void) { return 0; }
 static inline uint arch_curr_cpu_num(void) {
     return 0;
 }
+
+// Default barriers for architectures that generally don't need them
+// TODO: do we need these for or1k?
+#define mb() CF
+#define wmb() CF
+#define rmb() CF
+#define smp_mb() CF
+#define smp_wmb() CF
+#define smp_rmb() CF
+
 #endif // !ASSEMBLY
diff --git a/arch/x86/include/arch/arch_ops.h b/arch/x86/include/arch/arch_ops.h
index 65f25b465f..b3092d6020 100644
--- a/arch/x86/include/arch/arch_ops.h
+++ b/arch/x86/include/arch/arch_ops.h
@@ -65,4 +65,31 @@ static inline uint arch_curr_cpu_num(void) {
     return 0;
 }
 
+#if ARCH_X86_64
+// relies on SSE2
+#define mb() __asm__ volatile("mfence" : : : "memory")
+#define rmb() __asm__ volatile("lfence" : : : "memory")
+#define wmb() __asm__ volatile("sfence" : : : "memory")
+#else
+// A locked add to the top of the stack acts as a full barrier. Cannot
+// rely on SSE2 (and thus mfence) being available on older i386 class hardware.
+#define __storeload_barrier \
+    __asm__ volatile("lock; addl $0, (%%esp)" : : : "memory", "cc")
+#define mb() __storeload_barrier
+#define rmb() __storeload_barrier
+#define wmb() __storeload_barrier
+#endif
+
+#ifdef WITH_SMP
+// XXX probably too strict
+#define smp_mb() mb()
+#define smp_rmb() rmb()
+#define smp_wmb() wmb()
+#else
+#define smp_mb() CF
+#define smp_wmb() CF
+#define smp_rmb() CF
+#endif
+
+
 #endif // !ASSEMBLY
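
Illustrative usage note (not part of the patch): the intended pairing of these
macros is the usual publish/consume pattern, sketched below as a minimal
single-producer/single-consumer handoff. The mailbox struct and the
mailbox_post()/mailbox_wait() names are hypothetical, invented for this
example; the include path assumes LK's usual <arch/ops.h> wrapper around
arch_ops.h. Only mb()/wmb()/rmb() and their smp_*() variants come from the
headers touched above.

    // Hypothetical SPSC handoff built on the new barrier macros.
    #include <arch/ops.h>  // assumed LK wrapper that pulls in arch/arch_ops.h

    struct mailbox {
        unsigned int data;            // payload written by the producer
        volatile unsigned int ready;  // handoff flag, transitions 0 -> 1 once
    };

    // Producer: publish the payload, then raise the flag.
    static void mailbox_post(struct mailbox *m, unsigned int value) {
        m->data = value;  // store the payload first
        smp_wmb();        // order the payload store before the flag store
        m->ready = 1;     // make the payload visible to the consumer
    }

    // Consumer: wait for the flag, then read the payload.
    static unsigned int mailbox_wait(struct mailbox *m) {
        while (!m->ready)
            ;             // spin until the producer raises the flag
        smp_rmb();        // order the flag load before the payload load
        return m->data;   // payload is now safe to read
    }

On uniprocessor builds the smp_*() variants reduce to CF, the compiler-only
fence, so this costs nothing at runtime. On x86 the sfence/lfence mappings are
stronger than this pattern strictly needs, since the architecture already
orders store-store and load-load on normal memory; that is what the "XXX
probably too strict" comment in the patch alludes to.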