diff --git a/kernel/arch/arm64/include/asm/spinlock.h b/kernel/arch/arm64/include/asm/spinlock.h
index c85e96d17..499e8de33 100644
--- a/kernel/arch/arm64/include/asm/spinlock.h
+++ b/kernel/arch/arm64/include/asm/spinlock.h
@@ -312,4 +312,14 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 #define arch_read_relax(lock)	cpu_relax()
 #define arch_write_relax(lock)	cpu_relax()
 
+/*
+ * Accesses appearing in program order before a spin_lock() operation
+ * can be reordered with accesses inside the critical section, by virtue
+ * of arch_spin_lock being constructed using acquire semantics.
+ *
+ * In cases where this is problematic (e.g. try_to_wake_up), an
+ * smp_mb__before_spinlock() can restore the required ordering.
+ */
+#define smp_mb__before_spinlock()	smp_mb()
+
 #endif /* __ASM_SPINLOCK_H */
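
A minimal usage sketch of the barrier being added, under an invented data structure: struct waiter, its fields, and waiter_wake() below are hypothetical and not part of this patch; only smp_mb__before_spinlock(), spin_lock()/spin_unlock(), and WRITE_ONCE()/READ_ONCE() are the real kernel primitives. It mirrors the try_to_wake_up() situation the comment describes, where a store issued before taking the lock must not be reordered with a load performed inside the critical section.

#include <linux/spinlock.h>

/* Hypothetical structure, for illustration only. */
struct waiter {
	spinlock_t	lock;
	int		flag;	/* written before the lock is taken */
	int		state;	/* read inside the critical section */
};

static int waiter_wake(struct waiter *w)
{
	int state;

	/* Store that must be visible before the load of w->state below. */
	WRITE_ONCE(w->flag, 1);

	/*
	 * spin_lock() only has acquire semantics, so the store to
	 * w->flag above could otherwise be reordered with the load of
	 * w->state inside the critical section. The full barrier from
	 * smp_mb__before_spinlock() forbids that reordering.
	 */
	smp_mb__before_spinlock();
	spin_lock(&w->lock);
	state = READ_ONCE(w->state);
	spin_unlock(&w->lock);

	return state;
}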