| author | Mark Kettenis <kettenis@cvs.openbsd.org> | 2014-09-07 22:19:33 +0000 |
|---|---|---|
| committer | Mark Kettenis <kettenis@cvs.openbsd.org> | 2014-09-07 22:19:33 +0000 |
| commit | 70a1a351e327b8a83c961c73cb501d51d6ab0ced (patch) | |
| tree | 2df86fcce2f40e9a9f7d6844070bd6f10fcc6fc3 /sys/arch | |
| parent | 5a22e23ae80976ef0f9684150e6f41847b102800 (diff) | |
Implement membar(9) API for amd64.
ok dlg@, guenther@
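membar(9) is the machine-independent memory-barrier interface declared in <sys/atomic.h>. As a minimal sketch of what the API is for (the `data` and `ready` variables below are hypothetical and not part of this commit), a producer/consumer handoff would pair membar_producer() with membar_consumer():

```c
/*
 * Minimal sketch of a membar(9) producer/consumer handoff.
 * The "data" and "ready" variables are hypothetical; only the
 * barrier placement illustrates the intended use of the API.
 */
#include <sys/atomic.h>

volatile unsigned long data;
volatile unsigned int ready;

void
publish(void)
{
	data = 42;		/* write the payload... */
	membar_producer();	/* ...and order it before the flag write */
	ready = 1;
}

unsigned long
consume(void)
{
	while (ready == 0)
		continue;	/* spin until the producer sets the flag */
	membar_consumer();	/* order the flag read before the data read */
	return (data);
}
```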
Diffstat (limited to 'sys/arch')
| -rw-r--r-- | sys/arch/amd64/include/atomic.h | 28 |
|---|---|---|

1 file changed, 27 insertions(+), 1 deletion(-)
```diff
diff --git a/sys/arch/amd64/include/atomic.h b/sys/arch/amd64/include/atomic.h
index 5d31c698e82..6c51ae56347 100644
--- a/sys/arch/amd64/include/atomic.h
+++ b/sys/arch/amd64/include/atomic.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: atomic.h,v 1.14 2014/09/01 03:39:15 guenther Exp $	*/
+/*	$OpenBSD: atomic.h,v 1.15 2014/09/07 22:19:32 kettenis Exp $	*/
 /*	$NetBSD: atomic.h,v 1.1 2003/04/26 18:39:37 fvdl Exp $	*/
 
 /*
@@ -205,6 +205,32 @@ _atomic_sub_long_nv(volatile unsigned long *p, unsigned long v)
 }
 #define atomic_sub_long_nv(_p, _v)	_atomic_sub_long_nv(_p, _v)
 
+/*
+ * The AMD64 architecture is rather strongly ordered.  When accessing
+ * normal write-back cacheable memory, only reads may be reordered with
+ * older writes to different locations.  There are a few instructions
+ * (clflush, non-temporal move instructions) that obey weaker ordering
+ * rules, but those instructions will only be used in (inline)
+ * assembly code where we can add the necessary fence instructions
+ * ourselves.
+ */
+
+#define __membar(_f) do { __asm __volatile(_f ::: "memory"); } while (0)
+
+#ifdef MULTIPROCESSOR
+#define membar_enter()		__membar("mfence")
+#define membar_exit()		__membar("")
+#define membar_producer()	__membar("")
+#define membar_consumer()	__membar("")
+#define membar_sync()		__membar("mfence")
+#else
+#define membar_enter()		__membar("")
+#define membar_exit()		__membar("")
+#define membar_producer()	__membar("")
+#define membar_consumer()	__membar("")
+#define membar_sync()		__membar("")
+#endif
+
 static __inline u_int64_t
 x86_atomic_testset_u64(volatile u_int64_t *ptr, u_int64_t val)
 {
```
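The comment added above explains why most of these barriers expand to a pure compiler barrier on amd64: the only hardware reordering permitted on write-back memory is a load passing an older store to a different location, which is exactly the case membar_sync() (and, conservatively, membar_enter()) must forbid with mfence. A Dekker-style sketch (hypothetical flag variables, not from this commit) shows where that store-to-load reordering would bite:

```c
/*
 * Hypothetical Dekker-style flags: each CPU raises its own flag,
 * then checks the other's.  Without a full barrier, amd64 may
 * satisfy the load before the older store is globally visible,
 * letting both CPUs read 0 and enter the critical section at once.
 */
#include <sys/atomic.h>

volatile unsigned int flag0, flag1;

int
cpu0_try_enter(void)
{
	flag0 = 1;
	membar_sync();		/* mfence: flag0 store visible before load */
	return (flag1 == 0);
}

int
cpu1_try_enter(void)
{
	flag1 = 1;
	membar_sync();		/* same full barrier on the other CPU */
	return (flag0 == 0);
}
```

On a uniprocessor kernel the #else branch collapses all five barriers to the empty compiler barrier, since a single CPU always observes its own stores in program order.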