Diffstat (limited to 'lib/sanitizer_common/sanitizer_atomic_clang.h')
-rw-r--r-- | lib/sanitizer_common/sanitizer_atomic_clang.h | 71
1 file changed, 18 insertions, 53 deletions
diff --git a/lib/sanitizer_common/sanitizer_atomic_clang.h b/lib/sanitizer_common/sanitizer_atomic_clang.h
index c5aa939b58c44..38363e8755606 100644
--- a/lib/sanitizer_common/sanitizer_atomic_clang.h
+++ b/lib/sanitizer_common/sanitizer_atomic_clang.h
@@ -15,8 +15,26 @@
 #ifndef SANITIZER_ATOMIC_CLANG_H
 #define SANITIZER_ATOMIC_CLANG_H
 
+#if defined(__i386__) || defined(__x86_64__)
+# include "sanitizer_atomic_clang_x86.h"
+#else
+# include "sanitizer_atomic_clang_other.h"
+#endif
+
 namespace __sanitizer {
 
+// We would like to just use compiler builtin atomic operations
+// for loads and stores, but they are mostly broken in clang:
+// - they lead to vastly inefficient code generation
+//   (http://llvm.org/bugs/show_bug.cgi?id=17281)
+// - 64-bit atomic operations are not implemented on x86_32
+//   (http://llvm.org/bugs/show_bug.cgi?id=15034)
+// - they are not implemented on ARM
+//   error: undefined reference to '__atomic_load_4'
+
+// See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
+// for mappings of the memory model to different processors.
+
 INLINE void atomic_signal_fence(memory_order) {
   __asm__ __volatile__("" ::: "memory");
 }
@@ -25,59 +43,6 @@ INLINE void atomic_thread_fence(memory_order) {
   __sync_synchronize();
 }
 
-INLINE void proc_yield(int cnt) {
-  __asm__ __volatile__("" ::: "memory");
-#if defined(__i386__) || defined(__x86_64__)
-  for (int i = 0; i < cnt; i++)
-    __asm__ __volatile__("pause");
-#endif
-  __asm__ __volatile__("" ::: "memory");
-}
-
-template<typename T>
-INLINE typename T::Type atomic_load(
-    const volatile T *a, memory_order mo) {
-  DCHECK(mo & (memory_order_relaxed | memory_order_consume
-      | memory_order_acquire | memory_order_seq_cst));
-  DCHECK(!((uptr)a % sizeof(*a)));
-  typename T::Type v;
-  // FIXME:
-  // 64-bit atomic operations are not atomic on 32-bit platforms.
-  // The implementation lacks necessary memory fences on ARM/PPC.
-  // We would like to use compiler builtin atomic operations,
-  // but they are mostly broken:
-  // - they lead to vastly inefficient code generation
-  //   (http://llvm.org/bugs/show_bug.cgi?id=17281)
-  // - 64-bit atomic operations are not implemented on x86_32
-  //   (http://llvm.org/bugs/show_bug.cgi?id=15034)
-  // - they are not implemented on ARM
-  //   error: undefined reference to '__atomic_load_4'
-  if (mo == memory_order_relaxed) {
-    v = a->val_dont_use;
-  } else {
-    atomic_signal_fence(memory_order_seq_cst);
-    v = a->val_dont_use;
-    atomic_signal_fence(memory_order_seq_cst);
-  }
-  return v;
-}
-
-template<typename T>
-INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
-  DCHECK(mo & (memory_order_relaxed | memory_order_release
-      | memory_order_seq_cst));
-  DCHECK(!((uptr)a % sizeof(*a)));
-  if (mo == memory_order_relaxed) {
-    a->val_dont_use = v;
-  } else {
-    atomic_signal_fence(memory_order_seq_cst);
-    a->val_dont_use = v;
-    atomic_signal_fence(memory_order_seq_cst);
-  }
-  if (mo == memory_order_seq_cst)
-    atomic_thread_fence(memory_order_seq_cst);
-}
-
 template<typename T>
 INLINE typename T::Type atomic_fetch_add(volatile T *a, typename T::Type v,
     memory_order mo) {
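For readers following the change: the fence-based atomic_load/atomic_store bodies deleted above are relocated into the per-architecture headers now included at the top (sanitizer_atomic_clang_x86.h on x86, sanitizer_atomic_clang_other.h elsewhere); this diffstat is limited to the one file, so those additions are not shown. Below is a minimal standalone sketch of the same fence-based pattern. It assumes nothing beyond gcc/clang builtins, and the names atomic_t, compiler_fence, hardware_fence, load_acquire, and store_seq_cst are illustrative, not taken from the sanitizer sources.

  // Illustrative sketch, not the project's code. Compiles with gcc/clang;
  // the empty asm and __sync_synchronize are the same primitives the
  // sanitizer code relies on.
  #include <cstdio>

  template <typename T> struct atomic_t {
    typedef T Type;
    volatile Type val_dont_use;  // same field name as in the patch
  };

  // Compiler-only fence: forbids the compiler from reordering memory
  // accesses across it, but emits no instruction.
  inline void compiler_fence() { __asm__ __volatile__("" ::: "memory"); }

  // Full hardware memory barrier.
  inline void hardware_fence() { __sync_synchronize(); }

  template <typename A>
  inline typename A::Type load_acquire(const volatile A *a) {
    // Compiler fences pin the volatile load in place; on strongly
    // ordered hardware (x86) that already yields acquire semantics.
    compiler_fence();
    typename A::Type v = a->val_dont_use;
    compiler_fence();
    return v;
  }

  template <typename A>
  inline void store_seq_cst(volatile A *a, typename A::Type v) {
    compiler_fence();
    a->val_dont_use = v;
    compiler_fence();
    hardware_fence();  // full barrier upgrades the store to seq_cst
  }

  int main() {
    atomic_t<unsigned> x = {0};
    store_seq_cst(&x, 42u);
    std::printf("%u\n", load_acquire(&x));
    return 0;
  }

The design point, per the comment block added in the diff: plain volatile accesses bracketed by compiler fences avoid the broken/inefficient __atomic builtins, while a real hardware fence is issued only where seq_cst demands it. This only holds on strongly ordered targets, which is exactly why the patch splits the implementation into an x86 header and an "other" header.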