Diffstat (limited to 'contrib/llvm-project/libcxx/include/__atomic/cxx_atomic_impl.h')
| -rw-r--r-- | contrib/llvm-project/libcxx/include/__atomic/cxx_atomic_impl.h | 57 |
1 file changed, 51 insertions, 6 deletions
diff --git a/contrib/llvm-project/libcxx/include/__atomic/cxx_atomic_impl.h b/contrib/llvm-project/libcxx/include/__atomic/cxx_atomic_impl.h
index 167cee7f0bde..5d724669fee8 100644
--- a/contrib/llvm-project/libcxx/include/__atomic/cxx_atomic_impl.h
+++ b/contrib/llvm-project/libcxx/include/__atomic/cxx_atomic_impl.h
@@ -32,14 +32,12 @@ _LIBCPP_BEGIN_NAMESPACE_STD
 // [atomics.types.generic]p1 guarantees _Tp is trivially copyable. Because
 // the default operator= in an object is not volatile, a byte-by-byte copy
 // is required.
-template <typename _Tp, typename _Tv> _LIBCPP_HIDE_FROM_ABI
-typename enable_if<is_assignable<_Tp&, _Tv>::value>::type
-__cxx_atomic_assign_volatile(_Tp& __a_value, _Tv const& __val) {
+template <typename _Tp, typename _Tv, __enable_if_t<is_assignable<_Tp&, _Tv>::value, int> = 0> _LIBCPP_HIDE_FROM_ABI
+void __cxx_atomic_assign_volatile(_Tp& __a_value, _Tv const& __val) {
   __a_value = __val;
 }
-template <typename _Tp, typename _Tv> _LIBCPP_HIDE_FROM_ABI
-typename enable_if<is_assignable<_Tp&, _Tv>::value>::type
-__cxx_atomic_assign_volatile(_Tp volatile& __a_value, _Tv volatile const& __val) {
+template <typename _Tp, typename _Tv, __enable_if_t<is_assignable<_Tp&, _Tv>::value, int> = 0> _LIBCPP_HIDE_FROM_ABI
+void __cxx_atomic_assign_volatile(_Tp volatile& __a_value, _Tv volatile const& __val) {
   volatile char* __to = reinterpret_cast<volatile char*>(std::addressof(__a_value));
   volatile char* __end = __to + sizeof(_Tp);
   volatile const char* __from = reinterpret_cast<volatile const char*>(std::addressof(__val));
@@ -131,6 +129,18 @@ _Tp __cxx_atomic_load(const volatile __cxx_atomic_base_impl<_Tp>* __a,
 }
 
 template <typename _Tp>
+_LIBCPP_HIDE_FROM_ABI void
+__cxx_atomic_load_inplace(const volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp* __dst, memory_order __order) {
+  __atomic_load(std::addressof(__a->__a_value), __dst, __to_gcc_order(__order));
+}
+
+template <typename _Tp>
+_LIBCPP_HIDE_FROM_ABI void
+__cxx_atomic_load_inplace(const __cxx_atomic_base_impl<_Tp>* __a, _Tp* __dst, memory_order __order) {
+  __atomic_load(std::addressof(__a->__a_value), __dst, __to_gcc_order(__order));
+}
+
+template <typename _Tp>
 _LIBCPP_HIDE_FROM_ABI
 _Tp __cxx_atomic_load(const __cxx_atomic_base_impl<_Tp>* __a, memory_order __order) {
   _Tp __ret;
@@ -364,6 +374,21 @@ _Tp __cxx_atomic_load(__cxx_atomic_base_impl<_Tp> const* __a, memory_order __ord
       const_cast<__ptr_type>(std::addressof(__a->__a_value)), static_cast<__memory_order_underlying_t>(__order));
 }
 
+template <class _Tp>
+_LIBCPP_HIDE_FROM_ABI void
+__cxx_atomic_load_inplace(__cxx_atomic_base_impl<_Tp> const volatile* __a, _Tp* __dst, memory_order __order) _NOEXCEPT {
+  using __ptr_type = __remove_const_t<decltype(__a->__a_value)>*;
+  *__dst = __c11_atomic_load(
+      const_cast<__ptr_type>(std::addressof(__a->__a_value)), static_cast<__memory_order_underlying_t>(__order));
+}
+template <class _Tp>
+_LIBCPP_HIDE_FROM_ABI void
+__cxx_atomic_load_inplace(__cxx_atomic_base_impl<_Tp> const* __a, _Tp* __dst, memory_order __order) _NOEXCEPT {
+  using __ptr_type = __remove_const_t<decltype(__a->__a_value)>*;
+  *__dst = __c11_atomic_load(
+      const_cast<__ptr_type>(std::addressof(__a->__a_value)), static_cast<__memory_order_underlying_t>(__order));
+}
+
 template<class _Tp>
 _LIBCPP_HIDE_FROM_ABI
 _Tp __cxx_atomic_exchange(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __value, memory_order __order) _NOEXCEPT {
@@ -560,6 +585,16 @@ struct __cxx_atomic_lock_impl {
     __unlock();
     return __old;
   }
+  _LIBCPP_HIDE_FROM_ABI void __read_inplace(_Tp* __dst) const volatile {
+    __lock();
+    __cxx_atomic_assign_volatile(*__dst, __a_value);
+    __unlock();
+  }
+  _LIBCPP_HIDE_FROM_ABI void __read_inplace(_Tp* __dst) const {
+    __lock();
+    *__dst = __a_value;
+    __unlock();
+  }
 };
 
 template <typename _Tp>
@@ -600,6 +635,16 @@ _Tp __cxx_atomic_load(const __cxx_atomic_lock_impl<_Tp>* __a, memory_order) {
 }
 
 template <typename _Tp>
+_LIBCPP_HIDE_FROM_ABI void
+__cxx_atomic_load(const volatile __cxx_atomic_lock_impl<_Tp>* __a, _Tp* __dst, memory_order) {
+  __a->__read_inplace(__dst);
+}
+template <typename _Tp>
+_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_load(const __cxx_atomic_lock_impl<_Tp>* __a, _Tp* __dst, memory_order) {
+  __a->__read_inplace(__dst);
+}
+
+template <typename _Tp>
 _LIBCPP_HIDE_FROM_ABI
 _Tp __cxx_atomic_exchange(volatile __cxx_atomic_lock_impl<_Tp>* __a, _Tp __value, memory_order) {
   __a->__lock();
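
The new __cxx_atomic_load_inplace overloads and the lock-based __read_inplace members follow the same pattern: the atomic value is written into caller-provided storage rather than returned by value. Below is a minimal, standalone sketch of that pattern, assuming a GCC/Clang toolchain with the generic __atomic_load builtin (the same builtin the lock-free overloads in the patch call); the helper atomic_load_into and the Widget type are illustrative names, not libc++ APIs.

// Minimal sketch of an in-place atomic load (illustrative only, not libc++ code).
// Assumes GCC/Clang, which provide the generic __atomic_load builtin used by the
// lock-free path in the patch; atomic_load_into and Widget are made-up names.
#include <cstdio>

struct Widget {
  int id;
  int flags;
};

template <class T>
void atomic_load_into(const T* src, T* dst) {
  // Atomically reads *src and writes the result into *dst; nothing is
  // returned by value, so the caller's existing object is reused.
  __atomic_load(src, dst, __ATOMIC_SEQ_CST);
}

int main() {
  Widget shared{1, 0};
  Widget observed{0, 0};
  atomic_load_into(&shared, &observed);  // observed now holds a snapshot of shared
  std::printf("id=%d flags=%d\n", observed.id, observed.flags);
  return 0;
}

The lock-based fallback in the last two hunks reaches the same result without a builtin: __read_inplace takes the spinlock, copies __a_value into *__dst, and unlocks, which is why it only needs a destination pointer instead of a return value.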
