// -*- C++ -*- header.

// Copyright (C) 2008, 2009
// Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file cstdatomic
 *  This is a Standard C++ Library file.  You should @c #include this file
 *  in your programs, rather than any of the "*.h" implementation files.
 *
 *  This is the C++ version of the Standard C Library header @c stdatomic.h,
 *  and its contents are (mostly) the same as that header, but are all
 *  contained in the namespace @c std (except for names which are defined
 *  as macros in C).
 */

// Based on "C++ Atomic Types and Operations" by Hans Boehm and Lawrence Crowl.
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2427.html

#ifndef _GLIBCXX_STDATOMIC
#define _GLIBCXX_STDATOMIC 1

#pragma GCC system_header

#ifndef __GXX_EXPERIMENTAL_CXX0X__
# include <c++0x_warning.h>
#endif

#include <stdatomic.h>
#include <cstddef>

_GLIBCXX_BEGIN_NAMESPACE(std)

  /**
   * @addtogroup atomics
   * @{
   */

  /// kill_dependency
  template<typename _Tp>
    inline _Tp
    kill_dependency(_Tp __y)
    {
      _Tp ret(__y);
      return ret;
    }

  inline memory_order
  __calculate_memory_order(memory_order __m)
  {
    const bool __cond1 = __m == memory_order_release;
    const bool __cond2 = __m == memory_order_acq_rel;
    memory_order __mo1(__cond1 ? memory_order_relaxed : __m);
    memory_order __mo2(__cond2 ? memory_order_acquire : __mo1);
    return __mo2;
  }
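
  // Illustrative note (not part of the original header): the helper above
  // derives the failure memory order for the single-order
  // compare_exchange_{weak,strong} overloads defined later in this file.
  // release and acq_rel are not valid for the failure (pure load) case,
  // so they are demoted:
  //
  //   memory_order_release -> memory_order_relaxed
  //   memory_order_acq_rel -> memory_order_acquire
  //   anything else        -> unchanged
  //
  // For example, atomic<_Tp*>::compare_exchange_weak(__r, __v,
  // memory_order_acq_rel) below ends up using memory_order_acquire on the
  // failure path.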

  //
  // Three nested namespaces for atomic implementation details.
  //
  // The nested namespace inlined into std:: is determined by the value
  // of the _GLIBCXX_ATOMIC_PROPERTY macro and the resulting
  // ATOMIC_*_LOCK_FREE macros.  See file stdatomic.h.
  //
  // 0 == __atomic0 == Never lock-free
  // 1 == __atomic1 == Best available, sometimes lock-free
  // 2 == __atomic2 == Always lock-free
#include <bits/atomic_0.h>
#include <bits/atomic_2.h>

  /// atomic
  /// 29.4.3, Generic atomic type, primary class template.
  template<typename _Tp>
    struct atomic
    {
    private:
      _Tp _M_i;

    public:
      atomic() = default;
      ~atomic() = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) = delete;

      atomic(_Tp __i) : _M_i(__i) { }

      operator _Tp() const volatile;

      _Tp
      operator=(_Tp __i) volatile { store(__i); return __i; }

      bool
      is_lock_free() const volatile;

      void
      store(_Tp, memory_order = memory_order_seq_cst) volatile;

      _Tp
      load(memory_order = memory_order_seq_cst) const volatile;

      _Tp
      exchange(_Tp __i, memory_order = memory_order_seq_cst) volatile;

      bool
      compare_exchange_weak(_Tp&, _Tp, memory_order, memory_order) volatile;

      bool
      compare_exchange_strong(_Tp&, _Tp, memory_order, memory_order) volatile;

      bool
      compare_exchange_weak(_Tp&, _Tp,
                            memory_order = memory_order_seq_cst) volatile;

      bool
      compare_exchange_strong(_Tp&, _Tp,
                              memory_order = memory_order_seq_cst) volatile;
    };

  /// Partial specialization for pointer types.
  template<typename _Tp>
    struct atomic<_Tp*> : atomic_address
    {
      atomic() = default;
      ~atomic() = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) = delete;

      atomic(_Tp* __v) : atomic_address(__v) { }

      void
      store(_Tp*, memory_order = memory_order_seq_cst) volatile;

      _Tp*
      load(memory_order = memory_order_seq_cst) const volatile;

      _Tp*
      exchange(_Tp*, memory_order = memory_order_seq_cst) volatile;

      bool
      compare_exchange_weak(_Tp*&, _Tp*, memory_order, memory_order) volatile;

      bool
      compare_exchange_strong(_Tp*&, _Tp*, memory_order,
                              memory_order) volatile;

      bool
      compare_exchange_weak(_Tp*&, _Tp*,
                            memory_order = memory_order_seq_cst) volatile;

      bool
      compare_exchange_strong(_Tp*&, _Tp*,
                              memory_order = memory_order_seq_cst) volatile;

      _Tp*
      fetch_add(ptrdiff_t, memory_order = memory_order_seq_cst) volatile;

      _Tp*
      fetch_sub(ptrdiff_t, memory_order = memory_order_seq_cst) volatile;

      operator _Tp*() const volatile
      { return load(); }

      _Tp*
      operator=(_Tp* __v) volatile
      {
        store(__v);
        return __v;
      }

      _Tp*
      operator++(int) volatile { return fetch_add(1); }

      _Tp*
      operator--(int) volatile { return fetch_sub(1); }

      _Tp*
      operator++() volatile { return fetch_add(1) + 1; }

      _Tp*
      operator--() volatile { return fetch_sub(1) - 1; }

      _Tp*
      operator+=(ptrdiff_t __d) volatile
      { return fetch_add(__d) + __d; }

      _Tp*
      operator-=(ptrdiff_t __d) volatile
      { return fetch_sub(__d) - __d; }
    };
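
  // Illustrative sketch (not part of the original header): the primary
  // template stores an arbitrary _Tp, while the pointer partial
  // specialization layers typed pointer arithmetic over atomic_address.
  // Hypothetical usage, assuming a complete object type _Node:
  //
  //   struct _Node { int _M_value; };
  //   _Node __buf[4];
  //   atomic<_Node*> __p(__buf);
  //   __p.fetch_add(1);           // advances by one whole _Node
  //   _Node* __cur = __p.load();  // == __buf + 1
  //
  // The fetch_add/fetch_sub member definitions further below scale the
  // ptrdiff_t argument by sizeof(_Tp) before delegating to the untyped
  // atomic_address base.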

  /// Explicit specialization for void*
  template<>
    struct atomic<void*> : public atomic_address
    {
      typedef void* __integral_type;
      typedef atomic_address __base_type;

      atomic() = default;
      ~atomic() = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) = delete;
      atomic(__integral_type __i) : __base_type(__i) { }

      using __base_type::operator __integral_type;
      using __base_type::operator=;
    };

  /// Explicit specialization for bool.
  template<>
    struct atomic<bool> : public atomic_bool
    {
      typedef bool __integral_type;
      typedef atomic_bool __base_type;

      atomic() = default;
      ~atomic() = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) = delete;
      atomic(__integral_type __i) : __base_type(__i) { }

      using __base_type::operator __integral_type;
      using __base_type::operator=;
    };

  /// Explicit specialization for char.
  template<>
    struct atomic<char> : public atomic_char
    {
      typedef char __integral_type;
      typedef atomic_char __base_type;

      atomic() = default;
      ~atomic() = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) = delete;
      atomic(__integral_type __i) : __base_type(__i) { }

      using __base_type::operator __integral_type;
      using __base_type::operator=;
    };

  /// Explicit specialization for signed char.
  template<>
    struct atomic<signed char> : public atomic_schar
    {
      typedef signed char __integral_type;
      typedef atomic_schar __base_type;

      atomic() = default;
      ~atomic() = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) = delete;
      atomic(__integral_type __i) : __base_type(__i) { }

      using __base_type::operator __integral_type;
      using __base_type::operator=;
    };

  /// Explicit specialization for unsigned char.
  template<>
    struct atomic<unsigned char> : public atomic_uchar
    {
      typedef unsigned char __integral_type;
      typedef atomic_uchar __base_type;

      atomic() = default;
      ~atomic() = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) = delete;
      atomic(__integral_type __i) : __base_type(__i) { }

      using __base_type::operator __integral_type;
      using __base_type::operator=;
    };

  /// Explicit specialization for short.
  template<>
    struct atomic<short> : public atomic_short
    {
      typedef short __integral_type;
      typedef atomic_short __base_type;

      atomic() = default;
      ~atomic() = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) = delete;
      atomic(__integral_type __i) : __base_type(__i) { }

      using __base_type::operator __integral_type;
      using __base_type::operator=;
    };

  /// Explicit specialization for unsigned short.
  template<>
    struct atomic<unsigned short> : public atomic_ushort
    {
      typedef unsigned short __integral_type;
      typedef atomic_ushort __base_type;

      atomic() = default;
      ~atomic() = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) = delete;
      atomic(__integral_type __i) : __base_type(__i) { }

      using __base_type::operator __integral_type;
      using __base_type::operator=;
    };

  /// Explicit specialization for int.
  template<>
    struct atomic<int> : atomic_int
    {
      typedef int __integral_type;
      typedef atomic_int __base_type;

      atomic() = default;
      ~atomic() = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) = delete;
      atomic(__integral_type __i) : __base_type(__i) { }

      using __base_type::operator __integral_type;
      using __base_type::operator=;
    };

  /// Explicit specialization for unsigned int.
  template<>
    struct atomic<unsigned int> : public atomic_uint
    {
      typedef unsigned int __integral_type;
      typedef atomic_uint __base_type;

      atomic() = default;
      ~atomic() = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) = delete;
      atomic(__integral_type __i) : __base_type(__i) { }

      using __base_type::operator __integral_type;
      using __base_type::operator=;
    };

  /// Explicit specialization for long.
  template<>
    struct atomic<long> : public atomic_long
    {
      typedef long __integral_type;
      typedef atomic_long __base_type;

      atomic() = default;
      ~atomic() = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) = delete;
      atomic(__integral_type __i) : __base_type(__i) { }

      using __base_type::operator __integral_type;
      using __base_type::operator=;
    };

  /// Explicit specialization for unsigned long.
  template<>
    struct atomic<unsigned long> : public atomic_ulong
    {
      typedef unsigned long __integral_type;
      typedef atomic_ulong __base_type;

      atomic() = default;
      ~atomic() = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) = delete;
      atomic(__integral_type __i) : __base_type(__i) { }

      using __base_type::operator __integral_type;
      using __base_type::operator=;
    };

  /// Explicit specialization for long long.
  template<>
    struct atomic<long long> : public atomic_llong
    {
      typedef long long __integral_type;
      typedef atomic_llong __base_type;

      atomic() = default;
      ~atomic() = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) = delete;
      atomic(__integral_type __i) : __base_type(__i) { }

      using __base_type::operator __integral_type;
      using __base_type::operator=;
    };

  /// Explicit specialization for unsigned long long.
  template<>
    struct atomic<unsigned long long> : public atomic_ullong
    {
      typedef unsigned long long __integral_type;
      typedef atomic_ullong __base_type;

      atomic() = default;
      ~atomic() = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) = delete;
      atomic(__integral_type __i) : __base_type(__i) { }

      using __base_type::operator __integral_type;
      using __base_type::operator=;
    };

  /// Explicit specialization for wchar_t.
  template<>
    struct atomic<wchar_t> : public atomic_wchar_t
    {
      typedef wchar_t __integral_type;
      typedef atomic_wchar_t __base_type;

      atomic() = default;
      ~atomic() = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) = delete;
      atomic(__integral_type __i) : __base_type(__i) { }

      using __base_type::operator __integral_type;
      using __base_type::operator=;
    };

  /// Explicit specialization for char16_t.
  template<>
    struct atomic<char16_t> : public atomic_char16_t
    {
      typedef char16_t __integral_type;
      typedef atomic_char16_t __base_type;

      atomic() = default;
      ~atomic() = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) = delete;
      atomic(__integral_type __i) : __base_type(__i) { }

      using __base_type::operator __integral_type;
      using __base_type::operator=;
    };

  /// Explicit specialization for char32_t.
  template<>
    struct atomic<char32_t> : public atomic_char32_t
    {
      typedef char32_t __integral_type;
      typedef atomic_char32_t __base_type;

      atomic() = default;
      ~atomic() = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) = delete;
      atomic(__integral_type __i) : __base_type(__i) { }

      using __base_type::operator __integral_type;
      using __base_type::operator=;
    };
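
  // Illustrative note (not part of the original header): each explicit
  // specialization above only re-exposes the conversion and assignment
  // operators of its C-compatible base type, so std::atomic<T> behaves like
  // the corresponding atomic_* struct.  A hypothetical example:
  //
  //   atomic<int> __i(5);
  //   __i = 7;        // base class operator=
  //   int __v = __i;  // base class conversion operator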

  template<typename _Tp>
    _Tp*
    atomic<_Tp*>::load(memory_order __m) const volatile
    { return static_cast<_Tp*>(atomic_address::load(__m)); }

  template<typename _Tp>
    _Tp*
    atomic<_Tp*>::exchange(_Tp* __v, memory_order __m) volatile
    { return static_cast<_Tp*>(atomic_address::exchange(__v, __m)); }

  template<typename _Tp>
    bool
    atomic<_Tp*>::compare_exchange_weak(_Tp*& __r, _Tp* __v,
                                        memory_order __m1,
                                        memory_order __m2) volatile
    {
      void** __vr = reinterpret_cast<void**>(&__r);
      void* __vv = static_cast<void*>(__v);
      return atomic_address::compare_exchange_weak(*__vr, __vv, __m1, __m2);
    }

  template<typename _Tp>
    bool
    atomic<_Tp*>::compare_exchange_strong(_Tp*& __r, _Tp* __v,
                                          memory_order __m1,
                                          memory_order __m2) volatile
    {
      void** __vr = reinterpret_cast<void**>(&__r);
      void* __vv = static_cast<void*>(__v);
      return atomic_address::compare_exchange_strong(*__vr, __vv, __m1, __m2);
    }

  template<typename _Tp>
    bool
    atomic<_Tp*>::compare_exchange_weak(_Tp*& __r, _Tp* __v,
                                        memory_order __m) volatile
    {
      return compare_exchange_weak(__r, __v, __m,
                                   __calculate_memory_order(__m));
    }

  template<typename _Tp>
    bool
    atomic<_Tp*>::compare_exchange_strong(_Tp*& __r, _Tp* __v,
                                          memory_order __m) volatile
    {
      return compare_exchange_strong(__r, __v, __m,
                                     __calculate_memory_order(__m));
    }

  template<typename _Tp>
    _Tp*
    atomic<_Tp*>::fetch_add(ptrdiff_t __d, memory_order __m) volatile
    {
      void* __p = atomic_fetch_add_explicit(this, sizeof(_Tp) * __d, __m);
      return static_cast<_Tp*>(__p);
    }

  template<typename _Tp>
    _Tp*
    atomic<_Tp*>::fetch_sub(ptrdiff_t __d, memory_order __m) volatile
    {
      void* __p = atomic_fetch_sub_explicit(this, sizeof(_Tp) * __d, __m);
      return static_cast<_Tp*>(__p);
    }
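
  // Illustrative sketch (not part of the original header): the member
  // definitions above funnel typed pointer compare-and-swap through the
  // void*-based atomic_address operations.  A hypothetical lock-free push
  // built on compare_exchange_weak (names _Node, __head, __push are
  // invented for this example):
  //
  //   struct _Node { _Node* _M_next; };
  //   atomic<_Node*> __head(0);
  //   void __push(_Node* __n)
  //   {
  //     _Node* __old = __head.load(memory_order_relaxed);
  //     do
  //       __n->_M_next = __old;
  //     while (!__head.compare_exchange_weak(__old, __n,
  //                                          memory_order_release,
  //                                          memory_order_relaxed));
  //   }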

  // Convenience function definitions, atomic_flag.
  inline bool
  atomic_flag_test_and_set_explicit(volatile atomic_flag* __a,
                                    memory_order __m)
  { return __a->test_and_set(__m); }

  inline void
  atomic_flag_clear_explicit(volatile atomic_flag* __a, memory_order __m)
  { return __a->clear(__m); }

  // Convenience function definitions, atomic_address.
  inline bool
  atomic_is_lock_free(const volatile atomic_address* __a)
  { return __a->is_lock_free(); }

  inline void
  atomic_store(volatile atomic_address* __a, void* __v)
  { __a->store(__v); }

  inline void
  atomic_store_explicit(volatile atomic_address* __a, void* __v,
                        memory_order __m)
  { __a->store(__v, __m); }

  inline void*
  atomic_load(const volatile atomic_address* __a)
  { return __a->load(); }

  inline void*
  atomic_load_explicit(const volatile atomic_address* __a, memory_order __m)
  { return __a->load(__m); }

  inline void*
  atomic_exchange(volatile atomic_address* __a, void* __v)
  { return __a->exchange(__v); }

  inline void*
  atomic_exchange_explicit(volatile atomic_address* __a, void* __v,
                           memory_order __m)
  { return __a->exchange(__v, __m); }

  inline bool
  atomic_compare_exchange_weak(volatile atomic_address* __a,
                               void** __v1, void* __v2)
  {
    return __a->compare_exchange_weak(*__v1, __v2, memory_order_seq_cst,
                                      memory_order_seq_cst);
  }

  inline bool
  atomic_compare_exchange_strong(volatile atomic_address* __a,
                                 void** __v1, void* __v2)
  {
    return __a->compare_exchange_strong(*__v1, __v2, memory_order_seq_cst,
                                        memory_order_seq_cst);
  }

  inline bool
  atomic_compare_exchange_weak_explicit(volatile atomic_address* __a,
                                        void** __v1, void* __v2,
                                        memory_order __m1, memory_order __m2)
  { return __a->compare_exchange_weak(*__v1, __v2, __m1, __m2); }

  inline bool
  atomic_compare_exchange_strong_explicit(volatile atomic_address* __a,
                                          void** __v1, void* __v2,
                                          memory_order __m1, memory_order __m2)
  { return __a->compare_exchange_strong(*__v1, __v2, __m1, __m2); }

  inline void*
  atomic_fetch_add_explicit(volatile atomic_address* __a, ptrdiff_t __d,
                            memory_order __m)
  { return __a->fetch_add(__d, __m); }

  inline void*
  atomic_fetch_add(volatile atomic_address* __a, ptrdiff_t __d)
  { return __a->fetch_add(__d); }

  inline void*
  atomic_fetch_sub_explicit(volatile atomic_address* __a, ptrdiff_t __d,
                            memory_order __m)
  { return __a->fetch_sub(__d, __m); }

  inline void*
  atomic_fetch_sub(volatile atomic_address* __a, ptrdiff_t __d)
  { return __a->fetch_sub(__d); }

  // Convenience function definitions, atomic_bool.
  inline bool
  atomic_is_lock_free(const volatile atomic_bool* __a)
  { return __a->is_lock_free(); }

  inline void
  atomic_store(volatile atomic_bool* __a, bool __i)
  { __a->store(__i); }

  inline void
  atomic_store_explicit(volatile atomic_bool* __a, bool __i, memory_order __m)
  { __a->store(__i, __m); }

  inline bool
  atomic_load(const volatile atomic_bool* __a)
  { return __a->load(); }

  inline bool
  atomic_load_explicit(const volatile atomic_bool* __a, memory_order __m)
  { return __a->load(__m); }

  inline bool
  atomic_exchange(volatile atomic_bool* __a, bool __i)
  { return __a->exchange(__i); }

  inline bool
  atomic_exchange_explicit(volatile atomic_bool* __a, bool __i,
                           memory_order __m)
  { return __a->exchange(__i, __m); }

  inline bool
  atomic_compare_exchange_weak(volatile atomic_bool* __a, bool* __i1,
                               bool __i2)
  {
    return __a->compare_exchange_weak(*__i1, __i2, memory_order_seq_cst,
                                      memory_order_seq_cst);
  }

  inline bool
  atomic_compare_exchange_strong(volatile atomic_bool* __a,
                                 bool* __i1, bool __i2)
  {
    return __a->compare_exchange_strong(*__i1, __i2, memory_order_seq_cst,
                                        memory_order_seq_cst);
  }

  inline bool
  atomic_compare_exchange_weak_explicit(volatile atomic_bool* __a, bool* __i1,
                                        bool __i2, memory_order __m1,
                                        memory_order __m2)
  { return __a->compare_exchange_weak(*__i1, __i2, __m1, __m2); }

  inline bool
  atomic_compare_exchange_strong_explicit(volatile atomic_bool* __a,
                                          bool* __i1, bool __i2,
                                          memory_order __m1, memory_order __m2)
  { return __a->compare_exchange_strong(*__i1, __i2, __m1, __m2); }
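
  // Illustrative sketch (not part of the original header): these free
  // functions provide the C-compatible spelling of the member operations.
  // A hypothetical spin lock built on atomic_flag, assuming the
  // ATOMIC_FLAG_INIT macro from <stdatomic.h>:
  //
  //   atomic_flag __lock = ATOMIC_FLAG_INIT;
  //   void __acquire()
  //   {
  //     while (atomic_flag_test_and_set_explicit(&__lock,
  //                                              memory_order_acquire))
  //       { /* spin */ }
  //   }
  //   void __release()
  //   { atomic_flag_clear_explicit(&__lock, memory_order_release); }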

  // Free standing functions.  The template argument should be constrained
  // to integral types as specified in the standard.
  template<typename _ITp>
    inline void
    atomic_store_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i,
                          memory_order __m)
    { __a->store(__i, __m); }

  template<typename _ITp>
    inline _ITp
    atomic_load_explicit(const volatile __atomic_base<_ITp>* __a,
                         memory_order __m)
    { return __a->load(__m); }

  template<typename _ITp>
    inline _ITp
    atomic_exchange_explicit(volatile __atomic_base<_ITp>* __a,
                             _ITp __i, memory_order __m)
    { return __a->exchange(__i, __m); }

  template<typename _ITp>
    inline bool
    atomic_compare_exchange_weak_explicit(volatile __atomic_base<_ITp>* __a,
                                          _ITp* __i1, _ITp __i2,
                                          memory_order __m1,
                                          memory_order __m2)
    { return __a->compare_exchange_weak(*__i1, __i2, __m1, __m2); }

  template<typename _ITp>
    inline bool
    atomic_compare_exchange_strong_explicit(volatile __atomic_base<_ITp>* __a,
                                            _ITp* __i1, _ITp __i2,
                                            memory_order __m1,
                                            memory_order __m2)
    { return __a->compare_exchange_strong(*__i1, __i2, __m1, __m2); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_add_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i,
                              memory_order __m)
    { return __a->fetch_add(__i, __m); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_sub_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i,
                              memory_order __m)
    { return __a->fetch_sub(__i, __m); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_and_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i,
                              memory_order __m)
    { return __a->fetch_and(__i, __m); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_or_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i,
                             memory_order __m)
    { return __a->fetch_or(__i, __m); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_xor_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i,
                              memory_order __m)
    { return __a->fetch_xor(__i, __m); }

  template<typename _ITp>
    inline bool
    atomic_is_lock_free(const volatile __atomic_base<_ITp>* __a)
    { return __a->is_lock_free(); }

  template<typename _ITp>
    inline void
    atomic_store(volatile __atomic_base<_ITp>* __a, _ITp __i)
    { atomic_store_explicit(__a, __i, memory_order_seq_cst); }

  template<typename _ITp>
    inline _ITp
    atomic_load(const volatile __atomic_base<_ITp>* __a)
    { return atomic_load_explicit(__a, memory_order_seq_cst); }

  template<typename _ITp>
    inline _ITp
    atomic_exchange(volatile __atomic_base<_ITp>* __a, _ITp __i)
    { return atomic_exchange_explicit(__a, __i, memory_order_seq_cst); }

  template<typename _ITp>
    inline bool
    atomic_compare_exchange_weak(volatile __atomic_base<_ITp>* __a,
                                 _ITp* __i1, _ITp __i2)
    {
      return atomic_compare_exchange_weak_explicit(__a, __i1, __i2,
                                                   memory_order_seq_cst,
                                                   memory_order_seq_cst);
    }

  template<typename _ITp>
    inline bool
    atomic_compare_exchange_strong(volatile __atomic_base<_ITp>* __a,
                                   _ITp* __i1, _ITp __i2)
    {
      return atomic_compare_exchange_strong_explicit(__a, __i1, __i2,
                                                     memory_order_seq_cst,
                                                     memory_order_seq_cst);
    }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_add(volatile __atomic_base<_ITp>* __a, _ITp __i)
    { return atomic_fetch_add_explicit(__a, __i, memory_order_seq_cst); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_sub(volatile __atomic_base<_ITp>* __a, _ITp __i)
    { return atomic_fetch_sub_explicit(__a, __i, memory_order_seq_cst); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_and(volatile __atomic_base<_ITp>* __a, _ITp __i)
    { return atomic_fetch_and_explicit(__a, __i, memory_order_seq_cst); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_or(volatile __atomic_base<_ITp>* __a, _ITp __i)
    { return atomic_fetch_or_explicit(__a, __i, memory_order_seq_cst); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_xor(volatile __atomic_base<_ITp>* __a, _ITp __i)
    { return atomic_fetch_xor_explicit(__a, __i, memory_order_seq_cst); }
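
  // Illustrative sketch (not part of the original header): the seq_cst
  // wrappers above simply forward to the *_explicit forms.  Hypothetical
  // use with an integral atomic type, assuming (as these overloads require)
  // that it is, or derives from, a __atomic_base<int> from <bits/atomic_*.h>:
  //
  //   atomic<int> __counter(0);
  //   atomic_fetch_add(&__counter, 1);                  // seq_cst increment
  //   int __seen = atomic_load_explicit(&__counter,
  //                                     memory_order_acquire);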

  // @} group atomics

_GLIBCXX_END_NAMESPACE

#endif