#ifndef _GLIBCXX_ATOMIC_BASE_H
#define _GLIBCXX_ATOMIC_BASE_H 1

#pragma GCC system_header

#if __cplusplus > 201703L && _GLIBCXX_HOSTED
# include <bits/atomic_wait.h>
#endif

#ifndef _GLIBCXX_ALWAYS_INLINE
#define _GLIBCXX_ALWAYS_INLINE inline __attribute__((__always_inline__))
#endif

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION
#if __cplusplus > 201703L
  /// Enumeration for memory_order
  enum class memory_order : int
    { relaxed, consume, acquire, release, acq_rel, seq_cst };

  inline constexpr memory_order memory_order_relaxed = memory_order::relaxed;
  inline constexpr memory_order memory_order_consume = memory_order::consume;
  inline constexpr memory_order memory_order_acquire = memory_order::acquire;
  inline constexpr memory_order memory_order_release = memory_order::release;
  inline constexpr memory_order memory_order_acq_rel = memory_order::acq_rel;
  inline constexpr memory_order memory_order_seq_cst = memory_order::seq_cst;
#else
  typedef enum memory_order
    { memory_order_relaxed, memory_order_consume, memory_order_acquire,
      memory_order_release, memory_order_acq_rel, memory_order_seq_cst }
    memory_order;
#endif
  enum __memory_order_modifier
    {
      __memory_order_mask          = 0x0ffff,
      __memory_order_modifier_mask = 0xffff0000,
      __memory_order_hle_acquire   = 0x10000,
      __memory_order_hle_release   = 0x20000
    };

  constexpr memory_order
  operator|(memory_order __m, __memory_order_modifier __mod)
  { return memory_order(int(__m) | int(__mod)); }

  constexpr memory_order
  operator&(memory_order __m, __memory_order_modifier __mod)
  { return memory_order(int(__m) & int(__mod)); }

  // Drop release ordering as per [atomics.types.operations.req]/21.
  constexpr memory_order
  __cmpexch_failure_order2(memory_order __m) noexcept
  {
    return __m == memory_order_acq_rel ? memory_order_acquire
      : __m == memory_order_release ? memory_order_relaxed : __m;
  }

  constexpr memory_order
  __cmpexch_failure_order(memory_order __m) noexcept
  {
    return memory_order(__cmpexch_failure_order2(__m & __memory_order_mask)
      | __memory_order_modifier(__m & __memory_order_modifier_mask));
  }

  constexpr bool
  __is_valid_cmpexch_failure_order(memory_order __m) noexcept
  {
    return (__m & __memory_order_mask) != memory_order_release
      && (__m & __memory_order_mask) != memory_order_acq_rel;
  }
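  // Worked example (not part of the original header): the failure order
  // derived for the one-order compare_exchange overloads never contains
  // release semantics:
  //
  //   __cmpexch_failure_order(memory_order_acq_rel) == memory_order_acquire
  //   __cmpexch_failure_order(memory_order_release) == memory_order_relaxed
  //   __cmpexch_failure_order(memory_order_acquire) == memory_order_acquire
  //
  // Any __memory_order_modifier bits in the argument are preserved.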
  // Base types for atomics.
  template<typename _IntTp>
    struct __atomic_base;

  _GLIBCXX_ALWAYS_INLINE void
  atomic_thread_fence(memory_order __m) noexcept
  { __atomic_thread_fence(int(__m)); }

  _GLIBCXX_ALWAYS_INLINE void
  atomic_signal_fence(memory_order __m) noexcept
  { __atomic_signal_fence(int(__m)); }
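  // Usage sketch (not part of this header): a release fence before a relaxed
  // store pairs with an acquire fence after a relaxed load.  Assumes a shared
  // `std::atomic<bool> flag{false}` and a plain `int data`:
  //
  //   // producer thread:
  //   data = 42;
  //   std::atomic_thread_fence(std::memory_order_release);
  //   flag.store(true, std::memory_order_relaxed);
  //
  //   // consumer thread:
  //   while (!flag.load(std::memory_order_relaxed)) { }
  //   std::atomic_thread_fence(std::memory_order_acquire);
  //   // reading data now yields 42.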
  /// kill_dependency
  template<typename _Tp>
    inline _Tp
    kill_dependency(_Tp __y) noexcept
    {
      _Tp __ret(__y);
      return __ret;
    }

#if __glibcxx_atomic_value_initialization
# define _GLIBCXX20_INIT(I) = I
#else
# define _GLIBCXX20_INIT(I)
#endif

#define ATOMIC_VAR_INIT(_VI) { _VI }
  template<typename _Tp>
    struct atomic;

  template<typename _Tp>
    struct atomic<_Tp*>;

  /* The target's "set" value for test-and-set may not be exactly 1.  */
#if __GCC_ATOMIC_TEST_AND_SET_TRUEVAL == 1
  typedef bool __atomic_flag_data_type;
#else
  typedef unsigned char __atomic_flag_data_type;
#endif

  _GLIBCXX_BEGIN_EXTERN_C

  struct __atomic_flag_base
  {
    __atomic_flag_data_type _M_i _GLIBCXX20_INIT({});
  };

  _GLIBCXX_END_EXTERN_C

#define ATOMIC_FLAG_INIT { 0 }
  /// atomic_flag
  struct atomic_flag : public __atomic_flag_base
  {
    atomic_flag() noexcept = default;
    ~atomic_flag() noexcept = default;
    atomic_flag(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) volatile = delete;

    // Conversion to ATOMIC_FLAG_INIT.
    constexpr atomic_flag(bool __i) noexcept
    : __atomic_flag_base{ _S_init(__i) }
    { }

    _GLIBCXX_ALWAYS_INLINE bool
    test_and_set(memory_order __m = memory_order_seq_cst) noexcept
    {
      return __atomic_test_and_set (&_M_i, int(__m));
    }

    _GLIBCXX_ALWAYS_INLINE bool
    test_and_set(memory_order __m = memory_order_seq_cst) volatile noexcept
    {
      return __atomic_test_and_set (&_M_i, int(__m));
    }
#ifdef __glibcxx_atomic_flag_test
    _GLIBCXX_ALWAYS_INLINE bool
    test(memory_order __m = memory_order_seq_cst) const noexcept
    {
      __atomic_flag_data_type __v;
      __atomic_load(&_M_i, &__v, int(__m));
      return __v == __GCC_ATOMIC_TEST_AND_SET_TRUEVAL;
    }

    _GLIBCXX_ALWAYS_INLINE bool
    test(memory_order __m = memory_order_seq_cst) const volatile noexcept
    {
      __atomic_flag_data_type __v;
      __atomic_load(&_M_i, &__v, int(__m));
      return __v == __GCC_ATOMIC_TEST_AND_SET_TRUEVAL;
    }
#endif // __glibcxx_atomic_flag_test
#if __glibcxx_atomic_wait
    _GLIBCXX_ALWAYS_INLINE void
    wait(bool __old, memory_order __m = memory_order_seq_cst) const noexcept
    {
      const __atomic_flag_data_type __v
        = __old ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0;

      std::__atomic_wait_address_v(&_M_i, __v,
          [__m, this] { return __atomic_load_n(&_M_i, int(__m)); });
    }

    _GLIBCXX_ALWAYS_INLINE void
    notify_one() noexcept
    { std::__atomic_notify_address(&_M_i, false); }

    _GLIBCXX_ALWAYS_INLINE void
    notify_all() noexcept
    { std::__atomic_notify_address(&_M_i, true); }
#endif // __glibcxx_atomic_wait
    _GLIBCXX_ALWAYS_INLINE void
    clear(memory_order __m = memory_order_seq_cst) noexcept
    {
      memory_order __b __attribute__ ((__unused__))
        = __m & __memory_order_mask;
      __glibcxx_assert(__b != memory_order_consume);
      __glibcxx_assert(__b != memory_order_acquire);
      __glibcxx_assert(__b != memory_order_acq_rel);

      __atomic_clear (&_M_i, int(__m));
    }

    _GLIBCXX_ALWAYS_INLINE void
    clear(memory_order __m = memory_order_seq_cst) volatile noexcept
    {
      memory_order __b __attribute__ ((__unused__))
        = __m & __memory_order_mask;
      __glibcxx_assert(__b != memory_order_consume);
      __glibcxx_assert(__b != memory_order_acquire);
      __glibcxx_assert(__b != memory_order_acq_rel);

      __atomic_clear (&_M_i, int(__m));
    }
  private:
    static constexpr __atomic_flag_data_type
    _S_init(bool __i)
    { return __i ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0; }
  };
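  // Example (not part of this header): atomic_flag as a minimal spinlock.
  // test_and_set(acquire) returns the previous value, so the loop spins
  // until it observes the flag clear; clear(release) publishes the unlock.
  //
  //   struct __example_spinlock   // hypothetical name
  //   {
  //     std::atomic_flag _M_f = ATOMIC_FLAG_INIT;
  //
  //     void lock()
  //     { while (_M_f.test_and_set(std::memory_order_acquire)) { } }
  //
  //     void unlock()
  //     { _M_f.clear(std::memory_order_release); }
  //   };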
  /// Base class for atomic integrals.
  template<typename _ITp>
    struct __atomic_base
    {
      using value_type = _ITp;
      using difference_type = value_type;

    private:
      typedef _ITp __int_type;

      static constexpr int _S_alignment =
        sizeof(_ITp) > alignof(_ITp) ? sizeof(_ITp) : alignof(_ITp);

      alignas(_S_alignment) __int_type _M_i _GLIBCXX20_INIT(0);

    public:
      __atomic_base() noexcept = default;
      ~__atomic_base() noexcept = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      // Requires __int_type convertible to _M_i.
      constexpr __atomic_base(__int_type __i) noexcept : _M_i (__i) { }
      operator __int_type() const noexcept
      { return load(); }

      operator __int_type() const volatile noexcept
      { return load(); }

      __int_type
      operator=(__int_type __i) noexcept
      {
        store(__i);
        return __i;
      }

      __int_type
      operator=(__int_type __i) volatile noexcept
      {
        store(__i);
        return __i;
      }
      __int_type
      operator++(int) noexcept
      { return fetch_add(1); }

      __int_type
      operator++(int) volatile noexcept
      { return fetch_add(1); }

      __int_type
      operator--(int) noexcept
      { return fetch_sub(1); }

      __int_type
      operator--(int) volatile noexcept
      { return fetch_sub(1); }

      __int_type
      operator++() noexcept
      { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator++() volatile noexcept
      { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator--() noexcept
      { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator--() volatile noexcept
      { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator+=(__int_type __i) noexcept
      { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator+=(__int_type __i) volatile noexcept
      { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator-=(__int_type __i) noexcept
      { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator-=(__int_type __i) volatile noexcept
      { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator&=(__int_type __i) noexcept
      { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator&=(__int_type __i) volatile noexcept
      { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator|=(__int_type __i) noexcept
      { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator|=(__int_type __i) volatile noexcept
      { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator^=(__int_type __i) noexcept
      { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator^=(__int_type __i) volatile noexcept
      { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
      bool
      is_lock_free() const noexcept
      {
        // Produce a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_i),
            reinterpret_cast<void *>(-_S_alignment));
      }

      bool
      is_lock_free() const volatile noexcept
      {
        // Produce a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_i),
            reinterpret_cast<void *>(-_S_alignment));
      }
      _GLIBCXX_ALWAYS_INLINE void
      store(__int_type __i, memory_order __m = memory_order_seq_cst) noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__int_type __i,
            memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_i, __i, int(__m));
      }
      _GLIBCXX_ALWAYS_INLINE __int_type
      load(memory_order __m = memory_order_seq_cst) const noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_i, int(__m));
      }
      _GLIBCXX_ALWAYS_INLINE __int_type
      exchange(__int_type __i,
               memory_order __m = memory_order_seq_cst) noexcept
      {
        return __atomic_exchange_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      exchange(__int_type __i,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return __atomic_exchange_n(&_M_i, __i, int(__m));
      }
      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m1, memory_order __m2) noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m1,
                            memory_order __m2) volatile noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m = memory_order_seq_cst) noexcept
      {
        return compare_exchange_weak(__i1, __i2, __m,
                                     __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                 memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return compare_exchange_weak(__i1, __i2, __m,
                                     __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m1, memory_order __m2) noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m1,
                              memory_order __m2) volatile noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m = memory_order_seq_cst) noexcept
      {
        return compare_exchange_strong(__i1, __i2, __m,
                                       __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                 memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return compare_exchange_strong(__i1, __i2, __m,
                                       __cmpexch_failure_order(__m));
      }
#if __glibcxx_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(__int_type __old,
           memory_order __m = memory_order_seq_cst) const noexcept
      {
        std::__atomic_wait_address_v(&_M_i, __old,
                                     [__m, this] { return this->load(__m); });
      }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() noexcept
      { std::__atomic_notify_address(&_M_i, false); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() noexcept
      { std::__atomic_notify_address(&_M_i, true); }
#endif // __glibcxx_atomic_wait
      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_add(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_add(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_add(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_add(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_sub(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_sub(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_and(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_and(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_and(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_and(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_or(__int_type __i,
               memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_or(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_or(__int_type __i,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_or(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_xor(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_xor(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }
    };
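  // Example (not part of this header): the canonical compare_exchange_weak
  // retry loop over the operations above, here as a hypothetical saturating
  // increment, assuming std::atomic<int>:
  //
  //   bool __example_incr_below(std::atomic<int>& __a, int __limit)
  //   {
  //     int __old = __a.load(std::memory_order_relaxed);
  //     do
  //       {
  //         if (__old >= __limit)
  //           return false;                       // saturated, give up
  //       }
  //     while (!__a.compare_exchange_weak(__old, __old + 1,
  //                                       std::memory_order_acq_rel));
  //     return true;
  //   }
  //
  // On failure compare_exchange_weak refreshes __old with the current value,
  // using the failure order derived by __cmpexch_failure_order(acq_rel),
  // i.e. acquire.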
  /// Partial specialization for pointer types.
  template<typename _PTp>
    struct __atomic_base<_PTp*>
    {
    private:
      typedef _PTp* __pointer_type;

      __pointer_type _M_p _GLIBCXX20_INIT(nullptr);

      // Factored out to facilitate explicit specialization.
      static constexpr ptrdiff_t
      _S_type_size(ptrdiff_t __d) { return __d * sizeof(_PTp); }

    public:
      __atomic_base() noexcept = default;
      ~__atomic_base() noexcept = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      // Requires __pointer_type convertible to _M_p.
      constexpr __atomic_base(__pointer_type __p) noexcept : _M_p (__p) { }

      operator __pointer_type() const noexcept
      { return load(); }

      operator __pointer_type() const volatile noexcept
      { return load(); }

      __pointer_type
      operator=(__pointer_type __p) noexcept
      {
        store(__p);
        return __p;
      }

      __pointer_type
      operator=(__pointer_type __p) volatile noexcept
      {
        store(__p);
        return __p;
      }
      __pointer_type
      operator++(int) noexcept
      { return fetch_add(1); }

      __pointer_type
      operator++(int) volatile noexcept
      { return fetch_add(1); }

      __pointer_type
      operator--(int) noexcept
      { return fetch_sub(1); }

      __pointer_type
      operator--(int) volatile noexcept
      { return fetch_sub(1); }

      __pointer_type
      operator++() noexcept
      { return __atomic_add_fetch(&_M_p, _S_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator++() volatile noexcept
      { return __atomic_add_fetch(&_M_p, _S_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator--() noexcept
      { return __atomic_sub_fetch(&_M_p, _S_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator--() volatile noexcept
      { return __atomic_sub_fetch(&_M_p, _S_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator+=(ptrdiff_t __d) noexcept
      { return __atomic_add_fetch(&_M_p, _S_type_size(__d),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator+=(ptrdiff_t __d) volatile noexcept
      { return __atomic_add_fetch(&_M_p, _S_type_size(__d),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator-=(ptrdiff_t __d) noexcept
      { return __atomic_sub_fetch(&_M_p, _S_type_size(__d),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator-=(ptrdiff_t __d) volatile noexcept
      { return __atomic_sub_fetch(&_M_p, _S_type_size(__d),
                                  int(memory_order_seq_cst)); }
      bool
      is_lock_free() const noexcept
      {
        // Produce a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_p),
            reinterpret_cast<void *>(-__alignof(_M_p)));
      }

      bool
      is_lock_free() const volatile noexcept
      {
        // Produce a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_p),
            reinterpret_cast<void *>(-__alignof(_M_p)));
      }
      _GLIBCXX_ALWAYS_INLINE void
      store(__pointer_type __p,
            memory_order __m = memory_order_seq_cst) noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__pointer_type __p,
            memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_p, __p, int(__m));
      }
      _GLIBCXX_ALWAYS_INLINE __pointer_type
      load(memory_order __m = memory_order_seq_cst) const noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_p, int(__m));
      }
      _GLIBCXX_ALWAYS_INLINE __pointer_type
      exchange(__pointer_type __p,
               memory_order __m = memory_order_seq_cst) noexcept
      {
        return __atomic_exchange_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      exchange(__pointer_type __p,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return __atomic_exchange_n(&_M_p, __p, int(__m));
      }
      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
                            memory_order __m1, memory_order __m2) noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 1,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
                            memory_order __m1,
                            memory_order __m2) volatile noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 1,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
                              memory_order __m1, memory_order __m2) noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
                              memory_order __m1,
                              memory_order __m2) volatile noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
                                           int(__m1), int(__m2));
      }
#if __glibcxx_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(__pointer_type __old,
           memory_order __m = memory_order_seq_cst) const noexcept
      {
        std::__atomic_wait_address_v(&_M_p, __old,
                                     [__m, this]
                                     { return this->load(__m); });
      }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { std::__atomic_notify_address(&_M_p, false); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { std::__atomic_notify_address(&_M_p, true); }
#endif // __glibcxx_atomic_wait
      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_add(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_add(&_M_p, _S_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_add(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_add(&_M_p, _S_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_sub(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_sub(&_M_p, _S_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_sub(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_sub(&_M_p, _S_type_size(__d), int(__m)); }
    };
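  // Note with example (not part of this header): _S_type_size scales the
  // ptrdiff_t argument by sizeof(_PTp), because the __atomic_fetch_* builtins
  // operate on raw byte offsets while atomic<T*> arithmetic is in units of T:
  //
  //   int __arr[4]{};                     // hypothetical
  //   std::atomic<int*> __p{__arr};
  //   __p.fetch_add(1);                   // advances by sizeof(int) bytes
  //   // __p.load() == __arr + 1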
  namespace __atomic_impl
  {
    // Implementation details of atomic padding handling.

    template<typename _Tp>
      constexpr bool
      __maybe_has_padding()
      {
#if ! __has_builtin(__builtin_clear_padding)
        return false;
#elif __has_builtin(__has_unique_object_representations)
        return !__has_unique_object_representations(_Tp)
          && !is_same<_Tp, float>::value && !is_same<_Tp, double>::value;
#else
        return true;
#endif
      }
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wc++17-extensions"

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _GLIBCXX14_CONSTEXPR _Tp*
      __clear_padding(_Tp& __val) noexcept
      {
        auto* __ptr = std::__addressof(__val);
#if __has_builtin(__builtin_clear_padding)
        if constexpr (__atomic_impl::__maybe_has_padding<_Tp>())
          __builtin_clear_padding(__ptr);
#endif
        return __ptr;
      }
    // Remove volatile and create a non-deduced context for value arguments.
    template<typename _Tp>
      using _Val = typename remove_volatile<_Tp>::type;
    template<bool _AtomicRef = false, typename _Tp>
      _GLIBCXX_ALWAYS_INLINE bool
      __compare_exchange(_Tp& __val, _Val<_Tp>& __e, _Val<_Tp>& __i,
                         bool __is_weak,
                         memory_order __s, memory_order __f) noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__f));

        using _Vp = _Val<_Tp>;
        _Tp* const __pval = std::__addressof(__val);

        if constexpr (!__atomic_impl::__maybe_has_padding<_Vp>())
          return __atomic_compare_exchange(__pval, std::__addressof(__e),
                                           std::__addressof(__i), __is_weak,
                                           int(__s), int(__f));
        else if constexpr (!_AtomicRef) // std::atomic<T>
          {
            // Clear padding of the value we want to set:
            _Vp* const __pi = __atomic_impl::__clear_padding(__i);
            // Only allowed to modify __e on failure, so make a copy:
            _Vp __exp = __e;
            // Clear padding of the expected value:
            _Vp* const __pexp = __atomic_impl::__clear_padding(__exp);

            // For std::atomic<T> the stored value always has zeroed padding,
            // so memcmp semantics of the builtin are sufficient.
            if (__atomic_compare_exchange(__pval, __pexp, __pi,
                                          __is_weak, int(__s), int(__f)))
              return true;
            // Value bits must differ; copy the observed value back to __e:
            __builtin_memcpy(std::__addressof(__e), __pexp, sizeof(_Vp));
            return false;
          }
        else // std::atomic_ref<T> where T has padding bits.
          {
            // Clear padding of the value we want to set:
            _Vp* const __pi = __atomic_impl::__clear_padding(__i);

            // Only allowed to modify __e on failure, so make a copy:
            _Vp __exp = __e;
            // Clear padding of the expected value:
            _Vp* const __pexp = __atomic_impl::__clear_padding(__exp);

            // The referenced object may hold non-zero padding bits, so a
            // failure might be due to padding alone; retry in that case.
            while (true)
              {
                // Remember what we expected before this attempt:
                _Vp __orig = __exp;
                if (__atomic_compare_exchange(__pval, __pexp, __pi,
                                              __is_weak, int(__s), int(__f)))
                  return true;

                // On failure __exp now holds the value read from memory:
                _Vp __curr = __exp;
                if (__builtin_memcmp(__atomic_impl::__clear_padding(__orig),
                                     __atomic_impl::__clear_padding(__curr),
                                     sizeof(_Vp)))
                  {
                    // Value bits differ, so this was a real failure:
                    __builtin_memcpy(std::__addressof(__e), __pexp,
                                     sizeof(_Vp));
                    return false;
                  }
                // Only the padding differed; retry with the exact
                // representation observed in memory.
              }
          }
      }
#pragma GCC diagnostic pop
  } // namespace __atomic_impl
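  // Example (not part of this header) of why the padding handling above
  // matters, assuming a type whose object representation is larger than its
  // value representation:
  //
  //   struct __padded { char __c; int __i; };   // hypothetical; typically
  //                                             // three padding bytes on
  //                                             // common ABIs
  //
  // Two __padded objects with equal members may still differ in their
  // padding bytes, and __atomic_compare_exchange compares whole object
  // representations with memcmp semantics.  __builtin_clear_padding zeroes
  // the padding on both sides so that a value-wise match cannot spuriously
  // fail because of stale padding bits.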
#if __cplusplus > 201703L
  // Implementation details of atomic floating-point types and atomic_ref.
  namespace __atomic_impl
  {
    // Like _Val<T> above, but for difference_type arguments.
    template<typename _Tp>
      using _Diff = __conditional_t<is_pointer_v<_Tp>, ptrdiff_t, _Val<_Tp>>;
    template<size_t _Size, size_t _Align>
      _GLIBCXX_ALWAYS_INLINE bool
      is_lock_free() noexcept
      {
        // Produce a fake, minimally aligned pointer.
        return __atomic_is_lock_free(_Size, reinterpret_cast<void *>(-_Align));
      }
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      store(_Tp* __ptr, _Val<_Tp> __t, memory_order __m) noexcept
      { __atomic_store(__ptr, __atomic_impl::__clear_padding(__t), int(__m)); }
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
      load(const _Tp* __ptr, memory_order __m) noexcept
      {
        alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
        auto* __dest = reinterpret_cast<_Val<_Tp>*>(__buf);
        __atomic_load(__ptr, __dest, int(__m));
        return *__dest;
      }
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
      exchange(_Tp* __ptr, _Val<_Tp> __desired, memory_order __m) noexcept
      {
        alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
        auto* __dest = reinterpret_cast<_Val<_Tp>*>(__buf);
        __atomic_exchange(__ptr, __atomic_impl::__clear_padding(__desired),
                          __dest, int(__m));
        return *__dest;
      }
    template<bool _AtomicRef = false, typename _Tp>
      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(_Tp* __ptr, _Val<_Tp>& __expected,
                            _Val<_Tp> __desired, memory_order __success,
                            memory_order __failure,
                            bool __check_padding = false) noexcept
      {
        return __atomic_impl::__compare_exchange<_AtomicRef>(
                 *__ptr, __expected, __desired, true, __success, __failure);
      }

    template<bool _AtomicRef = false, typename _Tp>
      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(_Tp* __ptr, _Val<_Tp>& __expected,
                              _Val<_Tp> __desired, memory_order __success,
                              memory_order __failure,
                              bool __ignore_padding = false) noexcept
      {
        return __atomic_impl::__compare_exchange<_AtomicRef>(
                 *__ptr, __expected, __desired, false, __success, __failure);
      }
#if __glibcxx_atomic_wait
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      wait(const _Tp* __ptr, _Val<_Tp> __old,
           memory_order __m = memory_order_seq_cst) noexcept
      {
        std::__atomic_wait_address_v(__ptr, __old,
            [__ptr, __m]() { return __atomic_impl::load(__ptr, __m); });
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      notify_one(const _Tp* __ptr) noexcept
      { std::__atomic_notify_address(__ptr, false); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      notify_all(const _Tp* __ptr) noexcept
      { std::__atomic_notify_address(__ptr, true); }
#endif // __glibcxx_atomic_wait
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_add(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_add(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_sub(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_sub(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_and(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_and(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_or(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_or(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_xor(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_xor(__ptr, __i, int(__m)); }
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __add_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
      { return __atomic_add_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __sub_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
      { return __atomic_sub_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __and_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_and_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __or_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_or_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __xor_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_xor_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
    template<typename _Tp>
      _Tp
      __fetch_add_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      {
        _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
        _Val<_Tp> __newval = __oldval + __i;
        while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
                                      memory_order_relaxed))
          __newval = __oldval + __i;
        return __oldval;
      }

    template<typename _Tp>
      _Tp
      __fetch_sub_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      {
        _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
        _Val<_Tp> __newval = __oldval - __i;
        while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
                                      memory_order_relaxed))
          __newval = __oldval - __i;
        return __oldval;
      }

    template<typename _Tp>
      _Tp
      __add_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
      {
        _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
        _Val<_Tp> __newval = __oldval + __i;
        while (!compare_exchange_weak(__ptr, __oldval, __newval,
                                      memory_order_seq_cst,
                                      memory_order_relaxed))
          __newval = __oldval + __i;
        return __newval;
      }

    template<typename _Tp>
      _Tp
      __sub_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
      {
        _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
        _Val<_Tp> __newval = __oldval - __i;
        while (!compare_exchange_weak(__ptr, __oldval, __newval,
                                      memory_order_seq_cst,
                                      memory_order_relaxed))
          __newval = __oldval - __i;
        return __newval;
      }
  } // namespace __atomic_impl
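  // Note (not part of this header): there are no __atomic_fetch_add/sub
  // builtins for floating-point types, so the *_flt helpers above emulate
  // fetch_add/fetch_sub with a compare_exchange_weak loop.  The reload on
  // failure is implicit: a failed compare_exchange_weak writes the value
  // observed in memory back into __oldval, so only __newval needs to be
  // recomputed.  A roughly equivalent user-level loop, assuming
  // std::atomic<double> __a:
  //
  //   double __old = __a.load(std::memory_order_relaxed);
  //   while (!__a.compare_exchange_weak(__old, __old + 1.0))
  //     { /* __old was refreshed by the failed CAS */ }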
  // base class for atomic<floating-point-type>
  template<typename _Fp>
    struct __atomic_float
    {
      static_assert(is_floating_point_v<_Fp>);

      static constexpr size_t _S_alignment = __alignof__(_Fp);

    public:
      using value_type = _Fp;
      using difference_type = value_type;

      static constexpr bool is_always_lock_free
        = __atomic_always_lock_free(sizeof(_Fp), 0);

      __atomic_float() = default;

      constexpr
      __atomic_float(_Fp __t) : _M_fp(__t)
      {
        if (!std::__is_constant_evaluated())
          __atomic_impl::__clear_padding(_M_fp);
      }

      __atomic_float(const __atomic_float&) = delete;
      __atomic_float& operator=(const __atomic_float&) = delete;
      __atomic_float& operator=(const __atomic_float&) volatile = delete;
      _Fp
      operator=(_Fp __t) volatile noexcept
      {
        this->store(__t);
        return __t;
      }

      _Fp
      operator=(_Fp __t) noexcept
      {
        this->store(__t);
        return __t;
      }

      bool
      is_lock_free() const volatile noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }

      bool
      is_lock_free() const noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }

      void
      store(_Fp __t, memory_order __m = memory_order_seq_cst) volatile noexcept
      { __atomic_impl::store(&_M_fp, __t, __m); }

      void
      store(_Fp __t, memory_order __m = memory_order_seq_cst) noexcept
      { __atomic_impl::store(&_M_fp, __t, __m); }

      _Fp
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      { return __atomic_impl::load(&_M_fp, __m); }

      _Fp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(&_M_fp, __m); }

      operator _Fp() const volatile noexcept { return this->load(); }
      operator _Fp() const noexcept { return this->load(); }
      _Fp
      exchange(_Fp __desired,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::exchange(&_M_fp, __desired, __m); }

      _Fp
      exchange(_Fp __desired,
               memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::exchange(&_M_fp, __desired, __m); }
      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __success,
                            memory_order __failure) noexcept
      {
        return __atomic_impl::compare_exchange_weak(&_M_fp,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __success,
                            memory_order __failure) volatile noexcept
      {
        return __atomic_impl::compare_exchange_weak(&_M_fp,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __success,
                              memory_order __failure) noexcept
      {
        return __atomic_impl::compare_exchange_strong(&_M_fp,
                                                      __expected, __desired,
                                                      __success, __failure);
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __success,
                              memory_order __failure) volatile noexcept
      {
        return __atomic_impl::compare_exchange_strong(&_M_fp,
                                                      __expected, __desired,
                                                      __success, __failure);
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __order = memory_order_seq_cst)
      noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __order = memory_order_seq_cst)
      volatile noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __order = memory_order_seq_cst)
      noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __order = memory_order_seq_cst)
      volatile noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }
#if __glibcxx_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Fp __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(&_M_fp, __old, __m); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(&_M_fp); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(&_M_fp); }
#endif // __glibcxx_atomic_wait
      value_type
      fetch_add(value_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }

      value_type
      fetch_add(value_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }

      value_type
      fetch_sub(value_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }

      value_type
      fetch_sub(value_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }

      value_type
      operator+=(value_type __i) noexcept
      { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }

      value_type
      operator+=(value_type __i) volatile noexcept
      { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }

      value_type
      operator-=(value_type __i) noexcept
      { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }

      value_type
      operator-=(value_type __i) volatile noexcept
      { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }

    private:
      alignas(_S_alignment) _Fp _M_fp _GLIBCXX20_INIT(0);
    };
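  // Usage sketch (not part of this header): __atomic_float is the base of
  // C++20 std::atomic<floating-point>:
  //
  //   std::atomic<double> __sum{0.0};
  //   __sum.fetch_add(1.5);   // uses __fetch_add_flt's CAS loop
  //   __sum += 2.5;           // uses __add_fetch_flt; __sum.load() == 4.0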
#undef _GLIBCXX20_INIT

  template<typename _Tp,
           bool = is_integral_v<_Tp>, bool = is_floating_point_v<_Tp>>
    struct __atomic_ref;
  // base class for non-integral, non-floating-point, non-pointer types
  template<typename _Tp>
    struct __atomic_ref<_Tp, false, false>
    {
      static_assert(is_trivially_copyable_v<_Tp>);

      // 1/2/4/8/16-byte types must be aligned to at least their size.
      static constexpr int _S_min_alignment
        = (sizeof(_Tp) & (sizeof(_Tp) - 1)) || sizeof(_Tp) > 16
        ? 0 : sizeof(_Tp);

    public:
      using value_type = _Tp;

      static constexpr bool is_always_lock_free
        = __atomic_always_lock_free(sizeof(_Tp), 0);

      static constexpr size_t required_alignment
        = _S_min_alignment > alignof(_Tp) ? _S_min_alignment : alignof(_Tp);

      __atomic_ref() = delete;
      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Tp& __t) : _M_ptr(std::__addressof(__t))
      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }

      __atomic_ref(const __atomic_ref&) noexcept = default;

      _Tp
      operator=(_Tp __t) const noexcept
      {
        this->store(__t);
        return __t;
      }

      operator _Tp() const noexcept { return this->load(); }
      bool
      is_lock_free() const noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>(); }

      void
      store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Tp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      _Tp
      exchange(_Tp __desired, memory_order __m = memory_order_seq_cst)
      const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
                            memory_order __success,
                            memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_weak<true>(
                 _M_ptr, __expected, __desired, __success, __failure);
      }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
                              memory_order __success,
                              memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_strong<true>(
                 _M_ptr, __expected, __desired, __success, __failure);
      }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
                            memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
                              memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }
#if __glibcxx_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Tp __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(_M_ptr, __old, __m); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(_M_ptr); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(_M_ptr); }
#endif // __glibcxx_atomic_wait

    private:
      _Tp* _M_ptr;
    };
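  // Usage sketch (not part of this header): __atomic_ref underlies C++20
  // std::atomic_ref, which performs atomic operations on an ordinary object:
  //
  //   struct __pair { int __x; int __y; };   // trivially copyable; hypothetical
  //   alignas(std::atomic_ref<__pair>::required_alignment) __pair __v{1, 2};
  //   std::atomic_ref<__pair> __r{__v};
  //   __pair __expected = __r.load();
  //   __r.compare_exchange_strong(__expected, {3, 4});
  //
  // All concurrent access to __v must go through atomic_ref objects for the
  // duration of their lifetime.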
  // base class for atomic_ref<integral-type>
  template<typename _Tp>
    struct __atomic_ref<_Tp, true, false>
    {
      static_assert(is_integral_v<_Tp>);

    public:
      using value_type = _Tp;
      using difference_type = value_type;

      static constexpr bool is_always_lock_free
        = __atomic_always_lock_free(sizeof(_Tp), 0);

      static constexpr size_t required_alignment
        = sizeof(_Tp) > alignof(_Tp) ? sizeof(_Tp) : alignof(_Tp);

      __atomic_ref() = delete;
      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Tp& __t) : _M_ptr(&__t)
      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }

      __atomic_ref(const __atomic_ref&) noexcept = default;

      _Tp
      operator=(_Tp __t) const noexcept
      {
        this->store(__t);
        return __t;
      }

      operator _Tp() const noexcept { return this->load(); }
      bool
      is_lock_free() const noexcept
      {
        return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>();
      }

      void
      store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Tp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      _Tp
      exchange(_Tp __desired,
               memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
                            memory_order __success,
                            memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_weak<true>(
                 _M_ptr, __expected, __desired, __success, __failure);
      }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
                              memory_order __success,
                              memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_strong<true>(
                 _M_ptr, __expected, __desired, __success, __failure);
      }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
                            memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
                              memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }
#if __glibcxx_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Tp __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(_M_ptr, __old, __m); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(_M_ptr); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(_M_ptr); }
#endif // __glibcxx_atomic_wait
      value_type
      fetch_add(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_add(_M_ptr, __i, __m); }

      value_type
      fetch_sub(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_sub(_M_ptr, __i, __m); }

      value_type
      fetch_and(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_and(_M_ptr, __i, __m); }

      value_type
      fetch_or(value_type __i,
               memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_or(_M_ptr, __i, __m); }

      value_type
      fetch_xor(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_xor(_M_ptr, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE value_type
      operator++(int) const noexcept
      { return fetch_add(1); }

      _GLIBCXX_ALWAYS_INLINE value_type
      operator--(int) const noexcept
      { return fetch_sub(1); }

      value_type
      operator++() const noexcept
      { return __atomic_impl::__add_fetch(_M_ptr, value_type(1)); }

      value_type
      operator--() const noexcept
      { return __atomic_impl::__sub_fetch(_M_ptr, value_type(1)); }

      value_type
      operator+=(value_type __i) const noexcept
      { return __atomic_impl::__add_fetch(_M_ptr, __i); }

      value_type
      operator-=(value_type __i) const noexcept
      { return __atomic_impl::__sub_fetch(_M_ptr, __i); }

      value_type
      operator&=(value_type __i) const noexcept
      { return __atomic_impl::__and_fetch(_M_ptr, __i); }

      value_type
      operator|=(value_type __i) const noexcept
      { return __atomic_impl::__or_fetch(_M_ptr, __i); }

      value_type
      operator^=(value_type __i) const noexcept
      { return __atomic_impl::__xor_fetch(_M_ptr, __i); }

    private:
      _Tp* _M_ptr;
    };
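  // Example (not part of this header): the integral specialization adds the
  // fetch-and-modify operations, e.g. setting bits in a shared mask:
  //
  //   unsigned __mask = 0;
  //   std::atomic_ref<unsigned> __r{__mask};
  //   __r.fetch_or(1u << 3, std::memory_order_relaxed);   // fetch_or above
  //   __r |= 1u << 5;                                     // __or_fetch, seq_cst
  //   // __mask == 0x28 once no other thread is accessing it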
  // base class for atomic_ref<floating-point-type>
  template<typename _Fp>
    struct __atomic_ref<_Fp, false, true>
    {
      static_assert(is_floating_point_v<_Fp>);

    public:
      using value_type = _Fp;
      using difference_type = value_type;

      static constexpr bool is_always_lock_free
        = __atomic_always_lock_free(sizeof(_Fp), 0);

      static constexpr size_t required_alignment = __alignof__(_Fp);

      __atomic_ref() = delete;
      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Fp& __t) : _M_ptr(&__t)
      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }

      __atomic_ref(const __atomic_ref&) noexcept = default;

      _Fp
      operator=(_Fp __t) const noexcept
      {
        this->store(__t);
        return __t;
      }

      operator _Fp() const noexcept { return this->load(); }
      bool
      is_lock_free() const noexcept
      {
        return __atomic_impl::is_lock_free<sizeof(_Fp), required_alignment>();
      }

      void
      store(_Fp __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Fp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      _Fp
      exchange(_Fp __desired,
               memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __success,
                            memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_weak<true>(
                 _M_ptr, __expected, __desired, __success, __failure);
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __success,
                              memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_strong<true>(
                 _M_ptr, __expected, __desired, __success, __failure);
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }
#if __glibcxx_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Fp __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(_M_ptr, __old, __m); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(_M_ptr); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(_M_ptr); }
#endif // __glibcxx_atomic_wait
      value_type
      fetch_add(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::__fetch_add_flt(_M_ptr, __i, __m); }

      value_type
      fetch_sub(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::__fetch_sub_flt(_M_ptr, __i, __m); }

      value_type
      operator+=(value_type __i) const noexcept
      { return __atomic_impl::__add_fetch_flt(_M_ptr, __i); }

      value_type
      operator-=(value_type __i) const noexcept
      { return __atomic_impl::__sub_fetch_flt(_M_ptr, __i); }

    private:
      _Fp* _M_ptr;
    };
  // base class for atomic_ref<pointer-type>
  template<typename _Tp>
    struct __atomic_ref<_Tp*, false, false>
    {
    public:
      using value_type = _Tp*;
      using difference_type = ptrdiff_t;

      static constexpr bool is_always_lock_free = ATOMIC_POINTER_LOCK_FREE == 2;

      static constexpr size_t required_alignment = __alignof__(_Tp*);

      __atomic_ref() = delete;
      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Tp*& __t) : _M_ptr(std::__addressof(__t))
      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }

      __atomic_ref(const __atomic_ref&) noexcept = default;

      _Tp*
      operator=(_Tp* __t) const noexcept
      {
        this->store(__t);
        return __t;
      }

      operator _Tp*() const noexcept { return this->load(); }
      bool
      is_lock_free() const noexcept
      {
        return __atomic_impl::is_lock_free<sizeof(_Tp*), required_alignment>();
      }

      void
      store(_Tp* __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Tp*
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      _Tp*
      exchange(_Tp* __desired,
               memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }

      bool
      compare_exchange_weak(_Tp*& __expected, _Tp* __desired,
                            memory_order __success,
                            memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_weak<true>(
                 _M_ptr, __expected, __desired, __success, __failure);
      }

      bool
      compare_exchange_strong(_Tp*& __expected, _Tp* __desired,
                              memory_order __success,
                              memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_strong<true>(
                 _M_ptr, __expected, __desired, __success, __failure);
      }

      bool
      compare_exchange_weak(_Tp*& __expected, _Tp* __desired,
                            memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Tp*& __expected, _Tp* __desired,
                              memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }
#if __glibcxx_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Tp* __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(_M_ptr, __old, __m); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(_M_ptr); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(_M_ptr); }
#endif // __glibcxx_atomic_wait
      _GLIBCXX_ALWAYS_INLINE value_type
      fetch_add(difference_type __d,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_add(_M_ptr, _S_type_size(__d), __m); }

      _GLIBCXX_ALWAYS_INLINE value_type
      fetch_sub(difference_type __d,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_sub(_M_ptr, _S_type_size(__d), __m); }

      value_type
      operator++(int) const noexcept
      { return fetch_add(1); }

      value_type
      operator--(int) const noexcept
      { return fetch_sub(1); }

      value_type
      operator++() const noexcept
      {
        return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(1));
      }

      value_type
      operator--() const noexcept
      {
        return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(1));
      }

      value_type
      operator+=(difference_type __d) const noexcept
      {
        return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(__d));
      }

      value_type
      operator-=(difference_type __d) const noexcept
      {
        return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(__d));
      }

    private:
      static constexpr ptrdiff_t
      _S_type_size(ptrdiff_t __d) noexcept
      {
        static_assert(is_object_v<_Tp>);
        return __d * sizeof(_Tp);
      }

      _Tp** _M_ptr;
    };
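  // Example (not part of this header): pointer arithmetic through atomic_ref
  // is scaled by _S_type_size, i.e. in units of _Tp:
  //
  //   int __arr[4]{};                  // hypothetical
  //   int* __p = __arr;
  //   std::atomic_ref<int*> __r{__p};
  //   __r.fetch_add(2);                // adds 2 * sizeof(int) bytes
  //   // __p == __arr + 2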
#endif // C++2a

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std

#endif // _GLIBCXX_ATOMIC_BASE_H