 #include <__memory/addressof.h>
 #include <__thread/poll_with_backoff.h>
 #include <__thread/support.h>
+#include <__type_traits/conjunction.h>
 #include <__type_traits/decay.h>
+#include <__type_traits/invoke.h>
+#include <__type_traits/void_t.h>
+#include <__utility/declval.h>
 #include <cstring>

 #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
 #  pragma GCC system_header
 #endif

 _LIBCPP_BEGIN_NAMESPACE_STD

-template <class _Atp, class _Poll>
-struct __libcpp_atomic_wait_poll_impl {
-  _Atp* __a_;
+// The customisation points to enable the following functions:
+// - __atomic_wait
+// - __atomic_wait_unless
+// - __atomic_notify_one
+// - __atomic_notify_all
+// Note that std::atomic<T>::wait was back-ported to C++03.
+// The implementations below look ugly in order to support C++03.
+template <class _Tp, class = void>
+struct __atomic_waitable_traits {
+  template <class _AtomicWaitable>
+  static void __atomic_load(_AtomicWaitable&&, memory_order) = delete;
+
+  template <class _AtomicWaitable>
+  static void __atomic_contention_address(_AtomicWaitable&&) = delete;
+};
+
+template <class _Tp, class = void>
+struct __atomic_waitable : false_type {};
+
+template <class _Tp>
+struct __atomic_waitable< _Tp,
+                          __void_t<decltype(__atomic_waitable_traits<__decay_t<_Tp> >::__atomic_load(
+                                       std::declval<const _Tp&>(), std::declval<memory_order>())),
+                                   decltype(__atomic_waitable_traits<__decay_t<_Tp> >::__atomic_contention_address(
+                                       std::declval<const _Tp&>()))> > : true_type {};
+
+template <class _AtomicWaitable, class _Poll>
+struct __atomic_wait_poll_impl {
+  const _AtomicWaitable& __a_;
   _Poll __poll_;
   memory_order __order_;

-  _LIBCPP_AVAILABILITY_SYNC
   _LIBCPP_HIDE_FROM_ABI bool operator()() const {
-    auto __current_val = std::__cxx_atomic_load(__a_, __order_);
+    auto __current_val = __atomic_waitable_traits<__decay_t<_AtomicWaitable> >::__atomic_load(__a_, __order_);
     return __poll_(__current_val);
   }
 };
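The detection machinery added above pairs a traits primary template whose members are deleted with a `__void_t`-based partial specialization, so a type counts as waitable only if someone has specialized the traits for it. A self-contained analogue of that idiom, using illustrative names rather than libc++'s, might look like this:

```cpp
#include <type_traits>
#include <utility>

// Primary template: the member is deleted, so a type that nobody has
// written a traits specialization for never satisfies the detector below.
template <class T, class = void>
struct waitable_traits {
  template <class U>
  static void load(U&&) = delete;
};

// Detector: false by default...
template <class T, class = void>
struct is_waitable : std::false_type {};

// ...and true exactly when calling waitable_traits<T>::load on a const T
// is well-formed, i.e. when the traits have been specialized for T.
template <class T>
struct is_waitable<T,
                   std::void_t<decltype(waitable_traits<std::decay_t<T>>::load(std::declval<const T&>()))>>
    : std::true_type {};

struct Plain {};
struct Counter {
  int value;
};

// Opting in: specialize the traits for Counter.
template <>
struct waitable_traits<Counter> {
  static int load(const Counter& c) { return c.value; }
};

static_assert(!is_waitable<Plain>::value, "no specialization, not waitable");
static_assert(is_waitable<Counter>::value, "specialized, hence waitable");
```

Calling the deleted member fails in the immediate context of the partial specialization's substitution, so unspecialized types fall back to `false_type` rather than producing a hard error; this is the same mechanism `__atomic_waitable` relies on.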
@@ -56,42 +85,45 @@ __libcpp_atomic_monitor(__cxx_atomic_contention_t const volatile*);
 _LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI void
 __libcpp_atomic_wait(__cxx_atomic_contention_t const volatile*, __cxx_contention_t);

-template <class _Atp, class _Poll>
-struct __libcpp_atomic_wait_backoff_impl {
-  _Atp* __a_;
+template <class _AtomicWaitable, class _Poll>
+struct __atomic_wait_backoff_impl {
+  const _AtomicWaitable& __a_;
   _Poll __poll_;
   memory_order __order_;

+  using __waitable_traits = __atomic_waitable_traits<__decay_t<_AtomicWaitable> >;
+
   _LIBCPP_AVAILABILITY_SYNC
   _LIBCPP_HIDE_FROM_ABI bool
-  __poll_or_get_monitor(__cxx_atomic_contention_t const volatile*, __cxx_contention_t& __monitor) const {
-    // In case the atomic can be waited on directly, the monitor value is just
-    // the value of the atomic.
+  __update_monitor_val_and_poll(__cxx_atomic_contention_t const volatile*, __cxx_contention_t& __monitor_val) const {
+    // In case the contention type happens to be __cxx_atomic_contention_t, i.e. __cxx_atomic_impl<int64_t>,
+    // the platform wait is directly monitoring the atomic value itself.
     // `__poll_` takes the current value of the atomic as an in-out argument
     // to potentially modify it. After it returns, `__monitor` has a value
     // which can be safely waited on by `std::__libcpp_atomic_wait` without any
     // ABA style issues.
-    __monitor = std::__cxx_atomic_load(__a_, __order_);
-    return __poll_(__monitor);
+    __monitor_val = __waitable_traits::__atomic_load(__a_, __order_);
+    return __poll_(__monitor_val);
   }

   _LIBCPP_AVAILABILITY_SYNC
-  _LIBCPP_HIDE_FROM_ABI bool __poll_or_get_monitor(void const volatile*, __cxx_contention_t& __monitor) const {
-    // In case we must wait on an atomic from the pool, the monitor comes from
-    // `std::__libcpp_atomic_monitor`.
-    // Only then we may read from `__a_`. This is the "event count" pattern.
-    __monitor = std::__libcpp_atomic_monitor(__a_);
-    auto __current_val = std::__cxx_atomic_load(__a_, __order_);
+  _LIBCPP_HIDE_FROM_ABI bool
+  __update_monitor_val_and_poll(void const volatile* __contention_address, __cxx_contention_t& __monitor_val) const {
+    // In case the contention type is anything else, the platform wait is monitoring a __cxx_atomic_contention_t
+    // from the global pool; the monitor value comes from __libcpp_atomic_monitor.
+    __monitor_val = std::__libcpp_atomic_monitor(__contention_address);
+    auto __current_val = __waitable_traits::__atomic_load(__a_, __order_);
     return __poll_(__current_val);
   }

   _LIBCPP_AVAILABILITY_SYNC
   _LIBCPP_HIDE_FROM_ABI bool operator()(chrono::nanoseconds __elapsed) const {
     if (__elapsed > chrono::microseconds(64)) {
-      __cxx_contention_t __monitor;
-      if (__poll_or_get_monitor(__a_, __monitor))
+      auto __contention_address = __waitable_traits::__atomic_contention_address(__a_);
+      __cxx_contention_t __monitor_val;
+      if (__update_monitor_val_and_poll(__contention_address, __monitor_val))
         return true;
-      std::__libcpp_atomic_wait(__a_, __monitor);
+      std::__libcpp_atomic_wait(__contention_address, __monitor_val);
     } else if (__elapsed > chrono::microseconds(4))
       __libcpp_thread_yield();
     else {
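The backoff functor above is the second half of a poll-then-block protocol: a generic driver spins on the poll functor and periodically hands the elapsed time to the backoff, which escalates from busy-waiting, to yielding past 4µs, to a platform wait past 64µs. A simplified sketch of such a driver loop (assumed semantics, not the exact body of `__libcpp_thread_poll_with_backoff`):

```cpp
#include <chrono>

// Simplified driver sketch: spin on `poll`, and once per iteration give the
// backoff policy the elapsed time so it can spin, yield, or block.
// Returning true from the backoff means "stop polling".
template <class Poll, class Backoff>
bool poll_with_backoff(Poll poll, Backoff backoff) {
  auto const start = std::chrono::high_resolution_clock::now();
  while (true) {
    if (poll())
      return true; // condition already satisfied
    auto const elapsed = std::chrono::duration_cast<std::chrono::nanoseconds>(
        std::chrono::high_resolution_clock::now() - start);
    if (backoff(elapsed))
      return false; // backoff signalled completion or gave up
  }
}
```

The ordering inside the 64µs branch is what makes the pool case safe: the monitor is loaded before the value is re-polled, so a notifier that changes the value after that load also bumps the pool's event count, and the subsequent `__libcpp_atomic_wait` on the stale monitor value returns immediately instead of sleeping through the wakeup.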
@@ -105,29 +137,44 @@ struct __libcpp_atomic_wait_backoff_impl {
 // predicate (is the loaded value unequal to `old`?), the predicate function is
 // specified as an argument. The loaded value is given as an in-out argument to
 // the predicate. If the predicate function returns `true`,
-// `_cxx_atomic_wait_unless` will return. If the predicate function returns
+// `__atomic_wait_unless` will return. If the predicate function returns
 // `false`, it must set the argument to its current understanding of the atomic
 // value. The predicate function must not return `false` spuriously.
-template <class _Atp, class _Poll>
+template <class _AtomicWaitable, class _Poll>
 _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void
-__cxx_atomic_wait_unless(_Atp* __a, _Poll&& __poll, memory_order __order) {
-  __libcpp_atomic_wait_poll_impl<_Atp, __decay_t<_Poll> > __poll_fn = {__a, __poll, __order};
-  __libcpp_atomic_wait_backoff_impl<_Atp, __decay_t<_Poll> > __backoff_fn = {__a, __poll, __order};
-  (void)std::__libcpp_thread_poll_with_backoff(__poll_fn, __backoff_fn);
+__atomic_wait_unless(const _AtomicWaitable& __a, _Poll&& __poll, memory_order __order) {
+  static_assert(__atomic_waitable<_AtomicWaitable>::value, "");
+  __atomic_wait_poll_impl<_AtomicWaitable, __decay_t<_Poll> > __poll_impl = {__a, __poll, __order};
+  __atomic_wait_backoff_impl<_AtomicWaitable, __decay_t<_Poll> > __backoff_fn = {__a, __poll, __order};
+  std::__libcpp_thread_poll_with_backoff(__poll_impl, __backoff_fn);
+}
+
+template <class _AtomicWaitable>
+_LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void __atomic_notify_one(const _AtomicWaitable& __a) {
+  static_assert(__atomic_waitable<_AtomicWaitable>::value, "");
+  std::__cxx_atomic_notify_one(__atomic_waitable_traits<__decay_t<_AtomicWaitable> >::__atomic_contention_address(__a));
+}
+
+template <class _AtomicWaitable>
+_LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void __atomic_notify_all(const _AtomicWaitable& __a) {
+  static_assert(__atomic_waitable<_AtomicWaitable>::value, "");
+  std::__cxx_atomic_notify_all(__atomic_waitable_traits<__decay_t<_AtomicWaitable> >::__atomic_contention_address(__a));
 }

 #else // _LIBCPP_HAS_NO_THREADS

-template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_notify_all(__cxx_atomic_impl<_Tp> const volatile*) {}
-template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_notify_one(__cxx_atomic_impl<_Tp> const volatile*) {}
-template <class _Atp, class _Poll>
-_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_wait_unless(_Atp* __a, _Poll&& __poll, memory_order __order) {
-  __libcpp_atomic_wait_poll_impl<_Atp, __decay_t<_Poll> > __poll_fn = {__a, __poll, __order};
-  (void)std::__libcpp_thread_poll_with_backoff(__poll_fn, __spinning_backoff_policy());
+template <class _AtomicWaitable, class _Poll>
+_LIBCPP_HIDE_FROM_ABI void __atomic_wait_unless(const _AtomicWaitable& __a, _Poll&& __poll, memory_order __order) {
+  __atomic_wait_poll_impl<_AtomicWaitable, __decay_t<_Poll> > __poll_fn = {__a, __poll, __order};
+  std::__libcpp_thread_poll_with_backoff(__poll_fn, __spinning_backoff_policy());
 }

+template <class _AtomicWaitable>
+_LIBCPP_HIDE_FROM_ABI void __atomic_notify_one(const _AtomicWaitable&) {}
+
+template <class _AtomicWaitable>
+_LIBCPP_HIDE_FROM_ABI void __atomic_notify_all(const _AtomicWaitable&) {}
+
 #endif // _LIBCPP_HAS_NO_THREADS

 template <typename _Tp>
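As a usage illustration of the contract documented above (hypothetical caller code, not part of the patch): the predicate receives the loaded value by reference and, whenever it returns `false`, must leave a genuinely observed value behind, since the backoff functor uses that value as its wait monitor. Waiting for a waitable object to become nonzero could be sketched as follows, assuming `Waitable` satisfies `__atomic_waitable` and its `__atomic_load` yields an `int64_t`-like value:

```cpp
#include <atomic>
#include <cstdint>

// Hypothetical helper; `Waitable`, `flag`, and the value type are
// assumptions for illustration only.
template <class Waitable>
void wait_until_nonzero(const Waitable& flag) {
  std::__atomic_wait_unless(
      flag,
      [](std::int64_t& value) {
        // On `false`, `value` already holds the load we actually observed,
        // which the backoff can safely wait on; never return `false`
        // spuriously.
        return value != 0;
      },
      std::memory_order_acquire);
}
```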
@@ -143,11 +190,12 @@ struct __atomic_compare_unequal_to {
   }
 };

-template <class _Atp, class _Tp>
+template <class _AtomicWaitable, class _Up>
 _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void
-__cxx_atomic_wait(_Atp* __a, _Tp const __val, memory_order __order) {
-  __atomic_compare_unequal_to<_Tp> __poll_fn = {__val};
-  std::__cxx_atomic_wait_unless(__a, __poll_fn, __order);
+__atomic_wait(_AtomicWaitable& __a, _Up __val, memory_order __order) {
+  static_assert(__atomic_waitable<_AtomicWaitable>::value, "");
+  __atomic_compare_unequal_to<_Up> __nonatomic_equal = {__val};
+  std::__atomic_wait_unless(__a, __nonatomic_equal, __order);
 }

 _LIBCPP_END_NAMESPACE_STD
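`__atomic_wait` thus reduces `std::atomic<T>::wait(old)` semantics to `__atomic_wait_unless` with an "unequal to the snapshot" predicate. The comparison is nonatomic and bytewise (this header includes `<cstring>` for that reason), which the following self-contained illustration mimics with illustrative names:

```cpp
#include <cstring>

// Illustrative stand-in for the predicate __atomic_wait builds: it reports
// "stop waiting" exactly when the observed value differs bytewise from the
// snapshot, matching std::atomic<T>::wait(old) semantics.
template <class T>
struct compare_unequal_to {
  T val;
  bool operator()(T& arg) const { return std::memcmp(&arg, &val, sizeof(T)) != 0; }
};

int main() {
  compare_unequal_to<int> pred{42};
  int observed = 42;
  bool changed = pred(observed); // false: still equal to the snapshot, keep waiting
  observed = 43;
  changed = pred(observed);      // true: the value moved on, the wait may return
  return changed ? 0 : 1;
}
```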