Commit 1f613bc

[libc++] refactor cxx_atomic_wait to make it reusable for atomic_ref (#81427)
The goal of this patch is to make `atomic`'s wait functions reusable by `atomic_ref` (#76647). It is built on top of #80596 to reduce future merge conflicts.

This patch turns the following functions into "APIs" used by `atomic`, `atomic_flag`, `semaphore`, `latch`, and (eventually) `atomic_ref`:

```
__atomic_wait
__atomic_wait_unless
__atomic_notify_one
__atomic_notify_all
```

These functions are generic over the waitable type and expose two customisation points:

```
// How to load the value from the given type (with a memory order)
__atomic_load
```

```
// The contention address that the platform `wait` function is going to monitor
__atomic_contention_address
```

For `atomic_ref` (not implemented in this patch), the `load` and `address` functions will be different, because:
- it does not use the "atomic abstraction layer", so the `load` operation will be a GCC builtin
- the contention address will be the user's actual object that the `atomic_ref` refers to
1 parent 2a95fe4 commit 1f613bc
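
The commit message only sketches the follow-up. Purely as a hedged illustration (none of this is in the commit), an `atomic_ref` opt-in through the new customisation points could look roughly like the following; the member name `__ptr_` and the order-conversion helper `__to_gcc_order` are assumed names, not taken from this patch:

```cpp
// Hypothetical follow-up sketch, NOT part of this commit: how atomic_ref<_Tp>
// might satisfy the two customisation points. __ptr_ and __to_gcc_order are
// assumed names used only for illustration.
template <class _Tp>
struct __atomic_waitable_traits<atomic_ref<_Tp> > {
  static _Tp __atomic_load(const atomic_ref<_Tp>& __a, memory_order __order) {
    // atomic_ref bypasses the __cxx_atomic_* abstraction layer, so the load
    // would be a compiler builtin applied to the referenced object.
    return __atomic_load_n(__a.__ptr_, __to_gcc_order(__order));
  }

  static const _Tp* __atomic_contention_address(const atomic_ref<_Tp>& __a) {
    // The platform wait monitors the user's own object rather than an
    // internal __cxx_atomic_impl member.
    return __a.__ptr_;
  }
};
```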

File tree: 5 files changed, 154 additions & 61 deletions


libcxx/include/__atomic/atomic_base.h

Lines changed: 32 additions & 10 deletions
@@ -104,24 +104,20 @@ struct __atomic_base // false
 
   _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void wait(_Tp __v, memory_order __m = memory_order_seq_cst) const
       volatile _NOEXCEPT {
-    std::__cxx_atomic_wait(std::addressof(__a_), __v, __m);
+    std::__atomic_wait(*this, __v, __m);
   }
   _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void
   wait(_Tp __v, memory_order __m = memory_order_seq_cst) const _NOEXCEPT {
-    std::__cxx_atomic_wait(std::addressof(__a_), __v, __m);
+    std::__atomic_wait(*this, __v, __m);
   }
   _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_one() volatile _NOEXCEPT {
-    std::__cxx_atomic_notify_one(std::addressof(__a_));
-  }
-  _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_one() _NOEXCEPT {
-    std::__cxx_atomic_notify_one(std::addressof(__a_));
+    std::__atomic_notify_one(*this);
   }
+  _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_one() _NOEXCEPT { std::__atomic_notify_one(*this); }
   _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_all() volatile _NOEXCEPT {
-    std::__cxx_atomic_notify_all(std::addressof(__a_));
-  }
-  _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_all() _NOEXCEPT {
-    std::__cxx_atomic_notify_all(std::addressof(__a_));
+    std::__atomic_notify_all(*this);
   }
+  _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_all() _NOEXCEPT { std::__atomic_notify_all(*this); }
 
 #if _LIBCPP_STD_VER >= 20
   _LIBCPP_HIDE_FROM_ABI constexpr __atomic_base() noexcept(is_nothrow_default_constructible_v<_Tp>) : __a_(_Tp()) {}
@@ -200,6 +196,32 @@ struct __atomic_base<_Tp, true> : public __atomic_base<_Tp, false> {
   _LIBCPP_HIDE_FROM_ABI _Tp operator^=(_Tp __op) _NOEXCEPT { return fetch_xor(__op) ^ __op; }
 };
 
+// Here we need _IsIntegral because the default template argument is not enough
+// e.g __atomic_base<int> is __atomic_base<int, true>, which inherits from
+// __atomic_base<int, false> and the caller of the wait function is
+// __atomic_base<int, false>. So specializing __atomic_base<_Tp> does not work
+template <class _Tp, bool _IsIntegral>
+struct __atomic_waitable_traits<__atomic_base<_Tp, _IsIntegral> > {
+  static _LIBCPP_HIDE_FROM_ABI _Tp __atomic_load(const __atomic_base<_Tp, _IsIntegral>& __a, memory_order __order) {
+    return __a.load(__order);
+  }
+
+  static _LIBCPP_HIDE_FROM_ABI _Tp
+  __atomic_load(const volatile __atomic_base<_Tp, _IsIntegral>& __this, memory_order __order) {
+    return __this.load(__order);
+  }
+
+  static _LIBCPP_HIDE_FROM_ABI const __cxx_atomic_impl<_Tp>*
+  __atomic_contention_address(const __atomic_base<_Tp, _IsIntegral>& __a) {
+    return std::addressof(__a.__a_);
+  }
+
+  static _LIBCPP_HIDE_FROM_ABI const volatile __cxx_atomic_impl<_Tp>*
+  __atomic_contention_address(const volatile __atomic_base<_Tp, _IsIntegral>& __this) {
+    return std::addressof(__this.__a_);
+  }
+};
+
 _LIBCPP_END_NAMESPACE_STD
 
 #endif // _LIBCPP___ATOMIC_ATOMIC_BASE_H

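The comment on the new `__atomic_waitable_traits` specialization above is terse, so here is a small standalone toy (made-up names, not libc++ code) showing why the specialization must spell out the bool parameter instead of relying on the default template argument:

```cpp
#include <type_traits>

// Plays the role of __atomic_base: the primary template is the "false" layer
// where wait() lives; the true specialization adds the integral extras.
template <class T, bool IsIntegral = std::is_integral<T>::value>
struct AtomicBase {
  void wait() const; // would dispatch through Traits<AtomicBase<T, IsIntegral>> on *this
};

template <class T>
struct AtomicBase<T, true> : AtomicBase<T, false> {};

template <class>
struct Traits; // plays the role of __atomic_waitable_traits

// Matches the caller's type for every instantiation, including the
// AtomicBase<int, false> base that AtomicBase<int> (== AtomicBase<int, true>)
// inherits wait() from.
template <class T, bool IsIntegral>
struct Traits<AtomicBase<T, IsIntegral> > {};

// A specialization written as Traits<AtomicBase<T> > would expand the default
// argument to Traits<AtomicBase<T, std::is_integral<T>::value> >, i.e.
// Traits<AtomicBase<int, true> > for int; it would never match the
// AtomicBase<int, false> layer that actually calls wait().
```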
libcxx/include/__atomic/atomic_flag.h

Lines changed: 29 additions & 6 deletions
@@ -16,6 +16,7 @@
 #include <__availability>
 #include <__chrono/duration.h>
 #include <__config>
+#include <__memory/addressof.h>
 #include <__thread/support.h>
 #include <cstdint>
 
@@ -50,20 +51,20 @@ struct atomic_flag {
 
   _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void wait(bool __v, memory_order __m = memory_order_seq_cst) const
       volatile _NOEXCEPT {
-    __cxx_atomic_wait(&__a_, _LIBCPP_ATOMIC_FLAG_TYPE(__v), __m);
+    std::__atomic_wait(*this, _LIBCPP_ATOMIC_FLAG_TYPE(__v), __m);
   }
   _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void
   wait(bool __v, memory_order __m = memory_order_seq_cst) const _NOEXCEPT {
-    __cxx_atomic_wait(&__a_, _LIBCPP_ATOMIC_FLAG_TYPE(__v), __m);
+    std::__atomic_wait(*this, _LIBCPP_ATOMIC_FLAG_TYPE(__v), __m);
   }
   _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_one() volatile _NOEXCEPT {
-    __cxx_atomic_notify_one(&__a_);
+    std::__atomic_notify_one(*this);
   }
-  _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_one() _NOEXCEPT { __cxx_atomic_notify_one(&__a_); }
+  _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_one() _NOEXCEPT { std::__atomic_notify_one(*this); }
   _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_all() volatile _NOEXCEPT {
-    __cxx_atomic_notify_all(&__a_);
+    std::__atomic_notify_all(*this);
   }
-  _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_all() _NOEXCEPT { __cxx_atomic_notify_all(&__a_); }
+  _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_all() _NOEXCEPT { std::__atomic_notify_all(*this); }
 
 #if _LIBCPP_STD_VER >= 20
   _LIBCPP_HIDE_FROM_ABI constexpr atomic_flag() _NOEXCEPT : __a_(false) {}
@@ -78,6 +79,28 @@ struct atomic_flag {
   atomic_flag& operator=(const atomic_flag&) volatile = delete;
 };
 
+template <>
+struct __atomic_waitable_traits<atomic_flag> {
+  static _LIBCPP_HIDE_FROM_ABI _LIBCPP_ATOMIC_FLAG_TYPE __atomic_load(const atomic_flag& __a, memory_order __order) {
+    return std::__cxx_atomic_load(&__a.__a_, __order);
+  }
+
+  static _LIBCPP_HIDE_FROM_ABI _LIBCPP_ATOMIC_FLAG_TYPE
+  __atomic_load(const volatile atomic_flag& __a, memory_order __order) {
+    return std::__cxx_atomic_load(&__a.__a_, __order);
+  }
+
+  static _LIBCPP_HIDE_FROM_ABI const __cxx_atomic_impl<_LIBCPP_ATOMIC_FLAG_TYPE>*
+  __atomic_contention_address(const atomic_flag& __a) {
+    return std::addressof(__a.__a_);
+  }
+
+  static _LIBCPP_HIDE_FROM_ABI const volatile __cxx_atomic_impl<_LIBCPP_ATOMIC_FLAG_TYPE>*
+  __atomic_contention_address(const volatile atomic_flag& __a) {
+    return std::addressof(__a.__a_);
+  }
+};
+
 inline _LIBCPP_HIDE_FROM_ABI bool atomic_flag_test(const volatile atomic_flag* __o) _NOEXCEPT { return __o->test(); }
 
 inline _LIBCPP_HIDE_FROM_ABI bool atomic_flag_test(const atomic_flag* __o) _NOEXCEPT { return __o->test(); }

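With this trait specialization in place, `atomic_flag` satisfies the `__atomic_waitable` detection trait added in `atomic_sync.h` below, while types that provide neither customisation point do not. An illustrative (non-normative) sanity check against the internals introduced by this patch:

```cpp
#include <atomic>

// Illustrative only; these names are libc++ internals from this patch.
static_assert(std::__atomic_waitable<std::atomic_flag>::value,
              "atomic_flag opts into the generic wait/notify machinery");
static_assert(!std::__atomic_waitable<int>::value,
              "a plain int provides neither customisation point");
```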
libcxx/include/__atomic/atomic_sync.h

Lines changed: 88 additions & 40 deletions
@@ -18,7 +18,11 @@
 #include <__memory/addressof.h>
 #include <__thread/poll_with_backoff.h>
 #include <__thread/support.h>
+#include <__type_traits/conjunction.h>
 #include <__type_traits/decay.h>
+#include <__type_traits/invoke.h>
+#include <__type_traits/void_t.h>
+#include <__utility/declval.h>
 #include <cstring>
 
 #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
@@ -27,15 +31,40 @@
 
 _LIBCPP_BEGIN_NAMESPACE_STD
 
-template <class _Atp, class _Poll>
-struct __libcpp_atomic_wait_poll_impl {
-  _Atp* __a_;
+// The customisation points to enable the following functions:
+// - __atomic_wait
+// - __atomic_wait_unless
+// - __atomic_notify_one
+// - __atomic_notify_all
+// Note that std::atomic<T>::wait was back-ported to C++03
+// The below implementations look ugly to support C++03
+template <class _Tp, class = void>
+struct __atomic_waitable_traits {
+  template <class _AtomicWaitable>
+  static void __atomic_load(_AtomicWaitable&&, memory_order) = delete;
+
+  template <class _AtomicWaitable>
+  static void __atomic_contention_address(_AtomicWaitable&&) = delete;
+};
+
+template <class _Tp, class = void>
+struct __atomic_waitable : false_type {};
+
+template <class _Tp>
+struct __atomic_waitable< _Tp,
+                          __void_t<decltype(__atomic_waitable_traits<__decay_t<_Tp> >::__atomic_load(
+                                       std::declval<const _Tp&>(), std::declval<memory_order>())),
+                                   decltype(__atomic_waitable_traits<__decay_t<_Tp> >::__atomic_contention_address(
+                                       std::declval<const _Tp&>()))> > : true_type {};
+
+template <class _AtomicWaitable, class _Poll>
+struct __atomic_wait_poll_impl {
+  const _AtomicWaitable& __a_;
   _Poll __poll_;
   memory_order __order_;
 
-  _LIBCPP_AVAILABILITY_SYNC
   _LIBCPP_HIDE_FROM_ABI bool operator()() const {
-    auto __current_val = std::__cxx_atomic_load(__a_, __order_);
+    auto __current_val = __atomic_waitable_traits<__decay_t<_AtomicWaitable> >::__atomic_load(__a_, __order_);
     return __poll_(__current_val);
   }
 };
@@ -56,42 +85,45 @@ __libcpp_atomic_monitor(__cxx_atomic_contention_t const volatile*);
 _LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI void
 __libcpp_atomic_wait(__cxx_atomic_contention_t const volatile*, __cxx_contention_t);
 
-template <class _Atp, class _Poll>
-struct __libcpp_atomic_wait_backoff_impl {
-  _Atp* __a_;
+template <class _AtomicWaitable, class _Poll>
+struct __atomic_wait_backoff_impl {
+  const _AtomicWaitable& __a_;
   _Poll __poll_;
   memory_order __order_;
 
+  using __waitable_traits = __atomic_waitable_traits<__decay_t<_AtomicWaitable> >;
+
   _LIBCPP_AVAILABILITY_SYNC
   _LIBCPP_HIDE_FROM_ABI bool
-  __poll_or_get_monitor(__cxx_atomic_contention_t const volatile*, __cxx_contention_t& __monitor) const {
-    // In case the atomic can be waited on directly, the monitor value is just
-    // the value of the atomic.
+  __update_monitor_val_and_poll(__cxx_atomic_contention_t const volatile*, __cxx_contention_t& __monitor_val) const {
+    // In case the contention type happens to be __cxx_atomic_contention_t, i.e. __cxx_atomic_impl<int64_t>,
+    // the platform wait is directly monitoring the atomic value itself.
     // `__poll_` takes the current value of the atomic as an in-out argument
     // to potentially modify it. After it returns, `__monitor` has a value
     // which can be safely waited on by `std::__libcpp_atomic_wait` without any
     // ABA style issues.
-    __monitor = std::__cxx_atomic_load(__a_, __order_);
-    return __poll_(__monitor);
+    __monitor_val = __waitable_traits::__atomic_load(__a_, __order_);
+    return __poll_(__monitor_val);
   }
 
   _LIBCPP_AVAILABILITY_SYNC
-  _LIBCPP_HIDE_FROM_ABI bool __poll_or_get_monitor(void const volatile*, __cxx_contention_t& __monitor) const {
-    // In case we must wait on an atomic from the pool, the monitor comes from
-    // `std::__libcpp_atomic_monitor`.
-    // Only then we may read from `__a_`. This is the "event count" pattern.
-    __monitor = std::__libcpp_atomic_monitor(__a_);
-    auto __current_val = std::__cxx_atomic_load(__a_, __order_);
+  _LIBCPP_HIDE_FROM_ABI bool
+  __update_monitor_val_and_poll(void const volatile* __contention_address, __cxx_contention_t& __monitor_val) const {
+    // In case the contention type is anything else, platform wait is monitoring a __cxx_atomic_contention_t
+    // from the global pool, the monitor comes from __libcpp_atomic_monitor
+    __monitor_val = std::__libcpp_atomic_monitor(__contention_address);
+    auto __current_val = __waitable_traits::__atomic_load(__a_, __order_);
     return __poll_(__current_val);
   }
 
   _LIBCPP_AVAILABILITY_SYNC
   _LIBCPP_HIDE_FROM_ABI bool operator()(chrono::nanoseconds __elapsed) const {
     if (__elapsed > chrono::microseconds(64)) {
-      __cxx_contention_t __monitor;
-      if (__poll_or_get_monitor(__a_, __monitor))
+      auto __contention_address = __waitable_traits::__atomic_contention_address(__a_);
+      __cxx_contention_t __monitor_val;
+      if (__update_monitor_val_and_poll(__contention_address, __monitor_val))
        return true;
-      std::__libcpp_atomic_wait(__a_, __monitor);
+      std::__libcpp_atomic_wait(__contention_address, __monitor_val);
     } else if (__elapsed > chrono::microseconds(4))
       __libcpp_thread_yield();
     else {
@@ -105,29 +137,44 @@ struct __libcpp_atomic_wait_backoff_impl {
 // predicate (is the loaded value unequal to `old`?), the predicate function is
 // specified as an argument. The loaded value is given as an in-out argument to
 // the predicate. If the predicate function returns `true`,
-// `_cxx_atomic_wait_unless` will return. If the predicate function returns
+// `__atomic_wait_unless` will return. If the predicate function returns
 // `false`, it must set the argument to its current understanding of the atomic
 // value. The predicate function must not return `false` spuriously.
-template <class _Atp, class _Poll>
+template <class _AtomicWaitable, class _Poll>
 _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void
-__cxx_atomic_wait_unless(_Atp* __a, _Poll&& __poll, memory_order __order) {
-  __libcpp_atomic_wait_poll_impl<_Atp, __decay_t<_Poll> > __poll_fn = {__a, __poll, __order};
-  __libcpp_atomic_wait_backoff_impl<_Atp, __decay_t<_Poll> > __backoff_fn = {__a, __poll, __order};
-  (void)std::__libcpp_thread_poll_with_backoff(__poll_fn, __backoff_fn);
+__atomic_wait_unless(const _AtomicWaitable& __a, _Poll&& __poll, memory_order __order) {
+  static_assert(__atomic_waitable<_AtomicWaitable>::value, "");
+  __atomic_wait_poll_impl<_AtomicWaitable, __decay_t<_Poll> > __poll_impl = {__a, __poll, __order};
+  __atomic_wait_backoff_impl<_AtomicWaitable, __decay_t<_Poll> > __backoff_fn = {__a, __poll, __order};
+  std::__libcpp_thread_poll_with_backoff(__poll_impl, __backoff_fn);
+}
+
+template <class _AtomicWaitable>
+_LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void __atomic_notify_one(const _AtomicWaitable& __a) {
+  static_assert(__atomic_waitable<_AtomicWaitable>::value, "");
+  std::__cxx_atomic_notify_one(__atomic_waitable_traits<__decay_t<_AtomicWaitable> >::__atomic_contention_address(__a));
+}
+
+template <class _AtomicWaitable>
+_LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void __atomic_notify_all(const _AtomicWaitable& __a) {
+  static_assert(__atomic_waitable<_AtomicWaitable>::value, "");
+  std::__cxx_atomic_notify_all(__atomic_waitable_traits<__decay_t<_AtomicWaitable> >::__atomic_contention_address(__a));
 }
 
 #else // _LIBCPP_HAS_NO_THREADS
 
-template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_notify_all(__cxx_atomic_impl<_Tp> const volatile*) {}
-template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_notify_one(__cxx_atomic_impl<_Tp> const volatile*) {}
-template <class _Atp, class _Poll>
-_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_wait_unless(_Atp* __a, _Poll&& __poll, memory_order __order) {
-  __libcpp_atomic_wait_poll_impl<_Atp, __decay_t<_Poll> > __poll_fn = {__a, __poll, __order};
-  (void)std::__libcpp_thread_poll_with_backoff(__poll_fn, __spinning_backoff_policy());
+template <class _AtomicWaitable, class _Poll>
+_LIBCPP_HIDE_FROM_ABI void __atomic_wait_unless(const _AtomicWaitable& __a, _Poll&& __poll, memory_order __order) {
+  __atomic_wait_poll_impl<_AtomicWaitable, __decay_t<_Poll> > __poll_fn = {__a, __poll, __order};
+  std::__libcpp_thread_poll_with_backoff(__poll_fn, __spinning_backoff_policy());
 }
 
+template <class _AtomicWaitable>
+_LIBCPP_HIDE_FROM_ABI void __atomic_notify_one(const _AtomicWaitable&) {}
+
+template <class _AtomicWaitable>
+_LIBCPP_HIDE_FROM_ABI void __atomic_notify_all(const _AtomicWaitable&) {}
+
 #endif // _LIBCPP_HAS_NO_THREADS
 
 template <typename _Tp>
@@ -143,11 +190,12 @@ struct __atomic_compare_unequal_to {
   }
 };
 
-template <class _Atp, class _Tp>
+template <class _AtomicWaitable, class _Up>
 _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void
-__cxx_atomic_wait(_Atp* __a, _Tp const __val, memory_order __order) {
-  __atomic_compare_unequal_to<_Tp> __poll_fn = {__val};
-  std::__cxx_atomic_wait_unless(__a, __poll_fn, __order);
+__atomic_wait(_AtomicWaitable& __a, _Up __val, memory_order __order) {
+  static_assert(__atomic_waitable<_AtomicWaitable>::value, "");
+  __atomic_compare_unequal_to<_Up> __nonatomic_equal = {__val};
+  std::__atomic_wait_unless(__a, __nonatomic_equal, __order);
 }
 
 _LIBCPP_END_NAMESPACE_STD

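To make the predicate contract described in the comments above concrete, the call sites in `latch` and `semaphore` below all follow the same shape. A hedged sketch of that calling convention (the function name and the `__counter_` parameter are stand-ins, not names from this patch):

```cpp
#include <atomic>
#include <cstddef>

// Sketch modeled on the latch/semaphore call sites in this patch; __counter_
// is a stand-in __atomic_base<ptrdiff_t> member of some waitable component.
void __wait_until_zero(const std::__atomic_base<std::ptrdiff_t>& __counter_) {
  std::__atomic_wait_unless(
      __counter_,
      [](std::ptrdiff_t& __value) -> bool {
        // __value arrives holding the freshly loaded value, so returning false
        // without touching it keeps the "current understanding" contract and
        // never returns false spuriously (this mirrors latch::try_wait_impl).
        return __value == 0;
      },
      std::memory_order_acquire);
}
```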
libcxx/include/latch

Lines changed: 3 additions & 3 deletions
@@ -102,8 +102,8 @@ public:
     return try_wait_impl(__value);
   }
   inline _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void wait() const {
-    __cxx_atomic_wait_unless(
-        &__a_.__a_, [this](ptrdiff_t& __value) -> bool { return try_wait_impl(__value); }, memory_order_acquire);
+    std::__atomic_wait_unless(
+        __a_, [this](ptrdiff_t& __value) -> bool { return try_wait_impl(__value); }, memory_order_acquire);
   }
   inline _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void arrive_and_wait(ptrdiff_t __update = 1) {
     _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(__update >= 0, "latch::arrive_and_wait called with a negative value");
@@ -114,7 +114,7 @@ public:
   }
 
 private:
-  inline _LIBCPP_HIDE_FROM_ABI bool try_wait_impl(ptrdiff_t& __value) const noexcept { return __value == 0; }
+  _LIBCPP_HIDE_FROM_ABI bool try_wait_impl(ptrdiff_t& __value) const noexcept { return __value == 0; }
 };
 
 _LIBCPP_END_NAMESPACE_STD

libcxx/include/semaphore

Lines changed: 2 additions & 2 deletions
@@ -99,8 +99,8 @@ public:
     }
   }
   _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void acquire() {
-    __cxx_atomic_wait_unless(
-        &__a_.__a_, [this](ptrdiff_t& __old) { return __try_acquire_impl(__old); }, memory_order_relaxed);
+    std::__atomic_wait_unless(
+        __a_, [this](ptrdiff_t& __old) { return __try_acquire_impl(__old); }, memory_order_relaxed);
   }
   template <class _Rep, class _Period>
   _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI bool
