// -*- C++ -*- header.

// Copyright (C) 2015-2018 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.

/** @file bits/atomic_futex.h
 *  This is an internal header file, included by other library headers.
 *  Do not attempt to use it directly.
 */

#ifndef _GLIBCXX_ATOMIC_FUTEX_H
#define _GLIBCXX_ATOMIC_FUTEX_H 1

#pragma GCC system_header

#include <bits/c++config.h>
#include <atomic>
#include <chrono>
#if ! (defined(_GLIBCXX_HAVE_LINUX_FUTEX) && ATOMIC_INT_LOCK_FREE > 1)
#include <mutex>
#include <condition_variable>
#endif

#ifndef _GLIBCXX_ALWAYS_INLINE
#define _GLIBCXX_ALWAYS_INLINE inline __attribute__((__always_inline__))
#endif

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

#if defined(_GLIBCXX_HAS_GTHREADS) && defined(_GLIBCXX_USE_C99_STDINT_TR1)
#if defined(_GLIBCXX_HAVE_LINUX_FUTEX) && ATOMIC_INT_LOCK_FREE > 1
  struct __atomic_futex_unsigned_base
  {
    // Returns false iff a timeout occurred.
    bool
    _M_futex_wait_until(unsigned *__addr, unsigned __val, bool __has_timeout,
                        chrono::seconds __s, chrono::nanoseconds __ns);

    // This can be executed after the object has been destroyed.
    static void _M_futex_notify_all(unsigned* __addr);
  };
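  // The two members above are defined out of line, in the compiled library
  // rather than in this header.  As a rough, hedged sketch of the mechanism
  // they rely on: on Linux they boil down to the futex(2) system call, along
  // the lines shown below.  The real definitions additionally convert the
  // absolute deadline and report timeouts; the names __sketch_futex_wait and
  // __sketch_futex_notify_all are illustrative only and not part of libstdc++.
#if 0 // illustrative sketch, not compiled
  #include <climits>       // INT_MAX
  #include <unistd.h>      // syscall
  #include <sys/syscall.h> // SYS_futex
  #include <linux/futex.h> // FUTEX_WAIT, FUTEX_WAKE

  // Atomically re-check that *__addr == __val and, if so, sleep until woken
  // by FUTEX_WAKE, interrupted by a signal, or woken spuriously.
  static void
  __sketch_futex_wait(unsigned* __addr, unsigned __val)
  { syscall(SYS_futex, __addr, FUTEX_WAIT, __val, nullptr); }

  // Wake every thread currently blocked in FUTEX_WAIT on __addr.
  static void
  __sketch_futex_notify_all(unsigned* __addr)
  { syscall(SYS_futex, __addr, FUTEX_WAKE, INT_MAX); }
#endif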

  template <unsigned _Waiter_bit = 0x80000000>
  class __atomic_futex_unsigned : __atomic_futex_unsigned_base
  {
    typedef chrono::system_clock __clock_t;

    // This must be lock-free and at offset 0.
    atomic<unsigned> _M_data;

  public:
    explicit
    __atomic_futex_unsigned(unsigned __data) : _M_data(__data)
    { }

    _GLIBCXX_ALWAYS_INLINE unsigned
    _M_load(memory_order __mo)
    {
      return _M_data.load(__mo) & ~_Waiter_bit;
    }

  private:
    // If a timeout occurs, returns a current value after the timeout;
    // otherwise, returns the operand's value if equal is true or a different
    // value if equal is false.
    // The assumed value is the caller's assumption about the current value
    // when making the call.
    unsigned
    _M_load_and_test_until(unsigned __assumed, unsigned __operand,
                           bool __equal, memory_order __mo, bool __has_timeout,
                           chrono::seconds __s, chrono::nanoseconds __ns)
    {
      for (;;)
        {
          // Don't bother checking the value again because we expect the caller
          // to have done it recently.
          // memory_order_relaxed is sufficient because we can rely on just the
          // modification order (store_notify uses an atomic RMW operation too),
          // and the futex syscalls synchronize between themselves.
          _M_data.fetch_or(_Waiter_bit, memory_order_relaxed);
          bool __ret = _M_futex_wait_until((unsigned*)(void*)&_M_data,
                                           __assumed | _Waiter_bit,
                                           __has_timeout, __s, __ns);
          // Fetch the current value after waiting (clears _Waiter_bit).
          __assumed = _M_load(__mo);
          if (!__ret || ((__operand == __assumed) == __equal))
            return __assumed;
          // TODO adapt wait time
        }
    }

    // Returns the operand's value if equal is true or a different value if
    // equal is false.
    // The assumed value is the caller's assumption about the current value
    // when making the call.
    unsigned
    _M_load_and_test(unsigned __assumed, unsigned __operand,
                     bool __equal, memory_order __mo)
    {
      return _M_load_and_test_until(__assumed, __operand, __equal, __mo,
                                    false, {}, {});
    }

    // If a timeout occurs, returns a current value after the timeout;
    // otherwise, returns the operand's value if equal is true or a different
    // value if equal is false.
    // The assumed value is the caller's assumption about the current value
    // when making the call.
    template<typename _Dur>
    unsigned
    _M_load_and_test_until_impl(unsigned __assumed, unsigned __operand,
                                bool __equal, memory_order __mo,
                                const chrono::time_point<__clock_t, _Dur>& __atime)
    {
      auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
      auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
      // XXX correct?
      return _M_load_and_test_until(__assumed, __operand, __equal, __mo,
                                    true, __s.time_since_epoch(), __ns);
    }

  public:

    _GLIBCXX_ALWAYS_INLINE unsigned
    _M_load_when_not_equal(unsigned __val, memory_order __mo)
    {
      unsigned __i = _M_load(__mo);
      if ((__i & ~_Waiter_bit) != __val)
        return (__i & ~_Waiter_bit);
      // TODO Spin-wait first.
      return _M_load_and_test(__i, __val, false, __mo);
    }

    _GLIBCXX_ALWAYS_INLINE void
    _M_load_when_equal(unsigned __val, memory_order __mo)
    {
      unsigned __i = _M_load(__mo);
      if ((__i & ~_Waiter_bit) == __val)
        return;
      // TODO Spin-wait first.
      _M_load_and_test(__i, __val, true, __mo);
    }

    // Returns false iff a timeout occurred.
    template<typename _Rep, typename _Period>
    _GLIBCXX_ALWAYS_INLINE bool
    _M_load_when_equal_for(unsigned __val, memory_order __mo,
                           const chrono::duration<_Rep, _Period>& __rtime)
    {
      return _M_load_when_equal_until(__val, __mo,
                                      __clock_t::now() + __rtime);
    }

    // Returns false iff a timeout occurred.
    template<typename _Clock, typename _Duration>
    _GLIBCXX_ALWAYS_INLINE bool
    _M_load_when_equal_until(unsigned __val, memory_order __mo,
                             const chrono::time_point<_Clock, _Duration>& __atime)
    {
      // DR 887 - Sync unknown clock to known clock.
      const typename _Clock::time_point __c_entry = _Clock::now();
      const __clock_t::time_point __s_entry = __clock_t::now();
      const auto __delta = __atime - __c_entry;
      const auto __s_atime = __s_entry + __delta;
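      // NOTE: the deadline is converted from _Clock to __clock_t only once,
      // at entry.  If the two clocks advance at different rates, or one is
      // adjusted after this point, the wait may return somewhat earlier or
      // later than __atime as measured on _Clock.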
      return _M_load_when_equal_until(__val, __mo, __s_atime);
    }

    // Returns false iff a timeout occurred.
    template<typename _Duration>
    _GLIBCXX_ALWAYS_INLINE bool
    _M_load_when_equal_until(unsigned __val, memory_order __mo,
                             const chrono::time_point<__clock_t, _Duration>& __atime)
    {
      unsigned __i = _M_load(__mo);
      if ((__i & ~_Waiter_bit) == __val)
        return true;
      // TODO Spin-wait first. Ignore effect on timeout.
      __i = _M_load_and_test_until_impl(__i, __val, true, __mo, __atime);
      return (__i & ~_Waiter_bit) == __val;
    }

    _GLIBCXX_ALWAYS_INLINE void
    _M_store_notify_all(unsigned __val, memory_order __mo)
    {
      unsigned* __futex = (unsigned *)(void *)&_M_data;
      if (_M_data.exchange(__val, __mo) & _Waiter_bit)
        _M_futex_notify_all(__futex);
    }
  };
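  // Hedged usage sketch (not part of this header): one thread blocks until
  // the value becomes 1, another publishes 1 and wakes all waiters.  The
  // acquire/release pairing is what callers such as the <future> shared
  // state rely on to order the data guarded by the flag; __flag, __waiter
  // and __notifier below are illustrative names only.
#if 0 // illustrative sketch, not compiled
  __atomic_futex_unsigned<> __flag{0};

  void __waiter()
  {
    // Blocks (via FUTEX_WAIT) until another thread stores 1.
    __flag._M_load_when_equal(1, memory_order_acquire);
    // Data written before the matching release store is now visible here.
  }

  void __notifier()
  {
    // ... publish data ...
    // Store 1 and wake every thread that set _Waiter_bit while waiting.
    __flag._M_store_notify_all(1, memory_order_release);
  }
#endif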

#else // ! (_GLIBCXX_HAVE_LINUX_FUTEX && ATOMIC_INT_LOCK_FREE > 1)

  // If futexes are not available, use a mutex and a condvar to wait.
  // Because we access the data only within critical sections, all accesses
  // are sequentially consistent; thus, we satisfy any provided memory_order.
  template <unsigned _Waiter_bit = 0x80000000>
  class __atomic_futex_unsigned
  {
    typedef chrono::system_clock __clock_t;

    unsigned _M_data;
    mutex _M_mutex;
    condition_variable _M_condvar;

  public:
    explicit
    __atomic_futex_unsigned(unsigned __data) : _M_data(__data)
    { }

    _GLIBCXX_ALWAYS_INLINE unsigned
    _M_load(memory_order __mo)
    {
      unique_lock<mutex> __lock(_M_mutex);
      return _M_data;
    }

    _GLIBCXX_ALWAYS_INLINE unsigned
    _M_load_when_not_equal(unsigned __val, memory_order __mo)
    {
      unique_lock<mutex> __lock(_M_mutex);
      while (_M_data == __val)
        _M_condvar.wait(__lock);
      return _M_data;
    }

    _GLIBCXX_ALWAYS_INLINE void
    _M_load_when_equal(unsigned __val, memory_order __mo)
    {
      unique_lock<mutex> __lock(_M_mutex);
      while (_M_data != __val)
        _M_condvar.wait(__lock);
    }

    template<typename _Rep, typename _Period>
    _GLIBCXX_ALWAYS_INLINE bool
    _M_load_when_equal_for(unsigned __val, memory_order __mo,
                           const chrono::duration<_Rep, _Period>& __rtime)
    {
      unique_lock<mutex> __lock(_M_mutex);
      return _M_condvar.wait_for(__lock, __rtime,
                                 [&] { return _M_data == __val; });
    }

    template<typename _Clock, typename _Duration>
    _GLIBCXX_ALWAYS_INLINE bool
    _M_load_when_equal_until(unsigned __val, memory_order __mo,
                             const chrono::time_point<_Clock, _Duration>& __atime)
    {
      unique_lock<mutex> __lock(_M_mutex);
      return _M_condvar.wait_until(__lock, __atime,
                                   [&] { return _M_data == __val; });
    }

    _GLIBCXX_ALWAYS_INLINE void
    _M_store_notify_all(unsigned __val, memory_order __mo)
    {
      unique_lock<mutex> __lock(_M_mutex);
      _M_data = __val;
      _M_condvar.notify_all();
    }
  };

#endif // _GLIBCXX_HAVE_LINUX_FUTEX && ATOMIC_INT_LOCK_FREE > 1
#endif // _GLIBCXX_HAS_GTHREADS && _GLIBCXX_USE_C99_STDINT_TR1

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std

#endif