// -*- C++ -*- header.

// Copyright (C) 2008-2019 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.

/** @file bits/atomic_base.h
 *  This is an internal header file, included by other library headers.
 *  Do not attempt to use it directly. @headername{atomic}
 */

#ifndef _GLIBCXX_ATOMIC_BASE_H
#define _GLIBCXX_ATOMIC_BASE_H 1

#pragma GCC system_header

#include <bits/c++config.h>
#include <stdint.h>
#include <bits/atomic_lockfree_defines.h>

#ifndef _GLIBCXX_ALWAYS_INLINE
#define _GLIBCXX_ALWAYS_INLINE inline __attribute__((__always_inline__))
#endif

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  /**
   * @defgroup atomics Atomics
   *
   * Components for performing atomic operations.
   * @{
   */

  /// Enumeration for memory_order
#if __cplusplus > 201703L
  enum class memory_order : int
    {
      relaxed,
      consume,
      acquire,
      release,
      acq_rel,
      seq_cst
    };

  inline constexpr memory_order memory_order_relaxed = memory_order::relaxed;
  inline constexpr memory_order memory_order_consume = memory_order::consume;
  inline constexpr memory_order memory_order_acquire = memory_order::acquire;
  inline constexpr memory_order memory_order_release = memory_order::release;
  inline constexpr memory_order memory_order_acq_rel = memory_order::acq_rel;
  inline constexpr memory_order memory_order_seq_cst = memory_order::seq_cst;
#else
  typedef enum memory_order
    {
      memory_order_relaxed,
      memory_order_consume,
      memory_order_acquire,
      memory_order_release,
      memory_order_acq_rel,
      memory_order_seq_cst
    } memory_order;
#endif
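  // Illustrative, non-normative sketch (names such as __ready are invented
  // for the example, and <atomic> is assumed to be included): the enumerators
  // above are simply forwarded, as ints, to the __atomic built-ins.  A typical
  // release/acquire pairing looks like:
  //
  //   std::atomic<int> __ready{0};
  //   // producer thread:
  //   __ready.store(1, std::memory_order_release);
  //   // consumer thread:
  //   while (__ready.load(std::memory_order_acquire) != 1)
  //     { /* spin */ }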

  enum __memory_order_modifier
    {
      __memory_order_mask          = 0x0ffff,
      __memory_order_modifier_mask = 0xffff0000,
      __memory_order_hle_acquire   = 0x10000,
      __memory_order_hle_release   = 0x20000
    };

  constexpr memory_order
  operator|(memory_order __m, __memory_order_modifier __mod)
  {
    return memory_order(int(__m) | int(__mod));
  }

  constexpr memory_order
  operator&(memory_order __m, __memory_order_modifier __mod)
  {
    return memory_order(int(__m) & int(__mod));
  }

  // Drop release ordering as per [atomics.types.operations.req]/21
  constexpr memory_order
  __cmpexch_failure_order2(memory_order __m) noexcept
  {
    return __m == memory_order_acq_rel ? memory_order_acquire
      : __m == memory_order_release ? memory_order_relaxed : __m;
  }

  constexpr memory_order
  __cmpexch_failure_order(memory_order __m) noexcept
  {
    return memory_order(__cmpexch_failure_order2(__m & __memory_order_mask)
			| __memory_order_modifier(__m
						  & __memory_order_modifier_mask));
  }
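  // Illustrative, non-normative examples of the mapping performed by
  // __cmpexch_failure_order above (the function is constexpr, so these
  // could be verified with static_assert):
  //
  //   __cmpexch_failure_order(memory_order_acq_rel) == memory_order_acquire
  //   __cmpexch_failure_order(memory_order_release) == memory_order_relaxed
  //   __cmpexch_failure_order(memory_order_seq_cst) == memory_order_seq_cst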

  _GLIBCXX_ALWAYS_INLINE void
  atomic_thread_fence(memory_order __m) noexcept
  { __atomic_thread_fence(int(__m)); }

  _GLIBCXX_ALWAYS_INLINE void
  atomic_signal_fence(memory_order __m) noexcept
  { __atomic_signal_fence(int(__m)); }
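  // Illustrative, non-normative sketch (data and flag are invented names;
  // <atomic> is assumed): a release fence before a relaxed store pairs with
  // an acquire fence after a relaxed load of the same object:
  //
  //   // thread 1:
  //   data = 42;                                            // plain write
  //   std::atomic_thread_fence(std::memory_order_release);
  //   flag.store(1, std::memory_order_relaxed);
  //
  //   // thread 2:
  //   while (flag.load(std::memory_order_relaxed) != 1) { }
  //   std::atomic_thread_fence(std::memory_order_acquire);
  //   // data is guaranteed to read as 42 here.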

  /// kill_dependency
  template<typename _Tp>
    inline _Tp
    kill_dependency(_Tp __y) noexcept
    {
      _Tp __ret(__y);
      return __ret;
    }
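  // Illustrative, non-normative sketch (__ptr, __i and __j are invented
  // names): kill_dependency ends a memory_order_consume dependency chain,
  // so ordering need not be carried past the call:
  //
  //   int* __p = __ptr.load(std::memory_order_consume);
  //   int __i = *__p;                       // carries a dependency on __p
  //   int __j = std::kill_dependency(__i);  // dependency chain ends here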


  // Base types for atomics.
  template<typename _IntTp>
    struct __atomic_base;


#define ATOMIC_VAR_INIT(_VI) { _VI }
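  // Illustrative, non-normative use of the macro above (it simply expands
  // to a braced initializer, matching C's ATOMIC_VAR_INIT):
  //
  //   std::atomic<int> __counter = ATOMIC_VAR_INIT(0);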

  template<typename _Tp>
    struct atomic;

  template<typename _Tp>
    struct atomic<_Tp*>;

  /* The target's "set" value for test-and-set may not be exactly 1. */
#if __GCC_ATOMIC_TEST_AND_SET_TRUEVAL == 1
  typedef bool __atomic_flag_data_type;
#else
  typedef unsigned char __atomic_flag_data_type;
#endif

  /**
   *  @brief Base type for atomic_flag.
   *
   *  Base type is POD with data, allowing atomic_flag to derive from
   *  it and meet the standard layout type requirement. In addition to
   *  compatibility with a C interface, this allows different
   *  implementations of atomic_flag to use the same atomic operation
   *  functions, via a standard conversion to the __atomic_flag_base
   *  argument.
   */
  _GLIBCXX_BEGIN_EXTERN_C

  struct __atomic_flag_base
  {
    __atomic_flag_data_type _M_i;
  };

  _GLIBCXX_END_EXTERN_C

#define ATOMIC_FLAG_INIT { 0 }
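  // Illustrative, non-normative use: ATOMIC_FLAG_INIT initializes the flag
  // to the clear state, e.g.
  //
  //   std::atomic_flag __lock = ATOMIC_FLAG_INIT;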

  /// atomic_flag
  struct atomic_flag : public __atomic_flag_base
  {
    atomic_flag() noexcept = default;
    ~atomic_flag() noexcept = default;
    atomic_flag(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) volatile = delete;

    // Converting constructor, so that ATOMIC_FLAG_INIT can initialize a flag.
    constexpr atomic_flag(bool __i) noexcept
      : __atomic_flag_base{ _S_init(__i) }
    { }

    _GLIBCXX_ALWAYS_INLINE bool
    test_and_set(memory_order __m = memory_order_seq_cst) noexcept
    {
      return __atomic_test_and_set (&_M_i, int(__m));
    }

    _GLIBCXX_ALWAYS_INLINE bool
    test_and_set(memory_order __m = memory_order_seq_cst) volatile noexcept
    {
      return __atomic_test_and_set (&_M_i, int(__m));
    }

    _GLIBCXX_ALWAYS_INLINE void
    clear(memory_order __m = memory_order_seq_cst) noexcept
    {
      memory_order __b = __m & __memory_order_mask;
      __glibcxx_assert(__b != memory_order_consume);
      __glibcxx_assert(__b != memory_order_acquire);
      __glibcxx_assert(__b != memory_order_acq_rel);

      __atomic_clear (&_M_i, int(__m));
    }

    _GLIBCXX_ALWAYS_INLINE void
    clear(memory_order __m = memory_order_seq_cst) volatile noexcept
    {
      memory_order __b = __m & __memory_order_mask;
      __glibcxx_assert(__b != memory_order_consume);
      __glibcxx_assert(__b != memory_order_acquire);
      __glibcxx_assert(__b != memory_order_acq_rel);

      __atomic_clear (&_M_i, int(__m));
    }

  private:
    static constexpr __atomic_flag_data_type
    _S_init(bool __i)
    { return __i ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0; }
  };
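  // Illustrative, non-normative sketch (__lock, lock and unlock are invented
  // names; <atomic> is assumed): atomic_flag as a minimal spin lock, where
  // test_and_set acquires and clear releases:
  //
  //   std::atomic_flag __lock = ATOMIC_FLAG_INIT;
  //
  //   void lock()
  //   { while (__lock.test_and_set(std::memory_order_acquire)) { /* spin */ } }
  //
  //   void unlock()
  //   { __lock.clear(std::memory_order_release); }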


  /// Base class for atomic integrals.
  //
  // For each of the integral types, define atomic_[integral type] struct
  //
  // atomic_bool     bool
  // atomic_char     char
  // atomic_schar    signed char
  // atomic_uchar    unsigned char
  // atomic_short    short
  // atomic_ushort   unsigned short
  // atomic_int      int
  // atomic_uint     unsigned int
  // atomic_long     long
  // atomic_ulong    unsigned long
  // atomic_llong    long long
  // atomic_ullong   unsigned long long
  // atomic_char8_t  char8_t
  // atomic_char16_t char16_t
  // atomic_char32_t char32_t
  // atomic_wchar_t  wchar_t
  //
  // NB: Assuming _ITp is an integral scalar type that is 1, 2, 4, or
  // 8 bytes, since that is what GCC built-in functions for atomic
  // memory access expect.
  template<typename _ITp>
    struct __atomic_base
    {
      using value_type = _ITp;
      using difference_type = value_type;

    private:
      typedef _ITp __int_type;

      static constexpr int _S_alignment =
	sizeof(_ITp) > alignof(_ITp) ? sizeof(_ITp) : alignof(_ITp);

      alignas(_S_alignment) __int_type _M_i;

    public:
      __atomic_base() noexcept = default;
      ~__atomic_base() noexcept = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      // Requires __int_type convertible to _M_i.
      constexpr __atomic_base(__int_type __i) noexcept : _M_i (__i) { }

      operator __int_type() const noexcept
      { return load(); }

      operator __int_type() const volatile noexcept
      { return load(); }

      __int_type
      operator=(__int_type __i) noexcept
      {
	store(__i);
	return __i;
      }

      __int_type
      operator=(__int_type __i) volatile noexcept
      {
	store(__i);
	return __i;
      }

      __int_type
      operator++(int) noexcept
      { return fetch_add(1); }

      __int_type
      operator++(int) volatile noexcept
      { return fetch_add(1); }

      __int_type
      operator--(int) noexcept
      { return fetch_sub(1); }

      __int_type
      operator--(int) volatile noexcept
      { return fetch_sub(1); }

      __int_type
      operator++() noexcept
      { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator++() volatile noexcept
      { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator--() noexcept
      { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator--() volatile noexcept
      { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator+=(__int_type __i) noexcept
      { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator+=(__int_type __i) volatile noexcept
      { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator-=(__int_type __i) noexcept
      { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator-=(__int_type __i) volatile noexcept
      { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator&=(__int_type __i) noexcept
      { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator&=(__int_type __i) volatile noexcept
      { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator|=(__int_type __i) noexcept
      { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator|=(__int_type __i) volatile noexcept
      { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator^=(__int_type __i) noexcept
      { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator^=(__int_type __i) volatile noexcept
      { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      bool
      is_lock_free() const noexcept
      {
	// Use a fake, minimally aligned pointer.
	return __atomic_is_lock_free(sizeof(_M_i),
	    reinterpret_cast<void *>(-_S_alignment));
      }

      bool
      is_lock_free() const volatile noexcept
      {
	// Use a fake, minimally aligned pointer.
	return __atomic_is_lock_free(sizeof(_M_i),
	    reinterpret_cast<void *>(-_S_alignment));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__int_type __i, memory_order __m = memory_order_seq_cst) noexcept
      {
	memory_order __b = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_acquire);
	__glibcxx_assert(__b != memory_order_acq_rel);
	__glibcxx_assert(__b != memory_order_consume);

	__atomic_store_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__int_type __i,
	    memory_order __m = memory_order_seq_cst) volatile noexcept
      {
	memory_order __b = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_acquire);
	__glibcxx_assert(__b != memory_order_acq_rel);
	__glibcxx_assert(__b != memory_order_consume);

	__atomic_store_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      load(memory_order __m = memory_order_seq_cst) const noexcept
      {
	memory_order __b = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_release);
	__glibcxx_assert(__b != memory_order_acq_rel);

	return __atomic_load_n(&_M_i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      {
	memory_order __b = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_release);
	__glibcxx_assert(__b != memory_order_acq_rel);

	return __atomic_load_n(&_M_i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      exchange(__int_type __i,
	       memory_order __m = memory_order_seq_cst) noexcept
      {
	return __atomic_exchange_n(&_M_i, __i, int(__m));
      }


      _GLIBCXX_ALWAYS_INLINE __int_type
      exchange(__int_type __i,
	       memory_order __m = memory_order_seq_cst) volatile noexcept
      {
	return __atomic_exchange_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
			    memory_order __m1, memory_order __m2) noexcept
      {
	memory_order __b2 = __m2 & __memory_order_mask;
	memory_order __b1 = __m1 & __memory_order_mask;
	__glibcxx_assert(__b2 != memory_order_release);
	__glibcxx_assert(__b2 != memory_order_acq_rel);
	__glibcxx_assert(__b2 <= __b1);

	return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
					   int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
			    memory_order __m1,
			    memory_order __m2) volatile noexcept
      {
	memory_order __b2 = __m2 & __memory_order_mask;
	memory_order __b1 = __m1 & __memory_order_mask;
	__glibcxx_assert(__b2 != memory_order_release);
	__glibcxx_assert(__b2 != memory_order_acq_rel);
	__glibcxx_assert(__b2 <= __b1);

	return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
					   int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
			    memory_order __m = memory_order_seq_cst) noexcept
      {
	return compare_exchange_weak(__i1, __i2, __m,
				     __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
		   memory_order __m = memory_order_seq_cst) volatile noexcept
      {
	return compare_exchange_weak(__i1, __i2, __m,
				     __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
			      memory_order __m1, memory_order __m2) noexcept
      {
	memory_order __b2 = __m2 & __memory_order_mask;
	memory_order __b1 = __m1 & __memory_order_mask;
	__glibcxx_assert(__b2 != memory_order_release);
	__glibcxx_assert(__b2 != memory_order_acq_rel);
	__glibcxx_assert(__b2 <= __b1);

	return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
					   int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
			      memory_order __m1,
			      memory_order __m2) volatile noexcept
      {
	memory_order __b2 = __m2 & __memory_order_mask;
	memory_order __b1 = __m1 & __memory_order_mask;

	__glibcxx_assert(__b2 != memory_order_release);
	__glibcxx_assert(__b2 != memory_order_acq_rel);
	__glibcxx_assert(__b2 <= __b1);

	return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
					   int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
			      memory_order __m = memory_order_seq_cst) noexcept
      {
	return compare_exchange_strong(__i1, __i2, __m,
				       __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
		 memory_order __m = memory_order_seq_cst) volatile noexcept
      {
	return compare_exchange_strong(__i1, __i2, __m,
				       __cmpexch_failure_order(__m));
      }
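
      // Illustrative, non-normative sketch of the usual compare-exchange
      // retry loop (__old is an invented name).  The weak form may fail
      // spuriously, so it belongs in a loop; on failure the expected value
      // is refreshed with the currently stored one:
      //
      //   __int_type __old = load(memory_order_relaxed);
      //   while (!compare_exchange_weak(__old, __old + 1,
      //                                 memory_order_acq_rel,
      //                                 memory_order_relaxed))
      //     { /* __old now holds the freshly observed value; retry */ }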

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_add(__int_type __i,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_add(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_add(__int_type __i,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_add(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_sub(__int_type __i,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_sub(__int_type __i,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_and(__int_type __i,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_and(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_and(__int_type __i,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_and(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_or(__int_type __i,
	       memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_or(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_or(__int_type __i,
	       memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_or(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_xor(__int_type __i,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_xor(__int_type __i,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }
    };

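  // Illustrative, non-normative sketch (__n and __snapshot are invented
  // names; <atomic> is assumed): the integral specializations of std::atomic
  // are implemented in terms of __atomic_base, so the operators above give
  // them their arithmetic interface:
  //
  //   std::atomic<int> __n{0};
  //   ++__n;                                       // __atomic_add_fetch, seq_cst
  //   __n.fetch_add(4, std::memory_order_relaxed); // returns the old value
  //   int __snapshot = __n.load();                 // seq_cst load
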

  /// Partial specialization for pointer types.
  template<typename _PTp>
    struct __atomic_base<_PTp*>
    {
    private:
      typedef _PTp* __pointer_type;

      __pointer_type _M_p;

      // Factored out to facilitate explicit specialization.
      constexpr ptrdiff_t
      _M_type_size(ptrdiff_t __d) const { return __d * sizeof(_PTp); }

      constexpr ptrdiff_t
      _M_type_size(ptrdiff_t __d) const volatile { return __d * sizeof(_PTp); }

    public:
      __atomic_base() noexcept = default;
      ~__atomic_base() noexcept = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      // Requires __pointer_type convertible to _M_p.
      constexpr __atomic_base(__pointer_type __p) noexcept : _M_p (__p) { }

      operator __pointer_type() const noexcept
      { return load(); }

      operator __pointer_type() const volatile noexcept
      { return load(); }

      __pointer_type
      operator=(__pointer_type __p) noexcept
      {
	store(__p);
	return __p;
      }

      __pointer_type
      operator=(__pointer_type __p) volatile noexcept
      {
	store(__p);
	return __p;
      }

      __pointer_type
      operator++(int) noexcept
      { return fetch_add(1); }

      __pointer_type
      operator++(int) volatile noexcept
      { return fetch_add(1); }

      __pointer_type
      operator--(int) noexcept
      { return fetch_sub(1); }

      __pointer_type
      operator--(int) volatile noexcept
      { return fetch_sub(1); }

      __pointer_type
      operator++() noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(1),
				  int(memory_order_seq_cst)); }

      __pointer_type
      operator++() volatile noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(1),
				  int(memory_order_seq_cst)); }

      __pointer_type
      operator--() noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
				  int(memory_order_seq_cst)); }

      __pointer_type
      operator--() volatile noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
				  int(memory_order_seq_cst)); }

      __pointer_type
      operator+=(ptrdiff_t __d) noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
				  int(memory_order_seq_cst)); }

      __pointer_type
      operator+=(ptrdiff_t __d) volatile noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
				  int(memory_order_seq_cst)); }

      __pointer_type
      operator-=(ptrdiff_t __d) noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
				  int(memory_order_seq_cst)); }

      __pointer_type
      operator-=(ptrdiff_t __d) volatile noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
				  int(memory_order_seq_cst)); }

      bool
      is_lock_free() const noexcept
      {
	// Produce a fake, minimally aligned pointer.
	return __atomic_is_lock_free(sizeof(_M_p),
	    reinterpret_cast<void *>(-__alignof(_M_p)));
      }

      bool
      is_lock_free() const volatile noexcept
      {
	// Produce a fake, minimally aligned pointer.
	return __atomic_is_lock_free(sizeof(_M_p),
	    reinterpret_cast<void *>(-__alignof(_M_p)));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__pointer_type __p,
	    memory_order __m = memory_order_seq_cst) noexcept
      {
	memory_order __b = __m & __memory_order_mask;

	__glibcxx_assert(__b != memory_order_acquire);
	__glibcxx_assert(__b != memory_order_acq_rel);
	__glibcxx_assert(__b != memory_order_consume);

	__atomic_store_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__pointer_type __p,
	    memory_order __m = memory_order_seq_cst) volatile noexcept
      {
	memory_order __b = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_acquire);
	__glibcxx_assert(__b != memory_order_acq_rel);
	__glibcxx_assert(__b != memory_order_consume);

	__atomic_store_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      load(memory_order __m = memory_order_seq_cst) const noexcept
      {
	memory_order __b = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_release);
	__glibcxx_assert(__b != memory_order_acq_rel);

	return __atomic_load_n(&_M_p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      {
	memory_order __b = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_release);
	__glibcxx_assert(__b != memory_order_acq_rel);

	return __atomic_load_n(&_M_p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      exchange(__pointer_type __p,
	       memory_order __m = memory_order_seq_cst) noexcept
      {
	return __atomic_exchange_n(&_M_p, __p, int(__m));
      }


      _GLIBCXX_ALWAYS_INLINE __pointer_type
      exchange(__pointer_type __p,
	       memory_order __m = memory_order_seq_cst) volatile noexcept
      {
	return __atomic_exchange_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
			      memory_order __m1,
			      memory_order __m2) noexcept
      {
	memory_order __b2 = __m2 & __memory_order_mask;
	memory_order __b1 = __m1 & __memory_order_mask;
	__glibcxx_assert(__b2 != memory_order_release);
	__glibcxx_assert(__b2 != memory_order_acq_rel);
	__glibcxx_assert(__b2 <= __b1);

	return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
					   int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
			      memory_order __m1,
			      memory_order __m2) volatile noexcept
      {
	memory_order __b2 = __m2 & __memory_order_mask;
	memory_order __b1 = __m1 & __memory_order_mask;

	__glibcxx_assert(__b2 != memory_order_release);
	__glibcxx_assert(__b2 != memory_order_acq_rel);
	__glibcxx_assert(__b2 <= __b1);

	return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
					   int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_add(ptrdiff_t __d,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_add(&_M_p, _M_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_add(ptrdiff_t __d,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_add(&_M_p, _M_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_sub(ptrdiff_t __d,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_sub(ptrdiff_t __d,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), int(__m)); }
    };

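  // Illustrative, non-normative sketch (__buf, __cursor and __prev are
  // invented names; <atomic> is assumed): pointer atomics scale offsets by
  // the pointee size via _M_type_size, so fetch_add counts elements, not
  // bytes:
  //
  //   int __buf[8];
  //   std::atomic<int*> __cursor{__buf};
  //   int* __prev = __cursor.fetch_add(2);   // advances by 2 ints, returns __buf
  //   __cursor += 1;                         // now points at __buf + 3
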
  // @} group atomics

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std

#endif