atomic/
lib.rs

1// Copyright 2016 Amanieu d'Antras
2//
3// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
5// http://opensource.org/licenses/MIT>, at your option. This file may not be
6// copied, modified, or distributed except according to those terms.
7
8//! Generic `Atomic<T>` wrapper type
9//!
10//! Atomic types provide primitive shared-memory communication between
11//! threads, and are the building blocks of other concurrent types.
12//!
13//! This library defines a generic atomic wrapper type `Atomic<T>` for all
14//! `T: Copy` types.
15//! Atomic types present operations that, when used correctly, synchronize
16//! updates between threads.
17//!
18//! Each method takes an `Ordering` which represents the strength of
19//! the memory barrier for that operation. These orderings are the
20//! same as [LLVM atomic orderings][1].
21//!
22//! [1]: http://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations
23//!
24//! Atomic variables are safe to share between threads (they implement `Sync`)
25//! but they do not themselves provide the mechanism for sharing. The most
26//! common way to share an atomic variable is to put it into an `Arc` (an
27//! atomically-reference-counted shared pointer).
28//!
29//! Most atomic types may be stored in static variables, initialized using
30//! the `const fn` constructors. Atomic statics are often used for lazy global
31//! initialization.
32
33#![warn(missing_docs)]
34#![warn(rust_2018_idioms)]
35#![no_std]
36#![cfg_attr(feature = "nightly", feature(integer_atomics))]
37
38#[cfg(any(test, feature = "std"))]
39#[macro_use]
40extern crate std;
41
42use core::mem::MaybeUninit;
43// Re-export some useful definitions from libcore
44pub use core::sync::atomic::{fence, Ordering};
45
46use core::cell::UnsafeCell;
47use core::fmt;
48
49#[cfg(feature = "std")]
50use std::panic::RefUnwindSafe;
51
52#[cfg(feature = "fallback")]
53mod fallback;
54mod ops;
55
/// A generic atomic wrapper type which allows an object to be safely shared
/// between threads.
#[repr(transparent)]
pub struct Atomic<T> {
    // Interior-mutable storage for the value; all access goes through raw
    // pointers handed to the atomic primitives in `ops`.
    // The MaybeUninit is here to work around rust-lang/rust#87341.
    v: UnsafeCell<MaybeUninit<T>>,
}
63
// SAFETY: Atomic<T> is only Sync if T is Send. All access to the inner value
// goes through atomic operations, so no additional synchronization from T is
// required; T: Send suffices because values move between threads by copy.
unsafe impl<T: Copy + Send> Sync for Atomic<T> {}
66
// Given that atomicity is guaranteed, Atomic<T> is RefUnwindSafe if T is
// RefUnwindSafe.
//
// This is trivially correct for native lock-free atomic types. For those whose
// atomicity is emulated using a spinlock, it is still correct because the
// `Atomic` API does not allow doing any panic-inducing operation after writing
// to the target object.
#[cfg(feature = "std")]
impl<T: Copy + RefUnwindSafe> RefUnwindSafe for Atomic<T> {}
75
76impl<T: Copy + Default> Default for Atomic<T> {
77    #[inline]
78    fn default() -> Self {
79        Self::new(Default::default())
80    }
81}
82
83impl<T: Copy + fmt::Debug> fmt::Debug for Atomic<T> {
84    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
85        f.debug_tuple("Atomic")
86            .field(&self.load(Ordering::SeqCst))
87            .finish()
88    }
89}
90
impl<T> Atomic<T> {
    /// Creates a new `Atomic`.
    #[inline]
    pub const fn new(v: T) -> Atomic<T> {
        Atomic {
            v: UnsafeCell::new(MaybeUninit::new(v)),
        }
    }

    /// Checks if `Atomic` objects of this type are lock-free.
    ///
    /// If an `Atomic` is not lock-free then it may be implemented using locks
    /// internally, which makes it unsuitable for some situations (such as
    /// communicating with a signal handler).
    #[inline]
    pub const fn is_lock_free() -> bool {
        // A property of the type `T` itself (no instance needed); the
        // decision is delegated to the `ops` module.
        ops::atomic_is_lock_free::<T>()
    }
}
110
impl<T: Copy> Atomic<T> {
    // Raw pointer to the stored value, handed to the `ops` atomic primitives.
    // Casting away the `MaybeUninit` layer is fine: the cell is always
    // initialized by `new` and only ever overwritten with full `T` values.
    #[inline]
    fn inner_ptr(&self) -> *mut T {
        self.v.get() as *mut T
    }

    /// Returns a mutable reference to the underlying type.
    ///
    /// This is safe because the mutable reference guarantees that no other threads are
    /// concurrently accessing the atomic data.
    #[inline]
    pub fn get_mut(&mut self) -> &mut T {
        unsafe { &mut *self.inner_ptr() }
    }

    /// Consumes the atomic and returns the contained value.
    ///
    /// This is safe because passing `self` by value guarantees that no other threads are
    /// concurrently accessing the atomic data.
    #[inline]
    pub fn into_inner(self) -> T {
        unsafe { self.v.into_inner().assume_init() }
    }

    /// Loads a value from the `Atomic`.
    ///
    /// `load` takes an `Ordering` argument which describes the memory ordering
    /// of this operation.
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Release` or `AcqRel`.
    #[inline]
    pub fn load(&self, order: Ordering) -> T {
        unsafe { ops::atomic_load(self.inner_ptr(), order) }
    }

    /// Stores a value into the `Atomic`.
    ///
    /// `store` takes an `Ordering` argument which describes the memory ordering
    /// of this operation.
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Acquire` or `AcqRel`.
    #[inline]
    pub fn store(&self, val: T, order: Ordering) {
        unsafe {
            ops::atomic_store(self.inner_ptr(), val, order);
        }
    }

    /// Stores a value into the `Atomic`, returning the old value.
    ///
    /// `swap` takes an `Ordering` argument which describes the memory ordering
    /// of this operation.
    #[inline]
    pub fn swap(&self, val: T, order: Ordering) -> T {
        unsafe { ops::atomic_swap(self.inner_ptr(), val, order) }
    }

    /// Stores a value into the `Atomic` if the current value is the same as the
    /// `current` value.
    ///
    /// The return value is a result indicating whether the new value was
    /// written and containing the previous value. On success this value is
    /// guaranteed to be equal to `new`.
    ///
    /// `compare_exchange` takes two `Ordering` arguments to describe the memory
    /// ordering of this operation. The first describes the required ordering if
    /// the operation succeeds while the second describes the required ordering
    /// when the operation fails. The failure ordering can't be `Release` or
    /// `AcqRel` and must be equivalent or weaker than the success ordering.
    #[inline]
    pub fn compare_exchange(
        &self,
        current: T,
        new: T,
        success: Ordering,
        failure: Ordering,
    ) -> Result<T, T> {
        unsafe { ops::atomic_compare_exchange(self.inner_ptr(), current, new, success, failure) }
    }

    /// Stores a value into the `Atomic` if the current value is the same as the
    /// `current` value.
    ///
    /// Unlike `compare_exchange`, this function is allowed to spuriously fail
    /// even when the comparison succeeds, which can result in more efficient
    /// code on some platforms. The return value is a result indicating whether
    /// the new value was written and containing the previous value.
    ///
    /// `compare_exchange_weak` takes two `Ordering` arguments to describe the
    /// memory ordering of this operation. The first describes the required
    /// ordering if the operation succeeds while the second describes the
    /// required ordering when the operation fails. The failure ordering can't
    /// be `Release` or `AcqRel` and must be equivalent or weaker than the
    /// success ordering.
    #[inline]
    pub fn compare_exchange_weak(
        &self,
        current: T,
        new: T,
        success: Ordering,
        failure: Ordering,
    ) -> Result<T, T> {
        unsafe {
            ops::atomic_compare_exchange_weak(self.inner_ptr(), current, new, success, failure)
        }
    }

    /// Fetches the value, and applies a function to it that returns an optional
    /// new value. Returns a `Result` of `Ok(previous_value)` if the function returned `Some(_)`, else
    /// `Err(previous_value)`.
    ///
    /// Note: This may call the function multiple times if the value has been changed from other threads in
    /// the meantime, as long as the function returns `Some(_)`, but the function will have been applied
    /// only once to the stored value.
    ///
    /// `fetch_update` takes two [`Ordering`] arguments to describe the memory ordering of this operation.
    /// The first describes the required ordering for when the operation finally succeeds while the second
    /// describes the required ordering for loads. These correspond to the success and failure orderings of
    /// [`compare_exchange`] respectively.
    ///
    /// Using [`Acquire`] as success ordering makes the store part
    /// of this operation [`Relaxed`], and using [`Release`] makes the final successful load
    /// [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
    /// and must be equivalent to or weaker than the success ordering.
    ///
    /// [`compare_exchange`]: #method.compare_exchange
    /// [`Ordering`]: enum.Ordering.html
    /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
    /// [`Release`]: enum.Ordering.html#variant.Release
    /// [`Acquire`]: enum.Ordering.html#variant.Acquire
    /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
    ///
    /// # Examples
    ///
    /// ```rust
    /// use atomic::{Atomic, Ordering};
    ///
    /// let x = Atomic::new(7);
    /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(7));
    /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(x + 1)), Ok(7));
    /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(x + 1)), Ok(8));
    /// assert_eq!(x.load(Ordering::SeqCst), 9);
    /// ```
    #[inline]
    pub fn fetch_update<F>(
        &self,
        set_order: Ordering,
        fetch_order: Ordering,
        mut f: F,
    ) -> Result<T, T>
    where
        F: FnMut(T) -> Option<T>,
    {
        let mut prev = self.load(fetch_order);
        while let Some(next) = f(prev) {
            match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
                x @ Ok(_) => return x,
                // CAS failed (possibly spuriously): retry `f` with the freshly
                // observed value.
                Err(next_prev) => prev = next_prev,
            }
        }
        // `f` declined to produce a new value: report the last observed one.
        Err(prev)
    }
}
278
// Boolean-specific logical fetch-and-modify operations; each takes an
// `Ordering` describing the memory ordering of the read-modify-write.
impl Atomic<bool> {
    /// Logical "and" with a boolean value.
    ///
    /// Performs a logical "and" operation on the current value and the argument
    /// `val`, and sets the new value to the result.
    ///
    /// Returns the previous value.
    #[inline]
    pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
        unsafe { ops::atomic_and(self.inner_ptr(), val, order) }
    }

    /// Logical "or" with a boolean value.
    ///
    /// Performs a logical "or" operation on the current value and the argument
    /// `val`, and sets the new value to the result.
    ///
    /// Returns the previous value.
    #[inline]
    pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
        unsafe { ops::atomic_or(self.inner_ptr(), val, order) }
    }

    /// Logical "xor" with a boolean value.
    ///
    /// Performs a logical "xor" operation on the current value and the argument
    /// `val`, and sets the new value to the result.
    ///
    /// Returns the previous value.
    #[inline]
    pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
        unsafe { ops::atomic_xor(self.inner_ptr(), val, order) }
    }
}
313
// Implements the arithmetic and bitwise fetch-and-modify operations that are
// shared by all integer types (signed and unsigned alike).
macro_rules! atomic_ops_common {
    ($($t:ty)*) => ($(
        impl Atomic<$t> {
            /// Add to the current value, returning the previous value.
            #[inline]
            pub fn fetch_add(&self, val: $t, order: Ordering) -> $t {
                unsafe { ops::atomic_add(self.inner_ptr(), val, order) }
            }

            /// Subtract from the current value, returning the previous value.
            #[inline]
            pub fn fetch_sub(&self, val: $t, order: Ordering) -> $t {
                unsafe { ops::atomic_sub(self.inner_ptr(), val, order) }
            }

            /// Bitwise and with the current value, returning the previous value.
            #[inline]
            pub fn fetch_and(&self, val: $t, order: Ordering) -> $t {
                unsafe { ops::atomic_and(self.inner_ptr(), val, order) }
            }

            /// Bitwise or with the current value, returning the previous value.
            #[inline]
            pub fn fetch_or(&self, val: $t, order: Ordering) -> $t {
                unsafe { ops::atomic_or(self.inner_ptr(), val, order) }
            }

            /// Bitwise xor with the current value, returning the previous value.
            #[inline]
            pub fn fetch_xor(&self, val: $t, order: Ordering) -> $t {
                unsafe { ops::atomic_xor(self.inner_ptr(), val, order) }
            }
        }
    )*);
}
// Common integer operations plus min/max using signed comparison
// (`ops::atomic_min` / `ops::atomic_max`).
macro_rules! atomic_ops_signed {
    ($($t:ty)*) => (
        atomic_ops_common!{ $($t)* }
        $(
            impl Atomic<$t> {
                /// Minimum with the current value.
                #[inline]
                pub fn fetch_min(&self, val: $t, order: Ordering) -> $t {
                    unsafe { ops::atomic_min(self.inner_ptr(), val, order) }
                }

                /// Maximum with the current value.
                #[inline]
                pub fn fetch_max(&self, val: $t, order: Ordering) -> $t {
                    unsafe { ops::atomic_max(self.inner_ptr(), val, order) }
                }
            }
        )*
    );
}
// Common integer operations plus min/max using unsigned comparison
// (`ops::atomic_umin` / `ops::atomic_umax`).
macro_rules! atomic_ops_unsigned {
    ($($t:ty)*) => (
        atomic_ops_common!{ $($t)* }
        $(
            impl Atomic<$t> {
                /// Minimum with the current value.
                #[inline]
                pub fn fetch_min(&self, val: $t, order: Ordering) -> $t {
                    unsafe { ops::atomic_umin(self.inner_ptr(), val, order) }
                }

                /// Maximum with the current value.
                #[inline]
                pub fn fetch_max(&self, val: $t, order: Ordering) -> $t {
                    unsafe { ops::atomic_umax(self.inner_ptr(), val, order) }
                }
            }
        )*
    );
}
// Instantiate the fetch-and-modify operations for every primitive integer type.
atomic_ops_signed! { i8 i16 i32 i64 isize i128 }
atomic_ops_unsigned! { u8 u16 u32 u64 usize u128 }
391
// Each integer test follows the same pattern: check `is_lock_free` against the
// platform's atomic support, then run a fixed chain of operations where each
// assertion checks the value returned by the *previous* operation.
#[cfg(test)]
mod tests {
    use super::{Atomic, Ordering::*};
    use core::mem;

    // Custom Copy payloads of various sizes: Foo is 2 bytes, Bar is 16 bytes
    // (expected to fall back to the locked implementation), Quux is 4 bytes.
    #[derive(Copy, Clone, Eq, PartialEq, Debug, Default)]
    struct Foo(u8, u8);
    #[derive(Copy, Clone, Eq, PartialEq, Debug, Default)]
    struct Bar(u64, u64);
    #[derive(Copy, Clone, Eq, PartialEq, Debug, Default)]
    struct Quux(u32);

    #[test]
    fn atomic_bool() {
        let a = Atomic::new(false);
        assert_eq!(
            Atomic::<bool>::is_lock_free(),
            cfg!(target_has_atomic = "8"),
        );
        assert_eq!(format!("{:?}", a), "Atomic(false)");
        assert_eq!(a.load(SeqCst), false);
        a.store(true, SeqCst);
        assert_eq!(a.swap(false, SeqCst), true);
        assert_eq!(a.compare_exchange(true, false, SeqCst, SeqCst), Err(false));
        assert_eq!(a.compare_exchange(false, true, SeqCst, SeqCst), Ok(false));
        assert_eq!(a.fetch_and(false, SeqCst), true);
        assert_eq!(a.fetch_or(true, SeqCst), false);
        assert_eq!(a.fetch_xor(false, SeqCst), true);
        assert_eq!(a.load(SeqCst), true);
    }

    #[test]
    fn atomic_i8() {
        let a = Atomic::new(0i8);
        assert_eq!(Atomic::<i8>::is_lock_free(), cfg!(target_has_atomic = "8"));
        assert_eq!(format!("{:?}", a), "Atomic(0)");
        assert_eq!(a.load(SeqCst), 0);
        a.store(1, SeqCst);
        assert_eq!(a.swap(2, SeqCst), 1);
        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
        assert_eq!(a.fetch_add(123, SeqCst), 3);
        // Make sure overflows are handled correctly
        // (126 - (-56) = 182 wraps to -74 in i8).
        assert_eq!(a.fetch_sub(-56, SeqCst), 126);
        assert_eq!(a.fetch_and(7, SeqCst), -74);
        assert_eq!(a.fetch_or(64, SeqCst), 6);
        assert_eq!(a.fetch_xor(1, SeqCst), 70);
        assert_eq!(a.fetch_min(30, SeqCst), 71);
        assert_eq!(a.fetch_max(-25, SeqCst), 30);
        assert_eq!(a.load(SeqCst), 30);
    }

    #[test]
    fn atomic_i16() {
        let a = Atomic::new(0i16);
        assert_eq!(
            Atomic::<i16>::is_lock_free(),
            cfg!(target_has_atomic = "16")
        );
        assert_eq!(format!("{:?}", a), "Atomic(0)");
        assert_eq!(a.load(SeqCst), 0);
        a.store(1, SeqCst);
        assert_eq!(a.swap(2, SeqCst), 1);
        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
        assert_eq!(a.fetch_add(123, SeqCst), 3);
        assert_eq!(a.fetch_sub(-56, SeqCst), 126);
        assert_eq!(a.fetch_and(7, SeqCst), 182);
        assert_eq!(a.fetch_or(64, SeqCst), 6);
        assert_eq!(a.fetch_xor(1, SeqCst), 70);
        assert_eq!(a.fetch_min(30, SeqCst), 71);
        assert_eq!(a.fetch_max(-25, SeqCst), 30);
        assert_eq!(a.load(SeqCst), 30);
    }

    #[test]
    fn atomic_i32() {
        let a = Atomic::new(0i32);
        assert_eq!(
            Atomic::<i32>::is_lock_free(),
            cfg!(target_has_atomic = "32")
        );
        assert_eq!(format!("{:?}", a), "Atomic(0)");
        assert_eq!(a.load(SeqCst), 0);
        a.store(1, SeqCst);
        assert_eq!(a.swap(2, SeqCst), 1);
        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
        assert_eq!(a.fetch_add(123, SeqCst), 3);
        assert_eq!(a.fetch_sub(-56, SeqCst), 126);
        assert_eq!(a.fetch_and(7, SeqCst), 182);
        assert_eq!(a.fetch_or(64, SeqCst), 6);
        assert_eq!(a.fetch_xor(1, SeqCst), 70);
        assert_eq!(a.fetch_min(30, SeqCst), 71);
        assert_eq!(a.fetch_max(-25, SeqCst), 30);
        assert_eq!(a.load(SeqCst), 30);
    }

    #[test]
    fn atomic_i64() {
        let a = Atomic::new(0i64);
        // 64-bit atomics additionally require 8-byte alignment of the type.
        assert_eq!(
            Atomic::<i64>::is_lock_free(),
            cfg!(target_has_atomic = "64") && mem::align_of::<i64>() == 8
        );
        assert_eq!(format!("{:?}", a), "Atomic(0)");
        assert_eq!(a.load(SeqCst), 0);
        a.store(1, SeqCst);
        assert_eq!(a.swap(2, SeqCst), 1);
        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
        assert_eq!(a.fetch_add(123, SeqCst), 3);
        assert_eq!(a.fetch_sub(-56, SeqCst), 126);
        assert_eq!(a.fetch_and(7, SeqCst), 182);
        assert_eq!(a.fetch_or(64, SeqCst), 6);
        assert_eq!(a.fetch_xor(1, SeqCst), 70);
        assert_eq!(a.fetch_min(30, SeqCst), 71);
        assert_eq!(a.fetch_max(-25, SeqCst), 30);
        assert_eq!(a.load(SeqCst), 30);
    }

    #[test]
    fn atomic_i128() {
        let a = Atomic::new(0i128);
        // 128-bit atomics are only available with the "nightly" feature.
        assert_eq!(
            Atomic::<i128>::is_lock_free(),
            cfg!(feature = "nightly") & cfg!(target_has_atomic = "128")
        );
        assert_eq!(format!("{:?}", a), "Atomic(0)");
        assert_eq!(a.load(SeqCst), 0);
        a.store(1, SeqCst);
        assert_eq!(a.swap(2, SeqCst), 1);
        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
        assert_eq!(a.fetch_add(123, SeqCst), 3);
        assert_eq!(a.fetch_sub(-56, SeqCst), 126);
        assert_eq!(a.fetch_and(7, SeqCst), 182);
        assert_eq!(a.fetch_or(64, SeqCst), 6);
        assert_eq!(a.fetch_xor(1, SeqCst), 70);
        assert_eq!(a.fetch_min(30, SeqCst), 71);
        assert_eq!(a.fetch_max(-25, SeqCst), 30);
        assert_eq!(a.load(SeqCst), 30);
    }

    #[test]
    fn atomic_isize() {
        let a = Atomic::new(0isize);
        assert_eq!(format!("{:?}", a), "Atomic(0)");
        assert_eq!(a.load(SeqCst), 0);
        a.store(1, SeqCst);
        assert_eq!(a.swap(2, SeqCst), 1);
        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
        assert_eq!(a.fetch_add(123, SeqCst), 3);
        assert_eq!(a.fetch_sub(-56, SeqCst), 126);
        assert_eq!(a.fetch_and(7, SeqCst), 182);
        assert_eq!(a.fetch_or(64, SeqCst), 6);
        assert_eq!(a.fetch_xor(1, SeqCst), 70);
        assert_eq!(a.fetch_min(30, SeqCst), 71);
        assert_eq!(a.fetch_max(-25, SeqCst), 30);
        assert_eq!(a.load(SeqCst), 30);
    }

    #[test]
    fn atomic_u8() {
        let a = Atomic::new(0u8);
        assert_eq!(Atomic::<u8>::is_lock_free(), cfg!(target_has_atomic = "8"));
        assert_eq!(format!("{:?}", a), "Atomic(0)");
        assert_eq!(a.load(SeqCst), 0);
        a.store(1, SeqCst);
        assert_eq!(a.swap(2, SeqCst), 1);
        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
        assert_eq!(a.fetch_add(123, SeqCst), 3);
        assert_eq!(a.fetch_sub(56, SeqCst), 126);
        assert_eq!(a.fetch_and(7, SeqCst), 70);
        assert_eq!(a.fetch_or(64, SeqCst), 6);
        assert_eq!(a.fetch_xor(1, SeqCst), 70);
        assert_eq!(a.fetch_min(30, SeqCst), 71);
        assert_eq!(a.fetch_max(25, SeqCst), 30);
        assert_eq!(a.load(SeqCst), 30);
    }

    #[test]
    fn atomic_u16() {
        let a = Atomic::new(0u16);
        assert_eq!(
            Atomic::<u16>::is_lock_free(),
            cfg!(target_has_atomic = "16")
        );
        assert_eq!(format!("{:?}", a), "Atomic(0)");
        assert_eq!(a.load(SeqCst), 0);
        a.store(1, SeqCst);
        assert_eq!(a.swap(2, SeqCst), 1);
        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
        assert_eq!(a.fetch_add(123, SeqCst), 3);
        assert_eq!(a.fetch_sub(56, SeqCst), 126);
        assert_eq!(a.fetch_and(7, SeqCst), 70);
        assert_eq!(a.fetch_or(64, SeqCst), 6);
        assert_eq!(a.fetch_xor(1, SeqCst), 70);
        assert_eq!(a.fetch_min(30, SeqCst), 71);
        assert_eq!(a.fetch_max(25, SeqCst), 30);
        assert_eq!(a.load(SeqCst), 30);
    }

    #[test]
    fn atomic_u32() {
        let a = Atomic::new(0u32);
        assert_eq!(
            Atomic::<u32>::is_lock_free(),
            cfg!(target_has_atomic = "32")
        );
        assert_eq!(format!("{:?}", a), "Atomic(0)");
        assert_eq!(a.load(SeqCst), 0);
        a.store(1, SeqCst);
        assert_eq!(a.swap(2, SeqCst), 1);
        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
        assert_eq!(a.fetch_add(123, SeqCst), 3);
        assert_eq!(a.fetch_sub(56, SeqCst), 126);
        assert_eq!(a.fetch_and(7, SeqCst), 70);
        assert_eq!(a.fetch_or(64, SeqCst), 6);
        assert_eq!(a.fetch_xor(1, SeqCst), 70);
        assert_eq!(a.fetch_min(30, SeqCst), 71);
        assert_eq!(a.fetch_max(25, SeqCst), 30);
        assert_eq!(a.load(SeqCst), 30);
    }

    #[test]
    fn atomic_u64() {
        let a = Atomic::new(0u64);
        assert_eq!(
            Atomic::<u64>::is_lock_free(),
            cfg!(target_has_atomic = "64") && mem::align_of::<u64>() == 8
        );
        assert_eq!(format!("{:?}", a), "Atomic(0)");
        assert_eq!(a.load(SeqCst), 0);
        a.store(1, SeqCst);
        assert_eq!(a.swap(2, SeqCst), 1);
        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
        assert_eq!(a.fetch_add(123, SeqCst), 3);
        assert_eq!(a.fetch_sub(56, SeqCst), 126);
        assert_eq!(a.fetch_and(7, SeqCst), 70);
        assert_eq!(a.fetch_or(64, SeqCst), 6);
        assert_eq!(a.fetch_xor(1, SeqCst), 70);
        assert_eq!(a.fetch_min(30, SeqCst), 71);
        assert_eq!(a.fetch_max(25, SeqCst), 30);
        assert_eq!(a.load(SeqCst), 30);
    }

    #[test]
    fn atomic_u128() {
        let a = Atomic::new(0u128);
        assert_eq!(
            Atomic::<u128>::is_lock_free(),
            cfg!(feature = "nightly") & cfg!(target_has_atomic = "128")
        );
        assert_eq!(format!("{:?}", a), "Atomic(0)");
        assert_eq!(a.load(SeqCst), 0);
        a.store(1, SeqCst);
        assert_eq!(a.swap(2, SeqCst), 1);
        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
        assert_eq!(a.fetch_add(123, SeqCst), 3);
        assert_eq!(a.fetch_sub(56, SeqCst), 126);
        assert_eq!(a.fetch_and(7, SeqCst), 70);
        assert_eq!(a.fetch_or(64, SeqCst), 6);
        assert_eq!(a.fetch_xor(1, SeqCst), 70);
        assert_eq!(a.fetch_min(30, SeqCst), 71);
        assert_eq!(a.fetch_max(25, SeqCst), 30);
        assert_eq!(a.load(SeqCst), 30);
    }

    #[test]
    fn atomic_usize() {
        let a = Atomic::new(0usize);
        assert_eq!(format!("{:?}", a), "Atomic(0)");
        assert_eq!(a.load(SeqCst), 0);
        a.store(1, SeqCst);
        assert_eq!(a.swap(2, SeqCst), 1);
        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
        assert_eq!(a.fetch_add(123, SeqCst), 3);
        assert_eq!(a.fetch_sub(56, SeqCst), 126);
        assert_eq!(a.fetch_and(7, SeqCst), 70);
        assert_eq!(a.fetch_or(64, SeqCst), 6);
        assert_eq!(a.fetch_xor(1, SeqCst), 70);
        assert_eq!(a.fetch_min(30, SeqCst), 71);
        assert_eq!(a.fetch_max(25, SeqCst), 30);
        assert_eq!(a.load(SeqCst), 30);
    }

    #[test]
    fn atomic_foo() {
        let a = Atomic::default();
        // 2-byte struct: intentionally not lock-free in this build.
        assert_eq!(Atomic::<Foo>::is_lock_free(), false);
        assert_eq!(format!("{:?}", a), "Atomic(Foo(0, 0))");
        assert_eq!(a.load(SeqCst), Foo(0, 0));
        a.store(Foo(1, 1), SeqCst);
        assert_eq!(a.swap(Foo(2, 2), SeqCst), Foo(1, 1));
        assert_eq!(
            a.compare_exchange(Foo(5, 5), Foo(45, 45), SeqCst, SeqCst),
            Err(Foo(2, 2))
        );
        assert_eq!(
            a.compare_exchange(Foo(2, 2), Foo(3, 3), SeqCst, SeqCst),
            Ok(Foo(2, 2))
        );
        assert_eq!(a.load(SeqCst), Foo(3, 3));
    }

    #[test]
    fn atomic_bar() {
        let a = Atomic::default();
        // 16-byte struct: expected to use the fallback implementation.
        assert_eq!(Atomic::<Bar>::is_lock_free(), false);
        assert_eq!(format!("{:?}", a), "Atomic(Bar(0, 0))");
        assert_eq!(a.load(SeqCst), Bar(0, 0));
        a.store(Bar(1, 1), SeqCst);
        assert_eq!(a.swap(Bar(2, 2), SeqCst), Bar(1, 1));
        assert_eq!(
            a.compare_exchange(Bar(5, 5), Bar(45, 45), SeqCst, SeqCst),
            Err(Bar(2, 2))
        );
        assert_eq!(
            a.compare_exchange(Bar(2, 2), Bar(3, 3), SeqCst, SeqCst),
            Ok(Bar(2, 2))
        );
        assert_eq!(a.load(SeqCst), Bar(3, 3));
    }

    #[test]
    fn atomic_quxx() {
        let a = Atomic::default();
        // 4-byte struct: lock-free iff 32-bit atomics are available.
        assert_eq!(
            Atomic::<Quux>::is_lock_free(),
            cfg!(target_has_atomic = "32")
        );
        assert_eq!(format!("{:?}", a), "Atomic(Quux(0))");
        assert_eq!(a.load(SeqCst), Quux(0));
        a.store(Quux(1), SeqCst);
        assert_eq!(a.swap(Quux(2), SeqCst), Quux(1));
        assert_eq!(
            a.compare_exchange(Quux(5), Quux(45), SeqCst, SeqCst),
            Err(Quux(2))
        );
        assert_eq!(
            a.compare_exchange(Quux(2), Quux(3), SeqCst, SeqCst),
            Ok(Quux(2))
        );
        assert_eq!(a.load(SeqCst), Quux(3));
    }
}