atomic/
ops.rs

1// Copyright 2016 Amanieu d'Antras
2//
3// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
4// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
5// http://opensource.org/licenses/MIT>, at your option. This file may not be
6// copied, modified, or distributed except according to those terms.
7
8#[cfg(feature = "fallback")]
9use crate::fallback;
10use core::cmp;
11use core::mem;
12use core::num::Wrapping;
13use core::ops;
14use core::sync::atomic::Ordering;
15
/// Dispatches `$impl` with `$atomic` bound to the *unsigned* standard atomic
/// type (`AtomicU8` .. `AtomicU128`) whose size matches `$type`, provided the
/// target supports an atomic of that width and `$type` is at least as aligned
/// as that width requires. Otherwise evaluates `$fallback_impl`, or panics at
/// runtime when the `fallback` feature is disabled.
macro_rules! match_atomic {
    ($type:ident, $atomic:ident, $impl:expr, $fallback_impl:expr) => {
        match mem::size_of::<$type>() {
            // Each arm exists only on targets that actually provide an atomic
            // of that width; the alignment guard ensures a `$type` in memory
            // can be reinterpreted in place as the atomic type.
            #[cfg(target_has_atomic = "8")]
            1 if mem::align_of::<$type>() >= 1 => {
                type $atomic = core::sync::atomic::AtomicU8;

                $impl
            }
            #[cfg(target_has_atomic = "16")]
            2 if mem::align_of::<$type>() >= 2 => {
                type $atomic = core::sync::atomic::AtomicU16;

                $impl
            }
            #[cfg(target_has_atomic = "32")]
            4 if mem::align_of::<$type>() >= 4 => {
                type $atomic = core::sync::atomic::AtomicU32;

                $impl
            }
            #[cfg(target_has_atomic = "64")]
            8 if mem::align_of::<$type>() >= 8 => {
                type $atomic = core::sync::atomic::AtomicU64;

                $impl
            }
            // 128-bit atomics are nightly-only, hence the extra feature gate.
            #[cfg(all(feature = "nightly", target_has_atomic = "128"))]
            16 if mem::align_of::<$type>() >= 16 => {
                type $atomic = core::sync::atomic::AtomicU128;

                $impl
            }
            // No native atomic of a matching size/alignment: use the
            // lock-based fallback, or fail loudly if it was compiled out.
            #[cfg(feature = "fallback")]
            _ => $fallback_impl,
            #[cfg(not(feature = "fallback"))]
            _ => panic!("Atomic operations for type `{}` are not available as the `fallback` feature of the `atomic` crate is disabled.", core::any::type_name::<$type>()),
        }
    };
}
56
/// Same dispatch as `match_atomic!`, but binds `$atomic` to the *signed*
/// standard atomic type (`AtomicI8` .. `AtomicI128`). Used where the
/// operation's semantics differ by signedness (e.g. `fetch_min`/`fetch_max`
/// perform a signed comparison on these types).
macro_rules! match_signed_atomic {
    ($type:ident, $atomic:ident, $impl:expr, $fallback_impl:expr) => {
        match mem::size_of::<$type>() {
            // Width arms mirror match_atomic!: only compiled where the target
            // has a native atomic, guarded by the type's alignment.
            #[cfg(target_has_atomic = "8")]
            1 if mem::align_of::<$type>() >= 1 => {
                type $atomic = core::sync::atomic::AtomicI8;

                $impl
            }
            #[cfg(target_has_atomic = "16")]
            2 if mem::align_of::<$type>() >= 2 => {
                type $atomic = core::sync::atomic::AtomicI16;

                $impl
            }
            #[cfg(target_has_atomic = "32")]
            4 if mem::align_of::<$type>() >= 4 => {
                type $atomic = core::sync::atomic::AtomicI32;

                $impl
            }
            #[cfg(target_has_atomic = "64")]
            8 if mem::align_of::<$type>() >= 8 => {
                type $atomic = core::sync::atomic::AtomicI64;

                $impl
            }
            // 128-bit atomics are nightly-only, hence the extra feature gate.
            #[cfg(all(feature = "nightly", target_has_atomic = "128"))]
            16 if mem::align_of::<$type>() >= 16 => {
                type $atomic = core::sync::atomic::AtomicI128;

                $impl
            }
            // No native atomic of a matching size/alignment: use the
            // lock-based fallback, or fail loudly if it was compiled out.
            #[cfg(feature = "fallback")]
            _ => $fallback_impl,
            #[cfg(not(feature = "fallback"))]
            _ => panic!("Atomic operations for type `{}` are not available as the `fallback` feature of the `atomic` crate is disabled.", core::any::type_name::<$type>()),
        }
    };
}
97
/// Returns `true` when operations on a `T` are implemented with native
/// lock-free atomics on this target: `T`'s size must equal a supported
/// atomic width and its alignment must be at least that width.
///
/// NOTE(review): the non-short-circuiting `&`/`|` operators are kept —
/// presumably to stay compatible with `const fn` restrictions on older
/// compilers; confirm against the crate's MSRV before "cleaning up".
#[inline]
pub const fn atomic_is_lock_free<T>() -> bool {
    let size = mem::size_of::<T>();
    let align = mem::align_of::<T>();

    // One flag per supported atomic width, mirroring the arms of the
    // `match_atomic!` dispatch macro.
    let lock_free_8 = cfg!(target_has_atomic = "8") & (size == 1) & (align >= 1);
    let lock_free_16 = cfg!(target_has_atomic = "16") & (size == 2) & (align >= 2);
    let lock_free_32 = cfg!(target_has_atomic = "32") & (size == 4) & (align >= 4);
    let lock_free_64 = cfg!(target_has_atomic = "64") & (size == 8) & (align >= 8);
    // 128-bit support additionally requires the crate's `nightly` feature.
    let lock_free_128 = cfg!(feature = "nightly")
        & cfg!(target_has_atomic = "128")
        & (size == 16)
        & (align >= 16);

    lock_free_8 | lock_free_16 | lock_free_32 | lock_free_64 | lock_free_128
}
112
/// Atomically loads and returns the value at `dst` with the given memory
/// `order`, using the native atomic type of matching size when available.
/// Note: the fallback path does not take `order` (the lock-based
/// implementation has its own synchronization).
///
/// # Safety
///
/// `dst` must be non-null, valid for reads and writes, properly aligned for
/// `T`, and concurrently accessed only through these atomic operations.
#[inline]
pub unsafe fn atomic_load<T>(dst: *mut T, order: Ordering) -> T {
    match_atomic!(
        T,
        A,
        // Reinterpret `*dst` as the same-sized atomic and copy the loaded
        // bits back out as a `T`.
        mem::transmute_copy(&(*(dst as *const A)).load(order)),
        fallback::atomic_load(dst)
    )
}
122
/// Atomically stores `val` into `dst` with the given memory `order`.
/// Note: the fallback path does not take `order`.
///
/// # Safety
///
/// `dst` must be non-null, valid for reads and writes, properly aligned for
/// `T`, and concurrently accessed only through these atomic operations.
#[inline]
pub unsafe fn atomic_store<T>(dst: *mut T, val: T, order: Ordering) {
    match_atomic!(
        T,
        A,
        // Bit-copy `val` into the atomic's integer representation and store.
        (*(dst as *const A)).store(mem::transmute_copy(&val), order),
        fallback::atomic_store(dst, val)
    )
}
132
/// Atomically replaces the value at `dst` with `val`, returning the previous
/// value. Note: the fallback path does not take `order`.
///
/// # Safety
///
/// `dst` must be non-null, valid for reads and writes, properly aligned for
/// `T`, and concurrently accessed only through these atomic operations.
#[inline]
pub unsafe fn atomic_swap<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match_atomic!(
        T,
        A,
        mem::transmute_copy(&(*(dst as *const A)).swap(mem::transmute_copy(&val), order)),
        fallback::atomic_swap(dst, val)
    )
}
142
/// Bit-reinterprets both payloads of a `Result<T, T>` as `U`, preserving the
/// `Ok`/`Err` variant. Used to convert the raw integer result of a
/// compare-exchange back into the caller's `T`.
#[inline]
unsafe fn map_result<T, U>(r: Result<T, T>) -> Result<U, U> {
    r.map(|v| mem::transmute_copy(&v))
        .map_err(|v| mem::transmute_copy(&v))
}
150
/// Atomically stores `new` into `dst` if the current value equals `current`
/// (bitwise comparison on the integer representation). Returns `Ok` with the
/// previous value on success, `Err` with the actual value on failure.
/// Note: the fallback path does not take the `success`/`failure` orderings.
///
/// # Safety
///
/// `dst` must be non-null, valid for reads and writes, properly aligned for
/// `T`, and concurrently accessed only through these atomic operations.
#[inline]
pub unsafe fn atomic_compare_exchange<T>(
    dst: *mut T,
    current: T,
    new: T,
    success: Ordering,
    failure: Ordering,
) -> Result<T, T> {
    match_atomic!(
        T,
        A,
        // `map_result` converts the atomic's integer Result back to `T`.
        map_result((*(dst as *const A)).compare_exchange(
            mem::transmute_copy(&current),
            mem::transmute_copy(&new),
            success,
            failure,
        )),
        fallback::atomic_compare_exchange(dst, current, new)
    )
}
171
/// Weak variant of [`atomic_compare_exchange`]: may spuriously fail (return
/// `Err` even when the value equals `current`), which allows more efficient
/// code on some platforms; intended for use inside retry loops.
/// Note: the fallback path is the strong compare-exchange, which is a valid
/// (never-spuriously-failing) implementation of the weak contract.
///
/// # Safety
///
/// `dst` must be non-null, valid for reads and writes, properly aligned for
/// `T`, and concurrently accessed only through these atomic operations.
#[inline]
pub unsafe fn atomic_compare_exchange_weak<T>(
    dst: *mut T,
    current: T,
    new: T,
    success: Ordering,
    failure: Ordering,
) -> Result<T, T> {
    match_atomic!(
        T,
        A,
        map_result((*(dst as *const A)).compare_exchange_weak(
            mem::transmute_copy(&current),
            mem::transmute_copy(&new),
            success,
            failure,
        )),
        fallback::atomic_compare_exchange(dst, current, new)
    )
}
192
/// Atomically adds `val` to the value at `dst`, returning the previous value.
/// The `Wrapping` bound reflects that the native `fetch_add` wraps on
/// overflow.
///
/// # Safety
///
/// `dst` must be non-null, valid for reads and writes, properly aligned for
/// `T`, and concurrently accessed only through these atomic operations.
#[inline]
pub unsafe fn atomic_add<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T
where
    Wrapping<T>: ops::Add<Output = Wrapping<T>>,
{
    match_atomic!(
        T,
        A,
        mem::transmute_copy(&(*(dst as *const A)).fetch_add(mem::transmute_copy(&val), order),),
        fallback::atomic_add(dst, val)
    )
}
205
/// Atomically subtracts `val` from the value at `dst`, returning the previous
/// value. The `Wrapping` bound reflects that the native `fetch_sub` wraps on
/// overflow.
///
/// # Safety
///
/// `dst` must be non-null, valid for reads and writes, properly aligned for
/// `T`, and concurrently accessed only through these atomic operations.
#[inline]
pub unsafe fn atomic_sub<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T
where
    Wrapping<T>: ops::Sub<Output = Wrapping<T>>,
{
    match_atomic!(
        T,
        A,
        mem::transmute_copy(&(*(dst as *const A)).fetch_sub(mem::transmute_copy(&val), order),),
        fallback::atomic_sub(dst, val)
    )
}
218
/// Atomically bitwise-ANDs `val` with the value at `dst`, returning the
/// previous value.
///
/// # Safety
///
/// `dst` must be non-null, valid for reads and writes, properly aligned for
/// `T`, and concurrently accessed only through these atomic operations.
#[inline]
pub unsafe fn atomic_and<T: Copy + ops::BitAnd<Output = T>>(
    dst: *mut T,
    val: T,
    order: Ordering,
) -> T {
    match_atomic!(
        T,
        A,
        mem::transmute_copy(&(*(dst as *const A)).fetch_and(mem::transmute_copy(&val), order),),
        fallback::atomic_and(dst, val)
    )
}
232
/// Atomically bitwise-ORs `val` with the value at `dst`, returning the
/// previous value.
///
/// # Safety
///
/// `dst` must be non-null, valid for reads and writes, properly aligned for
/// `T`, and concurrently accessed only through these atomic operations.
#[inline]
pub unsafe fn atomic_or<T: Copy + ops::BitOr<Output = T>>(
    dst: *mut T,
    val: T,
    order: Ordering,
) -> T {
    match_atomic!(
        T,
        A,
        mem::transmute_copy(&(*(dst as *const A)).fetch_or(mem::transmute_copy(&val), order),),
        fallback::atomic_or(dst, val)
    )
}
246
/// Atomically bitwise-XORs `val` with the value at `dst`, returning the
/// previous value.
///
/// # Safety
///
/// `dst` must be non-null, valid for reads and writes, properly aligned for
/// `T`, and concurrently accessed only through these atomic operations.
#[inline]
pub unsafe fn atomic_xor<T: Copy + ops::BitXor<Output = T>>(
    dst: *mut T,
    val: T,
    order: Ordering,
) -> T {
    match_atomic!(
        T,
        A,
        mem::transmute_copy(&(*(dst as *const A)).fetch_xor(mem::transmute_copy(&val), order),),
        fallback::atomic_xor(dst, val)
    )
}
260
/// Atomically stores the minimum of `val` and the value at `dst`, returning
/// the previous value. Dispatches through `match_signed_atomic!`, so the
/// native path performs a *signed* comparison — callers must only use this
/// for types whose `Ord` matches signed integer comparison of their bits.
///
/// # Safety
///
/// `dst` must be non-null, valid for reads and writes, properly aligned for
/// `T`, and concurrently accessed only through these atomic operations.
#[inline]
pub unsafe fn atomic_min<T: Copy + cmp::Ord>(dst: *mut T, val: T, order: Ordering) -> T {
    match_signed_atomic!(
        T,
        A,
        mem::transmute_copy(&(*(dst as *const A)).fetch_min(mem::transmute_copy(&val), order),),
        fallback::atomic_min(dst, val)
    )
}
270
/// Atomically stores the maximum of `val` and the value at `dst`, returning
/// the previous value. Dispatches through `match_signed_atomic!`, so the
/// native path performs a *signed* comparison — callers must only use this
/// for types whose `Ord` matches signed integer comparison of their bits.
///
/// # Safety
///
/// `dst` must be non-null, valid for reads and writes, properly aligned for
/// `T`, and concurrently accessed only through these atomic operations.
#[inline]
pub unsafe fn atomic_max<T: Copy + cmp::Ord>(dst: *mut T, val: T, order: Ordering) -> T {
    match_signed_atomic!(
        T,
        A,
        mem::transmute_copy(&(*(dst as *const A)).fetch_max(mem::transmute_copy(&val), order),),
        fallback::atomic_max(dst, val)
    )
}
280
/// Unsigned counterpart of [`atomic_min`]: dispatches through
/// `match_atomic!` (the `AtomicU*` types), so the native path performs an
/// *unsigned* comparison. Returns the previous value.
///
/// # Safety
///
/// `dst` must be non-null, valid for reads and writes, properly aligned for
/// `T`, and concurrently accessed only through these atomic operations.
#[inline]
pub unsafe fn atomic_umin<T: Copy + cmp::Ord>(dst: *mut T, val: T, order: Ordering) -> T {
    match_atomic!(
        T,
        A,
        mem::transmute_copy(&(*(dst as *const A)).fetch_min(mem::transmute_copy(&val), order),),
        fallback::atomic_min(dst, val)
    )
}
290
/// Unsigned counterpart of [`atomic_max`]: dispatches through
/// `match_atomic!` (the `AtomicU*` types), so the native path performs an
/// *unsigned* comparison. Returns the previous value.
///
/// # Safety
///
/// `dst` must be non-null, valid for reads and writes, properly aligned for
/// `T`, and concurrently accessed only through these atomic operations.
#[inline]
pub unsafe fn atomic_umax<T: Copy + cmp::Ord>(dst: *mut T, val: T, order: Ordering) -> T {
    match_atomic!(
        T,
        A,
        mem::transmute_copy(&(*(dst as *const A)).fetch_max(mem::transmute_copy(&val), order),),
        fallback::atomic_max(dst, val)
    )
}