atomic/fallback.rs

// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.

use core::cmp;
use core::hint;
use core::mem;
use core::num::Wrapping;
use core::ops;
use core::ptr;
use core::slice;
use core::sync::atomic::{AtomicUsize, Ordering};

// We use an AtomicUsize instead of an AtomicBool because it performs better
// on architectures that don't have byte-sized atomics.
//
// We give each spinlock its own cache line to avoid false sharing.
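// (64 bytes matches the cache-line size of most mainstream processors; the
// alignment below bakes in that assumption rather than querying it at runtime.)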
#[repr(align(64))]
struct SpinLock(AtomicUsize);

impl SpinLock {
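    // Acquire the lock with a test-and-test-and-set loop: attempt a weak
    // compare-exchange, and on failure spin on a relaxed load until the lock
    // looks free again. Spinning on the plain load keeps the cache line in a
    // shared state instead of bouncing it between cores on every attempt.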
    fn lock(&self) {
        while self
            .0
            .compare_exchange_weak(0, 1, Ordering::Acquire, Ordering::Relaxed)
            .is_err()
        {
            while self.0.load(Ordering::Relaxed) != 0 {
                hint::spin_loop();
            }
        }
    }

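    // Release the lock. The Release store synchronizes with the Acquire
    // compare-exchange in `lock`, so writes made inside the critical section
    // are visible to the next holder.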
    fn unlock(&self) {
        self.0.store(0, Ordering::Release);
    }
}

// A big array of spinlocks which we use to guard atomic accesses. A spinlock is
// chosen based on a hash of the address of the atomic object, which helps to
// reduce contention compared to a single global lock.
macro_rules! array {
    (@accum (0, $($_es:expr),*) -> ($($body:tt)*))
        => {array!(@as_expr [$($body)*])};
    (@accum (1, $($es:expr),*) -> ($($body:tt)*))
        => {array!(@accum (0, $($es),*) -> ($($body)* $($es,)*))};
    (@accum (2, $($es:expr),*) -> ($($body:tt)*))
        => {array!(@accum (0, $($es),*) -> ($($body)* $($es,)* $($es,)*))};
    (@accum (4, $($es:expr),*) -> ($($body:tt)*))
        => {array!(@accum (2, $($es,)* $($es),*) -> ($($body)*))};
    (@accum (8, $($es:expr),*) -> ($($body:tt)*))
        => {array!(@accum (4, $($es,)* $($es),*) -> ($($body)*))};
    (@accum (16, $($es:expr),*) -> ($($body:tt)*))
        => {array!(@accum (8, $($es,)* $($es),*) -> ($($body)*))};
    (@accum (32, $($es:expr),*) -> ($($body:tt)*))
        => {array!(@accum (16, $($es,)* $($es),*) -> ($($body)*))};
    (@accum (64, $($es:expr),*) -> ($($body:tt)*))
        => {array!(@accum (32, $($es,)* $($es),*) -> ($($body)*))};

    (@as_expr $e:expr) => {$e};

    [$e:expr; $n:tt] => { array!(@accum ($n, $e) -> ()) };
}
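// The macro is needed because the `[expr; 64]` array-repeat syntax requires
// the element to be `Copy` (or a named constant), and `SpinLock` holds an
// `AtomicUsize`, which is not `Copy`. As an illustration of the expansion for
// a small count: `array![e; 4]` becomes `@accum (4, e)`, which doubles the
// element list while halving the count, giving `@accum (2, e, e)`; the `2`
// rule then emits the list twice into the accumulator, and the `0` rule wraps
// the accumulated tokens as the final expression `[e, e, e, e]`.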
static SPINLOCKS: [SpinLock; 64] = array![SpinLock(AtomicUsize::new(0)); 64];

// Spinlock pointer hashing function from compiler-rt
#[inline]
fn lock_for_addr(addr: usize) -> &'static SpinLock {
    // Disregard the lowest 4 bits.  We want all values that may be part of the
    // same memory operation to hash to the same value and therefore use the same
    // lock.
    let mut hash = addr >> 4;
    // Use the next bits as the basis for the hash
    let low = hash & (SPINLOCKS.len() - 1);
    // Now use the high(er) set of bits to perturb the hash, so that we don't
    // get collisions from atomic fields in a single object
    hash >>= 16;
    hash ^= low;
    // Return a pointer to the lock to use
    &SPINLOCKS[hash & (SPINLOCKS.len() - 1)]
}
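// A worked example (hypothetical address, for illustration only): for
// addr = 0x1234_5678, hash = addr >> 4 = 0x0123_4567, low = hash & 63 = 0x27,
// hash >> 16 = 0x0123, and 0x0123 ^ 0x27 = 0x104, so the final index is
// 0x104 & 63 = 4 and the address maps to SPINLOCKS[4].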

#[inline]
fn lock(addr: usize) -> LockGuard {
    let lock = lock_for_addr(addr);
    lock.lock();
    LockGuard(lock)
}

struct LockGuard(&'static SpinLock);
impl Drop for LockGuard {
    #[inline]
    fn drop(&mut self) {
        self.0.unlock();
    }
}
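// LockGuard is an RAII guard: binding the return value of `lock` to a local
// (`let _l = ...` in the functions below) holds the spinlock for the rest of
// the scope and releases it in `drop`, even on an early return or panic. Note
// that `let _ = ...` would not work, since the wildcard pattern drops the
// guard immediately.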

#[inline]
pub unsafe fn atomic_load<T>(dst: *mut T) -> T {
    let _l = lock(dst as usize);
    ptr::read(dst)
}

#[inline]
pub unsafe fn atomic_store<T>(dst: *mut T, val: T) {
    let _l = lock(dst as usize);
    ptr::write(dst, val);
}

#[inline]
pub unsafe fn atomic_swap<T>(dst: *mut T, val: T) -> T {
    let _l = lock(dst as usize);
    ptr::replace(dst, val)
}

#[inline]
pub unsafe fn atomic_compare_exchange<T>(dst: *mut T, current: T, new: T) -> Result<T, T> {
    let _l = lock(dst as usize);
    let result = ptr::read(dst);
    // compare_exchange compares with memcmp instead of Eq
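    // (a byte-wise comparison, so any padding bytes in T would take part in
    // it; the intent is that T is a plain-data type without padding)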
    let a = slice::from_raw_parts(&result as *const _ as *const u8, mem::size_of_val(&result));
    let b = slice::from_raw_parts(
        &current as *const _ as *const u8,
        mem::size_of_val(&current),
    );
    if a == b {
        ptr::write(dst, new);
        Ok(result)
    } else {
        Err(result)
    }
}

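// The read-modify-write operations below all follow one pattern: take the
// address-hashed lock, read the old value, write the updated value, and return
// the old value. `Wrapping` is used for add/sub so that overflow wraps around,
// matching the wrapping behavior of the native `fetch_add`/`fetch_sub`.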
#[inline]
pub unsafe fn atomic_add<T: Copy>(dst: *mut T, val: T) -> T
where
    Wrapping<T>: ops::Add<Output = Wrapping<T>>,
{
    let _l = lock(dst as usize);
    let result = ptr::read(dst);
    ptr::write(dst, (Wrapping(result) + Wrapping(val)).0);
    result
}

#[inline]
pub unsafe fn atomic_sub<T: Copy>(dst: *mut T, val: T) -> T
where
    Wrapping<T>: ops::Sub<Output = Wrapping<T>>,
{
    let _l = lock(dst as usize);
    let result = ptr::read(dst);
    ptr::write(dst, (Wrapping(result) - Wrapping(val)).0);
    result
}

#[inline]
pub unsafe fn atomic_and<T: Copy + ops::BitAnd<Output = T>>(dst: *mut T, val: T) -> T {
    let _l = lock(dst as usize);
    let result = ptr::read(dst);
    ptr::write(dst, result & val);
    result
}

#[inline]
pub unsafe fn atomic_or<T: Copy + ops::BitOr<Output = T>>(dst: *mut T, val: T) -> T {
    let _l = lock(dst as usize);
    let result = ptr::read(dst);
    ptr::write(dst, result | val);
    result
}

#[inline]
pub unsafe fn atomic_xor<T: Copy + ops::BitXor<Output = T>>(dst: *mut T, val: T) -> T {
    let _l = lock(dst as usize);
    let result = ptr::read(dst);
    ptr::write(dst, result ^ val);
    result
}

#[inline]
pub unsafe fn atomic_min<T: Copy + cmp::Ord>(dst: *mut T, val: T) -> T {
    let _l = lock(dst as usize);
    let result = ptr::read(dst);
    ptr::write(dst, cmp::min(result, val));
    result
}

#[inline]
pub unsafe fn atomic_max<T: Copy + cmp::Ord>(dst: *mut T, val: T) -> T {
    let _l = lock(dst as usize);
    let result = ptr::read(dst);
    ptr::write(dst, cmp::max(result, val));
    result
}
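
// A minimal usage sketch (an illustrative addition, not part of the original
// file), assuming a test runner with `std` available. It exercises the
// fallback operations on a single thread; the `unsafe` contract assumed here
// is that `dst` is valid for reads and writes and that all concurrent accesses
// to the location go through these functions, so the address-hashed spinlock
// can serialize them.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn lock_based_ops_behave_like_native_atomics() {
        let mut v: u64 = 10;
        unsafe {
            assert_eq!(atomic_load(&mut v), 10);
            // Read-modify-write operations return the previous value.
            assert_eq!(atomic_add(&mut v, 5), 10);
            assert_eq!(atomic_load(&mut v), 15);
            assert_eq!(atomic_swap(&mut v, 1), 15);
            // compare_exchange succeeds only if the current bytes match.
            assert_eq!(atomic_compare_exchange(&mut v, 1, 2), Ok(1));
            assert_eq!(atomic_compare_exchange(&mut v, 1, 3), Err(2));
            assert_eq!(atomic_max(&mut v, 7), 2);
            assert_eq!(atomic_load(&mut v), 7);
        }
    }
}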