vm_memory/
atomic_integer.rs

// Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause

use std::sync::atomic::Ordering;

/// # Safety
///
/// Objects that implement this trait must consist exclusively of atomic types
/// from [`std::sync::atomic`](https://doc.rust-lang.org/std/sync/atomic/), except for
/// [`AtomicPtr<T>`](https://doc.rust-lang.org/std/sync/atomic/struct.AtomicPtr.html) and
/// [`AtomicBool`](https://doc.rust-lang.org/std/sync/atomic/struct.AtomicBool.html).
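///
/// # Example
///
/// A minimal sketch of writing code that is generic over this trait; the
/// `relaxed_copy` helper is illustrative only and not part of this crate.
///
/// ```ignore
/// use std::sync::atomic::{AtomicU16, Ordering};
///
/// // Copy the current value of `src` into `dst`, using relaxed ordering for both accesses.
/// fn relaxed_copy<A: AtomicInteger>(src: &A, dst: &A) {
///     dst.store(src.load(Ordering::Relaxed), Ordering::Relaxed);
/// }
///
/// let src = AtomicU16::new(7);
/// let dst = AtomicU16::new(0);
/// relaxed_copy(&src, &dst);
/// assert_eq!(dst.load(Ordering::Relaxed), 7);
/// ```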
pub unsafe trait AtomicInteger: Sync + Send {
    /// The raw value type associated with the atomic integer (i.e. `u16` for `AtomicU16`).
    type V;

    /// Create a new instance of `Self`.
    fn new(v: Self::V) -> Self;

    /// Loads a value from the atomic integer.
    fn load(&self, order: Ordering) -> Self::V;

    /// Stores a value into the atomic integer.
    fn store(&self, val: Self::V, order: Ordering);
}

macro_rules! impl_atomic_integer_ops {
    ($T:path, $V:ty) => {
        // SAFETY: This is safe as long as `$T` is an atomic type.
        // This is a helper macro for generating the implementation for common
        // atomic types.
        unsafe impl AtomicInteger for $T {
            type V = $V;

            fn new(v: Self::V) -> Self {
                Self::new(v)
            }

            fn load(&self, order: Ordering) -> Self::V {
                self.load(order)
            }

            fn store(&self, val: Self::V, order: Ordering) {
                self.store(val, order)
            }
        }
    };
}
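
// For reference, an invocation like `impl_atomic_integer_ops!(std::sync::atomic::AtomicU16, u16);`
// expands to roughly the following impl (sketch only; the macro above is authoritative):
//
//     unsafe impl AtomicInteger for std::sync::atomic::AtomicU16 {
//         type V = u16;
//         fn new(v: u16) -> Self { Self::new(v) }
//         fn load(&self, order: Ordering) -> u16 { self.load(order) }
//         fn store(&self, val: u16, order: Ordering) { self.store(val, order) }
//     }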

// TODO: Detect availability using #[cfg(target_has_atomic)] when it is stabilized.
// Right now we essentially assume we're running on either x86 or Arm (32 or 64 bit). AFAIK,
// Rust starts using additional synchronization primitives to implement atomics when they're
// not natively available, and that doesn't interact safely with how we cast pointers to
// atomic value references. We should be wary of this when looking at a broader range of
// platforms.

impl_atomic_integer_ops!(std::sync::atomic::AtomicI8, i8);
impl_atomic_integer_ops!(std::sync::atomic::AtomicI16, i16);
impl_atomic_integer_ops!(std::sync::atomic::AtomicI32, i32);
#[cfg(any(
    target_arch = "x86_64",
    target_arch = "aarch64",
    target_arch = "powerpc64",
    target_arch = "s390x"
))]
impl_atomic_integer_ops!(std::sync::atomic::AtomicI64, i64);

impl_atomic_integer_ops!(std::sync::atomic::AtomicU8, u8);
impl_atomic_integer_ops!(std::sync::atomic::AtomicU16, u16);
impl_atomic_integer_ops!(std::sync::atomic::AtomicU32, u32);
#[cfg(any(
    target_arch = "x86_64",
    target_arch = "aarch64",
    target_arch = "powerpc64",
    target_arch = "s390x"
))]
impl_atomic_integer_ops!(std::sync::atomic::AtomicU64, u64);

impl_atomic_integer_ops!(std::sync::atomic::AtomicIsize, isize);
impl_atomic_integer_ops!(std::sync::atomic::AtomicUsize, usize);

#[cfg(test)]
mod tests {
    use super::*;

    use std::fmt::Debug;
    use std::sync::atomic::AtomicU32;

    fn check_atomic_integer_ops<A: AtomicInteger>()
    where
        A::V: Copy + Debug + From<u8> + PartialEq,
    {
        let v = A::V::from(0);
        let a = A::new(v);
        assert_eq!(a.load(Ordering::Relaxed), v);

        let v2 = A::V::from(100);
        a.store(v2, Ordering::Relaxed);
        assert_eq!(a.load(Ordering::Relaxed), v2);
    }

    #[test]
    fn test_atomic_integer_ops() {
        check_atomic_integer_ops::<AtomicU32>()
    }
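
    // Illustrative extra coverage: exercise a few more of the atomic types implemented
    // above through the same generic helper.
    #[test]
    fn test_atomic_integer_ops_more_types() {
        check_atomic_integer_ops::<std::sync::atomic::AtomicU8>();
        check_atomic_integer_ops::<std::sync::atomic::AtomicI16>();
        check_atomic_integer_ops::<std::sync::atomic::AtomicUsize>();
    }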
}