1#![cfg_attr(not(all(test, feature = "float")), allow(dead_code, unused_macros))]
4
5#[macro_use]
6#[path = "gen/utils.rs"]
7mod gen;
8
9use core::sync::atomic::Ordering;
10
/// Asserts that the given expression is true at compile time.
///
/// Fails compilation (not at runtime) when the condition is false:
/// `true as usize - cond as usize` underflows in const context when
/// `cond` is false, which is a const-eval error.
macro_rules! static_assert {
    ($cond:expr $(,)?) => {{
        // `[(); 0]` matches the `[]` pattern only when the length
        // expression evaluates to 0, i.e. when `$cond` is true.
        let [] = [(); true as usize - $crate::utils::_assert_is_bool($cond) as usize];
    }};
}
/// Identity function used by `static_assert!` to force its argument to
/// have type `bool` (a non-bool would fail to coerce here).
pub(crate) const fn _assert_is_bool(v: bool) -> bool {
    v
}
19
/// Asserts at compile time that `$atomic_type` is "size-aligned"
/// (alignment equals size) and has the same size as `$value_type`.
macro_rules! static_assert_layout {
    ($atomic_type:ty, $value_type:ty) => {
        static_assert!(
            core::mem::align_of::<$atomic_type>() == core::mem::size_of::<$atomic_type>()
        );
        static_assert!(core::mem::size_of::<$atomic_type>() == core::mem::size_of::<$value_type>());
    };
}
28
/// Attaches `$doc` as a `#[doc = ...]` attribute to the following
/// item(s), allowing doc comments to be built from macro input.
macro_rules! doc_comment {
    ($doc:expr, $($tt:tt)*) => {
        #[doc = $doc]
        $($tt)*
    };
}
36
// "ifunc"-style runtime function dispatch: the atomic `FUNC` pointer
// initially holds `detect`, which runs `$detect_body` once to select the
// real implementation, caches it, and forwards the call; subsequent calls
// jump straight to the cached function pointer.
//
// NOTE(review): the `transmute` and the final call are not wrapped in
// `unsafe` here, so this macro must be expanded inside an unsafe
// context by its callers — confirm at call sites.
#[allow(unused_macros)]
#[cfg(not(portable_atomic_no_outline_atomics))]
#[cfg(any(
    target_arch = "aarch64",
    target_arch = "arm",
    target_arch = "arm64ec",
    target_arch = "powerpc64",
    target_arch = "riscv32",
    target_arch = "riscv64",
    all(target_arch = "x86_64", not(any(target_env = "sgx", miri))),
))]
macro_rules! ifunc {
    (unsafe fn($($arg_pat:ident: $arg_ty:ty),*) $(-> $ret_ty:ty)? { $($detect_body:tt)* }) => {{
        type FnTy = unsafe fn($($arg_ty),*) $(-> $ret_ty)?;
        static FUNC: core::sync::atomic::AtomicPtr<()>
            = core::sync::atomic::AtomicPtr::new(detect as *mut ());
        #[cold]
        unsafe fn detect($($arg_pat: $arg_ty),*) $(-> $ret_ty)? {
            // Run the user-provided detection code to choose the
            // implementation, then cache it for later calls.
            let func: FnTy = { $($detect_body)* };
            // Relaxed suffices: any pointer a thread can observe here
            // (including `detect` itself) is a valid function to call.
            FUNC.store(func as *mut (), core::sync::atomic::Ordering::Relaxed);
            // SAFETY: the caller upholds the safety contract of the
            // selected function (it has the same signature).
            unsafe { func($($arg_pat),*) }
        }
        let func = {
            core::mem::transmute::<*mut (), FnTy>(FUNC.load(core::sync::atomic::Ordering::Relaxed))
        };
        func($($arg_pat),*)
    }};
}
80
// Generates thin function aliases: each `$new = $from($last_args);` entry
// expands to `$vis unsafe fn $new(shared params)` that forwards its
// parameters to `$from`, appending the fixed `$last_args` after them.
// Entries are processed one at a time by recursion; the second rule (no
// entries left) terminates the recursion.
#[allow(unused_macros)]
#[cfg(not(portable_atomic_no_outline_atomics))]
#[cfg(any(
    target_arch = "aarch64",
    target_arch = "arm",
    target_arch = "arm64ec",
    target_arch = "powerpc64",
    target_arch = "riscv32",
    target_arch = "riscv64",
    all(target_arch = "x86_64", not(any(target_env = "sgx", miri))),
))]
macro_rules! fn_alias {
    (
        $(#[$($fn_attr:tt)*])*
        $vis:vis unsafe fn($($arg_pat:ident: $arg_ty:ty),*) $(-> $ret_ty:ty)?;
        $(#[$($alias_attr:tt)*])*
        $new:ident = $from:ident($($last_args:tt)*);
        $($rest:tt)*
    ) => {
        $(#[$($fn_attr)*])*
        $(#[$($alias_attr)*])*
        $vis unsafe fn $new($($arg_pat: $arg_ty),*) $(-> $ret_ty)? {
            // SAFETY: the caller upholds `$from`'s safety contract.
            unsafe { $from($($arg_pat,)* $($last_args)*) }
        }
        fn_alias! {
            $(#[$($fn_attr)*])*
            $vis unsafe fn($($arg_pat: $arg_ty),*) $(-> $ret_ty)?;
            $($rest)*
        }
    };
    (
        $(#[$($attr:tt)*])*
        $vis:vis unsafe fn($($arg_pat:ident: $arg_ty:ty),*) $(-> $ret_ty:ty)?;
    ) => {}
}
117
// Emits the given function twice, gated on `$cfg`: as a `const fn` when
// the cfg is enabled, and as a plain `fn` otherwise. This lets an item be
// `const` only under configurations where that compiles.
macro_rules! const_fn {
    (
        const_if: #[cfg($($cfg:tt)+)];
        $(#[$($attr:tt)*])*
        $vis:vis const $($rest:tt)*
    ) => {
        #[cfg($($cfg)+)]
        $(#[$($attr)*])*
        $vis const $($rest)*
        #[cfg(not($($cfg)+))]
        $(#[$($attr)*])*
        $vis $($rest)*
    };
}
133
// Implements `Debug` and, behind the `serde` feature, `Serialize` and
// `Deserialize` for the given atomic type. `Debug`/`Serialize` format or
// serialize the result of `self.load(Ordering::Relaxed)`; `Deserialize`
// deserializes the inner value and wraps it with `Self::new`.
macro_rules! impl_debug_and_serde {
    ($atomic_type:ident) => {
        impl fmt::Debug for $atomic_type {
            #[inline]
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                fmt::Debug::fmt(&self.load(Ordering::Relaxed), f)
            }
        }
        #[cfg(feature = "serde")]
        #[cfg_attr(docsrs, doc(cfg(feature = "serde")))]
        impl serde::ser::Serialize for $atomic_type {
            #[allow(clippy::missing_inline_in_public_items)]
            fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
            where
                S: serde::ser::Serializer,
            {
                self.load(Ordering::Relaxed).serialize(serializer)
            }
        }
        #[cfg(feature = "serde")]
        #[cfg_attr(docsrs, doc(cfg(feature = "serde")))]
        impl<'de> serde::de::Deserialize<'de> for $atomic_type {
            #[allow(clippy::missing_inline_in_public_items)]
            fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
            where
                D: serde::de::Deserializer<'de>,
            {
                serde::de::Deserialize::deserialize(deserializer).map(Self::new)
            }
        }
    };
}
170
// Implements value-discarding convenience operations on top of the
// corresponding `fetch_*` methods of the given atomic type. The `bool`
// arm generates only the bitwise ops (`and`/`or`/`xor`); the integer arm
// additionally generates `add`/`sub`.
macro_rules! impl_default_no_fetch_ops {
    ($atomic_type:ident, bool) => {
        impl $atomic_type {
            #[inline]
            #[cfg_attr(miri, track_caller)]
            pub(crate) fn and(&self, val: bool, order: Ordering) {
                self.fetch_and(val, order);
            }
            #[inline]
            #[cfg_attr(miri, track_caller)]
            pub(crate) fn or(&self, val: bool, order: Ordering) {
                self.fetch_or(val, order);
            }
            #[inline]
            #[cfg_attr(miri, track_caller)]
            pub(crate) fn xor(&self, val: bool, order: Ordering) {
                self.fetch_xor(val, order);
            }
        }
    };
    ($atomic_type:ident, $int_type:ty) => {
        impl $atomic_type {
            #[inline]
            #[cfg_attr(miri, track_caller)]
            pub(crate) fn add(&self, val: $int_type, order: Ordering) {
                self.fetch_add(val, order);
            }
            #[inline]
            #[cfg_attr(miri, track_caller)]
            pub(crate) fn sub(&self, val: $int_type, order: Ordering) {
                self.fetch_sub(val, order);
            }
            #[inline]
            #[cfg_attr(miri, track_caller)]
            pub(crate) fn and(&self, val: $int_type, order: Ordering) {
                self.fetch_and(val, order);
            }
            #[inline]
            #[cfg_attr(miri, track_caller)]
            pub(crate) fn or(&self, val: $int_type, order: Ordering) {
                self.fetch_or(val, order);
            }
            #[inline]
            #[cfg_attr(miri, track_caller)]
            pub(crate) fn xor(&self, val: $int_type, order: Ordering) {
                self.fetch_xor(val, order);
            }
        }
    };
}
// Implements single-bit operations (`bit_set`/`bit_clear`/`bit_toggle`)
// on top of `fetch_or`/`fetch_and`/`fetch_xor`. Each returns whether the
// bit was previously set. `wrapping_shl` masks the shift amount by the
// bit width, so out-of-range `bit` values wrap rather than panic.
macro_rules! impl_default_bit_opts {
    ($atomic_type:ident, $int_type:ty) => {
        impl $atomic_type {
            #[inline]
            #[cfg_attr(miri, track_caller)]
            pub(crate) fn bit_set(&self, bit: u32, order: Ordering) -> bool {
                let mask = <$int_type>::wrapping_shl(1, bit);
                self.fetch_or(mask, order) & mask != 0
            }
            #[inline]
            #[cfg_attr(miri, track_caller)]
            pub(crate) fn bit_clear(&self, bit: u32, order: Ordering) -> bool {
                let mask = <$int_type>::wrapping_shl(1, bit);
                self.fetch_and(!mask, order) & mask != 0
            }
            #[inline]
            #[cfg_attr(miri, track_caller)]
            pub(crate) fn bit_toggle(&self, bit: u32, order: Ordering) -> bool {
                let mask = <$int_type>::wrapping_shl(1, bit);
                self.fetch_xor(mask, order) & mask != 0
            }
        }
    };
}
247
// Pass-through macro: emits its input unchanged. Lets a single attribute
// (e.g. one `#[cfg(...)]`) be applied to a whole group of items at once.
macro_rules! items {
    ($($tt:tt)*) => {
        $($tt)*
    };
}
254
/// Hints to the compiler that `cond` is always true.
///
/// In debug builds a false `cond` panics via `unreachable!`; in release
/// builds it is undefined behavior.
///
/// # Safety
///
/// `cond` must be true.
#[allow(dead_code)]
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
#[inline(always)]
#[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
pub(crate) unsafe fn assert_unchecked(cond: bool) {
    if cond {
        return;
    }
    if cfg!(debug_assertions) {
        unreachable!()
    } else {
        // SAFETY: the caller guarantees `cond` is true, so this branch
        // can never be taken.
        unsafe { core::hint::unreachable_unchecked() }
    }
}
271
/// Panics unless `order` is a valid memory ordering for an atomic load
/// (`Relaxed`, `Acquire`, or `SeqCst`).
#[inline]
#[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
pub(crate) fn assert_load_ordering(order: Ordering) {
    match order {
        Ordering::Release => panic!("there is no such thing as a release load"),
        Ordering::AcqRel => panic!("there is no such thing as an acquire-release load"),
        Ordering::Relaxed | Ordering::Acquire | Ordering::SeqCst => {}
        // `Ordering` is non-exhaustive; no other variant exists today.
        _ => unreachable!(),
    }
}
283
/// Panics unless `order` is a valid memory ordering for an atomic store
/// (`Relaxed`, `Release`, or `SeqCst`).
#[inline]
#[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
pub(crate) fn assert_store_ordering(order: Ordering) {
    match order {
        Ordering::Acquire => panic!("there is no such thing as an acquire store"),
        Ordering::AcqRel => panic!("there is no such thing as an acquire-release store"),
        Ordering::Relaxed | Ordering::Release | Ordering::SeqCst => {}
        // `Ordering` is non-exhaustive; no other variant exists today.
        _ => unreachable!(),
    }
}
295
/// Panics unless `success` and `failure` form a valid memory-ordering
/// pair for a compare-exchange: any ordering is valid for `success`,
/// while `failure` must be `Relaxed`, `Acquire`, or `SeqCst`.
#[inline]
#[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
pub(crate) fn assert_compare_exchange_ordering(success: Ordering, failure: Ordering) {
    // Every current ordering is acceptable for the success case; the
    // match only guards against future `Ordering` variants.
    match success {
        Ordering::Relaxed
        | Ordering::Acquire
        | Ordering::Release
        | Ordering::AcqRel
        | Ordering::SeqCst => {}
        _ => unreachable!(),
    }
    match failure {
        Ordering::Release => panic!("there is no such thing as a release failure ordering"),
        Ordering::AcqRel => panic!("there is no such thing as an acquire-release failure ordering"),
        Ordering::Relaxed | Ordering::Acquire | Ordering::SeqCst => {}
        _ => unreachable!(),
    }
}
315
/// Strengthens the success ordering of a compare-exchange so that it is
/// at least as strong as the failure ordering (for backends where the
/// success ordering must subsume the failure ordering).
#[allow(dead_code)]
#[inline]
pub(crate) fn upgrade_success_ordering(success: Ordering, failure: Ordering) -> Ordering {
    match failure {
        // A SeqCst failure forces a SeqCst success.
        Ordering::SeqCst => Ordering::SeqCst,
        // An Acquire failure requires the success ordering to include
        // Acquire semantics.
        Ordering::Acquire => match success {
            Ordering::Relaxed => Ordering::Acquire,
            Ordering::Release => Ordering::AcqRel,
            other => other,
        },
        // Relaxed failure: the success ordering is already strong enough.
        _ => success,
    }
}
328
// Zero-extends a 32-bit pointer to a 64-bit value, returned as
// `MaybeUninit<u64>`.
// NOTE(review): the gating cfg name suggests the result is fed to inline
// asm where `MaybeUninit` is required — confirm at call sites.
#[cfg(not(portable_atomic_no_asm_maybe_uninit))]
#[cfg(target_pointer_width = "32")]
#[allow(dead_code)]
#[inline]
pub(crate) fn zero_extend64_ptr(v: *mut ()) -> core::mem::MaybeUninit<u64> {
    // `#[repr(C)]` pair laid out so the null (all-zero-bits) `pad`
    // occupies the high-order 32 bits: it is the first field on
    // big-endian targets and the second on little-endian targets.
    #[repr(C)]
    struct ZeroExtended {
        #[cfg(target_endian = "big")]
        pad: *mut (),
        v: *mut (),
        #[cfg(target_endian = "little")]
        pad: *mut (),
    }
    // Both fields are initialized, and on a 32-bit target the struct is
    // exactly 64 bits, matching `MaybeUninit<u64>`.
    unsafe { core::mem::transmute(ZeroExtended { v, pad: core::ptr::null_mut() }) }
}
348
// Union for viewing a 128-bit integer either as a whole or as a pair of
// 64-bit halves (see `Pair` for the half ordering). Only compiled on the
// listed 64-bit targets.
#[allow(dead_code)]
#[cfg(any(
    target_arch = "aarch64",
    target_arch = "arm64ec",
    target_arch = "powerpc64",
    target_arch = "riscv64",
    target_arch = "s390x",
    target_arch = "x86_64",
))]
#[derive(Clone, Copy)]
#[repr(C)]
pub(crate) union U128 {
    pub(crate) whole: u128,
    pub(crate) pair: Pair<u64>,
}
// 64-bit counterpart of `U128` for the listed 32-bit targets.
#[allow(dead_code)]
#[cfg(any(target_arch = "arm", target_arch = "riscv32"))]
#[derive(Clone, Copy)]
#[repr(C)]
pub(crate) union U64 {
    pub(crate) whole: u64,
    pub(crate) pair: Pair<u32>,
}
// Low/high halves of a double-width integer. `#[repr(C)]`, with the
// field order chosen per target: `lo` is declared first on little-endian
// targets and — regardless of endianness — on aarch64/arm/arm64ec;
// otherwise `hi` comes first.
// NOTE(review): presumably the Arm exception matches the register-pair
// layout the asm implementations expect — confirm against those files.
#[allow(dead_code)]
#[derive(Clone, Copy)]
#[repr(C)]
pub(crate) struct Pair<T: Copy> {
    #[cfg(any(
        target_endian = "little",
        target_arch = "aarch64",
        target_arch = "arm",
        target_arch = "arm64ec",
    ))]
    pub(crate) lo: T,
    pub(crate) hi: T,
    #[cfg(not(any(
        target_endian = "little",
        target_arch = "aarch64",
        target_arch = "arm",
        target_arch = "arm64ec",
    )))]
    pub(crate) lo: T,
}
402
// Helpers for implementing sub-word (8-/16-bit) atomics in terms of
// word-sized (u32) atomic operations; only compiled on RISC-V.
#[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))]
type MinWord = u32;
#[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))]
type RetInt = u32;
// Given a pointer to a `T`-sized (sub-word) value, returns:
// - a pointer to the aligned `MinWord` containing the value,
// - the bit shift of the value within that word, and
// - a mask selecting the value's bits (shifted into position iff
//   `SHIFT_MASK` is true).
#[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))]
#[allow(dead_code)]
#[inline]
pub(crate) fn create_sub_word_mask_values<T>(ptr: *mut T) -> (*mut MinWord, RetInt, RetInt) {
    use core::mem;
    // Whether the returned mask is pre-shifted to the value's bit
    // position. On the listed architectures the mask is left unshifted
    // and the raw address (not just its low bits) is used as the shift
    // source — NOTE(review): presumably because those targets' shift
    // instructions mask the shift amount; confirm against the per-arch
    // implementations.
    const SHIFT_MASK: bool = !cfg!(any(
        target_arch = "loongarch64",
        target_arch = "mips",
        target_arch = "mips32r6",
        target_arch = "mips64",
        target_arch = "mips64r6",
        target_arch = "riscv32",
        target_arch = "riscv64",
        target_arch = "s390x",
        target_arch = "sparc",
        target_arch = "sparc64",
        target_arch = "xtensa",
    ));
    // Round the address down to the containing aligned word, keeping
    // pointer provenance via `strict::with_addr`.
    let ptr_mask = mem::size_of::<MinWord>() - 1;
    let aligned_ptr = strict::with_addr(ptr, ptr as usize & !ptr_mask) as *mut MinWord;
    let ptr_lsb = if SHIFT_MASK {
        ptr as usize & ptr_mask
    } else {
        // Whole address used as the shift source (see NOTE above).
        ptr as usize
    };
    // Byte offset -> bit shift. On big-endian targets (with s390x
    // treated like little-endian here) the value sits at the opposite
    // end of the word, hence the XOR adjustment.
    let shift = if cfg!(any(target_endian = "little", target_arch = "s390x")) {
        ptr_lsb.wrapping_mul(8)
    } else {
        (ptr_lsb ^ (mem::size_of::<MinWord>() - mem::size_of::<T>())).wrapping_mul(8)
    };
    // Mask covering `size_of::<T>() * 8` low bits of the word.
    let mut mask: RetInt = (1 << (mem::size_of::<T>() * 8)) - 1;
    if SHIFT_MASK {
        mask <<= shift;
    }
    (aligned_ptr, shift as RetInt, mask)
}
454
// Provenance-preserving pointer helpers (polyfills of the strict
// provenance `with_addr`/`map_addr` pointer methods).
#[cfg(any(miri, target_arch = "riscv32", target_arch = "riscv64"))]
#[allow(dead_code)]
pub(crate) mod strict {
    /// Returns a pointer with the given address while keeping the
    /// provenance of `ptr`: the new address is reached via a wrapping
    /// byte offset from `ptr` rather than a usize-to-pointer cast.
    #[inline]
    #[must_use]
    pub(crate) fn with_addr<T>(ptr: *mut T, addr: usize) -> *mut T {
        let offset = addr.wrapping_sub(ptr as usize);
        (ptr as *mut u8).wrapping_add(offset) as *mut T
    }

    /// Returns a pointer whose address is `f(address of ptr)`, keeping
    /// the provenance of `ptr`.
    #[cfg(miri)]
    #[inline]
    #[must_use]
    pub(crate) fn map_addr<T>(ptr: *mut T, f: impl FnOnce(usize) -> usize) -> *mut T {
        with_addr(ptr, f(ptr as usize))
    }
}