#![allow(clippy::unit_arg)]
use crate::primitive::sync::atomic::{self, AtomicBool};
use core::cell::UnsafeCell;
use core::cmp;
use core::fmt;
use core::mem::{self, ManuallyDrop, MaybeUninit};
use core::ptr;
use core::sync::atomic::Ordering;
#[cfg(feature = "std")]
use std::panic::{RefUnwindSafe, UnwindSafe};
use super::seq_lock::SeqLock;
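/// A thread-safe mutable memory location.
///
/// This type is equivalent to [`Cell`], except it can also be shared among
/// multiple threads.
///
/// Operations use atomic instructions whenever possible, and fall back to a
/// global table of sequence locks otherwise.
///
/// [`Cell`]: core::cell::Cell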
#[repr(transparent)]
pub struct AtomicCell<T> {
value: UnsafeCell<MaybeUninit<T>>,
}
unsafe impl<T: Send> Send for AtomicCell<T> {}
unsafe impl<T: Send> Sync for AtomicCell<T> {}
#[cfg(feature = "std")]
impl<T> UnwindSafe for AtomicCell<T> {}
#[cfg(feature = "std")]
impl<T> RefUnwindSafe for AtomicCell<T> {}
impl<T> AtomicCell<T> {
pub const fn new(val: T) -> AtomicCell<T> {
AtomicCell {
value: UnsafeCell::new(MaybeUninit::new(val)),
}
}
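    /// Consumes the atomic cell and returns the contained value.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(7);
    /// assert_eq!(a.into_inner(), 7);
    /// ```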
pub fn into_inner(self) -> T {
let this = ManuallyDrop::new(self);
unsafe { this.as_ptr().read() }
}
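    /// Returns `true` if operations on values of this type are lock-free.
    ///
    /// If this returns `false`, every operation on `AtomicCell<T>` goes
    /// through one of the global sequence locks instead.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// // `u8` always fits a native atomic.
    /// assert!(AtomicCell::<u8>::is_lock_free());
    ///
    /// // There is no 128-bit native atomic here, so `u128` uses the locks.
    /// assert!(!AtomicCell::<u128>::is_lock_free());
    /// ```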
pub const fn is_lock_free() -> bool {
atomic_is_lock_free::<T>()
}
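    /// Stores `val` into the atomic cell, dropping the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(7);
    /// assert_eq!(a.load(), 7);
    /// a.store(8);
    /// assert_eq!(a.load(), 8);
    /// ```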
pub fn store(&self, val: T) {
if mem::needs_drop::<T>() {
drop(self.swap(val));
} else {
unsafe {
atomic_store(self.as_ptr(), val);
}
}
}
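    /// Stores `val` into the atomic cell and returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(7);
    /// assert_eq!(a.swap(8), 7);
    /// assert_eq!(a.load(), 8);
    /// ```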
pub fn swap(&self, val: T) -> T {
unsafe { atomic_swap(self.as_ptr(), val) }
}
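    /// Returns a raw pointer to the underlying data in this atomic cell.
    ///
    /// Reading from or writing through this pointer races with concurrent
    /// access through the `AtomicCell` methods, so it is mainly useful for
    /// FFI or for code providing its own synchronization.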
#[inline]
pub fn as_ptr(&self) -> *mut T {
self.value.get().cast::<T>()
}
}
impl<T: Default> AtomicCell<T> {
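    /// Takes the value of the atomic cell, leaving `Default::default()` in its place.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(5);
    /// assert_eq!(a.take(), 5);
    /// assert_eq!(a.load(), 0);
    /// ```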
pub fn take(&self) -> T {
self.swap(Default::default())
}
}
impl<T: Copy> AtomicCell<T> {
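    /// Loads a value from the atomic cell.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(7);
    /// assert_eq!(a.load(), 7);
    /// ```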
pub fn load(&self) -> T {
unsafe { atomic_load(self.as_ptr()) }
}
}
impl<T: Copy + Eq> AtomicCell<T> {
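    /// If the current value equals `current`, stores `new` into the atomic
    /// cell. The previous value is returned in either case.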
#[deprecated(note = "Use `compare_exchange` instead")]
pub fn compare_and_swap(&self, current: T, new: T) -> T {
match self.compare_exchange(current, new) {
Ok(v) => v,
Err(v) => v,
}
}
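    /// If the current value equals `current`, stores `new` into the atomic cell.
    ///
    /// The return value indicates whether the new value was written and
    /// contains the previous value. On success this value is guaranteed to be
    /// equal to `current`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(1);
    ///
    /// assert_eq!(a.compare_exchange(2, 3), Err(1));
    /// assert_eq!(a.load(), 1);
    ///
    /// assert_eq!(a.compare_exchange(1, 2), Ok(1));
    /// assert_eq!(a.load(), 2);
    /// ```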
pub fn compare_exchange(&self, current: T, new: T) -> Result<T, T> {
unsafe { atomic_compare_exchange_weak(self.as_ptr(), current, new) }
}
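    /// Fetches the current value, applies `f` to it, and stores the result if
    /// `f` returned `Some`, retrying on contention.
    ///
    /// Returns `Ok(previous_value)` if `f` returned `Some(_)`, otherwise
    /// `Err(previous_value)`. Note that `f` may be called multiple times if
    /// the value changes concurrently, so it should be free of side effects.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(7);
    /// assert_eq!(a.fetch_update(|_| None), Err(7));
    /// assert_eq!(a.fetch_update(|a| Some(a + 1)), Ok(7));
    /// assert_eq!(a.fetch_update(|a| Some(a + 1)), Ok(8));
    /// assert_eq!(a.load(), 9);
    /// ```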
#[inline]
pub fn fetch_update<F>(&self, mut f: F) -> Result<T, T>
where
F: FnMut(T) -> Option<T>,
{
let mut prev = self.load();
while let Some(next) = f(prev) {
match self.compare_exchange(prev, next) {
x @ Ok(_) => return x,
Err(next_prev) => prev = next_prev,
}
}
Err(prev)
}
}
impl<T> Drop for AtomicCell<T> {
fn drop(&mut self) {
if mem::needs_drop::<T>() {
unsafe {
self.as_ptr().drop_in_place();
}
}
}
}
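// Generates the `fetch_*` arithmetic and bitwise methods for integer
// `AtomicCell`s. The `fallback` arm always works under the striped seqlock;
// the other arm uses the matching native atomic when the integer can be
// transmuted into it, taking the lock only on targets where the layouts
// differ (e.g. `u64` has alignment 4 on some 32-bit platforms while
// `AtomicU64` requires 8).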
macro_rules! impl_arithmetic {
($t:ty, fallback, $example:tt) => {
impl AtomicCell<$t> {
#[doc = $example]
#[inline]
pub fn fetch_add(&self, val: $t) -> $t {
let _guard = lock(self.as_ptr() as usize).write();
let value = unsafe { &mut *(self.as_ptr()) };
let old = *value;
*value = value.wrapping_add(val);
old
}
#[doc = $example]
#[inline]
pub fn fetch_sub(&self, val: $t) -> $t {
let _guard = lock(self.as_ptr() as usize).write();
let value = unsafe { &mut *(self.as_ptr()) };
let old = *value;
*value = value.wrapping_sub(val);
old
}
#[doc = $example]
#[inline]
pub fn fetch_and(&self, val: $t) -> $t {
let _guard = lock(self.as_ptr() as usize).write();
let value = unsafe { &mut *(self.as_ptr()) };
let old = *value;
*value &= val;
old
}
#[doc = $example]
#[inline]
pub fn fetch_nand(&self, val: $t) -> $t {
let _guard = lock(self.as_ptr() as usize).write();
let value = unsafe { &mut *(self.as_ptr()) };
let old = *value;
*value = !(old & val);
old
}
#[doc = $example]
#[inline]
pub fn fetch_or(&self, val: $t) -> $t {
let _guard = lock(self.as_ptr() as usize).write();
let value = unsafe { &mut *(self.as_ptr()) };
let old = *value;
*value |= val;
old
}
#[doc = $example]
#[inline]
pub fn fetch_xor(&self, val: $t) -> $t {
let _guard = lock(self.as_ptr() as usize).write();
let value = unsafe { &mut *(self.as_ptr()) };
let old = *value;
*value ^= val;
old
}
#[doc = $example]
#[inline]
pub fn fetch_max(&self, val: $t) -> $t {
let _guard = lock(self.as_ptr() as usize).write();
let value = unsafe { &mut *(self.as_ptr()) };
let old = *value;
*value = cmp::max(old, val);
old
}
#[doc = $example]
#[inline]
pub fn fetch_min(&self, val: $t) -> $t {
let _guard = lock(self.as_ptr() as usize).write();
let value = unsafe { &mut *(self.as_ptr()) };
let old = *value;
*value = cmp::min(old, val);
old
}
}
};
($t:ty, $atomic:ty, $example:tt) => {
impl AtomicCell<$t> {
#[doc = $example]
#[inline]
pub fn fetch_add(&self, val: $t) -> $t {
if can_transmute::<$t, $atomic>() {
let a = unsafe { &*(self.as_ptr() as *const $atomic) };
a.fetch_add(val, Ordering::AcqRel)
} else {
let _guard = lock(self.as_ptr() as usize).write();
let value = unsafe { &mut *(self.as_ptr()) };
let old = *value;
*value = value.wrapping_add(val);
old
}
}
#[doc = $example]
#[inline]
pub fn fetch_sub(&self, val: $t) -> $t {
if can_transmute::<$t, $atomic>() {
let a = unsafe { &*(self.as_ptr() as *const $atomic) };
a.fetch_sub(val, Ordering::AcqRel)
} else {
let _guard = lock(self.as_ptr() as usize).write();
let value = unsafe { &mut *(self.as_ptr()) };
let old = *value;
*value = value.wrapping_sub(val);
old
}
}
#[doc = $example]
#[inline]
pub fn fetch_and(&self, val: $t) -> $t {
if can_transmute::<$t, $atomic>() {
let a = unsafe { &*(self.as_ptr() as *const $atomic) };
a.fetch_and(val, Ordering::AcqRel)
} else {
let _guard = lock(self.as_ptr() as usize).write();
let value = unsafe { &mut *(self.as_ptr()) };
let old = *value;
*value &= val;
old
}
}
#[doc = $example]
#[inline]
pub fn fetch_nand(&self, val: $t) -> $t {
if can_transmute::<$t, $atomic>() {
let a = unsafe { &*(self.as_ptr() as *const $atomic) };
a.fetch_nand(val, Ordering::AcqRel)
} else {
let _guard = lock(self.as_ptr() as usize).write();
let value = unsafe { &mut *(self.as_ptr()) };
let old = *value;
*value = !(old & val);
old
}
}
#[doc = $example]
#[inline]
pub fn fetch_or(&self, val: $t) -> $t {
if can_transmute::<$t, $atomic>() {
let a = unsafe { &*(self.as_ptr() as *const $atomic) };
a.fetch_or(val, Ordering::AcqRel)
} else {
let _guard = lock(self.as_ptr() as usize).write();
let value = unsafe { &mut *(self.as_ptr()) };
let old = *value;
*value |= val;
old
}
}
#[doc = $example]
#[inline]
pub fn fetch_xor(&self, val: $t) -> $t {
if can_transmute::<$t, $atomic>() {
let a = unsafe { &*(self.as_ptr() as *const $atomic) };
a.fetch_xor(val, Ordering::AcqRel)
} else {
let _guard = lock(self.as_ptr() as usize).write();
let value = unsafe { &mut *(self.as_ptr()) };
let old = *value;
*value ^= val;
old
}
}
#[doc = $example]
#[inline]
            pub fn fetch_max(&self, val: $t) -> $t {
                if can_transmute::<$t, $atomic>() {
                    let a = unsafe { &*(self.as_ptr() as *const $atomic) };
                    a.fetch_max(val, Ordering::AcqRel)
                } else {
                    let _guard = lock(self.as_ptr() as usize).write();
                    let value = unsafe { &mut *(self.as_ptr()) };
                    let old = *value;
                    *value = cmp::max(old, val);
                    old
                }
            }
#[doc = $example]
#[inline]
            pub fn fetch_min(&self, val: $t) -> $t {
                if can_transmute::<$t, $atomic>() {
                    let a = unsafe { &*(self.as_ptr() as *const $atomic) };
                    a.fetch_min(val, Ordering::AcqRel)
                } else {
                    let _guard = lock(self.as_ptr() as usize).write();
                    let value = unsafe { &mut *(self.as_ptr()) };
                    let old = *value;
                    *value = cmp::min(old, val);
                    old
                }
            }
}
};
}
impl_arithmetic!(u8, atomic::AtomicU8, "let a = AtomicCell::new(7u8);");
impl_arithmetic!(i8, atomic::AtomicI8, "let a = AtomicCell::new(7i8);");
impl_arithmetic!(u16, atomic::AtomicU16, "let a = AtomicCell::new(7u16);");
impl_arithmetic!(i16, atomic::AtomicI16, "let a = AtomicCell::new(7i16);");
impl_arithmetic!(u32, atomic::AtomicU32, "let a = AtomicCell::new(7u32);");
impl_arithmetic!(i32, atomic::AtomicI32, "let a = AtomicCell::new(7i32);");
#[cfg(not(crossbeam_no_atomic_64))]
impl_arithmetic!(u64, atomic::AtomicU64, "let a = AtomicCell::new(7u64);");
#[cfg(not(crossbeam_no_atomic_64))]
impl_arithmetic!(i64, atomic::AtomicI64, "let a = AtomicCell::new(7i64);");
#[cfg(crossbeam_no_atomic_64)]
impl_arithmetic!(u64, fallback, "let a = AtomicCell::new(7u64);");
#[cfg(crossbeam_no_atomic_64)]
impl_arithmetic!(i64, fallback, "let a = AtomicCell::new(7i64);");
impl_arithmetic!(u128, fallback, "let a = AtomicCell::new(7u128);");
impl_arithmetic!(i128, fallback, "let a = AtomicCell::new(7i128);");
impl_arithmetic!(
usize,
atomic::AtomicUsize,
"let a = AtomicCell::new(7usize);"
);
impl_arithmetic!(
isize,
atomic::AtomicIsize,
"let a = AtomicCell::new(7isize);"
);
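// `AtomicBool` is guaranteed to have the same in-memory representation as
// `bool`, so casting the cell's pointer to `*const AtomicBool` is sound.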
impl AtomicCell<bool> {
#[inline]
pub fn fetch_and(&self, val: bool) -> bool {
let a = unsafe { &*(self.as_ptr() as *const AtomicBool) };
a.fetch_and(val, Ordering::AcqRel)
}
#[inline]
pub fn fetch_nand(&self, val: bool) -> bool {
let a = unsafe { &*(self.as_ptr() as *const AtomicBool) };
a.fetch_nand(val, Ordering::AcqRel)
}
#[inline]
pub fn fetch_or(&self, val: bool) -> bool {
let a = unsafe { &*(self.as_ptr() as *const AtomicBool) };
a.fetch_or(val, Ordering::AcqRel)
}
#[inline]
pub fn fetch_xor(&self, val: bool) -> bool {
let a = unsafe { &*(self.as_ptr() as *const AtomicBool) };
a.fetch_xor(val, Ordering::AcqRel)
}
}
impl<T: Default> Default for AtomicCell<T> {
fn default() -> AtomicCell<T> {
AtomicCell::new(T::default())
}
}
impl<T> From<T> for AtomicCell<T> {
#[inline]
fn from(val: T) -> AtomicCell<T> {
AtomicCell::new(val)
}
}
impl<T: Copy + fmt::Debug> fmt::Debug for AtomicCell<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("AtomicCell")
.field("value", &self.load())
.finish()
}
}
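// Returns `true` if a `*const A` can be soundly used as a `*const B`: the
// sizes must match and `A`'s alignment must be at least `B`'s. (`&` is used
// instead of `&&` so the function can remain `const` on older toolchains.)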
const fn can_transmute<A, B>() -> bool {
(mem::size_of::<A>() == mem::size_of::<B>()) & (mem::align_of::<A>() >= mem::align_of::<B>())
}
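// Returns the seqlock guarding the memory at address `addr` in the global
// fallback table. Addresses are striped over a fixed number of locks; the
// table length is prime so that regularly spaced addresses don't all map to
// the same lock. Distinct `AtomicCell`s can share a lock, which may cause
// contention but never affects correctness.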
#[inline]
#[must_use]
fn lock(addr: usize) -> &'static SeqLock {
const LEN: usize = 97;
#[allow(clippy::declare_interior_mutable_const)]
const L: SeqLock = SeqLock::new();
static LOCKS: [SeqLock; LEN] = [L; LEN];
&LOCKS[addr % LEN]
}
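// A no-op "atomic" for zero-sized types: there is no state to synchronize,
// so every operation succeeds immediately.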
struct AtomicUnit;
impl AtomicUnit {
#[inline]
fn load(&self, _order: Ordering) {}
#[inline]
fn store(&self, _val: (), _order: Ordering) {}
#[inline]
fn swap(&self, _val: (), _order: Ordering) {}
#[inline]
fn compare_exchange_weak(
&self,
_current: (),
_new: (),
_success: Ordering,
_failure: Ordering,
) -> Result<(), ()> {
Ok(())
}
}
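// Selects the smallest native atomic type that `$t` can be transmuted into,
// binds a reference to it as `$a`, and evaluates `$atomic_op`; if no native
// atomic fits, evaluates `$fallback_op` under the seqlock. The `loop` exists
// only so each `@check` arm can `break` with the chosen expression's value.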
macro_rules! atomic {
(@check, $t:ty, $atomic:ty, $a:ident, $atomic_op:expr) => {
if can_transmute::<$t, $atomic>() {
let $a: &$atomic;
break $atomic_op;
}
};
($t:ty, $a:ident, $atomic_op:expr, $fallback_op:expr) => {
loop {
atomic!(@check, $t, AtomicUnit, $a, $atomic_op);
atomic!(@check, $t, atomic::AtomicU8, $a, $atomic_op);
atomic!(@check, $t, atomic::AtomicU16, $a, $atomic_op);
atomic!(@check, $t, atomic::AtomicU32, $a, $atomic_op);
#[cfg(not(crossbeam_no_atomic_64))]
atomic!(@check, $t, atomic::AtomicU64, $a, $atomic_op);
break $fallback_op;
}
};
}
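// Mirrors the dispatch order of the `atomic!` macro: `T` is lock-free exactly
// when it fits one of the native atomic types checked there.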
const fn atomic_is_lock_free<T>() -> bool {
let is_lock_free = can_transmute::<T, AtomicUnit>()
| can_transmute::<T, atomic::AtomicU8>()
| can_transmute::<T, atomic::AtomicU16>()
| can_transmute::<T, atomic::AtomicU32>();
#[cfg(not(crossbeam_no_atomic_64))]
let is_lock_free = is_lock_free | can_transmute::<T, atomic::AtomicU64>();
is_lock_free
}
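// Loads the value at `src`, using a native atomic when possible. The fallback
// first attempts an optimistic read: record the lock's stamp, copy the value
// with a volatile read (a torn copy stays wrapped in `MaybeUninit`, so no
// invalid `T` is ever produced), and use the copy only if the stamp is still
// valid. If a writer interfered, take the write lock, read, and abort the
// guard, which releases the lock without bumping the stamp since no write
// occurred.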
unsafe fn atomic_load<T>(src: *mut T) -> T
where
T: Copy,
{
atomic! {
T, a,
{
a = &*(src as *const _ as *const _);
mem::transmute_copy(&a.load(Ordering::Acquire))
},
{
let lock = lock(src as usize);
if let Some(stamp) = lock.optimistic_read() {
let val = ptr::read_volatile(src.cast::<MaybeUninit<T>>());
if lock.validate_read(stamp) {
return val.assume_init();
}
}
let guard = lock.write();
let val = ptr::read(src);
guard.abort();
val
}
}
}
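// Stores `val` at `dst`, using a native atomic when possible, otherwise
// writing under the seqlock. On the native path the bits of `val` are copied
// into the atomic, so `mem::forget` prevents `val` from also being dropped
// here; ownership has moved into the cell.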
unsafe fn atomic_store<T>(dst: *mut T, val: T) {
atomic! {
T, a,
{
a = &*(dst as *const _ as *const _);
a.store(mem::transmute_copy(&val), Ordering::Release);
mem::forget(val);
},
{
let _guard = lock(dst as usize).write();
ptr::write(dst, val);
}
}
}
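// Swaps the value at `dst` with `val`, returning the old value. Uses a native
// atomic when possible, otherwise `ptr::replace` under the seqlock.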
unsafe fn atomic_swap<T>(dst: *mut T, val: T) -> T {
atomic! {
T, a,
{
a = &*(dst as *const _ as *const _);
let res = mem::transmute_copy(&a.swap(mem::transmute_copy(&val), Ordering::AcqRel));
mem::forget(val);
res
},
{
let _guard = lock(dst as usize).write();
ptr::replace(dst, val)
}
}
}
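// Compare-exchange on the value at `dst`, using a native atomic when
// possible. The native path must loop: the hardware compares raw bits, but
// `T::eq` may disagree with bitwise equality (e.g. for types with padding or
// multiple representations of equal values), so on a bitwise mismatch whose
// observed value `T::eq` still considers equal to `current`, we retry with
// the freshly observed bits rather than reporting failure.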
#[allow(clippy::let_unit_value)]
unsafe fn atomic_compare_exchange_weak<T>(dst: *mut T, mut current: T, new: T) -> Result<T, T>
where
T: Copy + Eq,
{
atomic! {
T, a,
{
a = &*(dst as *const _ as *const _);
            let mut current_raw = mem::transmute_copy(&current);
let new_raw = mem::transmute_copy(&new);
loop {
match a.compare_exchange_weak(
current_raw,
new_raw,
Ordering::AcqRel,
Ordering::Acquire,
) {
Ok(_) => break Ok(current),
Err(previous_raw) => {
let previous = mem::transmute_copy(&previous_raw);
                        if !T::eq(&previous, &current) {
break Err(previous);
}
current = previous;
current_raw = previous_raw;
}
}
}
},
{
let guard = lock(dst as usize).write();
            if T::eq(&*dst, &current) {
Ok(ptr::replace(dst, new))
} else {
let val = ptr::read(dst);
guard.abort();
Err(val)
}
}
}
}