ostd/sync/spin.rs
// SPDX-License-Identifier: MPL-2.0

use alloc::sync::Arc;
use core::{
    cell::UnsafeCell,
    fmt,
    marker::PhantomData,
    ops::{Deref, DerefMut},
    sync::atomic::{AtomicBool, Ordering},
};

use super::{guard::SpinGuardian, LocalIrqDisabled, PreemptDisabled};
use crate::task::atomic_mode::AsAtomicModeGuard;

/// A spin lock.
///
/// # Guard behavior
///
/// The type `G` specifies the guard behavior of the spin lock. While holding the lock,
/// - if `G` is [`PreemptDisabled`], preemption is disabled;
/// - if `G` is [`LocalIrqDisabled`], local IRQs are disabled.
///
/// `G` can also be provided by crates other than ostd,
/// as long as it behaves similarly to [`PreemptDisabled`] or [`LocalIrqDisabled`].
///
/// The guard behavior can be temporarily upgraded from [`PreemptDisabled`] to
/// [`LocalIrqDisabled`] using the [`disable_irq`] method.
///
/// [`disable_irq`]: Self::disable_irq
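///
/// # Examples
///
/// A minimal usage sketch (assuming the usual `ostd::sync` re-export):
///
/// ```
/// use ostd::sync::SpinLock;
///
/// static COUNTER: SpinLock<u32> = SpinLock::new(0);
///
/// *COUNTER.lock() += 1;
/// assert_eq!(*COUNTER.lock(), 1);
/// ```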

#[repr(transparent)]
pub struct SpinLock<T: ?Sized, G = PreemptDisabled> {
    phantom: PhantomData<G>,
    /// Only the last field of a struct may have a dynamically sized type.
    /// That's why `SpinLockInner` is put in the last field.
    inner: SpinLockInner<T>,
}

struct SpinLockInner<T: ?Sized> {
    lock: AtomicBool,
    val: UnsafeCell<T>,
}

impl<T, G> SpinLock<T, G> {
    /// Creates a new spin lock.
    pub const fn new(val: T) -> Self {
        let lock_inner = SpinLockInner {
            lock: AtomicBool::new(false),
            val: UnsafeCell::new(val),
        };
        Self {
            phantom: PhantomData,
            inner: lock_inner,
        }
    }
}

impl<T: ?Sized> SpinLock<T, PreemptDisabled> {
    /// Converts the guard behavior from disabling preemption to disabling IRQs.
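    ///
    /// This is typically needed when the lock may also be taken in interrupt
    /// context, where disabling preemption alone is not enough.
    ///
    /// # Examples
    ///
    /// A minimal sketch:
    ///
    /// ```
    /// use ostd::sync::SpinLock;
    ///
    /// let lock = SpinLock::new(0u32);
    /// // Hold the lock with local IRQs disabled rather than just preemption.
    /// let mut guard = lock.disable_irq().lock();
    /// *guard += 1;
    /// ```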
    pub fn disable_irq(&self) -> &SpinLock<T, LocalIrqDisabled> {
        let ptr = self as *const SpinLock<T, PreemptDisabled>;
        let ptr = ptr as *const SpinLock<T, LocalIrqDisabled>;
        // SAFETY:
        // 1. The types `SpinLock<T, PreemptDisabled>` and `SpinLock<T, LocalIrqDisabled>` are
        //    both `#[repr(transparent)]` wrappers around `SpinLockInner<T>`, so they have the
        //    same memory layout.
        // 2. The specified memory location can be borrowed as an immutable reference for the
        //    specified lifetime.
        unsafe { &*ptr }
    }
}

impl<T: ?Sized, G: SpinGuardian> SpinLock<T, G> {
    /// Acquires the spin lock.
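    ///
    /// # Examples
    ///
    /// A minimal sketch; the lock is released when the guard is dropped:
    ///
    /// ```
    /// use ostd::sync::SpinLock;
    ///
    /// let lock = SpinLock::new(0u32);
    /// {
    ///     let mut guard = lock.lock();
    ///     *guard += 1;
    /// } // The guard is dropped here, releasing the lock.
    /// assert_eq!(*lock.lock(), 1);
    /// ```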
    pub fn lock(&self) -> SpinLockGuard<T, G> {
        // Note that the guard must be created *before* acquiring the lock, so that
        // preemption (or local IRQs) is already disabled while we spin and while we
        // hold the lock.
        let inner_guard = G::guard();
        self.acquire_lock();
        SpinLockGuard_ {
            lock: self,
            guard: inner_guard,
        }
    }

    /// Acquires the spin lock through an [`Arc`].
    ///
    /// The method is similar to [`lock`], but it doesn't have the requirement
    /// for compile-time checked lifetimes of the lock guard.
    ///
    /// [`lock`]: Self::lock
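    ///
    /// # Examples
    ///
    /// A minimal sketch (assuming an `alloc`-enabled environment):
    ///
    /// ```
    /// extern crate alloc;
    /// use alloc::sync::Arc;
    /// use ostd::sync::SpinLock;
    ///
    /// let lock = Arc::new(SpinLock::new(0u32));
    /// // The guard owns a clone of the `Arc`, so it is not tied to the
    /// // lifetime of any borrow of the lock.
    /// let guard = lock.lock_arc();
    /// assert_eq!(*guard, 0);
    /// ```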
    pub fn lock_arc(self: &Arc<Self>) -> ArcSpinLockGuard<T, G> {
        let inner_guard = G::guard();
        self.acquire_lock();
        SpinLockGuard_ {
            lock: self.clone(),
            guard: inner_guard,
        }
    }

    /// Tries to acquire the spin lock immediately, without spinning.
    ///
    /// Returns `None` if the lock is already held.
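    ///
    /// # Examples
    ///
    /// A minimal sketch:
    ///
    /// ```
    /// use ostd::sync::SpinLock;
    ///
    /// let lock = SpinLock::new(0u32);
    /// let guard = lock.lock();
    /// // The lock is already held, so `try_lock` fails without spinning.
    /// assert!(lock.try_lock().is_none());
    /// drop(guard);
    /// assert!(lock.try_lock().is_some());
    /// ```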
    pub fn try_lock(&self) -> Option<SpinLockGuard<T, G>> {
        let inner_guard = G::guard();
        if self.try_acquire_lock() {
            let lock_guard = SpinLockGuard_ {
                lock: self,
                guard: inner_guard,
            };
            return Some(lock_guard);
        }
        None
    }

    /// Returns a mutable reference to the underlying data.
    ///
    /// This method is zero-cost: by holding a mutable reference to the lock, the compiler has
    /// already statically guaranteed that access to the data is exclusive.
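    ///
    /// # Examples
    ///
    /// A minimal sketch:
    ///
    /// ```
    /// use ostd::sync::SpinLock;
    ///
    /// let mut lock = SpinLock::new(0u32);
    /// // No locking occurs: the `&mut` borrow already proves exclusive access.
    /// *lock.get_mut() = 42;
    /// assert_eq!(*lock.lock(), 42);
    /// ```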
    pub fn get_mut(&mut self) -> &mut T {
        self.inner.val.get_mut()
    }

    /// Acquires the spin lock, busy-waiting until it becomes available.
    fn acquire_lock(&self) {
        while !self.try_acquire_lock() {
            core::hint::spin_loop();
        }
    }
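
    /// Tries to acquire the lock with a single compare-exchange.
    ///
    /// `Acquire` ordering on success ensures that writes made by the previous lock
    /// holder (published by the `Release` store in `release_lock`) are visible here.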
    fn try_acquire_lock(&self) -> bool {
        self.inner
            .lock
            .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_ok()
    }
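
    /// Releases the lock.
    ///
    /// The `Release` store pairs with the `Acquire` in `try_acquire_lock`, making
    /// our writes to the protected data visible to the next lock holder.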
    fn release_lock(&self) {
        self.inner.lock.store(false, Ordering::Release);
    }
}

impl<T: ?Sized + fmt::Debug, G> fmt::Debug for SpinLock<T, G> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&self.inner.val, f)
    }
}

// SAFETY: Only a single lock holder is permitted to access the inner data of `SpinLock`.
unsafe impl<T: ?Sized + Send, G> Send for SpinLock<T, G> {}
unsafe impl<T: ?Sized + Send, G> Sync for SpinLock<T, G> {}

/// A guard that provides exclusive access to the data protected by a [`SpinLock`].
pub type SpinLockGuard<'a, T, G> = SpinLockGuard_<T, &'a SpinLock<T, G>, G>;
/// A guard that provides exclusive access to the data protected by an `Arc<SpinLock>`.
pub type ArcSpinLockGuard<T, G> = SpinLockGuard_<T, Arc<SpinLock<T, G>>, G>;

/// The guard of a spin lock.
#[clippy::has_significant_drop]
#[must_use]
pub struct SpinLockGuard_<T: ?Sized, R: Deref<Target = SpinLock<T, G>>, G: SpinGuardian> {
    guard: G::Guard,
    lock: R,
}

impl<T: ?Sized, R: Deref<Target = SpinLock<T, G>>, G: SpinGuardian> AsAtomicModeGuard
    for SpinLockGuard_<T, R, G>
{
    fn as_atomic_mode_guard(&self) -> &dyn crate::task::atomic_mode::InAtomicMode {
        self.guard.as_atomic_mode_guard()
    }
}

impl<T: ?Sized, R: Deref<Target = SpinLock<T, G>>, G: SpinGuardian> Deref
    for SpinLockGuard_<T, R, G>
{
    type Target = T;

    fn deref(&self) -> &T {
        // SAFETY: Holding the guard guarantees exclusive access to the protected data.
        unsafe { &*self.lock.inner.val.get() }
    }
}

impl<T: ?Sized, R: Deref<Target = SpinLock<T, G>>, G: SpinGuardian> DerefMut
    for SpinLockGuard_<T, R, G>
{
    fn deref_mut(&mut self) -> &mut Self::Target {
        // SAFETY: Holding the guard guarantees exclusive access to the protected data.
        unsafe { &mut *self.lock.inner.val.get() }
    }
}

impl<T: ?Sized, R: Deref<Target = SpinLock<T, G>>, G: SpinGuardian> Drop
    for SpinLockGuard_<T, R, G>
{
    fn drop(&mut self) {
        self.lock.release_lock();
    }
}

impl<T: ?Sized + fmt::Debug, R: Deref<Target = SpinLock<T, G>>, G: SpinGuardian> fmt::Debug
    for SpinLockGuard_<T, R, G>
{
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}
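
// `SpinLockGuard_` is `!Send` because dropping it on another CPU/task would release
// the lock and restore the preemption/IRQ state of a CPU that never disabled it.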
impl<T: ?Sized, R: Deref<Target = SpinLock<T, G>>, G: SpinGuardian> !Send
    for SpinLockGuard_<T, R, G>
{
}

// SAFETY: `SpinLockGuard_` can be shared between tasks/threads on the same CPU,
// as `lock()` is only called when there are no race conditions caused by interrupts.
unsafe impl<T: ?Sized + Sync, R: Deref<Target = SpinLock<T, G>> + Sync, G: SpinGuardian> Sync
    for SpinLockGuard_<T, R, G>
{
}