Diffstat (limited to 'rust/kernel/sync')

 -rw-r--r--  rust/kernel/sync/atomic.rs      |  13
 -rw-r--r--  rust/kernel/sync/condvar.rs     |   4
 -rw-r--r--  rust/kernel/sync/lock.rs        |  43
 -rw-r--r--  rust/kernel/sync/lock/global.rs |   7
 -rw-r--r--  rust/kernel/sync/set_once.rs    | 125

5 files changed, 182 insertions, 10 deletions
diff --git a/rust/kernel/sync/atomic.rs b/rust/kernel/sync/atomic.rs
index 016a6bcaf080..4aebeacb961a 100644
--- a/rust/kernel/sync/atomic.rs
+++ b/rust/kernel/sync/atomic.rs
@@ -23,8 +23,10 @@ mod predefine;
 pub use internal::AtomicImpl;
 pub use ordering::{Acquire, Full, Relaxed, Release};
 
+pub(crate) use internal::{AtomicArithmeticOps, AtomicBasicOps, AtomicExchangeOps};
+
 use crate::build_error;
-use internal::{AtomicArithmeticOps, AtomicBasicOps, AtomicExchangeOps, AtomicRepr};
+use internal::AtomicRepr;
 use ordering::OrderingType;
 
 /// A memory location which can be safely modified from multiple execution contexts.
@@ -306,6 +308,15 @@ where
     }
 }
 
+impl<T: AtomicType + core::fmt::Debug> core::fmt::Debug for Atomic<T>
+where
+    T::Repr: AtomicBasicOps,
+{
+    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+        core::fmt::Debug::fmt(&self.load(Relaxed), f)
+    }
+}
+
 impl<T: AtomicType> Atomic<T>
 where
     T::Repr: AtomicExchangeOps,
diff --git a/rust/kernel/sync/condvar.rs b/rust/kernel/sync/condvar.rs
index c6ec64295c9f..69d58dfbad7b 100644
--- a/rust/kernel/sync/condvar.rs
+++ b/rust/kernel/sync/condvar.rs
@@ -8,7 +8,7 @@
 use super::{lock::Backend, lock::Guard, LockClassKey};
 use crate::{
     ffi::{c_int, c_long},
-    str::CStr,
+    str::{CStr, CStrExt as _},
     task::{
         MAX_SCHEDULE_TIMEOUT, TASK_FREEZABLE, TASK_INTERRUPTIBLE, TASK_NORMAL, TASK_UNINTERRUPTIBLE,
     },
@@ -36,7 +36,7 @@ pub use new_condvar;
 /// spuriously.
 ///
 /// Instances of [`CondVar`] need a lock class and to be pinned. The recommended way to create such
-/// instances is with the [`pin_init`](crate::pin_init!) and [`new_condvar`] macros.
+/// instances is with the [`pin_init`](pin_init::pin_init!) and [`new_condvar`] macros.
 ///
 /// # Examples
 ///
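The new `Debug` impl for `Atomic<T>` snapshots the value with a relaxed load and forwards to the inner type's formatter. A minimal sketch of the same pattern in plain Rust, using `core::sync::atomic::AtomicI32` and a hypothetical `Snapshot` wrapper in place of the kernel's generic `Atomic<T>`:

```rust
use core::fmt;
use core::sync::atomic::{AtomicI32, Ordering};

/// Hypothetical stand-in for the kernel's `Atomic<T>` (illustration only).
struct Snapshot(AtomicI32);

impl fmt::Debug for Snapshot {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // A relaxed load suffices: the printed value is a point-in-time
        // snapshot, and `Debug` promises no ordering with other accesses.
        fmt::Debug::fmt(&self.0.load(Ordering::Relaxed), f)
    }
}

fn main() {
    let v = Snapshot(AtomicI32::new(42));
    assert_eq!(format!("{v:?}"), "42");
}
```

Forwarding to the inner value's `Debug` rather than wrapping it in a struct name keeps `{:?}` output identical to that of the plain value.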
diff --git a/rust/kernel/sync/lock.rs b/rust/kernel/sync/lock.rs
index 27202beef90c..46a57d1fc309 100644
--- a/rust/kernel/sync/lock.rs
+++ b/rust/kernel/sync/lock.rs
@@ -7,11 +7,11 @@
 
 use super::LockClassKey;
 use crate::{
-    str::CStr,
+    str::{CStr, CStrExt as _},
     types::{NotThreadSafe, Opaque, ScopeGuard},
 };
 use core::{cell::UnsafeCell, marker::PhantomPinned, pin::Pin};
-use pin_init::{pin_data, pin_init, PinInit};
+use pin_init::{pin_data, pin_init, PinInit, Wrapper};
 
 pub mod mutex;
 pub mod spinlock;
@@ -115,6 +115,7 @@ pub struct Lock<T: ?Sized, B: Backend> {
     _pin: PhantomPinned,
 
     /// The data protected by the lock.
+    #[pin]
     pub(crate) data: UnsafeCell<T>,
 }
 
@@ -127,9 +128,13 @@ unsafe impl<T: ?Sized + Send, B: Backend> Sync for Lock<T, B> {}
 
 impl<T, B: Backend> Lock<T, B> {
     /// Constructs a new lock initialiser.
-    pub fn new(t: T, name: &'static CStr, key: Pin<&'static LockClassKey>) -> impl PinInit<Self> {
+    pub fn new(
+        t: impl PinInit<T>,
+        name: &'static CStr,
+        key: Pin<&'static LockClassKey>,
+    ) -> impl PinInit<Self> {
         pin_init!(Self {
-            data: UnsafeCell::new(t),
+            data <- UnsafeCell::pin_init(t),
             _pin: PhantomPinned,
             // SAFETY: `slot` is valid while the closure is called and both `name` and `key` have
             // static lifetimes so they live indefinitely.
@@ -240,6 +245,31 @@ impl<'a, T: ?Sized, B: Backend> Guard<'a, T, B> {
 
         cb()
     }
+
+    /// Returns a pinned mutable reference to the protected data.
+    ///
+    /// The guard implements [`DerefMut`] when `T: Unpin`, so for [`Unpin`]
+    /// types [`DerefMut`] should be used instead of this function.
+    ///
+    /// [`DerefMut`]: core::ops::DerefMut
+    /// [`Unpin`]: core::marker::Unpin
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use kernel::sync::{Mutex, MutexGuard};
+    /// # use core::{pin::Pin, marker::PhantomPinned};
+    /// struct Data(PhantomPinned);
+    ///
+    /// fn example(mutex: &Mutex<Data>) {
+    ///     let mut data: MutexGuard<'_, Data> = mutex.lock();
+    ///     let mut data: Pin<&mut Data> = data.as_mut();
+    /// }
+    /// ```
+    pub fn as_mut(&mut self) -> Pin<&mut T> {
+        // SAFETY: `self.lock.data` is structurally pinned.
+        unsafe { Pin::new_unchecked(&mut *self.lock.data.get()) }
+    }
 }
 
 impl<T: ?Sized, B: Backend> core::ops::Deref for Guard<'_, T, B> {
@@ -251,7 +281,10 @@ impl<T: ?Sized, B: Backend> core::ops::Deref for Guard<'_, T, B> {
     }
 }
 
-impl<T: ?Sized, B: Backend> core::ops::DerefMut for Guard<'_, T, B> {
+impl<T: ?Sized, B: Backend> core::ops::DerefMut for Guard<'_, T, B>
+where
+    T: Unpin,
+{
     fn deref_mut(&mut self) -> &mut Self::Target {
         // SAFETY: The caller owns the lock, so it is safe to deref the protected data.
         unsafe { &mut *self.lock.data.get() }
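With `DerefMut` now gated on `T: Unpin` and the new `Guard::as_mut` returning `Pin<&mut T>`, the data inside a lock becomes structurally pinned. A minimal sketch of this access split, assuming a hypothetical `Guard` that simply wraps a `&mut T` (the real guard holds the lock itself):

```rust
use core::marker::PhantomPinned;
use core::ops::{Deref, DerefMut};
use core::pin::Pin;

/// Hypothetical stand-in for the kernel's lock guard (illustration only).
struct Guard<'a, T: ?Sized> {
    data: &'a mut T,
}

impl<T: ?Sized> Deref for Guard<'_, T> {
    type Target = T;
    fn deref(&self) -> &T {
        self.data
    }
}

// Plain `&mut T` access is only offered when `T: Unpin`; `!Unpin` data
// must go through the pinned accessor below.
impl<T: ?Sized + Unpin> DerefMut for Guard<'_, T> {
    fn deref_mut(&mut self) -> &mut T {
        self.data
    }
}

impl<T: ?Sized> Guard<'_, T> {
    fn as_mut(&mut self) -> Pin<&mut T> {
        // SAFETY: the protected data is structurally pinned: the lock never
        // moves it out for as long as the lock exists.
        unsafe { Pin::new_unchecked(&mut *self.data) }
    }
}

fn main() {
    let mut x = 1u32; // `u32: Unpin`, so both access paths are available.
    let mut g = Guard { data: &mut x };
    *g += 1;
    assert_eq!(*g, 2);

    struct NotUnpin(PhantomPinned);
    let mut n = NotUnpin(PhantomPinned);
    let mut g = Guard { data: &mut n };
    // `&mut *g` would not compile here; only the pinned accessor works.
    let _pinned: Pin<&mut NotUnpin> = g.as_mut();
}
```

The point of the `Unpin` bound is that a bare `&mut T` would let callers move pinned data out (for example with `core::mem::replace`); restricting mutable dereference to `Unpin` types preserves the pinning guarantee for everything else.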
diff --git a/rust/kernel/sync/lock/global.rs b/rust/kernel/sync/lock/global.rs
index d65f94b5caf2..eab48108a4ae 100644
--- a/rust/kernel/sync/lock/global.rs
+++ b/rust/kernel/sync/lock/global.rs
@@ -5,7 +5,7 @@
 //! Support for defining statics containing locks.
 
 use crate::{
-    str::CStr,
+    str::{CStr, CStrExt as _},
     sync::lock::{Backend, Guard, Lock},
     sync::{LockClassKey, LockedBy},
     types::Opaque,
@@ -106,7 +106,10 @@ impl<B: GlobalLockBackend> core::ops::Deref for GlobalGuard<B> {
     }
 }
 
-impl<B: GlobalLockBackend> core::ops::DerefMut for GlobalGuard<B> {
+impl<B: GlobalLockBackend> core::ops::DerefMut for GlobalGuard<B>
+where
+    B::Item: Unpin,
+{
     fn deref_mut(&mut self) -> &mut Self::Target {
         &mut self.inner
     }
diff --git a/rust/kernel/sync/set_once.rs b/rust/kernel/sync/set_once.rs
new file mode 100644
index 000000000000..bdba601807d8
--- /dev/null
+++ b/rust/kernel/sync/set_once.rs
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! A container that can be initialized at most once.
+
+use super::atomic::{
+    ordering::{Acquire, Relaxed, Release},
+    Atomic,
+};
+use core::{cell::UnsafeCell, mem::MaybeUninit};
+
+/// A container that can be populated at most once. Thread safe.
+///
+/// Once a [`SetOnce`] is populated, it remains populated by the same object for the lifetime
+/// of `Self`.
+///
+/// # Invariants
+///
+/// - `init` may only increase in value.
+/// - `init` may only assume values in the range `0..=2`.
+/// - `init == 0` if and only if `value` is uninitialized.
+/// - `init == 1` if and only if there is exactly one thread with exclusive
+///   access to `self.value`.
+/// - `init == 2` if and only if `value` is initialized and valid for shared
+///   access.
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::sync::SetOnce;
+/// let value = SetOnce::new();
+/// assert_eq!(None, value.as_ref());
+///
+/// let status = value.populate(42u8);
+/// assert_eq!(true, status);
+/// assert_eq!(Some(&42u8), value.as_ref());
+/// assert_eq!(Some(42u8), value.copy());
+///
+/// let status = value.populate(101u8);
+/// assert_eq!(false, status);
+/// assert_eq!(Some(&42u8), value.as_ref());
+/// assert_eq!(Some(42u8), value.copy());
+/// ```
+pub struct SetOnce<T> {
+    init: Atomic<u32>,
+    value: UnsafeCell<MaybeUninit<T>>,
+}
+
+impl<T> Default for SetOnce<T> {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl<T> SetOnce<T> {
+    /// Create a new [`SetOnce`].
+    ///
+    /// The returned instance will be empty.
+    pub const fn new() -> Self {
+        // INVARIANT: The container is empty and we initialize `init` to `0`.
+        Self {
+            value: UnsafeCell::new(MaybeUninit::uninit()),
+            init: Atomic::new(0),
+        }
+    }
+
+    /// Get a reference to the contained object.
+    ///
+    /// Returns [`None`] if this [`SetOnce`] is empty.
+    pub fn as_ref(&self) -> Option<&T> {
+        if self.init.load(Acquire) == 2 {
+            // SAFETY: By the type invariants of `Self`, `self.init == 2` means that `self.value`
+            // is initialized and valid for shared access.
+            Some(unsafe { &*self.value.get().cast() })
+        } else {
+            None
+        }
+    }
+
+    /// Populate the [`SetOnce`].
+    ///
+    /// Returns `true` if the [`SetOnce`] was successfully populated.
+    pub fn populate(&self, value: T) -> bool {
+        // INVARIANT: If the swap succeeds:
+        // - We increase `init`.
+        // - We write the valid value `1` to `init`.
+        // - Only one thread can succeed in this write, so we have exclusive access after the
+        //   write.
+        if let Ok(0) = self.init.cmpxchg(0, 1, Relaxed) {
+            // SAFETY: By the type invariants of `Self`, the fact that we succeeded in writing `1`
+            // to `self.init` means we obtained exclusive access to `self.value`.
+            unsafe { core::ptr::write(self.value.get().cast(), value) };
+            // INVARIANT:
+            // - We increase `init`.
+            // - We write the valid value `2` to `init`.
+            // - We release our exclusive access to `self.value` and it is now valid for shared
+            //   access.
+            self.init.store(2, Release);
+            true
+        } else {
+            false
+        }
+    }
+
+    /// Get a copy of the contained object.
+    ///
+    /// Returns [`None`] if the [`SetOnce`] is empty.
+    pub fn copy(&self) -> Option<T>
+    where
+        T: Copy,
+    {
+        self.as_ref().copied()
+    }
+}
+
+impl<T> Drop for SetOnce<T> {
+    fn drop(&mut self) {
+        if *self.init.get_mut() == 2 {
+            let value = self.value.get_mut();
+            // SAFETY: By the type invariants of `Self`, `self.init == 2` means that `self.value`
+            // contains a valid value. We have exclusive access, as we hold a `mut` reference to
+            // `self`.
+            unsafe { value.assume_init_drop() };
+        }
+    }
+}
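`SetOnce` is a small state machine: `0` means empty, `1` means one writer won the `cmpxchg` and holds exclusive access, and `2` means the value is initialized and published. The `Release` store in `populate` pairs with the `Acquire` load in `as_ref`, so a reader that observes `2` is also guaranteed to observe the written value. A rough userspace analogue of the same protocol against `std::sync::atomic` (a hypothetical re-implementation for illustration; it omits the `Drop` handling the kernel type has, so a populated value is leaked here):

```rust
use std::cell::UnsafeCell;
use std::mem::MaybeUninit;
use std::sync::atomic::{AtomicU32, Ordering};

/// Hypothetical userspace sketch of the state machine (illustration only):
/// 0 = empty, 1 = a writer holds exclusive access, 2 = initialized.
struct SetOnce<T> {
    init: AtomicU32,
    value: UnsafeCell<MaybeUninit<T>>,
}

// SAFETY (sketch): shared access only hands out `&T` once state 2 is
// published, so requiring `T: Send + Sync` suffices for sharing.
unsafe impl<T: Send + Sync> Sync for SetOnce<T> {}

impl<T> SetOnce<T> {
    const fn new() -> Self {
        Self {
            init: AtomicU32::new(0),
            value: UnsafeCell::new(MaybeUninit::uninit()),
        }
    }

    fn populate(&self, value: T) -> bool {
        // Winning the 0 -> 1 CAS grants exclusive access to `value`;
        // `Relaxed` is enough because nothing is published by this step.
        if self
            .init
            .compare_exchange(0, 1, Ordering::Relaxed, Ordering::Relaxed)
            .is_ok()
        {
            unsafe { (*self.value.get()).write(value) };
            // `Release` pairs with the `Acquire` load in `as_ref`,
            // publishing the freshly written value together with state 2.
            self.init.store(2, Ordering::Release);
            true
        } else {
            false
        }
    }

    fn as_ref(&self) -> Option<&T> {
        if self.init.load(Ordering::Acquire) == 2 {
            // SAFETY: state 2 means the value was initialized and published.
            Some(unsafe { (*self.value.get()).assume_init_ref() })
        } else {
            None
        }
    }
}

fn main() {
    let v = SetOnce::new();
    assert_eq!(v.as_ref(), None::<&u8>);
    assert!(v.populate(42u8));
    assert!(!v.populate(101));
    assert_eq!(v.as_ref(), Some(&42));
}
```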