Diffstat (limited to 'rust/kernel')
-rw-r--r--  rust/kernel/.gitignore | 2
-rw-r--r--  rust/kernel/acpi.rs | 67
-rw-r--r--  rust/kernel/alloc.rs | 69
-rw-r--r--  rust/kernel/alloc/allocator.rs | 193
-rw-r--r--  rust/kernel/alloc/allocator/iter.rs | 102
-rw-r--r--  rust/kernel/alloc/allocator_test.rs | 113
-rw-r--r--  rust/kernel/alloc/kbox.rs | 226
-rw-r--r--  rust/kernel/alloc/kvec.rs | 136
-rw-r--r--  rust/kernel/alloc/kvec/errors.rs | 2
-rw-r--r--  rust/kernel/alloc/layout.rs | 7
-rw-r--r--  rust/kernel/auxiliary.rs | 57
-rw-r--r--  rust/kernel/bitmap.rs | 600
-rw-r--r--  rust/kernel/bits.rs | 203
-rw-r--r--  rust/kernel/block.rs | 13
-rw-r--r--  rust/kernel/block/mq.rs | 16
-rw-r--r--  rust/kernel/block/mq/gen_disk.rs | 60
-rw-r--r--  rust/kernel/block/mq/operations.rs | 74
-rw-r--r--  rust/kernel/block/mq/raw_writer.rs | 55
-rw-r--r--  rust/kernel/block/mq/request.rs | 105
-rw-r--r--  rust/kernel/block/mq/tag_set.rs | 12
-rw-r--r--  rust/kernel/bug.rs | 126
-rw-r--r--  rust/kernel/clk.rs | 48
-rw-r--r--  rust/kernel/configfs.rs | 36
-rw-r--r--  rust/kernel/cpu.rs | 3
-rw-r--r--  rust/kernel/cpufreq.rs | 96
-rw-r--r--  rust/kernel/cpumask.rs | 22
-rw-r--r--  rust/kernel/cred.rs | 12
-rw-r--r--  rust/kernel/debugfs.rs | 594
-rw-r--r--  rust/kernel/debugfs/callback_adapters.rs | 122
-rw-r--r--  rust/kernel/debugfs/entry.rs | 164
-rw-r--r--  rust/kernel/debugfs/file_ops.rs | 247
-rw-r--r--  rust/kernel/debugfs/traits.rs | 102
-rw-r--r--  rust/kernel/device.rs | 330
-rw-r--r--  rust/kernel/device/property.rs | 632
-rw-r--r--  rust/kernel/device_id.rs | 101
-rw-r--r--  rust/kernel/devres.rs | 308
-rw-r--r--  rust/kernel/dma.rs | 448
-rw-r--r--  rust/kernel/driver.rs | 174
-rw-r--r--  rust/kernel/drm/device.rs | 51
-rw-r--r--  rust/kernel/drm/driver.rs | 21
-rw-r--r--  rust/kernel/drm/file.rs | 10
-rw-r--r--  rust/kernel/drm/gem/mod.rs | 110
-rw-r--r--  rust/kernel/drm/ioctl.rs | 17
-rw-r--r--  rust/kernel/error.rs | 77
-rw-r--r--  rust/kernel/faux.rs | 4
-rw-r--r--  rust/kernel/firmware.rs | 11
-rw-r--r--  rust/kernel/fmt.rs | 7
-rw-r--r--  rust/kernel/fs.rs | 3
-rw-r--r--  rust/kernel/fs/file.rs | 17
-rw-r--r--  rust/kernel/fs/kiocb.rs | 68
-rw-r--r--  rust/kernel/generated_arch_reachable_asm.rs.S | 7
-rw-r--r--  rust/kernel/generated_arch_warn_asm.rs.S | 7
-rw-r--r--  rust/kernel/id_pool.rs | 226
-rw-r--r--  rust/kernel/init.rs | 34
-rw-r--r--  rust/kernel/io.rs | 26
-rw-r--r--  rust/kernel/io/mem.rs | 279
-rw-r--r--  rust/kernel/io/poll.rs | 104
-rw-r--r--  rust/kernel/io/resource.rs | 230
-rw-r--r--  rust/kernel/iov.rs | 314
-rw-r--r--  rust/kernel/irq.rs | 24
-rw-r--r--  rust/kernel/irq/flags.rs | 124
-rw-r--r--  rust/kernel/irq/request.rs | 507
-rw-r--r--  rust/kernel/kunit.rs | 36
-rw-r--r--  rust/kernel/lib.rs | 79
-rw-r--r--  rust/kernel/list.rs | 183
-rw-r--r--  rust/kernel/list/impl_list_item_mod.rs | 239
-rw-r--r--  rust/kernel/maple_tree.rs | 647
-rw-r--r--  rust/kernel/miscdevice.rs | 88
-rw-r--r--  rust/kernel/mm.rs | 3
-rw-r--r--  rust/kernel/mm/mmput_async.rs | 2
-rw-r--r--  rust/kernel/mm/virt.rs | 53
-rw-r--r--  rust/kernel/net/phy.rs | 155
-rw-r--r--  rust/kernel/of.rs | 23
-rw-r--r--  rust/kernel/opp.rs | 57
-rw-r--r--  rust/kernel/page.rs | 93
-rw-r--r--  rust/kernel/pci.rs | 272
-rw-r--r--  rust/kernel/pci/id.rs | 578
-rw-r--r--  rust/kernel/pid_namespace.rs | 5
-rw-r--r--  rust/kernel/platform.rs | 324
-rw-r--r--  rust/kernel/prelude.rs | 9
-rw-r--r--  rust/kernel/print.rs | 12
-rw-r--r--  rust/kernel/processor.rs | 14
-rw-r--r--  rust/kernel/ptr.rs | 228
-rw-r--r--  rust/kernel/rbtree.rs | 29
-rw-r--r--  rust/kernel/regulator.rs | 395
-rw-r--r--  rust/kernel/revocable.rs | 12
-rw-r--r--  rust/kernel/scatterlist.rs | 491
-rw-r--r--  rust/kernel/security.rs | 37
-rw-r--r--  rust/kernel/seq_file.rs | 8
-rw-r--r--  rust/kernel/sizes.rs | 24
-rw-r--r--  rust/kernel/str.rs | 271
-rw-r--r--  rust/kernel/sync.rs | 14
-rw-r--r--  rust/kernel/sync/arc.rs | 159
-rw-r--r--  rust/kernel/sync/aref.rs | 169
-rw-r--r--  rust/kernel/sync/atomic.rs | 551
-rw-r--r--  rust/kernel/sync/atomic/internal.rs | 265
-rw-r--r--  rust/kernel/sync/atomic/ordering.rs | 104
-rw-r--r--  rust/kernel/sync/atomic/predefine.rs | 169
-rw-r--r--  rust/kernel/sync/barrier.rs | 61
-rw-r--r--  rust/kernel/sync/condvar.rs | 3
-rw-r--r--  rust/kernel/sync/lock.rs | 2
-rw-r--r--  rust/kernel/sync/poll.rs | 69
-rw-r--r--  rust/kernel/sync/refcount.rs | 113
-rw-r--r--  rust/kernel/task.rs | 40
-rw-r--r--  rust/kernel/time.rs | 384
-rw-r--r--  rust/kernel/time/delay.rs | 49
-rw-r--r--  rust/kernel/time/hrtimer.rs | 452
-rw-r--r--  rust/kernel/time/hrtimer/arc.rs | 17
-rw-r--r--  rust/kernel/time/hrtimer/pin.rs | 19
-rw-r--r--  rust/kernel/time/hrtimer/pin_mut.rs | 20
-rw-r--r--  rust/kernel/time/hrtimer/tbox.rs | 17
-rw-r--r--  rust/kernel/transmute.rs | 114
-rw-r--r--  rust/kernel/types.rs | 245
-rw-r--r--  rust/kernel/uaccess.rs | 167
-rw-r--r--  rust/kernel/usb.rs | 456
-rw-r--r--  rust/kernel/workqueue.rs | 351
-rw-r--r--  rust/kernel/xarray.rs | 9
117 files changed, 14460 insertions(+), 1881 deletions(-)
diff --git a/rust/kernel/.gitignore b/rust/kernel/.gitignore
index 6ba39a178f30..f636ad95aaf3 100644
--- a/rust/kernel/.gitignore
+++ b/rust/kernel/.gitignore
@@ -1,3 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
/generated_arch_static_branch_asm.rs
+/generated_arch_warn_asm.rs
+/generated_arch_reachable_asm.rs
diff --git a/rust/kernel/acpi.rs b/rust/kernel/acpi.rs
new file mode 100644
index 000000000000..37e1161c1298
--- /dev/null
+++ b/rust/kernel/acpi.rs
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Advanced Configuration and Power Interface abstractions.
+
+use crate::{
+ bindings,
+ device_id::{RawDeviceId, RawDeviceIdIndex},
+ prelude::*,
+};
+
+/// IdTable type for ACPI drivers.
+pub type IdTable<T> = &'static dyn kernel::device_id::IdTable<DeviceId, T>;
+
+/// An ACPI device id.
+#[repr(transparent)]
+#[derive(Clone, Copy)]
+pub struct DeviceId(bindings::acpi_device_id);
+
+// SAFETY: `DeviceId` is a `#[repr(transparent)]` wrapper of `acpi_device_id` and does not add
+// additional invariants, so it's safe to transmute to `RawType`.
+unsafe impl RawDeviceId for DeviceId {
+ type RawType = bindings::acpi_device_id;
+}
+
+// SAFETY: `DRIVER_DATA_OFFSET` is the offset to the `driver_data` field.
+unsafe impl RawDeviceIdIndex for DeviceId {
+ const DRIVER_DATA_OFFSET: usize = core::mem::offset_of!(bindings::acpi_device_id, driver_data);
+
+ fn index(&self) -> usize {
+ self.0.driver_data
+ }
+}
+
+impl DeviceId {
+ const ACPI_ID_LEN: usize = 16;
+
+ /// Create a new device id from an ACPI 'id' string.
+ #[inline(always)]
+ pub const fn new(id: &'static CStr) -> Self {
+ let src = id.to_bytes_with_nul();
+ build_assert!(src.len() <= Self::ACPI_ID_LEN, "ID exceeds 16 bytes");
+ // Replace with `bindings::acpi_device_id::default()` once stabilized for `const`.
+ // SAFETY: FFI type is valid to be zero-initialized.
+ let mut acpi: bindings::acpi_device_id = unsafe { core::mem::zeroed() };
+ let mut i = 0;
+ while i < src.len() {
+ acpi.id[i] = src[i];
+ i += 1;
+ }
+
+ Self(acpi)
+ }
+}
+
+/// Create an ACPI `IdTable` with an "alias" for modpost.
+#[macro_export]
+macro_rules! acpi_device_table {
+ ($table_name:ident, $module_table_name:ident, $id_info_type: ty, $table_data: expr) => {
+ const $table_name: $crate::device_id::IdArray<
+ $crate::acpi::DeviceId,
+ $id_info_type,
+ { $table_data.len() },
+ > = $crate::device_id::IdArray::new($table_data);
+
+ $crate::module_device_table!("acpi", $module_table_name, $table_name);
+ };
+}
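A minimal usage sketch of the macro above; the table name, the ACPI ID string, and the unit `IdInfo` type are illustrative only and not part of this patch:

    kernel::acpi_device_table!(
        ACPI_TABLE,
        MODULE_ACPI_TABLE,
        (), // hypothetical `IdInfo` type; a real driver would use its own
        [(kernel::acpi::DeviceId::new(c_str!("LNUXBEEF")), ())]
    );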
diff --git a/rust/kernel/alloc.rs b/rust/kernel/alloc.rs
index a2c49e5494d3..e38720349dcf 100644
--- a/rust/kernel/alloc.rs
+++ b/rust/kernel/alloc.rs
@@ -2,18 +2,11 @@
//! Implementation of the kernel's memory allocation infrastructure.
-#[cfg(not(any(test, testlib)))]
pub mod allocator;
pub mod kbox;
pub mod kvec;
pub mod layout;
-#[cfg(any(test, testlib))]
-pub mod allocator_test;
-
-#[cfg(any(test, testlib))]
-pub use self::allocator_test as allocator;
-
pub use self::kbox::Box;
pub use self::kbox::KBox;
pub use self::kbox::KVBox;
@@ -28,6 +21,8 @@ pub use self::kvec::Vec;
/// Indicates an allocation error.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub struct AllocError;
+
+use crate::error::{code::EINVAL, Result};
use core::{alloc::Layout, ptr::NonNull};
/// Flags to be used when allocating memory.
@@ -115,6 +110,31 @@ pub mod flags {
pub const __GFP_NOWARN: Flags = Flags(bindings::__GFP_NOWARN);
}
+/// Non-Uniform Memory Access (NUMA) node identifier.
+#[derive(Clone, Copy, PartialEq)]
+pub struct NumaNode(i32);
+
+impl NumaNode {
+ /// Create a new NUMA node identifier (non-negative integer).
+ ///
+ /// Returns [`EINVAL`] if a negative id or an id exceeding [`bindings::MAX_NUMNODES`] is
+ /// specified.
+ pub fn new(node: i32) -> Result<Self> {
+ // MAX_NUMNODES never exceeds 2**10 because NODES_SHIFT is 0..10.
+ if node < 0 || node >= bindings::MAX_NUMNODES as i32 {
+ return Err(EINVAL);
+ }
+ Ok(Self(node))
+ }
+}
+
+/// Contains the constant used to tell the [`Allocator`] that the caller has no preference
+/// for the NUMA node to allocate memory from.
+impl NumaNode {
+ /// No node preference.
+ pub const NO_NODE: NumaNode = NumaNode(bindings::NUMA_NO_NODE);
+}
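A hedged sketch of how the two constructors above are meant to be used (the values are illustrative):

    // Valid ids are non-negative and below `MAX_NUMNODES`.
    let node = NumaNode::new(0)?;
    assert!(NumaNode::new(-2).is_err());
    // When the caller has no preference, use the predefined constant.
    let _no_pref = NumaNode::NO_NODE;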
+
/// The kernel's [`Allocator`] trait.
///
/// An implementation of [`Allocator`] can allocate, re-allocate and free memory buffers described
@@ -137,7 +157,15 @@ pub mod flags {
/// - Implementers must ensure that all trait functions abide by the guarantees documented in the
/// `# Guarantees` sections.
pub unsafe trait Allocator {
- /// Allocate memory based on `layout` and `flags`.
+ /// The minimum alignment satisfied by all allocations from this allocator.
+ ///
+ /// # Guarantees
+ ///
+ /// Any pointer allocated by this allocator is guaranteed to be aligned to `MIN_ALIGN` even if
+ /// the requested layout has a smaller alignment.
+ const MIN_ALIGN: usize;
+
+ /// Allocate memory based on `layout`, `flags` and `nid`.
///
/// On success, returns a buffer represented as `NonNull<[u8]>` that satisfies the layout
/// constraints (i.e. minimum size and alignment as specified by `layout`).
@@ -153,13 +181,21 @@ pub unsafe trait Allocator {
///
/// Additionally, `Flags` are honored as documented in
/// <https://docs.kernel.org/core-api/mm-api.html#mm-api-gfp-flags>.
- fn alloc(layout: Layout, flags: Flags) -> Result<NonNull<[u8]>, AllocError> {
+ fn alloc(layout: Layout, flags: Flags, nid: NumaNode) -> Result<NonNull<[u8]>, AllocError> {
// SAFETY: Passing `None` to `realloc` is valid by its safety requirements and asks for a
// new memory allocation.
- unsafe { Self::realloc(None, layout, Layout::new::<()>(), flags) }
+ unsafe { Self::realloc(None, layout, Layout::new::<()>(), flags, nid) }
}
- /// Re-allocate an existing memory allocation to satisfy the requested `layout`.
+ /// Re-allocate an existing memory allocation to satisfy the requested `layout`, allocating
+ /// from the requested NUMA node if one is specified.
+ ///
+ /// Systems employing a Non-Uniform Memory Access (NUMA) architecture contain collections of
+ /// hardware resources including processors, memory, and I/O buses, that comprise what is
+ /// commonly known as a NUMA node.
+ ///
+ /// `nid` stands for NUMA id, i.e. NUMA node identifier, which is a non-negative integer
+ /// if a node needs to be specified, or [`NumaNode::NO_NODE`] if the caller doesn't care.
///
/// If the requested size is zero, `realloc` behaves equivalent to `free`.
///
@@ -196,6 +232,7 @@ pub unsafe trait Allocator {
layout: Layout,
old_layout: Layout,
flags: Flags,
+ nid: NumaNode,
) -> Result<NonNull<[u8]>, AllocError>;
/// Free an existing memory allocation.
@@ -211,7 +248,15 @@ pub unsafe trait Allocator {
// SAFETY: The caller guarantees that `ptr` points at a valid allocation created by this
// allocator. We are passing a `Layout` with the smallest possible alignment, so it is
// smaller than or equal to the alignment previously used with this allocation.
- let _ = unsafe { Self::realloc(Some(ptr), Layout::new::<()>(), layout, Flags(0)) };
+ let _ = unsafe {
+ Self::realloc(
+ Some(ptr),
+ Layout::new::<()>(),
+ layout,
+ Flags(0),
+ NumaNode::NO_NODE,
+ )
+ };
}
}
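For callers, the net effect of the new parameter is one extra argument. A sketch of a direct allocation with no node preference, assuming the `Kmalloc` allocator defined in the next file:

    use core::alloc::Layout;
    use kernel::alloc::{allocator::Kmalloc, Allocator, NumaNode};

    let layout = Layout::new::<u64>();
    // Behaves like the old two-argument `alloc`.
    let ptr = Kmalloc::alloc(layout, GFP_KERNEL, NumaNode::NO_NODE)?;
    // SAFETY: `ptr` was returned by `Kmalloc::alloc` for `layout`.
    unsafe { Kmalloc::free(ptr.cast(), layout) };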
diff --git a/rust/kernel/alloc/allocator.rs b/rust/kernel/alloc/allocator.rs
index aa2dfa9dca4c..63bfb91b3671 100644
--- a/rust/kernel/alloc/allocator.rs
+++ b/rust/kernel/alloc/allocator.rs
@@ -13,9 +13,14 @@ use core::alloc::Layout;
use core::ptr;
use core::ptr::NonNull;
-use crate::alloc::{AllocError, Allocator};
+use crate::alloc::{AllocError, Allocator, NumaNode};
use crate::bindings;
-use crate::pr_warn;
+use crate::page;
+
+const ARCH_KMALLOC_MINALIGN: usize = bindings::ARCH_KMALLOC_MINALIGN;
+
+mod iter;
+pub use self::iter::VmallocPageIter;
/// The contiguous kernel allocator.
///
@@ -43,33 +48,28 @@ pub struct Vmalloc;
/// For more details see [self].
pub struct KVmalloc;
-/// Returns a proper size to alloc a new object aligned to `new_layout`'s alignment.
-fn aligned_size(new_layout: Layout) -> usize {
- // Customized layouts from `Layout::from_size_align()` can have size < align, so pad first.
- let layout = new_layout.pad_to_align();
-
- // Note that `layout.size()` (after padding) is guaranteed to be a multiple of `layout.align()`
- // which together with the slab guarantees means the `krealloc` will return a properly aligned
- // object (see comments in `kmalloc()` for more information).
- layout.size()
-}
-
/// # Invariants
///
-/// One of the following: `krealloc`, `vrealloc`, `kvrealloc`.
+/// One of the following: `krealloc_node_align`, `vrealloc_node_align`, `kvrealloc_node_align`.
struct ReallocFunc(
- unsafe extern "C" fn(*const crate::ffi::c_void, usize, u32) -> *mut crate::ffi::c_void,
+ unsafe extern "C" fn(
+ *const crate::ffi::c_void,
+ usize,
+ crate::ffi::c_ulong,
+ u32,
+ crate::ffi::c_int,
+ ) -> *mut crate::ffi::c_void,
);
impl ReallocFunc {
- // INVARIANT: `krealloc` satisfies the type invariants.
- const KREALLOC: Self = Self(bindings::krealloc);
+ // INVARIANT: `krealloc_node_align` satisfies the type invariants.
+ const KREALLOC: Self = Self(bindings::krealloc_node_align);
- // INVARIANT: `vrealloc` satisfies the type invariants.
- const VREALLOC: Self = Self(bindings::vrealloc);
+ // INVARIANT: `vrealloc_node_align` satisfies the type invariants.
+ const VREALLOC: Self = Self(bindings::vrealloc_node_align);
- // INVARIANT: `kvrealloc` satisfies the type invariants.
- const KVREALLOC: Self = Self(bindings::kvrealloc);
+ // INVARIANT: `kvrealloc_node_align` satisfies the type invariants.
+ const KVREALLOC: Self = Self(bindings::kvrealloc_node_align);
/// # Safety
///
@@ -87,8 +87,9 @@ impl ReallocFunc {
layout: Layout,
old_layout: Layout,
flags: Flags,
+ nid: NumaNode,
) -> Result<NonNull<[u8]>, AllocError> {
- let size = aligned_size(layout);
+ let size = layout.size();
let ptr = match ptr {
Some(ptr) => {
if old_layout.size() == 0 {
@@ -110,7 +111,7 @@ impl ReallocFunc {
// - Those functions provide the guarantees of this function.
let raw_ptr = unsafe {
// If `size == 0` and `ptr != NULL` the memory behind the pointer is freed.
- self.0(ptr.cast(), size, flags.0).cast()
+ self.0(ptr.cast(), size, layout.align(), flags.0, nid.0).cast()
};
let ptr = if size == 0 {
@@ -123,20 +124,84 @@ impl ReallocFunc {
}
}
+impl Kmalloc {
+ /// Returns a [`Layout`] that makes [`Kmalloc`] fulfill the requested size and alignment of
+ /// `layout`.
+ pub fn aligned_layout(layout: Layout) -> Layout {
+ // Note that `layout.size()` (after padding) is guaranteed to be a multiple of
+ // `layout.align()` which together with the slab guarantees means that `Kmalloc` will return
+ // a properly aligned object (see comments in `kmalloc()` for more information).
+ layout.pad_to_align()
+ }
+}
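A short sketch of what `aligned_layout` guarantees: `pad_to_align` rounds the size up to a multiple of the alignment, so the slab bucket chosen for the padded size also satisfies the alignment (the sizes are illustrative):

    use core::alloc::Layout;

    let layout = Layout::from_size_align(100, 128).unwrap();
    assert_eq!(Kmalloc::aligned_layout(layout).size(), 128);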
+
// SAFETY: `realloc` delegates to `ReallocFunc::call`, which guarantees that
// - memory remains valid until it is explicitly freed,
// - passing a pointer to a valid memory allocation is OK,
// - `realloc` satisfies the guarantees, since `ReallocFunc::call` has the same.
unsafe impl Allocator for Kmalloc {
+ const MIN_ALIGN: usize = ARCH_KMALLOC_MINALIGN;
+
#[inline]
unsafe fn realloc(
ptr: Option<NonNull<u8>>,
layout: Layout,
old_layout: Layout,
flags: Flags,
+ nid: NumaNode,
) -> Result<NonNull<[u8]>, AllocError> {
+ let layout = Kmalloc::aligned_layout(layout);
+
// SAFETY: `ReallocFunc::call` has the same safety requirements as `Allocator::realloc`.
- unsafe { ReallocFunc::KREALLOC.call(ptr, layout, old_layout, flags) }
+ unsafe { ReallocFunc::KREALLOC.call(ptr, layout, old_layout, flags, nid) }
+ }
+}
+
+impl Vmalloc {
+ /// Convert a pointer to a [`Vmalloc`] allocation to a [`page::BorrowedPage`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use core::ptr::{NonNull, from_mut};
+ /// # use kernel::{page, prelude::*};
+ /// use kernel::alloc::allocator::Vmalloc;
+ ///
+ /// let mut vbox = VBox::<[u8; page::PAGE_SIZE]>::new_uninit(GFP_KERNEL)?;
+ ///
+ /// {
+ /// // SAFETY: By the type invariant of `Box` the inner pointer of `vbox` is non-null.
+ /// let ptr = unsafe { NonNull::new_unchecked(from_mut(&mut *vbox)) };
+ ///
+ /// // SAFETY:
+ /// // `ptr` is a valid pointer to a `Vmalloc` allocation.
+ /// // `ptr` is valid for the entire lifetime of `page`.
+ /// let page = unsafe { Vmalloc::to_page(ptr.cast()) };
+ ///
+ /// // SAFETY: There is no concurrent read or write to the same page.
+ /// unsafe { page.fill_zero_raw(0, page::PAGE_SIZE)? };
+ /// }
+ /// # Ok::<(), Error>(())
+ /// ```
+ ///
+ /// # Safety
+ ///
+ /// - `ptr` must be a valid pointer to a [`Vmalloc`] allocation.
+ /// - `ptr` must remain valid for the entire duration of `'a`.
+ pub unsafe fn to_page<'a>(ptr: NonNull<u8>) -> page::BorrowedPage<'a> {
+ // SAFETY: `ptr` is a valid pointer to `Vmalloc` memory.
+ let page = unsafe { bindings::vmalloc_to_page(ptr.as_ptr().cast()) };
+
+ // SAFETY: `vmalloc_to_page` returns a valid pointer to a `struct page` for a valid pointer
+ // to `Vmalloc` memory.
+ let page = unsafe { NonNull::new_unchecked(page) };
+
+ // SAFETY:
+ // - `page` is a valid pointer to a `struct page`, given that by the safety requirements of
+ // this function `ptr` is a valid pointer to a `Vmalloc` allocation.
+ // - By the safety requirements of this function `ptr` is valid for the entire lifetime of
+ // `'a`.
+ unsafe { page::BorrowedPage::from_raw(page) }
}
}
@@ -145,22 +210,19 @@ unsafe impl Allocator for Kmalloc {
// - passing a pointer to a valid memory allocation is OK,
// - `realloc` satisfies the guarantees, since `ReallocFunc::call` has the same.
unsafe impl Allocator for Vmalloc {
+ const MIN_ALIGN: usize = kernel::page::PAGE_SIZE;
+
#[inline]
unsafe fn realloc(
ptr: Option<NonNull<u8>>,
layout: Layout,
old_layout: Layout,
flags: Flags,
+ nid: NumaNode,
) -> Result<NonNull<[u8]>, AllocError> {
- // TODO: Support alignments larger than PAGE_SIZE.
- if layout.align() > bindings::PAGE_SIZE {
- pr_warn!("Vmalloc does not support alignments larger than PAGE_SIZE yet.\n");
- return Err(AllocError);
- }
-
// SAFETY: If not `None`, `ptr` is guaranteed to point to valid memory, which was previously
// allocated with this `Allocator`.
- unsafe { ReallocFunc::VREALLOC.call(ptr, layout, old_layout, flags) }
+ unsafe { ReallocFunc::VREALLOC.call(ptr, layout, old_layout, flags, nid) }
}
}
@@ -169,21 +231,78 @@ unsafe impl Allocator for Vmalloc {
// - passing a pointer to a valid memory allocation is OK,
// - `realloc` satisfies the guarantees, since `ReallocFunc::call` has the same.
unsafe impl Allocator for KVmalloc {
+ const MIN_ALIGN: usize = ARCH_KMALLOC_MINALIGN;
+
#[inline]
unsafe fn realloc(
ptr: Option<NonNull<u8>>,
layout: Layout,
old_layout: Layout,
flags: Flags,
+ nid: NumaNode,
) -> Result<NonNull<[u8]>, AllocError> {
- // TODO: Support alignments larger than PAGE_SIZE.
- if layout.align() > bindings::PAGE_SIZE {
- pr_warn!("KVmalloc does not support alignments larger than PAGE_SIZE yet.\n");
- return Err(AllocError);
- }
+ // `KVmalloc` may use the `Kmalloc` backend, hence we have to enforce a `Kmalloc`
+ // compatible layout.
+ let layout = Kmalloc::aligned_layout(layout);
// SAFETY: If not `None`, `ptr` is guaranteed to point to valid memory, which was previously
// allocated with this `Allocator`.
- unsafe { ReallocFunc::KVREALLOC.call(ptr, layout, old_layout, flags) }
+ unsafe { ReallocFunc::KVREALLOC.call(ptr, layout, old_layout, flags, nid) }
+ }
+}
+
+#[macros::kunit_tests(rust_allocator)]
+mod tests {
+ use super::*;
+ use core::mem::MaybeUninit;
+ use kernel::prelude::*;
+
+ #[test]
+ fn test_alignment() -> Result {
+ const TEST_SIZE: usize = 1024;
+ const TEST_LARGE_ALIGN_SIZE: usize = kernel::page::PAGE_SIZE * 4;
+
+ // These two structs are used to test allocating aligned memory.
+ // they don't need to be accessed, so they're marked as dead_code.
+ #[expect(dead_code)]
+ #[repr(align(128))]
+ struct Blob([u8; TEST_SIZE]);
+ #[expect(dead_code)]
+ #[repr(align(8192))]
+ struct LargeAlignBlob([u8; TEST_LARGE_ALIGN_SIZE]);
+
+ struct TestAlign<T, A: Allocator>(Box<MaybeUninit<T>, A>);
+ impl<T, A: Allocator> TestAlign<T, A> {
+ fn new() -> Result<Self> {
+ Ok(Self(Box::<_, A>::new_uninit(GFP_KERNEL)?))
+ }
+
+ fn is_aligned_to(&self, align: usize) -> bool {
+ assert!(align.is_power_of_two());
+
+ let addr = self.0.as_ptr() as usize;
+ addr & (align - 1) == 0
+ }
+ }
+
+ let ta = TestAlign::<Blob, Kmalloc>::new()?;
+ assert!(ta.is_aligned_to(128));
+
+ let ta = TestAlign::<LargeAlignBlob, Kmalloc>::new()?;
+ assert!(ta.is_aligned_to(8192));
+
+ let ta = TestAlign::<Blob, Vmalloc>::new()?;
+ assert!(ta.is_aligned_to(128));
+
+ let ta = TestAlign::<LargeAlignBlob, Vmalloc>::new()?;
+ assert!(ta.is_aligned_to(8192));
+
+ let ta = TestAlign::<Blob, KVmalloc>::new()?;
+ assert!(ta.is_aligned_to(128));
+
+ let ta = TestAlign::<LargeAlignBlob, KVmalloc>::new()?;
+ assert!(ta.is_aligned_to(8192));
+
+ Ok(())
}
}
diff --git a/rust/kernel/alloc/allocator/iter.rs b/rust/kernel/alloc/allocator/iter.rs
new file mode 100644
index 000000000000..5759f86029b7
--- /dev/null
+++ b/rust/kernel/alloc/allocator/iter.rs
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use super::Vmalloc;
+use crate::page;
+use core::marker::PhantomData;
+use core::ptr::NonNull;
+
+/// An [`Iterator`] of [`page::BorrowedPage`] items owned by a [`Vmalloc`] allocation.
+///
+/// # Guarantees
+///
+/// The pages iterated by the [`Iterator`] appear in the order as they are mapped in the CPU's
+/// virtual address space ascendingly.
+///
+/// # Invariants
+///
+/// - `buf` is a valid and [`page::PAGE_SIZE`] aligned pointer into a [`Vmalloc`] allocation.
+/// - `size` is the number of bytes from `buf` until the end of the [`Vmalloc`] allocation `buf`
+/// points to.
+pub struct VmallocPageIter<'a> {
+ /// The base address of the [`Vmalloc`] buffer.
+ buf: NonNull<u8>,
+ /// The size of the buffer pointed to by `buf` in bytes.
+ size: usize,
+ /// The current page index of the [`Iterator`].
+ index: usize,
+ _p: PhantomData<page::BorrowedPage<'a>>,
+}
+
+impl<'a> Iterator for VmallocPageIter<'a> {
+ type Item = page::BorrowedPage<'a>;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ let offset = self.index.checked_mul(page::PAGE_SIZE)?;
+
+ // Even though `self.size()` may be smaller than `Self::page_count() * page::PAGE_SIZE`, it
+ // is always a number between `(Self::page_count() - 1) * page::PAGE_SIZE` and
+ // `Self::page_count() * page::PAGE_SIZE`, hence the check below is sufficient.
+ if offset < self.size() {
+ self.index += 1;
+ } else {
+ return None;
+ }
+
+ // TODO: Use `NonNull::add()` instead, once the minimum supported compiler version is
+ // bumped to 1.80 or later.
+ //
+ // SAFETY: `offset` is in the interval `[0, (self.page_count() - 1) * page::PAGE_SIZE]`,
+ // hence the resulting pointer is guaranteed to be within the same allocation.
+ let ptr = unsafe { self.buf.as_ptr().add(offset) };
+
+ // SAFETY: `ptr` is guaranteed to be non-null given that it is derived from `self.buf`.
+ let ptr = unsafe { NonNull::new_unchecked(ptr) };
+
+ // SAFETY:
+ // - `ptr` is a valid pointer to a `Vmalloc` allocation.
+ // - `ptr` is valid for the duration of `'a`.
+ Some(unsafe { Vmalloc::to_page(ptr) })
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let remaining = self.page_count().saturating_sub(self.index);
+
+ (remaining, Some(remaining))
+ }
+}
+
+impl<'a> VmallocPageIter<'a> {
+ /// Creates a new [`VmallocPageIter`] instance.
+ ///
+ /// # Safety
+ ///
+ /// - `buf` must be a [`page::PAGE_SIZE`] aligned pointer into a [`Vmalloc`] allocation.
+ /// - `buf` must be valid for at least the lifetime of `'a`.
+ /// - `size` must be the number of bytes from `buf` until the end of the [`Vmalloc`] allocation
+ /// `buf` points to.
+ pub unsafe fn new(buf: NonNull<u8>, size: usize) -> Self {
+ // INVARIANT: By the safety requirements, `buf` is a valid and `page::PAGE_SIZE` aligned
+ // pointer into a [`Vmalloc`] allocation.
+ Self {
+ buf,
+ size,
+ index: 0,
+ _p: PhantomData,
+ }
+ }
+
+ /// Returns the size of the backing [`Vmalloc`] allocation in bytes.
+ ///
+ /// Note that this is the size the [`Vmalloc`] allocation has been allocated with. Hence, this
+ /// number may be smaller than `[`Self::page_count`] * [`page::PAGE_SIZE`]`.
+ #[inline]
+ pub fn size(&self) -> usize {
+ self.size
+ }
+
+ /// Returns the number of pages owned by the backing [`Vmalloc`] allocation.
+ #[inline]
+ pub fn page_count(&self) -> usize {
+ self.size().div_ceil(page::PAGE_SIZE)
+ }
+}
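A hedged sketch of driving this iterator through the `AsPageIter` trait, whose `VVec` implementation appears later in this series (the capacity is illustrative):

    use kernel::page::{AsPageIter, PAGE_SIZE};

    let mut vec = VVec::<u8>::with_capacity(3 * PAGE_SIZE, GFP_KERNEL)?;
    let iter = vec.page_iter();
    // The allocation spans three pages, mapped consecutively.
    assert_eq!(iter.page_count(), 3);
    assert_eq!(iter.count(), 3);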
diff --git a/rust/kernel/alloc/allocator_test.rs b/rust/kernel/alloc/allocator_test.rs
deleted file mode 100644
index d19c06ef0498..000000000000
--- a/rust/kernel/alloc/allocator_test.rs
+++ /dev/null
@@ -1,113 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-//! So far the kernel's `Box` and `Vec` types can't be used by userspace test cases, since all users
-//! of those types (e.g. `CString`) use kernel allocators for instantiation.
-//!
-//! In order to allow userspace test cases to make use of such types as well, implement the
-//! `Cmalloc` allocator within the `allocator_test` module and type alias all kernel allocators to
-//! `Cmalloc`. The `Cmalloc` allocator uses libc's `realloc()` function as allocator backend.
-
-#![allow(missing_docs)]
-
-use super::{flags::*, AllocError, Allocator, Flags};
-use core::alloc::Layout;
-use core::cmp;
-use core::ptr;
-use core::ptr::NonNull;
-
-/// The userspace allocator based on libc.
-pub struct Cmalloc;
-
-pub type Kmalloc = Cmalloc;
-pub type Vmalloc = Kmalloc;
-pub type KVmalloc = Kmalloc;
-
-extern "C" {
- #[link_name = "aligned_alloc"]
- fn libc_aligned_alloc(align: usize, size: usize) -> *mut crate::ffi::c_void;
-
- #[link_name = "free"]
- fn libc_free(ptr: *mut crate::ffi::c_void);
-}
-
-// SAFETY:
-// - memory remains valid until it is explicitly freed,
-// - passing a pointer to a valid memory allocation created by this `Allocator` is always OK,
-// - `realloc` provides the guarantees as provided in the `# Guarantees` section.
-unsafe impl Allocator for Cmalloc {
- unsafe fn realloc(
- ptr: Option<NonNull<u8>>,
- layout: Layout,
- old_layout: Layout,
- flags: Flags,
- ) -> Result<NonNull<[u8]>, AllocError> {
- let src = match ptr {
- Some(src) => {
- if old_layout.size() == 0 {
- ptr::null_mut()
- } else {
- src.as_ptr()
- }
- }
- None => ptr::null_mut(),
- };
-
- if layout.size() == 0 {
- // SAFETY: `src` is either NULL or was previously allocated with this `Allocator`
- unsafe { libc_free(src.cast()) };
-
- return Ok(NonNull::slice_from_raw_parts(
- crate::alloc::dangling_from_layout(layout),
- 0,
- ));
- }
-
- // ISO C (ISO/IEC 9899:2011) defines `aligned_alloc`:
- //
- // > The value of alignment shall be a valid alignment supported by the implementation
- // [...].
- //
- // As an example of the "supported by the implementation" requirement, POSIX.1-2001 (IEEE
- // 1003.1-2001) defines `posix_memalign`:
- //
- // > The value of alignment shall be a power of two multiple of sizeof (void *).
- //
- // and POSIX-based implementations of `aligned_alloc` inherit this requirement. At the time
- // of writing, this is known to be the case on macOS (but not in glibc).
- //
- // Satisfy the stricter requirement to avoid spurious test failures on some platforms.
- let min_align = core::mem::size_of::<*const crate::ffi::c_void>();
- let layout = layout.align_to(min_align).map_err(|_| AllocError)?;
- let layout = layout.pad_to_align();
-
- // SAFETY: Returns either NULL or a pointer to a memory allocation that satisfies or
- // exceeds the given size and alignment requirements.
- let dst = unsafe { libc_aligned_alloc(layout.align(), layout.size()) } as *mut u8;
- let dst = NonNull::new(dst).ok_or(AllocError)?;
-
- if flags.contains(__GFP_ZERO) {
- // SAFETY: The preceding calls to `libc_aligned_alloc` and `NonNull::new`
- // guarantee that `dst` points to memory of at least `layout.size()` bytes.
- unsafe { dst.as_ptr().write_bytes(0, layout.size()) };
- }
-
- if !src.is_null() {
- // SAFETY:
- // - `src` has previously been allocated with this `Allocator`; `dst` has just been
- // newly allocated, hence the memory regions do not overlap.
- // - both` src` and `dst` are properly aligned and valid for reads and writes
- unsafe {
- ptr::copy_nonoverlapping(
- src,
- dst.as_ptr(),
- cmp::min(layout.size(), old_layout.size()),
- )
- };
- }
-
- // SAFETY: `src` is either NULL or was previously allocated with this `Allocator`
- unsafe { libc_free(src.cast()) };
-
- Ok(NonNull::slice_from_raw_parts(dst, layout.size()))
- }
-}
diff --git a/rust/kernel/alloc/kbox.rs b/rust/kernel/alloc/kbox.rs
index c386ff771d50..622b3529edfc 100644
--- a/rust/kernel/alloc/kbox.rs
+++ b/rust/kernel/alloc/kbox.rs
@@ -3,10 +3,10 @@
//! Implementation of [`Box`].
#[allow(unused_imports)] // Used in doc comments.
-use super::allocator::{KVmalloc, Kmalloc, Vmalloc};
-use super::{AllocError, Allocator, Flags};
+use super::allocator::{KVmalloc, Kmalloc, Vmalloc, VmallocPageIter};
+use super::{AllocError, Allocator, Flags, NumaNode};
use core::alloc::Layout;
-use core::fmt;
+use core::borrow::{Borrow, BorrowMut};
use core::marker::PhantomData;
use core::mem::ManuallyDrop;
use core::mem::MaybeUninit;
@@ -15,7 +15,10 @@ use core::pin::Pin;
use core::ptr::NonNull;
use core::result::Result;
+use crate::ffi::c_void;
+use crate::fmt;
use crate::init::InPlaceInit;
+use crate::page::AsPageIter;
use crate::types::ForeignOwnable;
use pin_init::{InPlaceWrite, Init, PinInit, ZeroableOption};
@@ -271,7 +274,7 @@ where
/// ```
pub fn new_uninit(flags: Flags) -> Result<Box<MaybeUninit<T>, A>, AllocError> {
let layout = Layout::new::<MaybeUninit<T>>();
- let ptr = A::alloc(layout, flags)?;
+ let ptr = A::alloc(layout, flags, NumaNode::NO_NODE)?;
// INVARIANT: `ptr` is either a dangling pointer or points to memory allocated with `A`,
// which is sufficient in size and alignment for storing a `T`.
@@ -288,6 +291,83 @@ where
Ok(Self::new(x, flags)?.into())
}
+ /// Construct a pinned slice of elements `Pin<Box<[T], A>>`.
+ ///
+ /// This is a convenient way to create e.g. slices of structures containing spinlocks
+ /// or mutexes.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use kernel::sync::{new_spinlock, SpinLock};
+ ///
+ /// struct Inner {
+ /// a: u32,
+ /// b: u32,
+ /// }
+ ///
+ /// #[pin_data]
+ /// struct Example {
+ /// c: u32,
+ /// #[pin]
+ /// d: SpinLock<Inner>,
+ /// }
+ ///
+ /// impl Example {
+ /// fn new() -> impl PinInit<Self, Error> {
+ /// try_pin_init!(Self {
+ /// c: 10,
+ /// d <- new_spinlock!(Inner { a: 20, b: 30 }),
+ /// })
+ /// }
+ /// }
+ ///
+ /// // Allocate a boxed slice of 10 `Example`s.
+ /// let s = KBox::pin_slice(
+ /// | _i | Example::new(),
+ /// 10,
+ /// GFP_KERNEL
+ /// )?;
+ ///
+ /// assert_eq!(s[5].c, 10);
+ /// assert_eq!(s[3].d.lock().a, 20);
+ /// # Ok::<(), Error>(())
+ /// ```
+ pub fn pin_slice<Func, Item, E>(
+ mut init: Func,
+ len: usize,
+ flags: Flags,
+ ) -> Result<Pin<Box<[T], A>>, E>
+ where
+ Func: FnMut(usize) -> Item,
+ Item: PinInit<T, E>,
+ E: From<AllocError>,
+ {
+ let mut buffer = super::Vec::<T, A>::with_capacity(len, flags)?;
+ for i in 0..len {
+ let ptr = buffer.spare_capacity_mut().as_mut_ptr().cast();
+ // SAFETY:
+ // - `ptr` is a valid pointer to uninitialized memory.
+ // - `ptr` is not used if an error is returned.
+ // - `ptr` won't be moved until it is dropped, i.e. it is pinned.
+ unsafe { init(i).__pinned_init(ptr)? };
+
+ // SAFETY:
+ // - `i + 1 <= len`, hence we don't exceed the capacity, due to the call to
+ // `with_capacity()` above.
+ // - The new value at index `buffer.len()` is the only element being added here, and
+ // it has been initialized above by `init(i).__pinned_init(ptr)`.
+ unsafe { buffer.inc_len(1) };
+ }
+
+ let (ptr, _, _) = buffer.into_raw_parts();
+ let slice = core::ptr::slice_from_raw_parts_mut(ptr, len);
+
+ // SAFETY: `slice` points to an allocation allocated with `A` (`buffer`) and holds a valid
+ // `[T]`.
+ Ok(Pin::from(unsafe { Box::from_raw(slice) }))
+ }
+
/// Convert a [`Box<T,A>`] to a [`Pin<Box<T,A>>`]. If `T` does not implement
/// [`Unpin`], then `x` will be pinned in memory and can't be moved.
pub fn into_pin(this: Self) -> Pin<Self> {
@@ -398,70 +478,79 @@ where
}
}
-// SAFETY: The `into_foreign` function returns a pointer that is well-aligned.
+// SAFETY: The pointer returned by `into_foreign` comes from a well aligned
+// pointer to `T` allocated by `A`.
unsafe impl<T: 'static, A> ForeignOwnable for Box<T, A>
where
A: Allocator,
{
- type PointedTo = T;
+ const FOREIGN_ALIGN: usize = if core::mem::align_of::<T>() < A::MIN_ALIGN {
+ A::MIN_ALIGN
+ } else {
+ core::mem::align_of::<T>()
+ };
+
type Borrowed<'a> = &'a T;
type BorrowedMut<'a> = &'a mut T;
- fn into_foreign(self) -> *mut Self::PointedTo {
- Box::into_raw(self)
+ fn into_foreign(self) -> *mut c_void {
+ Box::into_raw(self).cast()
}
- unsafe fn from_foreign(ptr: *mut Self::PointedTo) -> Self {
+ unsafe fn from_foreign(ptr: *mut c_void) -> Self {
// SAFETY: The safety requirements of this function ensure that `ptr` comes from a previous
// call to `Self::into_foreign`.
- unsafe { Box::from_raw(ptr) }
+ unsafe { Box::from_raw(ptr.cast()) }
}
- unsafe fn borrow<'a>(ptr: *mut Self::PointedTo) -> &'a T {
+ unsafe fn borrow<'a>(ptr: *mut c_void) -> &'a T {
// SAFETY: The safety requirements of this method ensure that the object remains alive and
// immutable for the duration of 'a.
- unsafe { &*ptr }
+ unsafe { &*ptr.cast() }
}
- unsafe fn borrow_mut<'a>(ptr: *mut Self::PointedTo) -> &'a mut T {
+ unsafe fn borrow_mut<'a>(ptr: *mut c_void) -> &'a mut T {
+ let ptr = ptr.cast();
// SAFETY: The safety requirements of this method ensure that the pointer is valid and that
// nothing else will access the value for the duration of 'a.
unsafe { &mut *ptr }
}
}
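The effect of `FOREIGN_ALIGN` is that even a `KBox<u8>` advertises the allocator's minimum alignment rather than `align_of::<u8>()`. A sketch, assuming `Kmalloc::MIN_ALIGN` from the allocator change earlier in this series:

    use kernel::alloc::{allocator::Kmalloc, Allocator};
    use kernel::types::ForeignOwnable;

    assert_eq!(
        <KBox<u8> as ForeignOwnable>::FOREIGN_ALIGN,
        <Kmalloc as Allocator>::MIN_ALIGN,
    );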
-// SAFETY: The `into_foreign` function returns a pointer that is well-aligned.
+// SAFETY: The pointer returned by `into_foreign` comes from a well aligned
+// pointer to `T` allocated by `A`.
unsafe impl<T: 'static, A> ForeignOwnable for Pin<Box<T, A>>
where
A: Allocator,
{
- type PointedTo = T;
+ const FOREIGN_ALIGN: usize = <Box<T, A> as ForeignOwnable>::FOREIGN_ALIGN;
type Borrowed<'a> = Pin<&'a T>;
type BorrowedMut<'a> = Pin<&'a mut T>;
- fn into_foreign(self) -> *mut Self::PointedTo {
+ fn into_foreign(self) -> *mut c_void {
// SAFETY: We are still treating the box as pinned.
- Box::into_raw(unsafe { Pin::into_inner_unchecked(self) })
+ Box::into_raw(unsafe { Pin::into_inner_unchecked(self) }).cast()
}
- unsafe fn from_foreign(ptr: *mut Self::PointedTo) -> Self {
+ unsafe fn from_foreign(ptr: *mut c_void) -> Self {
// SAFETY: The safety requirements of this function ensure that `ptr` comes from a previous
// call to `Self::into_foreign`.
- unsafe { Pin::new_unchecked(Box::from_raw(ptr)) }
+ unsafe { Pin::new_unchecked(Box::from_raw(ptr.cast())) }
}
- unsafe fn borrow<'a>(ptr: *mut Self::PointedTo) -> Pin<&'a T> {
+ unsafe fn borrow<'a>(ptr: *mut c_void) -> Pin<&'a T> {
// SAFETY: The safety requirements for this function ensure that the object is still alive,
// so it is safe to dereference the raw pointer.
// The safety requirements of `from_foreign` also ensure that the object remains alive for
// the lifetime of the returned value.
- let r = unsafe { &*ptr };
+ let r = unsafe { &*ptr.cast() };
// SAFETY: This pointer originates from a `Pin<Box<T>>`.
unsafe { Pin::new_unchecked(r) }
}
- unsafe fn borrow_mut<'a>(ptr: *mut Self::PointedTo) -> Pin<&'a mut T> {
+ unsafe fn borrow_mut<'a>(ptr: *mut c_void) -> Pin<&'a mut T> {
+ let ptr = ptr.cast();
// SAFETY: The safety requirements for this function ensure that the object is still alive,
// so it is safe to dereference the raw pointer.
// The safety requirements of `from_foreign` also ensure that the object remains alive for
@@ -499,6 +588,62 @@ where
}
}
+/// # Examples
+///
+/// ```
+/// # use core::borrow::Borrow;
+/// # use kernel::alloc::KBox;
+/// struct Foo<B: Borrow<u32>>(B);
+///
+/// // Owned instance.
+/// let owned = Foo(1);
+///
+/// // Owned instance using `KBox`.
+/// let owned_kbox = Foo(KBox::new(1, GFP_KERNEL)?);
+///
+/// let i = 1;
+/// // Borrowed from `i`.
+/// let borrowed = Foo(&i);
+/// # Ok::<(), Error>(())
+/// ```
+impl<T, A> Borrow<T> for Box<T, A>
+where
+ T: ?Sized,
+ A: Allocator,
+{
+ fn borrow(&self) -> &T {
+ self.deref()
+ }
+}
+
+/// # Examples
+///
+/// ```
+/// # use core::borrow::BorrowMut;
+/// # use kernel::alloc::KBox;
+/// struct Foo<B: BorrowMut<u32>>(B);
+///
+/// // Owned instance.
+/// let owned = Foo(1);
+///
+/// // Owned instance using `KBox`.
+/// let owned_kbox = Foo(KBox::new(1, GFP_KERNEL)?);
+///
+/// let mut i = 1;
+/// // Borrowed from `i`.
+/// let borrowed = Foo(&mut i);
+/// # Ok::<(), Error>(())
+/// ```
+impl<T, A> BorrowMut<T> for Box<T, A>
+where
+ T: ?Sized,
+ A: Allocator,
+{
+ fn borrow_mut(&mut self) -> &mut T {
+ self.deref_mut()
+ }
+}
+
impl<T, A> fmt::Display for Box<T, A>
where
T: ?Sized + fmt::Display,
@@ -536,3 +681,40 @@ where
unsafe { A::free(self.0.cast(), layout) };
}
}
+
+/// # Examples
+///
+/// ```
+/// # use kernel::prelude::*;
+/// use kernel::alloc::allocator::VmallocPageIter;
+/// use kernel::page::{AsPageIter, PAGE_SIZE};
+///
+/// let mut vbox = VBox::new((), GFP_KERNEL)?;
+///
+/// assert!(vbox.page_iter().next().is_none());
+///
+/// let mut vbox = VBox::<[u8; PAGE_SIZE]>::new_uninit(GFP_KERNEL)?;
+///
+/// let page = vbox.page_iter().next().expect("At least one page should be available.\n");
+///
+/// // SAFETY: There is no concurrent read or write to the same page.
+/// unsafe { page.fill_zero_raw(0, PAGE_SIZE)? };
+/// # Ok::<(), Error>(())
+/// ```
+impl<T> AsPageIter for VBox<T> {
+ type Iter<'a>
+ = VmallocPageIter<'a>
+ where
+ T: 'a;
+
+ fn page_iter(&mut self) -> Self::Iter<'_> {
+ let ptr = self.0.cast();
+ let size = core::mem::size_of::<T>();
+
+ // SAFETY:
+ // - `ptr` is a valid pointer to the beginning of a `Vmalloc` allocation.
+ // - `ptr` is guaranteed to be valid for the lifetime of `'a`.
+ // - `size` is the size of the `Vmalloc` allocation `ptr` points to.
+ unsafe { VmallocPageIter::new(ptr, size) }
+ }
+}
diff --git a/rust/kernel/alloc/kvec.rs b/rust/kernel/alloc/kvec.rs
index 1a0dd852a468..e94aebd084c8 100644
--- a/rust/kernel/alloc/kvec.rs
+++ b/rust/kernel/alloc/kvec.rs
@@ -3,12 +3,16 @@
//! Implementation of [`Vec`].
use super::{
- allocator::{KVmalloc, Kmalloc, Vmalloc},
+ allocator::{KVmalloc, Kmalloc, Vmalloc, VmallocPageIter},
layout::ArrayLayout,
- AllocError, Allocator, Box, Flags,
+ AllocError, Allocator, Box, Flags, NumaNode,
};
-use core::{
+use crate::{
fmt,
+ page::AsPageIter,
+};
+use core::{
+ borrow::{Borrow, BorrowMut},
marker::PhantomData,
mem::{ManuallyDrop, MaybeUninit},
ops::Deref,
@@ -174,7 +178,7 @@ where
/// Returns the number of elements that can be stored within the vector without allocating
/// additional memory.
- pub fn capacity(&self) -> usize {
+ pub const fn capacity(&self) -> usize {
if const { Self::is_zst() } {
usize::MAX
} else {
@@ -184,7 +188,7 @@ where
/// Returns the number of elements stored within the vector.
#[inline]
- pub fn len(&self) -> usize {
+ pub const fn len(&self) -> usize {
self.len
}
@@ -195,7 +199,7 @@ where
/// - `additional` must be less than or equal to `self.capacity - self.len`.
/// - All elements within the interval [`self.len`,`self.len + additional`) must be initialized.
#[inline]
- pub unsafe fn inc_len(&mut self, additional: usize) {
+ pub const unsafe fn inc_len(&mut self, additional: usize) {
// Guaranteed by the type invariant to never underflow.
debug_assert!(additional <= self.capacity() - self.len());
// INVARIANT: By the safety requirements of this method this represents the exact number of
@@ -223,6 +227,16 @@ where
}
/// Returns a slice of the entire vector.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = KVec::new();
+ /// v.push(1, GFP_KERNEL)?;
+ /// v.push(2, GFP_KERNEL)?;
+ /// assert_eq!(v.as_slice(), &[1, 2]);
+ /// # Ok::<(), Error>(())
+ /// ```
#[inline]
pub fn as_slice(&self) -> &[T] {
self
@@ -244,7 +258,7 @@ where
/// Returns a raw pointer to the vector's backing buffer, or, if `T` is a ZST, a dangling raw
/// pointer.
#[inline]
- pub fn as_ptr(&self) -> *const T {
+ pub const fn as_ptr(&self) -> *const T {
self.ptr.as_ptr()
}
@@ -260,7 +274,7 @@ where
/// assert!(!v.is_empty());
/// ```
#[inline]
- pub fn is_empty(&self) -> bool {
+ pub const fn is_empty(&self) -> bool {
self.len() == 0
}
@@ -288,7 +302,7 @@ where
// - `self.len` is smaller than `self.capacity` by the type invariant and hence, the
// resulting pointer is guaranteed to be part of the same allocated object.
// - `self.len` can not overflow `isize`.
- let ptr = unsafe { self.as_mut_ptr().add(self.len) } as *mut MaybeUninit<T>;
+ let ptr = unsafe { self.as_mut_ptr().add(self.len) }.cast::<MaybeUninit<T>>();
// SAFETY: The memory between `self.len` and `self.capacity` is guaranteed to be allocated
// and valid, but uninitialized.
@@ -633,6 +647,7 @@ where
layout.into(),
self.layout.into(),
flags,
+ NumaNode::NO_NODE,
)?
};
@@ -847,11 +862,11 @@ where
// - `ptr` points to memory with at least a size of `size_of::<T>() * len`,
// - all elements within `b` are initialized values of `T`,
// - `len` does not exceed `isize::MAX`.
- unsafe { Vec::from_raw_parts(ptr as _, len, len) }
+ unsafe { Vec::from_raw_parts(ptr.cast(), len, len) }
}
}
-impl<T> Default for KVec<T> {
+impl<T, A: Allocator> Default for Vec<T, A> {
#[inline]
fn default() -> Self {
Self::new()
@@ -890,6 +905,58 @@ where
}
}
+/// # Examples
+///
+/// ```
+/// # use core::borrow::Borrow;
+/// struct Foo<B: Borrow<[u32]>>(B);
+///
+/// // Owned array.
+/// let owned_array = Foo([1, 2, 3]);
+///
+/// // Owned vector.
+/// let owned_vec = Foo(KVec::from_elem(0, 3, GFP_KERNEL)?);
+///
+/// let arr = [1, 2, 3];
+/// // Borrowed slice from `arr`.
+/// let borrowed_slice = Foo(&arr[..]);
+/// # Ok::<(), Error>(())
+/// ```
+impl<T, A> Borrow<[T]> for Vec<T, A>
+where
+ A: Allocator,
+{
+ fn borrow(&self) -> &[T] {
+ self.as_slice()
+ }
+}
+
+/// # Examples
+///
+/// ```
+/// # use core::borrow::BorrowMut;
+/// struct Foo<B: BorrowMut<[u32]>>(B);
+///
+/// // Owned array.
+/// let owned_array = Foo([1, 2, 3]);
+///
+/// // Owned vector.
+/// let owned_vec = Foo(KVec::from_elem(0, 3, GFP_KERNEL)?);
+///
+/// let mut arr = [1, 2, 3];
+/// // Borrowed slice from `arr`.
+/// let borrowed_slice = Foo(&mut arr[..]);
+/// # Ok::<(), Error>(())
+/// ```
+impl<T, A> BorrowMut<[T]> for Vec<T, A>
+where
+ A: Allocator,
+{
+ fn borrow_mut(&mut self) -> &mut [T] {
+ self.as_mut_slice()
+ }
+}
+
impl<T: Eq, A> Eq for Vec<T, A> where A: Allocator {}
impl<T, I: SliceIndex<[T]>, A> Index<I> for Vec<T, A>
@@ -964,6 +1031,43 @@ where
}
}
+/// # Examples
+///
+/// ```
+/// # use kernel::prelude::*;
+/// use kernel::alloc::allocator::VmallocPageIter;
+/// use kernel::page::{AsPageIter, PAGE_SIZE};
+///
+/// let mut vec = VVec::<u8>::new();
+///
+/// assert!(vec.page_iter().next().is_none());
+///
+/// vec.reserve(PAGE_SIZE, GFP_KERNEL)?;
+///
+/// let page = vec.page_iter().next().expect("At least one page should be available.\n");
+///
+/// // SAFETY: There is no concurrent read or write to the same page.
+/// unsafe { page.fill_zero_raw(0, PAGE_SIZE)? };
+/// # Ok::<(), Error>(())
+/// ```
+impl<T> AsPageIter for VVec<T> {
+ type Iter<'a>
+ = VmallocPageIter<'a>
+ where
+ T: 'a;
+
+ fn page_iter(&mut self) -> Self::Iter<'_> {
+ let ptr = self.ptr.cast();
+ let size = self.layout.size();
+
+ // SAFETY:
+ // - `ptr` is a valid pointer to the beginning of a `Vmalloc` allocation.
+ // - `ptr` is guaranteed to be valid for the lifetime of `'a`.
+ // - `size` is the size of the `Vmalloc` allocation `ptr` points to.
+ unsafe { VmallocPageIter::new(ptr, size) }
+ }
+}
+
/// An [`Iterator`] implementation for [`Vec`] that moves elements out of a vector.
///
/// This structure is created by the [`Vec::into_iter`] method on [`Vec`] (provided by the
@@ -1058,7 +1162,13 @@ where
// the type invariant to be smaller than `cap`. Depending on `realloc` this operation
// may shrink the buffer or leave it as it is.
ptr = match unsafe {
- A::realloc(Some(buf.cast()), layout.into(), old_layout.into(), flags)
+ A::realloc(
+ Some(buf.cast()),
+ layout.into(),
+ old_layout.into(),
+ flags,
+ NumaNode::NO_NODE,
+ )
} {
// If we fail to shrink, which likely can't even happen, continue with the existing
// buffer.
@@ -1241,7 +1351,7 @@ impl<'vec, T> Drop for DrainAll<'vec, T> {
}
}
-#[macros::kunit_tests(rust_kvec_kunit)]
+#[macros::kunit_tests(rust_kvec)]
mod tests {
use super::*;
use crate::prelude::*;
diff --git a/rust/kernel/alloc/kvec/errors.rs b/rust/kernel/alloc/kvec/errors.rs
index 348b8d27e102..21a920a4b09b 100644
--- a/rust/kernel/alloc/kvec/errors.rs
+++ b/rust/kernel/alloc/kvec/errors.rs
@@ -2,7 +2,7 @@
//! Errors for the [`Vec`] type.
-use core::fmt::{self, Debug, Formatter};
+use kernel::fmt::{self, Debug, Formatter};
use kernel::prelude::*;
/// Error type for [`Vec::push_within_capacity`].
diff --git a/rust/kernel/alloc/layout.rs b/rust/kernel/alloc/layout.rs
index 93ed514f7cc7..9f8be72feb7a 100644
--- a/rust/kernel/alloc/layout.rs
+++ b/rust/kernel/alloc/layout.rs
@@ -80,7 +80,7 @@ impl<T> ArrayLayout<T> {
/// # Safety
///
/// `len` must be a value, for which `len * size_of::<T>() <= isize::MAX` is true.
- pub unsafe fn new_unchecked(len: usize) -> Self {
+ pub const unsafe fn new_unchecked(len: usize) -> Self {
// INVARIANT: By the safety requirements of this function
// `len * size_of::<T>() <= isize::MAX`.
Self {
@@ -98,6 +98,11 @@ impl<T> ArrayLayout<T> {
pub const fn is_empty(&self) -> bool {
self.len == 0
}
+
+ /// Returns the size of the [`ArrayLayout`] in bytes.
+ pub const fn size(&self) -> usize {
+ self.len() * core::mem::size_of::<T>()
+ }
}
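A one-line sketch of the new helper (the element type and length are illustrative):

    let layout = ArrayLayout::<u64>::new(4)?;
    assert_eq!(layout.size(), 32); // 4 * size_of::<u64>()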
impl<T> From<ArrayLayout<T>> for Layout {
diff --git a/rust/kernel/auxiliary.rs b/rust/kernel/auxiliary.rs
index d2cfe1eeefb6..e11848bbf206 100644
--- a/rust/kernel/auxiliary.rs
+++ b/rust/kernel/auxiliary.rs
@@ -6,12 +6,11 @@
use crate::{
bindings, container_of, device,
- device_id::RawDeviceId,
+ device_id::{RawDeviceId, RawDeviceIdIndex},
driver,
- error::{to_result, Result},
+ error::{from_result, to_result, Result},
prelude::*,
- str::CStr,
- types::{ForeignOwnable, Opaque},
+ types::Opaque,
ThisModule,
};
use core::{
@@ -56,42 +55,37 @@ impl<T: Driver + 'static> Adapter<T> {
extern "C" fn probe_callback(
adev: *mut bindings::auxiliary_device,
id: *const bindings::auxiliary_device_id,
- ) -> kernel::ffi::c_int {
+ ) -> c_int {
// SAFETY: The auxiliary bus only ever calls the probe callback with a valid pointer to a
// `struct auxiliary_device`.
//
// INVARIANT: `adev` is valid for the duration of `probe_callback()`.
- let adev = unsafe { &*adev.cast::<Device<device::Core>>() };
+ let adev = unsafe { &*adev.cast::<Device<device::CoreInternal>>() };
// SAFETY: `DeviceId` is a `#[repr(transparent)]` wrapper of `struct auxiliary_device_id`
// and does not add additional invariants, so it's safe to transmute.
let id = unsafe { &*id.cast::<DeviceId>() };
let info = T::ID_TABLE.info(id.index());
- match T::probe(adev, info) {
- Ok(data) => {
- // Let the `struct auxiliary_device` own a reference of the driver's private data.
- // SAFETY: By the type invariant `adev.as_raw` returns a valid pointer to a
- // `struct auxiliary_device`.
- unsafe {
- bindings::auxiliary_set_drvdata(adev.as_raw(), data.into_foreign().cast())
- };
- }
- Err(err) => return Error::to_errno(err),
- }
+ from_result(|| {
+ let data = T::probe(adev, info)?;
- 0
+ adev.as_ref().set_drvdata(data);
+ Ok(0)
+ })
}
extern "C" fn remove_callback(adev: *mut bindings::auxiliary_device) {
- // SAFETY: The auxiliary bus only ever calls the remove callback with a valid pointer to a
+ // SAFETY: The auxiliary bus only ever calls the remove callback with a valid pointer to a
// `struct auxiliary_device`.
- let ptr = unsafe { bindings::auxiliary_get_drvdata(adev) };
+ //
+ // INVARIANT: `adev` is valid for the duration of `remove_callback()`.
+ let adev = unsafe { &*adev.cast::<Device<device::CoreInternal>>() };
// SAFETY: `remove_callback` is only ever called after a successful call to
- // `probe_callback`, hence it's guaranteed that `ptr` points to a valid and initialized
- // `KBox<T>` pointer created through `KBox::into_foreign`.
- drop(unsafe { KBox::<T>::from_foreign(ptr.cast()) });
+ // `probe_callback`, hence it's guaranteed that `Device::set_drvdata()` has been called
+ // and stored a `Pin<KBox<T>>`.
+ drop(unsafe { adev.as_ref().drvdata_obtain::<Pin<KBox<T>>>() });
}
}
@@ -111,8 +105,8 @@ pub struct DeviceId(bindings::auxiliary_device_id);
impl DeviceId {
/// Create a new [`DeviceId`] from name.
pub const fn new(modname: &'static CStr, name: &'static CStr) -> Self {
- let name = name.as_bytes_with_nul();
- let modname = modname.as_bytes_with_nul();
+ let name = name.to_bytes_with_nul();
+ let modname = modname.to_bytes_with_nul();
// TODO: Replace with `bindings::auxiliary_device_id::default()` once stabilized for
// `const`.
@@ -140,13 +134,14 @@ impl DeviceId {
}
}
-// SAFETY:
-// * `DeviceId` is a `#[repr(transparent)`] wrapper of `auxiliary_device_id` and does not add
-// additional invariants, so it's safe to transmute to `RawType`.
-// * `DRIVER_DATA_OFFSET` is the offset to the `driver_data` field.
+// SAFETY: `DeviceId` is a `#[repr(transparent)]` wrapper of `auxiliary_device_id` and does not add
+// additional invariants, so it's safe to transmute to `RawType`.
unsafe impl RawDeviceId for DeviceId {
type RawType = bindings::auxiliary_device_id;
+}
+// SAFETY: `DRIVER_DATA_OFFSET` is the offset to the `driver_data` field.
+unsafe impl RawDeviceIdIndex for DeviceId {
const DRIVER_DATA_OFFSET: usize =
core::mem::offset_of!(bindings::auxiliary_device_id, driver_data);
@@ -250,7 +245,7 @@ kernel::impl_device_context_deref!(unsafe { Device });
kernel::impl_device_context_into_aref!(Device);
// SAFETY: Instances of `Device` are always reference-counted.
-unsafe impl crate::types::AlwaysRefCounted for Device {
+unsafe impl crate::sync::aref::AlwaysRefCounted for Device {
fn inc_ref(&self) {
// SAFETY: The existence of a shared reference guarantees that the refcount is non-zero.
unsafe { bindings::get_device(self.as_ref().as_raw()) };
@@ -276,7 +271,7 @@ impl<Ctx: device::DeviceContext> AsRef<device::Device<Ctx>> for Device<Ctx> {
let dev = unsafe { addr_of_mut!((*self.as_raw()).dev) };
// SAFETY: `dev` points to a valid `struct device`.
- unsafe { device::Device::as_ref(dev) }
+ unsafe { device::Device::from_raw(dev) }
}
}
diff --git a/rust/kernel/bitmap.rs b/rust/kernel/bitmap.rs
new file mode 100644
index 000000000000..f45915694454
--- /dev/null
+++ b/rust/kernel/bitmap.rs
@@ -0,0 +1,600 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+//! Rust API for bitmap.
+//!
+//! C headers: [`include/linux/bitmap.h`](srctree/include/linux/bitmap.h).
+
+use crate::alloc::{AllocError, Flags};
+use crate::bindings;
+#[cfg(not(CONFIG_RUST_BITMAP_HARDENED))]
+use crate::pr_err;
+use core::ptr::NonNull;
+
+const BITS_PER_LONG: usize = bindings::BITS_PER_LONG as usize;
+
+/// Represents a C bitmap. Wraps underlying C bitmap API.
+///
+/// # Invariants
+///
+/// Must reference a `[c_ulong]` long enough to fit `data.len()` bits.
+#[cfg_attr(CONFIG_64BIT, repr(align(8)))]
+#[cfg_attr(not(CONFIG_64BIT), repr(align(4)))]
+pub struct Bitmap {
+ data: [()],
+}
+
+impl Bitmap {
+ /// Borrows a C bitmap.
+ ///
+ /// # Safety
+ ///
+ /// * `ptr` holds a non-null address of an initialized array of `unsigned long`
+ /// that is large enough to hold `nbits` bits.
+ /// * the array must not be freed for the lifetime of this [`Bitmap`]
+ /// * concurrent access only happens through atomic operations
+ pub unsafe fn from_raw<'a>(ptr: *const usize, nbits: usize) -> &'a Bitmap {
+ let data: *const [()] = core::ptr::slice_from_raw_parts(ptr.cast(), nbits);
+ // INVARIANT: `data` references an initialized array that can hold `nbits` bits.
+ // SAFETY:
+ // The caller guarantees that `data` (derived from `ptr` and `nbits`)
+ // points to a valid, initialized, and appropriately sized memory region
+ // that will not be freed for the lifetime 'a.
+ // We are casting `*const [()]` to `*const Bitmap`. The `Bitmap`
+ // struct is a dynamically sized type whose only field is `data: [()]`,
+ // so its layout is compatible with a slice of `()` and the pointee
+ // itself occupies no storage. The `slice_from_raw_parts` function
+ // encodes the length (number of bits, not elements) into the metadata
+ // of the fat pointer. Therefore, dereferencing this pointer as
+ // `&Bitmap` is safe given the caller's guarantees.
+ unsafe { &*(data as *const Bitmap) }
+ }
+
+ /// Borrows a C bitmap exclusively.
+ ///
+ /// # Safety
+ ///
+ /// * `ptr` holds a non-null address of an initialized array of `unsigned long`
+ /// that is large enough to hold `nbits` bits.
+ /// * the array must not be freed for the lifetime of this [`Bitmap`]
+ /// * no concurrent access may happen.
+ pub unsafe fn from_raw_mut<'a>(ptr: *mut usize, nbits: usize) -> &'a mut Bitmap {
+ let data: *mut [()] = core::ptr::slice_from_raw_parts_mut(ptr.cast(), nbits);
+ // INVARIANT: `data` references an initialized array that can hold `nbits` bits.
+ // SAFETY:
+ // The caller guarantees that `data` (derived from `ptr` and `nbits`)
+ // points to a valid, initialized, and appropriately sized memory region
+ // that will not be freed for the lifetime 'a.
+ // Furthermore, the caller guarantees no concurrent access will happen,
+ // which upholds the exclusivity requirement for a mutable reference.
+ // Similar to `from_raw`, casting `*mut [()]` to `*mut Bitmap` is
+ // safe because `Bitmap` is a dynamically sized type whose only field
+ // is `data: [()]`, making its layout compatible with a slice of `()`.
+ unsafe { &mut *(data as *mut Bitmap) }
+ }
+
+ /// Returns a raw pointer to the backing [`Bitmap`].
+ pub fn as_ptr(&self) -> *const usize {
+ core::ptr::from_ref::<Bitmap>(self).cast::<usize>()
+ }
+
+ /// Returns a mutable raw pointer to the backing [`Bitmap`].
+ pub fn as_mut_ptr(&mut self) -> *mut usize {
+ core::ptr::from_mut::<Bitmap>(self).cast::<usize>()
+ }
+
+ /// Returns length of this [`Bitmap`].
+ #[expect(clippy::len_without_is_empty)]
+ pub fn len(&self) -> usize {
+ self.data.len()
+ }
+}
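A sketch of borrowing a plain word array as a `Bitmap` (the array contents are illustrative):

    let words: [usize; 2] = [0b1010, 0];
    // SAFETY: `words` is an initialized `unsigned long` array large enough for
    // `2 * BITS_PER_LONG` bits, it outlives the borrow, and no concurrent
    // access takes place.
    let b = unsafe { Bitmap::from_raw(words.as_ptr(), 2 * BITS_PER_LONG) };
    assert_eq!(b.len(), 2 * BITS_PER_LONG);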
+
+/// Holds either a pointer to array of `unsigned long` or a small bitmap.
+#[repr(C)]
+union BitmapRepr {
+ bitmap: usize,
+ ptr: NonNull<usize>,
+}
+
+macro_rules! bitmap_assert {
+ ($cond:expr, $($arg:tt)+) => {
+ #[cfg(CONFIG_RUST_BITMAP_HARDENED)]
+ assert!($cond, $($arg)*);
+ }
+}
+
+macro_rules! bitmap_assert_return {
+ ($cond:expr, $($arg:tt)+) => {
+ #[cfg(CONFIG_RUST_BITMAP_HARDENED)]
+ assert!($cond, $($arg)*);
+
+ #[cfg(not(CONFIG_RUST_BITMAP_HARDENED))]
+ if !($cond) {
+ pr_err!($($arg)*);
+ return
+ }
+ }
+}
+
+/// Represents an owned bitmap.
+///
+/// Wraps underlying C bitmap API. See [`Bitmap`] for available
+/// methods.
+///
+/// # Examples
+///
+/// Basic usage
+///
+/// ```
+/// use kernel::alloc::flags::GFP_KERNEL;
+/// use kernel::bitmap::BitmapVec;
+///
+/// let mut b = BitmapVec::new(16, GFP_KERNEL)?;
+///
+/// assert_eq!(16, b.len());
+/// for i in 0..16 {
+/// if i % 4 == 0 {
+/// b.set_bit(i);
+/// }
+/// }
+/// assert_eq!(Some(0), b.next_bit(0));
+/// assert_eq!(Some(1), b.next_zero_bit(0));
+/// assert_eq!(Some(4), b.next_bit(1));
+/// assert_eq!(Some(5), b.next_zero_bit(4));
+/// assert_eq!(Some(12), b.last_bit());
+/// # Ok::<(), Error>(())
+/// ```
+///
+/// # Invariants
+///
+/// * `nbits` is `<= i32::MAX` and never changes.
+/// * if `nbits <= bindings::BITS_PER_LONG`, then `repr` is a `usize`.
+/// * otherwise, `repr` holds a non-null pointer to an initialized
+/// array of `unsigned long` that is large enough to hold `nbits` bits.
+pub struct BitmapVec {
+ /// Representation of bitmap.
+ repr: BitmapRepr,
+ /// Length of this bitmap. Must be `<= i32::MAX`.
+ nbits: usize,
+}
+
+impl core::ops::Deref for BitmapVec {
+ type Target = Bitmap;
+
+ fn deref(&self) -> &Bitmap {
+ let ptr = if self.nbits <= BITS_PER_LONG {
+ // SAFETY: Bitmap is represented inline.
+ unsafe { core::ptr::addr_of!(self.repr.bitmap) }
+ } else {
+ // SAFETY: Bitmap is represented as array of `unsigned long`.
+ unsafe { self.repr.ptr.as_ptr() }
+ };
+
+ // SAFETY: We got the right pointer and invariants of [`Bitmap`] hold.
+ // An inline bitmap is treated like an array with single element.
+ unsafe { Bitmap::from_raw(ptr, self.nbits) }
+ }
+}
+
+impl core::ops::DerefMut for BitmapVec {
+ fn deref_mut(&mut self) -> &mut Bitmap {
+ let ptr = if self.nbits <= BITS_PER_LONG {
+ // SAFETY: Bitmap is represented inline.
+ unsafe { core::ptr::addr_of_mut!(self.repr.bitmap) }
+ } else {
+ // SAFETY: Bitmap is represented as array of `unsigned long`.
+ unsafe { self.repr.ptr.as_ptr() }
+ };
+
+ // SAFETY: We got the right pointer and invariants of [`BitmapVec`] hold.
+ // An inline bitmap is treated like an array with single element.
+ unsafe { Bitmap::from_raw_mut(ptr, self.nbits) }
+ }
+}
+
+/// Enable ownership transfer to other threads.
+///
+/// SAFETY: We own the underlying bitmap representation.
+unsafe impl Send for BitmapVec {}
+
+/// Enable unsynchronized concurrent access to [`BitmapVec`] through shared references.
+///
+/// SAFETY: `deref()` will return a reference to a [`Bitmap`]. Its methods
+/// that take immutable references are either atomic or read-only.
+unsafe impl Sync for BitmapVec {}
+
+impl Drop for BitmapVec {
+ fn drop(&mut self) {
+ if self.nbits <= BITS_PER_LONG {
+ return;
+ }
+ // SAFETY: `self.repr.ptr` was returned by the C `bitmap_zalloc`.
+ //
+ // INVARIANT: there is no other use of `self.repr.ptr` after this
+ // call and the value is being dropped, so the broken invariant is
+ // not observable on function exit.
+ unsafe { bindings::bitmap_free(self.repr.ptr.as_ptr()) };
+ }
+}
+
+impl BitmapVec {
+ /// Constructs a new [`BitmapVec`].
+ ///
+ /// Fails with [`AllocError`] when the [`BitmapVec`] could not be allocated. This
+ /// includes the case when `nbits` is greater than `i32::MAX`.
+ #[inline]
+ pub fn new(nbits: usize, flags: Flags) -> Result<Self, AllocError> {
+ if nbits <= BITS_PER_LONG {
+ return Ok(BitmapVec {
+ repr: BitmapRepr { bitmap: 0 },
+ nbits,
+ });
+ }
+ if nbits > i32::MAX.try_into().unwrap() {
+ return Err(AllocError);
+ }
+ let nbits_u32 = u32::try_from(nbits).unwrap();
+ // SAFETY: `BITS_PER_LONG < nbits` and `nbits <= i32::MAX`.
+ let ptr = unsafe { bindings::bitmap_zalloc(nbits_u32, flags.as_raw()) };
+ let ptr = NonNull::new(ptr).ok_or(AllocError)?;
+ // INVARIANT: `ptr` returned by C `bitmap_zalloc` and `nbits` checked.
+ Ok(BitmapVec {
+ repr: BitmapRepr { ptr },
+ nbits,
+ })
+ }
+
+ /// Returns the length of this [`BitmapVec`].
+ #[allow(clippy::len_without_is_empty)]
+ #[inline]
+ pub fn len(&self) -> usize {
+ self.nbits
+ }
+
+ /// Fills this [`BitmapVec`] with random bits.
+ #[cfg(CONFIG_FIND_BIT_BENCHMARK_RUST)]
+ pub fn fill_random(&mut self) {
+ // SAFETY: `self.as_mut_ptr` points to either an array of the
+ // appropriate length or one usize.
+ unsafe {
+ bindings::get_random_bytes(
+ self.as_mut_ptr().cast::<ffi::c_void>(),
+ usize::div_ceil(self.nbits, bindings::BITS_PER_LONG as usize)
+ * bindings::BITS_PER_LONG as usize
+ / 8,
+ );
+ }
+ }
+}
+
+impl Bitmap {
+ /// Set bit with index `index`.
+ ///
+ /// ATTENTION: Contrary to the C naming convention, `set_bit` is
+ /// non-atomic. The corresponding C function is `__set_bit`.
+ ///
+ /// If CONFIG_RUST_BITMAP_HARDENED is not enabled and `index` is greater than
+ /// or equal to `self.len()`, does nothing.
+ ///
+ /// # Panics
+ ///
+ /// Panics if CONFIG_RUST_BITMAP_HARDENED is enabled and `index` is greater than
+ /// or equal to `self.len()`.
+ #[inline]
+ pub fn set_bit(&mut self, index: usize) {
+ bitmap_assert_return!(
+ index < self.len(),
+ "Bit `index` must be < {}, was {}",
+ self.len(),
+ index
+ );
+ // SAFETY: Bit `index` is within bounds.
+ unsafe { bindings::__set_bit(index, self.as_mut_ptr()) };
+ }
+
+ /// Set bit with index `index`, atomically.
+ ///
+ /// This is a relaxed atomic operation (no implied memory barriers).
+ ///
+ /// ATTENTION: The naming convention differs from C, where the corresponding
+ /// function is called `set_bit`.
+ ///
+ /// If CONFIG_RUST_BITMAP_HARDENED is not enabled and `index` is greater than
+ /// or equal to `self.len()`, does nothing.
+ ///
+ /// # Panics
+ ///
+ /// Panics if CONFIG_RUST_BITMAP_HARDENED is enabled and `index` is greater than
+ /// or equal to `self.len()`.
+ #[inline]
+ pub fn set_bit_atomic(&self, index: usize) {
+ bitmap_assert_return!(
+ index < self.len(),
+ "Bit `index` must be < {}, was {}",
+ self.len(),
+ index
+ );
+ // SAFETY: `index` is within bounds and the caller has ensured that
+ // there is no mix of non-atomic and atomic operations.
+ unsafe { bindings::set_bit(index, self.as_ptr().cast_mut()) };
+ }
+
+ /// Clear bit with index `index`.
+ ///
+ /// ATTENTION: Contrary to the C naming convention, `clear_bit` is
+ /// non-atomic. The corresponding C function is `__clear_bit`.
+ ///
+ /// If CONFIG_RUST_BITMAP_HARDENED is not enabled and `index` is greater than
+ /// or equal to `self.len()`, does nothing.
+ ///
+ /// # Panics
+ ///
+ /// Panics if CONFIG_RUST_BITMAP_HARDENED is enabled and `index` is greater than
+ /// or equal to `self.len()`.
+ #[inline]
+ pub fn clear_bit(&mut self, index: usize) {
+ bitmap_assert_return!(
+ index < self.len(),
+ "Bit `index` must be < {}, was {}",
+ self.len(),
+ index
+ );
+ // SAFETY: `index` is within bounds.
+ unsafe { bindings::__clear_bit(index, self.as_mut_ptr()) };
+ }
+
+ /// Clear bit with index `index`, atomically.
+ ///
+ /// This is a relaxed atomic operation (no implied memory barriers).
+ ///
+ /// ATTENTION: The naming convention differs from C, where the corresponding
+ /// function is called `clear_bit`.
+ ///
+ /// If CONFIG_RUST_BITMAP_HARDENED is not enabled and `index` is greater than
+ /// or equal to `self.len()`, does nothing.
+ ///
+ /// # Panics
+ ///
+ /// Panics if CONFIG_RUST_BITMAP_HARDENED is enabled and `index` is greater than
+ /// or equal to `self.len()`.
+ #[inline]
+ pub fn clear_bit_atomic(&self, index: usize) {
+ bitmap_assert_return!(
+ index < self.len(),
+ "Bit `index` must be < {}, was {}",
+ self.len(),
+ index
+ );
+ // SAFETY: `index` is within bounds and the caller has ensured that
+ // there is no mix of non-atomic and atomic operations.
+ unsafe { bindings::clear_bit(index, self.as_ptr().cast_mut()) };
+ }
+
+ /// Copy `src` into this [`Bitmap`] and set any remaining bits to zero.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use kernel::alloc::{AllocError, flags::GFP_KERNEL};
+ /// use kernel::bitmap::BitmapVec;
+ ///
+ /// let mut long_bitmap = BitmapVec::new(256, GFP_KERNEL)?;
+ ///
+ /// assert_eq!(None, long_bitmap.last_bit());
+ ///
+ /// let mut short_bitmap = BitmapVec::new(16, GFP_KERNEL)?;
+ ///
+ /// short_bitmap.set_bit(7);
+ /// long_bitmap.copy_and_extend(&short_bitmap);
+ /// assert_eq!(Some(7), long_bitmap.last_bit());
+ ///
+ /// # Ok::<(), AllocError>(())
+ /// ```
+ #[inline]
+ pub fn copy_and_extend(&mut self, src: &Bitmap) {
+ let len = core::cmp::min(src.len(), self.len());
+ // SAFETY: access to `self` and `src` is within bounds.
+ unsafe {
+ bindings::bitmap_copy_and_extend(
+ self.as_mut_ptr(),
+ src.as_ptr(),
+ len as u32,
+ self.len() as u32,
+ )
+ };
+ }
+
+ /// Finds last set bit.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use kernel::alloc::{AllocError, flags::GFP_KERNEL};
+ /// use kernel::bitmap::BitmapVec;
+ ///
+ /// let bitmap = BitmapVec::new(64, GFP_KERNEL)?;
+ ///
+ /// match bitmap.last_bit() {
+ /// Some(idx) => {
+ /// pr_info!("The last bit has index {idx}.\n");
+ /// }
+ /// None => {
+ /// pr_info!("All bits in this bitmap are 0.\n");
+ /// }
+ /// }
+ /// # Ok::<(), AllocError>(())
+ /// ```
+ #[inline]
+ pub fn last_bit(&self) -> Option<usize> {
+ // SAFETY: `_find_last_bit` access is within bounds due to invariant.
+ let index = unsafe { bindings::_find_last_bit(self.as_ptr(), self.len()) };
+ if index >= self.len() {
+ None
+ } else {
+ Some(index)
+ }
+ }
+
+ /// Finds next set bit, starting from `start`.
+ ///
+ /// Returns `None` if `start` is greater than or equal to `self.len()`.
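+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch (an owned [`BitmapVec`] is assumed; it derefs to
+ /// [`Bitmap`]):
+ ///
+ /// ```
+ /// use kernel::alloc::{AllocError, flags::GFP_KERNEL};
+ /// use kernel::bitmap::BitmapVec;
+ ///
+ /// let mut b = BitmapVec::new(8, GFP_KERNEL)?;
+ ///
+ /// b.set_bit(3);
+ /// assert_eq!(Some(3), b.next_bit(0));
+ /// assert_eq!(None, b.next_bit(4));
+ /// # Ok::<(), AllocError>(())
+ /// ```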
+ #[inline]
+ pub fn next_bit(&self, start: usize) -> Option<usize> {
+ bitmap_assert!(
+ start < self.len(),
+ "`start` must be < {} was {}",
+ self.len(),
+ start
+ );
+ // SAFETY: `_find_next_bit` tolerates out-of-bounds arguments and returns a
+ // value larger than or equal to `self.len()` in that case.
+ let index = unsafe { bindings::_find_next_bit(self.as_ptr(), self.len(), start) };
+ if index >= self.len() {
+ None
+ } else {
+ Some(index)
+ }
+ }
+
+ /// Finds next zero bit, starting from `start`.
+ ///
+ /// Returns `None` if `start` is greater than or equal to `self.len()`.
+ #[inline]
+ pub fn next_zero_bit(&self, start: usize) -> Option<usize> {
+ bitmap_assert!(
+ start < self.len(),
+ "`start` must be < {} was {}",
+ self.len(),
+ start
+ );
+ // SAFETY: `_find_next_zero_bit` tolerates out-of-bounds arguments and returns a
+ // value larger than or equal to `self.len()` in that case.
+ let index = unsafe { bindings::_find_next_zero_bit(self.as_ptr(), self.len(), start) };
+ if index >= self.len() {
+ None
+ } else {
+ Some(index)
+ }
+ }
+}
+
+use macros::kunit_tests;
+
+#[kunit_tests(rust_kernel_bitmap)]
+mod tests {
+ use super::*;
+ use kernel::alloc::flags::GFP_KERNEL;
+
+ #[test]
+ fn bitmap_borrow() {
+ let fake_bitmap: [usize; 2] = [0, 0];
+ // SAFETY: `fake_bitmap` is an array of expected length.
+ let b = unsafe { Bitmap::from_raw(fake_bitmap.as_ptr(), 2 * BITS_PER_LONG) };
+ assert_eq!(2 * BITS_PER_LONG, b.len());
+ assert_eq!(None, b.next_bit(0));
+ }
+
+ #[test]
+ fn bitmap_copy() {
+ let fake_bitmap: usize = 0xFF;
+ // SAFETY: `fake_bitmap` can be used as a one-element array of expected length.
+ let b = unsafe { Bitmap::from_raw(core::ptr::addr_of!(fake_bitmap), 8) };
+ assert_eq!(8, b.len());
+ assert_eq!(None, b.next_zero_bit(0));
+ }
+
+ #[test]
+ fn bitmap_vec_new() -> Result<(), AllocError> {
+ let b = BitmapVec::new(0, GFP_KERNEL)?;
+ assert_eq!(0, b.len());
+
+ let b = BitmapVec::new(3, GFP_KERNEL)?;
+ assert_eq!(3, b.len());
+
+ let b = BitmapVec::new(1024, GFP_KERNEL)?;
+ assert_eq!(1024, b.len());
+
+ // Requesting a size larger than `i32::MAX` results in [`AllocError`].
+ let res = BitmapVec::new(1 << 31, GFP_KERNEL);
+ assert!(res.is_err());
+ Ok(())
+ }
+
+ #[test]
+ fn bitmap_set_clear_find() -> Result<(), AllocError> {
+ let mut b = BitmapVec::new(128, GFP_KERNEL)?;
+
+ // Zero-initialized
+ assert_eq!(None, b.next_bit(0));
+ assert_eq!(Some(0), b.next_zero_bit(0));
+ assert_eq!(None, b.last_bit());
+
+ b.set_bit(17);
+
+ assert_eq!(Some(17), b.next_bit(0));
+ assert_eq!(Some(17), b.next_bit(17));
+ assert_eq!(None, b.next_bit(18));
+ assert_eq!(Some(17), b.last_bit());
+
+ b.set_bit(107);
+
+ assert_eq!(Some(17), b.next_bit(0));
+ assert_eq!(Some(17), b.next_bit(17));
+ assert_eq!(Some(107), b.next_bit(18));
+ assert_eq!(Some(107), b.last_bit());
+
+ b.clear_bit(17);
+
+ assert_eq!(Some(107), b.next_bit(0));
+ assert_eq!(Some(107), b.last_bit());
+ Ok(())
+ }
+
+ #[test]
+ fn owned_bitmap_out_of_bounds() -> Result<(), AllocError> {
+ // TODO: Kunit #[test]s do not support `cfg` yet,
+ // so we add it here in the body.
+ #[cfg(not(CONFIG_RUST_BITMAP_HARDENED))]
+ {
+ let mut b = BitmapVec::new(128, GFP_KERNEL)?;
+ b.set_bit(2048);
+ b.set_bit_atomic(2048);
+ b.clear_bit(2048);
+ b.clear_bit_atomic(2048);
+ assert_eq!(None, b.next_bit(2048));
+ assert_eq!(None, b.next_zero_bit(2048));
+ assert_eq!(None, b.last_bit());
+ }
+ Ok(())
+ }
+
+ // TODO: uncomment once KUnit supports `#[should_panic]` and `cfg`.
+ // #[cfg(CONFIG_RUST_BITMAP_HARDENED)]
+ // #[test]
+ // #[should_panic]
+ // fn owned_bitmap_out_of_bounds() -> Result<(), AllocError> {
+ // let mut b = BitmapVec::new(128, GFP_KERNEL)?;
+ //
+ // b.set_bit(2048);
+ // }
+
+ #[test]
+ fn bitmap_copy_and_extend() -> Result<(), AllocError> {
+ let mut long_bitmap = BitmapVec::new(256, GFP_KERNEL)?;
+
+ long_bitmap.set_bit(3);
+ long_bitmap.set_bit(200);
+
+ let mut short_bitmap = BitmapVec::new(32, GFP_KERNEL)?;
+
+ short_bitmap.set_bit(17);
+
+ long_bitmap.copy_and_extend(&short_bitmap);
+
+ // Previous bits have been cleared.
+ assert_eq!(Some(17), long_bitmap.next_bit(0));
+ assert_eq!(Some(17), long_bitmap.last_bit());
+ Ok(())
+ }
+}
diff --git a/rust/kernel/bits.rs b/rust/kernel/bits.rs
new file mode 100644
index 000000000000..553d50265883
--- /dev/null
+++ b/rust/kernel/bits.rs
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Bit manipulation macros.
+//!
+//! C header: [`include/linux/bits.h`](srctree/include/linux/bits.h)
+
+use crate::prelude::*;
+use core::ops::RangeInclusive;
+use macros::paste;
+
+macro_rules! impl_bit_fn {
+ (
+ $ty:ty
+ ) => {
+ paste! {
+ /// Computes `1 << n` if `n` is in bounds, i.e.: if `n` is smaller than
+ /// the maximum number of bits supported by the type.
+ ///
+ /// Returns [`None`] otherwise.
+ #[inline]
+ pub fn [<checked_bit_ $ty>](n: u32) -> Option<$ty> {
+ (1 as $ty).checked_shl(n)
+ }
+
+ /// Computes `1 << n`, asserting at compile time that `n` is in
+ /// bounds.
+ ///
+ /// This version is the default and should be used if `n` is known at
+ /// compile time.
+ #[inline]
+ pub const fn [<bit_ $ty>](n: u32) -> $ty {
+ build_assert!(n < <$ty>::BITS);
+ (1 as $ty) << n
+ }
+ }
+ };
+}
+
+impl_bit_fn!(u64);
+impl_bit_fn!(u32);
+impl_bit_fn!(u16);
+impl_bit_fn!(u8);
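+
+// A minimal usage sketch of the generated helpers (the function names below
+// are produced by the `paste!` expansion above):
+//
+//     assert_eq!(checked_bit_u32(31), Some(1 << 31));
+//     assert_eq!(checked_bit_u32(32), None);
+//     assert_eq!(bit_u8(7), 0x80);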
+
+macro_rules! impl_genmask_fn {
+ (
+ $ty:ty,
+ $(#[$genmask_checked_ex:meta])*,
+ $(#[$genmask_ex:meta])*
+ ) => {
+ paste! {
+ /// Creates a contiguous bitmask for the given range by validating
+ /// the range at runtime.
+ ///
+ /// Returns [`None`] if the range is invalid, i.e.: if the start is
+ /// greater than the end or if the range is outside of the
+ /// representable range for the type.
+ $(#[$genmask_checked_ex])*
+ #[inline]
+ pub fn [<genmask_checked_ $ty>](range: RangeInclusive<u32>) -> Option<$ty> {
+ let start = *range.start();
+ let end = *range.end();
+
+ if start > end {
+ return None;
+ }
+
+ let high = [<checked_bit_ $ty>](end)?;
+ let low = [<checked_bit_ $ty>](start)?;
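+ // `high | (high - 1)` sets every bit up to and including `end`;
+ // masking with `!(low - 1)` then clears the bits below `start`.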
+ Some((high | (high - 1)) & !(low - 1))
+ }
+
+ /// Creates a contiguous bitmask for the given range, asserting at
+ /// compile time that the range is valid.
+ ///
+ /// This version is the default and should be used if the range is known
+ /// at compile time.
+ $(#[$genmask_ex])*
+ #[inline]
+ pub const fn [<genmask_ $ty>](range: RangeInclusive<u32>) -> $ty {
+ let start = *range.start();
+ let end = *range.end();
+
+ build_assert!(start <= end);
+
+ let high = [<bit_ $ty>](end);
+ let low = [<bit_ $ty>](start);
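+ // Same bit-filling construction as in `genmask_checked_*` above.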
+ (high | (high - 1)) & !(low - 1)
+ }
+ }
+ };
+}
+
+impl_genmask_fn!(
+ u64,
+ /// # Examples
+ ///
+ /// ```
+ /// # #![expect(clippy::reversed_empty_ranges)]
+ /// # use kernel::bits::genmask_checked_u64;
+ /// assert_eq!(genmask_checked_u64(0..=0), Some(0b1));
+ /// assert_eq!(genmask_checked_u64(0..=63), Some(u64::MAX));
+ /// assert_eq!(genmask_checked_u64(21..=39), Some(0x0000_00ff_ffe0_0000));
+ ///
+ /// // `80` is out of the supported bit range.
+ /// assert_eq!(genmask_checked_u64(21..=80), None);
+ ///
+ /// // Invalid range where the start is bigger than the end.
+ /// assert_eq!(genmask_checked_u64(15..=8), None);
+ /// ```
+ ,
+ /// # Examples
+ ///
+ /// ```
+ /// # use kernel::bits::genmask_u64;
+ /// assert_eq!(genmask_u64(21..=39), 0x0000_00ff_ffe0_0000);
+ /// assert_eq!(genmask_u64(0..=0), 0b1);
+ /// assert_eq!(genmask_u64(0..=63), u64::MAX);
+ /// ```
+);
+
+impl_genmask_fn!(
+ u32,
+ /// # Examples
+ ///
+ /// ```
+ /// # #![expect(clippy::reversed_empty_ranges)]
+ /// # use kernel::bits::genmask_checked_u32;
+ /// assert_eq!(genmask_checked_u32(0..=0), Some(0b1));
+ /// assert_eq!(genmask_checked_u32(0..=31), Some(u32::MAX));
+ /// assert_eq!(genmask_checked_u32(21..=31), Some(0xffe0_0000));
+ ///
+ /// // `40` is out of the supported bit range.
+ /// assert_eq!(genmask_checked_u32(21..=40), None);
+ ///
+ /// // Invalid range where the start is bigger than the end.
+ /// assert_eq!(genmask_checked_u32(15..=8), None);
+ /// ```
+ ,
+ /// # Examples
+ ///
+ /// ```
+ /// # use kernel::bits::genmask_u32;
+ /// assert_eq!(genmask_u32(21..=31), 0xffe0_0000);
+ /// assert_eq!(genmask_u32(0..=0), 0b1);
+ /// assert_eq!(genmask_u32(0..=31), u32::MAX);
+ /// ```
+);
+
+impl_genmask_fn!(
+ u16,
+ /// # Examples
+ ///
+ /// ```
+ /// # #![expect(clippy::reversed_empty_ranges)]
+ /// # use kernel::bits::genmask_checked_u16;
+ /// assert_eq!(genmask_checked_u16(0..=0), Some(0b1));
+ /// assert_eq!(genmask_checked_u16(0..=15), Some(u16::MAX));
+ /// assert_eq!(genmask_checked_u16(6..=15), Some(0xffc0));
+ ///
+ /// // `20` is out of the supported bit range.
+ /// assert_eq!(genmask_checked_u16(6..=20), None);
+ ///
+ /// // Invalid range where the start is bigger than the end.
+ /// assert_eq!(genmask_checked_u16(10..=5), None);
+ /// ```
+ ,
+ /// # Examples
+ ///
+ /// ```
+ /// # use kernel::bits::genmask_u16;
+ /// assert_eq!(genmask_u16(6..=15), 0xffc0);
+ /// assert_eq!(genmask_u16(0..=0), 0b1);
+ /// assert_eq!(genmask_u16(0..=15), u16::MAX);
+ /// ```
+);
+
+impl_genmask_fn!(
+ u8,
+ /// # Examples
+ ///
+ /// ```
+ /// # #![expect(clippy::reversed_empty_ranges)]
+ /// # use kernel::bits::genmask_checked_u8;
+ /// assert_eq!(genmask_checked_u8(0..=0), Some(0b1));
+ /// assert_eq!(genmask_checked_u8(0..=7), Some(u8::MAX));
+ /// assert_eq!(genmask_checked_u8(6..=7), Some(0xc0));
+ ///
+ /// // `10` is out of the supported bit range.
+ /// assert_eq!(genmask_checked_u8(6..=10), None);
+ ///
+ /// // Invalid range where the start is bigger than the end.
+ /// assert_eq!(genmask_checked_u8(5..=2), None);
+ /// ```
+ ,
+ /// # Examples
+ ///
+ /// ```
+ /// # use kernel::bits::genmask_u8;
+ /// assert_eq!(genmask_u8(6..=7), 0xc0);
+ /// assert_eq!(genmask_u8(0..=0), 0b1);
+ /// assert_eq!(genmask_u8(0..=7), u8::MAX);
+ /// ```
+);
diff --git a/rust/kernel/block.rs b/rust/kernel/block.rs
index 150f710efe5b..32c8d865afb6 100644
--- a/rust/kernel/block.rs
+++ b/rust/kernel/block.rs
@@ -3,3 +3,16 @@
//! Types for working with the block layer.
pub mod mq;
+
+/// Bit mask for masking out [`SECTOR_SIZE`].
+pub const SECTOR_MASK: u32 = bindings::SECTOR_MASK;
+
+/// Sectors have a size of `1 << SECTOR_SHIFT` bytes.
+pub const SECTOR_SHIFT: u32 = bindings::SECTOR_SHIFT;
+
+/// Size of a sector.
+pub const SECTOR_SIZE: u32 = bindings::SECTOR_SIZE;
+
+/// The base-2 logarithm of the number of sectors per page,
+/// i.e. `PAGE_SHIFT - SECTOR_SHIFT`.
+pub const PAGE_SECTORS_SHIFT: u32 = bindings::PAGE_SECTORS_SHIFT;
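+
+// A minimal usage sketch (a sector-aligned byte count is assumed):
+//
+//     let bytes: u64 = 4096;
+//     let sectors = bytes >> SECTOR_SHIFT;
+//     assert_eq!(sectors << SECTOR_SHIFT, bytes);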
diff --git a/rust/kernel/block/mq.rs b/rust/kernel/block/mq.rs
index fb0f393c1cea..637018ead0ab 100644
--- a/rust/kernel/block/mq.rs
+++ b/rust/kernel/block/mq.rs
@@ -53,7 +53,7 @@
//! [`GenDiskBuilder`]: gen_disk::GenDiskBuilder
//! [`GenDiskBuilder::build`]: gen_disk::GenDiskBuilder::build
//!
-//! # Example
+//! # Examples
//!
//! ```rust
//! use kernel::{
@@ -69,27 +69,33 @@
//!
//! #[vtable]
//! impl Operations for MyBlkDevice {
+//! type QueueData = ();
//!
-//! fn queue_rq(rq: ARef<Request<Self>>, _is_last: bool) -> Result {
+//! fn queue_rq(_queue_data: (), rq: ARef<Request<Self>>, _is_last: bool) -> Result {
//! Request::end_ok(rq);
//! Ok(())
//! }
//!
-//! fn commit_rqs() {}
+//! fn commit_rqs(_queue_data: ()) {}
+//!
+//! fn complete(rq: ARef<Request<Self>>) {
+//! Request::end_ok(rq)
+//! .map_err(|_e| kernel::error::code::EIO)
+//! .expect("Fatal error - expected to be able to end request");
+//! }
//! }
//!
//! let tagset: Arc<TagSet<MyBlkDevice>> =
//! Arc::pin_init(TagSet::new(1, 256, 1), flags::GFP_KERNEL)?;
//! let mut disk = gen_disk::GenDiskBuilder::new()
//! .capacity_sectors(4096)
-//! .build(format_args!("myblk"), tagset)?;
+//! .build(fmt!("myblk"), tagset, ())?;
//!
//! # Ok::<(), kernel::error::Error>(())
//! ```
pub mod gen_disk;
mod operations;
-mod raw_writer;
mod request;
mod tag_set;
diff --git a/rust/kernel/block/mq/gen_disk.rs b/rust/kernel/block/mq/gen_disk.rs
index cd54cd64ea88..1ce815c8cdab 100644
--- a/rust/kernel/block/mq/gen_disk.rs
+++ b/rust/kernel/block/mq/gen_disk.rs
@@ -3,12 +3,19 @@
//! Generic disk abstraction.
//!
//! C header: [`include/linux/blkdev.h`](srctree/include/linux/blkdev.h)
-//! C header: [`include/linux/blk_mq.h`](srctree/include/linux/blk_mq.h)
-
-use crate::block::mq::{raw_writer::RawWriter, Operations, TagSet};
-use crate::{bindings, error::from_err_ptr, error::Result, sync::Arc};
-use crate::{error, static_lock_class};
-use core::fmt::{self, Write};
+//! C header: [`include/linux/blk-mq.h`](srctree/include/linux/blk-mq.h)
+
+use crate::{
+ bindings,
+ block::mq::{Operations, TagSet},
+ error::{self, from_err_ptr, Result},
+ fmt::{self, Write},
+ prelude::*,
+ static_lock_class,
+ str::NullTerminatedFormatter,
+ sync::Arc,
+ types::{ForeignOwnable, ScopeGuard},
+};
/// A builder for [`GenDisk`].
///
@@ -45,7 +52,7 @@ impl GenDiskBuilder {
/// Validate block size by verifying that it is between 512 and `PAGE_SIZE`,
/// and that it is a power of two.
- fn validate_block_size(size: u32) -> Result {
+ pub fn validate_block_size(size: u32) -> Result {
if !(512..=bindings::PAGE_SIZE as u32).contains(&size) || !size.is_power_of_two() {
Err(error::code::EINVAL)
} else {
@@ -92,7 +99,14 @@ impl GenDiskBuilder {
self,
name: fmt::Arguments<'_>,
tagset: Arc<TagSet<T>>,
+ queue_data: T::QueueData,
) -> Result<GenDisk<T>> {
+ let data = queue_data.into_foreign();
+ let recover_data = ScopeGuard::new(|| {
+ // SAFETY: `data` was created by the call to `into_foreign()` above.
+ drop(unsafe { T::QueueData::from_foreign(data) });
+ });
+
// SAFETY: `bindings::queue_limits` contain only fields that are valid when zeroed.
let mut lim: bindings::queue_limits = unsafe { core::mem::zeroed() };
@@ -107,7 +121,7 @@ impl GenDiskBuilder {
bindings::__blk_mq_alloc_disk(
tagset.raw_tag_set(),
&mut lim,
- core::ptr::null_mut(),
+ data,
static_lock_class!().as_ptr(),
)
})?;
@@ -139,14 +153,14 @@ impl GenDiskBuilder {
// SAFETY: `gendisk` is a valid pointer as we initialized it above
unsafe { (*gendisk).fops = &TABLE };
- let mut raw_writer = RawWriter::from_array(
+ let mut writer = NullTerminatedFormatter::new(
// SAFETY: `gendisk` points to a valid and initialized instance. We
// have exclusive access, since the disk is not added to the VFS
// yet.
unsafe { &mut (*gendisk).disk_name },
- )?;
- raw_writer.write_fmt(name)?;
- raw_writer.write_char('\0')?;
+ )
+ .ok_or(EINVAL)?;
+ writer.write_fmt(name)?;
// SAFETY: `gendisk` points to a valid and initialized instance of
// `struct gendisk`. `set_capacity` takes a lock to synchronize this
@@ -161,8 +175,12 @@ impl GenDiskBuilder {
},
)?;
+ recover_data.dismiss();
+
// INVARIANT: `gendisk` was initialized above.
// INVARIANT: `gendisk` was added to the VFS via `device_add_disk` above.
+ // INVARIANT: `gendisk.queue.queue_data` is set to `data` in the call to
+ // `__blk_mq_alloc_disk` above.
Ok(GenDisk {
_tagset: tagset,
gendisk,
@@ -174,9 +192,10 @@ impl GenDiskBuilder {
///
/// # Invariants
///
-/// - `gendisk` must always point to an initialized and valid `struct gendisk`.
-/// - `gendisk` was added to the VFS through a call to
-/// `bindings::device_add_disk`.
+/// - `gendisk` must always point to an initialized and valid `struct gendisk`.
+/// - `gendisk` was added to the VFS through a call to
+/// `bindings::device_add_disk`.
+/// - `self.gendisk.queue.queuedata` is initialized by a call to `ForeignOwnable::into_foreign`.
pub struct GenDisk<T: Operations> {
_tagset: Arc<TagSet<T>>,
gendisk: *mut bindings::gendisk,
@@ -188,9 +207,20 @@ unsafe impl<T: Operations + Send> Send for GenDisk<T> {}
impl<T: Operations> Drop for GenDisk<T> {
fn drop(&mut self) {
+ // SAFETY: By type invariant of `Self`, `self.gendisk` points to a valid
+ // and initialized instance of `struct gendisk`, and, `queuedata` was
+ // initialized with the result of a call to
+ // `ForeignOwnable::into_foreign`.
+ let queue_data = unsafe { (*(*self.gendisk).queue).queuedata };
+
// SAFETY: By type invariant, `self.gendisk` points to a valid and
// initialized instance of `struct gendisk`, and it was previously added
// to the VFS.
unsafe { bindings::del_gendisk(self.gendisk) };
+
+ // SAFETY: `queue.queuedata` was created by `GenDiskBuilder::build` with
+ // a call to `ForeignOwnable::into_foreign` to create `queuedata`.
+ // `ForeignOwnable::from_foreign` is only called here.
+ drop(unsafe { T::QueueData::from_foreign(queue_data) });
}
}
diff --git a/rust/kernel/block/mq/operations.rs b/rust/kernel/block/mq/operations.rs
index 864ff379dc91..f91a1719886c 100644
--- a/rust/kernel/block/mq/operations.rs
+++ b/rust/kernel/block/mq/operations.rs
@@ -6,13 +6,15 @@
use crate::{
bindings,
- block::mq::request::RequestDataWrapper,
- block::mq::Request,
+ block::mq::{request::RequestDataWrapper, Request},
error::{from_result, Result},
prelude::*,
- types::ARef,
+ sync::Refcount,
+ types::{ARef, ForeignOwnable},
};
-use core::{marker::PhantomData, sync::atomic::AtomicU64, sync::atomic::Ordering};
+use core::marker::PhantomData;
+
+type ForeignBorrowed<'a, T> = <T as ForeignOwnable>::Borrowed<'a>;
/// Implement this trait to interface blk-mq as block devices.
///
@@ -26,12 +28,23 @@ use core::{marker::PhantomData, sync::atomic::AtomicU64, sync::atomic::Ordering}
/// [module level documentation]: kernel::block::mq
#[macros::vtable]
pub trait Operations: Sized {
+ /// Data associated with the `struct request_queue` that is allocated for
+ /// the `GenDisk` associated with this `Operations` implementation.
+ type QueueData: ForeignOwnable;
+
/// Called by the kernel to queue a request with the driver. If `is_last` is
/// `false`, the driver is allowed to defer committing the request.
- fn queue_rq(rq: ARef<Request<Self>>, is_last: bool) -> Result;
+ fn queue_rq(
+ queue_data: ForeignBorrowed<'_, Self::QueueData>,
+ rq: ARef<Request<Self>>,
+ is_last: bool,
+ ) -> Result;
/// Called by the kernel to indicate that queued requests should be submitted.
- fn commit_rqs();
+ fn commit_rqs(queue_data: ForeignBorrowed<'_, Self::QueueData>);
+
+ /// Called by the kernel when the request is completed.
+ fn complete(rq: ARef<Request<Self>>);
/// Called by the kernel to poll the device for completed requests. Only
/// used for poll queues.
@@ -70,7 +83,7 @@ impl<T: Operations> OperationsVTable<T> {
/// promise to not access the request until the driver calls
/// `bindings::blk_mq_end_request` for the request.
unsafe extern "C" fn queue_rq_callback(
- _hctx: *mut bindings::blk_mq_hw_ctx,
+ hctx: *mut bindings::blk_mq_hw_ctx,
bd: *const bindings::blk_mq_queue_data,
) -> bindings::blk_status_t {
// SAFETY: `bd.rq` is valid as required by the safety requirement for
@@ -78,7 +91,7 @@ impl<T: Operations> OperationsVTable<T> {
let request = unsafe { &*(*bd).rq.cast::<Request<T>>() };
// One refcount for the ARef, one for being in flight
- request.wrapper_ref().refcount().store(2, Ordering::Relaxed);
+ request.wrapper_ref().refcount().set(2);
// SAFETY:
// - We own a refcount that we took above. We pass that to `ARef`.
@@ -88,10 +101,20 @@ impl<T: Operations> OperationsVTable<T> {
// reference counted by `ARef` until then.
let rq = unsafe { Request::aref_from_raw((*bd).rq) };
+ // SAFETY: `hctx` is valid as required by this function.
+ let queue_data = unsafe { (*(*hctx).queue).queuedata };
+
+ // SAFETY: `queue.queuedata` was created by `GenDiskBuilder::build` with
+ // a call to `ForeignOwnable::into_foreign` to create `queuedata`.
+ // `ForeignOwnable::from_foreign` is only called when the tagset is
+ // dropped, which happens after we are dropped.
+ let queue_data = unsafe { T::QueueData::borrow(queue_data) };
+
// SAFETY: We have exclusive access and we just set the refcount above.
unsafe { Request::start_unchecked(&rq) };
let ret = T::queue_rq(
+ queue_data,
rq,
// SAFETY: `bd` is valid as required by the safety requirement for
// this function.
@@ -101,7 +124,7 @@ impl<T: Operations> OperationsVTable<T> {
if let Err(e) = ret {
e.to_blk_status()
} else {
- bindings::BLK_STS_OK as _
+ bindings::BLK_STS_OK as bindings::blk_status_t
}
}
@@ -110,18 +133,35 @@ impl<T: Operations> OperationsVTable<T> {
///
/// # Safety
///
- /// This function may only be called by blk-mq C infrastructure.
- unsafe extern "C" fn commit_rqs_callback(_hctx: *mut bindings::blk_mq_hw_ctx) {
- T::commit_rqs()
+ /// This function may only be called by blk-mq C infrastructure. The caller
+ /// must ensure that `hctx` is valid.
+ unsafe extern "C" fn commit_rqs_callback(hctx: *mut bindings::blk_mq_hw_ctx) {
+ // SAFETY: `hctx` is valid as required by this function.
+ let queue_data = unsafe { (*(*hctx).queue).queuedata };
+
+ // SAFETY: `queue.queuedata` was created by `GenDiskBuilder::build()` with a
+ // call to `ForeignOwnable::into_foreign()` to create `queuedata`.
+ // `ForeignOwnable::from_foreign()` is only called when the tagset is
+ // dropped, which happens after we are dropped.
+ let queue_data = unsafe { T::QueueData::borrow(queue_data) };
+ T::commit_rqs(queue_data)
}
- /// This function is called by the C kernel. It is not currently
- /// implemented, and there is no way to exercise this code path.
+ /// This function is called by the C kernel. A pointer to this function is
+ /// installed in the `blk_mq_ops` vtable for the driver.
///
/// # Safety
///
- /// This function may only be called by blk-mq C infrastructure.
- unsafe extern "C" fn complete_callback(_rq: *mut bindings::request) {}
+ /// This function may only be called by blk-mq C infrastructure. `rq` must
+ /// point to a valid request that has been marked as completed. The pointee
+ /// of `rq` must be valid for write for the duration of this function.
+ unsafe extern "C" fn complete_callback(rq: *mut bindings::request) {
+ // SAFETY: This function can only be dispatched through
+ // `Request::complete`. We leaked a refcount then which we pick back up
+ // now.
+ let aref = unsafe { Request::aref_from_raw(rq) };
+ T::complete(aref);
+ }
/// This function is called by the C kernel. A pointer to this function is
/// installed in the `blk_mq_ops` vtable for the driver.
@@ -187,7 +227,7 @@ impl<T: Operations> OperationsVTable<T> {
// SAFETY: The refcount field is allocated but not initialized, so
// it is valid for writes.
- unsafe { RequestDataWrapper::refcount_ptr(pdu.as_ptr()).write(AtomicU64::new(0)) };
+ unsafe { RequestDataWrapper::refcount_ptr(pdu.as_ptr()).write(Refcount::new(0)) };
Ok(0)
})
diff --git a/rust/kernel/block/mq/raw_writer.rs b/rust/kernel/block/mq/raw_writer.rs
deleted file mode 100644
index 7e2159e4f6a6..000000000000
--- a/rust/kernel/block/mq/raw_writer.rs
+++ /dev/null
@@ -1,55 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-use core::fmt::{self, Write};
-
-use crate::error::Result;
-use crate::prelude::EINVAL;
-
-/// A mutable reference to a byte buffer where a string can be written into.
-///
-/// # Invariants
-///
-/// `buffer` is always null terminated.
-pub(crate) struct RawWriter<'a> {
- buffer: &'a mut [u8],
- pos: usize,
-}
-
-impl<'a> RawWriter<'a> {
- /// Create a new `RawWriter` instance.
- fn new(buffer: &'a mut [u8]) -> Result<RawWriter<'a>> {
- *(buffer.last_mut().ok_or(EINVAL)?) = 0;
-
- // INVARIANT: We null terminated the buffer above.
- Ok(Self { buffer, pos: 0 })
- }
-
- pub(crate) fn from_array<const N: usize>(
- a: &'a mut [crate::ffi::c_char; N],
- ) -> Result<RawWriter<'a>> {
- Self::new(
- // SAFETY: the buffer of `a` is valid for read and write as `u8` for
- // at least `N` bytes.
- unsafe { core::slice::from_raw_parts_mut(a.as_mut_ptr().cast::<u8>(), N) },
- )
- }
-}
-
-impl Write for RawWriter<'_> {
- fn write_str(&mut self, s: &str) -> fmt::Result {
- let bytes = s.as_bytes();
- let len = bytes.len();
-
- // We do not want to overwrite our null terminator
- if self.pos + len > self.buffer.len() - 1 {
- return Err(fmt::Error);
- }
-
- // INVARIANT: We are not overwriting the last byte
- self.buffer[self.pos..self.pos + len].copy_from_slice(bytes);
-
- self.pos += len;
-
- Ok(())
- }
-}
diff --git a/rust/kernel/block/mq/request.rs b/rust/kernel/block/mq/request.rs
index 4a5b7ec914ef..c5f1f6b1ccfb 100644
--- a/rust/kernel/block/mq/request.rs
+++ b/rust/kernel/block/mq/request.rs
@@ -8,13 +8,10 @@ use crate::{
bindings,
block::mq::Operations,
error::Result,
+ sync::{atomic::Relaxed, Refcount},
types::{ARef, AlwaysRefCounted, Opaque},
};
-use core::{
- marker::PhantomData,
- ptr::NonNull,
- sync::atomic::{AtomicU64, Ordering},
-};
+use core::{marker::PhantomData, ptr::NonNull};
/// A wrapper around a blk-mq [`struct request`]. This represents an IO request.
///
@@ -37,6 +34,9 @@ use core::{
/// We need to track 3 and 4 to ensure that it is safe to end the request and hand
/// back ownership to the block layer.
///
+/// Note that the driver can still obtain a new `ARef` even if there are no `ARef`s
+/// in existence by using `tag_to_rq`, hence the need to distinguish B and C.
+///
/// The states are tracked through the private `refcount` field of
/// `RequestDataWrapper`. This structure lives in the private data area of the C
/// [`struct request`].
@@ -53,7 +53,7 @@ use core::{
/// [`struct request`]: srctree/include/linux/blk-mq.h
///
#[repr(transparent)]
-pub struct Request<T: Operations>(Opaque<bindings::request>, PhantomData<T>);
+pub struct Request<T>(Opaque<bindings::request>, PhantomData<T>);
impl<T: Operations> Request<T> {
/// Create an [`ARef<Request>`] from a [`struct request`] pointer.
@@ -69,7 +69,7 @@ impl<T: Operations> Request<T> {
// INVARIANT: By the safety requirements of this function, invariants are upheld.
// SAFETY: By the safety requirement of this function, we own a
// reference count that we can pass to `ARef`.
- unsafe { ARef::from_raw(NonNull::new_unchecked(ptr as *const Self as *mut Self)) }
+ unsafe { ARef::from_raw(NonNull::new_unchecked(ptr.cast())) }
}
/// Notify the block layer that a request is going to be processed now.
@@ -98,13 +98,16 @@ impl<T: Operations> Request<T> {
///
/// [`struct request`]: srctree/include/linux/blk-mq.h
fn try_set_end(this: ARef<Self>) -> Result<*mut bindings::request, ARef<Self>> {
- // We can race with `TagSet::tag_to_rq`
- if let Err(_old) = this.wrapper_ref().refcount().compare_exchange(
- 2,
- 0,
- Ordering::Relaxed,
- Ordering::Relaxed,
- ) {
+ // To hand back the ownership, we need the current refcount to be 2.
+ // Since we can race with `TagSet::tag_to_rq`, this needs to atomically reduce
+ // refcount to 0. `Refcount` does not provide a way to do this, so use the underlying
+ // atomics directly.
+ if let Err(_old) = this
+ .wrapper_ref()
+ .refcount()
+ .as_atomic()
+ .cmpxchg(2, 0, Relaxed)
+ {
return Err(this);
}
@@ -125,11 +128,33 @@ impl<T: Operations> Request<T> {
// success of the call to `try_set_end` guarantees that there are no
// `ARef`s pointing to this request. Therefore it is safe to hand it
// back to the block layer.
- unsafe { bindings::blk_mq_end_request(request_ptr, bindings::BLK_STS_OK as _) };
+ unsafe {
+ bindings::blk_mq_end_request(
+ request_ptr,
+ bindings::BLK_STS_OK as bindings::blk_status_t,
+ )
+ };
Ok(())
}
+ /// Complete the request by scheduling `Operations::complete` for
+ /// execution.
+ ///
+ /// The function may be scheduled locally via SoftIRQ, or remotely via IPI.
+ /// See `blk_mq_complete_request_remote` in [`blk-mq.c`] for details.
+ ///
+ /// [`blk-mq.c`]: srctree/block/blk-mq.c
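+ ///
+ /// A minimal usage sketch from a driver's completion path (`rq` is assumed
+ /// to be an in-flight `ARef<Request<Self>>` owned by the driver):
+ ///
+ /// ```ignore
+ /// // Hand the finished request to the completion machinery; the block
+ /// // layer then invokes `Operations::complete` for it.
+ /// Request::complete(rq);
+ /// ```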
+ pub fn complete(this: ARef<Self>) {
+ let ptr = ARef::into_raw(this).cast::<bindings::request>().as_ptr();
+ // SAFETY: By type invariant, `ptr` points to a valid `struct request`.
+ if !unsafe { bindings::blk_mq_complete_request_remote(ptr) } {
+ // SAFETY: We released a refcount above that we can reclaim here.
+ let this = unsafe { Request::aref_from_raw(ptr) };
+ T::complete(this);
+ }
+ }
+
/// Return a pointer to the [`RequestDataWrapper`] stored in the private area
/// of the request structure.
///
@@ -143,7 +168,7 @@ impl<T: Operations> Request<T> {
// valid allocation.
let wrapper_ptr =
unsafe { bindings::blk_mq_rq_to_pdu(request_ptr).cast::<RequestDataWrapper>() };
- // SAFETY: By C API contract, wrapper_ptr points to a valid allocation
+ // SAFETY: By C API contract, `wrapper_ptr` points to a valid allocation
// and is not null.
unsafe { NonNull::new_unchecked(wrapper_ptr) }
}
@@ -155,7 +180,7 @@ impl<T: Operations> Request<T> {
// the private data associated with this request is initialized and
// valid. The existence of `&self` guarantees that the private data is
// valid as a shared reference.
- unsafe { Self::wrapper_ptr(self as *const Self as *mut Self).as_ref() }
+ unsafe { Self::wrapper_ptr(core::ptr::from_ref(self).cast_mut()).as_ref() }
}
}
@@ -168,13 +193,13 @@ pub(crate) struct RequestDataWrapper {
/// - 0: The request is owned by C block layer.
/// - 1: The request is owned by Rust abstractions but there are no [`ARef`] references to it.
/// - 2+: There are [`ARef`] references to the request.
- refcount: AtomicU64,
+ refcount: Refcount,
}
impl RequestDataWrapper {
/// Return a reference to the refcount of the request that is embedding
/// `self`.
- pub(crate) fn refcount(&self) -> &AtomicU64 {
+ pub(crate) fn refcount(&self) -> &Refcount {
&self.refcount
}
@@ -184,7 +209,7 @@ impl RequestDataWrapper {
/// # Safety
///
/// - `this` must point to a live allocation of at least the size of `Self`.
- pub(crate) unsafe fn refcount_ptr(this: *mut Self) -> *mut AtomicU64 {
+ pub(crate) unsafe fn refcount_ptr(this: *mut Self) -> *mut Refcount {
// SAFETY: Because of the safety requirements of this function, the
// field projection is safe.
unsafe { &raw mut (*this).refcount }
@@ -200,47 +225,13 @@ unsafe impl<T: Operations> Send for Request<T> {}
// mutate `self` are internally synchronized.
unsafe impl<T: Operations> Sync for Request<T> {}
-/// Store the result of `op(target.load())` in target, returning new value of
-/// target.
-fn atomic_relaxed_op_return(target: &AtomicU64, op: impl Fn(u64) -> u64) -> u64 {
- let old = target.fetch_update(Ordering::Relaxed, Ordering::Relaxed, |x| Some(op(x)));
-
- // SAFETY: Because the operation passed to `fetch_update` above always
- // return `Some`, `old` will always be `Ok`.
- let old = unsafe { old.unwrap_unchecked() };
-
- op(old)
-}
-
-/// Store the result of `op(target.load)` in `target` if `target.load() !=
-/// pred`, returning [`true`] if the target was updated.
-fn atomic_relaxed_op_unless(target: &AtomicU64, op: impl Fn(u64) -> u64, pred: u64) -> bool {
- target
- .fetch_update(Ordering::Relaxed, Ordering::Relaxed, |x| {
- if x == pred {
- None
- } else {
- Some(op(x))
- }
- })
- .is_ok()
-}
-
// SAFETY: All instances of `Request<T>` are reference counted. This
// implementation of `AlwaysRefCounted` ensure that increments to the ref count
// keeps the object alive in memory at least until a matching reference count
// decrement is executed.
unsafe impl<T: Operations> AlwaysRefCounted for Request<T> {
fn inc_ref(&self) {
- let refcount = &self.wrapper_ref().refcount();
-
- #[cfg_attr(not(CONFIG_DEBUG_MISC), allow(unused_variables))]
- let updated = atomic_relaxed_op_unless(refcount, |x| x + 1, 0);
-
- #[cfg(CONFIG_DEBUG_MISC)]
- if !updated {
- panic!("Request refcount zero on clone")
- }
+ self.wrapper_ref().refcount().inc();
}
unsafe fn dec_ref(obj: core::ptr::NonNull<Self>) {
@@ -252,10 +243,10 @@ unsafe impl<T: Operations> AlwaysRefCounted for Request<T> {
let refcount = unsafe { &*RequestDataWrapper::refcount_ptr(wrapper_ptr) };
#[cfg_attr(not(CONFIG_DEBUG_MISC), allow(unused_variables))]
- let new_refcount = atomic_relaxed_op_return(refcount, |x| x - 1);
+ let is_zero = refcount.dec_and_test();
#[cfg(CONFIG_DEBUG_MISC)]
- if new_refcount == 0 {
+ if is_zero {
panic!("Request reached refcount zero in Rust abstractions");
}
}
diff --git a/rust/kernel/block/mq/tag_set.rs b/rust/kernel/block/mq/tag_set.rs
index bcf4214ad149..c3cf56d52bee 100644
--- a/rust/kernel/block/mq/tag_set.rs
+++ b/rust/kernel/block/mq/tag_set.rs
@@ -9,7 +9,7 @@ use core::pin::Pin;
use crate::{
bindings,
block::mq::{operations::OperationsVTable, request::RequestDataWrapper, Operations},
- error,
+ error::{self, Result},
prelude::try_pin_init,
types::Opaque,
};
@@ -41,7 +41,7 @@ impl<T: Operations> TagSet<T> {
// SAFETY: `blk_mq_tag_set` only contains integers and pointers, which
// all are allowed to be 0.
let tag_set: bindings::blk_mq_tag_set = unsafe { core::mem::zeroed() };
- let tag_set = core::mem::size_of::<RequestDataWrapper>()
+ let tag_set: Result<_> = core::mem::size_of::<RequestDataWrapper>()
.try_into()
.map(|cmd_size| {
bindings::blk_mq_tag_set {
@@ -56,12 +56,14 @@ impl<T: Operations> TagSet<T> {
nr_maps: num_maps,
..tag_set
}
- });
+ })
+ .map(Opaque::new)
+ .map_err(|e| e.into());
try_pin_init!(TagSet {
- inner <- PinInit::<_, error::Error>::pin_chain(Opaque::new(tag_set?), |tag_set| {
+ inner <- tag_set.pin_chain(|tag_set| {
// SAFETY: we do not move out of `tag_set`.
- let tag_set = unsafe { Pin::get_unchecked_mut(tag_set) };
+ let tag_set: &mut Opaque<_> = unsafe { Pin::get_unchecked_mut(tag_set) };
// SAFETY: `tag_set` is a reference to an initialized `blk_mq_tag_set`.
error::to_result( unsafe { bindings::blk_mq_alloc_tag_set(tag_set.get())})
}),
diff --git a/rust/kernel/bug.rs b/rust/kernel/bug.rs
new file mode 100644
index 000000000000..36aef43e5ebe
--- /dev/null
+++ b/rust/kernel/bug.rs
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2024, 2025 FUJITA Tomonori <fujita.tomonori@gmail.com>
+
+//! Support for BUG and WARN functionality.
+//!
+//! C header: [`include/asm-generic/bug.h`](srctree/include/asm-generic/bug.h)
+
+#[macro_export]
+#[doc(hidden)]
+#[cfg(all(CONFIG_BUG, not(CONFIG_UML), not(CONFIG_LOONGARCH), not(CONFIG_ARM)))]
+#[cfg(CONFIG_DEBUG_BUGVERBOSE)]
+macro_rules! warn_flags {
+ ($flags:expr) => {
+ const FLAGS: u32 = $crate::bindings::BUGFLAG_WARNING | $flags;
+ const _FILE: &[u8] = file!().as_bytes();
+ // Plus one for null-terminator.
+ static FILE: [u8; _FILE.len() + 1] = {
+ let mut bytes = [0; _FILE.len() + 1];
+ let mut i = 0;
+ while i < _FILE.len() {
+ bytes[i] = _FILE[i];
+ i += 1;
+ }
+ bytes
+ };
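+ // The manual byte-copy loop keeps this evaluable in a `static`
+ // initializer, where slice-copy helpers are not usable.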
+
+ // SAFETY:
+ // - `file`, `line`, `flags`, and `size` are all compile-time constants or
+ // symbols, preventing any invalid memory access.
+ // - The asm block has no side effects and does not modify any registers
+ // or memory. It is purely for embedding metadata into the ELF section.
+ unsafe {
+ $crate::asm!(
+ concat!(
+ "/* {size} */",
+ include!(concat!(env!("OBJTREE"), "/rust/kernel/generated_arch_warn_asm.rs")),
+ include!(concat!(env!("OBJTREE"), "/rust/kernel/generated_arch_reachable_asm.rs")));
+ file = sym FILE,
+ line = const line!(),
+ flags = const FLAGS,
+ size = const ::core::mem::size_of::<$crate::bindings::bug_entry>(),
+ );
+ }
+ }
+}
+
+#[macro_export]
+#[doc(hidden)]
+#[cfg(all(CONFIG_BUG, not(CONFIG_UML), not(CONFIG_LOONGARCH), not(CONFIG_ARM)))]
+#[cfg(not(CONFIG_DEBUG_BUGVERBOSE))]
+macro_rules! warn_flags {
+ ($flags:expr) => {
+ const FLAGS: u32 = $crate::bindings::BUGFLAG_WARNING | $flags;
+
+ // SAFETY:
+ // - `flags` and `size` are all compile-time constants, preventing
+ // any invalid memory access.
+ // - The asm block has no side effects and does not modify any registers
+ // or memory. It is purely for embedding metadata into the ELF section.
+ unsafe {
+ $crate::asm!(
+ concat!(
+ "/* {size} */",
+ include!(concat!(env!("OBJTREE"), "/rust/kernel/generated_arch_warn_asm.rs")),
+ include!(concat!(env!("OBJTREE"), "/rust/kernel/generated_arch_reachable_asm.rs")));
+ flags = const FLAGS,
+ size = const ::core::mem::size_of::<$crate::bindings::bug_entry>(),
+ );
+ }
+ }
+}
+
+#[macro_export]
+#[doc(hidden)]
+#[cfg(all(CONFIG_BUG, CONFIG_UML))]
+macro_rules! warn_flags {
+ ($flags:expr) => {
+ // SAFETY: It is always safe to call `warn_slowpath_fmt()`
+ // with a valid null-terminated string.
+ unsafe {
+ $crate::bindings::warn_slowpath_fmt(
+ $crate::c_str!(::core::file!()).as_char_ptr(),
+ line!() as $crate::ffi::c_int,
+ $flags as $crate::ffi::c_uint,
+ ::core::ptr::null(),
+ );
+ }
+ };
+}
+
+#[macro_export]
+#[doc(hidden)]
+#[cfg(all(CONFIG_BUG, any(CONFIG_LOONGARCH, CONFIG_ARM)))]
+macro_rules! warn_flags {
+ ($flags:expr) => {
+ // SAFETY: It is always safe to call `WARN_ON()`.
+ unsafe { $crate::bindings::WARN_ON(true) }
+ };
+}
+
+#[macro_export]
+#[doc(hidden)]
+#[cfg(not(CONFIG_BUG))]
+macro_rules! warn_flags {
+ ($flags:expr) => {};
+}
+
+#[doc(hidden)]
+pub const fn bugflag_taint(value: u32) -> u32 {
+ value << 8
+}
+
+/// Report a warning if `cond` is true and return the condition's evaluation result.
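+///
+/// # Examples
+///
+/// A minimal usage sketch (the condition below is false, so no warning is
+/// reported and the macro simply yields the condition's value):
+///
+/// ```
+/// let value = 42;
+/// assert!(!kernel::warn_on!(value == 0));
+/// ```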
+#[macro_export]
+macro_rules! warn_on {
+ ($cond:expr) => {{
+ let cond = $cond;
+ if cond {
+ const WARN_ON_FLAGS: u32 = $crate::bug::bugflag_taint($crate::bindings::TAINT_WARN);
+
+ $crate::warn_flags!(WARN_ON_FLAGS);
+ }
+ cond
+ }};
+}
diff --git a/rust/kernel/clk.rs b/rust/kernel/clk.rs
index 6041c6d07527..1e6c8c42fb3a 100644
--- a/rust/kernel/clk.rs
+++ b/rust/kernel/clk.rs
@@ -12,7 +12,7 @@ use crate::ffi::c_ulong;
///
/// Represents a frequency in hertz, wrapping a [`c_ulong`] value.
///
-/// ## Examples
+/// # Examples
///
/// ```
/// use kernel::clk::Hertz;
@@ -30,39 +30,43 @@ use crate::ffi::c_ulong;
pub struct Hertz(pub c_ulong);
impl Hertz {
+ const KHZ_TO_HZ: c_ulong = 1_000;
+ const MHZ_TO_HZ: c_ulong = 1_000_000;
+ const GHZ_TO_HZ: c_ulong = 1_000_000_000;
+
/// Create a new instance from kilohertz (kHz)
- pub fn from_khz(khz: c_ulong) -> Self {
- Self(khz * 1_000)
+ pub const fn from_khz(khz: c_ulong) -> Self {
+ Self(khz * Self::KHZ_TO_HZ)
}
/// Create a new instance from megahertz (MHz)
- pub fn from_mhz(mhz: c_ulong) -> Self {
- Self(mhz * 1_000_000)
+ pub const fn from_mhz(mhz: c_ulong) -> Self {
+ Self(mhz * Self::MHZ_TO_HZ)
}
/// Create a new instance from gigahertz (GHz)
- pub fn from_ghz(ghz: c_ulong) -> Self {
- Self(ghz * 1_000_000_000)
+ pub const fn from_ghz(ghz: c_ulong) -> Self {
+ Self(ghz * Self::GHZ_TO_HZ)
}
/// Get the frequency in hertz
- pub fn as_hz(&self) -> c_ulong {
+ pub const fn as_hz(&self) -> c_ulong {
self.0
}
/// Get the frequency in kilohertz
- pub fn as_khz(&self) -> c_ulong {
- self.0 / 1_000
+ pub const fn as_khz(&self) -> c_ulong {
+ self.0 / Self::KHZ_TO_HZ
}
/// Get the frequency in megahertz
- pub fn as_mhz(&self) -> c_ulong {
- self.0 / 1_000_000
+ pub const fn as_mhz(&self) -> c_ulong {
+ self.0 / Self::MHZ_TO_HZ
}
/// Get the frequency in gigahertz
- pub fn as_ghz(&self) -> c_ulong {
- self.0 / 1_000_000_000
+ pub const fn as_ghz(&self) -> c_ulong {
+ self.0 / Self::GHZ_TO_HZ
}
}
@@ -95,7 +99,7 @@ mod common_clk {
/// Instances of this type are reference-counted. Calling [`Clk::get`] ensures that the
/// allocation remains valid for the lifetime of the [`Clk`].
///
- /// ## Examples
+ /// # Examples
///
/// The following example demonstrates how to obtain and configure a clock for a device.
///
@@ -132,11 +136,7 @@ mod common_clk {
///
/// [`clk_get`]: https://docs.kernel.org/core-api/kernel-api.html#c.clk_get
pub fn get(dev: &Device, name: Option<&CStr>) -> Result<Self> {
- let con_id = if let Some(name) = name {
- name.as_ptr()
- } else {
- ptr::null()
- };
+ let con_id = name.map_or(ptr::null(), |n| n.as_ptr());
// SAFETY: It is safe to call [`clk_get`] for a valid device pointer.
//
@@ -266,7 +266,7 @@ mod common_clk {
/// Instances of this type are reference-counted. Calling [`OptionalClk::get`] ensures that the
/// allocation remains valid for the lifetime of the [`OptionalClk`].
///
- /// ## Examples
+ /// # Examples
///
/// The following example demonstrates how to obtain and configure an optional clock for a
/// device. The code functions correctly whether or not the clock is available.
@@ -304,11 +304,7 @@ mod common_clk {
/// [`clk_get_optional`]:
/// https://docs.kernel.org/core-api/kernel-api.html#c.clk_get_optional
pub fn get(dev: &Device, name: Option<&CStr>) -> Result<Self> {
- let con_id = if let Some(name) = name {
- name.as_ptr()
- } else {
- ptr::null()
- };
+ let con_id = name.map_or(ptr::null(), |n| n.as_ptr());
// SAFETY: It is safe to call [`clk_get_optional`] for a valid device pointer.
//
diff --git a/rust/kernel/configfs.rs b/rust/kernel/configfs.rs
index 34d0bea4f9a5..10f1547ca9f1 100644
--- a/rust/kernel/configfs.rs
+++ b/rust/kernel/configfs.rs
@@ -17,7 +17,7 @@
//!
//! C header: [`include/linux/configfs.h`](srctree/include/linux/configfs.h)
//!
-//! # Example
+//! # Examples
//!
//! ```ignore
//! use kernel::alloc::flags;
@@ -151,7 +151,7 @@ impl<Data> Subsystem<Data> {
data: impl PinInit<Data, Error>,
) -> impl PinInit<Self, Error> {
try_pin_init!(Self {
- subsystem <- pin_init::zeroed().chain(
+ subsystem <- pin_init::init_zeroed().chain(
|place: &mut Opaque<bindings::configfs_subsystem>| {
// SAFETY: We initialized the required fields of `place.group` above.
unsafe {
@@ -261,9 +261,9 @@ impl<Data> Group<Data> {
data: impl PinInit<Data, Error>,
) -> impl PinInit<Self, Error> {
try_pin_init!(Self {
- group <- pin_init::zeroed().chain(|v: &mut Opaque<bindings::config_group>| {
+ group <- pin_init::init_zeroed().chain(|v: &mut Opaque<bindings::config_group>| {
let place = v.get();
- let name = name.as_bytes_with_nul().as_ptr();
+ let name = name.to_bytes_with_nul().as_ptr();
// SAFETY: It is safe to initialize a group once it has been zeroed.
unsafe {
bindings::config_group_init_type_name(place, name.cast(), item_type.as_ptr())
@@ -279,7 +279,7 @@ impl<Data> Group<Data> {
// within the `group` field.
unsafe impl<Data> HasGroup<Data> for Group<Data> {
unsafe fn group(this: *const Self) -> *const bindings::config_group {
- Opaque::raw_get(
+ Opaque::cast_into(
// SAFETY: By impl and function safety requirements this field
// projection is within bounds of the allocation.
unsafe { &raw const (*this).group },
@@ -426,7 +426,7 @@ where
};
const fn vtable_ptr() -> *const bindings::configfs_group_operations {
- &Self::VTABLE as *const bindings::configfs_group_operations
+ &Self::VTABLE
}
}
@@ -464,7 +464,7 @@ where
};
const fn vtable_ptr() -> *const bindings::configfs_item_operations {
- &Self::VTABLE as *const bindings::configfs_item_operations
+ &Self::VTABLE
}
}
@@ -476,7 +476,7 @@ impl<Data> ItemOperationsVTable<Subsystem<Data>, Data> {
};
const fn vtable_ptr() -> *const bindings::configfs_item_operations {
- &Self::VTABLE as *const bindings::configfs_item_operations
+ &Self::VTABLE
}
}
@@ -561,7 +561,7 @@ where
let data: &Data = unsafe { get_group_data(c_group) };
// SAFETY: By function safety requirements, `page` is writable for `PAGE_SIZE`.
- let ret = O::show(data, unsafe { &mut *(page as *mut [u8; PAGE_SIZE]) });
+ let ret = O::show(data, unsafe { &mut *(page.cast::<[u8; PAGE_SIZE]>()) });
match ret {
Ok(size) => size as isize,
@@ -613,7 +613,7 @@ where
pub const fn new(name: &'static CStr) -> Self {
Self {
attribute: Opaque::new(bindings::configfs_attribute {
- ca_name: name.as_char_ptr(),
+ ca_name: crate::str::as_char_ptr_in_const_context(name),
ca_owner: core::ptr::null_mut(),
ca_mode: 0o660,
show: Some(Self::show),
@@ -717,11 +717,7 @@ impl<const N: usize, Data> AttributeList<N, Data> {
// SAFETY: By function safety requirements, we have exclusive access to
// `self` and the reference created below will be exclusive.
- unsafe {
- (&mut *self.0.get())[I] = (attribute as *const Attribute<ID, O, Data>)
- .cast_mut()
- .cast()
- };
+ unsafe { (&mut *self.0.get())[I] = core::ptr::from_ref(attribute).cast_mut().cast() };
}
}
@@ -761,9 +757,7 @@ macro_rules! impl_item_type {
ct_owner: owner.as_ptr(),
ct_group_ops: GroupOperationsVTable::<Data, Child>::vtable_ptr().cast_mut(),
ct_item_ops: ItemOperationsVTable::<$tpe, Data>::vtable_ptr().cast_mut(),
- ct_attrs: (attributes as *const AttributeList<N, Data>)
- .cast_mut()
- .cast(),
+ ct_attrs: core::ptr::from_ref(attributes).cast_mut().cast(),
ct_bin_attrs: core::ptr::null_mut(),
}),
_p: PhantomData,
@@ -780,9 +774,7 @@ macro_rules! impl_item_type {
ct_owner: owner.as_ptr(),
ct_group_ops: core::ptr::null_mut(),
ct_item_ops: ItemOperationsVTable::<$tpe, Data>::vtable_ptr().cast_mut(),
- ct_attrs: (attributes as *const AttributeList<N, Data>)
- .cast_mut()
- .cast(),
+ ct_attrs: core::ptr::from_ref(attributes).cast_mut().cast(),
ct_bin_attrs: core::ptr::null_mut(),
}),
_p: PhantomData,
@@ -1047,3 +1039,5 @@ macro_rules! configfs_attrs {
};
}
+
+pub use crate::configfs_attrs;
diff --git a/rust/kernel/cpu.rs b/rust/kernel/cpu.rs
index b75403b0eb56..cb6c0338ef5a 100644
--- a/rust/kernel/cpu.rs
+++ b/rust/kernel/cpu.rs
@@ -109,6 +109,7 @@ impl CpuId {
/// unexpectedly due to preemption or CPU migration. It should only be
/// used when the context ensures that the task remains on the same CPU
/// or the users could use a stale (yet valid) CPU ID.
+ #[inline]
pub fn current() -> Self {
// SAFETY: raw_smp_processor_id() always returns a valid CPU ID.
unsafe { Self::from_u32_unchecked(bindings::raw_smp_processor_id()) }
@@ -147,5 +148,5 @@ pub unsafe fn from_cpu(cpu: CpuId) -> Result<&'static Device> {
// SAFETY: The pointer returned by `get_cpu_device()`, if not `NULL`, is a valid pointer to
// a `struct device` and is never freed by the C code.
- Ok(unsafe { Device::as_ref(ptr) })
+ Ok(unsafe { Device::from_raw(ptr) })
}
diff --git a/rust/kernel/cpufreq.rs b/rust/kernel/cpufreq.rs
index 11b03e9d7e89..21b5b9b8acc1 100644
--- a/rust/kernel/cpufreq.rs
+++ b/rust/kernel/cpufreq.rs
@@ -13,7 +13,7 @@ use crate::{
cpu::CpuId,
cpumask,
device::{Bound, Device},
- devres::Devres,
+ devres,
error::{code::*, from_err_ptr, from_result, to_result, Result, VTABLE_DEFAULT_ERROR},
ffi::{c_char, c_ulong},
prelude::*,
@@ -27,7 +27,6 @@ use crate::clk::Clk;
use core::{
cell::UnsafeCell,
marker::PhantomData,
- mem::MaybeUninit,
ops::{Deref, DerefMut},
pin::Pin,
ptr,
@@ -39,7 +38,8 @@ use macros::vtable;
const CPUFREQ_NAME_LEN: usize = bindings::CPUFREQ_NAME_LEN as usize;
/// Default transition latency value in nanoseconds.
-pub const ETERNAL_LATENCY_NS: u32 = bindings::CPUFREQ_ETERNAL as u32;
+pub const DEFAULT_TRANSITION_LATENCY_NS: u32 =
+ bindings::CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS;
/// CPU frequency driver flags.
pub mod flags {
@@ -202,7 +202,7 @@ impl From<TableIndex> for usize {
/// The callers must ensure that the `struct cpufreq_frequency_table` is valid for access and
/// remains valid for the lifetime of the returned reference.
///
-/// ## Examples
+/// # Examples
///
/// The following example demonstrates how to read a frequency value from [`Table`].
///
@@ -318,7 +318,7 @@ impl Deref for TableBox {
///
/// This is used by the CPU frequency drivers to build a frequency table dynamically.
///
-/// ## Examples
+/// # Examples
///
/// The following example demonstrates how to create a CPU frequency table.
///
@@ -395,18 +395,18 @@ impl TableBuilder {
/// The callers must ensure that the `struct cpufreq_policy` is valid for access and remains valid
/// for the lifetime of the returned reference.
///
-/// ## Examples
+/// # Examples
///
/// The following example demonstrates how to create a CPU frequency table.
///
/// ```
-/// use kernel::cpufreq::{ETERNAL_LATENCY_NS, Policy};
+/// use kernel::cpufreq::{DEFAULT_TRANSITION_LATENCY_NS, Policy};
///
/// fn update_policy(policy: &mut Policy) {
/// policy
/// .set_dvfs_possible_from_any_cpu(true)
/// .set_fast_switch_possible(true)
-/// .set_transition_latency_ns(ETERNAL_LATENCY_NS);
+/// .set_transition_latency_ns(DEFAULT_TRANSITION_LATENCY_NS);
///
/// pr_info!("The policy details are: {:?}\n", (policy.cpu(), policy.cur()));
/// }
@@ -543,7 +543,7 @@ impl Policy {
pub fn cpus(&mut self) -> &mut cpumask::Cpumask {
// SAFETY: The pointer to `cpus` is valid for writing and remains valid for the lifetime of
// the returned reference.
- unsafe { cpumask::CpumaskVar::as_mut_ref(&mut self.as_mut_ref().cpus) }
+ unsafe { cpumask::CpumaskVar::from_raw_mut(&mut self.as_mut_ref().cpus) }
}
/// Sets clock for the [`Policy`].
@@ -649,7 +649,7 @@ impl Policy {
fn set_data<T: ForeignOwnable>(&mut self, data: T) -> Result {
if self.as_ref().driver_data.is_null() {
// Transfer the ownership of the data to the foreign interface.
- self.as_mut_ref().driver_data = <T as ForeignOwnable>::into_foreign(data) as _;
+ self.as_mut_ref().driver_data = <T as ForeignOwnable>::into_foreign(data).cast();
Ok(())
} else {
Err(EBUSY)
@@ -834,7 +834,7 @@ pub trait Driver {
/// CPU frequency driver Registration.
///
-/// ## Examples
+/// # Examples
///
/// The following example demonstrates how to register a cpufreq driver.
///
@@ -1013,12 +1013,11 @@ impl<T: Driver> Registration<T> {
} else {
None
},
- // SAFETY: All zeros is a valid value for `bindings::cpufreq_driver`.
- ..unsafe { MaybeUninit::zeroed().assume_init() }
+ ..pin_init::zeroed()
};
const fn copy_name(name: &'static CStr) -> [c_char; CPUFREQ_NAME_LEN] {
- let src = name.as_bytes_with_nul();
+ let src = name.to_bytes_with_nul();
let mut dst = [0; CPUFREQ_NAME_LEN];
build_assert!(src.len() <= CPUFREQ_NAME_LEN);
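
The `..pin_init::zeroed()` change above drops an open-coded `unsafe` zero-initialization in favor of the `pin-init` crate's `Zeroable`-checked equivalent. A minimal sketch of the pattern outside the kernel tree (the `Ops` type and its fields are illustrative, not a real binding):

```rust
use pin_init::{zeroed, Zeroable};

#[repr(C)]
struct Ops {
    init: Option<extern "C" fn() -> i32>,
    exit: Option<extern "C" fn() -> i32>,
}

// SAFETY: all-zero bytes are a valid `Ops`; both callbacks become `None`.
unsafe impl Zeroable for Ops {}

fn make_ops() -> Ops {
    Ops {
        init: Some(noop),
        // Safe replacement for `..unsafe { MaybeUninit::zeroed().assume_init() }`.
        ..zeroed()
    }
}

extern "C" fn noop() -> i32 {
    0
}
```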
@@ -1046,10 +1045,13 @@ impl<T: Driver> Registration<T> {
/// Same as [`Registration::new`], but does not return a [`Registration`] instance.
///
- /// Instead the [`Registration`] is owned by [`Devres`] and will be revoked / dropped, once the
+ /// Instead, the [`Registration`] is owned by [`devres::register`] and will be dropped once the
/// device is detached.
- pub fn new_foreign_owned(dev: &Device<Bound>) -> Result {
- Devres::new_foreign_owned(dev, Self::new()?, GFP_KERNEL)
+ pub fn new_foreign_owned(dev: &Device<Bound>) -> Result
+ where
+ T: 'static,
+ {
+ devres::register(dev, Self::new()?, GFP_KERNEL)
}
}
@@ -1061,7 +1063,7 @@ impl<T: Driver> Registration<T> {
///
/// - This function may only be called from the cpufreq C infrastructure.
/// - The pointer arguments must be valid pointers.
- unsafe extern "C" fn init_callback(ptr: *mut bindings::cpufreq_policy) -> kernel::ffi::c_int {
+ unsafe extern "C" fn init_callback(ptr: *mut bindings::cpufreq_policy) -> c_int {
from_result(|| {
// SAFETY: The `ptr` is guaranteed to be valid by the contract with the C code for the
// lifetime of `policy`.
@@ -1094,7 +1096,7 @@ impl<T: Driver> Registration<T> {
///
/// - This function may only be called from the cpufreq C infrastructure.
/// - The pointer arguments must be valid pointers.
- unsafe extern "C" fn online_callback(ptr: *mut bindings::cpufreq_policy) -> kernel::ffi::c_int {
+ unsafe extern "C" fn online_callback(ptr: *mut bindings::cpufreq_policy) -> c_int {
from_result(|| {
// SAFETY: The `ptr` is guaranteed to be valid by the contract with the C code for the
// lifetime of `policy`.
@@ -1109,9 +1111,7 @@ impl<T: Driver> Registration<T> {
///
/// - This function may only be called from the cpufreq C infrastructure.
/// - The pointer arguments must be valid pointers.
- unsafe extern "C" fn offline_callback(
- ptr: *mut bindings::cpufreq_policy,
- ) -> kernel::ffi::c_int {
+ unsafe extern "C" fn offline_callback(ptr: *mut bindings::cpufreq_policy) -> c_int {
from_result(|| {
// SAFETY: The `ptr` is guaranteed to be valid by the contract with the C code for the
// lifetime of `policy`.
@@ -1126,9 +1126,7 @@ impl<T: Driver> Registration<T> {
///
/// - This function may only be called from the cpufreq C infrastructure.
/// - The pointer arguments must be valid pointers.
- unsafe extern "C" fn suspend_callback(
- ptr: *mut bindings::cpufreq_policy,
- ) -> kernel::ffi::c_int {
+ unsafe extern "C" fn suspend_callback(ptr: *mut bindings::cpufreq_policy) -> c_int {
from_result(|| {
// SAFETY: The `ptr` is guaranteed to be valid by the contract with the C code for the
// lifetime of `policy`.
@@ -1143,7 +1141,7 @@ impl<T: Driver> Registration<T> {
///
/// - This function may only be called from the cpufreq C infrastructure.
/// - The pointer arguments must be valid pointers.
- unsafe extern "C" fn resume_callback(ptr: *mut bindings::cpufreq_policy) -> kernel::ffi::c_int {
+ unsafe extern "C" fn resume_callback(ptr: *mut bindings::cpufreq_policy) -> c_int {
from_result(|| {
// SAFETY: The `ptr` is guaranteed to be valid by the contract with the C code for the
// lifetime of `policy`.
@@ -1171,9 +1169,7 @@ impl<T: Driver> Registration<T> {
///
/// - This function may only be called from the cpufreq C infrastructure.
/// - The pointer arguments must be valid pointers.
- unsafe extern "C" fn verify_callback(
- ptr: *mut bindings::cpufreq_policy_data,
- ) -> kernel::ffi::c_int {
+ unsafe extern "C" fn verify_callback(ptr: *mut bindings::cpufreq_policy_data) -> c_int {
from_result(|| {
// SAFETY: The `ptr` is guaranteed to be valid by the contract with the C code for the
// lifetime of `policy`.
@@ -1188,9 +1184,7 @@ impl<T: Driver> Registration<T> {
///
/// - This function may only be called from the cpufreq C infrastructure.
/// - The pointer arguments must be valid pointers.
- unsafe extern "C" fn setpolicy_callback(
- ptr: *mut bindings::cpufreq_policy,
- ) -> kernel::ffi::c_int {
+ unsafe extern "C" fn setpolicy_callback(ptr: *mut bindings::cpufreq_policy) -> c_int {
from_result(|| {
// SAFETY: The `ptr` is guaranteed to be valid by the contract with the C code for the
// lifetime of `policy`.
@@ -1207,9 +1201,9 @@ impl<T: Driver> Registration<T> {
/// - The pointer arguments must be valid pointers.
unsafe extern "C" fn target_callback(
ptr: *mut bindings::cpufreq_policy,
- target_freq: u32,
- relation: u32,
- ) -> kernel::ffi::c_int {
+ target_freq: c_uint,
+ relation: c_uint,
+ ) -> c_int {
from_result(|| {
// SAFETY: The `ptr` is guaranteed to be valid by the contract with the C code for the
// lifetime of `policy`.
@@ -1226,8 +1220,8 @@ impl<T: Driver> Registration<T> {
/// - The pointer arguments must be valid pointers.
unsafe extern "C" fn target_index_callback(
ptr: *mut bindings::cpufreq_policy,
- index: u32,
- ) -> kernel::ffi::c_int {
+ index: c_uint,
+ ) -> c_int {
from_result(|| {
// SAFETY: The `ptr` is guaranteed to be valid by the contract with the C code for the
// lifetime of `policy`.
@@ -1249,8 +1243,8 @@ impl<T: Driver> Registration<T> {
/// - The pointer arguments must be valid pointers.
unsafe extern "C" fn fast_switch_callback(
ptr: *mut bindings::cpufreq_policy,
- target_freq: u32,
- ) -> kernel::ffi::c_uint {
+ target_freq: c_uint,
+ ) -> c_uint {
// SAFETY: The `ptr` is guaranteed to be valid by the contract with the C code for the
// lifetime of `policy`.
let policy = unsafe { Policy::from_raw_mut(ptr) };
@@ -1263,10 +1257,10 @@ impl<T: Driver> Registration<T> {
///
/// - This function may only be called from the cpufreq C infrastructure.
unsafe extern "C" fn adjust_perf_callback(
- cpu: u32,
- min_perf: usize,
- target_perf: usize,
- capacity: usize,
+ cpu: c_uint,
+ min_perf: c_ulong,
+ target_perf: c_ulong,
+ capacity: c_ulong,
) {
// SAFETY: The C API guarantees that `cpu` refers to a valid CPU number.
let cpu_id = unsafe { CpuId::from_u32_unchecked(cpu) };
@@ -1284,8 +1278,8 @@ impl<T: Driver> Registration<T> {
/// - The pointer arguments must be valid pointers.
unsafe extern "C" fn get_intermediate_callback(
ptr: *mut bindings::cpufreq_policy,
- index: u32,
- ) -> kernel::ffi::c_uint {
+ index: c_uint,
+ ) -> c_uint {
// SAFETY: The `ptr` is guaranteed to be valid by the contract with the C code for the
// lifetime of `policy`.
let policy = unsafe { Policy::from_raw_mut(ptr) };
@@ -1305,8 +1299,8 @@ impl<T: Driver> Registration<T> {
/// - The pointer arguments must be valid pointers.
unsafe extern "C" fn target_intermediate_callback(
ptr: *mut bindings::cpufreq_policy,
- index: u32,
- ) -> kernel::ffi::c_int {
+ index: c_uint,
+ ) -> c_int {
from_result(|| {
// SAFETY: The `ptr` is guaranteed to be valid by the contract with the C code for the
// lifetime of `policy`.
@@ -1325,7 +1319,7 @@ impl<T: Driver> Registration<T> {
/// # Safety
///
/// - This function may only be called from the cpufreq C infrastructure.
- unsafe extern "C" fn get_callback(cpu: u32) -> kernel::ffi::c_uint {
+ unsafe extern "C" fn get_callback(cpu: c_uint) -> c_uint {
// SAFETY: The C API guarantees that `cpu` refers to a valid CPU number.
let cpu_id = unsafe { CpuId::from_u32_unchecked(cpu) };
@@ -1351,7 +1345,7 @@ impl<T: Driver> Registration<T> {
///
/// - This function may only be called from the cpufreq C infrastructure.
/// - The pointer arguments must be valid pointers.
- unsafe extern "C" fn bios_limit_callback(cpu: i32, limit: *mut u32) -> kernel::ffi::c_int {
+ unsafe extern "C" fn bios_limit_callback(cpu: c_int, limit: *mut c_uint) -> c_int {
// SAFETY: The C API guarantees that `cpu` refers to a valid CPU number.
let cpu_id = unsafe { CpuId::from_i32_unchecked(cpu) };
@@ -1371,8 +1365,8 @@ impl<T: Driver> Registration<T> {
/// - The pointer arguments must be valid pointers.
unsafe extern "C" fn set_boost_callback(
ptr: *mut bindings::cpufreq_policy,
- state: i32,
- ) -> kernel::ffi::c_int {
+ state: c_int,
+ ) -> c_int {
from_result(|| {
// SAFETY: The `ptr` is guaranteed to be valid by the contract with the C code for the
// lifetime of `policy`.
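
The signature changes in this file all follow one rule: `extern "C"` callbacks now spell their parameters and return values with the C alias types (`c_int`, `c_uint`, `c_ulong`) instead of fixed-width Rust integers, so the Rust signature visibly matches the bindgen-generated prototype. A hedged, self-contained illustration (these are not the kernel's actual callbacks):

```rust
use core::ffi::{c_int, c_uint};

// The alias types document that the integer widths are dictated by the
// C ABI rather than chosen on the Rust side.
unsafe extern "C" fn get_callback(cpu: c_uint) -> c_uint {
    let _ = cpu;
    0 // a real driver would return the current frequency here
}

unsafe extern "C" fn bios_limit_callback(cpu: c_int, limit: *mut c_uint) -> c_int {
    let _ = cpu;
    // SAFETY (illustrative): the caller passes a valid pointer.
    unsafe { *limit = 0 };
    0
}
```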
diff --git a/rust/kernel/cpumask.rs b/rust/kernel/cpumask.rs
index 19c607709b5f..c1d17826ae7b 100644
--- a/rust/kernel/cpumask.rs
+++ b/rust/kernel/cpumask.rs
@@ -14,9 +14,6 @@ use crate::{
#[cfg(CONFIG_CPUMASK_OFFSTACK)]
use core::ptr::{self, NonNull};
-#[cfg(not(CONFIG_CPUMASK_OFFSTACK))]
-use core::mem::MaybeUninit;
-
use core::ops::{Deref, DerefMut};
/// A CPU Mask.
@@ -30,7 +27,7 @@ use core::ops::{Deref, DerefMut};
/// The callers must ensure that the `struct cpumask` is valid for access and
/// remains valid for the lifetime of the returned reference.
///
-/// ## Examples
+/// # Examples
///
/// The following example demonstrates how to update a [`Cpumask`].
///
@@ -175,7 +172,7 @@ impl Cpumask {
/// The callers must ensure that the `struct cpumask_var_t` is valid for access and remains valid
/// for the lifetime of [`CpumaskVar`].
///
-/// ## Examples
+/// # Examples
///
/// The following example demonstrates how to create and update a [`CpumaskVar`].
///
@@ -215,6 +212,7 @@ impl Cpumask {
/// }
/// assert_eq!(mask2.weight(), count);
/// ```
+#[repr(transparent)]
pub struct CpumaskVar {
#[cfg(CONFIG_CPUMASK_OFFSTACK)]
ptr: NonNull<Cpumask>,
@@ -239,10 +237,7 @@ impl CpumaskVar {
},
#[cfg(not(CONFIG_CPUMASK_OFFSTACK))]
- // SAFETY: FFI type is valid to be zero-initialized.
- //
- // INVARIANT: The associated memory is freed when the `CpumaskVar` goes out of scope.
- mask: unsafe { core::mem::zeroed() },
+ mask: Cpumask(Opaque::zeroed()),
})
}
@@ -266,10 +261,7 @@ impl CpumaskVar {
NonNull::new(ptr.cast()).ok_or(AllocError)?
},
#[cfg(not(CONFIG_CPUMASK_OFFSTACK))]
- // SAFETY: Guaranteed by the safety requirements of the function.
- //
- // INVARIANT: The associated memory is freed when the `CpumaskVar` goes out of scope.
- mask: unsafe { MaybeUninit::uninit().assume_init() },
+ mask: Cpumask(Opaque::uninit()),
})
}
@@ -279,7 +271,7 @@ impl CpumaskVar {
///
/// The caller must ensure that `ptr` is valid for writing and remains valid for the lifetime
/// of the returned reference.
- pub unsafe fn as_mut_ref<'a>(ptr: *mut bindings::cpumask_var_t) -> &'a mut Self {
+ pub unsafe fn from_raw_mut<'a>(ptr: *mut bindings::cpumask_var_t) -> &'a mut Self {
// SAFETY: Guaranteed by the safety requirements of the function.
//
// INVARIANT: The caller ensures that `ptr` is valid for writing and remains valid for the
@@ -293,7 +285,7 @@ impl CpumaskVar {
///
/// The caller must ensure that `ptr` is valid for reading and remains valid for the lifetime
/// of the returned reference.
- pub unsafe fn as_ref<'a>(ptr: *const bindings::cpumask_var_t) -> &'a Self {
+ pub unsafe fn from_raw<'a>(ptr: *const bindings::cpumask_var_t) -> &'a Self {
// SAFETY: Guaranteed by the safety requirements of the function.
//
// INVARIANT: The caller ensures that `ptr` is valid for reading and remains valid for the
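
The `as_ref`/`as_mut_ref` to `from_raw`/`from_raw_mut` renames align `CpumaskVar` with the usual Rust-for-Linux convention: `from_raw*` names a constructor that reinterprets a caller-supplied raw pointer as a reference. A generic sketch of the convention (the `Wrapper` type is illustrative):

```rust
#[repr(transparent)]
struct Wrapper(u32);

impl Wrapper {
    /// # Safety
    ///
    /// `ptr` must be valid for reads and remain valid for `'a`.
    unsafe fn from_raw<'a>(ptr: *const u32) -> &'a Self {
        // SAFETY: layout matches thanks to `#[repr(transparent)]`, and the
        // caller guarantees validity for `'a`.
        unsafe { &*ptr.cast::<Self>() }
    }
}
```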
diff --git a/rust/kernel/cred.rs b/rust/kernel/cred.rs
index 2599f01e8b28..ffa156b9df37 100644
--- a/rust/kernel/cred.rs
+++ b/rust/kernel/cred.rs
@@ -8,11 +8,7 @@
//!
//! Reference: <https://www.kernel.org/doc/html/latest/security/credentials.html>
-use crate::{
- bindings,
- task::Kuid,
- types::{AlwaysRefCounted, Opaque},
-};
+use crate::{bindings, sync::aref::AlwaysRefCounted, task::Kuid, types::Opaque};
/// Wraps the kernel's `struct cred`.
///
@@ -54,6 +50,12 @@ impl Credential {
unsafe { &*ptr.cast() }
}
+ /// Returns a raw pointer to the inner credential.
+ #[inline]
+ pub fn as_ptr(&self) -> *const bindings::cred {
+ self.0.get()
+ }
+
/// Get the id for this security context.
#[inline]
pub fn get_secid(&self) -> u32 {
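
The new `Credential::as_ptr` follows the standard accessor pattern for `Opaque`-based wrappers: hand the raw pointer back out for C interop without asserting anything about the pointee's state. A stand-alone sketch with stand-in types (not the kernel's real definitions):

```rust
use core::cell::UnsafeCell;

#[repr(transparent)]
struct Opaque<T>(UnsafeCell<T>); // stand-in for `kernel::types::Opaque`

impl<T> Opaque<T> {
    fn get(&self) -> *mut T {
        self.0.get()
    }
}

#[repr(transparent)]
struct Credential(Opaque<[u8; 0]>); // `[u8; 0]` stands in for `bindings::cred`

impl Credential {
    fn as_ptr(&self) -> *const [u8; 0] {
        // `UnsafeCell::get` yields the raw pointer; no reference is formed.
        self.0.get()
    }
}
```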
diff --git a/rust/kernel/debugfs.rs b/rust/kernel/debugfs.rs
new file mode 100644
index 000000000000..381c23b3dd83
--- /dev/null
+++ b/rust/kernel/debugfs.rs
@@ -0,0 +1,594 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2025 Google LLC.
+
+//! DebugFS Abstraction
+//!
+//! C header: [`include/linux/debugfs.h`](srctree/include/linux/debugfs.h)
+
+// When DebugFS is disabled, many parameters are dead. Linting for this isn't helpful.
+#![cfg_attr(not(CONFIG_DEBUG_FS), allow(unused_variables))]
+
+use crate::prelude::*;
+use crate::str::CStr;
+#[cfg(CONFIG_DEBUG_FS)]
+use crate::sync::Arc;
+use crate::uaccess::UserSliceReader;
+use core::fmt;
+use core::marker::PhantomData;
+use core::marker::PhantomPinned;
+#[cfg(CONFIG_DEBUG_FS)]
+use core::mem::ManuallyDrop;
+use core::ops::Deref;
+
+mod traits;
+pub use traits::{Reader, Writer};
+
+mod callback_adapters;
+use callback_adapters::{FormatAdapter, NoWriter, WritableAdapter};
+mod file_ops;
+use file_ops::{FileOps, ReadFile, ReadWriteFile, WriteFile};
+#[cfg(CONFIG_DEBUG_FS)]
+mod entry;
+#[cfg(CONFIG_DEBUG_FS)]
+use entry::Entry;
+
+/// Owning handle to a DebugFS directory.
+///
+/// The directory in the filesystem represented by [`Dir`] will be removed when the handle has been
+/// dropped *and* all children have been removed.
+// If we have a parent, we hold a reference to it in the `Entry`. This prevents the `dentry`
+// we point to from being cleaned up if our parent `Dir`/`Entry` is dropped before us.
+//
+// The `None` option indicates that the `Arc` could not be allocated, so our children would not be
+// able to refer to us. In this case, we need to silently fail. All future child directories/files
+// will silently fail as well.
+#[derive(Clone)]
+pub struct Dir(#[cfg(CONFIG_DEBUG_FS)] Option<Arc<Entry<'static>>>);
+
+impl Dir {
+ /// Creates a new directory in DebugFS. If `parent` is [`None`], it will be created at the root.
+ fn create(name: &CStr, parent: Option<&Dir>) -> Self {
+ #[cfg(CONFIG_DEBUG_FS)]
+ {
+ let parent_entry = match parent {
+ // If the parent couldn't be allocated, just early-return
+ Some(Dir(None)) => return Self(None),
+ Some(Dir(Some(entry))) => Some(entry.clone()),
+ None => None,
+ };
+ Self(
+ // If Arc creation fails, the `Entry` will be dropped, so the directory will be
+ // cleaned up.
+ Arc::new(Entry::dynamic_dir(name, parent_entry), GFP_KERNEL).ok(),
+ )
+ }
+ #[cfg(not(CONFIG_DEBUG_FS))]
+ Self()
+ }
+
+ /// Creates a DebugFS file which will own the data produced by the initializer provided in
+ /// `data`.
+ fn create_file<'a, T, E: 'a>(
+ &'a self,
+ name: &'a CStr,
+ data: impl PinInit<T, E> + 'a,
+ file_ops: &'static FileOps<T>,
+ ) -> impl PinInit<File<T>, E> + 'a
+ where
+ T: Sync + 'static,
+ {
+ let scope = Scope::<T>::new(data, move |data| {
+ #[cfg(CONFIG_DEBUG_FS)]
+ if let Some(parent) = &self.0 {
+ // SAFETY: Because data derives from a scope, and our entry will be dropped before
+ // the data is dropped, it is guaranteed to outlive the entry we return.
+ unsafe { Entry::dynamic_file(name, parent.clone(), data, file_ops) }
+ } else {
+ Entry::empty()
+ }
+ });
+ try_pin_init! {
+ File {
+ scope <- scope
+ } ? E
+ }
+ }
+
+ /// Creates a new directory in DebugFS at the root.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use kernel::c_str;
+ /// # use kernel::debugfs::Dir;
+ /// let debugfs = Dir::new(c_str!("parent"));
+ /// ```
+ pub fn new(name: &CStr) -> Self {
+ Dir::create(name, None)
+ }
+
+ /// Creates a subdirectory within this directory.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use kernel::c_str;
+ /// # use kernel::debugfs::Dir;
+ /// let parent = Dir::new(c_str!("parent"));
+ /// let child = parent.subdir(c_str!("child"));
+ /// ```
+ pub fn subdir(&self, name: &CStr) -> Self {
+ Dir::create(name, Some(self))
+ }
+
+ /// Creates a read-only file in this directory.
+ ///
+ /// The file's contents are produced by invoking [`Writer::write`] on the value initialized by
+ /// `data`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use kernel::c_str;
+ /// # use kernel::debugfs::Dir;
+ /// # use kernel::prelude::*;
+ /// # let dir = Dir::new(c_str!("my_debugfs_dir"));
+ /// let file = KBox::pin_init(dir.read_only_file(c_str!("foo"), 200), GFP_KERNEL)?;
+ /// // "my_debugfs_dir/foo" now contains the number 200.
+ /// // The file is removed when `file` is dropped.
+ /// # Ok::<(), Error>(())
+ /// ```
+ pub fn read_only_file<'a, T, E: 'a>(
+ &'a self,
+ name: &'a CStr,
+ data: impl PinInit<T, E> + 'a,
+ ) -> impl PinInit<File<T>, E> + 'a
+ where
+ T: Writer + Send + Sync + 'static,
+ {
+ let file_ops = &<T as ReadFile<_>>::FILE_OPS;
+ self.create_file(name, data, file_ops)
+ }
+
+ /// Creates a read-only file in this directory, with contents from a callback.
+ ///
+ /// `f` must be a function item or a non-capturing closure.
+ /// This is statically asserted and not a safety requirement.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use core::sync::atomic::{AtomicU32, Ordering};
+ /// # use kernel::c_str;
+ /// # use kernel::debugfs::Dir;
+ /// # use kernel::prelude::*;
+ /// # let dir = Dir::new(c_str!("foo"));
+ /// let file = KBox::pin_init(
+ /// dir.read_callback_file(c_str!("bar"),
+ /// AtomicU32::new(3),
+ /// &|val, f| {
+ /// let out = val.load(Ordering::Relaxed);
+ /// writeln!(f, "{out:#010x}")
+ /// }),
+ /// GFP_KERNEL)?;
+ /// // Reading "foo/bar" will show "0x00000003".
+ /// file.store(10, Ordering::Relaxed);
+ /// // Reading "foo/bar" will now show "0x0000000a".
+ /// # Ok::<(), Error>(())
+ /// ```
+ pub fn read_callback_file<'a, T, E: 'a, F>(
+ &'a self,
+ name: &'a CStr,
+ data: impl PinInit<T, E> + 'a,
+ _f: &'static F,
+ ) -> impl PinInit<File<T>, E> + 'a
+ where
+ T: Send + Sync + 'static,
+ F: Fn(&T, &mut fmt::Formatter<'_>) -> fmt::Result + Send + Sync,
+ {
+ let file_ops = <FormatAdapter<T, F>>::FILE_OPS.adapt();
+ self.create_file(name, data, file_ops)
+ }
+
+ /// Creates a read-write file in this directory.
+ ///
+ /// Reading the file uses the [`Writer`] implementation.
+ /// Writing to the file uses the [`Reader`] implementation.
+ pub fn read_write_file<'a, T, E: 'a>(
+ &'a self,
+ name: &'a CStr,
+ data: impl PinInit<T, E> + 'a,
+ ) -> impl PinInit<File<T>, E> + 'a
+ where
+ T: Writer + Reader + Send + Sync + 'static,
+ {
+ let file_ops = &<T as ReadWriteFile<_>>::FILE_OPS;
+ self.create_file(name, data, file_ops)
+ }
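
A hypothetical usage sketch for `read_write_file`, modeled on the doctests earlier in this file (the module context and directory name are assumptions): an `AtomicU32` already satisfies both bounds, since `Writer` is derived from `Debug` and `Reader` comes from the atomic implementations in `debugfs::traits`.

```rust
use core::sync::atomic::AtomicU32;
use kernel::c_str;
use kernel::debugfs::Dir;
use kernel::prelude::*;

fn demo() -> Result {
    let dir = Dir::new(c_str!("demo"));
    let file = KBox::pin_init(
        dir.read_write_file(c_str!("count"), AtomicU32::new(3)),
        GFP_KERNEL,
    )?;
    // "demo/count" is now readable and writable; both the file and the
    // directory are removed when `file` and `dir` are dropped.
    let _ = file;
    Ok(())
}
```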
+
+ /// Creates a read-write file in this directory, with logic from callbacks.
+ ///
+ /// Reading from the file is handled by `f`. Writing to the file is handled by `w`.
+ ///
+ /// `f` and `w` must be function items or non-capturing closures.
+ /// This is statically asserted and not a safety requirement.
+ pub fn read_write_callback_file<'a, T, E: 'a, F, W>(
+ &'a self,
+ name: &'a CStr,
+ data: impl PinInit<T, E> + 'a,
+ _f: &'static F,
+ _w: &'static W,
+ ) -> impl PinInit<File<T>, E> + 'a
+ where
+ T: Send + Sync + 'static,
+ F: Fn(&T, &mut fmt::Formatter<'_>) -> fmt::Result + Send + Sync,
+ W: Fn(&T, &mut UserSliceReader) -> Result + Send + Sync,
+ {
+ let file_ops =
+ <WritableAdapter<FormatAdapter<T, F>, W> as file_ops::ReadWriteFile<_>>::FILE_OPS
+ .adapt()
+ .adapt();
+ self.create_file(name, data, file_ops)
+ }
+
+ /// Creates a write-only file in this directory.
+ ///
+ /// The file owns its backing data. Writing to the file uses the [`Reader`]
+ /// implementation.
+ ///
+ /// The file is removed when the returned [`File`] is dropped.
+ pub fn write_only_file<'a, T, E: 'a>(
+ &'a self,
+ name: &'a CStr,
+ data: impl PinInit<T, E> + 'a,
+ ) -> impl PinInit<File<T>, E> + 'a
+ where
+ T: Reader + Send + Sync + 'static,
+ {
+ self.create_file(name, data, &T::FILE_OPS)
+ }
+
+ /// Creates a write-only file in this directory, with write logic from a callback.
+ ///
+ /// `w` must be a function item or a non-capturing closure.
+ /// This is statically asserted and not a safety requirement.
+ pub fn write_callback_file<'a, T, E: 'a, W>(
+ &'a self,
+ name: &'a CStr,
+ data: impl PinInit<T, E> + 'a,
+ _w: &'static W,
+ ) -> impl PinInit<File<T>, E> + 'a
+ where
+ T: Send + Sync + 'static,
+ W: Fn(&T, &mut UserSliceReader) -> Result + Send + Sync,
+ {
+ let file_ops = <WritableAdapter<NoWriter<T>, W> as WriteFile<_>>::FILE_OPS
+ .adapt()
+ .adapt();
+ self.create_file(name, data, file_ops)
+ }
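
A hypothetical sketch of the write-only callback variant (directory handle and file name are assumptions): writes from userspace trigger the callback, and reads are refused because the resulting file mode carries no read bits.

```rust
use core::sync::atomic::{AtomicU32, Ordering};
use kernel::c_str;
use kernel::debugfs::Dir;
use kernel::prelude::*;

fn demo(dir: &Dir) -> Result {
    let file = KBox::pin_init(
        dir.write_callback_file(
            c_str!("reset"),
            AtomicU32::new(0),
            &|counter, _reader| {
                // Any write clears the counter; this sketch ignores the
                // bytes that were actually written.
                counter.store(0, Ordering::Relaxed);
                Ok(())
            },
        ),
        GFP_KERNEL,
    )?;
    let _ = file;
    Ok(())
}
```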
+
+ // While this function is safe, it is intentionally not public because it's a bit of a
+ // footgun.
+ //
+ // Unless you also extract the `entry` later and schedule it for `Drop` at the appropriate
+ // time, a `ScopedDir` with a `Dir` parent will never be deleted.
+ fn scoped_dir<'data>(&self, name: &CStr) -> ScopedDir<'data, 'static> {
+ #[cfg(CONFIG_DEBUG_FS)]
+ {
+ let parent_entry = match &self.0 {
+ None => return ScopedDir::empty(),
+ Some(entry) => entry.clone(),
+ };
+ ScopedDir {
+ entry: ManuallyDrop::new(Entry::dynamic_dir(name, Some(parent_entry))),
+ _phantom: PhantomData,
+ }
+ }
+ #[cfg(not(CONFIG_DEBUG_FS))]
+ ScopedDir::empty()
+ }
+
+ /// Creates a new scope, which is a directory associated with some data `T`.
+ ///
+ /// The created directory will be a subdirectory of `self`. The `init` closure is called to
+ /// populate the directory with files and subdirectories. These files can reference the data
+ /// stored in the scope.
+ ///
+ /// The entire directory tree created within the scope will be removed when the returned
+ /// `Scope` handle is dropped.
+ pub fn scope<'a, T: 'a, E: 'a, F>(
+ &'a self,
+ data: impl PinInit<T, E> + 'a,
+ name: &'a CStr,
+ init: F,
+ ) -> impl PinInit<Scope<T>, E> + 'a
+ where
+ F: for<'data, 'dir> FnOnce(&'data T, &'dir ScopedDir<'data, 'dir>) + 'a,
+ {
+ Scope::new(data, |data| {
+ let scoped = self.scoped_dir(name);
+ init(data, &scoped);
+ scoped.into_entry()
+ })
+ }
+}
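
A hypothetical sketch of `Dir::scope` (the `Counters` type is illustrative): files created in the closure borrow from the scoped data, and the whole subtree disappears when the returned `Scope` is dropped.

```rust
use core::sync::atomic::AtomicU32;
use kernel::c_str;
use kernel::debugfs::Dir;
use kernel::prelude::*;

struct Counters {
    rx: AtomicU32,
    tx: AtomicU32,
}

fn demo(parent: &Dir) -> Result {
    let counters = Counters {
        rx: AtomicU32::new(0),
        tx: AtomicU32::new(0),
    };
    let scope = KBox::pin_init(
        parent.scope(counters, c_str!("counters"), |c, dir| {
            dir.read_only_file(c_str!("rx"), &c.rx);
            dir.read_only_file(c_str!("tx"), &c.tx);
        }),
        GFP_KERNEL,
    )?;
    // Dropping `scope` removes "counters/" together with "rx" and "tx".
    let _ = scope;
    Ok(())
}
```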
+
+#[pin_data]
+/// Handle to a DebugFS scope, which ensures that the attached `data` outlives the DebugFS entry
+/// and is never moved.
+///
+/// This is internally used to back [`File`], and used in the API to represent the attachment
+/// of a directory lifetime to a data structure which may be jointly accessed by a number of
+/// different files.
+///
+/// When dropped, a `Scope` will remove all directories and files in the filesystem backed by the
+/// attached data structure prior to releasing the attached data.
+pub struct Scope<T> {
+ // This order is load-bearing for drops - `_entry` must be dropped before `data`.
+ #[cfg(CONFIG_DEBUG_FS)]
+ _entry: Entry<'static>,
+ #[pin]
+ data: T,
+ // Even if `T` is `Unpin`, we still can't allow it to be moved.
+ #[pin]
+ _pin: PhantomPinned,
+}
+
+#[pin_data]
+/// Handle to a DebugFS file, owning its backing data.
+///
+/// When dropped, the DebugFS file will be removed and the attached data will be dropped.
+pub struct File<T> {
+ #[pin]
+ scope: Scope<T>,
+}
+
+#[cfg(not(CONFIG_DEBUG_FS))]
+impl<'b, T: 'b> Scope<T> {
+ fn new<E: 'b, F>(data: impl PinInit<T, E> + 'b, init: F) -> impl PinInit<Self, E> + 'b
+ where
+ F: for<'a> FnOnce(&'a T) + 'b,
+ {
+ try_pin_init! {
+ Self {
+ data <- data,
+ _pin: PhantomPinned
+ } ? E
+ }
+ .pin_chain(|scope| {
+ init(&scope.data);
+ Ok(())
+ })
+ }
+}
+
+#[cfg(CONFIG_DEBUG_FS)]
+impl<'b, T: 'b> Scope<T> {
+ fn entry_mut(self: Pin<&mut Self>) -> &mut Entry<'static> {
+ // SAFETY: _entry is not structurally pinned.
+ unsafe { &mut Pin::into_inner_unchecked(self)._entry }
+ }
+
+ fn new<E: 'b, F>(data: impl PinInit<T, E> + 'b, init: F) -> impl PinInit<Self, E> + 'b
+ where
+ F: for<'a> FnOnce(&'a T) -> Entry<'static> + 'b,
+ {
+ try_pin_init! {
+ Self {
+ _entry: Entry::empty(),
+ data <- data,
+ _pin: PhantomPinned
+ } ? E
+ }
+ .pin_chain(|scope| {
+ *scope.entry_mut() = init(&scope.data);
+ Ok(())
+ })
+ }
+}
+
+impl<'a, T: 'a> Scope<T> {
+ /// Creates a new scope, which is a directory at the root of the debugfs filesystem,
+ /// associated with some data `T`.
+ ///
+ /// The `init` closure is called to populate the directory with files and subdirectories. These
+ /// files can reference the data stored in the scope.
+ ///
+ /// The entire directory tree created within the scope will be removed when the returned
+ /// `Scope` handle is dropped.
+ pub fn dir<E: 'a, F>(
+ data: impl PinInit<T, E> + 'a,
+ name: &'a CStr,
+ init: F,
+ ) -> impl PinInit<Self, E> + 'a
+ where
+ F: for<'data, 'dir> FnOnce(&'data T, &'dir ScopedDir<'data, 'dir>) + 'a,
+ {
+ Scope::new(data, |data| {
+ let scoped = ScopedDir::new(name);
+ init(data, &scoped);
+ scoped.into_entry()
+ })
+ }
+}
+
+impl<T> Deref for Scope<T> {
+ type Target = T;
+ fn deref(&self) -> &T {
+ &self.data
+ }
+}
+
+impl<T> Deref for File<T> {
+ type Target = T;
+ fn deref(&self) -> &T {
+ &self.scope
+ }
+}
+
+/// A handle to a directory which will live at most `'dir`, accessing data that will live for at
+/// least `'data`.
+///
+/// Dropping a [`ScopedDir`] will not delete or clean it up; that is expected to occur through
+/// dropping the [`Scope`] that created it.
+pub struct ScopedDir<'data, 'dir> {
+ #[cfg(CONFIG_DEBUG_FS)]
+ entry: ManuallyDrop<Entry<'dir>>,
+ _phantom: PhantomData<fn(&'data ()) -> &'dir ()>,
+}
+
+impl<'data, 'dir> ScopedDir<'data, 'dir> {
+ /// Creates a subdirectory inside this `ScopedDir`.
+ ///
+ /// The returned directory handle cannot outlive this one.
+ pub fn dir<'dir2>(&'dir2 self, name: &CStr) -> ScopedDir<'data, 'dir2> {
+ #[cfg(not(CONFIG_DEBUG_FS))]
+ let _ = name;
+ ScopedDir {
+ #[cfg(CONFIG_DEBUG_FS)]
+ entry: ManuallyDrop::new(Entry::dir(name, Some(&*self.entry))),
+ _phantom: PhantomData,
+ }
+ }
+
+ fn create_file<T: Sync>(&self, name: &CStr, data: &'data T, vtable: &'static FileOps<T>) {
+ #[cfg(CONFIG_DEBUG_FS)]
+ core::mem::forget(Entry::file(name, &self.entry, data, vtable));
+ }
+
+ /// Creates a read-only file in this directory.
+ ///
+ /// The file's contents are produced by invoking [`Writer::write`].
+ ///
+ /// This function does not produce an owning handle to the file. The created
+ /// file is removed when the [`Scope`] that this directory belongs
+ /// to is dropped.
+ pub fn read_only_file<T: Writer + Send + Sync + 'static>(&self, name: &CStr, data: &'data T) {
+ self.create_file(name, data, &T::FILE_OPS)
+ }
+
+ /// Creates a read-only file in this directory, with contents from a callback.
+ ///
+ /// The file contents are generated by calling `f` with `data`.
+ ///
+ /// `f` must be a function item or a non-capturing closure.
+ /// This is statically asserted and not a safety requirement.
+ ///
+ /// This function does not produce an owning handle to the file. The created
+ /// file is removed when the [`Scope`] that this directory belongs
+ /// to is dropped.
+ pub fn read_callback_file<T, F>(&self, name: &CStr, data: &'data T, _f: &'static F)
+ where
+ T: Send + Sync + 'static,
+ F: Fn(&T, &mut fmt::Formatter<'_>) -> fmt::Result + Send + Sync,
+ {
+ let vtable = <FormatAdapter<T, F> as ReadFile<_>>::FILE_OPS.adapt();
+ self.create_file(name, data, vtable)
+ }
+
+ /// Creates a read-write file in this directory.
+ ///
+ /// Reading the file uses the [`Writer`] implementation on `data`. Writing to the file uses
+ /// the [`Reader`] implementation on `data`.
+ ///
+ /// This function does not produce an owning handle to the file. The created
+ /// file is removed when the [`Scope`] that this directory belongs
+ /// to is dropped.
+ pub fn read_write_file<T: Writer + Reader + Send + Sync + 'static>(
+ &self,
+ name: &CStr,
+ data: &'data T,
+ ) {
+ let vtable = &<T as ReadWriteFile<_>>::FILE_OPS;
+ self.create_file(name, data, vtable)
+ }
+
+ /// Creates a read-write file in this directory, with logic from callbacks.
+ ///
+ /// Reading from the file is handled by `f`. Writing to the file is handled by `w`.
+ ///
+ /// `f` and `w` must be function items or non-capturing closures.
+ /// This is statically asserted and not a safety requirement.
+ ///
+ /// This function does not produce an owning handle to the file. The created
+ /// file is removed when the [`Scope`] that this directory belongs
+ /// to is dropped.
+ pub fn read_write_callback_file<T, F, W>(
+ &self,
+ name: &CStr,
+ data: &'data T,
+ _f: &'static F,
+ _w: &'static W,
+ ) where
+ T: Send + Sync + 'static,
+ F: Fn(&T, &mut fmt::Formatter<'_>) -> fmt::Result + Send + Sync,
+ W: Fn(&T, &mut UserSliceReader) -> Result + Send + Sync,
+ {
+ let vtable = <WritableAdapter<FormatAdapter<T, F>, W> as ReadWriteFile<_>>::FILE_OPS
+ .adapt()
+ .adapt();
+ self.create_file(name, data, vtable)
+ }
+
+ /// Creates a write-only file in this directory.
+ ///
+ /// Writing to the file uses the [`Reader`] implementation on `data`.
+ ///
+ /// This function does not produce an owning handle to the file. The created
+ /// file is removed when the [`Scope`] that this directory belongs
+ /// to is dropped.
+ pub fn write_only_file<T: Reader + Send + Sync + 'static>(&self, name: &CStr, data: &'data T) {
+ let vtable = &<T as WriteFile<_>>::FILE_OPS;
+ self.create_file(name, data, vtable)
+ }
+
+ /// Creates a write-only file in this directory, with write logic from a callback.
+ ///
+ /// Writing to the file is handled by `w`.
+ ///
+ /// `w` must be a function item or a non-capturing closure.
+ /// This is statically asserted and not a safety requirement.
+ ///
+ /// This function does not produce an owning handle to the file. The created
+ /// file is removed when the [`Scope`] that this directory belongs
+ /// to is dropped.
+ pub fn write_only_callback_file<T, W>(&self, name: &CStr, data: &'data T, _w: &'static W)
+ where
+ T: Send + Sync + 'static,
+ W: Fn(&T, &mut UserSliceReader) -> Result + Send + Sync,
+ {
+ let vtable = &<WritableAdapter<NoWriter<T>, W> as WriteFile<_>>::FILE_OPS
+ .adapt()
+ .adapt();
+ self.create_file(name, data, vtable)
+ }
+
+ fn empty() -> Self {
+ ScopedDir {
+ #[cfg(CONFIG_DEBUG_FS)]
+ entry: ManuallyDrop::new(Entry::empty()),
+ _phantom: PhantomData,
+ }
+ }
+ #[cfg(CONFIG_DEBUG_FS)]
+ fn into_entry(self) -> Entry<'dir> {
+ ManuallyDrop::into_inner(self.entry)
+ }
+ #[cfg(not(CONFIG_DEBUG_FS))]
+ fn into_entry(self) {}
+}
+
+impl<'data> ScopedDir<'data, 'static> {
+ // This is safe, but intentionally not exported due to footgun status. A ScopedDir with no
+ // parent will never be released by default, and needs to have its entry extracted and used
+ // somewhere.
+ fn new(name: &CStr) -> ScopedDir<'data, 'static> {
+ ScopedDir {
+ #[cfg(CONFIG_DEBUG_FS)]
+ entry: ManuallyDrop::new(Entry::dir(name, None)),
+ _phantom: PhantomData,
+ }
+ }
+}
diff --git a/rust/kernel/debugfs/callback_adapters.rs b/rust/kernel/debugfs/callback_adapters.rs
new file mode 100644
index 000000000000..6c024230f676
--- /dev/null
+++ b/rust/kernel/debugfs/callback_adapters.rs
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2025 Google LLC.
+
+//! Adapters which allow the user to supply a write or read implementation as a value rather
+//! than a trait implementation. If provided, it will override the trait implementation.
+
+use super::{Reader, Writer};
+use crate::prelude::*;
+use crate::uaccess::UserSliceReader;
+use core::fmt;
+use core::fmt::Formatter;
+use core::marker::PhantomData;
+use core::ops::Deref;
+
+/// # Safety
+///
+/// To implement this trait, it must be safe to cast a `&Self` to a `&Inner`.
+/// It is intended for use in unstacking adapters out of `FileOps` backings.
+pub(crate) unsafe trait Adapter {
+ type Inner;
+}
+
+/// Adapter to implement `Reader` via a callback, with the same representation as its inner
+/// type `D`.
+///
+/// * Layer it on top of `FormatAdapter` if you also want a custom callback for the read side.
+/// * Layer it on top of `NoWriter` to pass through any support present on the underlying type.
+///
+/// # Invariants
+///
+/// If an instance of `WritableAdapter<_, W>` is constructed, `W` is inhabited.
+#[repr(transparent)]
+pub(crate) struct WritableAdapter<D, W> {
+ inner: D,
+ _writer: PhantomData<W>,
+}
+
+// SAFETY: Stripping off the adapter only removes constraints
+unsafe impl<D, W> Adapter for WritableAdapter<D, W> {
+ type Inner = D;
+}
+
+impl<D: Writer, W> Writer for WritableAdapter<D, W> {
+ fn write(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.inner.write(fmt)
+ }
+}
+
+impl<D: Deref, W> Reader for WritableAdapter<D, W>
+where
+ W: Fn(&D::Target, &mut UserSliceReader) -> Result + Send + Sync + 'static,
+{
+ fn read_from_slice(&self, reader: &mut UserSliceReader) -> Result {
+ // SAFETY: WritableAdapter<_, W> can only be constructed if W is inhabited
+ let w: &W = unsafe { materialize_zst() };
+ w(self.inner.deref(), reader)
+ }
+}
+
+/// Adapter to implement `Writer` via a callback, with the same representation as its inner
+/// type `D`.
+///
+/// # Invariants
+///
+/// If an instance of `FormatAdapter<_, F>` is constructed, `F` is inhabited.
+#[repr(transparent)]
+pub(crate) struct FormatAdapter<D, F> {
+ inner: D,
+ _formatter: PhantomData<F>,
+}
+
+impl<D, F> Deref for FormatAdapter<D, F> {
+ type Target = D;
+ fn deref(&self) -> &D {
+ &self.inner
+ }
+}
+
+impl<D, F> Writer for FormatAdapter<D, F>
+where
+ F: Fn(&D, &mut Formatter<'_>) -> fmt::Result + 'static,
+{
+ fn write(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
+ // SAFETY: FormatAdapter<_, F> can only be constructed if F is inhabited
+ let f: &F = unsafe { materialize_zst() };
+ f(&self.inner, fmt)
+ }
+}
+
+// SAFETY: Stripping off the adapter only removes constraints
+unsafe impl<D, F> Adapter for FormatAdapter<D, F> {
+ type Inner = D;
+}
+
+#[repr(transparent)]
+pub(crate) struct NoWriter<D> {
+ inner: D,
+}
+
+// SAFETY: Stripping off the adapter only removes constraints
+unsafe impl<D> Adapter for NoWriter<D> {
+ type Inner = D;
+}
+
+impl<D> Deref for NoWriter<D> {
+ type Target = D;
+ fn deref(&self) -> &D {
+ &self.inner
+ }
+}
+
+/// For an inhabited zero-sized type, produce a `'static` reference to its unique value.
+///
+/// # Safety
+///
+/// The caller asserts that `F` is inhabited.
+unsafe fn materialize_zst<F>() -> &'static F {
+ const { assert!(core::mem::size_of::<F>() == 0) };
+ let zst_dangle: core::ptr::NonNull<F> = core::ptr::NonNull::dangling();
+ // SAFETY: While the pointer is dangling, it is a dangling pointer to a ZST, based on the
+ // assertion above. The type is also inhabited, by the caller's assertion. This means
+ // we can materialize it.
+ unsafe { zst_dangle.as_ref() }
+}
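
A self-contained illustration of why this trick is sound for the adapters above: a non-capturing closure has a unique, zero-sized type, so a `'static` reference to its one value can be conjured from a well-aligned dangling pointer.

```rust
use core::ptr::NonNull;

fn call_zst<F: Fn() -> u32>(_witness: &F) -> u32 {
    const { assert!(core::mem::size_of::<F>() == 0) };
    let dangling: NonNull<F> = NonNull::dangling();
    // SAFETY: `F` is a ZST (asserted above) and inhabited (we were handed
    // a `&F`), so the dangling pointer is as good as any address.
    let f: &F = unsafe { dangling.as_ref() };
    f()
}

fn main() {
    assert_eq!(call_zst(&|| 42), 42);
}
```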
diff --git a/rust/kernel/debugfs/entry.rs b/rust/kernel/debugfs/entry.rs
new file mode 100644
index 000000000000..f99402cd3ba0
--- /dev/null
+++ b/rust/kernel/debugfs/entry.rs
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2025 Google LLC.
+
+use crate::debugfs::file_ops::FileOps;
+use crate::ffi::c_void;
+use crate::str::CStr;
+use crate::sync::Arc;
+use core::marker::PhantomData;
+
+/// Owning handle to a DebugFS entry.
+///
+/// # Invariants
+///
+/// The wrapped pointer will always be `NULL`, an error, or an owned DebugFS `dentry`.
+pub(crate) struct Entry<'a> {
+ entry: *mut bindings::dentry,
+ // If we were created with an owning parent, this is the keep-alive
+ _parent: Option<Arc<Entry<'static>>>,
+ // If we were created with a non-owning parent, this prevents us from outliving it
+ _phantom: PhantomData<&'a ()>,
+}
+
+// SAFETY: [`Entry`] is just a `dentry` under the hood, which the API promises can be transferred
+// between threads.
+unsafe impl Send for Entry<'_> {}
+
+// SAFETY: All the C functions we call on the `dentry` pointer are threadsafe.
+unsafe impl Sync for Entry<'_> {}
+
+impl Entry<'static> {
+ pub(crate) fn dynamic_dir(name: &CStr, parent: Option<Arc<Self>>) -> Self {
+ let parent_ptr = match &parent {
+ Some(entry) => entry.as_ptr(),
+ None => core::ptr::null_mut(),
+ };
+ // SAFETY: The invariants of this function's arguments ensure the safety of this call.
+ // * `name` is a valid C string by the invariants of `&CStr`.
+ // * `parent_ptr` is either `NULL` (if `parent` is `None`), or a pointer to a valid
+ // `dentry` by our invariant. `debugfs_create_dir` handles `NULL` pointers correctly.
+ let entry = unsafe { bindings::debugfs_create_dir(name.as_char_ptr(), parent_ptr) };
+
+ Entry {
+ entry,
+ _parent: parent,
+ _phantom: PhantomData,
+ }
+ }
+
+ /// # Safety
+ ///
+ /// * `data` must outlive the returned `Entry`.
+ pub(crate) unsafe fn dynamic_file<T>(
+ name: &CStr,
+ parent: Arc<Self>,
+ data: &T,
+ file_ops: &'static FileOps<T>,
+ ) -> Self {
+ // SAFETY: The invariants of this function's arguments ensure the safety of this call.
+ // * `name` is a valid C string by the invariants of `&CStr`.
+ // * `parent.as_ptr()` is a pointer to a valid `dentry` by invariant.
+ // * The caller guarantees that `data` will outlive the returned `Entry`.
+ // * The guarantees on `FileOps` assert the vtable will be compatible with the data we have
+ // provided.
+ let entry = unsafe {
+ bindings::debugfs_create_file_full(
+ name.as_char_ptr(),
+ file_ops.mode(),
+ parent.as_ptr(),
+ core::ptr::from_ref(data) as *mut c_void,
+ core::ptr::null(),
+ &**file_ops,
+ )
+ };
+
+ Entry {
+ entry,
+ _parent: Some(parent),
+ _phantom: PhantomData,
+ }
+ }
+}
+
+impl<'a> Entry<'a> {
+ pub(crate) fn dir(name: &CStr, parent: Option<&'a Entry<'_>>) -> Self {
+ let parent_ptr = match &parent {
+ Some(entry) => entry.as_ptr(),
+ None => core::ptr::null_mut(),
+ };
+ // SAFETY: The invariants of this function's arguments ensure the safety of this call.
+ // * `name` is a valid C string by the invariants of `&CStr`.
+ // * `parent_ptr` is either `NULL` (if `parent` is `None`), or a pointer to a valid
+ // `dentry` (because `parent` is a valid reference to an `Entry`). The lifetime `'a`
+ // ensures that the parent outlives this entry.
+ let entry = unsafe { bindings::debugfs_create_dir(name.as_char_ptr(), parent_ptr) };
+
+ Entry {
+ entry,
+ _parent: None,
+ _phantom: PhantomData,
+ }
+ }
+
+ pub(crate) fn file<T>(
+ name: &CStr,
+ parent: &'a Entry<'_>,
+ data: &'a T,
+ file_ops: &FileOps<T>,
+ ) -> Self {
+ // SAFETY: The invariants of this function's arguments ensure the safety of this call.
+ // * `name` is a valid C string by the invariants of `&CStr`.
+ // * `parent.as_ptr()` is a pointer to a valid `dentry` because we have `&'a Entry`.
+ // * `data` is a valid pointer to `T` for lifetime `'a`.
+ // * The returned `Entry` has lifetime `'a`, so it cannot outlive `parent` or `data`.
+ // * The caller guarantees that `vtable` is compatible with `data`.
+ // * The guarantees on `FileOps` assert the vtable will be compatible with the data we have
+ // provided.
+ let entry = unsafe {
+ bindings::debugfs_create_file_full(
+ name.as_char_ptr(),
+ file_ops.mode(),
+ parent.as_ptr(),
+ core::ptr::from_ref(data) as *mut c_void,
+ core::ptr::null(),
+ &**file_ops,
+ )
+ };
+
+ Entry {
+ entry,
+ _parent: None,
+ _phantom: PhantomData,
+ }
+ }
+}
+
+impl Entry<'_> {
+ /// Constructs a placeholder DebugFS [`Entry`].
+ pub(crate) fn empty() -> Self {
+ Self {
+ entry: core::ptr::null_mut(),
+ _parent: None,
+ _phantom: PhantomData,
+ }
+ }
+
+ /// Returns the pointer representation of this DebugFS entry.
+ ///
+ /// # Guarantees
+ ///
+ /// Due to the type invariant, the value returned from this function will always be an error
+ /// code, `NULL`, or a live DebugFS `dentry`. If it is live, it will remain live at least as
+ /// long as this entry lives.
+ pub(crate) fn as_ptr(&self) -> *mut bindings::dentry {
+ self.entry
+ }
+}
+
+impl Drop for Entry<'_> {
+ fn drop(&mut self) {
+ // SAFETY: `debugfs_remove` can take `NULL`, error values, and legal DebugFS dentries.
+ // `as_ptr` guarantees that the pointer is of this form.
+ unsafe { bindings::debugfs_remove(self.as_ptr()) }
+ }
+}
diff --git a/rust/kernel/debugfs/file_ops.rs b/rust/kernel/debugfs/file_ops.rs
new file mode 100644
index 000000000000..50fead17b6f3
--- /dev/null
+++ b/rust/kernel/debugfs/file_ops.rs
@@ -0,0 +1,247 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2025 Google LLC.
+
+use super::{Reader, Writer};
+use crate::debugfs::callback_adapters::Adapter;
+use crate::prelude::*;
+use crate::seq_file::SeqFile;
+use crate::seq_print;
+use crate::uaccess::UserSlice;
+use core::fmt::{Display, Formatter, Result};
+use core::marker::PhantomData;
+
+#[cfg(CONFIG_DEBUG_FS)]
+use core::ops::Deref;
+
+/// # Invariants
+///
+/// `FileOps<T>` will always contain an `operations` which is safe to use for a file backed by
+/// an inode whose private data holds a pointer to a `T` that is safe to convert into a
+/// reference.
+pub(super) struct FileOps<T> {
+ #[cfg(CONFIG_DEBUG_FS)]
+ operations: bindings::file_operations,
+ #[cfg(CONFIG_DEBUG_FS)]
+ mode: u16,
+ _phantom: PhantomData<T>,
+}
+
+impl<T> FileOps<T> {
+ /// # Safety
+ ///
+ /// The caller asserts that the provided `operations` is safe to use for a file whose
+ /// inode has a pointer to `T` in its private data that is safe to convert into a reference.
+ const unsafe fn new(operations: bindings::file_operations, mode: u16) -> Self {
+ Self {
+ #[cfg(CONFIG_DEBUG_FS)]
+ operations,
+ #[cfg(CONFIG_DEBUG_FS)]
+ mode,
+ _phantom: PhantomData,
+ }
+ }
+
+ #[cfg(CONFIG_DEBUG_FS)]
+ pub(crate) const fn mode(&self) -> u16 {
+ self.mode
+ }
+}
+
+impl<T: Adapter> FileOps<T> {
+ pub(super) const fn adapt(&self) -> &FileOps<T::Inner> {
+ // SAFETY: `Adapter` asserts that `T` can be legally cast to `T::Inner`.
+ unsafe { core::mem::transmute(self) }
+ }
+}
+
+#[cfg(CONFIG_DEBUG_FS)]
+impl<T> Deref for FileOps<T> {
+ type Target = bindings::file_operations;
+
+ fn deref(&self) -> &Self::Target {
+ &self.operations
+ }
+}
+
+struct WriterAdapter<T>(T);
+
+impl<'a, T: Writer> Display for WriterAdapter<&'a T> {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ self.0.write(f)
+ }
+}
+
+/// Implements `open` for `file_operations` via `single_open` to fill out a `seq_file`.
+///
+/// # Safety
+///
+/// * `inode`'s private pointer must point to a value of type `T` which will outlive the `inode`
+/// and will not have any unique references alias it during the call.
+/// * `file` must point to a live, not-yet-initialized file object.
+unsafe extern "C" fn writer_open<T: Writer + Sync>(
+ inode: *mut bindings::inode,
+ file: *mut bindings::file,
+) -> c_int {
+ // SAFETY: The caller ensures that `inode` is a valid pointer.
+ let data = unsafe { (*inode).i_private };
+ // SAFETY:
+ // * `file` is acceptable by caller precondition.
+ // * `writer_act` will be called on a `seq_file` with private data set to the third argument,
+ // so we meet its safety requirements.
+ // * The `data` pointer passed in the third argument is a valid `T` pointer that outlives
+ // this call by caller preconditions.
+ unsafe { bindings::single_open(file, Some(writer_act::<T>), data) }
+}
+
+/// Prints private data stashed in a seq_file to that seq file.
+///
+/// # Safety
+///
+/// `seq` must point to a live `seq_file` whose private data is a valid pointer to a `T` which may
+/// not have any unique references alias it during the call.
+unsafe extern "C" fn writer_act<T: Writer + Sync>(
+ seq: *mut bindings::seq_file,
+ _: *mut c_void,
+) -> c_int {
+ // SAFETY: By caller precondition, this pointer is valid pointer to a `T`, and
+ // there are not and will not be any unique references until we are done.
+ let data = unsafe { &*((*seq).private.cast::<T>()) };
+ // SAFETY: By caller precondition, `seq_file` points to a live `seq_file`, so we can lift
+ // it.
+ let seq_file = unsafe { SeqFile::from_raw(seq) };
+ seq_print!(seq_file, "{}", WriterAdapter(data));
+ 0
+}
+
+// Work around lack of generic const items.
+pub(crate) trait ReadFile<T> {
+ const FILE_OPS: FileOps<T>;
+}
+
+impl<T: Writer + Sync> ReadFile<T> for T {
+ const FILE_OPS: FileOps<T> = {
+ let operations = bindings::file_operations {
+ read: Some(bindings::seq_read),
+ llseek: Some(bindings::seq_lseek),
+ release: Some(bindings::single_release),
+ open: Some(writer_open::<Self>),
+ // SAFETY: `file_operations` supports zeroes in all fields.
+ ..unsafe { core::mem::zeroed() }
+ };
+ // SAFETY: `operations` is all stock `seq_file` implementations except for `writer_open`.
+ // `open`'s only requirement beyond what is provided to all open functions is that the
+ // inode's data pointer must point to a `T` that will outlive it, which matches the
+ // `FileOps` requirements.
+ unsafe { FileOps::new(operations, 0o400) }
+ };
+}
+
+fn read<T: Reader + Sync>(data: &T, buf: *const c_char, count: usize) -> isize {
+ let mut reader = UserSlice::new(UserPtr::from_ptr(buf as *mut c_void), count).reader();
+
+ if let Err(e) = data.read_from_slice(&mut reader) {
+ return e.to_errno() as isize;
+ }
+
+ count as isize
+}
+
+/// # Safety
+///
+/// `file` must be a valid pointer to a `file` struct.
+/// The `private_data` of the file must contain a valid pointer to a `seq_file` whose
+/// `private` data in turn points to a `T` that implements `Reader`.
+/// `buf` must be a valid user-space buffer.
+pub(crate) unsafe extern "C" fn write<T: Reader + Sync>(
+ file: *mut bindings::file,
+ buf: *const c_char,
+ count: usize,
+ _ppos: *mut bindings::loff_t,
+) -> isize {
+ // SAFETY: The file was opened with `single_open`, which sets `private_data` to a `seq_file`.
+ let seq = unsafe { &mut *((*file).private_data.cast::<bindings::seq_file>()) };
+ // SAFETY: By caller precondition, this pointer is live and points to a value of type `T`.
+ let data = unsafe { &*(seq.private as *const T) };
+ read(data, buf, count)
+}
+
+// A trait to get the file operations for a type.
+pub(crate) trait ReadWriteFile<T> {
+ const FILE_OPS: FileOps<T>;
+}
+
+impl<T: Writer + Reader + Sync> ReadWriteFile<T> for T {
+ const FILE_OPS: FileOps<T> = {
+ let operations = bindings::file_operations {
+ open: Some(writer_open::<T>),
+ read: Some(bindings::seq_read),
+ write: Some(write::<T>),
+ llseek: Some(bindings::seq_lseek),
+ release: Some(bindings::single_release),
+ // SAFETY: `file_operations` supports zeroes in all fields.
+ ..unsafe { core::mem::zeroed() }
+ };
+ // SAFETY: `operations` is all stock `seq_file` implementations except for `writer_open`
+ // and `write`.
+ // `writer_open`'s only requirement beyond what is provided to all open functions is that
+ // the inode's data pointer must point to a `T` that will outlive it, which matches the
+ // `FileOps` requirements.
+ // `write` only requires that the file's private data pointer points to `seq_file`
+ // which points to a `T` that will outlive it, which matches what `writer_open`
+ // provides.
+ unsafe { FileOps::new(operations, 0o600) }
+ };
+}
+
+/// # Safety
+///
+/// `inode` must be a valid pointer to an `inode` struct.
+/// `file` must be a valid pointer to a `file` struct.
+unsafe extern "C" fn write_only_open(
+ inode: *mut bindings::inode,
+ file: *mut bindings::file,
+) -> c_int {
+ // SAFETY: The caller ensures that `inode` and `file` are valid pointers.
+ unsafe { (*file).private_data = (*inode).i_private };
+ 0
+}
+
+/// # Safety
+///
+/// * `file` must be a valid pointer to a `file` struct.
+/// * The `private_data` of the file must contain a valid pointer to a `T` that implements
+/// `Reader`.
+/// * `buf` must be a valid user-space buffer.
+pub(crate) unsafe extern "C" fn write_only_write<T: Reader + Sync>(
+ file: *mut bindings::file,
+ buf: *const c_char,
+ count: usize,
+ _ppos: *mut bindings::loff_t,
+) -> isize {
+ // SAFETY: The caller ensures that `file` is a valid pointer and that `private_data` holds a
+ // valid pointer to `T`.
+ let data = unsafe { &*((*file).private_data as *const T) };
+ read(data, buf, count)
+}
+
+pub(crate) trait WriteFile<T> {
+ const FILE_OPS: FileOps<T>;
+}
+
+impl<T: Reader + Sync> WriteFile<T> for T {
+ const FILE_OPS: FileOps<T> = {
+ let operations = bindings::file_operations {
+ open: Some(write_only_open),
+ write: Some(write_only_write::<T>),
+ llseek: Some(bindings::noop_llseek),
+ // SAFETY: `file_operations` supports zeroes in all fields.
+ ..unsafe { core::mem::zeroed() }
+ };
+ // SAFETY:
+ // * `write_only_open` populates the file private data with the inode private data
+ // * `write_only_write`'s only requirement is that the private data of the file point to
+ // a `T` and be legal to convert to a shared reference, which `write_only_open`
+ // satisfies.
+ unsafe { FileOps::new(operations, 0o200) }
+ };
+}
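
The octal modes passed to `FileOps::new` above are plain Unix permission bits restricted to the owner; spelled out for reference:

```rust
const MODE_READ_ONLY: u16 = 0o400; // r-- --- ---  (seq_file read path only)
const MODE_READ_WRITE: u16 = 0o600; // rw- --- ---  (read and write paths)
const MODE_WRITE_ONLY: u16 = 0o200; // -w- --- ---  (write path only)
```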
diff --git a/rust/kernel/debugfs/traits.rs b/rust/kernel/debugfs/traits.rs
new file mode 100644
index 000000000000..ab009eb254b3
--- /dev/null
+++ b/rust/kernel/debugfs/traits.rs
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2025 Google LLC.
+
+//! Traits for rendering or updating values exported to DebugFS.
+
+use crate::prelude::*;
+use crate::sync::Mutex;
+use crate::uaccess::UserSliceReader;
+use core::fmt::{self, Debug, Formatter};
+use core::str::FromStr;
+use core::sync::atomic::{
+ AtomicI16, AtomicI32, AtomicI64, AtomicI8, AtomicIsize, AtomicU16, AtomicU32, AtomicU64,
+ AtomicU8, AtomicUsize, Ordering,
+};
+
+/// A trait for types that can be written into a string.
+///
+/// This works very similarly to `Debug`, and is automatically implemented if `Debug` is
+/// implemented for a type. It is also implemented for any writable type inside a `Mutex`.
+///
+/// The derived implementation of `Debug` [may
+/// change](https://doc.rust-lang.org/std/fmt/trait.Debug.html#stability)
+/// between Rust versions, so if stability is key for your use case, please implement `Writer`
+/// explicitly instead.
+pub trait Writer {
+ /// Formats the value using the given formatter.
+ fn write(&self, f: &mut Formatter<'_>) -> fmt::Result;
+}
+
+impl<T: Writer> Writer for Mutex<T> {
+ fn write(&self, f: &mut Formatter<'_>) -> fmt::Result {
+ self.lock().write(f)
+ }
+}
+
+impl<T: Debug> Writer for T {
+ fn write(&self, f: &mut Formatter<'_>) -> fmt::Result {
+ writeln!(f, "{self:?}")
+ }
+}
+
+/// A trait for types that can be updated from a user slice.
+///
+/// This works similarly to `FromStr`, but operates on a `UserSliceReader` rather than a `&str`.
+///
+/// It is automatically implemented for all atomic integers, or any type that implements `FromStr`
+/// wrapped in a `Mutex`.
+pub trait Reader {
+ /// Updates the value from the given user slice.
+ fn read_from_slice(&self, reader: &mut UserSliceReader) -> Result;
+}
+
+impl<T: FromStr> Reader for Mutex<T> {
+ fn read_from_slice(&self, reader: &mut UserSliceReader) -> Result {
+ let mut buf = [0u8; 128];
+ if reader.len() > buf.len() {
+ return Err(EINVAL);
+ }
+ let n = reader.len();
+ reader.read_slice(&mut buf[..n])?;
+
+ let s = core::str::from_utf8(&buf[..n]).map_err(|_| EINVAL)?;
+ let val = s.trim().parse::<T>().map_err(|_| EINVAL)?;
+ *self.lock() = val;
+ Ok(())
+ }
+}
+
+macro_rules! impl_reader_for_atomic {
+ ($(($atomic_type:ty, $int_type:ty)),*) => {
+ $(
+ impl Reader for $atomic_type {
+ fn read_from_slice(&self, reader: &mut UserSliceReader) -> Result {
+ let mut buf = [0u8; 21]; // Enough for a 64-bit number.
+ if reader.len() > buf.len() {
+ return Err(EINVAL);
+ }
+ let n = reader.len();
+ reader.read_slice(&mut buf[..n])?;
+
+ let s = core::str::from_utf8(&buf[..n]).map_err(|_| EINVAL)?;
+ let val = s.trim().parse::<$int_type>().map_err(|_| EINVAL)?;
+ self.store(val, Ordering::Relaxed);
+ Ok(())
+ }
+ }
+ )*
+ };
+}
+
+impl_reader_for_atomic!(
+ (AtomicI16, i16),
+ (AtomicI32, i32),
+ (AtomicI64, i64),
+ (AtomicI8, i8),
+ (AtomicIsize, isize),
+ (AtomicU16, u16),
+ (AtomicU32, u32),
+ (AtomicU64, u64),
+ (AtomicU8, u8),
+ (AtomicUsize, usize)
+);
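
Because `Writer` is blanket-implemented for `Debug` types and `Reader` covers `Mutex<T: FromStr>`, a `Mutex<u64>` is already a complete read-write backing store. A hypothetical sketch (the directory handle and file name are assumptions):

```rust
use kernel::c_str;
use kernel::debugfs::Dir;
use kernel::new_mutex;
use kernel::prelude::*;

fn demo(dir: &Dir) -> Result {
    // Reads print the value via `Debug`; writes parse it via `FromStr`.
    let file = KBox::pin_init(
        dir.read_write_file(c_str!("threshold"), new_mutex!(0u64)),
        GFP_KERNEL,
    )?;
    let _ = file;
    Ok(())
}
```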
diff --git a/rust/kernel/device.rs b/rust/kernel/device.rs
index dea06b79ecb5..1321e6f0b53c 100644
--- a/rust/kernel/device.rs
+++ b/rust/kernel/device.rs
@@ -5,32 +5,143 @@
//! C header: [`include/linux/device.h`](srctree/include/linux/device.h)
use crate::{
- bindings,
- str::CStr,
- types::{ARef, Opaque},
+ bindings, fmt,
+ sync::aref::ARef,
+ types::{ForeignOwnable, Opaque},
};
-use core::{fmt, marker::PhantomData, ptr};
+use core::{marker::PhantomData, ptr};
#[cfg(CONFIG_PRINTK)]
use crate::c_str;
-/// A reference-counted device.
+pub mod property;
+
+/// The core representation of a device in the kernel's driver model.
+///
+/// This structure represents the Rust abstraction for a C `struct device`. A [`Device`] can
+/// either exist as a temporary reference (see also [`Device::from_raw`]), which is only valid
+/// within a certain scope, or as [`ARef<Device>`], which owns a dedicated reference count.
+///
+/// # Device Types
+///
+/// A [`Device`] can represent either a bus device or a class device.
+///
+/// ## Bus Devices
+///
+/// A bus device is a [`Device`] that is associated with a physical or virtual bus. Examples of
+/// buses include PCI, USB, I2C, and SPI. Devices attached to a bus are registered with a specific
+/// bus type, which facilitates matching devices with appropriate drivers based on IDs or other
+/// identifying information. Bus devices are visible in sysfs under `/sys/bus/<bus-name>/devices/`.
+///
+/// ## Class Devices
+///
+/// A class device is a [`Device`] that is associated with a logical category of functionality
+/// rather than a physical bus. Examples of classes include block devices, network interfaces, sound
+/// cards, and input devices. Class devices are grouped under a common class and exposed to
+/// userspace via entries in `/sys/class/<class-name>/`.
+///
+/// # Device Context
+///
+/// [`Device`] references are generic over a [`DeviceContext`], which represents the type state of
+/// a [`Device`].
+///
+/// As the name indicates, this type state represents the context of the scope the [`Device`]
+/// reference is valid in. For instance, the [`Bound`] context guarantees that the [`Device`] is
+/// bound to a driver for the entire duration of the existence of a [`Device<Bound>`] reference.
+///
+/// Other [`DeviceContext`] types besides [`Bound`] are [`Normal`], [`Core`] and [`CoreInternal`].
///
-/// This structure represents the Rust abstraction for a C `struct device`. This implementation
-/// abstracts the usage of an already existing C `struct device` within Rust code that we get
-/// passed from the C side.
+/// Unless selected otherwise, [`Device`] defaults to the [`Normal`] [`DeviceContext`], which by
+/// itself has no additional requirements.
///
-/// An instance of this abstraction can be obtained temporarily or permanent.
+/// It is always up to the caller of [`Device::from_raw`] to select the correct [`DeviceContext`]
+/// type for the corresponding scope the [`Device`] reference is created in.
///
-/// A temporary one is bound to the lifetime of the C `struct device` pointer used for creation.
-/// A permanent instance is always reference-counted and hence not restricted by any lifetime
-/// boundaries.
+/// All [`DeviceContext`] types other than [`Normal`] are intended to be used with
+/// [bus devices](#bus-devices) only.
///
-/// For subsystems it is recommended to create a permanent instance to wrap into a subsystem
-/// specific device structure (e.g. `pci::Device`). This is useful for passing it to drivers in
-/// `T::probe()`, such that a driver can store the `ARef<Device>` (equivalent to storing a
-/// `struct device` pointer in a C driver) for arbitrary purposes, e.g. allocating DMA coherent
-/// memory.
+/// # Implementing Bus Devices
+///
+/// This section provides a guideline to implement bus specific devices, such as [`pci::Device`] or
+/// [`platform::Device`].
+///
+/// A bus specific device should be defined as follows.
+///
+/// ```ignore
+/// #[repr(transparent)]
+/// pub struct Device<Ctx: device::DeviceContext = device::Normal>(
+/// Opaque<bindings::bus_device_type>,
+/// PhantomData<Ctx>,
+/// );
+/// ```
+///
+/// Since devices are reference counted, [`AlwaysRefCounted`] should be implemented for `Device`
+/// (i.e. `Device<Normal>`). Note that [`AlwaysRefCounted`] must not be implemented for any other
+/// [`DeviceContext`], since all other device context types are only valid within a certain scope.
+///
+/// In order to be able to implement the [`DeviceContext`] dereference hierarchy, bus device
+/// implementations should call the [`impl_device_context_deref`] macro as shown below.
+///
+/// ```ignore
+/// // SAFETY: `Device` is a transparent wrapper of a type that doesn't depend on `Device`'s
+/// // generic argument.
+/// kernel::impl_device_context_deref!(unsafe { Device });
+/// ```
+///
+/// In order to convert from any [`Device<Ctx>`] to [`ARef<Device>`], bus devices can use the
+/// following macro call.
+///
+/// ```ignore
+/// kernel::impl_device_context_into_aref!(Device);
+/// ```
+///
+/// Bus devices should also implement the following [`AsRef`] implementation, such that users can
+/// easily derive a generic [`Device`] reference.
+///
+/// ```ignore
+/// impl<Ctx: device::DeviceContext> AsRef<device::Device<Ctx>> for Device<Ctx> {
+/// fn as_ref(&self) -> &device::Device<Ctx> {
+/// ...
+/// }
+/// }
+/// ```
+///
+/// # Implementing Class Devices
+///
+/// Class device implementations require less infrastructure and depend slightly more on the
+/// specific subsystem.
+///
+/// An example implementation for a class device could look like this.
+///
+/// ```ignore
+/// #[repr(C)]
+/// pub struct Device<T: class::Driver> {
+/// dev: Opaque<bindings::class_device_type>,
+/// data: T::Data,
+/// }
+/// ```
+///
+/// This class device uses the sub-classing pattern to embed the driver's private data within the
+/// allocation of the class device. For this to be possible, the class device is generic over the
+/// class specific `Driver` trait implementation.
+///
+/// Just like any device, class devices are reference counted and should hence implement
+/// [`AlwaysRefCounted`] for `Device`.
+///
+/// Class devices should also implement the following [`AsRef`] implementation, such that users can
+/// easily derive a generic [`Device`] reference.
+///
+/// ```ignore
+/// impl<T: class::Driver> AsRef<device::Device> for Device<T> {
+/// fn as_ref(&self) -> &device::Device {
+/// ...
+/// }
+/// }
+/// ```
+///
+/// An example for a class device implementation is
+#[cfg_attr(CONFIG_DRM = "y", doc = "[`drm::Device`](kernel::drm::Device).")]
+#[cfg_attr(not(CONFIG_DRM = "y"), doc = "`drm::Device`.")]
///
/// # Invariants
///
@@ -41,6 +152,11 @@ use crate::c_str;
///
/// `bindings::device::release` is valid to be called from any thread, hence `ARef<Device>` can be
/// dropped from any thread.
+///
+/// [`AlwaysRefCounted`]: kernel::types::AlwaysRefCounted
+/// [`impl_device_context_deref`]: kernel::impl_device_context_deref
+/// [`pci::Device`]: kernel::pci::Device
+/// [`platform::Device`]: kernel::platform::Device
#[repr(transparent)]
pub struct Device<Ctx: DeviceContext = Normal>(Opaque<bindings::device>, PhantomData<Ctx>);
@@ -57,7 +173,73 @@ impl Device {
/// While not officially documented, this should be the case for any `struct device`.
pub unsafe fn get_device(ptr: *mut bindings::device) -> ARef<Self> {
// SAFETY: By the safety requirements ptr is valid
- unsafe { Self::as_ref(ptr) }.into()
+ unsafe { Self::from_raw(ptr) }.into()
+ }
+
+ /// Convert a [`&Device`](Device) into a [`&Device<Bound>`](Device<Bound>).
+ ///
+ /// # Safety
+ ///
+ /// The caller is responsible for ensuring that the returned [`&Device<Bound>`](Device<Bound>)
+ /// only lives as long as it can be guaranteed that the [`Device`] is actually bound.
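+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch, assuming a scope in which the device is known to be bound (e.g. a bus
+ /// callback):
+ ///
+ /// ```ignore
+ /// fn scope_with_bound_device(dev: &Device) -> &Device<Bound> {
+ ///     // SAFETY: Within this scope the device is guaranteed to be bound to a driver.
+ ///     unsafe { dev.as_bound() }
+ /// }
+ /// ```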
+ pub unsafe fn as_bound(&self) -> &Device<Bound> {
+ let ptr = core::ptr::from_ref(self);
+
+ // CAST: By the safety requirements the caller is responsible to guarantee that the
+ // returned reference only lives as long as the device is actually bound.
+ let ptr = ptr.cast();
+
+ // SAFETY:
+ // - `ptr` comes from `from_ref(self)` above, hence it's guaranteed to be valid.
+ // - Any valid `Device` pointer is also a valid pointer for `Device<Bound>`.
+ unsafe { &*ptr }
+ }
+}
+
+impl Device<CoreInternal> {
+ /// Store a pointer to the bound driver's private data.
+ pub fn set_drvdata(&self, data: impl ForeignOwnable) {
+ // SAFETY: By the type invariants, `self.as_raw()` is a valid pointer to a `struct device`.
+ unsafe { bindings::dev_set_drvdata(self.as_raw(), data.into_foreign().cast()) }
+ }
+
+ /// Take ownership of the private data stored in this [`Device`].
+ ///
+ /// # Safety
+ ///
+ /// - Must only be called once after a preceding call to [`Device::set_drvdata`].
+ /// - The type `T` must match the type of the `ForeignOwnable` previously stored by
+ /// [`Device::set_drvdata`].
+ pub unsafe fn drvdata_obtain<T: ForeignOwnable>(&self) -> T {
+ // SAFETY: By the type invariants, `self.as_raw()` is a valid pointer to a `struct device`.
+ let ptr = unsafe { bindings::dev_get_drvdata(self.as_raw()) };
+
+ // SAFETY:
+ // - By the safety requirements of this function, `ptr` comes from a previous call to
+ // `into_foreign()`.
+ // - `dev_get_drvdata()` guarantees to return the same pointer given to `dev_set_drvdata()`
+ // in `into_foreign()`.
+ unsafe { T::from_foreign(ptr.cast()) }
+ }
+
+ /// Borrow the driver's private data bound to this [`Device`].
+ ///
+ /// # Safety
+ ///
+ /// - Must only be called after a preceding call to [`Device::set_drvdata`] and before
+ /// [`Device::drvdata_obtain`].
+ /// - The type `T` must match the type of the `ForeignOwnable` previously stored by
+ /// [`Device::set_drvdata`].
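+ ///
+ /// # Examples
+ ///
+ /// A sketch of the intended lifecycle, assuming driver private data of type `KBox<DriverData>`
+ /// (names illustrative):
+ ///
+ /// ```ignore
+ /// // In `probe()`: hand the private data over to the device.
+ /// dev.set_drvdata(data);
+ ///
+ /// // While the driver is bound: borrow the private data.
+ /// // SAFETY: `set_drvdata()` has been called and `drvdata_obtain()` has not.
+ /// let data = unsafe { dev.drvdata_borrow::<KBox<DriverData>>() };
+ ///
+ /// // In `unbind()`: take back ownership, dropping the private data.
+ /// // SAFETY: `set_drvdata()` has been called and this is the only call to `drvdata_obtain()`.
+ /// let data = unsafe { dev.drvdata_obtain::<KBox<DriverData>>() };
+ /// ```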
+ pub unsafe fn drvdata_borrow<T: ForeignOwnable>(&self) -> T::Borrowed<'_> {
+ // SAFETY: By the type invariants, `self.as_raw()` is a valid pointer to a `struct device`.
+ let ptr = unsafe { bindings::dev_get_drvdata(self.as_raw()) };
+
+ // SAFETY:
+ // - By the safety requirements of this function, `ptr` comes from a previous call to
+ // `into_foreign()`.
+ // - `dev_get_drvdata()` guarantees to return the same pointer given to `dev_set_drvdata()`
+ // in `into_foreign()`.
+ unsafe { T::borrow(ptr.cast()) }
}
}
@@ -82,7 +264,7 @@ impl<Ctx: DeviceContext> Device<Ctx> {
// - Since `parent` is not NULL, it must be a valid pointer to a `struct device`.
// - `parent` is valid for the lifetime of `self`, since a `struct device` holds a
// reference count of its parent.
- Some(unsafe { Self::as_ref(parent) })
+ Some(unsafe { Self::from_raw(parent) })
}
}
@@ -94,7 +276,7 @@ impl<Ctx: DeviceContext> Device<Ctx> {
/// i.e. it must be ensured that the reference count of the C `struct device` `ptr` points to
/// can't drop to zero, for the duration of this function call and the entire duration when the
/// returned reference exists.
- pub unsafe fn as_ref<'a>(ptr: *mut bindings::device) -> &'a Self {
+ pub unsafe fn from_raw<'a>(ptr: *mut bindings::device) -> &'a Self {
// SAFETY: Guaranteed by the safety requirements of the function.
unsafe { &*ptr.cast() }
}
@@ -195,18 +377,27 @@ impl<Ctx: DeviceContext> Device<Ctx> {
#[cfg(CONFIG_PRINTK)]
unsafe {
bindings::_dev_printk(
- klevel as *const _ as *const crate::ffi::c_char,
+ klevel.as_ptr().cast::<crate::ffi::c_char>(),
self.as_raw(),
c_str!("%pA").as_char_ptr(),
- &msg as *const _ as *const crate::ffi::c_void,
+ core::ptr::from_ref(&msg).cast::<crate::ffi::c_void>(),
)
};
}
- /// Checks if property is present or not.
- pub fn property_present(&self, name: &CStr) -> bool {
- // SAFETY: By the invariant of `CStr`, `name` is null-terminated.
- unsafe { bindings::device_property_present(self.as_raw().cast_const(), name.as_char_ptr()) }
+ /// Obtain the [`FwNode`](property::FwNode) corresponding to this [`Device`].
+ pub fn fwnode(&self) -> Option<&property::FwNode> {
+ // SAFETY: `self` is valid.
+ let fwnode_handle = unsafe { bindings::__dev_fwnode(self.as_raw()) };
+ if fwnode_handle.is_null() {
+ return None;
+ }
+ // SAFETY: `fwnode_handle` is valid. Its lifetime is tied to `&self`. We
+ // return a reference instead of an `ARef<FwNode>` because `dev_fwnode()`
+ // doesn't increment the refcount. It is safe to cast from a
+ // `struct fwnode_handle*` to a `*const FwNode` because `FwNode` is
+ // defined as a `#[repr(transparent)]` wrapper around `fwnode_handle`.
+ Some(unsafe { &*fwnode_handle.cast() })
}
}
@@ -216,7 +407,7 @@ kernel::impl_device_context_deref!(unsafe { Device });
kernel::impl_device_context_into_aref!(Device);
// SAFETY: Instances of `Device` are always reference-counted.
-unsafe impl crate::types::AlwaysRefCounted for Device {
+unsafe impl crate::sync::aref::AlwaysRefCounted for Device {
fn inc_ref(&self) {
// SAFETY: The existence of a shared reference guarantees that the refcount is non-zero.
unsafe { bindings::get_device(self.as_raw()) };
@@ -235,24 +426,75 @@ unsafe impl Send for Device {}
// synchronization in `struct device`.
unsafe impl Sync for Device {}
-/// Marker trait for the context of a bus specific device.
+/// Marker trait for the context or scope of a bus specific device.
///
-/// Some functions of a bus specific device should only be called from a certain context, i.e. bus
-/// callbacks, such as `probe()`.
+/// [`DeviceContext`] is a marker trait for types representing the context of a bus specific
+/// [`Device`].
///
-/// This is the marker trait for structures representing the context of a bus specific device.
+/// The specific device context types are: [`CoreInternal`], [`Core`], [`Bound`] and [`Normal`].
+///
+/// [`DeviceContext`] types are hierarchical, which means that there is a strict hierarchy that
+/// defines which [`DeviceContext`] type can be derived from another. For instance, any
+/// [`Device<Core>`] can dereference to a [`Device<Bound>`].
+///
+/// The following enumeration illustrates the dereference hierarchy of [`DeviceContext`] types.
+///
+/// - [`CoreInternal`] => [`Core`] => [`Bound`] => [`Normal`]
+///
+/// Bus devices can automatically implement the dereference hierarchy by using
+/// [`impl_device_context_deref`].
+///
+/// Note that the guarantee for a [`Device`] reference to have a certain [`DeviceContext`] comes
+/// from the specific scope the [`Device`] reference is valid in.
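+///
+/// For instance, with the hierarchy implemented, a [`Device<Core>`] reference can be passed
+/// wherever a [`Device<Bound>`] is expected (illustrative sketch):
+///
+/// ```ignore
+/// fn needs_bound(dev: &Device<Bound>) { /* ... */ }
+///
+/// fn bus_callback(dev: &Device<Core>) {
+///     // `Device<Core>` dereferences to `Device<Bound>`.
+///     needs_bound(dev);
+/// }
+/// ```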
+///
+/// [`impl_device_context_deref`]: kernel::impl_device_context_deref
pub trait DeviceContext: private::Sealed {}
-/// The [`Normal`] context is the context of a bus specific device when it is not an argument of
-/// any bus callback.
+/// The [`Normal`] context is the default [`DeviceContext`] of any [`Device`].
+///
+/// The normal context does not indicate any specific context. Any `Device<Ctx>` is also a valid
+/// [`Device<Normal>`]. It is the only [`DeviceContext`] for which it is valid to implement
+/// [`AlwaysRefCounted`].
+///
+/// [`AlwaysRefCounted`]: kernel::types::AlwaysRefCounted
pub struct Normal;
-/// The [`Core`] context is the context of a bus specific device when it is supplied as argument of
-/// any of the bus callbacks, such as `probe()`.
+/// The [`Core`] context is the context of a bus specific device when it appears as argument of
+/// any bus specific callback, such as `probe()`.
+///
+/// The core context indicates that the [`Device<Core>`] reference's scope is limited to the bus
+/// callback it appears in. It is intended to be used for synchronization purposes. Bus device
+/// implementations can implement methods for [`Device<Core>`], such that they can only be called
+/// from bus callbacks.
pub struct Core;
-/// The [`Bound`] context is the context of a bus specific device reference when it is guaranteed to
-/// be bound for the duration of its lifetime.
+/// Semantically the same as [`Core`], but reserved for internal usage of the corresponding bus
+/// abstraction.
+///
+/// The internal core context is intended to be used in exactly the same way as the [`Core`]
+/// context, with the difference that this [`DeviceContext`] is internal to the corresponding bus
+/// abstraction.
+///
+/// This context mainly exists to let bus abstractions share generic [`Device`] infrastructure
+/// that should only be called from bus callbacks, without making it accessible to drivers.
+pub struct CoreInternal;
+
+/// The [`Bound`] context is the [`DeviceContext`] of a bus specific device when it is guaranteed to
+/// be bound to a driver.
+///
+/// The bound context indicates that for the entire duration of the lifetime of a [`Device<Bound>`]
+/// reference, the [`Device`] is guaranteed to be bound to a driver.
+///
+/// Some APIs, such as [`dma::CoherentAllocation`] or [`Devres`] rely on the [`Device`] to be bound,
+/// which can be proven with the [`Bound`] device context.
+///
+/// Any abstraction that can guarantee a scope where the corresponding bus device is bound should
+/// provide a [`Device<Bound>`] reference to its users for this scope. This allows users to benefit
+/// from optimizations for accessing device resources; see also [`Devres::access`].
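+///
+/// For instance, an abstraction holding a [`Devres`] container can access the protected data
+/// directly when a bound device reference is available (illustrative sketch):
+///
+/// ```ignore
+/// fn with_bound_device<T: Send>(dev: &Device<Bound>, devres: &Devres<T>) -> Result {
+///     // Direct access; the `Bound` context proves the device has not been unbound, hence the
+///     // data cannot have been revoked yet.
+///     let _data = devres.access(dev)?;
+///     Ok(())
+/// }
+/// ```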
+///
+/// [`Devres`]: kernel::devres::Devres
+/// [`Devres::access`]: kernel::devres::Devres::access
+/// [`dma::CoherentAllocation`]: kernel::dma::CoherentAllocation
pub struct Bound;
mod private {
@@ -260,11 +502,13 @@ mod private {
impl Sealed for super::Bound {}
impl Sealed for super::Core {}
+ impl Sealed for super::CoreInternal {}
impl Sealed for super::Normal {}
}
impl DeviceContext for Bound {}
impl DeviceContext for Core {}
+impl DeviceContext for CoreInternal {}
impl DeviceContext for Normal {}
/// # Safety
@@ -306,6 +550,13 @@ macro_rules! impl_device_context_deref {
// `__impl_device_context_deref!`.
::kernel::__impl_device_context_deref!(unsafe {
$device,
+ $crate::device::CoreInternal => $crate::device::Core
+ });
+
+ // SAFETY: This macro has the exact same safety requirement as
+ // `__impl_device_context_deref!`.
+ ::kernel::__impl_device_context_deref!(unsafe {
+ $device,
$crate::device::Core => $crate::device::Bound
});
@@ -322,7 +573,7 @@ macro_rules! impl_device_context_deref {
#[macro_export]
macro_rules! __impl_device_context_into_aref {
($src:ty, $device:tt) => {
- impl ::core::convert::From<&$device<$src>> for $crate::types::ARef<$device> {
+ impl ::core::convert::From<&$device<$src>> for $crate::sync::aref::ARef<$device> {
fn from(dev: &$device<$src>) -> Self {
(&**dev).into()
}
@@ -335,6 +586,7 @@ macro_rules! __impl_device_context_into_aref {
#[macro_export]
macro_rules! impl_device_context_into_aref {
($device:tt) => {
+ ::kernel::__impl_device_context_into_aref!($crate::device::CoreInternal, $device);
::kernel::__impl_device_context_into_aref!($crate::device::Core, $device);
::kernel::__impl_device_context_into_aref!($crate::device::Bound, $device);
};
@@ -345,7 +597,7 @@ macro_rules! impl_device_context_into_aref {
macro_rules! dev_printk {
($method:ident, $dev:expr, $($f:tt)*) => {
{
- ($dev).$method(::core::format_args!($($f)*));
+ ($dev).$method($crate::prelude::fmt!($($f)*));
}
}
}
diff --git a/rust/kernel/device/property.rs b/rust/kernel/device/property.rs
new file mode 100644
index 000000000000..3a332a8c53a9
--- /dev/null
+++ b/rust/kernel/device/property.rs
@@ -0,0 +1,632 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Unified device property interface.
+//!
+//! C header: [`include/linux/property.h`](srctree/include/linux/property.h)
+
+use core::{mem::MaybeUninit, ptr};
+
+use super::private::Sealed;
+use crate::{
+ alloc::KVec,
+ bindings,
+ error::{to_result, Result},
+ fmt,
+ prelude::*,
+ str::{CStr, CString},
+ types::{ARef, Opaque},
+};
+
+/// A reference-counted `struct fwnode_handle`.
+///
+/// This structure represents the Rust abstraction for a C
+/// `struct fwnode_handle` that already exists on the C side and gets
+/// passed to Rust code.
+///
+/// # Invariants
+///
+/// A `FwNode` instance represents a valid `struct fwnode_handle` created by the
+/// C portion of the kernel.
+///
+/// Instances of this type are always reference-counted, that is, a call to
+/// `fwnode_handle_get` ensures that the allocation remains valid at least until
+/// the matching call to `fwnode_handle_put`.
+#[repr(transparent)]
+pub struct FwNode(Opaque<bindings::fwnode_handle>);
+
+impl FwNode {
+ /// # Safety
+ ///
+ /// Callers must ensure that:
+ /// - The reference count was incremented at least once.
+ /// - They relinquish that increment. That is, if there is only one
+ /// increment, callers must not use the underlying object anymore -- it is
+ /// only safe to do so via the newly created `ARef<FwNode>`.
+ unsafe fn from_raw(raw: *mut bindings::fwnode_handle) -> ARef<Self> {
+ // SAFETY: As per the safety requirements of this function:
+ // - `NonNull::new_unchecked`:
+ // - `raw` is not null.
+ // - `ARef::from_raw`:
+ // - `raw` has an incremented refcount.
+ // - that increment is relinquished, i.e. it won't be decremented
+ // elsewhere.
+ // CAST: It is safe to cast from a `*mut fwnode_handle` to
+ // `*mut FwNode`, because `FwNode` is defined as a
+ // `#[repr(transparent)]` wrapper around `fwnode_handle`.
+ unsafe { ARef::from_raw(ptr::NonNull::new_unchecked(raw.cast())) }
+ }
+
+ /// Obtain the raw `struct fwnode_handle *`.
+ pub(crate) fn as_raw(&self) -> *mut bindings::fwnode_handle {
+ self.0.get()
+ }
+
+ /// Returns `true` if `&self` is an OF node, `false` otherwise.
+ pub fn is_of_node(&self) -> bool {
+ // SAFETY: The type invariant of `Self` guarantees that `self.as_raw()` is a pointer to a
+ // valid `struct fwnode_handle`.
+ unsafe { bindings::is_of_node(self.as_raw()) }
+ }
+
+ /// Returns an object that implements [`Display`](fmt::Display) for
+ /// printing the name of a node.
+ ///
+ /// This is an alternative to the default `Display` implementation, which
+ /// prints the full path.
+ pub fn display_name(&self) -> impl fmt::Display + '_ {
+ struct FwNodeDisplayName<'a>(&'a FwNode);
+
+ impl fmt::Display for FwNodeDisplayName<'_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // SAFETY: `self` is valid by its type invariant.
+ let name = unsafe { bindings::fwnode_get_name(self.0.as_raw()) };
+ if name.is_null() {
+ return Ok(());
+ }
+ // SAFETY:
+ // - `fwnode_get_name` returns null or a valid C string.
+ // - `name` was checked to be non-null.
+ let name = unsafe { CStr::from_char_ptr(name) };
+ fmt::Display::fmt(name, f)
+ }
+ }
+
+ FwNodeDisplayName(self)
+ }
+
+ /// Checks if property is present or not.
+ pub fn property_present(&self, name: &CStr) -> bool {
+ // SAFETY: By the invariant of `CStr`, `name` is null-terminated.
+ unsafe { bindings::fwnode_property_present(self.as_raw().cast_const(), name.as_char_ptr()) }
+ }
+
+ /// Returns the boolean value of firmware property `name`.
+ pub fn property_read_bool(&self, name: &CStr) -> bool {
+ // SAFETY:
+ // - `name` is non-null and null-terminated.
+ // - `self.as_raw()` is valid because `self` is valid.
+ unsafe { bindings::fwnode_property_read_bool(self.as_raw(), name.as_char_ptr()) }
+ }
+
+ /// Returns the index of matching string `match_str` for firmware string
+ /// property `name`.
+ pub fn property_match_string(&self, name: &CStr, match_str: &CStr) -> Result<usize> {
+ // SAFETY:
+ // - `name` and `match_str` are non-null and null-terminated.
+ // - `self.as_raw` is valid because `self` is valid.
+ let ret = unsafe {
+ bindings::fwnode_property_match_string(
+ self.as_raw(),
+ name.as_char_ptr(),
+ match_str.as_char_ptr(),
+ )
+ };
+ to_result(ret)?;
+ Ok(ret as usize)
+ }
+
+ /// Returns the integer array values of firmware property `name` in a [`KVec`].
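+ ///
+ /// # Examples
+ ///
+ /// A sketch reading a `u32` array property, assuming `dev` and `fwnode` are in scope (property
+ /// name illustrative):
+ ///
+ /// ```ignore
+ /// let len = fwnode.property_count_elem::<u32>(c_str!("data"))?;
+ /// let values: KVec<u32> = fwnode
+ ///     .property_read_array_vec(c_str!("data"), len)?
+ ///     .required_by(dev)?;
+ /// ```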
+ pub fn property_read_array_vec<'fwnode, 'name, T: PropertyInt>(
+ &'fwnode self,
+ name: &'name CStr,
+ len: usize,
+ ) -> Result<PropertyGuard<'fwnode, 'name, KVec<T>>> {
+ let mut val: KVec<T> = KVec::with_capacity(len, GFP_KERNEL)?;
+
+ let res = T::read_array_from_fwnode_property(self, name, val.spare_capacity_mut());
+ let res = match res {
+ Ok(_) => {
+ // SAFETY:
+ // - `len` is equal to `val.capacity - val.len`, because
+ // `val.capacity` is `len` and `val.len` is zero.
+ // - All elements within the interval [`0`, `len`) were initialized
+ // by `read_array_from_fwnode_property`.
+ unsafe { val.inc_len(len) };
+ Ok(val)
+ }
+ Err(e) => Err(e),
+ };
+ Ok(PropertyGuard {
+ inner: res,
+ fwnode: self,
+ name,
+ })
+ }
+
+ /// Returns integer array length for firmware property `name`.
+ pub fn property_count_elem<T: PropertyInt>(&self, name: &CStr) -> Result<usize> {
+ T::read_array_len_from_fwnode_property(self, name)
+ }
+
+ /// Returns the value of firmware property `name`.
+ ///
+ /// This method is generic over the type of value to read. The types that
+ /// can be read are strings, integers and arrays of integers.
+ ///
+ /// Reading a [`KVec`] of integers is done with the separate
+ /// method [`Self::property_read_array_vec`], because it takes an
+ /// additional `len` argument.
+ ///
+ /// Reading a boolean is done with the separate method
+ /// [`Self::property_read_bool`], because this operation is infallible.
+ ///
+ /// For more precise documentation about what types can be read, see
+ /// the [implementors of Property][Property#implementors] and [its
+ /// implementations on foreign types][Property#foreign-impls].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use kernel::{c_str, device::{Device, property::FwNode}, str::CString};
+ /// fn examples(dev: &Device) -> Result {
+ /// let fwnode = dev.fwnode().ok_or(ENOENT)?;
+ /// let b: u32 = fwnode.property_read(c_str!("some-number")).required_by(dev)?;
+ /// if let Some(s) = fwnode.property_read::<CString>(c_str!("some-str")).optional() {
+ /// // ...
+ /// }
+ /// Ok(())
+ /// }
+ /// ```
+ pub fn property_read<'fwnode, 'name, T: Property>(
+ &'fwnode self,
+ name: &'name CStr,
+ ) -> PropertyGuard<'fwnode, 'name, T> {
+ PropertyGuard {
+ inner: T::read_from_fwnode_property(self, name),
+ fwnode: self,
+ name,
+ }
+ }
+
+ /// Returns first matching named child node handle.
+ pub fn get_child_by_name(&self, name: &CStr) -> Option<ARef<Self>> {
+ // SAFETY: `self` and `name` are valid by their type invariants.
+ let child =
+ unsafe { bindings::fwnode_get_named_child_node(self.as_raw(), name.as_char_ptr()) };
+ if child.is_null() {
+ return None;
+ }
+ // SAFETY:
+ // - `fwnode_get_named_child_node` returns a pointer with its refcount
+ // incremented.
+ // - That increment is relinquished, i.e. the underlying object is not
+ // used anymore except via the newly created `ARef`.
+ Some(unsafe { Self::from_raw(child) })
+ }
+
+ /// Returns an iterator over a node's children.
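+ ///
+ /// # Examples
+ ///
+ /// A short sketch iterating over all child nodes (property name illustrative):
+ ///
+ /// ```ignore
+ /// for child in fwnode.children() {
+ ///     // `child` is an `ARef<FwNode>` and can be inspected like any other node.
+ ///     let has_status = child.property_present(c_str!("status"));
+ /// }
+ /// ```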
+ pub fn children<'a>(&'a self) -> impl Iterator<Item = ARef<FwNode>> + 'a {
+ let mut prev: Option<ARef<FwNode>> = None;
+
+ core::iter::from_fn(move || {
+ let prev_ptr = match prev.take() {
+ None => ptr::null_mut(),
+ Some(prev) => {
+ // We will pass `prev` to `fwnode_get_next_child_node`,
+ // which decrements its refcount, so we use
+ // `ARef::into_raw` to avoid decrementing the refcount
+ // twice.
+ let prev = ARef::into_raw(prev);
+ prev.as_ptr().cast()
+ }
+ };
+ // SAFETY:
+ // - `self.as_raw()` is valid by its type invariant.
+ // - `prev_ptr` may be null, which is allowed and corresponds to
+ // getting the first child. Otherwise, `prev_ptr` is valid, as it
+ // is the stored return value from the previous invocation.
+ // - `prev_ptr` has its refcount incremented.
+ // - The increment of `prev_ptr` is relinquished, i.e. the
+ // underlying object won't be used anymore.
+ let next = unsafe { bindings::fwnode_get_next_child_node(self.as_raw(), prev_ptr) };
+ if next.is_null() {
+ return None;
+ }
+ // SAFETY:
+ // - `next` is valid because `fwnode_get_next_child_node` returns a
+ // pointer with its refcount incremented.
+ // - That increment is relinquished, i.e. the underlying object
+ // won't be used anymore, except via the newly created
+ // `ARef<Self>`.
+ let next = unsafe { FwNode::from_raw(next) };
+ prev = Some(next.clone());
+ Some(next)
+ })
+ }
+
+ /// Finds a reference with arguments.
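+ ///
+ /// # Examples
+ ///
+ /// A sketch resolving the first reference of a `phys` property, where the number of arguments
+ /// is given by the provider's `#phy-cells` property (property names illustrative):
+ ///
+ /// ```ignore
+ /// let args = fwnode.property_get_reference_args(
+ ///     c_str!("phys"),
+ ///     NArgs::Prop(c_str!("#phy-cells")),
+ ///     0,
+ /// )?;
+ /// let cells: &[u64] = args.as_slice();
+ /// ```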
+ pub fn property_get_reference_args(
+ &self,
+ prop: &CStr,
+ nargs: NArgs<'_>,
+ index: u32,
+ ) -> Result<FwNodeReferenceArgs> {
+ let mut out_args = FwNodeReferenceArgs::default();
+
+ let (nargs_prop, nargs) = match nargs {
+ NArgs::Prop(nargs_prop) => (nargs_prop.as_char_ptr(), 0),
+ NArgs::N(nargs) => (ptr::null(), nargs),
+ };
+
+ // SAFETY:
+ // - `self.0.get()` is valid.
+ // - `prop.as_char_ptr()` is valid and zero-terminated.
+ // - `nargs_prop` is valid and zero-terminated if `nargs`
+ // is zero, otherwise it is allowed to be a null-pointer.
+ // - The function upholds the type invariants of `out_args`,
+ // namely:
+ // - It may fill the field `fwnode` with a valid pointer,
+ // in which case its refcount is incremented.
+ // - It may modify the field `nargs`, in which case it
+ // initializes at least as many elements in `args`.
+ let ret = unsafe {
+ bindings::fwnode_property_get_reference_args(
+ self.0.get(),
+ prop.as_char_ptr(),
+ nargs_prop,
+ nargs,
+ index,
+ &mut out_args.0,
+ )
+ };
+ to_result(ret)?;
+
+ Ok(out_args)
+ }
+}
+
+/// The number of arguments to request [`FwNodeReferenceArgs`].
+pub enum NArgs<'a> {
+ /// The name of the property of the reference indicating the number of
+ /// arguments.
+ Prop(&'a CStr),
+ /// The known number of arguments.
+ N(u32),
+}
+
+/// The return value of [`FwNode::property_get_reference_args`].
+///
+/// This structure represents the Rust abstraction for a C
+/// `struct fwnode_reference_args` which was initialized by the C side.
+///
+/// # Invariants
+///
+/// If the field `fwnode` is valid, it owns an increment of its refcount.
+///
+/// The field `args` contains at least as many initialized elements as indicated
+/// by the field `nargs`.
+#[repr(transparent)]
+#[derive(Default)]
+pub struct FwNodeReferenceArgs(bindings::fwnode_reference_args);
+
+impl Drop for FwNodeReferenceArgs {
+ fn drop(&mut self) {
+ if !self.0.fwnode.is_null() {
+ // SAFETY:
+ // - By the type invariants of `FwNodeReferenceArgs`, its field
+ // `fwnode` owns an increment of its refcount.
+ // - That increment is relinquished. The underlying object won't be
+ // used anymore because we are dropping it.
+ let _ = unsafe { FwNode::from_raw(self.0.fwnode) };
+ }
+ }
+}
+
+impl FwNodeReferenceArgs {
+ /// Returns the slice of reference arguments.
+ pub fn as_slice(&self) -> &[u64] {
+ // SAFETY: As per the safety invariant of `FwNodeReferenceArgs`, `nargs`
+ // is the minimum number of elements in `args` that is valid.
+ unsafe { core::slice::from_raw_parts(self.0.args.as_ptr(), self.0.nargs as usize) }
+ }
+
+ /// Returns the number of reference arguments.
+ pub fn len(&self) -> usize {
+ self.0.nargs as usize
+ }
+
+ /// Returns `true` if there are no reference arguments.
+ pub fn is_empty(&self) -> bool {
+ self.0.nargs == 0
+ }
+}
+
+impl fmt::Debug for FwNodeReferenceArgs {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{:?}", self.as_slice())
+ }
+}
+
+// SAFETY: Instances of `FwNode` are always reference-counted.
+unsafe impl crate::types::AlwaysRefCounted for FwNode {
+ fn inc_ref(&self) {
+ // SAFETY: The existence of a shared reference guarantees that the
+ // refcount is non-zero.
+ unsafe { bindings::fwnode_handle_get(self.as_raw()) };
+ }
+
+ unsafe fn dec_ref(obj: ptr::NonNull<Self>) {
+ // SAFETY: The safety requirements guarantee that the refcount is
+ // non-zero.
+ unsafe { bindings::fwnode_handle_put(obj.cast().as_ptr()) }
+ }
+}
+
+enum Node<'a> {
+ Borrowed(&'a FwNode),
+ Owned(ARef<FwNode>),
+}
+
+impl fmt::Display for FwNode {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // The logic here is the same as the one in lib/vsprintf.c
+ // (fwnode_full_name_string).
+
+ // SAFETY: `self.as_raw()` is valid by its type invariant.
+ let num_parents = unsafe { bindings::fwnode_count_parents(self.as_raw()) };
+
+ for depth in (0..=num_parents).rev() {
+ let fwnode = if depth == 0 {
+ Node::Borrowed(self)
+ } else {
+ // SAFETY: `self.as_raw()` is valid.
+ let ptr = unsafe { bindings::fwnode_get_nth_parent(self.as_raw(), depth) };
+ // SAFETY:
+ // - The depth passed to `fwnode_get_nth_parent` is
+ // within the valid range, so the returned pointer is
+ // not null.
+ // - The reference count was incremented by
+ // `fwnode_get_nth_parent`.
+ // - That increment is relinquished to
+ // `FwNode::from_raw`.
+ Node::Owned(unsafe { FwNode::from_raw(ptr) })
+ };
+ // Take a reference to the owned or borrowed `FwNode`.
+ let fwnode: &FwNode = match &fwnode {
+ Node::Borrowed(f) => f,
+ Node::Owned(f) => f,
+ };
+
+ // SAFETY: `fwnode` is valid by its type invariant.
+ let prefix = unsafe { bindings::fwnode_get_name_prefix(fwnode.as_raw()) };
+ if !prefix.is_null() {
+ // SAFETY: `fwnode_get_name_prefix` returns null or a
+ // valid C string.
+ let prefix = unsafe { CStr::from_char_ptr(prefix) };
+ fmt::Display::fmt(prefix, f)?;
+ }
+ fmt::Display::fmt(&fwnode.display_name(), f)?;
+ }
+
+ Ok(())
+ }
+}
+
+/// Implemented for types that can be read as properties.
+///
+/// This is implemented for strings, integers and arrays of integers. It's used
+/// to make [`FwNode::property_read`] generic over the type of property being
+/// read. There are also two dedicated methods to read other types, because they
+/// require more specialized function signatures:
+/// - [`property_read_bool`](FwNode::property_read_bool)
+/// - [`property_read_array_vec`](FwNode::property_read_array_vec)
+///
+/// It must be public, because it appears in the signatures of other public
+/// functions, but its methods shouldn't be used outside the kernel crate.
+pub trait Property: Sized + Sealed {
+ /// Used to make [`FwNode::property_read`] generic.
+ fn read_from_fwnode_property(fwnode: &FwNode, name: &CStr) -> Result<Self>;
+}
+
+impl Sealed for CString {}
+
+impl Property for CString {
+ fn read_from_fwnode_property(fwnode: &FwNode, name: &CStr) -> Result<Self> {
+ let mut str: *mut u8 = ptr::null_mut();
+ let pstr: *mut _ = &mut str;
+
+ // SAFETY:
+ // - `name` is non-null and null-terminated.
+ // - `fwnode.as_raw` is valid because `fwnode` is valid.
+ let ret = unsafe {
+ bindings::fwnode_property_read_string(fwnode.as_raw(), name.as_char_ptr(), pstr.cast())
+ };
+ to_result(ret)?;
+
+ // SAFETY:
+ // - `pstr` is a valid pointer to a NUL-terminated C string.
+ // - It is valid for at least as long as `fwnode`, but it's only used
+ // within the current function.
+ // - The memory it points to is not mutated during that time.
+ let str = unsafe { CStr::from_char_ptr(*pstr) };
+ Ok(str.try_into()?)
+ }
+}
+
+/// Implemented for all integers that can be read as properties.
+///
+/// This helper trait is needed on top of the existing [`Property`]
+/// trait to associate the integer types of various sizes with their
+/// corresponding `fwnode_property_read_*_array` functions.
+///
+/// It must be public, because it appears in the signatures of other public
+/// functions, but its methods shouldn't be used outside the kernel crate.
+pub trait PropertyInt: Copy + Sealed {
+ /// Reads a property array.
+ fn read_array_from_fwnode_property<'a>(
+ fwnode: &FwNode,
+ name: &CStr,
+ out: &'a mut [MaybeUninit<Self>],
+ ) -> Result<&'a mut [Self]>;
+
+ /// Reads the length of a property array.
+ fn read_array_len_from_fwnode_property(fwnode: &FwNode, name: &CStr) -> Result<usize>;
+}
+// This macro generates implementations of the traits `Property` and
+// `PropertyInt` for integers of various sizes. Its input is a list
+// of pairs separated by commas. The first element of the pair is the
+// type of the integer, the second one is the name of its corresponding
+// `fwnode_property_read_*_array` function.
+macro_rules! impl_property_for_int {
+ ($($int:ty: $f:ident),* $(,)?) => { $(
+ impl Sealed for $int {}
+ impl<const N: usize> Sealed for [$int; N] {}
+
+ impl PropertyInt for $int {
+ fn read_array_from_fwnode_property<'a>(
+ fwnode: &FwNode,
+ name: &CStr,
+ out: &'a mut [MaybeUninit<Self>],
+ ) -> Result<&'a mut [Self]> {
+ // SAFETY:
+ // - `fwnode`, `name` and `out` are all valid by their type
+ // invariants.
+ // - `out.len()` is a valid bound for the memory pointed to by
+ // `out.as_mut_ptr()`.
+ // CAST: It's ok to cast from `*mut MaybeUninit<$int>` to a
+ // `*mut $int` because they have the same memory layout.
+ let ret = unsafe {
+ bindings::$f(
+ fwnode.as_raw(),
+ name.as_char_ptr(),
+ out.as_mut_ptr().cast(),
+ out.len(),
+ )
+ };
+ to_result(ret)?;
+ // SAFETY: Transmuting from `&'a mut [MaybeUninit<Self>]` to
+ // `&'a mut [Self]` is sound, because the previous call to a
+ // `fwnode_property_read_*_array` function (which didn't fail)
+ // fully initialized the slice.
+ Ok(unsafe { core::mem::transmute::<&mut [MaybeUninit<Self>], &mut [Self]>(out) })
+ }
+
+ fn read_array_len_from_fwnode_property(fwnode: &FwNode, name: &CStr) -> Result<usize> {
+ // SAFETY:
+ // - `fwnode` and `name` are valid by their type invariants.
+ // - It's ok to pass a null pointer to the
+ // `fwnode_property_read_*_array` functions if `nval` is zero.
+ // This will return the length of the array.
+ let ret = unsafe {
+ bindings::$f(
+ fwnode.as_raw(),
+ name.as_char_ptr(),
+ ptr::null_mut(),
+ 0,
+ )
+ };
+ to_result(ret)?;
+ Ok(ret as usize)
+ }
+ }
+
+ impl Property for $int {
+ fn read_from_fwnode_property(fwnode: &FwNode, name: &CStr) -> Result<Self> {
+ let val: [_; 1] = <[$int; 1]>::read_from_fwnode_property(fwnode, name)?;
+ Ok(val[0])
+ }
+ }
+
+ impl<const N: usize> Property for [$int; N] {
+ fn read_from_fwnode_property(fwnode: &FwNode, name: &CStr) -> Result<Self> {
+ let mut val: [MaybeUninit<$int>; N] = [const { MaybeUninit::uninit() }; N];
+
+ <$int>::read_array_from_fwnode_property(fwnode, name, &mut val)?;
+
+ // SAFETY: `val` is always initialized when
+ // `fwnode_property_read_*_array` is successful.
+ Ok(val.map(|v| unsafe { v.assume_init() }))
+ }
+ }
+ )* };
+}
+impl_property_for_int! {
+ u8: fwnode_property_read_u8_array,
+ u16: fwnode_property_read_u16_array,
+ u32: fwnode_property_read_u32_array,
+ u64: fwnode_property_read_u64_array,
+ i8: fwnode_property_read_u8_array,
+ i16: fwnode_property_read_u16_array,
+ i32: fwnode_property_read_u32_array,
+ i64: fwnode_property_read_u64_array,
+}
+
+/// A helper for reading device properties.
+///
+/// Use [`Self::required_by`] if a missing property is considered a bug and
+/// [`Self::optional`] otherwise.
+///
+/// For convenience, [`Self::or`] and [`Self::or_default`] are provided.
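+///
+/// # Examples
+///
+/// A sketch of the different access styles, assuming `dev` and `fwnode` are in scope (property
+/// names illustrative):
+///
+/// ```ignore
+/// let required: u32 = fwnode.property_read(c_str!("clock-frequency")).required_by(dev)?;
+/// let optional: Option<u32> = fwnode.property_read(c_str!("timeout-ms")).optional();
+/// let defaulted: u32 = fwnode.property_read(c_str!("num-lanes")).or(1);
+/// ```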
+pub struct PropertyGuard<'fwnode, 'name, T> {
+ /// The result of reading the property.
+ inner: Result<T>,
+ /// The fwnode of the property, used for logging in the "required" case.
+ fwnode: &'fwnode FwNode,
+ /// The name of the property, used for logging in the "required" case.
+ name: &'name CStr,
+}
+
+impl<T> PropertyGuard<'_, '_, T> {
+ /// Access the property, indicating it is required.
+ ///
+ /// If the property is not present, the error is automatically logged. If a
+ /// missing property is not an error, use [`Self::optional`] instead. The
+ /// device is required so that the log message can be associated with it.
+ pub fn required_by(self, dev: &super::Device) -> Result<T> {
+ if self.inner.is_err() {
+ dev_err!(
+ dev,
+ "{}: property '{}' is missing\n",
+ self.fwnode,
+ self.name
+ );
+ }
+ self.inner
+ }
+
+ /// Access the property, indicating it is optional.
+ ///
+ /// In contrast to [`Self::required_by`], no error message is logged if
+ /// the property is not present.
+ pub fn optional(self) -> Option<T> {
+ self.inner.ok()
+ }
+
+ /// Access the property or the specified default value.
+ ///
+ /// Do not pass a sentinel value as default to detect a missing property.
+ /// Use [`Self::required_by`] or [`Self::optional`] instead.
+ pub fn or(self, default: T) -> T {
+ self.inner.unwrap_or(default)
+ }
+}
+
+impl<T: Default> PropertyGuard<'_, '_, T> {
+ /// Access the property or a default value.
+ ///
+ /// Use [`Self::or`] to specify a custom default value.
+ pub fn or_default(self) -> T {
+ self.inner.unwrap_or_default()
+ }
+}
diff --git a/rust/kernel/device_id.rs b/rust/kernel/device_id.rs
index 0a4eb56d98f2..62c42da12e9d 100644
--- a/rust/kernel/device_id.rs
+++ b/rust/kernel/device_id.rs
@@ -14,32 +14,41 @@ use core::mem::MaybeUninit;
///
/// # Safety
///
-/// Implementers must ensure that:
-/// - `Self` is layout-compatible with [`RawDeviceId::RawType`]; i.e. it's safe to transmute to
-/// `RawDeviceId`.
+/// Implementers must ensure that `Self` is layout-compatible with [`RawDeviceId::RawType`];
+/// i.e. it's safe to transmute to `RawDeviceId`.
///
-/// This requirement is needed so `IdArray::new` can convert `Self` to `RawType` when building
-/// the ID table.
+/// This requirement is needed so `IdArray::new` can convert `Self` to `RawType` when building
+/// the ID table.
///
-/// Ideally, this should be achieved using a const function that does conversion instead of
-/// transmute; however, const trait functions relies on `const_trait_impl` unstable feature,
-/// which is broken/gone in Rust 1.73.
-///
-/// - `DRIVER_DATA_OFFSET` is the offset of context/data field of the device ID (usually named
-/// `driver_data`) of the device ID, the field is suitable sized to write a `usize` value.
-///
-/// Similar to the previous requirement, the data should ideally be added during `Self` to
-/// `RawType` conversion, but there's currently no way to do it when using traits in const.
+/// Ideally, this should be achieved using a const function that does the conversion instead of
+/// a transmute; however, const trait functions rely on the `const_trait_impl` unstable feature,
+/// which is broken/gone in Rust 1.73.
pub unsafe trait RawDeviceId {
/// The raw type that holds the device id.
///
/// Id tables created from [`Self`] are going to hold this type in its zero-terminated array.
type RawType: Copy;
+}
- /// The offset to the context/data field.
+/// Extension trait for [`RawDeviceId`] for devices that embed an index or context value.
+///
+/// This is typically used when the device ID struct includes a field like `driver_data`
+/// that is used to store a pointer-sized value (e.g., an index or context pointer).
+///
+/// # Safety
+///
+/// Implementers must ensure that `DRIVER_DATA_OFFSET` is the correct offset (in bytes) to
+/// the context/data field (e.g., the `driver_data` field) within the raw device ID structure.
+/// This field must be correctly sized to hold a `usize`.
+///
+/// Ideally, the data should be added during `Self` to `RawType` conversion,
+/// but there's currently no way to do it when using traits in const.
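+///
+/// # Examples
+///
+/// A sketch of an implementation for a hypothetical `DeviceId` wrapper around a raw ID type with
+/// a `driver_data` field (names illustrative):
+///
+/// ```ignore
+/// // SAFETY: `DRIVER_DATA_OFFSET` is the offset of the `usize`-sized `driver_data` field of
+/// // `bindings::raw_device_id`.
+/// unsafe impl RawDeviceIdIndex for DeviceId {
+///     const DRIVER_DATA_OFFSET: usize =
+///         core::mem::offset_of!(bindings::raw_device_id, driver_data);
+///
+///     fn index(&self) -> usize {
+///         self.0.driver_data
+///     }
+/// }
+/// ```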
+pub unsafe trait RawDeviceIdIndex: RawDeviceId {
+ /// The offset (in bytes) to the context/data field in the raw device ID.
const DRIVER_DATA_OFFSET: usize;
- /// The index stored at `DRIVER_DATA_OFFSET` of the implementor of the [`RawDeviceId`] trait.
+ /// The index stored at `DRIVER_DATA_OFFSET` of the implementor of the [`RawDeviceIdIndex`]
+ /// trait.
fn index(&self) -> usize;
}
@@ -68,7 +77,15 @@ impl<T: RawDeviceId, U, const N: usize> IdArray<T, U, N> {
/// Creates a new instance of the array.
///
/// The contents are derived from the given identifiers and context information.
- pub const fn new(ids: [(T, U); N]) -> Self {
+ ///
+ /// # Safety
+ ///
+ /// Passing `None` as `data_offset` is always safe.
+ /// If `data_offset` is `Some(data_offset)`, then:
+ /// - `data_offset` must be the correct offset (in bytes) to the context/data field
+ /// (e.g., the `driver_data` field) within the raw device ID structure.
+ /// - The field at `data_offset` must be correctly sized to hold a `usize`.
+ const unsafe fn build(ids: [(T, U); N], data_offset: Option<usize>) -> Self {
let mut raw_ids = [const { MaybeUninit::<T::RawType>::uninit() }; N];
let mut infos = [const { MaybeUninit::uninit() }; N];
@@ -77,14 +94,16 @@ impl<T: RawDeviceId, U, const N: usize> IdArray<T, U, N> {
// SAFETY: by the safety requirement of `RawDeviceId`, we're guaranteed that `T` is
// layout-wise compatible with `RawType`.
raw_ids[i] = unsafe { core::mem::transmute_copy(&ids[i].0) };
- // SAFETY: by the safety requirement of `RawDeviceId`, this would be effectively
- // `raw_ids[i].driver_data = i;`.
- unsafe {
- raw_ids[i]
- .as_mut_ptr()
- .byte_offset(T::DRIVER_DATA_OFFSET as _)
- .cast::<usize>()
- .write(i);
+ if let Some(data_offset) = data_offset {
+ // SAFETY: by the safety requirement of this function, this would be effectively
+ // `raw_ids[i].driver_data = i;`.
+ unsafe {
+ raw_ids[i]
+ .as_mut_ptr()
+ .byte_add(data_offset)
+ .cast::<usize>()
+ .write(i);
+ }
}
// SAFETY: this is effectively a move: `infos[i] = ids[i].1`. We make a copy here but
@@ -109,12 +128,34 @@ impl<T: RawDeviceId, U, const N: usize> IdArray<T, U, N> {
}
}
+ /// Creates a new instance of the array without writing index values.
+ ///
+ /// The contents are derived from the given identifiers and context information.
+ /// If the ID type implements [`RawDeviceIdIndex`], consider using [`IdArray::new`] instead.
+ pub const fn new_without_index(ids: [(T, U); N]) -> Self {
+ // SAFETY: Calling `Self::build` with `offset = None` is always safe,
+ // because no raw memory writes are performed in this case.
+ unsafe { Self::build(ids, None) }
+ }
+
/// Reference to the contained [`RawIdArray`].
pub const fn raw_ids(&self) -> &RawIdArray<T, N> {
&self.raw_ids
}
}
+impl<T: RawDeviceId + RawDeviceIdIndex, U, const N: usize> IdArray<T, U, N> {
+ /// Creates a new instance of the array.
+ ///
+ /// The contents are derived from the given identifiers and context information.
+ pub const fn new(ids: [(T, U); N]) -> Self {
+ // SAFETY: by the safety requirement of `RawDeviceIdIndex`,
+ // `T::DRIVER_DATA_OFFSET` is guaranteed to be the correct offset (in bytes) to
+ // a field within `T::RawType`.
+ unsafe { Self::build(ids, Some(T::DRIVER_DATA_OFFSET)) }
+ }
+}
+
/// A device id table.
///
/// This trait is only implemented by `IdArray`.
@@ -136,7 +177,7 @@ impl<T: RawDeviceId, U, const N: usize> IdTable<T, U> for IdArray<T, U, N> {
fn as_ptr(&self) -> *const T::RawType {
// This cannot be `self.ids.as_ptr()`, as the return pointer must have correct provenance
// to access the sentinel.
- (self as *const Self).cast()
+ core::ptr::from_ref(self).cast()
}
fn id(&self, index: usize) -> &T::RawType {
@@ -154,10 +195,10 @@ macro_rules! module_device_table {
($table_type: literal, $module_table_name:ident, $table_name:ident) => {
#[rustfmt::skip]
#[export_name =
- concat!("__mod_device_table__", $table_type,
- "__", module_path!(),
- "_", line!(),
- "_", stringify!($table_name))
+ concat!("__mod_device_table__", line!(),
+ "__kmod_", module_path!(),
+ "__", $table_type,
+ "__", stringify!($table_name))
]
static $module_table_name: [::core::mem::MaybeUninit<u8>; $table_name.raw_ids().size()] =
unsafe { ::core::mem::transmute_copy($table_name.raw_ids()) };
diff --git a/rust/kernel/devres.rs b/rust/kernel/devres.rs
index 57502534d985..10a6a1789854 100644
--- a/rust/kernel/devres.rs
+++ b/rust/kernel/devres.rs
@@ -9,20 +9,25 @@ use crate::{
alloc::Flags,
bindings,
device::{Bound, Device},
- error::{Error, Result},
+ error::{to_result, Error, Result},
ffi::c_void,
prelude::*,
revocable::{Revocable, RevocableGuard},
- sync::{rcu, Arc, Completion},
- types::ARef,
+ sync::{aref::ARef, rcu, Completion},
+ types::{ForeignOwnable, Opaque, ScopeGuard},
};
+use pin_init::Wrapper;
+
+/// [`Devres`] inner data accessed from [`Devres::callback`].
#[pin_data]
-struct DevresInner<T> {
- dev: ARef<Device>,
- callback: unsafe extern "C" fn(*mut c_void),
+struct Inner<T: Send> {
#[pin]
data: Revocable<T>,
+ /// Tracks whether [`Devres::callback`] has been completed.
+ #[pin]
+ devm: Completion,
+ /// Tracks whether revoking [`Self::data`] has been completed.
#[pin]
revoke: Completion,
}
@@ -44,10 +49,10 @@ struct DevresInner<T> {
/// [`Devres`] users should make sure to simply free the corresponding backing resource in `T`'s
/// [`Drop`] implementation.
///
-/// # Example
+/// # Examples
///
/// ```no_run
-/// # use kernel::{bindings, c_str, device::{Bound, Device}, devres::Devres, io::{Io, IoRaw}};
+/// # use kernel::{bindings, device::{Bound, Device}, devres::Devres, io::{Io, IoRaw}};
/// # use core::ops::Deref;
///
/// // See also [`pci::Bar`] for a real example.
@@ -61,19 +66,19 @@ struct DevresInner<T> {
/// unsafe fn new(paddr: usize) -> Result<Self>{
/// // SAFETY: By the safety requirements of this function [`paddr`, `paddr` + `SIZE`) is
/// // valid for `ioremap`.
-/// let addr = unsafe { bindings::ioremap(paddr as _, SIZE as _) };
+/// let addr = unsafe { bindings::ioremap(paddr as bindings::phys_addr_t, SIZE) };
/// if addr.is_null() {
/// return Err(ENOMEM);
/// }
///
-/// Ok(IoMem(IoRaw::new(addr as _, SIZE)?))
+/// Ok(IoMem(IoRaw::new(addr as usize, SIZE)?))
/// }
/// }
///
/// impl<const SIZE: usize> Drop for IoMem<SIZE> {
/// fn drop(&mut self) {
/// // SAFETY: `self.0.addr()` is guaranteed to be properly mapped by `Self::new`.
-/// unsafe { bindings::iounmap(self.0.addr() as _); };
+/// unsafe { bindings::iounmap(self.0.addr() as *mut c_void); };
/// }
/// }
///
@@ -88,108 +93,127 @@ struct DevresInner<T> {
/// # fn no_run(dev: &Device<Bound>) -> Result<(), Error> {
/// // SAFETY: Invalid usage for example purposes.
/// let iomem = unsafe { IoMem::<{ core::mem::size_of::<u32>() }>::new(0xBAAAAAAD)? };
-/// let devres = Devres::new(dev, iomem, GFP_KERNEL)?;
+/// let devres = KBox::pin_init(Devres::new(dev, iomem), GFP_KERNEL)?;
///
/// let res = devres.try_access().ok_or(ENXIO)?;
/// res.write8(0x42, 0x0);
/// # Ok(())
/// # }
/// ```
-pub struct Devres<T>(Arc<DevresInner<T>>);
-
-impl<T> DevresInner<T> {
- fn new(dev: &Device<Bound>, data: T, flags: Flags) -> Result<Arc<DevresInner<T>>> {
- let inner = Arc::pin_init(
- pin_init!( DevresInner {
- dev: dev.into(),
- callback: Self::devres_callback,
- data <- Revocable::new(data),
- revoke <- Completion::new(),
- }),
- flags,
- )?;
-
- // Convert `Arc<DevresInner>` into a raw pointer and make devres own this reference until
- // `Self::devres_callback` is called.
- let data = inner.clone().into_raw();
+///
+/// # Invariants
+///
+/// [`Self::inner`] is guaranteed to be initialized and is always accessed read-only.
+#[pin_data(PinnedDrop)]
+pub struct Devres<T: Send> {
+ dev: ARef<Device>,
+ /// Pointer to [`Self::devres_callback`].
+ ///
+ /// Has to be stored, since Rust does not guarantee to always return the same address for a
+ /// function. However, the C API uses the address as a key.
+ callback: unsafe extern "C" fn(*mut c_void),
+ /// Contains all the fields shared with [`Self::callback`].
+ // TODO: Replace with `UnsafePinned`, once available.
+ //
+ // Subsequently, the `drop_in_place()` in `Devres::drop` and `Devres::new` as well as the
+ // explicit `Send` and `Sync` impls can be removed.
+ #[pin]
+ inner: Opaque<Inner<T>>,
+ _add_action: (),
+}
- // SAFETY: `devm_add_action` guarantees to call `Self::devres_callback` once `dev` is
- // detached.
- let ret =
- unsafe { bindings::devm_add_action(dev.as_raw(), Some(inner.callback), data as _) };
+impl<T: Send> Devres<T> {
+ /// Creates a new [`Devres`] instance of the given `data`.
+ ///
+ /// The `data` encapsulated within the returned [`Devres`] instance will be
+ /// [revoked](Revocable::revoke) once the device is detached.
+ pub fn new<'a, E>(
+ dev: &'a Device<Bound>,
+ data: impl PinInit<T, E> + 'a,
+ ) -> impl PinInit<Self, Error> + 'a
+ where
+ T: 'a,
+ Error: From<E>,
+ {
+ try_pin_init!(&this in Self {
+ dev: dev.into(),
+ callback: Self::devres_callback,
+ // INVARIANT: `inner` is properly initialized.
+ inner <- Opaque::pin_init(try_pin_init!(Inner {
+ devm <- Completion::new(),
+ revoke <- Completion::new(),
+ data <- Revocable::new(data),
+ })),
+ // TODO: Replace with "initializer code blocks" [1] once available.
+ //
+ // [1] https://github.com/Rust-for-Linux/pin-init/pull/69
+ _add_action: {
+ // SAFETY: `this` is a valid pointer to uninitialized memory.
+ let inner = unsafe { &raw mut (*this.as_ptr()).inner };
- if ret != 0 {
- // SAFETY: We just created another reference to `inner` in order to pass it to
- // `bindings::devm_add_action`. If `bindings::devm_add_action` fails, we have to drop
- // this reference accordingly.
- let _ = unsafe { Arc::from_raw(data) };
- return Err(Error::from_errno(ret));
- }
+ // SAFETY:
+ // - `dev.as_raw()` is a pointer to a valid bound device.
+ // - `inner` is guaranteed to be valid for the duration of the lifetime of `Self`.
+ // - `devm_add_action()` is guaranteed not to call `callback` until `this` has been
+ // properly initialized, because we require `dev` (i.e. the *bound* device) to
+ // live at least as long as the returned `impl PinInit<Self, Error>`.
+ to_result(unsafe {
+ bindings::devm_add_action(dev.as_raw(), Some(*callback), inner.cast())
+ }).inspect_err(|_| {
+ let inner = Opaque::cast_into(inner);
- Ok(inner)
+ // SAFETY: `inner` is a valid pointer to an `Inner<T>` and valid for both reads
+ // and writes.
+ unsafe { core::ptr::drop_in_place(inner) };
+ })?;
+ },
+ })
}
- fn as_ptr(&self) -> *const Self {
- self as _
+ fn inner(&self) -> &Inner<T> {
+ // SAFETY: By the type invariants of `Self`, `inner` is properly initialized and always
+ // accessed read-only.
+ unsafe { &*self.inner.get() }
}
- fn remove_action(this: &Arc<Self>) -> bool {
- // SAFETY:
- // - `self.inner.dev` is a valid `Device`,
- // - the `action` and `data` pointers are the exact same ones as given to devm_add_action()
- // previously,
- // - `self` is always valid, even if the action has been released already.
- let success = unsafe {
- bindings::devm_remove_action_nowarn(
- this.dev.as_raw(),
- Some(this.callback),
- this.as_ptr() as _,
- )
- } == 0;
-
- if success {
- // SAFETY: We leaked an `Arc` reference to devm_add_action() in `DevresInner::new`; if
- // devm_remove_action_nowarn() was successful we can (and have to) claim back ownership
- // of this reference.
- let _ = unsafe { Arc::from_raw(this.as_ptr()) };
- }
-
- success
+ fn data(&self) -> &Revocable<T> {
+ &self.inner().data
}
#[allow(clippy::missing_safety_doc)]
unsafe extern "C" fn devres_callback(ptr: *mut kernel::ffi::c_void) {
- let ptr = ptr as *mut DevresInner<T>;
- // Devres owned this memory; now that we received the callback, drop the `Arc` and hence the
- // reference.
- // SAFETY: Safe, since we leaked an `Arc` reference to devm_add_action() in
- // `DevresInner::new`.
- let inner = unsafe { Arc::from_raw(ptr) };
+ // SAFETY: In `Self::new` we've passed a valid pointer to `Inner` to `devm_add_action()`,
+ // hence `ptr` must be a valid pointer to `Inner`.
+ let inner = unsafe { &*ptr.cast::<Inner<T>>() };
+
+ // Ensure that `inner` can't be used anymore after we signal completion of this callback.
+ let inner = ScopeGuard::new_with_data(inner, |inner| inner.devm.complete_all());
if !inner.data.revoke() {
// If `revoke()` returns false, it means that `Devres::drop` already started revoking
- // `inner.data` for us. Hence we have to wait until `Devres::drop()` signals that it
- // completed revoking `inner.data`.
+ // `data` for us. Hence we have to wait until `Devres::drop` signals that it
+ // completed revoking `data`.
inner.revoke.wait_for_completion();
}
}
-}
-impl<T> Devres<T> {
- /// Creates a new [`Devres`] instance of the given `data`. The `data` encapsulated within the
- /// returned `Devres` instance' `data` will be revoked once the device is detached.
- pub fn new(dev: &Device<Bound>, data: T, flags: Flags) -> Result<Self> {
- let inner = DevresInner::new(dev, data, flags)?;
-
- Ok(Devres(inner))
+ fn remove_action(&self) -> bool {
+ // SAFETY:
+ // - `self.dev` is a valid `Device`,
+ // - the `action` and `data` pointers are the exact same ones as given to
+ // `devm_add_action()` previously.
+ (unsafe {
+ bindings::devm_remove_action_nowarn(
+ self.dev.as_raw(),
+ Some(self.callback),
+ core::ptr::from_ref(self.inner()).cast_mut().cast(),
+ )
+ } == 0)
}
- /// Same as [`Devres::new`], but does not return a `Devres` instance. Instead the given `data`
- /// is owned by devres and will be revoked / dropped, once the device is detached.
- pub fn new_foreign_owned(dev: &Device<Bound>, data: T, flags: Flags) -> Result {
- let _ = DevresInner::new(dev, data, flags)?;
-
- Ok(())
+ /// Returns a reference to the [`Device`] this [`Devres`] instance has been created with.
+ pub fn device(&self) -> &Device {
+ &self.dev
}
/// Obtain `&'a T`, bypassing the [`Revocable`].
@@ -202,7 +226,7 @@ impl<T> Devres<T> {
/// An error is returned if `dev` does not match the same [`Device`] this [`Devres`] instance
/// has been created with.
///
- /// # Example
+ /// # Examples
///
/// ```no_run
/// # #![cfg(CONFIG_PCI)]
@@ -221,43 +245,123 @@ impl<T> Devres<T> {
/// }
/// ```
pub fn access<'a>(&'a self, dev: &'a Device<Bound>) -> Result<&'a T> {
- if self.0.dev.as_raw() != dev.as_raw() {
+ if self.dev.as_raw() != dev.as_raw() {
return Err(EINVAL);
}
// SAFETY: `dev` being the same device as the device this `Devres` has been created for
- // proves that `self.0.data` hasn't been revoked and is guaranteed to not be revoked as
- // long as `dev` lives; `dev` lives at least as long as `self`.
- Ok(unsafe { self.0.data.access() })
+ // proves that `self.data` hasn't been revoked and is guaranteed to not be revoked as long
+ // as `dev` lives; `dev` lives at least as long as `self`.
+ Ok(unsafe { self.data().access() })
}
/// [`Devres`] accessor for [`Revocable::try_access`].
pub fn try_access(&self) -> Option<RevocableGuard<'_, T>> {
- self.0.data.try_access()
+ self.data().try_access()
}
/// [`Devres`] accessor for [`Revocable::try_access_with`].
pub fn try_access_with<R, F: FnOnce(&T) -> R>(&self, f: F) -> Option<R> {
- self.0.data.try_access_with(f)
+ self.data().try_access_with(f)
}
/// [`Devres`] accessor for [`Revocable::try_access_with_guard`].
pub fn try_access_with_guard<'a>(&'a self, guard: &'a rcu::Guard) -> Option<&'a T> {
- self.0.data.try_access_with_guard(guard)
+ self.data().try_access_with_guard(guard)
}
}
-impl<T> Drop for Devres<T> {
- fn drop(&mut self) {
+// SAFETY: `Devres` can be sent to any task if `T: Send`.
+unsafe impl<T: Send> Send for Devres<T> {}
+
+// SAFETY: `Devres` can be shared with any task if `T: Sync`.
+unsafe impl<T: Send + Sync> Sync for Devres<T> {}
+
+#[pinned_drop]
+impl<T: Send> PinnedDrop for Devres<T> {
+ fn drop(self: Pin<&mut Self>) {
// SAFETY: When `drop` runs, it is guaranteed that nobody is accessing the revocable data
// anymore, hence it is safe not to wait for the grace period to finish.
- if unsafe { self.0.data.revoke_nosync() } {
- // We revoked `self.0.data` before the devres action did, hence try to remove it.
- if !DevresInner::remove_action(&self.0) {
+ if unsafe { self.data().revoke_nosync() } {
+ // We revoked `self.data` before the devres action did, hence try to remove it.
+ if !self.remove_action() {
// We could not remove the devres action, which means that it now runs concurrently,
- // hence signal that `self.0.data` has been revoked successfully.
- self.0.revoke.complete_all();
+ // hence signal that `self.data` has been revoked by us successfully.
+ self.inner().revoke.complete_all();
+
+ // Wait for `Self::devres_callback` to be done using this object.
+ self.inner().devm.wait_for_completion();
}
+ } else {
+ // `Self::devres_callback` revokes `self.data` for us, hence wait for it to be done
+ // using this object.
+ self.inner().devm.wait_for_completion();
}
+
+ // INVARIANT: At this point it is guaranteed that `inner` can't be accessed any more.
+ //
+ // SAFETY: `inner` is valid for dropping.
+ unsafe { core::ptr::drop_in_place(self.inner.get()) };
+ }
+}
+
+/// Consume `data` and [`Drop::drop`] `data` once `dev` is unbound.
+fn register_foreign<P>(dev: &Device<Bound>, data: P) -> Result
+where
+ P: ForeignOwnable + Send + 'static,
+{
+ let ptr = data.into_foreign();
+
+ #[allow(clippy::missing_safety_doc)]
+ unsafe extern "C" fn callback<P: ForeignOwnable>(ptr: *mut kernel::ffi::c_void) {
+ // SAFETY: `ptr` is the pointer to the `ForeignOwnable` leaked above and hence valid.
+ drop(unsafe { P::from_foreign(ptr.cast()) });
}
+
+ // SAFETY:
+ // - `dev.as_raw()` is a pointer to a valid and bound device.
+ // - `ptr` is a valid pointer the `ForeignOwnable` devres takes ownership of.
+ to_result(unsafe {
+ // `devm_add_action_or_reset()` also calls `callback` on failure, such that the
+ // `ForeignOwnable` is released eventually.
+ bindings::devm_add_action_or_reset(dev.as_raw(), Some(callback::<P>), ptr.cast())
+ })
+}
+
+/// Encapsulate `data` in a [`KBox`] and [`Drop::drop`] `data` once `dev` is unbound.
+///
+/// # Examples
+///
+/// ```no_run
+/// use kernel::{device::{Bound, Device}, devres};
+///
+/// /// Registration of e.g. a class device, IRQ, etc.
+/// struct Registration;
+///
+/// impl Registration {
+/// fn new() -> Self {
+/// // register
+///
+/// Self
+/// }
+/// }
+///
+/// impl Drop for Registration {
+/// fn drop(&mut self) {
+/// // unregister
+/// }
+/// }
+///
+/// fn from_bound_context(dev: &Device<Bound>) -> Result {
+/// devres::register(dev, Registration::new(), GFP_KERNEL)
+/// }
+/// ```
+pub fn register<T, E>(dev: &Device<Bound>, data: impl PinInit<T, E>, flags: Flags) -> Result
+where
+ T: Send + 'static,
+ Error: From<E>,
+{
+ let data = KBox::pin_init(data, flags)?;
+
+ register_foreign(dev, data)
}
diff --git a/rust/kernel/dma.rs b/rust/kernel/dma.rs
index a33261c62e0c..4e0af3e1a3b9 100644
--- a/rust/kernel/dma.rs
+++ b/rust/kernel/dma.rs
@@ -5,14 +5,166 @@
//! C header: [`include/linux/dma-mapping.h`](srctree/include/linux/dma-mapping.h)
use crate::{
- bindings, build_assert,
- device::{Bound, Device},
- error::code::*,
- error::Result,
+ bindings, build_assert, device,
+ device::{Bound, Core},
+ error::{to_result, Result},
+ prelude::*,
+ sync::aref::ARef,
transmute::{AsBytes, FromBytes},
- types::ARef,
};
+/// DMA address type.
+///
+/// Represents a bus address used for Direct Memory Access (DMA) operations.
+///
+/// This is an alias of the kernel's `dma_addr_t`, which may be `u32` or `u64` depending on
+/// `CONFIG_ARCH_DMA_ADDR_T_64BIT`.
+///
+/// Note that this may be `u64` even on 32-bit architectures.
+pub type DmaAddress = bindings::dma_addr_t;
+
+/// Trait to be implemented by DMA capable bus devices.
+///
+/// The [`dma::Device`](Device) trait should be implemented by bus specific device representations
+/// whose underlying bus is DMA capable, such as [`pci::Device`](::kernel::pci::Device) or
+/// [`platform::Device`](::kernel::platform::Device).
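+///
+/// A typical `probe()` sketch setting up a 64-bit DMA mask (bus and driver types illustrative):
+///
+/// ```ignore
+/// fn probe(pdev: &pci::Device<Core>) -> Result {
+///     // SAFETY: Called from `probe()`, before any DMA allocations or mappings exist.
+///     unsafe { pdev.dma_set_mask_and_coherent(DmaMask::new::<64>())? };
+///     Ok(())
+/// }
+/// ```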
+pub trait Device: AsRef<device::Device<Core>> {
+ /// Set up the device's DMA streaming addressing capabilities.
+ ///
+ /// This method is usually called once from `probe()` as soon as the device capabilities are
+ /// known.
+ ///
+ /// # Safety
+ ///
+ /// This method must not be called concurrently with any DMA allocation or mapping primitives,
+ /// such as [`CoherentAllocation::alloc_attrs`].
+ unsafe fn dma_set_mask(&self, mask: DmaMask) -> Result {
+ // SAFETY:
+ // - By the type invariant of `device::Device`, `self.as_ref().as_raw()` is valid.
+ // - The safety requirement of this function guarantees that there are no concurrent calls
+ // to DMA allocation and mapping primitives using this mask.
+ to_result(unsafe { bindings::dma_set_mask(self.as_ref().as_raw(), mask.value()) })
+ }
+
+ /// Set up the device's DMA coherent addressing capabilities.
+ ///
+ /// This method is usually called once from `probe()` as soon as the device capabilities are
+ /// known.
+ ///
+ /// # Safety
+ ///
+ /// This method must not be called concurrently with any DMA allocation or mapping primitives,
+ /// such as [`CoherentAllocation::alloc_attrs`].
+ unsafe fn dma_set_coherent_mask(&self, mask: DmaMask) -> Result {
+ // SAFETY:
+ // - By the type invariant of `device::Device`, `self.as_ref().as_raw()` is valid.
+ // - The safety requirement of this function guarantees that there are no concurrent calls
+ // to DMA allocation and mapping primitives using this mask.
+ to_result(unsafe { bindings::dma_set_coherent_mask(self.as_ref().as_raw(), mask.value()) })
+ }
+
+ /// Set up the device's DMA addressing capabilities.
+ ///
+ /// This is a combination of [`Device::dma_set_mask`] and [`Device::dma_set_coherent_mask`].
+ ///
+ /// This method is usually called once from `probe()` as soon as the device capabilities are
+ /// known.
+ ///
+ /// # Safety
+ ///
+ /// This method must not be called concurrently with any DMA allocation or mapping primitives,
+ /// such as [`CoherentAllocation::alloc_attrs`].
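+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch, assuming a `probe()` callback holding a bound bus device `dev` that
+ /// implements this trait:
+ ///
+ /// ```ignore
+ /// use kernel::dma::{Device, DmaMask};
+ ///
+ /// // SAFETY: `probe()` has not performed any DMA allocations or mappings yet, hence this
+ /// // call can't race with any of them.
+ /// unsafe { dev.dma_set_mask_and_coherent(DmaMask::new::<64>())? };
+ /// ```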
+ unsafe fn dma_set_mask_and_coherent(&self, mask: DmaMask) -> Result {
+ // SAFETY:
+ // - By the type invariant of `device::Device`, `self.as_ref().as_raw()` is valid.
+ // - The safety requirement of this function guarantees that there are no concurrent calls
+ // to DMA allocation and mapping primitives using this mask.
+ to_result(unsafe {
+ bindings::dma_set_mask_and_coherent(self.as_ref().as_raw(), mask.value())
+ })
+ }
+}
+
+/// A DMA mask that holds a bitmask with the lowest `n` bits set.
+///
+/// Use [`DmaMask::new`] or [`DmaMask::try_new`] to construct a value. Values
+/// are guaranteed to never exceed the bit width of `u64`.
+///
+/// This is the Rust equivalent of the C macro `DMA_BIT_MASK()`.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct DmaMask(u64);
+
+impl DmaMask {
+ /// Constructs a `DmaMask` with the lowest `n` bits set to `1`.
+ ///
+ /// For `n <= 64`, sets exactly the lowest `n` bits.
+ /// For `n > 64`, results in a build error.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use kernel::dma::DmaMask;
+ ///
+ /// let mask0 = DmaMask::new::<0>();
+ /// assert_eq!(mask0.value(), 0);
+ ///
+ /// let mask1 = DmaMask::new::<1>();
+ /// assert_eq!(mask1.value(), 0b1);
+ ///
+ /// let mask64 = DmaMask::new::<64>();
+ /// assert_eq!(mask64.value(), u64::MAX);
+ ///
+ /// // Build failure.
+ /// // let mask_overflow = DmaMask::new::<100>();
+ /// ```
+ #[inline]
+ pub const fn new<const N: u32>() -> Self {
+ let Ok(mask) = Self::try_new(N) else {
+ build_error!("Invalid DMA Mask.");
+ };
+
+ mask
+ }
+
+ /// Constructs a `DmaMask` with the lowest `n` bits set to `1`.
+ ///
+ /// For `n <= 64`, sets exactly the lowest `n` bits.
+ /// For `n > 64`, returns [`EINVAL`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use kernel::dma::DmaMask;
+ ///
+ /// let mask0 = DmaMask::try_new(0)?;
+ /// assert_eq!(mask0.value(), 0);
+ ///
+ /// let mask1 = DmaMask::try_new(1)?;
+ /// assert_eq!(mask1.value(), 0b1);
+ ///
+ /// let mask64 = DmaMask::try_new(64)?;
+ /// assert_eq!(mask64.value(), u64::MAX);
+ ///
+ /// let mask_overflow = DmaMask::try_new(100);
+ /// assert!(mask_overflow.is_err());
+ /// # Ok::<(), Error>(())
+ /// ```
+ #[inline]
+ pub const fn try_new(n: u32) -> Result<Self> {
+ Ok(Self(match n {
+ 0 => 0,
+ 1..=64 => u64::MAX >> (64 - n),
+ _ => return Err(EINVAL),
+ }))
+ }
+
+ /// Returns the underlying `u64` bitmask value.
+ #[inline]
+ pub const fn value(&self) -> u64 {
+ self.0
+ }
+}
+
/// Possible attributes associated with a DMA mapping.
///
/// They can be combined with the operators `|`, `&`, and `!`.
@@ -38,7 +190,7 @@ pub struct Attrs(u32);
impl Attrs {
/// Get the raw representation of this attribute.
pub(crate) fn as_raw(self) -> crate::ffi::c_ulong {
- self.0 as _
+ self.0 as crate::ffi::c_ulong
}
/// Check whether `flags` is contained in `self`.
@@ -89,7 +241,7 @@ pub mod attrs {
/// Forces contiguous allocation of the buffer in physical memory.
pub const DMA_ATTR_FORCE_CONTIGUOUS: Attrs = Attrs(bindings::DMA_ATTR_FORCE_CONTIGUOUS);
- /// This is a hint to the DMA-mapping subsystem that it's probably not worth the time to try
+ /// Hints the DMA-mapping subsystem that it's probably not worth the time to try
/// to allocate memory to in a way that gives better TLB efficiency.
pub const DMA_ATTR_ALLOC_SINGLE_PAGES: Attrs = Attrs(bindings::DMA_ATTR_ALLOC_SINGLE_PAGES);
@@ -97,15 +249,86 @@ pub mod attrs {
/// `__GFP_NOWARN`).
pub const DMA_ATTR_NO_WARN: Attrs = Attrs(bindings::DMA_ATTR_NO_WARN);
- /// Used to indicate that the buffer is fully accessible at an elevated privilege level (and
+ /// Indicates that the buffer is fully accessible at an elevated privilege level (and
/// ideally inaccessible or at least read-only at lesser-privileged levels).
pub const DMA_ATTR_PRIVILEGED: Attrs = Attrs(bindings::DMA_ATTR_PRIVILEGED);
+
+ /// Indicates that the buffer is MMIO memory.
+ pub const DMA_ATTR_MMIO: Attrs = Attrs(bindings::DMA_ATTR_MMIO);
+}
+
+/// DMA data direction.
+///
+/// Corresponds to the C [`enum dma_data_direction`].
+///
+/// [`enum dma_data_direction`]: srctree/include/linux/dma-direction.h
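+///
+/// # Examples
+///
+/// A minimal sketch of picking a direction for a buffer that the CPU fills and the device
+/// only reads:
+///
+/// ```
+/// use kernel::dma::DataDirection;
+///
+/// let dir = DataDirection::ToDevice;
+/// assert_ne!(dir, DataDirection::FromDevice);
+/// ```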
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+#[repr(u32)]
+pub enum DataDirection {
+ /// The DMA mapping is for bidirectional data transfer.
+ ///
+ /// This is used when the buffer can be both read from and written to by the device.
+ /// The cache for the corresponding memory region is both flushed and invalidated.
+ Bidirectional = Self::const_cast(bindings::dma_data_direction_DMA_BIDIRECTIONAL),
+
+ /// The DMA mapping is for data transfer from memory to the device (write).
+ ///
+ /// The CPU has prepared data in the buffer, and the device will read it.
+ /// The cache for the corresponding memory region is flushed before device access.
+ ToDevice = Self::const_cast(bindings::dma_data_direction_DMA_TO_DEVICE),
+
+ /// The DMA mapping is for data transfer from the device to memory (read).
+ ///
+ /// The device will write data into the buffer for the CPU to read.
+ /// The cache for the corresponding memory region is invalidated before CPU access.
+ FromDevice = Self::const_cast(bindings::dma_data_direction_DMA_FROM_DEVICE),
+
+ /// The DMA mapping is not for data transfer.
+ ///
+ /// This is primarily for debugging purposes. With this direction, the DMA mapping API
+ /// will not perform any cache coherency operations.
+ None = Self::const_cast(bindings::dma_data_direction_DMA_NONE),
+}
+
+impl DataDirection {
+ /// Casts the bindgen-generated enum type to a `u32` at compile time.
+ ///
+ /// This function will cause a compile-time error if the underlying value of the
+ /// C enum is out of bounds for `u32`.
+ const fn const_cast(val: bindings::dma_data_direction) -> u32 {
+ // CAST: The C standard allows compilers to choose different integer types for enums.
+ // To safely check the value, we cast it to a wide signed integer type (`i128`)
+ // which can hold any standard C integer enum type without truncation.
+ let wide_val = val as i128;
+
+ // Check if the value is outside the valid range for the target type `u32`.
+ // CAST: `u32::MAX` is cast to `i128` to match the type of `wide_val` for the comparison.
+ if wide_val < 0 || wide_val > u32::MAX as i128 {
+ // Trigger a compile-time error in a const context.
+ build_error!("C enum value is out of bounds for the target type `u32`.");
+ }
+
+ // CAST: This cast is valid because the check above guarantees that `wide_val`
+ // is within the representable range of `u32`.
+ wide_val as u32
+ }
+}
+
+impl From<DataDirection> for bindings::dma_data_direction {
+ /// Returns the raw representation of [`enum dma_data_direction`].
+ fn from(direction: DataDirection) -> Self {
+ // CAST: `direction as u32` gets the underlying representation of our `#[repr(u32)]` enum.
+ // The subsequent cast to `Self` (the bindgen type) assumes the C enum is compatible
+ // with the enum variants of `DataDirection`, which is a valid assumption given our
+ // compile-time checks.
+ direction as u32 as Self
+ }
}
/// An abstraction of the `dma_alloc_coherent` API.
///
/// This is an abstraction around the `dma_alloc_coherent` API which is used to allocate and map
-/// large consistent DMA regions.
+/// large coherent DMA regions.
///
/// A [`CoherentAllocation`] instance contains a pointer to the allocated region (in the
/// processor's virtual address space) and the device address which can be given to the device
@@ -114,9 +337,11 @@ pub mod attrs {
///
/// # Invariants
///
-/// For the lifetime of an instance of [`CoherentAllocation`], the `cpu_addr` is a valid pointer
-/// to an allocated region of consistent memory and `dma_handle` is the DMA address base of
-/// the region.
+/// - For the lifetime of an instance of [`CoherentAllocation`], the `cpu_addr` is a valid pointer
+/// to an allocated region of coherent memory and `dma_handle` is the DMA address base of the
+/// region.
+/// - The size in bytes of the allocation is equal to `size_of::<T> * count`.
+/// - `size_of::<T> * count` fits into a `usize`.
// TODO
//
// DMA allocations potentially carry device resources (e.g.IOMMU mappings), hence for soundness
@@ -130,15 +355,15 @@ pub mod attrs {
// Hence, find a way to revoke the device resources of a `CoherentAllocation`, but not the
// entire `CoherentAllocation` including the allocated memory itself.
pub struct CoherentAllocation<T: AsBytes + FromBytes> {
- dev: ARef<Device>,
- dma_handle: bindings::dma_addr_t,
+ dev: ARef<device::Device>,
+ dma_handle: DmaAddress,
count: usize,
cpu_addr: *mut T,
dma_attrs: Attrs,
}
impl<T: AsBytes + FromBytes> CoherentAllocation<T> {
- /// Allocates a region of `size_of::<T> * count` of consistent memory.
+ /// Allocates a region of `size_of::<T> * count` of coherent memory.
///
/// # Examples
///
@@ -152,7 +377,7 @@ impl<T: AsBytes + FromBytes> CoherentAllocation<T> {
/// # Ok::<(), Error>(()) }
/// ```
pub fn alloc_attrs(
- dev: &Device<Bound>,
+ dev: &device::Device<Bound>,
count: usize,
gfp_flags: kernel::alloc::Flags,
dma_attrs: Attrs,
@@ -179,14 +404,17 @@ impl<T: AsBytes + FromBytes> CoherentAllocation<T> {
if ret.is_null() {
return Err(ENOMEM);
}
- // INVARIANT: We just successfully allocated a coherent region which is accessible for
- // `count` elements, hence the cpu address is valid. We also hold a refcounted reference
- // to the device.
+ // INVARIANT:
+ // - We just successfully allocated a coherent region which is accessible for
+ // `count` elements, hence the cpu address is valid. We also hold a refcounted reference
+ // to the device.
+ // - The allocated `size` is equal to `size_of::<T> * count`.
+ // - The allocated `size` fits into a `usize`.
Ok(Self {
dev: dev.into(),
dma_handle,
count,
- cpu_addr: ret as *mut T,
+ cpu_addr: ret.cast::<T>(),
dma_attrs,
})
}
@@ -194,13 +422,28 @@ impl<T: AsBytes + FromBytes> CoherentAllocation<T> {
/// Performs the same functionality as [`CoherentAllocation::alloc_attrs`], except the
/// `dma_attrs` is 0 by default.
pub fn alloc_coherent(
- dev: &Device<Bound>,
+ dev: &device::Device<Bound>,
count: usize,
gfp_flags: kernel::alloc::Flags,
) -> Result<CoherentAllocation<T>> {
CoherentAllocation::alloc_attrs(dev, count, gfp_flags, Attrs(0))
}
+ /// Returns the number of elements `T` in this allocation.
+ ///
+ /// Note that this is not the size of the allocation in bytes, which is provided by
+ /// [`Self::size`].
+ pub fn count(&self) -> usize {
+ self.count
+ }
+
+ /// Returns the size in bytes of this allocation.
+ pub fn size(&self) -> usize {
+ // INVARIANT: The type invariant of `Self` guarantees that `size_of::<T> * count` fits into
+ // a `usize`.
+ self.count * core::mem::size_of::<T>()
+ }
+
/// Returns the base address to the allocated region in the CPU's virtual address space.
pub fn start_ptr(&self) -> *const T {
self.cpu_addr
@@ -212,12 +455,113 @@ impl<T: AsBytes + FromBytes> CoherentAllocation<T> {
self.cpu_addr
}
- /// Returns a DMA handle which may given to the device as the DMA address base of
+ /// Returns a DMA handle which may be given to the device as the DMA address base of
/// the region.
- pub fn dma_handle(&self) -> bindings::dma_addr_t {
+ pub fn dma_handle(&self) -> DmaAddress {
self.dma_handle
}
+ /// Returns a DMA handle starting at `offset` (in units of `T`) which may be given to the
+ /// device as the DMA address base of the region.
+ ///
+ /// Returns `EINVAL` if `offset` is not within the bounds of the allocation.
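+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch, assuming the device should be given the address of the second element:
+ ///
+ /// ```
+ /// # fn test(alloc: &kernel::dma::CoherentAllocation<u32>) -> Result {
+ /// use kernel::dma::DmaAddress;
+ ///
+ /// let dma_addr = alloc.dma_handle_with_offset(1)?;
+ /// assert_eq!(dma_addr, alloc.dma_handle() + core::mem::size_of::<u32>() as DmaAddress);
+ /// # Ok::<(), Error>(()) }
+ /// ```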
+ pub fn dma_handle_with_offset(&self, offset: usize) -> Result<DmaAddress> {
+ if offset >= self.count {
+ Err(EINVAL)
+ } else {
+ // INVARIANT: The type invariant of `Self` guarantees that `size_of::<T> * count` fits
+ // into a `usize`, and `offset` is less than `count`.
+ Ok(self.dma_handle + (offset * core::mem::size_of::<T>()) as DmaAddress)
+ }
+ }
+
+ /// Common helper to validate that a range, given in units of `T`, lies within the allocated
+ /// region in the CPU's virtual address space.
+ fn validate_range(&self, offset: usize, count: usize) -> Result {
+ if offset.checked_add(count).ok_or(EOVERFLOW)? > self.count {
+ return Err(EINVAL);
+ }
+ Ok(())
+ }
+
+ /// Returns the data from the region starting from `offset` as a slice.
+ /// `offset` and `count` are in units of `T`, not the number of bytes.
+ ///
+ /// For ringbuffer-style read/write access, or for use cases where a pointer to the live data
+ /// is needed, [`CoherentAllocation::start_ptr`] or [`CoherentAllocation::start_ptr_mut`] can
+ /// be used instead.
+ ///
+ /// # Safety
+ ///
+ /// * Callers must ensure that the device does not read/write to/from memory while the returned
+ /// slice is live.
+ /// * Callers must ensure that this call does not race with a write to the same region while
+ /// the returned slice is live.
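+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch, assuming the device is currently not accessing the region and no other
+ /// CPU access races with this call:
+ ///
+ /// ```
+ /// # fn test(alloc: &kernel::dma::CoherentAllocation<u8>) -> Result {
+ /// // SAFETY: There is no concurrent HW operation on the device and no other R/W access to
+ /// // the region.
+ /// let data = unsafe { alloc.as_slice(0, 4)? };
+ /// assert_eq!(data.len(), 4);
+ /// # Ok::<(), Error>(()) }
+ /// ```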
+ pub unsafe fn as_slice(&self, offset: usize, count: usize) -> Result<&[T]> {
+ self.validate_range(offset, count)?;
+ // SAFETY:
+ // - The pointer is valid due to the type invariant on `CoherentAllocation`,
+ // and we've just checked that the range is within bounds. The immutability of the
+ // data is also guaranteed by the safety requirements of the function.
+ // - `offset + count` can't overflow since it is smaller than `self.count` and we've checked
+ // that `self.count` won't overflow early in the constructor.
+ Ok(unsafe { core::slice::from_raw_parts(self.cpu_addr.add(offset), count) })
+ }
+
+ /// Performs the same functionality as [`CoherentAllocation::as_slice`], except that a mutable
+ /// slice is returned.
+ ///
+ /// # Safety
+ ///
+ /// * Callers must ensure that the device does not read/write to/from memory while the returned
+ /// slice is live.
+ /// * Callers must ensure that this call does not race with a read or write to the same region
+ /// while the returned slice is live.
+ pub unsafe fn as_slice_mut(&mut self, offset: usize, count: usize) -> Result<&mut [T]> {
+ self.validate_range(offset, count)?;
+ // SAFETY:
+ // - The pointer is valid due to the type invariant on `CoherentAllocation`,
+ // and we've just checked that the range is within bounds. Exclusive access to the
+ // data is also guaranteed by the safety requirements of the function.
+ // - `offset + count` can't overflow since it is smaller than `self.count` and we've checked
+ // that `self.count` won't overflow early in the constructor.
+ Ok(unsafe { core::slice::from_raw_parts_mut(self.cpu_addr.add(offset), count) })
+ }
+
+ /// Writes data to the region starting from `offset`. `offset` is in units of `T`, not the
+ /// number of bytes.
+ ///
+ /// # Safety
+ ///
+ /// * Callers must ensure that the device does not read/write to/from memory while this write
+ /// is in progress.
+ /// * Callers must ensure that this call does not race with a read or write to the same region
+ /// that overlaps with this write.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # fn test(alloc: &mut kernel::dma::CoherentAllocation<u8>) -> Result {
+ /// let somedata: [u8; 4] = [0xf; 4];
+ /// let buf: &[u8] = &somedata;
+ /// // SAFETY: There is no concurrent HW operation on the device and no other R/W access to the
+ /// // region.
+ /// unsafe { alloc.write(buf, 0)?; }
+ /// # Ok::<(), Error>(()) }
+ /// ```
+ pub unsafe fn write(&mut self, src: &[T], offset: usize) -> Result {
+ self.validate_range(offset, src.len())?;
+ // SAFETY:
+ // - The pointer is valid due to the type invariant on `CoherentAllocation`
+ // and we've just checked that the range is within bounds.
+ // - `offset + src.len()` can't overflow since it does not exceed `self.count` and we've checked
+ // that `self.count` won't overflow early in the constructor.
+ unsafe {
+ core::ptr::copy_nonoverlapping(src.as_ptr(), self.cpu_addr.add(offset), src.len())
+ };
+ Ok(())
+ }
+
/// Returns a pointer to an element from the region with bounds checking. `offset` is in
/// units of `T`, not the number of bytes.
///
@@ -293,7 +637,7 @@ impl<T: AsBytes + FromBytes> Drop for CoherentAllocation<T> {
bindings::dma_free_attrs(
self.dev.as_raw(),
size,
- self.cpu_addr as _,
+ self.cpu_addr.cast(),
self.dma_handle,
self.dma_attrs.as_raw(),
)
@@ -328,20 +672,24 @@ unsafe impl<T: AsBytes + FromBytes + Send> Send for CoherentAllocation<T> {}
#[macro_export]
macro_rules! dma_read {
($dma:expr, $idx: expr, $($field:tt)*) => {{
- let item = $crate::dma::CoherentAllocation::item_from_index(&$dma, $idx)?;
- // SAFETY: `item_from_index` ensures that `item` is always a valid pointer and can be
- // dereferenced. The compiler also further validates the expression on whether `field`
- // is a member of `item` when expanded by the macro.
- unsafe {
- let ptr_field = ::core::ptr::addr_of!((*item) $($field)*);
- $crate::dma::CoherentAllocation::field_read(&$dma, ptr_field)
- }
+ (|| -> ::core::result::Result<_, $crate::error::Error> {
+ let item = $crate::dma::CoherentAllocation::item_from_index(&$dma, $idx)?;
+ // SAFETY: `item_from_index` ensures that `item` is always a valid pointer and can be
+ // dereferenced. The compiler also further validates the expression on whether `field`
+ // is a member of `item` when expanded by the macro.
+ unsafe {
+ let ptr_field = ::core::ptr::addr_of!((*item) $($field)*);
+ ::core::result::Result::Ok(
+ $crate::dma::CoherentAllocation::field_read(&$dma, ptr_field)
+ )
+ }
+ })()
}};
($dma:ident [ $idx:expr ] $($field:tt)* ) => {
- $crate::dma_read!($dma, $idx, $($field)*);
+ $crate::dma_read!($dma, $idx, $($field)*)
};
($($dma:ident).* [ $idx:expr ] $($field:tt)* ) => {
- $crate::dma_read!($($dma).*, $idx, $($field)*);
+ $crate::dma_read!($($dma).*, $idx, $($field)*)
};
}
@@ -368,24 +716,30 @@ macro_rules! dma_read {
#[macro_export]
macro_rules! dma_write {
($dma:ident [ $idx:expr ] $($field:tt)*) => {{
- $crate::dma_write!($dma, $idx, $($field)*);
+ $crate::dma_write!($dma, $idx, $($field)*)
}};
($($dma:ident).* [ $idx:expr ] $($field:tt)* ) => {{
- $crate::dma_write!($($dma).*, $idx, $($field)*);
+ $crate::dma_write!($($dma).*, $idx, $($field)*)
}};
($dma:expr, $idx: expr, = $val:expr) => {
- let item = $crate::dma::CoherentAllocation::item_from_index(&$dma, $idx)?;
- // SAFETY: `item_from_index` ensures that `item` is always a valid item.
- unsafe { $crate::dma::CoherentAllocation::field_write(&$dma, item, $val) }
+ (|| -> ::core::result::Result<_, $crate::error::Error> {
+ let item = $crate::dma::CoherentAllocation::item_from_index(&$dma, $idx)?;
+ // SAFETY: `item_from_index` ensures that `item` is always a valid item.
+ unsafe { $crate::dma::CoherentAllocation::field_write(&$dma, item, $val) }
+ ::core::result::Result::Ok(())
+ })()
};
($dma:expr, $idx: expr, $(.$field:ident)* = $val:expr) => {
- let item = $crate::dma::CoherentAllocation::item_from_index(&$dma, $idx)?;
- // SAFETY: `item_from_index` ensures that `item` is always a valid pointer and can be
- // dereferenced. The compiler also further validates the expression on whether `field`
- // is a member of `item` when expanded by the macro.
- unsafe {
- let ptr_field = ::core::ptr::addr_of_mut!((*item) $(.$field)*);
- $crate::dma::CoherentAllocation::field_write(&$dma, ptr_field, $val)
- }
+ (|| -> ::core::result::Result<_, $crate::error::Error> {
+ let item = $crate::dma::CoherentAllocation::item_from_index(&$dma, $idx)?;
+ // SAFETY: `item_from_index` ensures that `item` is always a valid pointer and can be
+ // dereferenced. The compiler also further validates the expression on whether `field`
+ // is a member of `item` when expanded by the macro.
+ unsafe {
+ let ptr_field = ::core::ptr::addr_of_mut!((*item) $(.$field)*);
+ $crate::dma::CoherentAllocation::field_write(&$dma, ptr_field, $val)
+ }
+ ::core::result::Result::Ok(())
+ })()
};
}
diff --git a/rust/kernel/driver.rs b/rust/kernel/driver.rs
index ec9166cedfa7..279e3af20682 100644
--- a/rust/kernel/driver.rs
+++ b/rust/kernel/driver.rs
@@ -2,11 +2,96 @@
//! Generic support for drivers of different buses (e.g., PCI, Platform, Amba, etc.).
//!
-//! Each bus / subsystem is expected to implement [`RegistrationOps`], which allows drivers to
-//! register using the [`Registration`] class.
+//! This documentation describes how to implement a bus specific driver API and how to align it with
+//! the design of (bus specific) devices.
+//!
+//! Note: Readers are expected to be familiar with the documentation of [`Device`] and
+//! [`DeviceContext`].
+//!
+//! # Driver Trait
+//!
+//! The main driver interface is defined by a bus specific driver trait. For instance:
+//!
+//! ```ignore
+//! pub trait Driver: Send {
+//! /// The type holding information about each device ID supported by the driver.
+//! type IdInfo: 'static;
+//!
+//! /// The table of OF device ids supported by the driver.
+//! const OF_ID_TABLE: Option<of::IdTable<Self::IdInfo>> = None;
+//!
+//! /// The table of ACPI device ids supported by the driver.
+//! const ACPI_ID_TABLE: Option<acpi::IdTable<Self::IdInfo>> = None;
+//!
+//! /// Driver probe.
+//! fn probe(dev: &Device<device::Core>, id_info: &Self::IdInfo) -> Result<Pin<KBox<Self>>>;
+//!
+//! /// Driver unbind (optional).
+//! fn unbind(dev: &Device<device::Core>, this: Pin<&Self>) {
+//! let _ = (dev, this);
+//! }
+//! }
+//! ```
+//!
+//! For specific examples see [`auxiliary::Driver`], [`pci::Driver`] and [`platform::Driver`].
+//!
+//! The `probe()` callback should return a `Result<Pin<KBox<Self>>>`, i.e. the driver's private
+//! data. The bus abstraction should store the pointer in the corresponding bus device. The generic
+//! [`Device`] infrastructure provides common helpers for this purpose on its
+//! [`Device<CoreInternal>`] implementation.
+//!
+//! All driver callbacks should provide a reference to the driver's private data. Once the driver
+//! is unbound from the device, the bus abstraction should take back the ownership of the driver's
+//! private data from the corresponding [`Device`] and [`drop`] it.
+//!
+//! All driver callbacks should provide a [`Device<Core>`] reference (see also [`device::Core`]).
+//!
+//! # Adapter
+//!
+//! The adapter implementation of a bus represents the abstraction layer between the C bus
+//! callbacks and the Rust bus callbacks. It therefore has to be generic over an implementation of
+//! the [driver trait](#driver-trait).
+//!
+//! ```ignore
+//! pub struct Adapter<T: Driver>;
+//! ```
+//!
+//! There's a common [`Adapter`] trait that can be implemented to inherit common driver
+//! infrastructure, such as finding the ID info from an [`of::IdTable`] or [`acpi::IdTable`].
+//!
+//! # Driver Registration
+//!
+//! In order to register C driver types (such as `struct platform_driver`) the [adapter](#adapter)
+//! should implement the [`RegistrationOps`] trait.
+//!
+//! This trait implementation can be used to create the actual registration with the common
+//! [`Registration`] type.
+//!
+//! Typically, bus abstractions want to provide a bus specific `module_bus_driver!` macro, which
+//! creates a kernel module with exactly one [`Registration`] for the bus specific adapter.
+//!
+//! The generic driver infrastructure provides a helper for this with the [`module_driver`] macro.
+//!
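+//! A bus specific `module_bus_driver!` macro is typically a thin wrapper around
+//! [`module_driver`]; a minimal sketch, modeled on the platform bus, could look like this:
+//!
+//! ```ignore
+//! #[macro_export]
+//! macro_rules! module_platform_driver {
+//!     ($($f:tt)*) => {
+//!         $crate::module_driver!(<T>, $crate::platform::Adapter<T>, { $($f)* });
+//!     };
+//! }
+//! ```
+//!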
+//! # Device IDs
+//!
+//! Besides the common device ID types, such as [`of::DeviceId`] and [`acpi::DeviceId`], most buses
+//! may need to implement their own device ID types.
+//!
+//! For this purpose the generic infrastructure in [`device_id`] should be used.
+//!
+//! [`auxiliary::Driver`]: kernel::auxiliary::Driver
+//! [`Core`]: device::Core
+//! [`Device`]: device::Device
+//! [`Device<Core>`]: device::Device<device::Core>
+//! [`Device<CoreInternal>`]: device::Device<device::CoreInternal>
+//! [`DeviceContext`]: device::DeviceContext
+//! [`device_id`]: kernel::device_id
+//! [`module_driver`]: kernel::module_driver
+//! [`pci::Driver`]: kernel::pci::Driver
+//! [`platform::Driver`]: kernel::platform::Driver
use crate::error::{Error, Result};
-use crate::{device, of, str::CStr, try_pin_init, types::Opaque, ThisModule};
+use crate::{acpi, device, of, str::CStr, try_pin_init, types::Opaque, ThisModule};
use core::pin::Pin;
use pin_init::{pin_data, pinned_drop, PinInit};
@@ -141,36 +226,76 @@ pub trait Adapter {
/// The type holding driver private data about each device id supported by the driver.
type IdInfo: 'static;
+ /// The [`acpi::IdTable`] of the corresponding driver.
+ fn acpi_id_table() -> Option<acpi::IdTable<Self::IdInfo>>;
+
+ /// Returns the driver's private data from the matching entry in the [`acpi::IdTable`], if any.
+ ///
+ /// If this returns `None`, it means there is no match with an entry in the [`acpi::IdTable`].
+ fn acpi_id_info(dev: &device::Device) -> Option<&'static Self::IdInfo> {
+ #[cfg(not(CONFIG_ACPI))]
+ {
+ let _ = dev;
+ None
+ }
+
+ #[cfg(CONFIG_ACPI)]
+ {
+ let table = Self::acpi_id_table()?;
+
+ // SAFETY:
+ // - `table` has static lifetime, hence it's valid for read,
+ // - `dev` is guaranteed to be valid while it's alive, and so is `dev.as_raw()`.
+ let raw_id = unsafe { bindings::acpi_match_device(table.as_ptr(), dev.as_raw()) };
+
+ if raw_id.is_null() {
+ None
+ } else {
+ // SAFETY: `DeviceId` is a `#[repr(transparent)]` wrapper of `struct acpi_device_id`
+ // and does not add additional invariants, so it's safe to transmute.
+ let id = unsafe { &*raw_id.cast::<acpi::DeviceId>() };
+
+ Some(table.info(<acpi::DeviceId as crate::device_id::RawDeviceIdIndex>::index(id)))
+ }
+ }
+ }
+
/// The [`of::IdTable`] of the corresponding driver.
fn of_id_table() -> Option<of::IdTable<Self::IdInfo>>;
/// Returns the driver's private data from the matching entry in the [`of::IdTable`], if any.
///
/// If this returns `None`, it means there is no match with an entry in the [`of::IdTable`].
- #[cfg(CONFIG_OF)]
fn of_id_info(dev: &device::Device) -> Option<&'static Self::IdInfo> {
- let table = Self::of_id_table()?;
+ #[cfg(not(CONFIG_OF))]
+ {
+ let _ = dev;
+ None
+ }
- // SAFETY:
- // - `table` has static lifetime, hence it's valid for read,
- // - `dev` is guaranteed to be valid while it's alive, and so is `pdev.as_ref().as_raw()`.
- let raw_id = unsafe { bindings::of_match_device(table.as_ptr(), dev.as_raw()) };
+ #[cfg(CONFIG_OF)]
+ {
+ let table = Self::of_id_table()?;
- if raw_id.is_null() {
- None
- } else {
- // SAFETY: `DeviceId` is a `#[repr(transparent)` wrapper of `struct of_device_id` and
- // does not add additional invariants, so it's safe to transmute.
- let id = unsafe { &*raw_id.cast::<of::DeviceId>() };
+ // SAFETY:
+ // - `table` has static lifetime, hence it's valid for read,
+ // - `dev` is guaranteed to be valid while it's alive, and so is `dev.as_raw()`.
+ let raw_id = unsafe { bindings::of_match_device(table.as_ptr(), dev.as_raw()) };
- Some(table.info(<of::DeviceId as crate::device_id::RawDeviceId>::index(id)))
- }
- }
+ if raw_id.is_null() {
+ None
+ } else {
+ // SAFETY: `DeviceId` is a `#[repr(transparent)]` wrapper of `struct of_device_id`
+ // and does not add additional invariants, so it's safe to transmute.
+ let id = unsafe { &*raw_id.cast::<of::DeviceId>() };
- #[cfg(not(CONFIG_OF))]
- #[allow(missing_docs)]
- fn of_id_info(_dev: &device::Device) -> Option<&'static Self::IdInfo> {
- None
+ Some(
+ table.info(<of::DeviceId as crate::device_id::RawDeviceIdIndex>::index(
+ id,
+ )),
+ )
+ }
+ }
}
/// Returns the driver's private data from the matching entry of any of the ID tables, if any.
@@ -178,6 +303,11 @@ pub trait Adapter {
/// If this returns `None`, it means that there is no match in any of the ID tables directly
/// associated with a [`device::Device`].
fn id_info(dev: &device::Device) -> Option<&'static Self::IdInfo> {
+ let id = Self::acpi_id_info(dev);
+ if id.is_some() {
+ return id;
+ }
+
let id = Self::of_id_info(dev);
if id.is_some() {
return id;
diff --git a/rust/kernel/drm/device.rs b/rust/kernel/drm/device.rs
index 14c1aa402951..3ce8f62a0056 100644
--- a/rust/kernel/drm/device.rs
+++ b/rust/kernel/drm/device.rs
@@ -2,17 +2,19 @@
//! DRM device.
//!
-//! C header: [`include/linux/drm/drm_device.h`](srctree/include/linux/drm/drm_device.h)
+//! C header: [`include/drm/drm_device.h`](srctree/include/drm/drm_device.h)
use crate::{
+ alloc::allocator::Kmalloc,
bindings, device, drm,
drm::driver::AllocImpl,
error::from_err_ptr,
error::Result,
prelude::*,
- types::{ARef, AlwaysRefCounted, Opaque},
+ sync::aref::{ARef, AlwaysRefCounted},
+ types::Opaque,
};
-use core::{mem, ops::Deref, ptr, ptr::NonNull};
+use core::{alloc::Layout, mem, ops::Deref, ptr, ptr::NonNull};
#[cfg(CONFIG_DRM_LEGACY)]
macro_rules! drm_legacy_fields {
@@ -53,10 +55,8 @@ macro_rules! drm_legacy_fields {
///
/// `self.dev` is a valid instance of a `struct device`.
#[repr(C)]
-#[pin_data]
pub struct Device<T: drm::Driver> {
dev: Opaque<bindings::drm_device>,
- #[pin]
data: T::Data,
}
@@ -83,19 +83,23 @@ impl<T: drm::Driver> Device<T> {
major: T::INFO.major,
minor: T::INFO.minor,
patchlevel: T::INFO.patchlevel,
- name: T::INFO.name.as_char_ptr() as *mut _,
- desc: T::INFO.desc.as_char_ptr() as *mut _,
+ name: crate::str::as_char_ptr_in_const_context(T::INFO.name).cast_mut(),
+ desc: crate::str::as_char_ptr_in_const_context(T::INFO.desc).cast_mut(),
driver_features: drm::driver::FEAT_GEM,
ioctls: T::IOCTLS.as_ptr(),
num_ioctls: T::IOCTLS.len() as i32,
- fops: &Self::GEM_FOPS as _,
+ fops: &Self::GEM_FOPS,
};
const GEM_FOPS: bindings::file_operations = drm::gem::create_fops();
/// Create a new `drm::Device` for a `drm::Driver`.
pub fn new(dev: &device::Device, data: impl PinInit<T::Data, Error>) -> Result<ARef<Self>> {
+ // `__drm_dev_alloc` uses `kmalloc()` to allocate memory, hence ensure a `kmalloc()`
+ // compatible `Layout`.
+ let layout = Kmalloc::aligned_layout(Layout::new::<Self>());
+
// SAFETY:
// - `VTABLE`, as a `const` is pinned to the read-only section of the compilation,
// - `dev` is valid by its type invarants,
@@ -103,7 +107,7 @@ impl<T: drm::Driver> Device<T> {
bindings::__drm_dev_alloc(
dev.as_raw(),
&Self::VTABLE,
- mem::size_of::<Self>(),
+ layout.size(),
mem::offset_of!(Self, dev),
)
}
@@ -117,9 +121,13 @@ impl<T: drm::Driver> Device<T> {
// - `raw_data` is a valid pointer to uninitialized memory.
// - `raw_data` will not move until it is dropped.
unsafe { data.__pinned_init(raw_data) }.inspect_err(|_| {
- // SAFETY: `__drm_dev_alloc()` was successful, hence `raw_drm` must be valid and the
+ // SAFETY: `raw_drm` is a valid pointer to `Self`, given that `__drm_dev_alloc` was
+ // successful.
+ let drm_dev = unsafe { Self::into_drm_device(raw_drm) };
+
+ // SAFETY: `__drm_dev_alloc()` was successful, hence `drm_dev` must be valid and the
// refcount must be non-zero.
- unsafe { bindings::drm_dev_put(ptr::addr_of_mut!((*raw_drm.as_ptr()).dev).cast()) };
+ unsafe { bindings::drm_dev_put(drm_dev) };
})?;
// SAFETY: The reference count is one, and now we take ownership of that reference as a
@@ -135,11 +143,17 @@ impl<T: drm::Driver> Device<T> {
///
/// `ptr` must be a valid pointer to a `struct device` embedded in `Self`.
unsafe fn from_drm_device(ptr: *const bindings::drm_device) -> *mut Self {
- let ptr: *const Opaque<bindings::drm_device> = ptr.cast();
-
// SAFETY: By the safety requirements of this function `ptr` is a valid pointer to a
// `struct drm_device` embedded in `Self`.
- unsafe { crate::container_of!(ptr, Self, dev) }.cast_mut()
+ unsafe { crate::container_of!(Opaque::cast_from(ptr), Self, dev) }.cast_mut()
+ }
+
+ /// # Safety
+ ///
+ /// `ptr` must be a valid pointer to `Self`.
+ unsafe fn into_drm_device(ptr: NonNull<Self>) -> *mut bindings::drm_device {
+ // SAFETY: By the safety requirements of this function, `ptr` is a valid pointer to `Self`.
+ unsafe { &raw mut (*ptr.as_ptr()).dev }.cast()
}
/// Not intended to be called externally, except via declare_drm_ioctls!()
@@ -154,7 +168,7 @@ impl<T: drm::Driver> Device<T> {
/// Additionally, callers must ensure that the `struct device`, `ptr` is pointing to, is
/// embedded in `Self`.
#[doc(hidden)]
- pub unsafe fn as_ref<'a>(ptr: *const bindings::drm_device) -> &'a Self {
+ pub unsafe fn from_raw<'a>(ptr: *const bindings::drm_device) -> &'a Self {
// SAFETY: By the safety requirements of this function `ptr` is a valid pointer to a
// `struct drm_device` embedded in `Self`.
let ptr = unsafe { Self::from_drm_device(ptr) };
@@ -191,8 +205,11 @@ unsafe impl<T: drm::Driver> AlwaysRefCounted for Device<T> {
}
unsafe fn dec_ref(obj: NonNull<Self>) {
+ // SAFETY: `obj` is a valid pointer to `Self`.
+ let drm_dev = unsafe { Self::into_drm_device(obj) };
+
// SAFETY: The safety requirements guarantee that the refcount is non-zero.
- unsafe { bindings::drm_dev_put(obj.cast().as_ptr()) };
+ unsafe { bindings::drm_dev_put(drm_dev) };
}
}
@@ -200,7 +217,7 @@ impl<T: drm::Driver> AsRef<device::Device> for Device<T> {
fn as_ref(&self) -> &device::Device {
// SAFETY: `bindings::drm_device::dev` is valid as long as the DRM device itself is valid,
// which is guaranteed by the type invariant.
- unsafe { device::Device::as_ref((*self.as_raw()).dev) }
+ unsafe { device::Device::from_raw((*self.as_raw()).dev) }
}
}
diff --git a/rust/kernel/drm/driver.rs b/rust/kernel/drm/driver.rs
index af93d46d03d3..f30ee4c6245c 100644
--- a/rust/kernel/drm/driver.rs
+++ b/rust/kernel/drm/driver.rs
@@ -2,15 +2,13 @@
//! DRM driver core.
//!
-//! C header: [`include/linux/drm/drm_drv.h`](srctree/include/linux/drm/drm_drv.h)
+//! C header: [`include/drm/drm_drv.h`](srctree/include/drm/drm_drv.h)
use crate::{
- bindings, device,
- devres::Devres,
- drm,
+ bindings, device, devres, drm,
error::{to_result, Result},
prelude::*,
- types::ARef,
+ sync::aref::ARef,
};
use macros::vtable;
@@ -88,6 +86,9 @@ pub struct AllocOps {
/// Trait for memory manager implementations. Implemented internally.
pub trait AllocImpl: super::private::Sealed + drm::gem::IntoGEMObject {
+ /// The [`Driver`] implementation for this [`AllocImpl`].
+ type Driver: drm::Driver;
+
/// The C callback operations for this memory manager.
const ALLOC_OPS: AllocOps;
}
@@ -129,18 +130,22 @@ impl<T: Driver> Registration<T> {
}
/// Same as [`Registration::new`}, but transfers ownership of the [`Registration`] to
- /// [`Devres`].
+ /// [`devres::register`].
pub fn new_foreign_owned(
drm: &drm::Device<T>,
dev: &device::Device<device::Bound>,
flags: usize,
- ) -> Result {
+ ) -> Result
+ where
+ T: 'static,
+ {
if drm.as_ref().as_raw() != dev.as_raw() {
return Err(EINVAL);
}
let reg = Registration::<T>::new(drm, flags)?;
- Devres::new_foreign_owned(dev, reg, GFP_KERNEL)
+
+ devres::register(dev, reg, GFP_KERNEL)
}
/// Returns a reference to the `Device` instance for this registration.
diff --git a/rust/kernel/drm/file.rs b/rust/kernel/drm/file.rs
index b9527705e551..8c46f8d51951 100644
--- a/rust/kernel/drm/file.rs
+++ b/rust/kernel/drm/file.rs
@@ -2,7 +2,7 @@
//! DRM File objects.
//!
-//! C header: [`include/linux/drm/drm_file.h`](srctree/include/linux/drm/drm_file.h)
+//! C header: [`include/drm/drm_file.h`](srctree/include/drm/drm_file.h)
use crate::{bindings, drm, error::Result, prelude::*, types::Opaque};
use core::marker::PhantomData;
@@ -32,7 +32,7 @@ impl<T: DriverFile> File<T> {
/// # Safety
///
/// `raw_file` must be a valid pointer to an open `struct drm_file`, opened through `T::open`.
- pub unsafe fn as_ref<'a>(ptr: *mut bindings::drm_file) -> &'a File<T> {
+ pub unsafe fn from_raw<'a>(ptr: *mut bindings::drm_file) -> &'a File<T> {
// SAFETY: `raw_file` is valid by the safety requirements of this function.
unsafe { &*ptr.cast() }
}
@@ -61,10 +61,10 @@ impl<T: DriverFile> File<T> {
// SAFETY: A callback from `struct drm_driver::open` guarantees that
// - `raw_dev` is valid pointer to a `struct drm_device`,
// - the corresponding `struct drm_device` has been registered.
- let drm = unsafe { drm::Device::as_ref(raw_dev) };
+ let drm = unsafe { drm::Device::from_raw(raw_dev) };
// SAFETY: `raw_file` is a valid pointer to a `struct drm_file`.
- let file = unsafe { File::<T>::as_ref(raw_file) };
+ let file = unsafe { File::<T>::from_raw(raw_file) };
let inner = match T::open(drm) {
Err(e) => {
@@ -89,7 +89,7 @@ impl<T: DriverFile> File<T> {
raw_file: *mut bindings::drm_file,
) {
// SAFETY: This reference won't escape this function
- let file = unsafe { File::<T>::as_ref(raw_file) };
+ let file = unsafe { File::<T>::from_raw(raw_file) };
// SAFETY: `file.driver_priv` has been created in `open_callback` through `KBox::into_raw`.
let _ = unsafe { KBox::from_raw(file.driver_priv()) };
diff --git a/rust/kernel/drm/gem/mod.rs b/rust/kernel/drm/gem/mod.rs
index 4cd69fa84318..30c853988b94 100644
--- a/rust/kernel/drm/gem/mod.rs
+++ b/rust/kernel/drm/gem/mod.rs
@@ -2,7 +2,7 @@
//! DRM GEM API
//!
-//! C header: [`include/linux/drm/drm_gem.h`](srctree/include/linux/drm/drm_gem.h)
+//! C header: [`include/drm/drm_gem.h`](srctree/include/drm/drm_gem.h)
use crate::{
alloc::flags::*,
@@ -10,36 +10,37 @@ use crate::{
drm::driver::{AllocImpl, AllocOps},
error::{to_result, Result},
prelude::*,
- types::{ARef, AlwaysRefCounted, Opaque},
+ sync::aref::{ARef, AlwaysRefCounted},
+ types::Opaque,
};
-use core::{mem, ops::Deref, ptr::NonNull};
+use core::{ops::Deref, ptr::NonNull};
+
+/// A type alias for retrieving a [`Driver`]'s [`DriverFile`] implementation from its
+/// [`DriverObject`] implementation.
+///
+/// [`Driver`]: drm::Driver
+/// [`DriverFile`]: drm::file::DriverFile
+pub type DriverFile<T> = drm::File<<<T as DriverObject>::Driver as drm::Driver>::File>;
/// GEM object functions, which must be implemented by drivers.
-pub trait BaseDriverObject<T: BaseObject>: Sync + Send + Sized {
+pub trait DriverObject: Sync + Send + Sized {
+ /// Parent `Driver` for this object.
+ type Driver: drm::Driver;
+
/// Create a new driver data object for a GEM object of a given size.
- fn new(dev: &drm::Device<T::Driver>, size: usize) -> impl PinInit<Self, Error>;
+ fn new(dev: &drm::Device<Self::Driver>, size: usize) -> impl PinInit<Self, Error>;
/// Open a new handle to an existing object, associated with a File.
- fn open(
- _obj: &<<T as IntoGEMObject>::Driver as drm::Driver>::Object,
- _file: &drm::File<<<T as IntoGEMObject>::Driver as drm::Driver>::File>,
- ) -> Result {
+ fn open(_obj: &<Self::Driver as drm::Driver>::Object, _file: &DriverFile<Self>) -> Result {
Ok(())
}
/// Close a handle to an existing object, associated with a File.
- fn close(
- _obj: &<<T as IntoGEMObject>::Driver as drm::Driver>::Object,
- _file: &drm::File<<<T as IntoGEMObject>::Driver as drm::Driver>::File>,
- ) {
- }
+ fn close(_obj: &<Self::Driver as drm::Driver>::Object, _file: &DriverFile<Self>) {}
}
/// Trait that represents a GEM object subtype
pub trait IntoGEMObject: Sized + super::private::Sealed + AlwaysRefCounted {
- /// Owning driver for this type
- type Driver: drm::Driver;
-
/// Returns a reference to the raw `drm_gem_object` structure, which must be valid as long as
/// this owning object is valid.
fn as_raw(&self) -> *mut bindings::drm_gem_object;
@@ -51,7 +52,7 @@ pub trait IntoGEMObject: Sized + super::private::Sealed + AlwaysRefCounted {
/// - `self_ptr` must be a valid pointer to `Self`.
/// - The caller promises that holding the immutable reference returned by this function does
/// not violate rust's data aliasing rules and remains valid throughout the lifetime of `'a`.
- unsafe fn as_ref<'a>(self_ptr: *mut bindings::drm_gem_object) -> &'a Self;
+ unsafe fn from_raw<'a>(self_ptr: *mut bindings::drm_gem_object) -> &'a Self;
}
// SAFETY: All gem objects are refcounted.
@@ -74,25 +75,16 @@ unsafe impl<T: IntoGEMObject> AlwaysRefCounted for T {
}
}
-/// Trait which must be implemented by drivers using base GEM objects.
-pub trait DriverObject: BaseDriverObject<Object<Self>> {
- /// Parent `Driver` for this object.
- type Driver: drm::Driver;
-}
-
-extern "C" fn open_callback<T: BaseDriverObject<U>, U: BaseObject>(
+extern "C" fn open_callback<T: DriverObject>(
raw_obj: *mut bindings::drm_gem_object,
raw_file: *mut bindings::drm_file,
) -> core::ffi::c_int {
// SAFETY: `open_callback` is only ever called with a valid pointer to a `struct drm_file`.
- let file = unsafe {
- drm::File::<<<U as IntoGEMObject>::Driver as drm::Driver>::File>::as_ref(raw_file)
- };
- // SAFETY: `open_callback` is specified in the AllocOps structure for `Object<T>`, ensuring that
- // `raw_obj` is indeed contained within a `Object<T>`.
- let obj = unsafe {
- <<<U as IntoGEMObject>::Driver as drm::Driver>::Object as IntoGEMObject>::as_ref(raw_obj)
- };
+ let file = unsafe { DriverFile::<T>::from_raw(raw_file) };
+
+ // SAFETY: `open_callback` is specified in the AllocOps structure for `Object<T>`, ensuring
+ // that `raw_obj` is indeed contained within an `Object<T>`.
+ let obj = unsafe { <<T::Driver as drm::Driver>::Object as IntoGEMObject>::from_raw(raw_obj) };
match T::open(obj, file) {
Err(e) => e.to_errno(),
@@ -100,36 +92,29 @@ extern "C" fn open_callback<T: BaseDriverObject<U>, U: BaseObject>(
}
}
-extern "C" fn close_callback<T: BaseDriverObject<U>, U: BaseObject>(
+extern "C" fn close_callback<T: DriverObject>(
raw_obj: *mut bindings::drm_gem_object,
raw_file: *mut bindings::drm_file,
) {
// SAFETY: `open_callback` is only ever called with a valid pointer to a `struct drm_file`.
- let file = unsafe {
- drm::File::<<<U as IntoGEMObject>::Driver as drm::Driver>::File>::as_ref(raw_file)
- };
+ let file = unsafe { DriverFile::<T>::from_raw(raw_file) };
+
// SAFETY: `close_callback` is specified in the AllocOps structure for `Object<T>`, ensuring
// that `raw_obj` is indeed contained within a `Object<T>`.
- let obj = unsafe {
- <<<U as IntoGEMObject>::Driver as drm::Driver>::Object as IntoGEMObject>::as_ref(raw_obj)
- };
+ let obj = unsafe { <<T::Driver as drm::Driver>::Object as IntoGEMObject>::from_raw(raw_obj) };
T::close(obj, file);
}
impl<T: DriverObject> IntoGEMObject for Object<T> {
- type Driver = T::Driver;
-
fn as_raw(&self) -> *mut bindings::drm_gem_object {
self.obj.get()
}
- unsafe fn as_ref<'a>(self_ptr: *mut bindings::drm_gem_object) -> &'a Self {
- let self_ptr: *mut Opaque<bindings::drm_gem_object> = self_ptr.cast();
-
+ unsafe fn from_raw<'a>(self_ptr: *mut bindings::drm_gem_object) -> &'a Self {
// SAFETY: `obj` is guaranteed to be in an `Object<T>` via the safety contract of this
// function
- unsafe { &*crate::container_of!(self_ptr, Object<T>, obj) }
+ unsafe { &*crate::container_of!(Opaque::cast_from(self_ptr), Object<T>, obj) }
}
}
@@ -143,10 +128,12 @@ pub trait BaseObject: IntoGEMObject {
/// Creates a new handle for the object associated with a given `File`
/// (or returns an existing one).
- fn create_handle(
- &self,
- file: &drm::File<<<Self as IntoGEMObject>::Driver as drm::Driver>::File>,
- ) -> Result<u32> {
+ fn create_handle<D, F>(&self, file: &drm::File<F>) -> Result<u32>
+ where
+ Self: AllocImpl<Driver = D>,
+ D: drm::Driver<Object = Self, File = F>,
+ F: drm::file::DriverFile<Driver = D>,
+ {
let mut handle: u32 = 0;
// SAFETY: The arguments are all valid per the type invariants.
to_result(unsafe {
@@ -156,10 +143,12 @@ pub trait BaseObject: IntoGEMObject {
}
/// Looks up an object by its handle for a given `File`.
- fn lookup_handle(
- file: &drm::File<<<Self as IntoGEMObject>::Driver as drm::Driver>::File>,
- handle: u32,
- ) -> Result<ARef<Self>> {
+ fn lookup_handle<D, F>(file: &drm::File<F>, handle: u32) -> Result<ARef<Self>>
+ where
+ Self: AllocImpl<Driver = D>,
+ D: drm::Driver<Object = Self, File = F>,
+ F: drm::file::DriverFile<Driver = D>,
+ {
// SAFETY: The arguments are all valid per the type invariants.
let ptr = unsafe { bindings::drm_gem_object_lookup(file.as_raw().cast(), handle) };
if ptr.is_null() {
@@ -170,9 +159,9 @@ pub trait BaseObject: IntoGEMObject {
// - A `drm::Driver` can only have a single `File` implementation.
// - `file` uses the same `drm::Driver` as `Self`.
// - Therefore, we're guaranteed that `ptr` must be a gem object embedded within `Self`.
- // - And we check if the pointer is null befoe calling as_ref(), ensuring that `ptr` is a
+ // - And we check if the pointer is null before calling from_raw(), ensuring that `ptr` is a
// valid pointer to an initialized `Self`.
- let obj = unsafe { Self::as_ref(ptr) };
+ let obj = unsafe { Self::from_raw(ptr) };
// SAFETY:
// - We take ownership of the reference of `drm_gem_object_lookup()`.
@@ -209,13 +198,10 @@ pub struct Object<T: DriverObject + Send + Sync> {
}
impl<T: DriverObject> Object<T> {
- /// The size of this object's structure.
- pub const SIZE: usize = mem::size_of::<Self>();
-
const OBJECT_FUNCS: bindings::drm_gem_object_funcs = bindings::drm_gem_object_funcs {
free: Some(Self::free_callback),
- open: Some(open_callback::<T, Object<T>>),
- close: Some(close_callback::<T, Object<T>>),
+ open: Some(open_callback::<T>),
+ close: Some(close_callback::<T>),
print_info: None,
export: None,
pin: None,
@@ -298,6 +284,8 @@ impl<T: DriverObject> Deref for Object<T> {
}
impl<T: DriverObject> AllocImpl for Object<T> {
+ type Driver = T::Driver;
+
const ALLOC_OPS: AllocOps = AllocOps {
gem_create_object: None,
prime_handle_to_fd: None,
diff --git a/rust/kernel/drm/ioctl.rs b/rust/kernel/drm/ioctl.rs
index 445639404fb7..69efbdb4c85a 100644
--- a/rust/kernel/drm/ioctl.rs
+++ b/rust/kernel/drm/ioctl.rs
@@ -2,7 +2,7 @@
//! DRM IOCTL definitions.
//!
-//! C header: [`include/linux/drm/drm_ioctl.h`](srctree/include/linux/drm/drm_ioctl.h)
+//! C header: [`include/drm/drm_ioctl.h`](srctree/include/drm/drm_ioctl.h)
use crate::ioctl;
@@ -83,7 +83,7 @@ pub mod internal {
///
/// ```ignore
/// fn foo(device: &kernel::drm::Device<Self>,
-/// data: &Opaque<uapi::argument_type>,
+/// data: &mut uapi::argument_type,
/// file: &kernel::drm::File<Self::File>,
/// ) -> Result<u32>
/// ```
@@ -134,15 +134,18 @@ macro_rules! declare_drm_ioctls {
// FIXME: Currently there is nothing enforcing that the types of the
// dev/file match the current driver these ioctls are being declared
// for, and it's not clear how to enforce this within the type system.
- let dev = $crate::drm::device::Device::as_ref(raw_dev);
+ let dev = $crate::drm::device::Device::from_raw(raw_dev);
// SAFETY: The ioctl argument has size `_IOC_SIZE(cmd)`, which we
// asserted above matches the size of this type, and all bit patterns of
// UAPI structs must be valid.
- let data = unsafe {
- &*(raw_data as *const $crate::types::Opaque<$crate::uapi::$struct>)
- };
+ // The `ioctl` argument is exclusively owned by the handler
+ // and guaranteed by the C implementation (`drm_ioctl()`) to remain
+ // valid for the entire lifetime of the reference taken here.
+ // There is no concurrent access or aliasing; no other references
+ // to this object exist during this call.
+ let data = unsafe { &mut *(raw_data.cast::<$crate::uapi::$struct>()) };
// SAFETY: This is just the DRM file structure
- let file = unsafe { $crate::drm::File::as_ref(raw_file) };
+ let file = unsafe { $crate::drm::File::from_raw(raw_file) };
match $func(dev, data, file) {
Err(e) => e.to_errno(),
diff --git a/rust/kernel/error.rs b/rust/kernel/error.rs
index 3dee3139fcd4..1c0e0e241daa 100644
--- a/rust/kernel/error.rs
+++ b/rust/kernel/error.rs
@@ -2,14 +2,16 @@
//! Kernel errors.
//!
-//! C header: [`include/uapi/asm-generic/errno-base.h`](srctree/include/uapi/asm-generic/errno-base.h)
+//! C header: [`include/uapi/asm-generic/errno-base.h`](srctree/include/uapi/asm-generic/errno-base.h)\
+//! C header: [`include/uapi/asm-generic/errno.h`](srctree/include/uapi/asm-generic/errno.h)\
+//! C header: [`include/linux/errno.h`](srctree/include/linux/errno.h)
use crate::{
alloc::{layout::LayoutError, AllocError},
+ fmt,
str::CStr,
};
-use core::fmt;
use core::num::NonZeroI32;
use core::num::TryFromIntError;
use core::str::Utf8Error;
@@ -65,6 +67,7 @@ pub mod code {
declare_err!(EDOM, "Math argument out of domain of func.");
declare_err!(ERANGE, "Math result not representable.");
declare_err!(EOVERFLOW, "Value too large for defined data type.");
+ declare_err!(ETIMEDOUT, "Connection timed out.");
declare_err!(ERESTARTSYS, "Restart the system call.");
declare_err!(ERESTARTNOINTR, "System call was interrupted by a signal and will be restarted.");
declare_err!(ERESTARTNOHAND, "Restart if no handler.");
@@ -100,8 +103,23 @@ pub struct Error(NonZeroI32);
impl Error {
/// Creates an [`Error`] from a kernel error code.
///
- /// It is a bug to pass an out-of-range `errno`. `EINVAL` would
- /// be returned in such a case.
+ /// `errno` must be within the error code range (i.e. `>= -MAX_ERRNO && < 0`).
+ ///
+ /// It is a bug to pass an out-of-range `errno`. [`code::EINVAL`] is returned in such a case.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!(Error::from_errno(-1), EPERM);
+ /// assert_eq!(Error::from_errno(-2), ENOENT);
+ /// ```
+ ///
+ /// The following calls are considered a bug:
+ ///
+ /// ```
+ /// assert_eq!(Error::from_errno(0), EINVAL);
+ /// assert_eq!(Error::from_errno(-1000000), EINVAL);
+ /// ```
pub fn from_errno(errno: crate::ffi::c_int) -> Error {
if let Some(error) = Self::try_from_errno(errno) {
error
@@ -153,11 +171,11 @@ impl Error {
/// Returns the error encoded as a pointer.
pub fn to_ptr<T>(self) -> *mut T {
// SAFETY: `self.0` is a valid error due to its invariant.
- unsafe { bindings::ERR_PTR(self.0.get() as _) as *mut _ }
+ unsafe { bindings::ERR_PTR(self.0.get() as crate::ffi::c_long).cast() }
}
/// Returns a string representing the error, if one exists.
- #[cfg(not(any(test, testlib)))]
+ #[cfg(not(testlib))]
pub fn name(&self) -> Option<&'static CStr> {
// SAFETY: Just an FFI call, there are no extra safety requirements.
let ptr = unsafe { bindings::errname(-self.0.get()) };
@@ -174,7 +192,7 @@ impl Error {
/// When `testlib` is configured, this always returns `None` to avoid the dependency on a
/// kernel function so that tests that use this (e.g., by calling [`Result::unwrap`]) can still
/// run in userspace.
- #[cfg(any(test, testlib))]
+ #[cfg(testlib)]
pub fn name(&self) -> Option<&'static CStr> {
None
}
@@ -188,7 +206,7 @@ impl fmt::Debug for Error {
Some(name) => f
.debug_tuple(
// SAFETY: These strings are ASCII-only.
- unsafe { core::str::from_utf8_unchecked(name) },
+ unsafe { core::str::from_utf8_unchecked(name.to_bytes()) },
)
.finish(),
}
@@ -219,8 +237,8 @@ impl From<LayoutError> for Error {
}
}
-impl From<core::fmt::Error> for Error {
- fn from(_: core::fmt::Error) -> Error {
+impl From<fmt::Error> for Error {
+ fn from(_: fmt::Error) -> Error {
code::EINVAL
}
}
@@ -374,8 +392,43 @@ impl From<core::convert::Infallible> for Error {
/// [Rust documentation]: https://doc.rust-lang.org/book/ch09-02-recoverable-errors-with-result.html
pub type Result<T = (), E = Error> = core::result::Result<T, E>;
-/// Converts an integer as returned by a C kernel function to an error if it's negative, and
-/// `Ok(())` otherwise.
+/// Converts an integer as returned by a C kernel function to a [`Result`].
+///
+/// If the integer is negative, an [`Err`] with an [`Error`] as given by [`Error::from_errno`] is
+/// returned. This means the integer must be `>= -MAX_ERRNO`.
+///
+/// Otherwise, it returns [`Ok`].
+///
+/// It is a bug to pass an out-of-range negative integer. `Err(EINVAL)` is returned in such a case.
+///
+/// # Examples
+///
+/// This function may be used to easily perform early returns with the [`?`] operator when working
+/// with C APIs within Rust abstractions:
+///
+/// ```
+/// # use kernel::error::to_result;
+/// # mod bindings {
+/// # #![expect(clippy::missing_safety_doc)]
+/// # use kernel::prelude::*;
+/// # pub(super) unsafe fn f1() -> c_int { 0 }
+/// # pub(super) unsafe fn f2() -> c_int { EINVAL.to_errno() }
+/// # }
+/// fn f() -> Result {
+/// // SAFETY: ...
+/// to_result(unsafe { bindings::f1() })?;
+///
+/// // SAFETY: ...
+/// to_result(unsafe { bindings::f2() })?;
+///
+/// // ...
+///
+/// Ok(())
+/// }
+/// # assert_eq!(f(), Err(EINVAL));
+/// ```
+///
+/// [`?`]: https://doc.rust-lang.org/reference/expressions/operator-expr.html#the-question-mark-operator
pub fn to_result(err: crate::ffi::c_int) -> Result {
if err < 0 {
Err(Error::from_errno(err))
diff --git a/rust/kernel/faux.rs b/rust/kernel/faux.rs
index 8a50fcd4c9bb..7fe2dd197e37 100644
--- a/rust/kernel/faux.rs
+++ b/rust/kernel/faux.rs
@@ -4,7 +4,7 @@
//!
//! This module provides bindings for working with faux devices in kernel modules.
//!
-//! C header: [`include/linux/device/faux.h`]
+//! C header: [`include/linux/device/faux.h`](srctree/include/linux/device/faux.h)
use crate::{bindings, device, error::code::*, prelude::*};
use core::ptr::{addr_of_mut, null, null_mut, NonNull};
@@ -54,7 +54,7 @@ impl AsRef<device::Device> for Registration {
fn as_ref(&self) -> &device::Device {
// SAFETY: The underlying `device` in `faux_device` is guaranteed by the C API to be
// a valid initialized `device`.
- unsafe { device::Device::as_ref(addr_of_mut!((*self.as_raw()).dev)) }
+ unsafe { device::Device::from_raw(addr_of_mut!((*self.as_raw()).dev)) }
}
}
diff --git a/rust/kernel/firmware.rs b/rust/kernel/firmware.rs
index 4fe621f35716..94e6bb88b903 100644
--- a/rust/kernel/firmware.rs
+++ b/rust/kernel/firmware.rs
@@ -62,10 +62,11 @@ impl Firmware {
fn request_internal(name: &CStr, dev: &Device, func: FwFunc) -> Result<Self> {
let mut fw: *mut bindings::firmware = core::ptr::null_mut();
let pfw: *mut *mut bindings::firmware = &mut fw;
+ let pfw: *mut *const bindings::firmware = pfw.cast();
// SAFETY: `pfw` is a valid pointer to a NULL initialized `bindings::firmware` pointer.
// `name` and `dev` are valid as by their type invariants.
- let ret = unsafe { func.0(pfw as _, name.as_char_ptr(), dev.as_raw()) };
+ let ret = unsafe { func.0(pfw, name.as_char_ptr(), dev.as_raw()) };
if ret != 0 {
return Err(Error::from_errno(ret));
}
@@ -139,7 +140,7 @@ unsafe impl Sync for Firmware {}
/// Typically, such contracts would be enforced by a trait, however traits do not (yet) support
/// const functions.
///
-/// # Example
+/// # Examples
///
/// ```
/// # mod module_firmware_test {
@@ -181,7 +182,7 @@ unsafe impl Sync for Firmware {}
/// module! {
/// type: MyModule,
/// name: "module_firmware_test",
-/// author: "Rust for Linux",
+/// authors: ["Rust for Linux"],
/// description: "module_firmware! test module",
/// license: "GPL",
/// }
@@ -261,7 +262,7 @@ impl<const N: usize> ModInfoBuilder<N> {
/// Append path components to the [`ModInfoBuilder`] instance. Paths need to be separated
/// with [`ModInfoBuilder::new_entry`].
///
- /// # Example
+ /// # Examples
///
/// ```
/// use kernel::firmware::ModInfoBuilder;
@@ -290,7 +291,7 @@ impl<const N: usize> ModInfoBuilder<N> {
let module_name = this.module_name;
if !this.module_name.is_empty() {
- this = this.push_internal(module_name.as_bytes_with_nul());
+ this = this.push_internal(module_name.to_bytes_with_nul());
if N != 0 {
// Re-use the space taken by the NULL terminator and swap it with the '.' separator.
diff --git a/rust/kernel/fmt.rs b/rust/kernel/fmt.rs
new file mode 100644
index 000000000000..0306e8388968
--- /dev/null
+++ b/rust/kernel/fmt.rs
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Formatting utilities.
+//!
+//! This module is intended to be used in place of `core::fmt` in kernel code.
+
+pub use core::fmt::{Arguments, Debug, Display, Error, Formatter, Result, Write};
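For illustration, a minimal sketch of the intended usage, with a hypothetical `Foo` type: the `core::fmt` items are reached through `kernel::fmt` rather than `core::fmt` directly.

```rust
use kernel::fmt;

// Hypothetical type; shown only to illustrate the re-exports.
struct Foo(u32);

impl fmt::Debug for Foo {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // `Debug`, `Formatter` and `Result` all resolve to the
        // `core::fmt` items re-exported by `kernel::fmt`.
        write!(f, "Foo({})", self.0)
    }
}
```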
diff --git a/rust/kernel/fs.rs b/rust/kernel/fs.rs
index 0121b38c59e6..6ba6bdf143cb 100644
--- a/rust/kernel/fs.rs
+++ b/rust/kernel/fs.rs
@@ -6,3 +6,6 @@
pub mod file;
pub use self::file::{File, LocalFile};
+
+mod kiocb;
+pub use self::kiocb::Kiocb;
diff --git a/rust/kernel/fs/file.rs b/rust/kernel/fs/file.rs
index 72d84fb0e266..cf06e73a6da0 100644
--- a/rust/kernel/fs/file.rs
+++ b/rust/kernel/fs/file.rs
@@ -10,8 +10,10 @@
use crate::{
bindings,
cred::Credential,
- error::{code::*, Error, Result},
- types::{ARef, AlwaysRefCounted, NotThreadSafe, Opaque},
+ error::{code::*, to_result, Error, Result},
+ fmt,
+ sync::aref::{ARef, AlwaysRefCounted},
+ types::{NotThreadSafe, Opaque},
};
use core::ptr;
@@ -366,7 +368,7 @@ impl core::ops::Deref for File {
//
// By the type invariants, there are no `fdget_pos` calls that did not take the
// `f_pos_lock` mutex.
- unsafe { LocalFile::from_raw_file(self as *const File as *const bindings::file) }
+ unsafe { LocalFile::from_raw_file(core::ptr::from_ref(self).cast()) }
}
}
@@ -398,9 +400,8 @@ impl FileDescriptorReservation {
pub fn get_unused_fd_flags(flags: u32) -> Result<Self> {
// SAFETY: FFI call, there are no safety requirements on `flags`.
let fd: i32 = unsafe { bindings::get_unused_fd_flags(flags) };
- if fd < 0 {
- return Err(Error::from_errno(fd));
- }
+ to_result(fd)?;
+
Ok(Self {
fd: fd as u32,
_not_send: NotThreadSafe,
@@ -460,8 +461,8 @@ impl From<BadFdError> for Error {
}
}
-impl core::fmt::Debug for BadFdError {
- fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+impl fmt::Debug for BadFdError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.pad("EBADF")
}
}
diff --git a/rust/kernel/fs/kiocb.rs b/rust/kernel/fs/kiocb.rs
new file mode 100644
index 000000000000..84c936cd69b0
--- /dev/null
+++ b/rust/kernel/fs/kiocb.rs
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2024 Google LLC.
+
+//! Kernel IO callbacks.
+//!
+//! C headers: [`include/linux/fs.h`](srctree/include/linux/fs.h)
+
+use core::marker::PhantomData;
+use core::ptr::NonNull;
+use kernel::types::ForeignOwnable;
+
+/// Wrapper for the kernel's `struct kiocb`.
+///
+/// Currently this abstraction is incomplete; it is essentially just a tuple containing a
+/// reference to a file and a file position.
+///
+/// The type `T` represents the filesystem or driver specific data associated with the file.
+///
+/// # Invariants
+///
+/// `inner` points at a valid `struct kiocb` whose file has the type `T` as its private data.
+pub struct Kiocb<'a, T> {
+ inner: NonNull<bindings::kiocb>,
+ _phantom: PhantomData<&'a T>,
+}
+
+impl<'a, T: ForeignOwnable> Kiocb<'a, T> {
+ /// Create a `Kiocb` from a raw pointer.
+ ///
+ /// # Safety
+ ///
+ /// The pointer must reference a valid `struct kiocb` for the duration of `'a`. The private
+ /// data of the file must be `T`.
+ pub unsafe fn from_raw(kiocb: *mut bindings::kiocb) -> Self {
+ Self {
+ // SAFETY: If a pointer is valid it is not null.
+ inner: unsafe { NonNull::new_unchecked(kiocb) },
+ _phantom: PhantomData,
+ }
+ }
+
+ /// Access the underlying `struct kiocb` directly.
+ pub fn as_raw(&self) -> *mut bindings::kiocb {
+ self.inner.as_ptr()
+ }
+
+ /// Get the filesystem or driver specific data associated with the file.
+ pub fn file(&self) -> <T as ForeignOwnable>::Borrowed<'a> {
+ // SAFETY: We have shared access to this kiocb and hence the underlying file, so we can
+ // read the file's private data.
+ let private = unsafe { (*(*self.as_raw()).ki_filp).private_data };
+ // SAFETY: The kiocb has shared access to the private data.
+ unsafe { <T as ForeignOwnable>::borrow(private) }
+ }
+
+ /// Gets the current value of `ki_pos`.
+ pub fn ki_pos(&self) -> i64 {
+ // SAFETY: We have shared access to the kiocb, so we can read its `ki_pos` field.
+ unsafe { (*self.as_raw()).ki_pos }
+ }
+
+ /// Gets a mutable reference to the `ki_pos` field.
+ pub fn ki_pos_mut(&mut self) -> &mut i64 {
+ // SAFETY: We have exclusive access to the kiocb, so we can write to `ki_pos`.
+ unsafe { &mut (*self.as_raw()).ki_pos }
+ }
+}
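A minimal sketch of how a caller might use `Kiocb` (the `MyData` type and `consume` helper are hypothetical; real users would obtain the `Kiocb` from a VFS callback, and the sketch assumes the file's private data is stored as a `KBox`):

```rust
use kernel::fs::Kiocb;
use kernel::prelude::*;

// Hypothetical per-file private data.
struct MyData {
    id: u32,
}

// Borrow the file's private data and advance the file position by the
// number of bytes consumed.
fn consume(kiocb: &mut Kiocb<'_, KBox<MyData>>, len: i64) -> Result {
    let data = kiocb.file();
    pr_debug!("IO on file with id {}\n", data.id);
    *kiocb.ki_pos_mut() += len;
    Ok(())
}
```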
diff --git a/rust/kernel/generated_arch_reachable_asm.rs.S b/rust/kernel/generated_arch_reachable_asm.rs.S
new file mode 100644
index 000000000000..3886a9ad3a99
--- /dev/null
+++ b/rust/kernel/generated_arch_reachable_asm.rs.S
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/bug.h>
+
+// Cut here.
+
+::kernel::concat_literals!(ARCH_WARN_REACHABLE)
diff --git a/rust/kernel/generated_arch_warn_asm.rs.S b/rust/kernel/generated_arch_warn_asm.rs.S
new file mode 100644
index 000000000000..409eb4c2d3a1
--- /dev/null
+++ b/rust/kernel/generated_arch_warn_asm.rs.S
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/bug.h>
+
+// Cut here.
+
+::kernel::concat_literals!(ARCH_WARN_ASM("{file}", "{line}", "{flags}", "{size}"))
diff --git a/rust/kernel/id_pool.rs b/rust/kernel/id_pool.rs
new file mode 100644
index 000000000000..a41a3404213c
--- /dev/null
+++ b/rust/kernel/id_pool.rs
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+//! Rust API for an ID pool backed by a [`BitmapVec`].
+
+use crate::alloc::{AllocError, Flags};
+use crate::bitmap::BitmapVec;
+
+const BITS_PER_LONG: usize = bindings::BITS_PER_LONG as usize;
+
+/// Represents a dynamic ID pool backed by a [`BitmapVec`].
+///
+/// Clients acquire and release IDs from unset bits in a bitmap.
+///
+/// The capacity of the ID pool may be adjusted by users as needed. The API
+/// supports the scenario where users need precise control over the time of
+/// allocation of a new backing bitmap, which may require releasing a
+/// spinlock first.
+/// Due to concurrent updates, all operations re-verify that the grow or
+/// shrink is still valid before applying it.
+///
+/// # Examples
+///
+/// Basic usage
+///
+/// ```
+/// use kernel::alloc::{AllocError, flags::GFP_KERNEL};
+/// use kernel::id_pool::IdPool;
+///
+/// let mut pool = IdPool::new(64, GFP_KERNEL)?;
+/// for i in 0..64 {
+/// assert_eq!(i, pool.acquire_next_id(i).ok_or(ENOSPC)?);
+/// }
+///
+/// pool.release_id(23);
+/// assert_eq!(23, pool.acquire_next_id(0).ok_or(ENOSPC)?);
+///
+/// assert_eq!(None, pool.acquire_next_id(0)); // time to realloc.
+/// let resizer = pool.grow_request().ok_or(ENOSPC)?.realloc(GFP_KERNEL)?;
+/// pool.grow(resizer);
+///
+/// assert_eq!(pool.acquire_next_id(0), Some(64));
+/// # Ok::<(), Error>(())
+/// ```
+///
+/// Releasing spinlock to grow the pool
+///
+/// ```no_run
+/// use kernel::alloc::{AllocError, flags::GFP_KERNEL};
+/// use kernel::sync::{new_spinlock, SpinLock};
+/// use kernel::id_pool::IdPool;
+///
+/// fn get_id_maybe_realloc(guarded_pool: &SpinLock<IdPool>) -> Result<usize, AllocError> {
+/// let mut pool = guarded_pool.lock();
+/// loop {
+/// match pool.acquire_next_id(0) {
+/// Some(index) => return Ok(index),
+/// None => {
+/// let alloc_request = pool.grow_request();
+/// drop(pool);
+/// let resizer = alloc_request.ok_or(AllocError)?.realloc(GFP_KERNEL)?;
+/// pool = guarded_pool.lock();
+/// pool.grow(resizer)
+/// }
+/// }
+/// }
+/// }
+/// ```
+pub struct IdPool {
+ map: BitmapVec,
+}
+
+/// Indicates that an [`IdPool`] should change to a new target size.
+pub struct ReallocRequest {
+ num_ids: usize,
+}
+
+/// Contains a [`BitmapVec`] of a size suitable for reallocating [`IdPool`].
+pub struct PoolResizer {
+ new: BitmapVec,
+}
+
+impl ReallocRequest {
+ /// Allocates a new backing [`BitmapVec`] for [`IdPool`].
+ ///
+ /// This method only prepares reallocation and does not complete it.
+ /// Reallocation will complete after passing the [`PoolResizer`] to the
+ /// [`IdPool::grow`] or [`IdPool::shrink`] operation, which will check
+ /// that reallocation still makes sense.
+ pub fn realloc(&self, flags: Flags) -> Result<PoolResizer, AllocError> {
+ let new = BitmapVec::new(self.num_ids, flags)?;
+ Ok(PoolResizer { new })
+ }
+}
+
+impl IdPool {
+ /// Constructs a new [`IdPool`].
+ ///
+ /// A capacity below [`BITS_PER_LONG`] is adjusted to
+ /// [`BITS_PER_LONG`].
+ ///
+ /// [`BITS_PER_LONG`]: srctree/include/asm-generic/bitsperlong.h
+ #[inline]
+ pub fn new(num_ids: usize, flags: Flags) -> Result<Self, AllocError> {
+ let num_ids = core::cmp::max(num_ids, BITS_PER_LONG);
+ let map = BitmapVec::new(num_ids, flags)?;
+ Ok(Self { map })
+ }
+
+    /// Returns how many IDs this pool can currently hold.
+ #[inline]
+ pub fn capacity(&self) -> usize {
+ self.map.len()
+ }
+
+ /// Returns a [`ReallocRequest`] if the [`IdPool`] can be shrunk, [`None`] otherwise.
+ ///
+ /// The capacity of an [`IdPool`] cannot be shrunk below [`BITS_PER_LONG`].
+ ///
+ /// [`BITS_PER_LONG`]: srctree/include/asm-generic/bitsperlong.h
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use kernel::alloc::{AllocError, flags::GFP_KERNEL};
+ /// use kernel::id_pool::{ReallocRequest, IdPool};
+ ///
+ /// let mut pool = IdPool::new(1024, GFP_KERNEL)?;
+ /// let alloc_request = pool.shrink_request().ok_or(AllocError)?;
+ /// let resizer = alloc_request.realloc(GFP_KERNEL)?;
+ /// pool.shrink(resizer);
+ /// assert_eq!(pool.capacity(), kernel::bindings::BITS_PER_LONG as usize);
+ /// # Ok::<(), AllocError>(())
+ /// ```
+ #[inline]
+ pub fn shrink_request(&self) -> Option<ReallocRequest> {
+ let cap = self.capacity();
+ // Shrinking below [`BITS_PER_LONG`] is never possible.
+ if cap <= BITS_PER_LONG {
+ return None;
+ }
+ // Determine if the bitmap can shrink based on the position of
+ // its last set bit. If the bit is within the first quarter of
+ // the bitmap then shrinking is possible. In this case, the
+ // bitmap should shrink to half its current size.
+ let Some(bit) = self.map.last_bit() else {
+ return Some(ReallocRequest {
+ num_ids: BITS_PER_LONG,
+ });
+ };
+ if bit >= (cap / 4) {
+ return None;
+ }
+ let num_ids = usize::max(BITS_PER_LONG, cap / 2);
+ Some(ReallocRequest { num_ids })
+ }
+
+ /// Shrinks pool by using a new [`BitmapVec`], if still possible.
+ #[inline]
+ pub fn shrink(&mut self, mut resizer: PoolResizer) {
+ // Between request to shrink that led to allocation of `resizer` and now,
+ // bits may have changed.
+ // Verify that shrinking is still possible. In case shrinking to
+ // the size of `resizer` is no longer possible, do nothing,
+ // drop `resizer` and move on.
+ let Some(updated) = self.shrink_request() else {
+ return;
+ };
+ if updated.num_ids > resizer.new.len() {
+ return;
+ }
+
+ resizer.new.copy_and_extend(&self.map);
+ self.map = resizer.new;
+ }
+
+ /// Returns a [`ReallocRequest`] for growing this [`IdPool`], if possible.
+ ///
+ /// The capacity of an [`IdPool`] cannot be grown above [`i32::MAX`].
+ #[inline]
+ pub fn grow_request(&self) -> Option<ReallocRequest> {
+ let num_ids = self.capacity() * 2;
+ if num_ids > i32::MAX.try_into().unwrap() {
+ return None;
+ }
+ Some(ReallocRequest { num_ids })
+ }
+
+ /// Grows pool by using a new [`BitmapVec`], if still necessary.
+ ///
+    /// The `resizer` argument has to be obtained by calling [`Self::grow_request`]
+ /// on this object and performing a [`ReallocRequest::realloc`].
+ #[inline]
+ pub fn grow(&mut self, mut resizer: PoolResizer) {
+ // Between request to grow that led to allocation of `resizer` and now,
+ // another thread may have already grown the capacity.
+ // In this case, do nothing, drop `resizer` and move on.
+ if resizer.new.len() <= self.capacity() {
+ return;
+ }
+
+ resizer.new.copy_and_extend(&self.map);
+ self.map = resizer.new;
+ }
+
+ /// Acquires a new ID by finding and setting the next zero bit in the
+ /// bitmap.
+ ///
+ /// Upon success, returns its index. Otherwise, returns [`None`]
+ /// to indicate that a [`Self::grow_request`] is needed.
+ #[inline]
+ pub fn acquire_next_id(&mut self, offset: usize) -> Option<usize> {
+ let next_zero_bit = self.map.next_zero_bit(offset);
+ if let Some(nr) = next_zero_bit {
+ self.map.set_bit(nr);
+ }
+ next_zero_bit
+ }
+
+ /// Releases an ID.
+ #[inline]
+ pub fn release_id(&mut self, id: usize) {
+ self.map.clear_bit(id);
+ }
+}
diff --git a/rust/kernel/init.rs b/rust/kernel/init.rs
index 21ef202ab0db..4949047af8d7 100644
--- a/rust/kernel/init.rs
+++ b/rust/kernel/init.rs
@@ -29,15 +29,15 @@
//!
//! ## General Examples
//!
-//! ```rust,ignore
-//! # #![allow(clippy::disallowed_names)]
+//! ```rust
+//! # #![expect(clippy::disallowed_names, clippy::undocumented_unsafe_blocks)]
//! use kernel::types::Opaque;
//! use pin_init::pin_init_from_closure;
//!
//! // assume we have some `raw_foo` type in C:
//! #[repr(C)]
//! struct RawFoo([u8; 16]);
-//! extern {
+//! extern "C" {
//! fn init_foo(_: *mut RawFoo);
//! }
//!
@@ -66,25 +66,17 @@
//! });
//! ```
//!
-//! ```rust,ignore
-//! # #![allow(unreachable_pub, clippy::disallowed_names)]
+//! ```rust
+//! # #![expect(unreachable_pub, clippy::disallowed_names)]
//! use kernel::{prelude::*, types::Opaque};
//! use core::{ptr::addr_of_mut, marker::PhantomPinned, pin::Pin};
//! # mod bindings {
-//! # #![allow(non_camel_case_types)]
+//! # #![expect(non_camel_case_types, clippy::missing_safety_doc)]
//! # pub struct foo;
//! # pub unsafe fn init_foo(_ptr: *mut foo) {}
//! # pub unsafe fn destroy_foo(_ptr: *mut foo) {}
//! # pub unsafe fn enable_foo(_ptr: *mut foo, _flags: u32) -> i32 { 0 }
//! # }
-//! # // `Error::from_errno` is `pub(crate)` in the `kernel` crate, thus provide a workaround.
-//! # trait FromErrno {
-//! # fn from_errno(errno: core::ffi::c_int) -> Error {
-//! # // Dummy error that can be constructed outside the `kernel` crate.
-//! # Error::from(core::fmt::Error)
-//! # }
-//! # }
-//! # impl FromErrno for Error {}
//! /// # Invariants
//! ///
//! /// `foo` is always initialized
@@ -108,13 +100,13 @@
//! let foo = addr_of_mut!((*slot).foo);
//!
//! // Initialize the `foo`
-//! bindings::init_foo(Opaque::raw_get(foo));
+//! bindings::init_foo(Opaque::cast_into(foo));
//!
//! // Try to enable it.
-//! let err = bindings::enable_foo(Opaque::raw_get(foo), flags);
+//! let err = bindings::enable_foo(Opaque::cast_into(foo), flags);
//! if err != 0 {
//! // Enabling has failed, first clean up the foo and then return the error.
-//! bindings::destroy_foo(Opaque::raw_get(foo));
+//! bindings::destroy_foo(Opaque::cast_into(foo));
//! return Err(Error::from_errno(err));
//! }
//!
@@ -206,7 +198,7 @@ pub trait InPlaceInit<T>: Sized {
///
/// ```rust
/// use kernel::error::Error;
-/// use pin_init::zeroed;
+/// use pin_init::init_zeroed;
/// struct BigBuf {
/// big: KBox<[u8; 1024 * 1024 * 1024]>,
/// small: [u8; 1024 * 1024],
@@ -215,7 +207,7 @@ pub trait InPlaceInit<T>: Sized {
/// impl BigBuf {
/// fn new() -> impl Init<Self, Error> {
/// try_init!(Self {
-/// big: KBox::init(zeroed(), GFP_KERNEL)?,
+/// big: KBox::init(init_zeroed(), GFP_KERNEL)?,
/// small: [0; 1024 * 1024],
/// }? Error)
/// }
@@ -264,7 +256,7 @@ macro_rules! try_init {
/// ```rust
/// # #![feature(new_uninit)]
/// use kernel::error::Error;
-/// use pin_init::zeroed;
+/// use pin_init::init_zeroed;
/// #[pin_data]
/// struct BigBuf {
/// big: KBox<[u8; 1024 * 1024 * 1024]>,
@@ -275,7 +267,7 @@ macro_rules! try_init {
/// impl BigBuf {
/// fn new() -> impl PinInit<Self, Error> {
/// try_pin_init!(Self {
-/// big: KBox::init(zeroed(), GFP_KERNEL)?,
+/// big: KBox::init(init_zeroed(), GFP_KERNEL)?,
/// small: [0; 1024 * 1024],
/// ptr: core::ptr::null_mut(),
/// }? Error)
diff --git a/rust/kernel/io.rs b/rust/kernel/io.rs
index 72d80a6f131e..ee182b0b5452 100644
--- a/rust/kernel/io.rs
+++ b/rust/kernel/io.rs
@@ -5,7 +5,13 @@
//! C header: [`include/asm-generic/io.h`](srctree/include/asm-generic/io.h)
use crate::error::{code::EINVAL, Result};
-use crate::{bindings, build_assert};
+use crate::{bindings, build_assert, ffi::c_void};
+
+pub mod mem;
+pub mod poll;
+pub mod resource;
+
+pub use resource::Resource;
/// Raw representation of an MMIO region.
///
@@ -43,7 +49,7 @@ impl<const SIZE: usize> IoRaw<SIZE> {
}
}
-/// IO-mapped memory, starting at the base address @addr and spanning @maxlen bytes.
+/// IO-mapped memory region.
///
/// The creator (usually a subsystem / bus such as PCI) is responsible for creating the
/// mapping, performing an additional region request etc.
@@ -56,7 +62,7 @@ impl<const SIZE: usize> IoRaw<SIZE> {
/// # Examples
///
/// ```no_run
-/// # use kernel::{bindings, io::{Io, IoRaw}};
+/// # use kernel::{bindings, ffi::c_void, io::{Io, IoRaw}};
/// # use core::ops::Deref;
///
/// // See also [`pci::Bar`] for a real example.
@@ -70,19 +76,19 @@ impl<const SIZE: usize> IoRaw<SIZE> {
/// unsafe fn new(paddr: usize) -> Result<Self>{
/// // SAFETY: By the safety requirements of this function [`paddr`, `paddr` + `SIZE`) is
/// // valid for `ioremap`.
-/// let addr = unsafe { bindings::ioremap(paddr as _, SIZE as _) };
+/// let addr = unsafe { bindings::ioremap(paddr as bindings::phys_addr_t, SIZE) };
/// if addr.is_null() {
/// return Err(ENOMEM);
/// }
///
-/// Ok(IoMem(IoRaw::new(addr as _, SIZE)?))
+/// Ok(IoMem(IoRaw::new(addr as usize, SIZE)?))
/// }
/// }
///
/// impl<const SIZE: usize> Drop for IoMem<SIZE> {
/// fn drop(&mut self) {
/// // SAFETY: `self.0.addr()` is guaranteed to be properly mapped by `Self::new`.
-/// unsafe { bindings::iounmap(self.0.addr() as _); };
+/// unsafe { bindings::iounmap(self.0.addr() as *mut c_void); };
/// }
/// }
///
@@ -119,7 +125,7 @@ macro_rules! define_read {
let addr = self.io_addr_assert::<$type_name>(offset);
// SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
- unsafe { bindings::$c_fn(addr as _) }
+ unsafe { bindings::$c_fn(addr as *const c_void) }
}
/// Read IO data from a given offset.
@@ -131,7 +137,7 @@ macro_rules! define_read {
let addr = self.io_addr::<$type_name>(offset)?;
// SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
- Ok(unsafe { bindings::$c_fn(addr as _) })
+ Ok(unsafe { bindings::$c_fn(addr as *const c_void) })
}
};
}
@@ -148,7 +154,7 @@ macro_rules! define_write {
let addr = self.io_addr_assert::<$type_name>(offset);
// SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
- unsafe { bindings::$c_fn(value, addr as _, ) }
+ unsafe { bindings::$c_fn(value, addr as *mut c_void) }
}
/// Write IO data from a given offset.
@@ -160,7 +166,7 @@ macro_rules! define_write {
let addr = self.io_addr::<$type_name>(offset)?;
// SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
- unsafe { bindings::$c_fn(value, addr as _) }
+ unsafe { bindings::$c_fn(value, addr as *mut c_void) }
Ok(())
}
};
diff --git a/rust/kernel/io/mem.rs b/rust/kernel/io/mem.rs
new file mode 100644
index 000000000000..6f99510bfc3a
--- /dev/null
+++ b/rust/kernel/io/mem.rs
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Generic memory-mapped IO.
+
+use core::ops::Deref;
+
+use crate::c_str;
+use crate::device::Bound;
+use crate::device::Device;
+use crate::devres::Devres;
+use crate::io;
+use crate::io::resource::Region;
+use crate::io::resource::Resource;
+use crate::io::Io;
+use crate::io::IoRaw;
+use crate::prelude::*;
+
+/// An IO request for a specific device and resource.
+pub struct IoRequest<'a> {
+ device: &'a Device<Bound>,
+ resource: &'a Resource,
+}
+
+impl<'a> IoRequest<'a> {
+ /// Creates a new [`IoRequest`] instance.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that `resource` is valid for `device` during the
+ /// lifetime `'a`.
+ pub(crate) unsafe fn new(device: &'a Device<Bound>, resource: &'a Resource) -> Self {
+ IoRequest { device, resource }
+ }
+
+ /// Maps an [`IoRequest`] where the size is known at compile time.
+ ///
+ /// This uses the [`ioremap()`] C API.
+ ///
+ /// [`ioremap()`]: https://docs.kernel.org/driver-api/device-io.html#getting-access-to-the-device
+ ///
+ /// # Examples
+ ///
+ /// The following example uses a [`kernel::platform::Device`] for
+ /// illustration purposes.
+ ///
+ /// ```no_run
+ /// use kernel::{bindings, c_str, platform, of, device::Core};
+ /// struct SampleDriver;
+ ///
+ /// impl platform::Driver for SampleDriver {
+ /// # type IdInfo = ();
+ ///
+ /// fn probe(
+ /// pdev: &platform::Device<Core>,
+ /// info: Option<&Self::IdInfo>,
+ /// ) -> Result<Pin<KBox<Self>>> {
+ /// let offset = 0; // Some offset.
+ ///
+ /// // If the size is known at compile time, use [`Self::iomap_sized`].
+ /// //
+ /// // No runtime checks will apply when reading and writing.
+ /// let request = pdev.io_request_by_index(0).ok_or(ENODEV)?;
+ /// let iomem = request.iomap_sized::<42>();
+ /// let iomem = KBox::pin_init(iomem, GFP_KERNEL)?;
+ ///
+ /// let io = iomem.access(pdev.as_ref())?;
+ ///
+ /// // Read and write a 32-bit value at `offset`.
+ /// let data = io.read32_relaxed(offset);
+ ///
+ /// io.write32_relaxed(data, offset);
+ ///
+ /// # Ok(KBox::new(SampleDriver, GFP_KERNEL)?.into())
+ /// }
+ /// }
+ /// ```
+ pub fn iomap_sized<const SIZE: usize>(self) -> impl PinInit<Devres<IoMem<SIZE>>, Error> + 'a {
+ IoMem::new(self)
+ }
+
+ /// Same as [`Self::iomap_sized`] but with exclusive access to the
+ /// underlying region.
+ ///
+ /// This uses the [`ioremap()`] C API.
+ ///
+ /// [`ioremap()`]: https://docs.kernel.org/driver-api/device-io.html#getting-access-to-the-device
+ pub fn iomap_exclusive_sized<const SIZE: usize>(
+ self,
+ ) -> impl PinInit<Devres<ExclusiveIoMem<SIZE>>, Error> + 'a {
+ ExclusiveIoMem::new(self)
+ }
+
+    /// Maps an [`IoRequest`] where the size is not known at compile time.
+ ///
+ /// This uses the [`ioremap()`] C API.
+ ///
+ /// [`ioremap()`]: https://docs.kernel.org/driver-api/device-io.html#getting-access-to-the-device
+ ///
+ /// # Examples
+ ///
+ /// The following example uses a [`kernel::platform::Device`] for
+ /// illustration purposes.
+ ///
+ /// ```no_run
+ /// use kernel::{bindings, c_str, platform, of, device::Core};
+ /// struct SampleDriver;
+ ///
+ /// impl platform::Driver for SampleDriver {
+ /// # type IdInfo = ();
+ ///
+ /// fn probe(
+ /// pdev: &platform::Device<Core>,
+ /// info: Option<&Self::IdInfo>,
+ /// ) -> Result<Pin<KBox<Self>>> {
+ /// let offset = 0; // Some offset.
+ ///
+ /// // Unlike [`Self::iomap_sized`], here the size of the memory region
+ /// // is not known at compile time, so only the `try_read*` and `try_write*`
+ /// // family of functions should be used, leading to runtime checks on every
+ /// // access.
+ /// let request = pdev.io_request_by_index(0).ok_or(ENODEV)?;
+ /// let iomem = request.iomap();
+ /// let iomem = KBox::pin_init(iomem, GFP_KERNEL)?;
+ ///
+ /// let io = iomem.access(pdev.as_ref())?;
+ ///
+ /// let data = io.try_read32_relaxed(offset)?;
+ ///
+ /// io.try_write32_relaxed(data, offset)?;
+ ///
+ /// # Ok(KBox::new(SampleDriver, GFP_KERNEL)?.into())
+ /// }
+ /// }
+ /// ```
+ pub fn iomap(self) -> impl PinInit<Devres<IoMem<0>>, Error> + 'a {
+ Self::iomap_sized::<0>(self)
+ }
+
+ /// Same as [`Self::iomap`] but with exclusive access to the underlying
+ /// region.
+ pub fn iomap_exclusive(self) -> impl PinInit<Devres<ExclusiveIoMem<0>>, Error> + 'a {
+ Self::iomap_exclusive_sized::<0>(self)
+ }
+}
+
+/// An exclusive memory-mapped IO region.
+///
+/// # Invariants
+///
+/// - [`ExclusiveIoMem`] has exclusive access to the underlying [`IoMem`].
+pub struct ExclusiveIoMem<const SIZE: usize> {
+ /// The underlying `IoMem` instance.
+ iomem: IoMem<SIZE>,
+
+ /// The region abstraction. This represents exclusive access to the
+ /// range represented by the underlying `iomem`.
+ ///
+ /// This field is needed for ownership of the region.
+ _region: Region,
+}
+
+impl<const SIZE: usize> ExclusiveIoMem<SIZE> {
+ /// Creates a new `ExclusiveIoMem` instance.
+ fn ioremap(resource: &Resource) -> Result<Self> {
+ let start = resource.start();
+ let size = resource.size();
+ let name = resource.name().unwrap_or(c_str!(""));
+
+ let region = resource
+ .request_region(
+ start,
+ size,
+ name.to_cstring()?,
+ io::resource::Flags::IORESOURCE_MEM,
+ )
+ .ok_or(EBUSY)?;
+
+ let iomem = IoMem::ioremap(resource)?;
+
+ let iomem = ExclusiveIoMem {
+ iomem,
+ _region: region,
+ };
+
+ Ok(iomem)
+ }
+
+ /// Creates a new `ExclusiveIoMem` instance from a previously acquired [`IoRequest`].
+ pub fn new<'a>(io_request: IoRequest<'a>) -> impl PinInit<Devres<Self>, Error> + 'a {
+ let dev = io_request.device;
+ let res = io_request.resource;
+
+ Devres::new(dev, Self::ioremap(res))
+ }
+}
+
+impl<const SIZE: usize> Deref for ExclusiveIoMem<SIZE> {
+ type Target = Io<SIZE>;
+
+ fn deref(&self) -> &Self::Target {
+ &self.iomem
+ }
+}
+
+/// A generic memory-mapped IO region.
+///
+/// Accesses to the underlying region are checked either at compile time, if the
+/// region's size is known at that point, or at runtime otherwise.
+///
+/// # Invariants
+///
+/// [`IoMem`] always holds an [`IoRaw`] instance that holds a valid pointer to the
+/// start of the I/O memory mapped region.
+pub struct IoMem<const SIZE: usize = 0> {
+ io: IoRaw<SIZE>,
+}
+
+impl<const SIZE: usize> IoMem<SIZE> {
+ fn ioremap(resource: &Resource) -> Result<Self> {
+ // Note: Some ioremap() implementations use types that depend on the CPU
+ // word width rather than the bus address width.
+ //
+ // TODO: Properly address this in the C code to avoid this `try_into`.
+ let size = resource.size().try_into()?;
+ if size == 0 {
+ return Err(EINVAL);
+ }
+
+ let res_start = resource.start();
+
+ let addr = if resource
+ .flags()
+ .contains(io::resource::Flags::IORESOURCE_MEM_NONPOSTED)
+ {
+ // SAFETY:
+ // - `res_start` and `size` are read from a presumably valid `struct resource`.
+ // - `size` is known not to be zero at this point.
+ unsafe { bindings::ioremap_np(res_start, size) }
+ } else {
+ // SAFETY:
+ // - `res_start` and `size` are read from a presumably valid `struct resource`.
+ // - `size` is known not to be zero at this point.
+ unsafe { bindings::ioremap(res_start, size) }
+ };
+
+ if addr.is_null() {
+ return Err(ENOMEM);
+ }
+
+ let io = IoRaw::new(addr as usize, size)?;
+ let io = IoMem { io };
+
+ Ok(io)
+ }
+
+ /// Creates a new `IoMem` instance from a previously acquired [`IoRequest`].
+ pub fn new<'a>(io_request: IoRequest<'a>) -> impl PinInit<Devres<Self>, Error> + 'a {
+ let dev = io_request.device;
+ let res = io_request.resource;
+
+ Devres::new(dev, Self::ioremap(res))
+ }
+}
+
+impl<const SIZE: usize> Drop for IoMem<SIZE> {
+ fn drop(&mut self) {
+        // SAFETY: Safe as by the invariant of `IoMem`.
+ unsafe { bindings::iounmap(self.io.addr() as *mut c_void) }
+ }
+}
+
+impl<const SIZE: usize> Deref for IoMem<SIZE> {
+ type Target = Io<SIZE>;
+
+ fn deref(&self) -> &Self::Target {
+ // SAFETY: Safe as by the invariant of `IoMem`.
+ unsafe { Io::from_raw(&self.io) }
+ }
+}
diff --git a/rust/kernel/io/poll.rs b/rust/kernel/io/poll.rs
new file mode 100644
index 000000000000..613eb25047ef
--- /dev/null
+++ b/rust/kernel/io/poll.rs
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! IO polling.
+//!
+//! C header: [`include/linux/iopoll.h`](srctree/include/linux/iopoll.h).
+
+use crate::{
+ error::{code::*, Result},
+ processor::cpu_relax,
+ task::might_sleep,
+ time::{delay::fsleep, Delta, Instant, Monotonic},
+};
+
+/// Polls periodically until a condition is met, an error occurs,
+/// or the timeout is reached.
+///
+/// The function repeatedly executes the given operation `op` closure and
+/// checks its result using the condition closure `cond`.
+///
+/// If `cond` returns `true`, the function returns successfully with
+/// the result of `op`. Otherwise, it waits for a duration specified
+/// by `sleep_delta` before executing `op` again.
+///
+/// This process continues until either `op` returns an error, `cond`
+/// returns `true`, or the timeout specified by `timeout_delta` is
+/// reached.
+///
+/// This function can only be used in a nonatomic context.
+///
+/// # Errors
+///
+/// If `op` returns an error, then that error is returned directly.
+///
+/// If the timeout specified by `timeout_delta` is reached, then
+/// `Err(ETIMEDOUT)` is returned.
+///
+/// # Examples
+///
+/// ```no_run
+/// use kernel::io::{Io, poll::read_poll_timeout};
+/// use kernel::time::Delta;
+///
+/// const HW_READY: u16 = 0x01;
+///
+/// fn wait_for_hardware<const SIZE: usize>(io: &Io<SIZE>) -> Result<()> {
+/// match read_poll_timeout(
+/// // The `op` closure reads the value of a specific status register.
+/// || io.try_read16(0x1000),
+/// // The `cond` closure takes a reference to the value returned by `op`
+/// // and checks whether the hardware is ready.
+/// |val: &u16| *val == HW_READY,
+/// Delta::from_millis(50),
+/// Delta::from_secs(3),
+/// ) {
+/// Ok(_) => {
+/// // The hardware is ready. The returned value of the `op` closure
+/// // isn't used.
+/// Ok(())
+/// }
+/// Err(e) => Err(e),
+/// }
+/// }
+/// ```
+#[track_caller]
+pub fn read_poll_timeout<Op, Cond, T>(
+ mut op: Op,
+ mut cond: Cond,
+ sleep_delta: Delta,
+ timeout_delta: Delta,
+) -> Result<T>
+where
+ Op: FnMut() -> Result<T>,
+ Cond: FnMut(&T) -> bool,
+{
+ let start: Instant<Monotonic> = Instant::now();
+
+ // Unlike the C version, we always call `might_sleep()` unconditionally,
+ // as conditional calls are error-prone. We clearly separate
+ // `read_poll_timeout()` and `read_poll_timeout_atomic()` to aid
+ // tools like klint.
+ might_sleep();
+
+ loop {
+ let val = op()?;
+ if cond(&val) {
+ // Unlike the C version, we immediately return.
+ // We know the condition is met so we don't need to check again.
+ return Ok(val);
+ }
+
+ if start.elapsed() > timeout_delta {
+ // Unlike the C version, we immediately return.
+ // We have just called `op()` so we don't need to call it again.
+ return Err(ETIMEDOUT);
+ }
+
+ if !sleep_delta.is_zero() {
+ fsleep(sleep_delta);
+ }
+
+ // `fsleep()` could be a busy-wait loop so we always call `cpu_relax()`.
+ cpu_relax();
+ }
+}
diff --git a/rust/kernel/io/resource.rs b/rust/kernel/io/resource.rs
new file mode 100644
index 000000000000..bea3ee0ed87b
--- /dev/null
+++ b/rust/kernel/io/resource.rs
@@ -0,0 +1,230 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Abstractions for [system
+//! resources](https://docs.kernel.org/core-api/kernel-api.html#resources-management).
+//!
+//! C header: [`include/linux/ioport.h`](srctree/include/linux/ioport.h)
+
+use core::ops::Deref;
+use core::ptr::NonNull;
+
+use crate::prelude::*;
+use crate::str::{CStr, CString};
+use crate::types::Opaque;
+
+/// Resource Size type.
+///
+/// This is a type alias to either `u32` or `u64` depending on the config option
+/// `CONFIG_PHYS_ADDR_T_64BIT`, and it can be a `u64` even on 32-bit architectures.
+pub type ResourceSize = bindings::phys_addr_t;
+
+/// A region allocated from a parent [`Resource`].
+///
+/// # Invariants
+///
+/// - `self.resource` points to a valid `bindings::resource` that was obtained through
+///   `bindings::__request_region`.
+pub struct Region {
+ /// The resource returned when the region was requested.
+ resource: NonNull<bindings::resource>,
+ /// The name that was passed in when the region was requested. We need to
+ /// store it for ownership reasons.
+ _name: CString,
+}
+
+impl Deref for Region {
+ type Target = Resource;
+
+ fn deref(&self) -> &Self::Target {
+ // SAFETY: Safe as per the invariant of `Region`.
+ unsafe { Resource::from_raw(self.resource.as_ptr()) }
+ }
+}
+
+impl Drop for Region {
+ fn drop(&mut self) {
+ let (flags, start, size) = {
+ let res = &**self;
+ (res.flags(), res.start(), res.size())
+ };
+
+ let release_fn = if flags.contains(Flags::IORESOURCE_MEM) {
+ bindings::release_mem_region
+ } else {
+ bindings::release_region
+ };
+
+ // SAFETY: Safe as per the invariant of `Region`.
+ unsafe { release_fn(start, size) };
+ }
+}
+
+// SAFETY: `Region` only holds a pointer to a C `struct resource`, which is safe to be used from
+// any thread.
+unsafe impl Send for Region {}
+
+// SAFETY: `Region` only holds a pointer to a C `struct resource`, references to which are
+// safe to be used from any thread.
+unsafe impl Sync for Region {}
+
+/// A resource abstraction.
+///
+/// # Invariants
+///
+/// [`Resource`] is a transparent wrapper around a valid `bindings::resource`.
+#[repr(transparent)]
+pub struct Resource(Opaque<bindings::resource>);
+
+impl Resource {
+ /// Creates a reference to a [`Resource`] from a valid pointer.
+ ///
+ /// # Safety
+ ///
+    /// The caller must ensure that for the duration of `'a`, the pointer will
+    /// point at a valid `bindings::resource`.
+    ///
+    /// The caller must also ensure that the [`Resource`] is only accessed via the
+    /// returned reference for the duration of `'a`.
+ pub(crate) const unsafe fn from_raw<'a>(ptr: *mut bindings::resource) -> &'a Self {
+ // SAFETY: Self is a transparent wrapper around `Opaque<bindings::resource>`.
+ unsafe { &*ptr.cast() }
+ }
+
+ /// Requests a resource region.
+ ///
+ /// Exclusive access will be given and the region will be marked as busy.
+ /// Further calls to [`Self::request_region`] will return [`None`] if
+ /// the region, or a part of it, is already in use.
+ pub fn request_region(
+ &self,
+ start: ResourceSize,
+ size: ResourceSize,
+ name: CString,
+ flags: Flags,
+ ) -> Option<Region> {
+ // SAFETY:
+ // - Safe as per the invariant of `Resource`.
+ // - `__request_region` will store a reference to the name, but that is
+ // safe as we own it and it will not be dropped until the `Region` is
+ // dropped.
+ let region = unsafe {
+ bindings::__request_region(
+ self.0.get(),
+ start,
+ size,
+ name.as_char_ptr(),
+ flags.0 as c_int,
+ )
+ };
+
+ Some(Region {
+ resource: NonNull::new(region)?,
+ _name: name,
+ })
+ }
+
+ /// Returns the size of the resource.
+ pub fn size(&self) -> ResourceSize {
+ let inner = self.0.get();
+ // SAFETY: Safe as per the invariants of `Resource`.
+ unsafe { bindings::resource_size(inner) }
+ }
+
+ /// Returns the start address of the resource.
+ pub fn start(&self) -> ResourceSize {
+ let inner = self.0.get();
+ // SAFETY: Safe as per the invariants of `Resource`.
+ unsafe { (*inner).start }
+ }
+
+ /// Returns the name of the resource.
+ pub fn name(&self) -> Option<&CStr> {
+ let inner = self.0.get();
+
+ // SAFETY: Safe as per the invariants of `Resource`.
+ let name = unsafe { (*inner).name };
+
+ if name.is_null() {
+ return None;
+ }
+
+ // SAFETY: In the C code, `resource::name` either contains a null
+ // pointer or points to a valid NUL-terminated C string, and at this
+ // point we know it is not null, so we can safely convert it to a
+ // `CStr`.
+ Some(unsafe { CStr::from_char_ptr(name) })
+ }
+
+ /// Returns the flags associated with the resource.
+ pub fn flags(&self) -> Flags {
+ let inner = self.0.get();
+ // SAFETY: Safe as per the invariants of `Resource`.
+ let flags = unsafe { (*inner).flags };
+
+ Flags(flags)
+ }
+}
+
+// SAFETY: `Resource` only holds a pointer to a C `struct resource`, which is
+// safe to be used from any thread.
+unsafe impl Send for Resource {}
+
+// SAFETY: `Resource` only holds a pointer to a C `struct resource`, references
+// to which are safe to be used from any thread.
+unsafe impl Sync for Resource {}
+
+/// Resource flags as stored in the C `struct resource::flags` field.
+///
+/// They can be combined with the operators `|`, `&`, and `!`.
+///
+/// Values can be used from the associated constants such as
+/// [`Flags::IORESOURCE_IO`].
+#[derive(Clone, Copy, PartialEq)]
+pub struct Flags(c_ulong);
+
+impl Flags {
+    /// Checks whether `flags` is contained in `self`.
+ pub fn contains(self, flags: Flags) -> bool {
+ (self & flags) == flags
+ }
+}
+
+impl core::ops::BitOr for Flags {
+ type Output = Self;
+ fn bitor(self, rhs: Self) -> Self::Output {
+ Self(self.0 | rhs.0)
+ }
+}
+
+impl core::ops::BitAnd for Flags {
+ type Output = Self;
+ fn bitand(self, rhs: Self) -> Self::Output {
+ Self(self.0 & rhs.0)
+ }
+}
+
+impl core::ops::Not for Flags {
+ type Output = Self;
+ fn not(self) -> Self::Output {
+ Self(!self.0)
+ }
+}
+
+impl Flags {
+ /// PCI/ISA I/O ports.
+ pub const IORESOURCE_IO: Flags = Flags::new(bindings::IORESOURCE_IO);
+
+ /// Resource is software muxed.
+ pub const IORESOURCE_MUXED: Flags = Flags::new(bindings::IORESOURCE_MUXED);
+
+ /// Resource represents a memory region.
+ pub const IORESOURCE_MEM: Flags = Flags::new(bindings::IORESOURCE_MEM);
+
+ /// Resource represents a memory region that must be ioremaped using `ioremap_np`.
+ pub const IORESOURCE_MEM_NONPOSTED: Flags = Flags::new(bindings::IORESOURCE_MEM_NONPOSTED);
+
+ const fn new(value: u32) -> Self {
+ crate::build_assert!(value as u64 <= c_ulong::MAX as u64);
+ Flags(value as c_ulong)
+ }
+}
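As a short sketch of how the operators and [`Flags::contains`] compose (the helper name is illustrative):

```rust
use kernel::io::resource::Flags;

// True for an I/O port region that is also software muxed.
fn is_muxed_io(flags: Flags) -> bool {
    flags.contains(Flags::IORESOURCE_IO | Flags::IORESOURCE_MUXED)
}
```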
diff --git a/rust/kernel/iov.rs b/rust/kernel/iov.rs
new file mode 100644
index 000000000000..43bae8923c46
--- /dev/null
+++ b/rust/kernel/iov.rs
@@ -0,0 +1,314 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+//! IO vectors.
+//!
+//! C headers: [`include/linux/iov_iter.h`](srctree/include/linux/iov_iter.h),
+//! [`include/linux/uio.h`](srctree/include/linux/uio.h)
+
+use crate::{
+ alloc::{Allocator, Flags},
+ bindings,
+ prelude::*,
+ types::Opaque,
+};
+use core::{marker::PhantomData, mem::MaybeUninit, ptr, slice};
+
+const ITER_SOURCE: bool = bindings::ITER_SOURCE != 0;
+const ITER_DEST: bool = bindings::ITER_DEST != 0;
+
+// Compile-time assertion for the above constants.
+const _: () = {
+ build_assert!(
+ ITER_SOURCE != ITER_DEST,
+ "ITER_DEST and ITER_SOURCE should be different."
+ );
+};
+
+/// An IO vector that acts as a source of data.
+///
+/// The data may come from many different sources. This includes both things in kernel-space and
+/// reading from userspace. It's not necessarily the case that the data source is immutable, so
+/// rewinding the IO vector to read the same data twice is not guaranteed to result in the same
+/// bytes. It's also possible that the data source is mapped in a thread-local manner using e.g.
+/// `kmap_local_page()`, so this type is not `Send` to ensure that the mapping is read from the
+/// right context in that scenario.
+///
+/// # Invariants
+///
+/// Must hold a valid `struct iov_iter` with `data_source` set to `ITER_SOURCE`. For the duration
+/// of `'data`, it must be safe to read from this IO vector using the standard C methods for this
+/// purpose.
+#[repr(transparent)]
+pub struct IovIterSource<'data> {
+ iov: Opaque<bindings::iov_iter>,
+ /// Represent to the type system that this value contains a pointer to readable data it does
+ /// not own.
+ _source: PhantomData<&'data [u8]>,
+}
+
+impl<'data> IovIterSource<'data> {
+ /// Obtain an `IovIterSource` from a raw pointer.
+ ///
+ /// # Safety
+ ///
+ /// * The referenced `struct iov_iter` must be valid and must only be accessed through the
+ /// returned reference for the duration of `'iov`.
+ /// * The referenced `struct iov_iter` must have `data_source` set to `ITER_SOURCE`.
+ /// * For the duration of `'data`, it must be safe to read from this IO vector using the
+ /// standard C methods for this purpose.
+ #[track_caller]
+ #[inline]
+ pub unsafe fn from_raw<'iov>(ptr: *mut bindings::iov_iter) -> &'iov mut IovIterSource<'data> {
+ // SAFETY: The caller ensures that `ptr` is valid.
+ let data_source = unsafe { (*ptr).data_source };
+ assert_eq!(data_source, ITER_SOURCE);
+
+ // SAFETY: The caller ensures the type invariants for the right durations, and
+ // `IovIterSource` is layout compatible with `struct iov_iter`.
+ unsafe { &mut *ptr.cast::<IovIterSource<'data>>() }
+ }
+
+ /// Access this as a raw `struct iov_iter`.
+ #[inline]
+ pub fn as_raw(&mut self) -> *mut bindings::iov_iter {
+ self.iov.get()
+ }
+
+ /// Returns the number of bytes available in this IO vector.
+ ///
+ /// Note that this may overestimate the number of bytes. For example, reading from userspace
+ /// memory could fail with `EFAULT`, which will be treated as the end of the IO vector.
+ #[inline]
+ pub fn len(&self) -> usize {
+ // SAFETY: We have shared access to this IO vector, so we can read its `count` field.
+ unsafe {
+ (*self.iov.get())
+ .__bindgen_anon_1
+ .__bindgen_anon_1
+ .as_ref()
+ .count
+ }
+ }
+
+ /// Returns whether there are any bytes left in this IO vector.
+ ///
+ /// This may return `true` even if there are no more bytes available. For example, reading from
+ /// userspace memory could fail with `EFAULT`, which will be treated as the end of the IO vector.
+ #[inline]
+ pub fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+
+ /// Advance this IO vector by `bytes` bytes.
+ ///
+ /// If `bytes` is larger than the size of this IO vector, it is advanced to the end.
+ #[inline]
+ pub fn advance(&mut self, bytes: usize) {
+ // SAFETY: By the type invariants, `self.iov` is a valid IO vector.
+ unsafe { bindings::iov_iter_advance(self.as_raw(), bytes) };
+ }
+
+ /// Advance this IO vector backwards by `bytes` bytes.
+ ///
+ /// # Safety
+ ///
+ /// The IO vector must not be reverted to before its beginning.
+ #[inline]
+ pub unsafe fn revert(&mut self, bytes: usize) {
+ // SAFETY: By the type invariants, `self.iov` is a valid IO vector, and the caller
+ // ensures that `bytes` is in bounds.
+ unsafe { bindings::iov_iter_revert(self.as_raw(), bytes) };
+ }
+
+ /// Read data from this IO vector.
+ ///
+ /// Returns the number of bytes that have been copied.
+ #[inline]
+ pub fn copy_from_iter(&mut self, out: &mut [u8]) -> usize {
+ // SAFETY: `Self::copy_from_iter_raw` guarantees that it will not write any uninitialized
+ // bytes in the provided buffer, so `out` is still a valid `u8` slice after this call.
+ let out = unsafe { &mut *(ptr::from_mut(out) as *mut [MaybeUninit<u8>]) };
+
+ self.copy_from_iter_raw(out).len()
+ }
+
+ /// Read data from this IO vector and append it to a vector.
+ ///
+ /// Returns the number of bytes that have been copied.
+ #[inline]
+ pub fn copy_from_iter_vec<A: Allocator>(
+ &mut self,
+ out: &mut Vec<u8, A>,
+ flags: Flags,
+ ) -> Result<usize> {
+ out.reserve(self.len(), flags)?;
+ let len = self.copy_from_iter_raw(out.spare_capacity_mut()).len();
+ // SAFETY:
+ // - `len` is the length of a subslice of the spare capacity, so `len` is at most the
+ // length of the spare capacity.
+ // - `Self::copy_from_iter_raw` guarantees that the first `len` bytes of the spare capacity
+ // have been initialized.
+ unsafe { out.inc_len(len) };
+ Ok(len)
+ }
+
+ /// Read data from this IO vector into potentially uninitialized memory.
+ ///
+ /// Returns the sub-slice of the output that has been initialized. If the returned slice is
+ /// shorter than the input buffer, then the entire IO vector has been read.
+ ///
+ /// This will never write uninitialized bytes to the provided buffer.
+ #[inline]
+ pub fn copy_from_iter_raw(&mut self, out: &mut [MaybeUninit<u8>]) -> &mut [u8] {
+ let capacity = out.len();
+ let out = out.as_mut_ptr().cast::<u8>();
+
+ // GUARANTEES: The C API guarantees that it does not write uninitialized bytes to the
+ // provided buffer.
+ // SAFETY:
+ // * By the type invariants, it is still valid to read from this IO vector.
+ // * `out` is valid for writing for `capacity` bytes because it comes from a slice of
+ // that length.
+ let len = unsafe { bindings::_copy_from_iter(out.cast(), capacity, self.as_raw()) };
+
+        // SAFETY: The underlying C API guarantees that initialized bytes have been written to
+        // the first `len` bytes of `out`.
+ unsafe { slice::from_raw_parts_mut(out, len) }
+ }
+}
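A minimal sketch of draining a source iterator into a kernel vector, assuming the caller already holds a valid `IovIterSource`:

```rust
use kernel::iov::IovIterSource;
use kernel::prelude::*;

// Copy everything currently available in the iterator into a vector,
// growing it with GFP_KERNEL allocations as needed.
fn drain(iov: &mut IovIterSource<'_>) -> Result<KVec<u8>> {
    let mut buf = KVec::new();
    iov.copy_from_iter_vec(&mut buf, GFP_KERNEL)?;
    Ok(buf)
}
```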
+
+/// An IO vector that acts as a destination for data.
+///
+/// IO vectors support many different types of destinations. This includes both buffers in
+/// kernel-space and writing to userspace. It's possible that the destination buffer is mapped in a
+/// thread-local manner using e.g. `kmap_local_page()`, so this type is not `Send` to ensure that
+/// the mapping is written to the right context in that scenario.
+///
+/// # Invariants
+///
+/// Must hold a valid `struct iov_iter` with `data_source` set to `ITER_DEST`. For the duration of
+/// `'data`, it must be safe to write to this IO vector using the standard C methods for this
+/// purpose.
+#[repr(transparent)]
+pub struct IovIterDest<'data> {
+ iov: Opaque<bindings::iov_iter>,
+ /// Represent to the type system that this value contains a pointer to writable data it does
+ /// not own.
+ _source: PhantomData<&'data mut [u8]>,
+}
+
+impl<'data> IovIterDest<'data> {
+ /// Obtain an `IovIterDest` from a raw pointer.
+ ///
+ /// # Safety
+ ///
+ /// * The referenced `struct iov_iter` must be valid and must only be accessed through the
+ /// returned reference for the duration of `'iov`.
+ /// * The referenced `struct iov_iter` must have `data_source` set to `ITER_DEST`.
+ /// * For the duration of `'data`, it must be safe to write to this IO vector using the
+ /// standard C methods for this purpose.
+ #[track_caller]
+ #[inline]
+ pub unsafe fn from_raw<'iov>(ptr: *mut bindings::iov_iter) -> &'iov mut IovIterDest<'data> {
+ // SAFETY: The caller ensures that `ptr` is valid.
+ let data_source = unsafe { (*ptr).data_source };
+ assert_eq!(data_source, ITER_DEST);
+
+ // SAFETY: The caller ensures the type invariants for the right durations, and
+        // `IovIterDest` is layout compatible with `struct iov_iter`.
+ unsafe { &mut *ptr.cast::<IovIterDest<'data>>() }
+ }
+
+ /// Access this as a raw `struct iov_iter`.
+ #[inline]
+ pub fn as_raw(&mut self) -> *mut bindings::iov_iter {
+ self.iov.get()
+ }
+
+ /// Returns the number of bytes available in this IO vector.
+ ///
+ /// Note that this may overestimate the number of bytes. For example, reading from userspace
+    /// memory could fail with `EFAULT`, which will be treated as the end of the IO vector.
+ #[inline]
+ pub fn len(&self) -> usize {
+ // SAFETY: We have shared access to this IO vector, so we can read its `count` field.
+ unsafe {
+ (*self.iov.get())
+ .__bindgen_anon_1
+ .__bindgen_anon_1
+ .as_ref()
+ .count
+ }
+ }
+
+ /// Returns whether there are any bytes left in this IO vector.
+ ///
+ /// This may return `true` even if there are no more bytes available. For example, reading from
+    /// userspace memory could fail with `EFAULT`, which will be treated as the end of the IO vector.
+ #[inline]
+ pub fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+
+ /// Advance this IO vector by `bytes` bytes.
+ ///
+ /// If `bytes` is larger than the size of this IO vector, it is advanced to the end.
+ #[inline]
+ pub fn advance(&mut self, bytes: usize) {
+ // SAFETY: By the type invariants, `self.iov` is a valid IO vector.
+ unsafe { bindings::iov_iter_advance(self.as_raw(), bytes) };
+ }
+
+ /// Advance this IO vector backwards by `bytes` bytes.
+ ///
+ /// # Safety
+ ///
+ /// The IO vector must not be reverted to before its beginning.
+ #[inline]
+ pub unsafe fn revert(&mut self, bytes: usize) {
+ // SAFETY: By the type invariants, `self.iov` is a valid IO vector, and the caller
+ // ensures that `bytes` is in bounds.
+ unsafe { bindings::iov_iter_revert(self.as_raw(), bytes) };
+ }
+
+ /// Write data to this IO vector.
+ ///
+ /// Returns the number of bytes that were written. If this is shorter than the provided slice,
+ /// then no more bytes can be written.
+ #[inline]
+ pub fn copy_to_iter(&mut self, input: &[u8]) -> usize {
+ // SAFETY:
+ // * By the type invariants, it is still valid to write to this IO vector.
+ // * `input` is valid for `input.len()` bytes.
+ unsafe { bindings::_copy_to_iter(input.as_ptr().cast(), input.len(), self.as_raw()) }
+ }
+
+ /// Utility for implementing `read_iter` given the full contents of the file.
+ ///
+    /// The full contents of the file being read from are represented by `contents`. This call
+    /// will write the appropriate sub-slice of `contents` and update the file position in `ppos`
+    /// so that the file will appear to contain `contents` even if it takes multiple reads to
+    /// read the entire file.
+ #[inline]
+ pub fn simple_read_from_buffer(&mut self, ppos: &mut i64, contents: &[u8]) -> Result<usize> {
+ if *ppos < 0 {
+ return Err(EINVAL);
+ }
+ let Ok(pos) = usize::try_from(*ppos) else {
+ return Ok(0);
+ };
+ if pos >= contents.len() {
+ return Ok(0);
+ }
+
+ // BOUNDS: We just checked that `pos < contents.len()` above.
+ let num_written = self.copy_to_iter(&contents[pos..]);
+
+ // OVERFLOW: `pos+num_written <= contents.len() <= isize::MAX <= i64::MAX`.
+ *ppos = (pos + num_written) as i64;
+
+ Ok(num_written)
+ }
+}
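For illustration, a read handler backed by a fixed buffer could delegate to this helper as sketched below (contents and names are hypothetical):

```rust
use kernel::iov::IovIterDest;
use kernel::prelude::*;

// Hypothetical fixed file contents.
const CONTENTS: &[u8] = b"hello\n";

// Each call writes the sub-slice of `CONTENTS` starting at `*ppos` and
// advances the position, so repeated reads walk through the whole file.
fn read(iov: &mut IovIterDest<'_>, ppos: &mut i64) -> Result<usize> {
    iov.simple_read_from_buffer(ppos, CONTENTS)
}
```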
diff --git a/rust/kernel/irq.rs b/rust/kernel/irq.rs
new file mode 100644
index 000000000000..20abd4056655
--- /dev/null
+++ b/rust/kernel/irq.rs
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! IRQ abstractions.
+//!
+//! An IRQ is an interrupt request from a device. It is used to get the CPU's
+//! attention so it can service a hardware event in a timely manner.
+//!
+//! The current abstractions handle IRQ requests and handlers, i.e. they allow
+//! drivers to register a handler for a given IRQ line.
+//!
+//! C header: [`include/linux/interrupt.h`](srctree/include/linux/interrupt.h)
+
+/// Flags to be used when registering IRQ handlers.
+mod flags;
+
+/// IRQ allocation and handling.
+mod request;
+
+pub use flags::Flags;
+
+pub use request::{
+ Handler, IrqRequest, IrqReturn, Registration, ThreadedHandler, ThreadedIrqReturn,
+ ThreadedRegistration,
+};
diff --git a/rust/kernel/irq/flags.rs b/rust/kernel/irq/flags.rs
new file mode 100644
index 000000000000..adfde96ec47c
--- /dev/null
+++ b/rust/kernel/irq/flags.rs
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0
+// SPDX-FileCopyrightText: Copyright 2025 Collabora ltd.
+
+use crate::bindings;
+use crate::prelude::*;
+
+/// Flags to be used when registering IRQ handlers.
+///
+/// Flags can be used to request specific behaviors when registering an IRQ
+/// handler, and can be combined using the `|`, `&`, and `!` operators to
+/// further control the system's behavior.
+///
+/// A common use case is to register a shared interrupt, as sharing the line
+/// between devices is increasingly common in modern systems and is even
+/// required for some buses. This requires setting [`Flags::SHARED`] when
+/// requesting the interrupt. Other use cases include setting the trigger type
+/// through `Flags::TRIGGER_*`, which determines when the interrupt fires, or
+/// controlling whether the interrupt is masked after the handler runs by using
+/// [`Flags::ONESHOT`].
+///
+/// If an invalid combination of flags is provided, the system will refuse to
+/// register the handler, and lower layers will enforce certain flags when
+/// necessary. This means, for example, that all the
+/// [`crate::irq::Registration`] for a shared interrupt have to agree on
+/// [`Flags::SHARED`] and on the same trigger type, if set.
+#[derive(Clone, Copy, PartialEq, Eq)]
+pub struct Flags(c_ulong);
+
+impl Flags {
+ /// Use the interrupt line as already configured.
+ pub const TRIGGER_NONE: Flags = Flags::new(bindings::IRQF_TRIGGER_NONE);
+
+ /// The interrupt is triggered when the signal goes from low to high.
+ pub const TRIGGER_RISING: Flags = Flags::new(bindings::IRQF_TRIGGER_RISING);
+
+ /// The interrupt is triggered when the signal goes from high to low.
+ pub const TRIGGER_FALLING: Flags = Flags::new(bindings::IRQF_TRIGGER_FALLING);
+
+ /// The interrupt is triggered while the signal is held high.
+ pub const TRIGGER_HIGH: Flags = Flags::new(bindings::IRQF_TRIGGER_HIGH);
+
+ /// The interrupt is triggered while the signal is held low.
+ pub const TRIGGER_LOW: Flags = Flags::new(bindings::IRQF_TRIGGER_LOW);
+
+ /// Allow sharing the IRQ among several devices.
+ pub const SHARED: Flags = Flags::new(bindings::IRQF_SHARED);
+
+ /// Set by callers when they expect sharing mismatches to occur.
+ pub const PROBE_SHARED: Flags = Flags::new(bindings::IRQF_PROBE_SHARED);
+
+ /// Flag to mark this interrupt as timer interrupt.
+ pub const TIMER: Flags = Flags::new(bindings::IRQF_TIMER);
+
+ /// Interrupt is per CPU.
+ pub const PERCPU: Flags = Flags::new(bindings::IRQF_PERCPU);
+
+ /// Flag to exclude this interrupt from irq balancing.
+ pub const NOBALANCING: Flags = Flags::new(bindings::IRQF_NOBALANCING);
+
+ /// Interrupt is used for polling (only the interrupt that is registered
+ /// first in a shared interrupt is considered for performance reasons).
+ pub const IRQPOLL: Flags = Flags::new(bindings::IRQF_IRQPOLL);
+
+ /// Interrupt is not re-enabled after the hardirq handler finished. Used by
+ /// threaded interrupts which need to keep the irq line disabled until the
+ /// threaded handler has been run.
+ pub const ONESHOT: Flags = Flags::new(bindings::IRQF_ONESHOT);
+
+ /// Do not disable this IRQ during suspend. Does not guarantee that this
+ /// interrupt will wake the system from a suspended state.
+ pub const NO_SUSPEND: Flags = Flags::new(bindings::IRQF_NO_SUSPEND);
+
+ /// Force enable it on resume even if [`Flags::NO_SUSPEND`] is set.
+ pub const FORCE_RESUME: Flags = Flags::new(bindings::IRQF_FORCE_RESUME);
+
+ /// Interrupt cannot be threaded.
+ pub const NO_THREAD: Flags = Flags::new(bindings::IRQF_NO_THREAD);
+
+ /// Resume IRQ early during syscore instead of at device resume time.
+ pub const EARLY_RESUME: Flags = Flags::new(bindings::IRQF_EARLY_RESUME);
+
+ /// If the IRQ is shared with a [`Flags::NO_SUSPEND`] user, execute this
+ /// interrupt handler after suspending interrupts. For system wakeup devices
+ /// users need to implement wakeup detection in their interrupt handlers.
+ pub const COND_SUSPEND: Flags = Flags::new(bindings::IRQF_COND_SUSPEND);
+
+ /// Don't enable IRQ or NMI automatically when users request it. Users will
+ /// enable it explicitly by `enable_irq` or `enable_nmi` later.
+ pub const NO_AUTOEN: Flags = Flags::new(bindings::IRQF_NO_AUTOEN);
+
+    /// Exclude from runaway detection for IPI and similar handlers, depends on
+ /// `PERCPU`.
+ pub const NO_DEBUG: Flags = Flags::new(bindings::IRQF_NO_DEBUG);
+
+ pub(crate) fn into_inner(self) -> c_ulong {
+ self.0
+ }
+
+ const fn new(value: u32) -> Self {
+ build_assert!(value as u64 <= c_ulong::MAX as u64);
+ Self(value as c_ulong)
+ }
+}
+
+impl core::ops::BitOr for Flags {
+ type Output = Self;
+ fn bitor(self, rhs: Self) -> Self::Output {
+ Self(self.0 | rhs.0)
+ }
+}
+
+impl core::ops::BitAnd for Flags {
+ type Output = Self;
+ fn bitand(self, rhs: Self) -> Self::Output {
+ Self(self.0 & rhs.0)
+ }
+}
+
+impl core::ops::Not for Flags {
+ type Output = Self;
+ fn not(self) -> Self::Output {
+ Self(!self.0)
+ }
+}
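As a small usage sketch, a driver requesting a shared line for a threaded handler might combine flags like this (the helper is illustrative):

```rust
use kernel::irq::Flags;

// A shared interrupt whose line stays masked until the threaded
// handler has finished running.
fn shared_oneshot_flags() -> Flags {
    Flags::SHARED | Flags::ONESHOT
}
```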
diff --git a/rust/kernel/irq/request.rs b/rust/kernel/irq/request.rs
new file mode 100644
index 000000000000..b150563fdef8
--- /dev/null
+++ b/rust/kernel/irq/request.rs
@@ -0,0 +1,507 @@
+// SPDX-License-Identifier: GPL-2.0
+// SPDX-FileCopyrightText: Copyright 2025 Collabora ltd.
+
+//! This module provides types like [`Registration`] and
+//! [`ThreadedRegistration`], which allow users to register handlers for a given
+//! IRQ line.
+
+use core::marker::PhantomPinned;
+
+use crate::alloc::Allocator;
+use crate::device::{Bound, Device};
+use crate::devres::Devres;
+use crate::error::to_result;
+use crate::irq::flags::Flags;
+use crate::prelude::*;
+use crate::str::CStr;
+use crate::sync::Arc;
+
+/// The value that can be returned from a [`Handler`] or a [`ThreadedHandler`].
+#[repr(u32)]
+pub enum IrqReturn {
+ /// The interrupt was not from this device or was not handled.
+ None = bindings::irqreturn_IRQ_NONE,
+
+ /// The interrupt was handled by this device.
+ Handled = bindings::irqreturn_IRQ_HANDLED,
+}
+
+/// Callbacks for an IRQ handler.
+pub trait Handler: Sync {
+ /// The hard IRQ handler.
+ ///
+    /// This is executed in interrupt context, hence all corresponding
+    /// limitations apply.
+    ///
+    /// All work that does not necessarily need to be executed from
+    /// interrupt context should be deferred to a threaded handler.
+ /// See also [`ThreadedRegistration`].
+ fn handle(&self, device: &Device<Bound>) -> IrqReturn;
+}
+
+impl<T: ?Sized + Handler + Send> Handler for Arc<T> {
+ fn handle(&self, device: &Device<Bound>) -> IrqReturn {
+ T::handle(self, device)
+ }
+}
+
+impl<T: ?Sized + Handler, A: Allocator> Handler for Box<T, A> {
+ fn handle(&self, device: &Device<Bound>) -> IrqReturn {
+ T::handle(self, device)
+ }
+}
+
+/// # Invariants
+///
+/// - `self.irq` is the same as the one passed to `request_{threaded}_irq`.
+/// - `cookie` was passed to `request_{threaded}_irq` as the cookie. It is guaranteed to be unique
+/// by the type system, since each call to `new` will return a different instance of
+/// `Registration`.
+#[pin_data(PinnedDrop)]
+struct RegistrationInner {
+ irq: u32,
+ cookie: *mut c_void,
+}
+
+impl RegistrationInner {
+ fn synchronize(&self) {
+        // SAFETY: Safe as per the invariants of `RegistrationInner`.
+ unsafe { bindings::synchronize_irq(self.irq) };
+ }
+}
+
+#[pinned_drop]
+impl PinnedDrop for RegistrationInner {
+ fn drop(self: Pin<&mut Self>) {
+ // SAFETY:
+ //
+ // Safe as per the invariants of `RegistrationInner` and:
+ //
+ // - The containing struct is `!Unpin` and was initialized using
+ // pin-init, so it occupied the same memory location for the entirety of
+ // its lifetime.
+ //
+        // Notice that this will block until all handlers finish executing,
+        // i.e. at no point will `&self` be invalid while a handler is running.
+ unsafe { bindings::free_irq(self.irq, self.cookie) };
+ }
+}
+
+// SAFETY: We only use `inner` on drop, which is called at most once with no
+// concurrent access.
+unsafe impl Sync for RegistrationInner {}
+
+// SAFETY: It is safe to send `RegistrationInner` across threads.
+unsafe impl Send for RegistrationInner {}
+
+/// A request for an IRQ line for a given device.
+///
+/// # Invariants
+///
+/// - `irq` is the number of an interrupt source of `dev`.
+/// - `irq` has not been registered yet.
+pub struct IrqRequest<'a> {
+ dev: &'a Device<Bound>,
+ irq: u32,
+}
+
+impl<'a> IrqRequest<'a> {
+ /// Creates a new IRQ request for the given device and IRQ number.
+ ///
+ /// # Safety
+ ///
+    /// - `irq` must be a valid IRQ number for `dev`.
+ pub(crate) unsafe fn new(dev: &'a Device<Bound>, irq: u32) -> Self {
+ // INVARIANT: `irq` is a valid IRQ number for `dev`.
+ IrqRequest { dev, irq }
+ }
+
+ /// Returns the IRQ number of an [`IrqRequest`].
+ pub fn irq(&self) -> u32 {
+ self.irq
+ }
+}
+
+/// A registration of an IRQ handler for a given IRQ line.
+///
+/// # Examples
+///
+/// The following is an example of using `Registration`. It uses a
+/// [`Completion`] to coordinate between the IRQ
+/// handler and process context. [`Completion`] uses interior mutability, so the
+/// handler can signal with [`Completion::complete_all()`] and the process
+/// context can wait with [`Completion::wait_for_completion()`] even though
+/// there is no way to get a mutable reference to any of the fields in
+/// `Data`.
+///
+/// [`Completion`]: kernel::sync::Completion
+/// [`Completion::complete_all()`]: kernel::sync::Completion::complete_all
+/// [`Completion::wait_for_completion()`]: kernel::sync::Completion::wait_for_completion
+///
+/// ```
+/// use kernel::c_str;
+/// use kernel::device::{Bound, Device};
+/// use kernel::irq::{self, Flags, IrqRequest, IrqReturn, Registration};
+/// use kernel::prelude::*;
+/// use kernel::sync::{Arc, Completion};
+///
+/// // Data shared between process and IRQ context.
+/// #[pin_data]
+/// struct Data {
+/// #[pin]
+/// completion: Completion,
+/// }
+///
+/// impl irq::Handler for Data {
+/// // Executed in IRQ context.
+/// fn handle(&self, _dev: &Device<Bound>) -> IrqReturn {
+/// self.completion.complete_all();
+/// IrqReturn::Handled
+/// }
+/// }
+///
+/// // Registers an IRQ handler for the given IrqRequest.
+/// //
+/// // This runs in process context and assumes `request` was previously acquired from a device.
+/// fn register_irq(
+/// handler: impl PinInit<Data, Error>,
+/// request: IrqRequest<'_>,
+/// ) -> Result<Arc<Registration<Data>>> {
+/// let registration = Registration::new(request, Flags::SHARED, c_str!("my_device"), handler);
+///
+/// let registration = Arc::pin_init(registration, GFP_KERNEL)?;
+///
+/// registration.handler().completion.wait_for_completion();
+///
+/// Ok(registration)
+/// }
+/// # Ok::<(), Error>(())
+/// ```
+///
+/// # Invariants
+///
+/// * We own an irq handler whose cookie is a pointer to `Self`.
+#[pin_data]
+pub struct Registration<T: Handler + 'static> {
+ #[pin]
+ inner: Devres<RegistrationInner>,
+
+ #[pin]
+ handler: T,
+
+ /// Pinned because we need address stability so that we can pass a pointer
+ /// to the callback.
+ #[pin]
+ _pin: PhantomPinned,
+}
+
+impl<T: Handler + 'static> Registration<T> {
+ /// Registers the IRQ handler with the system for the given IRQ number.
+ pub fn new<'a>(
+ request: IrqRequest<'a>,
+ flags: Flags,
+ name: &'static CStr,
+ handler: impl PinInit<T, Error> + 'a,
+ ) -> impl PinInit<Self, Error> + 'a {
+ try_pin_init!(&this in Self {
+ handler <- handler,
+ inner <- Devres::new(
+ request.dev,
+ try_pin_init!(RegistrationInner {
+ // INVARIANT: `this` is a valid pointer to the `Registration` instance
+ cookie: this.as_ptr().cast::<c_void>(),
+ irq: {
+ // SAFETY:
+ // - The callbacks are valid for use with request_irq.
+ // - If this succeeds, the slot is guaranteed to be valid until the
+ // destructor of Self runs, which will deregister the callbacks
+ // before the memory location becomes invalid.
+ // - When request_irq is called, everything that handle_irq_callback will
+ // touch has already been initialized, so it's safe for the callback to
+ // be called immediately.
+ to_result(unsafe {
+ bindings::request_irq(
+ request.irq,
+ Some(handle_irq_callback::<T>),
+ flags.into_inner(),
+ name.as_char_ptr(),
+ this.as_ptr().cast::<c_void>(),
+ )
+ })?;
+ request.irq
+ }
+ })
+ ),
+ _pin: PhantomPinned,
+ })
+ }
+
+ /// Returns a reference to the handler that was registered with the system.
+ pub fn handler(&self) -> &T {
+ &self.handler
+ }
+
+    /// Wait for pending IRQ handlers on other CPUs.
+    ///
+    /// This will attempt to access the inner [`Devres`] container and fails
+    /// with [`ENODEV`] if the device has already been unbound.
+ pub fn try_synchronize(&self) -> Result {
+ let inner = self.inner.try_access().ok_or(ENODEV)?;
+ inner.synchronize();
+ Ok(())
+ }
+
+ /// Wait for pending IRQ handlers on other CPUs.
+ pub fn synchronize(&self, dev: &Device<Bound>) -> Result {
+ let inner = self.inner.access(dev)?;
+ inner.synchronize();
+ Ok(())
+ }
+}
+
+/// # Safety
+///
+/// This function must only be used as the callback in `request_irq`.
+unsafe extern "C" fn handle_irq_callback<T: Handler>(_irq: i32, ptr: *mut c_void) -> c_uint {
+ // SAFETY: `ptr` is a pointer to `Registration<T>` set in `Registration::new`
+ let registration = unsafe { &*(ptr as *const Registration<T>) };
+ // SAFETY: The irq callback is removed before the device is unbound, so the fact that the irq
+ // callback is running implies that the device has not yet been unbound.
+ let device = unsafe { registration.inner.device().as_bound() };
+
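+    // `IrqReturn` is `#[repr(u32)]` with values taken from the C `irqreturn`
+    // enum, so this cast maps directly onto the C return codes.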
+ T::handle(&registration.handler, device) as c_uint
+}
+
+/// The value that can be returned from [`ThreadedHandler::handle`].
+#[repr(u32)]
+pub enum ThreadedIrqReturn {
+ /// The interrupt was not from this device or was not handled.
+ None = bindings::irqreturn_IRQ_NONE,
+
+ /// The interrupt was handled by this device.
+ Handled = bindings::irqreturn_IRQ_HANDLED,
+
+ /// The handler wants the handler thread to wake up.
+ WakeThread = bindings::irqreturn_IRQ_WAKE_THREAD,
+}
+
+/// Callbacks for a threaded IRQ handler.
+pub trait ThreadedHandler: Sync {
+ /// The hard IRQ handler.
+ ///
+    /// This is executed in interrupt context, hence all corresponding
+    /// limitations apply. All work that does not strictly need to be
+    /// executed in interrupt context should be deferred to the threaded
+    /// handler, i.e. [`ThreadedHandler::handle_threaded`].
+ ///
+ /// The default implementation returns [`ThreadedIrqReturn::WakeThread`].
+ #[expect(unused_variables)]
+ fn handle(&self, device: &Device<Bound>) -> ThreadedIrqReturn {
+ ThreadedIrqReturn::WakeThread
+ }
+
+ /// The threaded IRQ handler.
+ ///
+ /// This is executed in process context. The kernel creates a dedicated
+ /// `kthread` for this purpose.
+ fn handle_threaded(&self, device: &Device<Bound>) -> IrqReturn;
+}
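+
+// A custom `handle` implementation is only needed when some work must happen
+// in hard IRQ context before the thread is woken. A hypothetical sketch
+// (`MyData` and `hw_needs_service` are illustrative, not part of this API):
+//
+//     impl ThreadedHandler for MyData {
+//         fn handle(&self, _dev: &Device<Bound>) -> ThreadedIrqReturn {
+//             // Quick check in hard IRQ context; defer the heavy work.
+//             if self.hw_needs_service() {
+//                 ThreadedIrqReturn::WakeThread
+//             } else {
+//                 ThreadedIrqReturn::None
+//             }
+//         }
+//
+//         fn handle_threaded(&self, _dev: &Device<Bound>) -> IrqReturn {
+//             IrqReturn::Handled
+//         }
+//     }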
+
+impl<T: ?Sized + ThreadedHandler + Send> ThreadedHandler for Arc<T> {
+ fn handle(&self, device: &Device<Bound>) -> ThreadedIrqReturn {
+ T::handle(self, device)
+ }
+
+ fn handle_threaded(&self, device: &Device<Bound>) -> IrqReturn {
+ T::handle_threaded(self, device)
+ }
+}
+
+impl<T: ?Sized + ThreadedHandler, A: Allocator> ThreadedHandler for Box<T, A> {
+ fn handle(&self, device: &Device<Bound>) -> ThreadedIrqReturn {
+ T::handle(self, device)
+ }
+
+ fn handle_threaded(&self, device: &Device<Bound>) -> IrqReturn {
+ T::handle_threaded(self, device)
+ }
+}
+
+/// A registration of a threaded IRQ handler for a given IRQ line.
+///
+/// Two callbacks are required: one to handle the IRQ, and one to handle any
+/// other work in a separate thread.
+///
+/// The thread handler is only called if the IRQ handler returns
+/// [`ThreadedIrqReturn::WakeThread`].
+///
+/// # Examples
+///
+/// The following is an example of using [`ThreadedRegistration`]. It uses a
+/// [`Mutex`](kernel::sync::Mutex) to provide interior mutability.
+///
+/// ```
+/// use kernel::c_str;
+/// use kernel::device::{Bound, Device};
+/// use kernel::irq::{
+/// self, Flags, IrqRequest, IrqReturn, ThreadedHandler, ThreadedIrqReturn,
+/// ThreadedRegistration,
+/// };
+/// use kernel::prelude::*;
+/// use kernel::sync::{Arc, Mutex};
+///
+/// // Declare a struct that will be passed in when the interrupt fires. The u32
+/// // merely serves as an example of some internal data.
+/// //
+/// // [`irq::ThreadedHandler::handle`] takes `&self`. This example
+/// // illustrates how interior mutability can be used when sharing the data
+/// // between process context and IRQ context.
+/// #[pin_data]
+/// struct Data {
+/// #[pin]
+/// value: Mutex<u32>,
+/// }
+///
+/// impl ThreadedHandler for Data {
+/// // This will run (in a separate kthread) if and only if
+/// // [`ThreadedHandler::handle`] returns [`WakeThread`], which it does by
+/// // default.
+/// fn handle_threaded(&self, _dev: &Device<Bound>) -> IrqReturn {
+/// let mut data = self.value.lock();
+/// *data += 1;
+/// IrqReturn::Handled
+/// }
+/// }
+///
+/// // Registers a threaded IRQ handler for the given [`IrqRequest`].
+/// //
+/// // This runs in process context and assumes that `request` was previously
+/// // acquired from a device.
+/// fn register_threaded_irq(
+/// handler: impl PinInit<Data, Error>,
+/// request: IrqRequest<'_>,
+/// ) -> Result<Arc<ThreadedRegistration<Data>>> {
+/// let registration =
+/// ThreadedRegistration::new(request, Flags::SHARED, c_str!("my_device"), handler);
+///
+/// let registration = Arc::pin_init(registration, GFP_KERNEL)?;
+///
+/// {
+/// // The data can be accessed from process context too.
+/// let mut data = registration.handler().value.lock();
+/// *data += 1;
+/// }
+///
+/// Ok(registration)
+/// }
+/// # Ok::<(), Error>(())
+/// ```
+///
+/// # Invariants
+///
+/// * We own an irq handler whose cookie is a pointer to `Self`.
+#[pin_data]
+pub struct ThreadedRegistration<T: ThreadedHandler + 'static> {
+ #[pin]
+ inner: Devres<RegistrationInner>,
+
+ #[pin]
+ handler: T,
+
+ /// Pinned because we need address stability so that we can pass a pointer
+ /// to the callback.
+ #[pin]
+ _pin: PhantomPinned,
+}
+
+impl<T: ThreadedHandler + 'static> ThreadedRegistration<T> {
+ /// Registers the IRQ handler with the system for the given IRQ number.
+ pub fn new<'a>(
+ request: IrqRequest<'a>,
+ flags: Flags,
+ name: &'static CStr,
+ handler: impl PinInit<T, Error> + 'a,
+ ) -> impl PinInit<Self, Error> + 'a {
+ try_pin_init!(&this in Self {
+ handler <- handler,
+ inner <- Devres::new(
+ request.dev,
+ try_pin_init!(RegistrationInner {
+ // INVARIANT: `this` is a valid pointer to the `ThreadedRegistration` instance.
+ cookie: this.as_ptr().cast::<c_void>(),
+ irq: {
+ // SAFETY:
+ // - The callbacks are valid for use with request_threaded_irq.
+ // - If this succeeds, the slot is guaranteed to be valid until the
+ // destructor of Self runs, which will deregister the callbacks
+ // before the memory location becomes invalid.
+ // - When request_threaded_irq is called, everything that the two callbacks
+ // will touch has already been initialized, so it's safe for the
+ // callbacks to be called immediately.
+ to_result(unsafe {
+ bindings::request_threaded_irq(
+ request.irq,
+ Some(handle_threaded_irq_callback::<T>),
+ Some(thread_fn_callback::<T>),
+ flags.into_inner(),
+ name.as_char_ptr(),
+ this.as_ptr().cast::<c_void>(),
+ )
+ })?;
+ request.irq
+ }
+ })
+ ),
+ _pin: PhantomPinned,
+ })
+ }
+
+ /// Returns a reference to the handler that was registered with the system.
+ pub fn handler(&self) -> &T {
+ &self.handler
+ }
+
+    /// Wait for pending IRQ handlers on other CPUs.
+    ///
+    /// This will attempt to access the inner [`Devres`] container and fails
+    /// with [`ENODEV`] if the device has already been unbound.
+ pub fn try_synchronize(&self) -> Result {
+ let inner = self.inner.try_access().ok_or(ENODEV)?;
+ inner.synchronize();
+ Ok(())
+ }
+
+ /// Wait for pending IRQ handlers on other CPUs.
+ pub fn synchronize(&self, dev: &Device<Bound>) -> Result {
+ let inner = self.inner.access(dev)?;
+ inner.synchronize();
+ Ok(())
+ }
+}
+
+/// # Safety
+///
+/// This function must only be used as the callback in `request_threaded_irq`.
+unsafe extern "C" fn handle_threaded_irq_callback<T: ThreadedHandler>(
+ _irq: i32,
+ ptr: *mut c_void,
+) -> c_uint {
+ // SAFETY: `ptr` is a pointer to `ThreadedRegistration<T>` set in `ThreadedRegistration::new`
+ let registration = unsafe { &*(ptr as *const ThreadedRegistration<T>) };
+ // SAFETY: The irq callback is removed before the device is unbound, so the fact that the irq
+ // callback is running implies that the device has not yet been unbound.
+ let device = unsafe { registration.inner.device().as_bound() };
+
+ T::handle(&registration.handler, device) as c_uint
+}
+
+/// # Safety
+///
+/// This function must only be used as the callback in `request_threaded_irq`.
+unsafe extern "C" fn thread_fn_callback<T: ThreadedHandler>(_irq: i32, ptr: *mut c_void) -> c_uint {
+ // SAFETY: `ptr` is a pointer to `ThreadedRegistration<T>` set in `ThreadedRegistration::new`
+ let registration = unsafe { &*(ptr as *const ThreadedRegistration<T>) };
+ // SAFETY: The irq callback is removed before the device is unbound, so the fact that the irq
+ // callback is running implies that the device has not yet been unbound.
+ let device = unsafe { registration.inner.device().as_bound() };
+
+ T::handle_threaded(&registration.handler, device) as c_uint
+}
diff --git a/rust/kernel/kunit.rs b/rust/kernel/kunit.rs
index b9e65905e121..79436509dd73 100644
--- a/rust/kernel/kunit.rs
+++ b/rust/kernel/kunit.rs
@@ -6,8 +6,11 @@
//!
//! Reference: <https://docs.kernel.org/dev-tools/kunit/index.html>
+use crate::fmt;
use crate::prelude::*;
-use core::{ffi::c_void, fmt};
+
+#[cfg(CONFIG_PRINTK)]
+use crate::c_str;
/// Prints a KUnit error-level message.
///
@@ -19,8 +22,8 @@ pub fn err(args: fmt::Arguments<'_>) {
#[cfg(CONFIG_PRINTK)]
unsafe {
bindings::_printk(
- c"\x013%pA".as_ptr() as _,
- &args as *const _ as *const c_void,
+ c_str!("\x013%pA").as_char_ptr(),
+ core::ptr::from_ref(&args).cast::<c_void>(),
);
}
}
@@ -35,8 +38,8 @@ pub fn info(args: fmt::Arguments<'_>) {
#[cfg(CONFIG_PRINTK)]
unsafe {
bindings::_printk(
- c"\x016%pA".as_ptr() as _,
- &args as *const _ as *const c_void,
+ c_str!("\x016%pA").as_char_ptr(),
+ core::ptr::from_ref(&args).cast::<c_void>(),
);
}
}
@@ -71,14 +74,14 @@ macro_rules! kunit_assert {
// mistake (it is hidden to prevent that).
//
// This mimics KUnit's failed assertion format.
- $crate::kunit::err(format_args!(
+ $crate::kunit::err($crate::prelude::fmt!(
" # {}: ASSERTION FAILED at {FILE}:{LINE}\n",
$name
));
- $crate::kunit::err(format_args!(
+ $crate::kunit::err($crate::prelude::fmt!(
" Expected {CONDITION} to be true, but is false\n"
));
- $crate::kunit::err(format_args!(
+ $crate::kunit::err($crate::prelude::fmt!(
" Failure not reported to KUnit since this is a non-KUnit task\n"
));
break 'out;
@@ -99,12 +102,12 @@ macro_rules! kunit_assert {
unsafe impl Sync for UnaryAssert {}
static LOCATION: Location = Location($crate::bindings::kunit_loc {
- file: FILE.as_char_ptr(),
+ file: $crate::str::as_char_ptr_in_const_context(FILE),
line: LINE,
});
static ASSERTION: UnaryAssert = UnaryAssert($crate::bindings::kunit_unary_assert {
assert: $crate::bindings::kunit_assert {},
- condition: CONDITION.as_char_ptr(),
+ condition: $crate::str::as_char_ptr_in_const_context(CONDITION),
expected_true: true,
});
@@ -199,7 +202,7 @@ pub const fn kunit_case(
) -> kernel::bindings::kunit_case {
kernel::bindings::kunit_case {
run_case: Some(run_case),
- name: name.as_char_ptr(),
+ name: kernel::str::as_char_ptr_in_const_context(name),
attr: kernel::bindings::kunit_attributes {
speed: kernel::bindings::kunit_speed_KUNIT_SPEED_NORMAL,
},
@@ -207,6 +210,8 @@ pub const fn kunit_case(
status: kernel::bindings::kunit_status_KUNIT_SUCCESS,
module_name: core::ptr::null_mut(),
log: core::ptr::null_mut(),
+ param_init: None,
+ param_exit: None,
}
}
@@ -226,6 +231,8 @@ pub const fn kunit_case_null() -> kernel::bindings::kunit_case {
status: kernel::bindings::kunit_status_KUNIT_SUCCESS,
module_name: core::ptr::null_mut(),
log: core::ptr::null_mut(),
+ param_init: None,
+ param_exit: None,
}
}
@@ -354,4 +361,11 @@ mod tests {
fn rust_test_kunit_in_kunit_test() {
assert!(in_kunit_test());
}
+
+ #[test]
+ #[cfg(not(all()))]
+ fn rust_test_kunit_always_disabled_test() {
+ // This test should never run because of the `cfg`.
+ assert!(false);
+ }
}
diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs
index e13d6ed88fa6..3dd7bebe7888 100644
--- a/rust/kernel/lib.rs
+++ b/rust/kernel/lib.rs
@@ -17,7 +17,9 @@
// the unstable features in use.
//
// Stable since Rust 1.79.0.
+#![feature(generic_nonzero)]
#![feature(inline_const)]
+#![feature(pointer_is_aligned)]
//
// Stable since Rust 1.81.0.
#![feature(lint_reasons)]
@@ -28,6 +30,7 @@
// Stable since Rust 1.83.0.
#![feature(const_maybe_uninit_as_mut_ptr)]
#![feature(const_mut_refs)]
+#![feature(const_option)]
#![feature(const_ptr_write)]
#![feature(const_refs_to_cell)]
//
@@ -43,6 +46,10 @@
#![cfg_attr(not(CONFIG_RUSTC_HAS_COERCE_POINTEE), feature(coerce_unsized))]
#![cfg_attr(not(CONFIG_RUSTC_HAS_COERCE_POINTEE), feature(dispatch_from_dyn))]
#![cfg_attr(not(CONFIG_RUSTC_HAS_COERCE_POINTEE), feature(unsize))]
+//
+// `feature(file_with_nul)` is expected to become stable. Before Rust 1.89.0, it did not exist, so
+// enable it conditionally.
+#![cfg_attr(CONFIG_RUSTC_HAS_FILE_WITH_NUL, feature(file_with_nul))]
// Ensure conditional compilation based on the kernel configuration works;
// otherwise we may silently break things like initcall handling.
@@ -54,11 +61,15 @@ extern crate self as kernel;
pub use ffi;
+pub mod acpi;
pub mod alloc;
#[cfg(CONFIG_AUXILIARY_BUS)]
pub mod auxiliary;
+pub mod bitmap;
+pub mod bits;
#[cfg(CONFIG_BLOCK)]
pub mod block;
+pub mod bug;
#[doc(hidden)]
pub mod build_assert;
pub mod clk;
@@ -69,6 +80,7 @@ pub mod cpu;
pub mod cpufreq;
pub mod cpumask;
pub mod cred;
+pub mod debugfs;
pub mod device;
pub mod device_id;
pub mod devres;
@@ -80,14 +92,19 @@ pub mod error;
pub mod faux;
#[cfg(CONFIG_RUST_FW_LOADER_ABSTRACTIONS)]
pub mod firmware;
+pub mod fmt;
pub mod fs;
+pub mod id_pool;
pub mod init;
pub mod io;
pub mod ioctl;
+pub mod iov;
+pub mod irq;
pub mod jump_label;
#[cfg(CONFIG_KUNIT)]
pub mod kunit;
pub mod list;
+pub mod maple_tree;
pub mod miscdevice;
pub mod mm;
#[cfg(CONFIG_NET)]
@@ -102,8 +119,12 @@ pub mod pid_namespace;
pub mod platform;
pub mod prelude;
pub mod print;
+pub mod processor;
+pub mod ptr;
pub mod rbtree;
+pub mod regulator;
pub mod revocable;
+pub mod scatterlist;
pub mod security;
pub mod seq_file;
pub mod sizes;
@@ -197,7 +218,7 @@ impl ThisModule {
}
}
-#[cfg(not(any(testlib, test)))]
+#[cfg(not(testlib))]
#[panic_handler]
fn panic(info: &core::panic::PanicInfo<'_>) -> ! {
pr_emerg!("{}\n", info);
@@ -207,6 +228,13 @@ fn panic(info: &core::panic::PanicInfo<'_>) -> ! {
/// Produces a pointer to an object from a pointer to one of its fields.
///
+/// If you encounter a type mismatch due to the [`Opaque`] type, then use [`Opaque::cast_into`] or
+/// [`Opaque::cast_from`] to resolve the mismatch.
+///
+/// [`Opaque`]: crate::types::Opaque
+/// [`Opaque::cast_into`]: crate::types::Opaque::cast_into
+/// [`Opaque::cast_from`]: crate::types::Opaque::cast_from
+///
/// # Safety
///
/// The pointer passed to this macro, and the pointer returned by this macro, must both be in
@@ -277,3 +305,52 @@ macro_rules! asm {
::core::arch::asm!( $($asm)*, $($rest)* )
};
}
+
+/// Gets the C string file name of a [`Location`].
+///
+/// If `Location::file_as_c_str()` is not available, returns a string that warns about it.
+///
+/// [`Location`]: core::panic::Location
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::file_from_location;
+///
+/// #[track_caller]
+/// fn foo() {
+/// let caller = core::panic::Location::caller();
+///
+/// // Output:
+/// // - A path like "rust/kernel/example.rs" if `file_as_c_str()` is available.
+/// // - "<Location::file_as_c_str() not supported>" otherwise.
+/// let caller_file = file_from_location(caller);
+///
+/// // Prints out the message with caller's file name.
+/// pr_info!("foo() called in file {caller_file:?}\n");
+///
+/// # if cfg!(CONFIG_RUSTC_HAS_FILE_WITH_NUL) {
+/// # assert_eq!(Ok(caller.file()), caller_file.to_str());
+/// # }
+/// }
+///
+/// # foo();
+/// ```
+#[inline]
+pub fn file_from_location<'a>(loc: &'a core::panic::Location<'a>) -> &'a core::ffi::CStr {
+ #[cfg(CONFIG_RUSTC_HAS_FILE_AS_C_STR)]
+ {
+ loc.file_as_c_str()
+ }
+
+ #[cfg(all(CONFIG_RUSTC_HAS_FILE_WITH_NUL, not(CONFIG_RUSTC_HAS_FILE_AS_C_STR)))]
+ {
+ loc.file_with_nul()
+ }
+
+ #[cfg(not(CONFIG_RUSTC_HAS_FILE_WITH_NUL))]
+ {
+ let _ = loc;
+ c"<Location::file_as_c_str() not supported>"
+ }
+}
diff --git a/rust/kernel/list.rs b/rust/kernel/list.rs
index c391c30b80f8..7355bbac16a7 100644
--- a/rust/kernel/list.rs
+++ b/rust/kernel/list.rs
@@ -38,6 +38,8 @@ pub use self::arc_field::{define_list_arc_field_getter, ListArcField};
///
/// # Examples
///
+/// Use [`ListLinks`] as the type of the intrusive field.
+///
/// ```
/// use kernel::list::*;
///
@@ -57,14 +59,11 @@ pub use self::arc_field::{define_list_arc_field_getter, ListArcField};
/// }
/// }
///
-/// impl_has_list_links! {
-/// impl HasListLinks<0> for BasicItem { self.links }
-/// }
/// impl_list_arc_safe! {
/// impl ListArcSafe<0> for BasicItem { untracked; }
/// }
/// impl_list_item! {
-/// impl ListItem<0> for BasicItem { using ListLinks; }
+/// impl ListItem<0> for BasicItem { using ListLinks { self.links }; }
/// }
///
/// // Create a new empty list.
@@ -82,9 +81,9 @@ pub use self::arc_field::{define_list_arc_field_getter, ListArcField};
/// // [15, 10, 30]
/// {
/// let mut iter = list.iter();
-/// assert_eq!(iter.next().unwrap().value, 15);
-/// assert_eq!(iter.next().unwrap().value, 10);
-/// assert_eq!(iter.next().unwrap().value, 30);
+/// assert_eq!(iter.next().ok_or(EINVAL)?.value, 15);
+/// assert_eq!(iter.next().ok_or(EINVAL)?.value, 10);
+/// assert_eq!(iter.next().ok_or(EINVAL)?.value, 30);
/// assert!(iter.next().is_none());
///
/// // Verify the length of the list.
@@ -93,9 +92,9 @@ pub use self::arc_field::{define_list_arc_field_getter, ListArcField};
///
/// // Pop the items from the list using `pop_back()` and verify the content.
/// {
-/// assert_eq!(list.pop_back().unwrap().value, 30);
-/// assert_eq!(list.pop_back().unwrap().value, 10);
-/// assert_eq!(list.pop_back().unwrap().value, 15);
+/// assert_eq!(list.pop_back().ok_or(EINVAL)?.value, 30);
+/// assert_eq!(list.pop_back().ok_or(EINVAL)?.value, 10);
+/// assert_eq!(list.pop_back().ok_or(EINVAL)?.value, 15);
/// }
///
/// // Insert 3 elements using `push_front()`.
@@ -107,9 +106,9 @@ pub use self::arc_field::{define_list_arc_field_getter, ListArcField};
/// // [30, 10, 15]
/// {
/// let mut iter = list.iter();
-/// assert_eq!(iter.next().unwrap().value, 30);
-/// assert_eq!(iter.next().unwrap().value, 10);
-/// assert_eq!(iter.next().unwrap().value, 15);
+/// assert_eq!(iter.next().ok_or(EINVAL)?.value, 30);
+/// assert_eq!(iter.next().ok_or(EINVAL)?.value, 10);
+/// assert_eq!(iter.next().ok_or(EINVAL)?.value, 15);
/// assert!(iter.next().is_none());
///
/// // Verify the length of the list.
@@ -118,8 +117,8 @@ pub use self::arc_field::{define_list_arc_field_getter, ListArcField};
///
/// // Pop the items from the list using `pop_front()` and verify the content.
/// {
-/// assert_eq!(list.pop_front().unwrap().value, 30);
-/// assert_eq!(list.pop_front().unwrap().value, 10);
+/// assert_eq!(list.pop_front().ok_or(EINVAL)?.value, 30);
+/// assert_eq!(list.pop_front().ok_or(EINVAL)?.value, 10);
/// }
///
/// // Push `list2` to `list` through `push_all_back()`.
@@ -135,9 +134,127 @@ pub use self::arc_field::{define_list_arc_field_getter, ListArcField};
/// // list: [15, 25, 35]
/// // list2: []
/// let mut iter = list.iter();
-/// assert_eq!(iter.next().unwrap().value, 15);
-/// assert_eq!(iter.next().unwrap().value, 25);
-/// assert_eq!(iter.next().unwrap().value, 35);
+/// assert_eq!(iter.next().ok_or(EINVAL)?.value, 15);
+/// assert_eq!(iter.next().ok_or(EINVAL)?.value, 25);
+/// assert_eq!(iter.next().ok_or(EINVAL)?.value, 35);
+/// assert!(iter.next().is_none());
+/// assert!(list2.is_empty());
+/// }
+/// # Result::<(), Error>::Ok(())
+/// ```
+///
+/// Use [`ListLinksSelfPtr`] as the type of the intrusive field. This allows the list to hold
+/// items of a trait object type.
+///
+/// ```
+/// use kernel::list::*;
+///
+/// trait Foo {
+/// fn foo(&self) -> (&'static str, i32);
+/// }
+///
+/// #[pin_data]
+/// struct DTWrap<T: ?Sized> {
+/// #[pin]
+/// links: ListLinksSelfPtr<DTWrap<dyn Foo>>,
+/// value: T,
+/// }
+///
+/// impl<T> DTWrap<T> {
+/// fn new(value: T) -> Result<ListArc<Self>> {
+/// ListArc::pin_init(try_pin_init!(Self {
+/// value,
+/// links <- ListLinksSelfPtr::new(),
+/// }), GFP_KERNEL)
+/// }
+/// }
+///
+/// impl_list_arc_safe! {
+/// impl{T: ?Sized} ListArcSafe<0> for DTWrap<T> { untracked; }
+/// }
+/// impl_list_item! {
+/// impl ListItem<0> for DTWrap<dyn Foo> { using ListLinksSelfPtr { self.links }; }
+/// }
+///
+/// // Create a new empty list.
+/// let mut list = List::<DTWrap<dyn Foo>>::new();
+/// {
+/// assert!(list.is_empty());
+/// }
+///
+/// struct A(i32);
+/// // `A` returns the inner value for `foo`.
+/// impl Foo for A { fn foo(&self) -> (&'static str, i32) { ("a", self.0) } }
+///
+/// struct B;
+/// // `B` always returns 42.
+/// impl Foo for B { fn foo(&self) -> (&'static str, i32) { ("b", 42) } }
+///
+/// // Insert 3 elements using `push_back()`.
+/// list.push_back(DTWrap::new(A(15))?);
+/// list.push_back(DTWrap::new(A(32))?);
+/// list.push_back(DTWrap::new(B)?);
+///
+/// // Iterate over the list to verify the nodes were inserted correctly.
+/// // [A(15), A(32), B]
+/// {
+/// let mut iter = list.iter();
+/// assert_eq!(iter.next().ok_or(EINVAL)?.value.foo(), ("a", 15));
+/// assert_eq!(iter.next().ok_or(EINVAL)?.value.foo(), ("a", 32));
+/// assert_eq!(iter.next().ok_or(EINVAL)?.value.foo(), ("b", 42));
+/// assert!(iter.next().is_none());
+///
+/// // Verify the length of the list.
+/// assert_eq!(list.iter().count(), 3);
+/// }
+///
+/// // Pop the items from the list using `pop_back()` and verify the content.
+/// {
+/// assert_eq!(list.pop_back().ok_or(EINVAL)?.value.foo(), ("b", 42));
+/// assert_eq!(list.pop_back().ok_or(EINVAL)?.value.foo(), ("a", 32));
+/// assert_eq!(list.pop_back().ok_or(EINVAL)?.value.foo(), ("a", 15));
+/// }
+///
+/// // Insert 3 elements using `push_front()`.
+/// list.push_front(DTWrap::new(A(15))?);
+/// list.push_front(DTWrap::new(A(32))?);
+/// list.push_front(DTWrap::new(B)?);
+///
+/// // Iterate over the list to verify the nodes were inserted correctly.
+/// // [B, A(32), A(15)]
+/// {
+/// let mut iter = list.iter();
+/// assert_eq!(iter.next().ok_or(EINVAL)?.value.foo(), ("b", 42));
+/// assert_eq!(iter.next().ok_or(EINVAL)?.value.foo(), ("a", 32));
+/// assert_eq!(iter.next().ok_or(EINVAL)?.value.foo(), ("a", 15));
+/// assert!(iter.next().is_none());
+///
+/// // Verify the length of the list.
+/// assert_eq!(list.iter().count(), 3);
+/// }
+///
+/// // Pop the items from the list using `pop_front()` and verify the content.
+/// {
+/// assert_eq!(list.pop_back().ok_or(EINVAL)?.value.foo(), ("a", 15));
+/// assert_eq!(list.pop_back().ok_or(EINVAL)?.value.foo(), ("a", 32));
+/// }
+///
+/// // Push `list2` to `list` through `push_all_back()`.
+/// // list: [B]
+/// // list2: [B, A(25)]
+/// {
+/// let mut list2 = List::<DTWrap<dyn Foo>>::new();
+/// list2.push_back(DTWrap::new(B)?);
+/// list2.push_back(DTWrap::new(A(25))?);
+///
+/// list.push_all_back(&mut list2);
+///
+/// // list: [B, B, A(25)]
+/// // list2: []
+/// let mut iter = list.iter();
+/// assert_eq!(iter.next().ok_or(EINVAL)?.value.foo(), ("b", 42));
+/// assert_eq!(iter.next().ok_or(EINVAL)?.value.foo(), ("b", 42));
+/// assert_eq!(iter.next().ok_or(EINVAL)?.value.foo(), ("a", 25));
/// assert!(iter.next().is_none());
/// assert!(list2.is_empty());
/// }
@@ -284,7 +401,7 @@ impl<const ID: u64> ListLinks<ID> {
#[inline]
unsafe fn fields(me: *mut Self) -> *mut ListLinksFields {
// SAFETY: The caller promises that the pointer is valid.
- unsafe { Opaque::raw_get(ptr::addr_of!((*me).inner)) }
+ unsafe { Opaque::cast_into(ptr::addr_of!((*me).inner)) }
}
/// # Safety
@@ -320,9 +437,6 @@ unsafe impl<T: ?Sized + Send, const ID: u64> Send for ListLinksSelfPtr<T, ID> {}
unsafe impl<T: ?Sized + Sync, const ID: u64> Sync for ListLinksSelfPtr<T, ID> {}
impl<T: ?Sized, const ID: u64> ListLinksSelfPtr<T, ID> {
- /// The offset from the [`ListLinks`] to the self pointer field.
- pub const LIST_LINKS_SELF_PTR_OFFSET: usize = core::mem::offset_of!(Self, self_ptr);
-
/// Creates a new initializer for this type.
pub fn new() -> impl PinInit<Self> {
// INVARIANT: Pin-init initializers can't be used on an existing `Arc`, so this value will
@@ -337,6 +451,16 @@ impl<T: ?Sized, const ID: u64> ListLinksSelfPtr<T, ID> {
self_ptr: Opaque::uninit(),
}
}
+
+ /// Returns a pointer to the self pointer.
+ ///
+ /// # Safety
+ ///
+ /// The provided pointer must point at a valid struct of type `Self`.
+ pub unsafe fn raw_get_self_ptr(me: *const Self) -> *const Opaque<*const T> {
+ // SAFETY: The caller promises that the pointer is valid.
+ unsafe { ptr::addr_of!((*me).self_ptr) }
+ }
}
impl<T: ?Sized + ListItem<ID>, const ID: u64> List<T, ID> {
@@ -711,14 +835,11 @@ impl<'a, T: ?Sized + ListItem<ID>, const ID: u64> Iterator for Iter<'a, T, ID> {
/// }
/// }
///
-/// kernel::list::impl_has_list_links! {
-/// impl HasListLinks<0> for ListItem { self.links }
-/// }
/// kernel::list::impl_list_arc_safe! {
/// impl ListArcSafe<0> for ListItem { untracked; }
/// }
/// kernel::list::impl_list_item! {
-/// impl ListItem<0> for ListItem { using ListLinks; }
+/// impl ListItem<0> for ListItem { using ListLinks { self.links }; }
/// }
///
/// // Use a cursor to remove the first element with the given value.
@@ -809,11 +930,11 @@ impl<'a, T: ?Sized + ListItem<ID>, const ID: u64> Iterator for Iter<'a, T, ID> {
/// merge_sorted(&mut list, list2);
///
/// let mut items = list.into_iter();
-/// assert_eq!(items.next().unwrap().value, 10);
-/// assert_eq!(items.next().unwrap().value, 11);
-/// assert_eq!(items.next().unwrap().value, 12);
-/// assert_eq!(items.next().unwrap().value, 13);
-/// assert_eq!(items.next().unwrap().value, 14);
+/// assert_eq!(items.next().ok_or(EINVAL)?.value, 10);
+/// assert_eq!(items.next().ok_or(EINVAL)?.value, 11);
+/// assert_eq!(items.next().ok_or(EINVAL)?.value, 12);
+/// assert_eq!(items.next().ok_or(EINVAL)?.value, 13);
+/// assert_eq!(items.next().ok_or(EINVAL)?.value, 14);
/// assert!(items.next().is_none());
/// # Result::<(), Error>::Ok(())
/// ```
diff --git a/rust/kernel/list/impl_list_item_mod.rs b/rust/kernel/list/impl_list_item_mod.rs
index a0438537cee1..202bc6f97c13 100644
--- a/rust/kernel/list/impl_list_item_mod.rs
+++ b/rust/kernel/list/impl_list_item_mod.rs
@@ -4,60 +4,48 @@
//! Helpers for implementing list traits safely.
-use crate::list::ListLinks;
-
-/// Declares that this type has a `ListLinks<ID>` field at a fixed offset.
+/// Declares that this type has a [`ListLinks<ID>`] field.
///
-/// This trait is only used to help implement `ListItem` safely. If `ListItem` is implemented
+/// This trait is only used to help implement [`ListItem`] safely. If [`ListItem`] is implemented
/// manually, then this trait is not needed. Use the [`impl_has_list_links!`] macro to implement
/// this trait.
///
/// # Safety
///
-/// All values of this type must have a `ListLinks<ID>` field at the given offset.
+/// `raw_get_list_links` must have exactly the behavior documented on it below, i.e. it must
+/// return a pointer to the [`ListLinks<ID>`] field of the value pointed to by `ptr`.
///
-/// The behavior of `raw_get_list_links` must not be changed.
+/// [`ListLinks<ID>`]: crate::list::ListLinks
+/// [`ListItem`]: crate::list::ListItem
pub unsafe trait HasListLinks<const ID: u64 = 0> {
- /// The offset of the `ListLinks` field.
- const OFFSET: usize;
-
- /// Returns a pointer to the [`ListLinks<T, ID>`] field.
+ /// Returns a pointer to the [`ListLinks<ID>`] field.
///
/// # Safety
///
/// The provided pointer must point at a valid struct of type `Self`.
///
- /// [`ListLinks<T, ID>`]: ListLinks
- // We don't really need this method, but it's necessary for the implementation of
- // `impl_has_list_links!` to be correct.
- #[inline]
- unsafe fn raw_get_list_links(ptr: *mut Self) -> *mut ListLinks<ID> {
- // SAFETY: The caller promises that the pointer is valid. The implementer promises that the
- // `OFFSET` constant is correct.
- unsafe { (ptr as *mut u8).add(Self::OFFSET) as *mut ListLinks<ID> }
- }
+ /// [`ListLinks<ID>`]: crate::list::ListLinks
+ unsafe fn raw_get_list_links(ptr: *mut Self) -> *mut crate::list::ListLinks<ID>;
}
/// Implements the [`HasListLinks`] trait for the given type.
#[macro_export]
macro_rules! impl_has_list_links {
- ($(impl$(<$($implarg:ident),*>)?
+ ($(impl$({$($generics:tt)*})?
HasListLinks$(<$id:tt>)?
- for $self:ident $(<$($selfarg:ty),*>)?
+ for $self:ty
{ self$(.$field:ident)* }
)*) => {$(
// SAFETY: The implementation of `raw_get_list_links` only compiles if the field has the
// right type.
- //
- // The behavior of `raw_get_list_links` is not changed since the `addr_of_mut!` macro is
- // equivalent to the pointer offset operation in the trait definition.
- unsafe impl$(<$($implarg),*>)? $crate::list::HasListLinks$(<$id>)? for
- $self $(<$($selfarg),*>)?
- {
- const OFFSET: usize = ::core::mem::offset_of!(Self, $($field).*) as usize;
-
+ unsafe impl$(<$($generics)*>)? $crate::list::HasListLinks$(<$id>)? for $self {
#[inline]
unsafe fn raw_get_list_links(ptr: *mut Self) -> *mut $crate::list::ListLinks$(<$id>)? {
+            // Statically ensure that `$(.$field)*` doesn't follow any pointers.
+ //
+ // Cannot be `const` because `$self` may contain generics and E0401 says constants
+ // "can't use {`Self`,generic parameters} from outer item".
+ if false { let _: usize = ::core::mem::offset_of!(Self, $($field).*); }
+
// SAFETY: The caller promises that the pointer is not dangling. We know that this
// expression doesn't follow any pointers, as the `offset_of!` invocation above
// would otherwise not compile.
@@ -68,12 +56,16 @@ macro_rules! impl_has_list_links {
}
pub use impl_has_list_links;
-/// Declares that the `ListLinks<ID>` field in this struct is inside a `ListLinksSelfPtr<T, ID>`.
+/// Declares that the [`ListLinks<ID>`] field in this struct is inside a
+/// [`ListLinksSelfPtr<T, ID>`].
///
/// # Safety
///
-/// The `ListLinks<ID>` field of this struct at the offset `HasListLinks<ID>::OFFSET` must be
-/// inside a `ListLinksSelfPtr<T, ID>`.
+/// The [`ListLinks<ID>`] field of this struct at [`HasListLinks<ID>::raw_get_list_links`] must be
+/// inside a [`ListLinksSelfPtr<T, ID>`].
+///
+/// [`ListLinks<ID>`]: crate::list::ListLinks
+/// [`ListLinksSelfPtr<T, ID>`]: crate::list::ListLinksSelfPtr
pub unsafe trait HasSelfPtr<T: ?Sized, const ID: u64 = 0>
where
Self: HasListLinks<ID>,
@@ -83,27 +75,21 @@ where
/// Implements the [`HasListLinks`] and [`HasSelfPtr`] traits for the given type.
#[macro_export]
macro_rules! impl_has_list_links_self_ptr {
- ($(impl$({$($implarg:tt)*})?
+ ($(impl$({$($generics:tt)*})?
HasSelfPtr<$item_type:ty $(, $id:tt)?>
- for $self:ident $(<$($selfarg:ty),*>)?
- { self.$field:ident }
+ for $self:ty
+ { self$(.$field:ident)* }
)*) => {$(
// SAFETY: The implementation of `raw_get_list_links` only compiles if the field has the
// right type.
- unsafe impl$(<$($implarg)*>)? $crate::list::HasSelfPtr<$item_type $(, $id)?> for
- $self $(<$($selfarg),*>)?
- {}
-
- unsafe impl$(<$($implarg)*>)? $crate::list::HasListLinks$(<$id>)? for
- $self $(<$($selfarg),*>)?
- {
- const OFFSET: usize = ::core::mem::offset_of!(Self, $field) as usize;
+ unsafe impl$(<$($generics)*>)? $crate::list::HasSelfPtr<$item_type $(, $id)?> for $self {}
+ unsafe impl$(<$($generics)*>)? $crate::list::HasListLinks$(<$id>)? for $self {
#[inline]
unsafe fn raw_get_list_links(ptr: *mut Self) -> *mut $crate::list::ListLinks$(<$id>)? {
// SAFETY: The caller promises that the pointer is not dangling.
let ptr: *mut $crate::list::ListLinksSelfPtr<$item_type $(, $id)?> =
- unsafe { ::core::ptr::addr_of_mut!((*ptr).$field) };
+ unsafe { ::core::ptr::addr_of_mut!((*ptr)$(.$field)*) };
ptr.cast()
}
}
@@ -117,15 +103,95 @@ pub use impl_has_list_links_self_ptr;
/// implement that trait.
///
/// [`ListItem`]: crate::list::ListItem
+///
+/// # Examples
+///
+/// ```
+/// #[pin_data]
+/// struct SimpleListItem {
+/// value: u32,
+/// #[pin]
+/// links: kernel::list::ListLinks,
+/// }
+///
+/// kernel::list::impl_list_arc_safe! {
+/// impl ListArcSafe<0> for SimpleListItem { untracked; }
+/// }
+///
+/// kernel::list::impl_list_item! {
+/// impl ListItem<0> for SimpleListItem { using ListLinks { self.links }; }
+/// }
+///
+/// struct ListLinksHolder {
+/// inner: kernel::list::ListLinks,
+/// }
+///
+/// #[pin_data]
+/// struct ComplexListItem<T, U> {
+/// value: Result<T, U>,
+/// #[pin]
+/// links: ListLinksHolder,
+/// }
+///
+/// kernel::list::impl_list_arc_safe! {
+/// impl{T, U} ListArcSafe<0> for ComplexListItem<T, U> { untracked; }
+/// }
+///
+/// kernel::list::impl_list_item! {
+/// impl{T, U} ListItem<0> for ComplexListItem<T, U> { using ListLinks { self.links.inner }; }
+/// }
+/// ```
+///
+/// ```
+/// #[pin_data]
+/// struct SimpleListItem {
+/// value: u32,
+/// #[pin]
+/// links: kernel::list::ListLinksSelfPtr<SimpleListItem>,
+/// }
+///
+/// kernel::list::impl_list_arc_safe! {
+/// impl ListArcSafe<0> for SimpleListItem { untracked; }
+/// }
+///
+/// kernel::list::impl_list_item! {
+/// impl ListItem<0> for SimpleListItem { using ListLinksSelfPtr { self.links }; }
+/// }
+///
+/// struct ListLinksSelfPtrHolder<T, U> {
+/// inner: kernel::list::ListLinksSelfPtr<ComplexListItem<T, U>>,
+/// }
+///
+/// #[pin_data]
+/// struct ComplexListItem<T, U> {
+/// value: Result<T, U>,
+/// #[pin]
+/// links: ListLinksSelfPtrHolder<T, U>,
+/// }
+///
+/// kernel::list::impl_list_arc_safe! {
+/// impl{T, U} ListArcSafe<0> for ComplexListItem<T, U> { untracked; }
+/// }
+///
+/// kernel::list::impl_list_item! {
+/// impl{T, U} ListItem<0> for ComplexListItem<T, U> {
+/// using ListLinksSelfPtr { self.links.inner };
+/// }
+/// }
+/// ```
#[macro_export]
macro_rules! impl_list_item {
(
- $(impl$({$($generics:tt)*})? ListItem<$num:tt> for $t:ty {
- using ListLinks;
+ $(impl$({$($generics:tt)*})? ListItem<$num:tt> for $self:ty {
+ using ListLinks { self$(.$field:ident)* };
})*
) => {$(
+ $crate::list::impl_has_list_links! {
+ impl$({$($generics)*})? HasListLinks<$num> for $self { self$(.$field)* }
+ }
+
// SAFETY: See GUARANTEES comment on each method.
- unsafe impl$(<$($generics)*>)? $crate::list::ListItem<$num> for $t {
+ unsafe impl$(<$($generics)*>)? $crate::list::ListItem<$num> for $self {
// GUARANTEES:
// * This returns the same pointer as `prepare_to_insert` because `prepare_to_insert`
// is implemented in terms of `view_links`.
@@ -139,20 +205,19 @@ macro_rules! impl_list_item {
}
// GUARANTEES:
- // * `me` originates from the most recent call to `prepare_to_insert`, which just added
- // `offset` to the pointer passed to `prepare_to_insert`. This method subtracts
- // `offset` from `me` so it returns the pointer originally passed to
- // `prepare_to_insert`.
+ // * `me` originates from the most recent call to `prepare_to_insert`, which calls
+            //   `raw_get_list_links`, which is implemented using `addr_of_mut!((*ptr)$(.$field)*)`.
+ // This method uses `container_of` to perform the inverse operation, so it returns the
+ // pointer originally passed to `prepare_to_insert`.
// * The pointer remains valid until the next call to `post_remove` because the caller
// of the most recent call to `prepare_to_insert` promised to retain ownership of the
// `ListArc` containing `Self` until the next call to `post_remove`. The value cannot
// be destroyed while a `ListArc` reference exists.
unsafe fn view_value(me: *mut $crate::list::ListLinks<$num>) -> *const Self {
- let offset = <Self as $crate::list::HasListLinks<$num>>::OFFSET;
// SAFETY: `me` originates from the most recent call to `prepare_to_insert`, so it
- // points at the field at offset `offset` in a value of type `Self`. Thus,
- // subtracting `offset` from `me` is still in-bounds of the allocation.
- unsafe { (me as *const u8).sub(offset) as *const Self }
+ // points at the field `$field` in a value of type `Self`. Thus, reversing that
+ // operation is still in-bounds of the allocation.
+ $crate::container_of!(me, Self, $($field).*)
}
// GUARANTEES:
@@ -169,27 +234,30 @@ macro_rules! impl_list_item {
}
// GUARANTEES:
- // * `me` originates from the most recent call to `prepare_to_insert`, which just added
- // `offset` to the pointer passed to `prepare_to_insert`. This method subtracts
- // `offset` from `me` so it returns the pointer originally passed to
- // `prepare_to_insert`.
+ // * `me` originates from the most recent call to `prepare_to_insert`, which calls
+            //   `raw_get_list_links`, which is implemented using `addr_of_mut!((*ptr)$(.$field)*)`.
+ // This method uses `container_of` to perform the inverse operation, so it returns the
+ // pointer originally passed to `prepare_to_insert`.
unsafe fn post_remove(me: *mut $crate::list::ListLinks<$num>) -> *const Self {
- let offset = <Self as $crate::list::HasListLinks<$num>>::OFFSET;
// SAFETY: `me` originates from the most recent call to `prepare_to_insert`, so it
- // points at the field at offset `offset` in a value of type `Self`. Thus,
- // subtracting `offset` from `me` is still in-bounds of the allocation.
- unsafe { (me as *const u8).sub(offset) as *const Self }
+ // points at the field `$field` in a value of type `Self`. Thus, reversing that
+ // operation is still in-bounds of the allocation.
+ $crate::container_of!(me, Self, $($field).*)
}
}
)*};
(
- $(impl$({$($generics:tt)*})? ListItem<$num:tt> for $t:ty {
- using ListLinksSelfPtr;
+ $(impl$({$($generics:tt)*})? ListItem<$num:tt> for $self:ty {
+ using ListLinksSelfPtr { self$(.$field:ident)* };
})*
) => {$(
+ $crate::list::impl_has_list_links_self_ptr! {
+ impl$({$($generics)*})? HasSelfPtr<$self> for $self { self$(.$field)* }
+ }
+
// SAFETY: See GUARANTEES comment on each method.
- unsafe impl$(<$($generics)*>)? $crate::list::ListItem<$num> for $t {
+ unsafe impl$(<$($generics)*>)? $crate::list::ListItem<$num> for $self {
// GUARANTEES:
// This implementation of `ListItem` will not give out exclusive access to the same
// `ListLinks` several times because calls to `prepare_to_insert` and `post_remove`
@@ -202,14 +270,16 @@ macro_rules! impl_list_item {
// SAFETY: The caller promises that `me` points at a valid value of type `Self`.
let links_field = unsafe { <Self as $crate::list::ListItem<$num>>::view_links(me) };
- let spoff = $crate::list::ListLinksSelfPtr::<Self, $num>::LIST_LINKS_SELF_PTR_OFFSET;
- // Goes via the offset as the field is private.
- //
- // SAFETY: The constant is equal to `offset_of!(ListLinksSelfPtr, self_ptr)`, so
- // the pointer stays in bounds of the allocation.
- let self_ptr = unsafe { (links_field as *const u8).add(spoff) }
- as *const $crate::types::Opaque<*const Self>;
- let cell_inner = $crate::types::Opaque::raw_get(self_ptr);
+ let container = $crate::container_of!(
+ links_field, $crate::list::ListLinksSelfPtr<Self, $num>, inner
+ );
+
+ // SAFETY: By the same reasoning above, `links_field` is a valid pointer.
+ let self_ptr = unsafe {
+ $crate::list::ListLinksSelfPtr::raw_get_self_ptr(container)
+ };
+
+ let cell_inner = $crate::types::Opaque::cast_into(self_ptr);
// SAFETY: This value is not accessed in any other places than `prepare_to_insert`,
// `post_remove`, or `view_value`. By the safety requirements of those methods,
@@ -228,7 +298,9 @@ macro_rules! impl_list_item {
// this value is not in a list.
unsafe fn view_links(me: *const Self) -> *mut $crate::list::ListLinks<$num> {
// SAFETY: The caller promises that `me` points at a valid value of type `Self`.
- unsafe { <Self as HasListLinks<$num>>::raw_get_list_links(me.cast_mut()) }
+ unsafe {
+ <Self as $crate::list::HasListLinks<$num>>::raw_get_list_links(me.cast_mut())
+ }
}
// This function is also used as the implementation of `post_remove`, so the caller
@@ -247,12 +319,17 @@ macro_rules! impl_list_item {
// `ListArc` containing `Self` until the next call to `post_remove`. The value cannot
// be destroyed while a `ListArc` reference exists.
unsafe fn view_value(links_field: *mut $crate::list::ListLinks<$num>) -> *const Self {
- let spoff = $crate::list::ListLinksSelfPtr::<Self, $num>::LIST_LINKS_SELF_PTR_OFFSET;
- // SAFETY: The constant is equal to `offset_of!(ListLinksSelfPtr, self_ptr)`, so
- // the pointer stays in bounds of the allocation.
- let self_ptr = unsafe { (links_field as *const u8).add(spoff) }
- as *const ::core::cell::UnsafeCell<*const Self>;
- let cell_inner = ::core::cell::UnsafeCell::raw_get(self_ptr);
+ let container = $crate::container_of!(
+ links_field, $crate::list::ListLinksSelfPtr<Self, $num>, inner
+ );
+
+ // SAFETY: By the same reasoning above, `links_field` is a valid pointer.
+ let self_ptr = unsafe {
+ $crate::list::ListLinksSelfPtr::raw_get_self_ptr(container)
+ };
+
+ let cell_inner = $crate::types::Opaque::cast_into(self_ptr);
+
// SAFETY: This is not a data race, because the only function that writes to this
// value is `prepare_to_insert`, but by the safety requirements the
// `prepare_to_insert` method may not be called in parallel with `view_value` or
diff --git a/rust/kernel/maple_tree.rs b/rust/kernel/maple_tree.rs
new file mode 100644
index 000000000000..e72eec56bf57
--- /dev/null
+++ b/rust/kernel/maple_tree.rs
@@ -0,0 +1,647 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Maple trees.
+//!
+//! C header: [`include/linux/maple_tree.h`](srctree/include/linux/maple_tree.h)
+//!
+//! Reference: <https://docs.kernel.org/core-api/maple_tree.html>
+
+use core::{
+ marker::PhantomData,
+ ops::{Bound, RangeBounds},
+ ptr,
+};
+
+use kernel::{
+ alloc::Flags,
+ error::to_result,
+ prelude::*,
+ types::{ForeignOwnable, Opaque},
+};
+
+/// A maple tree optimized for storing non-overlapping ranges.
+///
+/// # Invariants
+///
+/// Each range in the maple tree owns an instance of `T`.
+#[pin_data(PinnedDrop)]
+#[repr(transparent)]
+pub struct MapleTree<T: ForeignOwnable> {
+ #[pin]
+ tree: Opaque<bindings::maple_tree>,
+ _p: PhantomData<T>,
+}
+
+/// A maple tree with `MT_FLAGS_ALLOC_RANGE` set.
+///
+/// All methods on [`MapleTree`] are also accessible on this type.
+#[pin_data]
+#[repr(transparent)]
+pub struct MapleTreeAlloc<T: ForeignOwnable> {
+ #[pin]
+ tree: MapleTree<T>,
+}
+
+// Make MapleTree methods usable on MapleTreeAlloc.
+impl<T: ForeignOwnable> core::ops::Deref for MapleTreeAlloc<T> {
+ type Target = MapleTree<T>;
+
+ #[inline]
+ fn deref(&self) -> &MapleTree<T> {
+ &self.tree
+ }
+}
+
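+// Maps a Rust range to the inclusive `(first, last)` pair used by the C maple
+// tree API, e.g. `100..500` becomes `Some((100, 499))`, `..=1000` becomes
+// `Some((0, 1000))`, and the empty range `72..72` becomes `None`.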
+#[inline]
+fn to_maple_range(range: impl RangeBounds<usize>) -> Option<(usize, usize)> {
+ let first = match range.start_bound() {
+ Bound::Included(start) => *start,
+ Bound::Excluded(start) => start.checked_add(1)?,
+ Bound::Unbounded => 0,
+ };
+
+ let last = match range.end_bound() {
+ Bound::Included(end) => *end,
+ Bound::Excluded(end) => end.checked_sub(1)?,
+ Bound::Unbounded => usize::MAX,
+ };
+
+ if last < first {
+ return None;
+ }
+
+ Some((first, last))
+}
+
+impl<T: ForeignOwnable> MapleTree<T> {
+ /// Create a new maple tree.
+ ///
+ /// The tree will use the regular implementation with a higher branching factor, rather than
+ /// the allocation tree.
+ #[inline]
+ pub fn new() -> impl PinInit<Self> {
+ pin_init!(MapleTree {
+ // SAFETY: This initializes a maple tree into a pinned slot. The maple tree will be
+ // destroyed in Drop before the memory location becomes invalid.
+ tree <- Opaque::ffi_init(|slot| unsafe { bindings::mt_init_flags(slot, 0) }),
+ _p: PhantomData,
+ })
+ }
+
+ /// Insert the value at the given index.
+ ///
+ /// # Errors
+ ///
+ /// If the maple tree already contains a range using the given index, then this call will
+ /// return an [`InsertErrorKind::Occupied`]. It may also fail if memory allocation fails.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use kernel::maple_tree::{InsertErrorKind, MapleTree};
+ ///
+ /// let tree = KBox::pin_init(MapleTree::<KBox<i32>>::new(), GFP_KERNEL)?;
+ ///
+ /// let ten = KBox::new(10, GFP_KERNEL)?;
+ /// let twenty = KBox::new(20, GFP_KERNEL)?;
+ /// let the_answer = KBox::new(42, GFP_KERNEL)?;
+ ///
+ /// // These calls will succeed.
+ /// tree.insert(100, ten, GFP_KERNEL)?;
+ /// tree.insert(101, twenty, GFP_KERNEL)?;
+ ///
+ /// // This will fail because the index is already in use.
+ /// assert_eq!(
+ /// tree.insert(100, the_answer, GFP_KERNEL).unwrap_err().cause,
+ /// InsertErrorKind::Occupied,
+ /// );
+ /// # Ok::<_, Error>(())
+ /// ```
+ #[inline]
+ pub fn insert(&self, index: usize, value: T, gfp: Flags) -> Result<(), InsertError<T>> {
+ self.insert_range(index..=index, value, gfp)
+ }
+
+    /// Insert a value into the specified range, failing on overlap.
+ ///
+ /// This accepts the usual types of Rust ranges using the `..` and `..=` syntax for exclusive
+ /// and inclusive ranges respectively. The range must not be empty, and must not overlap with
+ /// any existing range.
+ ///
+ /// # Errors
+ ///
+ /// If the maple tree already contains an overlapping range, then this call will return an
+ /// [`InsertErrorKind::Occupied`]. It may also fail if memory allocation fails or if the
+ /// requested range is invalid (e.g. empty).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use kernel::maple_tree::{InsertErrorKind, MapleTree};
+ ///
+ /// let tree = KBox::pin_init(MapleTree::<KBox<i32>>::new(), GFP_KERNEL)?;
+ ///
+ /// let ten = KBox::new(10, GFP_KERNEL)?;
+ /// let twenty = KBox::new(20, GFP_KERNEL)?;
+ /// let the_answer = KBox::new(42, GFP_KERNEL)?;
+ /// let hundred = KBox::new(100, GFP_KERNEL)?;
+ ///
+ /// // Insert the value 10 at the indices 100 to 499.
+ /// tree.insert_range(100..500, ten, GFP_KERNEL)?;
+ ///
+ /// // Insert the value 20 at the indices 500 to 1000.
+ /// tree.insert_range(500..=1000, twenty, GFP_KERNEL)?;
+ ///
+ /// // This will fail due to overlap with the previous range on index 1000.
+ /// assert_eq!(
+ /// tree.insert_range(1000..1200, the_answer, GFP_KERNEL).unwrap_err().cause,
+ /// InsertErrorKind::Occupied,
+ /// );
+ ///
+ /// // When using .. to specify the range, you must be careful to ensure that the range is
+ /// // non-empty.
+ /// assert_eq!(
+ /// tree.insert_range(72..72, hundred, GFP_KERNEL).unwrap_err().cause,
+ /// InsertErrorKind::InvalidRequest,
+ /// );
+ /// # Ok::<_, Error>(())
+ /// ```
+ pub fn insert_range<R>(&self, range: R, value: T, gfp: Flags) -> Result<(), InsertError<T>>
+ where
+ R: RangeBounds<usize>,
+ {
+ let Some((first, last)) = to_maple_range(range) else {
+ return Err(InsertError {
+ value,
+ cause: InsertErrorKind::InvalidRequest,
+ });
+ };
+
+ let ptr = T::into_foreign(value);
+
+ // SAFETY: The tree is valid, and we are passing a pointer to an owned instance of `T`.
+ let res = to_result(unsafe {
+ bindings::mtree_insert_range(self.tree.get(), first, last, ptr, gfp.as_raw())
+ });
+
+ if let Err(err) = res {
+ // SAFETY: As `mtree_insert_range` failed, it is safe to take back ownership.
+ let value = unsafe { T::from_foreign(ptr) };
+
+ let cause = if err == ENOMEM {
+ InsertErrorKind::AllocError(kernel::alloc::AllocError)
+ } else if err == EEXIST {
+ InsertErrorKind::Occupied
+ } else {
+ InsertErrorKind::InvalidRequest
+ };
+ Err(InsertError { value, cause })
+ } else {
+ Ok(())
+ }
+ }
+
+ /// Erase the range containing the given index.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use kernel::maple_tree::MapleTree;
+ ///
+ /// let tree = KBox::pin_init(MapleTree::<KBox<i32>>::new(), GFP_KERNEL)?;
+ ///
+ /// let ten = KBox::new(10, GFP_KERNEL)?;
+ /// let twenty = KBox::new(20, GFP_KERNEL)?;
+ ///
+ /// tree.insert_range(100..500, ten, GFP_KERNEL)?;
+ /// tree.insert(67, twenty, GFP_KERNEL)?;
+ ///
+ /// assert_eq!(tree.erase(67).map(|v| *v), Some(20));
+ /// assert_eq!(tree.erase(275).map(|v| *v), Some(10));
+ ///
+ /// // The previous call erased the entire range, not just index 275.
+ /// assert!(tree.erase(127).is_none());
+ /// # Ok::<_, Error>(())
+ /// ```
+ #[inline]
+ pub fn erase(&self, index: usize) -> Option<T> {
+ // SAFETY: `self.tree` contains a valid maple tree.
+ let ret = unsafe { bindings::mtree_erase(self.tree.get(), index) };
+
+ // SAFETY: If the pointer is not null, then we took ownership of a valid instance of `T`
+ // from the tree.
+ unsafe { T::try_from_foreign(ret) }
+ }
+
+ /// Lock the internal spinlock.
+ #[inline]
+ pub fn lock(&self) -> MapleGuard<'_, T> {
+ // SAFETY: It's safe to lock the spinlock in a maple tree.
+ unsafe { bindings::spin_lock(self.ma_lock()) };
+
+ // INVARIANT: We just took the spinlock.
+ MapleGuard(self)
+ }
+
+ #[inline]
+ fn ma_lock(&self) -> *mut bindings::spinlock_t {
+ // SAFETY: This pointer offset operation stays in-bounds.
+ let lock_ptr = unsafe { &raw mut (*self.tree.get()).__bindgen_anon_1.ma_lock };
+ lock_ptr.cast()
+ }
+
+ /// Free all `T` instances in this tree.
+ ///
+ /// # Safety
+ ///
+ /// This frees Rust data referenced by the maple tree without removing it from the maple tree,
+ /// leaving it in an invalid state. The caller must ensure that this invalid state cannot be
+ /// observed by the end-user.
+ unsafe fn free_all_entries(self: Pin<&mut Self>) {
+ // SAFETY: The caller provides exclusive access to the entire maple tree, so we have
+ // exclusive access to the entire maple tree despite not holding the lock.
+ let mut ma_state = unsafe { MaState::new_raw(self.into_ref().get_ref(), 0, usize::MAX) };
+
+ loop {
+ // This uses the raw accessor because we're destroying pointers without removing them
+ // from the maple tree, which is only valid because this is the destructor.
+ let ptr = ma_state.mas_find_raw(usize::MAX);
+ if ptr.is_null() {
+ break;
+ }
+ // SAFETY: By the type invariants, this pointer references a valid value of type `T`.
+ // By the safety requirements, it is okay to free it without removing it from the maple
+ // tree.
+ drop(unsafe { T::from_foreign(ptr) });
+ }
+ }
+}
+
+#[pinned_drop]
+impl<T: ForeignOwnable> PinnedDrop for MapleTree<T> {
+ #[inline]
+ fn drop(mut self: Pin<&mut Self>) {
+ // We only iterate the tree if the Rust value has a destructor.
+ if core::mem::needs_drop::<T>() {
+ // SAFETY: Other than the below `mtree_destroy` call, the tree will not be accessed
+ // after this call.
+ unsafe { self.as_mut().free_all_entries() };
+ }
+
+ // SAFETY: The tree is valid, and will not be accessed after this call.
+ unsafe { bindings::mtree_destroy(self.tree.get()) };
+ }
+}
+
+/// A reference to a [`MapleTree`] that owns the inner lock.
+///
+/// # Invariants
+///
+/// This guard owns the inner spinlock.
+#[must_use = "if unused, the lock will be immediately unlocked"]
+pub struct MapleGuard<'tree, T: ForeignOwnable>(&'tree MapleTree<T>);
+
+impl<'tree, T: ForeignOwnable> Drop for MapleGuard<'tree, T> {
+ #[inline]
+ fn drop(&mut self) {
+ // SAFETY: By the type invariants, we hold this spinlock.
+ unsafe { bindings::spin_unlock(self.0.ma_lock()) };
+ }
+}
+
+impl<'tree, T: ForeignOwnable> MapleGuard<'tree, T> {
+ /// Create a [`MaState`] protected by this lock guard.
+ pub fn ma_state(&mut self, first: usize, end: usize) -> MaState<'_, T> {
+        // SAFETY: The `MaState` borrows this `MapleGuard`, so it can also borrow the
+        // `MapleGuard`'s read/write permissions to the maple tree.
+ unsafe { MaState::new_raw(self.0, first, end) }
+ }
+
+ /// Load the value at the given index.
+ ///
+ /// # Examples
+ ///
+ /// Read the value while holding the spinlock.
+ ///
+ /// ```
+ /// use kernel::maple_tree::MapleTree;
+ ///
+ /// let tree = KBox::pin_init(MapleTree::<KBox<i32>>::new(), GFP_KERNEL)?;
+ ///
+ /// let ten = KBox::new(10, GFP_KERNEL)?;
+ /// let twenty = KBox::new(20, GFP_KERNEL)?;
+ /// tree.insert(100, ten, GFP_KERNEL)?;
+ /// tree.insert(200, twenty, GFP_KERNEL)?;
+ ///
+ /// let mut lock = tree.lock();
+ /// assert_eq!(lock.load(100).map(|v| *v), Some(10));
+ /// assert_eq!(lock.load(200).map(|v| *v), Some(20));
+ /// assert_eq!(lock.load(300).map(|v| *v), None);
+ /// # Ok::<_, Error>(())
+ /// ```
+ ///
+ /// Increment the refcount under the lock to keep the value alive afterwards.
+ ///
+ /// ```
+ /// use kernel::maple_tree::MapleTree;
+ /// use kernel::sync::Arc;
+ ///
+ /// let tree = KBox::pin_init(MapleTree::<Arc<i32>>::new(), GFP_KERNEL)?;
+ ///
+ /// let ten = Arc::new(10, GFP_KERNEL)?;
+ /// let twenty = Arc::new(20, GFP_KERNEL)?;
+ /// tree.insert(100, ten, GFP_KERNEL)?;
+ /// tree.insert(200, twenty, GFP_KERNEL)?;
+ ///
+ /// // Briefly take the lock to increment the refcount.
+ /// let value = tree.lock().load(100).map(Arc::from);
+ ///
+ /// // At this point, another thread might remove the value.
+ /// tree.erase(100);
+ ///
+ /// // But we can still access it because we took a refcount.
+ /// assert_eq!(value.map(|v| *v), Some(10));
+ /// # Ok::<_, Error>(())
+ /// ```
+ #[inline]
+ pub fn load(&mut self, index: usize) -> Option<T::BorrowedMut<'_>> {
+ // SAFETY: `self.0.tree` contains a valid maple tree.
+ let ret = unsafe { bindings::mtree_load(self.0.tree.get(), index) };
+ if ret.is_null() {
+ return None;
+ }
+
+ // SAFETY: If the pointer is not null, then it references a valid instance of `T`. It is
+ // safe to borrow the instance mutably because the signature of this function enforces that
+ // the mutable borrow is not used after the spinlock is dropped.
+ Some(unsafe { T::borrow_mut(ret) })
+ }
+}
+
+impl<T: ForeignOwnable> MapleTreeAlloc<T> {
+ /// Create a new allocation tree.
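+ ///
+ /// # Examples
+ ///
+ /// Creating a pinned allocation tree (mirrors the [`alloc_range`](Self::alloc_range) example
+ /// below):
+ ///
+ /// ```
+ /// use kernel::maple_tree::MapleTreeAlloc;
+ ///
+ /// let tree = KBox::pin_init(MapleTreeAlloc::<KBox<i32>>::new(), GFP_KERNEL)?;
+ /// # Ok::<_, Error>(())
+ /// ```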
+ pub fn new() -> impl PinInit<Self> {
+ let tree = pin_init!(MapleTree {
+ // SAFETY: This initializes a maple tree into a pinned slot. The maple tree will be
+ // destroyed in Drop before the memory location becomes invalid.
+ tree <- Opaque::ffi_init(|slot| unsafe {
+ bindings::mt_init_flags(slot, bindings::MT_FLAGS_ALLOC_RANGE)
+ }),
+ _p: PhantomData,
+ });
+
+ pin_init!(MapleTreeAlloc { tree <- tree })
+ }
+
+ /// Insert an entry with the given size somewhere in the given range.
+ ///
+ /// The maple tree will search for a location in the given range where there is space to insert
+ /// the new range. If there is not enough available space, then an error will be returned.
+ ///
+ /// The index of the new range is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use kernel::maple_tree::{MapleTreeAlloc, AllocErrorKind};
+ ///
+ /// let tree = KBox::pin_init(MapleTreeAlloc::<KBox<i32>>::new(), GFP_KERNEL)?;
+ ///
+ /// let ten = KBox::new(10, GFP_KERNEL)?;
+ /// let twenty = KBox::new(20, GFP_KERNEL)?;
+ /// let thirty = KBox::new(30, GFP_KERNEL)?;
+ /// let hundred = KBox::new(100, GFP_KERNEL)?;
+ ///
+ /// // Allocate three ranges.
+ /// let idx1 = tree.alloc_range(100, ten, ..1000, GFP_KERNEL)?;
+ /// let idx2 = tree.alloc_range(100, twenty, ..1000, GFP_KERNEL)?;
+ /// let idx3 = tree.alloc_range(100, thirty, ..1000, GFP_KERNEL)?;
+ ///
+ /// assert_eq!(idx1, 0);
+ /// assert_eq!(idx2, 100);
+ /// assert_eq!(idx3, 200);
+ ///
+ /// // This will fail because the remaining space is too small.
+ /// assert_eq!(
+ /// tree.alloc_range(800, hundred, ..1000, GFP_KERNEL).unwrap_err().cause,
+ /// AllocErrorKind::Busy,
+ /// );
+ /// # Ok::<_, Error>(())
+ /// ```
+ pub fn alloc_range<R>(
+ &self,
+ size: usize,
+ value: T,
+ range: R,
+ gfp: Flags,
+ ) -> Result<usize, AllocError<T>>
+ where
+ R: RangeBounds<usize>,
+ {
+ let Some((min, max)) = to_maple_range(range) else {
+ return Err(AllocError {
+ value,
+ cause: AllocErrorKind::InvalidRequest,
+ });
+ };
+
+ let ptr = T::into_foreign(value);
+ let mut index = 0;
+
+ // SAFETY: The tree is valid, and we are passing a pointer to an owned instance of `T`.
+ let res = to_result(unsafe {
+ bindings::mtree_alloc_range(
+ self.tree.tree.get(),
+ &mut index,
+ ptr,
+ size,
+ min,
+ max,
+ gfp.as_raw(),
+ )
+ });
+
+ if let Err(err) = res {
+ // SAFETY: As `mtree_alloc_range` failed, it is safe to take back ownership.
+ let value = unsafe { T::from_foreign(ptr) };
+
+ let cause = if err == ENOMEM {
+ AllocErrorKind::AllocError(kernel::alloc::AllocError)
+ } else if err == EBUSY {
+ AllocErrorKind::Busy
+ } else {
+ AllocErrorKind::InvalidRequest
+ };
+ Err(AllocError { value, cause })
+ } else {
+ Ok(index)
+ }
+ }
+}
+
+/// A helper type used for navigating a [`MapleTree`].
+///
+/// # Invariants
+///
+/// For the duration of `'tree`:
+///
+/// * The `ma_state` references a valid `MapleTree<T>`.
+/// * The `ma_state` has read/write access to the tree.
+pub struct MaState<'tree, T: ForeignOwnable> {
+ state: bindings::ma_state,
+ _phantom: PhantomData<&'tree mut MapleTree<T>>,
+}
+
+impl<'tree, T: ForeignOwnable> MaState<'tree, T> {
+ /// Initialize a new `MaState` with the given tree.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure that this `MaState` has read/write access to the maple tree.
+ #[inline]
+ unsafe fn new_raw(mt: &'tree MapleTree<T>, first: usize, end: usize) -> Self {
+ // INVARIANT:
+ // * Having a reference ensures that the `MapleTree<T>` is valid for `'tree`.
+ // * The caller ensures that we have read/write access.
+ Self {
+ state: bindings::ma_state {
+ tree: mt.tree.get(),
+ index: first,
+ last: end,
+ node: ptr::null_mut(),
+ status: bindings::maple_status_ma_start,
+ min: 0,
+ max: usize::MAX,
+ alloc: ptr::null_mut(),
+ mas_flags: 0,
+ store_type: bindings::store_type_wr_invalid,
+ ..Default::default()
+ },
+ _phantom: PhantomData,
+ }
+ }
+
+ #[inline]
+ fn as_raw(&mut self) -> *mut bindings::ma_state {
+ &raw mut self.state
+ }
+
+ #[inline]
+ fn mas_find_raw(&mut self, max: usize) -> *mut c_void {
+ // SAFETY: By the type invariants, the `ma_state` is active and we have read/write access
+ // to the tree.
+ unsafe { bindings::mas_find(self.as_raw(), max) }
+ }
+
+ /// Find the next entry in the maple tree.
+ ///
+ /// # Examples
+ ///
+ /// Iterate the maple tree.
+ ///
+ /// ```
+ /// use kernel::maple_tree::MapleTree;
+ /// use kernel::sync::Arc;
+ ///
+ /// let tree = KBox::pin_init(MapleTree::<Arc<i32>>::new(), GFP_KERNEL)?;
+ ///
+ /// let ten = Arc::new(10, GFP_KERNEL)?;
+ /// let twenty = Arc::new(20, GFP_KERNEL)?;
+ /// tree.insert(100, ten, GFP_KERNEL)?;
+ /// tree.insert(200, twenty, GFP_KERNEL)?;
+ ///
+ /// let mut ma_lock = tree.lock();
+ /// let mut iter = ma_lock.ma_state(0, usize::MAX);
+ ///
+ /// assert_eq!(iter.find(usize::MAX).map(|v| *v), Some(10));
+ /// assert_eq!(iter.find(usize::MAX).map(|v| *v), Some(20));
+ /// assert!(iter.find(usize::MAX).is_none());
+ /// # Ok::<_, Error>(())
+ /// ```
+ #[inline]
+ pub fn find(&mut self, max: usize) -> Option<T::BorrowedMut<'_>> {
+ let ret = self.mas_find_raw(max);
+ if ret.is_null() {
+ return None;
+ }
+
+ // SAFETY: If the pointer is not null, then it references a valid instance of `T`. It's
+ // safe to access it mutably as the returned reference borrows this `MaState`, and the
+ // `MaState` has read/write access to the maple tree.
+ Some(unsafe { T::borrow_mut(ret) })
+ }
+}
+
+/// Error type for failure to insert a new value.
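+///
+/// # Examples
+///
+/// A sketch of recovering the value when an insertion fails (assumes a `tree` and a `value` as
+/// in the [`MapleTree`] examples):
+///
+/// ```ignore
+/// match tree.insert(100, value, GFP_KERNEL) {
+///     Ok(()) => {}
+///     // On failure, ownership of the value is handed back inside the error.
+///     Err(err) => match err.cause {
+///         InsertErrorKind::Occupied => drop(err.value),
+///         _ => return Err(err.into()),
+///     },
+/// }
+/// ```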
+pub struct InsertError<T> {
+ /// The value that could not be inserted.
+ pub value: T,
+ /// The reason for the failure to insert.
+ pub cause: InsertErrorKind,
+}
+
+/// The reason for the failure to insert.
+#[derive(PartialEq, Eq, Copy, Clone, Debug)]
+pub enum InsertErrorKind {
+ /// There is already a value in the requested range.
+ Occupied,
+ /// Failure to allocate memory.
+ AllocError(kernel::alloc::AllocError),
+ /// The insertion request was invalid.
+ InvalidRequest,
+}
+
+impl From<InsertErrorKind> for Error {
+ #[inline]
+ fn from(kind: InsertErrorKind) -> Error {
+ match kind {
+ InsertErrorKind::Occupied => EEXIST,
+ InsertErrorKind::AllocError(kernel::alloc::AllocError) => ENOMEM,
+ InsertErrorKind::InvalidRequest => EINVAL,
+ }
+ }
+}
+
+impl<T> From<InsertError<T>> for Error {
+ #[inline]
+ fn from(insert_err: InsertError<T>) -> Error {
+ Error::from(insert_err.cause)
+ }
+}
+
+/// Error type for failure to allocate a range for a new value.
+pub struct AllocError<T> {
+ /// The value that could not be inserted.
+ pub value: T,
+ /// The reason for the failure to allocate.
+ pub cause: AllocErrorKind,
+}
+
+/// The reason for the failure to allocate.
+#[derive(PartialEq, Eq, Copy, Clone)]
+pub enum AllocErrorKind {
+ /// There is not enough space for the requested allocation.
+ Busy,
+ /// Failure to allocate memory.
+ AllocError(kernel::alloc::AllocError),
+ /// The allocation request was invalid.
+ InvalidRequest,
+}
+
+impl From<AllocErrorKind> for Error {
+ #[inline]
+ fn from(kind: AllocErrorKind) -> Error {
+ match kind {
+ AllocErrorKind::Busy => EBUSY,
+ AllocErrorKind::AllocError(kernel::alloc::AllocError) => ENOMEM,
+ AllocErrorKind::InvalidRequest => EINVAL,
+ }
+ }
+}
+
+impl<T> From<AllocError<T>> for Error {
+ #[inline]
+ fn from(alloc_err: AllocError<T>) -> Error {
+ Error::from(alloc_err.cause)
+ }
+}
diff --git a/rust/kernel/miscdevice.rs b/rust/kernel/miscdevice.rs
index 939278bc7b03..d698cddcb4a5 100644
--- a/rust/kernel/miscdevice.rs
+++ b/rust/kernel/miscdevice.rs
@@ -13,11 +13,11 @@ use crate::{
device::Device,
error::{to_result, Error, Result, VTABLE_DEFAULT_ERROR},
ffi::{c_int, c_long, c_uint, c_ulong},
- fs::File,
+ fs::{File, Kiocb},
+ iov::{IovIterDest, IovIterSource},
mm::virt::VmaNew,
prelude::*,
seq_file::SeqFile,
- str::CStr,
types::{ForeignOwnable, Opaque},
};
use core::{marker::PhantomData, mem::MaybeUninit, pin::Pin};
@@ -34,8 +34,8 @@ impl MiscDeviceOptions {
pub const fn into_raw<T: MiscDevice>(self) -> bindings::miscdevice {
// SAFETY: All zeros is valid for this C type.
let mut result: bindings::miscdevice = unsafe { MaybeUninit::zeroed().assume_init() };
- result.minor = bindings::MISC_DYNAMIC_MINOR as _;
- result.name = self.name.as_char_ptr();
+ result.minor = bindings::MISC_DYNAMIC_MINOR as ffi::c_int;
+ result.name = crate::str::as_char_ptr_in_const_context(self.name);
result.fops = MiscdeviceVTable::<T>::build();
result
}
@@ -45,7 +45,13 @@ impl MiscDeviceOptions {
///
/// # Invariants
///
-/// `inner` is a registered misc device.
+/// - `inner` contains a `struct miscdevice` that is registered using
+/// `misc_register()`.
+/// - This registration remains valid for the entire lifetime of the
+/// [`MiscDeviceRegistration`] instance.
+/// - Deregistration occurs exactly once in [`Drop`] via `misc_deregister()`.
+/// - `inner` wraps a valid, pinned `miscdevice` created using
+/// [`MiscDeviceOptions::into_raw`].
#[repr(transparent)]
#[pin_data(PinnedDrop)]
pub struct MiscDeviceRegistration<T> {
@@ -92,7 +98,7 @@ impl<T: MiscDevice> MiscDeviceRegistration<T> {
// function tells the borrow-checker that the `&Device` reference must not outlive the
// `&MiscDeviceRegistration<T>` used to obtain it, so the last use of the reference must be
// before the underlying `struct miscdevice` is destroyed.
- unsafe { Device::as_ref((*self.as_raw()).this_device) }
+ unsafe { Device::from_raw((*self.as_raw()).this_device) }
}
}
@@ -136,6 +142,16 @@ pub trait MiscDevice: Sized {
build_error!(VTABLE_DEFAULT_ERROR)
}
+ /// Read from this miscdevice.
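+ ///
+ /// A minimal sketch of an implementation (illustrative only; it assumes the device exposes a
+ /// byte buffer `contents` and that [`IovIterDest::simple_read_from_buffer`] is available):
+ ///
+ /// ```ignore
+ /// fn read_iter(mut kiocb: Kiocb<'_, Self::Ptr>, iov: &mut IovIterDest<'_>) -> Result<usize> {
+ ///     let contents = b"hello\n";
+ ///     // Copy from the device buffer into userspace, advancing the file position.
+ ///     iov.simple_read_from_buffer(kiocb.ki_pos_mut(), contents)
+ /// }
+ /// ```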
+ fn read_iter(_kiocb: Kiocb<'_, Self::Ptr>, _iov: &mut IovIterDest<'_>) -> Result<usize> {
+ build_error!(VTABLE_DEFAULT_ERROR)
+ }
+
+ /// Write to this miscdevice.
+ fn write_iter(_kiocb: Kiocb<'_, Self::Ptr>, _iov: &mut IovIterSource<'_>) -> Result<usize> {
+ build_error!(VTABLE_DEFAULT_ERROR)
+ }
+
/// Handler for ioctls.
///
/// The `cmd` argument is usually manipulated using the utilities in [`kernel::ioctl`].
@@ -217,7 +233,7 @@ impl<T: MiscDevice> MiscdeviceVTable<T> {
// type.
//
// SAFETY: The open call of a file can access the private data.
- unsafe { (*raw_file).private_data = ptr.into_foreign().cast() };
+ unsafe { (*raw_file).private_data = ptr.into_foreign() };
0
}
@@ -228,7 +244,7 @@ impl<T: MiscDevice> MiscdeviceVTable<T> {
/// must be associated with a `MiscDeviceRegistration<T>`.
unsafe extern "C" fn release(_inode: *mut bindings::inode, file: *mut bindings::file) -> c_int {
// SAFETY: The release call of a file owns the private data.
- let private = unsafe { (*file).private_data }.cast();
+ let private = unsafe { (*file).private_data };
// SAFETY: The release call of a file owns the private data.
let ptr = unsafe { <T::Ptr as ForeignOwnable>::from_foreign(private) };
@@ -242,6 +258,46 @@ impl<T: MiscDevice> MiscdeviceVTable<T> {
/// # Safety
///
+ /// `kiocb` must correspond to a valid file that is associated with a
+ /// `MiscDeviceRegistration<T>`. `iter` must be a valid `struct iov_iter` for writing.
+ unsafe extern "C" fn read_iter(
+ kiocb: *mut bindings::kiocb,
+ iter: *mut bindings::iov_iter,
+ ) -> isize {
+ // SAFETY: The caller provides a valid `struct kiocb` associated with a
+ // `MiscDeviceRegistration<T>` file.
+ let kiocb = unsafe { Kiocb::from_raw(kiocb) };
+ // SAFETY: This is a valid `struct iov_iter` for writing.
+ let iov = unsafe { IovIterDest::from_raw(iter) };
+
+ match T::read_iter(kiocb, iov) {
+ Ok(res) => res as isize,
+ Err(err) => err.to_errno() as isize,
+ }
+ }
+
+ /// # Safety
+ ///
+ /// `kiocb` must correspond to a valid file that is associated with a
+ /// `MiscDeviceRegistration<T>`. `iter` must be a valid `struct iov_iter` for reading.
+ unsafe extern "C" fn write_iter(
+ kiocb: *mut bindings::kiocb,
+ iter: *mut bindings::iov_iter,
+ ) -> isize {
+ // SAFETY: The caller provides a valid `struct kiocb` associated with a
+ // `MiscDeviceRegistration<T>` file.
+ let kiocb = unsafe { Kiocb::from_raw(kiocb) };
+ // SAFETY: This is a valid `struct iov_iter` for reading.
+ let iov = unsafe { IovIterSource::from_raw(iter) };
+
+ match T::write_iter(kiocb, iov) {
+ Ok(res) => res as isize,
+ Err(err) => err.to_errno() as isize,
+ }
+ }
+
+ /// # Safety
+ ///
/// `file` must be a valid file that is associated with a `MiscDeviceRegistration<T>`.
/// `vma` must be a vma that is currently being mmap'ed with this file.
unsafe extern "C" fn mmap(
@@ -272,7 +328,7 @@ impl<T: MiscDevice> MiscdeviceVTable<T> {
/// `file` must be a valid file that is associated with a `MiscDeviceRegistration<T>`.
unsafe extern "C" fn ioctl(file: *mut bindings::file, cmd: c_uint, arg: c_ulong) -> c_long {
// SAFETY: The ioctl call of a file can access the private data.
- let private = unsafe { (*file).private_data }.cast();
+ let private = unsafe { (*file).private_data };
// SAFETY: Ioctl calls can borrow the private data of the file.
let device = unsafe { <T::Ptr as ForeignOwnable>::borrow(private) };
@@ -297,7 +353,7 @@ impl<T: MiscDevice> MiscdeviceVTable<T> {
arg: c_ulong,
) -> c_long {
// SAFETY: The compat ioctl call of a file can access the private data.
- let private = unsafe { (*file).private_data }.cast();
+ let private = unsafe { (*file).private_data };
// SAFETY: Ioctl calls can borrow the private data of the file.
let device = unsafe { <T::Ptr as ForeignOwnable>::borrow(private) };
@@ -318,7 +374,7 @@ impl<T: MiscDevice> MiscdeviceVTable<T> {
/// - `seq_file` must be a valid `struct seq_file` that we can write to.
unsafe extern "C" fn show_fdinfo(seq_file: *mut bindings::seq_file, file: *mut bindings::file) {
// SAFETY: The release call of a file owns the private data.
- let private = unsafe { (*file).private_data }.cast();
+ let private = unsafe { (*file).private_data };
// SAFETY: Ioctl calls can borrow the private data of the file.
let device = unsafe { <T::Ptr as ForeignOwnable>::borrow(private) };
// SAFETY:
@@ -336,6 +392,16 @@ impl<T: MiscDevice> MiscdeviceVTable<T> {
open: Some(Self::open),
release: Some(Self::release),
mmap: if T::HAS_MMAP { Some(Self::mmap) } else { None },
+ read_iter: if T::HAS_READ_ITER {
+ Some(Self::read_iter)
+ } else {
+ None
+ },
+ write_iter: if T::HAS_WRITE_ITER {
+ Some(Self::write_iter)
+ } else {
+ None
+ },
unlocked_ioctl: if T::HAS_IOCTL {
Some(Self::ioctl)
} else {
diff --git a/rust/kernel/mm.rs b/rust/kernel/mm.rs
index 43f525c0d16c..4764d7b68f2a 100644
--- a/rust/kernel/mm.rs
+++ b/rust/kernel/mm.rs
@@ -13,7 +13,8 @@
use crate::{
bindings,
- types::{ARef, AlwaysRefCounted, NotThreadSafe, Opaque},
+ sync::aref::{ARef, AlwaysRefCounted},
+ types::{NotThreadSafe, Opaque},
};
use core::{ops::Deref, ptr::NonNull};
diff --git a/rust/kernel/mm/mmput_async.rs b/rust/kernel/mm/mmput_async.rs
index 9289e05f7a67..b8d2f051225c 100644
--- a/rust/kernel/mm/mmput_async.rs
+++ b/rust/kernel/mm/mmput_async.rs
@@ -10,7 +10,7 @@
use crate::{
bindings,
mm::MmWithUser,
- types::{ARef, AlwaysRefCounted},
+ sync::aref::{ARef, AlwaysRefCounted},
};
use core::{ops::Deref, ptr::NonNull};
diff --git a/rust/kernel/mm/virt.rs b/rust/kernel/mm/virt.rs
index 31803674aecc..a1bfa4e19293 100644
--- a/rust/kernel/mm/virt.rs
+++ b/rust/kernel/mm/virt.rs
@@ -209,6 +209,7 @@ impl VmaMixedMap {
///
/// For the duration of 'a, the referenced vma must be undergoing initialization in an
/// `f_ops->mmap()` hook.
+#[repr(transparent)]
pub struct VmaNew {
vma: VmaRef,
}
@@ -392,80 +393,80 @@ pub mod flags {
use crate::bindings;
/// No flags are set.
- pub const NONE: vm_flags_t = bindings::VM_NONE as _;
+ pub const NONE: vm_flags_t = bindings::VM_NONE as vm_flags_t;
/// Mapping allows reads.
- pub const READ: vm_flags_t = bindings::VM_READ as _;
+ pub const READ: vm_flags_t = bindings::VM_READ as vm_flags_t;
/// Mapping allows writes.
- pub const WRITE: vm_flags_t = bindings::VM_WRITE as _;
+ pub const WRITE: vm_flags_t = bindings::VM_WRITE as vm_flags_t;
/// Mapping allows execution.
- pub const EXEC: vm_flags_t = bindings::VM_EXEC as _;
+ pub const EXEC: vm_flags_t = bindings::VM_EXEC as vm_flags_t;
/// Mapping is shared.
- pub const SHARED: vm_flags_t = bindings::VM_SHARED as _;
+ pub const SHARED: vm_flags_t = bindings::VM_SHARED as vm_flags_t;
/// Mapping may be updated to allow reads.
- pub const MAYREAD: vm_flags_t = bindings::VM_MAYREAD as _;
+ pub const MAYREAD: vm_flags_t = bindings::VM_MAYREAD as vm_flags_t;
/// Mapping may be updated to allow writes.
- pub const MAYWRITE: vm_flags_t = bindings::VM_MAYWRITE as _;
+ pub const MAYWRITE: vm_flags_t = bindings::VM_MAYWRITE as vm_flags_t;
/// Mapping may be updated to allow execution.
- pub const MAYEXEC: vm_flags_t = bindings::VM_MAYEXEC as _;
+ pub const MAYEXEC: vm_flags_t = bindings::VM_MAYEXEC as vm_flags_t;
/// Mapping may be updated to be shared.
- pub const MAYSHARE: vm_flags_t = bindings::VM_MAYSHARE as _;
+ pub const MAYSHARE: vm_flags_t = bindings::VM_MAYSHARE as vm_flags_t;
/// Page-ranges managed without `struct page`, just pure PFN.
- pub const PFNMAP: vm_flags_t = bindings::VM_PFNMAP as _;
+ pub const PFNMAP: vm_flags_t = bindings::VM_PFNMAP as vm_flags_t;
/// Memory mapped I/O or similar.
- pub const IO: vm_flags_t = bindings::VM_IO as _;
+ pub const IO: vm_flags_t = bindings::VM_IO as vm_flags_t;
/// Do not copy this vma on fork.
- pub const DONTCOPY: vm_flags_t = bindings::VM_DONTCOPY as _;
+ pub const DONTCOPY: vm_flags_t = bindings::VM_DONTCOPY as vm_flags_t;
/// Cannot expand with mremap().
- pub const DONTEXPAND: vm_flags_t = bindings::VM_DONTEXPAND as _;
+ pub const DONTEXPAND: vm_flags_t = bindings::VM_DONTEXPAND as vm_flags_t;
/// Lock the pages covered when they are faulted in.
- pub const LOCKONFAULT: vm_flags_t = bindings::VM_LOCKONFAULT as _;
+ pub const LOCKONFAULT: vm_flags_t = bindings::VM_LOCKONFAULT as vm_flags_t;
/// Is a VM accounted object.
- pub const ACCOUNT: vm_flags_t = bindings::VM_ACCOUNT as _;
+ pub const ACCOUNT: vm_flags_t = bindings::VM_ACCOUNT as vm_flags_t;
/// Should the VM suppress accounting.
- pub const NORESERVE: vm_flags_t = bindings::VM_NORESERVE as _;
+ pub const NORESERVE: vm_flags_t = bindings::VM_NORESERVE as vm_flags_t;
/// Huge TLB Page VM.
- pub const HUGETLB: vm_flags_t = bindings::VM_HUGETLB as _;
+ pub const HUGETLB: vm_flags_t = bindings::VM_HUGETLB as vm_flags_t;
/// Synchronous page faults. (DAX-specific)
- pub const SYNC: vm_flags_t = bindings::VM_SYNC as _;
+ pub const SYNC: vm_flags_t = bindings::VM_SYNC as vm_flags_t;
/// Architecture-specific flag.
- pub const ARCH_1: vm_flags_t = bindings::VM_ARCH_1 as _;
+ pub const ARCH_1: vm_flags_t = bindings::VM_ARCH_1 as vm_flags_t;
/// Wipe VMA contents in child on fork.
- pub const WIPEONFORK: vm_flags_t = bindings::VM_WIPEONFORK as _;
+ pub const WIPEONFORK: vm_flags_t = bindings::VM_WIPEONFORK as vm_flags_t;
/// Do not include in the core dump.
- pub const DONTDUMP: vm_flags_t = bindings::VM_DONTDUMP as _;
+ pub const DONTDUMP: vm_flags_t = bindings::VM_DONTDUMP as vm_flags_t;
/// Not soft dirty clean area.
- pub const SOFTDIRTY: vm_flags_t = bindings::VM_SOFTDIRTY as _;
+ pub const SOFTDIRTY: vm_flags_t = bindings::VM_SOFTDIRTY as vm_flags_t;
/// Can contain `struct page` and pure PFN pages.
- pub const MIXEDMAP: vm_flags_t = bindings::VM_MIXEDMAP as _;
+ pub const MIXEDMAP: vm_flags_t = bindings::VM_MIXEDMAP as vm_flags_t;
/// MADV_HUGEPAGE marked this vma.
- pub const HUGEPAGE: vm_flags_t = bindings::VM_HUGEPAGE as _;
+ pub const HUGEPAGE: vm_flags_t = bindings::VM_HUGEPAGE as vm_flags_t;
/// MADV_NOHUGEPAGE marked this vma.
- pub const NOHUGEPAGE: vm_flags_t = bindings::VM_NOHUGEPAGE as _;
+ pub const NOHUGEPAGE: vm_flags_t = bindings::VM_NOHUGEPAGE as vm_flags_t;
/// KSM may merge identical pages.
- pub const MERGEABLE: vm_flags_t = bindings::VM_MERGEABLE as _;
+ pub const MERGEABLE: vm_flags_t = bindings::VM_MERGEABLE as vm_flags_t;
}
diff --git a/rust/kernel/net/phy.rs b/rust/kernel/net/phy.rs
index 32ea43ece646..bf6272d87a7b 100644
--- a/rust/kernel/net/phy.rs
+++ b/rust/kernel/net/phy.rs
@@ -6,7 +6,7 @@
//!
//! C headers: [`include/linux/phy.h`](srctree/include/linux/phy.h).
-use crate::{error::*, prelude::*, types::Opaque};
+use crate::{device_id::RawDeviceId, error::*, prelude::*, types::Opaque};
use core::{marker::PhantomData, ptr::addr_of_mut};
pub mod reg;
@@ -142,7 +142,7 @@ impl Device {
// SAFETY: The struct invariant ensures that we may access
// this field without additional synchronization.
let bit_field = unsafe { &(*self.0.get())._bitfield_1 };
- bit_field.get(13, 1) == bindings::AUTONEG_ENABLE as u64
+ bit_field.get(13, 1) == u64::from(bindings::AUTONEG_ENABLE)
}
/// Gets the current auto-negotiation state.
@@ -163,20 +163,20 @@ impl Device {
let phydev = self.0.get();
// SAFETY: The struct invariant ensures that we may access
// this field without additional synchronization.
- unsafe { (*phydev).speed = speed as i32 };
+ unsafe { (*phydev).speed = speed as c_int };
}
/// Sets duplex mode.
pub fn set_duplex(&mut self, mode: DuplexMode) {
let phydev = self.0.get();
let v = match mode {
- DuplexMode::Full => bindings::DUPLEX_FULL as i32,
- DuplexMode::Half => bindings::DUPLEX_HALF as i32,
- DuplexMode::Unknown => bindings::DUPLEX_UNKNOWN as i32,
+ DuplexMode::Full => bindings::DUPLEX_FULL,
+ DuplexMode::Half => bindings::DUPLEX_HALF,
+ DuplexMode::Unknown => bindings::DUPLEX_UNKNOWN,
};
// SAFETY: The struct invariant ensures that we may access
// this field without additional synchronization.
- unsafe { (*phydev).duplex = v };
+ unsafe { (*phydev).duplex = v as c_int };
}
/// Reads a PHY register.
@@ -196,11 +196,8 @@ impl Device {
// SAFETY: `phydev` is pointing to a valid object by the type invariant of `Self`.
// So it's just an FFI call.
let ret = unsafe { bindings::phy_read_paged(phydev, page.into(), regnum.into()) };
- if ret < 0 {
- Err(Error::from_errno(ret))
- } else {
- Ok(ret as u16)
- }
+
+ to_result(ret).map(|()| ret as u16)
}
/// Resolves the advertisements into PHY settings.
@@ -285,7 +282,7 @@ impl AsRef<kernel::device::Device> for Device {
fn as_ref(&self) -> &kernel::device::Device {
let phydev = self.0.get();
// SAFETY: The struct invariant ensures that `mdio.dev` is valid.
- unsafe { kernel::device::Device::as_ref(addr_of_mut!((*phydev).mdio.dev)) }
+ unsafe { kernel::device::Device::from_raw(addr_of_mut!((*phydev).mdio.dev)) }
}
}
@@ -312,9 +309,7 @@ impl<T: Driver> Adapter<T> {
/// # Safety
///
/// `phydev` must be passed by the corresponding callback in `phy_driver`.
- unsafe extern "C" fn soft_reset_callback(
- phydev: *mut bindings::phy_device,
- ) -> crate::ffi::c_int {
+ unsafe extern "C" fn soft_reset_callback(phydev: *mut bindings::phy_device) -> c_int {
from_result(|| {
// SAFETY: This callback is called only in contexts
// where we hold `phy_device->lock`, so the accessors on
@@ -328,7 +323,7 @@ impl<T: Driver> Adapter<T> {
/// # Safety
///
/// `phydev` must be passed by the corresponding callback in `phy_driver`.
- unsafe extern "C" fn probe_callback(phydev: *mut bindings::phy_device) -> crate::ffi::c_int {
+ unsafe extern "C" fn probe_callback(phydev: *mut bindings::phy_device) -> c_int {
from_result(|| {
// SAFETY: This callback is called only in contexts
// where we can exclusively access `phy_device` because
@@ -343,9 +338,7 @@ impl<T: Driver> Adapter<T> {
/// # Safety
///
/// `phydev` must be passed by the corresponding callback in `phy_driver`.
- unsafe extern "C" fn get_features_callback(
- phydev: *mut bindings::phy_device,
- ) -> crate::ffi::c_int {
+ unsafe extern "C" fn get_features_callback(phydev: *mut bindings::phy_device) -> c_int {
from_result(|| {
// SAFETY: This callback is called only in contexts
// where we hold `phy_device->lock`, so the accessors on
@@ -359,7 +352,7 @@ impl<T: Driver> Adapter<T> {
/// # Safety
///
/// `phydev` must be passed by the corresponding callback in `phy_driver`.
- unsafe extern "C" fn suspend_callback(phydev: *mut bindings::phy_device) -> crate::ffi::c_int {
+ unsafe extern "C" fn suspend_callback(phydev: *mut bindings::phy_device) -> c_int {
from_result(|| {
// SAFETY: The C core code ensures that the accessors on
// `Device` are okay to call even though `phy_device->lock`
@@ -373,7 +366,7 @@ impl<T: Driver> Adapter<T> {
/// # Safety
///
/// `phydev` must be passed by the corresponding callback in `phy_driver`.
- unsafe extern "C" fn resume_callback(phydev: *mut bindings::phy_device) -> crate::ffi::c_int {
+ unsafe extern "C" fn resume_callback(phydev: *mut bindings::phy_device) -> c_int {
from_result(|| {
// SAFETY: The C core code ensures that the accessors on
// `Device` are okay to call even though `phy_device->lock`
@@ -387,9 +380,7 @@ impl<T: Driver> Adapter<T> {
/// # Safety
///
/// `phydev` must be passed by the corresponding callback in `phy_driver`.
- unsafe extern "C" fn config_aneg_callback(
- phydev: *mut bindings::phy_device,
- ) -> crate::ffi::c_int {
+ unsafe extern "C" fn config_aneg_callback(phydev: *mut bindings::phy_device) -> c_int {
from_result(|| {
// SAFETY: This callback is called only in contexts
// where we hold `phy_device->lock`, so the accessors on
@@ -403,9 +394,7 @@ impl<T: Driver> Adapter<T> {
/// # Safety
///
/// `phydev` must be passed by the corresponding callback in `phy_driver`.
- unsafe extern "C" fn read_status_callback(
- phydev: *mut bindings::phy_device,
- ) -> crate::ffi::c_int {
+ unsafe extern "C" fn read_status_callback(phydev: *mut bindings::phy_device) -> c_int {
from_result(|| {
// SAFETY: This callback is called only in contexts
// where we hold `phy_device->lock`, so the accessors on
@@ -422,12 +411,12 @@ impl<T: Driver> Adapter<T> {
unsafe extern "C" fn match_phy_device_callback(
phydev: *mut bindings::phy_device,
_phydrv: *const bindings::phy_driver,
- ) -> crate::ffi::c_int {
+ ) -> c_int {
// SAFETY: This callback is called only in contexts
// where we hold `phy_device->lock`, so the accessors on
// `Device` are okay to call.
let dev = unsafe { Device::from_raw(phydev) };
- T::match_phy_device(dev) as i32
+ T::match_phy_device(dev).into()
}
/// # Safety
@@ -505,9 +494,9 @@ unsafe impl Sync for DriverVTable {}
pub const fn create_phy_driver<T: Driver>() -> DriverVTable {
// INVARIANT: All the fields of `struct phy_driver` are initialized properly.
DriverVTable(Opaque::new(bindings::phy_driver {
- name: T::NAME.as_char_ptr().cast_mut(),
+ name: crate::str::as_char_ptr_in_const_context(T::NAME).cast_mut(),
flags: T::FLAGS,
- phy_id: T::PHY_DEVICE_ID.id,
+ phy_id: T::PHY_DEVICE_ID.id(),
phy_id_mask: T::PHY_DEVICE_ID.mask_as_int(),
soft_reset: if T::HAS_SOFT_RESET {
Some(Adapter::<T>::soft_reset_callback)
@@ -691,42 +680,41 @@ impl Drop for Registration {
///
/// Represents the kernel's `struct mdio_device_id`. This is used to find an appropriate
/// PHY driver.
-pub struct DeviceId {
- id: u32,
- mask: DeviceMask,
-}
+#[repr(transparent)]
+#[derive(Clone, Copy)]
+pub struct DeviceId(bindings::mdio_device_id);
impl DeviceId {
/// Creates a new instance with the exact match mask.
pub const fn new_with_exact_mask(id: u32) -> Self {
- DeviceId {
- id,
- mask: DeviceMask::Exact,
- }
+ Self(bindings::mdio_device_id {
+ phy_id: id,
+ phy_id_mask: DeviceMask::Exact.as_int(),
+ })
}
/// Creates a new instance with the model match mask.
pub const fn new_with_model_mask(id: u32) -> Self {
- DeviceId {
- id,
- mask: DeviceMask::Model,
- }
+ Self(bindings::mdio_device_id {
+ phy_id: id,
+ phy_id_mask: DeviceMask::Model.as_int(),
+ })
}
/// Creates a new instance with the vendor match mask.
pub const fn new_with_vendor_mask(id: u32) -> Self {
- DeviceId {
- id,
- mask: DeviceMask::Vendor,
- }
+ Self(bindings::mdio_device_id {
+ phy_id: id,
+ phy_id_mask: DeviceMask::Vendor.as_int(),
+ })
}
/// Creates a new instance with a custom match mask.
pub const fn new_with_custom_mask(id: u32, mask: u32) -> Self {
- DeviceId {
- id,
- mask: DeviceMask::Custom(mask),
- }
+ Self(bindings::mdio_device_id {
+ phy_id: id,
+ phy_id_mask: DeviceMask::Custom(mask).as_int(),
+ })
}
/// Creates a new instance from [`Driver`].
@@ -734,21 +722,29 @@ impl DeviceId {
T::PHY_DEVICE_ID
}
- /// Get a `mask` as u32.
+ /// Get the MDIO device's PHY ID.
+ pub const fn id(&self) -> u32 {
+ self.0.phy_id
+ }
+
+ /// Get the MDIO device's match mask.
pub const fn mask_as_int(&self) -> u32 {
- self.mask.as_int()
+ self.0.phy_id_mask
}
// macro use only
#[doc(hidden)]
pub const fn mdio_device_id(&self) -> bindings::mdio_device_id {
- bindings::mdio_device_id {
- phy_id: self.id,
- phy_id_mask: self.mask.as_int(),
- }
+ self.0
}
}
+// SAFETY: `DeviceId` is a `#[repr(transparent)]` wrapper of `struct mdio_device_id`
+// and does not add additional invariants, so it's safe to transmute to `RawType`.
+unsafe impl RawDeviceId for DeviceId {
+ type RawType = bindings::mdio_device_id;
+}
+
enum DeviceMask {
Exact,
Model,
@@ -849,19 +845,18 @@ impl DeviceMask {
/// }
/// };
///
-/// const _DEVICE_TABLE: [::kernel::bindings::mdio_device_id; 2] = [
-/// ::kernel::bindings::mdio_device_id {
-/// phy_id: 0x00000001,
-/// phy_id_mask: 0xffffffff,
-/// },
-/// ::kernel::bindings::mdio_device_id {
-/// phy_id: 0,
-/// phy_id_mask: 0,
-/// },
-/// ];
-/// #[cfg(MODULE)]
-/// #[no_mangle]
-/// static __mod_device_table__mdio__phydev: [::kernel::bindings::mdio_device_id; 2] = _DEVICE_TABLE;
+/// const N: usize = 1;
+///
+/// const TABLE: ::kernel::device_id::IdArray<::kernel::net::phy::DeviceId, (), N> =
+/// ::kernel::device_id::IdArray::new_without_index([
+/// ::kernel::net::phy::DeviceId(
+/// ::kernel::bindings::mdio_device_id {
+/// phy_id: 0x00000001,
+/// phy_id_mask: 0xffffffff,
+/// }),
+/// ]);
+///
+/// ::kernel::module_device_table!("mdio", phydev, TABLE);
/// ```
#[macro_export]
macro_rules! module_phy_driver {
@@ -872,20 +867,12 @@ macro_rules! module_phy_driver {
};
(@device_table [$($dev:expr),+]) => {
- // SAFETY: C will not read off the end of this constant since the last element is zero.
- const _DEVICE_TABLE: [$crate::bindings::mdio_device_id;
- $crate::module_phy_driver!(@count_devices $($dev),+) + 1] = [
- $($dev.mdio_device_id()),+,
- $crate::bindings::mdio_device_id {
- phy_id: 0,
- phy_id_mask: 0
- }
- ];
+ const N: usize = $crate::module_phy_driver!(@count_devices $($dev),+);
+
+ const TABLE: $crate::device_id::IdArray<$crate::net::phy::DeviceId, (), N> =
+ $crate::device_id::IdArray::new_without_index([ $(($dev,())),+, ]);
- #[cfg(MODULE)]
- #[no_mangle]
- static __mod_device_table__mdio__phydev: [$crate::bindings::mdio_device_id;
- $crate::module_phy_driver!(@count_devices $($dev),+) + 1] = _DEVICE_TABLE;
+ $crate::module_device_table!("mdio", phydev, TABLE);
};
(drivers: [$($driver:ident),+ $(,)?], device_table: [$($dev:expr),+ $(,)?], $($f:tt)*) => {
diff --git a/rust/kernel/of.rs b/rust/kernel/of.rs
index 04f2d8ef29cb..58b20c367f99 100644
--- a/rust/kernel/of.rs
+++ b/rust/kernel/of.rs
@@ -2,7 +2,11 @@
//! Device Tree / Open Firmware abstractions.
-use crate::{bindings, device_id::RawDeviceId, prelude::*};
+use crate::{
+ bindings,
+ device_id::{RawDeviceId, RawDeviceIdIndex},
+ prelude::*,
+};
/// IdTable type for OF drivers.
pub type IdTable<T> = &'static dyn kernel::device_id::IdTable<DeviceId, T>;
@@ -12,32 +16,33 @@ pub type IdTable<T> = &'static dyn kernel::device_id::IdTable<DeviceId, T>;
#[derive(Clone, Copy)]
pub struct DeviceId(bindings::of_device_id);
-// SAFETY:
-// * `DeviceId` is a `#[repr(transparent)` wrapper of `struct of_device_id` and does not add
-// additional invariants, so it's safe to transmute to `RawType`.
-// * `DRIVER_DATA_OFFSET` is the offset to the `data` field.
+// SAFETY: `DeviceId` is a `#[repr(transparent)]` wrapper of `struct of_device_id` and
+// does not add additional invariants, so it's safe to transmute to `RawType`.
unsafe impl RawDeviceId for DeviceId {
type RawType = bindings::of_device_id;
+}
+// SAFETY: `DRIVER_DATA_OFFSET` is the offset to the `data` field.
+unsafe impl RawDeviceIdIndex for DeviceId {
const DRIVER_DATA_OFFSET: usize = core::mem::offset_of!(bindings::of_device_id, data);
fn index(&self) -> usize {
- self.0.data as _
+ self.0.data as usize
}
}
impl DeviceId {
/// Create a new device id from an OF 'compatible' string.
pub const fn new(compatible: &'static CStr) -> Self {
- let src = compatible.as_bytes_with_nul();
+ let src = compatible.to_bytes_with_nul();
// Replace with `bindings::of_device_id::default()` once stabilized for `const`.
// SAFETY: FFI type is valid to be zero-initialized.
let mut of: bindings::of_device_id = unsafe { core::mem::zeroed() };
- // TODO: Use `clone_from_slice` once the corresponding types do match.
+ // TODO: Use `copy_from_slice` once stabilized for `const`.
let mut i = 0;
while i < src.len() {
- of.compatible[i] = src[i] as _;
+ of.compatible[i] = src[i];
i += 1;
}
diff --git a/rust/kernel/opp.rs b/rust/kernel/opp.rs
index a566fc3e7dcb..2c763fa9276d 100644
--- a/rust/kernel/opp.rs
+++ b/rust/kernel/opp.rs
@@ -12,11 +12,12 @@ use crate::{
clk::Hertz,
cpumask::{Cpumask, CpumaskVar},
device::Device,
- error::{code::*, from_err_ptr, from_result, to_result, Error, Result, VTABLE_DEFAULT_ERROR},
+ error::{code::*, from_err_ptr, from_result, to_result, Result, VTABLE_DEFAULT_ERROR},
ffi::c_ulong,
prelude::*,
str::CString,
- types::{ARef, AlwaysRefCounted, Opaque},
+ sync::aref::{ARef, AlwaysRefCounted},
+ types::Opaque,
};
#[cfg(CONFIG_CPU_FREQ)]
@@ -92,7 +93,7 @@ fn to_c_str_array(names: &[CString]) -> Result<KVec<*const u8>> {
let mut list = KVec::with_capacity(names.len() + 1, GFP_KERNEL)?;
for name in names.iter() {
- list.push(name.as_ptr() as _, GFP_KERNEL)?;
+ list.push(name.as_ptr().cast(), GFP_KERNEL)?;
}
list.push(ptr::null(), GFP_KERNEL)?;
@@ -103,7 +104,7 @@ fn to_c_str_array(names: &[CString]) -> Result<KVec<*const u8>> {
///
/// Represents voltage in microvolts, wrapping a [`c_ulong`] value.
///
-/// ## Examples
+/// # Examples
///
/// ```
/// use kernel::opp::MicroVolt;
@@ -128,7 +129,7 @@ impl From<MicroVolt> for c_ulong {
///
/// Represents power in microwatts, wrapping a [`c_ulong`] value.
///
-/// ## Examples
+/// # Examples
///
/// ```
/// use kernel::opp::MicroWatt;
@@ -153,7 +154,7 @@ impl From<MicroWatt> for c_ulong {
///
/// The associated [`OPP`] is automatically removed when the [`Token`] is dropped.
///
-/// ## Examples
+/// # Examples
///
/// The following example demonstrates how to create an [`OPP`] dynamically.
///
@@ -162,7 +163,7 @@ impl From<MicroWatt> for c_ulong {
/// use kernel::device::Device;
/// use kernel::error::Result;
/// use kernel::opp::{Data, MicroVolt, Token};
-/// use kernel::types::ARef;
+/// use kernel::sync::aref::ARef;
///
/// fn create_opp(dev: &ARef<Device>, freq: Hertz, volt: MicroVolt, level: u32) -> Result<Token> {
/// let data = Data::new(freq, volt, level, false);
@@ -202,7 +203,7 @@ impl Drop for Token {
/// Rust abstraction for the C `struct dev_pm_opp_data`, used to define operating performance
/// points (OPPs) dynamically.
///
-/// ## Examples
+/// # Examples
///
/// The following example demonstrates how to create an [`OPP`] with [`Data`].
///
@@ -211,7 +212,7 @@ impl Drop for Token {
/// use kernel::device::Device;
/// use kernel::error::Result;
/// use kernel::opp::{Data, MicroVolt, Token};
-/// use kernel::types::ARef;
+/// use kernel::sync::aref::ARef;
///
/// fn create_opp(dev: &ARef<Device>, freq: Hertz, volt: MicroVolt, level: u32) -> Result<Token> {
/// let data = Data::new(freq, volt, level, false);
@@ -254,7 +255,7 @@ impl Data {
/// [`OPP`] search options.
///
-/// ## Examples
+/// # Examples
///
/// Defines how to search for an [`OPP`] in a [`Table`] relative to a frequency.
///
@@ -262,7 +263,7 @@ impl Data {
/// use kernel::clk::Hertz;
/// use kernel::error::Result;
/// use kernel::opp::{OPP, SearchType, Table};
-/// use kernel::types::ARef;
+/// use kernel::sync::aref::ARef;
///
/// fn find_opp(table: &Table, freq: Hertz) -> Result<ARef<OPP>> {
/// let opp = table.opp_from_freq(freq, Some(true), None, SearchType::Exact)?;
@@ -326,7 +327,7 @@ impl Drop for ConfigToken {
///
/// Rust abstraction for the C `struct dev_pm_opp_config`.
///
-/// ## Examples
+/// # Examples
///
/// The following example demonstrates how to set OPP property-name configuration for a [`Device`].
///
@@ -335,7 +336,7 @@ impl Drop for ConfigToken {
/// use kernel::error::Result;
/// use kernel::opp::{Config, ConfigOps, ConfigToken};
/// use kernel::str::CString;
-/// use kernel::types::ARef;
+/// use kernel::sync::aref::ARef;
/// use kernel::macros::vtable;
///
/// #[derive(Default)]
@@ -345,7 +346,7 @@ impl Drop for ConfigToken {
/// impl ConfigOps for Driver {}
///
/// fn configure(dev: &ARef<Device>) -> Result<ConfigToken> {
-/// let name = CString::try_from_fmt(fmt!("{}", "slow"))?;
+/// let name = CString::try_from_fmt(fmt!("slow"))?;
///
/// // The OPP configuration is cleared once the [`ConfigToken`] goes out of scope.
/// Config::<Driver>::new()
@@ -500,11 +501,8 @@ impl<T: ConfigOps + Default> Config<T> {
// requirements. The OPP core guarantees not to access fields of [`Config`] after this call
// and so we don't need to save a copy of them for future use.
let ret = unsafe { bindings::dev_pm_opp_set_config(dev.as_raw(), &mut config) };
- if ret < 0 {
- Err(Error::from_errno(ret))
- } else {
- Ok(ConfigToken(ret))
- }
+
+ to_result(ret).map(|()| ConfigToken(ret))
}
/// Config's clk callback.
@@ -514,9 +512,9 @@ impl<T: ConfigOps + Default> Config<T> {
dev: *mut bindings::device,
opp_table: *mut bindings::opp_table,
opp: *mut bindings::dev_pm_opp,
- _data: *mut kernel::ffi::c_void,
+ _data: *mut c_void,
scaling_down: bool,
- ) -> kernel::ffi::c_int {
+ ) -> c_int {
from_result(|| {
// SAFETY: 'dev' is guaranteed by the C code to be valid.
let dev = unsafe { Device::get_device(dev) };
@@ -540,8 +538,8 @@ impl<T: ConfigOps + Default> Config<T> {
old_opp: *mut bindings::dev_pm_opp,
new_opp: *mut bindings::dev_pm_opp,
regulators: *mut *mut bindings::regulator,
- count: kernel::ffi::c_uint,
- ) -> kernel::ffi::c_int {
+ count: c_uint,
+ ) -> c_int {
from_result(|| {
// SAFETY: 'dev' is guaranteed by the C code to be valid.
let dev = unsafe { Device::get_device(dev) };
@@ -569,7 +567,7 @@ impl<T: ConfigOps + Default> Config<T> {
///
/// Instances of this type are reference-counted.
///
-/// ## Examples
+/// # Examples
///
/// The following example demonstrates how to get OPP [`Table`] for a [`Cpumask`] and set its
/// frequency.
@@ -581,7 +579,7 @@ impl<T: ConfigOps + Default> Config<T> {
/// use kernel::device::Device;
/// use kernel::error::Result;
/// use kernel::opp::Table;
-/// use kernel::types::ARef;
+/// use kernel::sync::aref::ARef;
///
/// fn get_table(dev: &ARef<Device>, mask: &mut Cpumask, freq: Hertz) -> Result<Table> {
/// let mut opp_table = Table::from_of_cpumask(dev, mask)?;
@@ -713,11 +711,8 @@ impl Table {
// SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety
// requirements.
let ret = unsafe { bindings::dev_pm_opp_get_opp_count(self.dev.as_raw()) };
- if ret < 0 {
- Err(Error::from_errno(ret))
- } else {
- Ok(ret as u32)
- }
+
+ to_result(ret).map(|()| ret as u32)
}
/// Returns max clock latency (in nanoseconds) of the [`OPP`]s in the [`Table`].
@@ -1011,7 +1006,7 @@ impl Drop for Table {
///
/// A reference to the [`OPP`], &[`OPP`], isn't refcounted by the Rust code.
///
-/// ## Examples
+/// # Examples
///
/// The following example demonstrates how to get [`OPP`] corresponding to a frequency value and
/// configure the device with it.
diff --git a/rust/kernel/page.rs b/rust/kernel/page.rs
index 7c1b17246ed5..432fc0297d4a 100644
--- a/rust/kernel/page.rs
+++ b/rust/kernel/page.rs
@@ -9,7 +9,12 @@ use crate::{
error::Result,
uaccess::UserSliceReader,
};
-use core::ptr::{self, NonNull};
+use core::{
+ marker::PhantomData,
+ mem::ManuallyDrop,
+ ops::Deref,
+ ptr::{self, NonNull},
+};
/// A bitwise shift for the page size.
pub const PAGE_SHIFT: usize = bindings::PAGE_SHIFT as usize;
@@ -30,6 +35,86 @@ pub const fn page_align(addr: usize) -> usize {
(addr + (PAGE_SIZE - 1)) & PAGE_MASK
}
+/// Representation of a non-owning reference to a [`Page`].
+///
+/// This type provides a borrowed version of a [`Page`] that is owned by some other entity, e.g. a
+/// [`Vmalloc`] allocation such as [`VBox`].
+///
+/// # Example
+///
+/// ```
+/// # use kernel::{bindings, prelude::*};
+/// use kernel::page::{BorrowedPage, Page, PAGE_SIZE};
+/// # use core::{mem::MaybeUninit, ptr, ptr::NonNull };
+///
+/// fn borrow_page<'a>(vbox: &'a mut VBox<MaybeUninit<[u8; PAGE_SIZE]>>) -> BorrowedPage<'a> {
+/// let ptr = ptr::from_ref(&**vbox);
+///
+/// // SAFETY: `ptr` is a valid pointer to `Vmalloc` memory.
+/// let page = unsafe { bindings::vmalloc_to_page(ptr.cast()) };
+///
+/// // SAFETY: `vmalloc_to_page` returns a valid pointer to a `struct page` for a valid
+/// // pointer to `Vmalloc` memory.
+/// let page = unsafe { NonNull::new_unchecked(page) };
+///
+/// // SAFETY:
+/// // - `page` is a valid pointer to a `struct page`.
+/// // - `page` remains valid for the entire lifetime `'a` of the borrow of `vbox`.
+/// unsafe { BorrowedPage::from_raw(page) }
+/// }
+///
+/// let mut vbox = VBox::<[u8; PAGE_SIZE]>::new_uninit(GFP_KERNEL)?;
+/// let page = borrow_page(&mut vbox);
+///
+/// // SAFETY: There is no concurrent read or write to this page.
+/// unsafe { page.fill_zero_raw(0, PAGE_SIZE)? };
+/// # Ok::<(), Error>(())
+/// ```
+///
+/// # Invariants
+///
+/// The borrowed underlying pointer to a `struct page` is valid for the entire lifetime `'a`.
+///
+/// [`VBox`]: kernel::alloc::VBox
+/// [`Vmalloc`]: kernel::alloc::allocator::Vmalloc
+pub struct BorrowedPage<'a>(ManuallyDrop<Page>, PhantomData<&'a Page>);
+
+impl<'a> BorrowedPage<'a> {
+ /// Constructs a [`BorrowedPage`] from a raw pointer to a `struct page`.
+ ///
+ /// # Safety
+ ///
+ /// - `ptr` must point to a valid `bindings::page`.
+ /// - `ptr` must remain valid for the entire lifetime `'a`.
+ pub unsafe fn from_raw(ptr: NonNull<bindings::page>) -> Self {
+ let page = Page { page: ptr };
+
+ // INVARIANT: The safety requirements guarantee that `ptr` is valid for the entire lifetime
+ // `'a`.
+ Self(ManuallyDrop::new(page), PhantomData)
+ }
+}
+
+impl<'a> Deref for BorrowedPage<'a> {
+ type Target = Page;
+
+ fn deref(&self) -> &Self::Target {
+ &self.0
+ }
+}
+
+/// Trait to be implemented by types which provide an [`Iterator`] over
+/// [`BorrowedPage`] items, such as [`VmallocPageIter`](kernel::alloc::allocator::VmallocPageIter).
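+///
+/// # Examples
+///
+/// A sketch of consuming the iterator (assuming the caller has exclusive access to the backing
+/// pages, as [`Page::fill_zero_raw`] requires):
+///
+/// ```ignore
+/// use kernel::page::{AsPageIter, PAGE_SIZE};
+///
+/// fn zero_pages(owner: &mut impl AsPageIter) -> Result {
+///     for page in owner.page_iter() {
+///         // SAFETY: The caller guarantees no concurrent access to these pages.
+///         unsafe { page.fill_zero_raw(0, PAGE_SIZE)? };
+///     }
+///     Ok(())
+/// }
+/// ```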
+pub trait AsPageIter {
+ /// The [`Iterator`] type, e.g. [`VmallocPageIter`](kernel::alloc::allocator::VmallocPageIter).
+ type Iter<'a>: Iterator<Item = BorrowedPage<'a>>
+ where
+ Self: 'a;
+
+ /// Returns an [`Iterator`] of [`BorrowedPage`] items over all pages owned by `self`.
+ fn page_iter(&mut self) -> Self::Iter<'_>;
+}
+
/// A pointer to a page that owns the page allocation.
///
/// # Invariants
@@ -85,6 +170,12 @@ impl Page {
self.page.as_ptr()
}
+ /// Get the node id containing this page.
+ pub fn nid(&self) -> i32 {
+ // SAFETY: Always safe to call with a valid page.
+ unsafe { bindings::page_to_nid(self.as_ptr()) }
+ }
+
/// Runs a piece of code with this page mapped to an address.
///
/// The page is unmapped when this call returns.
diff --git a/rust/kernel/pci.rs b/rust/kernel/pci.rs
index 8435f8132e38..7fcc5f6022c1 100644
--- a/rust/kernel/pci.rs
+++ b/rust/kernel/pci.rs
@@ -5,16 +5,16 @@
//! C header: [`include/linux/pci.h`](srctree/include/linux/pci.h)
use crate::{
- alloc::flags::*,
bindings, container_of, device,
- device_id::RawDeviceId,
+ device_id::{RawDeviceId, RawDeviceIdIndex},
devres::Devres,
driver,
- error::{to_result, Result},
- io::Io,
- io::IoRaw,
+ error::{from_result, to_result, Result},
+ io::{Io, IoRaw},
+ irq::{self, IrqRequest},
str::CStr,
- types::{ARef, ForeignOwnable, Opaque},
+ sync::aref::ARef,
+ types::Opaque,
ThisModule,
};
use core::{
@@ -24,6 +24,10 @@ use core::{
};
use kernel::prelude::*;
+mod id;
+
+pub use self::id::{Class, ClassMask, Vendor};
+
/// An adapter for the registration of PCI drivers.
pub struct Adapter<T: Driver>(T);
@@ -61,46 +65,45 @@ impl<T: Driver + 'static> Adapter<T> {
extern "C" fn probe_callback(
pdev: *mut bindings::pci_dev,
id: *const bindings::pci_device_id,
- ) -> kernel::ffi::c_int {
+ ) -> c_int {
// SAFETY: The PCI bus only ever calls the probe callback with a valid pointer to a
// `struct pci_dev`.
//
// INVARIANT: `pdev` is valid for the duration of `probe_callback()`.
- let pdev = unsafe { &*pdev.cast::<Device<device::Core>>() };
+ let pdev = unsafe { &*pdev.cast::<Device<device::CoreInternal>>() };
- // SAFETY: `DeviceId` is a `#[repr(transparent)` wrapper of `struct pci_device_id` and
+ // SAFETY: `DeviceId` is a `#[repr(transparent)]` wrapper of `struct pci_device_id` and
// does not add additional invariants, so it's safe to transmute.
let id = unsafe { &*id.cast::<DeviceId>() };
let info = T::ID_TABLE.info(id.index());
- match T::probe(pdev, info) {
- Ok(data) => {
- // Let the `struct pci_dev` own a reference of the driver's private data.
- // SAFETY: By the type invariant `pdev.as_raw` returns a valid pointer to a
- // `struct pci_dev`.
- unsafe { bindings::pci_set_drvdata(pdev.as_raw(), data.into_foreign() as _) };
- }
- Err(err) => return Error::to_errno(err),
- }
+ from_result(|| {
+ let data = T::probe(pdev, info)?;
- 0
+ pdev.as_ref().set_drvdata(data);
+ Ok(0)
+ })
}
extern "C" fn remove_callback(pdev: *mut bindings::pci_dev) {
// SAFETY: The PCI bus only ever calls the remove callback with a valid pointer to a
// `struct pci_dev`.
- let ptr = unsafe { bindings::pci_get_drvdata(pdev) }.cast();
+ //
+ // INVARIANT: `pdev` is valid for the duration of `remove_callback()`.
+ let pdev = unsafe { &*pdev.cast::<Device<device::CoreInternal>>() };
// SAFETY: `remove_callback` is only ever called after a successful call to
- // `probe_callback`, hence it's guaranteed that `ptr` points to a valid and initialized
- // `KBox<T>` pointer created through `KBox::into_foreign`.
- let _ = unsafe { KBox::<T>::from_foreign(ptr) };
+ // `probe_callback`, hence it's guaranteed that `Device::set_drvdata()` has been called
+ // and stored a `Pin<KBox<T>>`.
+ let data = unsafe { pdev.as_ref().drvdata_obtain::<Pin<KBox<T>>>() };
+
+ T::unbind(pdev, data.as_ref());
}
}
/// Declares a kernel module that exposes a single PCI driver.
///
-/// # Example
+/// # Examples
///
///```ignore
/// kernel::module_pci_driver! {
@@ -130,10 +133,11 @@ impl DeviceId {
/// Equivalent to C's `PCI_DEVICE` macro.
///
- /// Create a new `pci::DeviceId` from a vendor and device ID number.
- pub const fn from_id(vendor: u32, device: u32) -> Self {
+ /// Create a new `pci::DeviceId` from a vendor and device ID.
+ #[inline]
+ pub const fn from_id(vendor: Vendor, device: u32) -> Self {
Self(bindings::pci_device_id {
- vendor,
+ vendor: vendor.as_raw() as u32,
device,
subvendor: DeviceId::PCI_ANY_ID,
subdevice: DeviceId::PCI_ANY_ID,
@@ -147,6 +151,7 @@ impl DeviceId {
/// Equivalent to C's `PCI_DEVICE_CLASS` macro.
///
/// Create a new `pci::DeviceId` from a class number and mask.
+ #[inline]
pub const fn from_class(class: u32, class_mask: u32) -> Self {
Self(bindings::pci_device_id {
vendor: DeviceId::PCI_ANY_ID,
@@ -159,19 +164,43 @@ impl DeviceId {
override_only: 0,
})
}
+
+ /// Create a new [`DeviceId`] from a class number, mask, and specific vendor.
+ ///
+ /// This is more targeted than [`DeviceId::from_class`]: in addition to matching by [`Vendor`],
+ /// it also matches the PCI [`Class`] (up to the entire 24 bits, depending on the
+ /// [`ClassMask`]).
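+ ///
+ /// A sketch (with `class` and `class_mask` values obtained elsewhere):
+ ///
+ /// ```ignore
+ /// // Match any Red Hat device whose class code matches `class` under `class_mask`.
+ /// let id = pci::DeviceId::from_class_and_vendor(class, class_mask, pci::Vendor::REDHAT);
+ /// ```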
+ #[inline]
+ pub const fn from_class_and_vendor(
+ class: Class,
+ class_mask: ClassMask,
+ vendor: Vendor,
+ ) -> Self {
+ Self(bindings::pci_device_id {
+ vendor: vendor.as_raw() as u32,
+ device: DeviceId::PCI_ANY_ID,
+ subvendor: DeviceId::PCI_ANY_ID,
+ subdevice: DeviceId::PCI_ANY_ID,
+ class: class.as_raw(),
+ class_mask: class_mask.as_raw(),
+ driver_data: 0,
+ override_only: 0,
+ })
+ }
}
-// SAFETY:
-// * `DeviceId` is a `#[repr(transparent)` wrapper of `pci_device_id` and does not add
-// additional invariants, so it's safe to transmute to `RawType`.
-// * `DRIVER_DATA_OFFSET` is the offset to the `driver_data` field.
+// SAFETY: `DeviceId` is a `#[repr(transparent)]` wrapper of `pci_device_id` and does not add
+// additional invariants, so it's safe to transmute to `RawType`.
unsafe impl RawDeviceId for DeviceId {
type RawType = bindings::pci_device_id;
+}
+// SAFETY: `DRIVER_DATA_OFFSET` is the offset to the `driver_data` field.
+unsafe impl RawDeviceIdIndex for DeviceId {
const DRIVER_DATA_OFFSET: usize = core::mem::offset_of!(bindings::pci_device_id, driver_data);
fn index(&self) -> usize {
- self.0.driver_data as _
+ self.0.driver_data
}
}
@@ -194,7 +223,7 @@ macro_rules! pci_device_table {
/// The PCI driver trait.
///
-/// # Example
+/// # Examples
///
///```
/// # use kernel::{bindings, device::Core, pci};
@@ -206,7 +235,10 @@ macro_rules! pci_device_table {
/// MODULE_PCI_TABLE,
/// <MyDriver as pci::Driver>::IdInfo,
/// [
-/// (pci::DeviceId::from_id(bindings::PCI_VENDOR_ID_REDHAT, bindings::PCI_ANY_ID as _), ())
+/// (
+/// pci::DeviceId::from_id(pci::Vendor::REDHAT, bindings::PCI_ANY_ID as u32),
+/// (),
+/// )
/// ]
/// );
///
@@ -238,9 +270,23 @@ pub trait Driver: Send {
/// PCI driver probe.
///
- /// Called when a new platform device is added or discovered.
- /// Implementers should attempt to initialize the device here.
+ /// Called when a new PCI device is added or discovered. Implementers should
+ /// attempt to initialize the device here.
fn probe(dev: &Device<device::Core>, id_info: &Self::IdInfo) -> Result<Pin<KBox<Self>>>;
+
+ /// PCI driver unbind.
+ ///
+ /// Called when a [`Device`] is unbound from its bound [`Driver`]. Implementing this callback
+ /// is optional.
+ ///
+ /// This callback serves as a place for drivers to perform teardown operations that require a
+ /// `&Device<Core>` or `&Device<Bound>` reference. For instance, drivers may try to perform I/O
+ /// operations to gracefully tear down the device.
+ ///
+ /// Otherwise, release operations for driver resources should be performed in `Self::drop`.
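+ ///
+ /// A sketch of an implementation (illustrative; `quiesce` is a hypothetical method on the
+ /// driver type):
+ ///
+ /// ```ignore
+ /// fn unbind(dev: &pci::Device<device::Core>, this: Pin<&Self>) {
+ ///     // Last chance to perform I/O while the device is still bound.
+ ///     this.quiesce(dev);
+ /// }
+ /// ```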
+ fn unbind(dev: &Device<device::Core>, this: Pin<&Self>) {
+ let _ = (dev, this);
+ }
}
/// The PCI device representation.
@@ -251,7 +297,8 @@ pub trait Driver: Send {
///
/// # Invariants
///
-/// A [`Device`] instance represents a valid `struct device` created by the C portion of the kernel.
+/// A [`Device`] instance represents a valid `struct pci_dev` created by the C portion of the
+/// kernel.
#[repr(transparent)]
pub struct Device<Ctx: device::DeviceContext = device::Normal>(
Opaque<bindings::pci_dev>,
@@ -330,7 +377,7 @@ impl<const SIZE: usize> Bar<SIZE> {
// `ioptr` is valid by the safety requirements.
// `num` is valid by the safety requirements.
unsafe {
- bindings::pci_iounmap(pdev.as_raw(), ioptr as _);
+ bindings::pci_iounmap(pdev.as_raw(), ioptr as *mut c_void);
bindings::pci_release_region(pdev.as_raw(), num);
}
}
@@ -342,6 +389,7 @@ impl<const SIZE: usize> Bar<SIZE> {
}
impl Bar {
+ #[inline]
fn index_is_valid(index: u32) -> bool {
// A `struct pci_dev` owns an array of resources with at most `PCI_NUM_RESOURCES` entries.
index < bindings::PCI_NUM_RESOURCES
@@ -364,24 +412,90 @@ impl<const SIZE: usize> Deref for Bar<SIZE> {
}
impl<Ctx: device::DeviceContext> Device<Ctx> {
+ #[inline]
fn as_raw(&self) -> *mut bindings::pci_dev {
self.0.get()
}
}
impl Device {
- /// Returns the PCI vendor ID.
- pub fn vendor_id(&self) -> u16 {
+ /// Returns the PCI vendor ID as [`Vendor`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use kernel::{device::Core, pci::{self, Vendor}, prelude::*};
+ /// fn log_device_info(pdev: &pci::Device<Core>) -> Result {
+ /// // Get an instance of `Vendor`.
+ /// let vendor = pdev.vendor_id();
+ /// dev_info!(
+ /// pdev.as_ref(),
+ /// "Device: Vendor={}, Device=0x{:x}\n",
+ /// vendor,
+ /// pdev.device_id()
+ /// );
+ /// Ok(())
+ /// }
+ /// ```
+ #[inline]
+ pub fn vendor_id(&self) -> Vendor {
// SAFETY: `self.as_raw` is a valid pointer to a `struct pci_dev`.
- unsafe { (*self.as_raw()).vendor }
+ let vendor_id = unsafe { (*self.as_raw()).vendor };
+ Vendor::from_raw(vendor_id)
}
/// Returns the PCI device ID.
+ #[inline]
pub fn device_id(&self) -> u16 {
- // SAFETY: `self.as_raw` is a valid pointer to a `struct pci_dev`.
+ // SAFETY: By its type invariant `self.as_raw` is always a valid pointer to a
+ // `struct pci_dev`.
unsafe { (*self.as_raw()).device }
}
+ /// Returns the PCI revision ID.
+ #[inline]
+ pub fn revision_id(&self) -> u8 {
+ // SAFETY: By its type invariant `self.as_raw` is always a valid pointer to a
+ // `struct pci_dev`.
+ unsafe { (*self.as_raw()).revision }
+ }
+
+ /// Returns the PCI bus device/function.
+ #[inline]
+ pub fn dev_id(&self) -> u16 {
+ // SAFETY: By its type invariant `self.as_raw` is always a valid pointer to a
+ // `struct pci_dev`.
+ unsafe { bindings::pci_dev_id(self.as_raw()) }
+ }
+
+ /// Returns the PCI subsystem vendor ID.
+ #[inline]
+ pub fn subsystem_vendor_id(&self) -> u16 {
+ // SAFETY: By its type invariant `self.as_raw` is always a valid pointer to a
+ // `struct pci_dev`.
+ unsafe { (*self.as_raw()).subsystem_vendor }
+ }
+
+ /// Returns the PCI subsystem device ID.
+ #[inline]
+ pub fn subsystem_device_id(&self) -> u16 {
+ // SAFETY: By its type invariant `self.as_raw` is always a valid pointer to a
+ // `struct pci_dev`.
+ unsafe { (*self.as_raw()).subsystem_device }
+ }
+
+ /// Returns the start of the given PCI BAR resource.
+ pub fn resource_start(&self, bar: u32) -> Result<bindings::resource_size_t> {
+ if !Bar::index_is_valid(bar) {
+ return Err(EINVAL);
+ }
+
+ // SAFETY:
+ // - `bar` is a valid bar number, as guaranteed by the above call to `Bar::index_is_valid`,
+ // - by its type invariant `self.as_raw` is always a valid pointer to a `struct pci_dev`.
+ Ok(unsafe { bindings::pci_resource_start(self.as_raw(), bar.try_into()?) })
+ }
+
/// Returns the size of the given PCI bar resource.
pub fn resource_len(&self, bar: u32) -> Result<bindings::resource_size_t> {
if !Bar::index_is_valid(bar) {
@@ -393,26 +507,75 @@ impl Device {
// - by its type invariant `self.as_raw` is always a valid pointer to a `struct pci_dev`.
Ok(unsafe { bindings::pci_resource_len(self.as_raw(), bar.try_into()?) })
}
+
+ /// Returns the PCI class as a [`Class`].
+ #[inline]
+ pub fn pci_class(&self) -> Class {
+ // SAFETY: `self.as_raw` is a valid pointer to a `struct pci_dev`.
+ Class::from_raw(unsafe { (*self.as_raw()).class })
+ }
}
impl Device<device::Bound> {
/// Maps an entire PCI-BAR after performing a region-request on it. I/O operation bounds checks
/// can be performed at compile time for offsets (plus the requested type size) < SIZE.
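+ ///
+ /// A sketch of mapping BAR 0 during `probe()` (the region name is illustrative; `SIZE` is the
+ /// driver's chosen constant):
+ ///
+ /// ```ignore
+ /// let bar = KBox::pin_init(
+ ///     pdev.iomap_region_sized::<SIZE>(0, c_str!("my_driver")),
+ ///     GFP_KERNEL,
+ /// )?;
+ /// ```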
- pub fn iomap_region_sized<const SIZE: usize>(
- &self,
+ pub fn iomap_region_sized<'a, const SIZE: usize>(
+ &'a self,
bar: u32,
- name: &CStr,
- ) -> Result<Devres<Bar<SIZE>>> {
- let bar = Bar::<SIZE>::new(self, bar, name)?;
- let devres = Devres::new(self.as_ref(), bar, GFP_KERNEL)?;
-
- Ok(devres)
+ name: &'a CStr,
+ ) -> impl PinInit<Devres<Bar<SIZE>>, Error> + 'a {
+ Devres::new(self.as_ref(), Bar::<SIZE>::new(self, bar, name))
}
/// Maps an entire PCI-BAR after performing a region-request on it.
- pub fn iomap_region(&self, bar: u32, name: &CStr) -> Result<Devres<Bar>> {
+ pub fn iomap_region<'a>(
+ &'a self,
+ bar: u32,
+ name: &'a CStr,
+ ) -> impl PinInit<Devres<Bar>, Error> + 'a {
self.iomap_region_sized::<0>(bar, name)
}
+
+ /// Returns an [`IrqRequest`] for the IRQ vector at the given index, if any.
+ pub fn irq_vector(&self, index: u32) -> Result<IrqRequest<'_>> {
+ // SAFETY: `self.as_raw` returns a valid pointer to a `struct pci_dev`.
+ let irq = unsafe { crate::bindings::pci_irq_vector(self.as_raw(), index) };
+ if irq < 0 {
+ return Err(crate::error::Error::from_errno(irq));
+ }
+ // SAFETY: `irq` is guaranteed to be a valid IRQ number for `&self`.
+ Ok(unsafe { IrqRequest::new(self.as_ref(), irq as u32) })
+ }
+
+ /// Returns a [`kernel::irq::Registration`] for the IRQ vector at the given
+ /// index.
+ pub fn request_irq<'a, T: crate::irq::Handler + 'static>(
+ &'a self,
+ index: u32,
+ flags: irq::Flags,
+ name: &'static CStr,
+ handler: impl PinInit<T, Error> + 'a,
+ ) -> Result<impl PinInit<irq::Registration<T>, Error> + 'a> {
+ let request = self.irq_vector(index)?;
+
+ Ok(irq::Registration::<T>::new(request, flags, name, handler))
+ }
+
+ /// Returns a [`kernel::irq::ThreadedRegistration`] for the IRQ vector at
+ /// the given index.
+ pub fn request_threaded_irq<'a, T: crate::irq::ThreadedHandler + 'static>(
+ &'a self,
+ index: u32,
+ flags: irq::Flags,
+ name: &'static CStr,
+ handler: impl PinInit<T, Error> + 'a,
+ ) -> Result<impl PinInit<irq::ThreadedRegistration<T>, Error> + 'a> {
+ let request = self.irq_vector(index)?;
+
+ Ok(irq::ThreadedRegistration::<T>::new(
+ request, flags, name, handler,
+ ))
+ }
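+
+    // A sketch of a caller (assumptions: `MyHandler` implements
+    // `irq::Handler`, `MyHandler::new()` is a pin-initializer for it, and
+    // `flags` is an `irq::Flags` value):
+    //
+    //     let init = pdev.request_irq(0, flags, c_str!("my-driver"), MyHandler::new())?;
+    //     let registration = KBox::pin_init(init, GFP_KERNEL)?;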
}
impl Device<device::Core> {
@@ -423,6 +586,7 @@ impl Device<device::Core> {
}
/// Enable bus-mastering for this device.
+ #[inline]
pub fn set_master(&self) {
// SAFETY: `self.as_raw` is guaranteed to be a pointer to a valid `struct pci_dev`.
unsafe { bindings::pci_set_master(self.as_raw()) };
@@ -434,8 +598,10 @@ impl Device<device::Core> {
kernel::impl_device_context_deref!(unsafe { Device });
kernel::impl_device_context_into_aref!(Device);
+impl crate::dma::Device for Device<device::Core> {}
+
// SAFETY: Instances of `Device` are always reference-counted.
-unsafe impl crate::types::AlwaysRefCounted for Device {
+unsafe impl crate::sync::aref::AlwaysRefCounted for Device {
fn inc_ref(&self) {
// SAFETY: The existence of a shared reference guarantees that the refcount is non-zero.
unsafe { bindings::pci_dev_get(self.as_raw()) };
@@ -454,7 +620,7 @@ impl<Ctx: device::DeviceContext> AsRef<device::Device<Ctx>> for Device<Ctx> {
let dev = unsafe { addr_of_mut!((*self.as_raw()).dev) };
// SAFETY: `dev` points to a valid `struct device`.
- unsafe { device::Device::as_ref(dev) }
+ unsafe { device::Device::from_raw(dev) }
}
}
diff --git a/rust/kernel/pci/id.rs b/rust/kernel/pci/id.rs
new file mode 100644
index 000000000000..7f2a7f57507f
--- /dev/null
+++ b/rust/kernel/pci/id.rs
@@ -0,0 +1,578 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! PCI device identifiers and related types.
+//!
+//! This module contains PCI class codes, Vendor IDs, and supporting types.
+
+use crate::{bindings, error::code::EINVAL, error::Error, prelude::*};
+use core::fmt;
+
+/// PCI device class codes.
+///
+/// Each entry contains the full 24-bit PCI class code (base class in bits
+/// 23-16, subclass in bits 15-8, programming interface in bits 7-0).
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::{device::Core, pci::{self, Class}, prelude::*};
+/// fn probe_device(pdev: &pci::Device<Core>) -> Result {
+/// let pci_class = pdev.pci_class();
+/// dev_info!(
+/// pdev.as_ref(),
+/// "Detected PCI class: {}\n",
+/// pci_class
+/// );
+/// Ok(())
+/// }
+/// ```
+#[derive(Clone, Copy, PartialEq, Eq)]
+#[repr(transparent)]
+pub struct Class(u32);
+
+/// PCI class mask constants for matching [`Class`] codes.
+#[repr(u32)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum ClassMask {
+ /// Match the full 24-bit class code.
+ Full = 0xffffff,
+    /// Match the upper 16 bits of the class code (base class and subclass only).
+ ClassSubclass = 0xffff00,
+}
+
+macro_rules! define_all_pci_classes {
+ (
+ $($variant:ident = $binding:expr,)+
+ ) => {
+ impl Class {
+ $(
+ #[allow(missing_docs)]
+ pub const $variant: Self = Self(Self::to_24bit_class($binding));
+ )+
+ }
+
+ impl fmt::Display for Class {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ $(
+ &Self::$variant => write!(f, stringify!($variant)),
+ )+
+ _ => <Self as fmt::Debug>::fmt(self, f),
+ }
+ }
+ }
+ };
+}
+
+/// Once constructed, a [`Class`] contains a valid PCI class code.
+impl Class {
+ /// Create a [`Class`] from a raw 24-bit class code.
+ #[inline]
+ pub(super) fn from_raw(class_code: u32) -> Self {
+ Self(class_code)
+ }
+
+ /// Get the raw 24-bit class code value.
+ #[inline]
+ pub const fn as_raw(self) -> u32 {
+ self.0
+ }
+
+ // Converts a PCI class constant to 24-bit format.
+ //
+ // Many device drivers use only the upper 16 bits (base class and subclass),
+ // but some use the full 24 bits. In order to support both cases, store the
+ // class code as a 24-bit value, where 16-bit values are shifted up 8 bits.
+ const fn to_24bit_class(val: u32) -> u32 {
+ if val > 0xFFFF {
+ val
+ } else {
+ val << 8
+ }
+ }
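+
+    // For example, `PCI_CLASS_NETWORK_ETHERNET` (0x0200) is widened to
+    // 0x020000, while `PCI_CLASS_STORAGE_EXPRESS` (0x010802) already occupies
+    // all 24 bits and is kept as-is.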
+}
+
+impl fmt::Debug for Class {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "0x{:06x}", self.0)
+ }
+}
+
+impl ClassMask {
+ /// Get the raw mask value.
+ #[inline]
+ pub const fn as_raw(self) -> u32 {
+ self as u32
+ }
+}
+
+impl TryFrom<u32> for ClassMask {
+ type Error = Error;
+
+ fn try_from(value: u32) -> Result<Self, Self::Error> {
+ match value {
+ 0xffffff => Ok(ClassMask::Full),
+ 0xffff00 => Ok(ClassMask::ClassSubclass),
+ _ => Err(EINVAL),
+ }
+ }
+}
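+
+// A small sketch of mask recovery (illustrative): known raw values round-trip,
+// anything else is rejected with `EINVAL`:
+//
+//     assert_eq!(ClassMask::try_from(0xffff00).ok(), Some(ClassMask::ClassSubclass));
+//     assert!(ClassMask::try_from(0x00ff00).is_err());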
+
+/// PCI vendor IDs.
+///
+/// Each entry contains the 16-bit PCI vendor ID as assigned by the PCI SIG.
+#[derive(Clone, Copy, PartialEq, Eq)]
+#[repr(transparent)]
+pub struct Vendor(u16);
+
+macro_rules! define_all_pci_vendors {
+ (
+ $($variant:ident = $binding:expr,)+
+ ) => {
+ impl Vendor {
+ $(
+ #[allow(missing_docs)]
+ pub const $variant: Self = Self($binding as u16);
+ )+
+ }
+
+ impl fmt::Display for Vendor {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ $(
+ &Self::$variant => write!(f, stringify!($variant)),
+ )+
+ _ => <Self as fmt::Debug>::fmt(self, f),
+ }
+ }
+ }
+ };
+}
+
+/// Once constructed, a `Vendor` contains a valid PCI Vendor ID.
+impl Vendor {
+ /// Create a Vendor from a raw 16-bit vendor ID.
+ #[inline]
+ pub(super) fn from_raw(vendor_id: u16) -> Self {
+ Self(vendor_id)
+ }
+
+ /// Get the raw 16-bit vendor ID value.
+ #[inline]
+ pub const fn as_raw(self) -> u16 {
+ self.0
+ }
+}
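+
+// A minimal usage sketch (illustrative; `pdev` is assumed to be a
+// `pci::Device` reference): since `Vendor` implements `PartialEq`, IDs can be
+// compared against the constants defined below:
+//
+//     if pdev.vendor_id() == Vendor::INTEL {
+//         // Intel-specific handling.
+//     }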
+
+impl fmt::Debug for Vendor {
+ #[inline]
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "0x{:04x}", self.0)
+ }
+}
+
+define_all_pci_classes! {
+ NOT_DEFINED = bindings::PCI_CLASS_NOT_DEFINED, // 0x000000
+ NOT_DEFINED_VGA = bindings::PCI_CLASS_NOT_DEFINED_VGA, // 0x000100
+
+ STORAGE_SCSI = bindings::PCI_CLASS_STORAGE_SCSI, // 0x010000
+ STORAGE_IDE = bindings::PCI_CLASS_STORAGE_IDE, // 0x010100
+ STORAGE_FLOPPY = bindings::PCI_CLASS_STORAGE_FLOPPY, // 0x010200
+ STORAGE_IPI = bindings::PCI_CLASS_STORAGE_IPI, // 0x010300
+ STORAGE_RAID = bindings::PCI_CLASS_STORAGE_RAID, // 0x010400
+ STORAGE_SATA = bindings::PCI_CLASS_STORAGE_SATA, // 0x010600
+ STORAGE_SATA_AHCI = bindings::PCI_CLASS_STORAGE_SATA_AHCI, // 0x010601
+ STORAGE_SAS = bindings::PCI_CLASS_STORAGE_SAS, // 0x010700
+ STORAGE_EXPRESS = bindings::PCI_CLASS_STORAGE_EXPRESS, // 0x010802
+ STORAGE_OTHER = bindings::PCI_CLASS_STORAGE_OTHER, // 0x018000
+
+ NETWORK_ETHERNET = bindings::PCI_CLASS_NETWORK_ETHERNET, // 0x020000
+ NETWORK_TOKEN_RING = bindings::PCI_CLASS_NETWORK_TOKEN_RING, // 0x020100
+ NETWORK_FDDI = bindings::PCI_CLASS_NETWORK_FDDI, // 0x020200
+ NETWORK_ATM = bindings::PCI_CLASS_NETWORK_ATM, // 0x020300
+ NETWORK_OTHER = bindings::PCI_CLASS_NETWORK_OTHER, // 0x028000
+
+ DISPLAY_VGA = bindings::PCI_CLASS_DISPLAY_VGA, // 0x030000
+ DISPLAY_XGA = bindings::PCI_CLASS_DISPLAY_XGA, // 0x030100
+ DISPLAY_3D = bindings::PCI_CLASS_DISPLAY_3D, // 0x030200
+ DISPLAY_OTHER = bindings::PCI_CLASS_DISPLAY_OTHER, // 0x038000
+
+ MULTIMEDIA_VIDEO = bindings::PCI_CLASS_MULTIMEDIA_VIDEO, // 0x040000
+ MULTIMEDIA_AUDIO = bindings::PCI_CLASS_MULTIMEDIA_AUDIO, // 0x040100
+ MULTIMEDIA_PHONE = bindings::PCI_CLASS_MULTIMEDIA_PHONE, // 0x040200
+ MULTIMEDIA_HD_AUDIO = bindings::PCI_CLASS_MULTIMEDIA_HD_AUDIO, // 0x040300
+ MULTIMEDIA_OTHER = bindings::PCI_CLASS_MULTIMEDIA_OTHER, // 0x048000
+
+ MEMORY_RAM = bindings::PCI_CLASS_MEMORY_RAM, // 0x050000
+ MEMORY_FLASH = bindings::PCI_CLASS_MEMORY_FLASH, // 0x050100
+ MEMORY_CXL = bindings::PCI_CLASS_MEMORY_CXL, // 0x050200
+ MEMORY_OTHER = bindings::PCI_CLASS_MEMORY_OTHER, // 0x058000
+
+ BRIDGE_HOST = bindings::PCI_CLASS_BRIDGE_HOST, // 0x060000
+ BRIDGE_ISA = bindings::PCI_CLASS_BRIDGE_ISA, // 0x060100
+ BRIDGE_EISA = bindings::PCI_CLASS_BRIDGE_EISA, // 0x060200
+ BRIDGE_MC = bindings::PCI_CLASS_BRIDGE_MC, // 0x060300
+ BRIDGE_PCI_NORMAL = bindings::PCI_CLASS_BRIDGE_PCI_NORMAL, // 0x060400
+ BRIDGE_PCI_SUBTRACTIVE = bindings::PCI_CLASS_BRIDGE_PCI_SUBTRACTIVE, // 0x060401
+ BRIDGE_PCMCIA = bindings::PCI_CLASS_BRIDGE_PCMCIA, // 0x060500
+ BRIDGE_NUBUS = bindings::PCI_CLASS_BRIDGE_NUBUS, // 0x060600
+ BRIDGE_CARDBUS = bindings::PCI_CLASS_BRIDGE_CARDBUS, // 0x060700
+ BRIDGE_RACEWAY = bindings::PCI_CLASS_BRIDGE_RACEWAY, // 0x060800
+ BRIDGE_OTHER = bindings::PCI_CLASS_BRIDGE_OTHER, // 0x068000
+
+ COMMUNICATION_SERIAL = bindings::PCI_CLASS_COMMUNICATION_SERIAL, // 0x070000
+ COMMUNICATION_PARALLEL = bindings::PCI_CLASS_COMMUNICATION_PARALLEL, // 0x070100
+ COMMUNICATION_MULTISERIAL = bindings::PCI_CLASS_COMMUNICATION_MULTISERIAL, // 0x070200
+ COMMUNICATION_MODEM = bindings::PCI_CLASS_COMMUNICATION_MODEM, // 0x070300
+ COMMUNICATION_OTHER = bindings::PCI_CLASS_COMMUNICATION_OTHER, // 0x078000
+
+ SYSTEM_PIC = bindings::PCI_CLASS_SYSTEM_PIC, // 0x080000
+ SYSTEM_PIC_IOAPIC = bindings::PCI_CLASS_SYSTEM_PIC_IOAPIC, // 0x080010
+ SYSTEM_PIC_IOXAPIC = bindings::PCI_CLASS_SYSTEM_PIC_IOXAPIC, // 0x080020
+ SYSTEM_DMA = bindings::PCI_CLASS_SYSTEM_DMA, // 0x080100
+ SYSTEM_TIMER = bindings::PCI_CLASS_SYSTEM_TIMER, // 0x080200
+ SYSTEM_RTC = bindings::PCI_CLASS_SYSTEM_RTC, // 0x080300
+ SYSTEM_PCI_HOTPLUG = bindings::PCI_CLASS_SYSTEM_PCI_HOTPLUG, // 0x080400
+ SYSTEM_SDHCI = bindings::PCI_CLASS_SYSTEM_SDHCI, // 0x080500
+ SYSTEM_RCEC = bindings::PCI_CLASS_SYSTEM_RCEC, // 0x080700
+ SYSTEM_OTHER = bindings::PCI_CLASS_SYSTEM_OTHER, // 0x088000
+
+ INPUT_KEYBOARD = bindings::PCI_CLASS_INPUT_KEYBOARD, // 0x090000
+ INPUT_PEN = bindings::PCI_CLASS_INPUT_PEN, // 0x090100
+ INPUT_MOUSE = bindings::PCI_CLASS_INPUT_MOUSE, // 0x090200
+ INPUT_SCANNER = bindings::PCI_CLASS_INPUT_SCANNER, // 0x090300
+ INPUT_GAMEPORT = bindings::PCI_CLASS_INPUT_GAMEPORT, // 0x090400
+ INPUT_OTHER = bindings::PCI_CLASS_INPUT_OTHER, // 0x098000
+
+ DOCKING_GENERIC = bindings::PCI_CLASS_DOCKING_GENERIC, // 0x0a0000
+ DOCKING_OTHER = bindings::PCI_CLASS_DOCKING_OTHER, // 0x0a8000
+
+ PROCESSOR_386 = bindings::PCI_CLASS_PROCESSOR_386, // 0x0b0000
+ PROCESSOR_486 = bindings::PCI_CLASS_PROCESSOR_486, // 0x0b0100
+ PROCESSOR_PENTIUM = bindings::PCI_CLASS_PROCESSOR_PENTIUM, // 0x0b0200
+ PROCESSOR_ALPHA = bindings::PCI_CLASS_PROCESSOR_ALPHA, // 0x0b1000
+ PROCESSOR_POWERPC = bindings::PCI_CLASS_PROCESSOR_POWERPC, // 0x0b2000
+ PROCESSOR_MIPS = bindings::PCI_CLASS_PROCESSOR_MIPS, // 0x0b3000
+ PROCESSOR_CO = bindings::PCI_CLASS_PROCESSOR_CO, // 0x0b4000
+
+ SERIAL_FIREWIRE = bindings::PCI_CLASS_SERIAL_FIREWIRE, // 0x0c0000
+ SERIAL_FIREWIRE_OHCI = bindings::PCI_CLASS_SERIAL_FIREWIRE_OHCI, // 0x0c0010
+ SERIAL_ACCESS = bindings::PCI_CLASS_SERIAL_ACCESS, // 0x0c0100
+ SERIAL_SSA = bindings::PCI_CLASS_SERIAL_SSA, // 0x0c0200
+ SERIAL_USB_UHCI = bindings::PCI_CLASS_SERIAL_USB_UHCI, // 0x0c0300
+ SERIAL_USB_OHCI = bindings::PCI_CLASS_SERIAL_USB_OHCI, // 0x0c0310
+ SERIAL_USB_EHCI = bindings::PCI_CLASS_SERIAL_USB_EHCI, // 0x0c0320
+ SERIAL_USB_XHCI = bindings::PCI_CLASS_SERIAL_USB_XHCI, // 0x0c0330
+ SERIAL_USB_CDNS = bindings::PCI_CLASS_SERIAL_USB_CDNS, // 0x0c0380
+ SERIAL_USB_DEVICE = bindings::PCI_CLASS_SERIAL_USB_DEVICE, // 0x0c03fe
+ SERIAL_FIBER = bindings::PCI_CLASS_SERIAL_FIBER, // 0x0c0400
+ SERIAL_SMBUS = bindings::PCI_CLASS_SERIAL_SMBUS, // 0x0c0500
+ SERIAL_IPMI_SMIC = bindings::PCI_CLASS_SERIAL_IPMI_SMIC, // 0x0c0700
+ SERIAL_IPMI_KCS = bindings::PCI_CLASS_SERIAL_IPMI_KCS, // 0x0c0701
+ SERIAL_IPMI_BT = bindings::PCI_CLASS_SERIAL_IPMI_BT, // 0x0c0702
+
+ WIRELESS_RF_CONTROLLER = bindings::PCI_CLASS_WIRELESS_RF_CONTROLLER, // 0x0d1000
+ WIRELESS_WHCI = bindings::PCI_CLASS_WIRELESS_WHCI, // 0x0d1010
+
+ INTELLIGENT_I2O = bindings::PCI_CLASS_INTELLIGENT_I2O, // 0x0e0000
+
+ SATELLITE_TV = bindings::PCI_CLASS_SATELLITE_TV, // 0x0f0000
+ SATELLITE_AUDIO = bindings::PCI_CLASS_SATELLITE_AUDIO, // 0x0f0100
+ SATELLITE_VOICE = bindings::PCI_CLASS_SATELLITE_VOICE, // 0x0f0300
+ SATELLITE_DATA = bindings::PCI_CLASS_SATELLITE_DATA, // 0x0f0400
+
+ CRYPT_NETWORK = bindings::PCI_CLASS_CRYPT_NETWORK, // 0x100000
+ CRYPT_ENTERTAINMENT = bindings::PCI_CLASS_CRYPT_ENTERTAINMENT, // 0x100100
+ CRYPT_OTHER = bindings::PCI_CLASS_CRYPT_OTHER, // 0x108000
+
+ SP_DPIO = bindings::PCI_CLASS_SP_DPIO, // 0x110000
+ SP_OTHER = bindings::PCI_CLASS_SP_OTHER, // 0x118000
+
+ ACCELERATOR_PROCESSING = bindings::PCI_CLASS_ACCELERATOR_PROCESSING, // 0x120000
+
+ OTHERS = bindings::PCI_CLASS_OTHERS, // 0xff0000
+}
+
+define_all_pci_vendors! {
+ PCI_SIG = bindings::PCI_VENDOR_ID_PCI_SIG, // 0x0001
+ LOONGSON = bindings::PCI_VENDOR_ID_LOONGSON, // 0x0014
+ SOLIDIGM = bindings::PCI_VENDOR_ID_SOLIDIGM, // 0x025e
+ TTTECH = bindings::PCI_VENDOR_ID_TTTECH, // 0x0357
+ DYNALINK = bindings::PCI_VENDOR_ID_DYNALINK, // 0x0675
+ UBIQUITI = bindings::PCI_VENDOR_ID_UBIQUITI, // 0x0777
+ BERKOM = bindings::PCI_VENDOR_ID_BERKOM, // 0x0871
+ ITTIM = bindings::PCI_VENDOR_ID_ITTIM, // 0x0b48
+ COMPAQ = bindings::PCI_VENDOR_ID_COMPAQ, // 0x0e11
+ LSI_LOGIC = bindings::PCI_VENDOR_ID_LSI_LOGIC, // 0x1000
+ ATI = bindings::PCI_VENDOR_ID_ATI, // 0x1002
+ VLSI = bindings::PCI_VENDOR_ID_VLSI, // 0x1004
+ ADL = bindings::PCI_VENDOR_ID_ADL, // 0x1005
+ NS = bindings::PCI_VENDOR_ID_NS, // 0x100b
+ TSENG = bindings::PCI_VENDOR_ID_TSENG, // 0x100c
+ WEITEK = bindings::PCI_VENDOR_ID_WEITEK, // 0x100e
+ DEC = bindings::PCI_VENDOR_ID_DEC, // 0x1011
+ CIRRUS = bindings::PCI_VENDOR_ID_CIRRUS, // 0x1013
+ IBM = bindings::PCI_VENDOR_ID_IBM, // 0x1014
+ UNISYS = bindings::PCI_VENDOR_ID_UNISYS, // 0x1018
+ COMPEX2 = bindings::PCI_VENDOR_ID_COMPEX2, // 0x101a
+ WD = bindings::PCI_VENDOR_ID_WD, // 0x101c
+ AMI = bindings::PCI_VENDOR_ID_AMI, // 0x101e
+ AMD = bindings::PCI_VENDOR_ID_AMD, // 0x1022
+ TRIDENT = bindings::PCI_VENDOR_ID_TRIDENT, // 0x1023
+ AI = bindings::PCI_VENDOR_ID_AI, // 0x1025
+ DELL = bindings::PCI_VENDOR_ID_DELL, // 0x1028
+ MATROX = bindings::PCI_VENDOR_ID_MATROX, // 0x102B
+ MOBILITY_ELECTRONICS = bindings::PCI_VENDOR_ID_MOBILITY_ELECTRONICS, // 0x14f2
+ CT = bindings::PCI_VENDOR_ID_CT, // 0x102c
+ MIRO = bindings::PCI_VENDOR_ID_MIRO, // 0x1031
+ NEC = bindings::PCI_VENDOR_ID_NEC, // 0x1033
+ FD = bindings::PCI_VENDOR_ID_FD, // 0x1036
+ SI = bindings::PCI_VENDOR_ID_SI, // 0x1039
+ HP = bindings::PCI_VENDOR_ID_HP, // 0x103c
+ HP_3PAR = bindings::PCI_VENDOR_ID_HP_3PAR, // 0x1590
+ PCTECH = bindings::PCI_VENDOR_ID_PCTECH, // 0x1042
+ ASUSTEK = bindings::PCI_VENDOR_ID_ASUSTEK, // 0x1043
+ DPT = bindings::PCI_VENDOR_ID_DPT, // 0x1044
+ OPTI = bindings::PCI_VENDOR_ID_OPTI, // 0x1045
+ ELSA = bindings::PCI_VENDOR_ID_ELSA, // 0x1048
+ STMICRO = bindings::PCI_VENDOR_ID_STMICRO, // 0x104A
+ BUSLOGIC = bindings::PCI_VENDOR_ID_BUSLOGIC, // 0x104B
+ TI = bindings::PCI_VENDOR_ID_TI, // 0x104c
+ SONY = bindings::PCI_VENDOR_ID_SONY, // 0x104d
+ WINBOND2 = bindings::PCI_VENDOR_ID_WINBOND2, // 0x1050
+ ANIGMA = bindings::PCI_VENDOR_ID_ANIGMA, // 0x1051
+ EFAR = bindings::PCI_VENDOR_ID_EFAR, // 0x1055
+ MOTOROLA = bindings::PCI_VENDOR_ID_MOTOROLA, // 0x1057
+ PROMISE = bindings::PCI_VENDOR_ID_PROMISE, // 0x105a
+ FOXCONN = bindings::PCI_VENDOR_ID_FOXCONN, // 0x105b
+ UMC = bindings::PCI_VENDOR_ID_UMC, // 0x1060
+ PICOPOWER = bindings::PCI_VENDOR_ID_PICOPOWER, // 0x1066
+ MYLEX = bindings::PCI_VENDOR_ID_MYLEX, // 0x1069
+ APPLE = bindings::PCI_VENDOR_ID_APPLE, // 0x106b
+ YAMAHA = bindings::PCI_VENDOR_ID_YAMAHA, // 0x1073
+ QLOGIC = bindings::PCI_VENDOR_ID_QLOGIC, // 0x1077
+ CYRIX = bindings::PCI_VENDOR_ID_CYRIX, // 0x1078
+ CONTAQ = bindings::PCI_VENDOR_ID_CONTAQ, // 0x1080
+ OLICOM = bindings::PCI_VENDOR_ID_OLICOM, // 0x108d
+ SUN = bindings::PCI_VENDOR_ID_SUN, // 0x108e
+ NI = bindings::PCI_VENDOR_ID_NI, // 0x1093
+ CMD = bindings::PCI_VENDOR_ID_CMD, // 0x1095
+ BROOKTREE = bindings::PCI_VENDOR_ID_BROOKTREE, // 0x109e
+ SGI = bindings::PCI_VENDOR_ID_SGI, // 0x10a9
+ WINBOND = bindings::PCI_VENDOR_ID_WINBOND, // 0x10ad
+ PLX = bindings::PCI_VENDOR_ID_PLX, // 0x10b5
+ MADGE = bindings::PCI_VENDOR_ID_MADGE, // 0x10b6
+ THREECOM = bindings::PCI_VENDOR_ID_3COM, // 0x10b7
+ AL = bindings::PCI_VENDOR_ID_AL, // 0x10b9
+ NEOMAGIC = bindings::PCI_VENDOR_ID_NEOMAGIC, // 0x10c8
+ TCONRAD = bindings::PCI_VENDOR_ID_TCONRAD, // 0x10da
+ ROHM = bindings::PCI_VENDOR_ID_ROHM, // 0x10db
+ NVIDIA = bindings::PCI_VENDOR_ID_NVIDIA, // 0x10de
+ IMS = bindings::PCI_VENDOR_ID_IMS, // 0x10e0
+ AMCC = bindings::PCI_VENDOR_ID_AMCC, // 0x10e8
+ AMPERE = bindings::PCI_VENDOR_ID_AMPERE, // 0x1def
+ INTERG = bindings::PCI_VENDOR_ID_INTERG, // 0x10ea
+ REALTEK = bindings::PCI_VENDOR_ID_REALTEK, // 0x10ec
+ XILINX = bindings::PCI_VENDOR_ID_XILINX, // 0x10ee
+ INIT = bindings::PCI_VENDOR_ID_INIT, // 0x1101
+ CREATIVE = bindings::PCI_VENDOR_ID_CREATIVE, // 0x1102
+ TTI = bindings::PCI_VENDOR_ID_TTI, // 0x1103
+ SIGMA = bindings::PCI_VENDOR_ID_SIGMA, // 0x1105
+ VIA = bindings::PCI_VENDOR_ID_VIA, // 0x1106
+ SIEMENS = bindings::PCI_VENDOR_ID_SIEMENS, // 0x110A
+ VORTEX = bindings::PCI_VENDOR_ID_VORTEX, // 0x1119
+ EF = bindings::PCI_VENDOR_ID_EF, // 0x111a
+ IDT = bindings::PCI_VENDOR_ID_IDT, // 0x111d
+ FORE = bindings::PCI_VENDOR_ID_FORE, // 0x1127
+ PHILIPS = bindings::PCI_VENDOR_ID_PHILIPS, // 0x1131
+ EICON = bindings::PCI_VENDOR_ID_EICON, // 0x1133
+ CISCO = bindings::PCI_VENDOR_ID_CISCO, // 0x1137
+ ZIATECH = bindings::PCI_VENDOR_ID_ZIATECH, // 0x1138
+ SYSKONNECT = bindings::PCI_VENDOR_ID_SYSKONNECT, // 0x1148
+ DIGI = bindings::PCI_VENDOR_ID_DIGI, // 0x114f
+ XIRCOM = bindings::PCI_VENDOR_ID_XIRCOM, // 0x115d
+ SERVERWORKS = bindings::PCI_VENDOR_ID_SERVERWORKS, // 0x1166
+ ALTERA = bindings::PCI_VENDOR_ID_ALTERA, // 0x1172
+ SBE = bindings::PCI_VENDOR_ID_SBE, // 0x1176
+ TOSHIBA = bindings::PCI_VENDOR_ID_TOSHIBA, // 0x1179
+ TOSHIBA_2 = bindings::PCI_VENDOR_ID_TOSHIBA_2, // 0x102f
+ ATTO = bindings::PCI_VENDOR_ID_ATTO, // 0x117c
+ RICOH = bindings::PCI_VENDOR_ID_RICOH, // 0x1180
+ DLINK = bindings::PCI_VENDOR_ID_DLINK, // 0x1186
+ ARTOP = bindings::PCI_VENDOR_ID_ARTOP, // 0x1191
+ ZEITNET = bindings::PCI_VENDOR_ID_ZEITNET, // 0x1193
+ FUJITSU_ME = bindings::PCI_VENDOR_ID_FUJITSU_ME, // 0x119e
+ MARVELL = bindings::PCI_VENDOR_ID_MARVELL, // 0x11ab
+ MARVELL_EXT = bindings::PCI_VENDOR_ID_MARVELL_EXT, // 0x1b4b
+ V3 = bindings::PCI_VENDOR_ID_V3, // 0x11b0
+ ATT = bindings::PCI_VENDOR_ID_ATT, // 0x11c1
+ SPECIALIX = bindings::PCI_VENDOR_ID_SPECIALIX, // 0x11cb
+ ANALOG_DEVICES = bindings::PCI_VENDOR_ID_ANALOG_DEVICES, // 0x11d4
+ ZORAN = bindings::PCI_VENDOR_ID_ZORAN, // 0x11de
+ COMPEX = bindings::PCI_VENDOR_ID_COMPEX, // 0x11f6
+ MICROSEMI = bindings::PCI_VENDOR_ID_MICROSEMI, // 0x11f8
+ RP = bindings::PCI_VENDOR_ID_RP, // 0x11fe
+ CYCLADES = bindings::PCI_VENDOR_ID_CYCLADES, // 0x120e
+ ESSENTIAL = bindings::PCI_VENDOR_ID_ESSENTIAL, // 0x120f
+ O2 = bindings::PCI_VENDOR_ID_O2, // 0x1217
+ THREEDX = bindings::PCI_VENDOR_ID_3DFX, // 0x121a
+ AVM = bindings::PCI_VENDOR_ID_AVM, // 0x1244
+ STALLION = bindings::PCI_VENDOR_ID_STALLION, // 0x124d
+ AT = bindings::PCI_VENDOR_ID_AT, // 0x1259
+ ASIX = bindings::PCI_VENDOR_ID_ASIX, // 0x125b
+ ESS = bindings::PCI_VENDOR_ID_ESS, // 0x125d
+ SATSAGEM = bindings::PCI_VENDOR_ID_SATSAGEM, // 0x1267
+ ENSONIQ = bindings::PCI_VENDOR_ID_ENSONIQ, // 0x1274
+ TRANSMETA = bindings::PCI_VENDOR_ID_TRANSMETA, // 0x1279
+ ROCKWELL = bindings::PCI_VENDOR_ID_ROCKWELL, // 0x127A
+ ITE = bindings::PCI_VENDOR_ID_ITE, // 0x1283
+ ALTEON = bindings::PCI_VENDOR_ID_ALTEON, // 0x12ae
+ NVIDIA_SGS = bindings::PCI_VENDOR_ID_NVIDIA_SGS, // 0x12d2
+ PERICOM = bindings::PCI_VENDOR_ID_PERICOM, // 0x12D8
+ AUREAL = bindings::PCI_VENDOR_ID_AUREAL, // 0x12eb
+ ELECTRONICDESIGNGMBH = bindings::PCI_VENDOR_ID_ELECTRONICDESIGNGMBH, // 0x12f8
+ ESDGMBH = bindings::PCI_VENDOR_ID_ESDGMBH, // 0x12fe
+ CB = bindings::PCI_VENDOR_ID_CB, // 0x1307
+ SIIG = bindings::PCI_VENDOR_ID_SIIG, // 0x131f
+ RADISYS = bindings::PCI_VENDOR_ID_RADISYS, // 0x1331
+ MICRO_MEMORY = bindings::PCI_VENDOR_ID_MICRO_MEMORY, // 0x1332
+ DOMEX = bindings::PCI_VENDOR_ID_DOMEX, // 0x134a
+ INTASHIELD = bindings::PCI_VENDOR_ID_INTASHIELD, // 0x135a
+ QUATECH = bindings::PCI_VENDOR_ID_QUATECH, // 0x135C
+ SEALEVEL = bindings::PCI_VENDOR_ID_SEALEVEL, // 0x135e
+ HYPERCOPE = bindings::PCI_VENDOR_ID_HYPERCOPE, // 0x1365
+ DIGIGRAM = bindings::PCI_VENDOR_ID_DIGIGRAM, // 0x1369
+ KAWASAKI = bindings::PCI_VENDOR_ID_KAWASAKI, // 0x136b
+ CNET = bindings::PCI_VENDOR_ID_CNET, // 0x1371
+ LMC = bindings::PCI_VENDOR_ID_LMC, // 0x1376
+ NETGEAR = bindings::PCI_VENDOR_ID_NETGEAR, // 0x1385
+ APPLICOM = bindings::PCI_VENDOR_ID_APPLICOM, // 0x1389
+ MOXA = bindings::PCI_VENDOR_ID_MOXA, // 0x1393
+ CCD = bindings::PCI_VENDOR_ID_CCD, // 0x1397
+ EXAR = bindings::PCI_VENDOR_ID_EXAR, // 0x13a8
+ MICROGATE = bindings::PCI_VENDOR_ID_MICROGATE, // 0x13c0
+ THREEWARE = bindings::PCI_VENDOR_ID_3WARE, // 0x13C1
+ IOMEGA = bindings::PCI_VENDOR_ID_IOMEGA, // 0x13ca
+ ABOCOM = bindings::PCI_VENDOR_ID_ABOCOM, // 0x13D1
+ SUNDANCE = bindings::PCI_VENDOR_ID_SUNDANCE, // 0x13f0
+ CMEDIA = bindings::PCI_VENDOR_ID_CMEDIA, // 0x13f6
+ ADVANTECH = bindings::PCI_VENDOR_ID_ADVANTECH, // 0x13fe
+ MEILHAUS = bindings::PCI_VENDOR_ID_MEILHAUS, // 0x1402
+ LAVA = bindings::PCI_VENDOR_ID_LAVA, // 0x1407
+ TIMEDIA = bindings::PCI_VENDOR_ID_TIMEDIA, // 0x1409
+ ICE = bindings::PCI_VENDOR_ID_ICE, // 0x1412
+ MICROSOFT = bindings::PCI_VENDOR_ID_MICROSOFT, // 0x1414
+ OXSEMI = bindings::PCI_VENDOR_ID_OXSEMI, // 0x1415
+ CHELSIO = bindings::PCI_VENDOR_ID_CHELSIO, // 0x1425
+ EDIMAX = bindings::PCI_VENDOR_ID_EDIMAX, // 0x1432
+ ADLINK = bindings::PCI_VENDOR_ID_ADLINK, // 0x144a
+ SAMSUNG = bindings::PCI_VENDOR_ID_SAMSUNG, // 0x144d
+ GIGABYTE = bindings::PCI_VENDOR_ID_GIGABYTE, // 0x1458
+ AMBIT = bindings::PCI_VENDOR_ID_AMBIT, // 0x1468
+ MYRICOM = bindings::PCI_VENDOR_ID_MYRICOM, // 0x14c1
+ MEDIATEK = bindings::PCI_VENDOR_ID_MEDIATEK, // 0x14c3
+ TITAN = bindings::PCI_VENDOR_ID_TITAN, // 0x14D2
+ PANACOM = bindings::PCI_VENDOR_ID_PANACOM, // 0x14d4
+ SIPACKETS = bindings::PCI_VENDOR_ID_SIPACKETS, // 0x14d9
+ AFAVLAB = bindings::PCI_VENDOR_ID_AFAVLAB, // 0x14db
+ AMPLICON = bindings::PCI_VENDOR_ID_AMPLICON, // 0x14dc
+ BCM_GVC = bindings::PCI_VENDOR_ID_BCM_GVC, // 0x14a4
+ BROADCOM = bindings::PCI_VENDOR_ID_BROADCOM, // 0x14e4
+ TOPIC = bindings::PCI_VENDOR_ID_TOPIC, // 0x151f
+ MAINPINE = bindings::PCI_VENDOR_ID_MAINPINE, // 0x1522
+ ENE = bindings::PCI_VENDOR_ID_ENE, // 0x1524
+ SYBA = bindings::PCI_VENDOR_ID_SYBA, // 0x1592
+ MORETON = bindings::PCI_VENDOR_ID_MORETON, // 0x15aa
+ VMWARE = bindings::PCI_VENDOR_ID_VMWARE, // 0x15ad
+ ZOLTRIX = bindings::PCI_VENDOR_ID_ZOLTRIX, // 0x15b0
+ MELLANOX = bindings::PCI_VENDOR_ID_MELLANOX, // 0x15b3
+ DFI = bindings::PCI_VENDOR_ID_DFI, // 0x15bd
+ QUICKNET = bindings::PCI_VENDOR_ID_QUICKNET, // 0x15e2
+ ADDIDATA = bindings::PCI_VENDOR_ID_ADDIDATA, // 0x15B8
+ PDC = bindings::PCI_VENDOR_ID_PDC, // 0x15e9
+ FARSITE = bindings::PCI_VENDOR_ID_FARSITE, // 0x1619
+ ARIMA = bindings::PCI_VENDOR_ID_ARIMA, // 0x161f
+ BROCADE = bindings::PCI_VENDOR_ID_BROCADE, // 0x1657
+ SIBYTE = bindings::PCI_VENDOR_ID_SIBYTE, // 0x166d
+ ATHEROS = bindings::PCI_VENDOR_ID_ATHEROS, // 0x168c
+ NETCELL = bindings::PCI_VENDOR_ID_NETCELL, // 0x169c
+ CENATEK = bindings::PCI_VENDOR_ID_CENATEK, // 0x16CA
+ SYNOPSYS = bindings::PCI_VENDOR_ID_SYNOPSYS, // 0x16c3
+ USR = bindings::PCI_VENDOR_ID_USR, // 0x16ec
+ VITESSE = bindings::PCI_VENDOR_ID_VITESSE, // 0x1725
+ LINKSYS = bindings::PCI_VENDOR_ID_LINKSYS, // 0x1737
+ ALTIMA = bindings::PCI_VENDOR_ID_ALTIMA, // 0x173b
+ CAVIUM = bindings::PCI_VENDOR_ID_CAVIUM, // 0x177d
+ TECHWELL = bindings::PCI_VENDOR_ID_TECHWELL, // 0x1797
+ BELKIN = bindings::PCI_VENDOR_ID_BELKIN, // 0x1799
+ RDC = bindings::PCI_VENDOR_ID_RDC, // 0x17f3
+ GLI = bindings::PCI_VENDOR_ID_GLI, // 0x17a0
+ LENOVO = bindings::PCI_VENDOR_ID_LENOVO, // 0x17aa
+ QCOM = bindings::PCI_VENDOR_ID_QCOM, // 0x17cb
+ CDNS = bindings::PCI_VENDOR_ID_CDNS, // 0x17cd
+ ARECA = bindings::PCI_VENDOR_ID_ARECA, // 0x17d3
+ S2IO = bindings::PCI_VENDOR_ID_S2IO, // 0x17d5
+ SITECOM = bindings::PCI_VENDOR_ID_SITECOM, // 0x182d
+ TOPSPIN = bindings::PCI_VENDOR_ID_TOPSPIN, // 0x1867
+ COMMTECH = bindings::PCI_VENDOR_ID_COMMTECH, // 0x18f7
+ SILAN = bindings::PCI_VENDOR_ID_SILAN, // 0x1904
+ RENESAS = bindings::PCI_VENDOR_ID_RENESAS, // 0x1912
+ SOLARFLARE = bindings::PCI_VENDOR_ID_SOLARFLARE, // 0x1924
+ TDI = bindings::PCI_VENDOR_ID_TDI, // 0x192E
+ NXP = bindings::PCI_VENDOR_ID_NXP, // 0x1957
+ PASEMI = bindings::PCI_VENDOR_ID_PASEMI, // 0x1959
+ ATTANSIC = bindings::PCI_VENDOR_ID_ATTANSIC, // 0x1969
+ JMICRON = bindings::PCI_VENDOR_ID_JMICRON, // 0x197B
+ KORENIX = bindings::PCI_VENDOR_ID_KORENIX, // 0x1982
+ HUAWEI = bindings::PCI_VENDOR_ID_HUAWEI, // 0x19e5
+ NETRONOME = bindings::PCI_VENDOR_ID_NETRONOME, // 0x19ee
+ QMI = bindings::PCI_VENDOR_ID_QMI, // 0x1a32
+ AZWAVE = bindings::PCI_VENDOR_ID_AZWAVE, // 0x1a3b
+ REDHAT_QUMRANET = bindings::PCI_VENDOR_ID_REDHAT_QUMRANET, // 0x1af4
+ ASMEDIA = bindings::PCI_VENDOR_ID_ASMEDIA, // 0x1b21
+ REDHAT = bindings::PCI_VENDOR_ID_REDHAT, // 0x1b36
+ WCHIC = bindings::PCI_VENDOR_ID_WCHIC, // 0x1c00
+ SILICOM_DENMARK = bindings::PCI_VENDOR_ID_SILICOM_DENMARK, // 0x1c2c
+ AMAZON_ANNAPURNA_LABS = bindings::PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, // 0x1c36
+ CIRCUITCO = bindings::PCI_VENDOR_ID_CIRCUITCO, // 0x1cc8
+ AMAZON = bindings::PCI_VENDOR_ID_AMAZON, // 0x1d0f
+ ZHAOXIN = bindings::PCI_VENDOR_ID_ZHAOXIN, // 0x1d17
+ ROCKCHIP = bindings::PCI_VENDOR_ID_ROCKCHIP, // 0x1d87
+ HYGON = bindings::PCI_VENDOR_ID_HYGON, // 0x1d94
+ META = bindings::PCI_VENDOR_ID_META, // 0x1d9b
+ FUNGIBLE = bindings::PCI_VENDOR_ID_FUNGIBLE, // 0x1dad
+ HXT = bindings::PCI_VENDOR_ID_HXT, // 0x1dbf
+ TEKRAM = bindings::PCI_VENDOR_ID_TEKRAM, // 0x1de1
+ RPI = bindings::PCI_VENDOR_ID_RPI, // 0x1de4
+ ALIBABA = bindings::PCI_VENDOR_ID_ALIBABA, // 0x1ded
+ CXL = bindings::PCI_VENDOR_ID_CXL, // 0x1e98
+ TEHUTI = bindings::PCI_VENDOR_ID_TEHUTI, // 0x1fc9
+ SUNIX = bindings::PCI_VENDOR_ID_SUNIX, // 0x1fd4
+ HINT = bindings::PCI_VENDOR_ID_HINT, // 0x3388
+ THREEDLABS = bindings::PCI_VENDOR_ID_3DLABS, // 0x3d3d
+ NETXEN = bindings::PCI_VENDOR_ID_NETXEN, // 0x4040
+ AKS = bindings::PCI_VENDOR_ID_AKS, // 0x416c
+ WCHCN = bindings::PCI_VENDOR_ID_WCHCN, // 0x4348
+ ACCESSIO = bindings::PCI_VENDOR_ID_ACCESSIO, // 0x494f
+ S3 = bindings::PCI_VENDOR_ID_S3, // 0x5333
+ DUNORD = bindings::PCI_VENDOR_ID_DUNORD, // 0x5544
+ DCI = bindings::PCI_VENDOR_ID_DCI, // 0x6666
+ GLENFLY = bindings::PCI_VENDOR_ID_GLENFLY, // 0x6766
+ INTEL = bindings::PCI_VENDOR_ID_INTEL, // 0x8086
+ WANGXUN = bindings::PCI_VENDOR_ID_WANGXUN, // 0x8088
+ SCALEMP = bindings::PCI_VENDOR_ID_SCALEMP, // 0x8686
+ COMPUTONE = bindings::PCI_VENDOR_ID_COMPUTONE, // 0x8e0e
+ KTI = bindings::PCI_VENDOR_ID_KTI, // 0x8e2e
+ ADAPTEC = bindings::PCI_VENDOR_ID_ADAPTEC, // 0x9004
+ ADAPTEC2 = bindings::PCI_VENDOR_ID_ADAPTEC2, // 0x9005
+ HOLTEK = bindings::PCI_VENDOR_ID_HOLTEK, // 0x9412
+ NETMOS = bindings::PCI_VENDOR_ID_NETMOS, // 0x9710
+ THREECOM_2 = bindings::PCI_VENDOR_ID_3COM_2, // 0xa727
+ SOLIDRUN = bindings::PCI_VENDOR_ID_SOLIDRUN, // 0xd063
+ DIGIUM = bindings::PCI_VENDOR_ID_DIGIUM, // 0xd161
+ TIGERJET = bindings::PCI_VENDOR_ID_TIGERJET, // 0xe159
+ XILINX_RME = bindings::PCI_VENDOR_ID_XILINX_RME, // 0xea60
+ XEN = bindings::PCI_VENDOR_ID_XEN, // 0x5853
+ OCZ = bindings::PCI_VENDOR_ID_OCZ, // 0x1b85
+ NCUBE = bindings::PCI_VENDOR_ID_NCUBE, // 0x10ff
+}
diff --git a/rust/kernel/pid_namespace.rs b/rust/kernel/pid_namespace.rs
index 0e93808e4639..979a9718f153 100644
--- a/rust/kernel/pid_namespace.rs
+++ b/rust/kernel/pid_namespace.rs
@@ -7,10 +7,7 @@
//! C header: [`include/linux/pid_namespace.h`](srctree/include/linux/pid_namespace.h) and
//! [`include/linux/pid.h`](srctree/include/linux/pid.h)
-use crate::{
- bindings,
- types::{AlwaysRefCounted, Opaque},
-};
+use crate::{bindings, sync::aref::AlwaysRefCounted, types::Opaque};
use core::ptr;
/// Wraps the kernel's `struct pid_namespace`. Thread safe.
diff --git a/rust/kernel/platform.rs b/rust/kernel/platform.rs
index 5b21fa517e55..7205fe3416d3 100644
--- a/rust/kernel/platform.rs
+++ b/rust/kernel/platform.rs
@@ -5,12 +5,15 @@
//! C header: [`include/linux/platform_device.h`](srctree/include/linux/platform_device.h)
use crate::{
- bindings, container_of, device, driver,
- error::{to_result, Result},
+ acpi, bindings, container_of,
+ device::{self, Bound},
+ driver,
+ error::{from_result, to_result, Result},
+ io::{mem::IoRequest, Resource},
+ irq::{self, IrqRequest},
of,
prelude::*,
- str::CStr,
- types::{ForeignOwnable, Opaque},
+ types::Opaque,
ThisModule,
};
@@ -37,12 +40,18 @@ unsafe impl<T: Driver + 'static> driver::RegistrationOps for Adapter<T> {
None => core::ptr::null(),
};
+ let acpi_table = match T::ACPI_ID_TABLE {
+ Some(table) => table.as_ptr(),
+ None => core::ptr::null(),
+ };
+
// SAFETY: It's safe to set the fields of `struct platform_driver` on initialization.
unsafe {
(*pdrv.get()).driver.name = name.as_char_ptr();
(*pdrv.get()).probe = Some(Self::probe_callback);
(*pdrv.get()).remove = Some(Self::remove_callback);
(*pdrv.get()).driver.of_match_table = of_table;
+ (*pdrv.get()).driver.acpi_match_table = acpi_table;
}
// SAFETY: `pdrv` is guaranteed to be a valid `RegType`.
@@ -61,30 +70,30 @@ impl<T: Driver + 'static> Adapter<T> {
// `struct platform_device`.
//
// INVARIANT: `pdev` is valid for the duration of `probe_callback()`.
- let pdev = unsafe { &*pdev.cast::<Device<device::Core>>() };
-
+ let pdev = unsafe { &*pdev.cast::<Device<device::CoreInternal>>() };
let info = <Self as driver::Adapter>::id_info(pdev.as_ref());
- match T::probe(pdev, info) {
- Ok(data) => {
- // Let the `struct platform_device` own a reference of the driver's private data.
- // SAFETY: By the type invariant `pdev.as_raw` returns a valid pointer to a
- // `struct platform_device`.
- unsafe { bindings::platform_set_drvdata(pdev.as_raw(), data.into_foreign() as _) };
- }
- Err(err) => return Error::to_errno(err),
- }
- 0
+ from_result(|| {
+ let data = T::probe(pdev, info)?;
+
+ pdev.as_ref().set_drvdata(data);
+ Ok(0)
+ })
}
extern "C" fn remove_callback(pdev: *mut bindings::platform_device) {
- // SAFETY: `pdev` is a valid pointer to a `struct platform_device`.
- let ptr = unsafe { bindings::platform_get_drvdata(pdev) }.cast();
+ // SAFETY: The platform bus only ever calls the remove callback with a valid pointer to a
+ // `struct platform_device`.
+ //
+        // INVARIANT: `pdev` is valid for the duration of `remove_callback()`.
+ let pdev = unsafe { &*pdev.cast::<Device<device::CoreInternal>>() };
// SAFETY: `remove_callback` is only ever called after a successful call to
- // `probe_callback`, hence it's guaranteed that `ptr` points to a valid and initialized
- // `KBox<T>` pointer created through `KBox::into_foreign`.
- let _ = unsafe { KBox::<T>::from_foreign(ptr) };
+ // `probe_callback`, hence it's guaranteed that `Device::set_drvdata()` has been called
+ // and stored a `Pin<KBox<T>>`.
+ let data = unsafe { pdev.as_ref().drvdata_obtain::<Pin<KBox<T>>>() };
+
+ T::unbind(pdev, data.as_ref());
}
}
@@ -94,6 +103,10 @@ impl<T: Driver + 'static> driver::Adapter for Adapter<T> {
fn of_id_table() -> Option<of::IdTable<Self::IdInfo>> {
T::OF_ID_TABLE
}
+
+ fn acpi_id_table() -> Option<acpi::IdTable<Self::IdInfo>> {
+ T::ACPI_ID_TABLE
+ }
}
/// Declares a kernel module that exposes a single platform driver.
@@ -120,10 +133,10 @@ macro_rules! module_platform_driver {
///
/// Drivers must implement this trait in order to get a platform driver registered.
///
-/// # Example
+/// # Examples
///
///```
-/// # use kernel::{bindings, c_str, device::Core, of, platform};
+/// # use kernel::{acpi, bindings, c_str, device::Core, of, platform};
///
/// struct MyDriver;
///
@@ -136,9 +149,19 @@ macro_rules! module_platform_driver {
/// ]
/// );
///
+/// kernel::acpi_device_table!(
+/// ACPI_TABLE,
+/// MODULE_ACPI_TABLE,
+/// <MyDriver as platform::Driver>::IdInfo,
+/// [
+/// (acpi::DeviceId::new(c_str!("LNUXBEEF")), ())
+/// ]
+/// );
+///
/// impl platform::Driver for MyDriver {
/// type IdInfo = ();
/// const OF_ID_TABLE: Option<of::IdTable<Self::IdInfo>> = Some(&OF_TABLE);
+/// const ACPI_ID_TABLE: Option<acpi::IdTable<Self::IdInfo>> = Some(&ACPI_TABLE);
///
/// fn probe(
/// _pdev: &platform::Device<Core>,
@@ -158,7 +181,10 @@ pub trait Driver: Send {
type IdInfo: 'static;
/// The table of OF device ids supported by the driver.
- const OF_ID_TABLE: Option<of::IdTable<Self::IdInfo>>;
+ const OF_ID_TABLE: Option<of::IdTable<Self::IdInfo>> = None;
+
+ /// The table of ACPI device ids supported by the driver.
+ const ACPI_ID_TABLE: Option<acpi::IdTable<Self::IdInfo>> = None;
/// Platform driver probe.
///
@@ -166,6 +192,20 @@ pub trait Driver: Send {
/// Implementers should attempt to initialize the device here.
fn probe(dev: &Device<device::Core>, id_info: Option<&Self::IdInfo>)
-> Result<Pin<KBox<Self>>>;
+
+ /// Platform driver unbind.
+ ///
+ /// Called when a [`Device`] is unbound from its bound [`Driver`]. Implementing this callback
+ /// is optional.
+ ///
+ /// This callback serves as a place for drivers to perform teardown operations that require a
+ /// `&Device<Core>` or `&Device<Bound>` reference. For instance, drivers may try to perform I/O
+ /// operations to gracefully tear down the device.
+ ///
+ /// Otherwise, release operations for driver resources should be performed in `Self::drop`.
+ fn unbind(dev: &Device<device::Core>, this: Pin<&Self>) {
+ let _ = (dev, this);
+ }
}
/// The platform device representation.
@@ -188,6 +228,236 @@ impl<Ctx: device::DeviceContext> Device<Ctx> {
fn as_raw(&self) -> *mut bindings::platform_device {
self.0.get()
}
+
+ /// Returns the resource at `index`, if any.
+ pub fn resource_by_index(&self, index: u32) -> Option<&Resource> {
+ // SAFETY: `self.as_raw()` returns a valid pointer to a `struct platform_device`.
+ let resource = unsafe {
+ bindings::platform_get_resource(self.as_raw(), bindings::IORESOURCE_MEM, index)
+ };
+
+ if resource.is_null() {
+ return None;
+ }
+
+ // SAFETY: `resource` is a valid pointer to a `struct resource` as
+ // returned by `platform_get_resource`.
+ Some(unsafe { Resource::from_raw(resource) })
+ }
+
+ /// Returns the resource with a given `name`, if any.
+ pub fn resource_by_name(&self, name: &CStr) -> Option<&Resource> {
+ // SAFETY: `self.as_raw()` returns a valid pointer to a `struct
+ // platform_device` and `name` points to a valid C string.
+ let resource = unsafe {
+ bindings::platform_get_resource_byname(
+ self.as_raw(),
+ bindings::IORESOURCE_MEM,
+ name.as_char_ptr(),
+ )
+ };
+
+ if resource.is_null() {
+ return None;
+ }
+
+ // SAFETY: `resource` is a valid pointer to a `struct resource` as
+ // returned by `platform_get_resource`.
+ Some(unsafe { Resource::from_raw(resource) })
+ }
+}
+
+impl Device<Bound> {
+ /// Returns an `IoRequest` for the resource at `index`, if any.
+ pub fn io_request_by_index(&self, index: u32) -> Option<IoRequest<'_>> {
+ self.resource_by_index(index)
+ // SAFETY: `resource` is a valid resource for `&self` during the
+ // lifetime of the `IoRequest`.
+ .map(|resource| unsafe { IoRequest::new(self.as_ref(), resource) })
+ }
+
+ /// Returns an `IoRequest` for the resource with a given `name`, if any.
+ pub fn io_request_by_name(&self, name: &CStr) -> Option<IoRequest<'_>> {
+ self.resource_by_name(name)
+ // SAFETY: `resource` is a valid resource for `&self` during the
+ // lifetime of the `IoRequest`.
+ .map(|resource| unsafe { IoRequest::new(self.as_ref(), resource) })
+ }
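+
+    // A short sketch (hypothetical resource index): the request borrows
+    // `&self`, which is why it is only available on `Device<Bound>`:
+    //
+    //     let request = pdev.io_request_by_index(0).ok_or(ENODEV)?;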
+}
+
+macro_rules! define_irq_accessor_by_index {
+ (
+ $(#[$meta:meta])* $fn_name:ident,
+ $request_fn:ident,
+ $reg_type:ident,
+ $handler_trait:ident
+ ) => {
+ $(#[$meta])*
+ pub fn $fn_name<'a, T: irq::$handler_trait + 'static>(
+ &'a self,
+ flags: irq::Flags,
+ index: u32,
+ name: &'static CStr,
+ handler: impl PinInit<T, Error> + 'a,
+ ) -> Result<impl PinInit<irq::$reg_type<T>, Error> + 'a> {
+ let request = self.$request_fn(index)?;
+
+ Ok(irq::$reg_type::<T>::new(
+ request,
+ flags,
+ name,
+ handler,
+ ))
+ }
+ };
+}
+
+macro_rules! define_irq_accessor_by_name {
+ (
+ $(#[$meta:meta])* $fn_name:ident,
+ $request_fn:ident,
+ $reg_type:ident,
+ $handler_trait:ident
+ ) => {
+ $(#[$meta])*
+ pub fn $fn_name<'a, T: irq::$handler_trait + 'static>(
+ &'a self,
+ flags: irq::Flags,
+ irq_name: &CStr,
+ name: &'static CStr,
+ handler: impl PinInit<T, Error> + 'a,
+ ) -> Result<impl PinInit<irq::$reg_type<T>, Error> + 'a> {
+ let request = self.$request_fn(irq_name)?;
+
+ Ok(irq::$reg_type::<T>::new(
+ request,
+ flags,
+ name,
+ handler,
+ ))
+ }
+ };
+}
+
+impl Device<Bound> {
+ /// Returns an [`IrqRequest`] for the IRQ at the given index, if any.
+ pub fn irq_by_index(&self, index: u32) -> Result<IrqRequest<'_>> {
+ // SAFETY: `self.as_raw` returns a valid pointer to a `struct platform_device`.
+ let irq = unsafe { bindings::platform_get_irq(self.as_raw(), index) };
+
+ if irq < 0 {
+ return Err(Error::from_errno(irq));
+ }
+
+ // SAFETY: `irq` is guaranteed to be a valid IRQ number for `&self`.
+ Ok(unsafe { IrqRequest::new(self.as_ref(), irq as u32) })
+ }
+
+ /// Returns an [`IrqRequest`] for the IRQ at the given index, but does not
+ /// print an error if the IRQ cannot be obtained.
+ pub fn optional_irq_by_index(&self, index: u32) -> Result<IrqRequest<'_>> {
+ // SAFETY: `self.as_raw` returns a valid pointer to a `struct platform_device`.
+ let irq = unsafe { bindings::platform_get_irq_optional(self.as_raw(), index) };
+
+ if irq < 0 {
+ return Err(Error::from_errno(irq));
+ }
+
+ // SAFETY: `irq` is guaranteed to be a valid IRQ number for `&self`.
+ Ok(unsafe { IrqRequest::new(self.as_ref(), irq as u32) })
+ }
+
+ /// Returns an [`IrqRequest`] for the IRQ with the given name, if any.
+ pub fn irq_by_name(&self, name: &CStr) -> Result<IrqRequest<'_>> {
+ // SAFETY: `self.as_raw` returns a valid pointer to a `struct platform_device`.
+ let irq = unsafe { bindings::platform_get_irq_byname(self.as_raw(), name.as_char_ptr()) };
+
+ if irq < 0 {
+ return Err(Error::from_errno(irq));
+ }
+
+ // SAFETY: `irq` is guaranteed to be a valid IRQ number for `&self`.
+ Ok(unsafe { IrqRequest::new(self.as_ref(), irq as u32) })
+ }
+
+ /// Returns an [`IrqRequest`] for the IRQ with the given name, but does not
+ /// print an error if the IRQ cannot be obtained.
+ pub fn optional_irq_by_name(&self, name: &CStr) -> Result<IrqRequest<'_>> {
+ // SAFETY: `self.as_raw` returns a valid pointer to a `struct platform_device`.
+ let irq = unsafe {
+ bindings::platform_get_irq_byname_optional(self.as_raw(), name.as_char_ptr())
+ };
+
+ if irq < 0 {
+ return Err(Error::from_errno(irq));
+ }
+
+ // SAFETY: `irq` is guaranteed to be a valid IRQ number for `&self`.
+ Ok(unsafe { IrqRequest::new(self.as_ref(), irq as u32) })
+ }
+
+ define_irq_accessor_by_index!(
+        /// Returns an [`irq::Registration`] for the IRQ at the given index.
+ request_irq_by_index,
+ irq_by_index,
+ Registration,
+ Handler
+ );
+ define_irq_accessor_by_name!(
+        /// Returns an [`irq::Registration`] for the IRQ with the given name.
+ request_irq_by_name,
+ irq_by_name,
+ Registration,
+ Handler
+ );
+ define_irq_accessor_by_index!(
+ /// Does the same as [`Self::request_irq_by_index`], except that it does
+ /// not print an error message if the IRQ cannot be obtained.
+ request_optional_irq_by_index,
+ optional_irq_by_index,
+ Registration,
+ Handler
+ );
+ define_irq_accessor_by_name!(
+ /// Does the same as [`Self::request_irq_by_name`], except that it does
+ /// not print an error message if the IRQ cannot be obtained.
+ request_optional_irq_by_name,
+ optional_irq_by_name,
+ Registration,
+ Handler
+ );
+
+ define_irq_accessor_by_index!(
+        /// Returns an [`irq::ThreadedRegistration`] for the IRQ at the given index.
+ request_threaded_irq_by_index,
+ irq_by_index,
+ ThreadedRegistration,
+ ThreadedHandler
+ );
+ define_irq_accessor_by_name!(
+        /// Returns an [`irq::ThreadedRegistration`] for the IRQ with the given name.
+ request_threaded_irq_by_name,
+ irq_by_name,
+ ThreadedRegistration,
+ ThreadedHandler
+ );
+ define_irq_accessor_by_index!(
+ /// Does the same as [`Self::request_threaded_irq_by_index`], except
+ /// that it does not print an error message if the IRQ cannot be
+ /// obtained.
+ request_optional_threaded_irq_by_index,
+ optional_irq_by_index,
+ ThreadedRegistration,
+ ThreadedHandler
+ );
+ define_irq_accessor_by_name!(
+ /// Does the same as [`Self::request_threaded_irq_by_name`], except that
+ /// it does not print an error message if the IRQ cannot be obtained.
+ request_optional_threaded_irq_by_name,
+ optional_irq_by_name,
+ ThreadedRegistration,
+ ThreadedHandler
+ );
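+
+    // A usage sketch for the generated accessors (assumptions: `MyHandler`
+    // implements `irq::Handler`, `MyHandler::new()` is a pin-initializer,
+    // `flags` is an `irq::Flags` value, and "tx" is a hypothetical IRQ name):
+    //
+    //     let init = pdev.request_irq_by_name(
+    //         flags, c_str!("tx"), c_str!("my-driver"), MyHandler::new(),
+    //     )?;
+    //     let registration = KBox::pin_init(init, GFP_KERNEL)?;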
}
// SAFETY: `Device` is a transparent wrapper of a type that doesn't depend on `Device`'s generic
@@ -195,8 +465,10 @@ impl<Ctx: device::DeviceContext> Device<Ctx> {
kernel::impl_device_context_deref!(unsafe { Device });
kernel::impl_device_context_into_aref!(Device);
+impl crate::dma::Device for Device<device::Core> {}
+
// SAFETY: Instances of `Device` are always reference-counted.
-unsafe impl crate::types::AlwaysRefCounted for Device {
+unsafe impl crate::sync::aref::AlwaysRefCounted for Device {
fn inc_ref(&self) {
// SAFETY: The existence of a shared reference guarantees that the refcount is non-zero.
unsafe { bindings::get_device(self.as_ref().as_raw()) };
@@ -215,7 +487,7 @@ impl<Ctx: device::DeviceContext> AsRef<device::Device<Ctx>> for Device<Ctx> {
let dev = unsafe { addr_of_mut!((*self.as_raw()).dev) };
// SAFETY: `dev` points to a valid `struct device`.
- unsafe { device::Device::as_ref(dev) }
+ unsafe { device::Device::from_raw(dev) }
}
}
diff --git a/rust/kernel/prelude.rs b/rust/kernel/prelude.rs
index 2f30a398dddd..198d09a31449 100644
--- a/rust/kernel/prelude.rs
+++ b/rust/kernel/prelude.rs
@@ -12,7 +12,10 @@
//! ```
#[doc(no_inline)]
-pub use core::pin::Pin;
+pub use core::{
+ mem::{align_of, align_of_val, size_of, size_of_val},
+ pin::Pin,
+};
pub use ::ffi::{
c_char, c_int, c_long, c_longlong, c_schar, c_short, c_uchar, c_uint, c_ulong, c_ulonglong,
@@ -31,9 +34,9 @@ pub use super::{build_assert, build_error};
// `super::std_vendor` is hidden, which makes the macro inline for some reason.
#[doc(no_inline)]
pub use super::dbg;
-pub use super::fmt;
pub use super::{dev_alert, dev_crit, dev_dbg, dev_emerg, dev_err, dev_info, dev_notice, dev_warn};
pub use super::{pr_alert, pr_crit, pr_debug, pr_emerg, pr_err, pr_info, pr_notice, pr_warn};
+pub use core::format_args as fmt;
pub use super::{try_init, try_pin_init};
@@ -46,3 +49,5 @@ pub use super::{str::CStr, ThisModule};
pub use super::init::InPlaceInit;
pub use super::current;
+
+pub use super::uaccess::UserPtr;
diff --git a/rust/kernel/print.rs b/rust/kernel/print.rs
index 9783d960a97a..2d743d78d220 100644
--- a/rust/kernel/print.rs
+++ b/rust/kernel/print.rs
@@ -8,10 +8,10 @@
use crate::{
ffi::{c_char, c_void},
+ fmt,
prelude::*,
str::RawFormatter,
};
-use core::fmt;
// Called from `vsprintf` with format specifier `%pA`.
#[expect(clippy::missing_safety_doc)]
@@ -25,7 +25,7 @@ unsafe extern "C" fn rust_fmt_argument(
// SAFETY: The C contract guarantees that `buf` is valid if it's less than `end`.
let mut w = unsafe { RawFormatter::from_ptrs(buf.cast(), end.cast()) };
// SAFETY: TODO.
- let _ = w.write_fmt(unsafe { *(ptr as *const fmt::Arguments<'_>) });
+ let _ = w.write_fmt(unsafe { *ptr.cast::<fmt::Arguments<'_>>() });
w.pos().cast()
}
@@ -109,7 +109,7 @@ pub unsafe fn call_printk(
bindings::_printk(
format_string.as_ptr(),
module_name.as_ptr(),
- &args as *const _ as *const c_void,
+ core::ptr::from_ref(&args).cast::<c_void>(),
);
}
}
@@ -129,7 +129,7 @@ pub fn call_printk_cont(args: fmt::Arguments<'_>) {
unsafe {
bindings::_printk(
format_strings::CONT.as_ptr(),
- &args as *const _ as *const c_void,
+ core::ptr::from_ref(&args).cast::<c_void>(),
);
}
}
@@ -149,7 +149,7 @@ macro_rules! print_macro (
// takes borrows on the arguments, but does not extend the scope of temporaries.
// Therefore, a `match` expression is used to keep them around, since
// the scrutinee is kept until the end of the `match`.
- match format_args!($($arg)+) {
+ match $crate::prelude::fmt!($($arg)+) {
// SAFETY: This hidden macro should only be called by the documented
// printing macros which ensure the format string is one of the fixed
// ones. All `__LOG_PREFIX`s are null-terminated as they are generated
@@ -168,7 +168,7 @@ macro_rules! print_macro (
// The `CONT` case.
($format_string:path, true, $($arg:tt)+) => (
$crate::print::call_printk_cont(
- format_args!($($arg)+),
+ $crate::prelude::fmt!($($arg)+),
);
);
);
diff --git a/rust/kernel/processor.rs b/rust/kernel/processor.rs
new file mode 100644
index 000000000000..85b49b3614dd
--- /dev/null
+++ b/rust/kernel/processor.rs
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Processor related primitives.
+//!
+//! C header: [`include/linux/processor.h`](srctree/include/linux/processor.h)
+
+/// Lower CPU power consumption or yield to a hyperthreaded twin processor.
+///
+/// It also happens to serve as a compiler barrier.
+#[inline]
+pub fn cpu_relax() {
+ // SAFETY: Always safe to call.
+ unsafe { bindings::cpu_relax() }
+}
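+
+// A typical caller (illustrative; `ready()` is a placeholder condition) spins
+// while yielding to a hyperthreaded sibling:
+//
+//     while !ready() {
+//         cpu_relax();
+//     }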
diff --git a/rust/kernel/ptr.rs b/rust/kernel/ptr.rs
new file mode 100644
index 000000000000..2e5e2a090480
--- /dev/null
+++ b/rust/kernel/ptr.rs
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Types and functions to work with pointers and addresses.
+
+use core::fmt::Debug;
+use core::mem::align_of;
+use core::num::NonZero;
+
+use crate::build_assert;
+
+/// Type representing an alignment, which is always a power of two.
+///
+/// It is used to validate that a given value is a valid alignment, and to perform masking and
+/// alignment operations.
+///
+/// This is a temporary substitute for the [`Alignment`] nightly type from the standard library,
+/// and to be eventually replaced by it.
+///
+/// [`Alignment`]: https://github.com/rust-lang/rust/issues/102070
+///
+/// # Invariants
+///
+/// An alignment is always a power of two.
+#[repr(transparent)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
+pub struct Alignment(NonZero<usize>);
+
+impl Alignment {
+ /// Validates that `ALIGN` is a power of two at build-time, and returns an [`Alignment`] of the
+ /// same value.
+ ///
+ /// A build error is triggered if `ALIGN` is not a power of two.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use kernel::ptr::Alignment;
+ ///
+ /// let v = Alignment::new::<16>();
+ /// assert_eq!(v.as_usize(), 16);
+ /// ```
+ #[inline(always)]
+ pub const fn new<const ALIGN: usize>() -> Self {
+ build_assert!(
+ ALIGN.is_power_of_two(),
+ "Provided alignment is not a power of two."
+ );
+
+ // INVARIANT: `align` is a power of two.
+ // SAFETY: `align` is a power of two, and thus non-zero.
+ Self(unsafe { NonZero::new_unchecked(ALIGN) })
+ }
+
+ /// Validates that `align` is a power of two at runtime, and returns an
+ /// [`Alignment`] of the same value.
+ ///
+ /// Returns [`None`] if `align` is not a power of two.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use kernel::ptr::Alignment;
+ ///
+ /// assert_eq!(Alignment::new_checked(16), Some(Alignment::new::<16>()));
+ /// assert_eq!(Alignment::new_checked(15), None);
+ /// assert_eq!(Alignment::new_checked(1), Some(Alignment::new::<1>()));
+ /// assert_eq!(Alignment::new_checked(0), None);
+ /// ```
+ #[inline(always)]
+ pub const fn new_checked(align: usize) -> Option<Self> {
+ if align.is_power_of_two() {
+ // INVARIANT: `align` is a power of two.
+ // SAFETY: `align` is a power of two, and thus non-zero.
+ Some(Self(unsafe { NonZero::new_unchecked(align) }))
+ } else {
+ None
+ }
+ }
+
+ /// Returns the alignment of `T`.
+ ///
+ /// This is equivalent to [`align_of`], but with the return value provided as an [`Alignment`].
+ #[inline(always)]
+ pub const fn of<T>() -> Self {
+ #![allow(clippy::incompatible_msrv)]
+ // This cannot panic since alignments are always powers of two.
+ //
+ // We unfortunately cannot use `new` as it would require the `generic_const_exprs` feature.
+ const { Alignment::new_checked(align_of::<T>()).unwrap() }
+ }
+
+ /// Returns this alignment as a [`usize`].
+ ///
+ /// It is guaranteed to be a power of two.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use kernel::ptr::Alignment;
+ ///
+ /// assert_eq!(Alignment::new::<16>().as_usize(), 16);
+ /// ```
+ #[inline(always)]
+ pub const fn as_usize(self) -> usize {
+ self.as_nonzero().get()
+ }
+
+ /// Returns this alignment as a [`NonZero`].
+ ///
+ /// It is guaranteed to be a power of two.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use kernel::ptr::Alignment;
+ ///
+ /// assert_eq!(Alignment::new::<16>().as_nonzero().get(), 16);
+ /// ```
+ #[inline(always)]
+ pub const fn as_nonzero(self) -> NonZero<usize> {
+ // Allow the compiler to know that the value is indeed a power of two. This can help
+ // optimize some operations down the line, like e.g. replacing divisions by bit shifts.
+ if !self.0.is_power_of_two() {
+ // SAFETY: Per the invariants, `self.0` is always a power of two so this block will
+ // never be reached.
+ unsafe { core::hint::unreachable_unchecked() }
+ }
+ self.0
+ }
+
+ /// Returns the base-2 logarithm of the alignment.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use kernel::ptr::Alignment;
+ ///
+ /// assert_eq!(Alignment::of::<u8>().log2(), 0);
+ /// assert_eq!(Alignment::new::<16>().log2(), 4);
+ /// ```
+ #[inline(always)]
+ pub const fn log2(self) -> u32 {
+ self.0.ilog2()
+ }
+
+ /// Returns the mask for this alignment.
+ ///
+ /// This is equivalent to `!(self.as_usize() - 1)`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use kernel::ptr::Alignment;
+ ///
+ /// assert_eq!(Alignment::new::<0x10>().mask(), !0xf);
+ /// ```
+ #[inline(always)]
+ pub const fn mask(self) -> usize {
+ // No underflow can occur as the alignment is guaranteed to be a power of two, and thus is
+ // non-zero.
+ !(self.as_usize() - 1)
+ }
+}
+
+/// Trait for items that can be aligned against an [`Alignment`].
+pub trait Alignable: Sized {
+ /// Aligns `self` down to `alignment`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use kernel::ptr::{Alignable, Alignment};
+ ///
+ /// assert_eq!(0x2f_usize.align_down(Alignment::new::<0x10>()), 0x20);
+ /// assert_eq!(0x30usize.align_down(Alignment::new::<0x10>()), 0x30);
+ /// assert_eq!(0xf0u8.align_down(Alignment::new::<0x1000>()), 0x0);
+ /// ```
+ fn align_down(self, alignment: Alignment) -> Self;
+
+ /// Aligns `self` up to `alignment`, returning `None` if aligning would result in an overflow.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use kernel::ptr::{Alignable, Alignment};
+ ///
+ /// assert_eq!(0x4fusize.align_up(Alignment::new::<0x10>()), Some(0x50));
+ /// assert_eq!(0x40usize.align_up(Alignment::new::<0x10>()), Some(0x40));
+ /// assert_eq!(0x0usize.align_up(Alignment::new::<0x10>()), Some(0x0));
+ /// assert_eq!(u8::MAX.align_up(Alignment::new::<0x10>()), None);
+ /// assert_eq!(0x10u8.align_up(Alignment::new::<0x100>()), None);
+ /// assert_eq!(0x0u8.align_up(Alignment::new::<0x100>()), Some(0x0));
+ /// ```
+ fn align_up(self, alignment: Alignment) -> Option<Self>;
+}
+
+/// Implement [`Alignable`] for unsigned integer types.
+macro_rules! impl_alignable_uint {
+ ($($t:ty),*) => {
+ $(
+ impl Alignable for $t {
+ #[inline(always)]
+ fn align_down(self, alignment: Alignment) -> Self {
+ // The operands of `&` need to be of the same type so convert the alignment to
+ // `Self`. This means we need to compute the mask ourselves.
+ ::core::num::NonZero::<Self>::try_from(alignment.as_nonzero())
+ .map(|align| self & !(align.get() - 1))
+ // An alignment larger than `Self` always aligns down to `0`.
+ .unwrap_or(0)
+ }
+
+ #[inline(always)]
+ fn align_up(self, alignment: Alignment) -> Option<Self> {
+ let aligned_down = self.align_down(alignment);
+ if self == aligned_down {
+ Some(aligned_down)
+ } else {
+ Self::try_from(alignment.as_usize())
+ .ok()
+ .and_then(|align| aligned_down.checked_add(align))
+ }
+ }
+ }
+ )*
+ };
+}
+
+impl_alignable_uint!(u8, u16, u32, u64, usize);
diff --git a/rust/kernel/rbtree.rs b/rust/kernel/rbtree.rs
index 8d978c896747..b8fe6be6fcc4 100644
--- a/rust/kernel/rbtree.rs
+++ b/rust/kernel/rbtree.rs
@@ -191,6 +191,12 @@ impl<K, V> RBTree<K, V> {
}
}
+ /// Returns true if this tree is empty.
+ #[inline]
+ pub fn is_empty(&self) -> bool {
+ self.root.rb_node.is_null()
+ }
+
/// Returns an iterator over the tree nodes, sorted by key.
pub fn iter(&self) -> Iter<'_, K, V> {
Iter {
@@ -769,23 +775,14 @@ impl<'a, K, V> Cursor<'a, K, V> {
// the tree cannot change. By the tree invariant, all nodes are valid.
unsafe { bindings::rb_erase(&mut (*this).links, addr_of_mut!(self.tree.root)) };
- let current = match (prev, next) {
- (_, Some(next)) => next,
- (Some(prev), None) => prev,
- (None, None) => {
- return (None, node);
- }
- };
+ // INVARIANT:
+ // - `current` is a valid node in the [`RBTree`] pointed to by `self.tree`.
+ let cursor = next.or(prev).map(|current| Self {
+ current,
+ tree: self.tree,
+ });
- (
- // INVARIANT:
- // - `current` is a valid node in the [`RBTree`] pointed to by `self.tree`.
- Some(Self {
- current,
- tree: self.tree,
- }),
- node,
- )
+ (cursor, node)
}
/// Remove the previous node, returning it if it exists.
diff --git a/rust/kernel/regulator.rs b/rust/kernel/regulator.rs
new file mode 100644
index 000000000000..b55a201e5029
--- /dev/null
+++ b/rust/kernel/regulator.rs
@@ -0,0 +1,395 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Regulator abstractions, providing a standard kernel interface to control
+//! voltage and current regulators.
+//!
+//! The intention is to allow systems to dynamically control regulator power
+//! output in order to save power and prolong battery life. This applies to both
+//! voltage regulators (where voltage output is controllable) and current sinks
+//! (where current limit is controllable).
+//!
+//! C header: [`include/linux/regulator/consumer.h`](srctree/include/linux/regulator/consumer.h)
+//!
+//! Regulators are modeled in Rust with a collection of states. Each state may
+//! enforce a given invariant, and they may convert between each other where applicable.
+//!
+//! See [Voltage and current regulator API](https://docs.kernel.org/driver-api/regulator.html)
+//! for more information.
+
+use crate::{
+ bindings,
+ device::{Bound, Device},
+ error::{from_err_ptr, to_result, Result},
+ prelude::*,
+};
+
+use core::{marker::PhantomData, mem::ManuallyDrop, ptr::NonNull};
+
+mod private {
+ pub trait Sealed {}
+
+ impl Sealed for super::Enabled {}
+ impl Sealed for super::Disabled {}
+}
+
+/// A trait representing the different states a [`Regulator`] can be in.
+pub trait RegulatorState: private::Sealed + 'static {
+ /// Whether the regulator should be disabled when dropped.
+ const DISABLE_ON_DROP: bool;
+}
+
+/// A state where the [`Regulator`] is known to be enabled.
+///
+/// The `enable` reference count held by this state is decremented when it is
+/// dropped.
+pub struct Enabled;
+
+/// A state where this [`Regulator`] handle has not specifically asked for the
+/// underlying regulator to be enabled. This means that this reference does not
+/// own an `enable` reference count, but the regulator may still be on.
+pub struct Disabled;
+
+impl RegulatorState for Enabled {
+ const DISABLE_ON_DROP: bool = true;
+}
+
+impl RegulatorState for Disabled {
+ const DISABLE_ON_DROP: bool = false;
+}
+
+/// A trait that abstracts the ability to check if a [`Regulator`] is enabled.
+pub trait IsEnabled: RegulatorState {}
+impl IsEnabled for Disabled {}
+
+/// An error that can occur when trying to convert a [`Regulator`] between states.
+pub struct Error<State: RegulatorState> {
+ /// The error that occurred.
+ pub error: kernel::error::Error,
+
+ /// The regulator that caused the error, so that the operation may be retried.
+ pub regulator: Regulator<State>,
+}
+/// Obtains and enables a [`devres`]-managed regulator for a device.
+///
+/// This calls [`regulator_disable()`] and [`regulator_put()`] automatically on
+/// driver detach.
+///
+/// This API is identical to `devm_regulator_get_enable()`, and should be
+/// preferred over the [`Regulator<T: RegulatorState>`] API if the caller only
+/// cares about the regulator being enabled.
+///
+/// [`devres`]: https://docs.kernel.org/driver-api/driver-model/devres.html
+/// [`regulator_disable()`]: https://docs.kernel.org/driver-api/regulator.html#c.regulator_disable
+/// [`regulator_put()`]: https://docs.kernel.org/driver-api/regulator.html#c.regulator_put
+pub fn devm_enable(dev: &Device<Bound>, name: &CStr) -> Result {
+ // SAFETY: `dev` is a valid and bound device, while `name` is a valid C
+ // string.
+ to_result(unsafe { bindings::devm_regulator_get_enable(dev.as_raw(), name.as_ptr()) })
+}
+
+/// Same as [`devm_enable`], but calls `devm_regulator_get_enable_optional`
+/// instead.
+///
+/// This obtains and enables a [`devres`]-managed regulator for a device, but
+/// does not print a message nor provide a dummy if the regulator is not found.
+///
+/// This calls [`regulator_disable()`] and [`regulator_put()`] automatically on
+/// driver detach.
+///
+/// [`devres`]: https://docs.kernel.org/driver-api/driver-model/devres.html
+/// [`regulator_disable()`]: https://docs.kernel.org/driver-api/regulator.html#c.regulator_disable
+/// [`regulator_put()`]: https://docs.kernel.org/driver-api/regulator.html#c.regulator_put
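+///
+/// # Examples
+///
+/// A minimal sketch, assuming a fictitious `vcc` supply whose absence is
+/// reported as `ENODEV`:
+///
+/// ```
+/// # use kernel::c_str;
+/// # use kernel::device::{Bound, Device};
+/// # use kernel::prelude::*;
+/// # use kernel::regulator;
+/// fn probe(dev: &Device<Bound>) -> Result {
+///     match regulator::devm_enable_optional(dev, c_str!("vcc")) {
+///         // The supply exists and is now enabled.
+///         Ok(()) => Ok(()),
+///         // The supply is genuinely absent; carry on without it.
+///         Err(e) if e == ENODEV => Ok(()),
+///         Err(e) => Err(e),
+///     }
+/// }
+/// ```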
+pub fn devm_enable_optional(dev: &Device<Bound>, name: &CStr) -> Result {
+ // SAFETY: `dev` is a valid and bound device, while `name` is a valid C
+ // string.
+ to_result(unsafe { bindings::devm_regulator_get_enable_optional(dev.as_raw(), name.as_ptr()) })
+}
+
+/// A `struct regulator` abstraction.
+///
+/// # Examples
+///
+/// ## Enabling a regulator
+///
+/// This example uses [`Regulator<Enabled>`], which is suitable for drivers that
+/// enable a regulator at probe time and leave it on until the device is
+/// removed or otherwise shut down.
+///
+/// These users can store [`Regulator<Enabled>`] directly in their driver's
+/// private data struct.
+///
+/// ```
+/// # use kernel::prelude::*;
+/// # use kernel::c_str;
+/// # use kernel::device::Device;
+/// # use kernel::regulator::{Voltage, Regulator, Disabled, Enabled};
+/// fn enable(dev: &Device, min_voltage: Voltage, max_voltage: Voltage) -> Result {
+/// // Obtain a reference to a (fictitious) regulator.
+/// let regulator: Regulator<Disabled> = Regulator::<Disabled>::get(dev, c_str!("vcc"))?;
+///
+/// // The voltage can be set before enabling the regulator if needed, e.g.:
+/// regulator.set_voltage(min_voltage, max_voltage)?;
+///
+/// // The same applies for `get_voltage()`, i.e.:
+/// let voltage: Voltage = regulator.get_voltage()?;
+///
+/// // Enables the regulator, consuming the previous value.
+/// //
+/// // From now on, the regulator is known to be enabled because of the type
+/// // `Enabled`.
+/// //
+/// // If this operation fails, the `Error` will contain the regulator
+/// // reference, so that the operation may be retried.
+/// let regulator: Regulator<Enabled> =
+/// regulator.try_into_enabled().map_err(|error| error.error)?;
+///
+/// // The voltage can also be set after enabling the regulator, e.g.:
+/// regulator.set_voltage(min_voltage, max_voltage)?;
+///
+/// // The same applies for `get_voltage()`, i.e.:
+/// let voltage: Voltage = regulator.get_voltage()?;
+///
+/// // Dropping an enabled regulator will disable it. The refcount will be
+/// // decremented.
+/// drop(regulator);
+///
+/// // ...
+///
+/// Ok(())
+/// }
+/// ```
+///
+/// A more concise shortcut is available for enabling a regulator. This is
+/// equivalent to `regulator_get_enable()`:
+///
+/// ```
+/// # use kernel::prelude::*;
+/// # use kernel::c_str;
+/// # use kernel::device::Device;
+/// # use kernel::regulator::{Voltage, Regulator, Enabled};
+/// fn enable(dev: &Device) -> Result {
+/// // Obtain a reference to a (fictitious) regulator and enable it.
+/// let regulator: Regulator<Enabled> = Regulator::<Enabled>::get(dev, c_str!("vcc"))?;
+///
+/// // Dropping an enabled regulator will disable it. The refcount will be
+/// // decremented.
+/// drop(regulator);
+///
+/// // ...
+///
+/// Ok(())
+/// }
+/// ```
+///
+/// If a driver only cares about the regulator being on for as long as it is
+/// bound to a device, then it should use [`devm_enable`] or
+/// [`devm_enable_optional`].
+/// This should be the default use-case unless more fine-grained control over
+/// the regulator's state is required.
+///
+/// [`devm_enable`]: crate::regulator::devm_enable
+/// [`devm_enable_optional`]: crate::regulator::devm_enable_optional
+///
+/// ```
+/// # use kernel::prelude::*;
+/// # use kernel::c_str;
+/// # use kernel::device::{Bound, Device};
+/// # use kernel::regulator;
+/// fn enable(dev: &Device<Bound>) -> Result {
+/// // Obtain a reference to a (fictitious) regulator and enable it. This
+/// // call only returns whether the operation succeeded.
+/// regulator::devm_enable(dev, c_str!("vcc"))?;
+///
+/// // The regulator will be disabled and put when `dev` is unbound.
+/// Ok(())
+/// }
+/// ```
+///
+/// ## Disabling a regulator
+///
+/// ```
+/// # use kernel::prelude::*;
+/// # use kernel::device::Device;
+/// # use kernel::regulator::{Regulator, Enabled, Disabled};
+/// fn disable(dev: &Device, regulator: Regulator<Enabled>) -> Result {
+///     // We can also disable an enabled regulator without relinquishing our
+/// // refcount:
+/// //
+/// // If this operation fails, the `Error` will contain the regulator
+/// // reference, so that the operation may be retried.
+/// let regulator: Regulator<Disabled> =
+/// regulator.try_into_disabled().map_err(|error| error.error)?;
+///
+/// // The refcount will be decremented when `regulator` is dropped.
+/// drop(regulator);
+///
+/// // ...
+///
+/// Ok(())
+/// }
+/// ```
+///
+/// # Invariants
+///
+/// - `inner` is a non-null wrapper over a pointer to a `struct
+/// regulator` obtained from [`regulator_get()`].
+///
+/// [`regulator_get()`]: https://docs.kernel.org/driver-api/regulator.html#c.regulator_get
+pub struct Regulator<State>
+where
+ State: RegulatorState,
+{
+ inner: NonNull<bindings::regulator>,
+ _phantom: PhantomData<State>,
+}
+
+impl<T: RegulatorState> Regulator<T> {
+ /// Sets the voltage for the regulator.
+ ///
+ /// This can be used to ensure that the device powers up cleanly.
+ pub fn set_voltage(&self, min_voltage: Voltage, max_voltage: Voltage) -> Result {
+ // SAFETY: Safe as per the type invariants of `Regulator`.
+ to_result(unsafe {
+ bindings::regulator_set_voltage(
+ self.inner.as_ptr(),
+ min_voltage.as_microvolts(),
+ max_voltage.as_microvolts(),
+ )
+ })
+ }
+
+ /// Gets the current voltage of the regulator.
+ pub fn get_voltage(&self) -> Result<Voltage> {
+ // SAFETY: Safe as per the type invariants of `Regulator`.
+ let voltage = unsafe { bindings::regulator_get_voltage(self.inner.as_ptr()) };
+
+ to_result(voltage).map(|()| Voltage::from_microvolts(voltage))
+ }
+
+ fn get_internal(dev: &Device, name: &CStr) -> Result<Regulator<T>> {
+        // SAFETY: It is safe to call `regulator_get()` on a device pointer
+ // received from the C code.
+ let inner = from_err_ptr(unsafe { bindings::regulator_get(dev.as_raw(), name.as_ptr()) })?;
+
+ // SAFETY: We can safely trust `inner` to be a pointer to a valid
+ // regulator if `ERR_PTR` was not returned.
+ let inner = unsafe { NonNull::new_unchecked(inner) };
+
+ Ok(Self {
+ inner,
+ _phantom: PhantomData,
+ })
+ }
+
+ fn enable_internal(&self) -> Result {
+ // SAFETY: Safe as per the type invariants of `Regulator`.
+ to_result(unsafe { bindings::regulator_enable(self.inner.as_ptr()) })
+ }
+
+ fn disable_internal(&self) -> Result {
+ // SAFETY: Safe as per the type invariants of `Regulator`.
+ to_result(unsafe { bindings::regulator_disable(self.inner.as_ptr()) })
+ }
+}
+
+impl Regulator<Disabled> {
+ /// Obtains a [`Regulator`] instance from the system.
+ pub fn get(dev: &Device, name: &CStr) -> Result<Self> {
+ Regulator::get_internal(dev, name)
+ }
+
+ /// Attempts to convert the regulator to an enabled state.
+ pub fn try_into_enabled(self) -> Result<Regulator<Enabled>, Error<Disabled>> {
+ // We will be transferring the ownership of our `regulator_get()` count to
+ // `Regulator<Enabled>`.
+ let regulator = ManuallyDrop::new(self);
+
+ regulator
+ .enable_internal()
+ .map(|()| Regulator {
+ inner: regulator.inner,
+ _phantom: PhantomData,
+ })
+ .map_err(|error| Error {
+ error,
+ regulator: ManuallyDrop::into_inner(regulator),
+ })
+ }
+}
+
+impl Regulator<Enabled> {
+ /// Obtains a [`Regulator`] instance from the system and enables it.
+ ///
+ /// This is equivalent to calling `regulator_get_enable()` in the C API.
+ pub fn get(dev: &Device, name: &CStr) -> Result<Self> {
+ Regulator::<Disabled>::get_internal(dev, name)?
+ .try_into_enabled()
+ .map_err(|error| error.error)
+ }
+
+ /// Attempts to convert the regulator to a disabled state.
+ pub fn try_into_disabled(self) -> Result<Regulator<Disabled>, Error<Enabled>> {
+ // We will be transferring the ownership of our `regulator_get()` count
+ // to `Regulator<Disabled>`.
+ let regulator = ManuallyDrop::new(self);
+
+ regulator
+ .disable_internal()
+ .map(|()| Regulator {
+ inner: regulator.inner,
+ _phantom: PhantomData,
+ })
+ .map_err(|error| Error {
+ error,
+ regulator: ManuallyDrop::into_inner(regulator),
+ })
+ }
+}
+
+impl<T: IsEnabled> Regulator<T> {
+ /// Checks if the regulator is enabled.
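+    ///
+    /// # Examples
+    ///
+    /// A short sketch, assuming a fictitious `vcc` supply:
+    ///
+    /// ```
+    /// # use kernel::c_str;
+    /// # use kernel::device::Device;
+    /// # use kernel::prelude::*;
+    /// # use kernel::regulator::{Disabled, Regulator};
+    /// fn check(dev: &Device) -> Result<bool> {
+    ///     let regulator: Regulator<Disabled> = Regulator::<Disabled>::get(dev, c_str!("vcc"))?;
+    ///     // Another consumer may have enabled the underlying regulator.
+    ///     Ok(regulator.is_enabled())
+    /// }
+    /// ```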
+ pub fn is_enabled(&self) -> bool {
+ // SAFETY: Safe as per the type invariants of `Regulator`.
+ unsafe { bindings::regulator_is_enabled(self.inner.as_ptr()) != 0 }
+ }
+}
+
+impl<T: RegulatorState> Drop for Regulator<T> {
+ fn drop(&mut self) {
+ if T::DISABLE_ON_DROP {
+ // SAFETY: By the type invariants, we know that `self` owns a
+ // reference on the enabled refcount, so it is safe to relinquish it
+ // now.
+ unsafe { bindings::regulator_disable(self.inner.as_ptr()) };
+ }
+ // SAFETY: By the type invariants, we know that `self` owns a reference,
+ // so it is safe to relinquish it now.
+ unsafe { bindings::regulator_put(self.inner.as_ptr()) };
+ }
+}
+
+// SAFETY: It is safe to send a `Regulator<T>` across threads. In particular, a
+// Regulator<T> can be dropped from any thread.
+unsafe impl<T: RegulatorState> Send for Regulator<T> {}
+
+// SAFETY: It is safe to send a &Regulator<T> across threads because the C side
+// handles its own locking.
+unsafe impl<T: RegulatorState> Sync for Regulator<T> {}
+
+/// A voltage.
+///
+/// This type represents a voltage value in microvolts.
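+///
+/// # Examples
+///
+/// A short sketch of round-tripping a value through [`Voltage`]:
+///
+/// ```
+/// # use kernel::regulator::Voltage;
+/// // 1.8 V expressed in microvolts.
+/// let v = Voltage::from_microvolts(1_800_000);
+/// assert_eq!(v.as_microvolts(), 1_800_000);
+/// ```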
+#[repr(transparent)]
+#[derive(Copy, Clone, PartialEq, Eq)]
+pub struct Voltage(i32);
+
+impl Voltage {
+ /// Creates a new `Voltage` from a value in microvolts.
+ pub fn from_microvolts(uv: i32) -> Self {
+ Self(uv)
+ }
+
+ /// Returns the value of the voltage in microvolts as an [`i32`].
+ pub fn as_microvolts(self) -> i32 {
+ self.0
+ }
+}
diff --git a/rust/kernel/revocable.rs b/rust/kernel/revocable.rs
index 06a3cdfce344..0f4ae673256d 100644
--- a/rust/kernel/revocable.rs
+++ b/rust/kernel/revocable.rs
@@ -5,6 +5,8 @@
//! The [`Revocable`] type wraps other types and allows access to them to be revoked. The existence
//! of a [`RevocableGuard`] ensures that objects remain valid.
+use pin_init::Wrapper;
+
use crate::{bindings, prelude::*, sync::rcu, types::Opaque};
use core::{
marker::PhantomData,
@@ -80,11 +82,11 @@ unsafe impl<T: Sync + Send> Sync for Revocable<T> {}
impl<T> Revocable<T> {
/// Creates a new revocable instance of the given data.
- pub fn new(data: impl PinInit<T>) -> impl PinInit<Self> {
- pin_init!(Self {
+ pub fn new<E>(data: impl PinInit<T, E>) -> impl PinInit<Self, E> {
+ try_pin_init!(Self {
is_available: AtomicBool::new(true),
data <- Opaque::pin_init(data),
- })
+ }? E)
}
/// Tries to access the revocable wrapped object.
@@ -231,6 +233,10 @@ impl<T> PinnedDrop for Revocable<T> {
///
/// The RCU read-side lock is held while the guard is alive.
pub struct RevocableGuard<'a, T> {
+    // This can't use the `&'a T` type because references that appear in function arguments must
+    // not become dangling during the execution of the function. That could happen if the
+    // `RevocableGuard` is passed as a function argument and then dropped while the function is
+    // still executing.
data_ref: *const T,
_rcu_guard: rcu::Guard,
_p: PhantomData<&'a ()>,
diff --git a/rust/kernel/scatterlist.rs b/rust/kernel/scatterlist.rs
new file mode 100644
index 000000000000..9709dff60b5a
--- /dev/null
+++ b/rust/kernel/scatterlist.rs
@@ -0,0 +1,491 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Abstractions for scatter-gather lists.
+//!
+//! C header: [`include/linux/scatterlist.h`](srctree/include/linux/scatterlist.h)
+//!
+//! Scatter-gather (SG) I/O is a memory access technique that allows devices to perform DMA
+//! operations on data buffers that are not physically contiguous in memory. It works by creating a
+//! "scatter-gather list", an array where each entry specifies the address and length of a
+//! physically contiguous memory segment.
+//!
+//! The device's DMA controller can then read this list and process the segments sequentially as
+//! part of one logical I/O request. This avoids the need for a single, large, physically contiguous
+//! memory buffer, which can be difficult or impossible to allocate.
+//!
+//! This module provides safe Rust abstractions over the kernel's `struct scatterlist` and
+//! `struct sg_table` types.
+//!
+//! The main entry point is the [`SGTable`] type, which represents a complete scatter-gather table.
+//! It can be either:
+//!
+//! - An owned table ([`SGTable<Owned<P>>`]), created from a Rust memory buffer (e.g., [`VVec`]).
+//! This type manages the allocation of the `struct sg_table`, the DMA mapping of the buffer, and
+//! the automatic cleanup of all resources.
+//! - A borrowed reference (&[`SGTable`]), which provides safe, read-only access to a table that was
+//! allocated by other (e.g., C) code.
+//!
+//! Individual entries in the table are represented by [`SGEntry`], which can be accessed by
+//! iterating over an [`SGTable`].
+
+use crate::{
+ alloc,
+ alloc::allocator::VmallocPageIter,
+ bindings,
+ device::{Bound, Device},
+ devres::Devres,
+ dma, error,
+ io::resource::ResourceSize,
+ page,
+ prelude::*,
+ types::{ARef, Opaque},
+};
+use core::{ops::Deref, ptr::NonNull};
+
+/// A single entry in a scatter-gather list.
+///
+/// An `SGEntry` represents a single, physically contiguous segment of memory that has been mapped
+/// for DMA.
+///
+/// Instances of this struct are obtained by iterating over an [`SGTable`]. Drivers do not create
+/// or own [`SGEntry`] objects directly.
+#[repr(transparent)]
+pub struct SGEntry(Opaque<bindings::scatterlist>);
+
+// SAFETY: `SGEntry` can be sent to any task.
+unsafe impl Send for SGEntry {}
+
+// SAFETY: `SGEntry` has no interior mutability and can be accessed concurrently.
+unsafe impl Sync for SGEntry {}
+
+impl SGEntry {
+ /// Convert a raw `struct scatterlist *` to a `&'a SGEntry`.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that the `struct scatterlist` pointed to by `ptr` is valid for the
+ /// lifetime `'a`.
+ #[inline]
+ unsafe fn from_raw<'a>(ptr: *mut bindings::scatterlist) -> &'a Self {
+ // SAFETY: The safety requirements of this function guarantee that `ptr` is a valid pointer
+ // to a `struct scatterlist` for the duration of `'a`.
+ unsafe { &*ptr.cast() }
+ }
+
+ /// Obtain the raw `struct scatterlist *`.
+ #[inline]
+ fn as_raw(&self) -> *mut bindings::scatterlist {
+ self.0.get()
+ }
+
+ /// Returns the DMA address of this SG entry.
+ ///
+ /// This is the address that the device should use to access the memory segment.
+ #[inline]
+ pub fn dma_address(&self) -> dma::DmaAddress {
+ // SAFETY: `self.as_raw()` is a valid pointer to a `struct scatterlist`.
+ unsafe { bindings::sg_dma_address(self.as_raw()) }
+ }
+
+ /// Returns the length of this SG entry in bytes.
+ #[inline]
+ pub fn dma_len(&self) -> ResourceSize {
+ #[allow(clippy::useless_conversion)]
+ // SAFETY: `self.as_raw()` is a valid pointer to a `struct scatterlist`.
+ unsafe { bindings::sg_dma_len(self.as_raw()) }.into()
+ }
+}
+
+/// The borrowed generic type of an [`SGTable`], representing a borrowed or externally managed
+/// table.
+#[repr(transparent)]
+pub struct Borrowed(Opaque<bindings::sg_table>);
+
+// SAFETY: `Borrowed` can be sent to any task.
+unsafe impl Send for Borrowed {}
+
+// SAFETY: `Borrowed` has no interior mutability and can be accessed concurrently.
+unsafe impl Sync for Borrowed {}
+
+/// A scatter-gather table.
+///
+/// This struct is a wrapper around the kernel's `struct sg_table`. It manages a list of DMA-mapped
+/// memory segments that can be passed to a device for I/O operations.
+///
+/// The generic parameter `T` distinguishes between owned and borrowed tables.
+///
+/// - [`SGTable<Owned>`]: An owned table created and managed entirely by Rust code. It handles
+/// allocation, DMA mapping, and cleanup of all associated resources. See [`SGTable::new`].
+/// - [`SGTable<Borrowed>`] (or simply [`SGTable`]): Represents a table whose lifetime is managed
+/// externally. It can be used safely via a borrowed reference `&'a SGTable`, where `'a` is the
+/// external lifetime.
+///
+/// Both [`SGTable`] variants can be iterated over, yielding the individual [`SGEntry`] items.
+#[repr(transparent)]
+#[pin_data]
+pub struct SGTable<T: private::Sealed = Borrowed> {
+ #[pin]
+ inner: T,
+}
+
+impl SGTable {
+ /// Creates a borrowed `&'a SGTable` from a raw `struct sg_table` pointer.
+ ///
+ /// This allows safe access to an `sg_table` that is managed elsewhere (for example, in C code).
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that:
+ ///
+ /// - the `struct sg_table` pointed to by `ptr` is valid for the entire lifetime of `'a`,
+ /// - the data behind `ptr` is not modified concurrently for the duration of `'a`.
+ #[inline]
+ pub unsafe fn from_raw<'a>(ptr: *mut bindings::sg_table) -> &'a Self {
+ // SAFETY: The safety requirements of this function guarantee that `ptr` is a valid pointer
+ // to a `struct sg_table` for the duration of `'a`.
+ unsafe { &*ptr.cast() }
+ }
+
+ #[inline]
+ fn as_raw(&self) -> *mut bindings::sg_table {
+ self.inner.0.get()
+ }
+
+ /// Returns an [`SGTableIter`] bound to the lifetime of `self`.
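+    ///
+    /// # Examples
+    ///
+    /// A short sketch of walking the DMA mapped segments of a borrowed table:
+    ///
+    /// ```
+    /// # use kernel::scatterlist::SGTable;
+    /// fn dump(sgt: &SGTable) {
+    ///     for entry in sgt.iter() {
+    ///         // Each entry is one physically contiguous, DMA mapped segment.
+    ///         let _addr = entry.dma_address();
+    ///         let _len = entry.dma_len();
+    ///     }
+    /// }
+    /// ```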
+ pub fn iter(&self) -> SGTableIter<'_> {
+ // SAFETY: `self.as_raw()` is a valid pointer to a `struct sg_table`.
+ let nents = unsafe { (*self.as_raw()).nents };
+
+ let pos = if nents > 0 {
+ // SAFETY: `self.as_raw()` is a valid pointer to a `struct sg_table`.
+ let ptr = unsafe { (*self.as_raw()).sgl };
+
+ // SAFETY: `ptr` is guaranteed to be a valid pointer to a `struct scatterlist`.
+ Some(unsafe { SGEntry::from_raw(ptr) })
+ } else {
+ None
+ };
+
+ SGTableIter { pos, nents }
+ }
+}
+
+/// Represents the DMA mapping state of a `struct sg_table`.
+///
+/// This is used as an inner type of [`Owned`] to manage the DMA mapping lifecycle.
+///
+/// # Invariants
+///
+/// - `sgt` is a valid pointer to a `struct sg_table` for the entire lifetime of the
+/// [`DmaMappedSgt`].
+/// - `sgt` is always DMA mapped.
+struct DmaMappedSgt {
+ sgt: NonNull<bindings::sg_table>,
+ dev: ARef<Device>,
+ dir: dma::DataDirection,
+}
+
+// SAFETY: `DmaMappedSgt` can be sent to any task.
+unsafe impl Send for DmaMappedSgt {}
+
+// SAFETY: `DmaMappedSgt` has no interior mutability and can be accessed concurrently.
+unsafe impl Sync for DmaMappedSgt {}
+
+impl DmaMappedSgt {
+ /// # Safety
+ ///
+ /// - `sgt` must be a valid pointer to a `struct sg_table` for the entire lifetime of the
+ /// returned [`DmaMappedSgt`].
+ /// - The caller must guarantee that `sgt` remains DMA mapped for the entire lifetime of
+ /// [`DmaMappedSgt`].
+ unsafe fn new(
+ sgt: NonNull<bindings::sg_table>,
+ dev: &Device<Bound>,
+ dir: dma::DataDirection,
+ ) -> Result<Self> {
+ // SAFETY:
+ // - `dev.as_raw()` is a valid pointer to a `struct device`, which is guaranteed to be
+ // bound to a driver for the duration of this call.
+ // - `sgt` is a valid pointer to a `struct sg_table`.
+ error::to_result(unsafe {
+ bindings::dma_map_sgtable(dev.as_raw(), sgt.as_ptr(), dir.into(), 0)
+ })?;
+
+ // INVARIANT: By the safety requirements of this function it is guaranteed that `sgt` is
+ // valid for the entire lifetime of this object instance.
+ Ok(Self {
+ sgt,
+ dev: dev.into(),
+ dir,
+ })
+ }
+}
+
+impl Drop for DmaMappedSgt {
+ #[inline]
+ fn drop(&mut self) {
+ // SAFETY:
+ // - `self.dev.as_raw()` is a pointer to a valid `struct device`.
+ // - `self.dev` is the same device the mapping has been created for in `Self::new()`.
+ // - `self.sgt.as_ptr()` is a valid pointer to a `struct sg_table` by the type invariants
+ // of `Self`.
+ // - `self.dir` is the same `dma::DataDirection` the mapping has been created with in
+ // `Self::new()`.
+ unsafe {
+ bindings::dma_unmap_sgtable(self.dev.as_raw(), self.sgt.as_ptr(), self.dir.into(), 0)
+ };
+ }
+}
+
+/// A transparent wrapper around a `struct sg_table`.
+///
+/// While we could also create the `struct sg_table` in the constructor of [`Owned`], we can't tear
+/// it down in [`Owned::drop`], which would run before the DMA mapping is unmapped; the field drop
+/// order in [`Owned`] matters, so the teardown lives in this wrapper's [`Drop`] implementation.
+#[repr(transparent)]
+struct RawSGTable(Opaque<bindings::sg_table>);
+
+// SAFETY: `RawSGTable` can be sent to any task.
+unsafe impl Send for RawSGTable {}
+
+// SAFETY: `RawSGTable` has no interior mutability and can be accessed concurrently.
+unsafe impl Sync for RawSGTable {}
+
+impl RawSGTable {
+ /// # Safety
+ ///
+ /// - `pages` must be a slice of valid `struct page *`.
+ /// - The pages pointed to by `pages` must remain valid for the entire lifetime of the returned
+ /// [`RawSGTable`].
+ unsafe fn new(
+ pages: &mut [*mut bindings::page],
+ size: usize,
+ max_segment: u32,
+ flags: alloc::Flags,
+ ) -> Result<Self> {
+ // `sg_alloc_table_from_pages_segment()` expects at least one page, otherwise it
+        // produces a NULL pointer dereference.
+ if pages.is_empty() {
+ return Err(EINVAL);
+ }
+
+ let sgt = Opaque::zeroed();
+ // SAFETY:
+ // - `sgt.get()` is a valid pointer to uninitialized memory.
+ // - As by the check above, `pages` is not empty.
+ error::to_result(unsafe {
+ bindings::sg_alloc_table_from_pages_segment(
+ sgt.get(),
+ pages.as_mut_ptr(),
+ pages.len().try_into()?,
+ 0,
+ size,
+ max_segment,
+ flags.as_raw(),
+ )
+ })?;
+
+ Ok(Self(sgt))
+ }
+
+ #[inline]
+ fn as_raw(&self) -> *mut bindings::sg_table {
+ self.0.get()
+ }
+}
+
+impl Drop for RawSGTable {
+ #[inline]
+ fn drop(&mut self) {
+ // SAFETY: `sgt` is a valid and initialized `struct sg_table`.
+ unsafe { bindings::sg_free_table(self.0.get()) };
+ }
+}
+
+/// The [`Owned`] generic type of an [`SGTable`].
+///
+/// An [`SGTable<Owned>`] signifies that the [`SGTable`] owns all associated resources:
+///
+/// - The backing memory pages.
+/// - The `struct sg_table` allocation (`sgt`).
+/// - The DMA mapping, handled via a [`Devres`]-managed `DmaMappedSgt`.
+///
+/// Users interact with this type through the [`SGTable`] handle and do not need to manage
+/// [`Owned`] directly.
+#[pin_data]
+pub struct Owned<P> {
+ // Note: The drop order is relevant; we first have to unmap the `struct sg_table`, then free the
+ // `struct sg_table` and finally free the backing pages.
+ #[pin]
+ dma: Devres<DmaMappedSgt>,
+ sgt: RawSGTable,
+ _pages: P,
+}
+
+// SAFETY: `Owned` can be sent to any task if `P` can be sent to any task.
+unsafe impl<P: Send> Send for Owned<P> {}
+
+// SAFETY: `Owned` has no interior mutability and can be accessed concurrently if `P` can be
+// accessed concurrently.
+unsafe impl<P: Sync> Sync for Owned<P> {}
+
+impl<P> Owned<P>
+where
+ for<'a> P: page::AsPageIter<Iter<'a> = VmallocPageIter<'a>> + 'static,
+{
+ fn new(
+ dev: &Device<Bound>,
+ mut pages: P,
+ dir: dma::DataDirection,
+ flags: alloc::Flags,
+ ) -> Result<impl PinInit<Self, Error> + '_> {
+ let page_iter = pages.page_iter();
+ let size = page_iter.size();
+
+ let mut page_vec: KVec<*mut bindings::page> =
+ KVec::with_capacity(page_iter.page_count(), flags)?;
+
+ for page in page_iter {
+ page_vec.push(page.as_ptr(), flags)?;
+ }
+
+ // `dma_max_mapping_size` returns `size_t`, but `sg_alloc_table_from_pages_segment()` takes
+ // an `unsigned int`.
+ //
+ // SAFETY: `dev.as_raw()` is a valid pointer to a `struct device`.
+ let max_segment = match unsafe { bindings::dma_max_mapping_size(dev.as_raw()) } {
+ 0 => u32::MAX,
+ max_segment => u32::try_from(max_segment).unwrap_or(u32::MAX),
+ };
+
+ Ok(try_pin_init!(&this in Self {
+ // SAFETY:
+ // - `page_vec` is a `KVec` of valid `struct page *` obtained from `pages`.
+ // - The pages contained in `pages` remain valid for the entire lifetime of the
+ // `RawSGTable`.
+ sgt: unsafe { RawSGTable::new(&mut page_vec, size, max_segment, flags) }?,
+ dma <- {
+ // SAFETY: `this` is a valid pointer to uninitialized memory.
+ let sgt = unsafe { &raw mut (*this.as_ptr()).sgt }.cast();
+
+ // SAFETY: `sgt` is guaranteed to be non-null.
+ let sgt = unsafe { NonNull::new_unchecked(sgt) };
+
+ // SAFETY:
+ // - It is guaranteed that the object returned by `DmaMappedSgt::new` won't out-live
+ // `sgt`.
+ // - `sgt` is never DMA unmapped manually.
+ Devres::new(dev, unsafe { DmaMappedSgt::new(sgt, dev, dir) })
+ },
+ _pages: pages,
+ }))
+ }
+}
+
+impl<P> SGTable<Owned<P>>
+where
+ for<'a> P: page::AsPageIter<Iter<'a> = VmallocPageIter<'a>> + 'static,
+{
+ /// Allocates a new scatter-gather table from the given pages and maps it for DMA.
+ ///
+ /// This constructor creates a new [`SGTable<Owned>`] that takes ownership of `P`.
+ /// It allocates a `struct sg_table`, populates it with entries corresponding to the physical
+ /// pages of `P`, and maps the table for DMA with the specified [`Device`] and
+ /// [`dma::DataDirection`].
+ ///
+ /// The DMA mapping is managed through [`Devres`], ensuring that the DMA mapping is unmapped
+ /// once the associated [`Device`] is unbound, or when the [`SGTable<Owned>`] is dropped.
+ ///
+ /// # Parameters
+ ///
+ /// * `dev`: The [`Device`] that will be performing the DMA.
+ /// * `pages`: The entity providing the backing pages. It must implement [`page::AsPageIter`].
+ /// The ownership of this entity is moved into the new [`SGTable<Owned>`].
+ /// * `dir`: The [`dma::DataDirection`] of the DMA transfer.
+ /// * `flags`: Allocation flags for internal allocations (e.g., [`GFP_KERNEL`]).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use kernel::{
+ /// device::{Bound, Device},
+ /// dma, page,
+ /// prelude::*,
+ /// scatterlist::{SGTable, Owned},
+ /// };
+ ///
+ /// fn test(dev: &Device<Bound>) -> Result {
+ /// let size = 4 * page::PAGE_SIZE;
+ /// let pages = VVec::<u8>::with_capacity(size, GFP_KERNEL)?;
+ ///
+ /// let sgt = KBox::pin_init(SGTable::new(
+ /// dev,
+ /// pages,
+ /// dma::DataDirection::ToDevice,
+ /// GFP_KERNEL,
+ /// ), GFP_KERNEL)?;
+ ///
+ /// Ok(())
+ /// }
+ /// ```
+ pub fn new(
+ dev: &Device<Bound>,
+ pages: P,
+ dir: dma::DataDirection,
+ flags: alloc::Flags,
+ ) -> impl PinInit<Self, Error> + '_ {
+ try_pin_init!(Self {
+ inner <- Owned::new(dev, pages, dir, flags)?
+ })
+ }
+}
+
+impl<P> Deref for SGTable<Owned<P>> {
+ type Target = SGTable;
+
+ #[inline]
+ fn deref(&self) -> &Self::Target {
+ // SAFETY:
+ // - `self.inner.sgt.as_raw()` is a valid pointer to a `struct sg_table` for the entire
+ // lifetime of `self`.
+ // - The backing `struct sg_table` is not modified for the entire lifetime of `self`.
+ unsafe { SGTable::from_raw(self.inner.sgt.as_raw()) }
+ }
+}
+
+mod private {
+ pub trait Sealed {}
+
+ impl Sealed for super::Borrowed {}
+ impl<P> Sealed for super::Owned<P> {}
+}
+
+/// An [`Iterator`] over the DMA mapped [`SGEntry`] items of an [`SGTable`].
+///
+/// Note that the existence of an [`SGTableIter`] does not guarantee that the [`SGEntry`] items
+/// actually remain DMA mapped; they may be unmapped when the device is unbound.
+pub struct SGTableIter<'a> {
+ pos: Option<&'a SGEntry>,
+    /// The number of remaining DMA mapped entries in the iteration.
+ nents: c_uint,
+}
+
+impl<'a> Iterator for SGTableIter<'a> {
+ type Item = &'a SGEntry;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ let entry = self.pos?;
+ self.nents = self.nents.saturating_sub(1);
+
+ // SAFETY: `entry.as_raw()` is a valid pointer to a `struct scatterlist`.
+ let next = unsafe { bindings::sg_next(entry.as_raw()) };
+
+ self.pos = (!next.is_null() && self.nents > 0).then(|| {
+ // SAFETY: If `next` is not NULL, `sg_next()` guarantees to return a valid pointer to
+ // the next `struct scatterlist`.
+ unsafe { SGEntry::from_raw(next) }
+ });
+
+ Some(entry)
+ }
+}
diff --git a/rust/kernel/security.rs b/rust/kernel/security.rs
index 0c63e9e7e564..9d271695265f 100644
--- a/rust/kernel/security.rs
+++ b/rust/kernel/security.rs
@@ -8,9 +8,46 @@
use crate::{
bindings,
+ cred::Credential,
error::{to_result, Result},
+ fs::File,
};
+/// Calls the security modules to determine if the given task can become the manager of a binder
+/// context.
+#[inline]
+pub fn binder_set_context_mgr(mgr: &Credential) -> Result {
+    // SAFETY: `mgr` is valid because the shared reference guarantees a nonzero refcount.
+ to_result(unsafe { bindings::security_binder_set_context_mgr(mgr.as_ptr()) })
+}
+
+/// Calls the security modules to determine if binder transactions are allowed from task `from` to
+/// task `to`.
+#[inline]
+pub fn binder_transaction(from: &Credential, to: &Credential) -> Result {
+ // SAFETY: `from` and `to` are valid because the shared references guarantee nonzero refcounts.
+ to_result(unsafe { bindings::security_binder_transaction(from.as_ptr(), to.as_ptr()) })
+}
+
+/// Calls the security modules to determine if task `from` is allowed to send binder objects
+/// (owned by itself or other processes) to task `to` through a binder transaction.
+#[inline]
+pub fn binder_transfer_binder(from: &Credential, to: &Credential) -> Result {
+ // SAFETY: `from` and `to` are valid because the shared references guarantee nonzero refcounts.
+ to_result(unsafe { bindings::security_binder_transfer_binder(from.as_ptr(), to.as_ptr()) })
+}
+
+/// Calls the security modules to determine if task `from` is allowed to send the given file to
+/// task `to` (which would get its own file descriptor) through a binder transaction.
+#[inline]
+pub fn binder_transfer_file(from: &Credential, to: &Credential, file: &File) -> Result {
+ // SAFETY: `from`, `to` and `file` are valid because the shared references guarantee nonzero
+ // refcounts.
+ to_result(unsafe {
+ bindings::security_binder_transfer_file(from.as_ptr(), to.as_ptr(), file.as_ptr())
+ })
+}
+
/// A security context string.
///
/// # Invariants
diff --git a/rust/kernel/seq_file.rs b/rust/kernel/seq_file.rs
index 7a9403eb6e5b..59fbfc2473f8 100644
--- a/rust/kernel/seq_file.rs
+++ b/rust/kernel/seq_file.rs
@@ -4,7 +4,7 @@
//!
//! C header: [`include/linux/seq_file.h`](srctree/include/linux/seq_file.h)
-use crate::{bindings, c_str, types::NotThreadSafe, types::Opaque};
+use crate::{bindings, c_str, fmt, types::NotThreadSafe, types::Opaque};
/// A utility for generating the contents of a seq file.
#[repr(transparent)]
@@ -31,13 +31,13 @@ impl SeqFile {
/// Used by the [`seq_print`] macro.
#[inline]
- pub fn call_printf(&self, args: core::fmt::Arguments<'_>) {
+ pub fn call_printf(&self, args: fmt::Arguments<'_>) {
// SAFETY: Passing a void pointer to `Arguments` is valid for `%pA`.
unsafe {
bindings::seq_printf(
self.inner.get(),
c_str!("%pA").as_char_ptr(),
- &args as *const _ as *const crate::ffi::c_void,
+ core::ptr::from_ref(&args).cast::<crate::ffi::c_void>(),
);
}
}
@@ -47,7 +47,7 @@ impl SeqFile {
#[macro_export]
macro_rules! seq_print {
($m:expr, $($arg:tt)+) => (
- $m.call_printf(format_args!($($arg)+))
+ $m.call_printf($crate::prelude::fmt!($($arg)+))
);
}
pub use seq_print;
diff --git a/rust/kernel/sizes.rs b/rust/kernel/sizes.rs
index 834c343e4170..661e680d9330 100644
--- a/rust/kernel/sizes.rs
+++ b/rust/kernel/sizes.rs
@@ -24,3 +24,27 @@ pub const SZ_128K: usize = bindings::SZ_128K as usize;
pub const SZ_256K: usize = bindings::SZ_256K as usize;
/// 0x00080000
pub const SZ_512K: usize = bindings::SZ_512K as usize;
+/// 0x00100000
+pub const SZ_1M: usize = bindings::SZ_1M as usize;
+/// 0x00200000
+pub const SZ_2M: usize = bindings::SZ_2M as usize;
+/// 0x00400000
+pub const SZ_4M: usize = bindings::SZ_4M as usize;
+/// 0x00800000
+pub const SZ_8M: usize = bindings::SZ_8M as usize;
+/// 0x01000000
+pub const SZ_16M: usize = bindings::SZ_16M as usize;
+/// 0x02000000
+pub const SZ_32M: usize = bindings::SZ_32M as usize;
+/// 0x04000000
+pub const SZ_64M: usize = bindings::SZ_64M as usize;
+/// 0x08000000
+pub const SZ_128M: usize = bindings::SZ_128M as usize;
+/// 0x10000000
+pub const SZ_256M: usize = bindings::SZ_256M as usize;
+/// 0x20000000
+pub const SZ_512M: usize = bindings::SZ_512M as usize;
+/// 0x40000000
+pub const SZ_1G: usize = bindings::SZ_1G as usize;
+/// 0x80000000
+pub const SZ_2G: usize = bindings::SZ_2G as usize;
diff --git a/rust/kernel/str.rs b/rust/kernel/str.rs
index a927db8e079c..5c74e5f77601 100644
--- a/rust/kernel/str.rs
+++ b/rust/kernel/str.rs
@@ -2,11 +2,16 @@
//! String representations.
-use crate::alloc::{flags::*, AllocError, KVec};
-use core::fmt::{self, Write};
-use core::ops::{self, Deref, DerefMut, Index};
-
-use crate::prelude::*;
+use crate::{
+ alloc::{flags::*, AllocError, KVec},
+ error::{to_result, Result},
+ fmt::{self, Write},
+ prelude::*,
+};
+use core::{
+ marker::PhantomData,
+ ops::{self, Deref, DerefMut, Index},
+};
/// Byte string without UTF-8 validity guarantee.
#[repr(transparent)]
@@ -29,7 +34,7 @@ impl BStr {
#[inline]
pub const fn from_bytes(bytes: &[u8]) -> &Self {
// SAFETY: `BStr` is transparent to `[u8]`.
- unsafe { &*(bytes as *const [u8] as *const BStr) }
+ unsafe { &*(core::ptr::from_ref(bytes) as *const BStr) }
}
/// Strip a prefix from `self`. Delegates to [`slice::strip_prefix`].
@@ -54,14 +59,14 @@ impl fmt::Display for BStr {
/// Formats printable ASCII characters, escaping the rest.
///
/// ```
- /// # use kernel::{fmt, b_str, str::{BStr, CString}};
+ /// # use kernel::{prelude::fmt, b_str, str::{BStr, CString}};
/// let ascii = b_str!("Hello, BStr!");
- /// let s = CString::try_from_fmt(fmt!("{}", ascii))?;
- /// assert_eq!(s.as_bytes(), "Hello, BStr!".as_bytes());
+ /// let s = CString::try_from_fmt(fmt!("{ascii}"))?;
+ /// assert_eq!(s.to_bytes(), "Hello, BStr!".as_bytes());
///
/// let non_ascii = b_str!("🦀");
- /// let s = CString::try_from_fmt(fmt!("{}", non_ascii))?;
- /// assert_eq!(s.as_bytes(), "\\xf0\\x9f\\xa6\\x80".as_bytes());
+ /// let s = CString::try_from_fmt(fmt!("{non_ascii}"))?;
+ /// assert_eq!(s.to_bytes(), "\\xf0\\x9f\\xa6\\x80".as_bytes());
/// # Ok::<(), kernel::error::Error>(())
/// ```
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
@@ -85,15 +90,15 @@ impl fmt::Debug for BStr {
/// escaping the rest.
///
/// ```
- /// # use kernel::{fmt, b_str, str::{BStr, CString}};
+ /// # use kernel::{prelude::fmt, b_str, str::{BStr, CString}};
/// // Embedded double quotes are escaped.
/// let ascii = b_str!("Hello, \"BStr\"!");
- /// let s = CString::try_from_fmt(fmt!("{:?}", ascii))?;
- /// assert_eq!(s.as_bytes(), "\"Hello, \\\"BStr\\\"!\"".as_bytes());
+ /// let s = CString::try_from_fmt(fmt!("{ascii:?}"))?;
+ /// assert_eq!(s.to_bytes(), "\"Hello, \\\"BStr\\\"!\"".as_bytes());
///
/// let non_ascii = b_str!("😺");
- /// let s = CString::try_from_fmt(fmt!("{:?}", non_ascii))?;
- /// assert_eq!(s.as_bytes(), "\"\\xf0\\x9f\\x98\\xba\"".as_bytes());
+ /// let s = CString::try_from_fmt(fmt!("{non_ascii:?}"))?;
+ /// assert_eq!(s.to_bytes(), "\"\\xf0\\x9f\\x98\\xba\"".as_bytes());
/// # Ok::<(), kernel::error::Error>(())
/// ```
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
@@ -175,6 +180,15 @@ macro_rules! b_str {
}};
}
+/// Returns a C pointer to the string.
+// It is a free function rather than a method on an extension trait because:
+//
+// - error[E0379]: functions in trait impls cannot be declared const
+#[inline]
+pub const fn as_char_ptr_in_const_context(c_str: &CStr) -> *const c_char {
+ c_str.0.as_ptr()
+}
+
/// Possible errors when using conversion functions in [`CStr`].
#[derive(Debug, Clone, Copy)]
pub enum CStrConvertError {
@@ -232,12 +246,12 @@ impl CStr {
/// last at least `'a`. When `CStr` is alive, the memory pointed by `ptr`
/// must not be mutated.
#[inline]
- pub unsafe fn from_char_ptr<'a>(ptr: *const crate::ffi::c_char) -> &'a Self {
+ pub unsafe fn from_char_ptr<'a>(ptr: *const c_char) -> &'a Self {
// SAFETY: The safety precondition guarantees `ptr` is a valid pointer
// to a `NUL`-terminated C string.
let len = unsafe { bindings::strlen(ptr) } + 1;
// SAFETY: Lifetime guaranteed by the safety precondition.
- let bytes = unsafe { core::slice::from_raw_parts(ptr as _, len) };
+ let bytes = unsafe { core::slice::from_raw_parts(ptr.cast(), len) };
// SAFETY: As `len` is returned by `strlen`, `bytes` does not contain interior `NUL`.
// As we have added 1 to `len`, the last byte is known to be `NUL`.
unsafe { Self::from_bytes_with_nul_unchecked(bytes) }
@@ -290,27 +304,49 @@ impl CStr {
#[inline]
pub unsafe fn from_bytes_with_nul_unchecked_mut(bytes: &mut [u8]) -> &mut CStr {
// SAFETY: Properties of `bytes` guaranteed by the safety precondition.
- unsafe { &mut *(bytes as *mut [u8] as *mut CStr) }
+ unsafe { &mut *(core::ptr::from_mut(bytes) as *mut CStr) }
}
/// Returns a C pointer to the string.
+ ///
+ /// Using this function in a const context is deprecated in favor of
+ /// [`as_char_ptr_in_const_context`] in preparation for replacing `CStr` with `core::ffi::CStr`
+ /// which does not have this method.
#[inline]
- pub const fn as_char_ptr(&self) -> *const crate::ffi::c_char {
- self.0.as_ptr()
+ pub const fn as_char_ptr(&self) -> *const c_char {
+ as_char_ptr_in_const_context(self)
}
/// Convert the string to a byte slice without the trailing `NUL` byte.
#[inline]
- pub fn as_bytes(&self) -> &[u8] {
+ pub fn to_bytes(&self) -> &[u8] {
&self.0[..self.len()]
}
+ /// Convert the string to a byte slice without the trailing `NUL` byte.
+ ///
+ /// This function is deprecated in favor of [`Self::to_bytes`] in preparation for replacing
+ /// `CStr` with `core::ffi::CStr` which does not have this method.
+ #[inline]
+ pub fn as_bytes(&self) -> &[u8] {
+ self.to_bytes()
+ }
+
/// Convert the string to a byte slice containing the trailing `NUL` byte.
#[inline]
- pub const fn as_bytes_with_nul(&self) -> &[u8] {
+ pub const fn to_bytes_with_nul(&self) -> &[u8] {
&self.0
}
+ /// Convert the string to a byte slice containing the trailing `NUL` byte.
+ ///
+ /// This function is deprecated in favor of [`Self::to_bytes_with_nul`] in preparation for
+ /// replacing `CStr` with `core::ffi::CStr` which does not have this method.
+ #[inline]
+ pub const fn as_bytes_with_nul(&self) -> &[u8] {
+ self.to_bytes_with_nul()
+ }
+
/// Yields a [`&str`] slice if the [`CStr`] contains valid UTF-8.
///
/// If the contents of the [`CStr`] are valid UTF-8 data, this
@@ -429,20 +465,20 @@ impl fmt::Display for CStr {
///
/// ```
/// # use kernel::c_str;
- /// # use kernel::fmt;
+ /// # use kernel::prelude::fmt;
/// # use kernel::str::CStr;
/// # use kernel::str::CString;
/// let penguin = c_str!("🐧");
- /// let s = CString::try_from_fmt(fmt!("{}", penguin))?;
- /// assert_eq!(s.as_bytes_with_nul(), "\\xf0\\x9f\\x90\\xa7\0".as_bytes());
+ /// let s = CString::try_from_fmt(fmt!("{penguin}"))?;
+ /// assert_eq!(s.to_bytes_with_nul(), "\\xf0\\x9f\\x90\\xa7\0".as_bytes());
///
/// let ascii = c_str!("so \"cool\"");
- /// let s = CString::try_from_fmt(fmt!("{}", ascii))?;
- /// assert_eq!(s.as_bytes_with_nul(), "so \"cool\"\0".as_bytes());
+ /// let s = CString::try_from_fmt(fmt!("{ascii}"))?;
+ /// assert_eq!(s.to_bytes_with_nul(), "so \"cool\"\0".as_bytes());
/// # Ok::<(), kernel::error::Error>(())
/// ```
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- for &c in self.as_bytes() {
+ for &c in self.to_bytes() {
if (0x20..0x7f).contains(&c) {
// Printable character.
f.write_char(c as char)?;
@@ -459,16 +495,16 @@ impl fmt::Debug for CStr {
///
/// ```
/// # use kernel::c_str;
- /// # use kernel::fmt;
+ /// # use kernel::prelude::fmt;
/// # use kernel::str::CStr;
/// # use kernel::str::CString;
/// let penguin = c_str!("🐧");
- /// let s = CString::try_from_fmt(fmt!("{:?}", penguin))?;
+ /// let s = CString::try_from_fmt(fmt!("{penguin:?}"))?;
/// assert_eq!(s.as_bytes_with_nul(), "\"\\xf0\\x9f\\x90\\xa7\"\0".as_bytes());
///
/// // Embedded double quotes are escaped.
/// let ascii = c_str!("so \"cool\"");
- /// let s = CString::try_from_fmt(fmt!("{:?}", ascii))?;
+ /// let s = CString::try_from_fmt(fmt!("{ascii:?}"))?;
/// assert_eq!(s.as_bytes_with_nul(), "\"so \\\"cool\\\"\"\0".as_bytes());
/// # Ok::<(), kernel::error::Error>(())
/// ```
@@ -578,7 +614,7 @@ mod tests {
macro_rules! format {
($($f:tt)*) => ({
- CString::try_from_fmt(::kernel::fmt!($($f)*))?.to_str()?
+ CString::try_from_fmt(fmt!($($f)*))?.to_str()?
})
}
@@ -701,7 +737,7 @@ mod tests {
///
/// The memory region between `pos` (inclusive) and `end` (exclusive) is valid for writes if `pos`
/// is less than `end`.
-pub(crate) struct RawFormatter {
+pub struct RawFormatter {
// Use `usize` to use `saturating_*` functions.
beg: usize,
pos: usize,
@@ -728,9 +764,9 @@ impl RawFormatter {
pub(crate) unsafe fn from_ptrs(pos: *mut u8, end: *mut u8) -> Self {
// INVARIANT: The safety requirements guarantee the type invariants.
Self {
- beg: pos as _,
- pos: pos as _,
- end: end as _,
+ beg: pos as usize,
+ pos: pos as usize,
+ end: end as usize,
}
}
@@ -755,11 +791,11 @@ impl RawFormatter {
///
/// N.B. It may point to invalid memory.
pub(crate) fn pos(&self) -> *mut u8 {
- self.pos as _
+ self.pos as *mut u8
}
/// Returns the number of bytes written to the formatter.
- pub(crate) fn bytes_written(&self) -> usize {
+ pub fn bytes_written(&self) -> usize {
self.pos - self.beg
}
}
@@ -793,9 +829,9 @@ impl fmt::Write for RawFormatter {
/// Allows formatting of [`fmt::Arguments`] into a raw buffer.
///
/// Fails if callers attempt to write more than will fit in the buffer.
-pub(crate) struct Formatter(RawFormatter);
+pub struct Formatter<'a>(RawFormatter, PhantomData<&'a mut ()>);
-impl Formatter {
+impl Formatter<'_> {
/// Creates a new instance of [`Formatter`] with the given buffer.
///
/// # Safety
@@ -804,11 +840,18 @@ impl Formatter {
/// for the lifetime of the returned [`Formatter`].
pub(crate) unsafe fn from_buffer(buf: *mut u8, len: usize) -> Self {
// SAFETY: The safety requirements of this function satisfy those of the callee.
- Self(unsafe { RawFormatter::from_buffer(buf, len) })
+ Self(unsafe { RawFormatter::from_buffer(buf, len) }, PhantomData)
+ }
+
+ /// Create a new [`Self`] instance.
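+    ///
+    /// # Examples
+    ///
+    /// A short sketch of formatting into a caller-provided buffer:
+    ///
+    /// ```
+    /// # use core::fmt::Write;
+    /// # use kernel::str::Formatter;
+    /// let mut buf = [0u8; 32];
+    /// let mut f = Formatter::new(&mut buf);
+    /// write!(f, "{} {}", "answer:", 42)?;
+    /// assert_eq!(f.bytes_written(), "answer: 42".len());
+    /// # Ok::<(), core::fmt::Error>(())
+    /// ```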
+ pub fn new(buffer: &mut [u8]) -> Self {
+        // SAFETY: `buffer` is valid for writes over its entire length for the
+        // lifetime of `Self`.
+ unsafe { Formatter::from_buffer(buffer.as_mut_ptr(), buffer.len()) }
}
}
-impl Deref for Formatter {
+impl Deref for Formatter<'_> {
type Target = RawFormatter;
fn deref(&self) -> &Self::Target {
@@ -816,7 +859,7 @@ impl Deref for Formatter {
}
}
-impl fmt::Write for Formatter {
+impl fmt::Write for Formatter<'_> {
fn write_str(&mut self, s: &str) -> fmt::Result {
self.0.write_str(s)?;
@@ -829,6 +872,132 @@ impl fmt::Write for Formatter {
}
}
+/// A mutable reference to a byte buffer where a string can be written into.
+///
+/// The buffer will be automatically null terminated after the last written character.
+///
+/// # Invariants
+///
+/// * The first byte of `buffer` is always zero.
+/// * The length of `buffer` is at least 1.
+pub(crate) struct NullTerminatedFormatter<'a> {
+ buffer: &'a mut [u8],
+}
+
+impl<'a> NullTerminatedFormatter<'a> {
+ /// Create a new [`Self`] instance.
+ pub(crate) fn new(buffer: &'a mut [u8]) -> Option<NullTerminatedFormatter<'a>> {
+ *(buffer.first_mut()?) = 0;
+
+ // INVARIANT:
+ // - We wrote zero to the first byte above.
+ // - If buffer was not at least length 1, `buffer.first_mut()` would return None.
+ Some(Self { buffer })
+ }
+}
+
+impl Write for NullTerminatedFormatter<'_> {
+ fn write_str(&mut self, s: &str) -> fmt::Result {
+ let bytes = s.as_bytes();
+ let len = bytes.len();
+
+ // We want space for a zero. By type invariant, buffer length is always at least 1, so no
+ // underflow.
+ if len > self.buffer.len() - 1 {
+ return Err(fmt::Error);
+ }
+
+ let buffer = core::mem::take(&mut self.buffer);
+ // We break the zero start invariant for a short while.
+ buffer[..len].copy_from_slice(bytes);
+ // INVARIANT: We checked above that buffer will have size at least 1 after this assignment.
+ self.buffer = &mut buffer[len..];
+
+ // INVARIANT: We write zero to the first byte of the buffer.
+ self.buffer[0] = 0;
+
+ Ok(())
+ }
+}
+
+/// # Safety
+///
+/// - `string` must point to a null terminated string that is valid for read.
+unsafe fn kstrtobool_raw(string: *const u8) -> Result<bool> {
+ let mut result: bool = false;
+
+ // SAFETY:
+ // - By function safety requirement, `string` is a valid null-terminated string.
+ // - `result` is a valid `bool` that we own.
+ to_result(unsafe { bindings::kstrtobool(string, &mut result) })?;
+ Ok(result)
+}
+
+/// Convert common user inputs into boolean values using the kernel's `kstrtobool` function.
+///
+/// This routine returns `Ok(bool)` if the first character is one of 'YyTt1NnFf0', or
+/// \[oO\]\[NnFf\] for "on" and "off". Otherwise it will return `Err(EINVAL)`.
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::{c_str, str::kstrtobool};
+///
+/// // Lowercase
+/// assert_eq!(kstrtobool(c_str!("true")), Ok(true));
+/// assert_eq!(kstrtobool(c_str!("tr")), Ok(true));
+/// assert_eq!(kstrtobool(c_str!("t")), Ok(true));
+/// assert_eq!(kstrtobool(c_str!("twrong")), Ok(true));
+/// assert_eq!(kstrtobool(c_str!("false")), Ok(false));
+/// assert_eq!(kstrtobool(c_str!("f")), Ok(false));
+/// assert_eq!(kstrtobool(c_str!("yes")), Ok(true));
+/// assert_eq!(kstrtobool(c_str!("no")), Ok(false));
+/// assert_eq!(kstrtobool(c_str!("on")), Ok(true));
+/// assert_eq!(kstrtobool(c_str!("off")), Ok(false));
+///
+/// // Camel case
+/// assert_eq!(kstrtobool(c_str!("True")), Ok(true));
+/// assert_eq!(kstrtobool(c_str!("False")), Ok(false));
+/// assert_eq!(kstrtobool(c_str!("Yes")), Ok(true));
+/// assert_eq!(kstrtobool(c_str!("No")), Ok(false));
+/// assert_eq!(kstrtobool(c_str!("On")), Ok(true));
+/// assert_eq!(kstrtobool(c_str!("Off")), Ok(false));
+///
+/// // All caps
+/// assert_eq!(kstrtobool(c_str!("TRUE")), Ok(true));
+/// assert_eq!(kstrtobool(c_str!("FALSE")), Ok(false));
+/// assert_eq!(kstrtobool(c_str!("YES")), Ok(true));
+/// assert_eq!(kstrtobool(c_str!("NO")), Ok(false));
+/// assert_eq!(kstrtobool(c_str!("ON")), Ok(true));
+/// assert_eq!(kstrtobool(c_str!("OFF")), Ok(false));
+///
+/// // Numeric
+/// assert_eq!(kstrtobool(c_str!("1")), Ok(true));
+/// assert_eq!(kstrtobool(c_str!("0")), Ok(false));
+///
+/// // Invalid input
+/// assert_eq!(kstrtobool(c_str!("invalid")), Err(EINVAL));
+/// assert_eq!(kstrtobool(c_str!("2")), Err(EINVAL));
+/// ```
+pub fn kstrtobool(string: &CStr) -> Result<bool> {
+ // SAFETY:
+ // - The pointer returned by `CStr::as_char_ptr` is guaranteed to be
+ // null terminated.
+ // - `string` is live and thus the string is valid for read.
+ unsafe { kstrtobool_raw(string.as_char_ptr()) }
+}
+
+/// Convert `&[u8]` to `bool` by deferring to [`kernel::str::kstrtobool`].
+///
+/// Only considers at most the first two bytes of `bytes`.
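+///
+/// # Examples
+///
+/// A short sketch mirroring the [`kstrtobool`] examples above:
+///
+/// ```
+/// # use kernel::prelude::*;
+/// # use kernel::str::kstrtobool_bytes;
+/// assert_eq!(kstrtobool_bytes(b"true"), Ok(true));
+/// assert_eq!(kstrtobool_bytes(b"no"), Ok(false));
+/// // Bytes past the second are ignored.
+/// assert_eq!(kstrtobool_bytes(b"nonsense"), Ok(false));
+/// assert_eq!(kstrtobool_bytes(b"2"), Err(EINVAL));
+/// ```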
+pub fn kstrtobool_bytes(bytes: &[u8]) -> Result<bool> {
+    // `kstrtobool` only considers the first two bytes of the input.
+ let stack_string = [*bytes.first().unwrap_or(&0), *bytes.get(1).unwrap_or(&0), 0];
+ // SAFETY: `stack_string` is null terminated and it is live on the stack so
+ // it is valid for read.
+ unsafe { kstrtobool_raw(stack_string.as_ptr()) }
+}
+
/// An owned string that is guaranteed to have exactly one `NUL` byte, which is at the end.
///
/// Used for interoperability with kernel APIs that take C strings.
@@ -840,14 +1009,14 @@ impl fmt::Write for Formatter {
/// # Examples
///
/// ```
-/// use kernel::{str::CString, fmt};
+/// use kernel::{str::CString, prelude::fmt};
///
/// let s = CString::try_from_fmt(fmt!("{}{}{}", "abc", 10, 20))?;
-/// assert_eq!(s.as_bytes_with_nul(), "abc1020\0".as_bytes());
+/// assert_eq!(s.to_bytes_with_nul(), "abc1020\0".as_bytes());
///
/// let tmp = "testing";
/// let s = CString::try_from_fmt(fmt!("{tmp}{}", 123))?;
-/// assert_eq!(s.as_bytes_with_nul(), "testing123\0".as_bytes());
+/// assert_eq!(s.to_bytes_with_nul(), "testing123\0".as_bytes());
///
/// // This fails because it has an embedded `NUL` byte.
/// let s = CString::try_from_fmt(fmt!("a\0b{}", 123));
@@ -917,7 +1086,7 @@ impl<'a> TryFrom<&'a CStr> for CString {
fn try_from(cstr: &'a CStr) -> Result<CString, AllocError> {
let mut buf = KVec::new();
- buf.extend_from_slice(cstr.as_bytes_with_nul(), GFP_KERNEL)?;
+ buf.extend_from_slice(cstr.to_bytes_with_nul(), GFP_KERNEL)?;
// INVARIANT: The `CStr` and `CString` types have the same invariants for
// the string data, and we copied it over without changes.
@@ -930,9 +1099,3 @@ impl fmt::Debug for CString {
fmt::Debug::fmt(&**self, f)
}
}
-
-/// A convenience alias for [`core::format_args`].
-#[macro_export]
-macro_rules! fmt {
- ($($f:tt)*) => ( ::core::format_args!($($f)*) )
-}
diff --git a/rust/kernel/sync.rs b/rust/kernel/sync.rs
index c23a12639924..cf5b638a097d 100644
--- a/rust/kernel/sync.rs
+++ b/rust/kernel/sync.rs
@@ -10,12 +10,16 @@ use crate::types::Opaque;
use pin_init;
mod arc;
+pub mod aref;
+pub mod atomic;
+pub mod barrier;
pub mod completion;
mod condvar;
pub mod lock;
mod locked_by;
pub mod poll;
pub mod rcu;
+mod refcount;
pub use arc::{Arc, ArcBorrow, UniqueArc};
pub use completion::Completion;
@@ -24,6 +28,7 @@ pub use lock::global::{global_lock, GlobalGuard, GlobalLock, GlobalLockBackend,
pub use lock::mutex::{new_mutex, Mutex, MutexGuard};
pub use lock::spinlock::{new_spinlock, SpinLock, SpinLockGuard};
pub use locked_by::LockedBy;
+pub use refcount::Refcount;
/// Represents a lockdep class. It's a wrapper around C's `lock_class_key`.
#[repr(transparent)]
@@ -41,7 +46,7 @@ impl LockClassKey {
/// Initializes a dynamically allocated lock class key. In the common case of using a
/// statically allocated lock class key, the static_lock_class! macro should be used instead.
///
- /// # Example
+ /// # Examples
/// ```
/// # use kernel::c_str;
/// # use kernel::alloc::KBox;
@@ -95,8 +100,11 @@ impl PinnedDrop for LockClassKey {
macro_rules! static_lock_class {
() => {{
static CLASS: $crate::sync::LockClassKey =
- // SAFETY: lockdep expects uninitialized memory when it's handed a statically allocated
- // lock_class_key
+ // Lockdep expects uninitialized memory when it's handed a statically allocated `struct
+ // lock_class_key`.
+ //
+ // SAFETY: `LockClassKey` transparently wraps `Opaque` which permits uninitialized
+ // memory.
unsafe { ::core::mem::MaybeUninit::uninit().assume_init() };
$crate::prelude::Pin::static_ref(&CLASS)
}};
diff --git a/rust/kernel/sync/arc.rs b/rust/kernel/sync/arc.rs
index c7af0aa48a0a..289f77abf415 100644
--- a/rust/kernel/sync/arc.rs
+++ b/rust/kernel/sync/arc.rs
@@ -8,7 +8,7 @@
//! threads.
//!
//! It is different from the standard library's [`Arc`] in a few ways:
-//! 1. It is backed by the kernel's `refcount_t` type.
+//! 1. It is backed by the kernel's [`Refcount`] type.
//! 2. It does not support weak references, which allows it to be half the size.
//! 3. It saturates the reference count instead of aborting when it goes over a threshold.
//! 4. It does not provide a `get_mut` method, so the ref counted object is pinned.
@@ -18,14 +18,16 @@
use crate::{
alloc::{AllocError, Flags, KBox},
- bindings,
+ ffi::c_void,
+ fmt,
init::InPlaceInit,
+ sync::Refcount,
try_init,
- types::{ForeignOwnable, Opaque},
+ types::ForeignOwnable,
};
use core::{
alloc::Layout,
- fmt,
+ borrow::{Borrow, BorrowMut},
marker::PhantomData,
mem::{ManuallyDrop, MaybeUninit},
ops::{Deref, DerefMut},
@@ -140,11 +142,10 @@ pub struct Arc<T: ?Sized> {
_p: PhantomData<ArcInner<T>>,
}
-#[doc(hidden)]
#[pin_data]
#[repr(C)]
-pub struct ArcInner<T: ?Sized> {
- refcount: Opaque<bindings::refcount_t>,
+struct ArcInner<T: ?Sized> {
+ refcount: Refcount,
data: T,
}
@@ -156,7 +157,7 @@ impl<T: ?Sized> ArcInner<T> {
/// `ptr` must have been returned by a previous call to [`Arc::into_raw`], and the `Arc` must
/// not yet have been destroyed.
unsafe fn container_of(ptr: *const T) -> NonNull<ArcInner<T>> {
- let refcount_layout = Layout::new::<bindings::refcount_t>();
+ let refcount_layout = Layout::new::<Refcount>();
// SAFETY: The caller guarantees that the pointer is valid.
let val_layout = Layout::for_value(unsafe { &*ptr });
// SAFETY: We're computing the layout of a real struct that existed when compiling this
@@ -228,8 +229,7 @@ impl<T> Arc<T> {
pub fn new(contents: T, flags: Flags) -> Result<Self, AllocError> {
// INVARIANT: The refcount is initialised to a non-zero value.
let value = ArcInner {
- // SAFETY: There are no safety requirements for this FFI call.
- refcount: Opaque::new(unsafe { bindings::REFCOUNT_INIT(1) }),
+ refcount: Refcount::new(1),
data: contents,
};
@@ -320,7 +320,7 @@ impl<T: ?Sized> Arc<T> {
/// use kernel::sync::{Arc, UniqueArc};
///
/// let arc = Arc::new(42, GFP_KERNEL)?;
- /// let unique_arc = arc.into_unique_or_drop();
+ /// let unique_arc = Arc::into_unique_or_drop(arc);
///
/// // The above conversion should succeed since refcount of `arc` is 1.
/// assert!(unique_arc.is_some());
@@ -336,35 +336,30 @@ impl<T: ?Sized> Arc<T> {
/// let arc = Arc::new(42, GFP_KERNEL)?;
/// let another = arc.clone();
///
- /// let unique_arc = arc.into_unique_or_drop();
+ /// let unique_arc = Arc::into_unique_or_drop(arc);
///
/// // The above conversion should fail since refcount of `arc` is >1.
/// assert!(unique_arc.is_none());
///
/// # Ok::<(), Error>(())
/// ```
- pub fn into_unique_or_drop(self) -> Option<Pin<UniqueArc<T>>> {
+ pub fn into_unique_or_drop(this: Self) -> Option<Pin<UniqueArc<T>>> {
// We will manually manage the refcount in this method, so we disable the destructor.
- let me = ManuallyDrop::new(self);
+ let this = ManuallyDrop::new(this);
// SAFETY: We own a refcount, so the pointer is still valid.
- let refcount = unsafe { me.ptr.as_ref() }.refcount.get();
+ let refcount = unsafe { &this.ptr.as_ref().refcount };
// If the refcount reaches a non-zero value, then we have destroyed this `Arc` and will
// return without further touching the `Arc`. If the refcount reaches zero, then there are
// no other arcs, and we can create a `UniqueArc`.
- //
- // SAFETY: We own a refcount, so the pointer is not dangling.
- let is_zero = unsafe { bindings::refcount_dec_and_test(refcount) };
- if is_zero {
- // SAFETY: We have exclusive access to the arc, so we can perform unsynchronized
- // accesses to the refcount.
- unsafe { core::ptr::write(refcount, bindings::REFCOUNT_INIT(1)) };
+ if refcount.dec_and_test() {
+ refcount.set(1);
// INVARIANT: We own the only refcount to this arc, so we may create a `UniqueArc`. We
// must pin the `UniqueArc` because the values was previously in an `Arc`, and they pin
// their values.
Some(Pin::from(UniqueArc {
- inner: ManuallyDrop::into_inner(me),
+ inner: ManuallyDrop::into_inner(this),
}))
} else {
None
@@ -372,20 +367,22 @@ impl<T: ?Sized> Arc<T> {
}
}
-// SAFETY: The `into_foreign` function returns a pointer that is well-aligned.
+// SAFETY: The pointer returned by `into_foreign` was originally allocated as an
+// `KBox<ArcInner<T>>`, so that type is what determines the alignment.
unsafe impl<T: 'static> ForeignOwnable for Arc<T> {
- type PointedTo = ArcInner<T>;
+ const FOREIGN_ALIGN: usize = <KBox<ArcInner<T>> as ForeignOwnable>::FOREIGN_ALIGN;
+
type Borrowed<'a> = ArcBorrow<'a, T>;
type BorrowedMut<'a> = Self::Borrowed<'a>;
- fn into_foreign(self) -> *mut Self::PointedTo {
- ManuallyDrop::new(self).ptr.as_ptr()
+ fn into_foreign(self) -> *mut c_void {
+ ManuallyDrop::new(self).ptr.as_ptr().cast()
}
- unsafe fn from_foreign(ptr: *mut Self::PointedTo) -> Self {
+ unsafe fn from_foreign(ptr: *mut c_void) -> Self {
// SAFETY: The safety requirements of this function ensure that `ptr` comes from a previous
// call to `Self::into_foreign`.
- let inner = unsafe { NonNull::new_unchecked(ptr) };
+ let inner = unsafe { NonNull::new_unchecked(ptr.cast::<ArcInner<T>>()) };
// SAFETY: By the safety requirement of this function, we know that `ptr` came from
// a previous call to `Arc::into_foreign`, which guarantees that `ptr` is valid and
@@ -393,20 +390,20 @@ unsafe impl<T: 'static> ForeignOwnable for Arc<T> {
unsafe { Self::from_inner(inner) }
}
- unsafe fn borrow<'a>(ptr: *mut Self::PointedTo) -> ArcBorrow<'a, T> {
+ unsafe fn borrow<'a>(ptr: *mut c_void) -> ArcBorrow<'a, T> {
// SAFETY: The safety requirements of this function ensure that `ptr` comes from a previous
// call to `Self::into_foreign`.
- let inner = unsafe { NonNull::new_unchecked(ptr) };
+ let inner = unsafe { NonNull::new_unchecked(ptr.cast::<ArcInner<T>>()) };
// SAFETY: The safety requirements of `from_foreign` ensure that the object remains alive
// for the lifetime of the returned value.
unsafe { ArcBorrow::new(inner) }
}
- unsafe fn borrow_mut<'a>(ptr: *mut Self::PointedTo) -> ArcBorrow<'a, T> {
+ unsafe fn borrow_mut<'a>(ptr: *mut c_void) -> ArcBorrow<'a, T> {
// SAFETY: The safety requirements for `borrow_mut` are a superset of the safety
// requirements for `borrow`.
- unsafe { Self::borrow(ptr) }
+ unsafe { <Self as ForeignOwnable>::borrow(ptr) }
}
}
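
With the switch from `*mut Self::PointedTo` to `*mut c_void`, a round-trip through the foreign-pointer API looks as follows. This is a minimal sketch (not part of the patch), assuming `ForeignOwnable` remains exported from `kernel::types`:

```
use core::ffi::c_void;
use kernel::sync::Arc;
use kernel::types::ForeignOwnable;

fn round_trip(arc: Arc<u32>) -> Arc<u32> {
    // Transfer ownership of one refcount to the foreign (C) side.
    let raw: *mut c_void = arc.into_foreign();

    // ... `raw` would typically be stashed in a C-side struct here ...

    // SAFETY: `raw` came from the `into_foreign()` call above and is not used again.
    unsafe { Arc::from_foreign(raw) }
}
```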
@@ -426,16 +423,37 @@ impl<T: ?Sized> AsRef<T> for Arc<T> {
}
}
+/// # Examples
+///
+/// ```
+/// # use core::borrow::Borrow;
+/// # use kernel::sync::Arc;
+/// struct Foo<B: Borrow<u32>>(B);
+///
+/// // Owned instance.
+/// let owned = Foo(1);
+///
+/// // Shared instance.
+/// let arc = Arc::new(1, GFP_KERNEL)?;
+/// let shared = Foo(arc.clone());
+///
+/// let i = 1;
+/// // Borrowed from `i`.
+/// let borrowed = Foo(&i);
+/// # Ok::<(), Error>(())
+/// ```
+impl<T: ?Sized> Borrow<T> for Arc<T> {
+ fn borrow(&self) -> &T {
+ self.deref()
+ }
+}
+
impl<T: ?Sized> Clone for Arc<T> {
fn clone(&self) -> Self {
- // SAFETY: By the type invariant, there is necessarily a reference to the object, so it is
- // safe to dereference it.
- let refcount = unsafe { self.ptr.as_ref() }.refcount.get();
-
- // INVARIANT: C `refcount_inc` saturates the refcount, so it cannot overflow to zero.
+ // INVARIANT: `Refcount` saturates the refcount, so it cannot overflow to zero.
// SAFETY: By the type invariant, there is necessarily a reference to the object, so it is
// safe to increment the refcount.
- unsafe { bindings::refcount_inc(refcount) };
+ unsafe { self.ptr.as_ref() }.refcount.inc();
// SAFETY: We just incremented the refcount. This increment is now owned by the new `Arc`.
unsafe { Self::from_inner(self.ptr) }
@@ -444,16 +462,10 @@ impl<T: ?Sized> Clone for Arc<T> {
impl<T: ?Sized> Drop for Arc<T> {
fn drop(&mut self) {
- // SAFETY: By the type invariant, there is necessarily a reference to the object. We cannot
- // touch `refcount` after it's decremented to a non-zero value because another thread/CPU
- // may concurrently decrement it to zero and free it. It is ok to have a raw pointer to
- // freed/invalid memory as long as it is never dereferenced.
- let refcount = unsafe { self.ptr.as_ref() }.refcount.get();
-
// INVARIANT: If the refcount reaches zero, there are no other instances of `Arc`, and
// this instance is being dropped, so the broken invariant is not observable.
- // SAFETY: Also by the type invariant, we are allowed to decrement the refcount.
- let is_zero = unsafe { bindings::refcount_dec_and_test(refcount) };
+ // SAFETY: By the type invariant, there is necessarily a reference to the object.
+ let is_zero = unsafe { self.ptr.as_ref() }.refcount.dec_and_test();
if is_zero {
// The count reached zero, we must free the memory.
//
@@ -747,8 +759,7 @@ impl<T> UniqueArc<T> {
// INVARIANT: The refcount is initialised to a non-zero value.
let inner = KBox::try_init::<AllocError>(
try_init!(ArcInner {
- // SAFETY: There are no safety requirements for this FFI call.
- refcount: Opaque::new(unsafe { bindings::REFCOUNT_INIT(1) }),
+ refcount: Refcount::new(1),
data <- pin_init::uninit::<T, AllocError>(),
}? AllocError),
flags,
@@ -834,6 +845,56 @@ impl<T: ?Sized> DerefMut for UniqueArc<T> {
}
}
+/// # Examples
+///
+/// ```
+/// # use core::borrow::Borrow;
+/// # use kernel::sync::UniqueArc;
+/// struct Foo<B: Borrow<u32>>(B);
+///
+/// // Owned instance.
+/// let owned = Foo(1);
+///
+/// // Owned instance using `UniqueArc`.
+/// let arc = UniqueArc::new(1, GFP_KERNEL)?;
+/// let owned_arc = Foo(arc);
+///
+/// let i = 1;
+/// // Borrowed from `i`.
+/// let borrowed = Foo(&i);
+/// # Ok::<(), Error>(())
+/// ```
+impl<T: ?Sized> Borrow<T> for UniqueArc<T> {
+ fn borrow(&self) -> &T {
+ self.deref()
+ }
+}
+
+/// # Examples
+///
+/// ```
+/// # use core::borrow::BorrowMut;
+/// # use kernel::sync::UniqueArc;
+/// struct Foo<B: BorrowMut<u32>>(B);
+///
+/// // Owned instance.
+/// let owned = Foo(1);
+///
+/// // Owned instance using `UniqueArc`.
+/// let arc = UniqueArc::new(1, GFP_KERNEL)?;
+/// let owned_arc = Foo(arc);
+///
+/// let mut i = 1;
+/// // Borrowed from `i`.
+/// let borrowed = Foo(&mut i);
+/// # Ok::<(), Error>(())
+/// ```
+impl<T: ?Sized> BorrowMut<T> for UniqueArc<T> {
+ fn borrow_mut(&mut self) -> &mut T {
+ self.deref_mut()
+ }
+}
+
impl<T: fmt::Display + ?Sized> fmt::Display for UniqueArc<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Display::fmt(self.deref(), f)
diff --git a/rust/kernel/sync/aref.rs b/rust/kernel/sync/aref.rs
new file mode 100644
index 000000000000..0d24a0432015
--- /dev/null
+++ b/rust/kernel/sync/aref.rs
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Internal reference counting support.
+//!
+//! Many C types already have their own reference counting mechanism (e.g. by storing a
+//! `refcount_t`). This module provides support for directly using their internal reference count
+//! from Rust, instead of making users maintain an additional Rust-side reference count in the
+//! form of [`Arc`].
+//!
+//! The smart pointer [`ARef<T>`] acts similarly to [`Arc<T>`] in that it holds a refcount on the
+//! underlying object, but this refcount is internal to the object. It essentially is a Rust
+//! implementation of the `get_` and `put_` pattern used in C for reference counting.
+//!
+//! To make use of [`ARef<MyType>`], `MyType` needs to implement [`AlwaysRefCounted`], a trait
+//! for accessing the internal reference count of an object.
+//!
+//! [`Arc`]: crate::sync::Arc
+//! [`Arc<T>`]: crate::sync::Arc
+
+use core::{marker::PhantomData, mem::ManuallyDrop, ops::Deref, ptr::NonNull};
+
+/// Types that are _always_ reference counted.
+///
+/// It allows such types to define their own ref increment and decrement functions.
+/// Additionally, it allows users to convert from a shared reference `&T` to an owned reference
+/// [`ARef<T>`].
+///
+/// This is usually implemented by wrappers around existing structures on the C side of the code. For
+/// Rust code, the recommendation is to use [`Arc`](crate::sync::Arc) to create reference-counted
+/// instances of a type.
+///
+/// # Safety
+///
+/// Implementers must ensure that increments to the reference count keep the object alive in memory
+/// at least until matching decrements are performed.
+///
+/// Implementers must also ensure that all instances are reference-counted. (Otherwise they
+/// won't be able to honour the requirement that [`AlwaysRefCounted::inc_ref`] keep the object
+/// alive.)
+pub unsafe trait AlwaysRefCounted {
+ /// Increments the reference count on the object.
+ fn inc_ref(&self);
+
+ /// Decrements the reference count on the object.
+ ///
+ /// Frees the object when the count reaches zero.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that there was a previous matching increment to the reference count,
+ /// and that the object is no longer used after its reference count is decremented (as it may
+ /// result in the object being freed), unless the caller owns another increment on the refcount
+ /// (e.g., it calls [`AlwaysRefCounted::inc_ref`] twice, then calls
+ /// [`AlwaysRefCounted::dec_ref`] once).
+ unsafe fn dec_ref(obj: NonNull<Self>);
+}
+
+/// An owned reference to an always-reference-counted object.
+///
+/// The object's reference count is automatically decremented when an instance of [`ARef`] is
+/// dropped. It is also automatically incremented when a new instance is created via
+/// [`ARef::clone`].
+///
+/// # Invariants
+///
+/// The pointer stored in `ptr` is non-null and valid for the lifetime of the [`ARef`] instance. In
+/// particular, the [`ARef`] instance owns an increment on the underlying object's reference count.
+pub struct ARef<T: AlwaysRefCounted> {
+ ptr: NonNull<T>,
+ _p: PhantomData<T>,
+}
+
+// SAFETY: It is safe to send `ARef<T>` to another thread when the underlying `T` is `Sync` because
+// it effectively means sharing `&T` (which is safe because `T` is `Sync`); additionally, it needs
+// `T` to be `Send` because any thread that has an `ARef<T>` may ultimately access `T` using a
+// mutable reference, for example, when the reference count reaches zero and `T` is dropped.
+unsafe impl<T: AlwaysRefCounted + Sync + Send> Send for ARef<T> {}
+
+// SAFETY: It is safe to send `&ARef<T>` to another thread when the underlying `T` is `Sync`
+// because it effectively means sharing `&T` (which is safe because `T` is `Sync`); additionally,
+// it needs `T` to be `Send` because any thread that has a `&ARef<T>` may clone it and get an
+// `ARef<T>` on that thread, so the thread may ultimately access `T` using a mutable reference, for
+// example, when the reference count reaches zero and `T` is dropped.
+unsafe impl<T: AlwaysRefCounted + Sync + Send> Sync for ARef<T> {}
+
+impl<T: AlwaysRefCounted> ARef<T> {
+ /// Creates a new instance of [`ARef`].
+ ///
+ /// It takes over an increment of the reference count on the underlying object.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that the reference count was incremented at least once, and that they
+ /// are properly relinquishing one increment. That is, if there is only one increment, callers
+ /// must not use the underlying object anymore -- it is only safe to do so via the newly
+ /// created [`ARef`].
+ pub unsafe fn from_raw(ptr: NonNull<T>) -> Self {
+ // INVARIANT: The safety requirements guarantee that the new instance now owns the
+ // increment on the refcount.
+ Self {
+ ptr,
+ _p: PhantomData,
+ }
+ }
+
+ /// Consumes the `ARef`, returning a raw pointer.
+ ///
+ /// This function does not change the refcount. After calling this function, the caller is
+ /// responsible for the refcount previously managed by the `ARef`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use core::ptr::NonNull;
+ /// use kernel::sync::aref::{ARef, AlwaysRefCounted};
+ ///
+ /// struct Empty {}
+ ///
+ /// # // SAFETY: TODO.
+ /// unsafe impl AlwaysRefCounted for Empty {
+ /// fn inc_ref(&self) {}
+ /// unsafe fn dec_ref(_obj: NonNull<Self>) {}
+ /// }
+ ///
+ /// let mut data = Empty {};
+ /// let ptr = NonNull::<Empty>::new(&mut data).unwrap();
+ /// # // SAFETY: TODO.
+ /// let data_ref: ARef<Empty> = unsafe { ARef::from_raw(ptr) };
+ /// let raw_ptr: NonNull<Empty> = ARef::into_raw(data_ref);
+ ///
+ /// assert_eq!(ptr, raw_ptr);
+ /// ```
+ pub fn into_raw(me: Self) -> NonNull<T> {
+ ManuallyDrop::new(me).ptr
+ }
+}
+
+impl<T: AlwaysRefCounted> Clone for ARef<T> {
+ fn clone(&self) -> Self {
+ self.inc_ref();
+ // SAFETY: We just incremented the refcount above.
+ unsafe { Self::from_raw(self.ptr) }
+ }
+}
+
+impl<T: AlwaysRefCounted> Deref for ARef<T> {
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ // SAFETY: The type invariants guarantee that the object is valid.
+ unsafe { self.ptr.as_ref() }
+ }
+}
+
+impl<T: AlwaysRefCounted> From<&T> for ARef<T> {
+ fn from(b: &T) -> Self {
+ b.inc_ref();
+ // SAFETY: We just incremented the refcount above.
+ unsafe { Self::from_raw(NonNull::from(b)) }
+ }
+}
+
+impl<T: AlwaysRefCounted> Drop for ARef<T> {
+ fn drop(&mut self) {
+ // SAFETY: The type invariants guarantee that the `ARef` owns the reference we're about to
+ // decrement.
+ unsafe { T::dec_ref(self.ptr) };
+ }
+}
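
To illustrate the get/put pattern the module documentation describes, here is a hedged sketch (not part of the patch) of how a wrapper around a C type with an embedded refcount might implement `AlwaysRefCounted`; `bindings::foo`, `bindings::foo_get` and `bindings::foo_put` are hypothetical names, not real bindings:

```
use core::ptr::NonNull;
use kernel::sync::aref::AlwaysRefCounted;
use kernel::types::Opaque;

/// A wrapper around a hypothetical C `struct foo` with an embedded refcount.
#[repr(transparent)]
struct Foo(Opaque<bindings::foo>);

// SAFETY: Assumes `foo_get()`/`foo_put()` follow the usual get/put contract, i.e. a `get` keeps
// the object alive at least until the matching `put`, and all `Foo` instances are refcounted.
unsafe impl AlwaysRefCounted for Foo {
    fn inc_ref(&self) {
        // SAFETY: The existence of `&self` guarantees that the object is alive.
        unsafe { bindings::foo_get(self.0.get()) };
    }

    unsafe fn dec_ref(obj: NonNull<Self>) {
        // SAFETY: The caller owns a refcount, per this method's safety requirements.
        unsafe { bindings::foo_put(obj.cast().as_ptr()) };
    }
}
```

With such an impl in place, `ARef::<Foo>::from(&foo)` takes a reference and the matching `dec_ref()` runs automatically when the `ARef` is dropped.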
diff --git a/rust/kernel/sync/atomic.rs b/rust/kernel/sync/atomic.rs
new file mode 100644
index 000000000000..016a6bcaf080
--- /dev/null
+++ b/rust/kernel/sync/atomic.rs
@@ -0,0 +1,551 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Atomic primitives.
+//!
+//! These primitives have the same semantics as their C counterparts; the precise definitions of
+//! the semantics can be found at [`LKMM`]. Note that the Linux Kernel Memory (Consistency) Model
+//! is the only memory model for Rust code in the kernel, and Rust's own atomics should be avoided.
+//!
+//! # Data races
+//!
+//! [`LKMM`] atomics have different rules regarding data races:
+//!
+//! - A normal write from C side is treated as an atomic write if
+//! CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC=y.
+//! - Mixed-size atomic accesses don't cause data races.
+//!
+//! [`LKMM`]: srctree/tools/memory-model/
+
+mod internal;
+pub mod ordering;
+mod predefine;
+
+pub use internal::AtomicImpl;
+pub use ordering::{Acquire, Full, Relaxed, Release};
+
+use crate::build_error;
+use internal::{AtomicArithmeticOps, AtomicBasicOps, AtomicExchangeOps, AtomicRepr};
+use ordering::OrderingType;
+
+/// A memory location which can be safely modified from multiple execution contexts.
+///
+/// This has the same size, alignment and bit validity as the underlying type `T`. It also
+/// disables niche optimization for the same reason as [`UnsafeCell`].
+///
+/// The atomic operations are implemented in a way that is fully compatible with the [Linux Kernel
+/// Memory (Consistency) Model][LKMM], hence they should be modeled as the corresponding
+/// [`LKMM`][LKMM] atomic primitives. With the help of [`Atomic::from_ptr()`] and
+/// [`Atomic::as_ptr()`], this provides a way to interact with [C-side atomic operations]
+/// (including those without the `atomic` prefix, e.g. `READ_ONCE()`, `WRITE_ONCE()`,
+/// `smp_load_acquire()` and `smp_store_release()`).
+///
+/// # Invariants
+///
+/// `self.0` is a valid `T`.
+///
+/// [`UnsafeCell`]: core::cell::UnsafeCell
+/// [LKMM]: srctree/tools/memory-model/
+/// [C-side atomic operations]: srctree/Documentation/atomic_t.txt
+#[repr(transparent)]
+pub struct Atomic<T: AtomicType>(AtomicRepr<T::Repr>);
+
+// SAFETY: `Atomic<T>` is safe to share among execution contexts because all accesses are atomic.
+unsafe impl<T: AtomicType> Sync for Atomic<T> {}
+
+/// Types that support basic atomic operations.
+///
+/// # Round-trip transmutability
+///
+/// `T` is round-trip transmutable to `U` if and only if both of these properties hold:
+///
+/// - Any valid bit pattern for `T` is also a valid bit pattern for `U`.
+/// - Transmuting (e.g. using [`transmute()`]) a value of type `T` to `U` and then to `T` again
+///   yields a value that is in all respects equivalent to the original value.
+///
+/// # Safety
+///
+/// - [`Self`] must have the same size and alignment as [`Self::Repr`].
+/// - [`Self`] must be [round-trip transmutable] to [`Self::Repr`].
+///
+/// Note that this is more relaxed than requiring bi-directional transmutability (i.e.
+/// [`transmute()`] is always sound between `U` and `T`) because of the support for atomic
+/// variables over unit-only enums, see [Examples].
+///
+/// # Limitations
+///
+/// Because C primitives are used to implement the atomic operations, and a C function requires a
+/// valid object of a type to operate on (i.e. no `MaybeUninit<_>`), only types with all their
+/// bits initialized can be passed at the Rust <-> C surface. As a result, types like `(u8,
+/// u16)` (whose padding bytes are uninitialized) are currently not supported.
+///
+/// # Examples
+///
+/// A unit-only enum that implements [`AtomicType`]:
+///
+/// ```
+/// use kernel::sync::atomic::{AtomicType, Atomic, Relaxed};
+///
+/// #[derive(Clone, Copy, PartialEq, Eq)]
+/// #[repr(i32)]
+/// enum State {
+/// Uninit = 0,
+/// Working = 1,
+/// Done = 2,
+/// }
+///
+/// // SAFETY: `State` and `i32` have the same size and alignment, and it's round-trip
+/// // transmutable to `i32`.
+/// unsafe impl AtomicType for State {
+/// type Repr = i32;
+/// }
+///
+/// let s = Atomic::new(State::Uninit);
+///
+/// assert_eq!(State::Uninit, s.load(Relaxed));
+/// ```
+/// [`transmute()`]: core::mem::transmute
+/// [round-trip transmutable]: AtomicType#round-trip-transmutability
+/// [Examples]: AtomicType#examples
+pub unsafe trait AtomicType: Sized + Send + Copy {
+ /// The backing atomic implementation type.
+ type Repr: AtomicImpl;
+}
+
+/// Types that support atomic add operations.
+///
+/// # Safety
+///
+// TODO: Properly define `wrapping_add` in the following comment.
+/// Adding (`wrapping_add`) any value of type `Self::Repr::Delta` obtained via
+/// [`Self::rhs_into_delta()`] to any value of type `Self::Repr` obtained by transmuting a value
+/// of type `Self` must yield a value with a bit pattern that is also valid for `Self`.
+pub unsafe trait AtomicAdd<Rhs = Self>: AtomicType {
+ /// Converts `Rhs` into the `Delta` type of the atomic implementation.
+ fn rhs_into_delta(rhs: Rhs) -> <Self::Repr as AtomicImpl>::Delta;
+}
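
As an illustration of that safety requirement, here is a sketch (not part of the patch) of a transparent newtype opting into `add`/`fetch_add`; `Ticks` is a hypothetical type:

```
use kernel::sync::atomic::{Atomic, AtomicAdd, AtomicType, Relaxed};

/// A hypothetical transparent tick counter.
#[derive(Clone, Copy)]
#[repr(transparent)]
struct Ticks(u64);

// SAFETY: `Ticks` has the same size and alignment as `i64` (via `u64`), and is round-trip
// transmutable to `i64`.
unsafe impl AtomicType for Ticks {
    type Repr = i64;
}

// SAFETY: Any `i64` produced by a wrapping add is a valid bit pattern for `u64`, and hence for
// `Ticks`.
unsafe impl AtomicAdd<u64> for Ticks {
    fn rhs_into_delta(rhs: u64) -> i64 {
        rhs as i64
    }
}

fn example() {
    let t = Atomic::new(Ticks(40));
    t.add(2, Relaxed);
    assert_eq!(42, t.load(Relaxed).0);
}
```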
+
+#[inline(always)]
+const fn into_repr<T: AtomicType>(v: T) -> T::Repr {
+ // SAFETY: Per the safety requirement of `AtomicType`, `T` is round-trip transmutable to
+ // `T::Repr`, therefore the transmute operation is sound.
+ unsafe { core::mem::transmute_copy(&v) }
+}
+
+/// # Safety
+///
+/// `r` must be a valid bit pattern of `T`.
+#[inline(always)]
+const unsafe fn from_repr<T: AtomicType>(r: T::Repr) -> T {
+ // SAFETY: Per the safety requirement of the function, the transmute operation is sound.
+ unsafe { core::mem::transmute_copy(&r) }
+}
+
+impl<T: AtomicType> Atomic<T> {
+ /// Creates a new atomic `T`.
+ pub const fn new(v: T) -> Self {
+ // INVARIANT: Per the safety requirement of `AtomicType`, `into_repr(v)` is a valid `T`.
+ Self(AtomicRepr::new(into_repr(v)))
+ }
+
+    /// Creates a reference to an atomic `T` from a pointer to `T`.
+    ///
+    /// This is usually used when communicating with the C side or manipulating a C struct; see
+    /// the examples below.
+ ///
+ /// # Safety
+ ///
+ /// - `ptr` is aligned to `align_of::<T>()`.
+ /// - `ptr` is valid for reads and writes for `'a`.
+ /// - For the duration of `'a`, other accesses to `*ptr` must not cause data races (defined
+ /// by [`LKMM`]) against atomic operations on the returned reference. Note that if all other
+ /// accesses are atomic, then this safety requirement is trivially fulfilled.
+ ///
+ /// [`LKMM`]: srctree/tools/memory-model
+ ///
+ /// # Examples
+ ///
+ /// Using [`Atomic::from_ptr()`] combined with [`Atomic::load()`] or [`Atomic::store()`] can
+ /// achieve the same functionality as `READ_ONCE()`/`smp_load_acquire()` or
+ /// `WRITE_ONCE()`/`smp_store_release()` in C side:
+ ///
+ /// ```
+ /// # use kernel::types::Opaque;
+ /// use kernel::sync::atomic::{Atomic, Relaxed, Release};
+ ///
+ /// // Assume there is a C struct `foo`.
+ /// mod cbindings {
+ /// #[repr(C)]
+ /// pub(crate) struct foo {
+ /// pub(crate) a: i32,
+ /// pub(crate) b: i32
+ /// }
+ /// }
+ ///
+ /// let tmp = Opaque::new(cbindings::foo { a: 1, b: 2 });
+ ///
+ /// // struct foo *foo_ptr = ..;
+ /// let foo_ptr = tmp.get();
+ ///
+ /// // SAFETY: `foo_ptr` is valid, and `.a` is in bounds.
+ /// let foo_a_ptr = unsafe { &raw mut (*foo_ptr).a };
+ ///
+ /// // a = READ_ONCE(foo_ptr->a);
+ /// //
+    /// // SAFETY: `foo_a_ptr` is valid for reads, and all other accesses to it are atomic, so
+    /// // there is no data race.
+ /// let a = unsafe { Atomic::from_ptr(foo_a_ptr) }.load(Relaxed);
+ /// # assert_eq!(a, 1);
+ ///
+ /// // smp_store_release(&foo_ptr->a, 2);
+ /// //
+    /// // SAFETY: `foo_a_ptr` is valid for writes, and all other accesses to it are atomic, so
+    /// // there is no data race.
+ /// unsafe { Atomic::from_ptr(foo_a_ptr) }.store(2, Release);
+ /// ```
+ pub unsafe fn from_ptr<'a>(ptr: *mut T) -> &'a Self
+ where
+ T: Sync,
+ {
+ // CAST: `T` and `Atomic<T>` have the same size, alignment and bit validity.
+ // SAFETY: Per function safety requirement, `ptr` is a valid pointer and the object will
+ // live long enough. It's safe to return a `&Atomic<T>` because function safety requirement
+ // guarantees other accesses won't cause data races.
+ unsafe { &*ptr.cast::<Self>() }
+ }
+
+ /// Returns a pointer to the underlying atomic `T`.
+ ///
+    /// Note that use of the returned pointer must not cause data races as defined by [`LKMM`].
+ ///
+ /// # Guarantees
+ ///
+ /// The returned pointer is valid and properly aligned (i.e. aligned to [`align_of::<T>()`]).
+ ///
+ /// [`LKMM`]: srctree/tools/memory-model
+ /// [`align_of::<T>()`]: core::mem::align_of
+ pub const fn as_ptr(&self) -> *mut T {
+ // GUARANTEE: Per the function guarantee of `AtomicRepr::as_ptr()`, the `self.0.as_ptr()`
+ // must be a valid and properly aligned pointer for `T::Repr`, and per the safety guarantee
+ // of `AtomicType`, it's a valid and properly aligned pointer of `T`.
+ self.0.as_ptr().cast()
+ }
+
+ /// Returns a mutable reference to the underlying atomic `T`.
+ ///
+    /// This is safe because the mutable reference to the atomic `T` guarantees exclusive access.
+ pub fn get_mut(&mut self) -> &mut T {
+        // CAST: `T` and `T::Repr` have the same size and alignment per the safety requirement of
+        // `AtomicType`, and per the type invariants `self.0` is a valid `T`, therefore the cast
+        // yields a valid pointer to `T`.
+ // SAFETY: The pointer is valid per the CAST comment above, and the mutable reference
+ // guarantees exclusive access.
+ unsafe { &mut *self.0.as_ptr().cast() }
+ }
+}
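
Since `get_mut()` takes `&mut self`, exclusive access is proven statically and no atomic instructions are needed; a minimal sketch:

```
use kernel::sync::atomic::{Atomic, Relaxed};

fn example() {
    let mut x = Atomic::new(42i32);
    // Exclusive access (`&mut`) rules out concurrent accesses, so the value can
    // be read and written without atomic operations.
    *x.get_mut() += 1;
    assert_eq!(43, x.load(Relaxed));
}
```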
+
+impl<T: AtomicType> Atomic<T>
+where
+ T::Repr: AtomicBasicOps,
+{
+ /// Loads the value from the atomic `T`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use kernel::sync::atomic::{Atomic, Relaxed};
+ ///
+ /// let x = Atomic::new(42i32);
+ ///
+ /// assert_eq!(42, x.load(Relaxed));
+ ///
+ /// let x = Atomic::new(42i64);
+ ///
+ /// assert_eq!(42, x.load(Relaxed));
+ /// ```
+ #[doc(alias("atomic_read", "atomic64_read"))]
+ #[inline(always)]
+ pub fn load<Ordering: ordering::AcquireOrRelaxed>(&self, _: Ordering) -> T {
+ let v = {
+ match Ordering::TYPE {
+ OrderingType::Relaxed => T::Repr::atomic_read(&self.0),
+ OrderingType::Acquire => T::Repr::atomic_read_acquire(&self.0),
+ _ => build_error!("Wrong ordering"),
+ }
+ };
+
+ // SAFETY: `v` comes from reading `self.0`, which is a valid `T` per the type invariants.
+ unsafe { from_repr(v) }
+ }
+
+ /// Stores a value to the atomic `T`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use kernel::sync::atomic::{Atomic, Relaxed};
+ ///
+ /// let x = Atomic::new(42i32);
+ ///
+ /// assert_eq!(42, x.load(Relaxed));
+ ///
+ /// x.store(43, Relaxed);
+ ///
+ /// assert_eq!(43, x.load(Relaxed));
+ /// ```
+ #[doc(alias("atomic_set", "atomic64_set"))]
+ #[inline(always)]
+ pub fn store<Ordering: ordering::ReleaseOrRelaxed>(&self, v: T, _: Ordering) {
+ let v = into_repr(v);
+
+ // INVARIANT: `v` is a valid `T`, and is stored to `self.0` by `atomic_set*()`.
+ match Ordering::TYPE {
+ OrderingType::Relaxed => T::Repr::atomic_set(&self.0, v),
+ OrderingType::Release => T::Repr::atomic_set_release(&self.0, v),
+ _ => build_error!("Wrong ordering"),
+ }
+ }
+}
+
+impl<T: AtomicType> Atomic<T>
+where
+ T::Repr: AtomicExchangeOps,
+{
+ /// Atomic exchange.
+ ///
+ /// Atomically updates `*self` to `v` and returns the old value of `*self`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use kernel::sync::atomic::{Atomic, Acquire, Relaxed};
+ ///
+ /// let x = Atomic::new(42);
+ ///
+ /// assert_eq!(42, x.xchg(52, Acquire));
+ /// assert_eq!(52, x.load(Relaxed));
+ /// ```
+ #[doc(alias("atomic_xchg", "atomic64_xchg", "swap"))]
+ #[inline(always)]
+ pub fn xchg<Ordering: ordering::Ordering>(&self, v: T, _: Ordering) -> T {
+ let v = into_repr(v);
+
+ // INVARIANT: `self.0` is a valid `T` after `atomic_xchg*()` because `v` is transmutable to
+ // `T`.
+ let ret = {
+ match Ordering::TYPE {
+ OrderingType::Full => T::Repr::atomic_xchg(&self.0, v),
+ OrderingType::Acquire => T::Repr::atomic_xchg_acquire(&self.0, v),
+ OrderingType::Release => T::Repr::atomic_xchg_release(&self.0, v),
+ OrderingType::Relaxed => T::Repr::atomic_xchg_relaxed(&self.0, v),
+ }
+ };
+
+ // SAFETY: `ret` comes from reading `*self`, which is a valid `T` per type invariants.
+ unsafe { from_repr(ret) }
+ }
+
+ /// Atomic compare and exchange.
+ ///
+ /// If `*self` == `old`, atomically updates `*self` to `new`. Otherwise, `*self` is not
+ /// modified.
+ ///
+ /// Compare: The comparison is done via the byte level comparison between `*self` and `old`.
+ ///
+    /// Ordering: When it succeeds, it provides the ordering indicated by the `Ordering` type
+    /// parameter. A failed cmpxchg doesn't provide any ordering; its load part is a [`Relaxed`]
+    /// load.
+ ///
+    /// Returns `Ok(value)` if the cmpxchg succeeds, in which case `value` is guaranteed to be
+    /// equal to `old`; otherwise returns `Err(value)`, where `value` is the current value of
+    /// `*self`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use kernel::sync::atomic::{Atomic, Full, Relaxed};
+ ///
+ /// let x = Atomic::new(42);
+ ///
+ /// // Checks whether cmpxchg succeeded.
+ /// let success = x.cmpxchg(52, 64, Relaxed).is_ok();
+ /// # assert!(!success);
+ ///
+ /// // Checks whether cmpxchg failed.
+ /// let failure = x.cmpxchg(52, 64, Relaxed).is_err();
+ /// # assert!(failure);
+ ///
+    /// // Uses the old value on failure, possibly to retry the cmpxchg.
+ /// match x.cmpxchg(52, 64, Relaxed) {
+ /// Ok(_) => { },
+ /// Err(old) => {
+ /// // do something with `old`.
+ /// # assert_eq!(old, 42);
+ /// }
+ /// }
+ ///
+    /// // Uses the latest value regardless of success, same as atomic_cmpxchg() in C.
+ /// let latest = x.cmpxchg(42, 64, Full).unwrap_or_else(|old| old);
+ /// # assert_eq!(42, latest);
+ /// assert_eq!(64, x.load(Relaxed));
+ /// ```
+ ///
+ /// [`Relaxed`]: ordering::Relaxed
+ #[doc(alias(
+ "atomic_cmpxchg",
+ "atomic64_cmpxchg",
+ "atomic_try_cmpxchg",
+ "atomic64_try_cmpxchg",
+ "compare_exchange"
+ ))]
+ #[inline(always)]
+ pub fn cmpxchg<Ordering: ordering::Ordering>(
+ &self,
+ mut old: T,
+ new: T,
+ o: Ordering,
+ ) -> Result<T, T> {
+ // Note on code generation:
+ //
+ // try_cmpxchg() is used to implement cmpxchg(), and if the helper functions are inlined,
+        // the compiler is able to figure out that the branch is not needed if the users don't care
+ // about whether the operation succeeds or not. One exception is on x86, due to commit
+ // 44fe84459faf ("locking/atomic: Fix atomic_try_cmpxchg() semantics"), the
+ // atomic_try_cmpxchg() on x86 has a branch even if the caller doesn't care about the
+ // success of cmpxchg and only wants to use the old value. For example, for code like:
+ //
+ // let latest = x.cmpxchg(42, 64, Full).unwrap_or_else(|old| old);
+ //
+ // It will still generate code:
+ //
+ // movl $0x40, %ecx
+ // movl $0x34, %eax
+ // lock
+ // cmpxchgl %ecx, 0x4(%rsp)
+ // jne 1f
+ // 2:
+ // ...
+ // 1: movl %eax, %ecx
+ // jmp 2b
+ //
+ // This might be "fixed" by introducing a try_cmpxchg_exclusive() that knows the "*old"
+ // location in the C function is always safe to write.
+ if self.try_cmpxchg(&mut old, new, o) {
+ Ok(old)
+ } else {
+ Err(old)
+ }
+ }
+
+ /// Atomic compare and exchange and returns whether the operation succeeds.
+ ///
+ /// If `*self` == `old`, atomically updates `*self` to `new`. Otherwise, `*self` is not
+ /// modified, `*old` is updated to the current value of `*self`.
+ ///
+    /// The "Compare" and "Ordering" parts are the same as for [`Atomic::cmpxchg()`].
+    ///
+    /// Returns `true` if the cmpxchg succeeds, and `false` otherwise.
+ #[inline(always)]
+ fn try_cmpxchg<Ordering: ordering::Ordering>(&self, old: &mut T, new: T, _: Ordering) -> bool {
+ let mut tmp = into_repr(*old);
+ let new = into_repr(new);
+
+ // INVARIANT: `self.0` is a valid `T` after `atomic_try_cmpxchg*()` because `new` is
+ // transmutable to `T`.
+ let ret = {
+ match Ordering::TYPE {
+ OrderingType::Full => T::Repr::atomic_try_cmpxchg(&self.0, &mut tmp, new),
+ OrderingType::Acquire => {
+ T::Repr::atomic_try_cmpxchg_acquire(&self.0, &mut tmp, new)
+ }
+ OrderingType::Release => {
+ T::Repr::atomic_try_cmpxchg_release(&self.0, &mut tmp, new)
+ }
+ OrderingType::Relaxed => {
+ T::Repr::atomic_try_cmpxchg_relaxed(&self.0, &mut tmp, new)
+ }
+ }
+ };
+
+ // SAFETY: `tmp` comes from reading `*self`, which is a valid `T` per type invariants.
+ *old = unsafe { from_repr(tmp) };
+
+ ret
+ }
+}
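
A typical consumer of `cmpxchg()` is a retry loop that recomputes the new value from the freshly observed old value; a minimal sketch (not part of the patch):

```
use kernel::sync::atomic::{Atomic, Relaxed, Release};

/// Atomically doubles `x` if it is non-zero; returns the value that was observed.
fn double_if_nonzero(x: &Atomic<i32>) -> i32 {
    let mut cur = x.load(Relaxed);
    while cur != 0 {
        match x.cmpxchg(cur, cur * 2, Release) {
            // Success: `cur` was still the current value when the update happened.
            Ok(old) => return old,
            // Lost a race: retry with the freshly observed value.
            Err(old) => cur = old,
        }
    }
    cur
}
```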
+
+impl<T: AtomicType> Atomic<T>
+where
+ T::Repr: AtomicArithmeticOps,
+{
+ /// Atomic add.
+ ///
+ /// Atomically updates `*self` to `(*self).wrapping_add(v)`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use kernel::sync::atomic::{Atomic, Relaxed};
+ ///
+ /// let x = Atomic::new(42);
+ ///
+ /// assert_eq!(42, x.load(Relaxed));
+ ///
+ /// x.add(12, Relaxed);
+ ///
+ /// assert_eq!(54, x.load(Relaxed));
+ /// ```
+ #[inline(always)]
+ pub fn add<Rhs>(&self, v: Rhs, _: ordering::Relaxed)
+ where
+ T: AtomicAdd<Rhs>,
+ {
+ let v = T::rhs_into_delta(v);
+
+ // INVARIANT: `self.0` is a valid `T` after `atomic_add()` due to safety requirement of
+ // `AtomicAdd`.
+ T::Repr::atomic_add(&self.0, v);
+ }
+
+ /// Atomic fetch and add.
+ ///
+ /// Atomically updates `*self` to `(*self).wrapping_add(v)`, and returns the value of `*self`
+ /// before the update.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use kernel::sync::atomic::{Atomic, Acquire, Full, Relaxed};
+ ///
+ /// let x = Atomic::new(42);
+ ///
+ /// assert_eq!(42, x.load(Relaxed));
+ ///
+ /// assert_eq!(54, { x.fetch_add(12, Acquire); x.load(Relaxed) });
+ ///
+ /// let x = Atomic::new(42);
+ ///
+ /// assert_eq!(42, x.load(Relaxed));
+ ///
+ /// assert_eq!(54, { x.fetch_add(12, Full); x.load(Relaxed) } );
+ /// ```
+ #[inline(always)]
+ pub fn fetch_add<Rhs, Ordering: ordering::Ordering>(&self, v: Rhs, _: Ordering) -> T
+ where
+ T: AtomicAdd<Rhs>,
+ {
+ let v = T::rhs_into_delta(v);
+
+ // INVARIANT: `self.0` is a valid `T` after `atomic_fetch_add*()` due to safety requirement
+ // of `AtomicAdd`.
+ let ret = {
+ match Ordering::TYPE {
+ OrderingType::Full => T::Repr::atomic_fetch_add(&self.0, v),
+ OrderingType::Acquire => T::Repr::atomic_fetch_add_acquire(&self.0, v),
+ OrderingType::Release => T::Repr::atomic_fetch_add_release(&self.0, v),
+ OrderingType::Relaxed => T::Repr::atomic_fetch_add_relaxed(&self.0, v),
+ }
+ };
+
+ // SAFETY: `ret` comes from reading `self.0`, which is a valid `T` per type invariants.
+ unsafe { from_repr(ret) }
+ }
+}
diff --git a/rust/kernel/sync/atomic/internal.rs b/rust/kernel/sync/atomic/internal.rs
new file mode 100644
index 000000000000..6fdd8e59f45b
--- /dev/null
+++ b/rust/kernel/sync/atomic/internal.rs
@@ -0,0 +1,265 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Atomic internal implementations.
+//!
+//! Provides a 1:1 mapping to the C atomic operations.
+
+use crate::bindings;
+use crate::macros::paste;
+use core::cell::UnsafeCell;
+
+mod private {
+ /// Sealed trait marker to disable customized impls on atomic implementation traits.
+ pub trait Sealed {}
+}
+
+// `i32` and `i64` are the only supported atomic implementations.
+impl private::Sealed for i32 {}
+impl private::Sealed for i64 {}
+
+/// A marker trait for types that implement atomic operations with C side primitives.
+///
+/// This trait is sealed, and only types that map directly to the C side atomics should implement
+/// it:
+///
+/// - `i32` maps to `atomic_t`.
+/// - `i64` maps to `atomic64_t`.
+pub trait AtomicImpl: Sized + Send + Copy + private::Sealed {
+ /// The type of the delta in arithmetic or logical operations.
+ ///
+    /// For example, in `atomic_add(ptr, v)`, it's the type of `v`. Usually it's the same type as
+    /// [`Self`], but it may be different for the atomic pointer type.
+ type Delta;
+}
+
+// `atomic_t` implements atomic operations on `i32`.
+impl AtomicImpl for i32 {
+ type Delta = Self;
+}
+
+// `atomic64_t` implements atomic operations on `i64`.
+impl AtomicImpl for i64 {
+ type Delta = Self;
+}
+
+/// Atomic representation.
+#[repr(transparent)]
+pub struct AtomicRepr<T: AtomicImpl>(UnsafeCell<T>);
+
+impl<T: AtomicImpl> AtomicRepr<T> {
+ /// Creates a new atomic representation `T`.
+ pub const fn new(v: T) -> Self {
+ Self(UnsafeCell::new(v))
+ }
+
+ /// Returns a pointer to the underlying `T`.
+ ///
+ /// # Guarantees
+ ///
+ /// The returned pointer is valid and properly aligned (i.e. aligned to [`align_of::<T>()`]).
+ pub const fn as_ptr(&self) -> *mut T {
+ // GUARANTEE: `self.0` is an `UnsafeCell<T>`, therefore the pointer returned by `.get()`
+ // must be valid and properly aligned.
+ self.0.get()
+ }
+}
+
+// This macro generates the function signature with the given argument list and return type.
+macro_rules! declare_atomic_method {
+ (
+ $(#[doc=$doc:expr])*
+ $func:ident($($arg:ident : $arg_type:ty),*) $(-> $ret:ty)?
+ ) => {
+ paste!(
+ $(#[doc = $doc])*
+ fn [< atomic_ $func >]($($arg: $arg_type,)*) $(-> $ret)?;
+ );
+ };
+ (
+ $(#[doc=$doc:expr])*
+ $func:ident [$variant:ident $($rest:ident)*]($($arg_sig:tt)*) $(-> $ret:ty)?
+ ) => {
+ paste!(
+ declare_atomic_method!(
+ $(#[doc = $doc])*
+ [< $func _ $variant >]($($arg_sig)*) $(-> $ret)?
+ );
+ );
+
+ declare_atomic_method!(
+ $(#[doc = $doc])*
+ $func [$($rest)*]($($arg_sig)*) $(-> $ret)?
+ );
+ };
+ (
+ $(#[doc=$doc:expr])*
+ $func:ident []($($arg_sig:tt)*) $(-> $ret:ty)?
+ ) => {
+ declare_atomic_method!(
+ $(#[doc = $doc])*
+ $func($($arg_sig)*) $(-> $ret)?
+ );
+ }
+}
+
+// This macro generates the function implementation with the given argument list and return type,
+// replacing the "call(...)" expression with "$ctype _ $func" to call the real C function.
+macro_rules! impl_atomic_method {
+ (
+ ($ctype:ident) $func:ident($($arg:ident: $arg_type:ty),*) $(-> $ret:ty)? {
+ $unsafe:tt { call($($c_arg:expr),*) }
+ }
+ ) => {
+ paste!(
+ #[inline(always)]
+ fn [< atomic_ $func >]($($arg: $arg_type,)*) $(-> $ret)? {
+ // TODO: Ideally we want to use the SAFETY comments written at the macro invocation
+ // (e.g. in `declare_and_impl_atomic_methods!()`, however, since SAFETY comments
+ // are just comments, and they are not passed to macros as tokens, therefore we
+ // cannot use them here. One potential improvement is that if we support using
+ // attributes as an alternative for SAFETY comments, then we can use that for macro
+ // generating code.
+ //
+ // SAFETY: specified on macro invocation.
+ $unsafe { bindings::[< $ctype _ $func >]($($c_arg,)*) }
+ }
+ );
+ };
+ (
+ ($ctype:ident) $func:ident[$variant:ident $($rest:ident)*]($($arg_sig:tt)*) $(-> $ret:ty)? {
+ $unsafe:tt { call($($arg:tt)*) }
+ }
+ ) => {
+ paste!(
+ impl_atomic_method!(
+ ($ctype) [< $func _ $variant >]($($arg_sig)*) $( -> $ret)? {
+ $unsafe { call($($arg)*) }
+ }
+ );
+ );
+ impl_atomic_method!(
+ ($ctype) $func [$($rest)*]($($arg_sig)*) $( -> $ret)? {
+ $unsafe { call($($arg)*) }
+ }
+ );
+ };
+ (
+ ($ctype:ident) $func:ident[]($($arg_sig:tt)*) $( -> $ret:ty)? {
+ $unsafe:tt { call($($arg:tt)*) }
+ }
+ ) => {
+ impl_atomic_method!(
+ ($ctype) $func($($arg_sig)*) $(-> $ret)? {
+ $unsafe { call($($arg)*) }
+ }
+ );
+ }
+}
+
+// Declares the `$ops` trait with the given methods and implements the trait for `i32` and `i64`.
+macro_rules! declare_and_impl_atomic_methods {
+ ($(#[$attr:meta])* $pub:vis trait $ops:ident {
+ $(
+ $(#[doc=$doc:expr])*
+ fn $func:ident [$($variant:ident),*]($($arg_sig:tt)*) $( -> $ret:ty)? {
+ $unsafe:tt { bindings::#call($($arg:tt)*) }
+ }
+ )*
+ }) => {
+ $(#[$attr])*
+ $pub trait $ops: AtomicImpl {
+ $(
+ declare_atomic_method!(
+ $(#[doc=$doc])*
+ $func[$($variant)*]($($arg_sig)*) $(-> $ret)?
+ );
+ )*
+ }
+
+ impl $ops for i32 {
+ $(
+ impl_atomic_method!(
+ (atomic) $func[$($variant)*]($($arg_sig)*) $(-> $ret)? {
+ $unsafe { call($($arg)*) }
+ }
+ );
+ )*
+ }
+
+ impl $ops for i64 {
+ $(
+ impl_atomic_method!(
+ (atomic64) $func[$($variant)*]($($arg_sig)*) $(-> $ret)? {
+ $unsafe { call($($arg)*) }
+ }
+ );
+ )*
+ }
+ }
+}
+
+declare_and_impl_atomic_methods!(
+ /// Basic atomic operations
+ pub trait AtomicBasicOps {
+ /// Atomic read (load).
+ fn read[acquire](a: &AtomicRepr<Self>) -> Self {
+ // SAFETY: `a.as_ptr()` is valid and properly aligned.
+ unsafe { bindings::#call(a.as_ptr().cast()) }
+ }
+
+ /// Atomic set (store).
+ fn set[release](a: &AtomicRepr<Self>, v: Self) {
+ // SAFETY: `a.as_ptr()` is valid and properly aligned.
+ unsafe { bindings::#call(a.as_ptr().cast(), v) }
+ }
+ }
+);
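
For readers unfamiliar with the macro machinery, the `AtomicBasicOps` invocation above roughly expands to the following. This is a hand-written, slightly simplified sketch, not meant to be compiled alongside the macro; the `i64` impl is analogous and calls the `atomic64_*` bindings:

```
// Sketch of the expansion: one trait method per base name and per `[variant]`.
pub trait AtomicBasicOps: AtomicImpl {
    fn atomic_read(a: &AtomicRepr<Self>) -> Self;
    fn atomic_read_acquire(a: &AtomicRepr<Self>) -> Self;
    fn atomic_set(a: &AtomicRepr<Self>, v: Self);
    fn atomic_set_release(a: &AtomicRepr<Self>, v: Self);
}

impl AtomicBasicOps for i32 {
    #[inline(always)]
    fn atomic_read(a: &AtomicRepr<Self>) -> Self {
        // SAFETY: `a.as_ptr()` is valid and properly aligned.
        unsafe { bindings::atomic_read(a.as_ptr().cast()) }
    }

    #[inline(always)]
    fn atomic_read_acquire(a: &AtomicRepr<Self>) -> Self {
        // SAFETY: `a.as_ptr()` is valid and properly aligned.
        unsafe { bindings::atomic_read_acquire(a.as_ptr().cast()) }
    }

    #[inline(always)]
    fn atomic_set(a: &AtomicRepr<Self>, v: Self) {
        // SAFETY: `a.as_ptr()` is valid and properly aligned.
        unsafe { bindings::atomic_set(a.as_ptr().cast(), v) }
    }

    #[inline(always)]
    fn atomic_set_release(a: &AtomicRepr<Self>, v: Self) {
        // SAFETY: `a.as_ptr()` is valid and properly aligned.
        unsafe { bindings::atomic_set_release(a.as_ptr().cast(), v) }
    }
}
```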
+
+declare_and_impl_atomic_methods!(
+ /// Exchange and compare-and-exchange atomic operations
+ pub trait AtomicExchangeOps {
+ /// Atomic exchange.
+ ///
+ /// Atomically updates `*a` to `v` and returns the old value.
+ fn xchg[acquire, release, relaxed](a: &AtomicRepr<Self>, v: Self) -> Self {
+ // SAFETY: `a.as_ptr()` is valid and properly aligned.
+ unsafe { bindings::#call(a.as_ptr().cast(), v) }
+ }
+
+ /// Atomic compare and exchange.
+ ///
+ /// If `*a` == `*old`, atomically updates `*a` to `new`. Otherwise, `*a` is not
+ /// modified, `*old` is updated to the current value of `*a`.
+ ///
+        /// Returns `true` if the update of `*a` occurred, and `false` otherwise.
+ fn try_cmpxchg[acquire, release, relaxed](
+ a: &AtomicRepr<Self>, old: &mut Self, new: Self
+ ) -> bool {
+ // SAFETY: `a.as_ptr()` is valid and properly aligned. `core::ptr::from_mut(old)`
+ // is valid and properly aligned.
+ unsafe { bindings::#call(a.as_ptr().cast(), core::ptr::from_mut(old), new) }
+ }
+ }
+);
+
+declare_and_impl_atomic_methods!(
+ /// Atomic arithmetic operations
+ pub trait AtomicArithmeticOps {
+ /// Atomic add (wrapping).
+ ///
+ /// Atomically updates `*a` to `(*a).wrapping_add(v)`.
+ fn add[](a: &AtomicRepr<Self>, v: Self::Delta) {
+ // SAFETY: `a.as_ptr()` is valid and properly aligned.
+ unsafe { bindings::#call(v, a.as_ptr().cast()) }
+ }
+
+ /// Atomic fetch and add (wrapping).
+ ///
+ /// Atomically updates `*a` to `(*a).wrapping_add(v)`, and returns the value of `*a`
+ /// before the update.
+ fn fetch_add[acquire, release, relaxed](a: &AtomicRepr<Self>, v: Self::Delta) -> Self {
+ // SAFETY: `a.as_ptr()` is valid and properly aligned.
+ unsafe { bindings::#call(v, a.as_ptr().cast()) }
+ }
+ }
+);
diff --git a/rust/kernel/sync/atomic/ordering.rs b/rust/kernel/sync/atomic/ordering.rs
new file mode 100644
index 000000000000..3f103aa8db99
--- /dev/null
+++ b/rust/kernel/sync/atomic/ordering.rs
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Memory orderings.
+//!
+//! The semantics of these orderings follows the [`LKMM`] definitions and rules.
+//!
+//! - [`Acquire`] provides ordering between the load part of the annotated operation and all the
+//! following memory accesses, and if there is a store part, the store part has the [`Relaxed`]
+//! ordering.
+//! - [`Release`] provides ordering between all the preceding memory accesses and the store part of
+//! the annotated operation, and if there is a load part, the load part has the [`Relaxed`]
+//! ordering.
+//! - [`Full`] means "fully-ordered", that is:
+//! - It provides ordering between all the preceding memory accesses and the annotated operation.
+//! - It provides ordering between the annotated operation and all the following memory accesses.
+//! - It provides ordering between all the preceding memory accesses and all the following memory
+//! accesses.
+//! - All the orderings are the same strength as a full memory barrier (i.e. `smp_mb()`).
+//! - [`Relaxed`] provides no ordering except the dependency orderings. Dependency orderings are
+//! described in "DEPENDENCY RELATIONS" in [`LKMM`]'s [`explanation`].
+//!
+//! [`LKMM`]: srctree/tools/memory-model/
+//! [`explanation`]: srctree/tools/memory-model/Documentation/explanation.txt
+
+/// The annotation type for relaxed memory ordering, for the description of relaxed memory
+/// ordering, see [module-level documentation].
+///
+/// [module-level documentation]: crate::sync::atomic::ordering
+pub struct Relaxed;
+
+/// The annotation type for acquire memory ordering, for the description of acquire memory
+/// ordering, see [module-level documentation].
+///
+/// [module-level documentation]: crate::sync::atomic::ordering
+pub struct Acquire;
+
+/// The annotation type for release memory ordering, for the description of release memory
+/// ordering, see [module-level documentation].
+///
+/// [module-level documentation]: crate::sync::atomic::ordering
+pub struct Release;
+
+/// The annotation type for fully-ordered memory ordering, for the description of fully-ordered
+/// memory ordering, see [module-level documentation].
+///
+/// [module-level documentation]: crate::sync::atomic::ordering
+pub struct Full;
+
+/// Describes the exact memory ordering.
+#[doc(hidden)]
+pub enum OrderingType {
+ /// Relaxed ordering.
+ Relaxed,
+ /// Acquire ordering.
+ Acquire,
+ /// Release ordering.
+ Release,
+ /// Fully-ordered.
+ Full,
+}
+
+mod internal {
+ /// Sealed trait, can be only implemented inside atomic mod.
+ pub trait Sealed {}
+
+ impl Sealed for super::Relaxed {}
+ impl Sealed for super::Acquire {}
+ impl Sealed for super::Release {}
+ impl Sealed for super::Full {}
+}
+
+/// The trait bound for annotating operations that support any ordering.
+pub trait Ordering: internal::Sealed {
+ /// Describes the exact memory ordering.
+ const TYPE: OrderingType;
+}
+
+impl Ordering for Relaxed {
+ const TYPE: OrderingType = OrderingType::Relaxed;
+}
+
+impl Ordering for Acquire {
+ const TYPE: OrderingType = OrderingType::Acquire;
+}
+
+impl Ordering for Release {
+ const TYPE: OrderingType = OrderingType::Release;
+}
+
+impl Ordering for Full {
+ const TYPE: OrderingType = OrderingType::Full;
+}
+
+/// The trait bound for operations that only support acquire or relaxed ordering.
+pub trait AcquireOrRelaxed: Ordering {}
+
+impl AcquireOrRelaxed for Acquire {}
+impl AcquireOrRelaxed for Relaxed {}
+
+/// The trait bound for operations that only support release or relaxed ordering.
+pub trait ReleaseOrRelaxed: Ordering {}
+
+impl ReleaseOrRelaxed for Release {}
+impl ReleaseOrRelaxed for Relaxed {}
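
These sealed marker traits let APIs state, in the type system, which orderings an operation supports. A small sketch (not part of the patch) of a helper constrained to load orderings:

```
use kernel::sync::atomic::{ordering, Acquire, Atomic, Relaxed};

/// Reads a flag; the bound permits only orderings that make sense for a pure load.
fn read_flag<O: ordering::AcquireOrRelaxed>(flag: &Atomic<i32>, o: O) -> bool {
    flag.load(o) != 0
}

fn example(flag: &Atomic<i32>) {
    let _ = read_flag(flag, Relaxed);
    let _ = read_flag(flag, Acquire);
    // `read_flag(flag, Release)` would not compile: `Release` does not
    // implement `AcquireOrRelaxed`.
}
```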
diff --git a/rust/kernel/sync/atomic/predefine.rs b/rust/kernel/sync/atomic/predefine.rs
new file mode 100644
index 000000000000..45a17985cda4
--- /dev/null
+++ b/rust/kernel/sync/atomic/predefine.rs
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Pre-defined atomic types.
+
+use crate::static_assert;
+use core::mem::{align_of, size_of};
+
+// SAFETY: `i32` has the same size and alignment as itself, and is round-trip transmutable to
+// itself.
+unsafe impl super::AtomicType for i32 {
+ type Repr = i32;
+}
+
+// SAFETY: The wrapping add result of two `i32`s is a valid `i32`.
+unsafe impl super::AtomicAdd<i32> for i32 {
+ fn rhs_into_delta(rhs: i32) -> i32 {
+ rhs
+ }
+}
+
+// SAFETY: `i64` has the same size and alignment as itself, and is round-trip transmutable to
+// itself.
+unsafe impl super::AtomicType for i64 {
+ type Repr = i64;
+}
+
+// SAFETY: The wrapping add result of two `i64`s is a valid `i64`.
+unsafe impl super::AtomicAdd<i64> for i64 {
+ fn rhs_into_delta(rhs: i64) -> i64 {
+ rhs
+ }
+}
+
+// Defines an internal type that always maps to the integer type which has the same size and
+// alignment as `isize` and `usize`; `isize` and `usize` are always bi-directionally transmutable
+// to `isize_atomic_repr`, which also always implements `AtomicImpl`.
+#[allow(non_camel_case_types)]
+#[cfg(not(CONFIG_64BIT))]
+type isize_atomic_repr = i32;
+#[allow(non_camel_case_types)]
+#[cfg(CONFIG_64BIT)]
+type isize_atomic_repr = i64;
+
+// Ensure size and alignment requirements are checked.
+static_assert!(size_of::<isize>() == size_of::<isize_atomic_repr>());
+static_assert!(align_of::<isize>() == align_of::<isize_atomic_repr>());
+static_assert!(size_of::<usize>() == size_of::<isize_atomic_repr>());
+static_assert!(align_of::<usize>() == align_of::<isize_atomic_repr>());
+
+// SAFETY: `isize` has the same size and alignment as `isize_atomic_repr`, and is round-trip
+// transmutable to `isize_atomic_repr`.
+unsafe impl super::AtomicType for isize {
+ type Repr = isize_atomic_repr;
+}
+
+// SAFETY: The wrapping add result of two `isize_atomic_repr`s is a valid `isize`.
+unsafe impl super::AtomicAdd<isize> for isize {
+ fn rhs_into_delta(rhs: isize) -> isize_atomic_repr {
+ rhs as isize_atomic_repr
+ }
+}
+
+// SAFETY: `u32` and `i32` have the same size and alignment, and `u32` is round-trip transmutable to
+// `i32`.
+unsafe impl super::AtomicType for u32 {
+ type Repr = i32;
+}
+
+// SAFETY: The wrapping add result of two `i32`s is a valid `u32`.
+unsafe impl super::AtomicAdd<u32> for u32 {
+ fn rhs_into_delta(rhs: u32) -> i32 {
+ rhs as i32
+ }
+}
+
+// SAFETY: `u64` and `i64` have the same size and alignment, and `u64` is round-trip transmutable to
+// `i64`.
+unsafe impl super::AtomicType for u64 {
+ type Repr = i64;
+}
+
+// SAFETY: The wrapping add result of two `i64`s is a valid `u64`.
+unsafe impl super::AtomicAdd<u64> for u64 {
+ fn rhs_into_delta(rhs: u64) -> i64 {
+ rhs as i64
+ }
+}
+
+// SAFETY: `usize` has the same size and alignment as `isize_atomic_repr`, and is round-trip
+// transmutable to `isize_atomic_repr`.
+unsafe impl super::AtomicType for usize {
+ type Repr = isize_atomic_repr;
+}
+
+// SAFETY: The wrapping add result of two `isize_atomic_repr`s is a valid `usize`.
+unsafe impl super::AtomicAdd<usize> for usize {
+ fn rhs_into_delta(rhs: usize) -> isize_atomic_repr {
+ rhs as isize_atomic_repr
+ }
+}
+
+use crate::macros::kunit_tests;
+
+#[kunit_tests(rust_atomics)]
+mod tests {
+ use super::super::*;
+
+    // Calls `$fn($val)` once with `$val` typed as each `$type`.
+ macro_rules! for_each_type {
+ ($val:literal in [$($type:ty),*] $fn:expr) => {
+ $({
+ let v: $type = $val;
+
+ $fn(v);
+ })*
+ }
+ }
+
+ #[test]
+ fn atomic_basic_tests() {
+ for_each_type!(42 in [i32, i64, u32, u64, isize, usize] |v| {
+ let x = Atomic::new(v);
+
+ assert_eq!(v, x.load(Relaxed));
+ });
+ }
+
+ #[test]
+ fn atomic_xchg_tests() {
+ for_each_type!(42 in [i32, i64, u32, u64, isize, usize] |v| {
+ let x = Atomic::new(v);
+
+ let old = v;
+ let new = v + 1;
+
+ assert_eq!(old, x.xchg(new, Full));
+ assert_eq!(new, x.load(Relaxed));
+ });
+ }
+
+ #[test]
+ fn atomic_cmpxchg_tests() {
+ for_each_type!(42 in [i32, i64, u32, u64, isize, usize] |v| {
+ let x = Atomic::new(v);
+
+ let old = v;
+ let new = v + 1;
+
+ assert_eq!(Err(old), x.cmpxchg(new, new, Full));
+ assert_eq!(old, x.load(Relaxed));
+ assert_eq!(Ok(old), x.cmpxchg(old, new, Relaxed));
+ assert_eq!(new, x.load(Relaxed));
+ });
+ }
+
+ #[test]
+ fn atomic_arithmetic_tests() {
+ for_each_type!(42 in [i32, i64, u32, u64, isize, usize] |v| {
+ let x = Atomic::new(v);
+
+ assert_eq!(v, x.fetch_add(12, Full));
+ assert_eq!(v + 12, x.load(Relaxed));
+
+ x.add(13, Relaxed);
+
+ assert_eq!(v + 25, x.load(Relaxed));
+ });
+ }
+}
diff --git a/rust/kernel/sync/barrier.rs b/rust/kernel/sync/barrier.rs
new file mode 100644
index 000000000000..8f2d435fcd94
--- /dev/null
+++ b/rust/kernel/sync/barrier.rs
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Memory barriers.
+//!
+//! These primitives have the same semantics as their C counterparts; the precise definitions of
+//! the semantics can be found at [`LKMM`].
+//!
+//! [`LKMM`]: srctree/tools/memory-model/
+
+/// A compiler barrier.
+///
+/// A barrier that prevents the compiler from reordering memory accesses across the barrier.
+#[inline(always)]
+pub(crate) fn barrier() {
+ // By default, Rust inline asms are treated as being able to access any memory or flags, hence
+ // it suffices as a compiler barrier.
+ //
+ // SAFETY: An empty asm block.
+ unsafe { core::arch::asm!("") };
+}
+
+/// A full memory barrier.
+///
+/// A barrier that prevents the compiler and the CPU from reordering memory accesses across the barrier.
+#[inline(always)]
+pub fn smp_mb() {
+ if cfg!(CONFIG_SMP) {
+ // SAFETY: `smp_mb()` is safe to call.
+ unsafe { bindings::smp_mb() };
+ } else {
+ barrier();
+ }
+}
+
+/// A write-write memory barrier.
+///
+/// A barrier that prevents the compiler and the CPU from reordering memory write accesses across
+/// the barrier.
+#[inline(always)]
+pub fn smp_wmb() {
+ if cfg!(CONFIG_SMP) {
+ // SAFETY: `smp_wmb()` is safe to call.
+ unsafe { bindings::smp_wmb() };
+ } else {
+ barrier();
+ }
+}
+
+/// A read-read memory barrier.
+///
+/// A barrier that prevents the compiler and the CPU from reordering memory read accesses across
+/// the barrier.
+#[inline(always)]
+pub fn smp_rmb() {
+ if cfg!(CONFIG_SMP) {
+ // SAFETY: `smp_rmb()` is safe to call.
+ unsafe { bindings::smp_rmb() };
+ } else {
+ barrier();
+ }
+}
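
The write/read barrier pair is typically used for message passing: order the payload store before the flag store on one side, and the flag load before the payload load on the other. A minimal sketch (not part of the patch), assuming the module is exported as `kernel::sync::barrier`:

```
use kernel::sync::atomic::{Atomic, Relaxed};
use kernel::sync::barrier::{smp_rmb, smp_wmb};

struct Channel {
    data: Atomic<i32>,
    ready: Atomic<i32>,
}

/// Publisher: write the payload, then signal readiness.
fn publish(c: &Channel, v: i32) {
    c.data.store(v, Relaxed);
    // Order the payload store before the `ready` store.
    smp_wmb();
    c.ready.store(1, Relaxed);
}

/// Consumer: read the payload only once it has been published.
fn consume(c: &Channel) -> Option<i32> {
    if c.ready.load(Relaxed) == 0 {
        return None;
    }
    // Order the `ready` load before the payload load.
    smp_rmb();
    Some(c.data.load(Relaxed))
}
```

The same pairing could also be expressed with `store(.., Release)` and `load(Acquire)` on `ready`; the explicit barriers mirror the C idiom.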
diff --git a/rust/kernel/sync/condvar.rs b/rust/kernel/sync/condvar.rs
index caebf03f553b..c6ec64295c9f 100644
--- a/rust/kernel/sync/condvar.rs
+++ b/rust/kernel/sync/condvar.rs
@@ -216,6 +216,7 @@ impl CondVar {
/// This method behaves like `notify_one`, except that it hints to the scheduler that the
/// current thread is about to go to sleep, so it should schedule the target thread on the same
/// CPU.
+ #[inline]
pub fn notify_sync(&self) {
// SAFETY: `wait_queue_head` points to valid memory.
unsafe { bindings::__wake_up_sync(self.wait_queue_head.get(), TASK_NORMAL) };
@@ -225,6 +226,7 @@ impl CondVar {
///
/// This is not 'sticky' in the sense that if no thread is waiting, the notification is lost
/// completely (as opposed to automatically waking up the next waiter).
+ #[inline]
pub fn notify_one(&self) {
self.notify(1);
}
@@ -233,6 +235,7 @@ impl CondVar {
///
/// This is not 'sticky' in the sense that if no thread is waiting, the notification is lost
/// completely (as opposed to automatically waking up the next waiter).
+ #[inline]
pub fn notify_all(&self) {
self.notify(0);
}
diff --git a/rust/kernel/sync/lock.rs b/rust/kernel/sync/lock.rs
index e82fa5be289c..27202beef90c 100644
--- a/rust/kernel/sync/lock.rs
+++ b/rust/kernel/sync/lock.rs
@@ -175,6 +175,8 @@ impl<T: ?Sized, B: Backend> Lock<T, B> {
/// Tries to acquire the lock.
///
/// Returns a guard that can be used to access the data protected by the lock if successful.
+ // `Option<T>` is not `#[must_use]` even if `T` is, thus the attribute is needed here.
+ #[must_use = "if unused, the lock will be immediately unlocked"]
pub fn try_lock(&self) -> Option<Guard<'_, T, B>> {
// SAFETY: The constructor of the type calls `init`, so the existence of the object proves
// that `init` was called.
diff --git a/rust/kernel/sync/poll.rs b/rust/kernel/sync/poll.rs
index d7e6e59e124b..0ec985d560c8 100644
--- a/rust/kernel/sync/poll.rs
+++ b/rust/kernel/sync/poll.rs
@@ -9,9 +9,8 @@ use crate::{
fs::File,
prelude::*,
sync::{CondVar, LockClassKey},
- types::Opaque,
};
-use core::ops::Deref;
+use core::{marker::PhantomData, ops::Deref};
/// Creates a [`PollCondVar`] initialiser with the given name and a newly-created lock class.
#[macro_export]
@@ -23,58 +22,43 @@ macro_rules! new_poll_condvar {
};
}
-/// Wraps the kernel's `struct poll_table`.
+/// Wraps the kernel's `poll_table`.
///
/// # Invariants
///
-/// This struct contains a valid `struct poll_table`.
-///
-/// For a `struct poll_table` to be valid, its `_qproc` function must follow the safety
-/// requirements of `_qproc` functions:
-///
-/// * The `_qproc` function is given permission to enqueue a waiter to the provided `poll_table`
-/// during the call. Once the waiter is removed and an rcu grace period has passed, it must no
-/// longer access the `wait_queue_head`.
+/// The pointer must be null or reference a valid `poll_table`.
#[repr(transparent)]
-pub struct PollTable(Opaque<bindings::poll_table>);
+pub struct PollTable<'a> {
+ table: *mut bindings::poll_table,
+ _lifetime: PhantomData<&'a bindings::poll_table>,
+}
-impl PollTable {
- /// Creates a reference to a [`PollTable`] from a valid pointer.
+impl<'a> PollTable<'a> {
+ /// Creates a [`PollTable`] from a valid pointer.
///
/// # Safety
///
- /// The caller must ensure that for the duration of `'a`, the pointer will point at a valid poll
- /// table (as defined in the type invariants).
- ///
- /// The caller must also ensure that the `poll_table` is only accessed via the returned
- /// reference for the duration of `'a`.
- pub unsafe fn from_ptr<'a>(ptr: *mut bindings::poll_table) -> &'a mut PollTable {
- // SAFETY: The safety requirements guarantee the validity of the dereference, while the
- // `PollTable` type being transparent makes the cast ok.
- unsafe { &mut *ptr.cast() }
- }
-
- fn get_qproc(&self) -> bindings::poll_queue_proc {
- let ptr = self.0.get();
- // SAFETY: The `ptr` is valid because it originates from a reference, and the `_qproc`
- // field is not modified concurrently with this call since we have an immutable reference.
- unsafe { (*ptr)._qproc }
+ /// The pointer must be null or reference a valid `poll_table` for the duration of `'a`.
+ pub unsafe fn from_raw(table: *mut bindings::poll_table) -> Self {
+ // INVARIANTS: The safety requirements are the same as the struct invariants.
+ PollTable {
+ table,
+ _lifetime: PhantomData,
+ }
}
/// Register this [`PollTable`] with the provided [`PollCondVar`], so that it can be notified
/// using the condition variable.
- pub fn register_wait(&mut self, file: &File, cv: &PollCondVar) {
- if let Some(qproc) = self.get_qproc() {
- // SAFETY: The pointers to `file` and `self` need to be valid for the duration of this
- // call to `qproc`, which they are because they are references.
- //
- // The `cv.wait_queue_head` pointer must be valid until an rcu grace period after the
- // waiter is removed. The `PollCondVar` is pinned, so before `cv.wait_queue_head` can
- // be destroyed, the destructor must run. That destructor first removes all waiters,
- // and then waits for an rcu grace period. Therefore, `cv.wait_queue_head` is valid for
- // long enough.
- unsafe { qproc(file.as_ptr() as _, cv.wait_queue_head.get(), self.0.get()) };
- }
+ pub fn register_wait(&self, file: &File, cv: &PollCondVar) {
+ // SAFETY:
+ // * `file.as_ptr()` references a valid file for the duration of this call.
+ // * `self.table` is null or references a valid poll_table for the duration of this call.
+ // * Since `PollCondVar` is pinned, its destructor is guaranteed to run before the memory
+ // containing `cv.wait_queue_head` is invalidated. Since the destructor clears all
+ // waiters and then waits for an rcu grace period, it's guaranteed that
+ // `cv.wait_queue_head` remains valid for at least an rcu grace period after the removal
+ // of the last waiter.
+ unsafe { bindings::poll_wait(file.as_ptr(), cv.wait_queue_head.get(), self.table) }
}
}
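
For illustration, a hedged sketch of how poll logic might use the reworked by-value `PollTable`; the `do_poll` signature here is hypothetical, and a real `poll` callback would return an `EPOLL*` mask rather than a `bool`:

```
use kernel::fs::File;
use kernel::sync::poll::{PollCondVar, PollTable};

/// Hypothetical poll logic: hook the poller up to `cv`, then report readiness.
fn do_poll(file: &File, table: PollTable<'_>, cv: &PollCondVar, data_ready: bool) -> bool {
    // After registration, `cv.notify_one()`/`cv.notify_all()` will wake the poller.
    table.register_wait(file, cv);
    data_ready
}
```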
@@ -107,6 +91,7 @@ impl Deref for PollCondVar {
#[pinned_drop]
impl PinnedDrop for PollCondVar {
+ #[inline]
fn drop(self: Pin<&mut Self>) {
// Clear anything registered using `register_wait`.
//
diff --git a/rust/kernel/sync/refcount.rs b/rust/kernel/sync/refcount.rs
new file mode 100644
index 000000000000..19236a5bccde
--- /dev/null
+++ b/rust/kernel/sync/refcount.rs
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Atomic reference counting.
+//!
+//! C header: [`include/linux/refcount.h`](srctree/include/linux/refcount.h)
+
+use crate::build_assert;
+use crate::sync::atomic::Atomic;
+use crate::types::Opaque;
+
+/// Atomic reference counter.
+///
+/// This type is conceptually an atomic integer, but provides saturation semantics compared to
+/// normal atomic integers. Values in the negative range when viewed as a signed integer are
+/// saturation (bad) values. For details about the saturation semantics, please refer to the top
+/// of [`include/linux/refcount.h`](srctree/include/linux/refcount.h).
+///
+/// Wraps the kernel's C `refcount_t`.
+#[repr(transparent)]
+pub struct Refcount(Opaque<bindings::refcount_t>);
+
+impl Refcount {
+ /// Construct a new [`Refcount`] from an initial value.
+ ///
+ /// The initial value should be non-saturated.
+ #[inline]
+ pub fn new(value: i32) -> Self {
+ build_assert!(value >= 0, "initial value saturated");
+ // SAFETY: There are no safety requirements for this FFI call.
+ Self(Opaque::new(unsafe { bindings::REFCOUNT_INIT(value) }))
+ }
+
+ #[inline]
+ fn as_ptr(&self) -> *mut bindings::refcount_t {
+ self.0.get()
+ }
+
+ /// Get the underlying atomic counter that backs the refcount.
+ ///
+ /// NOTE: Usage of this function is discouraged as it can circumvent the protections offered by
+ /// `refcount.h`. If there is no way to achieve the result using APIs in `refcount.h`, then
+ /// this function can be used. Otherwise consider adding a binding for the required API.
+ #[inline]
+ pub fn as_atomic(&self) -> &Atomic<i32> {
+ let ptr = self.0.get().cast();
+ // SAFETY: `refcount_t` is a transparent wrapper of `atomic_t`, which is an atomic 32-bit
+ // integer that is layout-wise compatible with `Atomic<i32>`. All values are valid for
+ // `refcount_t`, despite some of the values being considered saturated and "bad".
+ unsafe { &*ptr }
+ }
+
+ /// Set a refcount's value.
+ #[inline]
+ pub fn set(&self, value: i32) {
+ // SAFETY: `self.as_ptr()` is valid.
+ unsafe { bindings::refcount_set(self.as_ptr(), value) }
+ }
+
+ /// Increment a refcount.
+ ///
+    /// It will saturate and `WARN` on overflow. It will also `WARN` if the refcount is 0, as this
+ /// represents a possible use-after-free condition.
+ ///
+    /// Provides no memory ordering; it is assumed that the caller already holds a reference on
+    /// the object.
+ #[inline]
+ pub fn inc(&self) {
+        // SAFETY: `self.as_ptr()` is valid.
+ unsafe { bindings::refcount_inc(self.as_ptr()) }
+ }
+
+ /// Decrement a refcount.
+ ///
+ /// It will `WARN` on underflow and fail to decrement when saturated.
+ ///
+ /// Provides release memory ordering, such that prior loads and stores are done
+ /// before.
+ #[inline]
+ pub fn dec(&self) {
+ // SAFETY: `self.as_ptr()` is valid.
+ unsafe { bindings::refcount_dec(self.as_ptr()) }
+ }
+
+ /// Decrement a refcount and test if it is 0.
+ ///
+ /// It will `WARN` on underflow and fail to decrement when saturated.
+ ///
+ /// Provides release memory ordering, such that prior loads and stores are done
+ /// before, and provides an acquire ordering on success such that memory deallocation
+ /// must come after.
+ ///
+ /// Returns true if the resulting refcount is 0, false otherwise.
+ ///
+ /// # Notes
+ ///
+ /// A common pattern of using `Refcount` is to free memory when the reference count reaches
+ /// zero. This means that the reference to `Refcount` could become invalid after calling this
+ /// function. This is fine as long as the reference to `Refcount` is no longer used when this
+ /// function returns `false`. It is not necessary to use raw pointers in this scenario, see
+ /// <https://github.com/rust-lang/rust/issues/55005>.
+ #[inline]
+ #[must_use = "use `dec` instead if you do not need to test if it is 0"]
+ pub fn dec_and_test(&self) -> bool {
+ // SAFETY: `self.as_ptr()` is valid.
+ unsafe { bindings::refcount_dec_and_test(self.as_ptr()) }
+ }
+}
+
+// SAFETY: `refcount_t` is thread-safe.
+unsafe impl Send for Refcount {}
+
+// SAFETY: `refcount_t` is thread-safe.
+unsafe impl Sync for Refcount {}
diff --git a/rust/kernel/task.rs b/rust/kernel/task.rs
index 927413d85484..49fad6de0674 100644
--- a/rust/kernel/task.rs
+++ b/rust/kernel/task.rs
@@ -9,7 +9,8 @@ use crate::{
ffi::{c_int, c_long, c_uint},
mm::MmWithUser,
pid_namespace::PidNamespace,
- types::{ARef, NotThreadSafe, Opaque},
+ sync::aref::ARef,
+ types::{NotThreadSafe, Opaque},
};
use core::{
cmp::{Eq, PartialEq},
@@ -76,7 +77,7 @@ macro_rules! current {
/// incremented when creating `State` and decremented when it is dropped:
///
/// ```
-/// use kernel::{task::Task, types::ARef};
+/// use kernel::{task::Task, sync::aref::ARef};
///
/// struct State {
/// creator: ARef<Task>,
@@ -173,6 +174,7 @@ impl Task {
/// Callers must ensure that the returned object is only used to access a [`CurrentTask`]
/// within the task context that was active when this function was called. For more details,
/// see the invariants section for [`CurrentTask`].
+ #[inline]
pub unsafe fn current() -> impl Deref<Target = CurrentTask> {
struct TaskRef {
task: *const CurrentTask,
@@ -222,24 +224,28 @@ impl Task {
}
/// Returns the UID of the given task.
+ #[inline]
pub fn uid(&self) -> Kuid {
// SAFETY: It's always safe to call `task_uid` on a valid task.
Kuid::from_raw(unsafe { bindings::task_uid(self.as_ptr()) })
}
/// Returns the effective UID of the given task.
+ #[inline]
pub fn euid(&self) -> Kuid {
// SAFETY: It's always safe to call `task_euid` on a valid task.
Kuid::from_raw(unsafe { bindings::task_euid(self.as_ptr()) })
}
/// Determines whether the given task has pending signals.
+ #[inline]
pub fn signal_pending(&self) -> bool {
// SAFETY: It's always safe to call `signal_pending` on a valid task.
unsafe { bindings::signal_pending(self.as_ptr()) != 0 }
}
/// Returns task's pid namespace with elevated reference count
+ #[inline]
pub fn get_pid_ns(&self) -> Option<ARef<PidNamespace>> {
// SAFETY: By the type invariant, we know that `self.0` is valid.
let ptr = unsafe { bindings::task_get_pid_ns(self.as_ptr()) };
@@ -255,6 +261,7 @@ impl Task {
/// Returns the given task's pid in the provided pid namespace.
#[doc(alias = "task_tgid_nr_ns")]
+ #[inline]
pub fn tgid_nr_ns(&self, pidns: Option<&PidNamespace>) -> Pid {
let pidns = match pidns {
Some(pidns) => pidns.as_ptr(),
@@ -268,6 +275,7 @@ impl Task {
}
/// Wakes up the task.
+ #[inline]
pub fn wake_up(&self) {
// SAFETY: It's always safe to call `wake_up_process` on a valid task, even if the task
// running.
@@ -340,12 +348,14 @@ impl CurrentTask {
}
// SAFETY: The type invariants guarantee that `Task` is always refcounted.
-unsafe impl crate::types::AlwaysRefCounted for Task {
+unsafe impl crate::sync::aref::AlwaysRefCounted for Task {
+ #[inline]
fn inc_ref(&self) {
// SAFETY: The existence of a shared reference means that the refcount is nonzero.
unsafe { bindings::get_task_struct(self.as_ptr()) };
}
+ #[inline]
unsafe fn dec_ref(obj: ptr::NonNull<Self>) {
// SAFETY: The safety requirements guarantee that the refcount is nonzero.
unsafe { bindings::put_task_struct(obj.cast().as_ptr()) }
@@ -391,3 +401,27 @@ impl PartialEq for Kuid {
}
impl Eq for Kuid {}
+
+/// Annotation for functions that can sleep.
+///
+/// Equivalent to the C side [`might_sleep()`], this function serves as
+/// a debugging aid and a potential scheduling point.
+///
+/// This function can only be used in a nonatomic context.
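+///
+/// # Examples
+///
+/// A minimal sketch; in process context this is a no-op aside from a potential reschedule,
+/// while in atomic context it warns when `CONFIG_DEBUG_ATOMIC_SLEEP` is enabled:
+///
+/// ```
+/// use kernel::task::might_sleep;
+///
+/// // We are about to do something that may block.
+/// might_sleep();
+/// ```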
+///
+/// [`might_sleep()`]: https://docs.kernel.org/driver-api/basics.html#c.might_sleep
+#[track_caller]
+#[inline]
+pub fn might_sleep() {
+ #[cfg(CONFIG_DEBUG_ATOMIC_SLEEP)]
+ {
+ let loc = core::panic::Location::caller();
+ let file = kernel::file_from_location(loc);
+
+ // SAFETY: `file.as_ptr()` is valid for reading and guaranteed to be nul-terminated.
+ unsafe { crate::bindings::__might_sleep(file.as_ptr().cast(), loc.line() as i32) }
+ }
+
+ // SAFETY: Always safe to call.
+ unsafe { crate::bindings::might_resched() }
+}
diff --git a/rust/kernel/time.rs b/rust/kernel/time.rs
index a8089a98da9e..6ea98dfcd027 100644
--- a/rust/kernel/time.rs
+++ b/rust/kernel/time.rs
@@ -24,6 +24,10 @@
//! C header: [`include/linux/jiffies.h`](srctree/include/linux/jiffies.h).
//! C header: [`include/linux/ktime.h`](srctree/include/linux/ktime.h).
+use core::marker::PhantomData;
+use core::ops;
+
+pub mod delay;
pub mod hrtimer;
/// The number of nanoseconds per microsecond.
@@ -49,26 +53,141 @@ pub fn msecs_to_jiffies(msecs: Msecs) -> Jiffies {
unsafe { bindings::__msecs_to_jiffies(msecs) }
}
+/// Trait for clock sources.
+///
+/// Selection of the clock source depends on the use case. In some cases the usage of a
+/// particular clock is mandatory, e.g. in network protocols or filesystems. In other
+/// cases the user of the clock has to decide which clock is best suited for the
+/// purpose. In most scenarios clock [`Monotonic`] is the best choice as it
+/// provides an accurate monotonic notion of time (leap second smearing ignored).
+pub trait ClockSource {
+ /// The kernel clock ID associated with this clock source.
+ ///
+ /// This constant corresponds to the C side `clockid_t` value.
+ const ID: bindings::clockid_t;
+
+ /// Get the current time from the clock source.
+ ///
+ /// The function must return a value in the range from 0 to `KTIME_MAX`.
+ fn ktime_get() -> bindings::ktime_t;
+}
+
+/// A monotonically increasing clock.
+///
+/// A nonsettable system-wide clock that represents monotonic time since, as
+/// described by POSIX, "some unspecified point in the past". On Linux, that
+/// point corresponds to the number of seconds that the system has been
+/// running since it was booted.
+///
+/// The CLOCK_MONOTONIC clock is not affected by discontinuous jumps in the
+/// CLOCK_REALTIME clock (e.g., if the system administrator manually changes the
+/// clock), but is affected by frequency adjustments. This clock does not
+/// count time that the system is suspended.
+pub struct Monotonic;
+
+impl ClockSource for Monotonic {
+ const ID: bindings::clockid_t = bindings::CLOCK_MONOTONIC as bindings::clockid_t;
+
+ fn ktime_get() -> bindings::ktime_t {
+ // SAFETY: It is always safe to call `ktime_get()` outside of NMI context.
+ unsafe { bindings::ktime_get() }
+ }
+}
+
+/// A settable system-wide clock that measures real (i.e., wall-clock) time.
+///
+/// Setting this clock requires appropriate privileges. This clock is
+/// affected by discontinuous jumps in the system time (e.g., if the system
+/// administrator manually changes the clock), and by frequency adjustments
+/// performed by NTP and similar applications via adjtime(3), adjtimex(2),
+/// clock_adjtime(2), and ntp_adjtime(3). This clock normally counts the
+/// number of seconds since 1970-01-01 00:00:00 Coordinated Universal Time
+/// (UTC) except that it ignores leap seconds; near a leap second it may be
+/// adjusted by leap second smearing to stay roughly in sync with UTC. Leap
+/// second smearing applies frequency adjustments to the clock to speed up
+/// or slow down the clock to account for the leap second without
+/// discontinuities in the clock. If leap second smearing is not applied,
+/// the clock will experience discontinuity around leap second adjustment.
+pub struct RealTime;
+
+impl ClockSource for RealTime {
+ const ID: bindings::clockid_t = bindings::CLOCK_REALTIME as bindings::clockid_t;
+
+ fn ktime_get() -> bindings::ktime_t {
+ // SAFETY: It is always safe to call `ktime_get_real()` outside of NMI context.
+ unsafe { bindings::ktime_get_real() }
+ }
+}
+
+/// A monotonic clock that ticks while the system is suspended.
+///
+/// A nonsettable system-wide clock that is identical to CLOCK_MONOTONIC,
+/// except that it also includes any time that the system is suspended. This
+/// allows applications to get a suspend-aware monotonic clock without
+/// having to deal with the complications of CLOCK_REALTIME, which may have
+/// discontinuities if the time is changed using settimeofday(2) or similar.
+pub struct BootTime;
+
+impl ClockSource for BootTime {
+ const ID: bindings::clockid_t = bindings::CLOCK_BOOTTIME as bindings::clockid_t;
+
+ fn ktime_get() -> bindings::ktime_t {
+ // SAFETY: It is always safe to call `ktime_get_boottime()` outside of NMI context.
+ unsafe { bindings::ktime_get_boottime() }
+ }
+}
+
+/// International Atomic Time.
+///
+/// A system-wide clock derived from wall-clock time but counting leap seconds.
+///
+/// This clock is coupled to CLOCK_REALTIME and will be set when CLOCK_REALTIME is
+/// set, or when the offset to CLOCK_REALTIME is changed via adjtimex(2). This
+/// usually happens during boot and **should** not happen during normal operations.
+/// However, if NTP or another application adjusts CLOCK_REALTIME by leap second
+/// smearing, this clock will not be precise during leap second smearing.
+///
+/// The acronym TAI refers to International Atomic Time.
+pub struct Tai;
+
+impl ClockSource for Tai {
+ const ID: bindings::clockid_t = bindings::CLOCK_TAI as bindings::clockid_t;
+
+ fn ktime_get() -> bindings::ktime_t {
+        // SAFETY: It is always safe to call `ktime_get_clocktai()` outside of NMI context.
+ unsafe { bindings::ktime_get_clocktai() }
+ }
+}
+
/// A specific point in time.
///
/// # Invariants
///
/// The `inner` value is in the range from 0 to `KTIME_MAX`.
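+///
+/// # Examples
+///
+/// A minimal sketch measuring elapsed time on the monotonic clock:
+///
+/// ```
+/// use kernel::time::{Instant, Monotonic};
+///
+/// let start = Instant::<Monotonic>::now();
+/// // ... do some work ...
+/// let _elapsed = start.elapsed();
+/// ```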
#[repr(transparent)]
-#[derive(Copy, Clone, PartialEq, PartialOrd, Eq, Ord)]
-pub struct Instant {
+#[derive(PartialEq, PartialOrd, Eq, Ord)]
+pub struct Instant<C: ClockSource> {
inner: bindings::ktime_t,
+ _c: PhantomData<C>,
+}
+
+impl<C: ClockSource> Clone for Instant<C> {
+ fn clone(&self) -> Self {
+ *self
+ }
}
-impl Instant {
- /// Get the current time using `CLOCK_MONOTONIC`.
+impl<C: ClockSource> Copy for Instant<C> {}
+
+impl<C: ClockSource> Instant<C> {
+ /// Get the current time from the clock source.
#[inline]
pub fn now() -> Self {
- // INVARIANT: The `ktime_get()` function returns a value in the range
+ // INVARIANT: The `ClockSource::ktime_get()` function returns a value in the range
// from 0 to `KTIME_MAX`.
Self {
- // SAFETY: It is always safe to call `ktime_get()` outside of NMI context.
- inner: unsafe { bindings::ktime_get() },
+ inner: C::ktime_get(),
+ _c: PhantomData,
}
}
@@ -77,83 +196,84 @@ impl Instant {
pub fn elapsed(&self) -> Delta {
Self::now() - *self
}
+
+ #[inline]
+ pub(crate) fn as_nanos(&self) -> i64 {
+ self.inner
+ }
+
+ /// Create an [`Instant`] from a `ktime_t` without checking if it is non-negative.
+ ///
+ /// # Panics
+ ///
+ /// On debug builds, this function will panic if `ktime` is not in the range from 0 to
+ /// `KTIME_MAX`.
+ ///
+ /// # Safety
+ ///
+ /// The caller promises that `ktime` is in the range from 0 to `KTIME_MAX`.
+ #[inline]
+ pub(crate) unsafe fn from_ktime(ktime: bindings::ktime_t) -> Self {
+ debug_assert!(ktime >= 0);
+
+ // INVARIANT: Our safety contract ensures that `ktime` is in the range from 0 to
+ // `KTIME_MAX`.
+ Self {
+ inner: ktime,
+ _c: PhantomData,
+ }
+ }
}
-impl core::ops::Sub for Instant {
+impl<C: ClockSource> ops::Sub for Instant<C> {
type Output = Delta;
// By the type invariant, it never overflows.
#[inline]
- fn sub(self, other: Instant) -> Delta {
+ fn sub(self, other: Instant<C>) -> Delta {
Delta {
nanos: self.inner - other.inner,
}
}
}
-/// An identifier for a clock. Used when specifying clock sources.
-///
-///
-/// Selection of the clock depends on the use case. In some cases the usage of a
-/// particular clock is mandatory, e.g. in network protocols, filesystems.In other
-/// cases the user of the clock has to decide which clock is best suited for the
-/// purpose. In most scenarios clock [`ClockId::Monotonic`] is the best choice as it
-/// provides a accurate monotonic notion of time (leap second smearing ignored).
-#[derive(Clone, Copy, PartialEq, Eq, Debug)]
-#[repr(u32)]
-pub enum ClockId {
- /// A settable system-wide clock that measures real (i.e., wall-clock) time.
- ///
- /// Setting this clock requires appropriate privileges. This clock is
- /// affected by discontinuous jumps in the system time (e.g., if the system
- /// administrator manually changes the clock), and by frequency adjustments
- /// performed by NTP and similar applications via adjtime(3), adjtimex(2),
- /// clock_adjtime(2), and ntp_adjtime(3). This clock normally counts the
- /// number of seconds since 1970-01-01 00:00:00 Coordinated Universal Time
- /// (UTC) except that it ignores leap seconds; near a leap second it may be
- /// adjusted by leap second smearing to stay roughly in sync with UTC. Leap
- /// second smearing applies frequency adjustments to the clock to speed up
- /// or slow down the clock to account for the leap second without
- /// discontinuities in the clock. If leap second smearing is not applied,
- /// the clock will experience discontinuity around leap second adjustment.
- RealTime = bindings::CLOCK_REALTIME,
- /// A monotonically increasing clock.
- ///
- /// A nonsettable system-wide clock that represents monotonic time since—as
- /// described by POSIX—"some unspecified point in the past". On Linux, that
- /// point corresponds to the number of seconds that the system has been
- /// running since it was booted.
- ///
- /// The CLOCK_MONOTONIC clock is not affected by discontinuous jumps in the
- /// CLOCK_REAL (e.g., if the system administrator manually changes the
- /// clock), but is affected by frequency adjustments. This clock does not
- /// count time that the system is suspended.
- Monotonic = bindings::CLOCK_MONOTONIC,
- /// A monotonic that ticks while system is suspended.
- ///
- /// A nonsettable system-wide clock that is identical to CLOCK_MONOTONIC,
- /// except that it also includes any time that the system is suspended. This
- /// allows applications to get a suspend-aware monotonic clock without
- /// having to deal with the complications of CLOCK_REALTIME, which may have
- /// discontinuities if the time is changed using settimeofday(2) or similar.
- BootTime = bindings::CLOCK_BOOTTIME,
- /// International Atomic Time.
- ///
- /// A system-wide clock derived from wall-clock time but counting leap seconds.
- ///
- /// This clock is coupled to CLOCK_REALTIME and will be set when CLOCK_REALTIME is
- /// set, or when the offset to CLOCK_REALTIME is changed via adjtimex(2). This
- /// usually happens during boot and **should** not happen during normal operations.
- /// However, if NTP or another application adjusts CLOCK_REALTIME by leap second
- /// smearing, this clock will not be precise during leap second smearing.
- ///
- /// The acronym TAI refers to International Atomic Time.
- TAI = bindings::CLOCK_TAI,
+impl<C: ClockSource> ops::Add<Delta> for Instant<C> {
+ type Output = Self;
+
+ #[inline]
+ fn add(self, rhs: Delta) -> Self::Output {
+ // INVARIANT: With arithmetic over/underflow checks enabled, this will panic if we overflow
+        // (e.g. go above `KTIME_MAX`).
+ let res = self.inner + rhs.nanos;
+
+ // INVARIANT: With overflow checks enabled, we verify here that the value is >= 0
+ #[cfg(CONFIG_RUST_OVERFLOW_CHECKS)]
+ assert!(res >= 0);
+
+ Self {
+ inner: res,
+ _c: PhantomData,
+ }
+ }
}
-impl ClockId {
- fn into_c(self) -> bindings::clockid_t {
- self as bindings::clockid_t
+impl<C: ClockSource> ops::Sub<Delta> for Instant<C> {
+ type Output = Self;
+
+ #[inline]
+ fn sub(self, rhs: Delta) -> Self::Output {
+ // INVARIANT: With arithmetic over/underflow checks enabled, this will panic if we overflow
+        // (e.g. go above `KTIME_MAX`).
+ let res = self.inner - rhs.nanos;
+
+ // INVARIANT: With overflow checks enabled, we verify here that the value is >= 0
+ #[cfg(CONFIG_RUST_OVERFLOW_CHECKS)]
+ assert!(res >= 0);
+
+ Self {
+ inner: res,
+ _c: PhantomData,
+ }
}
}
@@ -167,6 +287,78 @@ pub struct Delta {
nanos: i64,
}
+impl ops::Add for Delta {
+ type Output = Self;
+
+ #[inline]
+ fn add(self, rhs: Self) -> Self {
+ Self {
+ nanos: self.nanos + rhs.nanos,
+ }
+ }
+}
+
+impl ops::AddAssign for Delta {
+ #[inline]
+ fn add_assign(&mut self, rhs: Self) {
+ self.nanos += rhs.nanos;
+ }
+}
+
+impl ops::Sub for Delta {
+ type Output = Self;
+
+ #[inline]
+ fn sub(self, rhs: Self) -> Self::Output {
+ Self {
+ nanos: self.nanos - rhs.nanos,
+ }
+ }
+}
+
+impl ops::SubAssign for Delta {
+ #[inline]
+ fn sub_assign(&mut self, rhs: Self) {
+ self.nanos -= rhs.nanos;
+ }
+}
+
+impl ops::Mul<i64> for Delta {
+ type Output = Self;
+
+ #[inline]
+ fn mul(self, rhs: i64) -> Self::Output {
+ Self {
+ nanos: self.nanos * rhs,
+ }
+ }
+}
+
+impl ops::MulAssign<i64> for Delta {
+ #[inline]
+ fn mul_assign(&mut self, rhs: i64) {
+ self.nanos *= rhs;
+ }
+}
+
+impl ops::Div for Delta {
+ type Output = i64;
+
+ #[inline]
+ fn div(self, rhs: Self) -> Self::Output {
+ #[cfg(CONFIG_64BIT)]
+ {
+ self.nanos / rhs.nanos
+ }
+
+ #[cfg(not(CONFIG_64BIT))]
+ {
+ // SAFETY: This function is always safe to call regardless of the input values
+ unsafe { bindings::div64_s64(self.nanos, rhs.nanos) }
+ }
+ }
+}
+
impl Delta {
/// A span of time equal to zero.
pub const ZERO: Self = Self { nanos: 0 };
@@ -228,13 +420,57 @@ impl Delta {
/// Return the smallest number of microseconds greater than or equal
/// to the value in the [`Delta`].
#[inline]
- pub const fn as_micros_ceil(self) -> i64 {
- self.as_nanos().saturating_add(NSEC_PER_USEC - 1) / NSEC_PER_USEC
+ pub fn as_micros_ceil(self) -> i64 {
+ #[cfg(CONFIG_64BIT)]
+ {
+ self.as_nanos().saturating_add(NSEC_PER_USEC - 1) / NSEC_PER_USEC
+ }
+
+ #[cfg(not(CONFIG_64BIT))]
+ // SAFETY: It is always safe to call `ktime_to_us()` with any value.
+ unsafe {
+ bindings::ktime_to_us(self.as_nanos().saturating_add(NSEC_PER_USEC - 1))
+ }
}
/// Return the number of milliseconds in the [`Delta`].
#[inline]
- pub const fn as_millis(self) -> i64 {
- self.as_nanos() / NSEC_PER_MSEC
+ pub fn as_millis(self) -> i64 {
+ #[cfg(CONFIG_64BIT)]
+ {
+ self.as_nanos() / NSEC_PER_MSEC
+ }
+
+ #[cfg(not(CONFIG_64BIT))]
+ // SAFETY: It is always safe to call `ktime_to_ms()` with any value.
+ unsafe {
+ bindings::ktime_to_ms(self.as_nanos())
+ }
+ }
+
+ /// Return `self % dividend` where `dividend` is in nanoseconds.
+ ///
+ /// The kernel doesn't have any emulation for `s64 % s64` on 32 bit platforms, so this is
+ /// limited to 32 bit dividends.
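+    ///
+    /// # Examples
+    ///
+    /// A small sketch, assuming the `Delta::from_nanos` constructor:
+    ///
+    /// ```
+    /// use kernel::time::Delta;
+    ///
+    /// let d = Delta::from_nanos(2500);
+    /// assert_eq!(d.rem_nanos(1000).as_nanos(), 500);
+    /// ```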
+ #[inline]
+ pub fn rem_nanos(self, dividend: i32) -> Self {
+ #[cfg(CONFIG_64BIT)]
+ {
+ Self {
+ nanos: self.as_nanos() % i64::from(dividend),
+ }
+ }
+
+ #[cfg(not(CONFIG_64BIT))]
+ {
+ let mut rem = 0;
+
+            // SAFETY: `rem` is on the stack, so we can always provide a valid pointer to it.
+ unsafe { bindings::div_s64_rem(self.as_nanos(), dividend, &mut rem) };
+
+ Self {
+ nanos: i64::from(rem),
+ }
+ }
}
}
diff --git a/rust/kernel/time/delay.rs b/rust/kernel/time/delay.rs
new file mode 100644
index 000000000000..eb8838da62bc
--- /dev/null
+++ b/rust/kernel/time/delay.rs
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Delay and sleep primitives.
+//!
+//! This module contains the kernel APIs related to delay and sleep that
+//! have been ported or wrapped for usage by Rust code in the kernel.
+//!
+//! C header: [`include/linux/delay.h`](srctree/include/linux/delay.h).
+
+use super::Delta;
+use crate::prelude::*;
+
+/// Sleeps for at least the given duration.
+///
+/// Equivalent to the C side [`fsleep()`], a flexible sleep function
+/// which automatically chooses the best sleep method based on the duration.
+///
+/// `delta` must be within `[0, i32::MAX]` microseconds;
+/// otherwise, it is erroneous behavior. That is, it is considered a bug
+/// to call this function with an out-of-range value, in which case the function
+/// will sleep for at least the maximum value in the range and may warn
+/// in the future.
+///
+/// The behavior above differs from the C side [`fsleep()`] for which out-of-range
+/// values mean "infinite timeout" instead.
+///
+/// This function can only be used in a nonatomic context.
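+///
+/// # Examples
+///
+/// A short sleep sketch, assuming the `Delta::from_millis` constructor:
+///
+/// ```
+/// use kernel::time::{delay::fsleep, Delta};
+///
+/// // Sleep for at least ten milliseconds.
+/// fsleep(Delta::from_millis(10));
+/// ```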
+///
+/// [`fsleep()`]: https://docs.kernel.org/timers/delay_sleep_functions.html#c.fsleep
+pub fn fsleep(delta: Delta) {
+ // The maximum value is set to `i32::MAX` microseconds to prevent integer
+ // overflow inside fsleep, which could lead to unintentional infinite sleep.
+ const MAX_DELTA: Delta = Delta::from_micros(i32::MAX as i64);
+
+ let delta = if (Delta::ZERO..=MAX_DELTA).contains(&delta) {
+ delta
+ } else {
+ // TODO: Add WARN_ONCE() when it's supported.
+ MAX_DELTA
+ };
+
+ // SAFETY: It is always safe to call `fsleep()` with any duration.
+ unsafe {
+ // Convert the duration to microseconds and round up to preserve
+ // the guarantee; `fsleep()` sleeps for at least the provided duration,
+        // but it may sleep for longer under some circumstances.
+ bindings::fsleep(delta.as_micros_ceil() as c_ulong)
+ }
+}
diff --git a/rust/kernel/time/hrtimer.rs b/rust/kernel/time/hrtimer.rs
index 36e1290cd079..856d2d929a00 100644
--- a/rust/kernel/time/hrtimer.rs
+++ b/rust/kernel/time/hrtimer.rs
@@ -67,26 +67,15 @@
//! A `restart` operation on a timer in the **stopped** state is equivalent to a
//! `start` operation.
-use super::ClockId;
+use super::{ClockSource, Delta, Instant};
use crate::{prelude::*, types::Opaque};
-use core::marker::PhantomData;
+use core::{marker::PhantomData, ptr::NonNull};
use pin_init::PinInit;
-/// A Rust wrapper around a `ktime_t`.
-// NOTE: Ktime is going to be removed when hrtimer is converted to Instant/Delta.
-#[repr(transparent)]
-#[derive(Copy, Clone, PartialEq, PartialOrd, Eq, Ord)]
-pub struct Ktime {
- inner: bindings::ktime_t,
-}
-
-impl Ktime {
- /// Returns the number of nanoseconds.
- #[inline]
- pub fn to_ns(self) -> i64 {
- self.inner
- }
-}
+/// A type alias for the [`Instant<C>`] used by a given `T`'s [`HrTimer<T>`],
+/// where `C` is the [`ClockSource`] of that [`HrTimer`].
+pub type HrTimerInstant<T> = Instant<<<T as HasHrTimer<T>>::TimerMode as HrTimerMode>::Clock>;
/// A timer backed by a C `struct hrtimer`.
///
@@ -98,7 +87,6 @@ impl Ktime {
pub struct HrTimer<T> {
#[pin]
timer: Opaque<bindings::hrtimer>,
- mode: HrTimerMode,
_t: PhantomData<T>,
}
@@ -112,9 +100,10 @@ unsafe impl<T> Sync for HrTimer<T> {}
impl<T> HrTimer<T> {
/// Return an initializer for a new timer instance.
- pub fn new(mode: HrTimerMode, clock: ClockId) -> impl PinInit<Self>
+ pub fn new() -> impl PinInit<Self>
where
T: HrTimerCallback,
+ T: HasHrTimer<T>,
{
pin_init!(Self {
// INVARIANT: We initialize `timer` with `hrtimer_setup` below.
@@ -126,12 +115,11 @@ impl<T> HrTimer<T> {
bindings::hrtimer_setup(
place,
Some(T::Pointer::run),
- clock.into_c(),
- mode.into_c(),
+ <<T as HasHrTimer<T>>::TimerMode as HrTimerMode>::Clock::ID,
+ <T as HasHrTimer<T>>::TimerMode::C_MODE,
);
}
}),
- mode: mode,
_t: PhantomData,
})
}
@@ -148,7 +136,7 @@ impl<T> HrTimer<T> {
// SAFETY: The field projection to `timer` does not go out of bounds,
// because the caller of this function promises that `this` points to an
// allocation of at least the size of `Self`.
- unsafe { Opaque::raw_get(core::ptr::addr_of!((*this).timer)) }
+ unsafe { Opaque::cast_into(core::ptr::addr_of!((*this).timer)) }
}
/// Cancel an initialized and potentially running timer.
@@ -180,6 +168,84 @@ impl<T> HrTimer<T> {
// handled on the C side.
unsafe { bindings::hrtimer_cancel(c_timer_ptr) != 0 }
}
+
+ /// Forward the timer expiry for a given timer pointer.
+ ///
+ /// # Safety
+ ///
+ /// - `self_ptr` must point to a valid `Self`.
+ /// - The caller must either have exclusive access to the data pointed at by `self_ptr`, or be
+ /// within the context of the timer callback.
+ #[inline]
+ unsafe fn raw_forward(self_ptr: *mut Self, now: HrTimerInstant<T>, interval: Delta) -> u64
+ where
+ T: HasHrTimer<T>,
+ {
+ // SAFETY:
+ // * The C API requirements for this function are fulfilled by our safety contract.
+ // * `self_ptr` is guaranteed to point to a valid `Self` via our safety contract
+ unsafe {
+ bindings::hrtimer_forward(Self::raw_get(self_ptr), now.as_nanos(), interval.as_nanos())
+ }
+ }
+
+ /// Conditionally forward the timer.
+ ///
+ /// If the timer expires after `now`, this function does nothing and returns 0. If the timer
+ /// expired at or before `now`, this function forwards the timer by `interval` until the timer
+ /// expires after `now` and then returns the number of times the timer was forwarded by
+ /// `interval`.
+ ///
+ /// This function is mainly useful for timer types which can provide exclusive access to the
+ /// timer when the timer is not running. For forwarding the timer from within the timer callback
+ /// context, see [`HrTimerCallbackContext::forward()`].
+ ///
+ /// Returns the number of overruns that occurred as a result of the timer expiry change.
+ pub fn forward(self: Pin<&mut Self>, now: HrTimerInstant<T>, interval: Delta) -> u64
+ where
+ T: HasHrTimer<T>,
+ {
+ // SAFETY: `raw_forward` does not move `Self`
+ let this = unsafe { self.get_unchecked_mut() };
+
+ // SAFETY: By existence of `Pin<&mut Self>`, the pointer passed to `raw_forward` points to a
+ // valid `Self` that we have exclusive access to.
+ unsafe { Self::raw_forward(this, now, interval) }
+ }
+
+ /// Conditionally forward the timer.
+ ///
+ /// This is a variant of [`forward()`](Self::forward) that uses an interval after the current
+ /// time of the base clock for the [`HrTimer`].
+ pub fn forward_now(self: Pin<&mut Self>, interval: Delta) -> u64
+ where
+ T: HasHrTimer<T>,
+ {
+ self.forward(HrTimerInstant::<T>::now(), interval)
+ }
+
+    /// Return the expiry time for this [`HrTimer`].
+ ///
+ /// This value should only be used as a snapshot, as the actual expiry time could change after
+ /// this function is called.
+ pub fn expires(&self) -> HrTimerInstant<T>
+ where
+ T: HasHrTimer<T>,
+ {
+ // SAFETY: `self` is an immutable reference and thus always points to a valid `HrTimer`.
+ let c_timer_ptr = unsafe { HrTimer::raw_get(self) };
+
+ // SAFETY:
+        // - Timers cannot have negative `ktime_t` values as their expiration time.
+        // - There's no actual locking here; a racy read is fine and expected.
+ unsafe {
+ Instant::from_ktime(
+ // This `read_volatile` is intended to correspond to a READ_ONCE call.
+ // FIXME(read_once): Replace with `read_once` when available on the Rust side.
+ core::ptr::read_volatile(&raw const ((*c_timer_ptr).node.expires)),
+ )
+ }
+ }
}
/// Implemented by pointer types that point to structs that contain a [`HrTimer`].
@@ -193,6 +259,11 @@ impl<T> HrTimer<T> {
/// exist. A timer can be manipulated through any of the handles, and a handle
/// may represent a cancelled timer.
pub trait HrTimerPointer: Sync + Sized {
+ /// The operational mode associated with this timer.
+ ///
+ /// This defines how the expiration value is interpreted.
+ type TimerMode: HrTimerMode;
+
/// A handle representing a started or restarted timer.
///
/// If the timer is running or if the timer callback is executing when the
@@ -205,7 +276,7 @@ pub trait HrTimerPointer: Sync + Sized {
/// Start the timer with expiry after `expires` time units. If the timer was
/// already running, it is restarted with the new expiry time.
- fn start(self, expires: Ktime) -> Self::TimerHandle;
+ fn start(self, expires: <Self::TimerMode as HrTimerMode>::Expires) -> Self::TimerHandle;
}
/// Unsafe version of [`HrTimerPointer`] for situations where leaking the
@@ -220,6 +291,11 @@ pub trait HrTimerPointer: Sync + Sized {
/// [`UnsafeHrTimerPointer`] outlives any associated [`HrTimerPointer::TimerHandle`]
/// instances.
pub unsafe trait UnsafeHrTimerPointer: Sync + Sized {
+ /// The operational mode associated with this timer.
+ ///
+ /// This defines how the expiration value is interpreted.
+ type TimerMode: HrTimerMode;
+
/// A handle representing a running timer.
///
/// # Safety
@@ -236,7 +312,7 @@ pub unsafe trait UnsafeHrTimerPointer: Sync + Sized {
///
/// Caller promises keep the timer structure alive until the timer is dead.
/// Caller can ensure this by not leaking the returned [`Self::TimerHandle`].
- unsafe fn start(self, expires: Ktime) -> Self::TimerHandle;
+ unsafe fn start(self, expires: <Self::TimerMode as HrTimerMode>::Expires) -> Self::TimerHandle;
}
/// A trait for stack allocated timers.
@@ -246,9 +322,14 @@ pub unsafe trait UnsafeHrTimerPointer: Sync + Sized {
/// Implementers must ensure that `start_scoped` does not return until the
/// timer is dead and the timer handler is not running.
pub unsafe trait ScopedHrTimerPointer {
+ /// The operational mode associated with this timer.
+ ///
+ /// This defines how the expiration value is interpreted.
+ type TimerMode: HrTimerMode;
+
/// Start the timer to run after `expires` time units and immediately
/// after call `f`. When `f` returns, the timer is cancelled.
- fn start_scoped<T, F>(self, expires: Ktime, f: F) -> T
+ fn start_scoped<T, F>(self, expires: <Self::TimerMode as HrTimerMode>::Expires, f: F) -> T
where
F: FnOnce() -> T;
}
@@ -260,7 +341,13 @@ unsafe impl<T> ScopedHrTimerPointer for T
where
T: UnsafeHrTimerPointer,
{
- fn start_scoped<U, F>(self, expires: Ktime, f: F) -> U
+ type TimerMode = T::TimerMode;
+
+ fn start_scoped<U, F>(
+ self,
+ expires: <<T as UnsafeHrTimerPointer>::TimerMode as HrTimerMode>::Expires,
+ f: F,
+ ) -> U
where
F: FnOnce() -> U,
{
@@ -296,9 +383,13 @@ pub trait HrTimerCallback {
type Pointer<'a>: RawHrTimerCallback;
/// Called by the timer logic when the timer fires.
- fn run(this: <Self::Pointer<'_> as RawHrTimerCallback>::CallbackTarget<'_>) -> HrTimerRestart
+ fn run(
+ this: <Self::Pointer<'_> as RawHrTimerCallback>::CallbackTarget<'_>,
+ ctx: HrTimerCallbackContext<'_, Self>,
+ ) -> HrTimerRestart
where
- Self: Sized;
+ Self: Sized,
+ Self: HasHrTimer<Self>;
}
/// A handle representing a potentially running timer.
@@ -320,6 +411,8 @@ pub unsafe trait HrTimerHandle {
/// Note that the timer might be started by a concurrent start operation. If
/// so, the timer might not be in the **stopped** state when this function
/// returns.
+ ///
+ /// Returns `true` if the timer was running.
fn cancel(&mut self) -> bool;
}
@@ -335,6 +428,11 @@ pub unsafe trait HrTimerHandle {
/// their documentation. All the methods of this trait must operate on the same
/// field.
pub unsafe trait HasHrTimer<T> {
+ /// The operational mode associated with this timer.
+ ///
+ /// This defines how the expiration value is interpreted.
+ type TimerMode: HrTimerMode;
+
/// Return a pointer to the [`HrTimer`] within `Self`.
///
/// This function is useful to get access to the value without creating
@@ -382,14 +480,14 @@ pub unsafe trait HasHrTimer<T> {
/// - `this` must point to a valid `Self`.
/// - Caller must ensure that the pointee of `this` lives until the timer
/// fires or is canceled.
- unsafe fn start(this: *const Self, expires: Ktime) {
+ unsafe fn start(this: *const Self, expires: <Self::TimerMode as HrTimerMode>::Expires) {
// SAFETY: By function safety requirement, `this` is a valid `Self`.
unsafe {
bindings::hrtimer_start_range_ns(
Self::c_timer_ptr(this).cast_mut(),
- expires.to_ns(),
+ expires.as_nanos(),
0,
- (*Self::raw_get_timer(this)).mode.into_c(),
+ <Self::TimerMode as HrTimerMode>::C_MODE,
);
}
}
@@ -411,77 +509,225 @@ impl HrTimerRestart {
}
}
+/// Time representations that can be used as expiration values in [`HrTimer`].
+pub trait HrTimerExpires {
+ /// Converts the expiration time into a nanosecond representation.
+ ///
+    /// This value corresponds to a raw `ktime_t` value, suitable for passing to kernel
+    /// timer functions. The interpretation (absolute vs. relative) depends on the
+    /// associated [`HrTimerMode`] in use.
+ fn as_nanos(&self) -> i64;
+}
+
+impl<C: ClockSource> HrTimerExpires for Instant<C> {
+ #[inline]
+ fn as_nanos(&self) -> i64 {
+ Instant::<C>::as_nanos(self)
+ }
+}
+
+impl HrTimerExpires for Delta {
+ #[inline]
+ fn as_nanos(&self) -> i64 {
+ Delta::as_nanos(*self)
+ }
+}
+
+mod private {
+ use crate::time::ClockSource;
+
+ pub trait Sealed {}
+
+ impl<C: ClockSource> Sealed for super::AbsoluteMode<C> {}
+ impl<C: ClockSource> Sealed for super::RelativeMode<C> {}
+ impl<C: ClockSource> Sealed for super::AbsolutePinnedMode<C> {}
+ impl<C: ClockSource> Sealed for super::RelativePinnedMode<C> {}
+ impl<C: ClockSource> Sealed for super::AbsoluteSoftMode<C> {}
+ impl<C: ClockSource> Sealed for super::RelativeSoftMode<C> {}
+ impl<C: ClockSource> Sealed for super::AbsolutePinnedSoftMode<C> {}
+ impl<C: ClockSource> Sealed for super::RelativePinnedSoftMode<C> {}
+ impl<C: ClockSource> Sealed for super::AbsoluteHardMode<C> {}
+ impl<C: ClockSource> Sealed for super::RelativeHardMode<C> {}
+ impl<C: ClockSource> Sealed for super::AbsolutePinnedHardMode<C> {}
+ impl<C: ClockSource> Sealed for super::RelativePinnedHardMode<C> {}
+}
+
/// Operational mode of [`HrTimer`].
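+///
+/// A driver selects a mode when implementing [`HasHrTimer`]; a sketch with a
+/// hypothetical `MyTimer` type:
+///
+/// ```ignore
+/// kernel::impl_has_hr_timer! {
+///     impl HasHrTimer<MyTimer> for MyTimer {
+///         mode: kernel::time::hrtimer::RelativeMode<kernel::time::Monotonic>,
+///         field: self.timer,
+///     }
+/// }
+/// ```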
-// NOTE: Some of these have the same encoding on the C side, so we keep
-// `repr(Rust)` and convert elsewhere.
-#[derive(Clone, Copy, PartialEq, Eq, Debug)]
-pub enum HrTimerMode {
- /// Timer expires at the given expiration time.
- Absolute,
- /// Timer expires after the given expiration time interpreted as a duration from now.
- Relative,
- /// Timer does not move between CPU cores.
- Pinned,
- /// Timer handler is executed in soft irq context.
- Soft,
- /// Timer handler is executed in hard irq context.
- Hard,
- /// Timer expires at the given expiration time.
- /// Timer does not move between CPU cores.
- AbsolutePinned,
- /// Timer expires after the given expiration time interpreted as a duration from now.
- /// Timer does not move between CPU cores.
- RelativePinned,
- /// Timer expires at the given expiration time.
- /// Timer handler is executed in soft irq context.
- AbsoluteSoft,
- /// Timer expires after the given expiration time interpreted as a duration from now.
- /// Timer handler is executed in soft irq context.
- RelativeSoft,
- /// Timer expires at the given expiration time.
- /// Timer does not move between CPU cores.
- /// Timer handler is executed in soft irq context.
- AbsolutePinnedSoft,
- /// Timer expires after the given expiration time interpreted as a duration from now.
- /// Timer does not move between CPU cores.
- /// Timer handler is executed in soft irq context.
- RelativePinnedSoft,
- /// Timer expires at the given expiration time.
- /// Timer handler is executed in hard irq context.
- AbsoluteHard,
- /// Timer expires after the given expiration time interpreted as a duration from now.
- /// Timer handler is executed in hard irq context.
- RelativeHard,
- /// Timer expires at the given expiration time.
- /// Timer does not move between CPU cores.
- /// Timer handler is executed in hard irq context.
- AbsolutePinnedHard,
- /// Timer expires after the given expiration time interpreted as a duration from now.
- /// Timer does not move between CPU cores.
- /// Timer handler is executed in hard irq context.
- RelativePinnedHard,
+pub trait HrTimerMode: private::Sealed {
+ /// The C representation of hrtimer mode.
+ const C_MODE: bindings::hrtimer_mode;
+
+ /// Type representing the clock source.
+ type Clock: ClockSource;
+
+ /// Type representing the expiration specification (absolute or relative time).
+ type Expires: HrTimerExpires;
}
-impl HrTimerMode {
- fn into_c(self) -> bindings::hrtimer_mode {
- use bindings::*;
- match self {
- HrTimerMode::Absolute => hrtimer_mode_HRTIMER_MODE_ABS,
- HrTimerMode::Relative => hrtimer_mode_HRTIMER_MODE_REL,
- HrTimerMode::Pinned => hrtimer_mode_HRTIMER_MODE_PINNED,
- HrTimerMode::Soft => hrtimer_mode_HRTIMER_MODE_SOFT,
- HrTimerMode::Hard => hrtimer_mode_HRTIMER_MODE_HARD,
- HrTimerMode::AbsolutePinned => hrtimer_mode_HRTIMER_MODE_ABS_PINNED,
- HrTimerMode::RelativePinned => hrtimer_mode_HRTIMER_MODE_REL_PINNED,
- HrTimerMode::AbsoluteSoft => hrtimer_mode_HRTIMER_MODE_ABS_SOFT,
- HrTimerMode::RelativeSoft => hrtimer_mode_HRTIMER_MODE_REL_SOFT,
- HrTimerMode::AbsolutePinnedSoft => hrtimer_mode_HRTIMER_MODE_ABS_PINNED_SOFT,
- HrTimerMode::RelativePinnedSoft => hrtimer_mode_HRTIMER_MODE_REL_PINNED_SOFT,
- HrTimerMode::AbsoluteHard => hrtimer_mode_HRTIMER_MODE_ABS_HARD,
- HrTimerMode::RelativeHard => hrtimer_mode_HRTIMER_MODE_REL_HARD,
- HrTimerMode::AbsolutePinnedHard => hrtimer_mode_HRTIMER_MODE_ABS_PINNED_HARD,
- HrTimerMode::RelativePinnedHard => hrtimer_mode_HRTIMER_MODE_REL_PINNED_HARD,
- }
+/// Timer that expires at a fixed point in time.
+pub struct AbsoluteMode<C: ClockSource>(PhantomData<C>);
+
+impl<C: ClockSource> HrTimerMode for AbsoluteMode<C> {
+ const C_MODE: bindings::hrtimer_mode = bindings::hrtimer_mode_HRTIMER_MODE_ABS;
+
+ type Clock = C;
+ type Expires = Instant<C>;
+}
+
+/// Timer that expires after a delay from now.
+pub struct RelativeMode<C: ClockSource>(PhantomData<C>);
+
+impl<C: ClockSource> HrTimerMode for RelativeMode<C> {
+ const C_MODE: bindings::hrtimer_mode = bindings::hrtimer_mode_HRTIMER_MODE_REL;
+
+ type Clock = C;
+ type Expires = Delta;
+}
+
+/// Timer with absolute expiration time, pinned to its current CPU.
+pub struct AbsolutePinnedMode<C: ClockSource>(PhantomData<C>);
+
+impl<C: ClockSource> HrTimerMode for AbsolutePinnedMode<C> {
+ const C_MODE: bindings::hrtimer_mode = bindings::hrtimer_mode_HRTIMER_MODE_ABS_PINNED;
+
+ type Clock = C;
+ type Expires = Instant<C>;
+}
+
+/// Timer with relative expiration time, pinned to its current CPU.
+pub struct RelativePinnedMode<C: ClockSource>(PhantomData<C>);
+
+impl<C: ClockSource> HrTimerMode for RelativePinnedMode<C> {
+ const C_MODE: bindings::hrtimer_mode = bindings::hrtimer_mode_HRTIMER_MODE_REL_PINNED;
+
+ type Clock = C;
+ type Expires = Delta;
+}
+
+/// Timer with absolute expiration, handled in soft irq context.
+pub struct AbsoluteSoftMode<C: ClockSource>(PhantomData<C>);
+
+impl<C: ClockSource> HrTimerMode for AbsoluteSoftMode<C> {
+ const C_MODE: bindings::hrtimer_mode = bindings::hrtimer_mode_HRTIMER_MODE_ABS_SOFT;
+
+ type Clock = C;
+ type Expires = Instant<C>;
+}
+
+/// Timer with relative expiration, handled in soft irq context.
+pub struct RelativeSoftMode<C: ClockSource>(PhantomData<C>);
+
+impl<C: ClockSource> HrTimerMode for RelativeSoftMode<C> {
+ const C_MODE: bindings::hrtimer_mode = bindings::hrtimer_mode_HRTIMER_MODE_REL_SOFT;
+
+ type Clock = C;
+ type Expires = Delta;
+}
+
+/// Timer with absolute expiration, pinned to CPU and handled in soft irq context.
+pub struct AbsolutePinnedSoftMode<C: ClockSource>(PhantomData<C>);
+
+impl<C: ClockSource> HrTimerMode for AbsolutePinnedSoftMode<C> {
+ const C_MODE: bindings::hrtimer_mode = bindings::hrtimer_mode_HRTIMER_MODE_ABS_PINNED_SOFT;
+
+ type Clock = C;
+ type Expires = Instant<C>;
+}
+
+/// Timer with relative expiration, pinned to CPU and handled in soft irq context.
+pub struct RelativePinnedSoftMode<C: ClockSource>(PhantomData<C>);
+
+impl<C: ClockSource> HrTimerMode for RelativePinnedSoftMode<C> {
+ const C_MODE: bindings::hrtimer_mode = bindings::hrtimer_mode_HRTIMER_MODE_REL_PINNED_SOFT;
+
+ type Clock = C;
+ type Expires = Delta;
+}
+
+/// Timer with absolute expiration, handled in hard irq context.
+pub struct AbsoluteHardMode<C: ClockSource>(PhantomData<C>);
+
+impl<C: ClockSource> HrTimerMode for AbsoluteHardMode<C> {
+ const C_MODE: bindings::hrtimer_mode = bindings::hrtimer_mode_HRTIMER_MODE_ABS_HARD;
+
+ type Clock = C;
+ type Expires = Instant<C>;
+}
+
+/// Timer with relative expiration, handled in hard irq context.
+pub struct RelativeHardMode<C: ClockSource>(PhantomData<C>);
+
+impl<C: ClockSource> HrTimerMode for RelativeHardMode<C> {
+ const C_MODE: bindings::hrtimer_mode = bindings::hrtimer_mode_HRTIMER_MODE_REL_HARD;
+
+ type Clock = C;
+ type Expires = Delta;
+}
+
+/// Timer with absolute expiration, pinned to CPU and handled in hard irq context.
+pub struct AbsolutePinnedHardMode<C: ClockSource>(PhantomData<C>);
+
+impl<C: ClockSource> HrTimerMode for AbsolutePinnedHardMode<C> {
+ const C_MODE: bindings::hrtimer_mode = bindings::hrtimer_mode_HRTIMER_MODE_ABS_PINNED_HARD;
+
+ type Clock = C;
+ type Expires = Instant<C>;
+}
+
+/// Timer with relative expiration, pinned to CPU and handled in hard irq context.
+pub struct RelativePinnedHardMode<C: ClockSource>(PhantomData<C>);
+
+impl<C: ClockSource> HrTimerMode for RelativePinnedHardMode<C> {
+ const C_MODE: bindings::hrtimer_mode = bindings::hrtimer_mode_HRTIMER_MODE_REL_PINNED_HARD;
+
+ type Clock = C;
+ type Expires = Delta;
+}
+
+/// Privileged smart-pointer for a [`HrTimer`] callback context.
+///
+/// Many [`HrTimer`] methods can only be called in two situations:
+///
+/// * When the caller has exclusive access to the `HrTimer` and the `HrTimer` is guaranteed not to
+/// be running.
+/// * From within the context of an `HrTimer`'s callback method.
+///
+/// This type provides access to said methods from within a timer callback context.
+///
+/// # Invariants
+///
+/// * The existence of this type means the caller is currently within the callback for an
+/// [`HrTimer`].
+/// * `self.0` always points to a live instance of [`HrTimer<T>`].
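+///
+/// # Examples
+///
+/// A sketch of a periodic callback re-arming itself (hypothetical `MyTimer` type):
+///
+/// ```ignore
+/// fn run(
+///     _this: Self::CallbackTarget<'_>,
+///     mut ctx: HrTimerCallbackContext<'_, MyTimer>,
+/// ) -> HrTimerRestart {
+///     // Forward the expiry to one period past the current clock time.
+///     ctx.forward_now(Delta::from_millis(10));
+///     HrTimerRestart::Restart
+/// }
+/// ```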
+pub struct HrTimerCallbackContext<'a, T: HasHrTimer<T>>(NonNull<HrTimer<T>>, PhantomData<&'a ()>);
+
+impl<'a, T: HasHrTimer<T>> HrTimerCallbackContext<'a, T> {
+ /// Create a new [`HrTimerCallbackContext`].
+ ///
+ /// # Safety
+ ///
+ /// This function relies on the caller being within the context of a timer callback, so it must
+ /// not be used anywhere except for within implementations of [`RawHrTimerCallback::run`]. The
+ /// caller promises that `timer` points to a valid initialized instance of
+ /// [`bindings::hrtimer`].
+ ///
+ /// The returned `Self` must not outlive the function context of [`RawHrTimerCallback::run`]
+ /// where this function is called.
+ pub(crate) unsafe fn from_raw(timer: *mut HrTimer<T>) -> Self {
+ // SAFETY: The caller guarantees `timer` is a valid pointer to an initialized
+ // `bindings::hrtimer`
+ // INVARIANT: Our safety contract ensures that we're within the context of a timer callback
+ // and that `timer` points to a live instance of `HrTimer<T>`.
+ Self(unsafe { NonNull::new_unchecked(timer) }, PhantomData)
+ }
+
+ /// Conditionally forward the timer.
+ ///
+ /// This function is identical to [`HrTimer::forward()`] except that it may only be used from
+ /// within the context of a [`HrTimer`] callback.
+ pub fn forward(&mut self, now: HrTimerInstant<T>, interval: Delta) -> u64 {
+ // SAFETY:
+ // - We are guaranteed to be within the context of a timer callback by our type invariants
+ // - By our type invariants, `self.0` always points to a valid `HrTimer<T>`
+ unsafe { HrTimer::<T>::raw_forward(self.0.as_ptr(), now, interval) }
+ }
+
+ /// Conditionally forward the timer.
+ ///
+ /// This is a variant of [`HrTimerCallbackContext::forward()`] that uses an interval after the
+ /// current time of the base clock for the [`HrTimer`].
+ pub fn forward_now(&mut self, duration: Delta) -> u64 {
+ self.forward(HrTimerInstant::<T>::now(), duration)
}
}
@@ -496,12 +742,16 @@ macro_rules! impl_has_hr_timer {
impl$({$($generics:tt)*})?
HasHrTimer<$timer_type:ty>
for $self:ty
- { self.$field:ident }
+ {
+ mode : $mode:ty,
+ field : self.$field:ident $(,)?
+ }
$($rest:tt)*
) => {
// SAFETY: This implementation of `raw_get_timer` only compiles if the
// field has the right type.
unsafe impl$(<$($generics)*>)? $crate::time::hrtimer::HasHrTimer<$timer_type> for $self {
+ type TimerMode = $mode;
#[inline]
unsafe fn raw_get_timer(
diff --git a/rust/kernel/time/hrtimer/arc.rs b/rust/kernel/time/hrtimer/arc.rs
index ccf1e66e5b2d..7be82bcb352a 100644
--- a/rust/kernel/time/hrtimer/arc.rs
+++ b/rust/kernel/time/hrtimer/arc.rs
@@ -3,9 +3,10 @@
use super::HasHrTimer;
use super::HrTimer;
use super::HrTimerCallback;
+use super::HrTimerCallbackContext;
use super::HrTimerHandle;
+use super::HrTimerMode;
use super::HrTimerPointer;
-use super::Ktime;
use super::RawHrTimerCallback;
use crate::sync::Arc;
use crate::sync::ArcBorrow;
@@ -54,9 +55,13 @@ where
T: HasHrTimer<T>,
T: for<'a> HrTimerCallback<Pointer<'a> = Self>,
{
+ type TimerMode = <T as HasHrTimer<T>>::TimerMode;
type TimerHandle = ArcHrTimerHandle<T>;
- fn start(self, expires: Ktime) -> ArcHrTimerHandle<T> {
+ fn start(
+ self,
+ expires: <<T as HasHrTimer<T>>::TimerMode as HrTimerMode>::Expires,
+ ) -> ArcHrTimerHandle<T> {
// SAFETY:
// - We keep `self` alive by wrapping it in a handle below.
// - Since we generate the pointer passed to `start` from a valid
@@ -95,6 +100,12 @@ where
// allocation from other `Arc` clones.
let receiver = unsafe { ArcBorrow::from_raw(data_ptr) };
- T::run(receiver).into_c()
+ // SAFETY:
+ // - By C API contract `timer_ptr` is the pointer that we passed when queuing the timer, so
+ // it is a valid pointer to a `HrTimer<T>` embedded in a `T`.
+ // - We are within `RawHrTimerCallback::run`
+ let context = unsafe { HrTimerCallbackContext::from_raw(timer_ptr) };
+
+ T::run(receiver, context).into_c()
}
}
diff --git a/rust/kernel/time/hrtimer/pin.rs b/rust/kernel/time/hrtimer/pin.rs
index 293ca9cf058c..4d39ef781697 100644
--- a/rust/kernel/time/hrtimer/pin.rs
+++ b/rust/kernel/time/hrtimer/pin.rs
@@ -3,8 +3,9 @@
use super::HasHrTimer;
use super::HrTimer;
use super::HrTimerCallback;
+use super::HrTimerCallbackContext;
use super::HrTimerHandle;
-use super::Ktime;
+use super::HrTimerMode;
use super::RawHrTimerCallback;
use super::UnsafeHrTimerPointer;
use core::pin::Pin;
@@ -54,9 +55,13 @@ where
T: HasHrTimer<T>,
T: HrTimerCallback<Pointer<'a> = Self>,
{
+ type TimerMode = <T as HasHrTimer<T>>::TimerMode;
type TimerHandle = PinHrTimerHandle<'a, T>;
- unsafe fn start(self, expires: Ktime) -> Self::TimerHandle {
+ unsafe fn start(
+ self,
+ expires: <<T as HasHrTimer<T>>::TimerMode as HrTimerMode>::Expires,
+ ) -> Self::TimerHandle {
// Cast to pointer
let self_ptr: *const T = self.get_ref();
@@ -79,7 +84,7 @@ where
unsafe extern "C" fn run(ptr: *mut bindings::hrtimer) -> bindings::hrtimer_restart {
// `HrTimer` is `repr(C)`
- let timer_ptr = ptr as *mut HrTimer<T>;
+ let timer_ptr = ptr.cast::<HrTimer<T>>();
// SAFETY: By the safety requirement of this function, `timer_ptr`
// points to a `HrTimer<T>` contained in an `T`.
@@ -99,6 +104,12 @@ where
// here.
let receiver_pin = unsafe { Pin::new_unchecked(receiver_ref) };
- T::run(receiver_pin).into_c()
+ // SAFETY:
+ // - By C API contract `timer_ptr` is the pointer that we passed when queuing the timer, so
+ // it is a valid pointer to a `HrTimer<T>` embedded in a `T`.
+ // - We are within `RawHrTimerCallback::run`
+ let context = unsafe { HrTimerCallbackContext::from_raw(timer_ptr) };
+
+ T::run(receiver_pin, context).into_c()
}
}
diff --git a/rust/kernel/time/hrtimer/pin_mut.rs b/rust/kernel/time/hrtimer/pin_mut.rs
index 6033572d35ad..9d9447d4d57e 100644
--- a/rust/kernel/time/hrtimer/pin_mut.rs
+++ b/rust/kernel/time/hrtimer/pin_mut.rs
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
use super::{
- HasHrTimer, HrTimer, HrTimerCallback, HrTimerHandle, Ktime, RawHrTimerCallback,
- UnsafeHrTimerPointer,
+ HasHrTimer, HrTimer, HrTimerCallback, HrTimerCallbackContext, HrTimerHandle, HrTimerMode,
+ RawHrTimerCallback, UnsafeHrTimerPointer,
};
use core::{marker::PhantomData, pin::Pin, ptr::NonNull};
@@ -52,9 +52,13 @@ where
T: HasHrTimer<T>,
T: HrTimerCallback<Pointer<'a> = Self>,
{
+ type TimerMode = <T as HasHrTimer<T>>::TimerMode;
type TimerHandle = PinMutHrTimerHandle<'a, T>;
- unsafe fn start(mut self, expires: Ktime) -> Self::TimerHandle {
+ unsafe fn start(
+ mut self,
+ expires: <<T as HasHrTimer<T>>::TimerMode as HrTimerMode>::Expires,
+ ) -> Self::TimerHandle {
// SAFETY:
// - We promise not to move out of `self`. We only pass `self`
// back to the caller as a `Pin<&mut self>`.
@@ -83,7 +87,7 @@ where
unsafe extern "C" fn run(ptr: *mut bindings::hrtimer) -> bindings::hrtimer_restart {
// `HrTimer` is `repr(C)`
- let timer_ptr = ptr as *mut HrTimer<T>;
+ let timer_ptr = ptr.cast::<HrTimer<T>>();
// SAFETY: By the safety requirement of this function, `timer_ptr`
// points to a `HrTimer<T>` contained in an `T`.
@@ -103,6 +107,12 @@ where
// here.
let receiver_pin = unsafe { Pin::new_unchecked(receiver_ref) };
- T::run(receiver_pin).into_c()
+ // SAFETY:
+ // - By C API contract `timer_ptr` is the pointer that we passed when queuing the timer, so
+ // it is a valid pointer to a `HrTimer<T>` embedded in a `T`.
+ // - We are within `RawHrTimerCallback::run`
+ let context = unsafe { HrTimerCallbackContext::from_raw(timer_ptr) };
+
+ T::run(receiver_pin, context).into_c()
}
}
diff --git a/rust/kernel/time/hrtimer/tbox.rs b/rust/kernel/time/hrtimer/tbox.rs
index 29526a5da203..aa1ee31a7195 100644
--- a/rust/kernel/time/hrtimer/tbox.rs
+++ b/rust/kernel/time/hrtimer/tbox.rs
@@ -3,9 +3,10 @@
use super::HasHrTimer;
use super::HrTimer;
use super::HrTimerCallback;
+use super::HrTimerCallbackContext;
use super::HrTimerHandle;
+use super::HrTimerMode;
use super::HrTimerPointer;
-use super::Ktime;
use super::RawHrTimerCallback;
use crate::prelude::*;
use core::ptr::NonNull;
@@ -64,9 +65,13 @@ where
T: for<'a> HrTimerCallback<Pointer<'a> = Pin<Box<T, A>>>,
A: crate::alloc::Allocator,
{
+ type TimerMode = <T as HasHrTimer<T>>::TimerMode;
type TimerHandle = BoxHrTimerHandle<T, A>;
- fn start(self, expires: Ktime) -> Self::TimerHandle {
+ fn start(
+ self,
+ expires: <<T as HasHrTimer<T>>::TimerMode as HrTimerMode>::Expires,
+ ) -> Self::TimerHandle {
// SAFETY:
// - We will not move out of this box during timer callback (we pass an
// immutable reference to the callback).
@@ -115,6 +120,12 @@ where
// `data_ptr` exist.
let data_mut_ref = unsafe { Pin::new_unchecked(&mut *data_ptr) };
- T::run(data_mut_ref).into_c()
+ // SAFETY:
+ // - By C API contract `timer_ptr` is the pointer that we passed when queuing the timer, so
+ // it is a valid pointer to a `HrTimer<T>` embedded in a `T`.
+ // - We are within `RawHrTimerCallback::run`
+ let context = unsafe { HrTimerCallbackContext::from_raw(timer_ptr) };
+
+ T::run(data_mut_ref, context).into_c()
}
}
diff --git a/rust/kernel/transmute.rs b/rust/kernel/transmute.rs
index 1c7d43771a37..cfc37d81adf2 100644
--- a/rust/kernel/transmute.rs
+++ b/rust/kernel/transmute.rs
@@ -2,6 +2,8 @@
//! Traits for transmuting types.
+use core::mem::size_of;
+
/// Types for which any bit pattern is valid.
///
/// Not all types are valid for all values. For example, a `bool` must be either zero or one, so
@@ -9,10 +11,93 @@
///
/// It's okay for the type to have padding, as initializing those bytes has no effect.
///
+/// # Examples
+///
+/// ```
+/// use kernel::transmute::FromBytes;
+///
+/// # fn test() -> Option<()> {
+/// let raw = [1, 2, 3, 4];
+///
+/// let result = u32::from_bytes(&raw)?;
+///
+/// #[cfg(target_endian = "little")]
+/// assert_eq!(*result, 0x4030201);
+///
+/// #[cfg(target_endian = "big")]
+/// assert_eq!(*result, 0x1020304);
+///
+/// # Some(()) }
+/// # test().ok_or(EINVAL)?;
+/// # Ok::<(), Error>(())
+/// ```
+///
/// # Safety
///
/// All bit-patterns must be valid for this type. This type must not have interior mutability.
-pub unsafe trait FromBytes {}
+pub unsafe trait FromBytes {
+ /// Converts a slice of bytes to a reference to `Self`.
+ ///
+ /// Succeeds if the reference is properly aligned, and the size of `bytes` is equal to that of
+    /// `Self` and different from zero.
+ ///
+ /// Otherwise, returns [`None`].
+ fn from_bytes(bytes: &[u8]) -> Option<&Self>
+ where
+ Self: Sized,
+ {
+ let slice_ptr = bytes.as_ptr().cast::<Self>();
+ let size = size_of::<Self>();
+
+ #[allow(clippy::incompatible_msrv)]
+ if bytes.len() == size && slice_ptr.is_aligned() {
+ // SAFETY: Size and alignment were just checked.
+ unsafe { Some(&*slice_ptr) }
+ } else {
+ None
+ }
+ }
+
+ /// Converts a mutable slice of bytes to a reference to `Self`.
+ ///
+ /// Succeeds if the reference is properly aligned, and the size of `bytes` is equal to that of
+    /// `Self` and different from zero.
+ ///
+ /// Otherwise, returns [`None`].
+ fn from_bytes_mut(bytes: &mut [u8]) -> Option<&mut Self>
+ where
+ Self: AsBytes + Sized,
+ {
+ let slice_ptr = bytes.as_mut_ptr().cast::<Self>();
+ let size = size_of::<Self>();
+
+ #[allow(clippy::incompatible_msrv)]
+ if bytes.len() == size && slice_ptr.is_aligned() {
+ // SAFETY: Size and alignment were just checked.
+ unsafe { Some(&mut *slice_ptr) }
+ } else {
+ None
+ }
+ }
+
+ /// Creates an owned instance of `Self` by copying `bytes`.
+ ///
+ /// Unlike [`FromBytes::from_bytes`], which requires aligned input, this method can be used on
+ /// non-aligned data at the cost of a copy.
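+    ///
+    /// # Examples
+    ///
+    /// A copy from a deliberately misaligned slice:
+    ///
+    /// ```
+    /// use kernel::transmute::FromBytes;
+    ///
+    /// // The subslice starts at an odd offset, so it need not be aligned for `u32`.
+    /// let raw = [0u8, 1, 2, 3, 4];
+    /// let val = u32::from_bytes_copy(&raw[1..]).ok_or(EINVAL)?;
+    ///
+    /// #[cfg(target_endian = "little")]
+    /// assert_eq!(val, 0x04030201);
+    ///
+    /// #[cfg(target_endian = "big")]
+    /// assert_eq!(val, 0x01020304);
+    /// # Ok::<(), Error>(())
+    /// ```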
+ fn from_bytes_copy(bytes: &[u8]) -> Option<Self>
+ where
+ Self: Sized,
+ {
+ if bytes.len() == size_of::<Self>() {
+ // SAFETY: we just verified that `bytes` has the same size as `Self`, and per the
+ // invariants of `FromBytes`, any byte sequence of the correct length is a valid value
+ // for `Self`.
+ Some(unsafe { core::ptr::read_unaligned(bytes.as_ptr().cast::<Self>()) })
+ } else {
+ None
+ }
+ }
+}
macro_rules! impl_frombytes {
($($({$($generics:tt)*})? $t:ty, )*) => {
@@ -47,7 +132,32 @@ impl_frombytes! {
///
/// Values of this type may not contain any uninitialized bytes. This type must not have interior
/// mutability.
-pub unsafe trait AsBytes {}
+pub unsafe trait AsBytes {
+ /// Returns `self` as a slice of bytes.
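+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch, assuming the `u32: AsBytes` impl provided by `impl_asbytes!`:
+    ///
+    /// ```
+    /// use kernel::transmute::AsBytes;
+    ///
+    /// let v: u32 = 0x01020304;
+    /// assert_eq!(v.as_bytes().len(), 4);
+    /// ```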
+ fn as_bytes(&self) -> &[u8] {
+ // CAST: `Self` implements `AsBytes` thus all bytes of `self` are initialized.
+ let data = core::ptr::from_ref(self).cast::<u8>();
+ let len = core::mem::size_of_val(self);
+
+        // SAFETY: `data` is non-null and valid for reads of `len * size_of::<u8>()` bytes.
+ unsafe { core::slice::from_raw_parts(data, len) }
+ }
+
+ /// Returns `self` as a mutable slice of bytes.
+ fn as_bytes_mut(&mut self) -> &mut [u8]
+ where
+ Self: FromBytes,
+ {
+ // CAST: `Self` implements both `AsBytes` and `FromBytes` thus making `Self`
+ // bi-directionally transmutable to `[u8; size_of_val(self)]`.
+ let data = core::ptr::from_mut(self).cast::<u8>();
+ let len = core::mem::size_of_val(self);
+
+        // SAFETY: `data` is non-null and valid for reads and writes of `len * size_of::<u8>()`
+        // bytes.
+ unsafe { core::slice::from_raw_parts_mut(data, len) }
+ }
+}
macro_rules! impl_asbytes {
($($({$($generics:tt)*})? $t:ty, )*) => {
diff --git a/rust/kernel/types.rs b/rust/kernel/types.rs
index 22985b6f6982..dc0a02f5c3cf 100644
--- a/rust/kernel/types.rs
+++ b/rust/kernel/types.rs
@@ -2,14 +2,16 @@
//! Kernel types.
+use crate::ffi::c_void;
use core::{
cell::UnsafeCell,
marker::{PhantomData, PhantomPinned},
- mem::{ManuallyDrop, MaybeUninit},
+ mem::MaybeUninit,
ops::{Deref, DerefMut},
- ptr::NonNull,
};
-use pin_init::{PinInit, Zeroable};
+use pin_init::{PinInit, Wrapper, Zeroable};
+
+pub use crate::sync::aref::{ARef, AlwaysRefCounted};
/// Used to transfer ownership to and from foreign (non-Rust) languages.
///
@@ -21,15 +23,10 @@ use pin_init::{PinInit, Zeroable};
///
/// # Safety
///
-/// Implementers must ensure that [`into_foreign`] returns a pointer which meets the alignment
-/// requirements of [`PointedTo`].
-///
-/// [`into_foreign`]: Self::into_foreign
-/// [`PointedTo`]: Self::PointedTo
+/// - Implementations must satisfy the guarantees of [`Self::into_foreign`].
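+///
+/// # Examples
+///
+/// A round-trip sketch using `KBox`, which implements this trait:
+///
+/// ```
+/// use kernel::types::ForeignOwnable;
+///
+/// let b = KBox::new(42u32, GFP_KERNEL)?;
+/// let ptr = b.into_foreign();
+/// // SAFETY: `ptr` came from `into_foreign` above and is passed back exactly once.
+/// let b = unsafe { KBox::<u32>::from_foreign(ptr) };
+/// assert_eq!(*b, 42);
+/// # Ok::<(), Error>(())
+/// ```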
pub unsafe trait ForeignOwnable: Sized {
- /// Type used when the value is foreign-owned. In practical terms only defines the alignment of
- /// the pointer.
- type PointedTo;
+ /// The alignment of pointers returned by `into_foreign`.
+ const FOREIGN_ALIGN: usize;
/// Type used to immutably borrow a value that is currently foreign-owned.
type Borrowed<'a>;
@@ -39,18 +36,21 @@ pub unsafe trait ForeignOwnable: Sized {
/// Converts a Rust-owned object to a foreign-owned one.
///
+ /// The foreign representation is a pointer to void. Aside from the guarantees listed below,
+ /// there are no other guarantees for this pointer. For example, it might be invalid,
+ /// dangling, or point to uninitialized memory. Using it in any way except for [`from_foreign`],
+ /// [`try_from_foreign`], [`borrow`], or [`borrow_mut`] can result in undefined behavior.
+ ///
/// # Guarantees
///
- /// The return value is guaranteed to be well-aligned, but there are no other guarantees for
- /// this pointer. For example, it might be null, dangling, or point to uninitialized memory.
- /// Using it in any way except for [`ForeignOwnable::from_foreign`], [`ForeignOwnable::borrow`],
- /// [`ForeignOwnable::try_from_foreign`] can result in undefined behavior.
+ /// - The minimum alignment of the returned pointer is [`Self::FOREIGN_ALIGN`].
+ /// - The returned pointer is not null.
///
/// [`from_foreign`]: Self::from_foreign
/// [`try_from_foreign`]: Self::try_from_foreign
/// [`borrow`]: Self::borrow
/// [`borrow_mut`]: Self::borrow_mut
- fn into_foreign(self) -> *mut Self::PointedTo;
+ fn into_foreign(self) -> *mut c_void;
/// Converts a foreign-owned object back to a Rust-owned one.
///
@@ -60,7 +60,7 @@ pub unsafe trait ForeignOwnable: Sized {
/// must not be passed to `from_foreign` more than once.
///
/// [`into_foreign`]: Self::into_foreign
- unsafe fn from_foreign(ptr: *mut Self::PointedTo) -> Self;
+ unsafe fn from_foreign(ptr: *mut c_void) -> Self;
/// Tries to convert a foreign-owned object back to a Rust-owned one.
///
@@ -72,7 +72,7 @@ pub unsafe trait ForeignOwnable: Sized {
/// `ptr` must either be null or satisfy the safety requirements for [`from_foreign`].
///
/// [`from_foreign`]: Self::from_foreign
- unsafe fn try_from_foreign(ptr: *mut Self::PointedTo) -> Option<Self> {
+ unsafe fn try_from_foreign(ptr: *mut c_void) -> Option<Self> {
if ptr.is_null() {
None
} else {
@@ -95,7 +95,7 @@ pub unsafe trait ForeignOwnable: Sized {
///
/// [`into_foreign`]: Self::into_foreign
/// [`from_foreign`]: Self::from_foreign
- unsafe fn borrow<'a>(ptr: *mut Self::PointedTo) -> Self::Borrowed<'a>;
+ unsafe fn borrow<'a>(ptr: *mut c_void) -> Self::Borrowed<'a>;
/// Borrows a foreign-owned object mutably.
///
@@ -123,23 +123,24 @@ pub unsafe trait ForeignOwnable: Sized {
/// [`from_foreign`]: Self::from_foreign
/// [`borrow`]: Self::borrow
/// [`Arc`]: crate::sync::Arc
- unsafe fn borrow_mut<'a>(ptr: *mut Self::PointedTo) -> Self::BorrowedMut<'a>;
+ unsafe fn borrow_mut<'a>(ptr: *mut c_void) -> Self::BorrowedMut<'a>;
}
-// SAFETY: The `into_foreign` function returns a pointer that is dangling, but well-aligned.
+// SAFETY: The pointer returned by `into_foreign` comes from a well-aligned
+// pointer to `()`.
unsafe impl ForeignOwnable for () {
- type PointedTo = ();
+ const FOREIGN_ALIGN: usize = core::mem::align_of::<()>();
type Borrowed<'a> = ();
type BorrowedMut<'a> = ();
- fn into_foreign(self) -> *mut Self::PointedTo {
+ fn into_foreign(self) -> *mut c_void {
core::ptr::NonNull::dangling().as_ptr()
}
- unsafe fn from_foreign(_: *mut Self::PointedTo) -> Self {}
+ unsafe fn from_foreign(_: *mut c_void) -> Self {}
- unsafe fn borrow<'a>(_: *mut Self::PointedTo) -> Self::Borrowed<'a> {}
- unsafe fn borrow_mut<'a>(_: *mut Self::PointedTo) -> Self::BorrowedMut<'a> {}
+ unsafe fn borrow<'a>(_: *mut c_void) -> Self::Borrowed<'a> {}
+ unsafe fn borrow_mut<'a>(_: *mut c_void) -> Self::BorrowedMut<'a> {}
}
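As a usage sketch (assuming `KBox<u32>: ForeignOwnable`, as the kernel's `Box` types provide, inside a function returning `Result`), ownership crosses the FFI boundary like this:

    let value = KBox::new(42u32, GFP_KERNEL)?;
    let raw: *mut c_void = value.into_foreign(); // non-null, aligned to `FOREIGN_ALIGN`

    // ... `raw` is stashed in C-side state and later handed back ...

    // SAFETY: `raw` came from `into_foreign` and is converted back exactly once.
    let value = unsafe { <KBox<u32> as ForeignOwnable>::from_foreign(raw) };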
/// Runs a cleanup function/closure when dropped.
@@ -353,17 +354,6 @@ impl<T> Opaque<T> {
}
}
- /// Create an opaque pin-initializer from the given pin-initializer.
- pub fn pin_init(slot: impl PinInit<T>) -> impl PinInit<Self> {
- Self::ffi_init(|ptr: *mut T| {
- // SAFETY:
- // - `ptr` is a valid pointer to uninitialized memory,
- // - `slot` is not accessed on error; the call is infallible,
- // - `slot` is pinned in memory.
- let _ = unsafe { PinInit::<T>::__pinned_init(slot, ptr) };
- })
- }
-
/// Creates a pin-initializer from the given initializer closure.
///
/// The returned initializer calls the given closure with the pointer to the inner `T` of this
@@ -377,7 +367,7 @@ impl<T> Opaque<T> {
// initialize the `T`.
unsafe {
pin_init::pin_init_from_closure::<_, ::core::convert::Infallible>(move |slot| {
- init_func(Self::raw_get(slot));
+ init_func(Self::cast_into(slot));
Ok(())
})
}
@@ -397,7 +387,7 @@ impl<T> Opaque<T> {
// SAFETY: We contain a `MaybeUninit`, so it is OK for the `init_func` to not fully
// initialize the `T`.
unsafe {
- pin_init::pin_init_from_closure::<_, E>(move |slot| init_func(Self::raw_get(slot)))
+ pin_init::pin_init_from_closure::<_, E>(move |slot| init_func(Self::cast_into(slot)))
}
}
@@ -410,178 +400,29 @@ impl<T> Opaque<T> {
///
/// This function is useful to get access to the value without creating intermediate
/// references.
- pub const fn raw_get(this: *const Self) -> *mut T {
+ pub const fn cast_into(this: *const Self) -> *mut T {
UnsafeCell::raw_get(this.cast::<UnsafeCell<MaybeUninit<T>>>()).cast::<T>()
}
-}
-
-/// Types that are _always_ reference counted.
-///
-/// It allows such types to define their own custom ref increment and decrement functions.
-/// Additionally, it allows users to convert from a shared reference `&T` to an owned reference
-/// [`ARef<T>`].
-///
-/// This is usually implemented by wrappers to existing structures on the C side of the code. For
-/// Rust code, the recommendation is to use [`Arc`](crate::sync::Arc) to create reference-counted
-/// instances of a type.
-///
-/// # Safety
-///
-/// Implementers must ensure that increments to the reference count keep the object alive in memory
-/// at least until matching decrements are performed.
-///
-/// Implementers must also ensure that all instances are reference-counted. (Otherwise they
-/// won't be able to honour the requirement that [`AlwaysRefCounted::inc_ref`] keep the object
-/// alive.)
-pub unsafe trait AlwaysRefCounted {
- /// Increments the reference count on the object.
- fn inc_ref(&self);
- /// Decrements the reference count on the object.
- ///
- /// Frees the object when the count reaches zero.
- ///
- /// # Safety
- ///
- /// Callers must ensure that there was a previous matching increment to the reference count,
- /// and that the object is no longer used after its reference count is decremented (as it may
- /// result in the object being freed), unless the caller owns another increment on the refcount
- /// (e.g., it calls [`AlwaysRefCounted::inc_ref`] twice, then calls
- /// [`AlwaysRefCounted::dec_ref`] once).
- unsafe fn dec_ref(obj: NonNull<Self>);
-}
-
-/// An owned reference to an always-reference-counted object.
-///
-/// The object's reference count is automatically decremented when an instance of [`ARef`] is
-/// dropped. It is also automatically incremented when a new instance is created via
-/// [`ARef::clone`].
-///
-/// # Invariants
-///
-/// The pointer stored in `ptr` is non-null and valid for the lifetime of the [`ARef`] instance. In
-/// particular, the [`ARef`] instance owns an increment on the underlying object's reference count.
-pub struct ARef<T: AlwaysRefCounted> {
- ptr: NonNull<T>,
- _p: PhantomData<T>,
-}
-
-// SAFETY: It is safe to send `ARef<T>` to another thread when the underlying `T` is `Sync` because
-// it effectively means sharing `&T` (which is safe because `T` is `Sync`); additionally, it needs
-// `T` to be `Send` because any thread that has an `ARef<T>` may ultimately access `T` using a
-// mutable reference, for example, when the reference count reaches zero and `T` is dropped.
-unsafe impl<T: AlwaysRefCounted + Sync + Send> Send for ARef<T> {}
-
-// SAFETY: It is safe to send `&ARef<T>` to another thread when the underlying `T` is `Sync`
-// because it effectively means sharing `&T` (which is safe because `T` is `Sync`); additionally,
-// it needs `T` to be `Send` because any thread that has a `&ARef<T>` may clone it and get an
-// `ARef<T>` on that thread, so the thread may ultimately access `T` using a mutable reference, for
-// example, when the reference count reaches zero and `T` is dropped.
-unsafe impl<T: AlwaysRefCounted + Sync + Send> Sync for ARef<T> {}
-
-impl<T: AlwaysRefCounted> ARef<T> {
- /// Creates a new instance of [`ARef`].
- ///
- /// It takes over an increment of the reference count on the underlying object.
- ///
- /// # Safety
- ///
- /// Callers must ensure that the reference count was incremented at least once, and that they
- /// are properly relinquishing one increment. That is, if there is only one increment, callers
- /// must not use the underlying object anymore -- it is only safe to do so via the newly
- /// created [`ARef`].
- pub unsafe fn from_raw(ptr: NonNull<T>) -> Self {
- // INVARIANT: The safety requirements guarantee that the new instance now owns the
- // increment on the refcount.
- Self {
- ptr,
- _p: PhantomData,
- }
- }
-
- /// Consumes the `ARef`, returning a raw pointer.
- ///
- /// This function does not change the refcount. After calling this function, the caller is
- /// responsible for the refcount previously managed by the `ARef`.
- ///
- /// # Examples
- ///
- /// ```
- /// use core::ptr::NonNull;
- /// use kernel::types::{ARef, AlwaysRefCounted};
- ///
- /// struct Empty {}
- ///
- /// # // SAFETY: TODO.
- /// unsafe impl AlwaysRefCounted for Empty {
- /// fn inc_ref(&self) {}
- /// unsafe fn dec_ref(_obj: NonNull<Self>) {}
- /// }
- ///
- /// let mut data = Empty {};
- /// let ptr = NonNull::<Empty>::new(&mut data).unwrap();
- /// # // SAFETY: TODO.
- /// let data_ref: ARef<Empty> = unsafe { ARef::from_raw(ptr) };
- /// let raw_ptr: NonNull<Empty> = ARef::into_raw(data_ref);
- ///
- /// assert_eq!(ptr, raw_ptr);
- /// ```
- pub fn into_raw(me: Self) -> NonNull<T> {
- ManuallyDrop::new(me).ptr
+ /// The opposite operation of [`Opaque::cast_into`].
+ pub const fn cast_from(this: *const T) -> *const Self {
+ this.cast()
}
}
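For example, given only a raw `*const Opaque<T>` (say, inside an FFI callback), the inner `T` can be reached without ever materializing a reference; a small sketch:

    fn value_ptr(o: *const Opaque<u32>) -> *mut u32 {
        // No `&Opaque<u32>` or `&u32` is created on the way.
        Opaque::cast_into(o)
    }

    fn wrap_ptr(p: *const u32) -> *const Opaque<u32> {
        Opaque::cast_from(p)
    }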
-impl<T: AlwaysRefCounted> Clone for ARef<T> {
- fn clone(&self) -> Self {
- self.inc_ref();
- // SAFETY: We just incremented the refcount above.
- unsafe { Self::from_raw(self.ptr) }
- }
-}
-
-impl<T: AlwaysRefCounted> Deref for ARef<T> {
- type Target = T;
-
- fn deref(&self) -> &Self::Target {
- // SAFETY: The type invariants guarantee that the object is valid.
- unsafe { self.ptr.as_ref() }
- }
-}
-
-impl<T: AlwaysRefCounted> From<&T> for ARef<T> {
- fn from(b: &T) -> Self {
- b.inc_ref();
- // SAFETY: We just incremented the refcount above.
- unsafe { Self::from_raw(NonNull::from(b)) }
- }
-}
-
-impl<T: AlwaysRefCounted> Drop for ARef<T> {
- fn drop(&mut self) {
- // SAFETY: The type invariants guarantee that the `ARef` owns the reference we're about to
- // decrement.
- unsafe { T::dec_ref(self.ptr) };
+impl<T> Wrapper<T> for Opaque<T> {
+ /// Create an opaque pin-initializer from the given pin-initializer.
+ fn pin_init<E>(slot: impl PinInit<T, E>) -> impl PinInit<Self, E> {
+ Self::try_ffi_init(|ptr: *mut T| {
+ // SAFETY:
+ // - `ptr` is a valid pointer to uninitialized memory,
+ // - `slot` is not accessed on error,
+ // - `slot` is pinned in memory.
+ unsafe { PinInit::<T, E>::__pinned_init(slot, ptr) }
+ })
}
}
-/// A sum type that always holds either a value of type `L` or `R`.
-///
-/// # Examples
-///
-/// ```
-/// use kernel::types::Either;
-///
-/// let left_value: Either<i32, &str> = Either::Left(7);
-/// let right_value: Either<i32, &str> = Either::Right("right value");
-/// ```
-pub enum Either<L, R> {
- /// Constructs an instance of [`Either`] containing a value of type `L`.
- Left(L),
-
- /// Constructs an instance of [`Either`] containing a value of type `R`.
- Right(R),
-}
-
/// Zero-sized type to mark types not [`Send`].
///
/// Add this type as a field to your struct if your type should not be sent to a different task.
diff --git a/rust/kernel/uaccess.rs b/rust/kernel/uaccess.rs
index 6d70edd8086a..a8fb4764185a 100644
--- a/rust/kernel/uaccess.rs
+++ b/rust/kernel/uaccess.rs
@@ -8,14 +8,57 @@ use crate::{
alloc::{Allocator, Flags},
bindings,
error::Result,
- ffi::c_void,
+ ffi::{c_char, c_void},
prelude::*,
transmute::{AsBytes, FromBytes},
};
use core::mem::{size_of, MaybeUninit};
-/// The type used for userspace addresses.
-pub type UserPtr = usize;
+/// A pointer into userspace.
+///
+/// This is the Rust equivalent to C pointers tagged with `__user`.
+#[repr(transparent)]
+#[derive(Copy, Clone)]
+pub struct UserPtr(*mut c_void);
+
+impl UserPtr {
+ /// Create a `UserPtr` from an integer representing the userspace address.
+ #[inline]
+ pub fn from_addr(addr: usize) -> Self {
+ Self(addr as *mut c_void)
+ }
+
+ /// Create a `UserPtr` from a pointer representing the userspace address.
+ #[inline]
+ pub fn from_ptr(addr: *mut c_void) -> Self {
+ Self(addr)
+ }
+
+ /// Cast this userspace pointer to a raw const void pointer.
+ ///
+ /// It is up to the caller to use the returned pointer correctly.
+ #[inline]
+ pub fn as_const_ptr(self) -> *const c_void {
+ self.0
+ }
+
+ /// Cast this userspace pointer to a raw mutable void pointer.
+ ///
+ /// It is up to the caller to use the returned pointer correctly.
+ #[inline]
+ pub fn as_mut_ptr(self) -> *mut c_void {
+ self.0
+ }
+
+ /// Increment this user pointer by `add` bytes.
+ ///
+ /// This addition is wrapping, so wrapping around the address space does not result in a panic
+ /// even if `CONFIG_RUST_OVERFLOW_CHECKS` is enabled.
+ #[inline]
+ pub fn wrapping_byte_add(self, add: usize) -> UserPtr {
+ UserPtr(self.0.wrapping_byte_add(add))
+ }
+}
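A short sketch of the intended call pattern (the address is hypothetical):

    let uptr = UserPtr::from_addr(0x7fff_0000);
    let next = uptr.wrapping_byte_add(8); // never panics, by design
    let raw: *const c_void = next.as_const_ptr();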
/// A pointer to an area in userspace memory, which can be either read-only or read-write.
///
@@ -177,7 +220,7 @@ impl UserSliceReader {
pub fn skip(&mut self, num_skip: usize) -> Result {
// Update `self.length` first since that's the fallible part of this operation.
self.length = self.length.checked_sub(num_skip).ok_or(EFAULT)?;
- self.ptr = self.ptr.wrapping_add(num_skip);
+ self.ptr = self.ptr.wrapping_byte_add(num_skip);
Ok(())
}
@@ -224,11 +267,11 @@ impl UserSliceReader {
}
// SAFETY: `out_ptr` points into a mutable slice of length `len`, so we may write
// that many bytes to it.
- let res = unsafe { bindings::copy_from_user(out_ptr, self.ptr as *const c_void, len) };
+ let res = unsafe { bindings::copy_from_user(out_ptr, self.ptr.as_const_ptr(), len) };
if res != 0 {
return Err(EFAULT);
}
- self.ptr = self.ptr.wrapping_add(len);
+ self.ptr = self.ptr.wrapping_byte_add(len);
self.length -= len;
Ok(())
}
@@ -240,7 +283,7 @@ impl UserSliceReader {
pub fn read_slice(&mut self, out: &mut [u8]) -> Result {
// SAFETY: The types are compatible and `read_raw` doesn't write uninitialized bytes to
// `out`.
- let out = unsafe { &mut *(out as *mut [u8] as *mut [MaybeUninit<u8>]) };
+ let out = unsafe { &mut *(core::ptr::from_mut(out) as *mut [MaybeUninit<u8>]) };
self.read_raw(out)
}
@@ -262,14 +305,14 @@ impl UserSliceReader {
let res = unsafe {
bindings::_copy_from_user(
out.as_mut_ptr().cast::<c_void>(),
- self.ptr as *const c_void,
+ self.ptr.as_const_ptr(),
len,
)
};
if res != 0 {
return Err(EFAULT);
}
- self.ptr = self.ptr.wrapping_add(len);
+ self.ptr = self.ptr.wrapping_byte_add(len);
self.length -= len;
// SAFETY: The read above has initialized all bytes in `out`, and since `T` implements
// `FromBytes`, any bit-pattern is a valid value for this type.
@@ -291,6 +334,65 @@ impl UserSliceReader {
unsafe { buf.inc_len(len) };
Ok(())
}
+
+ /// Reads a NUL-terminated string from userspace and returns it.
+ ///
+ /// The string is read into `buf` and a NUL-terminator is added if the end of `buf` is reached.
+ /// Since there must be space to add a NUL-terminator, the buffer must not be empty. The
+ /// returned `&CStr` points into `buf`.
+ ///
+ /// Fails with [`EFAULT`] if the read happens on a bad address (some data may have been
+ /// copied).
+ #[doc(alias = "strncpy_from_user")]
+ pub fn strcpy_into_buf<'buf>(self, buf: &'buf mut [u8]) -> Result<&'buf CStr> {
+ if buf.is_empty() {
+ return Err(EINVAL);
+ }
+
+ // SAFETY: The types are compatible and `strncpy_from_user` doesn't write uninitialized
+ // bytes to `buf`.
+ let mut dst = unsafe { &mut *(core::ptr::from_mut(buf) as *mut [MaybeUninit<u8>]) };
+
+ // We never read more than `self.length` bytes.
+ if dst.len() > self.length {
+ dst = &mut dst[..self.length];
+ }
+
+ let mut len = raw_strncpy_from_user(dst, self.ptr)?;
+ if len < dst.len() {
+ // Add one to include the NUL-terminator.
+ len += 1;
+ } else if len < buf.len() {
+ // This implies that `len == dst.len() < buf.len()`.
+ //
+ // This means that we could not fill the entire buffer, but we had to stop reading
+ // because we hit the `self.length` limit of this `UserSliceReader`. Since we did not
+ // fill the buffer, we treat this case as if we tried to read past the `self.length`
+ // limit and received a page fault, which is consistent with other `UserSliceReader`
+ // methods that also return page faults when you exceed `self.length`.
+ return Err(EFAULT);
+ } else {
+ // This implies that `len == buf.len()`.
+ //
+ // This means that we filled the buffer exactly. In this case, we add a NUL-terminator
+ // and return it. Unlike the `len < dst.len()` branch, we do not modify `len` because it
+ // already represents the length including the NUL-terminator.
+ //
+ // SAFETY: Due to the check at the beginning, the buffer is not empty.
+ unsafe { *buf.last_mut().unwrap_unchecked() = 0 };
+ }
+
+ // This method consumes `self`, so it can only be called once, thus we do not need to
+ // update `self.length`. This sidesteps concerns such as whether `self.length` should be
+ // decremented by `len` or `len-1` in the `len == buf.len()` case.
+
+ // SAFETY: There are two cases:
+ // * If we hit the `len < dst.len()` case, then `raw_strncpy_from_user` guarantees that
+ // this slice contains exactly one NUL byte at the end of the string.
+ // * Otherwise, `raw_strncpy_from_user` guarantees that the string contained no NUL bytes,
+ // and we have since added a NUL byte at the end.
+ Ok(unsafe { CStr::from_bytes_with_nul_unchecked(&buf[..len]) })
+ }
}
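A sketch of the intended use, with a hypothetical `UserSliceReader` named `reader` inside a function returning `Result`:

    let mut buf = [0u8; 64];
    let name: &CStr = reader.strcpy_into_buf(&mut buf)?;
    pr_info!("got name: {}\n", name);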
/// A writer for [`UserSlice`].
@@ -327,11 +429,11 @@ impl UserSliceWriter {
}
// SAFETY: `data_ptr` points into an immutable slice of length `len`, so we may read
// that many bytes from it.
- let res = unsafe { bindings::copy_to_user(self.ptr as *mut c_void, data_ptr, len) };
+ let res = unsafe { bindings::copy_to_user(self.ptr.as_mut_ptr(), data_ptr, len) };
if res != 0 {
return Err(EFAULT);
}
- self.ptr = self.ptr.wrapping_add(len);
+ self.ptr = self.ptr.wrapping_byte_add(len);
self.length -= len;
Ok(())
}
@@ -354,16 +456,53 @@ impl UserSliceWriter {
// is a compile-time constant.
let res = unsafe {
bindings::_copy_to_user(
- self.ptr as *mut c_void,
- (value as *const T).cast::<c_void>(),
+ self.ptr.as_mut_ptr(),
+ core::ptr::from_ref(value).cast::<c_void>(),
len,
)
};
if res != 0 {
return Err(EFAULT);
}
- self.ptr = self.ptr.wrapping_add(len);
+ self.ptr = self.ptr.wrapping_byte_add(len);
self.length -= len;
Ok(())
}
}
+
+/// Reads a NUL-terminated string into `dst` and returns the length.
+///
+/// This reads from userspace until a NUL byte is encountered, or until `dst.len()` bytes have been
+/// read. Fails with [`EFAULT`] if a read happens on a bad address (some data may have been
+/// copied). When the end of the buffer is encountered, no NUL byte is added, so the string is
+/// *not* guaranteed to be NUL-terminated when `Ok(dst.len())` is returned.
+///
+/// # Guarantees
+///
+/// When this function returns `Ok(len)`, it is guaranteed that the first `len` bytes of `dst` are
+/// initialized and non-zero. Furthermore, if `len < dst.len()`, then `dst[len]` is a NUL byte.
+#[inline]
+fn raw_strncpy_from_user(dst: &mut [MaybeUninit<u8>], src: UserPtr) -> Result<usize> {
+ // CAST: Slice lengths are guaranteed to be `<= isize::MAX`.
+ let len = dst.len() as isize;
+
+ // SAFETY: `dst` is valid for writing `dst.len()` bytes.
+ let res = unsafe {
+ bindings::strncpy_from_user(
+ dst.as_mut_ptr().cast::<c_char>(),
+ src.as_const_ptr().cast::<c_char>(),
+ len,
+ )
+ };
+
+ if res < 0 {
+ return Err(Error::from_errno(res as i32));
+ }
+
+ #[cfg(CONFIG_RUST_OVERFLOW_CHECKS)]
+ assert!(res <= len);
+
+ // GUARANTEES: `strncpy_from_user` was successful, so `dst` has contents in accordance with the
+ // guarantees of this function.
+ Ok(res as usize)
+}
diff --git a/rust/kernel/usb.rs b/rust/kernel/usb.rs
new file mode 100644
index 000000000000..14ddb711bab3
--- /dev/null
+++ b/rust/kernel/usb.rs
@@ -0,0 +1,456 @@
+// SPDX-License-Identifier: GPL-2.0
+// SPDX-FileCopyrightText: Copyright (C) 2025 Collabora Ltd.
+
+//! Abstractions for the USB bus.
+//!
+//! C header: [`include/linux/usb.h`](srctree/include/linux/usb.h)
+
+use crate::{
+ bindings, device,
+ device_id::{RawDeviceId, RawDeviceIdIndex},
+ driver,
+ error::{from_result, to_result, Result},
+ prelude::*,
+ str::CStr,
+ types::{AlwaysRefCounted, Opaque},
+ ThisModule,
+};
+use core::{marker::PhantomData, mem::MaybeUninit, ptr::NonNull};
+
+/// An adapter for the registration of USB drivers.
+pub struct Adapter<T: Driver>(T);
+
+// SAFETY: A call to `unregister` for a given instance of `RegType` is guaranteed to be valid if
+// a preceding call to `register` has been successful.
+unsafe impl<T: Driver + 'static> driver::RegistrationOps for Adapter<T> {
+ type RegType = bindings::usb_driver;
+
+ unsafe fn register(
+ udrv: &Opaque<Self::RegType>,
+ name: &'static CStr,
+ module: &'static ThisModule,
+ ) -> Result {
+ // SAFETY: It's safe to set the fields of `struct usb_driver` on initialization.
+ unsafe {
+ (*udrv.get()).name = name.as_char_ptr();
+ (*udrv.get()).probe = Some(Self::probe_callback);
+ (*udrv.get()).disconnect = Some(Self::disconnect_callback);
+ (*udrv.get()).id_table = T::ID_TABLE.as_ptr();
+ }
+
+ // SAFETY: `udrv` is guaranteed to be a valid `RegType`.
+ to_result(unsafe {
+ bindings::usb_register_driver(udrv.get(), module.0, name.as_char_ptr())
+ })
+ }
+
+ unsafe fn unregister(udrv: &Opaque<Self::RegType>) {
+ // SAFETY: `udrv` is guaranteed to be a valid `RegType`.
+ unsafe { bindings::usb_deregister(udrv.get()) };
+ }
+}
+
+impl<T: Driver + 'static> Adapter<T> {
+ extern "C" fn probe_callback(
+ intf: *mut bindings::usb_interface,
+ id: *const bindings::usb_device_id,
+ ) -> kernel::ffi::c_int {
+ // SAFETY: The USB core only ever calls the probe callback with a valid pointer to a
+ // `struct usb_interface` and `struct usb_device_id`.
+ //
+ // INVARIANT: `intf` is valid for the duration of `probe_callback()`.
+ let intf = unsafe { &*intf.cast::<Interface<device::CoreInternal>>() };
+
+ from_result(|| {
+ // SAFETY: `DeviceId` is a `#[repr(transparent)]` wrapper of `struct usb_device_id` and
+ // does not add additional invariants, so it's safe to transmute.
+ let id = unsafe { &*id.cast::<DeviceId>() };
+
+ let info = T::ID_TABLE.info(id.index());
+ let data = T::probe(intf, id, info)?;
+
+ let dev: &device::Device<device::CoreInternal> = intf.as_ref();
+ dev.set_drvdata(data);
+ Ok(0)
+ })
+ }
+
+ extern "C" fn disconnect_callback(intf: *mut bindings::usb_interface) {
+ // SAFETY: The USB core only ever calls the disconnect callback with a valid pointer to a
+ // `struct usb_interface`.
+ //
+ // INVARIANT: `intf` is valid for the duration of `disconnect_callback()`.
+ let intf = unsafe { &*intf.cast::<Interface<device::CoreInternal>>() };
+
+ let dev: &device::Device<device::CoreInternal> = intf.as_ref();
+
+ // SAFETY: `disconnect_callback` is only ever called after a successful call to
+ // `probe_callback`, hence it's guaranteed that `Device::set_drvdata()` has been called
+ // and stored a `Pin<KBox<T>>`.
+ let data = unsafe { dev.drvdata_obtain::<Pin<KBox<T>>>() };
+
+ T::disconnect(intf, data.as_ref());
+ }
+}
+
+/// Abstraction for the USB device ID structure, i.e. [`struct usb_device_id`].
+///
+/// [`struct usb_device_id`]: https://docs.kernel.org/driver-api/basics.html#c.usb_device_id
+#[repr(transparent)]
+#[derive(Clone, Copy)]
+pub struct DeviceId(bindings::usb_device_id);
+
+impl DeviceId {
+ /// Equivalent to C's `USB_DEVICE` macro.
+ pub const fn from_id(vendor: u16, product: u16) -> Self {
+ Self(bindings::usb_device_id {
+ match_flags: bindings::USB_DEVICE_ID_MATCH_DEVICE as u16,
+ idVendor: vendor,
+ idProduct: product,
+ // SAFETY: It is safe to use all zeroes for the other fields of `usb_device_id`.
+ ..unsafe { MaybeUninit::zeroed().assume_init() }
+ })
+ }
+
+ /// Equivalent to C's `USB_DEVICE_VER` macro.
+ pub const fn from_device_ver(vendor: u16, product: u16, bcd_lo: u16, bcd_hi: u16) -> Self {
+ Self(bindings::usb_device_id {
+ match_flags: bindings::USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION as u16,
+ idVendor: vendor,
+ idProduct: product,
+ bcdDevice_lo: bcd_lo,
+ bcdDevice_hi: bcd_hi,
+ // SAFETY: It is safe to use all zeroes for the other fields of `usb_device_id`.
+ ..unsafe { MaybeUninit::zeroed().assume_init() }
+ })
+ }
+
+ /// Equivalent to C's `USB_DEVICE_INFO` macro.
+ pub const fn from_device_info(class: u8, subclass: u8, protocol: u8) -> Self {
+ Self(bindings::usb_device_id {
+ match_flags: bindings::USB_DEVICE_ID_MATCH_DEV_INFO as u16,
+ bDeviceClass: class,
+ bDeviceSubClass: subclass,
+ bDeviceProtocol: protocol,
+ // SAFETY: It is safe to use all zeroes for the other fields of `usb_device_id`.
+ ..unsafe { MaybeUninit::zeroed().assume_init() }
+ })
+ }
+
+ /// Equivalent to C's `USB_INTERFACE_INFO` macro.
+ pub const fn from_interface_info(class: u8, subclass: u8, protocol: u8) -> Self {
+ Self(bindings::usb_device_id {
+ match_flags: bindings::USB_DEVICE_ID_MATCH_INT_INFO as u16,
+ bInterfaceClass: class,
+ bInterfaceSubClass: subclass,
+ bInterfaceProtocol: protocol,
+ // SAFETY: It is safe to use all zeroes for the other fields of `usb_device_id`.
+ ..unsafe { MaybeUninit::zeroed().assume_init() }
+ })
+ }
+
+ /// Equivalent to C's `USB_DEVICE_INTERFACE_CLASS` macro.
+ pub const fn from_device_interface_class(vendor: u16, product: u16, class: u8) -> Self {
+ Self(bindings::usb_device_id {
+ match_flags: (bindings::USB_DEVICE_ID_MATCH_DEVICE
+ | bindings::USB_DEVICE_ID_MATCH_INT_CLASS) as u16,
+ idVendor: vendor,
+ idProduct: product,
+ bInterfaceClass: class,
+ // SAFETY: It is safe to use all zeroes for the other fields of `usb_device_id`.
+ ..unsafe { MaybeUninit::zeroed().assume_init() }
+ })
+ }
+
+ /// Equivalent to C's `USB_DEVICE_INTERFACE_PROTOCOL` macro.
+ pub const fn from_device_interface_protocol(vendor: u16, product: u16, protocol: u8) -> Self {
+ Self(bindings::usb_device_id {
+ match_flags: (bindings::USB_DEVICE_ID_MATCH_DEVICE
+ | bindings::USB_DEVICE_ID_MATCH_INT_PROTOCOL) as u16,
+ idVendor: vendor,
+ idProduct: product,
+ bInterfaceProtocol: protocol,
+ // SAFETY: It is safe to use all zeroes for the other fields of `usb_device_id`.
+ ..unsafe { MaybeUninit::zeroed().assume_init() }
+ })
+ }
+
+ /// Equivalent to C's `USB_DEVICE_INTERFACE_NUMBER` macro.
+ pub const fn from_device_interface_number(vendor: u16, product: u16, number: u8) -> Self {
+ Self(bindings::usb_device_id {
+ match_flags: (bindings::USB_DEVICE_ID_MATCH_DEVICE
+ | bindings::USB_DEVICE_ID_MATCH_INT_NUMBER) as u16,
+ idVendor: vendor,
+ idProduct: product,
+ bInterfaceNumber: number,
+ // SAFETY: It is safe to use all zeroes for the other fields of `usb_device_id`.
+ ..unsafe { MaybeUninit::zeroed().assume_init() }
+ })
+ }
+
+ /// Equivalent to C's `USB_DEVICE_AND_INTERFACE_INFO` macro.
+ pub const fn from_device_and_interface_info(
+ vendor: u16,
+ product: u16,
+ class: u8,
+ subclass: u8,
+ protocol: u8,
+ ) -> Self {
+ Self(bindings::usb_device_id {
+ match_flags: (bindings::USB_DEVICE_ID_MATCH_INT_INFO
+ | bindings::USB_DEVICE_ID_MATCH_DEVICE) as u16,
+ idVendor: vendor,
+ idProduct: product,
+ bInterfaceClass: class,
+ bInterfaceSubClass: subclass,
+ bInterfaceProtocol: protocol,
+ // SAFETY: It is safe to use all zeroes for the other fields of `usb_device_id`.
+ ..unsafe { MaybeUninit::zeroed().assume_init() }
+ })
+ }
+}
+
+// SAFETY: `DeviceId` is a `#[repr(transparent)]` wrapper of `usb_device_id` and does not add
+// additional invariants, so it's safe to transmute to `RawType`.
+unsafe impl RawDeviceId for DeviceId {
+ type RawType = bindings::usb_device_id;
+}
+
+// SAFETY: `DRIVER_DATA_OFFSET` is the offset to the `driver_info` field.
+unsafe impl RawDeviceIdIndex for DeviceId {
+ const DRIVER_DATA_OFFSET: usize = core::mem::offset_of!(bindings::usb_device_id, driver_info);
+
+ fn index(&self) -> usize {
+ self.0.driver_info
+ }
+}
+
+/// [`IdTable`](kernel::device_id::IdTable) type for USB.
+pub type IdTable<T> = &'static dyn kernel::device_id::IdTable<DeviceId, T>;
+
+/// Create a USB `IdTable` with its alias for modpost.
+#[macro_export]
+macro_rules! usb_device_table {
+ ($table_name:ident, $module_table_name:ident, $id_info_type: ty, $table_data: expr) => {
+ const $table_name: $crate::device_id::IdArray<
+ $crate::usb::DeviceId,
+ $id_info_type,
+ { $table_data.len() },
+ > = $crate::device_id::IdArray::new($table_data);
+
+ $crate::module_device_table!("usb", $module_table_name, $table_name);
+ };
+}
+
+/// The USB driver trait.
+///
+/// # Examples
+///
+///```
+/// # use kernel::{bindings, device::Core, usb};
+/// use kernel::prelude::*;
+///
+/// struct MyDriver;
+///
+/// kernel::usb_device_table!(
+/// USB_TABLE,
+/// MODULE_USB_TABLE,
+/// <MyDriver as usb::Driver>::IdInfo,
+/// [
+/// (usb::DeviceId::from_id(0x1234, 0x5678), ()),
+/// (usb::DeviceId::from_id(0xabcd, 0xef01), ()),
+/// ]
+/// );
+///
+/// impl usb::Driver for MyDriver {
+/// type IdInfo = ();
+/// const ID_TABLE: usb::IdTable<Self::IdInfo> = &USB_TABLE;
+///
+/// fn probe(
+/// _interface: &usb::Interface<Core>,
+/// _id: &usb::DeviceId,
+/// _info: &Self::IdInfo,
+/// ) -> Result<Pin<KBox<Self>>> {
+/// Err(ENODEV)
+/// }
+///
+/// fn disconnect(_interface: &usb::Interface<Core>, _data: Pin<&Self>) {}
+/// }
+///```
+pub trait Driver {
+ /// The type holding information about each one of the device ids supported by the driver.
+ type IdInfo: 'static;
+
+ /// The table of device ids supported by the driver.
+ const ID_TABLE: IdTable<Self::IdInfo>;
+
+ /// USB driver probe.
+ ///
+ /// Called when a new USB interface is bound to this driver.
+ /// Implementers should attempt to initialize the interface here.
+ fn probe(
+ interface: &Interface<device::Core>,
+ id: &DeviceId,
+ id_info: &Self::IdInfo,
+ ) -> Result<Pin<KBox<Self>>>;
+
+ /// USB driver disconnect.
+ ///
+ /// Called when the USB interface is about to be unbound from this driver.
+ fn disconnect(interface: &Interface<device::Core>, data: Pin<&Self>);
+}
+
+/// A USB interface.
+///
+/// This structure is the Rust abstraction for a C [`struct usb_interface`], which is
+/// created and passed in by the C side.
+///
+/// # Invariants
+///
+/// An [`Interface`] instance represents a valid [`struct usb_interface`] created
+/// by the C portion of the kernel.
+///
+/// [`struct usb_interface`]: https://www.kernel.org/doc/html/latest/driver-api/usb/usb.html#c.usb_interface
+#[repr(transparent)]
+pub struct Interface<Ctx: device::DeviceContext = device::Normal>(
+ Opaque<bindings::usb_interface>,
+ PhantomData<Ctx>,
+);
+
+impl<Ctx: device::DeviceContext> Interface<Ctx> {
+ fn as_raw(&self) -> *mut bindings::usb_interface {
+ self.0.get()
+ }
+}
+
+// SAFETY: `Interface` is a transparent wrapper of a type that doesn't depend on
+// `Interface`'s generic argument.
+kernel::impl_device_context_deref!(unsafe { Interface });
+kernel::impl_device_context_into_aref!(Interface);
+
+impl<Ctx: device::DeviceContext> AsRef<device::Device<Ctx>> for Interface<Ctx> {
+ fn as_ref(&self) -> &device::Device<Ctx> {
+ // SAFETY: By the type invariant of `Self`, `self.as_raw()` is a pointer to a valid
+ // `struct usb_interface`.
+ let dev = unsafe { &raw mut ((*self.as_raw()).dev) };
+
+ // SAFETY: `dev` points to a valid `struct device`.
+ unsafe { device::Device::from_raw(dev) }
+ }
+}
+
+impl<Ctx: device::DeviceContext> AsRef<Device> for Interface<Ctx> {
+ fn as_ref(&self) -> &Device {
+ // SAFETY: `self.as_raw()` is valid by the type invariants.
+ let usb_dev = unsafe { bindings::interface_to_usbdev(self.as_raw()) };
+
+ // SAFETY: For a valid `struct usb_interface` pointer, the above call to
+ // `interface_to_usbdev()` guarantees to return a valid pointer to a `struct usb_device`.
+ unsafe { &*(usb_dev.cast()) }
+ }
+}
+
+// SAFETY: Instances of `Interface` are always reference-counted.
+unsafe impl AlwaysRefCounted for Interface {
+ fn inc_ref(&self) {
+ // SAFETY: The invariants of `Interface` guarantee that `self.as_raw()`
+ // returns a valid `struct usb_interface` pointer, for which we will
+ // acquire a new refcount.
+ unsafe { bindings::usb_get_intf(self.as_raw()) };
+ }
+
+ unsafe fn dec_ref(obj: NonNull<Self>) {
+ // SAFETY: The safety requirements guarantee that the refcount is non-zero.
+ unsafe { bindings::usb_put_intf(obj.cast().as_ptr()) }
+ }
+}
+
+// SAFETY: An `Interface` is always reference-counted and can be released from any thread.
+unsafe impl Send for Interface {}
+
+// SAFETY: It is safe to send a `&Interface` to another thread because we do not
+// allow any mutation through a shared reference.
+unsafe impl Sync for Interface {}
+
+/// A USB device.
+///
+/// This structure is the Rust abstraction for a C [`struct usb_device`], which is
+/// created and passed in by the C side.
+///
+/// # Invariants
+///
+/// A [`Device`] instance represents a valid [`struct usb_device`] created by the C portion of the
+/// kernel.
+///
+/// [`struct usb_device`]: https://www.kernel.org/doc/html/latest/driver-api/usb/usb.html#c.usb_device
+#[repr(transparent)]
+struct Device<Ctx: device::DeviceContext = device::Normal>(
+ Opaque<bindings::usb_device>,
+ PhantomData<Ctx>,
+);
+
+impl<Ctx: device::DeviceContext> Device<Ctx> {
+ fn as_raw(&self) -> *mut bindings::usb_device {
+ self.0.get()
+ }
+}
+
+// SAFETY: `Device` is a transparent wrapper of a type that doesn't depend on `Device`'s generic
+// argument.
+kernel::impl_device_context_deref!(unsafe { Device });
+kernel::impl_device_context_into_aref!(Device);
+
+// SAFETY: Instances of `Device` are always reference-counted.
+unsafe impl AlwaysRefCounted for Device {
+ fn inc_ref(&self) {
+ // SAFETY: The invariants of `Device` guarantee that `self.as_raw()`
+ // returns a valid `struct usb_device` pointer, for which we will
+ // acquire a new refcount.
+ unsafe { bindings::usb_get_dev(self.as_raw()) };
+ }
+
+ unsafe fn dec_ref(obj: NonNull<Self>) {
+ // SAFETY: The safety requirements guarantee that the refcount is non-zero.
+ unsafe { bindings::usb_put_dev(obj.cast().as_ptr()) }
+ }
+}
+
+impl<Ctx: device::DeviceContext> AsRef<device::Device<Ctx>> for Device<Ctx> {
+ fn as_ref(&self) -> &device::Device<Ctx> {
+ // SAFETY: By the type invariant of `Self`, `self.as_raw()` is a pointer to a valid
+ // `struct usb_device`.
+ let dev = unsafe { &raw mut ((*self.as_raw()).dev) };
+
+ // SAFETY: `dev` points to a valid `struct device`.
+ unsafe { device::Device::from_raw(dev) }
+ }
+}
+
+// SAFETY: A `Device` is always reference-counted and can be released from any thread.
+unsafe impl Send for Device {}
+
+// SAFETY: It is safe to send a `&Device` to another thread because we do not
+// allow any mutation through a shared reference.
+unsafe impl Sync for Device {}
+
+/// Declares a kernel module that exposes a single USB driver.
+///
+/// # Examples
+///
+/// ```ignore
+/// module_usb_driver! {
+/// type: MyDriver,
+/// name: "Module name",
+/// author: ["Author name"],
+/// description: "Description",
+/// license: "GPL v2",
+/// }
+/// ```
+#[macro_export]
+macro_rules! module_usb_driver {
+ ($($f:tt)*) => {
+ $crate::module_driver!(<T>, $crate::usb::Adapter<T>, { $($f)* });
+ }
+}
diff --git a/rust/kernel/workqueue.rs b/rust/kernel/workqueue.rs
index d092112d843f..706e833e9702 100644
--- a/rust/kernel/workqueue.rs
+++ b/rust/kernel/workqueue.rs
@@ -26,7 +26,7 @@
//! * The [`WorkItemPointer`] trait is implemented for the pointer type that points at something
//! that implements [`WorkItem`].
//!
-//! ## Example
+//! ## Examples
//!
//! This example defines a struct that holds an integer and can be scheduled on the workqueue. When
//! the struct is executed, it will print the integer. Since there is only one `work_struct` field,
@@ -131,10 +131,69 @@
//! # print_2_later(MyStruct::new(41, 42).unwrap());
//! ```
//!
+//! This example shows how you can schedule delayed work items:
+//!
+//! ```
+//! use kernel::sync::Arc;
+//! use kernel::workqueue::{self, impl_has_delayed_work, new_delayed_work, DelayedWork, WorkItem};
+//!
+//! #[pin_data]
+//! struct MyStruct {
+//! value: i32,
+//! #[pin]
+//! work: DelayedWork<MyStruct>,
+//! }
+//!
+//! impl_has_delayed_work! {
+//! impl HasDelayedWork<Self> for MyStruct { self.work }
+//! }
+//!
+//! impl MyStruct {
+//! fn new(value: i32) -> Result<Arc<Self>> {
+//! Arc::pin_init(
+//! pin_init!(MyStruct {
+//! value,
+//! work <- new_delayed_work!("MyStruct::work"),
+//! }),
+//! GFP_KERNEL,
+//! )
+//! }
+//! }
+//!
+//! impl WorkItem for MyStruct {
+//! type Pointer = Arc<MyStruct>;
+//!
+//! fn run(this: Arc<MyStruct>) {
+//! pr_info!("The value is: {}\n", this.value);
+//! }
+//! }
+//!
+//! /// This method will enqueue the struct for execution on the system workqueue, where its value
+//! /// will be printed 12 jiffies later.
+//! fn print_later(val: Arc<MyStruct>) {
+//! let _ = workqueue::system().enqueue_delayed(val, 12);
+//! }
+//!
+//! /// It is also possible to use the ordinary `enqueue` method together with `DelayedWork`. This
+//! /// is equivalent to calling `enqueue_delayed` with a delay of zero.
+//! fn print_now(val: Arc<MyStruct>) {
+//! let _ = workqueue::system().enqueue(val);
+//! }
+//! # print_later(MyStruct::new(42).unwrap());
+//! # print_now(MyStruct::new(42).unwrap());
+//! ```
+//!
//! C header: [`include/linux/workqueue.h`](srctree/include/linux/workqueue.h)
-use crate::alloc::{AllocError, Flags};
-use crate::{prelude::*, sync::Arc, sync::LockClassKey, types::Opaque};
+use crate::{
+ alloc::{AllocError, Flags},
+ container_of,
+ prelude::*,
+ sync::Arc,
+ sync::LockClassKey,
+ time::Jiffies,
+ types::Opaque,
+};
use core::marker::PhantomData;
/// Creates a [`Work`] initialiser with the given name and a newly-created lock class.
@@ -146,6 +205,33 @@ macro_rules! new_work {
}
pub use new_work;
+/// Creates a [`DelayedWork`] initialiser with the given name and a newly-created lock class.
+#[macro_export]
+macro_rules! new_delayed_work {
+ () => {
+ $crate::workqueue::DelayedWork::new(
+ $crate::optional_name!(),
+ $crate::static_lock_class!(),
+ $crate::c_str!(::core::concat!(
+ ::core::file!(),
+ ":",
+ ::core::line!(),
+ "_timer"
+ )),
+ $crate::static_lock_class!(),
+ )
+ };
+ ($name:literal) => {
+ $crate::workqueue::DelayedWork::new(
+ $crate::c_str!($name),
+ $crate::static_lock_class!(),
+ $crate::c_str!(::core::concat!($name, "_timer")),
+ $crate::static_lock_class!(),
+ )
+ };
+}
+pub use new_delayed_work;
+
/// A kernel work queue.
///
/// Wraps the kernel's C `struct workqueue_struct`.
@@ -170,7 +256,7 @@ impl Queue {
pub unsafe fn from_raw<'a>(ptr: *const bindings::workqueue_struct) -> &'a Queue {
// SAFETY: The `Queue` type is `#[repr(transparent)]`, so the pointer cast is valid. The
// caller promises that the pointer is not dangling.
- unsafe { &*(ptr as *const Queue) }
+ unsafe { &*ptr.cast::<Queue>() }
}
/// Enqueues a work item.
@@ -198,7 +284,7 @@ impl Queue {
unsafe {
w.__enqueue(move |work_ptr| {
bindings::queue_work_on(
- bindings::wq_misc_consts_WORK_CPU_UNBOUND as _,
+ bindings::wq_misc_consts_WORK_CPU_UNBOUND as ffi::c_int,
queue_ptr,
work_ptr,
)
@@ -206,6 +292,42 @@ impl Queue {
}
}
+ /// Enqueues a delayed work item.
+ ///
+ /// This may fail if the work item is already enqueued in a workqueue.
+ ///
+ /// The work item will be submitted using `WORK_CPU_UNBOUND`.
+ pub fn enqueue_delayed<W, const ID: u64>(&self, w: W, delay: Jiffies) -> W::EnqueueOutput
+ where
+ W: RawDelayedWorkItem<ID> + Send + 'static,
+ {
+ let queue_ptr = self.0.get();
+
+ // SAFETY: We only return `false` if the `work_struct` is already in a workqueue. The other
+ // `__enqueue` requirements are not relevant since `W` is `Send` and static.
+ //
+ // The call to `bindings::queue_delayed_work_on` will dereference the provided raw pointer,
+ // which is ok because `__enqueue` guarantees that the pointer is valid for the duration of
+ // this closure, and the safety requirements of `RawDelayedWorkItem` extend this
+ // requirement to apply to the entire `delayed_work`.
+ //
+ // Furthermore, if the C workqueue code accesses the pointer after this call to
+ // `__enqueue`, then the work item was successfully enqueued, and
+ // `bindings::queue_delayed_work_on` will have returned true. In this case, `__enqueue`
+ // promises that the raw pointer will stay valid until we call the function pointer in the
+ // `work_struct`, so the access is ok.
+ unsafe {
+ w.__enqueue(move |work_ptr| {
+ bindings::queue_delayed_work_on(
+ bindings::wq_misc_consts_WORK_CPU_UNBOUND as ffi::c_int,
+ queue_ptr,
+ container_of!(work_ptr, bindings::delayed_work, work),
+ delay,
+ )
+ })
+ }
+ }
+
/// Tries to spawn the given function or closure as a work item.
///
/// This method can fail because it allocates memory to store the work item.
@@ -234,18 +356,11 @@ struct ClosureWork<T> {
func: Option<T>,
}
-impl<T> ClosureWork<T> {
- fn project(self: Pin<&mut Self>) -> &mut Option<T> {
- // SAFETY: The `func` field is not structurally pinned.
- unsafe { &mut self.get_unchecked_mut().func }
- }
-}
-
impl<T: FnOnce()> WorkItem for ClosureWork<T> {
type Pointer = Pin<KBox<Self>>;
fn run(mut this: Pin<KBox<Self>>) {
- if let Some(func) = this.as_mut().project().take() {
+ if let Some(func) = this.as_mut().project().func.take() {
(func)()
}
}
@@ -298,6 +413,16 @@ pub unsafe trait RawWorkItem<const ID: u64> {
F: FnOnce(*mut bindings::work_struct) -> bool;
}
+/// A raw delayed work item.
+///
+/// # Safety
+///
+/// If the `__enqueue` method in the `RawWorkItem` implementation calls the closure, then the
+/// provided pointer must point at the `work` field of a valid `delayed_work`, and the guarantees
+/// that `__enqueue` provides about accessing the `work_struct` must also apply to the rest of the
+/// `delayed_work` struct.
+pub unsafe trait RawDelayedWorkItem<const ID: u64>: RawWorkItem<ID> {}
+
/// Defines the method that should be called directly when a work item is executed.
///
/// This trait is implemented by `Pin<KBox<T>>` and [`Arc<T>`], and is mainly intended to be
@@ -403,11 +528,11 @@ impl<T: ?Sized, const ID: u64> Work<T, ID> {
//
// A pointer cast would also be ok due to `#[repr(transparent)]`. We use `addr_of!` so that
// the compiler does not complain that the `work` field is unused.
- unsafe { Opaque::raw_get(core::ptr::addr_of!((*ptr).work)) }
+ unsafe { Opaque::cast_into(core::ptr::addr_of!((*ptr).work)) }
}
}
-/// Declares that a type has a [`Work<T, ID>`] field.
+/// Declares that a type contains a [`Work<T, ID>`].
///
/// The intended way of using this trait is via the [`impl_has_work!`] macro. You can use the macro
/// like this:
@@ -506,6 +631,178 @@ impl_has_work! {
impl{T} HasWork<Self> for ClosureWork<T> { self.work }
}
+/// Links for a delayed work item.
+///
+/// This struct contains a function pointer to the [`run`] function from the [`WorkItemPointer`]
+/// trait, and defines the linked list pointers necessary to enqueue a work item in a workqueue in
+/// a delayed manner.
+///
+/// Wraps the kernel's C `struct delayed_work`.
+///
+/// This is a helper type used to associate a `delayed_work` with the [`WorkItem`] that uses it.
+///
+/// [`run`]: WorkItemPointer::run
+#[pin_data]
+#[repr(transparent)]
+pub struct DelayedWork<T: ?Sized, const ID: u64 = 0> {
+ #[pin]
+ dwork: Opaque<bindings::delayed_work>,
+ _inner: PhantomData<T>,
+}
+
+// SAFETY: Kernel work items are usable from any thread.
+//
+// We do not need to constrain `T` since the work item does not actually contain a `T`.
+unsafe impl<T: ?Sized, const ID: u64> Send for DelayedWork<T, ID> {}
+// SAFETY: Kernel work items are usable from any thread.
+//
+// We do not need to constrain `T` since the work item does not actually contain a `T`.
+unsafe impl<T: ?Sized, const ID: u64> Sync for DelayedWork<T, ID> {}
+
+impl<T: ?Sized, const ID: u64> DelayedWork<T, ID> {
+ /// Creates a new instance of [`DelayedWork`].
+ #[inline]
+ pub fn new(
+ work_name: &'static CStr,
+ work_key: Pin<&'static LockClassKey>,
+ timer_name: &'static CStr,
+ timer_key: Pin<&'static LockClassKey>,
+ ) -> impl PinInit<Self>
+ where
+ T: WorkItem<ID>,
+ {
+ pin_init!(Self {
+ dwork <- Opaque::ffi_init(|slot: *mut bindings::delayed_work| {
+ // SAFETY: The `WorkItemPointer` implementation promises that `run` can be used as
+ // the work item function.
+ unsafe {
+ bindings::init_work_with_key(
+ core::ptr::addr_of_mut!((*slot).work),
+ Some(T::Pointer::run),
+ false,
+ work_name.as_char_ptr(),
+ work_key.as_ptr(),
+ )
+ }
+
+ // SAFETY: The `delayed_work_timer_fn` function pointer can be used here because
+ // the timer is embedded in a `struct delayed_work`, and only ever scheduled via
+ // the core workqueue code, and configured to run in irqsafe context.
+ unsafe {
+ bindings::timer_init_key(
+ core::ptr::addr_of_mut!((*slot).timer),
+ Some(bindings::delayed_work_timer_fn),
+ bindings::TIMER_IRQSAFE,
+ timer_name.as_char_ptr(),
+ timer_key.as_ptr(),
+ )
+ }
+ }),
+ _inner: PhantomData,
+ })
+ }
+
+ /// Get a pointer to the inner `delayed_work`.
+ ///
+ /// # Safety
+ ///
+ /// The provided pointer must not be dangling and must be properly aligned. (But the memory
+ /// need not be initialized.)
+ #[inline]
+ pub unsafe fn raw_as_work(ptr: *const Self) -> *mut Work<T, ID> {
+ // SAFETY: The caller promises that the pointer is aligned and not dangling.
+ let dw: *mut bindings::delayed_work =
+ unsafe { Opaque::cast_into(core::ptr::addr_of!((*ptr).dwork)) };
+ // SAFETY: The caller promises that the pointer is aligned and not dangling.
+ let wrk: *mut bindings::work_struct = unsafe { core::ptr::addr_of_mut!((*dw).work) };
+ // CAST: Work and work_struct have compatible layouts.
+ wrk.cast()
+ }
+}
+
+/// Declares that a type contains a [`DelayedWork<T, ID>`].
+///
+/// # Safety
+///
+/// The `HasWork<T, ID>` implementation must return a `work_struct` that is stored in the `work`
+/// field of a `delayed_work` with the same access rules as the `work_struct`.
+pub unsafe trait HasDelayedWork<T, const ID: u64 = 0>: HasWork<T, ID> {}
+
+/// Used to safely implement the [`HasDelayedWork<T, ID>`] trait.
+///
+/// This macro also implements the [`HasWork`] trait, so you do not need to use [`impl_has_work!`]
+/// when using this macro.
+///
+/// # Examples
+///
+/// ```
+/// use kernel::sync::Arc;
+/// use kernel::workqueue::{self, impl_has_delayed_work, DelayedWork};
+///
+/// struct MyStruct<'a, T, const N: usize> {
+/// work_field: DelayedWork<MyStruct<'a, T, N>, 17>,
+/// f: fn(&'a [T; N]),
+/// }
+///
+/// impl_has_delayed_work! {
+/// impl{'a, T, const N: usize} HasDelayedWork<MyStruct<'a, T, N>, 17>
+/// for MyStruct<'a, T, N> { self.work_field }
+/// }
+/// ```
+#[macro_export]
+macro_rules! impl_has_delayed_work {
+ ($(impl$({$($generics:tt)*})?
+ HasDelayedWork<$work_type:ty $(, $id:tt)?>
+ for $self:ty
+ { self.$field:ident }
+ )*) => {$(
+ // SAFETY: The implementation of `raw_get_work` only compiles if the field has the right
+ // type.
+ unsafe impl$(<$($generics)+>)?
+ $crate::workqueue::HasDelayedWork<$work_type $(, $id)?> for $self {}
+
+ // SAFETY: The implementation of `raw_get_work` only compiles if the field has the right
+ // type.
+ unsafe impl$(<$($generics)+>)? $crate::workqueue::HasWork<$work_type $(, $id)?> for $self {
+ #[inline]
+ unsafe fn raw_get_work(
+ ptr: *mut Self
+ ) -> *mut $crate::workqueue::Work<$work_type $(, $id)?> {
+ // SAFETY: The caller promises that the pointer is not dangling.
+ let ptr: *mut $crate::workqueue::DelayedWork<$work_type $(, $id)?> = unsafe {
+ ::core::ptr::addr_of_mut!((*ptr).$field)
+ };
+
+ // SAFETY: The caller promises that the pointer is not dangling.
+ unsafe { $crate::workqueue::DelayedWork::raw_as_work(ptr) }
+ }
+
+ #[inline]
+ unsafe fn work_container_of(
+ ptr: *mut $crate::workqueue::Work<$work_type $(, $id)?>,
+ ) -> *mut Self {
+ // SAFETY: The caller promises that the pointer points at a field of the right type
+ // in the right kind of struct.
+ let ptr = unsafe { $crate::workqueue::Work::raw_get(ptr) };
+
+ // SAFETY: The caller promises that the pointer points at a field of the right type
+ // in the right kind of struct.
+ let delayed_work = unsafe {
+ $crate::container_of!(ptr, $crate::bindings::delayed_work, work)
+ };
+
+ let delayed_work: *mut $crate::workqueue::DelayedWork<$work_type $(, $id)?> =
+ delayed_work.cast();
+
+ // SAFETY: The caller promises that the pointer points at a field of the right type
+ // in the right kind of struct.
+ unsafe { $crate::container_of!(delayed_work, Self, $field) }
+ }
+ }
+ )*};
+}
+pub use impl_has_delayed_work;
+
// SAFETY: The `__enqueue` implementation in RawWorkItem uses a `work_struct` initialized with the
// `run` method of this trait as the function pointer because:
// - `__enqueue` gets the `work_struct` from the `Work` field, using `T::raw_get_work`.
@@ -522,7 +819,7 @@ where
{
unsafe extern "C" fn run(ptr: *mut bindings::work_struct) {
// The `__enqueue` method always uses a `work_struct` stored in a `Work<T, ID>`.
- let ptr = ptr as *mut Work<T, ID>;
+ let ptr = ptr.cast::<Work<T, ID>>();
// SAFETY: This computes the pointer that `__enqueue` got from `Arc::into_raw`.
let ptr = unsafe { T::work_container_of(ptr) };
// SAFETY: This pointer comes from `Arc::into_raw` and we've been given back ownership.
@@ -567,6 +864,16 @@ where
}
}
+// SAFETY: By the safety requirements of `HasDelayedWork`, the `work_struct` returned by methods in
+// `HasWork` provides a `work_struct` that is the `work` field of a `delayed_work`, and the rest of
+// the `delayed_work` has the same access rules as its `work` field.
+unsafe impl<T, const ID: u64> RawDelayedWorkItem<ID> for Arc<T>
+where
+ T: WorkItem<ID, Pointer = Self>,
+ T: HasDelayedWork<T, ID>,
+{
+}
+
// SAFETY: TODO.
unsafe impl<T, const ID: u64> WorkItemPointer<ID> for Pin<KBox<T>>
where
@@ -575,7 +882,7 @@ where
{
unsafe extern "C" fn run(ptr: *mut bindings::work_struct) {
// The `__enqueue` method always uses a `work_struct` stored in a `Work<T, ID>`.
- let ptr = ptr as *mut Work<T, ID>;
+ let ptr = ptr.cast::<Work<T, ID>>();
// SAFETY: This computes the pointer that `__enqueue` got from `KBox::into_raw`.
let ptr = unsafe { T::work_container_of(ptr) };
// SAFETY: This pointer comes from `KBox::into_raw` and we've been given back ownership.
@@ -617,6 +924,16 @@ where
}
}
+// SAFETY: By the safety requirements of `HasDelayedWork`, the `work_struct` returned by methods in
+// `HasWork` provides a `work_struct` that is the `work` field of a `delayed_work`, and the rest of
+// the `delayed_work` has the same access rules as its `work` field.
+unsafe impl<T, const ID: u64> RawDelayedWorkItem<ID> for Pin<KBox<T>>
+where
+ T: WorkItem<ID, Pointer = Self>,
+ T: HasDelayedWork<T, ID>,
+{
+}
+
/// Returns the system work queue (`system_wq`).
///
/// It is the one used by `schedule[_delayed]_work[_on]()`. Multi-CPU multi-threaded. There are
diff --git a/rust/kernel/xarray.rs b/rust/kernel/xarray.rs
index 75719e7bb491..a49d6db28845 100644
--- a/rust/kernel/xarray.rs
+++ b/rust/kernel/xarray.rs
@@ -7,9 +7,10 @@
use crate::{
alloc, bindings, build_assert,
error::{Error, Result},
+ ffi::c_void,
types::{ForeignOwnable, NotThreadSafe, Opaque},
};
-use core::{iter, marker::PhantomData, mem, pin::Pin, ptr::NonNull};
+use core::{iter, marker::PhantomData, pin::Pin, ptr::NonNull};
use pin_init::{pin_data, pin_init, pinned_drop, PinInit};
/// An array which efficiently maps sparse integer indices to owned objects.
@@ -101,7 +102,7 @@ impl<T: ForeignOwnable> XArray<T> {
})
}
- fn iter(&self) -> impl Iterator<Item = NonNull<T::PointedTo>> + '_ {
+ fn iter(&self) -> impl Iterator<Item = NonNull<c_void>> + '_ {
let mut index = 0;
// SAFETY: `self.xa` is always valid by the type invariant.
@@ -179,7 +180,7 @@ impl<T> From<StoreError<T>> for Error {
impl<'a, T: ForeignOwnable> Guard<'a, T> {
fn load<F, U>(&self, index: usize, f: F) -> Option<U>
where
- F: FnOnce(NonNull<T::PointedTo>) -> U,
+ F: FnOnce(NonNull<c_void>) -> U,
{
// SAFETY: `self.xa.xa` is always valid by the type invariant.
let ptr = unsafe { bindings::xa_load(self.xa.xa.get(), index) };
@@ -230,7 +231,7 @@ impl<'a, T: ForeignOwnable> Guard<'a, T> {
gfp: alloc::Flags,
) -> Result<Option<T>, StoreError<T>> {
build_assert!(
- mem::align_of::<T::PointedTo>() >= 4,
+ T::FOREIGN_ALIGN >= 4,
"pointers stored in XArray must be 4-byte aligned"
);
let new = value.into_foreign();