Diffstat (limited to 'rust/kernel')
-rw-r--r--  rust/kernel/auxiliary.rs                  |   4
-rw-r--r--  rust/kernel/debugfs.rs                    | 594
-rw-r--r--  rust/kernel/debugfs/callback_adapters.rs  | 122
-rw-r--r--  rust/kernel/debugfs/entry.rs              | 164
-rw-r--r--  rust/kernel/debugfs/file_ops.rs           | 247
-rw-r--r--  rust/kernel/debugfs/traits.rs             | 102
-rw-r--r--  rust/kernel/device.rs                     |   7
-rw-r--r--  rust/kernel/devres.rs                     |   4
-rw-r--r--  rust/kernel/io.rs                         |   1
-rw-r--r--  rust/kernel/io/poll.rs                    | 104
-rw-r--r--  rust/kernel/irq.rs                        |  24
-rw-r--r--  rust/kernel/irq/flags.rs                  | 124
-rw-r--r--  rust/kernel/irq/request.rs                | 507
-rw-r--r--  rust/kernel/lib.rs                        |   3
-rw-r--r--  rust/kernel/pci.rs                        | 180
-rw-r--r--  rust/kernel/pci/id.rs                     | 578
-rw-r--r--  rust/kernel/platform.rs                   | 178
-rw-r--r--  rust/kernel/processor.rs                  |  14
-rw-r--r--  rust/kernel/regulator.rs                  | 171
19 files changed, 3006 insertions(+), 122 deletions(-)
diff --git a/rust/kernel/auxiliary.rs b/rust/kernel/auxiliary.rs
index 58be09871397..e11848bbf206 100644
--- a/rust/kernel/auxiliary.rs
+++ b/rust/kernel/auxiliary.rs
@@ -55,7 +55,7 @@ impl<T: Driver + 'static> Adapter<T> {
extern "C" fn probe_callback(
adev: *mut bindings::auxiliary_device,
id: *const bindings::auxiliary_device_id,
- ) -> kernel::ffi::c_int {
+ ) -> c_int {
// SAFETY: The auxiliary bus only ever calls the probe callback with a valid pointer to a
// `struct auxiliary_device`.
//
@@ -245,7 +245,7 @@ kernel::impl_device_context_deref!(unsafe { Device });
kernel::impl_device_context_into_aref!(Device);
// SAFETY: Instances of `Device` are always reference-counted.
-unsafe impl crate::types::AlwaysRefCounted for Device {
+unsafe impl crate::sync::aref::AlwaysRefCounted for Device {
fn inc_ref(&self) {
// SAFETY: The existence of a shared reference guarantees that the refcount is non-zero.
unsafe { bindings::get_device(self.as_ref().as_raw()) };
diff --git a/rust/kernel/debugfs.rs b/rust/kernel/debugfs.rs
new file mode 100644
index 000000000000..381c23b3dd83
--- /dev/null
+++ b/rust/kernel/debugfs.rs
@@ -0,0 +1,594 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2025 Google LLC.
+
+//! DebugFS Abstraction
+//!
+//! C header: [`include/linux/debugfs.h`](srctree/include/linux/debugfs.h)
+
+// When DebugFS is disabled, many parameters are dead. Linting for this isn't helpful.
+#![cfg_attr(not(CONFIG_DEBUG_FS), allow(unused_variables))]
+
+use crate::prelude::*;
+use crate::str::CStr;
+#[cfg(CONFIG_DEBUG_FS)]
+use crate::sync::Arc;
+use crate::uaccess::UserSliceReader;
+use core::fmt;
+use core::marker::PhantomData;
+use core::marker::PhantomPinned;
+#[cfg(CONFIG_DEBUG_FS)]
+use core::mem::ManuallyDrop;
+use core::ops::Deref;
+
+mod traits;
+pub use traits::{Reader, Writer};
+
+mod callback_adapters;
+use callback_adapters::{FormatAdapter, NoWriter, WritableAdapter};
+mod file_ops;
+use file_ops::{FileOps, ReadFile, ReadWriteFile, WriteFile};
+#[cfg(CONFIG_DEBUG_FS)]
+mod entry;
+#[cfg(CONFIG_DEBUG_FS)]
+use entry::Entry;
+
+/// Owning handle to a DebugFS directory.
+///
+/// The directory in the filesystem represented by [`Dir`] will be removed when the handle has been
+/// dropped *and* all children have been removed.
+// If we have a parent, we hold a reference to it in the `Entry`. This prevents the `dentry`
+// we point to from being cleaned up if our parent `Dir`/`Entry` is dropped before us.
+//
+// The `None` option indicates that the `Arc` could not be allocated, so our children would not be
+// able to refer to us. In this case, we need to silently fail. All future child directories/files
+// will silently fail as well.
+#[derive(Clone)]
+pub struct Dir(#[cfg(CONFIG_DEBUG_FS)] Option<Arc<Entry<'static>>>);
+
+impl Dir {
+ /// Create a new directory in DebugFS. If `parent` is [`None`], it will be created at the root.
+ fn create(name: &CStr, parent: Option<&Dir>) -> Self {
+ #[cfg(CONFIG_DEBUG_FS)]
+ {
+ let parent_entry = match parent {
+ // If the parent couldn't be allocated, just early-return
+ Some(Dir(None)) => return Self(None),
+ Some(Dir(Some(entry))) => Some(entry.clone()),
+ None => None,
+ };
+ Self(
+ // If Arc creation fails, the `Entry` will be dropped, so the directory will be
+ // cleaned up.
+ Arc::new(Entry::dynamic_dir(name, parent_entry), GFP_KERNEL).ok(),
+ )
+ }
+ #[cfg(not(CONFIG_DEBUG_FS))]
+ Self()
+ }
+
+ /// Creates a DebugFS file which will own the data produced by the initializer provided in
+ /// `data`.
+ fn create_file<'a, T, E: 'a>(
+ &'a self,
+ name: &'a CStr,
+ data: impl PinInit<T, E> + 'a,
+ file_ops: &'static FileOps<T>,
+ ) -> impl PinInit<File<T>, E> + 'a
+ where
+ T: Sync + 'static,
+ {
+ let scope = Scope::<T>::new(data, move |data| {
+ #[cfg(CONFIG_DEBUG_FS)]
+ if let Some(parent) = &self.0 {
+ // SAFETY: Because data derives from a scope, and our entry will be dropped before
+ // the data is dropped, it is guaranteed to outlive the entry we return.
+ unsafe { Entry::dynamic_file(name, parent.clone(), data, file_ops) }
+ } else {
+ Entry::empty()
+ }
+ });
+ try_pin_init! {
+ File {
+ scope <- scope
+ } ? E
+ }
+ }
+
+ /// Create a new directory in DebugFS at the root.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use kernel::c_str;
+ /// # use kernel::debugfs::Dir;
+ /// let debugfs = Dir::new(c_str!("parent"));
+ /// ```
+ pub fn new(name: &CStr) -> Self {
+ Dir::create(name, None)
+ }
+
+ /// Creates a subdirectory within this directory.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use kernel::c_str;
+ /// # use kernel::debugfs::Dir;
+ /// let parent = Dir::new(c_str!("parent"));
+ /// let child = parent.subdir(c_str!("child"));
+ /// ```
+ pub fn subdir(&self, name: &CStr) -> Self {
+ Dir::create(name, Some(self))
+ }
+
+ /// Creates a read-only file in this directory.
+ ///
+ /// The file's contents are produced by invoking [`Writer::write`] on the value initialized by
+ /// `data`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use kernel::c_str;
+ /// # use kernel::debugfs::Dir;
+ /// # use kernel::prelude::*;
+ /// # let dir = Dir::new(c_str!("my_debugfs_dir"));
+ /// let file = KBox::pin_init(dir.read_only_file(c_str!("foo"), 200), GFP_KERNEL)?;
+ /// // "my_debugfs_dir/foo" now contains the number 200.
+ /// // The file is removed when `file` is dropped.
+ /// # Ok::<(), Error>(())
+ /// ```
+ pub fn read_only_file<'a, T, E: 'a>(
+ &'a self,
+ name: &'a CStr,
+ data: impl PinInit<T, E> + 'a,
+ ) -> impl PinInit<File<T>, E> + 'a
+ where
+ T: Writer + Send + Sync + 'static,
+ {
+ let file_ops = &<T as ReadFile<_>>::FILE_OPS;
+ self.create_file(name, data, file_ops)
+ }
+
+ /// Creates a read-only file in this directory, with contents from a callback.
+ ///
+ /// `f` must be a function item or a non-capturing closure.
+ /// This is statically asserted and not a safety requirement.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use core::sync::atomic::{AtomicU32, Ordering};
+ /// # use kernel::c_str;
+ /// # use kernel::debugfs::Dir;
+ /// # use kernel::prelude::*;
+ /// # let dir = Dir::new(c_str!("foo"));
+ /// let file = KBox::pin_init(
+ /// dir.read_callback_file(c_str!("bar"),
+ /// AtomicU32::new(3),
+ /// &|val, f| {
+ /// let out = val.load(Ordering::Relaxed);
+ /// writeln!(f, "{out:#010x}")
+ /// }),
+ /// GFP_KERNEL)?;
+ /// // Reading "foo/bar" will show "0x00000003".
+ /// file.store(10, Ordering::Relaxed);
+ /// // Reading "foo/bar" will now show "0x0000000a".
+ /// # Ok::<(), Error>(())
+ /// ```
+ pub fn read_callback_file<'a, T, E: 'a, F>(
+ &'a self,
+ name: &'a CStr,
+ data: impl PinInit<T, E> + 'a,
+ _f: &'static F,
+ ) -> impl PinInit<File<T>, E> + 'a
+ where
+ T: Send + Sync + 'static,
+ F: Fn(&T, &mut fmt::Formatter<'_>) -> fmt::Result + Send + Sync,
+ {
+ let file_ops = <FormatAdapter<T, F>>::FILE_OPS.adapt();
+ self.create_file(name, data, file_ops)
+ }
+
+ /// Creates a read-write file in this directory.
+ ///
+ /// Reading the file uses the [`Writer`] implementation.
+ /// Writing to the file uses the [`Reader`] implementation.
+ pub fn read_write_file<'a, T, E: 'a>(
+ &'a self,
+ name: &'a CStr,
+ data: impl PinInit<T, E> + 'a,
+ ) -> impl PinInit<File<T>, E> + 'a
+ where
+ T: Writer + Reader + Send + Sync + 'static,
+ {
+ let file_ops = &<T as ReadWriteFile<_>>::FILE_OPS;
+ self.create_file(name, data, file_ops)
+ }
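A minimal sketch of how `read_write_file` might be used; the directory handle, file name, and `AtomicU32` counter are illustrative. `AtomicU32` gets `Writer` through the blanket `Debug` impl and `Reader` through the atomic impls in `traits.rs`, so it satisfies the bounds above.

```rust
use core::pin::Pin;
use core::sync::atomic::AtomicU32;
use kernel::c_str;
use kernel::debugfs::{Dir, File};
use kernel::prelude::*;

/// Exposes `<dir>/counter`, readable and writable from user space. The node is
/// removed when the returned handle is dropped.
fn counter_file(dir: &Dir) -> Result<Pin<KBox<File<AtomicU32>>>> {
    KBox::pin_init(
        dir.read_write_file(c_str!("counter"), AtomicU32::new(0)),
        GFP_KERNEL,
    )
}
```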
+
+ /// Creates a read-write file in this directory, with logic from callbacks.
+ ///
+ /// Reading from the file is handled by `f`. Writing to the file is handled by `w`.
+ ///
+ /// `f` and `w` must be function items or non-capturing closures.
+ /// This is statically asserted and not a safety requirement.
+ pub fn read_write_callback_file<'a, T, E: 'a, F, W>(
+ &'a self,
+ name: &'a CStr,
+ data: impl PinInit<T, E> + 'a,
+ _f: &'static F,
+ _w: &'static W,
+ ) -> impl PinInit<File<T>, E> + 'a
+ where
+ T: Send + Sync + 'static,
+ F: Fn(&T, &mut fmt::Formatter<'_>) -> fmt::Result + Send + Sync,
+ W: Fn(&T, &mut UserSliceReader) -> Result + Send + Sync,
+ {
+ let file_ops =
+ <WritableAdapter<FormatAdapter<T, F>, W> as file_ops::ReadWriteFile<_>>::FILE_OPS
+ .adapt()
+ .adapt();
+ self.create_file(name, data, file_ops)
+ }
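A sketch of the callback variant, assuming an `AtomicU32` status value behind a file named `status`: reads render a custom hex format, and writes simply clear the value (the user-supplied bytes are ignored in this sketch).

```rust
use core::pin::Pin;
use core::sync::atomic::{AtomicU32, Ordering};
use kernel::c_str;
use kernel::debugfs::{Dir, File};
use kernel::prelude::*;
use kernel::uaccess::UserSliceReader;

fn status_file(dir: &Dir) -> Result<Pin<KBox<File<AtomicU32>>>> {
    KBox::pin_init(
        dir.read_write_callback_file(
            c_str!("status"),
            AtomicU32::new(0),
            // Read side: format the current value.
            &|val: &AtomicU32, f| writeln!(f, "{:#010x}", val.load(Ordering::Relaxed)),
            // Write side: any write clears the value.
            &|val: &AtomicU32, _reader: &mut UserSliceReader| {
                val.store(0, Ordering::Relaxed);
                Ok(())
            },
        ),
        GFP_KERNEL,
    )
}
```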
+
+ /// Creates a write-only file in this directory.
+ ///
+ /// The file owns its backing data. Writing to the file uses the [`Reader`]
+ /// implementation.
+ ///
+ /// The file is removed when the returned [`File`] is dropped.
+ pub fn write_only_file<'a, T, E: 'a>(
+ &'a self,
+ name: &'a CStr,
+ data: impl PinInit<T, E> + 'a,
+ ) -> impl PinInit<File<T>, E> + 'a
+ where
+ T: Reader + Send + Sync + 'static,
+ {
+ self.create_file(name, data, &T::FILE_OPS)
+ }
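A sketch of a write-only node, assuming an `AtomicU32` threshold: user space writes a new value, and the driver reads it back through the returned handle, since `File<T>` dereferences to `T`.

```rust
use core::pin::Pin;
use core::sync::atomic::AtomicU32;
use kernel::c_str;
use kernel::debugfs::{Dir, File};
use kernel::prelude::*;

/// Exposes `<dir>/threshold` for user space to update; the driver can later
/// call `.load(Ordering::Relaxed)` on the returned handle.
fn threshold_file(dir: &Dir) -> Result<Pin<KBox<File<AtomicU32>>>> {
    KBox::pin_init(
        dir.write_only_file(c_str!("threshold"), AtomicU32::new(16)),
        GFP_KERNEL,
    )
}
```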
+
+ /// Creates a write-only file in this directory, with write logic from a callback.
+ ///
+ /// `w` must be a function item or a non-capturing closure.
+ /// This is statically asserted and not a safety requirement.
+ pub fn write_callback_file<'a, T, E: 'a, W>(
+ &'a self,
+ name: &'a CStr,
+ data: impl PinInit<T, E> + 'a,
+ _w: &'static W,
+ ) -> impl PinInit<File<T>, E> + 'a
+ where
+ T: Send + Sync + 'static,
+ W: Fn(&T, &mut UserSliceReader) -> Result + Send + Sync,
+ {
+ let file_ops = <WritableAdapter<NoWriter<T>, W> as WriteFile<_>>::FILE_OPS
+ .adapt()
+ .adapt();
+ self.create_file(name, data, file_ops)
+ }
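A sketch using a callback for the write side, assuming an `AtomicBool` used as a trigger: any write from user space arms the flag, and the written bytes are ignored.

```rust
use core::pin::Pin;
use core::sync::atomic::{AtomicBool, Ordering};
use kernel::c_str;
use kernel::debugfs::{Dir, File};
use kernel::prelude::*;
use kernel::uaccess::UserSliceReader;

fn reset_file(dir: &Dir) -> Result<Pin<KBox<File<AtomicBool>>>> {
    KBox::pin_init(
        dir.write_callback_file(
            c_str!("reset"),
            AtomicBool::new(false),
            // Any write arms the flag; the payload is not parsed in this sketch.
            &|flag: &AtomicBool, _reader: &mut UserSliceReader| {
                flag.store(true, Ordering::Relaxed);
                Ok(())
            },
        ),
        GFP_KERNEL,
    )
}
```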
+
+ // While this function is safe, it is intentionally not public because it's a bit of a
+ // footgun.
+ //
+ // Unless you also extract the `entry` later and schedule it for `Drop` at the appropriate
+ // time, a `ScopedDir` with a `Dir` parent will never be deleted.
+ fn scoped_dir<'data>(&self, name: &CStr) -> ScopedDir<'data, 'static> {
+ #[cfg(CONFIG_DEBUG_FS)]
+ {
+ let parent_entry = match &self.0 {
+ None => return ScopedDir::empty(),
+ Some(entry) => entry.clone(),
+ };
+ ScopedDir {
+ entry: ManuallyDrop::new(Entry::dynamic_dir(name, Some(parent_entry))),
+ _phantom: PhantomData,
+ }
+ }
+ #[cfg(not(CONFIG_DEBUG_FS))]
+ ScopedDir::empty()
+ }
+
+ /// Creates a new scope, which is a directory associated with some data `T`.
+ ///
+ /// The created directory will be a subdirectory of `self`. The `init` closure is called to
+ /// populate the directory with files and subdirectories. These files can reference the data
+ /// stored in the scope.
+ ///
+ /// The entire directory tree created within the scope will be removed when the returned
+ /// `Scope` handle is dropped.
+ pub fn scope<'a, T: 'a, E: 'a, F>(
+ &'a self,
+ data: impl PinInit<T, E> + 'a,
+ name: &'a CStr,
+ init: F,
+ ) -> impl PinInit<Scope<T>, E> + 'a
+ where
+ F: for<'data, 'dir> FnOnce(&'data T, &'dir ScopedDir<'data, 'dir>) + 'a,
+ {
+ Scope::new(data, |data| {
+ let scoped = self.scoped_dir(name);
+ init(data, &scoped);
+ scoped.into_entry()
+ })
+ }
+}
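A sketch of the scope pattern, assuming a hypothetical `Counters` struct: several files borrow from the same scope-owned data, and the whole subtree disappears when the returned handle is dropped.

```rust
use core::pin::Pin;
use core::sync::atomic::AtomicUsize;
use kernel::c_str;
use kernel::debugfs::{Dir, Scope};
use kernel::prelude::*;

struct Counters {
    rx: AtomicUsize,
    tx: AtomicUsize,
}

/// Creates `<parent>/counters/{rx,tx}`, both backed by fields of the
/// scope-owned `Counters` value.
fn counters_dir(parent: &Dir) -> Result<Pin<KBox<Scope<Counters>>>> {
    KBox::pin_init(
        parent.scope(
            Counters {
                rx: AtomicUsize::new(0),
                tx: AtomicUsize::new(0),
            },
            c_str!("counters"),
            |counters, dir| {
                dir.read_only_file(c_str!("rx"), &counters.rx);
                dir.read_only_file(c_str!("tx"), &counters.tx);
            },
        ),
        GFP_KERNEL,
    )
}
```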
+
+#[pin_data]
+/// Handle to a DebugFS scope, which ensures that attached `data` will outlive the DebugFS entry
+/// without moving.
+///
+/// This is internally used to back [`File`], and used in the API to represent the attachment
+/// of a directory lifetime to a data structure which may be jointly accessed by a number of
+/// different files.
+///
+/// When dropped, a `Scope` will remove all directories and files in the filesystem backed by the
+/// attached data structure prior to releasing the attached data.
+pub struct Scope<T> {
+ // This order is load-bearing for drops - `_entry` must be dropped before `data`.
+ #[cfg(CONFIG_DEBUG_FS)]
+ _entry: Entry<'static>,
+ #[pin]
+ data: T,
+ // Even if `T` is `Unpin`, we still can't allow it to be moved.
+ #[pin]
+ _pin: PhantomPinned,
+}
+
+#[pin_data]
+/// Handle to a DebugFS file, owning its backing data.
+///
+/// When dropped, the DebugFS file will be removed and the attached data will be dropped.
+pub struct File<T> {
+ #[pin]
+ scope: Scope<T>,
+}
+
+#[cfg(not(CONFIG_DEBUG_FS))]
+impl<'b, T: 'b> Scope<T> {
+ fn new<E: 'b, F>(data: impl PinInit<T, E> + 'b, init: F) -> impl PinInit<Self, E> + 'b
+ where
+ F: for<'a> FnOnce(&'a T) + 'b,
+ {
+ try_pin_init! {
+ Self {
+ data <- data,
+ _pin: PhantomPinned
+ } ? E
+ }
+ .pin_chain(|scope| {
+ init(&scope.data);
+ Ok(())
+ })
+ }
+}
+
+#[cfg(CONFIG_DEBUG_FS)]
+impl<'b, T: 'b> Scope<T> {
+ fn entry_mut(self: Pin<&mut Self>) -> &mut Entry<'static> {
+ // SAFETY: _entry is not structurally pinned.
+ unsafe { &mut Pin::into_inner_unchecked(self)._entry }
+ }
+
+ fn new<E: 'b, F>(data: impl PinInit<T, E> + 'b, init: F) -> impl PinInit<Self, E> + 'b
+ where
+ F: for<'a> FnOnce(&'a T) -> Entry<'static> + 'b,
+ {
+ try_pin_init! {
+ Self {
+ _entry: Entry::empty(),
+ data <- data,
+ _pin: PhantomPinned
+ } ? E
+ }
+ .pin_chain(|scope| {
+ *scope.entry_mut() = init(&scope.data);
+ Ok(())
+ })
+ }
+}
+
+impl<'a, T: 'a> Scope<T> {
+ /// Creates a new scope, which is a directory at the root of the debugfs filesystem,
+ /// associated with some data `T`.
+ ///
+ /// The `init` closure is called to populate the directory with files and subdirectories. These
+ /// files can reference the data stored in the scope.
+ ///
+ /// The entire directory tree created within the scope will be removed when the returned
+ /// `Scope` handle is dropped.
+ pub fn dir<E: 'a, F>(
+ data: impl PinInit<T, E> + 'a,
+ name: &'a CStr,
+ init: F,
+ ) -> impl PinInit<Self, E> + 'a
+ where
+ F: for<'data, 'dir> FnOnce(&'data T, &'dir ScopedDir<'data, 'dir>) + 'a,
+ {
+ Scope::new(data, |data| {
+ let scoped = ScopedDir::new(name);
+ init(data, &scoped);
+ scoped.into_entry()
+ })
+ }
+}
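The root-level variant looks the same, minus the parent directory; here assuming a hypothetical `Stats` struct and a `my_driver` directory name.

```rust
use core::pin::Pin;
use core::sync::atomic::AtomicU64;
use kernel::c_str;
use kernel::debugfs::Scope;
use kernel::prelude::*;

struct Stats {
    errors: AtomicU64,
}

/// Creates `my_driver/errors` at the debugfs root; the directory and its file
/// are removed when the returned handle is dropped.
fn stats_dir() -> Result<Pin<KBox<Scope<Stats>>>> {
    KBox::pin_init(
        Scope::dir(
            Stats {
                errors: AtomicU64::new(0),
            },
            c_str!("my_driver"),
            |stats, dir| {
                dir.read_only_file(c_str!("errors"), &stats.errors);
            },
        ),
        GFP_KERNEL,
    )
}
```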
+
+impl<T> Deref for Scope<T> {
+ type Target = T;
+ fn deref(&self) -> &T {
+ &self.data
+ }
+}
+
+impl<T> Deref for File<T> {
+ type Target = T;
+ fn deref(&self) -> &T {
+ &self.scope
+ }
+}
+
+/// A handle to a directory which will live at most `'dir`, accessing data that will live for at
+/// least `'data`.
+///
+/// Dropping a [`ScopedDir`] will not delete or clean it up; this is expected to occur through dropping
+/// the `Scope` that created it.
+pub struct ScopedDir<'data, 'dir> {
+ #[cfg(CONFIG_DEBUG_FS)]
+ entry: ManuallyDrop<Entry<'dir>>,
+ _phantom: PhantomData<fn(&'data ()) -> &'dir ()>,
+}
+
+impl<'data, 'dir> ScopedDir<'data, 'dir> {
+ /// Creates a subdirectory inside this `ScopedDir`.
+ ///
+ /// The returned directory handle cannot outlive this one.
+ pub fn dir<'dir2>(&'dir2 self, name: &CStr) -> ScopedDir<'data, 'dir2> {
+ #[cfg(not(CONFIG_DEBUG_FS))]
+ let _ = name;
+ ScopedDir {
+ #[cfg(CONFIG_DEBUG_FS)]
+ entry: ManuallyDrop::new(Entry::dir(name, Some(&*self.entry))),
+ _phantom: PhantomData,
+ }
+ }
+
+ fn create_file<T: Sync>(&self, name: &CStr, data: &'data T, vtable: &'static FileOps<T>) {
+ #[cfg(CONFIG_DEBUG_FS)]
+ core::mem::forget(Entry::file(name, &self.entry, data, vtable));
+ }
+
+ /// Creates a read-only file in this directory.
+ ///
+ /// The file's contents are produced by invoking [`Writer::write`].
+ ///
+ /// This function does not produce an owning handle to the file. The created
+ /// file is removed when the [`Scope`] that this directory belongs
+ /// to is dropped.
+ pub fn read_only_file<T: Writer + Send + Sync + 'static>(&self, name: &CStr, data: &'data T) {
+ self.create_file(name, data, &T::FILE_OPS)
+ }
+
+ /// Creates a read-only file in this directory, with contents from a callback.
+ ///
+ /// The file contents are generated by calling `f` with `data`.
+ ///
+ /// `f` must be a function item or a non-capturing closure.
+ /// This is statically asserted and not a safety requirement.
+ ///
+ /// This function does not produce an owning handle to the file. The created
+ /// file is removed when the [`Scope`] that this directory belongs
+ /// to is dropped.
+ pub fn read_callback_file<T, F>(&self, name: &CStr, data: &'data T, _f: &'static F)
+ where
+ T: Send + Sync + 'static,
+ F: Fn(&T, &mut fmt::Formatter<'_>) -> fmt::Result + Send + Sync,
+ {
+ let vtable = <FormatAdapter<T, F> as ReadFile<_>>::FILE_OPS.adapt();
+ self.create_file(name, data, vtable)
+ }
+
+ /// Creates a read-write file in this directory.
+ ///
+ /// Reading the file uses the [`Writer`] implementation on `data`. Writing to the file uses
+ /// the [`Reader`] implementation on `data`.
+ ///
+ /// This function does not produce an owning handle to the file. The created
+ /// file is removed when the [`Scope`] that this directory belongs
+ /// to is dropped.
+ pub fn read_write_file<T: Writer + Reader + Send + Sync + 'static>(
+ &self,
+ name: &CStr,
+ data: &'data T,
+ ) {
+ let vtable = &<T as ReadWriteFile<_>>::FILE_OPS;
+ self.create_file(name, data, vtable)
+ }
+
+ /// Creates a read-write file in this directory, with logic from callbacks.
+ ///
+ /// Reading from the file is handled by `f`. Writing to the file is handled by `w`.
+ ///
+ /// `f` and `w` must be function items or non-capturing closures.
+ /// This is statically asserted and not a safety requirement.
+ ///
+ /// This function does not produce an owning handle to the file. The created
+ /// file is removed when the [`Scope`] that this directory belongs
+ /// to is dropped.
+ pub fn read_write_callback_file<T, F, W>(
+ &self,
+ name: &CStr,
+ data: &'data T,
+ _f: &'static F,
+ _w: &'static W,
+ ) where
+ T: Send + Sync + 'static,
+ F: Fn(&T, &mut fmt::Formatter<'_>) -> fmt::Result + Send + Sync,
+ W: Fn(&T, &mut UserSliceReader) -> Result + Send + Sync,
+ {
+ let vtable = <WritableAdapter<FormatAdapter<T, F>, W> as ReadWriteFile<_>>::FILE_OPS
+ .adapt()
+ .adapt();
+ self.create_file(name, data, vtable)
+ }
+
+ /// Creates a write-only file in this directory.
+ ///
+ /// Writing to the file uses the [`Reader`] implementation on `data`.
+ ///
+ /// This function does not produce an owning handle to the file. The created
+ /// file is removed when the [`Scope`] that this directory belongs
+ /// to is dropped.
+ pub fn write_only_file<T: Reader + Send + Sync + 'static>(&self, name: &CStr, data: &'data T) {
+ let vtable = &<T as WriteFile<_>>::FILE_OPS;
+ self.create_file(name, data, vtable)
+ }
+
+ /// Creates a write-only file in this directory, with write logic from a callback.
+ ///
+ /// Writing to the file is handled by `w`.
+ ///
+ /// `w` must be a function item or a non-capturing closure.
+ /// This is statically asserted and not a safety requirement.
+ ///
+ /// This function does not produce an owning handle to the file. The created
+ /// file is removed when the [`Scope`] that this directory belongs
+ /// to is dropped.
+ pub fn write_only_callback_file<T, W>(&self, name: &CStr, data: &'data T, _w: &'static W)
+ where
+ T: Send + Sync + 'static,
+ W: Fn(&T, &mut UserSliceReader) -> Result + Send + Sync,
+ {
+ let vtable = &<WritableAdapter<NoWriter<T>, W> as WriteFile<_>>::FILE_OPS
+ .adapt()
+ .adapt();
+ self.create_file(name, data, vtable)
+ }
+
+ fn empty() -> Self {
+ ScopedDir {
+ #[cfg(CONFIG_DEBUG_FS)]
+ entry: ManuallyDrop::new(Entry::empty()),
+ _phantom: PhantomData,
+ }
+ }
+ #[cfg(CONFIG_DEBUG_FS)]
+ fn into_entry(self) -> Entry<'dir> {
+ ManuallyDrop::into_inner(self.entry)
+ }
+ #[cfg(not(CONFIG_DEBUG_FS))]
+ fn into_entry(self) {}
+}
+
+impl<'data> ScopedDir<'data, 'static> {
+ // This is safe, but intentionally not exported due to footgun status. A ScopedDir with no
+ // parent will never be released by default, and needs to have its entry extracted and used
+ // somewhere.
+ fn new(name: &CStr) -> ScopedDir<'data, 'static> {
+ ScopedDir {
+ #[cfg(CONFIG_DEBUG_FS)]
+ entry: ManuallyDrop::new(Entry::dir(name, None)),
+ _phantom: PhantomData,
+ }
+ }
+}
diff --git a/rust/kernel/debugfs/callback_adapters.rs b/rust/kernel/debugfs/callback_adapters.rs
new file mode 100644
index 000000000000..6c024230f676
--- /dev/null
+++ b/rust/kernel/debugfs/callback_adapters.rs
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2025 Google LLC.
+
+//! Adapters which allow the user to supply a write or read implementation as a value rather
+//! than a trait implementation. If provided, it will override the trait implementation.
+
+use super::{Reader, Writer};
+use crate::prelude::*;
+use crate::uaccess::UserSliceReader;
+use core::fmt;
+use core::fmt::Formatter;
+use core::marker::PhantomData;
+use core::ops::Deref;
+
+/// # Safety
+///
+/// To implement this trait, it must be safe to cast a `&Self` to a `&Inner`.
+/// It is intended for use in unstacking adapters out of `FileOps` backings.
+pub(crate) unsafe trait Adapter {
+ type Inner;
+}
+
+/// Adapter to implement `Reader` via a callback with the same representation as `T`.
+///
+/// * Layer it on top of `WriterAdapter` if you want to add a custom callback for `write`.
+/// * Layer it on top of `NoWriter` to pass through any support present on the underlying type.
+///
+/// # Invariants
+///
+/// If an instance for `WritableAdapter<_, W>` is constructed, `W` is inhabited.
+#[repr(transparent)]
+pub(crate) struct WritableAdapter<D, W> {
+ inner: D,
+ _writer: PhantomData<W>,
+}
+
+// SAFETY: Stripping off the adapter only removes constraints
+unsafe impl<D, W> Adapter for WritableAdapter<D, W> {
+ type Inner = D;
+}
+
+impl<D: Writer, W> Writer for WritableAdapter<D, W> {
+ fn write(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.inner.write(fmt)
+ }
+}
+
+impl<D: Deref, W> Reader for WritableAdapter<D, W>
+where
+ W: Fn(&D::Target, &mut UserSliceReader) -> Result + Send + Sync + 'static,
+{
+ fn read_from_slice(&self, reader: &mut UserSliceReader) -> Result {
+ // SAFETY: WritableAdapter<_, W> can only be constructed if W is inhabited
+ let w: &W = unsafe { materialize_zst() };
+ w(self.inner.deref(), reader)
+ }
+}
+
+/// Adapter to implement `Writer` via a callback with the same representation as `T`.
+///
+/// # Invariants
+///
+/// If an instance for `FormatAdapter<_, F>` is constructed, `F` is inhabited.
+#[repr(transparent)]
+pub(crate) struct FormatAdapter<D, F> {
+ inner: D,
+ _formatter: PhantomData<F>,
+}
+
+impl<D, F> Deref for FormatAdapter<D, F> {
+ type Target = D;
+ fn deref(&self) -> &D {
+ &self.inner
+ }
+}
+
+impl<D, F> Writer for FormatAdapter<D, F>
+where
+ F: Fn(&D, &mut Formatter<'_>) -> fmt::Result + 'static,
+{
+ fn write(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
+ // SAFETY: FormatAdapter<_, F> can only be constructed if F is inhabited
+ let f: &F = unsafe { materialize_zst() };
+ f(&self.inner, fmt)
+ }
+}
+
+// SAFETY: Stripping off the adapter only removes constraints
+unsafe impl<D, F> Adapter for FormatAdapter<D, F> {
+ type Inner = D;
+}
+
+#[repr(transparent)]
+pub(crate) struct NoWriter<D> {
+ inner: D,
+}
+
+// SAFETY: Stripping off the adapter only removes constraints
+unsafe impl<D> Adapter for NoWriter<D> {
+ type Inner = D;
+}
+
+impl<D> Deref for NoWriter<D> {
+ type Target = D;
+ fn deref(&self) -> &D {
+ &self.inner
+ }
+}
+
+/// For types with a unique value, produce a static reference to it.
+///
+/// # Safety
+///
+/// The caller asserts that F is inhabited
+unsafe fn materialize_zst<F>() -> &'static F {
+ const { assert!(core::mem::size_of::<F>() == 0) };
+ let zst_dangle: core::ptr::NonNull<F> = core::ptr::NonNull::dangling();
+ // SAFETY: While the pointer is dangling, it is a dangling pointer to a ZST, based on the
+ // assertion above. The type is also inhabited, by the caller's assertion. This means
+ // we can materialize it.
+ unsafe { zst_dangle.as_ref() }
+}
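A standalone illustration of the trick `materialize_zst` relies on; `Marker` is a hypothetical inhabited zero-sized type standing in for the non-capturing closure types used by the adapters.

```rust
use core::ptr::NonNull;

// Zero-sized and inhabited, just like a non-capturing closure type.
struct Marker;

fn static_marker() -> &'static Marker {
    const { assert!(core::mem::size_of::<Marker>() == 0) };
    // SAFETY: A reference to a ZST only needs to be non-null and well aligned,
    // which `NonNull::dangling()` provides, and `Marker` is inhabited, so the
    // materialized value is valid.
    unsafe { NonNull::<Marker>::dangling().as_ref() }
}
```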
diff --git a/rust/kernel/debugfs/entry.rs b/rust/kernel/debugfs/entry.rs
new file mode 100644
index 000000000000..f99402cd3ba0
--- /dev/null
+++ b/rust/kernel/debugfs/entry.rs
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2025 Google LLC.
+
+use crate::debugfs::file_ops::FileOps;
+use crate::ffi::c_void;
+use crate::str::CStr;
+use crate::sync::Arc;
+use core::marker::PhantomData;
+
+/// Owning handle to a DebugFS entry.
+///
+/// # Invariants
+///
+/// The wrapped pointer will always be `NULL`, an error, or an owned DebugFS `dentry`.
+pub(crate) struct Entry<'a> {
+ entry: *mut bindings::dentry,
+ // If we were created with an owning parent, this is the keep-alive
+ _parent: Option<Arc<Entry<'static>>>,
+ // If we were created with a non-owning parent, this prevents us from outliving it
+ _phantom: PhantomData<&'a ()>,
+}
+
+// SAFETY: [`Entry`] is just a `dentry` under the hood, which the API promises can be transferred
+// between threads.
+unsafe impl Send for Entry<'_> {}
+
+// SAFETY: All the C functions we call on the `dentry` pointer are threadsafe.
+unsafe impl Sync for Entry<'_> {}
+
+impl Entry<'static> {
+ pub(crate) fn dynamic_dir(name: &CStr, parent: Option<Arc<Self>>) -> Self {
+ let parent_ptr = match &parent {
+ Some(entry) => entry.as_ptr(),
+ None => core::ptr::null_mut(),
+ };
+ // SAFETY: The invariants of this function's arguments ensure the safety of this call.
+ // * `name` is a valid C string by the invariants of `&CStr`.
+ // * `parent_ptr` is either `NULL` (if `parent` is `None`), or a pointer to a valid
+ // `dentry` by our invariant. `debugfs_create_dir` handles `NULL` pointers correctly.
+ let entry = unsafe { bindings::debugfs_create_dir(name.as_char_ptr(), parent_ptr) };
+
+ Entry {
+ entry,
+ _parent: parent,
+ _phantom: PhantomData,
+ }
+ }
+
+ /// # Safety
+ ///
+ /// * `data` must outlive the returned `Entry`.
+ pub(crate) unsafe fn dynamic_file<T>(
+ name: &CStr,
+ parent: Arc<Self>,
+ data: &T,
+ file_ops: &'static FileOps<T>,
+ ) -> Self {
+ // SAFETY: The invariants of this function's arguments ensure the safety of this call.
+ // * `name` is a valid C string by the invariants of `&CStr`.
+ // * `parent.as_ptr()` is a pointer to a valid `dentry` by invariant.
+ // * The caller guarantees that `data` will outlive the returned `Entry`.
+ // * The guarantees on `FileOps` assert the vtable will be compatible with the data we have
+ // provided.
+ let entry = unsafe {
+ bindings::debugfs_create_file_full(
+ name.as_char_ptr(),
+ file_ops.mode(),
+ parent.as_ptr(),
+ core::ptr::from_ref(data) as *mut c_void,
+ core::ptr::null(),
+ &**file_ops,
+ )
+ };
+
+ Entry {
+ entry,
+ _parent: Some(parent),
+ _phantom: PhantomData,
+ }
+ }
+}
+
+impl<'a> Entry<'a> {
+ pub(crate) fn dir(name: &CStr, parent: Option<&'a Entry<'_>>) -> Self {
+ let parent_ptr = match &parent {
+ Some(entry) => entry.as_ptr(),
+ None => core::ptr::null_mut(),
+ };
+ // SAFETY: The invariants of this function's arguments ensure the safety of this call.
+ // * `name` is a valid C string by the invariants of `&CStr`.
+ // * `parent_ptr` is either `NULL` (if `parent` is `None`), or a pointer to a valid
+ // `dentry` (because `parent` is a valid reference to an `Entry`). The lifetime `'a`
+ // ensures that the parent outlives this entry.
+ let entry = unsafe { bindings::debugfs_create_dir(name.as_char_ptr(), parent_ptr) };
+
+ Entry {
+ entry,
+ _parent: None,
+ _phantom: PhantomData,
+ }
+ }
+
+ pub(crate) fn file<T>(
+ name: &CStr,
+ parent: &'a Entry<'_>,
+ data: &'a T,
+ file_ops: &FileOps<T>,
+ ) -> Self {
+ // SAFETY: The invariants of this function's arguments ensure the safety of this call.
+ // * `name` is a valid C string by the invariants of `&CStr`.
+ // * `parent.as_ptr()` is a pointer to a valid `dentry` because we have `&'a Entry`.
+ // * `data` is a valid pointer to `T` for lifetime `'a`.
+ // * The returned `Entry` has lifetime `'a`, so it cannot outlive `parent` or `data`.
+ // * The caller guarantees that `file_ops` is compatible with `data`.
+ // * The guarantees on `FileOps` assert the vtable will be compatible with the data we have
+ // provided.
+ let entry = unsafe {
+ bindings::debugfs_create_file_full(
+ name.as_char_ptr(),
+ file_ops.mode(),
+ parent.as_ptr(),
+ core::ptr::from_ref(data) as *mut c_void,
+ core::ptr::null(),
+ &**file_ops,
+ )
+ };
+
+ Entry {
+ entry,
+ _parent: None,
+ _phantom: PhantomData,
+ }
+ }
+}
+
+impl Entry<'_> {
+ /// Constructs a placeholder DebugFS [`Entry`].
+ pub(crate) fn empty() -> Self {
+ Self {
+ entry: core::ptr::null_mut(),
+ _parent: None,
+ _phantom: PhantomData,
+ }
+ }
+
+ /// Returns the pointer representation of the DebugFS directory.
+ ///
+ /// # Guarantees
+ ///
+ /// Due to the type invariant, the value returned from this function will always be an error
+ /// code, NULL, or a live DebugFS directory. If it is live, it will remain live at least as
+ /// long as this entry lives.
+ pub(crate) fn as_ptr(&self) -> *mut bindings::dentry {
+ self.entry
+ }
+}
+
+impl Drop for Entry<'_> {
+ fn drop(&mut self) {
+ // SAFETY: `debugfs_remove` can take `NULL`, error values, and legal DebugFS dentries.
+ // `as_ptr` guarantees that the pointer is of this form.
+ unsafe { bindings::debugfs_remove(self.as_ptr()) }
+ }
+}
diff --git a/rust/kernel/debugfs/file_ops.rs b/rust/kernel/debugfs/file_ops.rs
new file mode 100644
index 000000000000..50fead17b6f3
--- /dev/null
+++ b/rust/kernel/debugfs/file_ops.rs
@@ -0,0 +1,247 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2025 Google LLC.
+
+use super::{Reader, Writer};
+use crate::debugfs::callback_adapters::Adapter;
+use crate::prelude::*;
+use crate::seq_file::SeqFile;
+use crate::seq_print;
+use crate::uaccess::UserSlice;
+use core::fmt::{Display, Formatter, Result};
+use core::marker::PhantomData;
+
+#[cfg(CONFIG_DEBUG_FS)]
+use core::ops::Deref;
+
+/// # Invariant
+///
+/// `FileOps<T>` will always contain an `operations` which is safe to use for a file backed
+/// off an inode which has a pointer to a `T` in its private data that is safe to convert
+/// into a reference.
+pub(super) struct FileOps<T> {
+ #[cfg(CONFIG_DEBUG_FS)]
+ operations: bindings::file_operations,
+ #[cfg(CONFIG_DEBUG_FS)]
+ mode: u16,
+ _phantom: PhantomData<T>,
+}
+
+impl<T> FileOps<T> {
+ /// # Safety
+ ///
+ /// The caller asserts that the provided `operations` is safe to use for a file whose
+ /// inode has a pointer to `T` in its private data that is safe to convert into a reference.
+ const unsafe fn new(operations: bindings::file_operations, mode: u16) -> Self {
+ Self {
+ #[cfg(CONFIG_DEBUG_FS)]
+ operations,
+ #[cfg(CONFIG_DEBUG_FS)]
+ mode,
+ _phantom: PhantomData,
+ }
+ }
+
+ #[cfg(CONFIG_DEBUG_FS)]
+ pub(crate) const fn mode(&self) -> u16 {
+ self.mode
+ }
+}
+
+impl<T: Adapter> FileOps<T> {
+ pub(super) const fn adapt(&self) -> &FileOps<T::Inner> {
+ // SAFETY: `Adapter` asserts that `T` can be legally cast to `T::Inner`.
+ unsafe { core::mem::transmute(self) }
+ }
+}
+
+#[cfg(CONFIG_DEBUG_FS)]
+impl<T> Deref for FileOps<T> {
+ type Target = bindings::file_operations;
+
+ fn deref(&self) -> &Self::Target {
+ &self.operations
+ }
+}
+
+struct WriterAdapter<T>(T);
+
+impl<'a, T: Writer> Display for WriterAdapter<&'a T> {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ self.0.write(f)
+ }
+}
+
+/// Implements `open` for `file_operations` via `single_open` to fill out a `seq_file`.
+///
+/// # Safety
+///
+/// * `inode`'s private pointer must point to a value of type `T` which will outlive the `inode`
+/// and will not have any unique references alias it during the call.
+/// * `file` must point to a live, not-yet-initialized file object.
+unsafe extern "C" fn writer_open<T: Writer + Sync>(
+ inode: *mut bindings::inode,
+ file: *mut bindings::file,
+) -> c_int {
+ // SAFETY: The caller ensures that `inode` is a valid pointer.
+ let data = unsafe { (*inode).i_private };
+ // SAFETY:
+ // * `file` is acceptable by caller precondition.
+ // * `writer_act` will be called on a `seq_file` with private data set to the third argument,
+ // so we meet its safety requirements.
+ // * The `data` pointer passed in the third argument is a valid `T` pointer that outlives
+ // this call by caller preconditions.
+ unsafe { bindings::single_open(file, Some(writer_act::<T>), data) }
+}
+
+/// Prints private data stashed in a seq_file to that seq file.
+///
+/// # Safety
+///
+/// `seq` must point to a live `seq_file` whose private data is a valid pointer to a `T` which may
+/// not have any unique references alias it during the call.
+unsafe extern "C" fn writer_act<T: Writer + Sync>(
+ seq: *mut bindings::seq_file,
+ _: *mut c_void,
+) -> c_int {
+ // SAFETY: By caller precondition, this pointer is valid pointer to a `T`, and
+ // there are not and will not be any unique references until we are done.
+ let data = unsafe { &*((*seq).private.cast::<T>()) };
+ // SAFETY: By caller precondition, `seq_file` points to a live `seq_file`, so we can lift
+ // it.
+ let seq_file = unsafe { SeqFile::from_raw(seq) };
+ seq_print!(seq_file, "{}", WriterAdapter(data));
+ 0
+}
+
+// Work around lack of generic const items.
+pub(crate) trait ReadFile<T> {
+ const FILE_OPS: FileOps<T>;
+}
+
+impl<T: Writer + Sync> ReadFile<T> for T {
+ const FILE_OPS: FileOps<T> = {
+ let operations = bindings::file_operations {
+ read: Some(bindings::seq_read),
+ llseek: Some(bindings::seq_lseek),
+ release: Some(bindings::single_release),
+ open: Some(writer_open::<Self>),
+ // SAFETY: `file_operations` supports zeroes in all fields.
+ ..unsafe { core::mem::zeroed() }
+ };
+ // SAFETY: `operations` is all stock `seq_file` implementations except for `writer_open`.
+ // `open`'s only requirement beyond what is provided to all open functions is that the
+ // inode's data pointer must point to a `T` that will outlive it, which matches the
+ // `FileOps` requirements.
+ unsafe { FileOps::new(operations, 0o400) }
+ };
+}
+
+fn read<T: Reader + Sync>(data: &T, buf: *const c_char, count: usize) -> isize {
+ let mut reader = UserSlice::new(UserPtr::from_ptr(buf as *mut c_void), count).reader();
+
+ if let Err(e) = data.read_from_slice(&mut reader) {
+ return e.to_errno() as isize;
+ }
+
+ count as isize
+}
+
+/// # Safety
+///
+/// `file` must be a valid pointer to a `file` struct.
+/// The `private_data` of the file must contain a valid pointer to a `seq_file` whose
+/// `private` data in turn points to a `T` that implements `Reader`.
+/// `buf` must be a valid user-space buffer.
+pub(crate) unsafe extern "C" fn write<T: Reader + Sync>(
+ file: *mut bindings::file,
+ buf: *const c_char,
+ count: usize,
+ _ppos: *mut bindings::loff_t,
+) -> isize {
+ // SAFETY: The file was opened with `single_open`, which sets `private_data` to a `seq_file`.
+ let seq = unsafe { &mut *((*file).private_data.cast::<bindings::seq_file>()) };
+ // SAFETY: By caller precondition, this pointer is live and points to a value of type `T`.
+ let data = unsafe { &*(seq.private as *const T) };
+ read(data, buf, count)
+}
+
+// A trait to get the file operations for a type.
+pub(crate) trait ReadWriteFile<T> {
+ const FILE_OPS: FileOps<T>;
+}
+
+impl<T: Writer + Reader + Sync> ReadWriteFile<T> for T {
+ const FILE_OPS: FileOps<T> = {
+ let operations = bindings::file_operations {
+ open: Some(writer_open::<T>),
+ read: Some(bindings::seq_read),
+ write: Some(write::<T>),
+ llseek: Some(bindings::seq_lseek),
+ release: Some(bindings::single_release),
+ // SAFETY: `file_operations` supports zeroes in all fields.
+ ..unsafe { core::mem::zeroed() }
+ };
+ // SAFETY: `operations` is all stock `seq_file` implementations except for `writer_open`
+ // and `write`.
+ // `writer_open`'s only requirement beyond what is provided to all open functions is that
+ // the inode's data pointer must point to a `T` that will outlive it, which matches the
+ // `FileOps` requirements.
+ // `write` only requires that the file's private data pointer points to `seq_file`
+ // which points to a `T` that will outlive it, which matches what `writer_open`
+ // provides.
+ unsafe { FileOps::new(operations, 0o600) }
+ };
+}
+
+/// # Safety
+///
+/// `inode` must be a valid pointer to an `inode` struct.
+/// `file` must be a valid pointer to a `file` struct.
+unsafe extern "C" fn write_only_open(
+ inode: *mut bindings::inode,
+ file: *mut bindings::file,
+) -> c_int {
+ // SAFETY: The caller ensures that `inode` and `file` are valid pointers.
+ unsafe { (*file).private_data = (*inode).i_private };
+ 0
+}
+
+/// # Safety
+///
+/// * `file` must be a valid pointer to a `file` struct.
+/// * The `private_data` of the file must contain a valid pointer to a `T` that implements
+/// `Reader`.
+/// * `buf` must be a valid user-space buffer.
+pub(crate) unsafe extern "C" fn write_only_write<T: Reader + Sync>(
+ file: *mut bindings::file,
+ buf: *const c_char,
+ count: usize,
+ _ppos: *mut bindings::loff_t,
+) -> isize {
+ // SAFETY: The caller ensures that `file` is a valid pointer and that `private_data` holds a
+ // valid pointer to `T`.
+ let data = unsafe { &*((*file).private_data as *const T) };
+ read(data, buf, count)
+}
+
+pub(crate) trait WriteFile<T> {
+ const FILE_OPS: FileOps<T>;
+}
+
+impl<T: Reader + Sync> WriteFile<T> for T {
+ const FILE_OPS: FileOps<T> = {
+ let operations = bindings::file_operations {
+ open: Some(write_only_open),
+ write: Some(write_only_write::<T>),
+ llseek: Some(bindings::noop_llseek),
+ // SAFETY: `file_operations` supports zeroes in all fields.
+ ..unsafe { core::mem::zeroed() }
+ };
+ // SAFETY:
+ // * `write_only_open` populates the file private data with the inode private data
+ // * `write_only_write`'s only requirement is that the private data of the file point to
+ // a `T` and be legal to convert to a shared reference, which `write_only_open`
+ // satisfies.
+ unsafe { FileOps::new(operations, 0o200) }
+ };
+}
diff --git a/rust/kernel/debugfs/traits.rs b/rust/kernel/debugfs/traits.rs
new file mode 100644
index 000000000000..ab009eb254b3
--- /dev/null
+++ b/rust/kernel/debugfs/traits.rs
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2025 Google LLC.
+
+//! Traits for rendering or updating values exported to DebugFS.
+
+use crate::prelude::*;
+use crate::sync::Mutex;
+use crate::uaccess::UserSliceReader;
+use core::fmt::{self, Debug, Formatter};
+use core::str::FromStr;
+use core::sync::atomic::{
+ AtomicI16, AtomicI32, AtomicI64, AtomicI8, AtomicIsize, AtomicU16, AtomicU32, AtomicU64,
+ AtomicU8, AtomicUsize, Ordering,
+};
+
+/// A trait for types that can be written into a string.
+///
+/// This works very similarly to `Debug`, and is automatically implemented if `Debug` is
+/// implemented for a type. It is also implemented for any writable type inside a `Mutex`.
+///
+/// The derived implementation of `Debug` [may
+/// change](https://doc.rust-lang.org/std/fmt/trait.Debug.html#stability)
+/// between Rust versions, so if stability is key for your use case, please implement `Writer`
+/// explicitly instead.
+pub trait Writer {
+ /// Formats the value using the given formatter.
+ fn write(&self, f: &mut Formatter<'_>) -> fmt::Result;
+}
+
+impl<T: Writer> Writer for Mutex<T> {
+ fn write(&self, f: &mut Formatter<'_>) -> fmt::Result {
+ self.lock().write(f)
+ }
+}
+
+impl<T: Debug> Writer for T {
+ fn write(&self, f: &mut Formatter<'_>) -> fmt::Result {
+ writeln!(f, "{self:?}")
+ }
+}
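A sketch of the "implement `Writer` explicitly" route, assuming a hypothetical `LinkState` type that wants a stable output format. Such a type must not also implement `Debug`, or its impl would overlap with the blanket impl above.

```rust
use core::fmt::{self, Formatter};
use kernel::debugfs::Writer;

/// Hypothetical driver state with a hand-written, stable debugfs format.
struct LinkState {
    up: bool,
    speed_mbps: u32,
}

impl Writer for LinkState {
    fn write(&self, f: &mut Formatter<'_>) -> fmt::Result {
        writeln!(
            f,
            "link {} at {} Mb/s",
            if self.up { "up" } else { "down" },
            self.speed_mbps
        )
    }
}
```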
+
+/// A trait for types that can be updated from a user slice.
+///
+/// This works similarly to `FromStr`, but operates on a `UserSliceReader` rather than a &str.
+///
+/// It is automatically implemented for all atomic integers, or any type that implements `FromStr`
+/// wrapped in a `Mutex`.
+pub trait Reader {
+ /// Updates the value from the given user slice.
+ fn read_from_slice(&self, reader: &mut UserSliceReader) -> Result;
+}
+
+impl<T: FromStr> Reader for Mutex<T> {
+ fn read_from_slice(&self, reader: &mut UserSliceReader) -> Result {
+ let mut buf = [0u8; 128];
+ if reader.len() > buf.len() {
+ return Err(EINVAL);
+ }
+ let n = reader.len();
+ reader.read_slice(&mut buf[..n])?;
+
+ let s = core::str::from_utf8(&buf[..n]).map_err(|_| EINVAL)?;
+ let val = s.trim().parse::<T>().map_err(|_| EINVAL)?;
+ *self.lock() = val;
+ Ok(())
+ }
+}
+
+macro_rules! impl_reader_for_atomic {
+ ($(($atomic_type:ty, $int_type:ty)),*) => {
+ $(
+ impl Reader for $atomic_type {
+ fn read_from_slice(&self, reader: &mut UserSliceReader) -> Result {
+ let mut buf = [0u8; 21]; // Enough for a 64-bit number.
+ if reader.len() > buf.len() {
+ return Err(EINVAL);
+ }
+ let n = reader.len();
+ reader.read_slice(&mut buf[..n])?;
+
+ let s = core::str::from_utf8(&buf[..n]).map_err(|_| EINVAL)?;
+ let val = s.trim().parse::<$int_type>().map_err(|_| EINVAL)?;
+ self.store(val, Ordering::Relaxed);
+ Ok(())
+ }
+ }
+ )*
+ };
+}
+
+impl_reader_for_atomic!(
+ (AtomicI16, i16),
+ (AtomicI32, i32),
+ (AtomicI64, i64),
+ (AtomicI8, i8),
+ (AtomicIsize, isize),
+ (AtomicU16, u16),
+ (AtomicU32, u32),
+ (AtomicU64, u64),
+ (AtomicU8, u8),
+ (AtomicUsize, usize)
+);
diff --git a/rust/kernel/device.rs b/rust/kernel/device.rs
index 9cefa0810e0b..1321e6f0b53c 100644
--- a/rust/kernel/device.rs
+++ b/rust/kernel/device.rs
@@ -6,7 +6,8 @@
use crate::{
bindings, fmt,
- types::{ARef, ForeignOwnable, Opaque},
+ sync::aref::ARef,
+ types::{ForeignOwnable, Opaque},
};
use core::{marker::PhantomData, ptr};
@@ -406,7 +407,7 @@ kernel::impl_device_context_deref!(unsafe { Device });
kernel::impl_device_context_into_aref!(Device);
// SAFETY: Instances of `Device` are always reference-counted.
-unsafe impl crate::types::AlwaysRefCounted for Device {
+unsafe impl crate::sync::aref::AlwaysRefCounted for Device {
fn inc_ref(&self) {
// SAFETY: The existence of a shared reference guarantees that the refcount is non-zero.
unsafe { bindings::get_device(self.as_raw()) };
@@ -572,7 +573,7 @@ macro_rules! impl_device_context_deref {
#[macro_export]
macro_rules! __impl_device_context_into_aref {
($src:ty, $device:tt) => {
- impl ::core::convert::From<&$device<$src>> for $crate::types::ARef<$device> {
+ impl ::core::convert::From<&$device<$src>> for $crate::sync::aref::ARef<$device> {
fn from(dev: &$device<$src>) -> Self {
(&**dev).into()
}
diff --git a/rust/kernel/devres.rs b/rust/kernel/devres.rs
index d04e3fcebafb..132545962218 100644
--- a/rust/kernel/devres.rs
+++ b/rust/kernel/devres.rs
@@ -13,8 +13,8 @@ use crate::{
ffi::c_void,
prelude::*,
revocable::{Revocable, RevocableGuard},
- sync::{rcu, Completion},
- types::{ARef, ForeignOwnable, Opaque, ScopeGuard},
+ sync::{aref::ARef, rcu, Completion},
+ types::{ForeignOwnable, Opaque, ScopeGuard},
};
use pin_init::Wrapper;
diff --git a/rust/kernel/io.rs b/rust/kernel/io.rs
index 03b467722b86..ee182b0b5452 100644
--- a/rust/kernel/io.rs
+++ b/rust/kernel/io.rs
@@ -8,6 +8,7 @@ use crate::error::{code::EINVAL, Result};
use crate::{bindings, build_assert, ffi::c_void};
pub mod mem;
+pub mod poll;
pub mod resource;
pub use resource::Resource;
diff --git a/rust/kernel/io/poll.rs b/rust/kernel/io/poll.rs
new file mode 100644
index 000000000000..613eb25047ef
--- /dev/null
+++ b/rust/kernel/io/poll.rs
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! IO polling.
+//!
+//! C header: [`include/linux/iopoll.h`](srctree/include/linux/iopoll.h).
+
+use crate::{
+ error::{code::*, Result},
+ processor::cpu_relax,
+ task::might_sleep,
+ time::{delay::fsleep, Delta, Instant, Monotonic},
+};
+
+/// Polls periodically until a condition is met, an error occurs,
+/// or the timeout is reached.
+///
+/// The function repeatedly executes the given operation `op` closure and
+/// checks its result using the condition closure `cond`.
+///
+/// If `cond` returns `true`, the function returns successfully with
+/// the result of `op`. Otherwise, it waits for a duration specified
+/// by `sleep_delta` before executing `op` again.
+///
+/// This process continues until either `op` returns an error, `cond`
+/// returns `true`, or the timeout specified by `timeout_delta` is
+/// reached.
+///
+/// This function can only be used in a nonatomic context.
+///
+/// # Errors
+///
+/// If `op` returns an error, then that error is returned directly.
+///
+/// If the timeout specified by `timeout_delta` is reached, then
+/// `Err(ETIMEDOUT)` is returned.
+///
+/// # Examples
+///
+/// ```no_run
+/// use kernel::io::{Io, poll::read_poll_timeout};
+/// use kernel::time::Delta;
+///
+/// const HW_READY: u16 = 0x01;
+///
+/// fn wait_for_hardware<const SIZE: usize>(io: &Io<SIZE>) -> Result<()> {
+/// match read_poll_timeout(
+/// // The `op` closure reads the value of a specific status register.
+/// || io.try_read16(0x1000),
+/// // The `cond` closure takes a reference to the value returned by `op`
+/// // and checks whether the hardware is ready.
+/// |val: &u16| *val == HW_READY,
+/// Delta::from_millis(50),
+/// Delta::from_secs(3),
+/// ) {
+/// Ok(_) => {
+/// // The hardware is ready. The returned value of the `op` closure
+/// // isn't used.
+/// Ok(())
+/// }
+/// Err(e) => Err(e),
+/// }
+/// }
+/// ```
+#[track_caller]
+pub fn read_poll_timeout<Op, Cond, T>(
+ mut op: Op,
+ mut cond: Cond,
+ sleep_delta: Delta,
+ timeout_delta: Delta,
+) -> Result<T>
+where
+ Op: FnMut() -> Result<T>,
+ Cond: FnMut(&T) -> bool,
+{
+ let start: Instant<Monotonic> = Instant::now();
+
+ // Unlike the C version, we always call `might_sleep()` unconditionally,
+ // as conditional calls are error-prone. We clearly separate
+ // `read_poll_timeout()` and `read_poll_timeout_atomic()` to aid
+ // tools like klint.
+ might_sleep();
+
+ loop {
+ let val = op()?;
+ if cond(&val) {
+ // Unlike the C version, we immediately return.
+ // We know the condition is met so we don't need to check again.
+ return Ok(val);
+ }
+
+ if start.elapsed() > timeout_delta {
+ // Unlike the C version, we immediately return.
+ // We have just called `op()` so we don't need to call it again.
+ return Err(ETIMEDOUT);
+ }
+
+ if !sleep_delta.is_zero() {
+ fsleep(sleep_delta);
+ }
+
+ // `fsleep()` could be a busy-wait loop so we always call `cpu_relax()`.
+ cpu_relax();
+ }
+}
diff --git a/rust/kernel/irq.rs b/rust/kernel/irq.rs
new file mode 100644
index 000000000000..20abd4056655
--- /dev/null
+++ b/rust/kernel/irq.rs
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! IRQ abstractions.
+//!
+//! An IRQ is an interrupt request from a device. It is used to get the CPU's
+//! attention so it can service a hardware event in a timely manner.
+//!
+//! The current abstractions handle IRQ requests and handlers, i.e. they allow
+//! drivers to register a handler for a given IRQ line.
+//!
+//! C header: [`include/linux/interrupt.h`](srctree/include/linux/interrupt.h)
+
+/// Flags to be used when registering IRQ handlers.
+mod flags;
+
+/// IRQ allocation and handling.
+mod request;
+
+pub use flags::Flags;
+
+pub use request::{
+ Handler, IrqRequest, IrqReturn, Registration, ThreadedHandler, ThreadedIrqReturn,
+ ThreadedRegistration,
+};
diff --git a/rust/kernel/irq/flags.rs b/rust/kernel/irq/flags.rs
new file mode 100644
index 000000000000..adfde96ec47c
--- /dev/null
+++ b/rust/kernel/irq/flags.rs
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0
+// SPDX-FileCopyrightText: Copyright 2025 Collabora ltd.
+
+use crate::bindings;
+use crate::prelude::*;
+
+/// Flags to be used when registering IRQ handlers.
+///
+/// Flags can be used to request specific behaviors when registering an IRQ
+/// handler, and can be combined using the `|`, `&`, and `!` operators to
+/// further control the system's behavior.
+///
+/// A common use case is to register a shared interrupt, as sharing the line
+/// between devices is increasingly common in modern systems and is even
+/// required for some buses. This requires setting [`Flags::SHARED`] when
+/// requesting the interrupt. Other use cases include setting the trigger type
+/// through `Flags::TRIGGER_*`, which determines when the interrupt fires, or
+/// controlling whether the interrupt is masked after the handler runs by using
+/// [`Flags::ONESHOT`].
+///
+/// If an invalid combination of flags is provided, the system will refuse to
+/// register the handler, and lower layers will enforce certain flags when
+/// necessary. This means, for example, that all the
+/// [`crate::irq::Registration`] for a shared interrupt have to agree on
+/// [`Flags::SHARED`] and on the same trigger type, if set.
+#[derive(Clone, Copy, PartialEq, Eq)]
+pub struct Flags(c_ulong);
+
+impl Flags {
+ /// Use the interrupt line as already configured.
+ pub const TRIGGER_NONE: Flags = Flags::new(bindings::IRQF_TRIGGER_NONE);
+
+ /// The interrupt is triggered when the signal goes from low to high.
+ pub const TRIGGER_RISING: Flags = Flags::new(bindings::IRQF_TRIGGER_RISING);
+
+ /// The interrupt is triggered when the signal goes from high to low.
+ pub const TRIGGER_FALLING: Flags = Flags::new(bindings::IRQF_TRIGGER_FALLING);
+
+ /// The interrupt is triggered while the signal is held high.
+ pub const TRIGGER_HIGH: Flags = Flags::new(bindings::IRQF_TRIGGER_HIGH);
+
+ /// The interrupt is triggered while the signal is held low.
+ pub const TRIGGER_LOW: Flags = Flags::new(bindings::IRQF_TRIGGER_LOW);
+
+ /// Allow sharing the IRQ among several devices.
+ pub const SHARED: Flags = Flags::new(bindings::IRQF_SHARED);
+
+ /// Set by callers when they expect sharing mismatches to occur.
+ pub const PROBE_SHARED: Flags = Flags::new(bindings::IRQF_PROBE_SHARED);
+
+ /// Flag to mark this interrupt as timer interrupt.
+ pub const TIMER: Flags = Flags::new(bindings::IRQF_TIMER);
+
+ /// Interrupt is per CPU.
+ pub const PERCPU: Flags = Flags::new(bindings::IRQF_PERCPU);
+
+ /// Flag to exclude this interrupt from irq balancing.
+ pub const NOBALANCING: Flags = Flags::new(bindings::IRQF_NOBALANCING);
+
+ /// Interrupt is used for polling (only the interrupt that is registered
+ /// first in a shared interrupt is considered for performance reasons).
+ pub const IRQPOLL: Flags = Flags::new(bindings::IRQF_IRQPOLL);
+
+ /// Interrupt is not re-enabled after the hardirq handler finished. Used by
+ /// threaded interrupts which need to keep the irq line disabled until the
+ /// threaded handler has been run.
+ pub const ONESHOT: Flags = Flags::new(bindings::IRQF_ONESHOT);
+
+ /// Do not disable this IRQ during suspend. Does not guarantee that this
+ /// interrupt will wake the system from a suspended state.
+ pub const NO_SUSPEND: Flags = Flags::new(bindings::IRQF_NO_SUSPEND);
+
+ /// Force enable it on resume even if [`Flags::NO_SUSPEND`] is set.
+ pub const FORCE_RESUME: Flags = Flags::new(bindings::IRQF_FORCE_RESUME);
+
+ /// Interrupt cannot be threaded.
+ pub const NO_THREAD: Flags = Flags::new(bindings::IRQF_NO_THREAD);
+
+ /// Resume IRQ early during syscore instead of at device resume time.
+ pub const EARLY_RESUME: Flags = Flags::new(bindings::IRQF_EARLY_RESUME);
+
+ /// If the IRQ is shared with a [`Flags::NO_SUSPEND`] user, execute this
+ /// interrupt handler after suspending interrupts. For system wakeup devices
+ /// users need to implement wakeup detection in their interrupt handlers.
+ pub const COND_SUSPEND: Flags = Flags::new(bindings::IRQF_COND_SUSPEND);
+
+ /// Don't enable IRQ or NMI automatically when users request it. Users will
+ /// enable it explicitly by `enable_irq` or `enable_nmi` later.
+ pub const NO_AUTOEN: Flags = Flags::new(bindings::IRQF_NO_AUTOEN);
+
+ /// Exclude from runaway detection for IPI and similar handlers, depends on
+ /// `PERCPU`.
+ pub const NO_DEBUG: Flags = Flags::new(bindings::IRQF_NO_DEBUG);
+
+ pub(crate) fn into_inner(self) -> c_ulong {
+ self.0
+ }
+
+ const fn new(value: u32) -> Self {
+ build_assert!(value as u64 <= c_ulong::MAX as u64);
+ Self(value as c_ulong)
+ }
+}
+
+impl core::ops::BitOr for Flags {
+ type Output = Self;
+ fn bitor(self, rhs: Self) -> Self::Output {
+ Self(self.0 | rhs.0)
+ }
+}
+
+impl core::ops::BitAnd for Flags {
+ type Output = Self;
+ fn bitand(self, rhs: Self) -> Self::Output {
+ Self(self.0 & rhs.0)
+ }
+}
+
+impl core::ops::Not for Flags {
+ type Output = Self;
+ fn not(self) -> Self::Output {
+ Self(!self.0)
+ }
+}
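A small sketch of combining flags for a hypothetical device that shares its interrupt line and wants it masked until the threaded handler has finished.

```rust
use kernel::irq::Flags;

/// Flags for a shared, falling-edge interrupt whose line stays masked until
/// the threaded handler completes.
fn example_flags() -> Flags {
    Flags::SHARED | Flags::TRIGGER_FALLING | Flags::ONESHOT
}
```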
diff --git a/rust/kernel/irq/request.rs b/rust/kernel/irq/request.rs
new file mode 100644
index 000000000000..b150563fdef8
--- /dev/null
+++ b/rust/kernel/irq/request.rs
@@ -0,0 +1,507 @@
+// SPDX-License-Identifier: GPL-2.0
+// SPDX-FileCopyrightText: Copyright 2025 Collabora ltd.
+
+//! This module provides types like [`Registration`] and
+//! [`ThreadedRegistration`], which allow users to register handlers for a given
+//! IRQ line.
+
+use core::marker::PhantomPinned;
+
+use crate::alloc::Allocator;
+use crate::device::{Bound, Device};
+use crate::devres::Devres;
+use crate::error::to_result;
+use crate::irq::flags::Flags;
+use crate::prelude::*;
+use crate::str::CStr;
+use crate::sync::Arc;
+
+/// The value that can be returned from a [`Handler`] or a [`ThreadedHandler`].
+#[repr(u32)]
+pub enum IrqReturn {
+ /// The interrupt was not from this device or was not handled.
+ None = bindings::irqreturn_IRQ_NONE,
+
+ /// The interrupt was handled by this device.
+ Handled = bindings::irqreturn_IRQ_HANDLED,
+}
+
+/// Callbacks for an IRQ handler.
+pub trait Handler: Sync {
+ /// The hard IRQ handler.
+ ///
+ /// This is executed in interrupt context, hence all corresponding
+ /// limitations do apply.
+ ///
+ /// All work that does not necessarily need to be executed from
+ /// interrupt context, should be deferred to a threaded handler.
+ /// See also [`ThreadedRegistration`].
+ fn handle(&self, device: &Device<Bound>) -> IrqReturn;
+}
+
+impl<T: ?Sized + Handler + Send> Handler for Arc<T> {
+ fn handle(&self, device: &Device<Bound>) -> IrqReturn {
+ T::handle(self, device)
+ }
+}
+
+impl<T: ?Sized + Handler, A: Allocator> Handler for Box<T, A> {
+ fn handle(&self, device: &Device<Bound>) -> IrqReturn {
+ T::handle(self, device)
+ }
+}
+
+/// # Invariants
+///
+/// - `self.irq` is the same as the one passed to `request_{threaded}_irq`.
+/// - `cookie` was passed to `request_{threaded}_irq` as the cookie. It is guaranteed to be unique
+/// by the type system, since each call to `new` will return a different instance of
+/// `Registration`.
+#[pin_data(PinnedDrop)]
+struct RegistrationInner {
+ irq: u32,
+ cookie: *mut c_void,
+}
+
+impl RegistrationInner {
+ fn synchronize(&self) {
+ // SAFETY: safe as per the invariants of `RegistrationInner`
+ unsafe { bindings::synchronize_irq(self.irq) };
+ }
+}
+
+#[pinned_drop]
+impl PinnedDrop for RegistrationInner {
+ fn drop(self: Pin<&mut Self>) {
+ // SAFETY:
+ //
+ // Safe as per the invariants of `RegistrationInner` and:
+ //
+ // - The containing struct is `!Unpin` and was initialized using
+ // pin-init, so it occupied the same memory location for the entirety of
+ // its lifetime.
+ //
+ // Notice that this will block until all handlers finish executing,
+ // i.e.: at no point will &self be invalid while the handler is running.
+ unsafe { bindings::free_irq(self.irq, self.cookie) };
+ }
+}
+
+// SAFETY: We only use `inner` on drop, which is called at most once with no
+// concurrent access.
+unsafe impl Sync for RegistrationInner {}
+
+// SAFETY: It is safe to send `RegistrationInner` across threads.
+unsafe impl Send for RegistrationInner {}
+
+/// A request for an IRQ line for a given device.
+///
+/// # Invariants
+///
+/// - `irq` is the number of an interrupt source of `dev`.
+/// - `irq` has not been registered yet.
+pub struct IrqRequest<'a> {
+ dev: &'a Device<Bound>,
+ irq: u32,
+}
+
+impl<'a> IrqRequest<'a> {
+ /// Creates a new IRQ request for the given device and IRQ number.
+ ///
+ /// # Safety
+ ///
+ /// - `irq` should be a valid IRQ number for `dev`.
+ pub(crate) unsafe fn new(dev: &'a Device<Bound>, irq: u32) -> Self {
+ // INVARIANT: `irq` is a valid IRQ number for `dev`.
+ IrqRequest { dev, irq }
+ }
+
+ /// Returns the IRQ number of an [`IrqRequest`].
+ pub fn irq(&self) -> u32 {
+ self.irq
+ }
+}
+
+/// A registration of an IRQ handler for a given IRQ line.
+///
+/// # Examples
+///
+/// The following is an example of using `Registration`. It uses a
+/// [`Completion`] to coordinate between the IRQ
+/// handler and process context. [`Completion`] uses interior mutability, so the
+/// handler can signal with [`Completion::complete_all()`] and the process
+/// context can wait with [`Completion::wait_for_completion()`] even though
+/// there is no way to get a mutable reference to any of the fields in
+/// `Data`.
+///
+/// [`Completion`]: kernel::sync::Completion
+/// [`Completion::complete_all()`]: kernel::sync::Completion::complete_all
+/// [`Completion::wait_for_completion()`]: kernel::sync::Completion::wait_for_completion
+///
+/// ```
+/// use kernel::c_str;
+/// use kernel::device::{Bound, Device};
+/// use kernel::irq::{self, Flags, IrqRequest, IrqReturn, Registration};
+/// use kernel::prelude::*;
+/// use kernel::sync::{Arc, Completion};
+///
+/// // Data shared between process and IRQ context.
+/// #[pin_data]
+/// struct Data {
+/// #[pin]
+/// completion: Completion,
+/// }
+///
+/// impl irq::Handler for Data {
+/// // Executed in IRQ context.
+/// fn handle(&self, _dev: &Device<Bound>) -> IrqReturn {
+/// self.completion.complete_all();
+/// IrqReturn::Handled
+/// }
+/// }
+///
+/// // Registers an IRQ handler for the given IrqRequest.
+/// //
+/// // This runs in process context and assumes `request` was previously acquired from a device.
+/// fn register_irq(
+/// handler: impl PinInit<Data, Error>,
+/// request: IrqRequest<'_>,
+/// ) -> Result<Arc<Registration<Data>>> {
+/// let registration = Registration::new(request, Flags::SHARED, c_str!("my_device"), handler);
+///
+/// let registration = Arc::pin_init(registration, GFP_KERNEL)?;
+///
+/// registration.handler().completion.wait_for_completion();
+///
+/// Ok(registration)
+/// }
+/// # Ok::<(), Error>(())
+/// ```
+///
+/// # Invariants
+///
+/// * We own an irq handler whose cookie is a pointer to `Self`.
+#[pin_data]
+pub struct Registration<T: Handler + 'static> {
+ #[pin]
+ inner: Devres<RegistrationInner>,
+
+ #[pin]
+ handler: T,
+
+ /// Pinned because we need address stability so that we can pass a pointer
+ /// to the callback.
+ #[pin]
+ _pin: PhantomPinned,
+}
+
+impl<T: Handler + 'static> Registration<T> {
+ /// Registers the IRQ handler with the system for the given IRQ number.
+ pub fn new<'a>(
+ request: IrqRequest<'a>,
+ flags: Flags,
+ name: &'static CStr,
+ handler: impl PinInit<T, Error> + 'a,
+ ) -> impl PinInit<Self, Error> + 'a {
+ try_pin_init!(&this in Self {
+ handler <- handler,
+ inner <- Devres::new(
+ request.dev,
+ try_pin_init!(RegistrationInner {
+ // INVARIANT: `this` is a valid pointer to the `Registration` instance
+ cookie: this.as_ptr().cast::<c_void>(),
+ irq: {
+ // SAFETY:
+ // - The callbacks are valid for use with request_irq.
+ // - If this succeeds, the slot is guaranteed to be valid until the
+ // destructor of Self runs, which will deregister the callbacks
+ // before the memory location becomes invalid.
+ // - When request_irq is called, everything that handle_irq_callback will
+ // touch has already been initialized, so it's safe for the callback to
+ // be called immediately.
+ to_result(unsafe {
+ bindings::request_irq(
+ request.irq,
+ Some(handle_irq_callback::<T>),
+ flags.into_inner(),
+ name.as_char_ptr(),
+ this.as_ptr().cast::<c_void>(),
+ )
+ })?;
+ request.irq
+ }
+ })
+ ),
+ _pin: PhantomPinned,
+ })
+ }
+
+ /// Returns a reference to the handler that was registered with the system.
+ pub fn handler(&self) -> &T {
+ &self.handler
+ }
+
+ /// Wait for pending IRQ handlers on other CPUs.
+ ///
+ /// This will attempt to access the inner [`Devres`] container.
+ pub fn try_synchronize(&self) -> Result {
+ let inner = self.inner.try_access().ok_or(ENODEV)?;
+ inner.synchronize();
+ Ok(())
+ }
+
+ /// Wait for pending IRQ handlers on other CPUs.
+ pub fn synchronize(&self, dev: &Device<Bound>) -> Result {
+ let inner = self.inner.access(dev)?;
+ inner.synchronize();
+ Ok(())
+ }
+}
+
+/// # Safety
+///
+/// This function should only be used as the callback in `request_irq`.
+unsafe extern "C" fn handle_irq_callback<T: Handler>(_irq: i32, ptr: *mut c_void) -> c_uint {
+ // SAFETY: `ptr` is a pointer to `Registration<T>` set in `Registration::new`
+ let registration = unsafe { &*(ptr as *const Registration<T>) };
+ // SAFETY: The irq callback is removed before the device is unbound, so the fact that the irq
+ // callback is running implies that the device has not yet been unbound.
+ let device = unsafe { registration.inner.device().as_bound() };
+
+ T::handle(&registration.handler, device) as c_uint
+}
+
+/// The value that can be returned from [`ThreadedHandler::handle`].
+#[repr(u32)]
+pub enum ThreadedIrqReturn {
+ /// The interrupt was not from this device or was not handled.
+ None = bindings::irqreturn_IRQ_NONE,
+
+ /// The interrupt was handled by this device.
+ Handled = bindings::irqreturn_IRQ_HANDLED,
+
+ /// The handler wants the handler thread to wake up.
+ WakeThread = bindings::irqreturn_IRQ_WAKE_THREAD,
+}
+
+/// Callbacks for a threaded IRQ handler.
+pub trait ThreadedHandler: Sync {
+ /// The hard IRQ handler.
+ ///
+ /// This is executed in interrupt context, hence all corresponding
+ /// limitations do apply. All work that does not necessarily need to be
+ /// executed from interrupt context should be deferred to the threaded
+ /// handler, i.e. [`ThreadedHandler::handle_threaded`].
+ ///
+ /// The default implementation returns [`ThreadedIrqReturn::WakeThread`].
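+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch of overriding the default so that the thread is only
+ /// woken when the device actually raised the interrupt; `interrupt_pending`
+ /// is a placeholder for a driver-specific check:
+ ///
+ /// ```
+ /// use kernel::device::{Bound, Device};
+ /// use kernel::irq::{IrqReturn, ThreadedHandler, ThreadedIrqReturn};
+ ///
+ /// struct MyHandler;
+ ///
+ /// impl MyHandler {
+ ///     fn interrupt_pending(&self) -> bool {
+ ///         // Placeholder for reading a driver-specific status register.
+ ///         true
+ ///     }
+ /// }
+ ///
+ /// impl ThreadedHandler for MyHandler {
+ ///     fn handle(&self, _dev: &Device<Bound>) -> ThreadedIrqReturn {
+ ///         if self.interrupt_pending() {
+ ///             ThreadedIrqReturn::WakeThread
+ ///         } else {
+ ///             ThreadedIrqReturn::None
+ ///         }
+ ///     }
+ ///
+ ///     fn handle_threaded(&self, _dev: &Device<Bound>) -> IrqReturn {
+ ///         IrqReturn::Handled
+ ///     }
+ /// }
+ /// ```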
+ #[expect(unused_variables)]
+ fn handle(&self, device: &Device<Bound>) -> ThreadedIrqReturn {
+ ThreadedIrqReturn::WakeThread
+ }
+
+ /// The threaded IRQ handler.
+ ///
+ /// This is executed in process context. The kernel creates a dedicated
+ /// `kthread` for this purpose.
+ fn handle_threaded(&self, device: &Device<Bound>) -> IrqReturn;
+}
+
+impl<T: ?Sized + ThreadedHandler + Send> ThreadedHandler for Arc<T> {
+ fn handle(&self, device: &Device<Bound>) -> ThreadedIrqReturn {
+ T::handle(self, device)
+ }
+
+ fn handle_threaded(&self, device: &Device<Bound>) -> IrqReturn {
+ T::handle_threaded(self, device)
+ }
+}
+
+impl<T: ?Sized + ThreadedHandler, A: Allocator> ThreadedHandler for Box<T, A> {
+ fn handle(&self, device: &Device<Bound>) -> ThreadedIrqReturn {
+ T::handle(self, device)
+ }
+
+ fn handle_threaded(&self, device: &Device<Bound>) -> IrqReturn {
+ T::handle_threaded(self, device)
+ }
+}
+
+/// A registration of a threaded IRQ handler for a given IRQ line.
+///
+/// Two callbacks are required: one to handle the IRQ, and one to handle any
+/// other work in a separate thread.
+///
+/// The thread handler is only called if the IRQ handler returns
+/// [`ThreadedIrqReturn::WakeThread`].
+///
+/// # Examples
+///
+/// The following is an example of using [`ThreadedRegistration`]. It uses a
+/// [`Mutex`](kernel::sync::Mutex) to provide interior mutability.
+///
+/// ```
+/// use kernel::c_str;
+/// use kernel::device::{Bound, Device};
+/// use kernel::irq::{
+/// self, Flags, IrqRequest, IrqReturn, ThreadedHandler, ThreadedIrqReturn,
+/// ThreadedRegistration,
+/// };
+/// use kernel::prelude::*;
+/// use kernel::sync::{Arc, Mutex};
+///
+/// // Declare a struct that will be passed in when the interrupt fires. The u32
+/// // merely serves as an example of some internal data.
+/// //
+/// // [`irq::ThreadedHandler::handle`] takes `&self`. This example
+/// // illustrates how interior mutability can be used when sharing the data
+/// // between process context and IRQ context.
+/// #[pin_data]
+/// struct Data {
+/// #[pin]
+/// value: Mutex<u32>,
+/// }
+///
+/// impl ThreadedHandler for Data {
+/// // This will run (in a separate kthread) if and only if
+/// // [`ThreadedHandler::handle`] returns [`WakeThread`], which it does by
+/// // default.
+/// fn handle_threaded(&self, _dev: &Device<Bound>) -> IrqReturn {
+/// let mut data = self.value.lock();
+/// *data += 1;
+/// IrqReturn::Handled
+/// }
+/// }
+///
+/// // Registers a threaded IRQ handler for the given [`IrqRequest`].
+/// //
+/// // This runs in process context and assumes that `request` was previously
+/// // acquired from a device.
+/// fn register_threaded_irq(
+/// handler: impl PinInit<Data, Error>,
+/// request: IrqRequest<'_>,
+/// ) -> Result<Arc<ThreadedRegistration<Data>>> {
+/// let registration =
+/// ThreadedRegistration::new(request, Flags::SHARED, c_str!("my_device"), handler);
+///
+/// let registration = Arc::pin_init(registration, GFP_KERNEL)?;
+///
+/// {
+/// // The data can be accessed from process context too.
+/// let mut data = registration.handler().value.lock();
+/// *data += 1;
+/// }
+///
+/// Ok(registration)
+/// }
+/// # Ok::<(), Error>(())
+/// ```
+///
+/// # Invariants
+///
+/// * We own an irq handler whose cookie is a pointer to `Self`.
+#[pin_data]
+pub struct ThreadedRegistration<T: ThreadedHandler + 'static> {
+ #[pin]
+ inner: Devres<RegistrationInner>,
+
+ #[pin]
+ handler: T,
+
+ /// Pinned because we need address stability so that we can pass a pointer
+ /// to the callback.
+ #[pin]
+ _pin: PhantomPinned,
+}
+
+impl<T: ThreadedHandler + 'static> ThreadedRegistration<T> {
+ /// Registers the IRQ handler with the system for the given IRQ number.
+ pub fn new<'a>(
+ request: IrqRequest<'a>,
+ flags: Flags,
+ name: &'static CStr,
+ handler: impl PinInit<T, Error> + 'a,
+ ) -> impl PinInit<Self, Error> + 'a {
+ try_pin_init!(&this in Self {
+ handler <- handler,
+ inner <- Devres::new(
+ request.dev,
+ try_pin_init!(RegistrationInner {
+ // INVARIANT: `this` is a valid pointer to the `ThreadedRegistration` instance.
+ cookie: this.as_ptr().cast::<c_void>(),
+ irq: {
+ // SAFETY:
+ // - The callbacks are valid for use with request_threaded_irq.
+ // - If this succeeds, the slot is guaranteed to be valid until the
+ // destructor of Self runs, which will deregister the callbacks
+ // before the memory location becomes invalid.
+ // - When request_threaded_irq is called, everything that the two callbacks
+ // will touch has already been initialized, so it's safe for the
+ // callbacks to be called immediately.
+ to_result(unsafe {
+ bindings::request_threaded_irq(
+ request.irq,
+ Some(handle_threaded_irq_callback::<T>),
+ Some(thread_fn_callback::<T>),
+ flags.into_inner(),
+ name.as_char_ptr(),
+ this.as_ptr().cast::<c_void>(),
+ )
+ })?;
+ request.irq
+ }
+ })
+ ),
+ _pin: PhantomPinned,
+ })
+ }
+
+ /// Returns a reference to the handler that was registered with the system.
+ pub fn handler(&self) -> &T {
+ &self.handler
+ }
+
+ /// Wait for pending IRQ handlers on other CPUs.
+ ///
+ /// This will attempt to access the inner [`Devres`] container.
+ pub fn try_synchronize(&self) -> Result {
+ let inner = self.inner.try_access().ok_or(ENODEV)?;
+ inner.synchronize();
+ Ok(())
+ }
+
+ /// Wait for pending IRQ handlers on other CPUs.
+ pub fn synchronize(&self, dev: &Device<Bound>) -> Result {
+ let inner = self.inner.access(dev)?;
+ inner.synchronize();
+ Ok(())
+ }
+}
+
+/// # Safety
+///
+/// This function should only be used as the callback in `request_threaded_irq`.
+unsafe extern "C" fn handle_threaded_irq_callback<T: ThreadedHandler>(
+ _irq: i32,
+ ptr: *mut c_void,
+) -> c_uint {
+ // SAFETY: `ptr` is a pointer to `ThreadedRegistration<T>` set in `ThreadedRegistration::new`
+ let registration = unsafe { &*(ptr as *const ThreadedRegistration<T>) };
+ // SAFETY: The irq callback is removed before the device is unbound, so the fact that the irq
+ // callback is running implies that the device has not yet been unbound.
+ let device = unsafe { registration.inner.device().as_bound() };
+
+ T::handle(&registration.handler, device) as c_uint
+}
+
+/// # Safety
+///
+/// This function should only be used as the callback in `request_threaded_irq`.
+unsafe extern "C" fn thread_fn_callback<T: ThreadedHandler>(_irq: i32, ptr: *mut c_void) -> c_uint {
+ // SAFETY: `ptr` is a pointer to `ThreadedRegistration<T>` set in `ThreadedRegistration::new`
+ let registration = unsafe { &*(ptr as *const ThreadedRegistration<T>) };
+ // SAFETY: The irq callback is removed before the device is unbound, so the fact that the irq
+ // callback is running implies that the device has not yet been unbound.
+ let device = unsafe { registration.inner.device().as_bound() };
+
+ T::handle_threaded(&registration.handler, device) as c_uint
+}
diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs
index f910a5ab80ba..09ee3d17ee0a 100644
--- a/rust/kernel/lib.rs
+++ b/rust/kernel/lib.rs
@@ -78,6 +78,7 @@ pub mod cpu;
pub mod cpufreq;
pub mod cpumask;
pub mod cred;
+pub mod debugfs;
pub mod device;
pub mod device_id;
pub mod devres;
@@ -94,6 +95,7 @@ pub mod fs;
pub mod init;
pub mod io;
pub mod ioctl;
+pub mod irq;
pub mod jump_label;
#[cfg(CONFIG_KUNIT)]
pub mod kunit;
@@ -112,6 +114,7 @@ pub mod pid_namespace;
pub mod platform;
pub mod prelude;
pub mod print;
+pub mod processor;
pub mod ptr;
pub mod rbtree;
pub mod regulator;
diff --git a/rust/kernel/pci.rs b/rust/kernel/pci.rs
index 887ee611b553..7fcc5f6022c1 100644
--- a/rust/kernel/pci.rs
+++ b/rust/kernel/pci.rs
@@ -10,10 +10,11 @@ use crate::{
devres::Devres,
driver,
error::{from_result, to_result, Result},
- io::Io,
- io::IoRaw,
+ io::{Io, IoRaw},
+ irq::{self, IrqRequest},
str::CStr,
- types::{ARef, Opaque},
+ sync::aref::ARef,
+ types::Opaque,
ThisModule,
};
use core::{
@@ -23,6 +24,10 @@ use core::{
};
use kernel::prelude::*;
+mod id;
+
+pub use self::id::{Class, ClassMask, Vendor};
+
/// An adapter for the registration of PCI drivers.
pub struct Adapter<T: Driver>(T);
@@ -60,7 +65,7 @@ impl<T: Driver + 'static> Adapter<T> {
extern "C" fn probe_callback(
pdev: *mut bindings::pci_dev,
id: *const bindings::pci_device_id,
- ) -> kernel::ffi::c_int {
+ ) -> c_int {
// SAFETY: The PCI bus only ever calls the probe callback with a valid pointer to a
// `struct pci_dev`.
//
@@ -128,10 +133,11 @@ impl DeviceId {
/// Equivalent to C's `PCI_DEVICE` macro.
///
- /// Create a new `pci::DeviceId` from a vendor and device ID number.
- pub const fn from_id(vendor: u32, device: u32) -> Self {
+ /// Create a new `pci::DeviceId` from a vendor and device ID.
+ #[inline]
+ pub const fn from_id(vendor: Vendor, device: u32) -> Self {
Self(bindings::pci_device_id {
- vendor,
+ vendor: vendor.as_raw() as u32,
device,
subvendor: DeviceId::PCI_ANY_ID,
subdevice: DeviceId::PCI_ANY_ID,
@@ -145,6 +151,7 @@ impl DeviceId {
/// Equivalent to C's `PCI_DEVICE_CLASS` macro.
///
/// Create a new `pci::DeviceId` from a class number and mask.
+ #[inline]
pub const fn from_class(class: u32, class_mask: u32) -> Self {
Self(bindings::pci_device_id {
vendor: DeviceId::PCI_ANY_ID,
@@ -157,6 +164,29 @@ impl DeviceId {
override_only: 0,
})
}
+
+ /// Create a new [`DeviceId`] from a class number, mask, and specific vendor.
+ ///
+ /// This is more targeted than [`DeviceId::from_class`]: in addition to matching by [`Vendor`],
+ /// it also matches the PCI [`Class`] (up to the entire 24 bits, depending on the
+ /// [`ClassMask`]).
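+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch of building an ID that matches any Red Hat Ethernet
+ /// controller; the class, mask, and vendor shown are only illustrative:
+ ///
+ /// ```
+ /// use kernel::pci::{Class, ClassMask, DeviceId, Vendor};
+ ///
+ /// let _id = DeviceId::from_class_and_vendor(
+ ///     Class::NETWORK_ETHERNET,
+ ///     ClassMask::ClassSubclass,
+ ///     Vendor::REDHAT,
+ /// );
+ /// ```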
+ #[inline]
+ pub const fn from_class_and_vendor(
+ class: Class,
+ class_mask: ClassMask,
+ vendor: Vendor,
+ ) -> Self {
+ Self(bindings::pci_device_id {
+ vendor: vendor.as_raw() as u32,
+ device: DeviceId::PCI_ANY_ID,
+ subvendor: DeviceId::PCI_ANY_ID,
+ subdevice: DeviceId::PCI_ANY_ID,
+ class: class.as_raw(),
+ class_mask: class_mask.as_raw(),
+ driver_data: 0,
+ override_only: 0,
+ })
+ }
}
// SAFETY: `DeviceId` is a `#[repr(transparent)]` wrapper of `pci_device_id` and does not add
@@ -206,7 +236,7 @@ macro_rules! pci_device_table {
/// <MyDriver as pci::Driver>::IdInfo,
/// [
/// (
-/// pci::DeviceId::from_id(bindings::PCI_VENDOR_ID_REDHAT, bindings::PCI_ANY_ID as u32),
+/// pci::DeviceId::from_id(pci::Vendor::REDHAT, bindings::PCI_ANY_ID as u32),
/// (),
/// )
/// ]
@@ -240,11 +270,11 @@ pub trait Driver: Send {
/// PCI driver probe.
///
- /// Called when a new platform device is added or discovered.
- /// Implementers should attempt to initialize the device here.
+ /// Called when a new PCI device is added or discovered. Implementers should
+ /// attempt to initialize the device here.
fn probe(dev: &Device<device::Core>, id_info: &Self::IdInfo) -> Result<Pin<KBox<Self>>>;
- /// Platform driver unbind.
+ /// PCI driver unbind.
///
/// Called when a [`Device`] is unbound from its bound [`Driver`]. Implementing this callback
/// is optional.
@@ -347,7 +377,7 @@ impl<const SIZE: usize> Bar<SIZE> {
// `ioptr` is valid by the safety requirements.
// `num` is valid by the safety requirements.
unsafe {
- bindings::pci_iounmap(pdev.as_raw(), ioptr as *mut kernel::ffi::c_void);
+ bindings::pci_iounmap(pdev.as_raw(), ioptr as *mut c_void);
bindings::pci_release_region(pdev.as_raw(), num);
}
}
@@ -359,6 +389,7 @@ impl<const SIZE: usize> Bar<SIZE> {
}
impl Bar {
+ #[inline]
fn index_is_valid(index: u32) -> bool {
// A `struct pci_dev` owns an array of resources with at most `PCI_NUM_RESOURCES` entries.
index < bindings::PCI_NUM_RESOURCES
@@ -381,24 +412,90 @@ impl<const SIZE: usize> Deref for Bar<SIZE> {
}
impl<Ctx: device::DeviceContext> Device<Ctx> {
+ #[inline]
fn as_raw(&self) -> *mut bindings::pci_dev {
self.0.get()
}
}
impl Device {
- /// Returns the PCI vendor ID.
- pub fn vendor_id(&self) -> u16 {
+ /// Returns the PCI vendor ID as [`Vendor`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use kernel::{device::Core, pci::{self, Vendor}, prelude::*};
+ /// fn log_device_info(pdev: &pci::Device<Core>) -> Result {
+ /// // Get an instance of `Vendor`.
+ /// let vendor = pdev.vendor_id();
+ /// dev_info!(
+ /// pdev.as_ref(),
+ /// "Device: Vendor={}, Device=0x{:x}\n",
+ /// vendor,
+ /// pdev.device_id()
+ /// );
+ /// Ok(())
+ /// }
+ /// ```
+ #[inline]
+ pub fn vendor_id(&self) -> Vendor {
// SAFETY: `self.as_raw` is a valid pointer to a `struct pci_dev`.
- unsafe { (*self.as_raw()).vendor }
+ let vendor_id = unsafe { (*self.as_raw()).vendor };
+ Vendor::from_raw(vendor_id)
}
/// Returns the PCI device ID.
+ #[inline]
pub fn device_id(&self) -> u16 {
- // SAFETY: `self.as_raw` is a valid pointer to a `struct pci_dev`.
+ // SAFETY: By its type invariant `self.as_raw` is always a valid pointer to a
+ // `struct pci_dev`.
unsafe { (*self.as_raw()).device }
}
+ /// Returns the PCI revision ID.
+ #[inline]
+ pub fn revision_id(&self) -> u8 {
+ // SAFETY: By its type invariant `self.as_raw` is always a valid pointer to a
+ // `struct pci_dev`.
+ unsafe { (*self.as_raw()).revision }
+ }
+
+ /// Returns the PCI bus device/function.
+ #[inline]
+ pub fn dev_id(&self) -> u16 {
+ // SAFETY: By its type invariant `self.as_raw` is always a valid pointer to a
+ // `struct pci_dev`.
+ unsafe { bindings::pci_dev_id(self.as_raw()) }
+ }
+
+ /// Returns the PCI subsystem vendor ID.
+ #[inline]
+ pub fn subsystem_vendor_id(&self) -> u16 {
+ // SAFETY: By its type invariant `self.as_raw` is always a valid pointer to a
+ // `struct pci_dev`.
+ unsafe { (*self.as_raw()).subsystem_vendor }
+ }
+
+ /// Returns the PCI subsystem device ID.
+ #[inline]
+ pub fn subsystem_device_id(&self) -> u16 {
+ // SAFETY: By its type invariant `self.as_raw` is always a valid pointer to a
+ // `struct pci_dev`.
+ unsafe { (*self.as_raw()).subsystem_device }
+ }
+
+ /// Returns the start of the given PCI bar resource.
+ pub fn resource_start(&self, bar: u32) -> Result<bindings::resource_size_t> {
+ if !Bar::index_is_valid(bar) {
+ return Err(EINVAL);
+ }
+
+ // SAFETY:
+ // - `bar` is a valid bar number, as guaranteed by the above call to `Bar::index_is_valid`,
+ // - by its type invariant `self.as_raw` is always a valid pointer to a `struct pci_dev`.
+ Ok(unsafe { bindings::pci_resource_start(self.as_raw(), bar.try_into()?) })
+ }
+
/// Returns the size of the given PCI bar resource.
pub fn resource_len(&self, bar: u32) -> Result<bindings::resource_size_t> {
if !Bar::index_is_valid(bar) {
@@ -410,6 +507,13 @@ impl Device {
// - by its type invariant `self.as_raw` is always a valid pointer to a `struct pci_dev`.
Ok(unsafe { bindings::pci_resource_len(self.as_raw(), bar.try_into()?) })
}
+
+ /// Returns the PCI class as a `Class` struct.
+ #[inline]
+ pub fn pci_class(&self) -> Class {
+ // SAFETY: `self.as_raw` is a valid pointer to a `struct pci_dev`.
+ Class::from_raw(unsafe { (*self.as_raw()).class })
+ }
}
impl Device<device::Bound> {
@@ -431,6 +535,47 @@ impl Device<device::Bound> {
) -> impl PinInit<Devres<Bar>, Error> + 'a {
self.iomap_region_sized::<0>(bar, name)
}
+
+ /// Returns an [`IrqRequest`] for the IRQ vector at the given index, if any.
+ pub fn irq_vector(&self, index: u32) -> Result<IrqRequest<'_>> {
+ // SAFETY: `self.as_raw` returns a valid pointer to a `struct pci_dev`.
+ let irq = unsafe { crate::bindings::pci_irq_vector(self.as_raw(), index) };
+ if irq < 0 {
+ return Err(crate::error::Error::from_errno(irq));
+ }
+ // SAFETY: `irq` is guaranteed to be a valid IRQ number for `&self`.
+ Ok(unsafe { IrqRequest::new(self.as_ref(), irq as u32) })
+ }
+
+ /// Returns a [`kernel::irq::Registration`] for the IRQ vector at the given
+ /// index.
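+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch of requesting IRQ vector 0 from process context;
+ /// `MyHandler` is a placeholder for any type implementing [`irq::Handler`]:
+ ///
+ /// ```
+ /// use kernel::c_str;
+ /// use kernel::device::{Bound, Device};
+ /// use kernel::irq::{self, IrqReturn};
+ /// use kernel::pci;
+ /// use kernel::prelude::*;
+ /// use kernel::sync::Arc;
+ ///
+ /// struct MyHandler;
+ ///
+ /// impl irq::Handler for MyHandler {
+ ///     fn handle(&self, _dev: &Device<Bound>) -> IrqReturn {
+ ///         IrqReturn::Handled
+ ///     }
+ /// }
+ ///
+ /// fn register_vector_zero(
+ ///     pdev: &pci::Device<Bound>,
+ ///     handler: impl PinInit<MyHandler, Error> + '_,
+ /// ) -> Result<Arc<irq::Registration<MyHandler>>> {
+ ///     let registration =
+ ///         pdev.request_irq(0, irq::Flags::SHARED, c_str!("my_pci_device"), handler)?;
+ ///
+ ///     Arc::pin_init(registration, GFP_KERNEL)
+ /// }
+ /// ```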
+ pub fn request_irq<'a, T: crate::irq::Handler + 'static>(
+ &'a self,
+ index: u32,
+ flags: irq::Flags,
+ name: &'static CStr,
+ handler: impl PinInit<T, Error> + 'a,
+ ) -> Result<impl PinInit<irq::Registration<T>, Error> + 'a> {
+ let request = self.irq_vector(index)?;
+
+ Ok(irq::Registration::<T>::new(request, flags, name, handler))
+ }
+
+ /// Returns a [`kernel::irq::ThreadedRegistration`] for the IRQ vector at
+ /// the given index.
+ pub fn request_threaded_irq<'a, T: crate::irq::ThreadedHandler + 'static>(
+ &'a self,
+ index: u32,
+ flags: irq::Flags,
+ name: &'static CStr,
+ handler: impl PinInit<T, Error> + 'a,
+ ) -> Result<impl PinInit<irq::ThreadedRegistration<T>, Error> + 'a> {
+ let request = self.irq_vector(index)?;
+
+ Ok(irq::ThreadedRegistration::<T>::new(
+ request, flags, name, handler,
+ ))
+ }
}
impl Device<device::Core> {
@@ -441,6 +586,7 @@ impl Device<device::Core> {
}
/// Enable bus-mastering for this device.
+ #[inline]
pub fn set_master(&self) {
// SAFETY: `self.as_raw` is guaranteed to be a pointer to a valid `struct pci_dev`.
unsafe { bindings::pci_set_master(self.as_raw()) };
@@ -455,7 +601,7 @@ kernel::impl_device_context_into_aref!(Device);
impl crate::dma::Device for Device<device::Core> {}
// SAFETY: Instances of `Device` are always reference-counted.
-unsafe impl crate::types::AlwaysRefCounted for Device {
+unsafe impl crate::sync::aref::AlwaysRefCounted for Device {
fn inc_ref(&self) {
// SAFETY: The existence of a shared reference guarantees that the refcount is non-zero.
unsafe { bindings::pci_dev_get(self.as_raw()) };
diff --git a/rust/kernel/pci/id.rs b/rust/kernel/pci/id.rs
new file mode 100644
index 000000000000..7f2a7f57507f
--- /dev/null
+++ b/rust/kernel/pci/id.rs
@@ -0,0 +1,578 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! PCI device identifiers and related types.
+//!
+//! This module contains PCI class codes, Vendor IDs, and supporting types.
+
+use crate::{bindings, error::code::EINVAL, error::Error, prelude::*};
+use core::fmt;
+
+/// PCI device class codes.
+///
+/// Each entry contains the full 24-bit PCI class code (base class in bits
+/// 23-16, subclass in bits 15-8, programming interface in bits 7-0).
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::{device::Core, pci::{self, Class}, prelude::*};
+/// fn probe_device(pdev: &pci::Device<Core>) -> Result {
+/// let pci_class = pdev.pci_class();
+/// dev_info!(
+/// pdev.as_ref(),
+/// "Detected PCI class: {}\n",
+/// pci_class
+/// );
+/// Ok(())
+/// }
+/// ```
+#[derive(Clone, Copy, PartialEq, Eq)]
+#[repr(transparent)]
+pub struct Class(u32);
+
+/// PCI class mask constants for matching [`Class`] codes.
+#[repr(u32)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum ClassMask {
+ /// Match the full 24-bit class code.
+ Full = 0xffffff,
+ /// Match the upper 16 bits of the class code (base class and subclass only).
+ ClassSubclass = 0xffff00,
+}
+
+macro_rules! define_all_pci_classes {
+ (
+ $($variant:ident = $binding:expr,)+
+ ) => {
+ impl Class {
+ $(
+ #[allow(missing_docs)]
+ pub const $variant: Self = Self(Self::to_24bit_class($binding));
+ )+
+ }
+
+ impl fmt::Display for Class {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ $(
+ &Self::$variant => write!(f, stringify!($variant)),
+ )+
+ _ => <Self as fmt::Debug>::fmt(self, f),
+ }
+ }
+ }
+ };
+}
+
+/// Once constructed, a [`Class`] contains a valid PCI class code.
+impl Class {
+ /// Create a [`Class`] from a raw 24-bit class code.
+ #[inline]
+ pub(super) fn from_raw(class_code: u32) -> Self {
+ Self(class_code)
+ }
+
+ /// Get the raw 24-bit class code value.
+ #[inline]
+ pub const fn as_raw(self) -> u32 {
+ self.0
+ }
+
+ // Converts a PCI class constant to 24-bit format.
+ //
+ // Many device drivers use only the upper 16 bits (base class and subclass),
+ // but some use the full 24 bits. In order to support both cases, store the
+ // class code as a 24-bit value, where 16-bit values are shifted up 8 bits.
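+ //
+ // For example, a 16-bit constant such as `0x0106` (SATA) becomes `0x010600`,
+ // while a full 24-bit constant such as `0x010601` (SATA AHCI) is returned
+ // unchanged.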
+ const fn to_24bit_class(val: u32) -> u32 {
+ if val > 0xFFFF {
+ val
+ } else {
+ val << 8
+ }
+ }
+}
+
+impl fmt::Debug for Class {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "0x{:06x}", self.0)
+ }
+}
+
+impl ClassMask {
+ /// Get the raw mask value.
+ #[inline]
+ pub const fn as_raw(self) -> u32 {
+ self as u32
+ }
+}
+
+impl TryFrom<u32> for ClassMask {
+ type Error = Error;
+
+ fn try_from(value: u32) -> Result<Self, Self::Error> {
+ match value {
+ 0xffffff => Ok(ClassMask::Full),
+ 0xffff00 => Ok(ClassMask::ClassSubclass),
+ _ => Err(EINVAL),
+ }
+ }
+}
+
+/// PCI vendor IDs.
+///
+/// Each entry contains the 16-bit PCI vendor ID as assigned by the PCI SIG.
+#[derive(Clone, Copy, PartialEq, Eq)]
+#[repr(transparent)]
+pub struct Vendor(u16);
+
+macro_rules! define_all_pci_vendors {
+ (
+ $($variant:ident = $binding:expr,)+
+ ) => {
+ impl Vendor {
+ $(
+ #[allow(missing_docs)]
+ pub const $variant: Self = Self($binding as u16);
+ )+
+ }
+
+ impl fmt::Display for Vendor {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ $(
+ &Self::$variant => write!(f, stringify!($variant)),
+ )+
+ _ => <Self as fmt::Debug>::fmt(self, f),
+ }
+ }
+ }
+ };
+}
+
+/// Once constructed, a `Vendor` contains a valid PCI Vendor ID.
+impl Vendor {
+ /// Create a Vendor from a raw 16-bit vendor ID.
+ #[inline]
+ pub(super) fn from_raw(vendor_id: u16) -> Self {
+ Self(vendor_id)
+ }
+
+ /// Get the raw 16-bit vendor ID value.
+ #[inline]
+ pub const fn as_raw(self) -> u16 {
+ self.0
+ }
+}
+
+impl fmt::Debug for Vendor {
+ #[inline]
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "0x{:04x}", self.0)
+ }
+}
+
+define_all_pci_classes! {
+ NOT_DEFINED = bindings::PCI_CLASS_NOT_DEFINED, // 0x000000
+ NOT_DEFINED_VGA = bindings::PCI_CLASS_NOT_DEFINED_VGA, // 0x000100
+
+ STORAGE_SCSI = bindings::PCI_CLASS_STORAGE_SCSI, // 0x010000
+ STORAGE_IDE = bindings::PCI_CLASS_STORAGE_IDE, // 0x010100
+ STORAGE_FLOPPY = bindings::PCI_CLASS_STORAGE_FLOPPY, // 0x010200
+ STORAGE_IPI = bindings::PCI_CLASS_STORAGE_IPI, // 0x010300
+ STORAGE_RAID = bindings::PCI_CLASS_STORAGE_RAID, // 0x010400
+ STORAGE_SATA = bindings::PCI_CLASS_STORAGE_SATA, // 0x010600
+ STORAGE_SATA_AHCI = bindings::PCI_CLASS_STORAGE_SATA_AHCI, // 0x010601
+ STORAGE_SAS = bindings::PCI_CLASS_STORAGE_SAS, // 0x010700
+ STORAGE_EXPRESS = bindings::PCI_CLASS_STORAGE_EXPRESS, // 0x010802
+ STORAGE_OTHER = bindings::PCI_CLASS_STORAGE_OTHER, // 0x018000
+
+ NETWORK_ETHERNET = bindings::PCI_CLASS_NETWORK_ETHERNET, // 0x020000
+ NETWORK_TOKEN_RING = bindings::PCI_CLASS_NETWORK_TOKEN_RING, // 0x020100
+ NETWORK_FDDI = bindings::PCI_CLASS_NETWORK_FDDI, // 0x020200
+ NETWORK_ATM = bindings::PCI_CLASS_NETWORK_ATM, // 0x020300
+ NETWORK_OTHER = bindings::PCI_CLASS_NETWORK_OTHER, // 0x028000
+
+ DISPLAY_VGA = bindings::PCI_CLASS_DISPLAY_VGA, // 0x030000
+ DISPLAY_XGA = bindings::PCI_CLASS_DISPLAY_XGA, // 0x030100
+ DISPLAY_3D = bindings::PCI_CLASS_DISPLAY_3D, // 0x030200
+ DISPLAY_OTHER = bindings::PCI_CLASS_DISPLAY_OTHER, // 0x038000
+
+ MULTIMEDIA_VIDEO = bindings::PCI_CLASS_MULTIMEDIA_VIDEO, // 0x040000
+ MULTIMEDIA_AUDIO = bindings::PCI_CLASS_MULTIMEDIA_AUDIO, // 0x040100
+ MULTIMEDIA_PHONE = bindings::PCI_CLASS_MULTIMEDIA_PHONE, // 0x040200
+ MULTIMEDIA_HD_AUDIO = bindings::PCI_CLASS_MULTIMEDIA_HD_AUDIO, // 0x040300
+ MULTIMEDIA_OTHER = bindings::PCI_CLASS_MULTIMEDIA_OTHER, // 0x048000
+
+ MEMORY_RAM = bindings::PCI_CLASS_MEMORY_RAM, // 0x050000
+ MEMORY_FLASH = bindings::PCI_CLASS_MEMORY_FLASH, // 0x050100
+ MEMORY_CXL = bindings::PCI_CLASS_MEMORY_CXL, // 0x050200
+ MEMORY_OTHER = bindings::PCI_CLASS_MEMORY_OTHER, // 0x058000
+
+ BRIDGE_HOST = bindings::PCI_CLASS_BRIDGE_HOST, // 0x060000
+ BRIDGE_ISA = bindings::PCI_CLASS_BRIDGE_ISA, // 0x060100
+ BRIDGE_EISA = bindings::PCI_CLASS_BRIDGE_EISA, // 0x060200
+ BRIDGE_MC = bindings::PCI_CLASS_BRIDGE_MC, // 0x060300
+ BRIDGE_PCI_NORMAL = bindings::PCI_CLASS_BRIDGE_PCI_NORMAL, // 0x060400
+ BRIDGE_PCI_SUBTRACTIVE = bindings::PCI_CLASS_BRIDGE_PCI_SUBTRACTIVE, // 0x060401
+ BRIDGE_PCMCIA = bindings::PCI_CLASS_BRIDGE_PCMCIA, // 0x060500
+ BRIDGE_NUBUS = bindings::PCI_CLASS_BRIDGE_NUBUS, // 0x060600
+ BRIDGE_CARDBUS = bindings::PCI_CLASS_BRIDGE_CARDBUS, // 0x060700
+ BRIDGE_RACEWAY = bindings::PCI_CLASS_BRIDGE_RACEWAY, // 0x060800
+ BRIDGE_OTHER = bindings::PCI_CLASS_BRIDGE_OTHER, // 0x068000
+
+ COMMUNICATION_SERIAL = bindings::PCI_CLASS_COMMUNICATION_SERIAL, // 0x070000
+ COMMUNICATION_PARALLEL = bindings::PCI_CLASS_COMMUNICATION_PARALLEL, // 0x070100
+ COMMUNICATION_MULTISERIAL = bindings::PCI_CLASS_COMMUNICATION_MULTISERIAL, // 0x070200
+ COMMUNICATION_MODEM = bindings::PCI_CLASS_COMMUNICATION_MODEM, // 0x070300
+ COMMUNICATION_OTHER = bindings::PCI_CLASS_COMMUNICATION_OTHER, // 0x078000
+
+ SYSTEM_PIC = bindings::PCI_CLASS_SYSTEM_PIC, // 0x080000
+ SYSTEM_PIC_IOAPIC = bindings::PCI_CLASS_SYSTEM_PIC_IOAPIC, // 0x080010
+ SYSTEM_PIC_IOXAPIC = bindings::PCI_CLASS_SYSTEM_PIC_IOXAPIC, // 0x080020
+ SYSTEM_DMA = bindings::PCI_CLASS_SYSTEM_DMA, // 0x080100
+ SYSTEM_TIMER = bindings::PCI_CLASS_SYSTEM_TIMER, // 0x080200
+ SYSTEM_RTC = bindings::PCI_CLASS_SYSTEM_RTC, // 0x080300
+ SYSTEM_PCI_HOTPLUG = bindings::PCI_CLASS_SYSTEM_PCI_HOTPLUG, // 0x080400
+ SYSTEM_SDHCI = bindings::PCI_CLASS_SYSTEM_SDHCI, // 0x080500
+ SYSTEM_RCEC = bindings::PCI_CLASS_SYSTEM_RCEC, // 0x080700
+ SYSTEM_OTHER = bindings::PCI_CLASS_SYSTEM_OTHER, // 0x088000
+
+ INPUT_KEYBOARD = bindings::PCI_CLASS_INPUT_KEYBOARD, // 0x090000
+ INPUT_PEN = bindings::PCI_CLASS_INPUT_PEN, // 0x090100
+ INPUT_MOUSE = bindings::PCI_CLASS_INPUT_MOUSE, // 0x090200
+ INPUT_SCANNER = bindings::PCI_CLASS_INPUT_SCANNER, // 0x090300
+ INPUT_GAMEPORT = bindings::PCI_CLASS_INPUT_GAMEPORT, // 0x090400
+ INPUT_OTHER = bindings::PCI_CLASS_INPUT_OTHER, // 0x098000
+
+ DOCKING_GENERIC = bindings::PCI_CLASS_DOCKING_GENERIC, // 0x0a0000
+ DOCKING_OTHER = bindings::PCI_CLASS_DOCKING_OTHER, // 0x0a8000
+
+ PROCESSOR_386 = bindings::PCI_CLASS_PROCESSOR_386, // 0x0b0000
+ PROCESSOR_486 = bindings::PCI_CLASS_PROCESSOR_486, // 0x0b0100
+ PROCESSOR_PENTIUM = bindings::PCI_CLASS_PROCESSOR_PENTIUM, // 0x0b0200
+ PROCESSOR_ALPHA = bindings::PCI_CLASS_PROCESSOR_ALPHA, // 0x0b1000
+ PROCESSOR_POWERPC = bindings::PCI_CLASS_PROCESSOR_POWERPC, // 0x0b2000
+ PROCESSOR_MIPS = bindings::PCI_CLASS_PROCESSOR_MIPS, // 0x0b3000
+ PROCESSOR_CO = bindings::PCI_CLASS_PROCESSOR_CO, // 0x0b4000
+
+ SERIAL_FIREWIRE = bindings::PCI_CLASS_SERIAL_FIREWIRE, // 0x0c0000
+ SERIAL_FIREWIRE_OHCI = bindings::PCI_CLASS_SERIAL_FIREWIRE_OHCI, // 0x0c0010
+ SERIAL_ACCESS = bindings::PCI_CLASS_SERIAL_ACCESS, // 0x0c0100
+ SERIAL_SSA = bindings::PCI_CLASS_SERIAL_SSA, // 0x0c0200
+ SERIAL_USB_UHCI = bindings::PCI_CLASS_SERIAL_USB_UHCI, // 0x0c0300
+ SERIAL_USB_OHCI = bindings::PCI_CLASS_SERIAL_USB_OHCI, // 0x0c0310
+ SERIAL_USB_EHCI = bindings::PCI_CLASS_SERIAL_USB_EHCI, // 0x0c0320
+ SERIAL_USB_XHCI = bindings::PCI_CLASS_SERIAL_USB_XHCI, // 0x0c0330
+ SERIAL_USB_CDNS = bindings::PCI_CLASS_SERIAL_USB_CDNS, // 0x0c0380
+ SERIAL_USB_DEVICE = bindings::PCI_CLASS_SERIAL_USB_DEVICE, // 0x0c03fe
+ SERIAL_FIBER = bindings::PCI_CLASS_SERIAL_FIBER, // 0x0c0400
+ SERIAL_SMBUS = bindings::PCI_CLASS_SERIAL_SMBUS, // 0x0c0500
+ SERIAL_IPMI_SMIC = bindings::PCI_CLASS_SERIAL_IPMI_SMIC, // 0x0c0700
+ SERIAL_IPMI_KCS = bindings::PCI_CLASS_SERIAL_IPMI_KCS, // 0x0c0701
+ SERIAL_IPMI_BT = bindings::PCI_CLASS_SERIAL_IPMI_BT, // 0x0c0702
+
+ WIRELESS_RF_CONTROLLER = bindings::PCI_CLASS_WIRELESS_RF_CONTROLLER, // 0x0d1000
+ WIRELESS_WHCI = bindings::PCI_CLASS_WIRELESS_WHCI, // 0x0d1010
+
+ INTELLIGENT_I2O = bindings::PCI_CLASS_INTELLIGENT_I2O, // 0x0e0000
+
+ SATELLITE_TV = bindings::PCI_CLASS_SATELLITE_TV, // 0x0f0000
+ SATELLITE_AUDIO = bindings::PCI_CLASS_SATELLITE_AUDIO, // 0x0f0100
+ SATELLITE_VOICE = bindings::PCI_CLASS_SATELLITE_VOICE, // 0x0f0300
+ SATELLITE_DATA = bindings::PCI_CLASS_SATELLITE_DATA, // 0x0f0400
+
+ CRYPT_NETWORK = bindings::PCI_CLASS_CRYPT_NETWORK, // 0x100000
+ CRYPT_ENTERTAINMENT = bindings::PCI_CLASS_CRYPT_ENTERTAINMENT, // 0x100100
+ CRYPT_OTHER = bindings::PCI_CLASS_CRYPT_OTHER, // 0x108000
+
+ SP_DPIO = bindings::PCI_CLASS_SP_DPIO, // 0x110000
+ SP_OTHER = bindings::PCI_CLASS_SP_OTHER, // 0x118000
+
+ ACCELERATOR_PROCESSING = bindings::PCI_CLASS_ACCELERATOR_PROCESSING, // 0x120000
+
+ OTHERS = bindings::PCI_CLASS_OTHERS, // 0xff0000
+}
+
+define_all_pci_vendors! {
+ PCI_SIG = bindings::PCI_VENDOR_ID_PCI_SIG, // 0x0001
+ LOONGSON = bindings::PCI_VENDOR_ID_LOONGSON, // 0x0014
+ SOLIDIGM = bindings::PCI_VENDOR_ID_SOLIDIGM, // 0x025e
+ TTTECH = bindings::PCI_VENDOR_ID_TTTECH, // 0x0357
+ DYNALINK = bindings::PCI_VENDOR_ID_DYNALINK, // 0x0675
+ UBIQUITI = bindings::PCI_VENDOR_ID_UBIQUITI, // 0x0777
+ BERKOM = bindings::PCI_VENDOR_ID_BERKOM, // 0x0871
+ ITTIM = bindings::PCI_VENDOR_ID_ITTIM, // 0x0b48
+ COMPAQ = bindings::PCI_VENDOR_ID_COMPAQ, // 0x0e11
+ LSI_LOGIC = bindings::PCI_VENDOR_ID_LSI_LOGIC, // 0x1000
+ ATI = bindings::PCI_VENDOR_ID_ATI, // 0x1002
+ VLSI = bindings::PCI_VENDOR_ID_VLSI, // 0x1004
+ ADL = bindings::PCI_VENDOR_ID_ADL, // 0x1005
+ NS = bindings::PCI_VENDOR_ID_NS, // 0x100b
+ TSENG = bindings::PCI_VENDOR_ID_TSENG, // 0x100c
+ WEITEK = bindings::PCI_VENDOR_ID_WEITEK, // 0x100e
+ DEC = bindings::PCI_VENDOR_ID_DEC, // 0x1011
+ CIRRUS = bindings::PCI_VENDOR_ID_CIRRUS, // 0x1013
+ IBM = bindings::PCI_VENDOR_ID_IBM, // 0x1014
+ UNISYS = bindings::PCI_VENDOR_ID_UNISYS, // 0x1018
+ COMPEX2 = bindings::PCI_VENDOR_ID_COMPEX2, // 0x101a
+ WD = bindings::PCI_VENDOR_ID_WD, // 0x101c
+ AMI = bindings::PCI_VENDOR_ID_AMI, // 0x101e
+ AMD = bindings::PCI_VENDOR_ID_AMD, // 0x1022
+ TRIDENT = bindings::PCI_VENDOR_ID_TRIDENT, // 0x1023
+ AI = bindings::PCI_VENDOR_ID_AI, // 0x1025
+ DELL = bindings::PCI_VENDOR_ID_DELL, // 0x1028
+ MATROX = bindings::PCI_VENDOR_ID_MATROX, // 0x102B
+ MOBILITY_ELECTRONICS = bindings::PCI_VENDOR_ID_MOBILITY_ELECTRONICS, // 0x14f2
+ CT = bindings::PCI_VENDOR_ID_CT, // 0x102c
+ MIRO = bindings::PCI_VENDOR_ID_MIRO, // 0x1031
+ NEC = bindings::PCI_VENDOR_ID_NEC, // 0x1033
+ FD = bindings::PCI_VENDOR_ID_FD, // 0x1036
+ SI = bindings::PCI_VENDOR_ID_SI, // 0x1039
+ HP = bindings::PCI_VENDOR_ID_HP, // 0x103c
+ HP_3PAR = bindings::PCI_VENDOR_ID_HP_3PAR, // 0x1590
+ PCTECH = bindings::PCI_VENDOR_ID_PCTECH, // 0x1042
+ ASUSTEK = bindings::PCI_VENDOR_ID_ASUSTEK, // 0x1043
+ DPT = bindings::PCI_VENDOR_ID_DPT, // 0x1044
+ OPTI = bindings::PCI_VENDOR_ID_OPTI, // 0x1045
+ ELSA = bindings::PCI_VENDOR_ID_ELSA, // 0x1048
+ STMICRO = bindings::PCI_VENDOR_ID_STMICRO, // 0x104A
+ BUSLOGIC = bindings::PCI_VENDOR_ID_BUSLOGIC, // 0x104B
+ TI = bindings::PCI_VENDOR_ID_TI, // 0x104c
+ SONY = bindings::PCI_VENDOR_ID_SONY, // 0x104d
+ WINBOND2 = bindings::PCI_VENDOR_ID_WINBOND2, // 0x1050
+ ANIGMA = bindings::PCI_VENDOR_ID_ANIGMA, // 0x1051
+ EFAR = bindings::PCI_VENDOR_ID_EFAR, // 0x1055
+ MOTOROLA = bindings::PCI_VENDOR_ID_MOTOROLA, // 0x1057
+ PROMISE = bindings::PCI_VENDOR_ID_PROMISE, // 0x105a
+ FOXCONN = bindings::PCI_VENDOR_ID_FOXCONN, // 0x105b
+ UMC = bindings::PCI_VENDOR_ID_UMC, // 0x1060
+ PICOPOWER = bindings::PCI_VENDOR_ID_PICOPOWER, // 0x1066
+ MYLEX = bindings::PCI_VENDOR_ID_MYLEX, // 0x1069
+ APPLE = bindings::PCI_VENDOR_ID_APPLE, // 0x106b
+ YAMAHA = bindings::PCI_VENDOR_ID_YAMAHA, // 0x1073
+ QLOGIC = bindings::PCI_VENDOR_ID_QLOGIC, // 0x1077
+ CYRIX = bindings::PCI_VENDOR_ID_CYRIX, // 0x1078
+ CONTAQ = bindings::PCI_VENDOR_ID_CONTAQ, // 0x1080
+ OLICOM = bindings::PCI_VENDOR_ID_OLICOM, // 0x108d
+ SUN = bindings::PCI_VENDOR_ID_SUN, // 0x108e
+ NI = bindings::PCI_VENDOR_ID_NI, // 0x1093
+ CMD = bindings::PCI_VENDOR_ID_CMD, // 0x1095
+ BROOKTREE = bindings::PCI_VENDOR_ID_BROOKTREE, // 0x109e
+ SGI = bindings::PCI_VENDOR_ID_SGI, // 0x10a9
+ WINBOND = bindings::PCI_VENDOR_ID_WINBOND, // 0x10ad
+ PLX = bindings::PCI_VENDOR_ID_PLX, // 0x10b5
+ MADGE = bindings::PCI_VENDOR_ID_MADGE, // 0x10b6
+ THREECOM = bindings::PCI_VENDOR_ID_3COM, // 0x10b7
+ AL = bindings::PCI_VENDOR_ID_AL, // 0x10b9
+ NEOMAGIC = bindings::PCI_VENDOR_ID_NEOMAGIC, // 0x10c8
+ TCONRAD = bindings::PCI_VENDOR_ID_TCONRAD, // 0x10da
+ ROHM = bindings::PCI_VENDOR_ID_ROHM, // 0x10db
+ NVIDIA = bindings::PCI_VENDOR_ID_NVIDIA, // 0x10de
+ IMS = bindings::PCI_VENDOR_ID_IMS, // 0x10e0
+ AMCC = bindings::PCI_VENDOR_ID_AMCC, // 0x10e8
+ AMPERE = bindings::PCI_VENDOR_ID_AMPERE, // 0x1def
+ INTERG = bindings::PCI_VENDOR_ID_INTERG, // 0x10ea
+ REALTEK = bindings::PCI_VENDOR_ID_REALTEK, // 0x10ec
+ XILINX = bindings::PCI_VENDOR_ID_XILINX, // 0x10ee
+ INIT = bindings::PCI_VENDOR_ID_INIT, // 0x1101
+ CREATIVE = bindings::PCI_VENDOR_ID_CREATIVE, // 0x1102
+ TTI = bindings::PCI_VENDOR_ID_TTI, // 0x1103
+ SIGMA = bindings::PCI_VENDOR_ID_SIGMA, // 0x1105
+ VIA = bindings::PCI_VENDOR_ID_VIA, // 0x1106
+ SIEMENS = bindings::PCI_VENDOR_ID_SIEMENS, // 0x110A
+ VORTEX = bindings::PCI_VENDOR_ID_VORTEX, // 0x1119
+ EF = bindings::PCI_VENDOR_ID_EF, // 0x111a
+ IDT = bindings::PCI_VENDOR_ID_IDT, // 0x111d
+ FORE = bindings::PCI_VENDOR_ID_FORE, // 0x1127
+ PHILIPS = bindings::PCI_VENDOR_ID_PHILIPS, // 0x1131
+ EICON = bindings::PCI_VENDOR_ID_EICON, // 0x1133
+ CISCO = bindings::PCI_VENDOR_ID_CISCO, // 0x1137
+ ZIATECH = bindings::PCI_VENDOR_ID_ZIATECH, // 0x1138
+ SYSKONNECT = bindings::PCI_VENDOR_ID_SYSKONNECT, // 0x1148
+ DIGI = bindings::PCI_VENDOR_ID_DIGI, // 0x114f
+ XIRCOM = bindings::PCI_VENDOR_ID_XIRCOM, // 0x115d
+ SERVERWORKS = bindings::PCI_VENDOR_ID_SERVERWORKS, // 0x1166
+ ALTERA = bindings::PCI_VENDOR_ID_ALTERA, // 0x1172
+ SBE = bindings::PCI_VENDOR_ID_SBE, // 0x1176
+ TOSHIBA = bindings::PCI_VENDOR_ID_TOSHIBA, // 0x1179
+ TOSHIBA_2 = bindings::PCI_VENDOR_ID_TOSHIBA_2, // 0x102f
+ ATTO = bindings::PCI_VENDOR_ID_ATTO, // 0x117c
+ RICOH = bindings::PCI_VENDOR_ID_RICOH, // 0x1180
+ DLINK = bindings::PCI_VENDOR_ID_DLINK, // 0x1186
+ ARTOP = bindings::PCI_VENDOR_ID_ARTOP, // 0x1191
+ ZEITNET = bindings::PCI_VENDOR_ID_ZEITNET, // 0x1193
+ FUJITSU_ME = bindings::PCI_VENDOR_ID_FUJITSU_ME, // 0x119e
+ MARVELL = bindings::PCI_VENDOR_ID_MARVELL, // 0x11ab
+ MARVELL_EXT = bindings::PCI_VENDOR_ID_MARVELL_EXT, // 0x1b4b
+ V3 = bindings::PCI_VENDOR_ID_V3, // 0x11b0
+ ATT = bindings::PCI_VENDOR_ID_ATT, // 0x11c1
+ SPECIALIX = bindings::PCI_VENDOR_ID_SPECIALIX, // 0x11cb
+ ANALOG_DEVICES = bindings::PCI_VENDOR_ID_ANALOG_DEVICES, // 0x11d4
+ ZORAN = bindings::PCI_VENDOR_ID_ZORAN, // 0x11de
+ COMPEX = bindings::PCI_VENDOR_ID_COMPEX, // 0x11f6
+ MICROSEMI = bindings::PCI_VENDOR_ID_MICROSEMI, // 0x11f8
+ RP = bindings::PCI_VENDOR_ID_RP, // 0x11fe
+ CYCLADES = bindings::PCI_VENDOR_ID_CYCLADES, // 0x120e
+ ESSENTIAL = bindings::PCI_VENDOR_ID_ESSENTIAL, // 0x120f
+ O2 = bindings::PCI_VENDOR_ID_O2, // 0x1217
+ THREEDX = bindings::PCI_VENDOR_ID_3DFX, // 0x121a
+ AVM = bindings::PCI_VENDOR_ID_AVM, // 0x1244
+ STALLION = bindings::PCI_VENDOR_ID_STALLION, // 0x124d
+ AT = bindings::PCI_VENDOR_ID_AT, // 0x1259
+ ASIX = bindings::PCI_VENDOR_ID_ASIX, // 0x125b
+ ESS = bindings::PCI_VENDOR_ID_ESS, // 0x125d
+ SATSAGEM = bindings::PCI_VENDOR_ID_SATSAGEM, // 0x1267
+ ENSONIQ = bindings::PCI_VENDOR_ID_ENSONIQ, // 0x1274
+ TRANSMETA = bindings::PCI_VENDOR_ID_TRANSMETA, // 0x1279
+ ROCKWELL = bindings::PCI_VENDOR_ID_ROCKWELL, // 0x127A
+ ITE = bindings::PCI_VENDOR_ID_ITE, // 0x1283
+ ALTEON = bindings::PCI_VENDOR_ID_ALTEON, // 0x12ae
+ NVIDIA_SGS = bindings::PCI_VENDOR_ID_NVIDIA_SGS, // 0x12d2
+ PERICOM = bindings::PCI_VENDOR_ID_PERICOM, // 0x12D8
+ AUREAL = bindings::PCI_VENDOR_ID_AUREAL, // 0x12eb
+ ELECTRONICDESIGNGMBH = bindings::PCI_VENDOR_ID_ELECTRONICDESIGNGMBH, // 0x12f8
+ ESDGMBH = bindings::PCI_VENDOR_ID_ESDGMBH, // 0x12fe
+ CB = bindings::PCI_VENDOR_ID_CB, // 0x1307
+ SIIG = bindings::PCI_VENDOR_ID_SIIG, // 0x131f
+ RADISYS = bindings::PCI_VENDOR_ID_RADISYS, // 0x1331
+ MICRO_MEMORY = bindings::PCI_VENDOR_ID_MICRO_MEMORY, // 0x1332
+ DOMEX = bindings::PCI_VENDOR_ID_DOMEX, // 0x134a
+ INTASHIELD = bindings::PCI_VENDOR_ID_INTASHIELD, // 0x135a
+ QUATECH = bindings::PCI_VENDOR_ID_QUATECH, // 0x135C
+ SEALEVEL = bindings::PCI_VENDOR_ID_SEALEVEL, // 0x135e
+ HYPERCOPE = bindings::PCI_VENDOR_ID_HYPERCOPE, // 0x1365
+ DIGIGRAM = bindings::PCI_VENDOR_ID_DIGIGRAM, // 0x1369
+ KAWASAKI = bindings::PCI_VENDOR_ID_KAWASAKI, // 0x136b
+ CNET = bindings::PCI_VENDOR_ID_CNET, // 0x1371
+ LMC = bindings::PCI_VENDOR_ID_LMC, // 0x1376
+ NETGEAR = bindings::PCI_VENDOR_ID_NETGEAR, // 0x1385
+ APPLICOM = bindings::PCI_VENDOR_ID_APPLICOM, // 0x1389
+ MOXA = bindings::PCI_VENDOR_ID_MOXA, // 0x1393
+ CCD = bindings::PCI_VENDOR_ID_CCD, // 0x1397
+ EXAR = bindings::PCI_VENDOR_ID_EXAR, // 0x13a8
+ MICROGATE = bindings::PCI_VENDOR_ID_MICROGATE, // 0x13c0
+ THREEWARE = bindings::PCI_VENDOR_ID_3WARE, // 0x13C1
+ IOMEGA = bindings::PCI_VENDOR_ID_IOMEGA, // 0x13ca
+ ABOCOM = bindings::PCI_VENDOR_ID_ABOCOM, // 0x13D1
+ SUNDANCE = bindings::PCI_VENDOR_ID_SUNDANCE, // 0x13f0
+ CMEDIA = bindings::PCI_VENDOR_ID_CMEDIA, // 0x13f6
+ ADVANTECH = bindings::PCI_VENDOR_ID_ADVANTECH, // 0x13fe
+ MEILHAUS = bindings::PCI_VENDOR_ID_MEILHAUS, // 0x1402
+ LAVA = bindings::PCI_VENDOR_ID_LAVA, // 0x1407
+ TIMEDIA = bindings::PCI_VENDOR_ID_TIMEDIA, // 0x1409
+ ICE = bindings::PCI_VENDOR_ID_ICE, // 0x1412
+ MICROSOFT = bindings::PCI_VENDOR_ID_MICROSOFT, // 0x1414
+ OXSEMI = bindings::PCI_VENDOR_ID_OXSEMI, // 0x1415
+ CHELSIO = bindings::PCI_VENDOR_ID_CHELSIO, // 0x1425
+ EDIMAX = bindings::PCI_VENDOR_ID_EDIMAX, // 0x1432
+ ADLINK = bindings::PCI_VENDOR_ID_ADLINK, // 0x144a
+ SAMSUNG = bindings::PCI_VENDOR_ID_SAMSUNG, // 0x144d
+ GIGABYTE = bindings::PCI_VENDOR_ID_GIGABYTE, // 0x1458
+ AMBIT = bindings::PCI_VENDOR_ID_AMBIT, // 0x1468
+ MYRICOM = bindings::PCI_VENDOR_ID_MYRICOM, // 0x14c1
+ MEDIATEK = bindings::PCI_VENDOR_ID_MEDIATEK, // 0x14c3
+ TITAN = bindings::PCI_VENDOR_ID_TITAN, // 0x14D2
+ PANACOM = bindings::PCI_VENDOR_ID_PANACOM, // 0x14d4
+ SIPACKETS = bindings::PCI_VENDOR_ID_SIPACKETS, // 0x14d9
+ AFAVLAB = bindings::PCI_VENDOR_ID_AFAVLAB, // 0x14db
+ AMPLICON = bindings::PCI_VENDOR_ID_AMPLICON, // 0x14dc
+ BCM_GVC = bindings::PCI_VENDOR_ID_BCM_GVC, // 0x14a4
+ BROADCOM = bindings::PCI_VENDOR_ID_BROADCOM, // 0x14e4
+ TOPIC = bindings::PCI_VENDOR_ID_TOPIC, // 0x151f
+ MAINPINE = bindings::PCI_VENDOR_ID_MAINPINE, // 0x1522
+ ENE = bindings::PCI_VENDOR_ID_ENE, // 0x1524
+ SYBA = bindings::PCI_VENDOR_ID_SYBA, // 0x1592
+ MORETON = bindings::PCI_VENDOR_ID_MORETON, // 0x15aa
+ VMWARE = bindings::PCI_VENDOR_ID_VMWARE, // 0x15ad
+ ZOLTRIX = bindings::PCI_VENDOR_ID_ZOLTRIX, // 0x15b0
+ MELLANOX = bindings::PCI_VENDOR_ID_MELLANOX, // 0x15b3
+ DFI = bindings::PCI_VENDOR_ID_DFI, // 0x15bd
+ QUICKNET = bindings::PCI_VENDOR_ID_QUICKNET, // 0x15e2
+ ADDIDATA = bindings::PCI_VENDOR_ID_ADDIDATA, // 0x15B8
+ PDC = bindings::PCI_VENDOR_ID_PDC, // 0x15e9
+ FARSITE = bindings::PCI_VENDOR_ID_FARSITE, // 0x1619
+ ARIMA = bindings::PCI_VENDOR_ID_ARIMA, // 0x161f
+ BROCADE = bindings::PCI_VENDOR_ID_BROCADE, // 0x1657
+ SIBYTE = bindings::PCI_VENDOR_ID_SIBYTE, // 0x166d
+ ATHEROS = bindings::PCI_VENDOR_ID_ATHEROS, // 0x168c
+ NETCELL = bindings::PCI_VENDOR_ID_NETCELL, // 0x169c
+ CENATEK = bindings::PCI_VENDOR_ID_CENATEK, // 0x16CA
+ SYNOPSYS = bindings::PCI_VENDOR_ID_SYNOPSYS, // 0x16c3
+ USR = bindings::PCI_VENDOR_ID_USR, // 0x16ec
+ VITESSE = bindings::PCI_VENDOR_ID_VITESSE, // 0x1725
+ LINKSYS = bindings::PCI_VENDOR_ID_LINKSYS, // 0x1737
+ ALTIMA = bindings::PCI_VENDOR_ID_ALTIMA, // 0x173b
+ CAVIUM = bindings::PCI_VENDOR_ID_CAVIUM, // 0x177d
+ TECHWELL = bindings::PCI_VENDOR_ID_TECHWELL, // 0x1797
+ BELKIN = bindings::PCI_VENDOR_ID_BELKIN, // 0x1799
+ RDC = bindings::PCI_VENDOR_ID_RDC, // 0x17f3
+ GLI = bindings::PCI_VENDOR_ID_GLI, // 0x17a0
+ LENOVO = bindings::PCI_VENDOR_ID_LENOVO, // 0x17aa
+ QCOM = bindings::PCI_VENDOR_ID_QCOM, // 0x17cb
+ CDNS = bindings::PCI_VENDOR_ID_CDNS, // 0x17cd
+ ARECA = bindings::PCI_VENDOR_ID_ARECA, // 0x17d3
+ S2IO = bindings::PCI_VENDOR_ID_S2IO, // 0x17d5
+ SITECOM = bindings::PCI_VENDOR_ID_SITECOM, // 0x182d
+ TOPSPIN = bindings::PCI_VENDOR_ID_TOPSPIN, // 0x1867
+ COMMTECH = bindings::PCI_VENDOR_ID_COMMTECH, // 0x18f7
+ SILAN = bindings::PCI_VENDOR_ID_SILAN, // 0x1904
+ RENESAS = bindings::PCI_VENDOR_ID_RENESAS, // 0x1912
+ SOLARFLARE = bindings::PCI_VENDOR_ID_SOLARFLARE, // 0x1924
+ TDI = bindings::PCI_VENDOR_ID_TDI, // 0x192E
+ NXP = bindings::PCI_VENDOR_ID_NXP, // 0x1957
+ PASEMI = bindings::PCI_VENDOR_ID_PASEMI, // 0x1959
+ ATTANSIC = bindings::PCI_VENDOR_ID_ATTANSIC, // 0x1969
+ JMICRON = bindings::PCI_VENDOR_ID_JMICRON, // 0x197B
+ KORENIX = bindings::PCI_VENDOR_ID_KORENIX, // 0x1982
+ HUAWEI = bindings::PCI_VENDOR_ID_HUAWEI, // 0x19e5
+ NETRONOME = bindings::PCI_VENDOR_ID_NETRONOME, // 0x19ee
+ QMI = bindings::PCI_VENDOR_ID_QMI, // 0x1a32
+ AZWAVE = bindings::PCI_VENDOR_ID_AZWAVE, // 0x1a3b
+ REDHAT_QUMRANET = bindings::PCI_VENDOR_ID_REDHAT_QUMRANET, // 0x1af4
+ ASMEDIA = bindings::PCI_VENDOR_ID_ASMEDIA, // 0x1b21
+ REDHAT = bindings::PCI_VENDOR_ID_REDHAT, // 0x1b36
+ WCHIC = bindings::PCI_VENDOR_ID_WCHIC, // 0x1c00
+ SILICOM_DENMARK = bindings::PCI_VENDOR_ID_SILICOM_DENMARK, // 0x1c2c
+ AMAZON_ANNAPURNA_LABS = bindings::PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, // 0x1c36
+ CIRCUITCO = bindings::PCI_VENDOR_ID_CIRCUITCO, // 0x1cc8
+ AMAZON = bindings::PCI_VENDOR_ID_AMAZON, // 0x1d0f
+ ZHAOXIN = bindings::PCI_VENDOR_ID_ZHAOXIN, // 0x1d17
+ ROCKCHIP = bindings::PCI_VENDOR_ID_ROCKCHIP, // 0x1d87
+ HYGON = bindings::PCI_VENDOR_ID_HYGON, // 0x1d94
+ META = bindings::PCI_VENDOR_ID_META, // 0x1d9b
+ FUNGIBLE = bindings::PCI_VENDOR_ID_FUNGIBLE, // 0x1dad
+ HXT = bindings::PCI_VENDOR_ID_HXT, // 0x1dbf
+ TEKRAM = bindings::PCI_VENDOR_ID_TEKRAM, // 0x1de1
+ RPI = bindings::PCI_VENDOR_ID_RPI, // 0x1de4
+ ALIBABA = bindings::PCI_VENDOR_ID_ALIBABA, // 0x1ded
+ CXL = bindings::PCI_VENDOR_ID_CXL, // 0x1e98
+ TEHUTI = bindings::PCI_VENDOR_ID_TEHUTI, // 0x1fc9
+ SUNIX = bindings::PCI_VENDOR_ID_SUNIX, // 0x1fd4
+ HINT = bindings::PCI_VENDOR_ID_HINT, // 0x3388
+ THREEDLABS = bindings::PCI_VENDOR_ID_3DLABS, // 0x3d3d
+ NETXEN = bindings::PCI_VENDOR_ID_NETXEN, // 0x4040
+ AKS = bindings::PCI_VENDOR_ID_AKS, // 0x416c
+ WCHCN = bindings::PCI_VENDOR_ID_WCHCN, // 0x4348
+ ACCESSIO = bindings::PCI_VENDOR_ID_ACCESSIO, // 0x494f
+ S3 = bindings::PCI_VENDOR_ID_S3, // 0x5333
+ DUNORD = bindings::PCI_VENDOR_ID_DUNORD, // 0x5544
+ DCI = bindings::PCI_VENDOR_ID_DCI, // 0x6666
+ GLENFLY = bindings::PCI_VENDOR_ID_GLENFLY, // 0x6766
+ INTEL = bindings::PCI_VENDOR_ID_INTEL, // 0x8086
+ WANGXUN = bindings::PCI_VENDOR_ID_WANGXUN, // 0x8088
+ SCALEMP = bindings::PCI_VENDOR_ID_SCALEMP, // 0x8686
+ COMPUTONE = bindings::PCI_VENDOR_ID_COMPUTONE, // 0x8e0e
+ KTI = bindings::PCI_VENDOR_ID_KTI, // 0x8e2e
+ ADAPTEC = bindings::PCI_VENDOR_ID_ADAPTEC, // 0x9004
+ ADAPTEC2 = bindings::PCI_VENDOR_ID_ADAPTEC2, // 0x9005
+ HOLTEK = bindings::PCI_VENDOR_ID_HOLTEK, // 0x9412
+ NETMOS = bindings::PCI_VENDOR_ID_NETMOS, // 0x9710
+ THREECOM_2 = bindings::PCI_VENDOR_ID_3COM_2, // 0xa727
+ SOLIDRUN = bindings::PCI_VENDOR_ID_SOLIDRUN, // 0xd063
+ DIGIUM = bindings::PCI_VENDOR_ID_DIGIUM, // 0xd161
+ TIGERJET = bindings::PCI_VENDOR_ID_TIGERJET, // 0xe159
+ XILINX_RME = bindings::PCI_VENDOR_ID_XILINX_RME, // 0xea60
+ XEN = bindings::PCI_VENDOR_ID_XEN, // 0x5853
+ OCZ = bindings::PCI_VENDOR_ID_OCZ, // 0x1b85
+ NCUBE = bindings::PCI_VENDOR_ID_NCUBE, // 0x10ff
+}
diff --git a/rust/kernel/platform.rs b/rust/kernel/platform.rs
index 8f028c76f9fa..7205fe3416d3 100644
--- a/rust/kernel/platform.rs
+++ b/rust/kernel/platform.rs
@@ -10,6 +10,7 @@ use crate::{
driver,
error::{from_result, to_result, Result},
io::{mem::IoRequest, Resource},
+ irq::{self, IrqRequest},
of,
prelude::*,
types::Opaque,
@@ -284,6 +285,181 @@ impl Device<Bound> {
}
}
+macro_rules! define_irq_accessor_by_index {
+ (
+ $(#[$meta:meta])* $fn_name:ident,
+ $request_fn:ident,
+ $reg_type:ident,
+ $handler_trait:ident
+ ) => {
+ $(#[$meta])*
+ pub fn $fn_name<'a, T: irq::$handler_trait + 'static>(
+ &'a self,
+ flags: irq::Flags,
+ index: u32,
+ name: &'static CStr,
+ handler: impl PinInit<T, Error> + 'a,
+ ) -> Result<impl PinInit<irq::$reg_type<T>, Error> + 'a> {
+ let request = self.$request_fn(index)?;
+
+ Ok(irq::$reg_type::<T>::new(
+ request,
+ flags,
+ name,
+ handler,
+ ))
+ }
+ };
+}
+
+macro_rules! define_irq_accessor_by_name {
+ (
+ $(#[$meta:meta])* $fn_name:ident,
+ $request_fn:ident,
+ $reg_type:ident,
+ $handler_trait:ident
+ ) => {
+ $(#[$meta])*
+ pub fn $fn_name<'a, T: irq::$handler_trait + 'static>(
+ &'a self,
+ flags: irq::Flags,
+ irq_name: &CStr,
+ name: &'static CStr,
+ handler: impl PinInit<T, Error> + 'a,
+ ) -> Result<impl PinInit<irq::$reg_type<T>, Error> + 'a> {
+ let request = self.$request_fn(irq_name)?;
+
+ Ok(irq::$reg_type::<T>::new(
+ request,
+ flags,
+ name,
+ handler,
+ ))
+ }
+ };
+}
+
+impl Device<Bound> {
+ /// Returns an [`IrqRequest`] for the IRQ at the given index, if any.
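+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch of turning the returned request into an
+ /// [`irq::Registration`]; `MyHandler` is a placeholder for any type
+ /// implementing [`irq::Handler`]:
+ ///
+ /// ```
+ /// use kernel::c_str;
+ /// use kernel::device::{Bound, Device};
+ /// use kernel::irq::{self, IrqReturn};
+ /// use kernel::platform;
+ /// use kernel::prelude::*;
+ /// use kernel::sync::Arc;
+ ///
+ /// struct MyHandler;
+ ///
+ /// impl irq::Handler for MyHandler {
+ ///     fn handle(&self, _dev: &Device<Bound>) -> IrqReturn {
+ ///         IrqReturn::Handled
+ ///     }
+ /// }
+ ///
+ /// fn register_first_irq(
+ ///     pdev: &platform::Device<Bound>,
+ ///     handler: impl PinInit<MyHandler, Error> + '_,
+ /// ) -> Result<Arc<irq::Registration<MyHandler>>> {
+ ///     let request = pdev.irq_by_index(0)?;
+ ///
+ ///     let registration =
+ ///         irq::Registration::new(request, irq::Flags::SHARED, c_str!("my_driver"), handler);
+ ///
+ ///     Arc::pin_init(registration, GFP_KERNEL)
+ /// }
+ /// ```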
+ pub fn irq_by_index(&self, index: u32) -> Result<IrqRequest<'_>> {
+ // SAFETY: `self.as_raw` returns a valid pointer to a `struct platform_device`.
+ let irq = unsafe { bindings::platform_get_irq(self.as_raw(), index) };
+
+ if irq < 0 {
+ return Err(Error::from_errno(irq));
+ }
+
+ // SAFETY: `irq` is guaranteed to be a valid IRQ number for `&self`.
+ Ok(unsafe { IrqRequest::new(self.as_ref(), irq as u32) })
+ }
+
+ /// Returns an [`IrqRequest`] for the IRQ at the given index, but does not
+ /// print an error if the IRQ cannot be obtained.
+ pub fn optional_irq_by_index(&self, index: u32) -> Result<IrqRequest<'_>> {
+ // SAFETY: `self.as_raw` returns a valid pointer to a `struct platform_device`.
+ let irq = unsafe { bindings::platform_get_irq_optional(self.as_raw(), index) };
+
+ if irq < 0 {
+ return Err(Error::from_errno(irq));
+ }
+
+ // SAFETY: `irq` is guaranteed to be a valid IRQ number for `&self`.
+ Ok(unsafe { IrqRequest::new(self.as_ref(), irq as u32) })
+ }
+
+ /// Returns an [`IrqRequest`] for the IRQ with the given name, if any.
+ pub fn irq_by_name(&self, name: &CStr) -> Result<IrqRequest<'_>> {
+ // SAFETY: `self.as_raw` returns a valid pointer to a `struct platform_device`.
+ let irq = unsafe { bindings::platform_get_irq_byname(self.as_raw(), name.as_char_ptr()) };
+
+ if irq < 0 {
+ return Err(Error::from_errno(irq));
+ }
+
+ // SAFETY: `irq` is guaranteed to be a valid IRQ number for `&self`.
+ Ok(unsafe { IrqRequest::new(self.as_ref(), irq as u32) })
+ }
+
+ /// Returns an [`IrqRequest`] for the IRQ with the given name, but does not
+ /// print an error if the IRQ cannot be obtained.
+ pub fn optional_irq_by_name(&self, name: &CStr) -> Result<IrqRequest<'_>> {
+ // SAFETY: `self.as_raw` returns a valid pointer to a `struct platform_device`.
+ let irq = unsafe {
+ bindings::platform_get_irq_byname_optional(self.as_raw(), name.as_char_ptr())
+ };
+
+ if irq < 0 {
+ return Err(Error::from_errno(irq));
+ }
+
+ // SAFETY: `irq` is guaranteed to be a valid IRQ number for `&self`.
+ Ok(unsafe { IrqRequest::new(self.as_ref(), irq as u32) })
+ }
+
+ define_irq_accessor_by_index!(
+ /// Returns an [`irq::Registration`] for the IRQ at the given index.
+ request_irq_by_index,
+ irq_by_index,
+ Registration,
+ Handler
+ );
+ define_irq_accessor_by_name!(
+ /// Returns an [`irq::Registration`] for the IRQ with the given name.
+ request_irq_by_name,
+ irq_by_name,
+ Registration,
+ Handler
+ );
+ define_irq_accessor_by_index!(
+ /// Does the same as [`Self::request_irq_by_index`], except that it does
+ /// not print an error message if the IRQ cannot be obtained.
+ request_optional_irq_by_index,
+ optional_irq_by_index,
+ Registration,
+ Handler
+ );
+ define_irq_accessor_by_name!(
+ /// Does the same as [`Self::request_irq_by_name`], except that it does
+ /// not print an error message if the IRQ cannot be obtained.
+ request_optional_irq_by_name,
+ optional_irq_by_name,
+ Registration,
+ Handler
+ );
+
+ define_irq_accessor_by_index!(
+ /// Returns an [`irq::ThreadedRegistration`] for the IRQ at the given index.
+ request_threaded_irq_by_index,
+ irq_by_index,
+ ThreadedRegistration,
+ ThreadedHandler
+ );
+ define_irq_accessor_by_name!(
+ /// Returns an [`irq::ThreadedRegistration`] for the IRQ with the given name.
+ request_threaded_irq_by_name,
+ irq_by_name,
+ ThreadedRegistration,
+ ThreadedHandler
+ );
+ define_irq_accessor_by_index!(
+ /// Does the same as [`Self::request_threaded_irq_by_index`], except
+ /// that it does not print an error message if the IRQ cannot be
+ /// obtained.
+ request_optional_threaded_irq_by_index,
+ optional_irq_by_index,
+ ThreadedRegistration,
+ ThreadedHandler
+ );
+ define_irq_accessor_by_name!(
+ /// Does the same as [`Self::request_threaded_irq_by_name`], except that
+ /// it does not print an error message if the IRQ cannot be obtained.
+ request_optional_threaded_irq_by_name,
+ optional_irq_by_name,
+ ThreadedRegistration,
+ ThreadedHandler
+ );
+}
+
// SAFETY: `Device` is a transparent wrapper of a type that doesn't depend on `Device`'s generic
// argument.
kernel::impl_device_context_deref!(unsafe { Device });
@@ -292,7 +468,7 @@ kernel::impl_device_context_into_aref!(Device);
impl crate::dma::Device for Device<device::Core> {}
// SAFETY: Instances of `Device` are always reference-counted.
-unsafe impl crate::types::AlwaysRefCounted for Device {
+unsafe impl crate::sync::aref::AlwaysRefCounted for Device {
fn inc_ref(&self) {
// SAFETY: The existence of a shared reference guarantees that the refcount is non-zero.
unsafe { bindings::get_device(self.as_ref().as_raw()) };
diff --git a/rust/kernel/processor.rs b/rust/kernel/processor.rs
new file mode 100644
index 000000000000..85b49b3614dd
--- /dev/null
+++ b/rust/kernel/processor.rs
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Processor related primitives.
+//!
+//! C header: [`include/linux/processor.h`](srctree/include/linux/processor.h)
+
+/// Lower CPU power consumption or yield to a hyperthreaded twin processor.
+///
+/// It also happens to serve as a compiler barrier.
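+///
+/// # Examples
+///
+/// A minimal sketch of a bounded busy-wait loop; the `ready` closure stands in
+/// for whatever hardware condition is actually being polled:
+///
+/// ```
+/// use kernel::processor::cpu_relax;
+///
+/// fn wait_until(mut ready: impl FnMut() -> bool, mut retries: u32) -> bool {
+///     while retries > 0 {
+///         if ready() {
+///             return true;
+///         }
+///         // Hint to the CPU that we are spinning before checking again.
+///         cpu_relax();
+///         retries -= 1;
+///     }
+///     false
+/// }
+///
+/// assert!(wait_until(|| true, 8));
+/// ```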
+#[inline]
+pub fn cpu_relax() {
+ // SAFETY: Always safe to call.
+ unsafe { bindings::cpu_relax() }
+}
diff --git a/rust/kernel/regulator.rs b/rust/kernel/regulator.rs
index 65f3a125348f..b55a201e5029 100644
--- a/rust/kernel/regulator.rs
+++ b/rust/kernel/regulator.rs
@@ -18,7 +18,7 @@
use crate::{
bindings,
- device::Device,
+ device::{Bound, Device},
error::{from_err_ptr, to_result, Result},
prelude::*,
};
@@ -30,7 +30,6 @@ mod private {
impl Sealed for super::Enabled {}
impl Sealed for super::Disabled {}
- impl Sealed for super::Dynamic {}
}
/// A trait representing the different states a [`Regulator`] can be in.
@@ -50,13 +49,6 @@ pub struct Enabled;
/// own an `enable` reference count, but the regulator may still be on.
pub struct Disabled;
-/// A state that models the C API. The [`Regulator`] can be either enabled or
-/// disabled, and the user is in control of the reference count. This is also
-/// the default state.
-///
-/// Use [`Regulator::is_enabled`] to check the regulator's current state.
-pub struct Dynamic;
-
impl RegulatorState for Enabled {
const DISABLE_ON_DROP: bool = true;
}
@@ -65,14 +57,9 @@ impl RegulatorState for Disabled {
const DISABLE_ON_DROP: bool = false;
}
-impl RegulatorState for Dynamic {
- const DISABLE_ON_DROP: bool = false;
-}
-
/// A trait that abstracts the ability to check if a [`Regulator`] is enabled.
pub trait IsEnabled: RegulatorState {}
impl IsEnabled for Disabled {}
-impl IsEnabled for Dynamic {}
/// An error that can occur when trying to convert a [`Regulator`] between states.
pub struct Error<State: RegulatorState> {
@@ -82,6 +69,41 @@ pub struct Error<State: RegulatorState> {
/// The regulator that caused the error, so that the operation may be retried.
pub regulator: Regulator<State>,
}
+/// Obtains and enables a [`devres`]-managed regulator for a device.
+///
+/// This calls [`regulator_disable()`] and [`regulator_put()`] automatically on
+/// driver detach.
+///
+/// This API is identical to `devm_regulator_get_enable()`, and should be
+/// preferred over the [`Regulator<T: RegulatorState>`] API if the caller only
+/// cares about the regulator being enabled.
+///
+/// [`devres`]: https://docs.kernel.org/driver-api/driver-model/devres.html
+/// [`regulator_disable()`]: https://docs.kernel.org/driver-api/regulator.html#c.regulator_disable
+/// [`regulator_put()`]: https://docs.kernel.org/driver-api/regulator.html#c.regulator_put
+pub fn devm_enable(dev: &Device<Bound>, name: &CStr) -> Result {
+ // SAFETY: `dev` is a valid and bound device, while `name` is a valid C
+ // string.
+ to_result(unsafe { bindings::devm_regulator_get_enable(dev.as_raw(), name.as_ptr()) })
+}
+
+/// Same as [`devm_enable`], but calls `devm_regulator_get_enable_optional`
+/// instead.
+///
+/// This obtains and enables a [`devres`]-managed regulator for a device, but
+/// does not print a message or provide a dummy if the regulator is not found.
+///
+/// This calls [`regulator_disable()`] and [`regulator_put()`] automatically on
+/// driver detach.
+///
+/// [`devres`]: https://docs.kernel.org/driver-api/driver-model/devres.html
+/// [`regulator_disable()`]: https://docs.kernel.org/driver-api/regulator.html#c.regulator_disable
+/// [`regulator_put()`]: https://docs.kernel.org/driver-api/regulator.html#c.regulator_put
+pub fn devm_enable_optional(dev: &Device<Bound>, name: &CStr) -> Result {
+ // SAFETY: `dev` is a valid and bound device, while `name` is a valid C
+ // string.
+ to_result(unsafe { bindings::devm_regulator_get_enable_optional(dev.as_raw(), name.as_ptr()) })
+}
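
For illustration only (not part of the patch): both helpers are meant to be called from a probe path against a bound device. A minimal sketch, assuming illustrative supply names "vdd" and "vaux":

    use kernel::device::{Bound, Device};
    use kernel::prelude::*;
    use kernel::{c_str, regulator};

    fn power_on(dev: &Device<Bound>) -> Result {
        // Obtain and enable the main supply; it stays enabled until `dev` is
        // unbound.
        regulator::devm_enable(dev, c_str!("vdd"))?;
        // Optional supply: per the documentation above, no message is printed
        // and no dummy is provided if it is absent. The error is propagated
        // here, but a caller could also choose to ignore it.
        regulator::devm_enable_optional(dev, c_str!("vaux"))?;
        Ok(())
    }
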
/// A `struct regulator` abstraction.
///
@@ -159,6 +181,29 @@ pub struct Error<State: RegulatorState> {
/// }
/// ```
///
+/// If a driver only cares about the regulator being on for as long as it is bound
+/// to a device, then it should use [`devm_enable`] or [`devm_enable_optional`].
+/// This should be the default use-case unless more fine-grained control over
+/// the regulator's state is required.
+///
+/// [`devm_enable`]: crate::regulator::devm_enable
+/// [`devm_enable_optional`]: crate::regulator::devm_enable_optional
+///
+/// ```
+/// # use kernel::prelude::*;
+/// # use kernel::c_str;
+/// # use kernel::device::{Bound, Device};
+/// # use kernel::regulator;
+/// fn enable(dev: &Device<Bound>) -> Result {
+/// // Obtain a reference to a (fictitious) regulator and enable it. This
+/// // call only returns whether the operation succeeded.
+/// regulator::devm_enable(dev, c_str!("vcc"))?;
+///
+/// // The regulator will be disabled and put when `dev` is unbound.
+/// Ok(())
+/// }
+/// ```
+///
/// ## Disabling a regulator
///
/// ```
@@ -183,64 +228,13 @@ pub struct Error<State: RegulatorState> {
/// }
/// ```
///
-/// ## Using [`Regulator<Dynamic>`]
-///
-/// This example mimics the behavior of the C API, where the user is in
-/// control of the enabled reference count. This is useful for drivers that
-/// might call enable and disable to manage the `enable` reference count at
-/// runtime, perhaps as a result of `open()` and `close()` calls or whatever
-/// other driver-specific or subsystem-specific hooks.
-///
-/// ```
-/// # use kernel::prelude::*;
-/// # use kernel::c_str;
-/// # use kernel::device::Device;
-/// # use kernel::regulator::{Regulator, Dynamic};
-/// struct PrivateData {
-/// regulator: Regulator<Dynamic>,
-/// }
-///
-/// // A fictictious probe function that obtains a regulator and sets it up.
-/// fn probe(dev: &Device) -> Result<PrivateData> {
-/// // Obtain a reference to a (fictitious) regulator.
-/// let mut regulator = Regulator::<Dynamic>::get(dev, c_str!("vcc"))?;
-///
-/// Ok(PrivateData { regulator })
-/// }
-///
-/// // A fictictious function that indicates that the device is going to be used.
-/// fn open(dev: &Device, data: &mut PrivateData) -> Result {
-/// // Increase the `enabled` reference count.
-/// data.regulator.enable()?;
-///
-/// Ok(())
-/// }
-///
-/// fn close(dev: &Device, data: &mut PrivateData) -> Result {
-/// // Decrease the `enabled` reference count.
-/// data.regulator.disable()?;
-///
-/// Ok(())
-/// }
-///
-/// fn remove(dev: &Device, data: PrivateData) -> Result {
-/// // `PrivateData` is dropped here, which will drop the
-/// // `Regulator<Dynamic>` in turn.
-/// //
-/// // The reference that was obtained by `regulator_get()` will be
-/// // released, but it is up to the user to make sure that the number of calls
-/// // to `enable()` and `disabled()` are balanced before this point.
-/// Ok(())
-/// }
-/// ```
-///
/// # Invariants
///
/// - `inner` is a non-null wrapper over a pointer to a `struct
/// regulator` obtained from [`regulator_get()`].
///
/// [`regulator_get()`]: https://docs.kernel.org/driver-api/regulator.html#c.regulator_get
-pub struct Regulator<State = Dynamic>
+pub struct Regulator<State>
where
State: RegulatorState,
{
@@ -267,11 +261,8 @@ impl<T: RegulatorState> Regulator<T> {
pub fn get_voltage(&self) -> Result<Voltage> {
// SAFETY: Safe as per the type invariants of `Regulator`.
let voltage = unsafe { bindings::regulator_get_voltage(self.inner.as_ptr()) };
- if voltage < 0 {
- Err(kernel::error::Error::from_errno(voltage))
- } else {
- Ok(Voltage::from_microvolts(voltage))
- }
+
+ to_result(voltage).map(|()| Voltage::from_microvolts(voltage))
}
fn get_internal(dev: &Device, name: &CStr) -> Result<Regulator<T>> {
@@ -289,12 +280,12 @@ impl<T: RegulatorState> Regulator<T> {
})
}
- fn enable_internal(&mut self) -> Result {
+ fn enable_internal(&self) -> Result {
// SAFETY: Safe as per the type invariants of `Regulator`.
to_result(unsafe { bindings::regulator_enable(self.inner.as_ptr()) })
}
- fn disable_internal(&mut self) -> Result {
+ fn disable_internal(&self) -> Result {
// SAFETY: Safe as per the type invariants of `Regulator`.
to_result(unsafe { bindings::regulator_disable(self.inner.as_ptr()) })
}
@@ -310,7 +301,7 @@ impl Regulator<Disabled> {
pub fn try_into_enabled(self) -> Result<Regulator<Enabled>, Error<Disabled>> {
// We will be transferring the ownership of our `regulator_get()` count to
// `Regulator<Enabled>`.
- let mut regulator = ManuallyDrop::new(self);
+ let regulator = ManuallyDrop::new(self);
regulator
.enable_internal()
@@ -339,7 +330,7 @@ impl Regulator<Enabled> {
pub fn try_into_disabled(self) -> Result<Regulator<Disabled>, Error<Enabled>> {
// We will be transferring the ownership of our `regulator_get()` count
// to `Regulator<Disabled>`.
- let mut regulator = ManuallyDrop::new(self);
+ let regulator = ManuallyDrop::new(self);
regulator
.disable_internal()
@@ -354,28 +345,6 @@ impl Regulator<Enabled> {
}
}
-impl Regulator<Dynamic> {
- /// Obtains a [`Regulator`] instance from the system. The current state of
- /// the regulator is unknown and it is up to the user to manage the enabled
- /// reference count.
- ///
- /// This closely mimics the behavior of the C API and can be used to
- /// dynamically manage the enabled reference count at runtime.
- pub fn get(dev: &Device, name: &CStr) -> Result<Self> {
- Regulator::get_internal(dev, name)
- }
-
- /// Increases the `enabled` reference count.
- pub fn enable(&mut self) -> Result {
- self.enable_internal()
- }
-
- /// Decreases the `enabled` reference count.
- pub fn disable(&mut self) -> Result {
- self.disable_internal()
- }
-}
-
impl<T: IsEnabled> Regulator<T> {
/// Checks if the regulator is enabled.
pub fn is_enabled(&self) -> bool {
@@ -398,6 +367,14 @@ impl<T: RegulatorState> Drop for Regulator<T> {
}
}
+// SAFETY: It is safe to send a `Regulator<T>` across threads. In particular, a
+// `Regulator<T>` can be dropped from any thread.
+unsafe impl<T: RegulatorState> Send for Regulator<T> {}
+
+// SAFETY: It is safe to send a `&Regulator<T>` across threads because the C side
+// handles its own locking.
+unsafe impl<T: RegulatorState> Sync for Regulator<T> {}
+
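
For illustration only (not part of the patch): these impls let a regulator handle live in reference-counted driver data that is created, shared and dropped on different threads. A minimal sketch, assuming a hypothetical `DriverData` type:

    use kernel::prelude::*;
    use kernel::regulator::{Enabled, Regulator, Voltage};
    use kernel::sync::Arc;

    struct DriverData {
        // `Regulator<Enabled>` is `Send` and `Sync`, so an `Arc<DriverData>`
        // may be handed to and dropped by other threads.
        vcc: Regulator<Enabled>,
    }

    fn share(vcc: Regulator<Enabled>) -> Result<Arc<DriverData>> {
        // Moving the regulator into the `Arc` relies on `Send`.
        Ok(Arc::new(DriverData { vcc }, GFP_KERNEL)?)
    }

    fn report(data: &DriverData) -> Result<Voltage> {
        // Using it through a shared reference from any thread relies on `Sync`.
        data.vcc.get_voltage()
    }
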
/// A voltage.
///
/// This type represents a voltage value in microvolts.