From ce735e73dd59b169b877cedd0753297c81c2a091 Mon Sep 17 00:00:00 2001 From: Danilo Krummrich Date: Mon, 14 Apr 2025 15:18:06 +0200 Subject: rust: auxiliary: add auxiliary device / driver abstractions Implement the basic auxiliary abstractions required to implement a driver matching an auxiliary device. The design and implementation is analogous to PCI and platform and is based on the generic device / driver abstractions. Acked-by: Greg Kroah-Hartman Reviewed-by: Alice Ryhl Link: https://lore.kernel.org/r/20250414131934.28418-4-dakr@kernel.org [ Fix typos, `let _ =` => `drop()`, use `kernel::ffi`. - Danilo ] Signed-off-by: Danilo Krummrich --- rust/helpers/helpers.c | 1 + 1 file changed, 1 insertion(+) (limited to 'rust/helpers/helpers.c') diff --git a/rust/helpers/helpers.c b/rust/helpers/helpers.c index e1c21eba9b15..6b279279cb12 100644 --- a/rust/helpers/helpers.c +++ b/rust/helpers/helpers.c @@ -7,6 +7,7 @@ * Sorted alphabetically. */ +#include "auxiliary.c" #include "blk.c" #include "bug.c" #include "build_assert.c" -- cgit v1.2.3 From c284d3e423382be3591d5b1e402e330e6c3f726c Mon Sep 17 00:00:00 2001 From: Asahi Lina Date: Fri, 11 Apr 2025 01:55:26 +0200 Subject: rust: drm: gem: Add GEM object abstraction DRM GEM is the DRM memory management subsystem used by most modern drivers; add a Rust abstraction for DRM GEM. This includes the BaseObject trait, which contains operations shared by all GEM object classes. Signed-off-by: Asahi Lina Reviewed-by: Alyssa Rosenzweig Reviewed-by: Lyude Paul Link: https://lore.kernel.org/r/20250410235546.43736-8-dakr@kernel.org [ Rework of GEM object abstractions * switch to the Opaque type * fix (mutable) references to struct drm_gem_object (which in this context is UB) * drop all custom reference types in favor of AlwaysRefCounted * bunch of minor changes and simplifications (e.g. IntoGEMObject trait) * write and fix safety and invariant comments * remove necessity for and convert 'as' casts * original source archive: https://archive.is/dD5SL - Danilo ] [ Fix missing CONFIG_DRM guards in rust/helpers/drm.c. 
- Danilo ] Signed-off-by: Danilo Krummrich --- rust/bindings/bindings_helper.h | 2 + rust/helpers/drm.c | 23 +++ rust/helpers/helpers.c | 1 + rust/kernel/drm/device.rs | 4 +- rust/kernel/drm/driver.rs | 2 +- rust/kernel/drm/gem/mod.rs | 320 ++++++++++++++++++++++++++++++++++++++++ rust/kernel/drm/mod.rs | 1 + 7 files changed, 351 insertions(+), 2 deletions(-) create mode 100644 rust/helpers/drm.c create mode 100644 rust/kernel/drm/gem/mod.rs (limited to 'rust/helpers/helpers.c') diff --git a/rust/bindings/bindings_helper.h b/rust/bindings/bindings_helper.h index 7b7c270db401..e0676ba99d37 100644 --- a/rust/bindings/bindings_helper.h +++ b/rust/bindings/bindings_helper.h @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -60,3 +61,4 @@ const gfp_t RUST_CONST_HELPER___GFP_ZERO = __GFP_ZERO; const gfp_t RUST_CONST_HELPER___GFP_HIGHMEM = ___GFP_HIGHMEM; const gfp_t RUST_CONST_HELPER___GFP_NOWARN = ___GFP_NOWARN; const blk_features_t RUST_CONST_HELPER_BLK_FEAT_ROTATIONAL = BLK_FEAT_ROTATIONAL; +const fop_flags_t RUST_CONST_HELPER_FOP_UNSIGNED_OFFSET = FOP_UNSIGNED_OFFSET; diff --git a/rust/helpers/drm.c b/rust/helpers/drm.c new file mode 100644 index 000000000000..450b406c6f27 --- /dev/null +++ b/rust/helpers/drm.c @@ -0,0 +1,23 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include + +#ifdef CONFIG_DRM + +void rust_helper_drm_gem_object_get(struct drm_gem_object *obj) +{ + drm_gem_object_get(obj); +} + +void rust_helper_drm_gem_object_put(struct drm_gem_object *obj) +{ + drm_gem_object_put(obj); +} + +__u64 rust_helper_drm_vma_node_offset_addr(struct drm_vma_offset_node *node) +{ + return drm_vma_node_offset_addr(node); +} + +#endif diff --git a/rust/helpers/helpers.c b/rust/helpers/helpers.c index 6b279279cb12..6c205454a18e 100644 --- a/rust/helpers/helpers.c +++ b/rust/helpers/helpers.c @@ -15,6 +15,7 @@ #include "cpumask.c" #include "cred.c" #include "device.c" +#include "drm.c" #include "err.c" #include "fs.c" #include "io.c" diff --git a/rust/kernel/drm/device.rs b/rust/kernel/drm/device.rs index 520f51bb40af..74c9a3dd719e 100644 --- a/rust/kernel/drm/device.rs +++ b/rust/kernel/drm/device.rs @@ -89,9 +89,11 @@ impl Device { driver_features: drm::driver::FEAT_GEM, ioctls: T::IOCTLS.as_ptr(), num_ioctls: T::IOCTLS.len() as i32, - fops: core::ptr::null_mut() as _, + fops: &Self::GEM_FOPS as _, }; + const GEM_FOPS: bindings::file_operations = drm::gem::create_fops(); + /// Create a new `drm::Device` for a `drm::Driver`. pub fn new(dev: &device::Device, data: impl PinInit) -> Result> { // SAFETY: diff --git a/rust/kernel/drm/driver.rs b/rust/kernel/drm/driver.rs index f0c2936c3374..acb638086131 100644 --- a/rust/kernel/drm/driver.rs +++ b/rust/kernel/drm/driver.rs @@ -88,7 +88,7 @@ pub struct AllocOps { } /// Trait for memory manager implementations. Implemented internally. -pub trait AllocImpl: super::private::Sealed { +pub trait AllocImpl: super::private::Sealed + drm::gem::IntoGEMObject { /// The C callback operations for this memory manager. const ALLOC_OPS: AllocOps; } diff --git a/rust/kernel/drm/gem/mod.rs b/rust/kernel/drm/gem/mod.rs new file mode 100644 index 000000000000..0cafa4a42420 --- /dev/null +++ b/rust/kernel/drm/gem/mod.rs @@ -0,0 +1,320 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT + +//! DRM GEM API +//! +//! 
C header: [`include/linux/drm/drm_gem.h`](srctree/include/linux/drm/drm_gem.h) + +use crate::{ + alloc::flags::*, + bindings, drm, + drm::driver::{AllocImpl, AllocOps}, + error::{to_result, Result}, + prelude::*, + types::{ARef, Opaque}, +}; +use core::{mem, ops::Deref, ptr, ptr::NonNull}; + +/// GEM object functions, which must be implemented by drivers. +pub trait BaseDriverObject: Sync + Send + Sized { + /// Create a new driver data object for a GEM object of a given size. + fn new(dev: &drm::Device, size: usize) -> impl PinInit; + + /// Open a new handle to an existing object, associated with a File. + fn open( + _obj: &<::Driver as drm::Driver>::Object, + _file: &drm::File<<::Driver as drm::Driver>::File>, + ) -> Result { + Ok(()) + } + + /// Close a handle to an existing object, associated with a File. + fn close( + _obj: &<::Driver as drm::Driver>::Object, + _file: &drm::File<<::Driver as drm::Driver>::File>, + ) { + } +} + +/// Trait that represents a GEM object subtype +pub trait IntoGEMObject: Sized + super::private::Sealed { + /// Owning driver for this type + type Driver: drm::Driver; + + /// Returns a reference to the raw `drm_gem_object` structure, which must be valid as long as + /// this owning object is valid. + #[allow(clippy::wrong_self_convention)] + fn into_gem_obj(&self) -> &Opaque; + + /// Converts a pointer to a `struct drm_gem_object` into a pointer to `Self`. + fn from_gem_obj(obj: *mut bindings::drm_gem_object) -> *mut Self; +} + +/// Trait which must be implemented by drivers using base GEM objects. +pub trait DriverObject: BaseDriverObject> { + /// Parent `Driver` for this object. + type Driver: drm::Driver; +} + +extern "C" fn open_callback, U: BaseObject>( + raw_obj: *mut bindings::drm_gem_object, + raw_file: *mut bindings::drm_file, +) -> core::ffi::c_int { + // SAFETY: `open_callback` is only ever called with a valid pointer to a `struct drm_file`. + let file = unsafe { + drm::File::<<::Driver as drm::Driver>::File>::as_ref(raw_file) + }; + let obj = + <<::Driver as drm::Driver>::Object as IntoGEMObject>::from_gem_obj( + raw_obj, + ); + + // SAFETY: `from_gem_obj()` returns a valid pointer as long as the type is correct and the + // `raw_obj` we got is valid. + match T::open(unsafe { &*obj }, file) { + Err(e) => e.to_errno(), + Ok(()) => 0, + } +} + +extern "C" fn close_callback, U: BaseObject>( + raw_obj: *mut bindings::drm_gem_object, + raw_file: *mut bindings::drm_file, +) { + // SAFETY: `open_callback` is only ever called with a valid pointer to a `struct drm_file`. + let file = unsafe { + drm::File::<<::Driver as drm::Driver>::File>::as_ref(raw_file) + }; + let obj = + <<::Driver as drm::Driver>::Object as IntoGEMObject>::from_gem_obj( + raw_obj, + ); + + // SAFETY: `from_gem_obj()` returns a valid pointer as long as the type is correct and the + // `raw_obj` we got is valid. + T::close(unsafe { &*obj }, file); +} + +impl IntoGEMObject for Object { + type Driver = T::Driver; + + fn into_gem_obj(&self) -> &Opaque { + &self.obj + } + + fn from_gem_obj(obj: *mut bindings::drm_gem_object) -> *mut Self { + // SAFETY: All of our objects are Object. + unsafe { crate::container_of!(obj, Object, obj).cast_mut() } + } +} + +/// Base operations shared by all GEM object classes +pub trait BaseObject +where + Self: crate::types::AlwaysRefCounted + IntoGEMObject, +{ + /// Returns the size of the object in bytes. + fn size(&self) -> usize { + // SAFETY: `self.into_gem_obj()` is guaranteed to be a pointer to a valid `struct + // drm_gem_object`. 
+ unsafe { (*self.into_gem_obj().get()).size } + } + + /// Creates a new handle for the object associated with a given `File` + /// (or returns an existing one). + fn create_handle( + &self, + file: &drm::File<<::Driver as drm::Driver>::File>, + ) -> Result { + let mut handle: u32 = 0; + // SAFETY: The arguments are all valid per the type invariants. + to_result(unsafe { + bindings::drm_gem_handle_create( + file.as_raw().cast(), + self.into_gem_obj().get(), + &mut handle, + ) + })?; + Ok(handle) + } + + /// Looks up an object by its handle for a given `File`. + fn lookup_handle( + file: &drm::File<<::Driver as drm::Driver>::File>, + handle: u32, + ) -> Result> { + // SAFETY: The arguments are all valid per the type invariants. + let ptr = unsafe { bindings::drm_gem_object_lookup(file.as_raw().cast(), handle) }; + let ptr = ::from_gem_obj(ptr); + let ptr = NonNull::new(ptr).ok_or(ENOENT)?; + + // SAFETY: We take ownership of the reference of `drm_gem_object_lookup()`. + Ok(unsafe { ARef::from_raw(ptr) }) + } + + /// Creates an mmap offset to map the object from userspace. + fn create_mmap_offset(&self) -> Result { + // SAFETY: The arguments are valid per the type invariant. + to_result(unsafe { bindings::drm_gem_create_mmap_offset(self.into_gem_obj().get()) })?; + + // SAFETY: The arguments are valid per the type invariant. + Ok(unsafe { + bindings::drm_vma_node_offset_addr(ptr::addr_of_mut!( + (*self.into_gem_obj().get()).vma_node + )) + }) + } +} + +impl BaseObject for T where Self: crate::types::AlwaysRefCounted + IntoGEMObject {} + +/// A base GEM object. +/// +/// Invariants +/// +/// - `self.obj` is a valid instance of a `struct drm_gem_object`. +/// - `self.dev` is always a valid pointer to a `struct drm_device`. +#[repr(C)] +#[pin_data] +pub struct Object { + obj: Opaque, + dev: *const drm::Device, + #[pin] + data: T, +} + +impl Object { + /// The size of this object's structure. + pub const SIZE: usize = mem::size_of::(); + + const OBJECT_FUNCS: bindings::drm_gem_object_funcs = bindings::drm_gem_object_funcs { + free: Some(Self::free_callback), + open: Some(open_callback::>), + close: Some(close_callback::>), + print_info: None, + export: None, + pin: None, + unpin: None, + get_sg_table: None, + vmap: None, + vunmap: None, + mmap: None, + status: None, + vm_ops: core::ptr::null_mut(), + evict: None, + rss: None, + }; + + /// Create a new GEM object. + pub fn new(dev: &drm::Device, size: usize) -> Result> { + let obj: Pin> = KBox::pin_init( + try_pin_init!(Self { + obj: Opaque::new(bindings::drm_gem_object::default()), + data <- T::new(dev, size), + // INVARIANT: The drm subsystem guarantees that the `struct drm_device` will live + // as long as the GEM object lives. + dev, + }), + GFP_KERNEL, + )?; + + // SAFETY: `obj.as_raw()` is guaranteed to be valid by the initialization above. + unsafe { (*obj.as_raw()).funcs = &Self::OBJECT_FUNCS }; + + // SAFETY: The arguments are all valid per the type invariants. + to_result(unsafe { bindings::drm_gem_object_init(dev.as_raw(), obj.obj.get(), size) })?; + + // SAFETY: We never move out of `Self`. + let ptr = KBox::into_raw(unsafe { Pin::into_inner_unchecked(obj) }); + + // SAFETY: `ptr` comes from `KBox::into_raw` and hence can't be NULL. + let ptr = unsafe { NonNull::new_unchecked(ptr) }; + + // SAFETY: We take over the initial reference count from `drm_gem_object_init()`. + Ok(unsafe { ARef::from_raw(ptr) }) + } + + /// Returns the `Device` that owns this GEM object. 
+ pub fn dev(&self) -> &drm::Device { + // SAFETY: The DRM subsystem guarantees that the `struct drm_device` will live as long as + // the GEM object lives, hence the pointer must be valid. + unsafe { &*self.dev } + } + + fn as_raw(&self) -> *mut bindings::drm_gem_object { + self.obj.get() + } + + extern "C" fn free_callback(obj: *mut bindings::drm_gem_object) { + // SAFETY: All of our objects are of type `Object`. + let this = unsafe { crate::container_of!(obj, Self, obj) }.cast_mut(); + + // SAFETY: The C code only ever calls this callback with a valid pointer to a `struct + // drm_gem_object`. + unsafe { bindings::drm_gem_object_release(obj) }; + + // SAFETY: All of our objects are allocated via `KBox`, and we're in the + // free callback which guarantees this object has zero remaining references, + // so we can drop it. + let _ = unsafe { KBox::from_raw(this) }; + } +} + +// SAFETY: Instances of `Object` are always reference-counted. +unsafe impl crate::types::AlwaysRefCounted for Object { + fn inc_ref(&self) { + // SAFETY: The existence of a shared reference guarantees that the refcount is non-zero. + unsafe { bindings::drm_gem_object_get(self.as_raw()) }; + } + + unsafe fn dec_ref(obj: NonNull) { + // SAFETY: `obj` is a valid pointer to an `Object`. + let obj = unsafe { obj.as_ref() }; + + // SAFETY: The safety requirements guarantee that the refcount is non-zero. + unsafe { bindings::drm_gem_object_put(obj.as_raw()) } + } +} + +impl super::private::Sealed for Object {} + +impl Deref for Object { + type Target = T; + + fn deref(&self) -> &Self::Target { + &self.data + } +} + +impl AllocImpl for Object { + const ALLOC_OPS: AllocOps = AllocOps { + gem_create_object: None, + prime_handle_to_fd: None, + prime_fd_to_handle: None, + gem_prime_import: None, + gem_prime_import_sg_table: None, + dumb_create: None, + dumb_map_offset: None, + }; +} + +pub(super) const fn create_fops() -> bindings::file_operations { + // SAFETY: As by the type invariant, it is safe to initialize `bindings::file_operations` + // zeroed. + let mut fops: bindings::file_operations = unsafe { core::mem::zeroed() }; + + fops.owner = core::ptr::null_mut(); + fops.open = Some(bindings::drm_open); + fops.release = Some(bindings::drm_release); + fops.unlocked_ioctl = Some(bindings::drm_ioctl); + #[cfg(CONFIG_COMPAT)] + { + fops.compat_ioctl = Some(bindings::drm_compat_ioctl); + } + fops.poll = Some(bindings::drm_poll); + fops.read = Some(bindings::drm_read); + fops.llseek = Some(bindings::noop_llseek); + fops.mmap = Some(bindings::drm_gem_mmap); + fops.fop_flags = bindings::FOP_UNSIGNED_OFFSET; + + fops +} diff --git a/rust/kernel/drm/mod.rs b/rust/kernel/drm/mod.rs index b36223e5bd98..1b82b6945edf 100644 --- a/rust/kernel/drm/mod.rs +++ b/rust/kernel/drm/mod.rs @@ -5,6 +5,7 @@ pub mod device; pub mod driver; pub mod file; +pub mod gem; pub mod ioctl; pub use self::device::Device; -- cgit v1.2.3 From 210b81578efbe5c5e7748e50d313e1a90b03df55 Mon Sep 17 00:00:00 2001 From: Tamir Duberstein Date: Wed, 23 Apr 2025 09:54:38 -0400 Subject: rust: xarray: Add an abstraction for XArray MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit `XArray` is an efficient sparse array of pointers. Add a Rust abstraction for this type. This implementation bounds the element type on `ForeignOwnable` and requires explicit locking for all operations. Future work may leverage RCU to enable lockless operation. 
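For orientation, here is a condensed usage sketch based on the doc-test this patch adds to rust/kernel/xarray.rs; it uses only types and methods defined in the patch, with `KBox<u32>` standing in for any `ForeignOwnable` element type:

```rust
use kernel::alloc::KBox;
use kernel::prelude::*;
use kernel::xarray::{AllocKind, XArray};

fn xarray_demo() -> Result {
    // Initialize the array in place; `KBox<u32>` satisfies the
    // `ForeignOwnable` bound on the element type.
    let xa = KBox::pin_init(XArray::new(AllocKind::Alloc1), GFP_KERNEL)?;

    // Every operation goes through an explicit lock guard.
    let mut guard = xa.lock();
    let old = guard.store(0, KBox::new(0xdead_u32, GFP_KERNEL)?, GFP_KERNEL)?;
    assert!(old.is_none());
    assert_eq!(guard.get(0).copied(), Some(0xdead));

    // `remove()` hands ownership of the stored value back to the caller.
    assert_eq!(guard.remove(0).as_deref().copied(), Some(0xdead));
    Ok(())
}
```

Note that all accessors live on the lock `Guard` rather than on `XArray` itself, which is how the explicit-locking requirement is enforced at the type level.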
Inspired-by: Maíra Canal Inspired-by: Asahi Lina Reviewed-by: Andreas Hindborg Reviewed-by: Alice Ryhl Signed-off-by: Tamir Duberstein Link: https://lore.kernel.org/r/20250423-rust-xarray-bindings-v19-2-83cdcf11c114@gmail.com Signed-off-by: Andreas Hindborg --- rust/bindings/bindings_helper.h | 6 + rust/helpers/helpers.c | 1 + rust/helpers/xarray.c | 28 ++++ rust/kernel/lib.rs | 1 + rust/kernel/xarray.rs | 275 ++++++++++++++++++++++++++++++++++++++++ 5 files changed, 311 insertions(+) create mode 100644 rust/helpers/xarray.c create mode 100644 rust/kernel/xarray.rs (limited to 'rust/helpers/helpers.c') diff --git a/rust/bindings/bindings_helper.h b/rust/bindings/bindings_helper.h index ab37e1d35c70..e0bcd130b494 100644 --- a/rust/bindings/bindings_helper.h +++ b/rust/bindings/bindings_helper.h @@ -37,6 +37,7 @@ #include #include #include +#include #include #if defined(CONFIG_DRM_PANIC_SCREEN_QR_CODE) @@ -55,3 +56,8 @@ const gfp_t RUST_CONST_HELPER___GFP_ZERO = __GFP_ZERO; const gfp_t RUST_CONST_HELPER___GFP_HIGHMEM = ___GFP_HIGHMEM; const gfp_t RUST_CONST_HELPER___GFP_NOWARN = ___GFP_NOWARN; const blk_features_t RUST_CONST_HELPER_BLK_FEAT_ROTATIONAL = BLK_FEAT_ROTATIONAL; + +const xa_mark_t RUST_CONST_HELPER_XA_PRESENT = XA_PRESENT; + +const gfp_t RUST_CONST_HELPER_XA_FLAGS_ALLOC = XA_FLAGS_ALLOC; +const gfp_t RUST_CONST_HELPER_XA_FLAGS_ALLOC1 = XA_FLAGS_ALLOC1; diff --git a/rust/helpers/helpers.c b/rust/helpers/helpers.c index 1e7c84df7252..80785b1e7a63 100644 --- a/rust/helpers/helpers.c +++ b/rust/helpers/helpers.c @@ -38,3 +38,4 @@ #include "vmalloc.c" #include "wait.c" #include "workqueue.c" +#include "xarray.c" diff --git a/rust/helpers/xarray.c b/rust/helpers/xarray.c new file mode 100644 index 000000000000..60b299f11451 --- /dev/null +++ b/rust/helpers/xarray.c @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include + +int rust_helper_xa_err(void *entry) +{ + return xa_err(entry); +} + +void rust_helper_xa_init_flags(struct xarray *xa, gfp_t flags) +{ + return xa_init_flags(xa, flags); +} + +int rust_helper_xa_trylock(struct xarray *xa) +{ + return xa_trylock(xa); +} + +void rust_helper_xa_lock(struct xarray *xa) +{ + return xa_lock(xa); +} + +void rust_helper_xa_unlock(struct xarray *xa) +{ + return xa_unlock(xa); +} diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs index de07aadd1ff5..715fab6b1345 100644 --- a/rust/kernel/lib.rs +++ b/rust/kernel/lib.rs @@ -88,6 +88,7 @@ pub mod transmute; pub mod types; pub mod uaccess; pub mod workqueue; +pub mod xarray; #[doc(hidden)] pub use bindings; diff --git a/rust/kernel/xarray.rs b/rust/kernel/xarray.rs new file mode 100644 index 000000000000..75719e7bb491 --- /dev/null +++ b/rust/kernel/xarray.rs @@ -0,0 +1,275 @@ +// SPDX-License-Identifier: GPL-2.0 + +//! XArray abstraction. +//! +//! C header: [`include/linux/xarray.h`](srctree/include/linux/xarray.h) + +use crate::{ + alloc, bindings, build_assert, + error::{Error, Result}, + types::{ForeignOwnable, NotThreadSafe, Opaque}, +}; +use core::{iter, marker::PhantomData, mem, pin::Pin, ptr::NonNull}; +use pin_init::{pin_data, pin_init, pinned_drop, PinInit}; + +/// An array which efficiently maps sparse integer indices to owned objects. +/// +/// This is similar to a [`crate::alloc::kvec::Vec>`], but more efficient when there are +/// holes in the index space, and can be efficiently grown. +/// +/// # Invariants +/// +/// `self.xa` is always an initialized and valid [`bindings::xarray`] whose entries are either +/// `XA_ZERO_ENTRY` or came from `T::into_foreign`. 
+/// +/// # Examples +/// +/// ```rust +/// use kernel::alloc::KBox; +/// use kernel::xarray::{AllocKind, XArray}; +/// +/// let xa = KBox::pin_init(XArray::new(AllocKind::Alloc1), GFP_KERNEL)?; +/// +/// let dead = KBox::new(0xdead, GFP_KERNEL)?; +/// let beef = KBox::new(0xbeef, GFP_KERNEL)?; +/// +/// let mut guard = xa.lock(); +/// +/// assert_eq!(guard.get(0), None); +/// +/// assert_eq!(guard.store(0, dead, GFP_KERNEL)?.as_deref(), None); +/// assert_eq!(guard.get(0).copied(), Some(0xdead)); +/// +/// *guard.get_mut(0).unwrap() = 0xffff; +/// assert_eq!(guard.get(0).copied(), Some(0xffff)); +/// +/// assert_eq!(guard.store(0, beef, GFP_KERNEL)?.as_deref().copied(), Some(0xffff)); +/// assert_eq!(guard.get(0).copied(), Some(0xbeef)); +/// +/// guard.remove(0); +/// assert_eq!(guard.get(0), None); +/// +/// # Ok::<(), Error>(()) +/// ``` +#[pin_data(PinnedDrop)] +pub struct XArray { + #[pin] + xa: Opaque, + _p: PhantomData, +} + +#[pinned_drop] +impl PinnedDrop for XArray { + fn drop(self: Pin<&mut Self>) { + self.iter().for_each(|ptr| { + let ptr = ptr.as_ptr(); + // SAFETY: `ptr` came from `T::into_foreign`. + // + // INVARIANT: we own the only reference to the array which is being dropped so the + // broken invariant is not observable on function exit. + drop(unsafe { T::from_foreign(ptr) }) + }); + + // SAFETY: `self.xa` is always valid by the type invariant. + unsafe { bindings::xa_destroy(self.xa.get()) }; + } +} + +/// Flags passed to [`XArray::new`] to configure the array's allocation tracking behavior. +pub enum AllocKind { + /// Consider the first element to be at index 0. + Alloc, + /// Consider the first element to be at index 1. + Alloc1, +} + +impl XArray { + /// Creates a new initializer for this type. + pub fn new(kind: AllocKind) -> impl PinInit { + let flags = match kind { + AllocKind::Alloc => bindings::XA_FLAGS_ALLOC, + AllocKind::Alloc1 => bindings::XA_FLAGS_ALLOC1, + }; + pin_init!(Self { + // SAFETY: `xa` is valid while the closure is called. + // + // INVARIANT: `xa` is initialized here to an empty, valid [`bindings::xarray`]. + xa <- Opaque::ffi_init(|xa| unsafe { + bindings::xa_init_flags(xa, flags) + }), + _p: PhantomData, + }) + } + + fn iter(&self) -> impl Iterator> + '_ { + let mut index = 0; + + // SAFETY: `self.xa` is always valid by the type invariant. + iter::once(unsafe { + bindings::xa_find(self.xa.get(), &mut index, usize::MAX, bindings::XA_PRESENT) + }) + .chain(iter::from_fn(move || { + // SAFETY: `self.xa` is always valid by the type invariant. + Some(unsafe { + bindings::xa_find_after(self.xa.get(), &mut index, usize::MAX, bindings::XA_PRESENT) + }) + })) + .map_while(|ptr| NonNull::new(ptr.cast())) + } + + /// Attempts to lock the [`XArray`] for exclusive access. + pub fn try_lock(&self) -> Option> { + // SAFETY: `self.xa` is always valid by the type invariant. + if (unsafe { bindings::xa_trylock(self.xa.get()) } != 0) { + Some(Guard { + xa: self, + _not_send: NotThreadSafe, + }) + } else { + None + } + } + + /// Locks the [`XArray`] for exclusive access. + pub fn lock(&self) -> Guard<'_, T> { + // SAFETY: `self.xa` is always valid by the type invariant. + unsafe { bindings::xa_lock(self.xa.get()) }; + + Guard { + xa: self, + _not_send: NotThreadSafe, + } + } +} + +/// A lock guard. +/// +/// The lock is unlocked when the guard goes out of scope. 
+#[must_use = "the lock unlocks immediately when the guard is unused"] +pub struct Guard<'a, T: ForeignOwnable> { + xa: &'a XArray, + _not_send: NotThreadSafe, +} + +impl Drop for Guard<'_, T> { + fn drop(&mut self) { + // SAFETY: + // - `self.xa.xa` is always valid by the type invariant. + // - The caller holds the lock, so it is safe to unlock it. + unsafe { bindings::xa_unlock(self.xa.xa.get()) }; + } +} + +/// The error returned by [`store`](Guard::store). +/// +/// Contains the underlying error and the value that was not stored. +pub struct StoreError { + /// The error that occurred. + pub error: Error, + /// The value that was not stored. + pub value: T, +} + +impl From> for Error { + fn from(value: StoreError) -> Self { + value.error + } +} + +impl<'a, T: ForeignOwnable> Guard<'a, T> { + fn load(&self, index: usize, f: F) -> Option + where + F: FnOnce(NonNull) -> U, + { + // SAFETY: `self.xa.xa` is always valid by the type invariant. + let ptr = unsafe { bindings::xa_load(self.xa.xa.get(), index) }; + let ptr = NonNull::new(ptr.cast())?; + Some(f(ptr)) + } + + /// Provides a reference to the element at the given index. + pub fn get(&self, index: usize) -> Option> { + self.load(index, |ptr| { + // SAFETY: `ptr` came from `T::into_foreign`. + unsafe { T::borrow(ptr.as_ptr()) } + }) + } + + /// Provides a mutable reference to the element at the given index. + pub fn get_mut(&mut self, index: usize) -> Option> { + self.load(index, |ptr| { + // SAFETY: `ptr` came from `T::into_foreign`. + unsafe { T::borrow_mut(ptr.as_ptr()) } + }) + } + + /// Removes and returns the element at the given index. + pub fn remove(&mut self, index: usize) -> Option { + // SAFETY: + // - `self.xa.xa` is always valid by the type invariant. + // - The caller holds the lock. + let ptr = unsafe { bindings::__xa_erase(self.xa.xa.get(), index) }.cast(); + // SAFETY: + // - `ptr` is either NULL or came from `T::into_foreign`. + // - `&mut self` guarantees that the lifetimes of [`T::Borrowed`] and [`T::BorrowedMut`] + // borrowed from `self` have ended. + unsafe { T::try_from_foreign(ptr) } + } + + /// Stores an element at the given index. + /// + /// May drop the lock if needed to allocate memory, and then reacquire it afterwards. + /// + /// On success, returns the element which was previously at the given index. + /// + /// On failure, returns the element which was attempted to be stored. + pub fn store( + &mut self, + index: usize, + value: T, + gfp: alloc::Flags, + ) -> Result, StoreError> { + build_assert!( + mem::align_of::() >= 4, + "pointers stored in XArray must be 4-byte aligned" + ); + let new = value.into_foreign(); + + let old = { + let new = new.cast(); + // SAFETY: + // - `self.xa.xa` is always valid by the type invariant. + // - The caller holds the lock. + // + // INVARIANT: `new` came from `T::into_foreign`. + unsafe { bindings::__xa_store(self.xa.xa.get(), index, new, gfp.as_raw()) } + }; + + // SAFETY: `__xa_store` returns the old entry at this index on success or `xa_err` if an + // error happened. + let errno = unsafe { bindings::xa_err(old) }; + if errno != 0 { + // SAFETY: `new` came from `T::into_foreign` and `__xa_store` does not take + // ownership of the value on error. + let value = unsafe { T::from_foreign(new) }; + Err(StoreError { + value, + error: Error::from_errno(errno), + }) + } else { + let old = old.cast(); + // SAFETY: `ptr` is either NULL or came from `T::into_foreign`. 
+ // + // NB: `XA_ZERO_ENTRY` is never returned by functions belonging to the Normal XArray + // API; such entries present as `NULL`. + Ok(unsafe { T::try_from_foreign(old) }) + } + } +} + +// SAFETY: `XArray` has no shared mutable state so it is `Send` iff `T` is `Send`. +unsafe impl Send for XArray {} + +// SAFETY: `XArray` serialises the interior mutability it provides so it is `Sync` iff `T` is +// `Send`. +unsafe impl Sync for XArray {} -- cgit v1.2.3 From 5bb9ed6cdfeb75883652fd0ed3e3885083a92b4b Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Tue, 8 Apr 2025 09:22:38 +0000 Subject: mm: rust: add abstraction for struct mm_struct MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Patch series "Rust support for mm_struct, vm_area_struct, and mmap", v16. This updates the vm_area_struct support to use the approach we discussed at LPC where there are several different Rust wrappers for vm_area_struct depending on the kind of access you have to the vma. Each case allows a different set of operations on the vma. This includes an MM MAINTAINERS entry as proposed by Lorenzo: https://lore.kernel.org/all/33e64b12-aa07-4e78-933a-b07c37ff1d84@lucifer.local/ This patch (of 9): These abstractions allow you to reference a `struct mm_struct` using both mmgrab and mmget refcounts. This is done using two Rust types: * Mm - represents an mm_struct where you don't know anything about the value of mm_users. * MmWithUser - represents an mm_struct where you know at compile time that mm_users is non-zero. This allows us to encode in the type system whether a method requires that mm_users is non-zero or not. For instance, you can always call `mmget_not_zero` but you can only call `mmap_read_lock` when mm_users is non-zero. The struct is called Mm to keep consistency with the C side. The ability to obtain `current->mm` is added later in this series. The mm module is defined to only exist when CONFIG_MMU is set. This avoids various errors due to missing types and functions when CONFIG_MMU is disabled. More fine-grained cfgs can be considered in the future. See the thread at [1] for more info. Link: https://lkml.kernel.org/r/20250408-vma-v16-9-d8b446e885d9@google.com Link: https://lkml.kernel.org/r/20250408-vma-v16-1-d8b446e885d9@google.com Link: https://lore.kernel.org/all/202503091916.QousmtcY-lkp@intel.com/ Signed-off-by: Alice Ryhl Acked-by: Lorenzo Stoakes Acked-by: Liam R. 
Howlett Acked-by: Balbir Singh Reviewed-by: Andreas Hindborg Reviewed-by: Gary Guo Cc: Alex Gaynor Cc: Arnd Bergmann Cc: Benno Lossin Cc: Björn Roy Baron Cc: Boqun Feng Cc: Greg Kroah-Hartman Cc: Jann Horn Cc: John Hubbard Cc: Matthew Wilcox (Oracle) Cc: Miguel Ojeda Cc: Suren Baghdasaryan Cc: Trevor Gross Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- rust/helpers/helpers.c | 1 + rust/helpers/mm.c | 39 +++++++++ rust/kernel/lib.rs | 1 + rust/kernel/mm.rs | 210 +++++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 251 insertions(+) create mode 100644 rust/helpers/mm.c create mode 100644 rust/kernel/mm.rs (limited to 'rust/helpers/helpers.c') diff --git a/rust/helpers/helpers.c b/rust/helpers/helpers.c index 1e7c84df7252..0aea103c16be 100644 --- a/rust/helpers/helpers.c +++ b/rust/helpers/helpers.c @@ -20,6 +20,7 @@ #include "io.c" #include "jump_label.c" #include "kunit.c" +#include "mm.c" #include "mutex.c" #include "page.c" #include "platform.c" diff --git a/rust/helpers/mm.c b/rust/helpers/mm.c new file mode 100644 index 000000000000..7201747a5d31 --- /dev/null +++ b/rust/helpers/mm.c @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include + +void rust_helper_mmgrab(struct mm_struct *mm) +{ + mmgrab(mm); +} + +void rust_helper_mmdrop(struct mm_struct *mm) +{ + mmdrop(mm); +} + +void rust_helper_mmget(struct mm_struct *mm) +{ + mmget(mm); +} + +bool rust_helper_mmget_not_zero(struct mm_struct *mm) +{ + return mmget_not_zero(mm); +} + +void rust_helper_mmap_read_lock(struct mm_struct *mm) +{ + mmap_read_lock(mm); +} + +bool rust_helper_mmap_read_trylock(struct mm_struct *mm) +{ + return mmap_read_trylock(mm); +} + +void rust_helper_mmap_read_unlock(struct mm_struct *mm) +{ + mmap_read_unlock(mm); +} diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs index de07aadd1ff5..42ab6cf4053f 100644 --- a/rust/kernel/lib.rs +++ b/rust/kernel/lib.rs @@ -61,6 +61,7 @@ pub mod jump_label; pub mod kunit; pub mod list; pub mod miscdevice; +pub mod mm; #[cfg(CONFIG_NET)] pub mod net; pub mod of; diff --git a/rust/kernel/mm.rs b/rust/kernel/mm.rs new file mode 100644 index 000000000000..eda7a479cff7 --- /dev/null +++ b/rust/kernel/mm.rs @@ -0,0 +1,210 @@ +// SPDX-License-Identifier: GPL-2.0 + +// Copyright (C) 2024 Google LLC. + +//! Memory management. +//! +//! This module deals with managing the address space of userspace processes. Each process has an +//! instance of [`Mm`], which keeps track of multiple VMAs (virtual memory areas). Each VMA +//! corresponds to a region of memory that the userspace process can access, and the VMA lets you +//! control what happens when userspace reads or writes to that region of memory. +//! +//! C header: [`include/linux/mm.h`](srctree/include/linux/mm.h) +#![cfg(CONFIG_MMU)] + +use crate::{ + bindings, + types::{ARef, AlwaysRefCounted, NotThreadSafe, Opaque}, +}; +use core::{ops::Deref, ptr::NonNull}; + +/// A wrapper for the kernel's `struct mm_struct`. +/// +/// This represents the address space of a userspace process, so each process has one `Mm` +/// instance. It may hold many VMAs internally. +/// +/// There is a counter called `mm_users` that counts the users of the address space; this includes +/// the userspace process itself, but can also include kernel threads accessing the address space. +/// Once `mm_users` reaches zero, this indicates that the address space can be destroyed. To access +/// the address space, you must prevent `mm_users` from reaching zero while you are accessing it. 
+/// The [`MmWithUser`] type represents an address space where this is guaranteed, and you can +/// create one using [`mmget_not_zero`]. +/// +/// The `ARef` smart pointer holds an `mmgrab` refcount. Its destructor may sleep. +/// +/// # Invariants +/// +/// Values of this type are always refcounted using `mmgrab`. +/// +/// [`mmget_not_zero`]: Mm::mmget_not_zero +#[repr(transparent)] +pub struct Mm { + mm: Opaque, +} + +// SAFETY: It is safe to call `mmdrop` on another thread than where `mmgrab` was called. +unsafe impl Send for Mm {} +// SAFETY: All methods on `Mm` can be called in parallel from several threads. +unsafe impl Sync for Mm {} + +// SAFETY: By the type invariants, this type is always refcounted. +unsafe impl AlwaysRefCounted for Mm { + #[inline] + fn inc_ref(&self) { + // SAFETY: The pointer is valid since self is a reference. + unsafe { bindings::mmgrab(self.as_raw()) }; + } + + #[inline] + unsafe fn dec_ref(obj: NonNull) { + // SAFETY: The caller is giving up their refcount. + unsafe { bindings::mmdrop(obj.cast().as_ptr()) }; + } +} + +/// A wrapper for the kernel's `struct mm_struct`. +/// +/// This type is like [`Mm`], but with non-zero `mm_users`. It can only be used when `mm_users` can +/// be proven to be non-zero at compile-time, usually because the relevant code holds an `mmget` +/// refcount. It can be used to access the associated address space. +/// +/// The `ARef` smart pointer holds an `mmget` refcount. Its destructor may sleep. +/// +/// # Invariants +/// +/// Values of this type are always refcounted using `mmget`. The value of `mm_users` is non-zero. +#[repr(transparent)] +pub struct MmWithUser { + mm: Mm, +} + +// SAFETY: It is safe to call `mmput` on another thread than where `mmget` was called. +unsafe impl Send for MmWithUser {} +// SAFETY: All methods on `MmWithUser` can be called in parallel from several threads. +unsafe impl Sync for MmWithUser {} + +// SAFETY: By the type invariants, this type is always refcounted. +unsafe impl AlwaysRefCounted for MmWithUser { + #[inline] + fn inc_ref(&self) { + // SAFETY: The pointer is valid since self is a reference. + unsafe { bindings::mmget(self.as_raw()) }; + } + + #[inline] + unsafe fn dec_ref(obj: NonNull) { + // SAFETY: The caller is giving up their refcount. + unsafe { bindings::mmput(obj.cast().as_ptr()) }; + } +} + +// Make all `Mm` methods available on `MmWithUser`. +impl Deref for MmWithUser { + type Target = Mm; + + #[inline] + fn deref(&self) -> &Mm { + &self.mm + } +} + +// These methods are safe to call even if `mm_users` is zero. +impl Mm { + /// Returns a raw pointer to the inner `mm_struct`. + #[inline] + pub fn as_raw(&self) -> *mut bindings::mm_struct { + self.mm.get() + } + + /// Obtain a reference from a raw pointer. + /// + /// # Safety + /// + /// The caller must ensure that `ptr` points at an `mm_struct`, and that it is not deallocated + /// during the lifetime 'a. + #[inline] + pub unsafe fn from_raw<'a>(ptr: *const bindings::mm_struct) -> &'a Mm { + // SAFETY: Caller promises that the pointer is valid for 'a. Layouts are compatible due to + // repr(transparent). + unsafe { &*ptr.cast() } + } + + /// Calls `mmget_not_zero` and returns a handle if it succeeds. + #[inline] + pub fn mmget_not_zero(&self) -> Option> { + // SAFETY: The pointer is valid since self is a reference. + let success = unsafe { bindings::mmget_not_zero(self.as_raw()) }; + + if success { + // SAFETY: We just created an `mmget` refcount. 
+ Some(unsafe { ARef::from_raw(NonNull::new_unchecked(self.as_raw().cast())) }) + } else { + None + } + } +} + +// These methods require `mm_users` to be non-zero. +impl MmWithUser { + /// Obtain a reference from a raw pointer. + /// + /// # Safety + /// + /// The caller must ensure that `ptr` points at an `mm_struct`, and that `mm_users` remains + /// non-zero for the duration of the lifetime 'a. + #[inline] + pub unsafe fn from_raw<'a>(ptr: *const bindings::mm_struct) -> &'a MmWithUser { + // SAFETY: Caller promises that the pointer is valid for 'a. The layout is compatible due + // to repr(transparent). + unsafe { &*ptr.cast() } + } + + /// Lock the mmap read lock. + #[inline] + pub fn mmap_read_lock(&self) -> MmapReadGuard<'_> { + // SAFETY: The pointer is valid since self is a reference. + unsafe { bindings::mmap_read_lock(self.as_raw()) }; + + // INVARIANT: We just acquired the read lock. + MmapReadGuard { + mm: self, + _nts: NotThreadSafe, + } + } + + /// Try to lock the mmap read lock. + #[inline] + pub fn mmap_read_trylock(&self) -> Option> { + // SAFETY: The pointer is valid since self is a reference. + let success = unsafe { bindings::mmap_read_trylock(self.as_raw()) }; + + if success { + // INVARIANT: We just acquired the read lock. + Some(MmapReadGuard { + mm: self, + _nts: NotThreadSafe, + }) + } else { + None + } + } +} + +/// A guard for the mmap read lock. +/// +/// # Invariants +/// +/// This `MmapReadGuard` guard owns the mmap read lock. +pub struct MmapReadGuard<'a> { + mm: &'a MmWithUser, + // `mmap_read_lock` and `mmap_read_unlock` must be called on the same thread + _nts: NotThreadSafe, +} + +impl Drop for MmapReadGuard<'_> { + #[inline] + fn drop(&mut self) { + // SAFETY: We hold the read lock by the type invariants. + unsafe { bindings::mmap_read_unlock(self.mm.as_raw()) }; + } +} -- cgit v1.2.3 From b7b7b981cb037d250ad06c3a6558d69e04ed77c7 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 20 Feb 2025 16:34:00 +0530 Subject: rust: clk: Add helpers for Rust code Non-trivial C macros and inlined C functions cannot be used directly in the Rust code and are used via functions ("helpers") that wrap those so that they can be called from Rust. In order to prepare for adding Rust abstractions for the clock APIs, add clock helpers required by the Rust implementation. 
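For context, a hypothetical sketch of how a Rust caller would reach one of these helpers. The `RawClk` wrapper below is illustrative only (the in-tree `Clk` abstraction is not part of this patch), and the `bindings::clk_prepare_enable()` call is expected to resolve to the `rust_helper_clk_prepare_enable()` wrapper added here, in the same way the mm abstractions earlier in this log call `bindings::mmgrab()`:

```rust
use kernel::bindings;
use kernel::error::{to_result, Result};

/// Illustrative only: a hypothetical thin wrapper around a raw `struct clk`
/// pointer; the real `Clk` abstraction is not part of this patch.
struct RawClk(*mut bindings::clk);

impl RawClk {
    /// Prepares and enables the clock.
    fn prepare_enable(&self) -> Result {
        // SAFETY: `self.0` is assumed to be a valid `struct clk` pointer for
        // the lifetime of `self`; the call is expected to land in the
        // `rust_helper_clk_prepare_enable()` wrapper added by this patch.
        to_result(unsafe { bindings::clk_prepare_enable(self.0) })
    }
}
```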
Reviewed-by: Daniel Almeida Signed-off-by: Viresh Kumar --- MAINTAINERS | 1 + rust/bindings/bindings_helper.h | 1 + rust/helpers/clk.c | 66 +++++++++++++++++++++++++++++++++++++++++ rust/helpers/helpers.c | 1 + 4 files changed, 69 insertions(+) create mode 100644 rust/helpers/clk.c (limited to 'rust/helpers/helpers.c') diff --git a/MAINTAINERS b/MAINTAINERS index bd7c54af4fd4..608689342aaf 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5883,6 +5883,7 @@ F: include/dt-bindings/clock/ F: include/linux/clk-pr* F: include/linux/clk/ F: include/linux/of_clk.h +F: rust/helpers/clk.c X: drivers/clk/clkdev.c COMMON INTERNET FILE SYSTEM CLIENT (CIFS and SMB3) diff --git a/rust/bindings/bindings_helper.h b/rust/bindings/bindings_helper.h index ab37e1d35c70..f53d6e1a21f2 100644 --- a/rust/bindings/bindings_helper.h +++ b/rust/bindings/bindings_helper.h @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include diff --git a/rust/helpers/clk.c b/rust/helpers/clk.c new file mode 100644 index 000000000000..6d04372c9f3b --- /dev/null +++ b/rust/helpers/clk.c @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include + +/* + * The "inline" implementation of below helpers are only available when + * CONFIG_HAVE_CLK or CONFIG_HAVE_CLK_PREPARE aren't set. + */ +#ifndef CONFIG_HAVE_CLK +struct clk *rust_helper_clk_get(struct device *dev, const char *id) +{ + return clk_get(dev, id); +} + +void rust_helper_clk_put(struct clk *clk) +{ + clk_put(clk); +} + +int rust_helper_clk_enable(struct clk *clk) +{ + return clk_enable(clk); +} + +void rust_helper_clk_disable(struct clk *clk) +{ + clk_disable(clk); +} + +unsigned long rust_helper_clk_get_rate(struct clk *clk) +{ + return clk_get_rate(clk); +} + +int rust_helper_clk_set_rate(struct clk *clk, unsigned long rate) +{ + return clk_set_rate(clk, rate); +} +#endif + +#ifndef CONFIG_HAVE_CLK_PREPARE +int rust_helper_clk_prepare(struct clk *clk) +{ + return clk_prepare(clk); +} + +void rust_helper_clk_unprepare(struct clk *clk) +{ + clk_unprepare(clk); +} +#endif + +struct clk *rust_helper_clk_get_optional(struct device *dev, const char *id) +{ + return clk_get_optional(dev, id); +} + +int rust_helper_clk_prepare_enable(struct clk *clk) +{ + return clk_prepare_enable(clk); +} + +void rust_helper_clk_disable_unprepare(struct clk *clk) +{ + clk_disable_unprepare(clk); +} diff --git a/rust/helpers/helpers.c b/rust/helpers/helpers.c index e1c21eba9b15..ae595c9cd91b 100644 --- a/rust/helpers/helpers.c +++ b/rust/helpers/helpers.c @@ -11,6 +11,7 @@ #include "bug.c" #include "build_assert.c" #include "build_bug.c" +#include "clk.c" #include "cpumask.c" #include "cred.c" #include "device.c" -- cgit v1.2.3 From 2207856ff0bc8d953d6e89bda70b8978c2de8bab Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Tue, 7 Jan 2025 12:51:08 +0530 Subject: rust: cpufreq: Add initial abstractions for cpufreq framework Introduce initial Rust abstractions for the cpufreq core. This includes basic representations for cpufreq flags, relation types, and the cpufreq table. 
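The table side is easiest to see from the doc-test the patch itself adds for `TableBuilder`; the following condensed sketch uses only items introduced or referenced by this patch (`TableBuilder`, `TableIndex`, and `Hertz` from `kernel::clk`):

```rust
use kernel::clk::Hertz;
use kernel::cpufreq::{TableBuilder, TableIndex};
use kernel::prelude::*;

fn build_freq_table() -> Result {
    let mut builder = TableBuilder::new();

    // Each entry carries a frequency plus driver-defined flags and data.
    builder.add(Hertz::from_mhz(700), 0, 1)?;
    builder.add(Hertz::from_mhz(800), 2, 3)?;

    // `to_table()` consumes the builder, adds a last entry and pins the
    // entries in memory, since their pointer is handed to the C code.
    let table = builder.to_table()?;

    // SAFETY: index 0 refers to a valid entry in the table built above.
    let index0 = unsafe { TableIndex::new(0) };
    assert_eq!(table.freq(index0), Ok(Hertz::from_mhz(700)));
    assert_eq!(table.data(index0), 1);
    Ok(())
}
```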
Signed-off-by: Viresh Kumar --- MAINTAINERS | 1 + rust/bindings/bindings_helper.h | 1 + rust/helpers/cpufreq.c | 10 ++ rust/helpers/helpers.c | 1 + rust/kernel/cpufreq.rs | 365 ++++++++++++++++++++++++++++++++++++++++ rust/kernel/lib.rs | 2 + 6 files changed, 380 insertions(+) create mode 100644 rust/helpers/cpufreq.c create mode 100644 rust/kernel/cpufreq.rs (limited to 'rust/helpers/helpers.c') diff --git a/MAINTAINERS b/MAINTAINERS index 931e418f89ed..aa56eacbda71 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6142,6 +6142,7 @@ F: drivers/cpufreq/ F: include/linux/cpufreq.h F: include/linux/sched/cpufreq.h F: kernel/sched/cpufreq*.c +F: rust/kernel/cpufreq.rs F: tools/testing/selftests/cpufreq/ CPU HOTPLUG diff --git a/rust/bindings/bindings_helper.h b/rust/bindings/bindings_helper.h index 529f22891e0b..7c1d78f68076 100644 --- a/rust/bindings/bindings_helper.h +++ b/rust/bindings/bindings_helper.h @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include diff --git a/rust/helpers/cpufreq.c b/rust/helpers/cpufreq.c new file mode 100644 index 000000000000..7c1343c4d65e --- /dev/null +++ b/rust/helpers/cpufreq.c @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include + +#ifdef CONFIG_CPU_FREQ +void rust_helper_cpufreq_register_em_with_opp(struct cpufreq_policy *policy) +{ + cpufreq_register_em_with_opp(policy); +} +#endif diff --git a/rust/helpers/helpers.c b/rust/helpers/helpers.c index ae595c9cd91b..df1fcfb3adf3 100644 --- a/rust/helpers/helpers.c +++ b/rust/helpers/helpers.c @@ -12,6 +12,7 @@ #include "build_assert.c" #include "build_bug.c" #include "clk.c" +#include "cpufreq.c" #include "cpumask.c" #include "cred.c" #include "device.c" diff --git a/rust/kernel/cpufreq.rs b/rust/kernel/cpufreq.rs new file mode 100644 index 000000000000..94fa63bda75a --- /dev/null +++ b/rust/kernel/cpufreq.rs @@ -0,0 +1,365 @@ +// SPDX-License-Identifier: GPL-2.0 + +//! CPU frequency scaling. +//! +//! This module provides rust abstractions for interacting with the cpufreq subsystem. +//! +//! C header: [`include/linux/cpufreq.h`](srctree/include/linux/cpufreq.h) +//! +//! Reference: + +use crate::{ + clk::Hertz, + error::{code::*, to_result, Result}, + ffi::c_ulong, + prelude::*, + types::Opaque, +}; + +use core::{ops::Deref, pin::Pin}; + +/// Default transition latency value in nanoseconds. +pub const ETERNAL_LATENCY_NS: u32 = bindings::CPUFREQ_ETERNAL as u32; + +/// CPU frequency driver flags. +pub mod flags { + /// Driver needs to update internal limits even if frequency remains unchanged. + pub const NEED_UPDATE_LIMITS: u16 = 1 << 0; + + /// Platform where constants like `loops_per_jiffy` are unaffected by frequency changes. + pub const CONST_LOOPS: u16 = 1 << 1; + + /// Register driver as a thermal cooling device automatically. + pub const IS_COOLING_DEV: u16 = 1 << 2; + + /// Supports multiple clock domains with per-policy governors in `cpu/cpuN/cpufreq/`. + pub const HAVE_GOVERNOR_PER_POLICY: u16 = 1 << 3; + + /// Allows post-change notifications outside of the `target()` routine. + pub const ASYNC_NOTIFICATION: u16 = 1 << 4; + + /// Ensure CPU starts at a valid frequency from the driver's freq-table. + pub const NEED_INITIAL_FREQ_CHECK: u16 = 1 << 5; + + /// Disallow governors with `dynamic_switching` capability. + pub const NO_AUTO_DYNAMIC_SWITCHING: u16 = 1 << 6; +} + +/// Relations from the C code. +const CPUFREQ_RELATION_L: u32 = 0; +const CPUFREQ_RELATION_H: u32 = 1; +const CPUFREQ_RELATION_C: u32 = 2; + +/// Can be used with any of the above values. 
+const CPUFREQ_RELATION_E: u32 = 1 << 2; + +/// CPU frequency selection relations. +/// +/// CPU frequency selection relations, each optionally marked as "efficient". +#[derive(Copy, Clone, Debug, Eq, PartialEq)] +pub enum Relation { + /// Select the lowest frequency at or above target. + Low(bool), + /// Select the highest frequency below or at target. + High(bool), + /// Select the closest frequency to the target. + Close(bool), +} + +impl Relation { + // Construct from a C-compatible `u32` value. + fn new(val: u32) -> Result { + let efficient = val & CPUFREQ_RELATION_E != 0; + + Ok(match val & !CPUFREQ_RELATION_E { + CPUFREQ_RELATION_L => Self::Low(efficient), + CPUFREQ_RELATION_H => Self::High(efficient), + CPUFREQ_RELATION_C => Self::Close(efficient), + _ => return Err(EINVAL), + }) + } +} + +impl From for u32 { + // Convert to a C-compatible `u32` value. + fn from(rel: Relation) -> Self { + let (mut val, efficient) = match rel { + Relation::Low(e) => (CPUFREQ_RELATION_L, e), + Relation::High(e) => (CPUFREQ_RELATION_H, e), + Relation::Close(e) => (CPUFREQ_RELATION_C, e), + }; + + if efficient { + val |= CPUFREQ_RELATION_E; + } + + val + } +} + +/// Policy data. +/// +/// Rust abstraction for the C `struct cpufreq_policy_data`. +/// +/// # Invariants +/// +/// A [`PolicyData`] instance always corresponds to a valid C `struct cpufreq_policy_data`. +/// +/// The callers must ensure that the `struct cpufreq_policy_data` is valid for access and remains +/// valid for the lifetime of the returned reference. +#[repr(transparent)] +pub struct PolicyData(Opaque); + +impl PolicyData { + /// Creates a mutable reference to an existing `struct cpufreq_policy_data` pointer. + /// + /// # Safety + /// + /// The caller must ensure that `ptr` is valid for writing and remains valid for the lifetime + /// of the returned reference. + #[inline] + pub unsafe fn from_raw_mut<'a>(ptr: *mut bindings::cpufreq_policy_data) -> &'a mut Self { + // SAFETY: Guaranteed by the safety requirements of the function. + // + // INVARIANT: The caller ensures that `ptr` is valid for writing and remains valid for the + // lifetime of the returned reference. + unsafe { &mut *ptr.cast() } + } + + /// Returns a raw pointer to the underlying C `cpufreq_policy_data`. + #[inline] + pub fn as_raw(&self) -> *mut bindings::cpufreq_policy_data { + let this: *const Self = self; + this.cast_mut().cast() + } + + /// Wrapper for `cpufreq_generic_frequency_table_verify`. + #[inline] + pub fn generic_verify(&self) -> Result { + // SAFETY: By the type invariant, the pointer stored in `self` is valid. + to_result(unsafe { bindings::cpufreq_generic_frequency_table_verify(self.as_raw()) }) + } +} + +/// The frequency table index. +/// +/// Represents index with a frequency table. +/// +/// # Invariants +/// +/// The index must correspond to a valid entry in the [`Table`] it is used for. +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +pub struct TableIndex(usize); + +impl TableIndex { + /// Creates an instance of [`TableIndex`]. + /// + /// # Safety + /// + /// The caller must ensure that `index` correspond to a valid entry in the [`Table`] it is used + /// for. + pub unsafe fn new(index: usize) -> Self { + // INVARIANT: The caller ensures that `index` correspond to a valid entry in the [`Table`]. + Self(index) + } +} + +impl From for usize { + #[inline] + fn from(index: TableIndex) -> Self { + index.0 + } +} + +/// CPU frequency table. +/// +/// Rust abstraction for the C `struct cpufreq_frequency_table`. 
+/// +/// # Invariants +/// +/// A [`Table`] instance always corresponds to a valid C `struct cpufreq_frequency_table`. +/// +/// The callers must ensure that the `struct cpufreq_frequency_table` is valid for access and +/// remains valid for the lifetime of the returned reference. +/// +/// ## Examples +/// +/// The following example demonstrates how to read a frequency value from [`Table`]. +/// +/// ``` +/// use kernel::cpufreq::{Policy, TableIndex}; +/// +/// fn show_freq(policy: &Policy) -> Result { +/// let table = policy.freq_table()?; +/// +/// // SAFETY: Index is a valid entry in the table. +/// let index = unsafe { TableIndex::new(0) }; +/// +/// pr_info!("The frequency at index 0 is: {:?}\n", table.freq(index)?); +/// pr_info!("The flags at index 0 is: {}\n", table.flags(index)); +/// pr_info!("The data at index 0 is: {}\n", table.data(index)); +/// Ok(()) +/// } +/// ``` +#[repr(transparent)] +pub struct Table(Opaque); + +impl Table { + /// Creates a reference to an existing C `struct cpufreq_frequency_table` pointer. + /// + /// # Safety + /// + /// The caller must ensure that `ptr` is valid for reading and remains valid for the lifetime + /// of the returned reference. + #[inline] + pub unsafe fn from_raw<'a>(ptr: *const bindings::cpufreq_frequency_table) -> &'a Self { + // SAFETY: Guaranteed by the safety requirements of the function. + // + // INVARIANT: The caller ensures that `ptr` is valid for reading and remains valid for the + // lifetime of the returned reference. + unsafe { &*ptr.cast() } + } + + /// Returns the raw mutable pointer to the C `struct cpufreq_frequency_table`. + #[inline] + pub fn as_raw(&self) -> *mut bindings::cpufreq_frequency_table { + let this: *const Self = self; + this.cast_mut().cast() + } + + /// Returns frequency at `index` in the [`Table`]. + #[inline] + pub fn freq(&self, index: TableIndex) -> Result { + // SAFETY: By the type invariant, the pointer stored in `self` is valid and `index` is + // guaranteed to be valid by its safety requirements. + Ok(Hertz::from_khz(unsafe { + (*self.as_raw().add(index.into())).frequency.try_into()? + })) + } + + /// Returns flags at `index` in the [`Table`]. + #[inline] + pub fn flags(&self, index: TableIndex) -> u32 { + // SAFETY: By the type invariant, the pointer stored in `self` is valid and `index` is + // guaranteed to be valid by its safety requirements. + unsafe { (*self.as_raw().add(index.into())).flags } + } + + /// Returns data at `index` in the [`Table`]. + #[inline] + pub fn data(&self, index: TableIndex) -> u32 { + // SAFETY: By the type invariant, the pointer stored in `self` is valid and `index` is + // guaranteed to be valid by its safety requirements. + unsafe { (*self.as_raw().add(index.into())).driver_data } + } +} + +/// CPU frequency table owned and pinned in memory, created from a [`TableBuilder`]. +pub struct TableBox { + entries: Pin>, +} + +impl TableBox { + /// Constructs a new [`TableBox`] from a [`KVec`] of entries. + /// + /// # Errors + /// + /// Returns `EINVAL` if the entries list is empty. + #[inline] + fn new(entries: KVec) -> Result { + if entries.is_empty() { + return Err(EINVAL); + } + + Ok(Self { + // Pin the entries to memory, since we are passing its pointer to the C code. + entries: Pin::new(entries), + }) + } + + /// Returns a raw pointer to the underlying C `cpufreq_frequency_table`. + #[inline] + fn as_raw(&self) -> *const bindings::cpufreq_frequency_table { + // The pointer is valid until the table gets dropped. 
+ self.entries.as_ptr() + } +} + +impl Deref for TableBox { + type Target = Table; + + fn deref(&self) -> &Self::Target { + // SAFETY: The caller owns TableBox, it is safe to deref. + unsafe { Self::Target::from_raw(self.as_raw()) } + } +} + +/// CPU frequency table builder. +/// +/// This is used by the CPU frequency drivers to build a frequency table dynamically. +/// +/// ## Examples +/// +/// The following example demonstrates how to create a CPU frequency table. +/// +/// ``` +/// use kernel::cpufreq::{TableBuilder, TableIndex}; +/// use kernel::clk::Hertz; +/// +/// let mut builder = TableBuilder::new(); +/// +/// // Adds few entries to the table. +/// builder.add(Hertz::from_mhz(700), 0, 1).unwrap(); +/// builder.add(Hertz::from_mhz(800), 2, 3).unwrap(); +/// builder.add(Hertz::from_mhz(900), 4, 5).unwrap(); +/// builder.add(Hertz::from_ghz(1), 6, 7).unwrap(); +/// +/// let table = builder.to_table().unwrap(); +/// +/// // SAFETY: Index values correspond to valid entries in the table. +/// let (index0, index2) = unsafe { (TableIndex::new(0), TableIndex::new(2)) }; +/// +/// assert_eq!(table.freq(index0), Ok(Hertz::from_mhz(700))); +/// assert_eq!(table.flags(index0), 0); +/// assert_eq!(table.data(index0), 1); +/// +/// assert_eq!(table.freq(index2), Ok(Hertz::from_mhz(900))); +/// assert_eq!(table.flags(index2), 4); +/// assert_eq!(table.data(index2), 5); +/// ``` +#[derive(Default)] +#[repr(transparent)] +pub struct TableBuilder { + entries: KVec, +} + +impl TableBuilder { + /// Creates a new instance of [`TableBuilder`]. + #[inline] + pub fn new() -> Self { + Self { + entries: KVec::new(), + } + } + + /// Adds a new entry to the table. + pub fn add(&mut self, freq: Hertz, flags: u32, driver_data: u32) -> Result { + // Adds the new entry at the end of the vector. + Ok(self.entries.push( + bindings::cpufreq_frequency_table { + flags, + driver_data, + frequency: freq.as_khz() as u32, + }, + GFP_KERNEL, + )?) + } + + /// Consumes the [`TableBuilder`] and returns [`TableBox`]. + pub fn to_table(mut self) -> Result { + // Add last entry to the table. + self.add(Hertz(c_ulong::MAX), 0, 0)?; + + TableBox::new(self.entries) + } +} diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs index ea589254b4ac..133ebee4f9d3 100644 --- a/rust/kernel/lib.rs +++ b/rust/kernel/lib.rs @@ -44,6 +44,8 @@ pub mod block; pub mod build_assert; pub mod clk; pub mod cpu; +#[cfg(CONFIG_CPU_FREQ)] +pub mod cpufreq; pub mod cpumask; pub mod cred; pub mod device; -- cgit v1.2.3