Diffstat (limited to 'rust/kernel/alloc')
-rw-r--r--  rust/kernel/alloc/allocator.rs      | 157
-rw-r--r--  rust/kernel/alloc/allocator/iter.rs | 102
-rw-r--r--  rust/kernel/alloc/kbox.rs           |  44
-rw-r--r--  rust/kernel/alloc/kvec.rs           |  55
-rw-r--r--  rust/kernel/alloc/layout.rs         |   5
5 files changed, 330 insertions, 33 deletions
diff --git a/rust/kernel/alloc/allocator.rs b/rust/kernel/alloc/allocator.rs
index 869d9fd69527..63bfb91b3671 100644
--- a/rust/kernel/alloc/allocator.rs
+++ b/rust/kernel/alloc/allocator.rs
@@ -13,12 +13,15 @@ use core::alloc::Layout;
 use core::ptr;
 use core::ptr::NonNull;
 
-use crate::alloc::{AllocError, Allocator};
+use crate::alloc::{AllocError, Allocator, NumaNode};
 use crate::bindings;
-use crate::pr_warn;
+use crate::page;
 
 const ARCH_KMALLOC_MINALIGN: usize = bindings::ARCH_KMALLOC_MINALIGN;
 
+mod iter;
+pub use self::iter::VmallocPageIter;
+
 /// The contiguous kernel allocator.
 ///
 /// `Kmalloc` is typically used for physically contiguous allocations up to page size, but also
@@ -47,20 +50,26 @@ pub struct KVmalloc;
 
 /// # Invariants
 ///
-/// One of the following: `krealloc`, `vrealloc`, `kvrealloc`.
+/// One of the following: `krealloc_node_align`, `vrealloc_node_align`, `kvrealloc_node_align`.
 struct ReallocFunc(
-    unsafe extern "C" fn(*const crate::ffi::c_void, usize, u32) -> *mut crate::ffi::c_void,
+    unsafe extern "C" fn(
+        *const crate::ffi::c_void,
+        usize,
+        crate::ffi::c_ulong,
+        u32,
+        crate::ffi::c_int,
+    ) -> *mut crate::ffi::c_void,
 );
 
 impl ReallocFunc {
-    // INVARIANT: `krealloc` satisfies the type invariants.
-    const KREALLOC: Self = Self(bindings::krealloc);
+    // INVARIANT: `krealloc_node_align` satisfies the type invariants.
+    const KREALLOC: Self = Self(bindings::krealloc_node_align);
 
-    // INVARIANT: `vrealloc` satisfies the type invariants.
-    const VREALLOC: Self = Self(bindings::vrealloc);
+    // INVARIANT: `vrealloc_node_align` satisfies the type invariants.
+    const VREALLOC: Self = Self(bindings::vrealloc_node_align);
 
-    // INVARIANT: `kvrealloc` satisfies the type invariants.
-    const KVREALLOC: Self = Self(bindings::kvrealloc);
+    // INVARIANT: `kvrealloc_node_align` satisfies the type invariants.
+    const KVREALLOC: Self = Self(bindings::kvrealloc_node_align);
 
     /// # Safety
     ///
@@ -78,6 +87,7 @@ impl ReallocFunc {
         layout: Layout,
         old_layout: Layout,
         flags: Flags,
+        nid: NumaNode,
     ) -> Result<NonNull<[u8]>, AllocError> {
         let size = layout.size();
         let ptr = match ptr {
@@ -101,7 +111,7 @@ impl ReallocFunc {
         // - Those functions provide the guarantees of this function.
         let raw_ptr = unsafe {
             // If `size == 0` and `ptr != NULL` the memory behind the pointer is freed.
-            self.0(ptr.cast(), size, flags.0).cast()
+            self.0(ptr.cast(), size, layout.align(), flags.0, nid.0).cast()
        };
 
         let ptr = if size == 0 {
@@ -138,11 +148,60 @@ unsafe impl Allocator for Kmalloc {
         layout: Layout,
         old_layout: Layout,
         flags: Flags,
+        nid: NumaNode,
     ) -> Result<NonNull<[u8]>, AllocError> {
         let layout = Kmalloc::aligned_layout(layout);
 
         // SAFETY: `ReallocFunc::call` has the same safety requirements as `Allocator::realloc`.
-        unsafe { ReallocFunc::KREALLOC.call(ptr, layout, old_layout, flags) }
+        unsafe { ReallocFunc::KREALLOC.call(ptr, layout, old_layout, flags, nid) }
+    }
+}
+
+impl Vmalloc {
+    /// Convert a pointer to a [`Vmalloc`] allocation to a [`page::BorrowedPage`].
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use core::ptr::{NonNull, from_mut};
+    /// # use kernel::{page, prelude::*};
+    /// use kernel::alloc::allocator::Vmalloc;
+    ///
+    /// let mut vbox = VBox::<[u8; page::PAGE_SIZE]>::new_uninit(GFP_KERNEL)?;
+    ///
+    /// {
+    ///     // SAFETY: By the type invariant of `Box` the inner pointer of `vbox` is non-null.
+    ///     let ptr = unsafe { NonNull::new_unchecked(from_mut(&mut *vbox)) };
+    ///
+    ///     // SAFETY:
+    ///     // `ptr` is a valid pointer to a `Vmalloc` allocation.
+    ///     // `ptr` is valid for the entire lifetime of `page`.
+    ///     let page = unsafe { Vmalloc::to_page(ptr.cast()) };
+    ///
+    ///     // SAFETY: There is no concurrent read or write to the same page.
+    ///     unsafe { page.fill_zero_raw(0, page::PAGE_SIZE)? };
+    /// }
+    /// # Ok::<(), Error>(())
+    /// ```
+    ///
+    /// # Safety
+    ///
+    /// - `ptr` must be a valid pointer to a [`Vmalloc`] allocation.
+    /// - `ptr` must remain valid for the entire duration of `'a`.
+    pub unsafe fn to_page<'a>(ptr: NonNull<u8>) -> page::BorrowedPage<'a> {
+        // SAFETY: `ptr` is a valid pointer to `Vmalloc` memory.
+        let page = unsafe { bindings::vmalloc_to_page(ptr.as_ptr().cast()) };
+
+        // SAFETY: `vmalloc_to_page` returns a valid pointer to a `struct page` for a valid pointer
+        // to `Vmalloc` memory.
+        let page = unsafe { NonNull::new_unchecked(page) };
+
+        // SAFETY:
+        // - `page` is a valid pointer to a `struct page`, given that by the safety requirements of
+        //   this function `ptr` is a valid pointer to a `Vmalloc` allocation.
+        // - By the safety requirements of this function `ptr` is valid for the entire lifetime of
+        //   `'a`.
+        unsafe { page::BorrowedPage::from_raw(page) }
     }
 }
 
@@ -159,16 +218,11 @@ unsafe impl Allocator for Vmalloc {
         layout: Layout,
         old_layout: Layout,
         flags: Flags,
+        nid: NumaNode,
     ) -> Result<NonNull<[u8]>, AllocError> {
-        // TODO: Support alignments larger than PAGE_SIZE.
-        if layout.align() > bindings::PAGE_SIZE {
-            pr_warn!("Vmalloc does not support alignments larger than PAGE_SIZE yet.\n");
-            return Err(AllocError);
-        }
-
         // SAFETY: If not `None`, `ptr` is guaranteed to point to valid memory, which was previously
         // allocated with this `Allocator`.
-        unsafe { ReallocFunc::VREALLOC.call(ptr, layout, old_layout, flags) }
+        unsafe { ReallocFunc::VREALLOC.call(ptr, layout, old_layout, flags, nid) }
     }
 }
 
@@ -185,19 +239,70 @@ unsafe impl Allocator for KVmalloc {
         layout: Layout,
         old_layout: Layout,
         flags: Flags,
+        nid: NumaNode,
     ) -> Result<NonNull<[u8]>, AllocError> {
         // `KVmalloc` may use the `Kmalloc` backend, hence we have to enforce a `Kmalloc`
         // compatible layout.
         let layout = Kmalloc::aligned_layout(layout);
 
-        // TODO: Support alignments larger than PAGE_SIZE.
-        if layout.align() > bindings::PAGE_SIZE {
-            pr_warn!("KVmalloc does not support alignments larger than PAGE_SIZE yet.\n");
-            return Err(AllocError);
-        }
-
        // SAFETY: If not `None`, `ptr` is guaranteed to point to valid memory, which was previously
        // allocated with this `Allocator`.
-        unsafe { ReallocFunc::KVREALLOC.call(ptr, layout, old_layout, flags) }
+        unsafe { ReallocFunc::KVREALLOC.call(ptr, layout, old_layout, flags, nid) }
+    }
+}
+
+#[macros::kunit_tests(rust_allocator)]
+mod tests {
+    use super::*;
+    use core::mem::MaybeUninit;
+    use kernel::prelude::*;
+
+    #[test]
+    fn test_alignment() -> Result {
+        const TEST_SIZE: usize = 1024;
+        const TEST_LARGE_ALIGN_SIZE: usize = kernel::page::PAGE_SIZE * 4;
+
+        // These two structs are used to test allocating aligned memory.
+        // they don't need to be accessed, so they're marked as dead_code.
+        #[expect(dead_code)]
+        #[repr(align(128))]
+        struct Blob([u8; TEST_SIZE]);
+        #[expect(dead_code)]
+        #[repr(align(8192))]
+        struct LargeAlignBlob([u8; TEST_LARGE_ALIGN_SIZE]);
+
+        struct TestAlign<T, A: Allocator>(Box<MaybeUninit<T>, A>);
+        impl<T, A: Allocator> TestAlign<T, A> {
+            fn new() -> Result<Self> {
+                Ok(Self(Box::<_, A>::new_uninit(GFP_KERNEL)?))
+            }
+
+            fn is_aligned_to(&self, align: usize) -> bool {
+                assert!(align.is_power_of_two());
+
+                let addr = self.0.as_ptr() as usize;
+                addr & (align - 1) == 0
+            }
+        }
+
+        let ta = TestAlign::<Blob, Kmalloc>::new()?;
+        assert!(ta.is_aligned_to(128));
+
+        let ta = TestAlign::<LargeAlignBlob, Kmalloc>::new()?;
+        assert!(ta.is_aligned_to(8192));
+
+        let ta = TestAlign::<Blob, Vmalloc>::new()?;
+        assert!(ta.is_aligned_to(128));
+
+        let ta = TestAlign::<LargeAlignBlob, Vmalloc>::new()?;
+        assert!(ta.is_aligned_to(8192));
+
+        let ta = TestAlign::<Blob, KVmalloc>::new()?;
+        assert!(ta.is_aligned_to(128));
+
+        let ta = TestAlign::<LargeAlignBlob, KVmalloc>::new()?;
+        assert!(ta.is_aligned_to(8192));
+
+        Ok(())
     }
 }
diff --git a/rust/kernel/alloc/allocator/iter.rs b/rust/kernel/alloc/allocator/iter.rs
new file mode 100644
index 000000000000..5759f86029b7
--- /dev/null
+++ b/rust/kernel/alloc/allocator/iter.rs
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use super::Vmalloc;
+use crate::page;
+use core::marker::PhantomData;
+use core::ptr::NonNull;
+
+/// An [`Iterator`] of [`page::BorrowedPage`] items owned by a [`Vmalloc`] allocation.
+///
+/// # Guarantees
+///
+/// The pages iterated by the [`Iterator`] appear in the order as they are mapped in the CPU's
+/// virtual address space ascendingly.
+///
+/// # Invariants
+///
+/// - `buf` is a valid and [`page::PAGE_SIZE`] aligned pointer into a [`Vmalloc`] allocation.
+/// - `size` is the number of bytes from `buf` until the end of the [`Vmalloc`] allocation `buf`
+///   points to.
+pub struct VmallocPageIter<'a> {
+    /// The base address of the [`Vmalloc`] buffer.
+    buf: NonNull<u8>,
+    /// The size of the buffer pointed to by `buf` in bytes.
+    size: usize,
+    /// The current page index of the [`Iterator`].
+    index: usize,
+    _p: PhantomData<page::BorrowedPage<'a>>,
+}
+
+impl<'a> Iterator for VmallocPageIter<'a> {
+    type Item = page::BorrowedPage<'a>;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        let offset = self.index.checked_mul(page::PAGE_SIZE)?;
+
+        // Even though `self.size()` may be smaller than `Self::page_count() * page::PAGE_SIZE`, it
+        // is always a number between `(Self::page_count() - 1) * page::PAGE_SIZE` and
+        // `Self::page_count() * page::PAGE_SIZE`, hence the check below is sufficient.
+        if offset < self.size() {
+            self.index += 1;
+        } else {
+            return None;
+        }
+
+        // TODO: Use `NonNull::add()` instead, once the minimum supported compiler version is
+        // bumped to 1.80 or later.
+        //
+        // SAFETY: `offset` is in the interval `[0, (self.page_count() - 1) * page::PAGE_SIZE]`,
+        // hence the resulting pointer is guaranteed to be within the same allocation.
+        let ptr = unsafe { self.buf.as_ptr().add(offset) };
+
+        // SAFETY: `ptr` is guaranteed to be non-null given that it is derived from `self.buf`.
+        let ptr = unsafe { NonNull::new_unchecked(ptr) };
+
+        // SAFETY:
+        // - `ptr` is a valid pointer to a `Vmalloc` allocation.
+        // - `ptr` is valid for the duration of `'a`.
+        Some(unsafe { Vmalloc::to_page(ptr) })
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        let remaining = self.page_count().saturating_sub(self.index);
+
+        (remaining, Some(remaining))
+    }
+}
+
+impl<'a> VmallocPageIter<'a> {
+    /// Creates a new [`VmallocPageIter`] instance.
+    ///
+    /// # Safety
+    ///
+    /// - `buf` must be a [`page::PAGE_SIZE`] aligned pointer into a [`Vmalloc`] allocation.
+    /// - `buf` must be valid for at least the lifetime of `'a`.
+    /// - `size` must be the number of bytes from `buf` until the end of the [`Vmalloc`] allocation
+    ///   `buf` points to.
+    pub unsafe fn new(buf: NonNull<u8>, size: usize) -> Self {
+        // INVARIANT: By the safety requirements, `buf` is a valid and `page::PAGE_SIZE` aligned
+        // pointer into a [`Vmalloc`] allocation.
+        Self {
+            buf,
+            size,
+            index: 0,
+            _p: PhantomData,
+        }
+    }
+
+    /// Returns the size of the backing [`Vmalloc`] allocation in bytes.
+    ///
+    /// Note that this is the size the [`Vmalloc`] allocation has been allocated with. Hence, this
+    /// number may be smaller than `[`Self::page_count`] * [`page::PAGE_SIZE`]`.
+    #[inline]
+    pub fn size(&self) -> usize {
+        self.size
+    }
+
+    /// Returns the number of pages owned by the backing [`Vmalloc`] allocation.
+    #[inline]
+    pub fn page_count(&self) -> usize {
+        self.size().div_ceil(page::PAGE_SIZE)
+    }
+}
diff --git a/rust/kernel/alloc/kbox.rs b/rust/kernel/alloc/kbox.rs
index 27c4b5a9b61d..622b3529edfc 100644
--- a/rust/kernel/alloc/kbox.rs
+++ b/rust/kernel/alloc/kbox.rs
@@ -3,8 +3,8 @@
 //! Implementation of [`Box`].
 
 #[allow(unused_imports)] // Used in doc comments.
-use super::allocator::{KVmalloc, Kmalloc, Vmalloc};
-use super::{AllocError, Allocator, Flags};
+use super::allocator::{KVmalloc, Kmalloc, Vmalloc, VmallocPageIter};
+use super::{AllocError, Allocator, Flags, NumaNode};
 use core::alloc::Layout;
 use core::borrow::{Borrow, BorrowMut};
 use core::marker::PhantomData;
@@ -18,6 +18,7 @@ use core::result::Result;
 use crate::ffi::c_void;
 use crate::fmt;
 use crate::init::InPlaceInit;
+use crate::page::AsPageIter;
 use crate::types::ForeignOwnable;
 
 use pin_init::{InPlaceWrite, Init, PinInit, ZeroableOption};
@@ -273,7 +274,7 @@ where
     /// ```
     pub fn new_uninit(flags: Flags) -> Result<Box<MaybeUninit<T>, A>, AllocError> {
         let layout = Layout::new::<MaybeUninit<T>>();
-        let ptr = A::alloc(layout, flags)?;
+        let ptr = A::alloc(layout, flags, NumaNode::NO_NODE)?;
 
         // INVARIANT: `ptr` is either a dangling pointer or points to memory allocated with `A`,
         // which is sufficient in size and alignment for storing a `T`.
@@ -680,3 +681,40 @@ where
         unsafe { A::free(self.0.cast(), layout) };
     }
 }
+
+/// # Examples
+///
+/// ```
+/// # use kernel::prelude::*;
+/// use kernel::alloc::allocator::VmallocPageIter;
+/// use kernel::page::{AsPageIter, PAGE_SIZE};
+///
+/// let mut vbox = VBox::new((), GFP_KERNEL)?;
+///
+/// assert!(vbox.page_iter().next().is_none());
+///
+/// let mut vbox = VBox::<[u8; PAGE_SIZE]>::new_uninit(GFP_KERNEL)?;
+///
+/// let page = vbox.page_iter().next().expect("At least one page should be available.\n");
+///
+/// // SAFETY: There is no concurrent read or write to the same page.
+/// unsafe { page.fill_zero_raw(0, PAGE_SIZE)? };
+/// # Ok::<(), Error>(())
+/// ```
+impl<T> AsPageIter for VBox<T> {
+    type Iter<'a>
+        = VmallocPageIter<'a>
+    where
+        T: 'a;
+
+    fn page_iter(&mut self) -> Self::Iter<'_> {
+        let ptr = self.0.cast();
+        let size = core::mem::size_of::<T>();
+
+        // SAFETY:
+        // - `ptr` is a valid pointer to the beginning of a `Vmalloc` allocation.
+        // - `ptr` is guaranteed to be valid for the lifetime of `'a`.
+        // - `size` is the size of the `Vmalloc` allocation `ptr` points to.
+        unsafe { VmallocPageIter::new(ptr, size) }
+    }
+}
diff --git a/rust/kernel/alloc/kvec.rs b/rust/kernel/alloc/kvec.rs
index dfc101e03f35..e94aebd084c8 100644
--- a/rust/kernel/alloc/kvec.rs
+++ b/rust/kernel/alloc/kvec.rs
@@ -3,11 +3,14 @@
 //! Implementation of [`Vec`].
 
 use super::{
-    allocator::{KVmalloc, Kmalloc, Vmalloc},
+    allocator::{KVmalloc, Kmalloc, Vmalloc, VmallocPageIter},
     layout::ArrayLayout,
-    AllocError, Allocator, Box, Flags,
+    AllocError, Allocator, Box, Flags, NumaNode,
+};
+use crate::{
+    fmt,
+    page::AsPageIter,
 };
-use crate::fmt;
 use core::{
     borrow::{Borrow, BorrowMut},
     marker::PhantomData,
@@ -644,6 +647,7 @@ where
                 layout.into(),
                 self.layout.into(),
                 flags,
+                NumaNode::NO_NODE,
             )?
         };
 
@@ -1027,6 +1031,43 @@ where
     }
 }
 
+/// # Examples
+///
+/// ```
+/// # use kernel::prelude::*;
+/// use kernel::alloc::allocator::VmallocPageIter;
+/// use kernel::page::{AsPageIter, PAGE_SIZE};
+///
+/// let mut vec = VVec::<u8>::new();
+///
+/// assert!(vec.page_iter().next().is_none());
+///
+/// vec.reserve(PAGE_SIZE, GFP_KERNEL)?;
+///
+/// let page = vec.page_iter().next().expect("At least one page should be available.\n");
+///
+/// // SAFETY: There is no concurrent read or write to the same page.
+/// unsafe { page.fill_zero_raw(0, PAGE_SIZE)? };
+/// # Ok::<(), Error>(())
+/// ```
+impl<T> AsPageIter for VVec<T> {
+    type Iter<'a>
+        = VmallocPageIter<'a>
+    where
+        T: 'a;
+
+    fn page_iter(&mut self) -> Self::Iter<'_> {
+        let ptr = self.ptr.cast();
+        let size = self.layout.size();
+
+        // SAFETY:
+        // - `ptr` is a valid pointer to the beginning of a `Vmalloc` allocation.
+        // - `ptr` is guaranteed to be valid for the lifetime of `'a`.
+        // - `size` is the size of the `Vmalloc` allocation `ptr` points to.
+        unsafe { VmallocPageIter::new(ptr, size) }
+    }
+}
+
 /// An [`Iterator`] implementation for [`Vec`] that moves elements out of a vector.
 ///
 /// This structure is created by the [`Vec::into_iter`] method on [`Vec`] (provided by the
@@ -1121,7 +1162,13 @@ where
         // the type invariant to be smaller than `cap`. Depending on `realloc` this operation
         // may shrink the buffer or leave it as it is.
         ptr = match unsafe {
-            A::realloc(Some(buf.cast()), layout.into(), old_layout.into(), flags)
+            A::realloc(
+                Some(buf.cast()),
+                layout.into(),
+                old_layout.into(),
+                flags,
+                NumaNode::NO_NODE,
+            )
         } {
             // If we fail to shrink, which likely can't even happen, continue with the existing
             // buffer.
diff --git a/rust/kernel/alloc/layout.rs b/rust/kernel/alloc/layout.rs
index 52cbf61c4539..9f8be72feb7a 100644
--- a/rust/kernel/alloc/layout.rs
+++ b/rust/kernel/alloc/layout.rs
@@ -98,6 +98,11 @@ impl<T> ArrayLayout<T> {
     pub const fn is_empty(&self) -> bool {
         self.len == 0
     }
+
+    /// Returns the size of the [`ArrayLayout`] in bytes.
+    pub const fn size(&self) -> usize {
+        self.len() * core::mem::size_of::<T>()
+    }
 }
 
 impl<T> From<ArrayLayout<T>> for Layout {
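
The doc-tests in the diff above exercise a single page. As an illustration of how the added pieces fit together (`AsPageIter`, `VmallocPageIter`, and `page::BorrowedPage::fill_zero_raw`), here is a minimal sketch, not part of the patch, that walks every page backing a `VVec<u8>` and zeroes it; it only uses APIs visible in the diff, but the function name `zero_backing_pages` and the three-page buffer size are illustrative assumptions.

use kernel::page::{AsPageIter, PAGE_SIZE};
use kernel::prelude::*;

/// Illustrative helper (not part of the patch): zero all pages backing a `VVec<u8>`.
fn zero_backing_pages() -> Result {
    let mut vec = VVec::<u8>::new();

    // Reserve three pages worth of capacity. `VVec::page_iter()` derives its size from the
    // capacity layout (`self.layout.size()`), so the iterator walks the reserved pages even
    // though the vector holds no elements yet.
    vec.reserve(3 * PAGE_SIZE, GFP_KERNEL)?;

    // Per `VmallocPageIter`'s guarantee, pages are yielded in ascending order of the virtual
    // addresses they are mapped at.
    for page in vec.page_iter() {
        // SAFETY: The buffer was just allocated and is not shared, so there is no concurrent
        // read or write to the same page.
        unsafe { page.fill_zero_raw(0, PAGE_SIZE)? };
    }

    Ok(())
}

Note that the higher-level containers keep their previous NUMA behaviour: `Box` and `Vec` pass `NumaNode::NO_NODE` down to `Allocator::alloc()`/`realloc()`, so only callers that explicitly request a specific node see different placement.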