Diffstat (limited to 'rust/kernel/alloc')
-rw-r--r--  rust/kernel/alloc/allocator.rs        60
-rw-r--r--  rust/kernel/alloc/allocator/iter.rs  102
-rw-r--r--  rust/kernel/alloc/allocator_test.rs  124
-rw-r--r--  rust/kernel/alloc/kbox.rs            132
-rw-r--r--  rust/kernel/alloc/kvec.rs             66
-rw-r--r--  rust/kernel/alloc/kvec/errors.rs       2
-rw-r--r--  rust/kernel/alloc/layout.rs            7
7 files changed, 353 insertions, 140 deletions
diff --git a/rust/kernel/alloc/allocator.rs b/rust/kernel/alloc/allocator.rs
index 2692cf90c948..08fd31bf72d2 100644
--- a/rust/kernel/alloc/allocator.rs
+++ b/rust/kernel/alloc/allocator.rs
@@ -15,8 +15,14 @@ use core::ptr::NonNull;
use crate::alloc::{AllocError, Allocator};
use crate::bindings;
+use crate::page;
use crate::pr_warn;
+const ARCH_KMALLOC_MINALIGN: usize = bindings::ARCH_KMALLOC_MINALIGN;
+
+mod iter;
+pub use self::iter::VmallocPageIter;
+
/// The contiguous kernel allocator.
///
/// `Kmalloc` is typically used for physically contiguous allocations up to page size, but also
@@ -128,6 +134,8 @@ impl Kmalloc {
// - passing a pointer to a valid memory allocation is OK,
// - `realloc` satisfies the guarantees, since `ReallocFunc::call` has the same.
unsafe impl Allocator for Kmalloc {
+ const MIN_ALIGN: usize = ARCH_KMALLOC_MINALIGN;
+
#[inline]
unsafe fn realloc(
ptr: Option<NonNull<u8>>,
@@ -142,11 +150,61 @@ unsafe impl Allocator for Kmalloc {
}
}
+impl Vmalloc {
+ /// Convert a pointer to a [`Vmalloc`] allocation to a [`page::BorrowedPage`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use core::ptr::{NonNull, from_mut};
+ /// # use kernel::{page, prelude::*};
+ /// use kernel::alloc::allocator::Vmalloc;
+ ///
+ /// let mut vbox = VBox::<[u8; page::PAGE_SIZE]>::new_uninit(GFP_KERNEL)?;
+ ///
+ /// {
+ /// // SAFETY: By the type invariant of `Box` the inner pointer of `vbox` is non-null.
+ /// let ptr = unsafe { NonNull::new_unchecked(from_mut(&mut *vbox)) };
+ ///
+ /// // SAFETY:
+ /// // `ptr` is a valid pointer to a `Vmalloc` allocation.
+ /// // `ptr` is valid for the entire lifetime of `page`.
+ /// let page = unsafe { Vmalloc::to_page(ptr.cast()) };
+ ///
+ /// // SAFETY: There is no concurrent read or write to the same page.
+ /// unsafe { page.fill_zero_raw(0, page::PAGE_SIZE)? };
+ /// }
+ /// # Ok::<(), Error>(())
+ /// ```
+ ///
+ /// # Safety
+ ///
+ /// - `ptr` must be a valid pointer to a [`Vmalloc`] allocation.
+ /// - `ptr` must remain valid for the entire duration of `'a`.
+ pub unsafe fn to_page<'a>(ptr: NonNull<u8>) -> page::BorrowedPage<'a> {
+ // SAFETY: `ptr` is a valid pointer to `Vmalloc` memory.
+ let page = unsafe { bindings::vmalloc_to_page(ptr.as_ptr().cast()) };
+
+ // SAFETY: `vmalloc_to_page` returns a valid pointer to a `struct page` for a valid pointer
+ // to `Vmalloc` memory.
+ let page = unsafe { NonNull::new_unchecked(page) };
+
+ // SAFETY:
+ // - `page` is a valid pointer to a `struct page`, given that by the safety requirements of
+ // this function `ptr` is a valid pointer to a `Vmalloc` allocation.
+ // - By the safety requirements of this function `ptr` is valid for the entire lifetime of
+ // `'a`.
+ unsafe { page::BorrowedPage::from_raw(page) }
+ }
+}
+
// SAFETY: `realloc` delegates to `ReallocFunc::call`, which guarantees that
// - memory remains valid until it is explicitly freed,
// - passing a pointer to a valid memory allocation is OK,
// - `realloc` satisfies the guarantees, since `ReallocFunc::call` has the same.
unsafe impl Allocator for Vmalloc {
+ const MIN_ALIGN: usize = kernel::page::PAGE_SIZE;
+
#[inline]
unsafe fn realloc(
ptr: Option<NonNull<u8>>,
@@ -171,6 +229,8 @@ unsafe impl Allocator for Vmalloc {
// - passing a pointer to a valid memory allocation is OK,
// - `realloc` satisfies the guarantees, since `ReallocFunc::call` has the same.
unsafe impl Allocator for KVmalloc {
+ const MIN_ALIGN: usize = ARCH_KMALLOC_MINALIGN;
+
#[inline]
unsafe fn realloc(
ptr: Option<NonNull<u8>>,
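
The new `MIN_ALIGN` constants record the minimum alignment each allocator guarantees for any
allocation, independent of the requested layout. Below is a minimal usage sketch (not part of the
diff), assuming the `Allocator` trait declares the `MIN_ALIGN` associated constant (its definition
in `rust/kernel/alloc.rs` lies outside this directory-limited diff); `min_align_example` is just an
illustrative name.

```
use kernel::alloc::{allocator::Kmalloc, Allocator};
use kernel::prelude::*;

fn min_align_example() -> Result {
    // Even a `u8`, which only requires an alignment of 1, is returned at least
    // `ARCH_KMALLOC_MINALIGN`-aligned by `Kmalloc`.
    let b = KBox::new(0u8, GFP_KERNEL)?;
    let ptr = KBox::into_raw(b);

    assert_eq!(ptr as usize % Kmalloc::MIN_ALIGN, 0);

    // Re-own the allocation so it is freed again.
    // SAFETY: `ptr` was just produced by `KBox::into_raw()` above.
    let _b = unsafe { KBox::<u8>::from_raw(ptr) };
    Ok(())
}
```
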
diff --git a/rust/kernel/alloc/allocator/iter.rs b/rust/kernel/alloc/allocator/iter.rs
new file mode 100644
index 000000000000..5759f86029b7
--- /dev/null
+++ b/rust/kernel/alloc/allocator/iter.rs
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use super::Vmalloc;
+use crate::page;
+use core::marker::PhantomData;
+use core::ptr::NonNull;
+
+/// An [`Iterator`] of [`page::BorrowedPage`] items owned by a [`Vmalloc`] allocation.
+///
+/// # Guarantees
+///
+/// The pages are iterated in the order in which they are mapped in the CPU's virtual address
+/// space, in ascending order.
+///
+/// # Invariants
+///
+/// - `buf` is a valid and [`page::PAGE_SIZE`] aligned pointer into a [`Vmalloc`] allocation.
+/// - `size` is the number of bytes from `buf` until the end of the [`Vmalloc`] allocation `buf`
+/// points to.
+pub struct VmallocPageIter<'a> {
+ /// The base address of the [`Vmalloc`] buffer.
+ buf: NonNull<u8>,
+ /// The size of the buffer pointed to by `buf` in bytes.
+ size: usize,
+ /// The current page index of the [`Iterator`].
+ index: usize,
+ _p: PhantomData<page::BorrowedPage<'a>>,
+}
+
+impl<'a> Iterator for VmallocPageIter<'a> {
+ type Item = page::BorrowedPage<'a>;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ let offset = self.index.checked_mul(page::PAGE_SIZE)?;
+
+        // Even though `self.size()` may be smaller than `self.page_count() * page::PAGE_SIZE`, it
+        // is always a number between `(self.page_count() - 1) * page::PAGE_SIZE` and
+        // `self.page_count() * page::PAGE_SIZE`, hence the check below is sufficient.
+ if offset < self.size() {
+ self.index += 1;
+ } else {
+ return None;
+ }
+
+ // TODO: Use `NonNull::add()` instead, once the minimum supported compiler version is
+ // bumped to 1.80 or later.
+ //
+ // SAFETY: `offset` is in the interval `[0, (self.page_count() - 1) * page::PAGE_SIZE]`,
+ // hence the resulting pointer is guaranteed to be within the same allocation.
+ let ptr = unsafe { self.buf.as_ptr().add(offset) };
+
+ // SAFETY: `ptr` is guaranteed to be non-null given that it is derived from `self.buf`.
+ let ptr = unsafe { NonNull::new_unchecked(ptr) };
+
+ // SAFETY:
+ // - `ptr` is a valid pointer to a `Vmalloc` allocation.
+ // - `ptr` is valid for the duration of `'a`.
+ Some(unsafe { Vmalloc::to_page(ptr) })
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let remaining = self.page_count().saturating_sub(self.index);
+
+ (remaining, Some(remaining))
+ }
+}
+
+impl<'a> VmallocPageIter<'a> {
+ /// Creates a new [`VmallocPageIter`] instance.
+ ///
+ /// # Safety
+ ///
+ /// - `buf` must be a [`page::PAGE_SIZE`] aligned pointer into a [`Vmalloc`] allocation.
+ /// - `buf` must be valid for at least the lifetime of `'a`.
+ /// - `size` must be the number of bytes from `buf` until the end of the [`Vmalloc`] allocation
+ /// `buf` points to.
+ pub unsafe fn new(buf: NonNull<u8>, size: usize) -> Self {
+ // INVARIANT: By the safety requirements, `buf` is a valid and `page::PAGE_SIZE` aligned
+ // pointer into a [`Vmalloc`] allocation.
+ Self {
+ buf,
+ size,
+ index: 0,
+ _p: PhantomData,
+ }
+ }
+
+ /// Returns the size of the backing [`Vmalloc`] allocation in bytes.
+ ///
+ /// Note that this is the size the [`Vmalloc`] allocation has been allocated with. Hence, this
+    /// number may be smaller than [`Self::page_count`] times [`page::PAGE_SIZE`].
+ #[inline]
+ pub fn size(&self) -> usize {
+ self.size
+ }
+
+ /// Returns the number of pages owned by the backing [`Vmalloc`] allocation.
+ #[inline]
+ pub fn page_count(&self) -> usize {
+ self.size().div_ceil(page::PAGE_SIZE)
+ }
+}
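
A usage sketch for the new iterator (not part of the diff), relying on the `AsPageIter`
implementation for `VVec` added further down in this patch; `page_iter_example` is a hypothetical
helper name.

```
use kernel::page::{AsPageIter, PAGE_SIZE};
use kernel::prelude::*;

fn page_iter_example() -> Result {
    // Reserve one and a half pages; the allocation is still backed by two full pages.
    let mut vec = VVec::<u8>::with_capacity(PAGE_SIZE + PAGE_SIZE / 2, GFP_KERNEL)?;

    let iter = vec.page_iter();

    // `size_hint()` is exact and derived from `page_count()`, which rounds up.
    assert_eq!(iter.size_hint(), (2, Some(2)));
    assert_eq!(iter.count(), 2);

    Ok(())
}
```
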
diff --git a/rust/kernel/alloc/allocator_test.rs b/rust/kernel/alloc/allocator_test.rs
deleted file mode 100644
index 90dd987d40e4..000000000000
--- a/rust/kernel/alloc/allocator_test.rs
+++ /dev/null
@@ -1,124 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-//! So far the kernel's `Box` and `Vec` types can't be used by userspace test cases, since all users
-//! of those types (e.g. `CString`) use kernel allocators for instantiation.
-//!
-//! In order to allow userspace test cases to make use of such types as well, implement the
-//! `Cmalloc` allocator within the `allocator_test` module and type alias all kernel allocators to
-//! `Cmalloc`. The `Cmalloc` allocator uses libc's `realloc()` function as allocator backend.
-
-#![allow(missing_docs)]
-
-use super::{flags::*, AllocError, Allocator, Flags};
-use core::alloc::Layout;
-use core::cmp;
-use core::ptr;
-use core::ptr::NonNull;
-
-/// The userspace allocator based on libc.
-pub struct Cmalloc;
-
-pub type Kmalloc = Cmalloc;
-pub type Vmalloc = Kmalloc;
-pub type KVmalloc = Kmalloc;
-
-impl Cmalloc {
- /// Returns a [`Layout`] that makes [`Kmalloc`] fulfill the requested size and alignment of
- /// `layout`.
- pub fn aligned_layout(layout: Layout) -> Layout {
- // Note that `layout.size()` (after padding) is guaranteed to be a multiple of
- // `layout.align()` which together with the slab guarantees means that `Kmalloc` will return
- // a properly aligned object (see comments in `kmalloc()` for more information).
- layout.pad_to_align()
- }
-}
-
-extern "C" {
- #[link_name = "aligned_alloc"]
- fn libc_aligned_alloc(align: usize, size: usize) -> *mut crate::ffi::c_void;
-
- #[link_name = "free"]
- fn libc_free(ptr: *mut crate::ffi::c_void);
-}
-
-// SAFETY:
-// - memory remains valid until it is explicitly freed,
-// - passing a pointer to a valid memory allocation created by this `Allocator` is always OK,
-// - `realloc` provides the guarantees as provided in the `# Guarantees` section.
-unsafe impl Allocator for Cmalloc {
- unsafe fn realloc(
- ptr: Option<NonNull<u8>>,
- layout: Layout,
- old_layout: Layout,
- flags: Flags,
- ) -> Result<NonNull<[u8]>, AllocError> {
- let src = match ptr {
- Some(src) => {
- if old_layout.size() == 0 {
- ptr::null_mut()
- } else {
- src.as_ptr()
- }
- }
- None => ptr::null_mut(),
- };
-
- if layout.size() == 0 {
- // SAFETY: `src` is either NULL or was previously allocated with this `Allocator`
- unsafe { libc_free(src.cast()) };
-
- return Ok(NonNull::slice_from_raw_parts(
- crate::alloc::dangling_from_layout(layout),
- 0,
- ));
- }
-
- // ISO C (ISO/IEC 9899:2011) defines `aligned_alloc`:
- //
- // > The value of alignment shall be a valid alignment supported by the implementation
- // [...].
- //
- // As an example of the "supported by the implementation" requirement, POSIX.1-2001 (IEEE
- // 1003.1-2001) defines `posix_memalign`:
- //
- // > The value of alignment shall be a power of two multiple of sizeof (void *).
- //
- // and POSIX-based implementations of `aligned_alloc` inherit this requirement. At the time
- // of writing, this is known to be the case on macOS (but not in glibc).
- //
- // Satisfy the stricter requirement to avoid spurious test failures on some platforms.
- let min_align = core::mem::size_of::<*const crate::ffi::c_void>();
- let layout = layout.align_to(min_align).map_err(|_| AllocError)?;
- let layout = layout.pad_to_align();
-
- // SAFETY: Returns either NULL or a pointer to a memory allocation that satisfies or
- // exceeds the given size and alignment requirements.
- let dst = unsafe { libc_aligned_alloc(layout.align(), layout.size()) }.cast::<u8>();
- let dst = NonNull::new(dst).ok_or(AllocError)?;
-
- if flags.contains(__GFP_ZERO) {
- // SAFETY: The preceding calls to `libc_aligned_alloc` and `NonNull::new`
- // guarantee that `dst` points to memory of at least `layout.size()` bytes.
- unsafe { dst.as_ptr().write_bytes(0, layout.size()) };
- }
-
- if !src.is_null() {
- // SAFETY:
- // - `src` has previously been allocated with this `Allocator`; `dst` has just been
- // newly allocated, hence the memory regions do not overlap.
- // - both` src` and `dst` are properly aligned and valid for reads and writes
- unsafe {
- ptr::copy_nonoverlapping(
- src,
- dst.as_ptr(),
- cmp::min(layout.size(), old_layout.size()),
- )
- };
- }
-
- // SAFETY: `src` is either NULL or was previously allocated with this `Allocator`
- unsafe { libc_free(src.cast()) };
-
- Ok(NonNull::slice_from_raw_parts(dst, layout.size()))
- }
-}
diff --git a/rust/kernel/alloc/kbox.rs b/rust/kernel/alloc/kbox.rs
index 856d05aa60f1..2137c3700004 100644
--- a/rust/kernel/alloc/kbox.rs
+++ b/rust/kernel/alloc/kbox.rs
@@ -3,11 +3,10 @@
//! Implementation of [`Box`].
#[allow(unused_imports)] // Used in doc comments.
-use super::allocator::{KVmalloc, Kmalloc, Vmalloc};
+use super::allocator::{KVmalloc, Kmalloc, Vmalloc, VmallocPageIter};
use super::{AllocError, Allocator, Flags};
use core::alloc::Layout;
use core::borrow::{Borrow, BorrowMut};
-use core::fmt;
use core::marker::PhantomData;
use core::mem::ManuallyDrop;
use core::mem::MaybeUninit;
@@ -17,7 +16,9 @@ use core::ptr::NonNull;
use core::result::Result;
use crate::ffi::c_void;
+use crate::fmt;
use crate::init::InPlaceInit;
+use crate::page::AsPageIter;
use crate::types::ForeignOwnable;
use pin_init::{InPlaceWrite, Init, PinInit, ZeroableOption};
@@ -290,6 +291,83 @@ where
Ok(Self::new(x, flags)?.into())
}
+ /// Construct a pinned slice of elements `Pin<Box<[T], A>>`.
+ ///
+    /// This is a convenient way to create e.g. slices of structures containing spinlocks or
+    /// mutexes.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use kernel::sync::{new_spinlock, SpinLock};
+ ///
+ /// struct Inner {
+ /// a: u32,
+ /// b: u32,
+ /// }
+ ///
+ /// #[pin_data]
+ /// struct Example {
+ /// c: u32,
+ /// #[pin]
+ /// d: SpinLock<Inner>,
+ /// }
+ ///
+ /// impl Example {
+ /// fn new() -> impl PinInit<Self, Error> {
+ /// try_pin_init!(Self {
+ /// c: 10,
+ /// d <- new_spinlock!(Inner { a: 20, b: 30 }),
+ /// })
+ /// }
+ /// }
+ ///
+ /// // Allocate a boxed slice of 10 `Example`s.
+ /// let s = KBox::pin_slice(
+    ///     |_i| Example::new(),
+ /// 10,
+ /// GFP_KERNEL
+ /// )?;
+ ///
+ /// assert_eq!(s[5].c, 10);
+ /// assert_eq!(s[3].d.lock().a, 20);
+ /// # Ok::<(), Error>(())
+ /// ```
+ pub fn pin_slice<Func, Item, E>(
+ mut init: Func,
+ len: usize,
+ flags: Flags,
+ ) -> Result<Pin<Box<[T], A>>, E>
+ where
+ Func: FnMut(usize) -> Item,
+ Item: PinInit<T, E>,
+ E: From<AllocError>,
+ {
+ let mut buffer = super::Vec::<T, A>::with_capacity(len, flags)?;
+ for i in 0..len {
+ let ptr = buffer.spare_capacity_mut().as_mut_ptr().cast();
+ // SAFETY:
+ // - `ptr` is a valid pointer to uninitialized memory.
+ // - `ptr` is not used if an error is returned.
+            // - The value `ptr` points to won't be moved until it is dropped, i.e. it is pinned.
+ unsafe { init(i).__pinned_init(ptr)? };
+
+ // SAFETY:
+ // - `i + 1 <= len`, hence we don't exceed the capacity, due to the call to
+ // `with_capacity()` above.
+            // - The new value at index `buffer.len()` is the only element being added here, and
+ // it has been initialized above by `init(i).__pinned_init(ptr)`.
+ unsafe { buffer.inc_len(1) };
+ }
+
+ let (ptr, _, _) = buffer.into_raw_parts();
+ let slice = core::ptr::slice_from_raw_parts_mut(ptr, len);
+
+ // SAFETY: `slice` points to an allocation allocated with `A` (`buffer`) and holds a valid
+ // `[T]`.
+ Ok(Pin::from(unsafe { Box::from_raw(slice) }))
+ }
+
/// Convert a [`Box<T,A>`] to a [`Pin<Box<T,A>>`]. If `T` does not implement
/// [`Unpin`], then `x` will be pinned in memory and can't be moved.
pub fn into_pin(this: Self) -> Pin<Self> {
@@ -401,12 +479,17 @@ where
}
// SAFETY: The pointer returned by `into_foreign` comes from a well aligned
-// pointer to `T`.
+// pointer to `T` allocated by `A`.
unsafe impl<T: 'static, A> ForeignOwnable for Box<T, A>
where
A: Allocator,
{
- const FOREIGN_ALIGN: usize = core::mem::align_of::<T>();
+ const FOREIGN_ALIGN: usize = if core::mem::align_of::<T>() < A::MIN_ALIGN {
+ A::MIN_ALIGN
+ } else {
+ core::mem::align_of::<T>()
+ };
+
type Borrowed<'a> = &'a T;
type BorrowedMut<'a> = &'a mut T;
@@ -435,12 +518,12 @@ where
}
// SAFETY: The pointer returned by `into_foreign` comes from a well aligned
-// pointer to `T`.
+// pointer to `T` allocated by `A`.
unsafe impl<T: 'static, A> ForeignOwnable for Pin<Box<T, A>>
where
A: Allocator,
{
- const FOREIGN_ALIGN: usize = core::mem::align_of::<T>();
+ const FOREIGN_ALIGN: usize = <Box<T, A> as ForeignOwnable>::FOREIGN_ALIGN;
type Borrowed<'a> = Pin<&'a T>;
type BorrowedMut<'a> = Pin<&'a mut T>;
@@ -598,3 +681,40 @@ where
unsafe { A::free(self.0.cast(), layout) };
}
}
+
+/// # Examples
+///
+/// ```
+/// # use kernel::prelude::*;
+/// use kernel::alloc::allocator::VmallocPageIter;
+/// use kernel::page::{AsPageIter, PAGE_SIZE};
+///
+/// let mut vbox = VBox::new((), GFP_KERNEL)?;
+///
+/// assert!(vbox.page_iter().next().is_none());
+///
+/// let mut vbox = VBox::<[u8; PAGE_SIZE]>::new_uninit(GFP_KERNEL)?;
+///
+/// let page = vbox.page_iter().next().expect("At least one page should be available.\n");
+///
+/// // SAFETY: There is no concurrent read or write to the same page.
+/// unsafe { page.fill_zero_raw(0, PAGE_SIZE)? };
+/// # Ok::<(), Error>(())
+/// ```
+impl<T> AsPageIter for VBox<T> {
+ type Iter<'a>
+ = VmallocPageIter<'a>
+ where
+ T: 'a;
+
+ fn page_iter(&mut self) -> Self::Iter<'_> {
+ let ptr = self.0.cast();
+ let size = core::mem::size_of::<T>();
+
+ // SAFETY:
+ // - `ptr` is a valid pointer to the beginning of a `Vmalloc` allocation.
+ // - `ptr` is guaranteed to be valid for the lifetime of `'a`.
+ // - `size` is the size of the `Vmalloc` allocation `ptr` points to.
+ unsafe { VmallocPageIter::new(ptr, size) }
+ }
+}
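
The `FOREIGN_ALIGN` change advertises the allocator's minimum alignment to `ForeignOwnable` users,
who may rely on the low bits of the foreign pointer being zero (e.g. for pointer tagging). A sketch
(not part of the diff), under the assumption that `into_foreign()` and `from_foreign()` keep their
current signatures; `foreign_align_example` is an illustrative name.

```
use kernel::page::PAGE_SIZE;
use kernel::prelude::*;
use kernel::types::ForeignOwnable;

fn foreign_align_example() -> Result {
    // `u8` only requires an alignment of 1, but `Vmalloc` guarantees page alignment,
    // which `FOREIGN_ALIGN` now reflects.
    assert!(<VBox<u8> as ForeignOwnable>::FOREIGN_ALIGN >= PAGE_SIZE);

    let b = VBox::new(0u8, GFP_KERNEL)?;
    let ptr = b.into_foreign();

    assert_eq!(ptr as usize % PAGE_SIZE, 0);

    // SAFETY: `ptr` was just produced by `into_foreign()` above.
    let _b = unsafe { VBox::<u8>::from_foreign(ptr) };
    Ok(())
}
```
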
diff --git a/rust/kernel/alloc/kvec.rs b/rust/kernel/alloc/kvec.rs
index 3c72e0bdddb8..5c3496b31e8b 100644
--- a/rust/kernel/alloc/kvec.rs
+++ b/rust/kernel/alloc/kvec.rs
@@ -3,13 +3,16 @@
//! Implementation of [`Vec`].
use super::{
- allocator::{KVmalloc, Kmalloc, Vmalloc},
+ allocator::{KVmalloc, Kmalloc, Vmalloc, VmallocPageIter},
layout::ArrayLayout,
AllocError, Allocator, Box, Flags,
};
+use crate::{
+ fmt,
+ page::AsPageIter,
+};
use core::{
borrow::{Borrow, BorrowMut},
- fmt,
marker::PhantomData,
mem::{ManuallyDrop, MaybeUninit},
ops::Deref,
@@ -175,7 +178,7 @@ where
/// Returns the number of elements that can be stored within the vector without allocating
/// additional memory.
- pub fn capacity(&self) -> usize {
+ pub const fn capacity(&self) -> usize {
if const { Self::is_zst() } {
usize::MAX
} else {
@@ -185,7 +188,7 @@ where
/// Returns the number of elements stored within the vector.
#[inline]
- pub fn len(&self) -> usize {
+ pub const fn len(&self) -> usize {
self.len
}
@@ -196,7 +199,7 @@ where
/// - `additional` must be less than or equal to `self.capacity - self.len`.
/// - All elements within the interval [`self.len`,`self.len + additional`) must be initialized.
#[inline]
- pub unsafe fn inc_len(&mut self, additional: usize) {
+ pub const unsafe fn inc_len(&mut self, additional: usize) {
// Guaranteed by the type invariant to never underflow.
debug_assert!(additional <= self.capacity() - self.len());
// INVARIANT: By the safety requirements of this method this represents the exact number of
@@ -224,6 +227,16 @@ where
}
/// Returns a slice of the entire vector.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = KVec::new();
+ /// v.push(1, GFP_KERNEL)?;
+ /// v.push(2, GFP_KERNEL)?;
+ /// assert_eq!(v.as_slice(), &[1, 2]);
+ /// # Ok::<(), Error>(())
+ /// ```
#[inline]
pub fn as_slice(&self) -> &[T] {
self
@@ -245,7 +258,7 @@ where
/// Returns a raw pointer to the vector's backing buffer, or, if `T` is a ZST, a dangling raw
/// pointer.
#[inline]
- pub fn as_ptr(&self) -> *const T {
+ pub const fn as_ptr(&self) -> *const T {
self.ptr.as_ptr()
}
@@ -261,7 +274,7 @@ where
/// assert!(!v.is_empty());
/// ```
#[inline]
- pub fn is_empty(&self) -> bool {
+ pub const fn is_empty(&self) -> bool {
self.len() == 0
}
@@ -1017,6 +1030,43 @@ where
}
}
+/// # Examples
+///
+/// ```
+/// # use kernel::prelude::*;
+/// use kernel::alloc::allocator::VmallocPageIter;
+/// use kernel::page::{AsPageIter, PAGE_SIZE};
+///
+/// let mut vec = VVec::<u8>::new();
+///
+/// assert!(vec.page_iter().next().is_none());
+///
+/// vec.reserve(PAGE_SIZE, GFP_KERNEL)?;
+///
+/// let page = vec.page_iter().next().expect("At least one page should be available.\n");
+///
+/// // SAFETY: There is no concurrent read or write to the same page.
+/// unsafe { page.fill_zero_raw(0, PAGE_SIZE)? };
+/// # Ok::<(), Error>(())
+/// ```
+impl<T> AsPageIter for VVec<T> {
+ type Iter<'a>
+ = VmallocPageIter<'a>
+ where
+ T: 'a;
+
+ fn page_iter(&mut self) -> Self::Iter<'_> {
+ let ptr = self.ptr.cast();
+ let size = self.layout.size();
+
+ // SAFETY:
+ // - `ptr` is a valid pointer to the beginning of a `Vmalloc` allocation.
+ // - `ptr` is guaranteed to be valid for the lifetime of `'a`.
+ // - `size` is the size of the `Vmalloc` allocation `ptr` points to.
+ unsafe { VmallocPageIter::new(ptr, size) }
+ }
+}
+
/// An [`Iterator`] implementation for [`Vec`] that moves elements out of a vector.
///
/// This structure is created by the [`Vec::into_iter`] method on [`Vec`] (provided by the
@@ -1294,7 +1344,7 @@ impl<'vec, T> Drop for DrainAll<'vec, T> {
}
}
-#[macros::kunit_tests(rust_kvec_kunit)]
+#[macros::kunit_tests(rust_kvec)]
mod tests {
use super::*;
use crate::prelude::*;
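
Since both `VBox` and `VVec` now implement `AsPageIter`, callers can be written generically over
anything backed by `Vmalloc` pages. A sketch (not part of the diff), assuming the trait's `Iter`
associated type is bound to yield [`page::BorrowedPage`] items, as the two implementations above
suggest; `zero_pages` and `generic_example` are hypothetical helpers.

```
use kernel::page::{AsPageIter, PAGE_SIZE};
use kernel::prelude::*;

/// Zeroes every page backing `buf`, no matter whether it is a `VBox` or a `VVec`.
fn zero_pages<B: AsPageIter>(buf: &mut B) -> Result {
    for page in buf.page_iter() {
        // SAFETY: There is no concurrent read or write to the same page.
        unsafe { page.fill_zero_raw(0, PAGE_SIZE)? };
    }
    Ok(())
}

fn generic_example() -> Result {
    let mut vbox = VBox::<[u8; PAGE_SIZE]>::new_uninit(GFP_KERNEL)?;
    let mut vec = VVec::<u8>::with_capacity(PAGE_SIZE, GFP_KERNEL)?;

    zero_pages(&mut vbox)?;
    zero_pages(&mut vec)?;
    Ok(())
}
```
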
diff --git a/rust/kernel/alloc/kvec/errors.rs b/rust/kernel/alloc/kvec/errors.rs
index 348b8d27e102..21a920a4b09b 100644
--- a/rust/kernel/alloc/kvec/errors.rs
+++ b/rust/kernel/alloc/kvec/errors.rs
@@ -2,7 +2,7 @@
//! Errors for the [`Vec`] type.
-use core::fmt::{self, Debug, Formatter};
+use kernel::fmt::{self, Debug, Formatter};
use kernel::prelude::*;
/// Error type for [`Vec::push_within_capacity`].
diff --git a/rust/kernel/alloc/layout.rs b/rust/kernel/alloc/layout.rs
index 93ed514f7cc7..9f8be72feb7a 100644
--- a/rust/kernel/alloc/layout.rs
+++ b/rust/kernel/alloc/layout.rs
@@ -80,7 +80,7 @@ impl<T> ArrayLayout<T> {
/// # Safety
///
/// `len` must be a value, for which `len * size_of::<T>() <= isize::MAX` is true.
- pub unsafe fn new_unchecked(len: usize) -> Self {
+ pub const unsafe fn new_unchecked(len: usize) -> Self {
// INVARIANT: By the safety requirements of this function
// `len * size_of::<T>() <= isize::MAX`.
Self {
@@ -98,6 +98,11 @@ impl<T> ArrayLayout<T> {
pub const fn is_empty(&self) -> bool {
self.len == 0
}
+
+ /// Returns the size of the [`ArrayLayout`] in bytes.
+ pub const fn size(&self) -> usize {
+ self.len() * core::mem::size_of::<T>()
+ }
}
impl<T> From<ArrayLayout<T>> for Layout {
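
The new `ArrayLayout::size()` helper is what the `VVec` page iterator above uses to determine the
extent of the backing allocation. A small sketch of its arithmetic (not part of the diff), assuming
the existing `From<LayoutError>` conversion for `Error` so `?` works; `array_layout_example` is an
illustrative name.

```
use kernel::alloc::layout::ArrayLayout;
use kernel::prelude::*;

fn array_layout_example() -> Result {
    // Layout of `[u64; 4]`: the size is simply `len * size_of::<T>()`.
    let layout = ArrayLayout::<u64>::new(4)?;

    assert_eq!(layout.len(), 4);
    assert_eq!(layout.size(), 4 * core::mem::size_of::<u64>());
    assert!(!layout.is_empty());

    Ok(())
}
```
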