Diffstat (limited to 'rust/kernel/time.rs')
-rw-r--r--  rust/kernel/time.rs  163
1 file changed, 162 insertions(+), 1 deletion(-)
diff --git a/rust/kernel/time.rs b/rust/kernel/time.rs
index 64c8dcf548d6..6ea98dfcd027 100644
--- a/rust/kernel/time.rs
+++ b/rust/kernel/time.rs
@@ -25,6 +25,7 @@
//! C header: [`include/linux/ktime.h`](srctree/include/linux/ktime.h).
use core::marker::PhantomData;
+use core::ops;
pub mod delay;
pub mod hrtimer;
@@ -200,9 +201,31 @@ impl<C: ClockSource> Instant<C> {
pub(crate) fn as_nanos(&self) -> i64 {
self.inner
}
+
+ /// Create an [`Instant`] from a `ktime_t` without checking that it is non-negative.
+ ///
+ /// # Panics
+ ///
+ /// On debug builds, this function will panic if `ktime` is not in the range from 0 to
+ /// `KTIME_MAX`.
+ ///
+ /// # Safety
+ ///
+ /// The caller promises that `ktime` is in the range from 0 to `KTIME_MAX`.
+ #[inline]
+ pub(crate) unsafe fn from_ktime(ktime: bindings::ktime_t) -> Self {
+ debug_assert!(ktime >= 0);
+
+ // INVARIANT: Our safety contract ensures that `ktime` is in the range from 0 to
+ // `KTIME_MAX`.
+ Self {
+ inner: ktime,
+ _c: PhantomData,
+ }
+ }
}
-impl<C: ClockSource> core::ops::Sub for Instant<C> {
+impl<C: ClockSource> ops::Sub for Instant<C> {
type Output = Delta;
// By the type invariant, it never overflows.
@@ -214,6 +237,46 @@ impl<C: ClockSource> core::ops::Sub for Instant<C> {
}
}
+impl<C: ClockSource> ops::Add<Delta> for Instant<C> {
+ type Output = Self;
+
+ #[inline]
+ fn add(self, rhs: Delta) -> Self::Output {
+ // INVARIANT: With arithmetic overflow checks enabled, this addition panics if the result
+ // overflows, i.e. goes above `KTIME_MAX`.
+ let res = self.inner + rhs.nanos;
+
+ // INVARIANT: With overflow checks enabled, verify that the result is still non-negative.
+ #[cfg(CONFIG_RUST_OVERFLOW_CHECKS)]
+ assert!(res >= 0);
+
+ Self {
+ inner: res,
+ _c: PhantomData,
+ }
+ }
+}
+
+impl<C: ClockSource> ops::Sub<Delta> for Instant<C> {
+ type Output = Self;
+
+ #[inline]
+ fn sub(self, rhs: Delta) -> Self::Output {
+ // INVARIANT: With arithmetic overflow checks enabled, this subtraction panics if the result
+ // overflows, i.e. goes above `KTIME_MAX`.
+ let res = self.inner - rhs.nanos;
+
+ // INVARIANT: With overflow checks enabled, verify that the result is still non-negative.
+ #[cfg(CONFIG_RUST_OVERFLOW_CHECKS)]
+ assert!(res >= 0);
+
+ Self {
+ inner: res,
+ _c: PhantomData,
+ }
+ }
+}
+
/// A span of time.
///
/// This struct represents a span of time, with its value stored as nanoseconds.
@@ -224,6 +287,78 @@ pub struct Delta {
nanos: i64,
}
+impl ops::Add for Delta {
+ type Output = Self;
+
+ #[inline]
+ fn add(self, rhs: Self) -> Self {
+ Self {
+ nanos: self.nanos + rhs.nanos,
+ }
+ }
+}
+
+impl ops::AddAssign for Delta {
+ #[inline]
+ fn add_assign(&mut self, rhs: Self) {
+ self.nanos += rhs.nanos;
+ }
+}
+
+impl ops::Sub for Delta {
+ type Output = Self;
+
+ #[inline]
+ fn sub(self, rhs: Self) -> Self::Output {
+ Self {
+ nanos: self.nanos - rhs.nanos,
+ }
+ }
+}
+
+impl ops::SubAssign for Delta {
+ #[inline]
+ fn sub_assign(&mut self, rhs: Self) {
+ self.nanos -= rhs.nanos;
+ }
+}
+
+impl ops::Mul<i64> for Delta {
+ type Output = Self;
+
+ #[inline]
+ fn mul(self, rhs: i64) -> Self::Output {
+ Self {
+ nanos: self.nanos * rhs,
+ }
+ }
+}
+
+impl ops::MulAssign<i64> for Delta {
+ #[inline]
+ fn mul_assign(&mut self, rhs: i64) {
+ self.nanos *= rhs;
+ }
+}
+
+impl ops::Div for Delta {
+ type Output = i64;
+
+ #[inline]
+ fn div(self, rhs: Self) -> Self::Output {
+ #[cfg(CONFIG_64BIT)]
+ {
+ self.nanos / rhs.nanos
+ }
+
+ #[cfg(not(CONFIG_64BIT))]
+ {
+ // SAFETY: This function is always safe to call regardless of the input values
+ unsafe { bindings::div64_s64(self.nanos, rhs.nanos) }
+ }
+ }
+}
+
impl Delta {
/// A span of time equal to zero.
pub const ZERO: Self = Self { nanos: 0 };
@@ -312,4 +447,30 @@ impl Delta {
bindings::ktime_to_ms(self.as_nanos())
}
}
+
+ /// Return `self % divisor`, where `divisor` is given in nanoseconds.
+ ///
+ /// The kernel doesn't have any emulation for `s64 % s64` on 32-bit platforms, so this is
+ /// limited to 32-bit divisors.
+ #[inline]
+ pub fn rem_nanos(self, divisor: i32) -> Self {
+ #[cfg(CONFIG_64BIT)]
+ {
+ Self {
+ nanos: self.as_nanos() % i64::from(divisor),
+ }
+ }
+
+ #[cfg(not(CONFIG_64BIT))]
+ {
+ let mut rem = 0;
+
+ // SAFETY: `rem` is on the stack, so we can always provide a valid pointer to it.
+ unsafe { bindings::div_s64_rem(self.as_nanos(), divisor, &mut rem) };
+
+ Self {
+ nanos: i64::from(rem),
+ }
+ }
+ }
}
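
For context, a minimal usage sketch of the operators this patch adds. It is illustrative only and not part of the change itself; it assumes the `Monotonic` clock source and the `Instant::now()` and `Delta::from_millis()` helpers already present in `rust/kernel/time.rs` (names taken from the current tree), and the function name and values are made up.

use kernel::time::{Delta, Instant, Monotonic};

fn arithmetic_sketch() {
    let start: Instant<Monotonic> = Instant::now();

    // Shift an `Instant` forwards or backwards by a `Delta` (new `Add`/`Sub` impls).
    let deadline = start + Delta::from_millis(10);
    let _early = deadline - Delta::from_millis(2);

    // `Delta` arithmetic: add two spans, scale one in place, take the ratio of two spans.
    let mut budget = Delta::from_millis(4) + Delta::from_millis(1);
    budget *= 2; // 10 ms
    let _slots: i64 = budget / Delta::from_millis(2); // 5

    // Remainder of a span; the divisor is limited to 32 bits (see `rem_nanos` above).
    let _leftover = Delta::from_millis(10).rem_nanos(3_000_000); // 10 ms % 3 ms = 1 ms

    // Elapsed time via the pre-existing `Instant - Instant` implementation.
    let _elapsed: Delta = Instant::<Monotonic>::now() - start;
}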