// SPDX-License-Identifier: GPL-2.0

//! Direct memory access (DMA).
//!
//! C header: [`include/linux/dma-mapping.h`](srctree/include/linux/dma-mapping.h)

use crate::{
bindings, build_assert, device,
device::{Bound, Core},
error::{to_result, Result},
prelude::*,
transmute::{AsBytes, FromBytes},
types::ARef,
};

/// Trait to be implemented by DMA capable bus devices.
///
/// The [`dma::Device`](Device) trait should be implemented by bus specific device representations,
/// where the underlying bus is DMA capable, such as [`pci::Device`](::kernel::pci::Device) or
/// [`platform::Device`](::kernel::platform::Device).
pub trait Device: AsRef<device::Device<Core>> {
/// Set up the device's DMA streaming addressing capabilities.
///
/// This method is usually called once from `probe()` as soon as the device capabilities are
/// known.
///
/// # Safety
///
/// This method must not be called concurrently with any DMA allocation or mapping primitives,
/// such as [`CoherentAllocation::alloc_attrs`].
unsafe fn dma_set_mask(&self, mask: DmaMask) -> Result {
// SAFETY:
// - By the type invariant of `device::Device`, `self.as_ref().as_raw()` is valid.
// - The safety requirement of this function guarantees that there are no concurrent calls
// to DMA allocation and mapping primitives using this mask.
to_result(unsafe { bindings::dma_set_mask(self.as_ref().as_raw(), mask.value()) })
}

    /// Set up the device's DMA coherent addressing capabilities.
///
/// This method is usually called once from `probe()` as soon as the device capabilities are
/// known.
///
/// # Safety
///
/// This method must not be called concurrently with any DMA allocation or mapping primitives,
/// such as [`CoherentAllocation::alloc_attrs`].
unsafe fn dma_set_coherent_mask(&self, mask: DmaMask) -> Result {
// SAFETY:
// - By the type invariant of `device::Device`, `self.as_ref().as_raw()` is valid.
// - The safety requirement of this function guarantees that there are no concurrent calls
// to DMA allocation and mapping primitives using this mask.
to_result(unsafe { bindings::dma_set_coherent_mask(self.as_ref().as_raw(), mask.value()) })
}

    /// Set up the device's DMA addressing capabilities.
///
/// This is a combination of [`Device::dma_set_mask`] and [`Device::dma_set_coherent_mask`].
///
/// This method is usually called once from `probe()` as soon as the device capabilities are
/// known.
///
/// # Safety
///
/// This method must not be called concurrently with any DMA allocation or mapping primitives,
/// such as [`CoherentAllocation::alloc_attrs`].
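    ///
    /// # Examples
    ///
    /// A minimal usage sketch; the helper function and its call site in `probe()` are
    /// assumptions for illustration:
    ///
    /// ```
    /// use kernel::dma::{Device, DmaMask};
    ///
    /// fn configure_dma(dev: &impl Device) -> Result {
    ///     // SAFETY: Assumed to run from `probe()`, before any DMA allocation or mapping
    ///     // primitive is used, so nothing can race with the mask update.
    ///     unsafe { dev.dma_set_mask_and_coherent(DmaMask::new::<64>()) }
    /// }
    /// ```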
unsafe fn dma_set_mask_and_coherent(&self, mask: DmaMask) -> Result {
// SAFETY:
// - By the type invariant of `device::Device`, `self.as_ref().as_raw()` is valid.
// - The safety requirement of this function guarantees that there are no concurrent calls
// to DMA allocation and mapping primitives using this mask.
to_result(unsafe {
bindings::dma_set_mask_and_coherent(self.as_ref().as_raw(), mask.value())
})
}
}

/// A DMA mask that holds a bitmask with the lowest `n` bits set.
///
/// Use [`DmaMask::new`] or [`DmaMask::try_new`] to construct a value. Values
/// are guaranteed to never exceed the bit width of `u64`.
///
/// This is the Rust equivalent of the C macro `DMA_BIT_MASK()`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct DmaMask(u64);

impl DmaMask {
/// Constructs a `DmaMask` with the lowest `n` bits set to `1`.
///
/// For `n <= 64`, sets exactly the lowest `n` bits.
/// For `n > 64`, results in a build error.
///
/// # Examples
///
/// ```
/// use kernel::dma::DmaMask;
///
/// let mask0 = DmaMask::new::<0>();
/// assert_eq!(mask0.value(), 0);
///
/// let mask1 = DmaMask::new::<1>();
/// assert_eq!(mask1.value(), 0b1);
///
/// let mask64 = DmaMask::new::<64>();
/// assert_eq!(mask64.value(), u64::MAX);
///
/// // Build failure.
/// // let mask_overflow = DmaMask::new::<100>();
/// ```
#[inline]
pub const fn new<const N: u32>() -> Self {
let Ok(mask) = Self::try_new(N) else {
build_error!("Invalid DMA Mask.");
};
mask
}

    /// Constructs a `DmaMask` with the lowest `n` bits set to `1`.
///
/// For `n <= 64`, sets exactly the lowest `n` bits.
/// For `n > 64`, returns [`EINVAL`].
///
/// # Examples
///
/// ```
/// use kernel::dma::DmaMask;
///
/// let mask0 = DmaMask::try_new(0)?;
/// assert_eq!(mask0.value(), 0);
///
/// let mask1 = DmaMask::try_new(1)?;
/// assert_eq!(mask1.value(), 0b1);
///
/// let mask64 = DmaMask::try_new(64)?;
/// assert_eq!(mask64.value(), u64::MAX);
///
/// let mask_overflow = DmaMask::try_new(100);
/// assert!(mask_overflow.is_err());
/// # Ok::<(), Error>(())
/// ```
#[inline]
pub const fn try_new(n: u32) -> Result<Self> {
Ok(Self(match n {
0 => 0,
1..=64 => u64::MAX >> (64 - n),
_ => return Err(EINVAL),
}))
}

    /// Returns the underlying `u64` bitmask value.
#[inline]
pub const fn value(&self) -> u64 {
self.0
}
}

/// Possible attributes associated with a DMA mapping.
///
/// They can be combined with the operators `|`, `&`, and `!`.
///
/// Values can be used from the [`attrs`] module.
///
/// # Examples
///
/// ```
/// # use kernel::device::{Bound, Device};
/// use kernel::dma::{attrs::*, CoherentAllocation};
///
/// # fn test(dev: &Device<Bound>) -> Result {
/// let attribs = DMA_ATTR_FORCE_CONTIGUOUS | DMA_ATTR_NO_WARN;
/// let c: CoherentAllocation<u64> =
/// CoherentAllocation::alloc_attrs(dev, 4, GFP_KERNEL, attribs)?;
/// # Ok::<(), Error>(()) }
/// ```
#[derive(Clone, Copy, PartialEq)]
#[repr(transparent)]
pub struct Attrs(u32);

impl Attrs {
/// Get the raw representation of this attribute.
pub(crate) fn as_raw(self) -> crate::ffi::c_ulong {
self.0 as crate::ffi::c_ulong
}

    /// Check whether `flags` is contained in `self`.
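    ///
    /// # Examples
    ///
    /// A short sketch of combining attributes and testing for a flag:
    ///
    /// ```
    /// use kernel::dma::attrs::*;
    ///
    /// let attrs = DMA_ATTR_FORCE_CONTIGUOUS | DMA_ATTR_NO_WARN;
    /// assert!(attrs.contains(DMA_ATTR_NO_WARN));
    /// assert!(!attrs.contains(DMA_ATTR_WEAK_ORDERING));
    /// ```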
pub fn contains(self, flags: Attrs) -> bool {
(self & flags) == flags
}
}

impl core::ops::BitOr for Attrs {
    type Output = Self;
    fn bitor(self, rhs: Self) -> Self::Output {
        Self(self.0 | rhs.0)
    }
}

impl core::ops::BitAnd for Attrs {
    type Output = Self;
    fn bitand(self, rhs: Self) -> Self::Output {
        Self(self.0 & rhs.0)
    }
}

impl core::ops::Not for Attrs {
    type Output = Self;
    fn not(self) -> Self::Output {
        Self(!self.0)
    }
}

/// DMA mapping attributes.
pub mod attrs {
    use super::Attrs;

    /// Specifies that reads and writes to the mapping may be weakly ordered, that is, reads
    /// and writes may pass each other.
    pub const DMA_ATTR_WEAK_ORDERING: Attrs = Attrs(bindings::DMA_ATTR_WEAK_ORDERING);

    /// Specifies that writes to the mapping may be buffered to improve performance.
    pub const DMA_ATTR_WRITE_COMBINE: Attrs = Attrs(bindings::DMA_ATTR_WRITE_COMBINE);

    /// Lets the platform avoid creating a kernel virtual mapping for the allocated buffer.
    pub const DMA_ATTR_NO_KERNEL_MAPPING: Attrs = Attrs(bindings::DMA_ATTR_NO_KERNEL_MAPPING);

    /// Allows platform code to skip synchronization of the CPU cache for the given buffer,
    /// assuming that it has already been transferred to the 'device' domain.
    pub const DMA_ATTR_SKIP_CPU_SYNC: Attrs = Attrs(bindings::DMA_ATTR_SKIP_CPU_SYNC);

    /// Forces contiguous allocation of the buffer in physical memory.
    pub const DMA_ATTR_FORCE_CONTIGUOUS: Attrs = Attrs(bindings::DMA_ATTR_FORCE_CONTIGUOUS);

    /// Hints to the DMA-mapping subsystem that it's probably not worth the time to try
    /// to allocate memory in a way that gives better TLB efficiency.
    pub const DMA_ATTR_ALLOC_SINGLE_PAGES: Attrs = Attrs(bindings::DMA_ATTR_ALLOC_SINGLE_PAGES);

    /// Tells the DMA-mapping subsystem to suppress allocation failure reports (similarly to
    /// `__GFP_NOWARN`).
    pub const DMA_ATTR_NO_WARN: Attrs = Attrs(bindings::DMA_ATTR_NO_WARN);

    /// Indicates that the buffer is fully accessible at an elevated privilege level (and
    /// ideally inaccessible or at least read-only at lesser-privileged levels).
    pub const DMA_ATTR_PRIVILEGED: Attrs = Attrs(bindings::DMA_ATTR_PRIVILEGED);
}

/// An abstraction of the `dma_alloc_coherent` API.
///
/// This is an abstraction around the `dma_alloc_coherent` API which is used to allocate and map
/// large coherent DMA regions.
///
/// A [`CoherentAllocation`] instance contains a pointer to the allocated region (in the
/// processor's virtual address space) and the device address which can be given to the device
/// as the DMA address base of the region. The region is released once [`CoherentAllocation`]
/// is dropped.
///
/// # Invariants
///
/// - For the lifetime of an instance of [`CoherentAllocation`], the `cpu_addr` is a valid pointer
/// to an allocated region of coherent memory and `dma_handle` is the DMA address base of the
/// region.
/// - The size in bytes of the allocation is equal to `size_of::<T> * count`.
/// - `size_of::<T> * count` fits into a `usize`.
// TODO
//
// DMA allocations potentially carry device resources (e.g. IOMMU mappings), hence for soundness
// reasons DMA allocation would need to be embedded in a `Devres` container, in order to ensure
// that device resources can never survive device unbind.
//
// However, it is neither desirable nor necessary to protect the allocated memory of the DMA
// allocation from surviving device unbind; it would require RCU read side critical sections to
// access the memory, which may require subsequent unnecessary copies.
//
// Hence, find a way to revoke the device resources of a `CoherentAllocation`, but not the
// entire `CoherentAllocation` including the allocated memory itself.
pub struct CoherentAllocation<T: AsBytes + FromBytes> {
dev: ARef<device::Device>,
dma_handle: bindings::dma_addr_t,
count: usize,
cpu_addr: *mut T,
dma_attrs: Attrs,
}

impl<T: AsBytes + FromBytes> CoherentAllocation<T> {
/// Allocates a region of `size_of::<T> * count` of coherent memory.
///
/// # Examples
///
/// ```
/// # use kernel::device::{Bound, Device};
/// use kernel::dma::{attrs::*, CoherentAllocation};
///
/// # fn test(dev: &Device<Bound>) -> Result {
/// let c: CoherentAllocation<u64> =
/// CoherentAllocation::alloc_attrs(dev, 4, GFP_KERNEL, DMA_ATTR_NO_WARN)?;
/// # Ok::<(), Error>(()) }
/// ```
pub fn alloc_attrs(
dev: &device::Device<Bound>,
count: usize,
gfp_flags: kernel::alloc::Flags,
dma_attrs: Attrs,
) -> Result<CoherentAllocation<T>> {
build_assert!(
core::mem::size_of::<T>() > 0,
"It doesn't make sense for the allocated type to be a ZST"
);
let size = count
.checked_mul(core::mem::size_of::<T>())
.ok_or(EOVERFLOW)?;
let mut dma_handle = 0;
        // SAFETY: The device pointer is guaranteed to be valid by the type invariant on
        // `Device`.
let ret = unsafe {
bindings::dma_alloc_attrs(
dev.as_raw(),
size,
&mut dma_handle,
gfp_flags.as_raw(),
dma_attrs.as_raw(),
)
};
if ret.is_null() {
return Err(ENOMEM);
}
// INVARIANT:
// - We just successfully allocated a coherent region which is accessible for
// `count` elements, hence the cpu address is valid. We also hold a refcounted reference
// to the device.
// - The allocated `size` is equal to `size_of::<T> * count`.
// - The allocated `size` fits into a `usize`.
Ok(Self {
dev: dev.into(),
dma_handle,
count,
cpu_addr: ret.cast::<T>(),
dma_attrs,
})
}

    /// Performs the same functionality as [`CoherentAllocation::alloc_attrs`], except that
    /// `dma_attrs` is 0 by default.
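    ///
    /// # Examples
    ///
    /// A sketch mirroring the [`CoherentAllocation::alloc_attrs`] example, just without
    /// attributes:
    ///
    /// ```
    /// # use kernel::device::{Bound, Device};
    /// use kernel::dma::CoherentAllocation;
    ///
    /// # fn test(dev: &Device<Bound>) -> Result {
    /// let c: CoherentAllocation<u64> = CoherentAllocation::alloc_coherent(dev, 4, GFP_KERNEL)?;
    /// # Ok::<(), Error>(()) }
    /// ```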
pub fn alloc_coherent(
dev: &device::Device<Bound>,
count: usize,
gfp_flags: kernel::alloc::Flags,
) -> Result<CoherentAllocation<T>> {
CoherentAllocation::alloc_attrs(dev, count, gfp_flags, Attrs(0))
}

    /// Returns the number of elements `T` in this allocation.
///
/// Note that this is not the size of the allocation in bytes, which is provided by
/// [`Self::size`].
pub fn count(&self) -> usize {
self.count
}

    /// Returns the size in bytes of this allocation.
pub fn size(&self) -> usize {
// INVARIANT: The type invariant of `Self` guarantees that `size_of::<T> * count` fits into
// a `usize`.
self.count * core::mem::size_of::<T>()
}

    /// Returns the base address of the allocated region in the CPU's virtual address space.
pub fn start_ptr(&self) -> *const T {
self.cpu_addr
}

    /// Returns the base address of the allocated region in the CPU's virtual address space as
    /// a mutable pointer.
pub fn start_ptr_mut(&mut self) -> *mut T {
self.cpu_addr
}

    /// Returns a DMA handle which may be given to the device as the DMA address base of
/// the region.
pub fn dma_handle(&self) -> bindings::dma_addr_t {
self.dma_handle
}

    /// Returns a DMA handle starting at `offset` (in units of `T`) which may be given to the
/// device as the DMA address base of the region.
///
/// Returns `EINVAL` if `offset` is not within the bounds of the allocation.
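    ///
    /// # Examples
    ///
    /// A sketch of computing the device address of a single element; the allocation is an
    /// assumed context:
    ///
    /// ```
    /// # fn test(alloc: &kernel::dma::CoherentAllocation<u64>) -> Result {
    /// // Device address of the element at index 2, i.e. `dma_handle() + 2 * size_of::<u64>()`.
    /// let _addr = alloc.dma_handle_with_offset(2)?;
    /// # Ok::<(), Error>(()) }
    /// ```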
pub fn dma_handle_with_offset(&self, offset: usize) -> Result<bindings::dma_addr_t> {
if offset >= self.count {
Err(EINVAL)
} else {
            // INVARIANT: The type invariant of `Self` guarantees that `size_of::<T> * count`
            // fits into a `usize`, and `offset` is less than `count`.
Ok(self.dma_handle + (offset * core::mem::size_of::<T>()) as bindings::dma_addr_t)
}
}

    /// Common helper to validate a range against the allocated region in the CPU's virtual
    /// address space.
fn validate_range(&self, offset: usize, count: usize) -> Result {
if offset.checked_add(count).ok_or(EOVERFLOW)? > self.count {
return Err(EINVAL);
}
Ok(())
}

    /// Returns the data from the region starting from `offset` as a slice.
/// `offset` and `count` are in units of `T`, not the number of bytes.
///
    /// For ring-buffer style read/write access, or for use cases where a pointer to the live
    /// data is needed, [`CoherentAllocation::start_ptr`] or [`CoherentAllocation::start_ptr_mut`]
    /// can be used instead.
///
/// # Safety
///
/// * Callers must ensure that the device does not read/write to/from memory while the returned
/// slice is live.
/// * Callers must ensure that this call does not race with a write to the same region while
/// the returned slice is live.
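    ///
    /// # Examples
    ///
    /// A sketch of a CPU-side read-back; the allocation and the absence of concurrent device
    /// access are assumed:
    ///
    /// ```
    /// # fn test(alloc: &kernel::dma::CoherentAllocation<u8>) -> Result {
    /// // SAFETY: The device is assumed not to access the region, and nothing else writes to
    /// // it while the slice is live.
    /// let data = unsafe { alloc.as_slice(0, 4)? };
    /// assert_eq!(data.len(), 4);
    /// # Ok::<(), Error>(()) }
    /// ```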
pub unsafe fn as_slice(&self, offset: usize, count: usize) -> Result<&[T]> {
self.validate_range(offset, count)?;
        // SAFETY:
        // - The pointer is valid due to the type invariant on `CoherentAllocation`, and we've
        //   just checked that the range and index are within bounds. The immutability of the
        //   data is also guaranteed by the safety requirements of the function.
        // - `offset + count` can't overflow since it is no larger than `self.count`, and the
        //   constructor checked that `self.count * size_of::<T>()` fits into a `usize`.
Ok(unsafe { core::slice::from_raw_parts(self.cpu_addr.add(offset), count) })
}

    /// Performs the same functionality as [`CoherentAllocation::as_slice`], except that a mutable
/// slice is returned.
///
/// # Safety
///
/// * Callers must ensure that the device does not read/write to/from memory while the returned
/// slice is live.
/// * Callers must ensure that this call does not race with a read or write to the same region
/// while the returned slice is live.
pub unsafe fn as_slice_mut(&mut self, offset: usize, count: usize) -> Result<&mut [T]> {
self.validate_range(offset, count)?;
        // SAFETY:
        // - The pointer is valid due to the type invariant on `CoherentAllocation`, and we've
        //   just checked that the range and index are within bounds. Exclusive access to the
        //   data is also guaranteed by the safety requirements of the function.
        // - `offset + count` can't overflow since it is no larger than `self.count`, and the
        //   constructor checked that `self.count * size_of::<T>()` fits into a `usize`.
Ok(unsafe { core::slice::from_raw_parts_mut(self.cpu_addr.add(offset), count) })
}

    /// Writes data to the region starting from `offset`. `offset` is in units of `T`, not the
/// number of bytes.
///
/// # Safety
///
    /// * Callers must ensure that the device does not read/write to/from memory while this
    ///   write is in progress.
    /// * Callers must ensure that this call does not race with a read or write to the same region
    ///   that overlaps with this write.
///
/// # Examples
///
/// ```
/// # fn test(alloc: &mut kernel::dma::CoherentAllocation<u8>) -> Result {
/// let somedata: [u8; 4] = [0xf; 4];
/// let buf: &[u8] = &somedata;
/// // SAFETY: There is no concurrent HW operation on the device and no other R/W access to the
/// // region.
/// unsafe { alloc.write(buf, 0)?; }
/// # Ok::<(), Error>(()) }
/// ```
pub unsafe fn write(&mut self, src: &[T], offset: usize) -> Result {
self.validate_range(offset, src.len())?;
        // SAFETY:
        // - The pointer is valid due to the type invariant on `CoherentAllocation`,
        //   and we've just checked that the range and index are within bounds.
        // - `offset + src.len()` can't overflow since it is no larger than `self.count`, and
        //   the constructor checked that `self.count * size_of::<T>()` fits into a `usize`.
unsafe {
core::ptr::copy_nonoverlapping(src.as_ptr(), self.cpu_addr.add(offset), src.len())
};
Ok(())
}

    /// Returns a pointer to an element from the region with bounds checking. `offset` is in
/// units of `T`, not the number of bytes.
///
/// Public but hidden since it should only be used from [`dma_read`] and [`dma_write`] macros.
#[doc(hidden)]
pub fn item_from_index(&self, offset: usize) -> Result<*mut T> {
if offset >= self.count {
return Err(EINVAL);
}
        // SAFETY:
        // - The pointer is valid due to the type invariant on `CoherentAllocation`,
        //   and we've just checked that the index is within bounds.
        // - `self.cpu_addr.add(offset)` stays within the allocation since `offset` is less
        //   than `self.count`, and the constructor checked that `self.count * size_of::<T>()`
        //   fits into a `usize`.
Ok(unsafe { self.cpu_addr.add(offset) })
}

    /// Reads the value of `field` and ensures that its type is [`FromBytes`].
///
/// # Safety
///
/// This must be called from the [`dma_read`] macro which ensures that the `field` pointer is
/// validated beforehand.
///
/// Public but hidden since it should only be used from [`dma_read`] macro.
#[doc(hidden)]
pub unsafe fn field_read<F: FromBytes>(&self, field: *const F) -> F {
        // SAFETY:
        // - By the safety requirements, `field` is valid.
        // - Using `read_volatile()` here is not sound as per the usual rules; the usage here is
        //   a special exception with the following notes in place. When dealing with a potential
        //   race from hardware or from code outside the kernel (e.g. a user-space program), we
        //   need reads of valid memory to not be UB. Currently `read_volatile()` is used for
        //   this, and the rationale is that it should generate the same code as `READ_ONCE()`,
        //   which the kernel already relies on to avoid UB on data races. Note that this usage
        //   of `read_volatile()` is limited to this particular case; it cannot be used to
        //   prevent UB caused by racing between two kernel functions, nor does it provide
        //   atomicity.
unsafe { field.read_volatile() }
}

    /// Writes a value to `field` and ensures that its type is [`AsBytes`].
///
/// # Safety
///
/// This must be called from the [`dma_write`] macro which ensures that the `field` pointer is
/// validated beforehand.
///
/// Public but hidden since it should only be used from [`dma_write`] macro.
#[doc(hidden)]
pub unsafe fn field_write<F: AsBytes>(&self, field: *mut F, val: F) {
        // SAFETY:
        // - By the safety requirements, `field` is valid.
        // - Using `write_volatile()` here is not sound as per the usual rules; the usage here is
        //   a special exception with the following notes in place. When dealing with a potential
        //   race from hardware or from code outside the kernel (e.g. a user-space program), we
        //   need writes to valid memory to not be UB. Currently `write_volatile()` is used for
        //   this, and the rationale is that it should generate the same code as `WRITE_ONCE()`,
        //   which the kernel already relies on to avoid UB on data races. Note that this usage
        //   of `write_volatile()` is limited to this particular case; it cannot be used to
        //   prevent UB caused by racing between two kernel functions, nor does it provide
        //   atomicity.
unsafe { field.write_volatile(val) }
}
}

/// Note that the device configured to do DMA must be halted before this object is dropped.
impl<T: AsBytes + FromBytes> Drop for CoherentAllocation<T> {
fn drop(&mut self) {
let size = self.count * core::mem::size_of::<T>();
        // SAFETY: The device pointer is guaranteed to be valid by the type invariant on
        // `Device`. The CPU address and the DMA handle are valid due to the type invariants
        // on `CoherentAllocation`.
unsafe {
bindings::dma_free_attrs(
self.dev.as_raw(),
size,
self.cpu_addr.cast(),
self.dma_handle,
self.dma_attrs.as_raw(),
)
}
}
}

// SAFETY: It is safe to send a `CoherentAllocation` to another thread if `T`
// can be sent to another thread.
unsafe impl<T: AsBytes + FromBytes + Send> Send for CoherentAllocation<T> {}

/// Reads a field of an item from an allocated region of structs.
///
/// # Examples
///
/// ```
/// use kernel::device::Device;
/// use kernel::dma::{attrs::*, CoherentAllocation};
///
/// struct MyStruct { field: u32, }
///
/// // SAFETY: All bit patterns are acceptable values for `MyStruct`.
/// unsafe impl kernel::transmute::FromBytes for MyStruct {}
///
/// // SAFETY: Instances of `MyStruct` have no uninitialized portions.
/// unsafe impl kernel::transmute::AsBytes for MyStruct {}
///
/// # fn test(alloc: &kernel::dma::CoherentAllocation<MyStruct>) -> Result {
/// let whole = kernel::dma_read!(alloc[2])?;
/// let field = kernel::dma_read!(alloc[1].field)?;
/// # Ok::<(), Error>(()) }
/// ```
#[macro_export]
macro_rules! dma_read {
($dma:expr, $idx: expr, $($field:tt)*) => {{
(|| -> ::core::result::Result<_, $crate::error::Error> {
let item = $crate::dma::CoherentAllocation::item_from_index(&$dma, $idx)?;
// SAFETY: `item_from_index` ensures that `item` is always a valid pointer and can be
// dereferenced. The compiler also further validates the expression on whether `field`
// is a member of `item` when expanded by the macro.
unsafe {
let ptr_field = ::core::ptr::addr_of!((*item) $($field)*);
::core::result::Result::Ok(
$crate::dma::CoherentAllocation::field_read(&$dma, ptr_field)
)
}
})()
}};
($dma:ident [ $idx:expr ] $($field:tt)* ) => {
$crate::dma_read!($dma, $idx, $($field)*)
};
($($dma:ident).* [ $idx:expr ] $($field:tt)* ) => {
$crate::dma_read!($($dma).*, $idx, $($field)*)
};
}

/// Writes to a field of an item from an allocated region of structs.
///
/// # Examples
///
/// ```
/// use kernel::device::Device;
/// use kernel::dma::{attrs::*, CoherentAllocation};
///
/// struct MyStruct { member: u32, }
///
/// // SAFETY: All bit patterns are acceptable values for `MyStruct`.
/// unsafe impl kernel::transmute::FromBytes for MyStruct {}
///
/// // SAFETY: Instances of `MyStruct` have no uninitialized portions.
/// unsafe impl kernel::transmute::AsBytes for MyStruct {}
///
/// # fn test(alloc: &kernel::dma::CoherentAllocation<MyStruct>) -> Result {
/// kernel::dma_write!(alloc[2].member = 0xf)?;
/// kernel::dma_write!(alloc[1] = MyStruct { member: 0xf })?;
/// # Ok::<(), Error>(()) }
/// ```
#[macro_export]
macro_rules! dma_write {
($dma:ident [ $idx:expr ] $($field:tt)*) => {{
$crate::dma_write!($dma, $idx, $($field)*)
}};
($($dma:ident).* [ $idx:expr ] $($field:tt)* ) => {{
$crate::dma_write!($($dma).*, $idx, $($field)*)
}};
($dma:expr, $idx: expr, = $val:expr) => {
(|| -> ::core::result::Result<_, $crate::error::Error> {
let item = $crate::dma::CoherentAllocation::item_from_index(&$dma, $idx)?;
// SAFETY: `item_from_index` ensures that `item` is always a valid item.
unsafe { $crate::dma::CoherentAllocation::field_write(&$dma, item, $val) }
::core::result::Result::Ok(())
})()
};
($dma:expr, $idx: expr, $(.$field:ident)* = $val:expr) => {
(|| -> ::core::result::Result<_, $crate::error::Error> {
let item = $crate::dma::CoherentAllocation::item_from_index(&$dma, $idx)?;
// SAFETY: `item_from_index` ensures that `item` is always a valid pointer and can be
// dereferenced. The compiler also further validates the expression on whether `field`
// is a member of `item` when expanded by the macro.
unsafe {
let ptr_field = ::core::ptr::addr_of_mut!((*item) $(.$field)*);
$crate::dma::CoherentAllocation::field_write(&$dma, ptr_field, $val)
}
::core::result::Result::Ok(())
})()
};
}