author      Linus Torvalds <torvalds@linux-foundation.org>  2026-02-13 12:02:18 -0800
committer   Linus Torvalds <torvalds@linux-foundation.org>  2026-02-13 12:02:18 -0800
commit      a353e7260b5951a62dce43630ae9265accd96a4b (patch)
tree        17718c7136b02a2f63be0b26cd2ff7fffc279bf2 /kernel
parent      cb5573868ea85ddbc74dd9a917acd1e434d21390 (diff)
parent      ebcff9dacaf2c1418f8bc927388186d7d3674603 (diff)
Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost
Pull virtio updates from Michael Tsirkin:

 - in-order support in virtio core

 - multiple address space support in vduse

 - fixes, cleanups all over the place, notably dma alignment fixes for
   non-cache-coherent systems

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost: (59 commits)
  vduse: avoid adding implicit padding
  vhost: fix caching attributes of MMIO regions by setting them explicitly
  vdpa/mlx5: update MAC address handling in mlx5_vdpa_set_attr()
  vdpa/mlx5: reuse common function for MAC address updates
  vdpa/mlx5: update mlx_features with driver state check
  crypto: virtio: Replace package id with numa node id
  crypto: virtio: Remove duplicated virtqueue_kick in virtio_crypto_skcipher_crypt_req
  crypto: virtio: Add spinlock protection with virtqueue notification
  Documentation: Add documentation for VDUSE Address Space IDs
  vduse: bump version number
  vduse: add vq group asid support
  vduse: merge tree search logic of IOTLB_GET_FD and IOTLB_GET_INFO ioctls
  vduse: take out allocations from vduse_dev_alloc_coherent
  vduse: remove unused vaddr parameter of vduse_domain_free_coherent
  vduse: refactor vdpa_dev_add for goto err handling
  vhost: forbid change vq groups ASID if DRIVER_OK is set
  vdpa: document set_group_asid thread safety
  vduse: return internal vq group struct as map token
  vduse: add vq group support
  vduse: add v1 API definition
  ...
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/dma/debug.c   28
1 file changed, 23 insertions, 5 deletions
diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
index 138ede653de4..43d6a996d7a7 100644
--- a/kernel/dma/debug.c
+++ b/kernel/dma/debug.c
@@ -63,6 +63,7 @@ enum map_err_types {
  * @sg_mapped_ents: 'mapped_ents' from dma_map_sg
  * @paddr: physical start address of the mapping
  * @map_err_type: track whether dma_mapping_error() was checked
+ * @is_cache_clean: driver promises not to write to buffer while mapped
  * @stack_len: number of backtrace entries in @stack_entries
  * @stack_entries: stack of backtrace history
  */
@@ -76,7 +77,8 @@ struct dma_debug_entry {
         int                     sg_call_ents;
         int                     sg_mapped_ents;
         phys_addr_t             paddr;
-        enum map_err_types      map_err_type;
+        enum map_err_types      map_err_type;
+        bool                    is_cache_clean;
 #ifdef CONFIG_STACKTRACE
         unsigned int            stack_len;
         unsigned long           stack_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
@@ -472,12 +474,15 @@ static int active_cacheline_dec_overlap(phys_addr_t cln)
         return active_cacheline_set_overlap(cln, --overlap);
 }

-static int active_cacheline_insert(struct dma_debug_entry *entry)
+static int active_cacheline_insert(struct dma_debug_entry *entry,
+                                   bool *overlap_cache_clean)
 {
         phys_addr_t cln = to_cacheline_number(entry);
         unsigned long flags;
         int rc;

+        *overlap_cache_clean = false;
+
         /* If the device is not writing memory then we don't have any
          * concerns about the cpu consuming stale data. This mitigates
          * legitimate usages of overlapping mappings.
@@ -487,8 +492,16 @@ static int active_cacheline_insert(struct dma_debug_entry *entry)

         spin_lock_irqsave(&radix_lock, flags);
         rc = radix_tree_insert(&dma_active_cacheline, cln, entry);
-        if (rc == -EEXIST)
+        if (rc == -EEXIST) {
+                struct dma_debug_entry *existing;
+
                 active_cacheline_inc_overlap(cln);
+                existing = radix_tree_lookup(&dma_active_cacheline, cln);
+                /* A lookup failure here after we got -EEXIST is unexpected. */
+                WARN_ON(!existing);
+                if (existing)
+                        *overlap_cache_clean = existing->is_cache_clean;
+        }
         spin_unlock_irqrestore(&radix_lock, flags);

         return rc;
@@ -583,19 +596,24 @@ DEFINE_SHOW_ATTRIBUTE(dump);
  */
 static void add_dma_entry(struct dma_debug_entry *entry, unsigned long attrs)
 {
+        bool overlap_cache_clean;
         struct hash_bucket *bucket;
         unsigned long flags;
         int rc;

+        entry->is_cache_clean = !!(attrs & DMA_ATTR_CPU_CACHE_CLEAN);
+
         bucket = get_hash_bucket(entry, &flags);
         hash_bucket_add(bucket, entry);
         put_hash_bucket(bucket, flags);

-        rc = active_cacheline_insert(entry);
+        rc = active_cacheline_insert(entry, &overlap_cache_clean);
         if (rc == -ENOMEM) {
                 pr_err_once("cacheline tracking ENOMEM, dma-debug disabled\n");
                 global_disable = true;
-        } else if (rc == -EEXIST && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+        } else if (rc == -EEXIST &&
+                   !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+                   !(entry->is_cache_clean && overlap_cache_clean) &&
                    !(IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC) &&
                      is_swiotlb_active(entry->dev))) {
                 err_printk(entry->dev, entry,
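
The overlap report above is now skipped only when both the new mapping and the already-tracked mapping of the cacheline were created as cache-clean. As a minimal driver-side sketch, assuming DMA_ATTR_CPU_CACHE_CLEAN is available to drivers as an ordinary DMA attribute (its definition is outside this diff), a mapping could opt in roughly as follows; the helper name example_map_rx_buf and the surrounding code are hypothetical and only illustrate the attribute's intended use:

#include <linux/dma-mapping.h>

/* Hypothetical helper: map a receive buffer the CPU never writes while mapped. */
static dma_addr_t example_map_rx_buf(struct device *dev, void *buf, size_t len)
{
        dma_addr_t addr;

        /*
         * The device writes into @buf, but the CPU promises not to dirty it
         * while it is mapped, so advertise the mapping as cache-clean.  With
         * the change above, dma-debug then suppresses the overlapping-mapping
         * report when the other mapping sharing the cacheline made the same
         * promise.
         */
        addr = dma_map_single_attrs(dev, buf, len, DMA_FROM_DEVICE,
                                    DMA_ATTR_CPU_CACHE_CLEAN);
        if (dma_mapping_error(dev, addr))
                return DMA_MAPPING_ERROR;

        return addr;
}

The matching dma_unmap_single_attrs() call would pass the same attribute, as is usual for attribute-carrying mappings.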