summaryrefslogtreecommitdiff
path: root/virt
diff options
context:
space:
mode:
authorMichael Roth <michael.roth@amd.com>2026-01-08 15:46:18 -0600
committerSean Christopherson <seanjc@google.com>2026-01-15 12:30:48 -0800
commit6538b6221cc2feda415ca1946e66a5ef02dc6a0a (patch)
tree8c366f716bc59f3c2d0e32480217150826e8aaf8 /virt
parent60b590de8b30dad8b11e9e4fba0df2eae81afb98 (diff)
KVM: guest_memfd: Remove partial hugepage handling from kvm_gmem_populate()
kvm_gmem_populate(), and the associated post-populate callbacks, have some limited support for dealing with guests backed by hugepages by passing the order information along to each post-populate callback and iterating through the pages passed to kvm_gmem_populate() in hugepage-chunks. However, guest_memfd doesn't yet support hugepages, and in most cases additional changes in the kvm_gmem_populate() path would also be needed to actually allow for this functionality. This makes the existing code unnecessarily complex, and makes changes difficult to work through upstream due to theoretical impacts on hugepage support that can't be considered properly without an actual hugepage implementation to reference. So for now, remove what's there so changes for things like in-place conversion can be implemented/reviewed more efficiently. Suggested-by: Vishal Annapurve <vannapurve@google.com> Co-developed-by: Vishal Annapurve <vannapurve@google.com> Signed-off-by: Vishal Annapurve <vannapurve@google.com> Tested-by: Vishal Annapurve <vannapurve@google.com> Tested-by: Kai Huang <kai.huang@intel.com> Signed-off-by: Michael Roth <michael.roth@amd.com> Tested-by: Yan Zhao <yan.y.zhao@intel.com> Reviewed-by: Yan Zhao <yan.y.zhao@intel.com> Link: https://patch.msgid.link/20260108214622.1084057-3-michael.roth@amd.com [sean: check for !IS_ERR() before checking folio_order()] Signed-off-by: Sean Christopherson <seanjc@google.com>
Diffstat (limited to 'virt')
-rw-r--r--virt/kvm/guest_memfd.c30
1 file changed, 17 insertions, 13 deletions
diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
index fdaea3422c30..24eb33c7948d 100644
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -151,6 +151,15 @@ static struct folio *kvm_gmem_get_folio(struct inode *inode, pgoff_t index)
mapping_gfp_mask(inode->i_mapping), policy);
mpol_cond_put(policy);
+ /*
+ * External interfaces like kvm_gmem_get_pfn() support dealing
+ * with hugepages to a degree, but internally, guest_memfd currently
+ * assumes that all folios are order-0 and handling would need
+ * to be updated for anything otherwise (e.g. page-clearing
+ * operations).
+ */
+ WARN_ON_ONCE(!IS_ERR(folio) && folio_order(folio));
+
return folio;
}
@@ -829,7 +838,7 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
struct kvm_memory_slot *slot;
void __user *p;
- int ret = 0, max_order;
+ int ret = 0;
long i;
lockdep_assert_held(&kvm->slots_lock);
@@ -848,7 +857,7 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
filemap_invalidate_lock(file->f_mapping);
npages = min_t(ulong, slot->npages - (start_gfn - slot->base_gfn), npages);
- for (i = 0; i < npages; i += (1 << max_order)) {
+ for (i = 0; i < npages; i++) {
struct folio *folio;
gfn_t gfn = start_gfn + i;
pgoff_t index = kvm_gmem_get_index(slot, gfn);
@@ -860,7 +869,7 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
break;
}
- folio = __kvm_gmem_get_pfn(file, slot, index, &pfn, &is_prepared, &max_order);
+ folio = __kvm_gmem_get_pfn(file, slot, index, &pfn, &is_prepared, NULL);
if (IS_ERR(folio)) {
ret = PTR_ERR(folio);
break;
@@ -874,20 +883,15 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
}
folio_unlock(folio);
- WARN_ON(!IS_ALIGNED(gfn, 1 << max_order) ||
- (npages - i) < (1 << max_order));
ret = -EINVAL;
- while (!kvm_range_has_memory_attributes(kvm, gfn, gfn + (1 << max_order),
- KVM_MEMORY_ATTRIBUTE_PRIVATE,
- KVM_MEMORY_ATTRIBUTE_PRIVATE)) {
- if (!max_order)
- goto put_folio_and_exit;
- max_order--;
- }
+ if (!kvm_range_has_memory_attributes(kvm, gfn, gfn + 1,
+ KVM_MEMORY_ATTRIBUTE_PRIVATE,
+ KVM_MEMORY_ATTRIBUTE_PRIVATE))
+ goto put_folio_and_exit;
p = src ? src + i * PAGE_SIZE : NULL;
- ret = post_populate(kvm, gfn, pfn, p, max_order, opaque);
+ ret = post_populate(kvm, gfn, pfn, p, opaque);
if (!ret)
kvm_gmem_mark_prepared(folio);