Diffstat (limited to 'drivers/gpu/drm/xe/xe_svm.c')
-rw-r--r--	drivers/gpu/drm/xe/xe_svm.c	723
1 file changed, 561 insertions(+), 162 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index a7ff5975873f..7f2f1f041f1d 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -6,8 +6,8 @@
#include <drm/drm_drv.h>
#include "xe_bo.h"
+#include "xe_exec_queue_types.h"
#include "xe_gt_stats.h"
-#include "xe_gt_tlb_invalidation.h"
#include "xe_migrate.h"
#include "xe_module.h"
#include "xe_pm.h"
@@ -17,6 +17,7 @@
#include "xe_ttm_vram_mgr.h"
#include "xe_vm.h"
#include "xe_vm_types.h"
+#include "xe_vram_types.h"
static bool xe_svm_range_in_vram(struct xe_svm_range *range)
{
@@ -25,9 +26,9 @@ static bool xe_svm_range_in_vram(struct xe_svm_range *range)
* memory.
*/
- struct drm_gpusvm_range_flags flags = {
+ struct drm_gpusvm_pages_flags flags = {
/* Pairs with WRITE_ONCE in drm_gpusvm.c */
- .__flags = READ_ONCE(range->base.flags.__flags),
+ .__flags = READ_ONCE(range->base.pages.flags.__flags),
};
return flags.has_devmem_pages;
@@ -49,15 +50,15 @@ static struct xe_vm *range_to_vm(struct drm_gpusvm_range *r)
return gpusvm_to_vm(r->gpusvm);
}
-#define range_debug(r__, operaton__) \
+#define range_debug(r__, operation__) \
vm_dbg(&range_to_vm(&(r__)->base)->xe->drm, \
"%s: asid=%u, gpusvm=%p, vram=%d,%d, seqno=%lu, " \
"start=0x%014lx, end=0x%014lx, size=%lu", \
- (operaton__), range_to_vm(&(r__)->base)->usm.asid, \
+ (operation__), range_to_vm(&(r__)->base)->usm.asid, \
(r__)->base.gpusvm, \
xe_svm_range_in_vram((r__)) ? 1 : 0, \
xe_svm_range_has_vram_binding((r__)) ? 1 : 0, \
- (r__)->base.notifier_seq, \
+ (r__)->base.pages.notifier_seq, \
xe_svm_range_start((r__)), xe_svm_range_end((r__)), \
xe_svm_range_size((r__)))
@@ -112,6 +113,11 @@ xe_svm_garbage_collector_add_range(struct xe_vm *vm, struct xe_svm_range *range,
&vm->svm.garbage_collector.work);
}
+static void xe_svm_tlb_inval_count_stats_incr(struct xe_gt *gt)
+{
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_TLB_INVAL_COUNT, 1);
+}
+
static u8
xe_svm_range_notifier_event_begin(struct xe_vm *vm, struct drm_gpusvm_range *r,
const struct mmu_notifier_range *mmu_range,
@@ -128,7 +134,7 @@ xe_svm_range_notifier_event_begin(struct xe_vm *vm, struct drm_gpusvm_range *r,
range_debug(range, "NOTIFIER");
/* Skip if already unmapped or if no binding exist */
- if (range->base.flags.unmapped || !range->tile_present)
+ if (range->base.pages.flags.unmapped || !range->tile_present)
return 0;
range_debug(range, "NOTIFIER - EXECUTE");
@@ -144,13 +150,19 @@ xe_svm_range_notifier_event_begin(struct xe_vm *vm, struct drm_gpusvm_range *r,
*/
for_each_tile(tile, xe, id)
if (xe_pt_zap_ptes_range(tile, vm, range)) {
- tile_mask |= BIT(id);
/*
* WRITE_ONCE pairs with READ_ONCE in
* xe_vm_has_valid_gpu_mapping()
*/
WRITE_ONCE(range->tile_invalidated,
range->tile_invalidated | BIT(id));
+
+ if (!(tile_mask & BIT(id))) {
+ xe_svm_tlb_inval_count_stats_incr(tile->primary_gt);
+ if (tile->media_gt)
+ xe_svm_tlb_inval_count_stats_incr(tile->media_gt);
+ tile_mask |= BIT(id);
+ }
}
return tile_mask;
@@ -170,6 +182,24 @@ xe_svm_range_notifier_event_end(struct xe_vm *vm, struct drm_gpusvm_range *r,
mmu_range);
}
+static s64 xe_svm_stats_ktime_us_delta(ktime_t start)
+{
+ return IS_ENABLED(CONFIG_DEBUG_FS) ?
+ ktime_us_delta(ktime_get(), start) : 0;
+}
+
+static void xe_svm_tlb_inval_us_stats_incr(struct xe_gt *gt, ktime_t start)
+{
+ s64 us_delta = xe_svm_stats_ktime_us_delta(start);
+
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_TLB_INVAL_US, us_delta);
+}
+
+static ktime_t xe_svm_stats_ktime_get(void)
+{
+ return IS_ENABLED(CONFIG_DEBUG_FS) ? ktime_get() : 0;
+}
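
/*
 * Note (editor's annotation, not a hunk of this patch): the three helpers
 * above pair as a start/accumulate pattern around a timed operation, as used
 * in xe_svm_invalidate() below. When CONFIG_DEBUG_FS is disabled the start
 * time and the computed delta both evaluate to zero, so the stats counters
 * are effectively unaffected.
 *
 *	ktime_t start = xe_svm_stats_ktime_get();
 *
 *	... timed work ...
 *
 *	xe_svm_tlb_inval_us_stats_incr(gt, start);
 */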
+
static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
struct drm_gpusvm_notifier *notifier,
const struct mmu_notifier_range *mmu_range)
@@ -177,8 +207,10 @@ static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
struct xe_vm *vm = gpusvm_to_vm(gpusvm);
struct xe_device *xe = vm->xe;
struct drm_gpusvm_range *r, *first;
+ struct xe_tile *tile;
+ ktime_t start = xe_svm_stats_ktime_get();
u64 adj_start = mmu_range->start, adj_end = mmu_range->end;
- u8 tile_mask = 0;
+ u8 tile_mask = 0, id;
long err;
xe_svm_assert_in_notifier(vm);
@@ -224,13 +256,20 @@ static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
xe_device_wmb(xe);
- err = xe_vm_range_tilemask_tlb_invalidation(vm, adj_start, adj_end, tile_mask);
+ err = xe_vm_range_tilemask_tlb_inval(vm, adj_start, adj_end, tile_mask);
WARN_ON_ONCE(err);
range_notifier_event_end:
r = first;
drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end)
xe_svm_range_notifier_event_end(vm, r, mmu_range);
+ for_each_tile(tile, xe, id) {
+ if (tile_mask & BIT(id)) {
+ xe_svm_tlb_inval_us_stats_incr(tile->primary_gt, start);
+ if (tile->media_gt)
+ xe_svm_tlb_inval_us_stats_incr(tile->media_gt, start);
+ }
+ }
}
static int __xe_svm_garbage_collector(struct xe_vm *vm,
@@ -252,24 +291,73 @@ static int __xe_svm_garbage_collector(struct xe_vm *vm,
return 0;
}
+static int xe_svm_range_set_default_attr(struct xe_vm *vm, u64 range_start, u64 range_end)
+{
+ struct xe_vma *vma;
+ struct xe_vma_mem_attr default_attr = {
+ .preferred_loc = {
+ .devmem_fd = DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE,
+ .migration_policy = DRM_XE_MIGRATE_ALL_PAGES,
+ },
+ .atomic_access = DRM_XE_ATOMIC_UNDEFINED,
+ };
+ int err = 0;
+
+ vma = xe_vm_find_vma_by_addr(vm, range_start);
+ if (!vma)
+ return -EINVAL;
+
+ if (xe_vma_has_default_mem_attrs(vma))
+ return 0;
+
+ vm_dbg(&vm->xe->drm, "Existing VMA start=0x%016llx, vma_end=0x%016llx",
+ xe_vma_start(vma), xe_vma_end(vma));
+
+ if (xe_vma_start(vma) == range_start && xe_vma_end(vma) == range_end) {
+ default_attr.pat_index = vma->attr.default_pat_index;
+ default_attr.default_pat_index = vma->attr.default_pat_index;
+ vma->attr = default_attr;
+ } else {
+ vm_dbg(&vm->xe->drm, "Split VMA start=0x%016llx, vma_end=0x%016llx",
+ range_start, range_end);
+ err = xe_vm_alloc_cpu_addr_mirror_vma(vm, range_start, range_end - range_start);
+ if (err) {
+ drm_warn(&vm->xe->drm, "VMA SPLIT failed: %pe\n", ERR_PTR(err));
+ xe_vm_kill(vm, true);
+ return err;
+ }
+ }
+
+ /*
+	 * On a call from xe_svm_handle_pagefault() the original VMA might have
+	 * changed; signal this so the caller looks up the VMA again.
+ */
+ return -EAGAIN;
+}
+
static int xe_svm_garbage_collector(struct xe_vm *vm)
{
struct xe_svm_range *range;
- int err;
+ u64 range_start;
+ u64 range_end;
+ int err, ret = 0;
lockdep_assert_held_write(&vm->lock);
if (xe_vm_is_closed_or_banned(vm))
return -ENOENT;
- spin_lock(&vm->svm.garbage_collector.lock);
for (;;) {
+ spin_lock(&vm->svm.garbage_collector.lock);
range = list_first_entry_or_null(&vm->svm.garbage_collector.range_list,
typeof(*range),
garbage_collector_link);
if (!range)
break;
+ range_start = xe_svm_range_start(range);
+ range_end = xe_svm_range_end(range);
+
list_del(&range->garbage_collector_link);
spin_unlock(&vm->svm.garbage_collector.lock);
@@ -282,11 +370,17 @@ static int xe_svm_garbage_collector(struct xe_vm *vm)
return err;
}
- spin_lock(&vm->svm.garbage_collector.lock);
+ err = xe_svm_range_set_default_attr(vm, range_start, range_end);
+ if (err) {
+ if (err == -EAGAIN)
+ ret = -EAGAIN;
+ else
+ return err;
+ }
}
spin_unlock(&vm->svm.garbage_collector.lock);
- return 0;
+ return ret;
}
static void xe_svm_garbage_collector_work_func(struct work_struct *w)
@@ -306,21 +400,15 @@ static struct xe_vram_region *page_to_vr(struct page *page)
return container_of(page_pgmap(page), struct xe_vram_region, pagemap);
}
-static struct xe_tile *vr_to_tile(struct xe_vram_region *vr)
-{
- return container_of(vr, struct xe_tile, mem.vram);
-}
-
static u64 xe_vram_region_page_to_dpa(struct xe_vram_region *vr,
struct page *page)
{
u64 dpa;
- struct xe_tile *tile = vr_to_tile(vr);
u64 pfn = page_to_pfn(page);
u64 offset;
- xe_tile_assert(tile, is_device_private_page(page));
- xe_tile_assert(tile, (pfn << PAGE_SHIFT) >= vr->hpa_base);
+ xe_assert(vr->xe, is_device_private_page(page));
+ xe_assert(vr->xe, (pfn << PAGE_SHIFT) >= vr->hpa_base);
offset = (pfn << PAGE_SHIFT) - vr->hpa_base;
dpa = vr->dpa_base + offset;
@@ -333,17 +421,74 @@ enum xe_svm_copy_dir {
XE_SVM_COPY_TO_SRAM,
};
-static int xe_svm_copy(struct page **pages, dma_addr_t *dma_addr,
+static void xe_svm_copy_kb_stats_incr(struct xe_gt *gt,
+ const enum xe_svm_copy_dir dir,
+ int kb)
+{
+ if (dir == XE_SVM_COPY_TO_VRAM)
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_DEVICE_COPY_KB, kb);
+ else
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_CPU_COPY_KB, kb);
+}
+
+static void xe_svm_copy_us_stats_incr(struct xe_gt *gt,
+ const enum xe_svm_copy_dir dir,
+ unsigned long npages,
+ ktime_t start)
+{
+ s64 us_delta = xe_svm_stats_ktime_us_delta(start);
+
+ if (dir == XE_SVM_COPY_TO_VRAM) {
+ switch (npages) {
+ case 1:
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_DEVICE_COPY_US,
+ us_delta);
+ break;
+ case 16:
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_DEVICE_COPY_US,
+ us_delta);
+ break;
+ case 512:
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_DEVICE_COPY_US,
+ us_delta);
+ break;
+ }
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_DEVICE_COPY_US,
+ us_delta);
+ } else {
+ switch (npages) {
+ case 1:
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_CPU_COPY_US,
+ us_delta);
+ break;
+ case 16:
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_CPU_COPY_US,
+ us_delta);
+ break;
+ case 512:
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_CPU_COPY_US,
+ us_delta);
+ break;
+ }
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_CPU_COPY_US,
+ us_delta);
+ }
+}
+
+static int xe_svm_copy(struct page **pages,
+ struct drm_pagemap_addr *pagemap_addr,
unsigned long npages, const enum xe_svm_copy_dir dir)
{
struct xe_vram_region *vr = NULL;
- struct xe_tile *tile;
+ struct xe_gt *gt = NULL;
+ struct xe_device *xe;
struct dma_fence *fence = NULL;
unsigned long i;
#define XE_VRAM_ADDR_INVALID ~0x0ull
u64 vram_addr = XE_VRAM_ADDR_INVALID;
int err = 0, pos = 0;
bool sram = dir == XE_SVM_COPY_TO_SRAM;
+ ktime_t start = xe_svm_stats_ktime_get();
/*
* This flow is complex: it locates physically contiguous device pages,
@@ -365,12 +510,13 @@ static int xe_svm_copy(struct page **pages, dma_addr_t *dma_addr,
last = (i + 1) == npages;
/* No CPU page and no device pages queue'd to copy */
- if (!dma_addr[i] && vram_addr == XE_VRAM_ADDR_INVALID)
+ if (!pagemap_addr[i].addr && vram_addr == XE_VRAM_ADDR_INVALID)
continue;
if (!vr && spage) {
vr = page_to_vr(spage);
- tile = vr_to_tile(vr);
+ gt = xe_migrate_exec_queue(vr->migrate)->gt;
+ xe = vr->xe;
}
XE_WARN_ON(spage && page_to_vr(spage) != vr);
@@ -379,7 +525,7 @@ static int xe_svm_copy(struct page **pages, dma_addr_t *dma_addr,
* first device page, check if physical contiguous on subsequent
* device pages.
*/
- if (dma_addr[i] && spage) {
+ if (pagemap_addr[i].addr && spage) {
__vram_addr = xe_vram_region_page_to_dpa(vr, spage);
if (vram_addr == XE_VRAM_ADDR_INVALID) {
vram_addr = __vram_addr;
@@ -387,6 +533,14 @@ static int xe_svm_copy(struct page **pages, dma_addr_t *dma_addr,
}
match = vram_addr + PAGE_SIZE * (i - pos) == __vram_addr;
+ /* Expected with contiguous memory */
+ xe_assert(vr->xe, match);
+
+ if (pagemap_addr[i].order) {
+ i += NR_PAGES(pagemap_addr[i].order) - 1;
+ chunk = (i - pos) == (XE_MIGRATE_CHUNK_SIZE / PAGE_SIZE);
+ last = (i + 1) == npages;
+ }
}
/*
@@ -401,21 +555,26 @@ static int xe_svm_copy(struct page **pages, dma_addr_t *dma_addr,
int incr = (match && last) ? 1 : 0;
if (vram_addr != XE_VRAM_ADDR_INVALID) {
+ xe_svm_copy_kb_stats_incr(gt, dir,
+ (i - pos + incr) *
+ (PAGE_SIZE / SZ_1K));
if (sram) {
- vm_dbg(&tile->xe->drm,
+ vm_dbg(&xe->drm,
"COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
- vram_addr, (u64)dma_addr[pos], i - pos + incr);
- __fence = xe_migrate_from_vram(tile->migrate,
+ vram_addr,
+ (u64)pagemap_addr[pos].addr, i - pos + incr);
+ __fence = xe_migrate_from_vram(vr->migrate,
i - pos + incr,
vram_addr,
- dma_addr + pos);
+ &pagemap_addr[pos]);
} else {
- vm_dbg(&tile->xe->drm,
+ vm_dbg(&xe->drm,
"COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
- (u64)dma_addr[pos], vram_addr, i - pos + incr);
- __fence = xe_migrate_to_vram(tile->migrate,
+ (u64)pagemap_addr[pos].addr, vram_addr,
+ i - pos + incr);
+ __fence = xe_migrate_to_vram(vr->migrate,
i - pos + incr,
- dma_addr + pos,
+ &pagemap_addr[pos],
vram_addr);
}
if (IS_ERR(__fence)) {
@@ -428,7 +587,7 @@ static int xe_svm_copy(struct page **pages, dma_addr_t *dma_addr,
}
/* Setup physical address of next device page */
- if (dma_addr[i] && spage) {
+ if (pagemap_addr[i].addr && spage) {
vram_addr = __vram_addr;
pos = i;
} else {
@@ -437,19 +596,21 @@ static int xe_svm_copy(struct page **pages, dma_addr_t *dma_addr,
/* Extra mismatched device page, copy it */
if (!match && last && vram_addr != XE_VRAM_ADDR_INVALID) {
+ xe_svm_copy_kb_stats_incr(gt, dir,
+ (PAGE_SIZE / SZ_1K));
if (sram) {
- vm_dbg(&tile->xe->drm,
+ vm_dbg(&xe->drm,
"COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
- vram_addr, (u64)dma_addr[pos], 1);
- __fence = xe_migrate_from_vram(tile->migrate, 1,
+ vram_addr, (u64)pagemap_addr[pos].addr, 1);
+ __fence = xe_migrate_from_vram(vr->migrate, 1,
vram_addr,
- dma_addr + pos);
+ &pagemap_addr[pos]);
} else {
- vm_dbg(&tile->xe->drm,
+ vm_dbg(&xe->drm,
"COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
- (u64)dma_addr[pos], vram_addr, 1);
- __fence = xe_migrate_to_vram(tile->migrate, 1,
- dma_addr + pos,
+ (u64)pagemap_addr[pos].addr, vram_addr, 1);
+ __fence = xe_migrate_to_vram(vr->migrate, 1,
+ &pagemap_addr[pos],
vram_addr);
}
if (IS_ERR(__fence)) {
@@ -470,21 +631,31 @@ err_out:
dma_fence_put(fence);
}
+ /*
+	 * XXX: We can't derive the GT here (or anywhere in this function),
+	 * but compute always uses the primary GT, so accumulate stats on the
+	 * likely GT of the fault.
+ */
+ if (gt)
+ xe_svm_copy_us_stats_incr(gt, dir, npages, start);
+
return err;
#undef XE_MIGRATE_CHUNK_SIZE
#undef XE_VRAM_ADDR_INVALID
}
-static int xe_svm_copy_to_devmem(struct page **pages, dma_addr_t *dma_addr,
+static int xe_svm_copy_to_devmem(struct page **pages,
+ struct drm_pagemap_addr *pagemap_addr,
unsigned long npages)
{
- return xe_svm_copy(pages, dma_addr, npages, XE_SVM_COPY_TO_VRAM);
+ return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_VRAM);
}
-static int xe_svm_copy_to_ram(struct page **pages, dma_addr_t *dma_addr,
+static int xe_svm_copy_to_ram(struct page **pages,
+ struct drm_pagemap_addr *pagemap_addr,
unsigned long npages)
{
- return xe_svm_copy(pages, dma_addr, npages, XE_SVM_COPY_TO_SRAM);
+ return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_SRAM);
}
static struct xe_bo *to_xe_bo(struct drm_pagemap_devmem *devmem_allocation)
@@ -506,9 +677,9 @@ static u64 block_offset_to_pfn(struct xe_vram_region *vr, u64 offset)
return PHYS_PFN(offset + vr->hpa_base);
}
-static struct drm_buddy *tile_to_buddy(struct xe_tile *tile)
+static struct drm_buddy *vram_to_buddy(struct xe_vram_region *vram)
{
- return &tile->mem.vram.ttm.mm;
+ return &vram->ttm.mm;
}
static int xe_svm_populate_devmem_pfn(struct drm_pagemap_devmem *devmem_allocation,
@@ -522,8 +693,7 @@ static int xe_svm_populate_devmem_pfn(struct drm_pagemap_devmem *devmem_allocati
list_for_each_entry(block, blocks, link) {
struct xe_vram_region *vr = block->private;
- struct xe_tile *tile = vr_to_tile(vr);
- struct drm_buddy *buddy = tile_to_buddy(tile);
+ struct drm_buddy *buddy = vram_to_buddy(vr);
u64 block_pfn = block_offset_to_pfn(vr, drm_buddy_block_offset(block));
int i;
@@ -567,22 +737,26 @@ int xe_svm_init(struct xe_vm *vm)
{
int err;
- spin_lock_init(&vm->svm.garbage_collector.lock);
- INIT_LIST_HEAD(&vm->svm.garbage_collector.range_list);
- INIT_WORK(&vm->svm.garbage_collector.work,
- xe_svm_garbage_collector_work_func);
-
- err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM", &vm->xe->drm,
- current->mm, xe_svm_devm_owner(vm->xe), 0,
- vm->size, xe_modparam.svm_notifier_size * SZ_1M,
- &gpusvm_ops, fault_chunk_sizes,
- ARRAY_SIZE(fault_chunk_sizes));
- if (err)
- return err;
-
- drm_gpusvm_driver_set_lock(&vm->svm.gpusvm, &vm->lock);
+ if (vm->flags & XE_VM_FLAG_FAULT_MODE) {
+ spin_lock_init(&vm->svm.garbage_collector.lock);
+ INIT_LIST_HEAD(&vm->svm.garbage_collector.range_list);
+ INIT_WORK(&vm->svm.garbage_collector.work,
+ xe_svm_garbage_collector_work_func);
+
+ err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM", &vm->xe->drm,
+ current->mm, xe_svm_devm_owner(vm->xe), 0,
+ vm->size,
+ xe_modparam.svm_notifier_size * SZ_1M,
+ &gpusvm_ops, fault_chunk_sizes,
+ ARRAY_SIZE(fault_chunk_sizes));
+ drm_gpusvm_driver_set_lock(&vm->svm.gpusvm, &vm->lock);
+ } else {
+ err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM (simple)",
+ &vm->xe->drm, NULL, NULL, 0, 0, 0, NULL,
+ NULL, 0);
+ }
- return 0;
+ return err;
}
/**
@@ -653,7 +827,7 @@ bool xe_svm_range_validate(struct xe_vm *vm,
xe_svm_notifier_lock(vm);
ret = (range->tile_present & ~range->tile_invalidated & tile_mask) == tile_mask &&
- (devmem_preferred == range->base.flags.has_devmem_pages);
+ (devmem_preferred == range->base.pages.flags.has_devmem_pages);
xe_svm_notifier_unlock(vm);
@@ -683,66 +857,57 @@ u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 start, u64 end, struct xe_vma *v
}
#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
-static struct xe_vram_region *tile_to_vr(struct xe_tile *tile)
-{
- return &tile->mem.vram;
-}
-
static int xe_drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
unsigned long start, unsigned long end,
struct mm_struct *mm,
unsigned long timeslice_ms)
{
- struct xe_tile *tile = container_of(dpagemap, typeof(*tile), mem.vram.dpagemap);
- struct xe_device *xe = tile_to_xe(tile);
+ struct xe_vram_region *vr = container_of(dpagemap, typeof(*vr), dpagemap);
+ struct xe_device *xe = vr->xe;
struct device *dev = xe->drm.dev;
- struct xe_vram_region *vr = tile_to_vr(tile);
struct drm_buddy_block *block;
+ struct xe_validation_ctx vctx;
struct list_head *blocks;
+ struct drm_exec exec;
struct xe_bo *bo;
- ktime_t time_end = 0;
- int err, idx;
+ int err = 0, idx;
if (!drm_dev_enter(&xe->drm, &idx))
return -ENODEV;
xe_pm_runtime_get(xe);
- retry:
- bo = xe_bo_create_locked(tile_to_xe(tile), NULL, NULL, end - start,
- ttm_bo_type_device,
- XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_CPU_ADDR_MIRROR);
- if (IS_ERR(bo)) {
- err = PTR_ERR(bo);
- if (xe_vm_validate_should_retry(NULL, err, &time_end))
- goto retry;
- goto out_pm_put;
- }
-
- drm_pagemap_devmem_init(&bo->devmem_allocation, dev, mm,
- &dpagemap_devmem_ops,
- &tile->mem.vram.dpagemap,
- end - start);
-
- blocks = &to_xe_ttm_vram_mgr_resource(bo->ttm.resource)->blocks;
- list_for_each_entry(block, blocks, link)
- block->private = vr;
+ xe_validation_guard(&vctx, &xe->val, &exec, (struct xe_val_flags) {}, err) {
+ bo = xe_bo_create_locked(xe, NULL, NULL, end - start,
+ ttm_bo_type_device,
+ (IS_DGFX(xe) ? XE_BO_FLAG_VRAM(vr) : XE_BO_FLAG_SYSTEM) |
+ XE_BO_FLAG_CPU_ADDR_MIRROR, &exec);
+ drm_exec_retry_on_contention(&exec);
+ if (IS_ERR(bo)) {
+ err = PTR_ERR(bo);
+ xe_validation_retry_on_oom(&vctx, &err);
+ break;
+ }
- xe_bo_get(bo);
+ drm_pagemap_devmem_init(&bo->devmem_allocation, dev, mm,
+ &dpagemap_devmem_ops, dpagemap, end - start);
- /* Ensure the device has a pm ref while there are device pages active. */
- xe_pm_runtime_get_noresume(xe);
- err = drm_pagemap_migrate_to_devmem(&bo->devmem_allocation, mm,
- start, end, timeslice_ms,
- xe_svm_devm_owner(xe));
- if (err)
- xe_svm_devmem_release(&bo->devmem_allocation);
+ blocks = &to_xe_ttm_vram_mgr_resource(bo->ttm.resource)->blocks;
+ list_for_each_entry(block, blocks, link)
+ block->private = vr;
- xe_bo_unlock(bo);
- xe_bo_put(bo);
+ xe_bo_get(bo);
-out_pm_put:
+ /* Ensure the device has a pm ref while there are device pages active. */
+ xe_pm_runtime_get_noresume(xe);
+ err = drm_pagemap_migrate_to_devmem(&bo->devmem_allocation, mm,
+ start, end, timeslice_ms,
+ xe_svm_devm_owner(xe));
+ if (err)
+ xe_svm_devmem_release(&bo->devmem_allocation);
+ xe_bo_unlock(bo);
+ xe_bo_put(bo);
+ }
xe_pm_runtime_put(xe);
drm_dev_exit(idx);
@@ -772,17 +937,17 @@ bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vm
struct xe_vm *vm = range_to_vm(&range->base);
u64 range_size = xe_svm_range_size(range);
- if (!range->base.flags.migrate_devmem || !preferred_region_is_vram)
+ if (!range->base.pages.flags.migrate_devmem || !preferred_region_is_vram)
return false;
xe_assert(vm->xe, IS_DGFX(vm->xe));
- if (preferred_region_is_vram && xe_svm_range_in_vram(range)) {
+ if (xe_svm_range_in_vram(range)) {
drm_info(&vm->xe->drm, "Range is already in VRAM\n");
return false;
}
- if (preferred_region_is_vram && range_size < SZ_64K && !supports_4K_migration(vm->xe)) {
+ if (range_size < SZ_64K && !supports_4K_migration(vm->xe)) {
drm_dbg(&vm->xe->drm, "Platform doesn't support SZ_4K range migration\n");
return false;
}
@@ -790,40 +955,77 @@ bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vm
return true;
}
-/**
- * xe_svm_handle_pagefault() - SVM handle page fault
- * @vm: The VM.
- * @vma: The CPU address mirror VMA.
- * @gt: The gt upon the fault occurred.
- * @fault_addr: The GPU fault address.
- * @atomic: The fault atomic access bit.
- *
- * Create GPU bindings for a SVM page fault. Optionally migrate to device
- * memory.
- *
- * Return: 0 on success, negative error code on error.
- */
-int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
- struct xe_gt *gt, u64 fault_addr,
- bool atomic)
+#define DECL_SVM_RANGE_COUNT_STATS(elem, stat) \
+static void xe_svm_range_##elem##_count_stats_incr(struct xe_gt *gt, \
+ struct xe_svm_range *range) \
+{ \
+ switch (xe_svm_range_size(range)) { \
+ case SZ_4K: \
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_##stat##_COUNT, 1); \
+ break; \
+ case SZ_64K: \
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_##stat##_COUNT, 1); \
+ break; \
+ case SZ_2M: \
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_##stat##_COUNT, 1); \
+ break; \
+ } \
+} \
+
+DECL_SVM_RANGE_COUNT_STATS(fault, PAGEFAULT)
+DECL_SVM_RANGE_COUNT_STATS(valid_fault, VALID_PAGEFAULT)
+DECL_SVM_RANGE_COUNT_STATS(migrate, MIGRATE)
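
/*
 * For reference, DECL_SVM_RANGE_COUNT_STATS(fault, PAGEFAULT) expands to a
 * helper along these lines (illustrative expansion, not a literal hunk of
 * this patch):
 *
 *	static void xe_svm_range_fault_count_stats_incr(struct xe_gt *gt,
 *							struct xe_svm_range *range)
 *	{
 *		switch (xe_svm_range_size(range)) {
 *		case SZ_4K:
 *			xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_PAGEFAULT_COUNT, 1);
 *			break;
 *		case SZ_64K:
 *			xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_PAGEFAULT_COUNT, 1);
 *			break;
 *		case SZ_2M:
 *			xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_PAGEFAULT_COUNT, 1);
 *			break;
 *		}
 *	}
 */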
+
+#define DECL_SVM_RANGE_US_STATS(elem, stat) \
+static void xe_svm_range_##elem##_us_stats_incr(struct xe_gt *gt, \
+ struct xe_svm_range *range, \
+ ktime_t start) \
+{ \
+ s64 us_delta = xe_svm_stats_ktime_us_delta(start); \
+\
+ switch (xe_svm_range_size(range)) { \
+ case SZ_4K: \
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_##stat##_US, \
+ us_delta); \
+ break; \
+ case SZ_64K: \
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_##stat##_US, \
+ us_delta); \
+ break; \
+ case SZ_2M: \
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_##stat##_US, \
+ us_delta); \
+ break; \
+ } \
+} \
+
+DECL_SVM_RANGE_US_STATS(migrate, MIGRATE)
+DECL_SVM_RANGE_US_STATS(get_pages, GET_PAGES)
+DECL_SVM_RANGE_US_STATS(bind, BIND)
+DECL_SVM_RANGE_US_STATS(fault, PAGEFAULT)
+
+static int __xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
+ struct xe_gt *gt, u64 fault_addr,
+ bool need_vram)
{
+ int devmem_possible = IS_DGFX(vm->xe) &&
+ IS_ENABLED(CONFIG_DRM_XE_PAGEMAP);
struct drm_gpusvm_ctx ctx = {
.read_only = xe_vma_read_only(vma),
- .devmem_possible = IS_DGFX(vm->xe) &&
- IS_ENABLED(CONFIG_DRM_XE_PAGEMAP),
- .check_pages_threshold = IS_DGFX(vm->xe) &&
- IS_ENABLED(CONFIG_DRM_XE_PAGEMAP) ? SZ_64K : 0,
- .devmem_only = atomic && IS_DGFX(vm->xe) &&
- IS_ENABLED(CONFIG_DRM_XE_PAGEMAP),
- .timeslice_ms = atomic && IS_DGFX(vm->xe) &&
- IS_ENABLED(CONFIG_DRM_XE_PAGEMAP) ?
+ .devmem_possible = devmem_possible,
+ .check_pages_threshold = devmem_possible ? SZ_64K : 0,
+ .devmem_only = need_vram && devmem_possible,
+ .timeslice_ms = need_vram && devmem_possible ?
vm->xe->atomic_svm_timeslice_ms : 0,
};
+ struct xe_validation_ctx vctx;
+ struct drm_exec exec;
struct xe_svm_range *range;
struct dma_fence *fence;
+ struct drm_pagemap *dpagemap;
struct xe_tile *tile = gt_to_tile(gt);
int migrate_try_count = ctx.devmem_only ? 3 : 1;
- ktime_t end = 0;
+ ktime_t start = xe_svm_stats_ktime_get(), bind_start, get_pages_start;
int err;
lockdep_assert_held_write(&vm->lock);
@@ -842,17 +1044,34 @@ retry:
if (IS_ERR(range))
return PTR_ERR(range);
- if (ctx.devmem_only && !range->base.flags.migrate_devmem)
- return -EACCES;
+ xe_svm_range_fault_count_stats_incr(gt, range);
- if (xe_svm_range_is_valid(range, tile, ctx.devmem_only))
- return 0;
+ if (ctx.devmem_only && !range->base.pages.flags.migrate_devmem) {
+ err = -EACCES;
+ goto out;
+ }
+
+ if (xe_svm_range_is_valid(range, tile, ctx.devmem_only)) {
+ xe_svm_range_valid_fault_count_stats_incr(gt, range);
+ range_debug(range, "PAGE FAULT - VALID");
+ goto out;
+ }
range_debug(range, "PAGE FAULT");
+ dpagemap = xe_vma_resolve_pagemap(vma, tile);
if (--migrate_try_count >= 0 &&
- xe_svm_range_needs_migrate_to_vram(range, vma, IS_DGFX(vm->xe))) {
+ xe_svm_range_needs_migrate_to_vram(range, vma, !!dpagemap || ctx.devmem_only)) {
+ ktime_t migrate_start = xe_svm_stats_ktime_get();
+
+		/*
+		 * TODO: For multi-device, dpagemap will be used to find the
+		 * remote tile and remote device. xe_svm_alloc_vram() will need
+		 * to be modified to use dpagemap for future multi-device
+		 * support.
+ */
+ xe_svm_range_migrate_count_stats_incr(gt, range);
err = xe_svm_alloc_vram(tile, range, &ctx);
+ xe_svm_range_migrate_us_stats_incr(gt, range, migrate_start);
ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */
if (err) {
if (migrate_try_count || !ctx.devmem_only) {
@@ -869,6 +1088,8 @@ retry:
}
}
+ get_pages_start = xe_svm_stats_ktime_get();
+
range_debug(range, "GET PAGES");
err = xe_svm_range_get_pages(vm, range, &ctx);
/* Corner where CPU mappings have changed */
@@ -888,37 +1109,89 @@ retry:
}
if (err) {
range_debug(range, "PAGE FAULT - FAIL PAGE COLLECT");
- goto err_out;
+ goto out;
}
+ xe_svm_range_get_pages_us_stats_incr(gt, range, get_pages_start);
range_debug(range, "PAGE FAULT - BIND");
-retry_bind:
- xe_vm_lock(vm, false);
- fence = xe_vm_range_rebind(vm, vma, range, BIT(tile->id));
- if (IS_ERR(fence)) {
- xe_vm_unlock(vm);
- err = PTR_ERR(fence);
- if (err == -EAGAIN) {
- ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */
- range_debug(range, "PAGE FAULT - RETRY BIND");
- goto retry;
+ bind_start = xe_svm_stats_ktime_get();
+ xe_validation_guard(&vctx, &vm->xe->val, &exec, (struct xe_val_flags) {}, err) {
+ err = xe_vm_drm_exec_lock(vm, &exec);
+ drm_exec_retry_on_contention(&exec);
+
+ xe_vm_set_validation_exec(vm, &exec);
+ fence = xe_vm_range_rebind(vm, vma, range, BIT(tile->id));
+ xe_vm_set_validation_exec(vm, NULL);
+ if (IS_ERR(fence)) {
+ drm_exec_retry_on_contention(&exec);
+ err = PTR_ERR(fence);
+ xe_validation_retry_on_oom(&vctx, &err);
+ xe_svm_range_bind_us_stats_incr(gt, range, bind_start);
+ break;
}
- if (xe_vm_validate_should_retry(NULL, err, &end))
- goto retry_bind;
- goto err_out;
}
- xe_vm_unlock(vm);
+ if (err)
+ goto err_out;
dma_fence_wait(fence, false);
dma_fence_put(fence);
+ xe_svm_range_bind_us_stats_incr(gt, range, bind_start);
+
+out:
+ xe_svm_range_fault_us_stats_incr(gt, range, start);
+ return 0;
err_out:
+ if (err == -EAGAIN) {
+ ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */
+ range_debug(range, "PAGE FAULT - RETRY BIND");
+ goto retry;
+ }
return err;
}
/**
+ * xe_svm_handle_pagefault() - SVM handle page fault
+ * @vm: The VM.
+ * @vma: The CPU address mirror VMA.
+ * @gt: The gt upon the fault occurred.
+ * @fault_addr: The GPU fault address.
+ * @atomic: The fault atomic access bit.
+ *
+ * Create GPU bindings for a SVM page fault. Optionally migrate to device
+ * memory.
+ *
+ * Return: 0 on success, negative error code on error.
+ */
+int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
+ struct xe_gt *gt, u64 fault_addr,
+ bool atomic)
+{
+ int need_vram, ret;
+retry:
+ need_vram = xe_vma_need_vram_for_atomic(vm->xe, vma, atomic);
+ if (need_vram < 0)
+ return need_vram;
+
+ ret = __xe_svm_handle_pagefault(vm, vma, gt, fault_addr,
+ need_vram ? true : false);
+ if (ret == -EAGAIN) {
+ /*
+ * Retry once on -EAGAIN to re-lookup the VMA, as the original VMA
+ * may have been split by xe_svm_range_set_default_attr.
+ */
+ vma = xe_vm_find_vma_by_addr(vm, fault_addr);
+ if (!vma)
+ return -EINVAL;
+
+ goto retry;
+ }
+ return ret;
+}
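
/*
 * Hypothetical caller sketch (editor's annotation, not part of this patch):
 * the GT page-fault path is expected to resolve the faulting VMA under
 * vm->lock and, for a CPU address mirror VMA, forward the fault here. Helper
 * names outside this file are assumptions.
 *
 *	vma = xe_vm_find_vma_by_addr(vm, fault_addr);
 *	if (vma && xe_vma_is_cpu_addr_mirror(vma))
 *		err = xe_svm_handle_pagefault(vm, vma, gt, fault_addr, atomic);
 */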
+
+/**
* xe_svm_has_mapping() - SVM has mappings
* @vm: The VM.
* @start: Start address.
@@ -934,6 +1207,41 @@ bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
}
/**
+ * xe_svm_unmap_address_range - Unmap SVM mappings and ranges
+ * @vm: The VM
+ * @start: start address
+ * @end: end address
+ *
+ * This function unmaps SVM ranges if the start or end address falls inside them.
+ */
+void xe_svm_unmap_address_range(struct xe_vm *vm, u64 start, u64 end)
+{
+ struct drm_gpusvm_notifier *notifier, *next;
+
+ lockdep_assert_held_write(&vm->lock);
+
+ drm_gpusvm_for_each_notifier_safe(notifier, next, &vm->svm.gpusvm, start, end) {
+ struct drm_gpusvm_range *range, *__next;
+
+ drm_gpusvm_for_each_range_safe(range, __next, notifier, start, end) {
+ if (start > drm_gpusvm_range_start(range) ||
+ end < drm_gpusvm_range_end(range)) {
+ if (IS_DGFX(vm->xe) && xe_svm_range_in_vram(to_xe_range(range)))
+ drm_gpusvm_range_evict(&vm->svm.gpusvm, range);
+ drm_gpusvm_range_get(range);
+ __xe_svm_garbage_collector(vm, to_xe_range(range));
+ if (!list_empty(&to_xe_range(range)->garbage_collector_link)) {
+ spin_lock(&vm->svm.garbage_collector.lock);
+ list_del(&to_xe_range(range)->garbage_collector_link);
+ spin_unlock(&vm->svm.garbage_collector.lock);
+ }
+ drm_gpusvm_range_put(range);
+ }
+ }
+ }
+}
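
/*
 * Illustrative usage sketch (editor's annotation, not part of this patch):
 * callers must hold vm->lock for writing, matching the lockdep assertion
 * above.
 *
 *	down_write(&vm->lock);
 *	xe_svm_unmap_address_range(vm, start, end);
 *	up_write(&vm->lock);
 */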
+
+/**
* xe_svm_bo_evict() - SVM evict BO to system memory
* @bo: BO to evict
*
@@ -967,7 +1275,7 @@ struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
r = drm_gpusvm_range_find_or_insert(&vm->svm.gpusvm, max(addr, xe_vma_start(vma)),
xe_vma_start(vma), xe_vma_end(vma), ctx);
if (IS_ERR(r))
- return ERR_PTR(PTR_ERR(r));
+ return ERR_CAST(r);
return to_xe_range(r);
}
@@ -997,8 +1305,94 @@ int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
return err;
}
+/**
+ * xe_svm_ranges_zap_ptes_in_range - Clear PTEs of SVM ranges in an input range
+ * @vm: Pointer to the xe_vm structure
+ * @start: Start of the input range
+ * @end: End of the input range
+ *
+ * This function removes the page table entries (PTEs) associated with the
+ * SVM ranges within the given input start and end addresses.
+ *
+ * Return: tile_mask of tiles whose GTs need TLB invalidation.
+ */
+u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end)
+{
+ struct drm_gpusvm_notifier *notifier;
+ struct xe_svm_range *range;
+ u64 adj_start, adj_end;
+ struct xe_tile *tile;
+ u8 tile_mask = 0;
+ u8 id;
+
+ lockdep_assert(lockdep_is_held_type(&vm->svm.gpusvm.notifier_lock, 1) &&
+ lockdep_is_held_type(&vm->lock, 0));
+
+ drm_gpusvm_for_each_notifier(notifier, &vm->svm.gpusvm, start, end) {
+ struct drm_gpusvm_range *r = NULL;
+
+ adj_start = max(start, drm_gpusvm_notifier_start(notifier));
+ adj_end = min(end, drm_gpusvm_notifier_end(notifier));
+ drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end) {
+ range = to_xe_range(r);
+ for_each_tile(tile, vm->xe, id) {
+ if (xe_pt_zap_ptes_range(tile, vm, range)) {
+ tile_mask |= BIT(id);
+ /*
+ * WRITE_ONCE pairs with READ_ONCE in
+ * xe_vm_has_valid_gpu_mapping().
+ * Must not fail after setting
+ * tile_invalidated and before
+ * TLB invalidation.
+ */
+ WRITE_ONCE(range->tile_invalidated,
+ range->tile_invalidated | BIT(id));
+ }
+ }
+ }
+ }
+
+ return tile_mask;
+}
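
/*
 * Illustrative caller sketch (editor's annotation, not part of this patch):
 * with vm->lock held for writing and the gpusvm notifier lock held, per the
 * lockdep assertion above, the returned tile_mask is meant to feed a ranged
 * TLB invalidation, mirroring the notifier path earlier in this file.
 *
 *	tile_mask = xe_svm_ranges_zap_ptes_in_range(vm, start, end);
 *	if (tile_mask) {
 *		xe_device_wmb(vm->xe);
 *		err = xe_vm_range_tilemask_tlb_inval(vm, start, end, tile_mask);
 *	}
 */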
+
#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
+static struct drm_pagemap *tile_local_pagemap(struct xe_tile *tile)
+{
+ return &tile->mem.vram->dpagemap;
+}
+
+/**
+ * xe_vma_resolve_pagemap - Resolve the appropriate DRM pagemap for a VMA
+ * @vma: Pointer to the xe_vma structure containing memory attributes
+ * @tile: Pointer to the xe_tile structure used as fallback for VRAM mapping
+ *
+ * This function determines the correct DRM pagemap to use for a given VMA.
+ * It first checks the devmem_fd provided in the VMA's preferred location.
+ * If the devmem_fd selects the default system location, it returns NULL,
+ * indicating that no pagemap is available and system memory (smem) is to be
+ * used as the preferred location. If the devmem_fd selects the default
+ * device, it returns the VRAM pagemap associated with the tile.
+ *
+ * Future support for multi-device configurations may use drm_pagemap_from_fd()
+ * to resolve pagemaps from arbitrary file descriptors.
+ *
+ * Return: A pointer to the resolved drm_pagemap, or NULL if none is applicable.
+ */
+struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile)
+{
+ s32 fd = (s32)vma->attr.preferred_loc.devmem_fd;
+
+ if (fd == DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM)
+ return NULL;
+
+ if (fd == DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE)
+ return IS_DGFX(tile_to_xe(tile)) ? tile_local_pagemap(tile) : NULL;
+
+ /* TODO: Support multi-device with drm_pagemap_from_fd(fd) */
+ return NULL;
+}
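
/*
 * Usage in this patch's fault path (see __xe_svm_handle_pagefault() above;
 * shown here only as an editor's annotation): a non-NULL pagemap, or a
 * devmem-only fault, selects VRAM as the migration target for the range.
 *
 *	dpagemap = xe_vma_resolve_pagemap(vma, tile);
 *	if (xe_svm_range_needs_migrate_to_vram(range, vma,
 *					       !!dpagemap || ctx.devmem_only))
 *		err = xe_svm_alloc_vram(tile, range, &ctx);
 */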
+
/**
* xe_svm_alloc_vram()- Allocate device memory pages for range,
* migrating existing data.
@@ -1013,17 +1407,17 @@ int xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range,
{
struct drm_pagemap *dpagemap;
- xe_assert(tile_to_xe(tile), range->base.flags.migrate_devmem);
+ xe_assert(tile_to_xe(tile), range->base.pages.flags.migrate_devmem);
range_debug(range, "ALLOCATE VRAM");
- dpagemap = xe_tile_local_pagemap(tile);
+ dpagemap = tile_local_pagemap(tile);
return drm_pagemap_populate_mm(dpagemap, xe_svm_range_start(range),
xe_svm_range_end(range),
range->base.gpusvm->mm,
ctx->timeslice_ms);
}
-static struct drm_pagemap_device_addr
+static struct drm_pagemap_addr
xe_drm_pagemap_device_map(struct drm_pagemap *dpagemap,
struct device *dev,
struct page *page,
@@ -1042,7 +1436,7 @@ xe_drm_pagemap_device_map(struct drm_pagemap *dpagemap,
prot = 0;
}
- return drm_pagemap_device_addr_encode(addr, prot, order, dir);
+ return drm_pagemap_addr_encode(addr, prot, order, dir);
}
static const struct drm_pagemap_ops xe_drm_pagemap_ops = {
@@ -1111,6 +1505,11 @@ int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
{
return 0;
}
+
+struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile)
+{
+ return NULL;
+}
#endif
/**