| author | Matthew Brost <matthew.brost@intel.com> | 2025-10-31 09:54:13 -0700 |
|---|---|---|
| committer | Matthew Brost <matthew.brost@intel.com> | 2025-11-04 09:04:29 -0800 |
| commit | 143aa16572c5508f95b2a78b1769b0c83bfee09c | |
| tree | cc542b9d4684c1f49480c51481c7f090906b190e | |
| parent | 79be336d1a5d0dce7c51ef7baccb2bcc3773800d | |
drm/xe: Implement xe_pagefault_handler
Enqueue (copy) the input struct xe_pagefault into a queue (i.e., into a
memory buffer) and schedule a worker to service it.
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Francois Dugast <francois.dugast@intel.com>
Tested-by: Francois Dugast <francois.dugast@intel.com>
Link: https://patch.msgid.link/20251031165416.2871503-5-matthew.brost@intel.com
| -rw-r--r-- | drivers/gpu/drm/xe/xe_pagefault.c | 32 |
1 file changed, 30 insertions, 2 deletions
```diff
diff --git a/drivers/gpu/drm/xe/xe_pagefault.c b/drivers/gpu/drm/xe/xe_pagefault.c
index b1decad9b54c..7b2ac01a558e 100644
--- a/drivers/gpu/drm/xe/xe_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_pagefault.c
@@ -3,6 +3,8 @@
  * Copyright © 2025 Intel Corporation
  */
 
+#include <linux/circ_buf.h>
+
 #include <drm/drm_managed.h>
 
 #include "xe_device.h"
@@ -167,6 +169,14 @@ void xe_pagefault_reset(struct xe_device *xe, struct xe_gt *gt)
 		xe_pagefault_queue_reset(xe, gt, xe->usm.pf_queue + i);
 }
 
+static bool xe_pagefault_queue_full(struct xe_pagefault_queue *pf_queue)
+{
+	lockdep_assert_held(&pf_queue->lock);
+
+	return CIRC_SPACE(pf_queue->head, pf_queue->tail, pf_queue->size) <=
+		xe_pagefault_entry_size();
+}
+
 /**
  * xe_pagefault_handler() - Page fault handler
  * @xe: xe device instance
@@ -179,6 +189,24 @@ void xe_pagefault_reset(struct xe_device *xe, struct xe_gt *gt)
  */
 int xe_pagefault_handler(struct xe_device *xe, struct xe_pagefault *pf)
 {
-	/* TODO - implement */
-	return 0;
+	struct xe_pagefault_queue *pf_queue = xe->usm.pf_queue +
+		(pf->consumer.asid % XE_PAGEFAULT_QUEUE_COUNT);
+	unsigned long flags;
+	bool full;
+
+	spin_lock_irqsave(&pf_queue->lock, flags);
+	full = xe_pagefault_queue_full(pf_queue);
+	if (!full) {
+		memcpy(pf_queue->data + pf_queue->head, pf, sizeof(*pf));
+		pf_queue->head = (pf_queue->head + xe_pagefault_entry_size()) %
+			pf_queue->size;
+		queue_work(xe->usm.pf_wq, &pf_queue->worker);
+	} else {
+		drm_warn(&xe->drm,
+			 "PageFault Queue (%d) full, shouldn't be possible\n",
+			 pf->consumer.asid % XE_PAGEFAULT_QUEUE_COUNT);
+	}
+	spin_unlock_irqrestore(&pf_queue->lock, flags);
+
+	return full ? -ENOSPC : 0;
 }
```
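For context on the full check: CIRC_SPACE() never reports more than size - 1 free bytes, so with entry-aligned head/tail offsets the test `CIRC_SPACE(...) <= xe_pagefault_entry_size()` declares the queue full while one slot is still unwritten, guaranteeing head never wraps onto tail. Below is a minimal userspace sketch of this arithmetic. The macros are copied from include/linux/circ_buf.h; ENTRY_SIZE and QUEUE_SIZE are illustrative values rather than the driver's, and the dequeue side is hypothetical (this patch only implements the producer; the worker that drains the queue lands separately).

```c
#include <stdio.h>
#include <string.h>

/* Mirrors include/linux/circ_buf.h; size must be a power of two. */
#define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
#define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

#define ENTRY_SIZE  64			/* bytes per queued fault (assumed) */
#define QUEUE_SIZE  (4 * ENTRY_SIZE)	/* power of two, 4 nominal slots */

static char data[QUEUE_SIZE];
static int head, tail;	/* byte offsets, always multiples of ENTRY_SIZE */

/* Producer: same full check as the patch. Since head/tail stay
 * entry-aligned, space <= ENTRY_SIZE means at most one slot is left,
 * and that slot is kept as the gap separating full from empty. */
static int enqueue(const char *entry)
{
	if (CIRC_SPACE(head, tail, QUEUE_SIZE) <= ENTRY_SIZE)
		return -1;	/* full */
	memcpy(data + head, entry, ENTRY_SIZE);
	head = (head + ENTRY_SIZE) % QUEUE_SIZE;
	return 0;
}

/* Consumer: the inverse step a worker might take (hypothetical). */
static int dequeue(char *entry)
{
	if (CIRC_CNT(head, tail, QUEUE_SIZE) < ENTRY_SIZE)
		return -1;	/* empty */
	memcpy(entry, data + tail, ENTRY_SIZE);
	tail = (tail + ENTRY_SIZE) % QUEUE_SIZE;
	return 0;
}

int main(void)
{
	char e[ENTRY_SIZE] = { 0 };
	int i, queued = 0;

	for (i = 0; i < 8; i++)
		queued += !enqueue(e);
	printf("queued %d, space %d\n", queued,
	       CIRC_SPACE(head, tail, QUEUE_SIZE));
	while (!dequeue(e))
		;
	printf("drained, cnt %d\n", CIRC_CNT(head, tail, QUEUE_SIZE));
	return 0;
}
```

Built with a plain C compiler this prints `queued 3, space 63` and then `drained, cnt 0`: of the four nominal slots only three are usable, the fourth being the permanent gap that lets head == tail unambiguously mean empty.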
