author    Ian Forbes <ian.forbes@broadcom.com>    2025-05-30 13:35:08 -0500
committer Zack Rusin <zack.rusin@broadcom.com>    2025-06-17 22:49:31 -0400
commit    c82f55f4aa57bf5ba412d55856fe50514b47b971 (patch)
tree      d3ed4789760119f211eaf2a9f096f59f0efee154 /drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
parent    a72002cb181f350734108228b24c5d10d358f95a (diff)
drm/vmwgfx: Update last_read_seqno under the fence lock
There was a possible race in vmw_update_seqno. Because of this race it was possible for last_read_seqno to go backwards. Remove this function and replace it with vmw_fences_update, which now sets and returns last_read_seqno while holding the fence lock. This serialization via the fence lock ensures that last_read_seqno is monotonic again.

Signed-off-by: Ian Forbes <ian.forbes@broadcom.com>
Signed-off-by: Zack Rusin <zack.rusin@broadcom.com>
Link: https://lore.kernel.org/r/20250530183510.733175-1-ian.forbes@broadcom.com
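As a rough illustration of the locking scheme the message describes (not the driver's actual code: fence_state, fences_update() and seqno_passed() are made-up names, and pthreads plus C11 atomics stand in for the kernel's fence lock and atomic helpers), a minimal userspace sketch of the pattern looks like this:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define FENCE_WRAP (1u << 24)           /* arbitrary wrap window for the sketch */

/* Illustrative stand-in for the device/fence-manager state. */
struct fence_state {
	pthread_mutex_t lock;           /* plays the role of the fence lock */
	_Atomic uint32_t last_read;     /* last seqno published to readers  */
};

/* Pretend hardware counter; stands in for reading the device seqno. */
static _Atomic uint32_t hw_counter;

static uint32_t hw_read_seqno(void)
{
	return atomic_load(&hw_counter);
}

/*
 * Update path: read the hardware seqno and publish it inside one
 * critical section, returning the value that was published.  The
 * hardware counter only advances and every updater serializes on
 * the lock, so a stale read can no longer overwrite a newer value
 * and last_read stays monotonic.
 */
static uint32_t fences_update(struct fence_state *fs)
{
	uint32_t seqno;

	pthread_mutex_lock(&fs->lock);
	seqno = hw_read_seqno();
	atomic_store_explicit(&fs->last_read, seqno, memory_order_release);
	pthread_mutex_unlock(&fs->lock);

	return seqno;
}

/*
 * Reader path mirroring the reworked vmw_seqno_passed(): a cheap
 * acquire load first, falling back to a locked update on a miss.
 */
static bool seqno_passed(struct fence_state *fs, uint32_t seqno)
{
	uint32_t last = atomic_load_explicit(&fs->last_read,
					     memory_order_acquire);

	if (last - seqno < FENCE_WRAP)
		return true;

	return fences_update(fs) - seqno < FENCE_WRAP;
}

int main(void)
{
	struct fence_state fs = { .lock = PTHREAD_MUTEX_INITIALIZER };

	atomic_store(&hw_counter, 42);          /* hardware "completes" seqno 42 */
	return seqno_passed(&fs, 40) ? 0 : 1;   /* 40 has passed -> exit 0 */
}

The key point of the commit is visible in fences_update(): the read from hardware and the store to the published value happen in the same critical section, so two racing updaters can no longer publish out of order.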
Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_irq.c')
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_irq.c  17
1 file changed, 4 insertions, 13 deletions
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index 086e69a130d4..592cd78e10e0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -123,26 +123,17 @@ static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0);
}
-void vmw_update_seqno(struct vmw_private *dev_priv)
-{
- uint32_t seqno = vmw_fence_read(dev_priv);
-
- if (dev_priv->last_read_seqno != seqno) {
- dev_priv->last_read_seqno = seqno;
- vmw_fences_update(dev_priv->fman);
- }
-}
-
bool vmw_seqno_passed(struct vmw_private *dev_priv,
uint32_t seqno)
{
bool ret;
+ u32 last_read_seqno = atomic_read_acquire(&dev_priv->last_read_seqno);
- if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
+ if (last_read_seqno - seqno < VMW_FENCE_WRAP)
return true;
- vmw_update_seqno(dev_priv);
- if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
+ last_read_seqno = vmw_fences_update(dev_priv->fman);
+ if (last_read_seqno - seqno < VMW_FENCE_WRAP)
return true;
if (!vmw_has_fences(dev_priv) && vmw_fifo_idle(dev_priv, seqno))