diff options
author | Noah Misch <noah@leadboat.com> | 2024-11-02 09:05:00 -0700 |
---|---|---|
committer | Noah Misch <noah@leadboat.com> | 2024-11-02 09:05:08 -0700 |
commit | 4b0f7d6c162e9a52e5c2bf1098d4ffd0b107425c (patch) | |
tree | d9bba9e1e83767d2f26185f61d2eff0fffa78599 /src/backend/access/heap/heapam.c | |
parent | 5e503e10d13ef7987a5b44b2c463a5d1bf5fd103 (diff) |
Revert "For inplace update, send nontransactional invalidations."
This reverts commit 95c5acb3fc261067ab65ddc0b2dca8e162f09442 (v17) and
counterparts in each other non-master branch. If released, that commit
would have caused a worst-in-years minor release regression, via
undetected LWLock self-deadlock. This commit and its self-deadlock fix
warrant more bake time in the master branch.
Reported by Alexander Lakhin.
Discussion: https://postgr.es/m/10ec0bc3-5933-1189-6bb8-5dec4114558e@gmail.com
Diffstat (limited to 'src/backend/access/heap/heapam.c')
-rw-r--r-- | src/backend/access/heap/heapam.c | 43 |
1 file changed, 7 insertions(+), 36 deletions(-)
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 36d1ae38fe6..b5da0f64eb1 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -6146,24 +6146,6 @@ heap_inplace_update_and_unlock(Relation relation,
 	if (oldlen != newlen || htup->t_hoff != tuple->t_data->t_hoff)
 		elog(ERROR, "wrong tuple length");
 
-	/*
-	 * Construct shared cache inval if necessary.  Note that because we only
-	 * pass the new version of the tuple, this mustn't be used for any
-	 * operations that could change catcache lookup keys.  But we aren't
-	 * bothering with index updates either, so that's true a fortiori.
-	 */
-	CacheInvalidateHeapTupleInplace(relation, tuple, NULL);
-
-	/*
-	 * Unlink relcache init files as needed.  If unlinking, acquire
-	 * RelCacheInitLock until after associated invalidations.  By doing this
-	 * in advance, if we checkpoint and then crash between inplace
-	 * XLogInsert() and inval, we don't rely on StartupXLOG() ->
-	 * RelationCacheInitFileRemove().  That uses elevel==LOG, so replay would
-	 * neglect to PANIC on EIO.
-	 */
-	PreInplace_Inval();
-
 	/* NO EREPORT(ERROR) from here till changes are logged */
 	START_CRIT_SECTION();
@@ -6207,28 +6189,17 @@ heap_inplace_update_and_unlock(Relation relation,
 		PageSetLSN(BufferGetPage(buffer), recptr);
 	}
 
-	LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
-
-	/*
-	 * Send invalidations to shared queue.  SearchSysCacheLocked1() assumes we
-	 * do this before UnlockTuple().
-	 *
-	 * If we're mutating a tuple visible only to this transaction, there's an
-	 * equivalent transactional inval from the action that created the tuple,
-	 * and this inval is superfluous.
-	 */
-	AtInplace_Inval();
-
 	END_CRIT_SECTION();
-	UnlockTuple(relation, &tuple->t_self, InplaceUpdateTupleLock);
-	AcceptInvalidationMessages();	/* local processing of just-sent inval */
+	heap_inplace_unlock(relation, oldtup, buffer);
 
 	/*
-	 * Queue a transactional inval.  The immediate invalidation we just sent
-	 * is the only one known to be necessary.  To reduce risk from the
-	 * transition to immediate invalidation, continue sending a transactional
-	 * invalidation like we've long done.  Third-party code might rely on it.
+	 * Send out shared cache inval if necessary.  Note that because we only
+	 * pass the new version of the tuple, this mustn't be used for any
+	 * operations that could change catcache lookup keys.  But we aren't
+	 * bothering with index updates either, so that's true a fortiori.
+	 *
+	 * XXX ROLLBACK discards the invalidation.  See test inplace-inval.spec.
 	 */
 	if (!IsBootstrapProcessingMode())
 		CacheInvalidateHeapTuple(relation, tuple, NULL);