author     Noah Misch <noah@leadboat.com>  2024-09-24 15:25:18 -0700
committer  Noah Misch <noah@leadboat.com>  2024-09-24 15:25:24 -0700
commit     14c57cb63907eb7af0f973022b919c0f777db0d9 (patch)
tree       58f3fa6513e20513cad1ee3affde7fe8424814e8 /src/backend/access/heap
parent     a8ad1929d2ec04a5e46dd51d2ef5768c7179ef0b (diff)
For inplace update durability, make heap_update() callers wait.
The previous commit fixed some ways of losing an inplace update.  It
remained possible to lose one when a backend working toward a
heap_update() copied a tuple into memory just before inplace update of
that tuple.  In catalogs eligible for inplace update, use LOCKTAG_TUPLE
to govern admission to the steps of copying an old tuple, modifying it,
and issuing heap_update().  This includes MERGE commands.  To avoid
changing most of the pg_class DDL, don't require LOCKTAG_TUPLE when
holding a relation lock sufficient to exclude inplace updaters.

Back-patch to v12 (all supported versions).  In v13 and v12, "UPDATE
pg_class" or "UPDATE pg_database" can still lose an inplace update.
The v14+ UPDATE fix needs commit 86dc90056dfdbd9d1b891718d2e5614e3e432f35,
and it wasn't worth reimplementing that fix without such infrastructure.

Reviewed by Nitin Motiani and (in earlier versions) Heikki Linnakangas.

Discussion: https://postgr.es/m/20231027214946.79.nmisch@google.com
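As a worked illustration of the protocol this message describes, the sketch
below shows a pg_database updater taking LOCKTAG_TUPLE before copying the
tuple it will pass to heap_update().  This is hypothetical code, not part of
the commit; set_datconnlimit_sketch() is an invented name, and the
recheck-after-lock is simplified where SearchSysCacheLocked1() would loop.

    #include "postgres.h"

    #include "access/htup_details.h"
    #include "access/table.h"
    #include "catalog/indexing.h"
    #include "catalog/pg_database.h"
    #include "storage/itemptr.h"
    #include "storage/lmgr.h"
    #include "utils/syscache.h"

    /* Hypothetical helper; not part of the patch. */
    static void
    set_datconnlimit_sketch(Oid dboid, int32 newlimit)
    {
        Relation    rel = table_open(DatabaseRelationId, RowExclusiveLock);
        HeapTuple   tup;
        ItemPointerData otid;

        /* Learn the row's TID; this first copy is used only for that. */
        tup = SearchSysCacheCopy1(DATABASEOID, ObjectIdGetDatum(dboid));
        if (!HeapTupleIsValid(tup))
            elog(ERROR, "cache lookup failed for database %u", dboid);
        otid = tup->t_self;
        heap_freetuple(tup);

        /* New rule: take LOCKTAG_TUPLE before copying the tuple to modify. */
        LockTuple(rel, &otid, InplaceUpdateTupleLock);

        /* Copy again under the lock; a robust caller retries if the TID moved. */
        tup = SearchSysCacheCopy1(DATABASEOID, ObjectIdGetDatum(dboid));
        if (!HeapTupleIsValid(tup) || !ItemPointerEquals(&tup->t_self, &otid))
            elog(ERROR, "database %u tuple moved; real callers retry here", dboid);

        ((Form_pg_database) GETSTRUCT(tup))->datconnlimit = newlimit;
        CatalogTupleUpdate(rel, &otid, tup);

        /* Release immediately after the update, per the README rule. */
        UnlockTuple(rel, &otid, InplaceUpdateTupleLock);

        heap_freetuple(tup);
        table_close(rel, RowExclusiveLock);
    }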
Diffstat (limited to 'src/backend/access/heap')
-rw-r--r--  src/backend/access/heap/README.tuplock |  42
-rw-r--r--  src/backend/access/heap/heapam.c       | 150
2 files changed, 191 insertions(+), 1 deletion(-)
diff --git a/src/backend/access/heap/README.tuplock b/src/backend/access/heap/README.tuplock
index ddb2defd28b..818cd7f9806 100644
--- a/src/backend/access/heap/README.tuplock
+++ b/src/backend/access/heap/README.tuplock
@@ -154,6 +154,48 @@ The following infomask bits are applicable:
We currently never set the HEAP_XMAX_COMMITTED when the HEAP_XMAX_IS_MULTI bit
is set.
+Locking to write inplace-updated tables
+---------------------------------------
+
+If IsInplaceUpdateRelation() returns true for a table, the table is a system
+catalog that receives systable_inplace_update_begin() calls. Preparing a
+heap_update() of these tables follows additional locking rules, to ensure we
+don't lose the effects of an inplace update. In particular, consider a moment
+when a backend has fetched the old tuple to modify, not yet having called
+heap_update(). Another backend's inplace update starting then can't conclude
+until the heap_update() places its new tuple in a buffer. We enforce that
+using locktags as follows. While DDL code is the main audience, the executor
+follows these rules to make e.g. "MERGE INTO pg_class" safer. Locking rules
+are per-catalog:
+
+ pg_class systable_inplace_update_begin() callers: before the call, acquire a
+ lock on the relation in mode ShareUpdateExclusiveLock or stricter. If the
+ update targets a row of RELKIND_INDEX (but not RELKIND_PARTITIONED_INDEX),
+ that lock must be on the table. Locking the index rel is not necessary.
+ (This allows VACUUM to overwrite per-index pg_class while holding a lock on
+ the table alone.) systable_inplace_update_begin() acquires and releases
+ LOCKTAG_TUPLE in InplaceUpdateTupleLock, an alias for ExclusiveLock, on each
+ tuple it overwrites.
+
+ pg_class heap_update() callers: before copying the tuple to modify, take a
+ lock on the tuple, a ShareUpdateExclusiveLock on the relation, or a
+ ShareRowExclusiveLock or stricter on the relation.
+
+ SearchSysCacheLocked1() is one convenient way to acquire the tuple lock.
+ Most heap_update() callers already hold a suitable lock on the relation for
+ other reasons and can skip the tuple lock. If you do acquire the tuple
+ lock, release it immediately after the update.
+
+
+ pg_database: before copying the tuple to modify, all updaters of pg_database
+ rows acquire LOCKTAG_TUPLE. (Few updaters acquire LOCKTAG_OBJECT on the
+ database OID, so it wasn't worth extending that as a second option.)
+
+Ideally, DDL might want to perform permissions checks before LockTuple(), as
+we do with RangeVarGetRelidExtended() callbacks. We typically don't bother.
+LOCKTAG_TUPLE acquirers release it after each row, so the potential
+inconvenience is lower.
+
Reading inplace-updated columns
-------------------------------
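To make the pg_class rules above concrete, here is a hedged sketch of a
systable_inplace_update_begin() caller, modeled loosely on VACUUM's pg_class
overwrites.  The function is invented for illustration; the argument shapes
of systable_inplace_update_begin()/_finish() follow the preceding commit and
should be treated as assumptions.  Per the rule above, the caller must
already hold ShareUpdateExclusiveLock or stricter on the relation whose row
changes (on the table, if relid names an index).

    #include "postgres.h"

    #include "access/genam.h"
    #include "access/htup_details.h"
    #include "access/table.h"
    #include "catalog/pg_class.h"
    #include "utils/fmgroids.h"

    /* Hypothetical helper; not part of the patch. */
    static void
    set_relhasindex_sketch(Oid relid, bool hasindex)
    {
        Relation    pg_class = table_open(RelationRelationId, RowExclusiveLock);
        ScanKeyData key[1];
        HeapTuple   tup;
        void       *state;

        ScanKeyInit(&key[0], Anum_pg_class_oid,
                    BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(relid));

        /* Takes and later releases LOCKTAG_TUPLE on the row it finds. */
        systable_inplace_update_begin(pg_class, ClassOidIndexId, true, NULL,
                                      1, key, &tup, &state);
        if (!HeapTupleIsValid(tup))
            elog(ERROR, "could not find tuple for relation %u", relid);

        ((Form_pg_class) GETSTRUCT(tup))->relhasindex = hasindex;

        /* Writes the change in place, then drops the tuple lock. */
        systable_inplace_update_finish(state, tup);

        heap_freetuple(tup);
        table_close(pg_class, RowExclusiveLock);
    }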
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 4de923a0ad8..846650c0ec0 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -51,6 +51,8 @@
#include "access/xloginsert.h"
#include "access/xlogutils.h"
#include "catalog/catalog.h"
+#include "catalog/pg_database.h"
+#include "catalog/pg_database_d.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "port/atomics.h"
@@ -76,6 +78,12 @@ static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf,
Buffer newbuf, HeapTuple oldtup,
HeapTuple newtup, HeapTuple old_key_tuple,
bool all_visible_cleared, bool new_all_visible_cleared);
+#ifdef USE_ASSERT_CHECKING
+static void check_lock_if_inplace_updateable_rel(Relation relation,
+ ItemPointer otid,
+ HeapTuple newtup);
+static void check_inplace_rel_lock(HeapTuple oldtup);
+#endif
static Bitmapset *HeapDetermineColumnsInfo(Relation relation,
Bitmapset *interesting_cols,
Bitmapset *external_cols,
@@ -115,6 +123,8 @@ static HeapTuple ExtractReplicaIdentity(Relation rel, HeapTuple tup, bool key_re
* heavyweight lock mode and MultiXactStatus values to use for any particular
* tuple lock strength.
*
+ * These interact with InplaceUpdateTupleLock, an alias for ExclusiveLock.
+ *
* Don't look at lockstatus/updstatus directly! Use get_mxact_status_for_lock
* instead.
*/
@@ -2975,6 +2985,10 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
(errcode(ERRCODE_INVALID_TRANSACTION_STATE),
errmsg("cannot update tuples during a parallel operation")));
+#ifdef USE_ASSERT_CHECKING
+ check_lock_if_inplace_updateable_rel(relation, otid, newtup);
+#endif
+
/*
* Fetch the list of attributes to be checked for various operations.
*
@@ -3821,6 +3835,128 @@ l2:
return TM_Ok;
}
+#ifdef USE_ASSERT_CHECKING
+/*
+ * Confirm adequate lock held during heap_update(), per rules from
+ * README.tuplock section "Locking to write inplace-updated tables".
+ */
+static void
+check_lock_if_inplace_updateable_rel(Relation relation,
+ ItemPointer otid,
+ HeapTuple newtup)
+{
+ /* LOCKTAG_TUPLE acceptable for any catalog */
+ switch (RelationGetRelid(relation))
+ {
+ case RelationRelationId:
+ case DatabaseRelationId:
+ {
+ LOCKTAG tuptag;
+
+ SET_LOCKTAG_TUPLE(tuptag,
+ relation->rd_lockInfo.lockRelId.dbId,
+ relation->rd_lockInfo.lockRelId.relId,
+ ItemPointerGetBlockNumber(otid),
+ ItemPointerGetOffsetNumber(otid));
if (LockHeldByMe(&tuptag, InplaceUpdateTupleLock, false))
+ return;
+ }
+ break;
+ default:
+ Assert(!IsInplaceUpdateRelation(relation));
+ return;
+ }
+
+ switch (RelationGetRelid(relation))
+ {
+ case RelationRelationId:
+ {
+ /* LOCKTAG_TUPLE or LOCKTAG_RELATION ok */
+ Form_pg_class classForm = (Form_pg_class) GETSTRUCT(newtup);
+ Oid relid = classForm->oid;
+ Oid dbid;
+ LOCKTAG tag;
+
+ if (IsSharedRelation(relid))
+ dbid = InvalidOid;
+ else
+ dbid = MyDatabaseId;
+
+ if (classForm->relkind == RELKIND_INDEX)
+ {
+ Relation irel = index_open(relid, AccessShareLock);
+
+ SET_LOCKTAG_RELATION(tag, dbid, irel->rd_index->indrelid);
+ index_close(irel, AccessShareLock);
+ }
+ else
+ SET_LOCKTAG_RELATION(tag, dbid, relid);
+
if (!LockHeldByMe(&tag, ShareUpdateExclusiveLock, false) &&
!LockHeldByMe(&tag, ShareRowExclusiveLock, true))
+ elog(WARNING,
+ "missing lock for relation \"%s\" (OID %u, relkind %c) @ TID (%u,%u)",
+ NameStr(classForm->relname),
+ relid,
+ classForm->relkind,
+ ItemPointerGetBlockNumber(otid),
+ ItemPointerGetOffsetNumber(otid));
+ }
+ break;
+ case DatabaseRelationId:
+ {
+ /* LOCKTAG_TUPLE required */
+ Form_pg_database dbForm = (Form_pg_database) GETSTRUCT(newtup);
+
+ elog(WARNING,
+ "missing lock on database \"%s\" (OID %u) @ TID (%u,%u)",
+ NameStr(dbForm->datname),
+ dbForm->oid,
+ ItemPointerGetBlockNumber(otid),
+ ItemPointerGetOffsetNumber(otid));
+ }
+ break;
+ }
+}
+
+/*
+ * Confirm adequate relation lock held, per rules from README.tuplock section
+ * "Locking to write inplace-updated tables".
+ */
+static void
+check_inplace_rel_lock(HeapTuple oldtup)
+{
+ Form_pg_class classForm = (Form_pg_class) GETSTRUCT(oldtup);
+ Oid relid = classForm->oid;
+ Oid dbid;
+ LOCKTAG tag;
+
+ if (IsSharedRelation(relid))
+ dbid = InvalidOid;
+ else
+ dbid = MyDatabaseId;
+
+ if (classForm->relkind == RELKIND_INDEX)
+ {
+ Relation irel = index_open(relid, AccessShareLock);
+
+ SET_LOCKTAG_RELATION(tag, dbid, irel->rd_index->indrelid);
+ index_close(irel, AccessShareLock);
+ }
+ else
+ SET_LOCKTAG_RELATION(tag, dbid, relid);
+
if (!LockHeldByMe(&tag, ShareUpdateExclusiveLock, true))
+ elog(WARNING,
+ "missing lock for relation \"%s\" (OID %u, relkind %c) @ TID (%u,%u)",
+ NameStr(classForm->relname),
+ relid,
+ classForm->relkind,
+ ItemPointerGetBlockNumber(&oldtup->t_self),
+ ItemPointerGetOffsetNumber(&oldtup->t_self));
+}
+#endif
+
/*
* Check if the specified attribute's values are the same. Subroutine for
* HeapDetermineColumnsInfo.
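For contrast with the assertion functions above, this hypothetical pg_class
heap_update() passes check_lock_if_inplace_updateable_rel() through the
relation-lock arm alone: DDL that already holds AccessExclusiveLock on the
target table is at "ShareRowExclusiveLock or stricter", so no LOCKTAG_TUPLE
is needed.  Names are invented for illustration.

    #include "postgres.h"

    #include "access/htup_details.h"
    #include "access/table.h"
    #include "catalog/indexing.h"
    #include "catalog/pg_class.h"
    #include "utils/syscache.h"

    /* Hypothetical helper; not part of the patch. */
    static void
    mark_relhastriggers_sketch(Oid target_relid)
    {
        /* DDL-style: AccessExclusiveLock exceeds ShareRowExclusiveLock. */
        Relation    targetrel = table_open(target_relid, AccessExclusiveLock);
        Relation    pg_class = table_open(RelationRelationId, RowExclusiveLock);
        HeapTuple   tup = SearchSysCacheCopy1(RELOID,
                                              ObjectIdGetDatum(target_relid));

        if (!HeapTupleIsValid(tup))
            elog(ERROR, "cache lookup failed for relation %u", target_relid);

        ((Form_pg_class) GETSTRUCT(tup))->relhastriggers = true;

        /* The new assertion sees the relation lock; no tuple lock taken. */
        CatalogTupleUpdate(pg_class, &tup->t_self, tup);

        heap_freetuple(tup);
        table_close(pg_class, RowExclusiveLock);
        table_close(targetrel, NoLock); /* hold the DDL lock until commit */
    }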
@@ -5848,15 +5984,21 @@ heap_inplace_lock(Relation relation,
TM_Result result;
bool ret;
+#ifdef USE_ASSERT_CHECKING
+ if (RelationGetRelid(relation) == RelationRelationId)
+ check_inplace_rel_lock(oldtup_ptr);
+#endif
+
Assert(BufferIsValid(buffer));
+ LockTuple(relation, &oldtup.t_self, InplaceUpdateTupleLock);
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
/*----------
* Interpret HeapTupleSatisfiesUpdate() like heap_update() does, except:
*
* - wait unconditionally
- * - no tuple locks
+ * - already locked tuple above, since inplace needs that unconditionally
* - don't recheck header after wait: simpler to defer to next iteration
* - don't try to continue even if the updater aborts: likewise
* - no crosscheck
@@ -5940,7 +6082,10 @@ heap_inplace_lock(Relation relation,
* don't bother optimizing that.
*/
if (!ret)
+ {
+ UnlockTuple(relation, &oldtup.t_self, InplaceUpdateTupleLock);
InvalidateCatalogSnapshot();
+ }
return ret;
}
@@ -5949,6 +6094,8 @@ heap_inplace_lock(Relation relation,
*
* The tuple cannot change size, and therefore its header fields and null
* bitmap (if any) don't change either.
+ *
+ * Since we hold LOCKTAG_TUPLE, no updater has a local copy of this tuple.
*/
void
heap_inplace_update_and_unlock(Relation relation,
@@ -6032,6 +6179,7 @@ heap_inplace_unlock(Relation relation,
HeapTuple oldtup, Buffer buffer)
{
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
+ UnlockTuple(relation, &oldtup->t_self, InplaceUpdateTupleLock);
}
/*