Diffstat (limited to 'src/backend/executor/nodeModifyTable.c')
-rw-r--r--  src/backend/executor/nodeModifyTable.c | 26 ++++++++++++++++++++++++++
 1 file changed, 26 insertions(+), 0 deletions(-)
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
index 8c0f3126058..bfdf015f913 100644
--- a/src/backend/executor/nodeModifyTable.c
+++ b/src/backend/executor/nodeModifyTable.c
@@ -1199,6 +1199,7 @@ ExecUpdate(ModifyTableState *mtstate,
 	}
 	else
 	{
+		ItemPointerData lockedtid PG_USED_FOR_ASSERTS_ONLY;
 		LockTupleMode lockmode;
 		bool		partition_constraint_failed;
 		bool		update_indexes;
@@ -1390,6 +1391,26 @@ lreplace:
 			ExecConstraints(resultRelInfo, slot, estate);
 
+		/*
+		 * We lack the infrastructure to follow rules in README.tuplock
+		 * section "Locking to write inplace-updated tables".  Specifically,
+		 * we lack infrastructure to lock tupleid before this file's
+		 * ExecProcNode() call fetches the tuple's old columns.  Just take a
+		 * lock that silences check_lock_if_inplace_updateable_rel().  This
+		 * doesn't actually protect inplace updates like those rules intend,
+		 * so we may lose an inplace update that overlaps a superuser running
+		 * "UPDATE pg_class" or "UPDATE pg_database".
+		 */
+#ifdef USE_ASSERT_CHECKING
+		if (IsInplaceUpdateRelation(resultRelationDesc))
+		{
+			lockedtid = *tupleid;
+			LockTuple(resultRelationDesc, &lockedtid, InplaceUpdateTupleLock);
+		}
+		else
+			ItemPointerSetInvalid(&lockedtid);
+#endif
+
 		/*
 		 * replace the heap tuple
 		 *
 		 * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check
@@ -1405,6 +1426,11 @@ lreplace:
 									true /* wait for commit */ ,
 									&tmfd, &lockmode, &update_indexes);
 
+#ifdef USE_ASSERT_CHECKING
+		if (ItemPointerIsValid(&lockedtid))
+			UnlockTuple(resultRelationDesc, &lockedtid, InplaceUpdateTupleLock);
+#endif
+
 		switch (result)
 		{
 			case TM_SelfModified:
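
For context, below is a minimal, hypothetical sketch of the "lock the tuple before fetching the version you will update" ordering that README.tuplock asks for and that the comment in the hunk above says ExecUpdate() lacks infrastructure to follow. The function name fetch_pg_class_row_locked is invented; LockTuple(), UnlockTuple(), SearchSysCacheCopy1(), ItemPointerEquals() and the InplaceUpdateTupleLock lock level are existing PostgreSQL symbols, but the retry loop itself is only illustrative, not the project's implementation.

/*
 * Hypothetical sketch, not part of the patch above: lock a pg_class row's
 * TID before fetching the tuple version that will be updated, retrying if
 * the row moved in the meantime.  Returns a copied tuple with its tuple
 * lock held; the caller updates the row and then releases the lock.
 */
#include "postgres.h"

#include "access/htup_details.h"
#include "storage/itemptr.h"
#include "storage/lmgr.h"
#include "utils/rel.h"
#include "utils/syscache.h"

static HeapTuple
fetch_pg_class_row_locked(Relation pg_class_rel, Oid reloid)
{
	for (;;)
	{
		HeapTuple	tup;
		ItemPointerData tid;

		/* Provisional lookup, only to learn the row's current TID. */
		tup = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(reloid));
		if (!HeapTupleIsValid(tup))
			elog(ERROR, "cache lookup failed for relation %u", reloid);
		tid = tup->t_self;
		heap_freetuple(tup);

		/* Take the heavyweight tuple lock before the authoritative fetch. */
		LockTuple(pg_class_rel, &tid, InplaceUpdateTupleLock);

		/* Re-fetch; if the row has not moved, the lock covers this version. */
		tup = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(reloid));
		if (HeapTupleIsValid(tup) && ItemPointerEquals(&tid, &tup->t_self))
			return tup;

		/* The row moved (or vanished) concurrently; release and retry. */
		UnlockTuple(pg_class_rel, &tid, InplaceUpdateTupleLock);
		if (HeapTupleIsValid(tup))
			heap_freetuple(tup);
	}
}

A caller would modify the returned copy, write it back (for example with CatalogTupleUpdate()), and only then release the lock with UnlockTuple() on the saved TID, so that any concurrent inplace updater following the README.tuplock rules is blocked for the whole fetch-then-update window.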