Diffstat (limited to 'src/backend/access/heap/heapam.c')
-rw-r--r--   src/backend/access/heap/heapam.c   60
1 file changed, 52 insertions, 8 deletions
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 82a0492aac5..3a13671a1ef 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -6326,6 +6326,9 @@ heap_inplace_update_and_unlock(Relation relation,
 	HeapTupleHeader htup = oldtup->t_data;
 	uint32		oldlen;
 	uint32		newlen;
+	int			nmsgs = 0;
+	SharedInvalidationMessage *invalMessages = NULL;
+	bool		RelcacheInitFileInval = false;
 
 	Assert(ItemPointerEquals(&oldtup->t_self, &tuple->t_self));
 	oldlen = oldtup->t_len - htup->t_hoff;
@@ -6333,6 +6336,29 @@ heap_inplace_update_and_unlock(Relation relation,
 	if (oldlen != newlen || htup->t_hoff != tuple->t_data->t_hoff)
 		elog(ERROR, "wrong tuple length");
 
+	/*
+	 * Construct shared cache inval if necessary. Note that because we only
+	 * pass the new version of the tuple, this mustn't be used for any
+	 * operations that could change catcache lookup keys. But we aren't
+	 * bothering with index updates either, so that's true a fortiori.
+	 */
+	CacheInvalidateHeapTupleInplace(relation, tuple, NULL);
+
+	/* Like RecordTransactionCommit(), log only if needed */
+	if (XLogStandbyInfoActive())
+		nmsgs = inplaceGetInvalidationMessages(&invalMessages,
+											   &RelcacheInitFileInval);
+
+	/*
+	 * Unlink relcache init files as needed. If unlinking, acquire
+	 * RelCacheInitLock until after associated invalidations. By doing this
+	 * in advance, if we checkpoint and then crash between inplace
+	 * XLogInsert() and inval, we don't rely on StartupXLOG() ->
+	 * RelationCacheInitFileRemove(). That uses elevel==LOG, so replay would
+	 * neglect to PANIC on EIO.
+	 */
+	PreInplace_Inval();
+
 	/* NO EREPORT(ERROR) from here till changes are logged */
 	START_CRIT_SECTION();
 
@@ -6362,9 +6388,16 @@ heap_inplace_update_and_unlock(Relation relation,
 		XLogRecPtr	recptr;
 
 		xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
+		xlrec.dbId = MyDatabaseId;
+		xlrec.tsId = MyDatabaseTableSpace;
+		xlrec.relcacheInitFileInval = RelcacheInitFileInval;
+		xlrec.nmsgs = nmsgs;
 
 		XLogBeginInsert();
-		XLogRegisterData((char *) &xlrec, SizeOfHeapInplace);
+		XLogRegisterData((char *) &xlrec, MinSizeOfHeapInplace);
+		if (nmsgs != 0)
+			XLogRegisterData((char *) invalMessages,
+							 nmsgs * sizeof(SharedInvalidationMessage));
 		XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
 		XLogRegisterBufData(0, (char *) htup + htup->t_hoff, newlen);
 
@@ -6376,17 +6409,28 @@ heap_inplace_update_and_unlock(Relation relation,
 		PageSetLSN(BufferGetPage(buffer), recptr);
 	}
 
+	LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
+
+	/*
+	 * Send invalidations to shared queue. SearchSysCacheLocked1() assumes we
+	 * do this before UnlockTuple().
+	 *
+	 * If we're mutating a tuple visible only to this transaction, there's an
+	 * equivalent transactional inval from the action that created the tuple,
+	 * and this inval is superfluous.
+	 */
+	AtInplace_Inval();
+
 	END_CRIT_SECTION();
+	UnlockTuple(relation, &tuple->t_self, InplaceUpdateTupleLock);
 
-	heap_inplace_unlock(relation, oldtup, buffer);
+	AcceptInvalidationMessages();	/* local processing of just-sent inval */
 
 	/*
-	 * Send out shared cache inval if necessary. Note that because we only
-	 * pass the new version of the tuple, this mustn't be used for any
-	 * operations that could change catcache lookup keys. But we aren't
-	 * bothering with index updates either, so that's true a fortiori.
-	 *
-	 * XXX ROLLBACK discards the invalidation. See test inplace-inval.spec.
+	 * Queue a transactional inval. The immediate invalidation we just sent
+	 * is the only one known to be necessary. To reduce risk from the
+	 * transition to immediate invalidation, continue sending a transactional
+	 * invalidation like we've long done. Third-party code might rely on it.
 	 */
 	if (!IsBootstrapProcessingMode())
 		CacheInvalidateHeapTuple(relation, tuple, NULL);
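
Note: taken together, the hunks above pin the invalidation work to fixed points around the critical section: the messages are built and any relcache init file unlink happens before START_CRIT_SECTION(), the messages ride in the same WAL record as the in-place change, they are pushed to the shared queue before UnlockTuple(), and a transactional inval is still queued afterwards for compatibility. The standalone sketch below models only that ordering; every helper in it (build_inval_message, wal_log, shared_queue_send) is a hypothetical stand-in, not a PostgreSQL API.

/*
 * Standalone model of the ordering established above.  All helpers here are
 * hypothetical stand-ins for illustration only, not PostgreSQL functions.
 */
#include <stdio.h>

#define MAX_MSGS 8

static char msgs[MAX_MSGS][64];
static int	nmsgs = 0;

/* Step 1: outside the critical section, record what the new tuple version invalidates. */
static void
build_inval_message(const char *key)
{
	snprintf(msgs[nmsgs++], sizeof(msgs[0]), "%s", key);
}

/* Step 2: log the data change and the messages together, so redo can replay both. */
static void
wal_log(const char *change)
{
	printf("WAL: inplace change (%s) carrying %d inval message(s)\n", change, nmsgs);
}

/* Step 3: publish the messages to other backends while still holding the tuple lock. */
static void
shared_queue_send(void)
{
	for (int i = 0; i < nmsgs; i++)
		printf("shared inval: %s\n", msgs[i]);
}

int
main(void)
{
	build_inval_message("catcache: pg_class oid=16384");	/* before critical section */

	/* ---- critical section: no error paths between change and WAL ---- */
	wal_log("pg_class.relhasindex = true");
	shared_queue_send();				/* before releasing the tuple lock */
	/* ---- end critical section, release tuple lock ---- */

	printf("lock released; process own invals, then queue the transactional one\n");
	return 0;
}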