path: root/src/backend/access/heap/heapam_xlog.c
author     Melanie Plageman <melanieplageman@gmail.com>  2025-10-09 16:25:50 -0400
committer  Melanie Plageman <melanieplageman@gmail.com>  2025-10-09 16:29:01 -0400
commit     d96f87332b3786abd23cba47459546799c562b8c (patch)
tree       4f4fdd37f5047ec9b09dee98679983c6545999df /src/backend/access/heap/heapam_xlog.c
parent     1b073cba4993b31fbf820504f297efce5d951c00 (diff)
Eliminate COPY FREEZE use of XLOG_HEAP2_VISIBLE
Instead of emitting a separate WAL XLOG_HEAP2_VISIBLE record for setting
bits in the VM, specify the VM block changes in the XLOG_HEAP2_MULTI_INSERT
record. This halves the number of WAL records emitted by COPY FREEZE.

Author: Melanie Plageman <melanieplageman@gmail.com>
Reviewed-by: Andres Freund <andres@anarazel.de>
Reviewed-by: Robert Haas <robertmhaas@gmail.com>
Reviewed-by: Kirill Reshke <reshkekirill@gmail.com>
Discussion: https://postgr.es/m/flat/CAAKRu_ZMw6Npd_qm2KM%2BFwQ3cMOMx1Dh3VMhp8-V7SOLxdK9-g%40mail.gmail.com
Diffstat (limited to 'src/backend/access/heap/heapam_xlog.c')
-rw-r--r--  src/backend/access/heap/heapam_xlog.c | 49
1 file changed, 48 insertions(+), 1 deletion(-)
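For context before the diff: the redo changes below read the visibility map
page as registered block 1 of the XLOG_HEAP2_MULTI_INSERT record. The
matching insert-side registration in heap_multi_insert() is not part of this
file's diff; what follows is a minimal sketch of its likely shape, built only
from the standard WAL registration API (XLogBeginInsert(),
XLogRegisterBuffer(), XLogInsert()). The names heapbuf, vmbuffer, xlrec, and
tupledata_len are illustrative assumptions, not code from the commit.

/*
 * Sketch only -- not the actual heap_multi_insert() source. The heap page
 * is registered as block 0 (as before this commit); the VM page now rides
 * along as block 1 whenever all inserted tuples are frozen, which is why
 * the redo code reads buffer 1 when XLH_INSERT_ALL_FROZEN_SET is set.
 */
XLogRecPtr	recptr;

XLogBeginInsert();
XLogRegisterData((char *) xlrec, tupledata_len);	/* record payload; length assumed */
XLogRegisterBuffer(0, heapbuf, REGBUF_STANDARD);	/* heap page: block 0 */

if (xlrec->flags & XLH_INSERT_ALL_FROZEN_SET)
	XLogRegisterBuffer(1, vmbuffer, 0);				/* VM page: block 1 */

recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_MULTI_INSERT);
PageSetLSN(BufferGetPage(heapbuf), recptr);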
diff --git a/src/backend/access/heap/heapam_xlog.c b/src/backend/access/heap/heapam_xlog.c
index cf843277938..30e339d8fe2 100644
--- a/src/backend/access/heap/heapam_xlog.c
+++ b/src/backend/access/heap/heapam_xlog.c
@@ -551,6 +551,7 @@ heap_xlog_multi_insert(XLogReaderState *record)
int i;
bool isinit = (XLogRecGetInfo(record) & XLOG_HEAP_INIT_PAGE) != 0;
XLogRedoAction action;
+ Buffer vmbuffer = InvalidBuffer;
/*
* Insertion doesn't overwrite MVCC data, so no conflict processing is
@@ -571,11 +572,11 @@ heap_xlog_multi_insert(XLogReaderState *record)
if (xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED)
{
Relation reln = CreateFakeRelcacheEntry(rlocator);
- Buffer vmbuffer = InvalidBuffer;
visibilitymap_pin(reln, blkno, &vmbuffer);
visibilitymap_clear(reln, blkno, vmbuffer, VISIBILITYMAP_VALID_BITS);
ReleaseBuffer(vmbuffer);
+ vmbuffer = InvalidBuffer;
FreeFakeRelcacheEntry(reln);
}
@@ -662,6 +663,52 @@ heap_xlog_multi_insert(XLogReaderState *record)
if (BufferIsValid(buffer))
UnlockReleaseBuffer(buffer);
+ buffer = InvalidBuffer;
+
+ /*
+ * Read and update the visibility map (VM) block.
+ *
+ * We must always redo VM changes, even if the corresponding heap page
+ * update was skipped due to the LSN interlock. Each VM block covers
+ * multiple heap pages, so later WAL records may update other bits in the
+ * same block. If this record includes an FPI (full-page image),
+ * subsequent WAL records may depend on it to guard against torn pages.
+ *
+ * Heap page changes are replayed first to preserve the invariant:
+ * PD_ALL_VISIBLE must be set on the heap page if the VM bit is set.
+ *
+ * Note that we released the heap page lock above. During normal
+ * operation, this would be unsafe -- a concurrent modification could
+ * clear PD_ALL_VISIBLE while the VM bit remained set, violating the
+ * invariant.
+ *
+ * During recovery, however, no concurrent writers exist. Therefore,
+ * updating the VM without holding the heap page lock is safe enough. This
+ * same approach is taken when replaying xl_heap_visible records (see
+ * heap_xlog_visible()).
+ */
+ if ((xlrec->flags & XLH_INSERT_ALL_FROZEN_SET) &&
+ XLogReadBufferForRedoExtended(record, 1, RBM_ZERO_ON_ERROR, false,
+ &vmbuffer) == BLK_NEEDS_REDO)
+ {
+ Page vmpage = BufferGetPage(vmbuffer);
+
+ /* initialize the page if it was read as zeros */
+ if (PageIsNew(vmpage))
+ PageInit(vmpage, BLCKSZ, 0);
+
+ visibilitymap_set_vmbits(blkno,
+ vmbuffer,
+ VISIBILITYMAP_ALL_VISIBLE |
+ VISIBILITYMAP_ALL_FROZEN,
+ rlocator);
+
+ PageSetLSN(vmpage, lsn);
+ }
+
+ if (BufferIsValid(vmbuffer))
+ UnlockReleaseBuffer(vmbuffer);
+
/*
* If the page is running low on free space, update the FSM as well.
* Arbitrarily, our definition of "low" is less than 20%. We can't do much
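A closing note on visibilitymap_set_vmbits(), which this commit introduces
and whose definition lives outside this file: the comment above ("each VM
block covers multiple heap pages") follows from the visibility map layout, in
which every heap block is tracked by two bits of a map page. The sketch below
is an assumption about what the helper likely does, reconstructed from the
existing conventions of visibilitymap.c (whose private HEAPBLK_TO_* macros it
reproduces); it omits the rlocator argument that appears in the diff.

/*
 * Rough sketch -- not the actual visibilitymap_set_vmbits() body.
 * BITS_PER_HEAPBLOCK is 2: one all-visible bit and one all-frozen bit
 * per heap block, so one BLCKSZ-sized map page covers many heap pages.
 */
#define MAPSIZE					(BLCKSZ - MAXALIGN(SizeOfPageHeaderData))
#define HEAPBLOCKS_PER_BYTE		(BITS_PER_BYTE / BITS_PER_HEAPBLOCK)
#define HEAPBLOCKS_PER_PAGE		(MAPSIZE * HEAPBLOCKS_PER_BYTE)
#define HEAPBLK_TO_MAPBYTE(x)	(((x) % HEAPBLOCKS_PER_PAGE) / HEAPBLOCKS_PER_BYTE)
#define HEAPBLK_TO_OFFSET(x)	(((x) % HEAPBLOCKS_PER_BYTE) * BITS_PER_HEAPBLOCK)

static void
set_vmbits_sketch(BlockNumber heapBlk, Buffer vmBuf, uint8 flags)
{
	Page	page = BufferGetPage(vmBuf);
	uint8  *map = (uint8 *) PageGetContents(page);

	/* The caller must hold the VM buffer lock; redo guarantees that. */
	map[HEAPBLK_TO_MAPBYTE(heapBlk)] |= (flags << HEAPBLK_TO_OFFSET(heapBlk));
	MarkBufferDirty(vmBuf);
}

Because HEAPBLOCKS_PER_PAGE heap pages share one map page, skipping this redo
whenever the heap page's LSN interlock fires could lose bits that later
records in the same VM block depend on; that is the hazard the "always redo
VM changes" comment in the hunk above is guarding against.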