Diffstat (limited to 'src/backend/access/heap/vacuumlazy.c')
 src/backend/access/heap/vacuumlazy.c | 166 ++++++++++++++++++++++---------
 1 file changed, 124 insertions(+), 42 deletions(-)
diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index ab6938d1da1..6b9645e3681 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -463,8 +463,15 @@ static void dead_items_add(LVRelState *vacrel, BlockNumber blkno, OffsetNumber *
int num_offsets);
static void dead_items_reset(LVRelState *vacrel);
static void dead_items_cleanup(LVRelState *vacrel);
+#ifdef USE_ASSERT_CHECKING
static bool heap_page_is_all_visible(LVRelState *vacrel, Buffer buf,
TransactionId *visibility_cutoff_xid, bool *all_frozen);
+#endif
+static bool heap_page_would_be_all_visible(LVRelState *vacrel, Buffer buf,
+ OffsetNumber *deadoffsets,
+ int ndeadoffsets,
+ bool *all_frozen,
+ TransactionId *visibility_cutoff_xid);
static void update_relstats_all_indexes(LVRelState *vacrel);
static void vacuum_error_callback(void *arg);
static void update_vacuum_error_info(LVRelState *vacrel,
@@ -2848,8 +2855,10 @@ lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer,
OffsetNumber unused[MaxHeapTuplesPerPage];
int nunused = 0;
TransactionId visibility_cutoff_xid;
+ TransactionId conflict_xid = InvalidTransactionId;
bool all_frozen;
LVSavedErrInfo saved_err_info;
+ uint8 vmflags = 0;
Assert(vacrel->do_index_vacuuming);
@@ -2860,6 +2869,33 @@ lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer,
VACUUM_ERRCB_PHASE_VACUUM_HEAP, blkno,
InvalidOffsetNumber);
+ /*
+ * Before marking dead items unused, check whether the page will become
+ * all-visible once that change is applied. This lets us reap the tuples
+ * and mark the page all-visible within the same critical section,
+ * enabling both changes to be emitted in a single WAL record. Since the
+ * visibility checks may perform I/O and allocate memory, they must be
+ * done outside the critical section.
+ */
+ if (heap_page_would_be_all_visible(vacrel, buffer,
+ deadoffsets, num_offsets,
+ &all_frozen, &visibility_cutoff_xid))
+ {
+ vmflags |= VISIBILITYMAP_ALL_VISIBLE;
+ if (all_frozen)
+ {
+ vmflags |= VISIBILITYMAP_ALL_FROZEN;
+ Assert(!TransactionIdIsValid(visibility_cutoff_xid));
+ }
+
+ /*
+ * Lock the vmbuffer before entering the critical section. The heap
+ * page lock must also be held while updating the VM to ensure
+ * consistency.
+ */
+ LockBuffer(vmbuffer, BUFFER_LOCK_EXCLUSIVE);
+ }
+
START_CRIT_SECTION();
for (int i = 0; i < num_offsets; i++)
@@ -2879,6 +2915,19 @@ lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer,
/* Attempt to truncate line pointer array now */
PageTruncateLinePointerArray(page);
+ if ((vmflags & VISIBILITYMAP_VALID_BITS) != 0)
+ {
+ /*
+ * The page is guaranteed to have had dead line pointers, so
+ * PD_ALL_VISIBLE cannot have been set before; always set it now.
+ */
+ PageSetAllVisible(page);
+ visibilitymap_set_vmbits(blkno,
+ vmbuffer, vmflags,
+ vacrel->rel->rd_locator);
+ conflict_xid = visibility_cutoff_xid;
+ }
+
/*
* Mark buffer dirty before we write WAL.
*/
@@ -2888,7 +2937,9 @@ lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer,
if (RelationNeedsWAL(vacrel->rel))
{
log_heap_prune_and_freeze(vacrel->rel, buffer,
- InvalidTransactionId,
+ vmflags != 0 ? vmbuffer : InvalidBuffer,
+ vmflags,
+ conflict_xid,
false, /* no cleanup lock required */
PRUNE_VACUUM_CLEANUP,
NULL, 0, /* frozen */
@@ -2897,39 +2948,12 @@ lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer,
unused, nunused);
}
- /*
- * End critical section, so we safely can do visibility tests (which
- * possibly need to perform IO and allocate memory!). If we crash now the
- * page (including the corresponding vm bit) might not be marked all
- * visible, but that's fine. A later vacuum will fix that.
- */
END_CRIT_SECTION();
- /*
- * Now that we have removed the LP_DEAD items from the page, once again
- * check if the page has become all-visible. The page is already marked
- * dirty, exclusively locked, and, if needed, a full page image has been
- * emitted.
- */
- Assert(!PageIsAllVisible(page));
- if (heap_page_is_all_visible(vacrel, buffer, &visibility_cutoff_xid,
- &all_frozen))
+ if ((vmflags & VISIBILITYMAP_ALL_VISIBLE) != 0)
{
- uint8 flags = VISIBILITYMAP_ALL_VISIBLE;
-
- if (all_frozen)
- {
- Assert(!TransactionIdIsValid(visibility_cutoff_xid));
- flags |= VISIBILITYMAP_ALL_FROZEN;
- }
-
- PageSetAllVisible(page);
- visibilitymap_set(vacrel->rel, blkno, buffer,
- InvalidXLogRecPtr,
- vmbuffer, visibility_cutoff_xid,
- flags);
-
+ LockBuffer(vmbuffer, BUFFER_LOCK_UNLOCK);
+
/* Count the newly set VM page for logging */
vacrel->vm_new_visible_pages++;
if (all_frozen)
vacrel->vm_new_visible_frozen_pages++;
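
Condensed, the restructured lazy_vacuum_heap_page() flow reads as below. This is only a sketch assembled from the hunks above, with error handling, the per-item loop, and some arguments elided; it is not the verbatim function body:

	/* Decide visibility status outside the critical section (may do I/O). */
	if (heap_page_would_be_all_visible(vacrel, buffer,
									   deadoffsets, num_offsets,
									   &all_frozen, &visibility_cutoff_xid))
	{
		vmflags |= VISIBILITYMAP_ALL_VISIBLE;
		if (all_frozen)
			vmflags |= VISIBILITYMAP_ALL_FROZEN;
		LockBuffer(vmbuffer, BUFFER_LOCK_EXCLUSIVE); /* before crit section */
	}

	START_CRIT_SECTION();
	/* ... mark dead items unused, truncate the line pointer array ... */
	if ((vmflags & VISIBILITYMAP_VALID_BITS) != 0)
	{
		PageSetAllVisible(page);
		visibilitymap_set_vmbits(blkno, vmbuffer, vmflags,
								 vacrel->rel->rd_locator);
		conflict_xid = visibility_cutoff_xid;
	}
	MarkBufferDirty(buffer);
	if (RelationNeedsWAL(vacrel->rel))
		log_heap_prune_and_freeze(vacrel->rel, buffer,
								  vmflags != 0 ? vmbuffer : InvalidBuffer,
								  vmflags, conflict_xid
								  /* ... remaining arguments elided ... */);
	END_CRIT_SECTION();

	if ((vmflags & VISIBILITYMAP_ALL_VISIBLE) != 0)
		LockBuffer(vmbuffer, BUFFER_LOCK_UNLOCK); /* then bump VM counters */
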
@@ -3597,31 +3621,80 @@ dead_items_cleanup(LVRelState *vacrel)
vacrel->pvs = NULL;
}
+#ifdef USE_ASSERT_CHECKING
+
/*
- * Check if every tuple in the given page is visible to all current and future
- * transactions. Also return the visibility_cutoff_xid which is the highest
- * xmin amongst the visible tuples. Set *all_frozen to true if every tuple
- * on this page is frozen.
- *
- * This is a stripped down version of lazy_scan_prune(). If you change
- * anything here, make sure that everything stays in sync. Note that an
- * assertion calls us to verify that everybody still agrees. Be sure to avoid
- * introducing new side-effects here.
+ * Wrapper around heap_page_would_be_all_visible() for callers that expect
+ * no LP_DEAD items on the page. Currently assert-only, but there is no
+ * reason it could not also be used outside of asserts.
*/
static bool
heap_page_is_all_visible(LVRelState *vacrel, Buffer buf,
TransactionId *visibility_cutoff_xid,
bool *all_frozen)
{
+ return heap_page_would_be_all_visible(vacrel, buf,
+ NULL, 0,
+ all_frozen,
+ visibility_cutoff_xid);
+}
+#endif
+
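
As the comment notes, this wrapper currently backs an assertion. A hypothetical call-site sketch, with variable names invented for illustration (the real assertion site may differ):

	#ifdef USE_ASSERT_CHECKING
	TransactionId debug_cutoff;
	bool		debug_all_frozen;

	/* Cross-check: a page we just marked all-visible must still test so. */
	Assert(heap_page_is_all_visible(vacrel, buf,
									&debug_cutoff, &debug_all_frozen));
	#endif
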
+/*
+ * Check whether the heap page in buf is all-visible except for the dead
+ * tuples referenced in the deadoffsets array.
+ *
+ * Vacuum uses this to check if a page would become all-visible after reaping
+ * known dead tuples. This function does not remove the dead items.
+ *
+ * This must not be called in a critical section, as the visibility checks
+ * may perform I/O and allocate memory.
+ *
+ * Returns true if the page is all-visible apart from the provided
+ * deadoffsets, and false otherwise.
+ *
+ * Output parameters:
+ *
+ * - *all_frozen: true if every tuple on the page is frozen
+ * - *visibility_cutoff_xid: newest xmin; valid only if page is all-visible
+ *
+ * Callers looking to verify that the page is already all-visible can call
+ * heap_page_is_all_visible().
+ *
+ * This logic is closely related to heap_prune_record_unchanged_lp_normal().
+ * If you modify this function, ensure consistency with that code. An
+ * assertion cross-checks that both remain in agreement. Do not introduce new
+ * side-effects.
+ */
+static bool
+heap_page_would_be_all_visible(LVRelState *vacrel, Buffer buf,
+ OffsetNumber *deadoffsets,
+ int ndeadoffsets,
+ bool *all_frozen,
+ TransactionId *visibility_cutoff_xid)
+{
Page page = BufferGetPage(buf);
BlockNumber blockno = BufferGetBlockNumber(buf);
OffsetNumber offnum,
maxoff;
bool all_visible = true;
+ int matched_dead_count = 0;
*visibility_cutoff_xid = InvalidTransactionId;
*all_frozen = true;
+ Assert(ndeadoffsets == 0 || deadoffsets);
+
+#ifdef USE_ASSERT_CHECKING
+ /* Confirm input deadoffsets[] is strictly sorted */
+ if (ndeadoffsets > 1)
+ {
+ for (int i = 1; i < ndeadoffsets; i++)
+ Assert(deadoffsets[i - 1] < deadoffsets[i]);
+ }
+#endif
+
maxoff = PageGetMaxOffsetNumber(page);
for (offnum = FirstOffsetNumber;
offnum <= maxoff && all_visible;
@@ -3649,9 +3722,15 @@ heap_page_is_all_visible(LVRelState *vacrel, Buffer buf,
*/
if (ItemIdIsDead(itemid))
{
- all_visible = false;
- *all_frozen = false;
- break;
+ if (!deadoffsets ||
+ matched_dead_count >= ndeadoffsets ||
+ deadoffsets[matched_dead_count] != offnum)
+ {
+ all_visible = false;
+ *all_frozen = false;
+ break;
+ }
+ matched_dead_count++;
+ continue;
}
Assert(ItemIdIsNormal(itemid));
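
The lockstep cursor above works because deadoffsets[] is strictly sorted and the page is scanned in offset order: each LP_DEAD item either matches the next expected entry, or the page cannot become all-visible. A minimal, self-contained sketch of the same technique, with hypothetical names and plain ints standing in for offsets:

	#include <assert.h>
	#include <stdbool.h>
	#include <stdio.h>

	/*
	 * Scan 'seen' in order and require every dead item to match the next
	 * entry of the strictly sorted 'expected' array, mirroring how
	 * matched_dead_count advances past known-dead offsets above.
	 */
	static bool
	all_dead_expected(const int *seen, int nseen,
					  const int *expected, int nexpected)
	{
		int			cursor = 0; /* plays the role of matched_dead_count */

		for (int i = 0; i < nseen; i++)
		{
			if (cursor >= nexpected || expected[cursor] != seen[i])
				return false;	/* unexpected dead item */
			cursor++;
		}
		return true;
	}

	int
	main(void)
	{
		int			expected[] = {2, 5, 9};
		int			ok[] = {2, 5, 9};
		int			bad[] = {2, 7};

		assert(all_dead_expected(ok, 3, expected, 3));
		assert(!all_dead_expected(bad, 2, expected, 3));
		printf("lockstep matching behaves as in the patch\n");
		return 0;
	}
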
@@ -3660,6 +3739,9 @@ heap_page_is_all_visible(LVRelState *vacrel, Buffer buf,
tuple.t_len = ItemIdGetLength(itemid);
tuple.t_tableOid = RelationGetRelid(vacrel->rel);
+ /* Visibility checks may perform I/O or allocate memory */
+ Assert(CritSectionCount == 0);
+
switch (HeapTupleSatisfiesVacuum(&tuple, vacrel->cutoffs.OldestXmin,
buf))
{