diff options
author | Robert Haas <rhaas@postgresql.org> | 2012-04-26 20:00:21 -0400 |
---|---|---|
committer | Robert Haas <rhaas@postgresql.org> | 2012-04-26 20:00:21 -0400 |
commit | 3424bff90f40532527b9cf4f2ad9eaff750682f7 (patch) | |
tree | 028c10eea2a93f672d9462ebb4dcef7097d316ca /src/backend/access/heap/heapam.c | |
parent | 92df2203437603d40417fe711c3cb7066ac4fdf5 (diff) |
Prevent index-only scans from returning wrong answers under Hot Standby.
The alternative of disallowing index-only scans in HS operation was
discussed, but the consensus was that it was better to treat marking
a page all-visible as a recovery conflict for snapshots that could still
fail to see XIDs on that page. We may in the future try to soften this,
so that we simply force index scans to do heap fetches in cases where
this may be an issue, rather than throwing a hard conflict.
Diffstat (limited to 'src/backend/access/heap/heapam.c')
-rw-r--r-- | src/backend/access/heap/heapam.c | 18 |
1 file changed, 16 insertions, 2 deletions
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 98d1e559d32..3259354d5e0 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -4368,7 +4368,8 @@ log_heap_freeze(Relation reln, Buffer buffer,
  * and dirtied.
  */
 XLogRecPtr
-log_heap_visible(RelFileNode rnode, BlockNumber block, Buffer vm_buffer)
+log_heap_visible(RelFileNode rnode, BlockNumber block, Buffer vm_buffer,
+				 TransactionId cutoff_xid)
 {
 	xl_heap_visible xlrec;
 	XLogRecPtr	recptr;
@@ -4376,6 +4377,7 @@ log_heap_visible(RelFileNode rnode, BlockNumber block, Buffer vm_buffer)
 
 	xlrec.node = rnode;
 	xlrec.block = block;
+	xlrec.cutoff_xid = cutoff_xid;
 
 	rdata[0].data = (char *) &xlrec;
 	rdata[0].len = SizeOfHeapVisible;
@@ -4708,6 +4710,17 @@ heap_xlog_visible(XLogRecPtr lsn, XLogRecord *record)
 		return;
 	page = (Page) BufferGetPage(buffer);
 
+	/*
+	 * If there are any Hot Standby transactions running that have an xmin
+	 * horizon old enough that this page isn't all-visible for them, they
+	 * might incorrectly decide that an index-only scan can skip a heap fetch.
+	 *
+	 * NB: It might be better to throw some kind of "soft" conflict here that
+	 * forces any index-only scan that is in flight to perform heap fetches,
+	 * rather than killing the transaction outright.
+	 */
+	ResolveRecoveryConflictWithSnapshot(xlrec->cutoff_xid, xlrec->node);
+
 	LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
 
 	/*
@@ -4760,7 +4773,8 @@ heap_xlog_visible(XLogRecPtr lsn, XLogRecord *record)
 	 * harm is done; and the next VACUUM will fix it.
 	 */
 	if (!XLByteLE(lsn, PageGetLSN(BufferGetPage(vmbuffer))))
-		visibilitymap_set(reln, xlrec->block, lsn, vmbuffer);
+		visibilitymap_set(reln, xlrec->block, lsn, vmbuffer,
+						  xlrec->cutoff_xid);
 
 	ReleaseBuffer(vmbuffer);
 	FreeFakeRelcacheEntry(reln);