Diffstat (limited to 'src/backend')
-rw-r--r-- | src/backend/access/common/heaptuple.c       | 14
-rw-r--r-- | src/backend/access/heap/heapam.c            |  2
-rw-r--r-- | src/backend/access/heap/heapam_visibility.c | 16
-rw-r--r-- | src/backend/access/heap/visibilitymap.c     | 56
-rw-r--r-- | src/backend/access/table/tableam.c          | 11
-rw-r--r-- | src/backend/access/transam/multixact.c      |  6
6 files changed, 52 insertions, 53 deletions
diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c
index 503cda46eff..7e355585a01 100644
--- a/src/backend/access/common/heaptuple.c
+++ b/src/backend/access/common/heaptuple.c
@@ -420,13 +420,13 @@ heap_attisnull(HeapTuple tup, int attnum, TupleDesc tupleDesc)
  * ----------------
  */
 Datum
-nocachegetattr(HeapTuple tuple,
+nocachegetattr(HeapTuple tup,
 			   int attnum,
 			   TupleDesc tupleDesc)
 {
-	HeapTupleHeader tup = tuple->t_data;
+	HeapTupleHeader td = tup->t_data;
 	char	   *tp;				/* ptr to data part of tuple */
-	bits8	   *bp = tup->t_bits;	/* ptr to null bitmap in tuple */
+	bits8	   *bp = td->t_bits;	/* ptr to null bitmap in tuple */
 	bool		slow = false;	/* do we have to walk attrs? */
 	int			off;			/* current offset within data */
 
@@ -441,7 +441,7 @@ nocachegetattr(HeapTuple tuple,
 
 	attnum--;
 
-	if (!HeapTupleNoNulls(tuple))
+	if (!HeapTupleNoNulls(tup))
 	{
 		/*
 		 * there's a null somewhere in the tuple
@@ -470,7 +470,7 @@ nocachegetattr(HeapTuple tuple,
 		}
 	}
 
-	tp = (char *) tup + tup->t_hoff;
+	tp = (char *) td + td->t_hoff;
 
 	if (!slow)
 	{
@@ -489,7 +489,7 @@ nocachegetattr(HeapTuple tuple,
 		 * target. If there aren't any, it's safe to cheaply initialize the
 		 * cached offsets for these attrs.
 		 */
-		if (HeapTupleHasVarWidth(tuple))
+		if (HeapTupleHasVarWidth(tup))
 		{
 			int			j;
 
@@ -565,7 +565,7 @@ nocachegetattr(HeapTuple tuple,
 		{
 			Form_pg_attribute att = TupleDescAttr(tupleDesc, i);
 
-			if (HeapTupleHasNulls(tuple) && att_isnull(i, bp))
+			if (HeapTupleHasNulls(tup) && att_isnull(i, bp))
 			{
 				usecache = false;
 				continue;		/* this cannot be the target att */
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 5887166061a..eb811d751e5 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -108,7 +108,7 @@ static bool ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status
 static void index_delete_sort(TM_IndexDeleteOp *delstate);
 static int	bottomup_sort_and_shrink(TM_IndexDeleteOp *delstate);
 static XLogRecPtr log_heap_new_cid(Relation relation, HeapTuple tup);
-static HeapTuple ExtractReplicaIdentity(Relation rel, HeapTuple tup, bool key_required,
+static HeapTuple ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_required,
 										bool *copy);
 
 
diff --git a/src/backend/access/heap/heapam_visibility.c b/src/backend/access/heap/heapam_visibility.c
index ff0b8a688de..6e33d1c8812 100644
--- a/src/backend/access/heap/heapam_visibility.c
+++ b/src/backend/access/heap/heapam_visibility.c
@@ -1763,30 +1763,30 @@ HeapTupleSatisfiesHistoricMVCC(HeapTuple htup, Snapshot snapshot,
  * if so, the indicated buffer is marked dirty.
  */
 bool
-HeapTupleSatisfiesVisibility(HeapTuple tup, Snapshot snapshot, Buffer buffer)
+HeapTupleSatisfiesVisibility(HeapTuple htup, Snapshot snapshot, Buffer buffer)
 {
 	switch (snapshot->snapshot_type)
 	{
 		case SNAPSHOT_MVCC:
-			return HeapTupleSatisfiesMVCC(tup, snapshot, buffer);
+			return HeapTupleSatisfiesMVCC(htup, snapshot, buffer);
 			break;
 		case SNAPSHOT_SELF:
-			return HeapTupleSatisfiesSelf(tup, snapshot, buffer);
+			return HeapTupleSatisfiesSelf(htup, snapshot, buffer);
 			break;
 		case SNAPSHOT_ANY:
-			return HeapTupleSatisfiesAny(tup, snapshot, buffer);
+			return HeapTupleSatisfiesAny(htup, snapshot, buffer);
 			break;
 		case SNAPSHOT_TOAST:
-			return HeapTupleSatisfiesToast(tup, snapshot, buffer);
+			return HeapTupleSatisfiesToast(htup, snapshot, buffer);
 			break;
 		case SNAPSHOT_DIRTY:
-			return HeapTupleSatisfiesDirty(tup, snapshot, buffer);
+			return HeapTupleSatisfiesDirty(htup, snapshot, buffer);
 			break;
 		case SNAPSHOT_HISTORIC_MVCC:
-			return HeapTupleSatisfiesHistoricMVCC(tup, snapshot, buffer);
+			return HeapTupleSatisfiesHistoricMVCC(htup, snapshot, buffer);
 			break;
 		case SNAPSHOT_NON_VACUUMABLE:
-			return HeapTupleSatisfiesNonVacuumable(tup, snapshot, buffer);
+			return HeapTupleSatisfiesNonVacuumable(htup, snapshot, buffer);
 			break;
 	}
 
diff --git a/src/backend/access/heap/visibilitymap.c b/src/backend/access/heap/visibilitymap.c
index ed72eb7b631..d62761728b0 100644
--- a/src/backend/access/heap/visibilitymap.c
+++ b/src/backend/access/heap/visibilitymap.c
@@ -137,7 +137,7 @@ static void vm_extend(Relation rel, BlockNumber vm_nblocks);
  * any I/O. Returns true if any bits have been cleared and false otherwise.
  */
 bool
-visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer buf, uint8 flags)
+visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer vmbuf, uint8 flags)
 {
 	BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
 	int			mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
@@ -152,21 +152,21 @@ visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer buf, uint8 flags)
 	elog(DEBUG1, "vm_clear %s %d", RelationGetRelationName(rel), heapBlk);
 #endif
 
-	if (!BufferIsValid(buf) || BufferGetBlockNumber(buf) != mapBlock)
+	if (!BufferIsValid(vmbuf) || BufferGetBlockNumber(vmbuf) != mapBlock)
 		elog(ERROR, "wrong buffer passed to visibilitymap_clear");
 
-	LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
-	map = PageGetContents(BufferGetPage(buf));
+	LockBuffer(vmbuf, BUFFER_LOCK_EXCLUSIVE);
+	map = PageGetContents(BufferGetPage(vmbuf));
 
 	if (map[mapByte] & mask)
 	{
 		map[mapByte] &= ~mask;
-		MarkBufferDirty(buf);
+		MarkBufferDirty(vmbuf);
 		cleared = true;
 	}
 
-	LockBuffer(buf, BUFFER_LOCK_UNLOCK);
+	LockBuffer(vmbuf, BUFFER_LOCK_UNLOCK);
 
 	return cleared;
 }
@@ -180,43 +180,43 @@ visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer buf, uint8 flags)
  * shouldn't hold a lock on the heap page while doing that. Then, call
  * visibilitymap_set to actually set the bit.
  *
- * On entry, *buf should be InvalidBuffer or a valid buffer returned by
+ * On entry, *vmbuf should be InvalidBuffer or a valid buffer returned by
  * an earlier call to visibilitymap_pin or visibilitymap_get_status on the same
- * relation. On return, *buf is a valid buffer with the map page containing
+ * relation. On return, *vmbuf is a valid buffer with the map page containing
  * the bit for heapBlk.
  *
  * If the page doesn't exist in the map file yet, it is extended.
  */
 void
-visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *buf)
+visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *vmbuf)
 {
 	BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
 
 	/* Reuse the old pinned buffer if possible */
-	if (BufferIsValid(*buf))
+	if (BufferIsValid(*vmbuf))
 	{
-		if (BufferGetBlockNumber(*buf) == mapBlock)
+		if (BufferGetBlockNumber(*vmbuf) == mapBlock)
 			return;
 
-		ReleaseBuffer(*buf);
+		ReleaseBuffer(*vmbuf);
 	}
 
-	*buf = vm_readbuf(rel, mapBlock, true);
+	*vmbuf = vm_readbuf(rel, mapBlock, true);
 }
 
 /*
  * visibilitymap_pin_ok - do we already have the correct page pinned?
  *
- * On entry, buf should be InvalidBuffer or a valid buffer returned by
+ * On entry, vmbuf should be InvalidBuffer or a valid buffer returned by
  * an earlier call to visibilitymap_pin or visibilitymap_get_status on the same
 * relation. The return value indicates whether the buffer covers the
 * given heapBlk.
 */
 bool
-visibilitymap_pin_ok(BlockNumber heapBlk, Buffer buf)
+visibilitymap_pin_ok(BlockNumber heapBlk, Buffer vmbuf)
 {
 	BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
 
-	return BufferIsValid(buf) && BufferGetBlockNumber(buf) == mapBlock;
+	return BufferIsValid(vmbuf) && BufferGetBlockNumber(vmbuf) == mapBlock;
 }
 
 /*
@@ -314,11 +314,11 @@ visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf,
  * Are all tuples on heapBlk visible to all or are marked frozen, according
  * to the visibility map?
  *
- * On entry, *buf should be InvalidBuffer or a valid buffer returned by an
+ * On entry, *vmbuf should be InvalidBuffer or a valid buffer returned by an
  * earlier call to visibilitymap_pin or visibilitymap_get_status on the same
- * relation. On return, *buf is a valid buffer with the map page containing
+ * relation. On return, *vmbuf is a valid buffer with the map page containing
  * the bit for heapBlk, or InvalidBuffer. The caller is responsible for
- * releasing *buf after it's done testing and setting bits.
+ * releasing *vmbuf after it's done testing and setting bits.
  *
 * NOTE: This function is typically called without a lock on the heap page,
 * so somebody else could change the bit just after we look at it. In fact,
@@ -328,7 +328,7 @@ visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf,
  * all concurrency issues!
  */
 uint8
-visibilitymap_get_status(Relation rel, BlockNumber heapBlk, Buffer *buf)
+visibilitymap_get_status(Relation rel, BlockNumber heapBlk, Buffer *vmbuf)
 {
 	BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
 	uint32		mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
@@ -341,23 +341,23 @@ visibilitymap_get_status(Relation rel, BlockNumber heapBlk, Buffer *buf)
 #endif
 
 	/* Reuse the old pinned buffer if possible */
-	if (BufferIsValid(*buf))
+	if (BufferIsValid(*vmbuf))
 	{
-		if (BufferGetBlockNumber(*buf) != mapBlock)
+		if (BufferGetBlockNumber(*vmbuf) != mapBlock)
 		{
-			ReleaseBuffer(*buf);
-			*buf = InvalidBuffer;
+			ReleaseBuffer(*vmbuf);
+			*vmbuf = InvalidBuffer;
 		}
 	}
 
-	if (!BufferIsValid(*buf))
+	if (!BufferIsValid(*vmbuf))
 	{
-		*buf = vm_readbuf(rel, mapBlock, false);
-		if (!BufferIsValid(*buf))
+		*vmbuf = vm_readbuf(rel, mapBlock, false);
+		if (!BufferIsValid(*vmbuf))
 			return false;
 	}
 
-	map = PageGetContents(BufferGetPage(*buf));
+	map = PageGetContents(BufferGetPage(*vmbuf));
 
 	/*
 	 * A single byte read is atomic. There could be memory-ordering effects
diff --git a/src/backend/access/table/tableam.c b/src/backend/access/table/tableam.c
index b3d1a6c3f8f..094b24c7c9c 100644
--- a/src/backend/access/table/tableam.c
+++ b/src/backend/access/table/tableam.c
@@ -172,19 +172,18 @@ table_parallelscan_initialize(Relation rel, ParallelTableScanDesc pscan,
 }
 
 TableScanDesc
-table_beginscan_parallel(Relation relation, ParallelTableScanDesc parallel_scan)
+table_beginscan_parallel(Relation relation, ParallelTableScanDesc pscan)
 {
 	Snapshot	snapshot;
 	uint32		flags = SO_TYPE_SEQSCAN |
 		SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE;
 
-	Assert(RelationGetRelid(relation) == parallel_scan->phs_relid);
+	Assert(RelationGetRelid(relation) == pscan->phs_relid);
 
-	if (!parallel_scan->phs_snapshot_any)
+	if (!pscan->phs_snapshot_any)
 	{
 		/* Snapshot was serialized -- restore it */
-		snapshot = RestoreSnapshot((char *) parallel_scan +
-								   parallel_scan->phs_snapshot_off);
+		snapshot = RestoreSnapshot((char *) pscan + pscan->phs_snapshot_off);
 		RegisterSnapshot(snapshot);
 		flags |= SO_TEMP_SNAPSHOT;
 	}
@@ -195,7 +194,7 @@ table_beginscan_parallel(Relation relation, ParallelTableScanDesc parallel_scan)
 	}
 
 	return relation->rd_tableam->scan_begin(relation, snapshot, 0, NULL,
-											parallel_scan, flags);
+											pscan, flags);
 }
 
 
diff --git a/src/backend/access/transam/multixact.c b/src/backend/access/transam/multixact.c
index ec57f56adf3..a7383f553b3 100644
--- a/src/backend/access/transam/multixact.c
+++ b/src/backend/access/transam/multixact.c
@@ -1214,14 +1214,14 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
  * range, that is, greater to or equal than oldestMultiXactId, and less than
  * nextMXact. Otherwise, an error is raised.
  *
- * onlyLock must be set to true if caller is certain that the given multi
+ * isLockOnly must be set to true if caller is certain that the given multi
  * is used only to lock tuples; can be false without loss of correctness,
  * but passing a true means we can return quickly without checking for
  * old updates.
  */
 int
 GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members,
-					  bool from_pgupgrade, bool onlyLock)
+					  bool from_pgupgrade, bool isLockOnly)
 {
 	int			pageno;
 	int			prev_pageno;
@@ -1263,7 +1263,7 @@ GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members,
 	 * we can skip checking if the value is older than our oldest visible
 	 * multi. It cannot possibly still be running.
 	 */
-	if (onlyLock &&
+	if (isLockOnly &&
 		MultiXactIdPrecedes(multi, OldestVisibleMXactId[MyBackendId]))
 	{
 		debug_elog2(DEBUG2, "GetMembers: a locker-only multi is too old");
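The heaptuple.c hunk above renames the parameter so that "tup" is the HeapTuple wrapper (matching the prototype) and "td" is the HeapTupleHeader reached through t_data. A minimal sketch of that naming convention, assuming only standard backend headers; tuple_has_nulls is a hypothetical helper, not part of the patch:

#include "postgres.h"

#include "access/htup_details.h"

/* Hypothetical helper, not part of the patch: "tup" names the HeapTuple
 * wrapper (length, TID, table OID) and "td" the HeapTupleHeader it points
 * to through t_data, mirroring the naming the hunk above settles on. */
static bool
tuple_has_nulls(HeapTuple tup)
{
	HeapTupleHeader td = tup->t_data;

	/* same test that HeapTupleHasNulls(tup) performs via the wrapper */
	return (td->t_infomask & HEAP_HASNULL) != 0;
}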
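The visibilitymap.c hunks above only rename buf to vmbuf; the caller-side contract spelled out in the comments is unchanged: start with InvalidBuffer, let visibilitymap_get_status (or visibilitymap_pin) keep the map page pinned across calls, and release the pin when done. A minimal sketch of that pattern, assuming the usual backend headers; count_all_visible is a hypothetical helper, not part of the patch:

#include "postgres.h"

#include "access/visibilitymap.h"
#include "storage/bufmgr.h"
#include "utils/rel.h"

/* Hypothetical helper, not part of the patch: count the heap blocks whose
 * all-visible bit is set, reusing one pinned visibility-map buffer the way
 * the *vmbuf comments above describe. */
static BlockNumber
count_all_visible(Relation rel, BlockNumber nblocks)
{
	Buffer		vmbuf = InvalidBuffer;	/* InvalidBuffer on the first call */
	BlockNumber nvisible = 0;
	BlockNumber blkno;

	for (blkno = 0; blkno < nblocks; blkno++)
	{
		/* repins vmbuf only when the map page covering blkno changes */
		uint8		status = visibilitymap_get_status(rel, blkno, &vmbuf);

		if (status & VISIBILITYMAP_ALL_VISIBLE)
			nvisible++;
	}

	/* the caller is responsible for releasing *vmbuf when finished */
	if (BufferIsValid(vmbuf))
		ReleaseBuffer(vmbuf);

	return nvisible;
}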
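The isLockOnly rename in multixact.c above does not change the contract described in the comment: passing true is a promise that the multi only locks tuples (which lets old multis return quickly), while false is always safe. A hedged caller-side sketch under that reading; show_multixact_members is a hypothetical helper, not part of the patch, and it assumes the tuple's xmax really is a MultiXactId:

#include "postgres.h"

#include "access/htup_details.h"
#include "access/multixact.h"

/* Hypothetical helper, not part of the patch: list the members of a tuple's
 * xmax multixact.  The isLockOnly hint may only be true when the multi is
 * known to carry no update, which HEAP_XMAX_IS_LOCKED_ONLY guarantees here. */
static void
show_multixact_members(HeapTupleHeader td)
{
	MultiXactId multi = HeapTupleHeaderGetRawXmax(td);
	MultiXactMember *members;
	int			nmembers;
	int			i;

	Assert(td->t_infomask & HEAP_XMAX_IS_MULTI);

	nmembers = GetMultiXactIdMembers(multi, &members, false,
									 HEAP_XMAX_IS_LOCKED_ONLY(td->t_infomask));

	for (i = 0; i < nmembers; i++)
		elog(DEBUG1, "member xid %u has status %d",
			 members[i].xid, (int) members[i].status);

	if (nmembers > 0)
		pfree(members);
}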