Diffstat (limited to 'src/backend/access')
-rw-r--r--  src/backend/access/common/heaptuple.c     |  4
-rw-r--r--  src/backend/access/gin/ginvacuum.c        | 10
-rw-r--r--  src/backend/access/gin/ginxlog.c          | 12
-rw-r--r--  src/backend/access/gist/gistutil.c        | 12
-rw-r--r--  src/backend/access/hash/hashfunc.c        |  8
-rw-r--r--  src/backend/access/heap/heapam.c          |  4
-rw-r--r--  src/backend/access/heap/heapam_handler.c  | 17
-rw-r--r--  src/backend/access/heap/rewriteheap.c     |  2
-rw-r--r--  src/backend/access/heap/tuptoaster.c      |  8
-rw-r--r--  src/backend/access/heap/vacuumlazy.c      | 18
-rw-r--r--  src/backend/access/nbtree/nbtinsert.c     | 10
-rw-r--r--  src/backend/access/nbtree/nbtsearch.c     |  6
-rw-r--r--  src/backend/access/nbtree/nbtsort.c       |  8
-rw-r--r--  src/backend/access/nbtree/nbtutils.c      |  2
-rw-r--r--  src/backend/access/spgist/spgscan.c       | 18
-rw-r--r--  src/backend/access/spgist/spgtextproc.c   |  4
-rw-r--r--  src/backend/access/spgist/spgvacuum.c     |  6
-rw-r--r--  src/backend/access/table/tableam.c        |  6
-rw-r--r--  src/backend/access/transam/xact.c         | 12
-rw-r--r--  src/backend/access/transam/xlog.c         |  6
20 files changed, 88 insertions, 85 deletions
diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c
index 783b04a3cb9..a48a6cd757f 100644
--- a/src/backend/access/common/heaptuple.c
+++ b/src/backend/access/common/heaptuple.c
@@ -787,8 +787,8 @@ expand_tuple(HeapTuple *targetHeapTuple,
}
/*
- * Now walk the missing attributes. If there is a missing value
- * make space for it. Otherwise, it's going to be NULL.
+ * Now walk the missing attributes. If there is a missing value make
+ * space for it. Otherwise, it's going to be NULL.
*/
for (attnum = firstmissingnum;
attnum < natts;
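
[The rewrapped comment above summarizes expand_tuple()'s rule for attributes added after the tuple was formed: if a "missing" default is stored for the attribute, space is reserved for it; otherwise the attribute reads as NULL. A minimal standalone sketch of that decision, with plain arrays standing in for the backend's attribute metadata; all names here are illustrative.]

#include <stdbool.h>
#include <stdio.h>

int
main(void)
{
	/* attributes beyond those physically stored in the tuple */
	bool		has_missing[] = {true, false, true};
	int			missing_val[] = {42, 0, 7};
	int			firstmissingnum = 0,
				natts = 3;

	for (int attnum = firstmissingnum; attnum < natts; attnum++)
	{
		if (has_missing[attnum])
			printf("att %d: use stored default %d\n", attnum, missing_val[attnum]);
		else
			printf("att %d: NULL\n", attnum);
	}
	return 0;
}
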
diff --git a/src/backend/access/gin/ginvacuum.c b/src/backend/access/gin/ginvacuum.c
index b9a28d18633..dc46f2460e2 100644
--- a/src/backend/access/gin/ginvacuum.c
+++ b/src/backend/access/gin/ginvacuum.c
@@ -394,17 +394,17 @@ ginVacuumPostingTree(GinVacuumState *gvs, BlockNumber rootBlkno)
* There is at least one empty page. So we have to rescan the tree
* deleting empty pages.
*/
- Buffer buffer;
+ Buffer buffer;
DataPageDeleteStack root,
- *ptr,
- *tmp;
+ *ptr,
+ *tmp;
buffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, rootBlkno,
RBM_NORMAL, gvs->strategy);
/*
- * Lock posting tree root for cleanup to ensure there are no concurrent
- * inserts.
+ * Lock posting tree root for cleanup to ensure there are no
+ * concurrent inserts.
*/
LockBufferForCleanup(buffer);
diff --git a/src/backend/access/gin/ginxlog.c b/src/backend/access/gin/ginxlog.c
index b648af1ff65..c945b282721 100644
--- a/src/backend/access/gin/ginxlog.c
+++ b/src/backend/access/gin/ginxlog.c
@@ -205,8 +205,8 @@ ginRedoRecompress(Page page, ginxlogRecompressDataLeaf *data)
while (segno < a_segno)
{
/*
- * Once modification is started and page tail is copied, we've
- * to copy unmodified segments.
+ * Once modification is started and page tail is copied, we've to
+ * copy unmodified segments.
*/
segsize = SizeOfGinPostingList(oldseg);
if (tailCopy)
@@ -257,12 +257,12 @@ ginRedoRecompress(Page page, ginxlogRecompressDataLeaf *data)
}
/*
- * We're about to start modification of the page. So, copy tail of the
- * page if it's not done already.
+ * We're about to start modification of the page. So, copy tail of
+ * the page if it's not done already.
*/
if (!tailCopy && segptr != segmentend)
{
- int tailSize = segmentend - segptr;
+ int tailSize = segmentend - segptr;
tailCopy = (Pointer) palloc(tailSize);
memcpy(tailCopy, segptr, tailSize);
@@ -304,7 +304,7 @@ ginRedoRecompress(Page page, ginxlogRecompressDataLeaf *data)
segptr = (Pointer) oldseg;
if (segptr != segmentend && tailCopy)
{
- int restSize = segmentend - segptr;
+ int restSize = segmentend - segptr;
Assert(writePtr + restSize <= PageGetSpecialPointer(page));
memcpy(writePtr, segptr, restSize);
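
[Several hunks above reindent a lazy tail-copy: the unmodified remainder of the page is copied aside once, just before the first in-place modification, so later segments can still be read from a stable location. A self-contained sketch of the pattern, with a plain byte buffer standing in for the GIN page; names are local stand-ins, not backend APIs.]

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
	char		page[] = "aaaabbbbccccdddd";
	char	   *segptr = page + 8;		/* first byte about to be modified */
	char	   *segmentend = page + 16; /* end of live data on the "page" */
	char	   *tailCopy = NULL;

	/* copy the unmodified tail exactly once, before the first change */
	if (tailCopy == NULL && segptr != segmentend)
	{
		int			tailSize = (int) (segmentend - segptr);

		tailCopy = malloc(tailSize);
		memcpy(tailCopy, segptr, tailSize);
	}

	memset(segptr, 'X', segmentend - segptr);	/* overwrite in place */
	printf("page: %s, saved tail: %.8s\n", page, tailCopy);
	free(tailCopy);
	return 0;
}
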
diff --git a/src/backend/access/gist/gistutil.c b/src/backend/access/gist/gistutil.c
index 94b6ad6a59b..49df05653b3 100644
--- a/src/backend/access/gist/gistutil.c
+++ b/src/backend/access/gist/gistutil.c
@@ -839,16 +839,16 @@ gistNewBuffer(Relation r)
gistcheckpage(r, buffer);
/*
- * Otherwise, recycle it if deleted, and too old to have any processes
- * interested in it.
+ * Otherwise, recycle it if deleted, and too old to have any
+ * processes interested in it.
*/
if (gistPageRecyclable(page))
{
/*
- * If we are generating WAL for Hot Standby then create a
- * WAL record that will allow us to conflict with queries
- * running on standby, in case they have snapshots older
- * than the page's deleteXid.
+ * If we are generating WAL for Hot Standby then create a WAL
+ * record that will allow us to conflict with queries running
+ * on standby, in case they have snapshots older than the
+ * page's deleteXid.
*/
if (XLogStandbyInfoActive() && RelationNeedsWAL(r))
gistXLogPageReuse(r, blkno, GistPageGetDeleteXid(page));
diff --git a/src/backend/access/hash/hashfunc.c b/src/backend/access/hash/hashfunc.c
index 0bf15ae7236..6ec1ec3df3a 100644
--- a/src/backend/access/hash/hashfunc.c
+++ b/src/backend/access/hash/hashfunc.c
@@ -246,7 +246,7 @@ hashtext(PG_FUNCTION_ARGS)
{
text *key = PG_GETARG_TEXT_PP(0);
Oid collid = PG_GET_COLLATION();
- pg_locale_t mylocale = 0;
+ pg_locale_t mylocale = 0;
Datum result;
if (!collid)
@@ -271,7 +271,7 @@ hashtext(PG_FUNCTION_ARGS)
int32_t ulen = -1;
UChar *uchar = NULL;
Size bsize;
- uint8_t *buf;
+ uint8_t *buf;
ulen = icu_to_uchar(&uchar, VARDATA_ANY(key), VARSIZE_ANY_EXHDR(key));
@@ -302,7 +302,7 @@ hashtextextended(PG_FUNCTION_ARGS)
{
text *key = PG_GETARG_TEXT_PP(0);
Oid collid = PG_GET_COLLATION();
- pg_locale_t mylocale = 0;
+ pg_locale_t mylocale = 0;
Datum result;
if (!collid)
@@ -328,7 +328,7 @@ hashtextextended(PG_FUNCTION_ARGS)
int32_t ulen = -1;
UChar *uchar = NULL;
Size bsize;
- uint8_t *buf;
+ uint8_t *buf;
ulen = icu_to_uchar(&uchar, VARDATA_ANY(key), VARSIZE_ANY_EXHDR(key));
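
[The realigned declarations above sit in the ICU branch of hashtext()/hashtextextended(), which hashes a collation-defined sort key rather than the raw bytes, so that strings comparing equal under a nondeterministic collation also hash equal. A rough standalone analogue, using strxfrm() in place of the ICU sort-key call and a toy hash in place of the backend's hash_any():]

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned
toy_hash(const char *buf, size_t len)
{
	unsigned	h = 5381;

	for (size_t i = 0; i < len; i++)
		h = h * 33 + (unsigned char) buf[i];
	return h;
}

int
main(void)
{
	const char *key = "hello";
	size_t		bsize = strxfrm(NULL, key, 0);	/* sort-key length */
	char	   *buf = malloc(bsize + 1);

	strxfrm(buf, key, bsize + 1);	/* hash the sort key, not the bytes */
	printf("%u\n", toy_hash(buf, bsize));
	free(buf);
	return 0;
}
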
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 19d2c529d80..723e153705d 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -1684,8 +1684,8 @@ void
heap_get_latest_tid(TableScanDesc sscan,
ItemPointer tid)
{
- Relation relation = sscan->rs_rd;
- Snapshot snapshot = sscan->rs_snapshot;
+ Relation relation = sscan->rs_rd;
+ Snapshot snapshot = sscan->rs_snapshot;
ItemPointerData ctid;
TransactionId priorXmax;
diff --git a/src/backend/access/heap/heapam_handler.c b/src/backend/access/heap/heapam_handler.c
index 56b2abda5fb..674c1d3a818 100644
--- a/src/backend/access/heap/heapam_handler.c
+++ b/src/backend/access/heap/heapam_handler.c
@@ -474,6 +474,7 @@ tuple_lock_retry:
HeapTupleHeaderGetCmin(tuple->t_data) >= cid)
{
tmfd->xmax = priorXmax;
+
/*
* Cmin is the problematic value, so store that. See
* above.
@@ -1172,7 +1173,7 @@ heapam_index_build_range_scan(Relation heapRelation,
Snapshot snapshot;
bool need_unregister_snapshot = false;
TransactionId OldestXmin;
- BlockNumber previous_blkno = InvalidBlockNumber;
+ BlockNumber previous_blkno = InvalidBlockNumber;
BlockNumber root_blkno = InvalidBlockNumber;
OffsetNumber root_offsets[MaxHeapTuplesPerPage];
@@ -1263,7 +1264,7 @@ heapam_index_build_range_scan(Relation heapRelation,
/* Publish number of blocks to scan */
if (progress)
{
- BlockNumber nblocks;
+ BlockNumber nblocks;
if (hscan->rs_base.rs_parallel != NULL)
{
@@ -1314,7 +1315,7 @@ heapam_index_build_range_scan(Relation heapRelation,
/* Report scan progress, if asked to. */
if (progress)
{
- BlockNumber blocks_done = heapam_scan_get_blocks_done(hscan);
+ BlockNumber blocks_done = heapam_scan_get_blocks_done(hscan);
if (blocks_done != previous_blkno)
{
@@ -1668,7 +1669,7 @@ heapam_index_build_range_scan(Relation heapRelation,
/* Report scan progress one last time. */
if (progress)
{
- BlockNumber blks_done;
+ BlockNumber blks_done;
if (hscan->rs_base.rs_parallel != NULL)
{
@@ -1720,7 +1721,7 @@ heapam_index_validate_scan(Relation heapRelation,
BlockNumber root_blkno = InvalidBlockNumber;
OffsetNumber root_offsets[MaxHeapTuplesPerPage];
bool in_index[MaxHeapTuplesPerPage];
- BlockNumber previous_blkno = InvalidBlockNumber;
+ BlockNumber previous_blkno = InvalidBlockNumber;
/* state variables for the merge */
ItemPointer indexcursor = NULL;
@@ -1955,8 +1956,8 @@ static BlockNumber
heapam_scan_get_blocks_done(HeapScanDesc hscan)
{
ParallelBlockTableScanDesc bpscan = NULL;
- BlockNumber startblock;
- BlockNumber blocks_done;
+ BlockNumber startblock;
+ BlockNumber blocks_done;
if (hscan->rs_base.rs_parallel != NULL)
{
@@ -1974,7 +1975,7 @@ heapam_scan_get_blocks_done(HeapScanDesc hscan)
blocks_done = hscan->rs_cblock - startblock;
else
{
- BlockNumber nblocks;
+ BlockNumber nblocks;
nblocks = bpscan != NULL ? bpscan->phs_nblocks : hscan->rs_nblocks;
blocks_done = nblocks - startblock +
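
[The last hunk touches heapam_scan_get_blocks_done(), whose arithmetic accounts for synchronized scans that start mid-relation: the block count wraps around nblocks when the current block is behind the start block. A hedged sketch of just that calculation; names are local stand-ins.]

#include <stdio.h>
#include <stdint.h>

typedef uint32_t BlockNumber;

static BlockNumber
blocks_done(BlockNumber startblock, BlockNumber current, BlockNumber nblocks)
{
	if (current > startblock)
		return current - startblock;
	/* scan wrapped past the end of the relation */
	return nblocks - startblock + current;
}

int
main(void)
{
	/* started at block 70 of 100, now back at block 10: 40 blocks done */
	printf("%u\n", blocks_done(70, 10, 100));
	return 0;
}
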
diff --git a/src/backend/access/heap/rewriteheap.c b/src/backend/access/heap/rewriteheap.c
index bce4274362c..131ec7b8d7f 100644
--- a/src/backend/access/heap/rewriteheap.c
+++ b/src/backend/access/heap/rewriteheap.c
@@ -652,7 +652,7 @@ raw_heap_insert(RewriteState state, HeapTuple tup)
}
else if (HeapTupleHasExternal(tup) || tup->t_len > TOAST_TUPLE_THRESHOLD)
{
- int options = HEAP_INSERT_SKIP_FSM;
+ int options = HEAP_INSERT_SKIP_FSM;
if (!state->rs_use_wal)
options |= HEAP_INSERT_SKIP_WAL;
diff --git a/src/backend/access/heap/tuptoaster.c b/src/backend/access/heap/tuptoaster.c
index 74e957abb72..e10715a7755 100644
--- a/src/backend/access/heap/tuptoaster.c
+++ b/src/backend/access/heap/tuptoaster.c
@@ -2295,16 +2295,16 @@ static struct varlena *
toast_decompress_datum_slice(struct varlena *attr, int32 slicelength)
{
struct varlena *result;
- int32 rawsize;
+ int32 rawsize;
Assert(VARATT_IS_COMPRESSED(attr));
result = (struct varlena *) palloc(slicelength + VARHDRSZ);
rawsize = pglz_decompress(TOAST_COMPRESS_RAWDATA(attr),
- VARSIZE(attr) - TOAST_COMPRESS_HDRSZ,
- VARDATA(result),
- slicelength, false);
+ VARSIZE(attr) - TOAST_COMPRESS_HDRSZ,
+ VARDATA(result),
+ slicelength, false);
if (rawsize < 0)
elog(ERROR, "compressed data is corrupted");
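
[The reindented call above decompresses only a prefix (slice) of a TOASTed value and treats a negative return as corruption. A self-contained sketch of that call pattern, with decompress_slice() as a stand-in for pglz_decompress(): like the real function, it returns the number of bytes produced or -1 on bad input, and may stop after slicelength bytes rather than decompressing everything.]

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int
decompress_slice(const char *src, int srclen, char *dst, int slicelength)
{
	if (srclen < 0)
		return -1;				/* corrupt input */
	/* toy "decompression": pretend the data is stored uncompressed */
	int			n = srclen < slicelength ? srclen : slicelength;

	memcpy(dst, src, n);
	return n;
}

int
main(void)
{
	const char	compressed[] = "hello, world";
	int			slicelength = 5;
	char	   *result = malloc(slicelength);
	int			rawsize = decompress_slice(compressed, 12, result, slicelength);

	if (rawsize < 0)
	{
		fprintf(stderr, "compressed data is corrupted\n");
		return 1;
	}
	printf("%.*s\n", rawsize, result);
	free(result);
	return 0;
}
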
diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index 9e17acc110e..637e47c08ce 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -164,7 +164,7 @@ static void lazy_cleanup_index(Relation indrel,
static int lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
int tupindex, LVRelStats *vacrelstats, Buffer *vmbuffer);
static bool should_attempt_truncation(VacuumParams *params,
- LVRelStats *vacrelstats);
+ LVRelStats *vacrelstats);
static void lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats);
static BlockNumber count_nondeletable_pages(Relation onerel,
LVRelStats *vacrelstats);
@@ -1067,9 +1067,9 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
* cheaper to get rid of it in the next pruning pass than
* to treat it like an indexed tuple. Finally, if index
* cleanup is disabled, the second heap pass will not
- * execute, and the tuple will not get removed, so we
- * must treat it like any other dead tuple that we choose
- * to keep.
+ * execute, and the tuple will not get removed, so we must
+ * treat it like any other dead tuple that we choose to
+ * keep.
*
* If this were to happen for a tuple that actually needed
* to be deleted, we'd be in trouble, because it'd
@@ -1087,6 +1087,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
all_visible = false;
break;
case HEAPTUPLE_LIVE:
+
/*
* Count it as live. Not only is this natural, but it's
* also what acquire_sample_rows() does.
@@ -1251,13 +1252,14 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
else
{
/*
- * Here, we have indexes but index cleanup is disabled. Instead of
- * vacuuming the dead tuples on the heap, we just forget them.
+ * Here, we have indexes but index cleanup is disabled.
+ * Instead of vacuuming the dead tuples on the heap, we just
+ * forget them.
*
* Note that vacrelstats->dead_tuples could have tuples which
* became dead after HOT-pruning but are not marked dead yet.
- * We do not process them because it's a very rare condition, and
- * the next vacuum will process them anyway.
+ * We do not process them because it's a very rare condition,
+ * and the next vacuum will process them anyway.
*/
Assert(params->index_cleanup == VACOPT_TERNARY_DISABLED);
}
diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c
index 0a9472c71b5..36a570045ac 100644
--- a/src/backend/access/nbtree/nbtinsert.c
+++ b/src/backend/access/nbtree/nbtinsert.c
@@ -1811,11 +1811,11 @@ _bt_insert_parent(Relation rel,
/*
* Re-find and write lock the parent of buf.
*
- * It's possible that the location of buf's downlink has changed
- * since our initial _bt_search() descent. _bt_getstackbuf() will
- * detect and recover from this, updating the stack, which ensures
- * that the new downlink will be inserted at the correct offset.
- * Even buf's parent may have changed.
+ * It's possible that the location of buf's downlink has changed since
+ * our initial _bt_search() descent. _bt_getstackbuf() will detect
+ * and recover from this, updating the stack, which ensures that the
+ * new downlink will be inserted at the correct offset. Even buf's
+ * parent may have changed.
*/
stack->bts_btentry = bknum;
pbuf = _bt_getstackbuf(rel, stack);
diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c
index 5906c41f316..dc42213ac6c 100644
--- a/src/backend/access/nbtree/nbtsearch.c
+++ b/src/backend/access/nbtree/nbtsearch.c
@@ -166,8 +166,8 @@ _bt_search(Relation rel, BTScanInsert key, Buffer *bufP, int access,
new_stack->bts_parent = stack_in;
/*
- * Page level 1 is lowest non-leaf page level prior to leaves. So,
- * if we're on the level 1 and asked to lock leaf page in write mode,
+ * Page level 1 is lowest non-leaf page level prior to leaves. So, if
+ * we're on the level 1 and asked to lock leaf page in write mode,
* then lock next page in write mode, because it must be a leaf.
*/
if (opaque->btpo.level == 1 && access == BT_WRITE)
@@ -1235,7 +1235,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
/* Initialize remaining insertion scan key fields */
inskey.heapkeyspace = _bt_heapkeyspace(rel);
- inskey.anynullkeys = false; /* unusued */
+ inskey.anynullkeys = false; /* unused */
inskey.nextkey = nextkey;
inskey.pivotsearch = false;
inskey.scantid = NULL;
diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c
index 0b5be776d63..d6fa5742384 100644
--- a/src/backend/access/nbtree/nbtsort.c
+++ b/src/backend/access/nbtree/nbtsort.c
@@ -962,10 +962,10 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup)
* much smaller.
*
* Since the truncated tuple is often smaller than the original
- * tuple, it cannot just be copied in place (besides, we want
- * to actually save space on the leaf page). We delete the
- * original high key, and add our own truncated high key at the
- * same offset.
+ * tuple, it cannot just be copied in place (besides, we want to
+ * actually save space on the leaf page). We delete the original
+ * high key, and add our own truncated high key at the same
+ * offset.
*
* Note that the page layout won't be changed very much. oitup is
* already located at the physical beginning of tuple space, so we
diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c
index 77c9c7285cd..1238d544cd3 100644
--- a/src/backend/access/nbtree/nbtutils.c
+++ b/src/backend/access/nbtree/nbtutils.c
@@ -107,7 +107,7 @@ _bt_mkscankey(Relation rel, IndexTuple itup)
key = palloc(offsetof(BTScanInsertData, scankeys) +
sizeof(ScanKeyData) * indnkeyatts);
key->heapkeyspace = itup == NULL || _bt_heapkeyspace(rel);
- key->anynullkeys = false; /* initial assumption */
+ key->anynullkeys = false; /* initial assumption */
key->nextkey = false;
key->pivotsearch = false;
key->keysz = Min(indnkeyatts, tupnatts);
diff --git a/src/backend/access/spgist/spgscan.c b/src/backend/access/spgist/spgscan.c
index 9365bc57ad5..7bc5ec09bf9 100644
--- a/src/backend/access/spgist/spgscan.c
+++ b/src/backend/access/spgist/spgscan.c
@@ -39,8 +39,8 @@ static int
pairingheap_SpGistSearchItem_cmp(const pairingheap_node *a,
const pairingheap_node *b, void *arg)
{
- const SpGistSearchItem *sa = (const SpGistSearchItem *) a;
- const SpGistSearchItem *sb = (const SpGistSearchItem *) b;
+ const SpGistSearchItem *sa = (const SpGistSearchItem *) a;
+ const SpGistSearchItem *sb = (const SpGistSearchItem *) b;
SpGistScanOpaque so = (SpGistScanOpaque) arg;
int i;
@@ -79,7 +79,7 @@ pairingheap_SpGistSearchItem_cmp(const pairingheap_node *a,
}
static void
-spgFreeSearchItem(SpGistScanOpaque so, SpGistSearchItem * item)
+spgFreeSearchItem(SpGistScanOpaque so, SpGistSearchItem *item)
{
if (!so->state.attLeafType.attbyval &&
DatumGetPointer(item->value) != NULL)
@@ -97,7 +97,7 @@ spgFreeSearchItem(SpGistScanOpaque so, SpGistSearchItem * item)
* Called in queue context
*/
static void
-spgAddSearchItemToQueue(SpGistScanOpaque so, SpGistSearchItem * item)
+spgAddSearchItemToQueue(SpGistScanOpaque so, SpGistSearchItem *item)
{
pairingheap_add(so->scanQueue, &item->phNode);
}
@@ -439,7 +439,7 @@ spgNewHeapItem(SpGistScanOpaque so, int level, ItemPointer heapPtr,
* the scan is not ordered AND the item satisfies the scankeys
*/
static bool
-spgLeafTest(SpGistScanOpaque so, SpGistSearchItem * item,
+spgLeafTest(SpGistScanOpaque so, SpGistSearchItem *item,
SpGistLeafTuple leafTuple, bool isnull,
bool *reportedSome, storeRes_func storeRes)
{
@@ -530,7 +530,7 @@ spgLeafTest(SpGistScanOpaque so, SpGistSearchItem * item,
static void
spgInitInnerConsistentIn(spgInnerConsistentIn *in,
SpGistScanOpaque so,
- SpGistSearchItem * item,
+ SpGistSearchItem *item,
SpGistInnerTuple innerTuple)
{
in->scankeys = so->keyData;
@@ -551,7 +551,7 @@ spgInitInnerConsistentIn(spgInnerConsistentIn *in,
static SpGistSearchItem *
spgMakeInnerItem(SpGistScanOpaque so,
- SpGistSearchItem * parentItem,
+ SpGistSearchItem *parentItem,
SpGistNodeTuple tuple,
spgInnerConsistentOut *out, int i, bool isnull,
double *distances)
@@ -585,7 +585,7 @@ spgMakeInnerItem(SpGistScanOpaque so,
}
static void
-spgInnerTest(SpGistScanOpaque so, SpGistSearchItem * item,
+spgInnerTest(SpGistScanOpaque so, SpGistSearchItem *item,
SpGistInnerTuple innerTuple, bool isnull)
{
MemoryContext oldCxt = MemoryContextSwitchTo(so->tempCxt);
@@ -683,7 +683,7 @@ enum SpGistSpecialOffsetNumbers
static OffsetNumber
spgTestLeafTuple(SpGistScanOpaque so,
- SpGistSearchItem * item,
+ SpGistSearchItem *item,
Page page, OffsetNumber offset,
bool isnull, bool isroot,
bool *reportedSome,
diff --git a/src/backend/access/spgist/spgtextproc.c b/src/backend/access/spgist/spgtextproc.c
index d22998c54bf..a7c1a09e05f 100644
--- a/src/backend/access/spgist/spgtextproc.c
+++ b/src/backend/access/spgist/spgtextproc.c
@@ -632,8 +632,8 @@ spg_text_leaf_consistent(PG_FUNCTION_ARGS)
res = (level >= queryLen) ||
DatumGetBool(DirectFunctionCall2Coll(text_starts_with,
PG_GET_COLLATION(),
- out->leafValue,
- PointerGetDatum(query)));
+ out->leafValue,
+ PointerGetDatum(query)));
if (!res) /* no need to consider remaining conditions */
break;
diff --git a/src/backend/access/spgist/spgvacuum.c b/src/backend/access/spgist/spgvacuum.c
index fc85c6f9407..2b1662a267d 100644
--- a/src/backend/access/spgist/spgvacuum.c
+++ b/src/backend/access/spgist/spgvacuum.c
@@ -192,9 +192,9 @@ vacuumLeafPage(spgBulkDeleteState *bds, Relation index, Buffer buffer,
* happened since VACUUM started.
*
* Note: we could make a tighter test by seeing if the xid is
- * "running" according to the active snapshot; but snapmgr.c doesn't
- * currently export a suitable API, and it's not entirely clear
- * that a tighter test is worth the cycles anyway.
+ * "running" according to the active snapshot; but snapmgr.c
+ * doesn't currently export a suitable API, and it's not entirely
+ * clear that a tighter test is worth the cycles anyway.
*/
if (TransactionIdFollowsOrEquals(dt->xid, bds->myXmin))
spgAddPendingTID(bds, &dt->pointer);
diff --git a/src/backend/access/table/tableam.c b/src/backend/access/table/tableam.c
index c3455bc48ba..12adf590853 100644
--- a/src/backend/access/table/tableam.c
+++ b/src/backend/access/table/tableam.c
@@ -94,7 +94,7 @@ TableScanDesc
table_beginscan_catalog(Relation relation, int nkeys, struct ScanKeyData *key)
{
uint32 flags = SO_TYPE_SEQSCAN |
- SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE | SO_TEMP_SNAPSHOT;
+ SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE | SO_TEMP_SNAPSHOT;
Oid relid = RelationGetRelid(relation);
Snapshot snapshot = RegisterSnapshot(GetCatalogSnapshot(relid));
@@ -158,7 +158,7 @@ table_beginscan_parallel(Relation relation, ParallelTableScanDesc parallel_scan)
{
Snapshot snapshot;
uint32 flags = SO_TYPE_SEQSCAN |
- SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE;
+ SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE;
Assert(RelationGetRelid(relation) == parallel_scan->phs_relid);
@@ -223,7 +223,7 @@ table_index_fetch_tuple_check(Relation rel,
void
table_get_latest_tid(TableScanDesc scan, ItemPointer tid)
{
- Relation rel = scan->rs_rd;
+ Relation rel = scan->rs_rd;
const TableAmRoutine *tableam = rel->rd_tableam;
/*
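
[The realigned initializers above OR independent scan options into a single flags word. A minimal sketch of that composition; the SO_* bit values below are illustrative, and the backend's ScanOptions enum defines its own.]

#include <stdio.h>
#include <stdint.h>

#define SO_TYPE_SEQSCAN		(1 << 0)
#define SO_ALLOW_STRAT		(1 << 1)
#define SO_ALLOW_SYNC		(1 << 2)
#define SO_ALLOW_PAGEMODE	(1 << 3)
#define SO_TEMP_SNAPSHOT	(1 << 4)

int
main(void)
{
	uint32_t	flags = SO_TYPE_SEQSCAN |
		SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE | SO_TEMP_SNAPSHOT;

	printf("pagemode: %d, temp snapshot: %d\n",
		   (flags & SO_ALLOW_PAGEMODE) != 0,
		   (flags & SO_TEMP_SNAPSHOT) != 0);
	return 0;
}
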
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index 20feeec3270..b40da74e092 100644
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -570,9 +570,9 @@ AssignTransactionId(TransactionState s)
/*
* Ensure parent(s) have XIDs, so that a child always has an XID later
- * than its parent. Mustn't recurse here, or we might get a stack overflow
- * if we're at the bottom of a huge stack of subtransactions none of which
- * have XIDs yet.
+ * than its parent. Mustn't recurse here, or we might get a stack
+ * overflow if we're at the bottom of a huge stack of subtransactions none
+ * of which have XIDs yet.
*/
if (isSubXact && !FullTransactionIdIsValid(s->parent->fullTransactionId))
{
@@ -2868,8 +2868,8 @@ StartTransactionCommand(void)
* just skipping the reset in StartTransaction() won't work.)
*/
static int save_XactIsoLevel;
-static bool save_XactReadOnly;
-static bool save_XactDeferrable;
+static bool save_XactReadOnly;
+static bool save_XactDeferrable;
void
SaveTransactionCharacteristics(void)
@@ -5193,7 +5193,7 @@ SerializeTransactionState(Size maxsize, char *start_address)
nxids = add_size(nxids, s->nChildXids);
}
Assert(SerializedTransactionStateHeaderSize + nxids * sizeof(TransactionId)
- <= maxsize);
+ <= maxsize);
/* Copy them to our scratch space. */
workspace = palloc(nxids * sizeof(TransactionId));
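
[The hunk above realigns file-scope statics used to save and restore transaction characteristics across a transaction chain. A small sketch of that save/restore-into-statics pattern; the variable names mirror the originals, but the surrounding logic is illustrative.]

#include <stdbool.h>
#include <stdio.h>

static int	XactIsoLevel = 1;
static bool XactReadOnly = false;

static int	save_XactIsoLevel;
static bool save_XactReadOnly;

static void
SaveCharacteristics(void)
{
	save_XactIsoLevel = XactIsoLevel;
	save_XactReadOnly = XactReadOnly;
}

static void
RestoreCharacteristics(void)
{
	XactIsoLevel = save_XactIsoLevel;
	XactReadOnly = save_XactReadOnly;
}

int
main(void)
{
	SaveCharacteristics();
	XactIsoLevel = 3;			/* transient change within the chain */
	RestoreCharacteristics();
	printf("iso level restored to %d\n", XactIsoLevel);
	return 0;
}
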
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 527522f1655..c7c9e91b6a4 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -6397,9 +6397,9 @@ StartupXLOG(void)
ereport(FATAL,
(errmsg("could not find redo location referenced by checkpoint record"),
errhint("If you are restoring from a backup, touch \"%s/recovery.signal\" and add required recovery options.\n"
- "If you are not restoring from a backup, try removing the file \"%s/backup_label\".\n"
- "Be careful: removing \"%s/backup_label\" will result in a corrupt cluster if restoring from a backup.",
- DataDir, DataDir, DataDir)));
+ "If you are not restoring from a backup, try removing the file \"%s/backup_label\".\n"
+ "Be careful: removing \"%s/backup_label\" will result in a corrupt cluster if restoring from a backup.",
+ DataDir, DataDir, DataDir)));
}
}
else