Diffstat (limited to 'src/backend/access')
-rw-r--r--  src/backend/access/brin/brin_minmax_multi.c   |  2
-rw-r--r--  src/backend/access/common/tidstore.c          |  2
-rw-r--r--  src/backend/access/gin/ginget.c               |  2
-rw-r--r--  src/backend/access/gin/ginpostinglist.c       |  4
-rw-r--r--  src/backend/access/hash/hashsort.c            |  2
-rw-r--r--  src/backend/access/hash/hashutil.c            |  2
-rw-r--r--  src/backend/access/heap/heapam.c              | 34
-rw-r--r--  src/backend/access/heap/heaptoast.c           |  4
-rw-r--r--  src/backend/access/nbtree/nbtdedup.c          |  2
-rw-r--r--  src/backend/access/nbtree/nbtpreprocesskeys.c |  4
-rw-r--r--  src/backend/access/nbtree/nbtsearch.c         | 21
-rw-r--r--  src/backend/access/nbtree/nbtsort.c           |  6
-rw-r--r--  src/backend/access/nbtree/nbtsplitloc.c       |  4
-rw-r--r--  src/backend/access/nbtree/nbtutils.c          | 68
-rw-r--r--  src/backend/access/spgist/spgdoinsert.c       |  2
-rw-r--r--  src/backend/access/spgist/spgtextproc.c       |  2
-rw-r--r--  src/backend/access/spgist/spgutils.c          |  2
-rw-r--r--  src/backend/access/spgist/spgvacuum.c         |  2
-rw-r--r--  src/backend/access/transam/xlog.c             |  2
-rw-r--r--  src/backend/access/transam/xloginsert.c       | 15
20 files changed, 117 insertions, 65 deletions
diff --git a/src/backend/access/brin/brin_minmax_multi.c b/src/backend/access/brin/brin_minmax_multi.c
index c87f1b9cd7e..f8a11444d66 100644
--- a/src/backend/access/brin/brin_minmax_multi.c
+++ b/src/backend/access/brin/brin_minmax_multi.c
@@ -276,7 +276,7 @@ static int	compare_values(const void *a, const void *b, void *arg);
  * function (which should be BTLessStrategyNumber).
  */
 static void
-AssertArrayOrder(FmgrInfo *cmp, Oid colloid, Datum *values, int nvalues)
+AssertArrayOrder(FmgrInfo *cmp, Oid colloid, const Datum *values, int nvalues)
 {
 	int			i;
 	Datum		lt;
diff --git a/src/backend/access/common/tidstore.c b/src/backend/access/common/tidstore.c
index 5bd75fb499c..fb807d9fe59 100644
--- a/src/backend/access/common/tidstore.c
+++ b/src/backend/access/common/tidstore.c
@@ -418,7 +418,7 @@ TidStoreSetBlockOffsets(TidStore *ts, BlockNumber blkno, OffsetNumber *offsets,
 
 /* Return true if the given TID is present in the TidStore */
 bool
-TidStoreIsMember(TidStore *ts, ItemPointer tid)
+TidStoreIsMember(TidStore *ts, const ItemPointerData *tid)
 {
 	int			wordnum;
 	int			bitnum;
diff --git a/src/backend/access/gin/ginget.c b/src/backend/access/gin/ginget.c
index 656299b1b52..0d4108d05a3 100644
--- a/src/backend/access/gin/ginget.c
+++ b/src/backend/access/gin/ginget.c
@@ -489,7 +489,7 @@ restartScanEntry:
 static int
 entryIndexByFrequencyCmp(const void *a1, const void *a2, void *arg)
 {
-	const GinScanKey key = (const GinScanKey) arg;
+	const GinScanKeyData *key = arg;
 	int			i1 = *(const int *) a1;
 	int			i2 = *(const int *) a2;
 	uint32		n1 = key->scanEntry[i1]->predictNumberResult;
diff --git a/src/backend/access/gin/ginpostinglist.c b/src/backend/access/gin/ginpostinglist.c
index 48eadec87b0..1bf061803da 100644
--- a/src/backend/access/gin/ginpostinglist.c
+++ b/src/backend/access/gin/ginpostinglist.c
@@ -84,7 +84,7 @@
 #define MaxBytesPerInteger				7
 
 static inline uint64
-itemptr_to_uint64(const ItemPointer iptr)
+itemptr_to_uint64(const ItemPointerData *iptr)
 {
 	uint64		val;
 
@@ -194,7 +194,7 @@ decode_varbyte(unsigned char **ptr)
  * byte at the end, if any, is zero.
  */
 GinPostingList *
-ginCompressPostingList(const ItemPointer ipd, int nipd, int maxsize,
+ginCompressPostingList(const ItemPointerData *ipd, int nipd, int maxsize,
 					   int *nwritten)
 {
 	uint64		prev;
diff --git a/src/backend/access/hash/hashsort.c b/src/backend/access/hash/hashsort.c
index 6e8c0e68a92..92ae3cf53f5 100644
--- a/src/backend/access/hash/hashsort.c
+++ b/src/backend/access/hash/hashsort.c
@@ -106,7 +106,7 @@ _h_spooldestroy(HSpool *hspool)
  * spool an index entry into the sort file.
  */
 void
-_h_spool(HSpool *hspool, ItemPointer self, const Datum *values, const bool *isnull)
+_h_spool(HSpool *hspool, const ItemPointerData *self, const Datum *values, const bool *isnull)
 {
 	tuplesort_putindextuplevalues(hspool->sortstate, hspool->index,
 								  self, values, isnull);
diff --git a/src/backend/access/hash/hashutil.c b/src/backend/access/hash/hashutil.c
index 66c39f60654..f41233fcd07 100644
--- a/src/backend/access/hash/hashutil.c
+++ b/src/backend/access/hash/hashutil.c
@@ -316,7 +316,7 @@ _hash_get_indextuple_hashkey(IndexTuple itup)
  */
 bool
 _hash_convert_tuple(Relation index,
-					Datum *user_values, bool *user_isnull,
+					const Datum *user_values, const bool *user_isnull,
 					Datum *index_values, bool *index_isnull)
 {
 	uint32		hashkey;
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 568696333c2..36fee9c994e 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -63,7 +63,7 @@ static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf,
 								  bool all_visible_cleared, bool new_all_visible_cleared);
 #ifdef USE_ASSERT_CHECKING
 static void check_lock_if_inplace_updateable_rel(Relation relation,
-												 ItemPointer otid,
+												 const ItemPointerData *otid,
 												 HeapTuple newtup);
 static void check_inplace_rel_lock(HeapTuple oldtup);
 #endif
@@ -72,7 +72,7 @@ static Bitmapset *HeapDetermineColumnsInfo(Relation relation,
 										   Bitmapset *external_cols,
 										   HeapTuple oldtup, HeapTuple newtup,
 										   bool *has_external);
-static bool heap_acquire_tuplock(Relation relation, ItemPointer tid,
+static bool heap_acquire_tuplock(Relation relation, const ItemPointerData *tid,
 								 LockTupleMode mode, LockWaitPolicy wait_policy,
 								 bool *have_tuple_lock);
 static inline BlockNumber heapgettup_advance_block(HeapScanDesc scan,
@@ -86,7 +86,7 @@ static void compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask,
 									  TransactionId *result_xmax, uint16 *result_infomask,
 									  uint16 *result_infomask2);
 static TM_Result heap_lock_updated_tuple(Relation rel, HeapTuple tuple,
-										 ItemPointer ctid, TransactionId xid,
+										 const ItemPointerData *ctid, TransactionId xid,
 										 LockTupleMode mode);
 static void GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask,
 								   uint16 *new_infomask2);
@@ -95,7 +95,7 @@ static TransactionId MultiXactIdGetUpdateXid(TransactionId xmax,
 static bool DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask,
 									LockTupleMode lockmode, bool *current_is_member);
 static void MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask,
-							Relation rel, ItemPointer ctid, XLTW_Oper oper,
+							Relation rel, const ItemPointerData *ctid, XLTW_Oper oper,
 							int *remaining);
 static bool ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status,
 									   uint16 infomask, Relation rel, int *remaining,
@@ -2786,7 +2786,7 @@ xmax_infomask_changed(uint16 new_infomask, uint16 old_infomask)
  * generated by another transaction).
  */
 TM_Result
-heap_delete(Relation relation, ItemPointer tid,
+heap_delete(Relation relation, const ItemPointerData *tid,
 			CommandId cid, Snapshot crosscheck, bool wait,
 			TM_FailureData *tmfd, bool changingPart)
 {
@@ -3209,7 +3209,7 @@ l1:
  * via ereport().
  */
 void
-simple_heap_delete(Relation relation, ItemPointer tid)
+simple_heap_delete(Relation relation, const ItemPointerData *tid)
 {
 	TM_Result	result;
 	TM_FailureData tmfd;
@@ -3255,7 +3255,7 @@ simple_heap_delete(Relation relation, ItemPointer tid)
  * generated by another transaction).
  */
 TM_Result
-heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
+heap_update(Relation relation, const ItemPointerData *otid, HeapTuple newtup,
 			CommandId cid, Snapshot crosscheck, bool wait,
 			TM_FailureData *tmfd, LockTupleMode *lockmode,
 			TU_UpdateIndexes *update_indexes)
@@ -4238,7 +4238,7 @@ l2:
  */
 static void
 check_lock_if_inplace_updateable_rel(Relation relation,
-									 ItemPointer otid,
+									 const ItemPointerData *otid,
 									 HeapTuple newtup)
 {
 	/* LOCKTAG_TUPLE acceptable for any catalog */
@@ -4499,7 +4499,7 @@ HeapDetermineColumnsInfo(Relation relation,
  * via ereport().
  */
 void
-simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup,
+simple_heap_update(Relation relation, const ItemPointerData *otid, HeapTuple tup,
 				   TU_UpdateIndexes *update_indexes)
 {
 	TM_Result	result;
@@ -5285,7 +5285,7 @@ out_unlocked:
  * wait_policy is Skip.
  */
 static bool
-heap_acquire_tuplock(Relation relation, ItemPointer tid, LockTupleMode mode,
+heap_acquire_tuplock(Relation relation, const ItemPointerData *tid, LockTupleMode mode,
 					 LockWaitPolicy wait_policy, bool *have_tuple_lock)
 {
 	if (*have_tuple_lock)
@@ -5706,7 +5706,7 @@ test_lockmode_for_conflict(MultiXactStatus status, TransactionId xid,
  * version as well.
  */
 static TM_Result
-heap_lock_updated_tuple_rec(Relation rel, ItemPointer tid, TransactionId xid,
+heap_lock_updated_tuple_rec(Relation rel, const ItemPointerData *tid, TransactionId xid,
 							LockTupleMode mode)
 {
 	TM_Result	result;
@@ -6051,7 +6051,7 @@ out_unlocked:
  * levels, because that would lead to a serializability failure.
  */
 static TM_Result
-heap_lock_updated_tuple(Relation rel, HeapTuple tuple, ItemPointer ctid,
+heap_lock_updated_tuple(Relation rel, HeapTuple tuple, const ItemPointerData *ctid,
 						TransactionId xid, LockTupleMode mode)
 {
 	/*
@@ -6096,7 +6096,7 @@ heap_lock_updated_tuple(Relation rel, HeapTuple tuple, ItemPointer ctid,
  * An explicit confirmation WAL record also makes logical decoding simpler.
 */
 void
-heap_finish_speculative(Relation relation, ItemPointer tid)
+heap_finish_speculative(Relation relation, const ItemPointerData *tid)
 {
 	Buffer		buffer;
 	Page		page;
@@ -6183,7 +6183,7 @@ heap_finish_speculative(Relation relation, ItemPointer tid)
  * confirmation records.
  */
 void
-heap_abort_speculative(Relation relation, ItemPointer tid)
+heap_abort_speculative(Relation relation, const ItemPointerData *tid)
 {
 	TransactionId xid = GetCurrentTransactionId();
 	ItemId		lp;
@@ -7705,7 +7705,7 @@ DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask,
 static bool
 Do_MultiXactIdWait(MultiXactId multi, MultiXactStatus status,
 				   uint16 infomask, bool nowait,
-				   Relation rel, ItemPointer ctid, XLTW_Oper oper,
+				   Relation rel, const ItemPointerData *ctid, XLTW_Oper oper,
 				   int *remaining, bool logLockFailure)
 {
 	bool		result = true;
@@ -7782,7 +7782,7 @@ Do_MultiXactIdWait(MultiXactId multi, MultiXactStatus status,
  */
 static void
 MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask,
-				Relation rel, ItemPointer ctid, XLTW_Oper oper,
+				Relation rel, const ItemPointerData *ctid, XLTW_Oper oper,
 				int *remaining)
 {
 	(void) Do_MultiXactIdWait(multi, status, infomask, false,
@@ -8068,7 +8068,7 @@ index_delete_prefetch_buffer(Relation rel,
 static inline void
 index_delete_check_htid(TM_IndexDeleteOp *delstate,
 						Page page, OffsetNumber maxoff,
-						ItemPointer htid, TM_IndexStatus *istatus)
+						const ItemPointerData *htid, TM_IndexStatus *istatus)
 {
 	OffsetNumber indexpagehoffnum = ItemPointerGetOffsetNumber(htid);
 	ItemId		iid;
diff --git a/src/backend/access/heap/heaptoast.c b/src/backend/access/heap/heaptoast.c
index cb1e57030f6..e148c9be482 100644
--- a/src/backend/access/heap/heaptoast.c
+++ b/src/backend/access/heap/heaptoast.c
@@ -561,8 +561,8 @@ toast_flatten_tuple_to_datum(HeapTupleHeader tup,
  */
 HeapTuple
 toast_build_flattened_tuple(TupleDesc tupleDesc,
-							Datum *values,
-							bool *isnull)
+							const Datum *values,
+							const bool *isnull)
 {
 	HeapTuple	new_tuple;
 	int			numAttrs = tupleDesc->natts;
diff --git a/src/backend/access/nbtree/nbtdedup.c b/src/backend/access/nbtree/nbtdedup.c
index 07e63962f81..a746de45dd3 100644
--- a/src/backend/access/nbtree/nbtdedup.c
+++ b/src/backend/access/nbtree/nbtdedup.c
@@ -859,7 +859,7 @@ _bt_singleval_fillfactor(Page page, BTDedupState state, Size newitemsz)
  * returned posting list tuple (they must be included in htids array.)
  */
 IndexTuple
-_bt_form_posting(IndexTuple base, ItemPointer htids, int nhtids)
+_bt_form_posting(IndexTuple base, const ItemPointerData *htids, int nhtids)
 {
 	uint32		keysize,
 				newsize;
diff --git a/src/backend/access/nbtree/nbtpreprocesskeys.c b/src/backend/access/nbtree/nbtpreprocesskeys.c
index 7b7d7860d8f..a871bf62cab 100644
--- a/src/backend/access/nbtree/nbtpreprocesskeys.c
+++ b/src/backend/access/nbtree/nbtpreprocesskeys.c
@@ -67,7 +67,7 @@ static int	_bt_num_array_keys(IndexScanDesc scan, Oid *skip_eq_ops_out,
 							   int *numSkipArrayKeys_out);
 static Datum _bt_find_extreme_element(IndexScanDesc scan, ScanKey skey,
 									  Oid elemtype, StrategyNumber strat,
-									  Datum *elems, int nelems);
+									  const Datum *elems, int nelems);
 static void _bt_setup_array_cmp(IndexScanDesc scan, ScanKey skey, Oid elemtype,
 								FmgrInfo *orderproc, FmgrInfo **sortprocp);
 static int	_bt_sort_array_elements(ScanKey skey, FmgrInfo *sortproc,
@@ -2569,7 +2569,7 @@ _bt_num_array_keys(IndexScanDesc scan, Oid *skip_eq_ops_out,
 static Datum
 _bt_find_extreme_element(IndexScanDesc scan, ScanKey skey, Oid elemtype,
 						 StrategyNumber strat,
-						 Datum *elems, int nelems)
+						 const Datum *elems, int nelems)
 {
 	Relation	rel = scan->indexRelation;
 	Oid			cmp_op;
diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c
index d69798795b4..0605356ec9f 100644
--- a/src/backend/access/nbtree/nbtsearch.c
+++ b/src/backend/access/nbtree/nbtsearch.c
@@ -37,7 +37,7 @@ static bool _bt_readpage(IndexScanDesc scan, ScanDirection dir,
 static void _bt_saveitem(BTScanOpaque so, int itemIndex,
 						 OffsetNumber offnum, IndexTuple itup);
 static int	_bt_setuppostingitems(BTScanOpaque so, int itemIndex,
-								  OffsetNumber offnum, ItemPointer heapTid,
+								  OffsetNumber offnum, const ItemPointerData *heapTid,
 								  IndexTuple itup);
 static inline void _bt_savepostingitem(BTScanOpaque so, int itemIndex,
 									   OffsetNumber offnum,
@@ -1288,6 +1288,8 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
 			 * our row compare header key must be the final startKeys[] entry.
 			 */
 			Assert(subkey->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD));
+			Assert(subkey->sk_strategy == bkey->sk_strategy);
+			Assert(subkey->sk_strategy == strat_total);
 			Assert(i == keysz - 1);
 
 			/*
@@ -1344,9 +1346,9 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
 
 				Assert(subkey->sk_strategy == bkey->sk_strategy);
 				Assert(keysz < INDEX_MAX_KEYS);
-				memcpy(inskey.scankeys + keysz, subkey,
-					   sizeof(ScanKeyData));
+				memcpy(inskey.scankeys + keysz, subkey, sizeof(ScanKeyData));
 				keysz++;
+
 				if (subkey->sk_flags & SK_ROW_END)
 					break;
 			}
@@ -1378,7 +1380,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
 				}
 			}
 
-			/* done adding to inskey (row comparison keys always come last) */
+			/* Done (row compare header key is always last startKeys[] key) */
 			break;
 		}
 
@@ -2079,7 +2081,7 @@ _bt_saveitem(BTScanOpaque so, int itemIndex,
 */
 static int
 _bt_setuppostingitems(BTScanOpaque so, int itemIndex, OffsetNumber offnum,
-					  ItemPointer heapTid, IndexTuple itup)
+					  const ItemPointerData *heapTid, IndexTuple itup)
 {
 	BTScanPosItem *currItem = &so->currPos.items[itemIndex];
 
@@ -2246,12 +2248,9 @@ _bt_steppage(IndexScanDesc scan, ScanDirection dir)
 *
 * _bt_first caller passes us an offnum returned by _bt_binsrch, which might
 * be an out of bounds offnum such as "maxoff + 1" in certain corner cases.
- * _bt_checkkeys will stop the scan as soon as an equality qual fails (when
- * its scan key was marked required), so _bt_first _must_ pass us an offnum
- * exactly at the beginning of where equal tuples are to be found.  When we're
- * passed an offnum past the end of the page, we might still manage to stop
- * the scan on this page by calling _bt_checkkeys against the high key.  See
- * _bt_readpage for full details.
+ * When we're passed an offnum past the end of the page, we might still manage
+ * to stop the scan on this page by calling _bt_checkkeys against the high
+ * key.  See _bt_readpage for full details.
 *
 * On entry, so->currPos must be pinned and locked (so offnum stays valid).
 * Parallel scan callers must have seized the scan before calling here.
diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c
index 313fe66bc96..454adaee7dc 100644
--- a/src/backend/access/nbtree/nbtsort.c
+++ b/src/backend/access/nbtree/nbtsort.c
@@ -257,8 +257,8 @@ typedef struct BTWriteState
 static double _bt_spools_heapscan(Relation heap, Relation index,
 								  BTBuildState *buildstate, IndexInfo *indexInfo);
 static void _bt_spooldestroy(BTSpool *btspool);
-static void _bt_spool(BTSpool *btspool, ItemPointer self,
-					  Datum *values, bool *isnull);
+static void _bt_spool(BTSpool *btspool, const ItemPointerData *self,
+					  const Datum *values, const bool *isnull);
 static void _bt_leafbuild(BTSpool *btspool, BTSpool *btspool2);
 static void _bt_build_callback(Relation index, ItemPointer tid, Datum *values,
 							   bool *isnull, bool tupleIsAlive, void *state);
@@ -525,7 +525,7 @@ _bt_spooldestroy(BTSpool *btspool)
  * spool an index entry into the sort file.
 */
 static void
-_bt_spool(BTSpool *btspool, ItemPointer self, Datum *values, bool *isnull)
+_bt_spool(BTSpool *btspool, const ItemPointerData *self, const Datum *values, const bool *isnull)
 {
 	tuplesort_putindextuplevalues(btspool->sortstate, btspool->index,
 								  self, values, isnull);
diff --git a/src/backend/access/nbtree/nbtsplitloc.c b/src/backend/access/nbtree/nbtsplitloc.c
index b88c396195a..f0082f88c76 100644
--- a/src/backend/access/nbtree/nbtsplitloc.c
+++ b/src/backend/access/nbtree/nbtsplitloc.c
@@ -69,7 +69,7 @@ static void _bt_deltasortsplits(FindSplitData *state, double fillfactormult,
 static int	_bt_splitcmp(const void *arg1, const void *arg2);
 static bool _bt_afternewitemoff(FindSplitData *state, OffsetNumber maxoff,
 								int leaffillfactor, bool *usemult);
-static bool _bt_adjacenthtid(ItemPointer lowhtid, ItemPointer highhtid);
+static bool _bt_adjacenthtid(const ItemPointerData *lowhtid, const ItemPointerData *highhtid);
 static OffsetNumber _bt_bestsplitloc(FindSplitData *state, int perfectpenalty,
 									 bool *newitemonleft, FindSplitStrat strategy);
 static int	_bt_defaultinterval(FindSplitData *state);
@@ -747,7 +747,7 @@ _bt_afternewitemoff(FindSplitData *state, OffsetNumber maxoff,
  * transaction.
  */
 static bool
-_bt_adjacenthtid(ItemPointer lowhtid, ItemPointer highhtid)
+_bt_adjacenthtid(const ItemPointerData *lowhtid, const ItemPointerData *highhtid)
 {
 	BlockNumber lowblk,
 				highblk;
diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c
index 288da8b68ab..ab0f98b0287 100644
--- a/src/backend/access/nbtree/nbtutils.c
+++ b/src/backend/access/nbtree/nbtutils.c
@@ -63,7 +63,7 @@ static bool _bt_check_compare(IndexScanDesc scan, ScanDirection dir,
 							  bool advancenonrequired, bool forcenonrequired,
 							  bool *continuescan, int *ikey);
 static bool _bt_rowcompare_cmpresult(ScanKey subkey, int cmpresult);
-static bool _bt_check_rowcompare(ScanKey skey,
+static bool _bt_check_rowcompare(ScanKey header,
 								 IndexTuple tuple, int tupnatts, TupleDesc tupdesc,
 								 ScanDirection dir, bool forcenonrequired, bool *continuescan);
 static void _bt_checkkeys_look_ahead(IndexScanDesc scan, BTReadPageState *pstate,
@@ -2969,11 +2969,6 @@ _bt_check_compare(IndexScanDesc scan, ScanDirection dir,
 			 * Tuple fails this qual.  If it's a required qual for the current
 			 * scan direction, then we can conclude no further tuples will
 			 * pass, either.
-			 *
-			 * Note: because we stop the scan as soon as any required equality
-			 * qual fails, it is critical that equality quals be used for the
-			 * initial positioning in _bt_first() when they are available. See
-			 * comments in _bt_first().
 			 */
 			if (requiredSameDir)
 				*continuescan = false;
@@ -3013,6 +3008,8 @@ _bt_rowcompare_cmpresult(ScanKey subkey, int cmpresult)
 {
 	bool		satisfied;
 
+	Assert(subkey->sk_flags & SK_ROW_MEMBER);
+
 	switch (subkey->sk_strategy)
 	{
 		case BTLessStrategyNumber:
@@ -3044,19 +3041,64 @@
  * it's not possible for any future tuples in the current scan direction
  * to pass the qual.
 *
- * This is a subroutine for _bt_checkkeys/_bt_check_compare.
+ * This is a subroutine for _bt_checkkeys/_bt_check_compare.  Caller passes us
+ * a row compare header key taken from so->keyData[].
+ *
+ * Row value comparisons can be described in terms of logical expansions that
+ * use only scalar operators.  Consider the following example row comparison:
+ *
+ * "(a, b, c) > (7, 'bar', 62)"
+ *
+ * This can be evaluated as:
+ *
+ * "(a = 7 AND b = 'bar' AND c > 62) OR (a = 7 AND b > 'bar') OR (a > 7)".
+ *
+ * Notice that this condition is satisfied by _all_ rows that satisfy "a > 7",
+ * and by a subset of all rows that satisfy "a >= 7" (possibly all such rows).
+ * It _can't_ be satisfied by other rows (where "a < 7" or where "a IS NULL").
+ * A row comparison header key can therefore often be treated as if it was a
+ * simple scalar inequality on the row compare's most significant column.
+ * (For example, _bt_advance_array_keys and most preprocessing routines treat
+ * row compares like any other same-strategy inequality on the same column.)
+ *
+ * Things get more complicated for our row compare given a row where "a = 7".
+ * Note that a row compare isn't necessarily satisfied by _every_ tuple that
+ * appears between the first and last satisfied tuple returned by the scan,
+ * due to the way that its lower-order subkeys are only conditionally applied.
+ * A forwards scan that uses our example qual might initially return a tuple
+ * "(a, b, c) = (7, 'zebra', 54)".  But it won't subsequently return a tuple
+ * "(a, b, c) = (7, NULL, 1)" located to the right of the first matching tuple
+ * (assume that "b" was declared NULLS LAST here).  The scan will only return
+ * additional matches upon reaching tuples where "a > 7".  If you rereview our
+ * example row comparison's logical expansion, you'll understand why this is.
+ * (Here we assume that all subkeys could be marked required, guaranteeing
+ * that row comparison order matches index order.  This is the common case.)
+ *
+ * Note that a row comparison header key behaves _exactly_ the same as a
+ * similar scalar inequality key on the row's most significant column once the
+ * scan reaches the point where it no longer needs to evaluate lower-order
+ * subkeys (or before the point where it starts needing to evaluate them).
+ * For example, once a forwards scan that uses our example qual reaches the
+ * first tuple "a > 7", we'll behave in just the same way as our caller would
+ * behave with a similar scalar inequality "a > 7" for the remainder of the
+ * scan (assuming that the scan never changes direction/never goes backwards).
+ * We'll even set continuescan=false according to exactly the same rules as
+ * the ones our caller applies with simple scalar inequalities, including the
+ * rules it applies when NULL tuple values don't satisfy an inequality qual.
 */
 static bool
-_bt_check_rowcompare(ScanKey skey, IndexTuple tuple, int tupnatts,
+_bt_check_rowcompare(ScanKey header, IndexTuple tuple, int tupnatts,
 					 TupleDesc tupdesc, ScanDirection dir,
 					 bool forcenonrequired, bool *continuescan)
 {
-	ScanKey		subkey = (ScanKey) DatumGetPointer(skey->sk_argument);
+	ScanKey		subkey = (ScanKey) DatumGetPointer(header->sk_argument);
 	int32		cmpresult = 0;
 	bool		result;
 
 	/* First subkey should be same as the header says */
-	Assert(subkey->sk_attno == skey->sk_attno);
+	Assert(header->sk_flags & SK_ROW_HEADER);
+	Assert(subkey->sk_attno == header->sk_attno);
+	Assert(subkey->sk_strategy == header->sk_strategy);
 
 	/* Loop over columns of the row condition */
 	for (;;)
@@ -3076,7 +3118,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, int tupnatts,
 			 * columns are required for the scan direction, we can stop the
 			 * scan, because there can't be another tuple that will succeed.
 			 */
-			Assert(subkey != (ScanKey) DatumGetPointer(skey->sk_argument));
+			Assert(subkey != (ScanKey) DatumGetPointer(header->sk_argument));
 			subkey--;
 			if (forcenonrequired)
 			{
@@ -3147,7 +3189,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, int tupnatts,
 				 * can only happen with an "a" NULL some time after the scan
 				 * completely stops needing to use its "b" and "c" members.)
 				 */
-				if (subkey == (ScanKey) DatumGetPointer(skey->sk_argument))
+				if (subkey == (ScanKey) DatumGetPointer(header->sk_argument))
 					reqflags |= SK_BT_REQFWD;	/* safe, first row member */
 
 				if ((subkey->sk_flags & reqflags) &&
@@ -3185,7 +3227,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, int tupnatts,
 				 * happen with an "a" NULL some time after the scan completely
 				 * stops needing to use its "b" and "c" members.)
 				 */
-				if (subkey == (ScanKey) DatumGetPointer(skey->sk_argument))
+				if (subkey == (ScanKey) DatumGetPointer(header->sk_argument))
 					reqflags |= SK_BT_REQBKWD;	/* safe, first row member */
 
 				if ((subkey->sk_flags & reqflags) &&
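
The new nbtutils.c comment above describes row comparisons in terms of a logical expansion that uses only scalar operators. As a standalone illustration of that expansion (not part of the patch; the function name and the use of strcmp() for column "b" are assumptions made for this sketch only), the example qual "(a, b, c) > (7, 'bar', 62)" can be written as an ordinary C predicate:

#include <stdbool.h>
#include <string.h>

/*
 * Illustration only: the logical expansion of the row comparison
 * "(a, b, c) > (7, 'bar', 62)" from the nbtutils.c comment, written as a
 * scalar predicate.  Assumes "b" compares like strcmp() and is never NULL.
 */
static bool
row_compare_example(int a, const char *b, int c)
{
	return (a == 7 && strcmp(b, "bar") == 0 && c > 62) ||
		(a == 7 && strcmp(b, "bar") > 0) ||
		(a > 7);
}

Any row satisfying the final disjunct ("a > 7") passes regardless of "b" and "c", which is why the comment says a row compare header key can usually be treated like a plain inequality on the row's most significant column.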
diff --git a/src/backend/access/spgist/spgdoinsert.c b/src/backend/access/spgist/spgdoinsert.c
index e00bd0e2636..4eadb518776 100644
--- a/src/backend/access/spgist/spgdoinsert.c
+++ b/src/backend/access/spgist/spgdoinsert.c
@@ -1908,7 +1908,7 @@ spgSplitNodeAction(Relation index, SpGistState *state,
 */
 bool
 spgdoinsert(Relation index, SpGistState *state,
-			ItemPointer heapPtr, Datum *datums, bool *isnulls)
+			const ItemPointerData *heapPtr, const Datum *datums, const bool *isnulls)
 {
 	bool		result = true;
 	TupleDesc	leafDescriptor = state->leafTupDesc;
diff --git a/src/backend/access/spgist/spgtextproc.c b/src/backend/access/spgist/spgtextproc.c
index 73842655f08..91f4ab260c2 100644
--- a/src/backend/access/spgist/spgtextproc.c
+++ b/src/backend/access/spgist/spgtextproc.c
@@ -155,7 +155,7 @@ commonPrefix(const char *a, const char *b, int lena, int lenb)
  * On success, *i gets the match location; on failure, it gets where to insert
 */
 static bool
-searchChar(Datum *nodeLabels, int nNodes, int16 c, int *i)
+searchChar(const Datum *nodeLabels, int nNodes, int16 c, int *i)
 {
 	int			StopLow = 0,
 				StopHigh = nNodes;
diff --git a/src/backend/access/spgist/spgutils.c b/src/backend/access/spgist/spgutils.c
index 245ec05e4bb..87c31da71a5 100644
--- a/src/backend/access/spgist/spgutils.c
+++ b/src/backend/access/spgist/spgutils.c
@@ -868,7 +868,7 @@ SpGistGetLeafTupleSize(TupleDesc tupleDescriptor,
  * Construct a leaf tuple containing the given heap TID and datum values
 */
 SpGistLeafTuple
-spgFormLeafTuple(SpGistState *state, ItemPointer heapPtr,
+spgFormLeafTuple(SpGistState *state, const ItemPointerData *heapPtr,
 				 const Datum *datums, const bool *isnulls)
 {
 	SpGistLeafTuple tup;
diff --git a/src/backend/access/spgist/spgvacuum.c b/src/backend/access/spgist/spgvacuum.c
index 8f8a1ad7796..71ef2e5036f 100644
--- a/src/backend/access/spgist/spgvacuum.c
+++ b/src/backend/access/spgist/spgvacuum.c
@@ -61,7 +61,7 @@ typedef struct spgBulkDeleteState
  * ensures that scans of the list don't miss items added during the scan.
 */
 static void
-spgAddPendingTID(spgBulkDeleteState *bds, ItemPointer tid)
+spgAddPendingTID(spgBulkDeleteState *bds, const ItemPointerData *tid)
 {
 	spgVacPendingItem *pitem;
 	spgVacPendingItem **listLink;
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index eceab341255..fd91bcd68ec 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -749,6 +749,7 @@ XLogInsertRecord(XLogRecData *rdata,
 				 XLogRecPtr fpw_lsn,
 				 uint8 flags,
 				 int num_fpi,
+				 uint64 fpi_bytes,
 				 bool topxid_included)
 {
 	XLogCtlInsert *Insert = &XLogCtl->Insert;
@@ -1081,6 +1082,7 @@ XLogInsertRecord(XLogRecData *rdata,
 		pgWalUsage.wal_bytes += rechdr->xl_tot_len;
 		pgWalUsage.wal_records++;
 		pgWalUsage.wal_fpi += num_fpi;
+		pgWalUsage.wal_fpi_bytes += fpi_bytes;
 
 		/* Required for the flush of pending stats WAL data */
 		pgstat_report_fixed = true;
diff --git a/src/backend/access/transam/xloginsert.c b/src/backend/access/transam/xloginsert.c
index 496e0fa4ac6..58cb4b1b00c 100644
--- a/src/backend/access/transam/xloginsert.c
+++ b/src/backend/access/transam/xloginsert.c
@@ -33,12 +33,14 @@
 #include "access/xloginsert.h"
 #include "catalog/pg_control.h"
 #include "common/pg_lzcompress.h"
+#include "executor/instrument.h"
 #include "miscadmin.h"
 #include "pg_trace.h"
 #include "replication/origin.h"
 #include "storage/bufmgr.h"
 #include "storage/proc.h"
 #include "utils/memutils.h"
+#include "utils/pgstat_internal.h"
 
 /*
  * Guess the maximum buffer size required to store a compressed version of
@@ -137,6 +139,7 @@ static MemoryContext xloginsert_cxt;
 static XLogRecData *XLogRecordAssemble(RmgrId rmid, uint8 info,
 									   XLogRecPtr RedoRecPtr, bool doPageWrites,
 									   XLogRecPtr *fpw_lsn, int *num_fpi,
+									   uint64 *fpi_bytes,
 									   bool *topxid_included);
 static bool XLogCompressBackupBlock(const PageData *page, uint16 hole_offset,
 									uint16 hole_length, void *dest, uint16 *dlen);
@@ -510,6 +513,7 @@ XLogInsert(RmgrId rmid, uint8 info)
 		XLogRecPtr	fpw_lsn;
 		XLogRecData *rdt;
 		int			num_fpi = 0;
+		uint64		fpi_bytes = 0;
 
 		/*
 		 * Get values needed to decide whether to do full-page writes. Since
@@ -519,10 +523,11 @@ XLogInsert(RmgrId rmid, uint8 info)
 		GetFullPageWriteInfo(&RedoRecPtr, &doPageWrites);
 
 		rdt = XLogRecordAssemble(rmid, info, RedoRecPtr, doPageWrites,
-								 &fpw_lsn, &num_fpi, &topxid_included);
+								 &fpw_lsn, &num_fpi, &fpi_bytes,
+								 &topxid_included);
 
 		EndPos = XLogInsertRecord(rdt, fpw_lsn, curinsert_flags, num_fpi,
-								  topxid_included);
+								  fpi_bytes, topxid_included);
 	} while (EndPos == InvalidXLogRecPtr);
 
 	XLogResetInsertion();
@@ -560,7 +565,8 @@ XLogSimpleInsertInt64(RmgrId rmid, uint8 info, int64 value)
 static XLogRecData *
 XLogRecordAssemble(RmgrId rmid, uint8 info,
 				   XLogRecPtr RedoRecPtr, bool doPageWrites,
-				   XLogRecPtr *fpw_lsn, int *num_fpi, bool *topxid_included)
+				   XLogRecPtr *fpw_lsn, int *num_fpi, uint64 *fpi_bytes,
+				   bool *topxid_included)
 {
 	XLogRecData *rdt;
 	uint64		total_len = 0;
@@ -796,6 +802,9 @@ XLogRecordAssemble(RmgrId rmid, uint8 info,
 			}
 
 			total_len += bimg.length;
+
+			/* Track the WAL full page images in bytes */
+			*fpi_bytes += bimg.length;
 		}
 
 		if (needs_data)
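
The xlog.c and xloginsert.c hunks above thread a new fpi_bytes counter from XLogRecordAssemble() back to XLogInsertRecord(), which folds it into pgWalUsage.wal_fpi_bytes. A minimal standalone sketch of that accumulation pattern follows; the struct and function names here are invented for illustration and are not part of the patch:

#include <stdint.h>

/* Illustration only: counters modeled on the pgWalUsage fields touched above */
typedef struct WalUsageSketch
{
	uint64_t	wal_fpi;		/* number of full-page images written */
	uint64_t	wal_fpi_bytes;	/* total size of those full-page images */
} WalUsageSketch;

/*
 * Hypothetical "assemble" step: each block that gets a full-page image bumps
 * the per-record FPI count and byte total through out-parameters, mirroring
 * what the patched XLogRecordAssemble() does with num_fpi and fpi_bytes.
 */
static void
account_block_image(uint16_t image_length, int *num_fpi, uint64_t *fpi_bytes)
{
	(*num_fpi)++;
	*fpi_bytes += image_length;
}

/*
 * Hypothetical "insert" step: once the record is inserted, the per-record
 * totals are added to the cumulative usage counters, as the patched
 * XLogInsertRecord() does for pgWalUsage.wal_fpi and wal_fpi_bytes.
 */
static void
account_record(WalUsageSketch *usage, int num_fpi, uint64_t fpi_bytes)
{
	usage->wal_fpi += (uint64_t) num_fpi;
	usage->wal_fpi_bytes += fpi_bytes;
}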
