Diffstat (limited to 'src/backend/storage')
-rw-r--r--  src/backend/storage/buffer/buf_init.c          4
-rw-r--r--  src/backend/storage/buffer/buf_table.c         4
-rw-r--r--  src/backend/storage/buffer/bufmgr.c           40
-rw-r--r--  src/backend/storage/buffer/freelist.c         12
-rw-r--r--  src/backend/storage/buffer/localbuf.c          6
-rw-r--r--  src/backend/storage/file/buffile.c             6
-rw-r--r--  src/backend/storage/file/fd.c                 26
-rw-r--r--  src/backend/storage/freespace/freespace.c      4
-rw-r--r--  src/backend/storage/freespace/fsmpage.c        4
-rw-r--r--  src/backend/storage/ipc/ipc.c                 10
-rw-r--r--  src/backend/storage/ipc/ipci.c                 4
-rw-r--r--  src/backend/storage/ipc/pmsignal.c             6
-rw-r--r--  src/backend/storage/ipc/procarray.c           34
-rw-r--r--  src/backend/storage/ipc/shmem.c               18
-rw-r--r--  src/backend/storage/ipc/shmqueue.c             2
-rw-r--r--  src/backend/storage/ipc/sinval.c              12
-rw-r--r--  src/backend/storage/ipc/sinvaladt.c           22
-rw-r--r--  src/backend/storage/ipc/standby.c              4
-rw-r--r--  src/backend/storage/large_object/inv_api.c     2
-rw-r--r--  src/backend/storage/lmgr/deadlock.c           18
-rw-r--r--  src/backend/storage/lmgr/lmgr.c                8
-rw-r--r--  src/backend/storage/lmgr/lock.c               40
-rw-r--r--  src/backend/storage/lmgr/lwlock.c             10
-rw-r--r--  src/backend/storage/lmgr/predicate.c          52
-rw-r--r--  src/backend/storage/lmgr/proc.c               32
-rw-r--r--  src/backend/storage/lmgr/s_lock.c              6
-rw-r--r--  src/backend/storage/lmgr/spin.c                2
-rw-r--r--  src/backend/storage/page/bufpage.c             6
-rw-r--r--  src/backend/storage/smgr/md.c                 32
-rw-r--r--  src/backend/storage/smgr/smgr.c                2
30 files changed, 214 insertions, 214 deletions
diff --git a/src/backend/storage/buffer/buf_init.c b/src/backend/storage/buffer/buf_init.c
index 94cefba3001..e669b48d3cb 100644
--- a/src/backend/storage/buffer/buf_init.c
+++ b/src/backend/storage/buffer/buf_init.c
@@ -44,7 +44,7 @@ int32 *PrivateRefCount;
*
* IO_IN_PROGRESS -- this is a flag in the buffer descriptor.
* It must be set when an IO is initiated and cleared at
- * the end of the IO. It is there to make sure that one
+ * the end of the IO. It is there to make sure that one
* process doesn't start to use a buffer while another is
* faulting it in. see WaitIO and related routines.
*
@@ -54,7 +54,7 @@ int32 *PrivateRefCount;
*
* PrivateRefCount -- Each buffer also has a private refcount that keeps
* track of the number of times the buffer is pinned in the current
- * process. This is used for two purposes: first, if we pin a
+ * process. This is used for two purposes: first, if we pin a
* a buffer more than once, we only need to change the shared refcount
* once, thus only lock the shared state once; second, when a transaction
* aborts, it should only unpin the buffers exactly the number of times it
diff --git a/src/backend/storage/buffer/buf_table.c b/src/backend/storage/buffer/buf_table.c
index 40da9f5d41f..e2017727330 100644
--- a/src/backend/storage/buffer/buf_table.c
+++ b/src/backend/storage/buffer/buf_table.c
@@ -3,7 +3,7 @@
* buf_table.c
* routines for mapping BufferTags to buffer indexes.
*
- * Note: the routines in this file do no locking of their own. The caller
+ * Note: the routines in this file do no locking of their own. The caller
* must hold a suitable lock on the appropriate BufMappingLock, as specified
* in the comments. We can't do the locking inside these functions because
* in most cases the caller needs to adjust the buffer header contents
@@ -112,7 +112,7 @@ BufTableLookup(BufferTag *tagPtr, uint32 hashcode)
* Insert a hashtable entry for given tag and buffer ID,
* unless an entry already exists for that tag
*
- * Returns -1 on successful insertion. If a conflicting entry exists
+ * Returns -1 on successful insertion. If a conflicting entry exists
* already, returns the buffer ID in that entry.
*
* Caller must hold exclusive lock on BufMappingLock for tag's partition
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index c60158d675f..46373b60126 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -113,7 +113,7 @@ static void AtProcExit_Buffers(int code, Datum arg);
* PrefetchBuffer -- initiate asynchronous read of a block of a relation
*
* This is named by analogy to ReadBuffer but doesn't actually allocate a
- * buffer. Instead it tries to ensure that a future ReadBuffer for the given
+ * buffer. Instead it tries to ensure that a future ReadBuffer for the given
* block will not be delayed by the I/O. Prefetching is optional.
* No-op if prefetching isn't compiled in.
*/
@@ -203,7 +203,7 @@ ReadBuffer(Relation reln, BlockNumber blockNum)
* Assume when this function is called, that reln has been opened already.
*
* In RBM_NORMAL mode, the page is read from disk, and the page header is
- * validated. An error is thrown if the page header is not valid. (But
+ * validated. An error is thrown if the page header is not valid. (But
* note that an all-zero page is considered "valid"; see PageIsVerified().)
*
* RBM_ZERO_ON_ERROR is like the normal mode, but if the page header is not
@@ -211,7 +211,7 @@ ReadBuffer(Relation reln, BlockNumber blockNum)
* for non-critical data, where the caller is prepared to repair errors.
*
* In RBM_ZERO mode, if the page isn't in buffer cache already, it's filled
- * with zeros instead of reading it from disk. Useful when the caller is
+ * with zeros instead of reading it from disk. Useful when the caller is
* going to fill the page from scratch, since this saves I/O and avoids
* unnecessary failure if the page-on-disk has corrupt page headers.
* Caution: do not use this mode to read a page that is beyond the relation's
@@ -368,7 +368,7 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
* This can happen because mdread doesn't complain about reads beyond
* EOF (when zero_damaged_pages is ON) and so a previous attempt to
* read a block beyond EOF could have left a "valid" zero-filled
- * buffer. Unfortunately, we have also seen this case occurring
+ * buffer. Unfortunately, we have also seen this case occurring
* because of buggy Linux kernels that sometimes return an
* lseek(SEEK_END) result that doesn't account for a recent write. In
* that situation, the pre-existing buffer would contain valid data
@@ -593,7 +593,7 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
/*
* Didn't find it in the buffer pool. We'll have to initialize a new
- * buffer. Remember to unlock the mapping lock while doing the work.
+ * buffer. Remember to unlock the mapping lock while doing the work.
*/
LWLockRelease(newPartitionLock);
@@ -603,7 +603,7 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
bool lock_held;
/*
- * Select a victim buffer. The buffer is returned with its header
+ * Select a victim buffer. The buffer is returned with its header
* spinlock still held! Also (in most cases) the BufFreelistLock is
* still held, since it would be bad to hold the spinlock while
* possibly waking up other processes.
@@ -652,7 +652,7 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
* If using a nondefault strategy, and writing the buffer
* would require a WAL flush, let the strategy decide whether
* to go ahead and write/reuse the buffer or to choose another
- * victim. We need lock to inspect the page LSN, so this
+ * victim. We need lock to inspect the page LSN, so this
* can't be done inside StrategyGetBuffer.
*/
if (strategy != NULL &&
@@ -773,7 +773,7 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
{
/*
* We can only get here if (a) someone else is still reading
- * in the page, or (b) a previous read attempt failed. We
+ * in the page, or (b) a previous read attempt failed. We
* have to wait for any active read attempt to finish, and
* then set up our own read attempt if the page is still not
* BM_VALID. StartBufferIO does it all.
@@ -866,7 +866,7 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
* This is used only in contexts such as dropping a relation. We assume
* that no other backend could possibly be interested in using the page,
* so the only reason the buffer might be pinned is if someone else is
- * trying to write it out. We have to let them finish before we can
+ * trying to write it out. We have to let them finish before we can
* reclaim the buffer.
*
* The buffer could get reclaimed by someone else while we are waiting
@@ -965,7 +965,7 @@ retry:
*
* Marks buffer contents as dirty (actual write happens later).
*
- * Buffer must be pinned and exclusive-locked. (If caller does not hold
+ * Buffer must be pinned and exclusive-locked. (If caller does not hold
* exclusive lock, then somebody could be in process of writing the buffer,
* leading to risk of bad data written to disk.)
*/
@@ -1014,7 +1014,7 @@ MarkBufferDirty(Buffer buffer)
*
* Formerly, this saved one cycle of acquiring/releasing the BufMgrLock
* compared to calling the two routines separately. Now it's mainly just
- * a convenience function. However, if the passed buffer is valid and
+ * a convenience function. However, if the passed buffer is valid and
* already contains the desired block, we just return it as-is; and that
* does save considerable work compared to a full release and reacquire.
*
@@ -1066,7 +1066,7 @@ ReleaseAndReadBuffer(Buffer buffer,
* when we first pin it; for other strategies we just make sure the usage_count
* isn't zero. (The idea of the latter is that we don't want synchronized
* heap scans to inflate the count, but we need it to not be zero to discourage
- * other backends from stealing buffers from our ring. As long as we cycle
+ * other backends from stealing buffers from our ring. As long as we cycle
* through the ring faster than the global clock-sweep cycles, buffers in
* our ring won't be chosen as victims for replacement by other backends.)
*
@@ -1074,7 +1074,7 @@ ReleaseAndReadBuffer(Buffer buffer,
*
* Note that ResourceOwnerEnlargeBuffers must have been done already.
*
- * Returns TRUE if buffer is BM_VALID, else FALSE. This provision allows
+ * Returns TRUE if buffer is BM_VALID, else FALSE. This provision allows
* some callers to avoid an extra spinlock cycle.
*/
static bool
@@ -1227,7 +1227,7 @@ BufferSync(int flags)
* have the flag set.
*
* Note that if we fail to write some buffer, we may leave buffers with
- * BM_CHECKPOINT_NEEDED still set. This is OK since any such buffer would
+ * BM_CHECKPOINT_NEEDED still set. This is OK since any such buffer would
* certainly need to be written for the next checkpoint attempt, too.
*/
num_to_write = 0;
@@ -1330,7 +1330,7 @@ BufferSync(int flags)
* This is called periodically by the background writer process.
*
* Returns true if it's appropriate for the bgwriter process to go into
- * low-power hibernation mode. (This happens if the strategy clock sweep
+ * low-power hibernation mode. (This happens if the strategy clock sweep
* has been "lapped" and no buffer allocations have occurred recently,
* or if the bgwriter has been effectively disabled by setting
* bgwriter_lru_maxpages to 0.)
@@ -2022,7 +2022,7 @@ BufferIsPermanent(Buffer buffer)
* specified relation fork that have block numbers >= firstDelBlock.
* (In particular, with firstDelBlock = 0, all pages are removed.)
* Dirty pages are simply dropped, without bothering to write them
- * out first. Therefore, this is NOT rollback-able, and so should be
+ * out first. Therefore, this is NOT rollback-able, and so should be
* used only with extreme caution!
*
* Currently, this is called only from smgr.c when the underlying file
@@ -2031,7 +2031,7 @@ BufferIsPermanent(Buffer buffer)
* be deleted momentarily anyway, and there is no point in writing it.
* It is the responsibility of higher-level code to ensure that the
* deletion or truncation does not lose any data that could be needed
- * later. It is also the responsibility of higher-level code to ensure
+ * later. It is also the responsibility of higher-level code to ensure
* that no other process could be trying to load more pages of the
* relation into buffers.
*
@@ -2133,9 +2133,9 @@ DropRelFileNodeAllBuffers(RelFileNodeBackend rnode)
*
* This function removes all the buffers in the buffer cache for a
* particular database. Dirty pages are simply dropped, without
- * bothering to write them out first. This is used when we destroy a
+ * bothering to write them out first. This is used when we destroy a
* database, to avoid trying to flush data to disk when the directory
- * tree no longer exists. Implementation is pretty similar to
+ * tree no longer exists. Implementation is pretty similar to
* DropRelFileNodeBuffers() which is for destroying just one relation.
* --------------------------------------------------------------------
*/
@@ -2457,7 +2457,7 @@ SetBufferCommitInfoNeedsSave(Buffer buffer)
/*
* This routine might get called many times on the same page, if we are
* making the first scan after commit of an xact that added/deleted many
- * tuples. So, be as quick as we can if the buffer is already dirty. We
+ * tuples. So, be as quick as we can if the buffer is already dirty. We
* do this by not acquiring spinlock if it looks like the status bits are
* already. Since we make this test unlocked, there's a chance we might
* fail to notice that the flags have just been cleared, and failed to
diff --git a/src/backend/storage/buffer/freelist.c b/src/backend/storage/buffer/freelist.c
index c92774798cf..7c6ba70eb85 100644
--- a/src/backend/storage/buffer/freelist.c
+++ b/src/backend/storage/buffer/freelist.c
@@ -36,7 +36,7 @@ typedef struct
*/
/*
- * Statistics. These counters should be wide enough that they can't
+ * Statistics. These counters should be wide enough that they can't
* overflow during a single bgwriter cycle.
*/
uint32 completePasses; /* Complete cycles of the clock sweep */
@@ -135,7 +135,7 @@ StrategyGetBuffer(BufferAccessStrategy strategy, bool *lock_held)
/*
* We count buffer allocation requests so that the bgwriter can estimate
- * the rate of buffer consumption. Note that buffers recycled by a
+ * the rate of buffer consumption. Note that buffers recycled by a
* strategy object are intentionally not counted here.
*/
StrategyControl->numBufferAllocs++;
@@ -269,7 +269,7 @@ StrategyFreeBuffer(volatile BufferDesc *buf)
*
* In addition, we return the completed-pass count (which is effectively
* the higher-order bits of nextVictimBuffer) and the count of recent buffer
- * allocs if non-NULL pointers are passed. The alloc count is reset after
+ * allocs if non-NULL pointers are passed. The alloc count is reset after
* being read.
*/
int
@@ -294,7 +294,7 @@ StrategySyncStart(uint32 *complete_passes, uint32 *num_buf_alloc)
* StrategyNotifyBgWriter -- set or clear allocation notification latch
*
* If bgwriterLatch isn't NULL, the next invocation of StrategyGetBuffer will
- * set that latch. Pass NULL to clear the pending notification before it
+ * set that latch. Pass NULL to clear the pending notification before it
* happens. This feature is used by the bgwriter process to wake itself up
* from hibernation, and is not meant for anybody else to use.
*/
@@ -487,7 +487,7 @@ GetBufferFromRing(BufferAccessStrategy strategy)
/*
* If the slot hasn't been filled yet, tell the caller to allocate a new
- * buffer with the normal allocation strategy. He will then fill this
+ * buffer with the normal allocation strategy. He will then fill this
* slot by calling AddBufferToRing with the new buffer.
*/
bufnum = strategy->buffers[strategy->current];
@@ -540,7 +540,7 @@ AddBufferToRing(BufferAccessStrategy strategy, volatile BufferDesc *buf)
*
* When a nondefault strategy is used, the buffer manager calls this function
* when it turns out that the buffer selected by StrategyGetBuffer needs to
- * be written out and doing so would require flushing WAL too. This gives us
+ * be written out and doing so would require flushing WAL too. This gives us
* a chance to choose a different victim.
*
* Returns true if buffer manager should ask for a new victim, and false
diff --git a/src/backend/storage/buffer/localbuf.c b/src/backend/storage/buffer/localbuf.c
index 46eeaf742d8..9dfcf81ae82 100644
--- a/src/backend/storage/buffer/localbuf.c
+++ b/src/backend/storage/buffer/localbuf.c
@@ -94,7 +94,7 @@ LocalPrefetchBuffer(SMgrRelation smgr, ForkNumber forkNum,
* Find or create a local buffer for the given page of the given relation.
*
* API is similar to bufmgr.c's BufferAlloc, except that we do not need
- * to do any locking since this is all local. Also, IO_IN_PROGRESS
+ * to do any locking since this is all local. Also, IO_IN_PROGRESS
* does not get set. Lastly, we support only default access strategy
* (hence, usage_count is always advanced).
*/
@@ -289,7 +289,7 @@ MarkLocalBufferDirty(Buffer buffer)
* specified relation that have block numbers >= firstDelBlock.
* (In particular, with firstDelBlock = 0, all pages are removed.)
* Dirty pages are simply dropped, without bothering to write them
- * out first. Therefore, this is NOT rollback-able, and so should be
+ * out first. Therefore, this is NOT rollback-able, and so should be
* used only with extreme caution!
*
* See DropRelFileNodeBuffers in bufmgr.c for more notes.
@@ -456,7 +456,7 @@ GetLocalBufferStorage(void)
/*
* We allocate local buffers in a context of their own, so that the
* space eaten for them is easily recognizable in MemoryContextStats
- * output. Create the context on first use.
+ * output. Create the context on first use.
*/
if (LocalBufferContext == NULL)
LocalBufferContext =
diff --git a/src/backend/storage/file/buffile.c b/src/backend/storage/file/buffile.c
index 919cec47742..df04fa26d17 100644
--- a/src/backend/storage/file/buffile.c
+++ b/src/backend/storage/file/buffile.c
@@ -29,7 +29,7 @@
* that was current at that time.
*
* BufFile also supports temporary files that exceed the OS file size limit
- * (by opening multiple fd.c temporary files). This is an essential feature
+ * (by opening multiple fd.c temporary files). This is an essential feature
* for sorts and hashjoins on large amounts of data.
*-------------------------------------------------------------------------
*/
@@ -72,7 +72,7 @@ struct BufFile
bool dirty; /* does buffer need to be written? */
/*
- * resowner is the ResourceOwner to use for underlying temp files. (We
+ * resowner is the ResourceOwner to use for underlying temp files. (We
* don't need to remember the memory context we're using explicitly,
* because after creation we only repalloc our arrays larger.)
*/
@@ -519,7 +519,7 @@ BufFileSeek(BufFile *file, int fileno, off_t offset, int whence)
{
/*
* Seek is to a point within existing buffer; we can just adjust
- * pos-within-buffer, without flushing buffer. Note this is OK
+ * pos-within-buffer, without flushing buffer. Note this is OK
* whether reading or writing, but buffer remains dirty if we were
* writing.
*/
diff --git a/src/backend/storage/file/fd.c b/src/backend/storage/file/fd.c
index 3a595a3295f..a74a0f9c073 100644
--- a/src/backend/storage/file/fd.c
+++ b/src/backend/storage/file/fd.c
@@ -65,7 +65,7 @@
* and other code that tries to open files without consulting fd.c. This
* is the number left free. (While we can be pretty sure we won't get
* EMFILE, there's never any guarantee that we won't get ENFILE due to
- * other processes chewing up FDs. So it's a bad idea to try to open files
+ * other processes chewing up FDs. So it's a bad idea to try to open files
* without consulting fd.c. Nonetheless we cannot control all code.)
*
* Because this is just a fixed setting, we are effectively assuming that
@@ -150,8 +150,8 @@ typedef struct vfd
} Vfd;
/*
- * Virtual File Descriptor array pointer and size. This grows as
- * needed. 'File' values are indexes into this array.
+ * Virtual File Descriptor array pointer and size. This grows as
+ * needed. 'File' values are indexes into this array.
* Note that VfdCache[0] is not a usable VFD, just a list header.
*/
static Vfd *VfdCache;
@@ -171,7 +171,7 @@ static bool have_xact_temporary_files = false;
/*
* Tracks the total size of all temporary files. Note: when temp_file_limit
* is being enforced, this cannot overflow since the limit cannot be more
- * than INT_MAX kilobytes. When not enforcing, it could theoretically
+ * than INT_MAX kilobytes. When not enforcing, it could theoretically
* overflow, but we don't care.
*/
static uint64 temporary_files_size = 0;
@@ -231,7 +231,7 @@ static int nextTempTableSpace = 0;
*
* The Least Recently Used ring is a doubly linked list that begins and
* ends on element zero. Element zero is special -- it doesn't represent
- * a file and its "fd" field always == VFD_CLOSED. Element zero is just an
+ * a file and its "fd" field always == VFD_CLOSED. Element zero is just an
* anchor that shows us the beginning/end of the ring.
* Only VFD elements that are currently really open (have an FD assigned) are
* in the Lru ring. Elements that are "virtually" open can be recognized
@@ -389,7 +389,7 @@ InitFileAccess(void)
* We stop counting if usable_fds reaches max_to_probe. Note: a small
* value of max_to_probe might result in an underestimate of already_open;
* we must fill in any "gaps" in the set of used FDs before the calculation
- * of already_open will give the right answer. In practice, max_to_probe
+ * of already_open will give the right answer. In practice, max_to_probe
* of a couple of dozen should be enough to ensure good results.
*
* We assume stdin (FD 0) is available for dup'ing
@@ -466,7 +466,7 @@ count_usable_fds(int max_to_probe, int *usable_fds, int *already_open)
pfree(fd);
/*
- * Return results. usable_fds is just the number of successful dups. We
+ * Return results. usable_fds is just the number of successful dups. We
* assume that the system limit is highestfd+1 (remember 0 is a legal FD
* number) and so already_open is highestfd+1 - usable_fds.
*/
@@ -961,7 +961,7 @@ OpenTemporaryFile(bool interXact)
/*
* If not, or if tablespace is bad, create in database's default
- * tablespace. MyDatabaseTableSpace should normally be set before we get
+ * tablespace. MyDatabaseTableSpace should normally be set before we get
* here, but just in case it isn't, fall back to pg_default tablespace.
*/
if (file <= 0)
@@ -1255,7 +1255,7 @@ FileWrite(File file, char *buffer, int amount)
/*
* If enforcing temp_file_limit and it's a temp file, check to see if the
- * write would overrun temp_file_limit, and throw error if so. Note: it's
+ * write would overrun temp_file_limit, and throw error if so. Note: it's
* really a modularity violation to throw error here; we should set errno
* and return -1. However, there's no way to report a suitable error
* message if we do that. All current callers would just throw error
@@ -1534,7 +1534,7 @@ reserveAllocatedDesc(void)
/*
* Routines that want to use stdio (ie, FILE*) should use AllocateFile
* rather than plain fopen(). This lets fd.c deal with freeing FDs if
- * necessary to open the file. When done, call FreeFile rather than fclose.
+ * necessary to open the file. When done, call FreeFile rather than fclose.
*
* Note that files that will be open for any significant length of time
* should NOT be handled this way, since they cannot share kernel file
@@ -1713,7 +1713,7 @@ TryAgain:
* Read a directory opened with AllocateDir, ereport'ing any error.
*
* This is easier to use than raw readdir() since it takes care of some
- * otherwise rather tedious and error-prone manipulation of errno. Also,
+ * otherwise rather tedious and error-prone manipulation of errno. Also,
* if you are happy with a generic error message for AllocateDir failure,
* you can just do
*
@@ -1829,7 +1829,7 @@ SetTempTablespaces(Oid *tableSpaces, int numSpaces)
numTempTableSpaces = numSpaces;
/*
- * Select a random starting point in the list. This is to minimize
+ * Select a random starting point in the list. This is to minimize
* conflicts between backends that are most likely sharing the same list
* of temp tablespaces. Note that if we create multiple temp files in the
* same transaction, we'll advance circularly through the list --- this
@@ -1858,7 +1858,7 @@ TempTablespacesAreSet(void)
/*
* GetNextTempTableSpace
*
- * Select the next temp tablespace to use. A result of InvalidOid means
+ * Select the next temp tablespace to use. A result of InvalidOid means
* to use the current database's default tablespace.
*/
Oid
diff --git a/src/backend/storage/freespace/freespace.c b/src/backend/storage/freespace/freespace.c
index 7840adb3501..3f475848bd0 100644
--- a/src/backend/storage/freespace/freespace.c
+++ b/src/backend/storage/freespace/freespace.c
@@ -48,7 +48,7 @@
* Range Category
* 0 - 31 0
* 32 - 63 1
- * ... ... ...
+ * ... ... ...
* 8096 - 8127 253
* 8128 - 8163 254
* 8164 - 8192 255
@@ -123,7 +123,7 @@ static uint8 fsm_vacuum_page(Relation rel, FSMAddress addr, bool *eof);
* will turn out to have too little space available by the time the caller
* gets a lock on it. In that case, the caller should report the actual
* amount of free space available on that page and then try again (see
- * RecordAndGetPageWithFreeSpace). If InvalidBlockNumber is returned,
+ * RecordAndGetPageWithFreeSpace). If InvalidBlockNumber is returned,
* extend the relation.
*/
BlockNumber
diff --git a/src/backend/storage/freespace/fsmpage.c b/src/backend/storage/freespace/fsmpage.c
index e8e2e424248..2fd6828a32b 100644
--- a/src/backend/storage/freespace/fsmpage.c
+++ b/src/backend/storage/freespace/fsmpage.c
@@ -185,13 +185,13 @@ restart:
/*----------
* Start the search from the target slot. At every step, move one
- * node to the right, then climb up to the parent. Stop when we reach
+ * node to the right, then climb up to the parent. Stop when we reach
* a node with enough free space (as we must, since the root has enough
* space).
*
* The idea is to gradually expand our "search triangle", that is, all
* nodes covered by the current node, and to be sure we search to the
- * right from the start point. At the first step, only the target slot
+ * right from the start point. At the first step, only the target slot
* is examined. When we move up from a left child to its parent, we are
* adding the right-hand subtree of that parent to the search triangle.
* When we move right then up from a right child, we are dropping the
diff --git a/src/backend/storage/ipc/ipc.c b/src/backend/storage/ipc/ipc.c
index 067b24a47b0..43b7783a79e 100644
--- a/src/backend/storage/ipc/ipc.c
+++ b/src/backend/storage/ipc/ipc.c
@@ -4,7 +4,7 @@
* POSTGRES inter-process communication definitions.
*
* This file is misnamed, as it no longer has much of anything directly
- * to do with IPC. The functionality here is concerned with managing
+ * to do with IPC. The functionality here is concerned with managing
* exit-time cleanup for either a postmaster or a backend.
*
*
@@ -84,7 +84,7 @@ static int on_proc_exit_index,
* -cim 2/6/90
*
* Unfortunately, we can't really guarantee that add-on code
- * obeys the rule of not calling exit() directly. So, while
+ * obeys the rule of not calling exit() directly. So, while
* this is the preferred way out of the system, we also register
* an atexit callback that will make sure cleanup happens.
* ----------------------------------------------------------------
@@ -103,7 +103,7 @@ proc_exit(int code)
* fixed file name, each backend will overwrite earlier profiles. To
* fix that, we create a separate subdirectory for each backend
* (./gprof/pid) and 'cd' to that subdirectory before we exit() - that
- * forces mcleanup() to write each profile into its own directory. We
+ * forces mcleanup() to write each profile into its own directory. We
* end up with something like: $PGDATA/gprof/8829/gmon.out
* $PGDATA/gprof/8845/gmon.out ...
*
@@ -246,7 +246,7 @@ atexit_callback(void)
* on_proc_exit
*
* this function adds a callback function to the list of
- * functions invoked by proc_exit(). -cim 2/6/90
+ * functions invoked by proc_exit(). -cim 2/6/90
* ----------------------------------------------------------------
*/
void
@@ -273,7 +273,7 @@ on_proc_exit(pg_on_exit_callback function, Datum arg)
* on_shmem_exit
*
* this function adds a callback function to the list of
- * functions invoked by shmem_exit(). -cim 2/6/90
+ * functions invoked by shmem_exit(). -cim 2/6/90
* ----------------------------------------------------------------
*/
void
diff --git a/src/backend/storage/ipc/ipci.c b/src/backend/storage/ipc/ipci.c
index 7e04e08545a..dded397a617 100644
--- a/src/backend/storage/ipc/ipci.c
+++ b/src/backend/storage/ipc/ipci.c
@@ -51,7 +51,7 @@ static bool addin_request_allowed = true;
* a loadable module.
*
* This is only useful if called from the _PG_init hook of a library that
- * is loaded into the postmaster via shared_preload_libraries. Once
+ * is loaded into the postmaster via shared_preload_libraries. Once
* shared memory has been allocated, calls will be ignored. (We could
* raise an error, but it seems better to make it a no-op, so that
* libraries containing such calls can be reloaded if needed.)
@@ -81,7 +81,7 @@ RequestAddinShmemSpace(Size size)
* This is a bit code-wasteful and could be cleaned up.)
*
* If "makePrivate" is true then we only need private memory, not shared
- * memory. This is true for a standalone backend, false for a postmaster.
+ * memory. This is true for a standalone backend, false for a postmaster.
*/
void
CreateSharedMemoryAndSemaphores(bool makePrivate, int port)
diff --git a/src/backend/storage/ipc/pmsignal.c b/src/backend/storage/ipc/pmsignal.c
index 8ad7a97eeb1..1bc2cadcd4e 100644
--- a/src/backend/storage/ipc/pmsignal.c
+++ b/src/backend/storage/ipc/pmsignal.c
@@ -26,9 +26,9 @@
/*
* The postmaster is signaled by its children by sending SIGUSR1. The
- * specific reason is communicated via flags in shared memory. We keep
+ * specific reason is communicated via flags in shared memory. We keep
* a boolean flag for each possible "reason", so that different reasons
- * can be signaled by different backends at the same time. (However,
+ * can be signaled by different backends at the same time. (However,
* if the same reason is signaled more than once simultaneously, the
* postmaster will observe it only once.)
*
@@ -42,7 +42,7 @@
* have three possible states: UNUSED, ASSIGNED, ACTIVE. An UNUSED slot is
* available for assignment. An ASSIGNED slot is associated with a postmaster
* child process, but either the process has not touched shared memory yet,
- * or it has successfully cleaned up after itself. A ACTIVE slot means the
+ * or it has successfully cleaned up after itself. A ACTIVE slot means the
* process is actively using shared memory. The slots are assigned to
* child processes at random, and postmaster.c is responsible for tracking
* which one goes with which PID.
diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c
index 4e6dca9daaf..85742710d32 100644
--- a/src/backend/storage/ipc/procarray.c
+++ b/src/backend/storage/ipc/procarray.c
@@ -19,11 +19,11 @@
*
* During hot standby, we also keep a list of XIDs representing transactions
* that are known to be running in the master (or more precisely, were running
- * as of the current point in the WAL stream). This list is kept in the
+ * as of the current point in the WAL stream). This list is kept in the
* KnownAssignedXids array, and is updated by watching the sequence of
* arriving XIDs. This is necessary because if we leave those XIDs out of
* snapshots taken for standby queries, then they will appear to be already
- * complete, leading to MVCC failures. Note that in hot standby, the PGPROC
+ * complete, leading to MVCC failures. Note that in hot standby, the PGPROC
* array represents standby processes, which by definition are not running
* transactions that have XIDs.
*
@@ -267,7 +267,7 @@ ProcArrayAdd(PGPROC *proc)
if (arrayP->numProcs >= arrayP->maxProcs)
{
/*
- * Ooops, no room. (This really shouldn't happen, since there is a
+ * Ooops, no room. (This really shouldn't happen, since there is a
* fixed supply of PGPROC structs too, and so we should have failed
* earlier.)
*/
@@ -717,7 +717,7 @@ ProcArrayApplyRecoveryInfo(RunningTransactions running)
* ShmemVariableCache->nextXid must be beyond any observed xid.
*
* We don't expect anyone else to modify nextXid, hence we don't need to
- * hold a lock while examining it. We still acquire the lock to modify
+ * hold a lock while examining it. We still acquire the lock to modify
* it, though.
*/
nextXid = latestObservedXid;
@@ -1103,9 +1103,9 @@ TransactionIdIsActive(TransactionId xid)
* ignored.
*
* This is used by VACUUM to decide which deleted tuples must be preserved
- * in a table. allDbs = TRUE is needed for shared relations, but allDbs =
+ * in a table. allDbs = TRUE is needed for shared relations, but allDbs =
* FALSE is sufficient for non-shared relations, since only backends in my
- * own database could ever see the tuples in them. Also, we can ignore
+ * own database could ever see the tuples in them. Also, we can ignore
* concurrently running lazy VACUUMs because (a) they must be working on other
* tables, and (b) they don't need to do snapshot-based lookups.
*
@@ -1424,7 +1424,7 @@ GetSnapshotData(Snapshot snapshot)
* do that much work while holding the ProcArrayLock.
*
* The other backend can add more subxids concurrently, but cannot
- * remove any. Hence it's important to fetch nxids just once.
+ * remove any. Hence it's important to fetch nxids just once.
* Should be safe to use memcpy, though. (We needn't worry about
* missing any xids added concurrently, because they must postdate
* xmax.)
@@ -1948,7 +1948,7 @@ BackendPidGetProc(int pid)
* Only main transaction Ids are considered. This function is mainly
* useful for determining what backend owns a lock.
*
- * Beware that not every xact has an XID assigned. However, as long as you
+ * Beware that not every xact has an XID assigned. However, as long as you
* only call this using an XID found on disk, you're safe.
*/
int
@@ -2010,7 +2010,7 @@ IsBackendPid(int pid)
* some snapshot we have. Since we examine the procarray with only shared
* lock, there are race conditions: a backend could set its xmin just after
* we look. Indeed, on multiprocessors with weak memory ordering, the
- * other backend could have set its xmin *before* we look. We know however
+ * other backend could have set its xmin *before* we look. We know however
* that such a backend must have held shared ProcArrayLock overlapping our
* own hold of ProcArrayLock, else we would see its xmin update. Therefore,
* any snapshot the other backend is taking concurrently with our scan cannot
@@ -2475,7 +2475,7 @@ CountOtherDBBackends(Oid databaseId, int *nbackends, int *nprepared)
* XidCacheRemoveRunningXids
*
* Remove a bunch of TransactionIds from the list of known-running
- * subtransactions for my backend. Both the specified xid and those in
+ * subtransactions for my backend. Both the specified xid and those in
* the xids[] array (of length nxids) are removed from the subxids cache.
* latestXid must be the latest XID among the group.
*/
@@ -2581,7 +2581,7 @@ DisplayXidCache(void)
* treated as running by standby transactions, even though they are not in
* the standby server's PGXACT array.
*
- * We record all XIDs that we know have been assigned. That includes all the
+ * We record all XIDs that we know have been assigned. That includes all the
* XIDs seen in WAL records, plus all unobserved XIDs that we can deduce have
* been assigned. We can deduce the existence of unobserved XIDs because we
* know XIDs are assigned in sequence, with no gaps. The KnownAssignedXids
@@ -2590,7 +2590,7 @@ DisplayXidCache(void)
*
* During hot standby we do not fret too much about the distinction between
* top-level XIDs and subtransaction XIDs. We store both together in the
- * KnownAssignedXids list. In backends, this is copied into snapshots in
+ * KnownAssignedXids list. In backends, this is copied into snapshots in
* GetSnapshotData(), taking advantage of the fact that XidInMVCCSnapshot()
* doesn't care about the distinction either. Subtransaction XIDs are
* effectively treated as top-level XIDs and in the typical case pg_subtrans
@@ -2805,14 +2805,14 @@ ExpireOldKnownAssignedTransactionIds(TransactionId xid)
* must hold shared ProcArrayLock to examine the array. To remove XIDs from
* the array, the startup process must hold ProcArrayLock exclusively, for
* the usual transactional reasons (compare commit/abort of a transaction
- * during normal running). Compressing unused entries out of the array
+ * during normal running). Compressing unused entries out of the array
* likewise requires exclusive lock. To add XIDs to the array, we just insert
* them into slots to the right of the head pointer and then advance the head
* pointer. This wouldn't require any lock at all, except that on machines
* with weak memory ordering we need to be careful that other processors
* see the array element changes before they see the head pointer change.
* We handle this by using a spinlock to protect reads and writes of the
- * head/tail pointers. (We could dispense with the spinlock if we were to
+ * head/tail pointers. (We could dispense with the spinlock if we were to
* create suitable memory access barrier primitives and use those instead.)
* The spinlock must be taken to read or write the head/tail pointers unless
* the caller holds ProcArrayLock exclusively.
@@ -2909,7 +2909,7 @@ KnownAssignedXidsCompress(bool force)
* If exclusive_lock is true then caller already holds ProcArrayLock in
* exclusive mode, so we need no extra locking here. Else caller holds no
* lock, so we need to be sure we maintain sufficient interlocks against
- * concurrent readers. (Only the startup process ever calls this, so no need
+ * concurrent readers. (Only the startup process ever calls this, so no need
* to worry about concurrent writers.)
*/
static void
@@ -2955,7 +2955,7 @@ KnownAssignedXidsAdd(TransactionId from_xid, TransactionId to_xid,
Assert(tail >= 0 && tail < pArray->maxKnownAssignedXids);
/*
- * Verify that insertions occur in TransactionId sequence. Note that even
+ * Verify that insertions occur in TransactionId sequence. Note that even
* if the last existing element is marked invalid, it must still have a
* correctly sequenced XID value.
*/
@@ -3058,7 +3058,7 @@ KnownAssignedXidsSearch(TransactionId xid, bool remove)
}
/*
- * Standard binary search. Note we can ignore the KnownAssignedXidsValid
+ * Standard binary search. Note we can ignore the KnownAssignedXidsValid
* array here, since even invalid entries will contain sorted XIDs.
*/
first = tail;
diff --git a/src/backend/storage/ipc/shmem.c b/src/backend/storage/ipc/shmem.c
index 1ee86fbe4df..5868405d063 100644
--- a/src/backend/storage/ipc/shmem.c
+++ b/src/backend/storage/ipc/shmem.c
@@ -26,7 +26,7 @@
* for a module and should never be allocated after the shared memory
* initialization phase. Hash tables have a fixed maximum size, but
* their actual size can vary dynamically. When entries are added
- * to the table, more space is allocated. Queues link data structures
+ * to the table, more space is allocated. Queues link data structures
* that have been allocated either within fixed-size structures or as hash
* buckets. Each shared data structure has a string name to identify
* it (assigned in the module that declares it).
@@ -40,7 +40,7 @@
* The shmem index has two purposes: first, it gives us
* a simple model of how the world looks when a backend process
* initializes. If something is present in the shmem index,
- * it is initialized. If it is not, it is uninitialized. Second,
+ * it is initialized. If it is not, it is uninitialized. Second,
* the shmem index allows us to allocate shared memory on demand
* instead of trying to preallocate structures and hard-wire the
* sizes and locations in header files. If you are using a lot
@@ -55,8 +55,8 @@
* pointers using the method described in (b) above.
*
* (d) memory allocation model: shared memory can never be
- * freed, once allocated. Each hash table has its own free list,
- * so hash buckets can be reused when an item is deleted. However,
+ * freed, once allocated. Each hash table has its own free list,
+ * so hash buckets can be reused when an item is deleted. However,
* if one hash table grows very large and then shrinks, its space
* cannot be redistributed to other tables. We could build a simple
* hash bucket garbage collector if need be. Right now, it seems
@@ -116,7 +116,7 @@ InitShmemAllocation(void)
Assert(shmhdr != NULL);
/*
- * Initialize the spinlock used by ShmemAlloc. We have to do the space
+ * Initialize the spinlock used by ShmemAlloc. We have to do the space
* allocation the hard way, since obviously ShmemAlloc can't be called
* yet.
*/
@@ -217,7 +217,7 @@ InitShmemIndex(void)
*
* Since ShmemInitHash calls ShmemInitStruct, which expects the ShmemIndex
* hashtable to exist already, we have a bit of a circularity problem in
- * initializing the ShmemIndex itself. The special "ShmemIndex" hash
+ * initializing the ShmemIndex itself. The special "ShmemIndex" hash
* table name will tell ShmemInitStruct to fake it.
*/
info.keysize = SHMEM_INDEX_KEYSIZE;
@@ -294,7 +294,7 @@ ShmemInitHash(const char *name, /* table string name for shmem index */
* ShmemInitStruct -- Create/attach to a structure in shared memory.
*
* This is called during initialization to find or allocate
- * a data structure in shared memory. If no other process
+ * a data structure in shared memory. If no other process
* has created the structure, this routine allocates space
* for it. If it exists already, a pointer to the existing
* structure is returned.
@@ -303,7 +303,7 @@ ShmemInitHash(const char *name, /* table string name for shmem index */
* already in the shmem index (hence, already initialized).
*
* Note: before Postgres 9.0, this function returned NULL for some failure
- * cases. Now, it always throws error instead, so callers need not check
+ * cases. Now, it always throws error instead, so callers need not check
* for NULL.
*/
void *
@@ -335,7 +335,7 @@ ShmemInitStruct(const char *name, Size size, bool *foundPtr)
* be trying to init the shmem index itself.
*
* Notice that the ShmemIndexLock is released before the shmem
- * index has been initialized. This should be OK because no other
+ * index has been initialized. This should be OK because no other
* process can be accessing shared memory yet.
*/
Assert(shmemseghdr->index == NULL);
diff --git a/src/backend/storage/ipc/shmqueue.c b/src/backend/storage/ipc/shmqueue.c
index c009c182818..32d17d08954 100644
--- a/src/backend/storage/ipc/shmqueue.c
+++ b/src/backend/storage/ipc/shmqueue.c
@@ -14,7 +14,7 @@
*
* Package for managing doubly-linked lists in shared memory.
* The only tricky thing is that SHM_QUEUE will usually be a field
- * in a larger record. SHMQueueNext has to return a pointer
+ * in a larger record. SHMQueueNext has to return a pointer
* to the record itself instead of a pointer to the SHMQueue field
* of the record. It takes an extra parameter and does some extra
* pointer arithmetic to do this correctly.
diff --git a/src/backend/storage/ipc/sinval.c b/src/backend/storage/ipc/sinval.c
index 06b72c2bc0f..5479dd1473c 100644
--- a/src/backend/storage/ipc/sinval.c
+++ b/src/backend/storage/ipc/sinval.c
@@ -29,7 +29,7 @@ uint64 SharedInvalidMessageCounter;
* Because backends sitting idle will not be reading sinval events, we
* need a way to give an idle backend a swift kick in the rear and make
* it catch up before the sinval queue overflows and forces it to go
- * through a cache reset exercise. This is done by sending
+ * through a cache reset exercise. This is done by sending
* PROCSIG_CATCHUP_INTERRUPT to any backend that gets too far behind.
*
* State for catchup events consists of two flags: one saying whether
@@ -68,7 +68,7 @@ SendSharedInvalidMessages(const SharedInvalidationMessage *msgs, int n)
* NOTE: it is entirely possible for this routine to be invoked recursively
* as a consequence of processing inside the invalFunction or resetFunction.
* Furthermore, such a recursive call must guarantee that all outstanding
- * inval messages have been processed before it exits. This is the reason
+ * inval messages have been processed before it exits. This is the reason
* for the strange-looking choice to use a statically allocated buffer array
* and counters; it's so that a recursive call can process messages already
* sucked out of sinvaladt.c.
@@ -137,7 +137,7 @@ ReceiveSharedInvalidMessages(
* We are now caught up. If we received a catchup signal, reset that
* flag, and call SICleanupQueue(). This is not so much because we need
* to flush dead messages right now, as that we want to pass on the
- * catchup signal to the next slowest backend. "Daisy chaining" the
+ * catchup signal to the next slowest backend. "Daisy chaining" the
* catchup signal this way avoids creating spikes in system load for what
* should be just a background maintenance activity.
*/
@@ -157,7 +157,7 @@ ReceiveSharedInvalidMessages(
*
* If we are idle (catchupInterruptEnabled is set), we can safely
* invoke ProcessCatchupEvent directly. Otherwise, just set a flag
- * to do it later. (Note that it's quite possible for normal processing
+ * to do it later. (Note that it's quite possible for normal processing
* of the current transaction to cause ReceiveSharedInvalidMessages()
* to be run later on; in that case the flag will get cleared again,
* since there's no longer any reason to do anything.)
@@ -233,7 +233,7 @@ HandleCatchupInterrupt(void)
* EnableCatchupInterrupt
*
* This is called by the PostgresMain main loop just before waiting
- * for a frontend command. We process any pending catchup events,
+ * for a frontend command. We process any pending catchup events,
* and enable the signal handler to process future events directly.
*
* NOTE: the signal handler starts out disabled, and stays so until
@@ -278,7 +278,7 @@ EnableCatchupInterrupt(void)
* DisableCatchupInterrupt
*
* This is called by the PostgresMain main loop just after receiving
- * a frontend command. Signal handler execution of catchup events
+ * a frontend command. Signal handler execution of catchup events
* is disabled until the next EnableCatchupInterrupt call.
*
* The PROCSIG_NOTIFY_INTERRUPT signal handler also needs to call this,
diff --git a/src/backend/storage/ipc/sinvaladt.c b/src/backend/storage/ipc/sinvaladt.c
index ec0153e115a..30d7d8f5239 100644
--- a/src/backend/storage/ipc/sinvaladt.c
+++ b/src/backend/storage/ipc/sinvaladt.c
@@ -45,7 +45,7 @@
* In reality, the messages are stored in a circular buffer of MAXNUMMESSAGES
* entries. We translate MsgNum values into circular-buffer indexes by
* computing MsgNum % MAXNUMMESSAGES (this should be fast as long as
- * MAXNUMMESSAGES is a constant and a power of 2). As long as maxMsgNum
+ * MAXNUMMESSAGES is a constant and a power of 2). As long as maxMsgNum
* doesn't exceed minMsgNum by more than MAXNUMMESSAGES, we have enough space
* in the buffer. If the buffer does overflow, we recover by setting the
* "reset" flag for each backend that has fallen too far behind. A backend
@@ -58,7 +58,7 @@
* normal behavior is that at most one such interrupt is in flight at a time;
* when a backend completes processing a catchup interrupt, it executes
* SICleanupQueue, which will signal the next-furthest-behind backend if
- * needed. This avoids undue contention from multiple backends all trying
+ * needed. This avoids undue contention from multiple backends all trying
* to catch up at once. However, the furthest-back backend might be stuck
* in a state where it can't catch up. Eventually it will get reset, so it
* won't cause any more problems for anyone but itself. But we don't want
@@ -89,7 +89,7 @@
* the writer wants to change maxMsgNum while readers need to read it.
* We deal with that by having a spinlock that readers must take for just
* long enough to read maxMsgNum, while writers take it for just long enough
- * to write maxMsgNum. (The exact rule is that you need the spinlock to
+ * to write maxMsgNum. (The exact rule is that you need the spinlock to
* read maxMsgNum if you are not holding SInvalWriteLock, and you need the
* spinlock to write maxMsgNum unless you are holding both locks.)
*
@@ -410,7 +410,7 @@ SIInsertDataEntries(const SharedInvalidationMessage *data, int n)
SISeg *segP = shmInvalBuffer;
/*
- * N can be arbitrarily large. We divide the work into groups of no more
+ * N can be arbitrarily large. We divide the work into groups of no more
* than WRITE_QUANTUM messages, to be sure that we don't hold the lock for
* an unreasonably long time. (This is not so much because we care about
* letting in other writers, as that some just-caught-up backend might be
@@ -433,7 +433,7 @@ SIInsertDataEntries(const SharedInvalidationMessage *data, int n)
* If the buffer is full, we *must* acquire some space. Clean the
* queue and reset anyone who is preventing space from being freed.
* Otherwise, clean the queue only when it's exceeded the next
- * fullness threshold. We have to loop and recheck the buffer state
+ * fullness threshold. We have to loop and recheck the buffer state
* after any call of SICleanupQueue.
*/
for (;;)
@@ -501,11 +501,11 @@ SIInsertDataEntries(const SharedInvalidationMessage *data, int n)
* executing on behalf of other backends, since each instance will modify only
* fields of its own backend's ProcState, and no instance will look at fields
* of other backends' ProcStates. We express this by grabbing SInvalReadLock
- * in shared mode. Note that this is not exactly the normal (read-only)
+ * in shared mode. Note that this is not exactly the normal (read-only)
* interpretation of a shared lock! Look closely at the interactions before
* allowing SInvalReadLock to be grabbed in shared mode for any other reason!
*
- * NB: this can also run in parallel with SIInsertDataEntries. It is not
+ * NB: this can also run in parallel with SIInsertDataEntries. It is not
* guaranteed that we will return any messages added after the routine is
* entered.
*
@@ -525,10 +525,10 @@ SIGetDataEntries(SharedInvalidationMessage *data, int datasize)
/*
* Before starting to take locks, do a quick, unlocked test to see whether
- * there can possibly be anything to read. On a multiprocessor system,
+ * there can possibly be anything to read. On a multiprocessor system,
* it's possible that this load could migrate backwards and occur before
* we actually enter this function, so we might miss a sinval message that
- * was just added by some other processor. But they can't migrate
+ * was just added by some other processor. But they can't migrate
* backwards over a preceding lock acquisition, so it should be OK. If we
* haven't acquired a lock preventing against further relevant
* invalidations, any such occurrence is not much different than if the
@@ -619,7 +619,7 @@ SIGetDataEntries(SharedInvalidationMessage *data, int datasize)
*
* Caution: because we transiently release write lock when we have to signal
* some other backend, it is NOT guaranteed that there are still minFree
- * free message slots at exit. Caller must recheck and perhaps retry.
+ * free message slots at exit. Caller must recheck and perhaps retry.
*/
void
SICleanupQueue(bool callerHasWriteLock, int minFree)
@@ -640,7 +640,7 @@ SICleanupQueue(bool callerHasWriteLock, int minFree)
/*
* Recompute minMsgNum = minimum of all backends' nextMsgNum, identify the
* furthest-back backend that needs signaling (if any), and reset any
- * backends that are too far back. Note that because we ignore sendOnly
+ * backends that are too far back. Note that because we ignore sendOnly
* backends here it is possible for them to keep sending messages without
* a problem even when they are the only active backend.
*/
diff --git a/src/backend/storage/ipc/standby.c b/src/backend/storage/ipc/standby.c
index 3ebbb702aa1..d07393313a1 100644
--- a/src/backend/storage/ipc/standby.c
+++ b/src/backend/storage/ipc/standby.c
@@ -128,7 +128,7 @@ GetStandbyLimitTime(void)
/*
* The cutoff time is the last WAL data receipt time plus the appropriate
- * delay variable. Delay of -1 means wait forever.
+ * delay variable. Delay of -1 means wait forever.
*/
GetXLogReceiptTime(&rtime, &fromStream);
if (fromStream)
@@ -470,7 +470,7 @@ SendRecoveryConflictWithBufferPin(ProcSignalReason reason)
* determine whether an actual deadlock condition is present: the lock we
* need to wait for might be unrelated to any held by the Startup process.
* Sooner or later, this mechanism should get ripped out in favor of somehow
- * accounting for buffer locks in DeadLockCheck(). However, errors here
+ * accounting for buffer locks in DeadLockCheck(). However, errors here
* seem to be very low-probability in practice, so for now it's not worth
* the trouble.
*/
diff --git a/src/backend/storage/large_object/inv_api.c b/src/backend/storage/large_object/inv_api.c
index 0f1dacdc28d..e488528efbe 100644
--- a/src/backend/storage/large_object/inv_api.c
+++ b/src/backend/storage/large_object/inv_api.c
@@ -797,7 +797,7 @@ inv_truncate(LargeObjectDesc *obj_desc, int len)
/*
* If we found the page of the truncation point we need to truncate the
- * data in it. Otherwise if we're in a hole, we need to create a page to
+ * data in it. Otherwise if we're in a hole, we need to create a page to
* mark the end of data.
*/
if (olddata != NULL && olddata->pageno == pageno)
diff --git a/src/backend/storage/lmgr/deadlock.c b/src/backend/storage/lmgr/deadlock.c
index 697ef6e495a..e569fc3eb4e 100644
--- a/src/backend/storage/lmgr/deadlock.c
+++ b/src/backend/storage/lmgr/deadlock.c
@@ -51,7 +51,7 @@ typedef struct
} WAIT_ORDER;
/*
- * Information saved about each edge in a detected deadlock cycle. This
+ * Information saved about each edge in a detected deadlock cycle. This
* is used to print a diagnostic message upon failure.
*
* Note: because we want to examine this info after releasing the lock
@@ -119,7 +119,7 @@ static PGPROC *blocking_autovacuum_proc = NULL;
* InitDeadLockChecking -- initialize deadlock checker during backend startup
*
* This does per-backend initialization of the deadlock checker; primarily,
- * allocation of working memory for DeadLockCheck. We do this per-backend
+ * allocation of working memory for DeadLockCheck. We do this per-backend
* since there's no percentage in making the kernel do copy-on-write
* inheritance of workspace from the postmaster. We want to allocate the
* space at startup because (a) the deadlock checker might be invoked when
@@ -291,10 +291,10 @@ GetBlockingAutoVacuumPgproc(void)
* DeadLockCheckRecurse -- recursively search for valid orderings
*
* curConstraints[] holds the current set of constraints being considered
- * by an outer level of recursion. Add to this each possible solution
+ * by an outer level of recursion. Add to this each possible solution
* constraint for any cycle detected at this level.
*
- * Returns TRUE if no solution exists. Returns FALSE if a deadlock-free
+ * Returns TRUE if no solution exists. Returns FALSE if a deadlock-free
* state is attainable, in which case waitOrders[] shows the required
* rearrangements of lock wait queues (if any).
*/
@@ -429,7 +429,7 @@ TestConfiguration(PGPROC *startProc)
*
* Since we need to be able to check hypothetical configurations that would
* exist after wait queue rearrangement, the routine pays attention to the
- * table of hypothetical queue orders in waitOrders[]. These orders will
+ * table of hypothetical queue orders in waitOrders[]. These orders will
* be believed in preference to the actual ordering seen in the locktable.
*/
static bool
@@ -506,7 +506,7 @@ FindLockCycleRecurse(PGPROC *checkProc,
conflictMask = lockMethodTable->conflictTab[checkProc->waitLockMode];
/*
- * Scan for procs that already hold conflicting locks. These are "hard"
+ * Scan for procs that already hold conflicting locks. These are "hard"
* edges in the waits-for graph.
*/
procLocks = &(lock->procLocks);
@@ -705,7 +705,7 @@ ExpandConstraints(EDGE *constraints,
nWaitOrders = 0;
/*
- * Scan constraint list backwards. This is because the last-added
+ * Scan constraint list backwards. This is because the last-added
* constraint is the only one that could fail, and so we want to test it
* for inconsistency first.
*/
@@ -759,7 +759,7 @@ ExpandConstraints(EDGE *constraints,
* The initial queue ordering is taken directly from the lock's wait queue.
* The output is an array of PGPROC pointers, of length equal to the lock's
* wait queue length (the caller is responsible for providing this space).
- * The partial order is specified by an array of EDGE structs. Each EDGE
+ * The partial order is specified by an array of EDGE structs. Each EDGE
* is one that we need to reverse, therefore the "waiter" must appear before
* the "blocker" in the output array. The EDGE array may well contain
* edges associated with other locks; these should be ignored.
@@ -829,7 +829,7 @@ TopoSort(LOCK *lock,
afterConstraints[k] = i + 1;
}
/*--------------------
- * Now scan the topoProcs array backwards. At each step, output the
+ * Now scan the topoProcs array backwards. At each step, output the
* last proc that has no remaining before-constraints, and decrease
* the beforeConstraints count of each of the procs it was constrained
* against.
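
The TopoSort comments above describe producing a wait-queue ordering in which each EDGE's "waiter" comes out ahead of its "blocker". As a rough illustration only -- not part of this patch, and not PostgreSQL's exact algorithm -- here is a self-contained sketch of that kind of constrained reordering, with hypothetical names and sizes:

/*
 * Reorder a small wait queue so that, for every edge, the waiter precedes
 * the blocker.  The constraints here are acyclic, so a pick always exists.
 */
#include <stdio.h>
#include <stdbool.h>

#define NPROCS 4
#define NEDGES 2

typedef struct { int waiter; int blocker; } Edge;   /* waiter must come first */

int
main(void)
{
    int   queue[NPROCS] = {100, 101, 102, 103};     /* pretend PIDs, current queue order */
    Edge  edges[NEDGES] = {{102, 100}, {103, 101}}; /* pairs we need to reverse */
    int   before[NPROCS] = {0};                     /* # of unsatisfied "must follow" constraints */
    bool  emitted[NPROCS] = {false};
    int   output[NPROCS];

    /* each edge puts one before-constraint on its blocker */
    for (int e = 0; e < NEDGES; e++)
        for (int i = 0; i < NPROCS; i++)
            if (queue[i] == edges[e].blocker)
                before[i]++;

    /* repeatedly emit a proc with no remaining before-constraints */
    for (int n = 0; n < NPROCS; n++)
    {
        int pick = -1;

        for (int i = 0; i < NPROCS; i++)
            if (!emitted[i] && before[i] == 0)
            {
                pick = i;
                break;
            }
        output[n] = queue[pick];
        emitted[pick] = true;

        /* emitting a waiter satisfies the constraints on its blockers */
        for (int e = 0; e < NEDGES; e++)
            if (edges[e].waiter == queue[pick])
                for (int i = 0; i < NPROCS; i++)
                    if (queue[i] == edges[e].blocker)
                        before[i]--;
    }

    for (int n = 0; n < NPROCS; n++)
        printf("%d ", output[n]);       /* prints: 102 100 103 101 */
    printf("\n");
    return 0;
}
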
diff --git a/src/backend/storage/lmgr/lmgr.c b/src/backend/storage/lmgr/lmgr.c
index e1fa74f9601..06f148ef298 100644
--- a/src/backend/storage/lmgr/lmgr.c
+++ b/src/backend/storage/lmgr/lmgr.c
@@ -65,7 +65,7 @@ SetLocktagRelationOid(LOCKTAG *tag, Oid relid)
/*
* LockRelationOid
*
- * Lock a relation given only its OID. This should generally be used
+ * Lock a relation given only its OID. This should generally be used
* before attempting to open the relation's relcache entry.
*/
void
@@ -253,7 +253,7 @@ LockHasWaitersRelation(Relation relation, LOCKMODE lockmode)
/*
* LockRelationIdForSession
*
- * This routine grabs a session-level lock on the target relation. The
+ * This routine grabs a session-level lock on the target relation. The
* session lock persists across transaction boundaries. It will be removed
* when UnlockRelationIdForSession() is called, or if an ereport(ERROR) occurs,
* or if the backend exits.
@@ -456,7 +456,7 @@ XactLockTableInsert(TransactionId xid)
*
* Delete the lock showing that the given transaction ID is running.
* (This is never used for main transaction IDs; those locks are only
- * released implicitly at transaction end. But we do use it for subtrans IDs.)
+ * released implicitly at transaction end. But we do use it for subtrans IDs.)
*/
void
XactLockTableDelete(TransactionId xid)
@@ -477,7 +477,7 @@ XactLockTableDelete(TransactionId xid)
* subtransaction, we will exit as soon as it aborts or its top parent commits.
* It takes some extra work to ensure this, because to save on shared memory
* the XID lock of a subtransaction is released when it ends, whether
- * successfully or unsuccessfully. So we have to check if it's "still running"
+ * successfully or unsuccessfully. So we have to check if it's "still running"
* and if so wait for its parent.
*/
void
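
The XactLockTable comments above rest on one idiom: a transaction holds an exclusive lock on its own XID for its whole lifetime, so "waiting for that transaction" is just acquiring and immediately releasing the same lock. A purely conceptual stand-in using plain pthreads -- no PostgreSQL APIs, names invented for the demo; compile with -pthread:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t xid_lock = PTHREAD_MUTEX_INITIALIZER;   /* stands in for the XID lock */

static void *
transaction(void *arg)
{
    (void) arg;
    pthread_mutex_lock(&xid_lock);      /* XactLockTableInsert analogue */
    printf("xact: running\n");
    sleep(1);                           /* do some work */
    printf("xact: done\n");
    pthread_mutex_unlock(&xid_lock);    /* released only when the transaction ends */
    return NULL;
}

static void *
waiter(void *arg)
{
    (void) arg;
    /* XactLockTableWait analogue: block until the transaction ends */
    pthread_mutex_lock(&xid_lock);
    pthread_mutex_unlock(&xid_lock);
    printf("waiter: transaction has ended\n");
    return NULL;
}

int
main(void)
{
    pthread_t t1, t2;

    pthread_create(&t1, NULL, transaction, NULL);
    usleep(100000);                     /* crude ordering, good enough for a demo */
    pthread_create(&t2, NULL, waiter, NULL);
    pthread_join(t1, NULL);
    pthread_join(t2, NULL);
    return 0;
}
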
diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index 15fbe86107d..e62ba62a0f1 100644
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -186,7 +186,7 @@ static int FastPathLocalUseCount = 0;
/*
* The fast-path lock mechanism is concerned only with relation locks on
- * unshared relations by backends bound to a database. The fast-path
+ * unshared relations by backends bound to a database. The fast-path
* mechanism exists mostly to accelerate acquisition and release of locks
* that rarely conflict. Because ShareUpdateExclusiveLock is
* self-conflicting, it can't use the fast-path mechanism; but it also does
@@ -899,7 +899,7 @@ LockAcquireExtended(const LOCKTAG *locktag,
/*
* If lock requested conflicts with locks requested by waiters, must join
- * wait queue. Otherwise, check for conflict with already-held locks.
+ * wait queue. Otherwise, check for conflict with already-held locks.
* (That's last because most complex check.)
*/
if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
@@ -980,7 +980,7 @@ LockAcquireExtended(const LOCKTAG *locktag,
/*
* NOTE: do not do any material change of state between here and
- * return. All required changes in locktable state must have been
+ * return. All required changes in locktable state must have been
* done when the lock was granted to us --- see notes in WaitOnLock.
*/
@@ -1017,7 +1017,7 @@ LockAcquireExtended(const LOCKTAG *locktag,
{
/*
* Decode the locktag back to the original values, to avoid sending
- * lots of empty bytes with every message. See lock.h to check how a
+ * lots of empty bytes with every message. See lock.h to check how a
* locktag is defined for LOCKTAG_RELATION
*/
LogAccessExclusiveLock(locktag->locktag_field1,
@@ -1266,7 +1266,7 @@ LockCheckConflicts(LockMethod lockMethodTable,
}
/*
- * Rats. Something conflicts. But it could still be my own lock. We have
+ * Rats. Something conflicts. But it could still be my own lock. We have
* to construct a conflict mask that does not reflect our own locks, but
* only lock types held by other processes.
*/
@@ -1358,7 +1358,7 @@ UnGrantLock(LOCK *lock, LOCKMODE lockmode,
/*
* We need only run ProcLockWakeup if the released lock conflicts with at
- * least one of the lock types requested by waiter(s). Otherwise whatever
+ * least one of the lock types requested by waiter(s). Otherwise whatever
* conflict made them wait must still exist. NOTE: before MVCC, we could
* skip wakeup if lock->granted[lockmode] was still positive. But that's
* not true anymore, because the remaining granted locks might belong to
@@ -1378,7 +1378,7 @@ UnGrantLock(LOCK *lock, LOCKMODE lockmode,
}
/*
- * CleanUpLock -- clean up after releasing a lock. We garbage-collect the
+ * CleanUpLock -- clean up after releasing a lock. We garbage-collect the
* proclock and lock objects if possible, and call ProcLockWakeup if there
* are remaining requests and the caller says it's OK. (Normally, this
* should be called after UnGrantLock, and wakeupNeeded is the result from
@@ -1796,7 +1796,7 @@ LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
}
/*
- * Decrease the total local count. If we're still holding the lock, we're
+ * Decrease the total local count. If we're still holding the lock, we're
* done.
*/
locallock->nLocks--;
@@ -1928,7 +1928,7 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
#endif
/*
- * Get rid of our fast-path VXID lock, if appropriate. Note that this is
+ * Get rid of our fast-path VXID lock, if appropriate. Note that this is
* the only way that the lock we hold on our own VXID can ever get
* released: it is always and only released when a toplevel transaction
* ends.
@@ -2014,7 +2014,7 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
* fast-path data structures, we must acquire it before attempting
* to release the lock via the fast-path. We will continue to
* hold the LWLock until we're done scanning the locallock table,
- * unless we hit a transferred fast-path lock. (XXX is this
+ * unless we hit a transferred fast-path lock. (XXX is this
* really such a good idea? There could be a lot of entries ...)
*/
if (!have_fast_path_lwlock)
@@ -2033,7 +2033,7 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
/*
* Our lock, originally taken via the fast path, has been
- * transferred to the main lock table. That's going to require
+ * transferred to the main lock table. That's going to require
* some extra work, so release our fast-path lock before starting.
*/
LWLockRelease(MyProc->backendLock);
@@ -2042,7 +2042,7 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
/*
* Now dump the lock. We haven't got a pointer to the LOCK or
* PROCLOCK in this case, so we have to handle this a bit
- * differently than a normal lock release. Unfortunately, this
+ * differently than a normal lock release. Unfortunately, this
* requires an extra LWLock acquire-and-release cycle on the
* partitionLock, but hopefully it shouldn't happen often.
*/
@@ -2434,9 +2434,9 @@ FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag
* acquiring proc->backendLock. In particular, it's certainly safe to
* assume that if the target backend holds any fast-path locks, it
* must have performed a memory-fencing operation (in particular, an
- * LWLock acquisition) since setting proc->databaseId. However, it's
+ * LWLock acquisition) since setting proc->databaseId. However, it's
* less clear that our backend is certain to have performed a memory
- * fencing operation since the other backend set proc->databaseId. So
+ * fencing operation since the other backend set proc->databaseId. So
* for now, we test it after acquiring the LWLock just to be safe.
*/
if (proc->databaseId != locktag->locktag_field1)
@@ -2950,7 +2950,7 @@ AtPrepare_Locks(void)
continue;
/*
- * If we have both session- and transaction-level locks, fail. This
+ * If we have both session- and transaction-level locks, fail. This
* should never happen with regular locks, since we only take those at
* session level in some special operations like VACUUM. It's
* possible to hit this with advisory locks, though.
@@ -2959,7 +2959,7 @@ AtPrepare_Locks(void)
* the transactional hold to the prepared xact. However, that would
* require two PROCLOCK objects, and we cannot be sure that another
* PROCLOCK will be available when it comes time for PostPrepare_Locks
- * to do the deed. So for now, we error out while we can still do so
+ * to do the deed. So for now, we error out while we can still do so
* safely.
*/
if (haveSessionLock)
@@ -3152,7 +3152,7 @@ PostPrepare_Locks(TransactionId xid)
/*
* We cannot simply modify proclock->tag.myProc to reassign
* ownership of the lock, because that's part of the hash key and
- * the proclock would then be in the wrong hash chain. So, unlink
+ * the proclock would then be in the wrong hash chain. So, unlink
* and delete the old proclock; create a new one with the right
* contents; and link it into place. We do it in this order to be
* certain we won't run out of shared memory (the way dynahash.c
@@ -3272,7 +3272,7 @@ GetLockStatusData(void)
/*
* First, we iterate through the per-backend fast-path arrays, locking
- * them one at a time. This might produce an inconsistent picture of the
+ * them one at a time. This might produce an inconsistent picture of the
* system state, but taking all of those LWLocks at the same time seems
* impractical (in particular, note MAX_SIMUL_LWLOCKS). It shouldn't
* matter too much, because none of these locks can be involved in lock
@@ -3351,7 +3351,7 @@ GetLockStatusData(void)
* will be self-consistent.
*
* Since this is a read-only operation, we take shared instead of
- * exclusive lock. There's not a whole lot of point to this, because all
+ * exclusive lock. There's not a whole lot of point to this, because all
* the normal operations require exclusive lock, but it doesn't hurt
* anything either. It will at least allow two backends to do
* GetLockStatusData in parallel.
@@ -3870,7 +3870,7 @@ lock_twophase_postabort(TransactionId xid, uint16 info,
* as MyProc->lxid, you might wonder if we really need both. The
* difference is that MyProc->lxid is set and cleared unlocked, and
* examined by procarray.c, while fpLocalTransactionId is protected by
- * backendLock and is used only by the locking subsystem. Doing it this
+ * backendLock and is used only by the locking subsystem. Doing it this
* way makes it easier to verify that there are no funny race conditions.
*
* We don't bother recording this lock in the local lock table, since it's
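
The fast-path comments above boil down to: record weak, rarely-conflicting relation locks in a small per-backend array, and fall back to the shared lock table only when the array is full or a strong lock is in play. A single-process sketch of that decision, with hypothetical names and sizes (not the patched code):

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define FP_SLOTS 16

typedef struct
{
    uint32_t relid[FP_SLOTS];
    bool     used[FP_SLOTS];
} FastPathLocks;

static FastPathLocks my_fast_path;      /* per-backend; the real thing is guarded by a per-backend LWLock */
static int  strong_lock_counts[16];     /* "shared": # of strong locks, hashed by relid */

static bool
fast_path_acquire(uint32_t relid)
{
    if (strong_lock_counts[relid % 16] > 0)
        return false;                   /* someone holds/wants a strong lock: use the main table */

    for (int i = 0; i < FP_SLOTS; i++)
        if (!my_fast_path.used[i])
        {
            my_fast_path.used[i] = true;
            my_fast_path.relid[i] = relid;
            return true;                /* no shared hash table touched */
        }
    return false;                       /* array full: fall back to the main lock table */
}

int
main(void)
{
    printf("relid 42 via fast path: %s\n", fast_path_acquire(42) ? "yes" : "no");
    strong_lock_counts[43 % 16]++;      /* pretend someone took a strong lock on relid 43 */
    printf("relid 43 via fast path: %s\n", fast_path_acquire(43) ? "yes" : "no");
    return 0;
}
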
diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c
index 95d4b37bef3..b6d2221f86f 100644
--- a/src/backend/storage/lmgr/lwlock.c
+++ b/src/backend/storage/lmgr/lwlock.c
@@ -6,7 +6,7 @@
* Lightweight locks are intended primarily to provide mutual exclusion of
* access to shared-memory data structures. Therefore, they offer both
* exclusive and shared lock modes (to support read/write and read-only
- * access to a shared object). There are few other frammishes. User-level
+ * access to a shared object). There are few other frammishes. User-level
* locking should be done with the full lock manager --- which depends on
* LWLocks to protect its shared state.
*
@@ -53,7 +53,7 @@ typedef struct LWLock
* (LWLockIds are indexes into the array.) We force the array stride to
* be a power of 2, which saves a few cycles in indexing, but more
* importantly also ensures that individual LWLocks don't cross cache line
- * boundaries. This reduces cache contention problems, especially on AMD
+ * boundaries. This reduces cache contention problems, especially on AMD
* Opterons. (Of course, we have to also ensure that the array start
* address is suitably aligned.)
*
@@ -219,7 +219,7 @@ NumLWLocks(void)
* a loadable module.
*
* This is only useful if called from the _PG_init hook of a library that
- * is loaded into the postmaster via shared_preload_libraries. Once
+ * is loaded into the postmaster via shared_preload_libraries. Once
* shared memory has been allocated, calls will be ignored. (We could
* raise an error, but it seems better to make it a no-op, so that
* libraries containing such calls can be reloaded if needed.)
@@ -388,7 +388,7 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode)
* in the presence of contention. The efficiency of being able to do that
* outweighs the inefficiency of sometimes wasting a process dispatch
* cycle because the lock is not free when a released waiter finally gets
- * to run. See pgsql-hackers archives for 29-Dec-01.
+ * to run. See pgsql-hackers archives for 29-Dec-01.
*/
for (;;)
{
@@ -574,7 +574,7 @@ LWLockConditionalAcquire(LWLockId lockid, LWLockMode mode)
/*
* LWLockAcquireOrWait - Acquire lock, or wait until it's free
*
- * The semantics of this function are a bit funky. If the lock is currently
+ * The semantics of this function are a bit funky. If the lock is currently
* free, it is acquired in the given mode, and the function returns true. If
* the lock isn't immediately free, the function waits until it is released
* and returns false, but does not acquire the lock.
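
To illustrate the cache-line point in the lwlock.c comment above: padding each array slot out to a power-of-2 stride keeps any single slot from straddling a cache line (provided the array start is aligned). A generic sketch with a placeholder struct, not the real LWLock layout:

#include <stdio.h>

typedef struct
{
    char exclusive;
    char shared;
    /* ... wait-queue pointers would live here ... */
} MyLock;

#define LOCK_PADDED_SIZE 64             /* >= sizeof(MyLock), power of 2, >= cache line */

typedef union
{
    MyLock  lock;
    char    pad[LOCK_PADDED_SIZE];
} MyLockPadded;

static MyLockPadded locks[128];         /* the start address should also be 64-byte aligned */

int
main(void)
{
    printf("array stride = %zu bytes\n", sizeof(MyLockPadded));    /* 64 */
    printf("lock 3 begins at offset %zu\n",
           (size_t) ((char *) &locks[3] - (char *) &locks[0]));    /* 192 */
    return 0;
}
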
diff --git a/src/backend/storage/lmgr/predicate.c b/src/backend/storage/lmgr/predicate.c
index 4e8f7cf81a7..84260f68022 100644
--- a/src/backend/storage/lmgr/predicate.c
+++ b/src/backend/storage/lmgr/predicate.c
@@ -32,11 +32,11 @@
* examining the MVCC data.)
*
* (1) Besides tuples actually read, they must cover ranges of tuples
- * which would have been read based on the predicate. This will
+ * which would have been read based on the predicate. This will
* require modelling the predicates through locks against database
* objects such as pages, index ranges, or entire tables.
*
- * (2) They must be kept in RAM for quick access. Because of this, it
+ * (2) They must be kept in RAM for quick access. Because of this, it
* isn't possible to always maintain tuple-level granularity -- when
* the space allocated to store these approaches exhaustion, a
* request for a lock may need to scan for situations where a single
@@ -49,7 +49,7 @@
*
* (4) While they are associated with a transaction, they must survive
* a successful COMMIT of that transaction, and remain until all
- * overlapping transactions complete. This even means that they
+ * overlapping transactions complete. This even means that they
* must survive termination of the transaction's process. If a
* top level transaction is rolled back, however, it is immediately
* flagged so that it can be ignored, and its SIREAD locks can be
@@ -90,7 +90,7 @@
* may yet matter because they overlap still-active transactions.
*
* SerializablePredicateLockListLock
- * - Protects the linked list of locks held by a transaction. Note
+ * - Protects the linked list of locks held by a transaction. Note
* that the locks themselves are also covered by the partition
* locks of their respective lock targets; this lock only affects
* the linked list connecting the locks related to a transaction.
@@ -101,11 +101,11 @@
* - It is relatively infrequent that another process needs to
* modify the list for a transaction, but it does happen for such
* things as index page splits for pages with predicate locks and
- * freeing of predicate locked pages by a vacuum process. When
+ * freeing of predicate locked pages by a vacuum process. When
* removing a lock in such cases, the lock itself contains the
* pointers needed to remove it from the list. When adding a
* lock in such cases, the lock can be added using the anchor in
- * the transaction structure. Neither requires walking the list.
+ * the transaction structure. Neither requires walking the list.
* - Cleaning up the list for a terminated transaction is sometimes
* not done on a retail basis, in which case no lock is required.
* - Due to the above, a process accessing its active transaction's
@@ -350,7 +350,7 @@ int max_predicate_locks_per_xact; /* set by guc.c */
/*
* This provides a list of objects in order to track transactions
- * participating in predicate locking. Entries in the list are fixed size,
+ * participating in predicate locking. Entries in the list are fixed size,
* and reside in shared memory. The memory address of an entry must remain
* fixed during its lifetime. The list will be protected from concurrent
* update externally; no provision is made in this code to manage that. The
@@ -541,7 +541,7 @@ SerializationNeededForWrite(Relation relation)
/*
* These functions are a simple implementation of a list for this specific
- * type of struct. If there is ever a generalized shared memory list, we
+ * type of struct. If there is ever a generalized shared memory list, we
* should probably switch to that.
*/
static SERIALIZABLEXACT *
@@ -761,7 +761,7 @@ OldSerXidPagePrecedesLogically(int p, int q)
int diff;
/*
- * We have to compare modulo (OLDSERXID_MAX_PAGE+1)/2. Both inputs should
+ * We have to compare modulo (OLDSERXID_MAX_PAGE+1)/2. Both inputs should
* be in the range 0..OLDSERXID_MAX_PAGE.
*/
Assert(p >= 0 && p <= OLDSERXID_MAX_PAGE);
@@ -923,7 +923,7 @@ OldSerXidAdd(TransactionId xid, SerCommitSeqNo minConflictCommitSeqNo)
}
/*
- * Get the minimum commitSeqNo for any conflict out for the given xid. For
+ * Get the minimum commitSeqNo for any conflict out for the given xid. For
* a transaction which exists but has no conflict out, InvalidSerCommitSeqNo
* will be returned.
*/
@@ -976,7 +976,7 @@ OldSerXidSetActiveSerXmin(TransactionId xid)
/*
* When no sxacts are active, nothing overlaps, set the xid values to
* invalid to show that there are no valid entries. Don't clear headPage,
- * though. A new xmin might still land on that page, and we don't want to
+ * though. A new xmin might still land on that page, and we don't want to
* repeatedly zero out the same page.
*/
if (!TransactionIdIsValid(xid))
@@ -1461,7 +1461,7 @@ SummarizeOldestCommittedSxact(void)
/*
* Grab the first sxact off the finished list -- this will be the earliest
- * commit. Remove it from the list.
+ * commit. Remove it from the list.
*/
sxact = (SERIALIZABLEXACT *)
SHMQueueNext(FinishedSerializableTransactions,
@@ -1614,7 +1614,7 @@ SetSerializableTransactionSnapshot(Snapshot snapshot,
/*
* We do not allow SERIALIZABLE READ ONLY DEFERRABLE transactions to
* import snapshots, since there's no way to wait for a safe snapshot when
- * we're using the snap we're told to. (XXX instead of throwing an error,
+ * we're using the snap we're told to. (XXX instead of throwing an error,
* we could just ignore the XactDeferrable flag?)
*/
if (XactReadOnly && XactDeferrable)
@@ -1663,7 +1663,7 @@ GetSerializableTransactionSnapshotInt(Snapshot snapshot,
* release SerializableXactHashLock to call SummarizeOldestCommittedSxact,
* this means we have to create the sxact first, which is a bit annoying
* (in particular, an elog(ERROR) in procarray.c would cause us to leak
- * the sxact). Consider refactoring to avoid this.
+ * the sxact). Consider refactoring to avoid this.
*/
#ifdef TEST_OLDSERXID
SummarizeOldestCommittedSxact();
@@ -2045,7 +2045,7 @@ RemoveTargetIfNoLongerUsed(PREDICATELOCKTARGET *target, uint32 targettaghash)
/*
* Delete child target locks owned by this process.
* This implementation is assuming that the usage of each target tag field
- * is uniform. No need to make this hard if we don't have to.
+ * is uniform. No need to make this hard if we don't have to.
*
* We aren't acquiring lightweight locks for the predicate lock or lock
* target structures associated with this transaction unless we're going
@@ -2491,7 +2491,7 @@ PredicateLockTuple(Relation relation, HeapTuple tuple, Snapshot snapshot)
}
/*
- * Do quick-but-not-definitive test for a relation lock first. This will
+ * Do quick-but-not-definitive test for a relation lock first. This will
* never cause a return when the relation is *not* locked, but will
* occasionally let the check continue when there really *is* a relation
* level lock.
@@ -2803,7 +2803,7 @@ exit:
* transaction which is not serializable.
*
* NOTE: This is currently only called with transfer set to true, but that may
- * change. If we decide to clean up the locks from a table on commit of a
+ * change. If we decide to clean up the locks from a table on commit of a
* transaction which executed DROP TABLE, the false condition will be useful.
*/
static void
@@ -2884,7 +2884,7 @@ DropAllPredicateLocksFromTable(Relation relation, bool transfer)
continue; /* already the right lock */
/*
- * If we made it here, we have work to do. We make sure the heap
+ * If we made it here, we have work to do. We make sure the heap
* relation lock exists, then we walk the list of predicate locks for
* the old target we found, moving all locks to the heap relation lock
* -- unless they already hold that.
@@ -3229,7 +3229,7 @@ ReleasePredicateLocks(bool isCommit)
* If this value is changing, we don't care that much whether we get the
* old or new value -- it is just used to determine how far
* GlobalSerizableXmin must advance before this transaction can be fully
- * cleaned up. The worst that could happen is we wait for one more
+ * cleaned up. The worst that could happen is we wait for one more
* transaction to complete before freeing some RAM; correctness of visible
* behavior is not affected.
*/
@@ -3332,7 +3332,7 @@ ReleasePredicateLocks(bool isCommit)
}
/*
- * Release all outConflicts to committed transactions. If we're rolling
+ * Release all outConflicts to committed transactions. If we're rolling
* back clear them all. Set SXACT_FLAG_CONFLICT_OUT if any point to
* previously committed transactions.
*/
@@ -3651,7 +3651,7 @@ ClearOldPredicateLocks(void)
* matter -- but keep the transaction entry itself and any outConflicts.
*
* When the summarize flag is set, we've run short of room for sxact data
- * and must summarize to the SLRU. Predicate locks are transferred to a
+ * and must summarize to the SLRU. Predicate locks are transferred to a
* dummy "old" transaction, with duplicate locks on a single target
* collapsing to a single lock with the "latest" commitSeqNo from among
* the conflicting locks..
@@ -3844,7 +3844,7 @@ XidIsConcurrent(TransactionId xid)
/*
* CheckForSerializableConflictOut
* We are reading a tuple which has been modified. If it is visible to
- * us but has been deleted, that indicates a rw-conflict out. If it's
+ * us but has been deleted, that indicates a rw-conflict out. If it's
* not visible and was created by a concurrent (overlapping)
* serializable transaction, that is also a rw-conflict out,
*
@@ -3931,7 +3931,7 @@ CheckForSerializableConflictOut(bool visible, Relation relation,
Assert(TransactionIdFollowsOrEquals(xid, TransactionXmin));
/*
- * Find top level xid. Bail out if xid is too early to be a conflict, or
+ * Find top level xid. Bail out if xid is too early to be a conflict, or
* if it's our own xid.
*/
if (TransactionIdEquals(xid, GetTopTransactionIdIfAny()))
@@ -3996,7 +3996,7 @@ CheckForSerializableConflictOut(bool visible, Relation relation,
/*
* We have a conflict out to a transaction which has a conflict out to a
- * summarized transaction. That summarized transaction must have
+ * summarized transaction. That summarized transaction must have
* committed first, and we can't tell when it committed in relation to our
* snapshot acquisition, so something needs to be canceled.
*/
@@ -4030,7 +4030,7 @@ CheckForSerializableConflictOut(bool visible, Relation relation,
&& (!SxactHasConflictOut(sxact)
|| MySerializableXact->SeqNo.lastCommitBeforeSnapshot < sxact->SeqNo.earliestOutConflictCommit))
{
- /* Read-only transaction will appear to run first. No conflict. */
+ /* Read-only transaction will appear to run first. No conflict. */
LWLockRelease(SerializableXactHashLock);
return;
}
@@ -4621,7 +4621,7 @@ OnConflict_CheckForSerializationFailure(const SERIALIZABLEXACT *reader,
*
* If a dangerous structure is found, the pivot (the near conflict) is
* marked for death, because rolling back another transaction might mean
- * that we flail without ever making progress. This transaction is
+ * that we flail without ever making progress. This transaction is
* committing writes, so letting it commit ensures progress. If we
* canceled the far conflict, it might immediately fail again on retry.
*/
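
The OldSerXidPagePrecedesLogically comment above describes a wraparound-aware comparison modulo (OLDSERXID_MAX_PAGE+1)/2: page numbers live on a circle, and p "precedes" q if p-q, reduced into a window centered on zero, comes out negative. A self-contained sketch of that test with an illustrative page-space size:

#include <stdio.h>
#include <stdbool.h>

#define MAX_PAGE 9999                   /* hypothetical stand-in for OLDSERXID_MAX_PAGE */

static bool
page_precedes(int p, int q)
{
    int diff = p - q;

    /* reduce into the range -(MAX_PAGE+1)/2 .. +(MAX_PAGE+1)/2 */
    if (diff >= ((MAX_PAGE + 1) / 2))
        diff -= MAX_PAGE + 1;
    else if (diff < -((MAX_PAGE + 1) / 2))
        diff += MAX_PAGE + 1;

    return diff < 0;
}

int
main(void)
{
    printf("%d\n", page_precedes(10, 20));      /* 1: 10 is before 20 */
    printf("%d\n", page_precedes(9998, 3));     /* 1: 9998 is "before" 3 across the wrap */
    printf("%d\n", page_precedes(3, 9998));     /* 0 */
    return 0;
}
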
diff --git a/src/backend/storage/lmgr/proc.c b/src/backend/storage/lmgr/proc.c
index c27c48fc91f..6045bf2b79e 100644
--- a/src/backend/storage/lmgr/proc.c
+++ b/src/backend/storage/lmgr/proc.c
@@ -236,10 +236,10 @@ InitProcGlobal(void)
/*
* Newly created PGPROCs for normal backends or for autovacuum must be
- * queued up on the appropriate free list. Because there can only
+ * queued up on the appropriate free list. Because there can only
* ever be a small, fixed number of auxiliary processes, no free list
* is used in that case; InitAuxiliaryProcess() instead uses a linear
- * search. PGPROCs for prepared transactions are added to a free list
+ * search. PGPROCs for prepared transactions are added to a free list
* by TwoPhaseShmemInit().
*/
if (i < MaxConnections)
@@ -398,7 +398,7 @@ InitProcess(void)
/*
* We might be reusing a semaphore that belonged to a failed process. So
- * be careful and reinitialize its value here. (This is not strictly
+ * be careful and reinitialize its value here. (This is not strictly
* necessary anymore, but seems like a good idea for cleanliness.)
*/
PGSemaphoreReset(&MyProc->sem);
@@ -448,7 +448,7 @@ InitProcessPhase2(void)
*
* Auxiliary processes are presently not expected to wait for real (lockmgr)
* locks, so we need not set up the deadlock checker. They are never added
- * to the ProcArray or the sinval messaging mechanism, either. They also
+ * to the ProcArray or the sinval messaging mechanism, either. They also
* don't get a VXID assigned, since this is only useful when we actually
* hold lockmgr locks.
*
@@ -555,7 +555,7 @@ InitAuxiliaryProcess(void)
/*
* We might be reusing a semaphore that belonged to a failed process. So
- * be careful and reinitialize its value here. (This is not strictly
+ * be careful and reinitialize its value here. (This is not strictly
* necessary anymore, but seems like a good idea for cleanliness.)
*/
PGSemaphoreReset(&MyProc->sem);
@@ -701,7 +701,7 @@ LockErrorCleanup(void)
/*
* We used to do PGSemaphoreReset() here to ensure that our proc's wait
- * semaphore is reset to zero. This prevented a leftover wakeup signal
+ * semaphore is reset to zero. This prevented a leftover wakeup signal
* from remaining in the semaphore if someone else had granted us the lock
* we wanted before we were able to remove ourselves from the wait-list.
* However, now that ProcSleep loops until waitStatus changes, a leftover
@@ -828,7 +828,7 @@ ProcKill(int code, Datum arg)
/*
* AuxiliaryProcKill() -- Cut-down version of ProcKill for auxiliary
- * processes (bgwriter, etc). The PGPROC and sema are not released, only
+ * processes (bgwriter, etc). The PGPROC and sema are not released, only
* marked as not-in-use.
*/
static void
@@ -954,7 +954,7 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
*
* Special case: if I find I should go in front of some waiter, check to
* see if I conflict with already-held locks or the requests before that
- * waiter. If not, then just grant myself the requested lock immediately.
+ * waiter. If not, then just grant myself the requested lock immediately.
* This is the same as the test for immediate grant in LockAcquire, except
* we are only considering the part of the wait queue before my insertion
* point.
@@ -973,7 +973,7 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
if (lockMethodTable->conflictTab[lockmode] & proc->heldLocks)
{
/*
- * Yes, so we have a deadlock. Easiest way to clean up
+ * Yes, so we have a deadlock. Easiest way to clean up
* correctly is to call RemoveFromWaitQueue(), but we
* can't do that until we are *on* the wait queue. So, set
* a flag to check below, and break out of loop. Also,
@@ -1080,8 +1080,8 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
/*
* If someone wakes us between LWLockRelease and PGSemaphoreLock,
- * PGSemaphoreLock will not block. The wakeup is "saved" by the semaphore
- * implementation. While this is normally good, there are cases where a
+ * PGSemaphoreLock will not block. The wakeup is "saved" by the semaphore
+ * implementation. While this is normally good, there are cases where a
* saved wakeup might be leftover from a previous operation (for example,
* we aborted ProcWaitForSignal just before someone did ProcSendSignal).
* So, loop to wait again if the waitStatus shows we haven't been granted
@@ -1101,7 +1101,7 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
/*
* waitStatus could change from STATUS_WAITING to something else
- * asynchronously. Read it just once per loop to prevent surprising
+ * asynchronously. Read it just once per loop to prevent surprising
* behavior (such as missing log messages).
*/
myWaitStatus = MyProc->waitStatus;
@@ -1496,10 +1496,10 @@ check_done:
* This can share the semaphore normally used for waiting for locks,
* since a backend could never be waiting for a lock and a signal at
* the same time. As with locks, it's OK if the signal arrives just
- * before we actually reach the waiting state. Also as with locks,
+ * before we actually reach the waiting state. Also as with locks,
* it's necessary that the caller be robust against bogus wakeups:
* always check that the desired state has occurred, and wait again
- * if not. This copes with possible "leftover" wakeups.
+ * if not. This copes with possible "leftover" wakeups.
*/
void
ProcWaitForSignal(void)
@@ -1553,7 +1553,7 @@ ProcSendSignal(int pid)
/*
* Enable the SIGALRM interrupt to fire after the specified delay
*
- * Delay is given in milliseconds. Caller should be sure a SIGALRM
+ * Delay is given in milliseconds. Caller should be sure a SIGALRM
* signal handler is installed before this is called.
*
* This code properly handles nesting of deadlock timeout alarms within
@@ -1604,7 +1604,7 @@ enable_sig_alarm(int delayms, bool is_statement_timeout)
* NOTE: in this case it is possible that this routine will be
* interrupted by the previously-set timer alarm. This is okay
* because the signal handler will do only what it should do according
- * to the state variables. The deadlock checker may get run earlier
+ * to the state variables. The deadlock checker may get run earlier
* than normal, but that does no harm.
*/
timeout_start_time = GetCurrentTimestamp();
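
The ProcSleep comments above stress that a semaphore wakeup may be stale, so the waiter must loop and re-check waitStatus rather than trust any single wakeup. A sketch of that pattern with POSIX semaphores -- illustrative only; the real code uses PGSemaphore and reads the status carefully once per loop:

#include <semaphore.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define STATUS_WAITING 0
#define STATUS_OK      1

static sem_t        wait_sem;
static volatile int wait_status = STATUS_WAITING;

static void *
granter(void *arg)
{
    (void) arg;
    sleep(1);
    wait_status = STATUS_OK;            /* grant the "lock" ... */
    sem_post(&wait_sem);                /* ... then wake the waiter */
    return NULL;
}

int
main(void)
{
    pthread_t t;

    sem_init(&wait_sem, 0, 0);
    sem_post(&wait_sem);                /* simulate a leftover, stale wakeup */

    pthread_create(&t, NULL, granter, NULL);

    /* a wakeup alone proves nothing; only the status change does */
    do
    {
        sem_wait(&wait_sem);
    } while (wait_status == STATUS_WAITING);

    printf("granted\n");
    pthread_join(t, NULL);
    sem_destroy(&wait_sem);
    return 0;
}
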
diff --git a/src/backend/storage/lmgr/s_lock.c b/src/backend/storage/lmgr/s_lock.c
index bc8d89f8c17..7107aaeae8a 100644
--- a/src/backend/storage/lmgr/s_lock.c
+++ b/src/backend/storage/lmgr/s_lock.c
@@ -78,7 +78,7 @@ s_lock(volatile slock_t *lock, const char *file, int line)
*
* We time out and declare error after NUM_DELAYS delays (thus, exactly
* that many tries). With the given settings, this will usually take 2 or
- * so minutes. It seems better to fix the total number of tries (and thus
+ * so minutes. It seems better to fix the total number of tries (and thus
* the probability of unintended failure) than to fix the total time
* spent.
*
@@ -141,7 +141,7 @@ s_lock(volatile slock_t *lock, const char *file, int line)
* Note: spins_per_delay is local within our current process. We want to
* average these observations across multiple backends, since it's
* relatively rare for this function to even get entered, and so a single
- * backend might not live long enough to converge on a good value. That
+ * backend might not live long enough to converge on a good value. That
* is handled by the two routines below.
*/
if (cur_delay == 0)
@@ -180,7 +180,7 @@ update_spins_per_delay(int shared_spins_per_delay)
/*
* We use an exponential moving average with a relatively slow adaption
* rate, so that noise in any one backend's result won't affect the shared
- * value too much. As long as both inputs are within the allowed range,
+ * value too much. As long as both inputs are within the allowed range,
* the result must be too, so we need not worry about clamping the result.
*
* We deliberately truncate rather than rounding; this is so that single
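
The s_lock.c comments above describe blending each backend's observed spins_per_delay into the shared estimate with a slow exponential moving average, truncating rather than rounding. A sketch of that kind of update (the 15:1 weighting below is shown as an example of such a blend, not a claim about the exact constants):

#include <stdio.h>

static int
update_spins_per_delay(int shared_spins_per_delay, int local_spins_per_delay)
{
    /* weight the old shared value 15:1 against the new local observation;
     * integer division truncates, matching the "truncate, don't round" note */
    return (shared_spins_per_delay * 15 + local_spins_per_delay) / 16;
}

int
main(void)
{
    int shared = 100;

    /* a backend that never had to sleep keeps nudging the estimate upward */
    for (int i = 0; i < 5; i++)
    {
        shared = update_spins_per_delay(shared, 1000);
        printf("after report %d: spins_per_delay = %d\n", i + 1, shared);
    }
    return 0;
}
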
diff --git a/src/backend/storage/lmgr/spin.c b/src/backend/storage/lmgr/spin.c
index 56442ed073e..479e71225f9 100644
--- a/src/backend/storage/lmgr/spin.c
+++ b/src/backend/storage/lmgr/spin.c
@@ -5,7 +5,7 @@
*
*
* For machines that have test-and-set (TAS) instructions, s_lock.h/.c
- * define the spinlock implementation. This file contains only a stub
+ * define the spinlock implementation. This file contains only a stub
* implementation for spinlocks using PGSemaphores. Unless semaphores
* are implemented in a way that doesn't involve a kernel call, this
* is too slow to be very useful :-(
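
As the spin.c header above says, the fallback emulates spinlocks with semaphores. A minimal POSIX-semaphore illustration (not PostgreSQL code) of why that works but costs a kernel call per lock/unlock, which is the slowness the comment laments:

#include <semaphore.h>
#include <stdio.h>

static sem_t spin_sem;

int
main(void)
{
    sem_init(&spin_sem, 0, 1);          /* 1 = unlocked */

    sem_wait(&spin_sem);                /* S_LOCK equivalent */
    printf("in critical section\n");
    sem_post(&spin_sem);                /* S_UNLOCK equivalent */

    sem_destroy(&spin_sem);
    return 0;
}
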
diff --git a/src/backend/storage/page/bufpage.c b/src/backend/storage/page/bufpage.c
index 4d7e46a3b2c..eb09722e8c2 100644
--- a/src/backend/storage/page/bufpage.c
+++ b/src/backend/storage/page/bufpage.c
@@ -53,7 +53,7 @@ PageInit(Page page, Size pageSize, Size specialSize)
* PageHeaderIsValid
* Check that the header fields of a page appear valid.
*
- * This is called when a page has just been read in from disk. The idea is
+ * This is called when a page has just been read in from disk. The idea is
* to cheaply detect trashed pages before we go nuts following bogus item
* pointers, testing invalid transaction identifiers, etc.
*
@@ -98,7 +98,7 @@ PageHeaderIsValid(PageHeader page)
/*
* PageAddItem
*
- * Add an item to a page. Return value is offset at which it was
+ * Add an item to a page. Return value is offset at which it was
* inserted, or InvalidOffsetNumber if there's not room to insert.
*
* If overwrite is true, we just store the item at the specified
@@ -698,7 +698,7 @@ PageIndexTupleDelete(Page page, OffsetNumber offnum)
* PageIndexMultiDelete
*
* This routine handles the case of deleting multiple tuples from an
- * index page at once. It is considerably faster than a loop around
+ * index page at once. It is considerably faster than a loop around
* PageIndexTupleDelete ... however, the caller *must* supply the array
* of item numbers to be deleted in item number order!
*/
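
PageHeaderIsValid, per the comment above, exists to cheaply reject trashed pages right after they are read, before anything follows their item pointers. A simplified stand-in (not the real PageHeaderData) showing the kind of ordering checks involved:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define PAGE_SIZE 8192

typedef struct
{
    uint16_t pd_lower;                  /* end of line-pointer array */
    uint16_t pd_upper;                  /* start of tuple space */
    uint16_t pd_special;                /* start of special space */
} MiniPageHeader;

static bool
mini_page_header_is_valid(const MiniPageHeader *p)
{
    return p->pd_lower >= sizeof(MiniPageHeader) &&
           p->pd_lower <= p->pd_upper &&
           p->pd_upper <= p->pd_special &&
           p->pd_special <= PAGE_SIZE;
}

int
main(void)
{
    MiniPageHeader good = {24, 7000, 8192};
    MiniPageHeader trashed = {40000, 12, 99};   /* garbage from a bad read */

    printf("good:    %d\n", mini_page_header_is_valid(&good));     /* 1 */
    printf("trashed: %d\n", mini_page_header_is_valid(&trashed));  /* 0 */
    return 0;
}
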
diff --git a/src/backend/storage/smgr/md.c b/src/backend/storage/smgr/md.c
index 3f4ab49dab4..3005a22b2d8 100644
--- a/src/backend/storage/smgr/md.c
+++ b/src/backend/storage/smgr/md.c
@@ -79,7 +79,7 @@
* not needed because of an mdtruncate() operation. The reason for leaving
* them present at size zero, rather than unlinking them, is that other
* backends and/or the checkpointer might be holding open file references to
- * such segments. If the relation expands again after mdtruncate(), such
+ * such segments. If the relation expands again after mdtruncate(), such
* that a deactivated segment becomes active again, it is important that
* such file references still be valid --- else data might get written
* out to an unlinked old copy of a segment file that will eventually
@@ -116,7 +116,7 @@ static MemoryContext MdCxt; /* context for all md.c allocations */
* we keep track of pending fsync operations: we need to remember all relation
* segments that have been written since the last checkpoint, so that we can
* fsync them down to disk before completing the next checkpoint. This hash
- * table remembers the pending operations. We use a hash table mostly as
+ * table remembers the pending operations. We use a hash table mostly as
* a convenient way of merging duplicate requests.
*
* We use a similar mechanism to remember no-longer-needed files that can
@@ -284,7 +284,7 @@ mdcreate(SMgrRelation reln, ForkNumber forkNum, bool isRedo)
* During bootstrap, there are cases where a system relation will be
* accessed (by internal backend processes) before the bootstrap
* script nominally creates it. Therefore, allow the file to exist
- * already, even if isRedo is not set. (See also mdopen)
+ * already, even if isRedo is not set. (See also mdopen)
*/
if (isRedo || IsBootstrapProcessingMode())
fd = PathNameOpenFile(path, O_RDWR | PG_BINARY, 0600);
@@ -329,7 +329,7 @@ mdcreate(SMgrRelation reln, ForkNumber forkNum, bool isRedo)
* if the contents of the file were repopulated by subsequent WAL entries.
* But if we didn't WAL-log insertions, but instead relied on fsyncing the
* file after populating it (as for instance CLUSTER and CREATE INDEX do),
- * the contents of the file would be lost forever. By leaving the empty file
+ * the contents of the file would be lost forever. By leaving the empty file
* until after the next checkpoint, we prevent reassignment of the relfilenode
* number until it's safe, because relfilenode assignment skips over any
* existing file.
@@ -342,7 +342,7 @@ mdcreate(SMgrRelation reln, ForkNumber forkNum, bool isRedo)
*
* All the above applies only to the relation's main fork; other forks can
* just be removed immediately, since they are not needed to prevent the
- * relfilenode number from being recycled. Also, we do not carefully
+ * relfilenode number from being recycled. Also, we do not carefully
* track whether other forks have been created or not, but just attempt to
* unlink them unconditionally; so we should never complain about ENOENT.
*
@@ -359,7 +359,7 @@ mdunlink(RelFileNodeBackend rnode, ForkNumber forkNum, bool isRedo)
{
/*
* We have to clean out any pending fsync requests for the doomed
- * relation, else the next mdsync() will fail. There can't be any such
+ * relation, else the next mdsync() will fail. There can't be any such
* requests for a temp relation, though. We can send just one request
* even when deleting multiple forks, since the fsync queuing code accepts
* the "InvalidForkNumber = all forks" convention.
@@ -496,7 +496,7 @@ mdextend(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
/*
* Note: because caller usually obtained blocknum by calling mdnblocks,
* which did a seek(SEEK_END), this seek is often redundant and will be
- * optimized away by fd.c. It's not redundant, however, if there is a
+ * optimized away by fd.c. It's not redundant, however, if there is a
* partial page at the end of the file. In that case we want to try to
* overwrite the partial page with a full page. It's also not redundant
* if bufmgr.c had to dump another buffer of the same file to make room
@@ -796,9 +796,9 @@ mdnblocks(SMgrRelation reln, ForkNumber forknum)
* exactly RELSEG_SIZE long, and it's useless to recheck that each time.
*
* NOTE: this assumption could only be wrong if another backend has
- * truncated the relation. We rely on higher code levels to handle that
+ * truncated the relation. We rely on higher code levels to handle that
* scenario by closing and re-opening the md fd, which is handled via
- * relcache flush. (Since the checkpointer doesn't participate in
+ * relcache flush. (Since the checkpointer doesn't participate in
* relcache flush, it could have segment chain entries for inactive
* segments; that's OK because the checkpointer never needs to compute
* relation size.)
@@ -992,7 +992,7 @@ mdsync(void)
/*
* If we are in the checkpointer, the sync had better include all fsync
- * requests that were queued by backends up to this point. The tightest
+ * requests that were queued by backends up to this point. The tightest
* race condition that could occur is that a buffer that must be written
* and fsync'd for the checkpoint could have been dumped by a backend just
* before it was visited by BufferSync(). We know the backend will have
@@ -1108,7 +1108,7 @@ mdsync(void)
* that have been deleted (unlinked) by the time we get to
* them. Rather than just hoping an ENOENT (or EACCES on
* Windows) error can be ignored, what we do on error is
- * absorb pending requests and then retry. Since mdunlink()
+ * absorb pending requests and then retry. Since mdunlink()
* queues a "cancel" message before actually unlinking, the
* fsync request is guaranteed to be marked canceled after the
* absorb if it really was this case. DROP DATABASE likewise
@@ -1212,7 +1212,7 @@ mdsync(void)
/*
* We've finished everything that was requested before we started to
- * scan the entry. If no new requests have been inserted meanwhile,
+ * scan the entry. If no new requests have been inserted meanwhile,
* remove the entry. Otherwise, update its cycle counter, as all the
* requests now in it must have arrived during this cycle.
*/
@@ -1317,7 +1317,7 @@ mdpostckpt(void)
/*
* As in mdsync, we don't want to stop absorbing fsync requests for a
- * long time when there are many deletions to be done. We can safely
+ * long time when there are many deletions to be done. We can safely
* call AbsorbFsyncRequests() at this point in the loop (note it might
* try to delete list entries).
*/
@@ -1442,7 +1442,7 @@ RememberFsyncRequest(RelFileNode rnode, ForkNumber forknum, BlockNumber segno)
/*
* We can't just delete the entry since mdsync could have an
* active hashtable scan. Instead we delete the bitmapsets; this
- * is safe because of the way mdsync is coded. We also set the
+ * is safe because of the way mdsync is coded. We also set the
* "canceled" flags so that mdsync can tell that a cancel arrived
* for the fork(s).
*/
@@ -1544,7 +1544,7 @@ RememberFsyncRequest(RelFileNode rnode, ForkNumber forknum, BlockNumber segno)
/*
* NB: it's intentional that we don't change cycle_ctr if the entry
- * already exists. The cycle_ctr must represent the oldest fsync
+ * already exists. The cycle_ctr must represent the oldest fsync
* request that could be in the entry.
*/
@@ -1715,7 +1715,7 @@ _mdfd_getseg(SMgrRelation reln, ForkNumber forknum, BlockNumber blkno,
{
/*
* Normally we will create new segments only if authorized by the
- * caller (i.e., we are doing mdextend()). But when doing WAL
+ * caller (i.e., we are doing mdextend()). But when doing WAL
* recovery, create segments anyway; this allows cases such as
* replaying WAL data that has a write into a high-numbered
* segment of a relation that was later deleted. We want to go
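
The md.c comments above assume the segmented file layout: each relation fork is a chain of files of at most RELSEG_SIZE blocks, with segment 0 using the bare filename and segment N appearing as "filename.N". A small sketch of the block-to-segment arithmetic behind _mdfd_getseg and the seek positions md.c computes, with constants matching a common default build (shown only as an example):

#include <stdio.h>

#define BLCKSZ      8192
#define RELSEG_SIZE 131072              /* blocks per segment file (1 GB / 8 kB) */

int
main(void)
{
    unsigned int blocknum = 300000;     /* some block in a large relation */

    unsigned int segno   = blocknum / RELSEG_SIZE;   /* which segment file: 2 */
    unsigned int segoff  = blocknum % RELSEG_SIZE;   /* block within that file: 37856 */
    long         seekpos = (long) segoff * BLCKSZ;   /* byte offset for the seek */

    printf("block %u -> segment %u, block %u, byte offset %ld\n",
           blocknum, segno, segoff, seekpos);
    return 0;
}
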
diff --git a/src/backend/storage/smgr/smgr.c b/src/backend/storage/smgr/smgr.c
index 18b972f2c69..7908fb8fbe1 100644
--- a/src/backend/storage/smgr/smgr.c
+++ b/src/backend/storage/smgr/smgr.c
@@ -598,7 +598,7 @@ smgrtruncate(SMgrRelation reln, ForkNumber forknum, BlockNumber nblocks)
* Send a shared-inval message to force other backends to close any smgr
* references they may have for this rel. This is useful because they
* might have open file pointers to segments that got removed, and/or
- * smgr_targblock variables pointing past the new rel end. (The inval
+ * smgr_targblock variables pointing past the new rel end. (The inval
* message will come back to our backend, too, causing a
* probably-unnecessary local smgr flush. But we don't expect that this
* is a performance-critical path.) As in the unlink code, we want to be