author     Bruce Momjian <bruce@momjian.us>    2014-05-06 11:26:26 -0400
committer  Bruce Momjian <bruce@momjian.us>    2014-05-06 11:26:26 -0400
commit     2616a5d300e5bb5a2838d2a065afa3740e08727f (patch)
tree       5939408c63409abda810217fe812749a5da7345b /src/backend/storage/buffer
parent     e0070a6858cfcd2c4129dfa93bc042d6d86732c8 (diff)
Remove tabs after spaces in C comments
This was not changed in HEAD, but will be done later as part of a
pgindent run. Future pgindent runs will also do this.
Report by Tom Lane
Backpatch through all supported branches, but not HEAD
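The change is purely mechanical whitespace cleanup: wherever a comment had a tab following a space (typically after a sentence-ending period), the tab becomes plain spaces. As an illustrative aside, here is a minimal C sketch, not part of the commit and much cruder than pgindent itself: it reads a source file on stdin and reports lines containing a tab right after a space, anywhere on the line rather than only inside comments.

#include <stdio.h>

int
main(void)
{
	int			c;
	int			prev = '\0';
	long		lineno = 1;
	int			reported = 0;

	while ((c = getchar()) != EOF)
	{
		/* The pattern this commit removes: a space followed by a tab. */
		if (prev == ' ' && c == '\t' && !reported)
		{
			printf("line %ld: tab after space\n", lineno);
			reported = 1;		/* one report per line is enough */
		}
		if (c == '\n')
		{
			lineno++;
			reported = 0;
		}
		prev = c;
	}
	return 0;
}

Run it as, for example, ./check < bufmgr.c; on the pre-patch files it should flag the lines this diff touches.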
Diffstat (limited to 'src/backend/storage/buffer')
-rw-r--r--   src/backend/storage/buffer/buf_init.c   |  4
-rw-r--r--   src/backend/storage/buffer/buf_table.c  |  4
-rw-r--r--   src/backend/storage/buffer/bufmgr.c     | 40
-rw-r--r--   src/backend/storage/buffer/freelist.c   | 10
-rw-r--r--   src/backend/storage/buffer/localbuf.c   |  6

5 files changed, 32 insertions(+), 32 deletions(-)
diff --git a/src/backend/storage/buffer/buf_init.c b/src/backend/storage/buffer/buf_init.c
index dadb49dae9e..0f1b77713c3 100644
--- a/src/backend/storage/buffer/buf_init.c
+++ b/src/backend/storage/buffer/buf_init.c
@@ -44,7 +44,7 @@ int32	   *PrivateRefCount;
  *
  * IO_IN_PROGRESS -- this is a flag in the buffer descriptor.
  *		It must be set when an IO is initiated and cleared at
- *		the end of the IO.	It is there to make sure that one
+ *		the end of the IO.  It is there to make sure that one
  *		process doesn't start to use a buffer while another is
  *		faulting it in.  see WaitIO and related routines.
  *
@@ -54,7 +54,7 @@ int32	   *PrivateRefCount;
  *
  * PrivateRefCount -- Each buffer also has a private refcount that keeps
  *		track of the number of times the buffer is pinned in the current
- *		process.	This is used for two purposes: first, if we pin a
+ *		process.  This is used for two purposes: first, if we pin a
  *		a buffer more than once, we only need to change the shared refcount
  *		once, thus only lock the shared state once; second, when a transaction
  *		aborts, it should only unpin the buffers exactly the number of times it
diff --git a/src/backend/storage/buffer/buf_table.c b/src/backend/storage/buffer/buf_table.c
index 33ecd1214e5..899fa593b29 100644
--- a/src/backend/storage/buffer/buf_table.c
+++ b/src/backend/storage/buffer/buf_table.c
@@ -3,7 +3,7 @@
  * buf_table.c
  *	  routines for mapping BufferTags to buffer indexes.
  *
- * Note: the routines in this file do no locking of their own.	The caller
+ * Note: the routines in this file do no locking of their own.  The caller
  * must hold a suitable lock on the appropriate BufMappingLock, as specified
  * in the comments.  We can't do the locking inside these functions because
  * in most cases the caller needs to adjust the buffer header contents
@@ -112,7 +112,7 @@ BufTableLookup(BufferTag *tagPtr, uint32 hashcode)
  * Insert a hashtable entry for given tag and buffer ID,
  * unless an entry already exists for that tag
  *
- * Returns -1 on successful insertion.	If a conflicting entry exists
+ * Returns -1 on successful insertion.  If a conflicting entry exists
  * already, returns the buffer ID in that entry.
  *
  * Caller must hold exclusive lock on BufMappingLock for tag's partition
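An aside on the PrivateRefCount comment in the buf_init.c hunk above: the scheme it describes can be sketched in a few lines of self-contained C. This is a hypothetical single-backend model, not PostgreSQL source, and locking of the shared counter is deliberately omitted. The point is that the shared refcount is touched only on the 0-to-1 and 1-to-0 transitions of the private count, so repeated pins within one process stay cheap, and at transaction abort the private count says exactly how many unpins are owed.

#include <assert.h>

#define N_SKETCH_BUFFERS 16

static int	sharedRefCount[N_SKETCH_BUFFERS];	/* shared across backends */
static int	privateRefCount[N_SKETCH_BUFFERS];	/* this backend only */

void
sketch_pin(int buf)
{
	if (privateRefCount[buf] == 0)
		sharedRefCount[buf]++;	/* first local pin: touch shared state once */
	privateRefCount[buf]++;
}

void
sketch_unpin(int buf)
{
	assert(privateRefCount[buf] > 0);
	if (--privateRefCount[buf] == 0)
		sharedRefCount[buf]--;	/* last local pin released */
}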
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index b6539514f89..58b64f6570d 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -111,7 +111,7 @@ static void AtProcExit_Buffers(int code, Datum arg);
  * PrefetchBuffer -- initiate asynchronous read of a block of a relation
  *
  * This is named by analogy to ReadBuffer but doesn't actually allocate a
- * buffer.	Instead it tries to ensure that a future ReadBuffer for the given
+ * buffer.  Instead it tries to ensure that a future ReadBuffer for the given
  * block will not be delayed by the I/O.  Prefetching is optional.
  * No-op if prefetching isn't compiled in.
  */
@@ -201,7 +201,7 @@ ReadBuffer(Relation reln, BlockNumber blockNum)
  * Assume when this function is called, that reln has been opened already.
  *
  * In RBM_NORMAL mode, the page is read from disk, and the page header is
- * validated.	An error is thrown if the page header is not valid.  (But
+ * validated.  An error is thrown if the page header is not valid.  (But
  * note that an all-zero page is considered "valid"; see PageIsVerified().)
  *
  * RBM_ZERO_ON_ERROR is like the normal mode, but if the page header is not
@@ -209,7 +209,7 @@ ReadBuffer(Relation reln, BlockNumber blockNum)
  * for non-critical data, where the caller is prepared to repair errors.
  *
  * In RBM_ZERO mode, if the page isn't in buffer cache already, it's filled
- * with zeros instead of reading it from disk.	Useful when the caller is
+ * with zeros instead of reading it from disk.  Useful when the caller is
  * going to fill the page from scratch, since this saves I/O and avoids
  * unnecessary failure if the page-on-disk has corrupt page headers.
  * Caution: do not use this mode to read a page that is beyond the relation's
@@ -365,7 +365,7 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
 		 * This can happen because mdread doesn't complain about reads beyond
 		 * EOF (when zero_damaged_pages is ON) and so a previous attempt to
 		 * read a block beyond EOF could have left a "valid" zero-filled
-		 * buffer.	Unfortunately, we have also seen this case occurring
+		 * buffer.  Unfortunately, we have also seen this case occurring
 		 * because of buggy Linux kernels that sometimes return an
 		 * lseek(SEEK_END) result that doesn't account for a recent write. In
 		 * that situation, the pre-existing buffer would contain valid data
@@ -575,7 +575,7 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,

 	/*
 	 * Didn't find it in the buffer pool.  We'll have to initialize a new
-	 * buffer.	Remember to unlock the mapping lock while doing the work.
+	 * buffer.  Remember to unlock the mapping lock while doing the work.
 	 */
 	LWLockRelease(newPartitionLock);

@@ -585,7 +585,7 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
 		bool		lock_held;

 		/*
-		 * Select a victim buffer.	The buffer is returned with its header
+		 * Select a victim buffer.  The buffer is returned with its header
 		 * spinlock still held!  Also (in most cases) the BufFreelistLock is
 		 * still held, since it would be bad to hold the spinlock while
 		 * possibly waking up other processes.
@@ -634,7 +634,7 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
 			 * If using a nondefault strategy, and writing the buffer
 			 * would require a WAL flush, let the strategy decide whether
 			 * to go ahead and write/reuse the buffer or to choose another
-			 * victim.	We need lock to inspect the page LSN, so this
+			 * victim.  We need lock to inspect the page LSN, so this
 			 * can't be done inside StrategyGetBuffer.
 			 */
 			if (strategy != NULL &&
@@ -755,7 +755,7 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
 	{
 		/*
 		 * We can only get here if (a) someone else is still reading
-		 * in the page, or (b) a previous read attempt failed.	We
+		 * in the page, or (b) a previous read attempt failed.  We
 		 * have to wait for any active read attempt to finish, and
 		 * then set up our own read attempt if the page is still not
 		 * BM_VALID.  StartBufferIO does it all.
@@ -848,7 +848,7 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
  * This is used only in contexts such as dropping a relation.  We assume
  * that no other backend could possibly be interested in using the page,
  * so the only reason the buffer might be pinned is if someone else is
- * trying to write it out.	We have to let them finish before we can
+ * trying to write it out.  We have to let them finish before we can
  * reclaim the buffer.
  *
  * The buffer could get reclaimed by someone else while we are waiting
@@ -947,7 +947,7 @@ retry:
  *
  *		Marks buffer contents as dirty (actual write happens later).
  *
- * Buffer must be pinned and exclusive-locked.	(If caller does not hold
+ * Buffer must be pinned and exclusive-locked.  (If caller does not hold
  * exclusive lock, then somebody could be in process of writing the buffer,
  * leading to risk of bad data written to disk.)
  */
@@ -991,7 +991,7 @@ MarkBufferDirty(Buffer buffer)
  *
  * Formerly, this saved one cycle of acquiring/releasing the BufMgrLock
  * compared to calling the two routines separately.  Now it's mainly just
- * a convenience function.	However, if the passed buffer is valid and
+ * a convenience function.  However, if the passed buffer is valid and
  * already contains the desired block, we just return it as-is; and that
  * does save considerable work compared to a full release and reacquire.
  *
@@ -1043,7 +1043,7 @@ ReleaseAndReadBuffer(Buffer buffer,
  * when we first pin it; for other strategies we just make sure the usage_count
  * isn't zero.  (The idea of the latter is that we don't want synchronized
  * heap scans to inflate the count, but we need it to not be zero to discourage
- * other backends from stealing buffers from our ring.	As long as we cycle
+ * other backends from stealing buffers from our ring.  As long as we cycle
  * through the ring faster than the global clock-sweep cycles, buffers in
  * our ring won't be chosen as victims for replacement by other backends.)
  *
@@ -1051,7 +1051,7 @@ ReleaseAndReadBuffer(Buffer buffer,
  *
  * Note that ResourceOwnerEnlargeBuffers must have been done already.
  *
- * Returns TRUE if buffer is BM_VALID, else FALSE.	This provision allows
+ * Returns TRUE if buffer is BM_VALID, else FALSE.  This provision allows
  * some callers to avoid an extra spinlock cycle.
  */
 static bool
@@ -1204,7 +1204,7 @@ BufferSync(int flags)
 	 * have the flag set.
 	 *
 	 * Note that if we fail to write some buffer, we may leave buffers with
-	 * BM_CHECKPOINT_NEEDED still set.	This is OK since any such buffer would
+	 * BM_CHECKPOINT_NEEDED still set.  This is OK since any such buffer would
 	 * certainly need to be written for the next checkpoint attempt, too.
 	 */
 	num_to_write = 0;
@@ -1945,7 +1945,7 @@ RelationGetNumberOfBlocksInFork(Relation relation, ForkNumber forkNum)
  *		specified relation that have block numbers >= firstDelBlock.
  *		(In particular, with firstDelBlock = 0, all pages are removed.)
  *		Dirty pages are simply dropped, without bothering to write them
- *		out first.	Therefore, this is NOT rollback-able, and so should be
+ *		out first.  Therefore, this is NOT rollback-able, and so should be
  *		used only with extreme caution!
  *
  * Currently, this is called only from smgr.c when the underlying file
@@ -1954,7 +1954,7 @@ RelationGetNumberOfBlocksInFork(Relation relation, ForkNumber forkNum)
  * be deleted momentarily anyway, and there is no point in writing it.
  * It is the responsibility of higher-level code to ensure that the
  * deletion or truncation does not lose any data that could be needed
- * later.	It is also the responsibility of higher-level code to ensure
+ * later.  It is also the responsibility of higher-level code to ensure
  * that no other process could be trying to load more pages of the
  * relation into buffers.
  *
@@ -1997,9 +1997,9 @@ DropRelFileNodeBuffers(RelFileNodeBackend rnode, ForkNumber forkNum,
  *
  *		This function removes all the buffers in the buffer cache for a
  *		particular database.  Dirty pages are simply dropped, without
- *		bothering to write them out first.	This is used when we destroy a
+ *		bothering to write them out first.  This is used when we destroy a
  *		database, to avoid trying to flush data to disk when the directory
- *		tree no longer exists.	Implementation is pretty similar to
+ *		tree no longer exists.  Implementation is pretty similar to
  *		DropRelFileNodeBuffers() which is for destroying just one relation.
  * --------------------------------------------------------------------
  */
@@ -2298,9 +2298,9 @@ SetBufferCommitInfoNeedsSave(Buffer buffer)
 	/*
 	 * This routine might get called many times on the same page, if we are
 	 * making the first scan after commit of an xact that added/deleted many
-	 * tuples.	So, be as quick as we can if the buffer is already dirty.  We
+	 * tuples.  So, be as quick as we can if the buffer is already dirty.  We
 	 * do this by not acquiring spinlock if it looks like the status bits are
-	 * already OK.	(Note it is okay if someone else clears BM_JUST_DIRTIED
+	 * already OK.  (Note it is okay if someone else clears BM_JUST_DIRTIED
 	 * immediately after we look, because the buffer content update is already
 	 * done and will be reflected in the I/O.)
 	 */
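An aside on the ReadBufferExtended mode comments in the bufmgr.c hunks above: the three behaviors can be summarized in a short hedged sketch. This is not the bufmgr.c implementation; sketch_read_block, sketch_header_ok, and sketch_report_error are assumed stand-ins for the real smgr I/O, the PageIsVerified() check, and error reporting.

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

typedef enum
{
	SKETCH_RBM_NORMAL,			/* read page, error out on a bad header */
	SKETCH_RBM_ZERO,			/* skip the read; return a zeroed page */
	SKETCH_RBM_ZERO_ON_ERROR	/* read page, zero it on a bad header */
} SketchReadMode;

/* Assumed stand-ins; not real PostgreSQL functions. */
extern void sketch_read_block(int blkno, char *page);
extern bool sketch_header_ok(const char *page);
extern void sketch_report_error(const char *msg);

void
sketch_read_page(int blkno, char *page, size_t pagesize, SketchReadMode mode)
{
	if (mode == SKETCH_RBM_ZERO)
	{
		/* Caller will rebuild the page from scratch: saves the I/O. */
		memset(page, 0, pagesize);
		return;
	}

	sketch_read_block(blkno, page);

	if (!sketch_header_ok(page))
	{
		if (mode == SKETCH_RBM_ZERO_ON_ERROR)
			memset(page, 0, pagesize);	/* caller is prepared to repair */
		else
			sketch_report_error("invalid page header");
	}
}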
diff --git a/src/backend/storage/buffer/freelist.c b/src/backend/storage/buffer/freelist.c
index bf9903b9f32..73d42961e8a 100644
--- a/src/backend/storage/buffer/freelist.c
+++ b/src/backend/storage/buffer/freelist.c
@@ -36,7 +36,7 @@ typedef struct
 	 */

 	/*
-	 * Statistics.	These counters should be wide enough that they can't
+	 * Statistics.  These counters should be wide enough that they can't
 	 * overflow during a single bgwriter cycle.
 	 */
 	uint32		completePasses; /* Complete cycles of the clock sweep */
@@ -129,7 +129,7 @@ StrategyGetBuffer(BufferAccessStrategy strategy, bool *lock_held)

 	/*
 	 * We count buffer allocation requests so that the bgwriter can estimate
-	 * the rate of buffer consumption.	Note that buffers recycled by a
+	 * the rate of buffer consumption.  Note that buffers recycled by a
 	 * strategy object are intentionally not counted here.
 	 */
 	StrategyControl->numBufferAllocs++;
@@ -248,7 +248,7 @@ StrategyFreeBuffer(volatile BufferDesc *buf)
 *
 * In addition, we return the completed-pass count (which is effectively
 * the higher-order bits of nextVictimBuffer) and the count of recent buffer
- * allocs if non-NULL pointers are passed.	The alloc count is reset after
+ * allocs if non-NULL pointers are passed.  The alloc count is reset after
 * being read.
 */
int
@@ -442,7 +442,7 @@ GetBufferFromRing(BufferAccessStrategy strategy)

 	/*
 	 * If the slot hasn't been filled yet, tell the caller to allocate a new
-	 * buffer with the normal allocation strategy.	He will then fill this
+	 * buffer with the normal allocation strategy.  He will then fill this
 	 * slot by calling AddBufferToRing with the new buffer.
 	 */
 	bufnum = strategy->buffers[strategy->current];
@@ -495,7 +495,7 @@ AddBufferToRing(BufferAccessStrategy strategy, volatile BufferDesc *buf)
 *
 * When a nondefault strategy is used, the buffer manager calls this function
 * when it turns out that the buffer selected by StrategyGetBuffer needs to
- * be written out and doing so would require flushing WAL too.	This gives us
+ * be written out and doing so would require flushing WAL too.  This gives us
 * a chance to choose a different victim.
 *
 * Returns true if buffer manager should ask for a new victim, and false
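An aside on the clock-sweep and ring comments in the freelist.c hunks above: the global sweep that strategy rings try to outrun can be sketched compactly. This is a hypothetical single-threaded model, not freelist.c itself; the real code consults buffer headers under a spinlock and checks the freelist before sweeping at all.

#include <stddef.h>

#define N_SKETCH_BUFFERS 128

typedef struct
{
	int			refcount;		/* pins currently held on the buffer */
	int			usage_count;	/* bumped on use, decayed by the sweep */
} SketchBufferDesc;

static SketchBufferDesc pool[N_SKETCH_BUFFERS];
static size_t nextVictim = 0;	/* the clock hand */

/* Sweep until an unpinned buffer with usage_count zero comes up. */
size_t
sketch_clock_sweep(void)
{
	for (;;)
	{
		size_t		victim = nextVictim;
		SketchBufferDesc *buf = &pool[victim];

		nextVictim = (nextVictim + 1) % N_SKETCH_BUFFERS;	/* advance hand */

		if (buf->refcount > 0)
			continue;			/* pinned buffers are never victims */
		if (buf->usage_count > 0)
		{
			buf->usage_count--;	/* recently used: spare it this pass */
			continue;
		}
		return victim;			/* cold and unpinned: evict this one */
	}
}

A buffer whose usage_count stays above zero survives each pass, which is why the ring comment above only needs the count to be nonzero to discourage other backends from stealing ring buffers.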
diff --git a/src/backend/storage/buffer/localbuf.c b/src/backend/storage/buffer/localbuf.c
index 8816a5dfab4..f908584589c 100644
--- a/src/backend/storage/buffer/localbuf.c
+++ b/src/backend/storage/buffer/localbuf.c
@@ -95,7 +95,7 @@ LocalPrefetchBuffer(SMgrRelation smgr, ForkNumber forkNum,
  * Find or create a local buffer for the given page of the given relation.
  *
  * API is similar to bufmgr.c's BufferAlloc, except that we do not need
- * to do any locking since this is all local.	Also, IO_IN_PROGRESS
+ * to do any locking since this is all local.  Also, IO_IN_PROGRESS
  * does not get set.  Lastly, we support only default access strategy
  * (hence, usage_count is always advanced).
  */
@@ -286,7 +286,7 @@ MarkLocalBufferDirty(Buffer buffer)
  *		specified relation that have block numbers >= firstDelBlock.
  *		(In particular, with firstDelBlock = 0, all pages are removed.)
  *		Dirty pages are simply dropped, without bothering to write them
- *		out first.	Therefore, this is NOT rollback-able, and so should be
+ *		out first.  Therefore, this is NOT rollback-able, and so should be
  *		used only with extreme caution!
  *
  * See DropRelFileNodeBuffers in bufmgr.c for more notes.
@@ -413,7 +413,7 @@ GetLocalBufferStorage(void)
 	/*
 	 * We allocate local buffers in a context of their own, so that the
 	 * space eaten for them is easily recognizable in MemoryContextStats
-	 * output.	Create the context on first use.
+	 * output.  Create the context on first use.
 	 */
 	if (LocalBufferContext == NULL)
 		LocalBufferContext =
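A final aside, on the GetLocalBufferStorage hunk just above: the create-on-first-use behavior it mentions is plain lazy initialization of a dedicated arena. The sketch below is hypothetical; SketchArena, sketch_arena_create, and sketch_arena_alloc stand in for PostgreSQL's MemoryContext machinery, which is assumed rather than shown.

#include <stddef.h>

typedef struct SketchArena SketchArena;	/* opaque allocation arena */

/* Assumed stand-ins for AllocSetContextCreate()/MemoryContextAlloc(). */
extern SketchArena *sketch_arena_create(const char *name);
extern void *sketch_arena_alloc(SketchArena *arena, size_t size);

static SketchArena *LocalBufArena = NULL;

void *
sketch_get_local_buffer_storage(size_t size)
{
	/* Create the context on first use, as the comment above says. */
	if (LocalBufArena == NULL)
		LocalBufArena = sketch_arena_create("LocalBufferContext");

	return sketch_arena_alloc(LocalBufArena, size);
}

Keeping local-buffer allocations in an arena of their own is what lets their total footprint show up as a single recognizable line in MemoryContextStats output.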